Diffstat (limited to 'lib')
-rw-r--r--  lib/common_test/doc/src/ct.xml | 10
-rw-r--r--  lib/common_test/src/Makefile | 2
-rw-r--r--  lib/common_test/src/ct.erl | 17
-rw-r--r--  lib/common_test/src/test_server_ctrl.erl | 2
-rw-r--r--  lib/common_test/src/test_server_node.erl | 8
-rw-r--r--  lib/common_test/test_server/ts_erl_config.erl | 10
-rw-r--r--  lib/common_test/test_server/ts_run.erl | 2
-rw-r--r--  lib/compiler/src/beam_dead.erl | 20
-rw-r--r--  lib/compiler/src/beam_peep.erl | 6
-rw-r--r--  lib/compiler/src/beam_type.erl | 3
-rw-r--r--  lib/compiler/src/cerl_trees.erl | 7
-rw-r--r--  lib/compiler/src/compile.erl | 2
-rw-r--r--  lib/compiler/src/erl_bifs.erl | 1
-rw-r--r--  lib/compiler/src/v3_codegen.erl | 6
-rw-r--r--  lib/compiler/test/map_SUITE.erl | 18
-rw-r--r--  lib/crypto/c_src/crypto.c | 34
-rw-r--r--  lib/crypto/c_src/otp_test_engine.c | 19
-rw-r--r--  lib/crypto/doc/src/notes.xml | 22
-rw-r--r--  lib/crypto/vsn.mk | 2
-rw-r--r--  lib/debugger/src/dbg_icmd.erl | 2
-rw-r--r--  lib/debugger/src/dbg_wx_win.erl | 2
-rw-r--r--  lib/dialyzer/test/options1_SUITE_data/results/compiler | 2
-rw-r--r--  lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl | 6
-rw-r--r--  lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl | 44
-rw-r--r--  lib/edoc/src/edoc_doclet.erl | 2
-rw-r--r--  lib/hipe/cerl/erl_bif_types.erl | 6
-rw-r--r--  lib/hipe/cerl/erl_types.erl | 192
-rw-r--r--  lib/hipe/main/hipe.erl | 4
-rw-r--r--  lib/hipe/opt/hipe_schedule.erl | 1483
-rw-r--r--  lib/hipe/opt/hipe_schedule_prio.erl | 53
-rw-r--r--  lib/hipe/opt/hipe_target_machine.erl | 87
-rw-r--r--  lib/hipe/opt/hipe_ultra_mod2.erl | 233
-rw-r--r--  lib/hipe/opt/hipe_ultra_prio.erl | 298
-rw-r--r--  lib/hipe/test/Makefile | 3
-rw-r--r--  lib/hipe/test/erl_types_SUITE.erl | 197
-rw-r--r--  lib/inets/src/http_server/mod_esi.erl | 44
-rw-r--r--  lib/kernel/doc/src/Makefile | 1
-rw-r--r--  lib/kernel/doc/src/erl_epmd.xml | 104
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml | 45
-rw-r--r--  lib/kernel/doc/src/logger.xml | 409
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 35
-rw-r--r--  lib/kernel/doc/src/logger_filters.xml | 14
-rw-r--r--  lib/kernel/doc/src/logger_formatter.xml | 290
-rw-r--r--  lib/kernel/doc/src/ref_man.xml | 1
-rw-r--r--  lib/kernel/doc/src/specs.xml | 1
-rw-r--r--  lib/kernel/src/Makefile | 2
-rw-r--r--  lib/kernel/src/application_controller.erl | 12
-rw-r--r--  lib/kernel/src/erl_epmd.erl | 64
-rw-r--r--  lib/kernel/src/erl_signal_handler.erl | 11
-rw-r--r--  lib/kernel/src/error_logger.erl | 6
-rw-r--r--  lib/kernel/src/hipe_unified_loader.erl | 2
-rw-r--r--  lib/kernel/src/inet_tcp_dist.erl | 148
-rw-r--r--  lib/kernel/src/kernel.erl | 8
-rw-r--r--  lib/kernel/src/kernel_config.erl | 7
-rw-r--r--  lib/kernel/src/logger.erl | 61
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl | 4
-rw-r--r--  lib/kernel/src/logger_formatter.erl | 7
-rw-r--r--  lib/kernel/src/logger_internal.hrl | 1
-rw-r--r--  lib/kernel/src/logger_server.erl | 10
-rw-r--r--  lib/kernel/src/logger_simple.erl | 4
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 4
-rw-r--r--  lib/kernel/test/application_SUITE.erl | 12
-rw-r--r--  lib/kernel/test/erl_distribution_SUITE.erl | 2
-rw-r--r--  lib/kernel/test/heart_SUITE.erl | 8
-rw-r--r--  lib/kernel/test/kernel_config_SUITE.erl | 2
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 5
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl | 55
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl | 46
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 163
-rw-r--r--  lib/kernel/test/os_SUITE.erl | 6
-rw-r--r--  lib/observer/src/observer_lib.erl | 2
-rw-r--r--  lib/parsetools/src/yecc.erl | 8
-rw-r--r--  lib/sasl/src/sasl.erl | 1
-rw-r--r--  lib/ssh/doc/src/notes.xml | 50
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 18
-rw-r--r--  lib/ssh/src/ssh.hrl | 8
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 424
-rw-r--r--  lib/ssh/src/ssh_options.erl | 21
-rw-r--r--  lib/ssh/src/ssh_transport.erl | 8
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE.erl | 242
-rw-r--r--  lib/ssh/test/ssh_test_lib.erl | 2
-rw-r--r--  lib/ssh/vsn.mk | 3
-rw-r--r--  lib/ssl/src/inet_tls_dist.erl | 100
-rw-r--r--  lib/stdlib/doc/src/Makefile | 1
-rw-r--r--  lib/stdlib/doc/src/lib.xml | 103
-rw-r--r--  lib/stdlib/doc/src/ref_man.xml | 1
-rw-r--r--  lib/stdlib/doc/src/specs.xml | 1
-rw-r--r--  lib/stdlib/doc/src/string.xml | 2
-rw-r--r--  lib/stdlib/src/Makefile | 3
-rw-r--r--  lib/stdlib/src/erl_error.erl (renamed from lib/stdlib/src/lib.erl) | 327
-rw-r--r--  lib/stdlib/src/erl_eval.erl | 221
-rw-r--r--  lib/stdlib/src/erl_internal.erl | 2
-rw-r--r--  lib/stdlib/src/escript.erl | 2
-rw-r--r--  lib/stdlib/src/gen_event.erl | 6
-rw-r--r--  lib/stdlib/src/gen_server.erl | 2
-rw-r--r--  lib/stdlib/src/ms_transform.erl | 1
-rw-r--r--  lib/stdlib/src/proc_lib.erl | 4
-rw-r--r--  lib/stdlib/src/qlc.erl | 8
-rw-r--r--  lib/stdlib/src/shell.erl | 6
-rw-r--r--  lib/stdlib/src/slave.erl | 14
-rw-r--r--  lib/stdlib/src/stdlib.app.src | 4
-rw-r--r--  lib/stdlib/test/io_SUITE.erl | 2
-rw-r--r--  lib/stdlib/test/qlc_SUITE.erl | 6
-rw-r--r--  lib/stdlib/test/shell_SUITE.erl | 6
-rw-r--r--  lib/tools/emacs/erlang.el | 1
-rw-r--r--  lib/tools/src/lcnt.erl | 5
-rw-r--r--  lib/tools/src/xref.erl | 6
-rw-r--r--  lib/tools/test/eprof_SUITE_data/eed.erl | 6
108 files changed, 2402 insertions, 3624 deletions
diff --git a/lib/common_test/doc/src/ct.xml b/lib/common_test/doc/src/ct.xml
index afd8741cd1..3d35ae4f54 100644
--- a/lib/common_test/doc/src/ct.xml
+++ b/lib/common_test/doc/src/ct.xml
@@ -572,6 +572,16 @@
</func>
<func>
+ <name>get_progname() -&gt; string()</name>
+ <fsummary>Returns the command used to start this Erlang instance.</fsummary>
+ <desc><marker id="get_progname-0"/>
+ <p>Returns the command used to start this Erlang instance.
+ If this information could not be found, the string
+ <c>"no_prog_name"</c> is returned.</p>
+ </desc>
+ </func>
+
+ <func>
<name>get_status() -&gt; TestStatus | {error, Reason} | no_tests_running</name>
<fsummary>Returns status of ongoing test.</fsummary>
<type>
diff --git a/lib/common_test/src/Makefile b/lib/common_test/src/Makefile
index 2a2a9cb5bc..9adcf2f13b 100644
--- a/lib/common_test/src/Makefile
+++ b/lib/common_test/src/Makefile
@@ -166,4 +166,4 @@ release_tests_spec: opt
release_docs_spec: docs
# Include dependencies -- list below added by Kostis Sagonas
-$(EBIN)/cth_log_redirect.beam: ../../kernel/include/logger.hrl
+$(EBIN)/cth_log_redirect.beam: ../../kernel/include/logger.hrl ../../kernel/src/logger_internal.hrl
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index fd7fa07b81..14a9ec07cf 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -87,6 +87,7 @@
decrypt_config_file/2, decrypt_config_file/3]).
-export([get_target_name/1]).
+-export([get_progname/0]).
-export([parse_table/1, listenv/1]).
-export([remaining_test_procs/0]).
@@ -975,7 +976,20 @@ make_priv_dir() ->
%%% belongs to.
get_target_name(Handle) ->
ct_util:get_target_name(Handle).
-
+
+%%%-----------------------------------------------------------------
+%%% @doc Return the command used to start this Erlang instance.
+
+-spec get_progname() -> string().
+
+get_progname() ->
+ case init:get_argument(progname) of
+ {ok, [[Prog]]} ->
+ Prog;
+ _Other ->
+ "no_prog_name"
+ end.
+
%%%-----------------------------------------------------------------
%%% @spec parse_table(Data) -> {Heading,Table}
%%% Data = [string()]
@@ -1006,7 +1020,6 @@ parse_table(Data) ->
listenv(Telnet) ->
ct_util:listenv(Telnet).
-
%%%-----------------------------------------------------------------
%%% @spec testcases(TestDir, Suite) -> Testcases | {error,Reason}
%%% TestDir = string()
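
A minimal sketch (not part of the patch) of how the new ct:get_progname/0 behaves, assuming a standard installation where the emulator was started as plain `erl`:

    1> init:get_argument(progname).
    {ok,[["erl"]]}
    2> ct:get_progname().
    "erl"

If the -progname emulator flag is missing for some reason, the function falls back to "no_prog_name" instead of crashing, which is what the replacements for lib:progname/0 further down (test_server_node, ts_erl_config, ts_run) rely on.
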
diff --git a/lib/common_test/src/test_server_ctrl.erl b/lib/common_test/src/test_server_ctrl.erl
index 1ae6c8c7c7..67645cac08 100644
--- a/lib/common_test/src/test_server_ctrl.erl
+++ b/lib/common_test/src/test_server_ctrl.erl
@@ -4382,7 +4382,7 @@ do_format_exception(Reason={Error,Stack}) ->
PF = fun(Term, I) ->
io_lib:format("~." ++ integer_to_list(I) ++ "tp", [Term])
end,
- case catch lib:format_exception(1, error, Error, Stack, StackFun, PF, utf8) of
+ case catch erl_error:format_exception(1, error, Error, Stack, StackFun, PF, utf8) of
{'EXIT',_R} ->
{"~tp",Reason};
Formatted ->
diff --git a/lib/common_test/src/test_server_node.erl b/lib/common_test/src/test_server_node.erl
index b2d4f199c3..76588e6887 100644
--- a/lib/common_test/src/test_server_node.erl
+++ b/lib/common_test/src/test_server_node.erl
@@ -591,7 +591,7 @@ cast_to_list(X) -> lists:flatten(io_lib:format("~tw", [X])).
%%% this
%%%
pick_erl_program(default) ->
- cast_to_list(lib:progname());
+ ct:get_progname();
pick_erl_program(L) ->
P = random_element(L),
case P of
@@ -600,7 +600,7 @@ pick_erl_program(L) ->
{release, S} ->
find_release(S);
this ->
- cast_to_list(lib:progname())
+ ct:get_progname()
end.
%% This is an attempt to distinguish between spaces in the program
@@ -611,8 +611,8 @@ pick_erl_program(L) ->
%% ({prog,String}) or if the -program switch to beam is used and
%% includes arguments (typically done by cerl in OTP test environment
%% in order to ensure that slave/peer nodes are started with the same
-%% emulator and flags as the test node. The return from lib:progname()
-%% could then typically be '/<full_path_to>/cerl -gcov').
+%% emulator and flags as the test node. The return from ct:get_progname()
+%% could then typically be "/<full_path_to>/cerl -gcov").
quote_progname(Progname) ->
do_quote_progname(string:lexemes(Progname," ")).
diff --git a/lib/common_test/test_server/ts_erl_config.erl b/lib/common_test/test_server/ts_erl_config.erl
index c7fe4ccf83..e37fa844bb 100644
--- a/lib/common_test/test_server/ts_erl_config.erl
+++ b/lib/common_test/test_server/ts_erl_config.erl
@@ -358,7 +358,15 @@ link_library(_LibName,_Other) ->
%% Returns emulator specific variables.
emu_vars(Vars) ->
[{is_source_build, is_source_build()},
- {erl_name, atom_to_list(lib:progname())}|Vars].
+ {erl_name, get_progname()}|Vars].
+
+get_progname() ->
+ case init:get_argument(progname) of
+ {ok, [[Prog]]} ->
+ Prog;
+ _Other ->
+ "no_prog_name"
+ end.
is_source_build() ->
string:find(erlang:system_info(system_version), "source") =/= nomatch.
diff --git a/lib/common_test/test_server/ts_run.erl b/lib/common_test/test_server/ts_run.erl
index 3f594236bc..5dbbaca916 100644
--- a/lib/common_test/test_server/ts_run.erl
+++ b/lib/common_test/test_server/ts_run.erl
@@ -199,7 +199,7 @@ make_command(Vars, Spec, State) ->
TestPath = filename:nativename(TestDir),
Erl = case os:getenv("TS_RUN_VALGRIND") of
false ->
- atom_to_list(lib:progname());
+ ct:get_progname();
_ ->
case State#state.file of
Dir when is_list(Dir) ->
diff --git a/lib/compiler/src/beam_dead.erl b/lib/compiler/src/beam_dead.erl
index dbbaae05eb..762c7bdf9e 100644
--- a/lib/compiler/src/beam_dead.erl
+++ b/lib/compiler/src/beam_dead.erl
@@ -392,6 +392,26 @@ backward([{bif,'or',{f,To0},[Dst,{atom,false}],Dst}=I|Is], D,
_ ->
backward(Is, D, [I|Acc])
end;
+backward([{bif,map_get,{f,FF},[Key,Map],_}=I0,
+ {test,has_map_fields,{f,FT}=F,[Map|Keys0]}=I1|Is], D, Acc) when FF =/= 0 ->
+ case shortcut_label(FF, D) of
+ FT ->
+ case lists:delete(Key, Keys0) of
+ [] ->
+ backward([I0|Is], D, Acc);
+ Keys ->
+ Test = {test,has_map_fields,F,[Map|Keys]},
+ backward([Test|Is], D, [I0|Acc])
+ end;
+ _ ->
+ backward([I1|Is], D, [I0|Acc])
+ end;
+backward([{bif,map_get,{f,FF},[_,Map],_}=I0,
+ {test,is_map,{f,FT},[Map]}=I1|Is], D, Acc) when FF =/= 0 ->
+ case shortcut_label(FF, D) of
+ FT -> backward([I0|Is], D, Acc);
+ _ -> backward([I1|Is], D, [I0|Acc])
+ end;
backward([I|Is], D, Acc) ->
backward(Is, D, [I|Acc]);
backward([], _D, Acc) -> Acc.
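
backward/3 sees the instruction stream in reverse, so in program order the two new clauses match a has_map_fields (or is_map) test followed by a map_get on the same map. If the map_get's failure label shortcuts to the same target as the test, the test is redundant, because map_get itself branches there when the map or key is wrong, so the matched key is dropped from the test (or the whole test is removed). A rough source-level sketch of the guard sequence this targets, the same shape as the map_field_check_sequence/1 case added to map_SUITE below:

    %% Sketch only: after this pass the separate is_map/has_map_fields tests
    %% implied by the guard can be dropped, leaving the map_get to do the work.
    check(M) when is_map(M) andalso is_map_key(a, M) andalso map_get(a, M) =:= 1 ->
        true;
    check(_) ->
        false.
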
diff --git a/lib/compiler/src/beam_peep.erl b/lib/compiler/src/beam_peep.erl
index eb3192fe8f..920fb00397 100644
--- a/lib/compiler/src/beam_peep.erl
+++ b/lib/compiler/src/beam_peep.erl
@@ -77,6 +77,12 @@ peep([{bif,tuple_size,_,[_]=Ops,Dst}=I|Is], SeenTests0, Acc) ->
%% Kill all remembered tests that depend on the destination register.
SeenTests = kill_seen(Dst, SeenTests1),
peep(Is, SeenTests, [I|Acc]);
+peep([{bif,map_get,_,[Key,Map],Dst}=I|Is], SeenTests0, Acc) ->
+ %% Pretend that we have seen {test,has_map_fields,_,[Map,Key]}
+ SeenTests1 = gb_sets:add({has_map_fields,[Map,Key]}, SeenTests0),
+ %% Kill all remembered tests that depend on the destination register.
+ SeenTests = kill_seen(Dst, SeenTests1),
+ peep(Is, SeenTests, [I|Acc]);
peep([{bif,_,_,_,Dst}=I|Is], SeenTests0, Acc) ->
%% Kill all remembered tests that depend on the destination register.
SeenTests = kill_seen(Dst, SeenTests0),
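
beam_peep now records a map_get as if the corresponding {test,has_map_fields,_,[Map,Key]} had been seen, so a later identical has_map_fields test on the same map and key (for example, one generated from is_map_key/2) is recognised as already satisfied and removed. A deliberately redundant sketch of the pattern, not taken from the patch:

    %% After the map_get succeeds, the is_map_key test on the same key adds nothing.
    get_a(M) when map_get(a, M) =:= 1, is_map_key(a, M) -> ok;
    get_a(_) -> error.
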
diff --git a/lib/compiler/src/beam_type.erl b/lib/compiler/src/beam_type.erl
index 28f36db399..12da8c9446 100644
--- a/lib/compiler/src/beam_type.erl
+++ b/lib/compiler/src/beam_type.erl
@@ -462,6 +462,9 @@ update({set,[D],[Index,Reg],{bif,element,_}}, Ts0) ->
end,
Ts = tdb_meet(Reg, {tuple,min_size,MinSize,[]}, Ts0),
tdb_store(D, any, Ts);
+update({set,[D],[_Key,Map],{bif,map_get,_}}, Ts0) ->
+ Ts = tdb_meet(Map, map, Ts0),
+ tdb_store(D, any, Ts);
update({set,[D],Args,{bif,N,_}}, Ts) ->
Ar = length(Args),
BoolOp = erl_internal:new_type_test(N, Ar) orelse
diff --git a/lib/compiler/src/cerl_trees.erl b/lib/compiler/src/cerl_trees.erl
index c7a129b42c..533c984221 100644
--- a/lib/compiler/src/cerl_trees.erl
+++ b/lib/compiler/src/cerl_trees.erl
@@ -351,10 +351,9 @@ mapfold(F, S0, T) ->
mapfold(fun(T0, A) -> {T0, A} end, F, S0, T).
-%% @spec mapfold(Pre, Post, Initial::term(), Tree::cerl()) ->
-%% {cerl(), term()}
-%%
-%% Pre = Post = (cerl(), term()) -> {cerl(), term()}
+%% @spec mapfold(Pre, Post, Initial::term(), Tree::cerl()) -> {cerl(), term()}
+%% Pre = (cerl(), term()) -> {cerl(), term()}
+%% Post = (cerl(), term()) -> {cerl(), term()}
%%
%% @doc Does a combined map/fold operation on the nodes of the
%% tree. It begins by calling <code>Pre</code> on the tree, using the
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index c6a0056a70..a37b2064b2 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -295,7 +295,7 @@ format_error_reason({Reason, Stack}) when is_list(Stack) ->
end,
FormatFun = fun (Term, _) -> io_lib:format("~tp", [Term]) end,
[io_lib:format("~tp", [Reason]),"\n\n",
- lib:format_stacktrace(1, Stack, StackFun, FormatFun)];
+ erl_error:format_stacktrace(1, Stack, StackFun, FormatFun)];
format_error_reason(Reason) ->
io_lib:format("~tp", [Reason]).
diff --git a/lib/compiler/src/erl_bifs.erl b/lib/compiler/src/erl_bifs.erl
index 70b36f029e..a7452aebc8 100644
--- a/lib/compiler/src/erl_bifs.erl
+++ b/lib/compiler/src/erl_bifs.erl
@@ -94,6 +94,7 @@ is_pure(erlang, is_function, 1) -> true;
is_pure(erlang, is_integer, 1) -> true;
is_pure(erlang, is_list, 1) -> true;
is_pure(erlang, is_map, 1) -> true;
+is_pure(erlang, is_map_key, 2) -> true;
is_pure(erlang, is_number, 1) -> true;
is_pure(erlang, is_pid, 1) -> true;
is_pure(erlang, is_port, 1) -> true;
diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl
index 8e73b613a0..9652a8476d 100644
--- a/lib/compiler/src/v3_codegen.erl
+++ b/lib/compiler/src/v3_codegen.erl
@@ -589,6 +589,7 @@ is_gc_bif(element, 2) -> false;
is_gc_bif(get, 1) -> false;
is_gc_bif(tuple_size, 1) -> false;
is_gc_bif(map_get, 2) -> false;
+is_gc_bif(is_map_key, 2) -> false;
is_gc_bif(Bif, Arity) ->
not (erl_internal:bool_op(Bif, Arity) orelse
erl_internal:new_type_test(Bif, Arity) orelse
@@ -1620,6 +1621,11 @@ test_cg(is_boolean, [#k_atom{val=Val}], Fail, I, Vdb, Bef, St) ->
false -> [{jump,{f,Fail}}]
end,
{Is,Aft,St};
+test_cg(is_map_key, As, Fail, I, Vdb, Bef, St) ->
+ [Key,Map] = cg_reg_args(As, Bef),
+ Aft = clear_dead(Bef, I, Vdb),
+ F = {f,Fail},
+ {[{test,is_map,F,[Map]},{test,has_map_fields,F,Map,{list,[Key]}}],Aft,St};
test_cg(Test, As, Fail, I, Vdb, Bef, St) ->
Args = cg_reg_args(As, Bef),
Aft = clear_dead(Bef, I, Vdb),
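
The new test_cg/7 clause shows how an is_map_key/2 guard is lowered: an is_map test on the map followed by a has_map_fields test for the single key, both branching to the guard's failure label, with no BIF call at all. So a function head like this hypothetical one compiles to just those two tests, which beam_dead/beam_peep above may then prune further:

    has_name(M) when is_map_key(name, M) -> true;
    has_name(_) -> false.
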
diff --git a/lib/compiler/test/map_SUITE.erl b/lib/compiler/test/map_SUITE.erl
index e98c295da6..6badc7a8b8 100644
--- a/lib/compiler/test/map_SUITE.erl
+++ b/lib/compiler/test/map_SUITE.erl
@@ -1203,12 +1203,18 @@ t_guard_bifs(Config) when is_list(Config) ->
true = map_guard_empty_2(),
true = map_guard_head(#{a=>1}),
false = map_guard_head([]),
+ true = map_get_head(#{a=>1}),
+ false = map_get_head([]),
+ true = map_is_key_head(#{a=>1}),
+ false = map_is_key_head(#{}),
true = map_guard_body(#{a=>1}),
false = map_guard_body({}),
true = map_guard_pattern(#{a=>1, <<"hi">> => "hi" }),
false = map_guard_pattern("list"),
true = map_guard_tautology(),
true = map_guard_ill_map_size(),
+ true = map_field_check_sequence(#{a=>1}),
+ false = map_field_check_sequence(#{}),
ok.
map_guard_empty() when is_map(#{}); false -> true.
@@ -1218,6 +1224,12 @@ map_guard_empty_2() when true; #{} andalso false -> true.
map_guard_head(M) when is_map(M) -> true;
map_guard_head(_) -> false.
+map_get_head(M) when map_get(a, M) =:= 1 -> true;
+map_get_head(_) -> false.
+
+map_is_key_head(M) when is_map_key(a, M) -> true;
+map_is_key_head(_) -> false.
+
map_guard_body(M) -> is_map(M).
map_guard_pattern(#{}) -> true;
@@ -1227,6 +1239,12 @@ map_guard_tautology() when #{} =:= #{}; true -> true.
map_guard_ill_map_size() when true; map_size(0) -> true.
+map_field_check_sequence(M)
+ when is_map(M) andalso is_map_key(a, M) andalso (map_get(a, M) == 1) ->
+ true;
+map_field_check_sequence(_) ->
+ false.
+
t_guard_sequence(Config) when is_list(Config) ->
{1, "a"} = map_guard_sequence_1(#{seq=>1,val=>id("a")}),
{2, "b"} = map_guard_sequence_1(#{seq=>2,val=>id("b")}),
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index dbb6bf8135..df4e2245f4 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -60,7 +60,6 @@
#include <openssl/rand.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
-#include <openssl/engine.h>
#include <openssl/err.h>
/* Helper macro to construct a OPENSSL_VERSION_NUMBER.
@@ -102,8 +101,10 @@
# undef FIPS_SUPPORT
# endif
+# if LIBRESSL_VERSION_NUMBER < PACKED_OPENSSL_VERSION_PLAIN(2,7,0)
/* LibreSSL wants the 1.0.1 API */
# define NEED_EVP_COMPATIBILITY_FUNCTIONS
+# endif
#endif
@@ -112,8 +113,10 @@
#endif
-#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,0,0)
-# define HAS_EVP_PKEY_CTX
+#ifndef HAS_LIBRESSL
+# if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,0,0)
+# define HAS_EVP_PKEY_CTX
+# endif
#endif
@@ -121,10 +124,6 @@
#include <openssl/modes.h>
#endif
-#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'h')
-#define HAS_ENGINE_SUPPORT
-#endif
-
#include "crypto_callback.h"
#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(0,9,8) \
@@ -185,6 +184,19 @@
# undef HAVE_RSA_SSLV23_PADDING
#endif
+#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'h') \
+ && defined(HAVE_EC)
+/* If OPENSSL_NO_EC is set, there will be an error in ec.h, which is included
+   from engine.h. So if EC is disabled, Engine support cannot be used either.
+*/
+# define HAS_ENGINE_SUPPORT
+#endif
+
+
+#if defined(HAS_ENGINE_SUPPORT)
+# include <openssl/engine.h>
+#endif
+
#if defined(HAVE_CMAC)
#include <openssl/cmac.h>
#endif
@@ -500,7 +512,6 @@ static ERL_NIF_TERM aes_gcm_decrypt_NO_EVP(ErlNifEnv* env, int argc, const ERL_N
static ERL_NIF_TERM chacha20_poly1305_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM chacha20_poly1305_decrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
-static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, char **cmds, int i);
static ERL_NIF_TERM engine_by_id_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM engine_init_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM engine_finish_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
@@ -528,6 +539,7 @@ static int term2point(ErlNifEnv* env, ERL_NIF_TERM term,
static ERL_NIF_TERM bin_from_bn(ErlNifEnv* env, const BIGNUM *bn);
#ifdef HAS_ENGINE_SUPPORT
+static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, char **cmds, int i);
static int zero_terminate(ErlNifBinary bin, char **buf);
#endif
@@ -5407,9 +5419,9 @@ static ERL_NIF_TERM engine_get_id_nif(ErlNifEnv* env, int argc, const ERL_NIF_TE
#endif
}
+#ifdef HAS_ENGINE_SUPPORT
static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, char **cmds, int i)
{
-#ifdef HAS_ENGINE_SUPPORT
ERL_NIF_TERM head, tail;
const ERL_NIF_TERM *tmp_tuple;
ErlNifBinary tmpbin;
@@ -5454,10 +5466,8 @@ static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, cha
cmds[i] = NULL;
return 0;
}
-#else
- return atom_notsup;
-#endif
}
+#endif
static ERL_NIF_TERM engine_get_all_methods_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* () */
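
With this rearrangement HAS_ENGINE_SUPPORT is only defined when EC support is available, so a crypto NIF built against an OPENSSL_NO_EC library simply omits the engine functionality instead of failing to compile. From the Erlang side that can only be detected at runtime; a defensive probe could look roughly like the sketch below (crypto:engine_list/0 is the documented entry point; exactly which error it raises without engine support is an assumption here, so the catch is deliberately broad):

    %% Rough runtime probe: true if the loaded crypto NIF was built with engine support.
    has_engine_support() ->
        try
            _ = crypto:engine_list(),
            true
        catch
            _:_ -> false    %% notsup/badarg when engines are unavailable
        end.
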
diff --git a/lib/crypto/c_src/otp_test_engine.c b/lib/crypto/c_src/otp_test_engine.c
index 5c6122c06a..d0e23a2a3e 100644
--- a/lib/crypto/c_src/otp_test_engine.c
+++ b/lib/crypto/c_src/otp_test_engine.c
@@ -24,10 +24,8 @@
#include <stdio.h>
#include <string.h>
-#include <openssl/engine.h>
#include <openssl/md5.h>
#include <openssl/rsa.h>
-#include <openssl/pem.h>
#define PACKED_OPENSSL_VERSION(MAJ, MIN, FIX, P) \
((((((((MAJ << 8) | MIN) << 8 ) | FIX) << 8) | (P-'a'+1)) << 4) | 0xf)
@@ -40,6 +38,21 @@
#define OLD
#endif
+#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'o') \
+ && !defined(OPENSSL_NO_EC) \
+ && !defined(OPENSSL_NO_ECDH) \
+ && !defined(OPENSSL_NO_ECDSA)
+# define HAVE_EC
+#endif
+
+#if defined(HAVE_EC)
+/* If OPENSSL_NO_EC is set, there will be an error in ec.h, which is included
+   from engine.h. So if EC is disabled, Engine support cannot be used either.
+*/
+#include <openssl/engine.h>
+#include <openssl/pem.h>
+
+
static const char *test_engine_id = "MD5";
static const char *test_engine_name = "MD5 test engine";
@@ -262,3 +275,5 @@ int pem_passwd_cb_fun(char *buf, int size, int rwflag, void *password)
return 0;
}
}
+
+#endif
diff --git a/lib/crypto/doc/src/notes.xml b/lib/crypto/doc/src/notes.xml
index 1f788a4e35..66619c9e11 100644
--- a/lib/crypto/doc/src/notes.xml
+++ b/lib/crypto/doc/src/notes.xml
@@ -31,6 +31,28 @@
</header>
<p>This document describes the changes made to the Crypto application.</p>
+<section><title>Crypto 4.2.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ If OPENSSL_NO_EC was set, the compilation of the crypto
+ nifs failed.</p>
+ <p>
+ Own Id: OTP-15073</p>
+ </item>
+ <item>
+ <p>
+ C-compile errors for LibreSSL 2.7.0 - 2.7.2 fixed</p>
+ <p>
+ Own Id: OTP-15074 Aux Id: ERL-618 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Crypto 4.2.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/crypto/vsn.mk b/lib/crypto/vsn.mk
index 3432f00836..778aff9d13 100644
--- a/lib/crypto/vsn.mk
+++ b/lib/crypto/vsn.mk
@@ -1 +1 @@
-CRYPTO_VSN = 4.2.1
+CRYPTO_VSN = 4.2.2
diff --git a/lib/debugger/src/dbg_icmd.erl b/lib/debugger/src/dbg_icmd.erl
index 4cd3dce670..55cbada53b 100644
--- a/lib/debugger/src/dbg_icmd.erl
+++ b/lib/debugger/src/dbg_icmd.erl
@@ -467,7 +467,7 @@ mark_break(Cm, LineNo, Le) ->
parse_cmd(Cmd, LineNo) ->
{ok,Tokens,_} = erl_scan:string(Cmd, LineNo, [text]),
- {ok,Forms,Bs} = lib:extended_parse_exprs(Tokens),
+ {ok,Forms,Bs} = erl_eval:extended_parse_exprs(Tokens),
{Forms, Bs}.
%%====================================================================
diff --git a/lib/debugger/src/dbg_wx_win.erl b/lib/debugger/src/dbg_wx_win.erl
index f1298154ab..fea94156c1 100644
--- a/lib/debugger/src/dbg_wx_win.erl
+++ b/lib/debugger/src/dbg_wx_win.erl
@@ -275,7 +275,7 @@ entry(Parent, Title, Prompt, {Type, Value}) ->
verify(Type, Str) ->
case erl_scan:string(Str, 1, [text]) of
{ok, Tokens, _EndLine} when Type==term ->
- case lib:extended_parse_term(Tokens++[{dot, erl_anno:new(1)}]) of
+ case erl_eval:extended_parse_term(Tokens++[{dot, erl_anno:new(1)}]) of
{ok, Value} -> {edit, Value};
_Error ->
ignore
diff --git a/lib/dialyzer/test/options1_SUITE_data/results/compiler b/lib/dialyzer/test/options1_SUITE_data/results/compiler
index cbb5115c91..e1dc038800 100644
--- a/lib/dialyzer/test/options1_SUITE_data/results/compiler
+++ b/lib/dialyzer/test/options1_SUITE_data/results/compiler
@@ -28,7 +28,7 @@ cerl_inline.erl:2750: The pattern <{[], L, D}, Vs> can never match the type <[1.
cerl_inline.erl:2752: The pattern <{[], _L, D}, Vs> can never match the type <[1..255,...],[any()]>
cerl_inline.erl:2754: The pattern <{F, L, D}, Vs> can never match the type <[1..255,...],[any()]>
cerl_inline.erl:2756: The pattern <{F, _L, D}, Vs> can never match the type <[1..255,...],[any()]>
-compile.erl:788: The pattern {'error', Es} can never match the type {'ok',<<_:64,_:_*8>>}
+compile.erl:792: The pattern {'error', Es} can never match the type {'ok',<<_:64,_:_*8>>}
core_lint.erl:473: The pattern <{'c_atom', _, 'all'}, 'binary', _Def, St> can never match the type <_,#c_nil{} | {'c_atom' | 'c_char' | 'c_float' | 'c_int' | 'c_string' | 'c_tuple',_,_} | #c_cons{hd::#c_nil{} | {'c_atom' | 'c_char' | 'c_float' | 'c_int' | 'c_string' | 'c_tuple',_,_} | #c_cons{hd::{_,_} | {_,_,_} | {_,_,_,_},tl::{_,_} | {_,_,_} | {_,_,_,_}},tl::#c_nil{} | {'c_atom' | 'c_char' | 'c_float' | 'c_int' | 'c_string' | 'c_tuple',_,_} | #c_cons{hd::{_,_} | {_,_,_} | {_,_,_,_},tl::{_,_} | {_,_,_} | {_,_,_,_}}},[any()],_>
core_lint.erl:505: The pattern <_Req, 'unknown', St> can never match the type <non_neg_integer(),non_neg_integer(),_>
sys_pre_expand.erl:625: Call to missing or unexported function erlang:hash/2
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
index 7e5ccde2fd..6838cf6734 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
@@ -228,11 +228,15 @@ os_process_size() ->
case os:type() of
{unix, sunos} ->
Size = os:cmd("ps -o vsz -p " ++ os:getpid() ++ " | tail -1"),
- list_to_integer(lib:nonl(Size));
+ list_to_integer(nonl(Size));
_ ->
0
end.
+nonl([$\n]) -> [];
+nonl([]) -> [];
+nonl([H|T]) -> [H|nonl(T)].
+
run_tc({Name,Fun}, St) ->
Before0 = statistics(runtime),
Val = (catch Fun(St)),
diff --git a/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl b/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl
index a48f73274b..ce144e061f 100644
--- a/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl
+++ b/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl
@@ -285,7 +285,7 @@ eval(Info,"GET",CGIBody,Modules) ->
"~n Modules: ~p",[Modules]),
case auth(CGIBody,Modules) of
true ->
- case lib:eval_str(string:concat(CGIBody,". ")) of
+ case eval_str(string:concat(CGIBody,". ")) of
{error,Reason} ->
?vlog("eval -> error:"
"~n Reason: ~p",[Reason]),
@@ -318,6 +318,48 @@ auth(CGIBody,Modules) ->
false
end.
+%% eval_str(InStr) -> {ok, OutStr} | {error, ErrStr'}
+%% InStr must represent a body
+%% Note: If InStr is a binary it has to be a Latin-1 string.
+%% If you have a UTF-8 encoded binary you have to call
+%% unicode:characters_to_list/1 before the call to eval_str().
+
+-define(result(F,D), lists:flatten(io_lib:format(F, D))).
+
+-spec eval_str(string() | unicode:latin1_binary()) ->
+ {'ok', string()} | {'error', string()}.
+
+eval_str(Str) when is_list(Str) ->
+ case erl_scan:tokens([], Str, 0) of
+ {more, _} ->
+ {error, "Incomplete form (missing .<cr>)??"};
+ {done, {ok, Toks, _}, Rest} ->
+ case all_white(Rest) of
+ true ->
+ case erl_parse:parse_exprs(Toks) of
+ {ok, Exprs} ->
+ case catch erl_eval:exprs(Exprs, erl_eval:new_bindings()) of
+ {value, Val, _} ->
+ {ok, Val};
+ Other ->
+ {error, ?result("*** eval: ~p", [Other])}
+ end;
+ {error, {_Line, Mod, Args}} ->
+ Msg = ?result("*** ~ts",[Mod:format_error(Args)]),
+ {error, Msg}
+ end;
+ false ->
+ {error, ?result("Non-white space found after "
+ "end-of-form :~ts", [Rest])}
+ end
+ end.
+
+all_white([$\s|T]) -> all_white(T);
+all_white([$\n|T]) -> all_white(T);
+all_white([$\t|T]) -> all_white(T);
+all_white([]) -> true;
+all_white(_) -> false.
+
%%----------------------------------------------------------------------
%%Creates the environment list that will be the first arg to the
%%Functions that is called through the ErlScript Schema
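
The r9c dialyzer test data now carries its own copy of lib:eval_str/1, since the lib module is being retired (see the lib.erl to erl_error.erl rename in the diffstat). Per the copy above, a complete form evaluates to {ok,Value} and an unterminated one is rejected; purely as an illustration (the function is local to the module, not exported):

    eval_str("1 + 2. ")    %% -> {ok,3}
    eval_str("1 + 2")      %% -> {error,"Incomplete form (missing .<cr>)??"}
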
diff --git a/lib/edoc/src/edoc_doclet.erl b/lib/edoc/src/edoc_doclet.erl
index f55cffe158..6cb3095507 100644
--- a/lib/edoc/src/edoc_doclet.erl
+++ b/lib/edoc/src/edoc_doclet.erl
@@ -40,7 +40,7 @@
-import(edoc_report, [report/2, warning/2]).
-%% @headerfile "edoc_doclet.hrl"
+%% @headerfile "../include/edoc_doclet.hrl"
-include("../include/edoc_doclet.hrl").
-define(EDOC_APP, edoc).
diff --git a/lib/hipe/cerl/erl_bif_types.erl b/lib/hipe/cerl/erl_bif_types.erl
index fe6ab0659c..48ce641ab9 100644
--- a/lib/hipe/cerl/erl_bif_types.erl
+++ b/lib/hipe/cerl/erl_bif_types.erl
@@ -665,6 +665,8 @@ type(erlang, is_map, 1, Xs, Opaques) ->
check_guard(X, fun (Y) -> t_is_map(Y, Opaques) end,
t_map(), Opaques) end,
strict(erlang, is_map, 1, Xs, Fun, Opaques);
+type(erlang, is_map_key, 2, Xs, Opaques) ->
+ type(maps, is_key, 2, Xs, Opaques);
type(erlang, is_number, 1, Xs, Opaques) ->
Fun = fun (X) ->
check_guard(X, fun (Y) -> t_is_number(Y, Opaques) end,
@@ -2374,6 +2376,8 @@ arg_types(erlang, is_list, 1) ->
[t_any()];
arg_types(erlang, is_map, 1) ->
[t_any()];
+arg_types(erlang, is_map_key, 2) ->
+ [t_any(), t_map()];
arg_types(erlang, is_number, 1) ->
[t_any()];
arg_types(erlang, is_pid, 1) ->
@@ -2396,7 +2400,7 @@ arg_types(erlang, map_size, 1) ->
[t_map()];
%% Guard bif, needs to be here.
arg_types(erlang, map_get, 2) ->
- [t_map(), t_any()];
+ [t_any(), t_map()];
arg_types(erlang, make_fun, 3) ->
[t_atom(), t_atom(), t_arity()];
arg_types(erlang, make_tuple, 2) ->
diff --git a/lib/hipe/cerl/erl_types.erl b/lib/hipe/cerl/erl_types.erl
index a91da97f93..9abb4d31d9 100644
--- a/lib/hipe/cerl/erl_types.erl
+++ b/lib/hipe/cerl/erl_types.erl
@@ -108,13 +108,14 @@
t_is_bitstr/1, t_is_bitstr/2,
t_is_bitwidth/1,
t_is_boolean/1, t_is_boolean/2,
- %% t_is_byte/1,
- %% t_is_char/1,
+ t_is_byte/1,
+ t_is_char/1,
t_is_cons/1, t_is_cons/2,
t_is_equal/2,
t_is_fixnum/1,
t_is_float/1, t_is_float/2,
t_is_fun/1, t_is_fun/2,
+ t_is_identifier/1,
t_is_instance/2,
t_is_integer/1, t_is_integer/2,
t_is_list/1,
@@ -216,19 +217,8 @@
cache__new/0
]).
-%%-define(DO_ERL_TYPES_TEST, true).
-compile({no_auto_import,[min/2,max/2,map_get/2]}).
--ifdef(DO_ERL_TYPES_TEST).
--export([test/0]).
--else.
--define(NO_UNUSED, true).
--endif.
-
--ifndef(NO_UNUSED).
--export([t_is_identifier/1]).
--endif.
-
-export_type([erl_type/0, opaques/0, type_table/0,
var_table/0, cache/0]).
@@ -1190,12 +1180,10 @@ is_fun(_) -> false.
t_identifier() ->
?identifier(?any).
--ifdef(DO_ERL_TYPES_TEST).
--spec t_is_identifier(erl_type()) -> erl_type().
+-spec t_is_identifier(erl_type()) -> boolean().
t_is_identifier(?identifier(_)) -> true;
t_is_identifier(_) -> false.
--endif.
%%------------------------------------
@@ -1366,7 +1354,6 @@ is_integer1(_) -> false.
t_byte() ->
?byte.
--ifdef(DO_ERL_TYPES_TEST).
-spec t_is_byte(erl_type()) -> boolean().
t_is_byte(?int_range(neg_inf, _)) -> false;
@@ -1376,7 +1363,6 @@ t_is_byte(?int_range(From, To))
t_is_byte(?int_set(Set)) ->
(set_min(Set) >= 0) andalso (set_max(Set) =< ?MAX_BYTE);
t_is_byte(_) -> false.
--endif.
%%------------------------------------
@@ -5693,173 +5679,3 @@ family(L) ->
var_table__new() ->
maps:new().
-
-%%=============================================================================
-%% Consistency-testing function(s) below
-%%=============================================================================
-
--ifdef(DO_ERL_TYPES_TEST).
-
-test() ->
- Atom1 = t_atom(),
- Atom2 = t_atom(foo),
- Atom3 = t_atom(bar),
- true = t_is_atom(Atom2),
-
- True = t_atom(true),
- False = t_atom(false),
- Bool = t_boolean(),
- true = t_is_boolean(True),
- true = t_is_boolean(Bool),
- false = t_is_boolean(Atom1),
-
- Binary = t_binary(),
- true = t_is_binary(Binary),
-
- Bitstr = t_bitstr(),
- true = t_is_bitstr(Bitstr),
-
- Bitstr1 = t_bitstr(7, 3),
- true = t_is_bitstr(Bitstr1),
- false = t_is_binary(Bitstr1),
-
- Bitstr2 = t_bitstr(16, 8),
- true = t_is_bitstr(Bitstr2),
- true = t_is_binary(Bitstr2),
-
- ?bitstr(8, 16) = t_subtract(t_bitstr(4, 12), t_bitstr(8, 12)),
- ?bitstr(8, 16) = t_subtract(t_bitstr(4, 12), t_bitstr(8, 12)),
-
- Int1 = t_integer(),
- Int2 = t_integer(1),
- Int3 = t_integer(16#ffffffff),
- true = t_is_integer(Int2),
- true = t_is_byte(Int2),
- false = t_is_byte(Int3),
- false = t_is_byte(t_from_range(-1, 1)),
- true = t_is_byte(t_from_range(1, ?MAX_BYTE)),
-
- Tuple1 = t_tuple(),
- Tuple2 = t_tuple(3),
- Tuple3 = t_tuple([Atom1, Int1]),
- Tuple4 = t_tuple([Tuple1, Tuple2]),
- Tuple5 = t_tuple([Tuple3, Tuple4]),
- Tuple6 = t_limit(Tuple5, 2),
- Tuple7 = t_limit(Tuple5, 3),
- true = t_is_tuple(Tuple1),
-
- Port = t_port(),
- Pid = t_pid(),
- Ref = t_reference(),
- Identifier = t_identifier(),
- false = t_is_reference(Port),
- true = t_is_identifier(Port),
-
- Function1 = t_fun(),
- Function2 = t_fun(Pid),
- Function3 = t_fun([], Pid),
- Function4 = t_fun([Port, Pid], Pid),
- Function5 = t_fun([Pid, Atom1], Int2),
- true = t_is_fun(Function3),
-
- List1 = t_list(),
- List2 = t_list(t_boolean()),
- List3 = t_cons(t_boolean(), List2),
- List4 = t_cons(t_boolean(), t_atom()),
- List5 = t_cons(t_boolean(), t_nil()),
- List6 = t_cons_tl(List5),
- List7 = t_sup(List4, List5),
- List8 = t_inf(List7, t_list()),
- List9 = t_cons(),
- List10 = t_cons_tl(List9),
- true = t_is_boolean(t_cons_hd(List5)),
- true = t_is_list(List5),
- false = t_is_list(List4),
-
- Product1 = t_product([Atom1, Atom2]),
- Product2 = t_product([Atom3, Atom1]),
- Product3 = t_product([Atom3, Atom2]),
-
- Union1 = t_sup(Atom2, Atom3),
- Union2 = t_sup(Tuple2, Tuple3),
- Union3 = t_sup(Int2, Atom3),
- Union4 = t_sup(Port, Pid),
- Union5 = t_sup(Union4, Int1),
- Union6 = t_sup(Function1, Function2),
- Union7 = t_sup(Function4, Function5),
- Union8 = t_sup(True, False),
- true = t_is_boolean(Union8),
- Union9 = t_sup(Int2, t_integer(2)),
- true = t_is_byte(Union9),
- Union10 = t_sup(t_tuple([t_atom(true), ?any]),
- t_tuple([t_atom(false), ?any])),
-
- ?any = t_sup(Product3, Function5),
-
- Atom3 = t_inf(Union3, Atom1),
- Union2 = t_inf(Union2, Tuple1),
- Int2 = t_inf(Int1, Union3),
- Union4 = t_inf(Union4, Identifier),
- Port = t_inf(Union5, Port),
- Function4 = t_inf(Union7, Function4),
- ?none = t_inf(Product2, Atom1),
- Product3 = t_inf(Product1, Product2),
- Function5 = t_inf(Union7, Function5),
- true = t_is_byte(t_inf(Union9, t_number())),
- true = t_is_char(t_inf(Union9, t_number())),
-
- io:format("3? ~p ~n", [?int_set([3])]),
-
- RecDict = dict:store({foo, 2}, [bar, baz], dict:new()),
- Record1 = t_from_term({foo, [1,2], {1,2,3}}),
-
- Types = [
- Atom1,
- Atom2,
- Atom3,
- Binary,
- Int1,
- Int2,
- Tuple1,
- Tuple2,
- Tuple3,
- Tuple4,
- Tuple5,
- Tuple6,
- Tuple7,
- Ref,
- Port,
- Pid,
- Identifier,
- List1,
- List2,
- List3,
- List4,
- List5,
- List6,
- List7,
- List8,
- List9,
- List10,
- Function1,
- Function2,
- Function3,
- Function4,
- Function5,
- Product1,
- Product2,
- Record1,
- Union1,
- Union2,
- Union3,
- Union4,
- Union5,
- Union6,
- Union7,
- Union8,
- Union10,
- t_inf(Union10, t_tuple([t_atom(true), t_integer()]))
- ],
- io:format("~p\n", [[t_to_string(X, RecDict) || X <- Types]]).
-
--endif.
diff --git a/lib/hipe/main/hipe.erl b/lib/hipe/main/hipe.erl
index 97814fe217..5e6a60326d 100644
--- a/lib/hipe/main/hipe.erl
+++ b/lib/hipe/main/hipe.erl
@@ -852,8 +852,8 @@ finalize_fun_sequential({MFA, Icode}, Opts, Servers) ->
print_crash_message(What, Error, StackTrace) ->
StackFun = fun(_,_,_) -> false end,
FormatFun = fun (Term, _) -> io_lib:format("~p", [Term]) end,
- StackTrace = lib:format_stacktrace(1, StackTrace,
- StackFun, FormatFun),
+ StackTrace = erl_error:format_stacktrace(1, StackTrace,
+ StackFun, FormatFun),
WhatS = case What of
{M,F,A} -> io_lib:format("~w:~w/~w", [M,F,A]);
Mod -> io_lib:format("~w", [Mod])
diff --git a/lib/hipe/opt/hipe_schedule.erl b/lib/hipe/opt/hipe_schedule.erl
deleted file mode 100644
index 0f25940e3d..0000000000
--- a/lib/hipe/opt/hipe_schedule.erl
+++ /dev/null
@@ -1,1483 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% INSTRUCTION SCHEDULER
-%%
-%% This is a basic ILP cycle scheduler:
-%% * set cycle = 0
-%% * while ready[cycle] nonempty do
-%% - take x with greatest priority from ready[cycle]
-%% - try to schedule x;
-%% * if scheduling x was possible,
-%% - reserve resources
-%% - add x to schedule and delete x from dag
-%% - update earliest-time for all successor nodes
-%% as max[earliest[y],cycle+latency[x]]
-%% - if some node y now has no predecessors,
-%% add y to ready[earliest[y]]
-%% * if it was impossible, put x in ready[cycle+1]
-%% (= try again)
-%%
-%% We use the following data structures:
-%% 1. all nodes are numbered and indices used as array keys
-%% 2. priority per node can be computed statically or dynamically
-%% * statically: before scheduling, each node gets a priority value
-%% * dynamically: at each cycle, compute priorities for all ready nodes
-%% 3. earliest: earliest cycle of issue, starts at 0
-%% and is updated as predecessors issue
-%% 4. predecessors: number of predecessors (0 = ready to issue)
-%% 5. successors: list of {Latency,NodeID}
-%% 6. ready: an array indexed by cycle-time (integer), where
-%% ready nodes are kept.
-%% 7. resources: a resource representation (ADT) that answers
-%% certain queries, e.g., "can x be scheduled this cycle"
-%% and "reserve resources for x".
-%% 8. schedule: list of scheduled instructions {Instr,Cycle}
-%% in the order of issue
-%% 9. instructions: maps IDs back to instructions
-%%
-%% Inputs:
-%% - a list of {ID,Node} pairs (where ID is a unique key)
-%% - a dependence list {ID0,Latency,ID1}, which is used to
-%% build the DAG.
-%%
-%% Note that there is some leeway in how things are represented
-%% from here.
-%%
-%% MODIFICATIONS:
-%% - Some basic blocks are not worth scheduling (e.g., GC save/restore code)
-%% yet are pretty voluminous. How do we skip them?
-%% - Scheduling should be done at finalization time: when basic block is
-%% linearized and is definitely at Sparc assembly level, THEN reorder
-%% stuff.
-
--module(hipe_schedule).
--export([cfg/1, est_cfg/1, delete_node/5]).
-
--include("../sparc/hipe_sparc.hrl").
-
-%%-define(debug1,true).
-
--define(debug2(Str,Args),ok).
-%%-define(debug2(Str,Args),io:format(Str,Args)).
-
--define(debug3(Str,Args),ok).
-%%-define(debug3(Str,Args),io:format(Str,Args)).
-
--define(debug4(Str,Args),ok).
-%%-define(debug4(Str,Args),io:format(Str,Args)).
-
--define(debug5(Str,Args),ok).
-%%-define(debug5(Str,Args),io:format(Str,Args)).
-
--define(debug(Str,Args),ok).
-%%-define(debug(Str,Args),io:format(Str,Args)).
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cfg
-%% Argument : CFG - the control flow graph
-%% Returns : CFG - A new cfg with scheduled blocks
-%% Description : Takes each basic block and schedules them one by one.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cfg(CFG) ->
- ?debug3("CFG: ~n~p", [CFG]),
- update_all( [ {L,
- hipe_bb:mk_bb(
- block(L,hipe_bb:code(hipe_sparc_cfg:bb(CFG,L))) )}
- || L <- hipe_sparc_cfg:labels(CFG) ], CFG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : update_all
-%% Argument : Blocks - [{Label, Block}] , a list with labels and new code
-%% used for updating the old CFG.
-%% CFG - The old controlflow graph
-%% Returns : An updated controlflow graph.
-%% Description : Just swappes the basic blocks in the CFG to the scheduled one.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-update_all([],CFG) -> CFG;
-update_all([{L,NewB}|Ls],CFG) ->
- update_all(Ls,hipe_sparc_cfg:bb_add(CFG,L,NewB)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-est_cfg(CFG) ->
- update_all([ {L, hipe_bb:mk_bb(est_block(hipe_bb:code(hipe_sparc_cfg:bb(CFG,L))))}
- || L <- hipe_sparc_cfg:labels(CFG) ], CFG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Provides an estimation of how quickly a block will execute.
-%% This is done by chaining all instructions in sequential order
-%% by 0-cycle dependences (which means they will never be reordered),
-%% then scheduling the mess.
-
-est_block([]) -> [];
-est_block([I]) -> [I];
-est_block(Blk) ->
- {IxBlk,DAG} = est_deps(Blk),
- Sch = bb(IxBlk,DAG),
- separate_block(Sch,IxBlk).
-
-est_deps(Blk) ->
- IxBlk = indexed_bb(Blk),
- DAG = deps(IxBlk),
- {IxBlk, chain_instrs(IxBlk,DAG)}.
-
-chain_instrs([{N,_}|Xs],DAG) ->
- chain_i(N,Xs,DAG).
-
-chain_i(_,[],DAG) -> DAG;
-chain_i(N,[{M,_}|Xs],DAG) ->
- NewDAG = dep_arc(N,zero_latency(),M,DAG),
- chain_i(M,Xs,NewDAG).
-
-zero_latency() -> 0.
-
-lookup_instr([{N,I}|_], N) -> I;
-lookup_instr([_|Xs], N) -> lookup_instr(Xs, N).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : block
-%% Argument : Instrs - [Instr], list of all the instructions in a basic
-%% block.
-%% Returns : A new scheduled block
-%% Description : Schedule a basic block
-%%
-%% Note: does not consider delay slots!
-%% (another argument for using only annulled delay slots?)
-%% * how do we add delay slots? somewhat tricky to
-%% reconcile with the sort of scheduling we consider.
-%% (as-early-as-possible)
-%% => rewrite scheduler into as-late-as-possible?
-%% (=> just reverse the dependence arcs??)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%% Don't fire up the scheduler if there's no work to do.
-block(_, []) ->
- [];
-block(_L, [I]) ->
- case hipe_sparc:is_any_branch(I) of
- true -> [hipe_sparc:nop_create(), I];
- false -> [I]
- end;
-block(_L, Blk) ->
- IxBlk = indexed_bb(Blk),
- case IxBlk of
- [{_N, I}] -> % comments and nops may have been removed.
- case hipe_sparc:is_any_branch(I) of
- true -> [hipe_sparc:nop_create(), I];
- false -> [I]
- end;
- _ ->
- Sch = bb(IxBlk, {DAG, _Preds} = deps(IxBlk)),
- {NewSch, NewIxBlk} = fill_delays(Sch, IxBlk, DAG),
- X = finalize_block(NewSch, NewIxBlk),
- debug1_stuff(Blk, DAG, IxBlk, Sch, X),
- X
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_delays
-%% Argument : Sch - List of {{cycle, C}, {node, N}} : C = current cycle
-%% N = node index
-%% IxBlk - Indexed block [{N, Instr}]
-%% DAG - Dependence graph
-%% Returns : {NewSch, NewIxBlk} - vector with new schedule and vector
-%% with {N, Instr}
-%% Description : Goes through the schedule from back to front looking for
-%% branches/jumps. If one is found fill_del tries to find
-%% an instr to fill the delayslot.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_delays(Sch, IxBlk, DAG) ->
- NewIxBlk = hipe_vectors:list_to_vector(IxBlk),
- %% NewSch = hipe_vectors:list_to_vector(Sch),
- NewSch = fill_del(length(Sch), hipe_vectors:list_to_vector(Sch),
- NewIxBlk, DAG),
- {NewSch, NewIxBlk}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_del
-%% Argument : N - current index in the schedule
-%% Sch - schedule
-%% IxBlk - indexed block
-%% DAG - dependence graph
-%% Returns : Sch - New schedule with possibly a delay instr in the last
-%% position.
-%% Description : If a call/jump is found fill_branch_delay/fill_call_delay
-%% is called to find a delay-filler.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_del(N, Sch, _IxBlk, _DAG) when N < 1 -> Sch;
-fill_del(N, Sch, IxBlk, DAG) ->
- Index = get_index(Sch, N),
- ?debug2("Index for ~p: ~p~nInstr: ~p~n",
- [N, Index, get_instr(IxBlk, Index)]),
- NewSch =
- case get_instr(IxBlk, Index) of
- #call_link{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- #jmp_link{} ->
- fill_call_delay(N - 1, N, Sch, IxBlk, DAG);
- #jmp{} ->
- fill_call_delay(N - 1, N, Sch, IxBlk, DAG);
- #b{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- #br{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- #goto{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- _Other ->
- Sch
- end,
- NewSch.
- %% fill_del(N - 1, NewSch, IxBlk, DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_call_delay
-%% Argument : Cand - index in schedule of delay-candidate
-%% Call - index in schedule of call
-%% Sch - schedule vector: < {{cycle,Ci},{node,Nj}}, ... >
-%% IxBlk - block vector: < {N, Instr1}, {N+1, Instr2} ... >
-%% DAG - dependence graph
-%% Returns : Sch - new updated schedule.
-%% Description : Searches backwards through the schedule trying to find an
-%% instr without conflicts with the Call-instr.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_call_delay(Cand, _Call, Sch, _IxBlk, _DAG) when Cand < 1 -> Sch;
-fill_call_delay(Cand, Call, Sch, IxBlk, DAG) ->
- CandIndex = get_index(Sch, Cand),
- CallIndex = get_index(Sch, Call),
- CandI = get_instr(IxBlk, CandIndex),
- case move_or_alu(CandI) of
- true ->
- case single_depend(CandIndex, CallIndex, DAG) of
- false -> % Other instrs depends on Cand ...
- fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG);
-
- true ->
- CallI = get_instr(IxBlk, CallIndex),
-
- CandDefs = ordsets:from_list(hipe_sparc:defines(CandI)),
- %% CandUses = ordsets:from_list(hipe_sparc:uses(CandI)),
- %% CallDefs = ordsets:from_list(hipe_sparc:defines(CallI)),
- CallUses = ordsets:from_list(hipe_sparc:uses(CallI)),
-
- Args = case CallI of
- #jmp_link{} ->
- ordsets:from_list(
- hipe_sparc:jmp_link_args(CallI));
- #jmp{} ->
- ordsets:from_list(hipe_sparc:jmp_args(CallI));
- #call_link{} ->
- ordsets:from_list(
- hipe_sparc:call_link_args(CallI))
- end,
- CallUses2 = ordsets:subtract(CallUses, Args),
- Conflict = ordsets:intersection(CandDefs, CallUses2),
- %% io:format("single_depend -> true:~n ~p~n, ~p~n,~p~n",[CandI,CallI,DAG]),
- %% io:format("Cand = ~p~nCall = ~p~n",[CandI,CallI]),
- %% io:format("CandDefs = ~p~nCallDefs = ~p~n",[CandDefs,CallDefs]),
- %% io:format("CandUses = ~p~nCallUses = ~p~n",[CandUses,CallUses]),
- %% io:format("Args = ~p~nCallUses2 = ~p~n",[Args,CallUses2]),
- %% io:format("Conflict = ~p~n",[Conflict]),
-
- case Conflict of
- [] -> % No conflicts ==> Cand can fill delayslot after Call
- update_schedule(Cand, Call, Sch);
- _ -> % Conflict: try with preceeding instrs
- fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG)
- end
- end;
- false ->
- fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG)
- end.
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_branch_delay
-%% Argument : Cand - index in schedule of delay-candidate
-%% Branch - index in schedule of branch
-%% Sch - schedule
-%% IxBlk - indexed block
-%% DAG - dependence graph
-%% Returns : Sch - new updated schedule.
-%% Description : Searches backwards through the schedule trying to find an
-%% instr without conflicts with the Branch-instr.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_branch_delay(Cand, _Br, Sch, _IxBlk, _DAG) when Cand < 1 -> Sch;
-fill_branch_delay(Cand, Br, Sch, IxBlk, DAG) ->
- CandIndex = get_index(Sch, Cand),
- BrIndex = get_index(Sch, Br),
- CandI = get_instr(IxBlk, CandIndex),
- case move_or_alu(CandI) of
- true ->
- case single_depend(CandIndex, BrIndex, DAG) of
- false -> % Other instrs depends on Cand ...
- fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG);
-
- true ->
- BrI = get_instr(IxBlk, BrIndex),
- CandDefs = ordsets:from_list(hipe_sparc:defines(CandI)),
- %% CandUses = ordsets:from_list(hipe_sparc:uses(CandI)),
- %% BrDefs = ordsets:from_list(hipe_sparc:defines(BrI)),
- BrUses = ordsets:from_list(hipe_sparc:uses(BrI)),
-
- Conflict = ordsets:intersection(CandDefs, BrUses),
- %% io:format("single_depend -> true: ~p~n, ~p~n,~p~n", [CandI, BrI, DAG]),
- %% io:format("Cand = ~p~nBr = ~p~n",[CandI,BrI]),
- %% io:format("CandDefs = ~p~nBrDefs = ~p~n",[CandDefs,BrDefs]),
- %% io:format("CandUses = ~p~nBrUses = ~p~n",[CandUses,BrUses]),
- %% io:format("Conflict = ~p~n",[Conflict]);
-
- case Conflict of
- [] -> % No conflicts ==>
- % Cand can fill delayslot after Branch
- update_schedule(Cand, Br, Sch);
- _ -> % Conflict: try with preceeding instrs
- fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG)
- end
- end;
- false ->
- fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG)
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : update_schedule
-%% Argument : From - the position from where to switch indexes in Sch
-%% To - the position to where to switch indexes in Sch
-%% Sch - schedule
-%% Returns : Sch - an updated schedule
-%% Description : If From is the delay-filler and To is the Call/jump, the
-%% schedule is updated so From gets index To, To gets index
-%% To - 1, and the nodes between From and To gets old_index - 1.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-update_schedule(To, To, Sch) ->
- {{cycle, C}, {node, _N} = Node} = hipe_vectors:get(Sch, To-1),
- hipe_vectors:set(Sch, To-1, {{cycle, C+1}, Node});
-update_schedule(From, To, Sch) ->
- Temp = hipe_vectors:get(Sch, From-1),
- Sch1 = hipe_vectors:set(Sch, From-1, hipe_vectors:get(Sch, From)),
- update_schedule(From + 1, To, hipe_vectors:set(Sch1, From, Temp)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : single_depend
-%% Argument : N - Index of the delayslot candidate
-%% M - Index of the node that N possibly has a single
-%% depend to.
-%% DAG - The dependence graph
-%% Returns : true if no other nodes than N os depending on N
-%% Description : Checks that no other nodes than M depends on N and that the
-%% latency between them is zero or 1.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-single_depend(N, M, DAG) ->
- Deps = hipe_vectors:get(DAG, N-1),
- single_depend(M, Deps).
-
-single_depend(_N, []) -> true;
-single_depend(N, [{0, N}]) -> true;
-single_depend(N, [{1, N}]) -> true;
-single_depend(_N, [{_Lat, _}|_]) -> false.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_index
-%% Argument : Sch - schedule
-%% N - index in schedule
-%% Returns : Index - index of the node
-%% Description : Returns the index of the node on position N in the schedule.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-get_index(Sch, N) ->
- {{cycle, _C}, {node, Index}} = hipe_vectors:get(Sch,N-1),
- Index.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_instr
-%% Argument : IxBlk - indexed block
-%% N - index in block
-%% Returns : Instr
-%% Description : Returns the instr on position N in the indexed block.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-get_instr(IxBlk, N) ->
- {_, Instr} = hipe_vectors:get(IxBlk, N-1),
- Instr.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_instr
-%% Argument : Sch - schedule
-%% IxBlk - indexed block
-%% N - index in schedule
-%% Returns : Instr
-%% Description : Returns the instr on position N in the schedule.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-get_instr(Sch, IxBlk, N) ->
- {{cycle, _C}, {node, Index}} = hipe_vectors:get(Sch, N-1),
- {_, Instr} = hipe_vectors:get(IxBlk, Index-1),
- Instr.
-
-separate_block(Sch,IxBlk) ->
- sep_comments([{C,lookup_instr(IxBlk,N)} || {{cycle,C},{node,N}} <- Sch]).
-
-sep_comments([]) -> [];
-sep_comments([{C,I}|Xs]) ->
- [hipe_sparc:comment_create({cycle,C}), I | sep_comments(Xs,C)].
-
-sep_comments([], _) -> [];
-sep_comments([{C1,I}|Xs], C0) ->
- if
- C1 > C0 ->
- [hipe_sparc:comment_create({cycle,C1}),I|sep_comments(Xs,C1)];
- true ->
- [I|sep_comments(Xs, C0)]
- end.
-
-finalize_block(Sch, IxBlk) ->
- ?debug5("Sch: ~p~nIxBlk: ~p~n",[Sch,IxBlk]),
- finalize_block(1, hipe_vectors:size(Sch), 1, Sch, IxBlk, []).
-
-finalize_block(N, End, _C, Sch, IxBlk, _Instrs) when N =:= End - 1 ->
- NextLast = get_instr(Sch, IxBlk, N),
- Last = get_instr(Sch, IxBlk, End),
- ?debug5("NextLast: ~p~nLast: ~p~n",[NextLast,Last]),
- case hipe_sparc:is_any_branch(Last) of
- true -> % Couldn't fill delayslot ==> add NOP
- [NextLast , hipe_sparc:nop_create(), Last];
- false -> % Last is a delayslot-filler ==> change order...
- [Last, NextLast]
- end;
-finalize_block(N, End, C0, Sch, IxBlk, Instrs) ->
- {{cycle, _C1}, {node, _M}} = hipe_vectors:get(Sch, N-1),
- Instr = get_instr(Sch, IxBlk, N),
- ?debug5("Instr: ~p~n~n",[Instr]),
- [Instr | finalize_block(N + 1, End, C0, Sch, IxBlk, Instrs)].
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : bb
-%% Argument : IxBlk - indexed block
-%% DAG - {Dag, Preds} where Dag is dependence graph and
-%% Preds is number of predecessors for each node.
-%% Returns : Sch
-%% Description : Initializes earliest-list, ready-list, priorities, resources
-%% and so on, and calls the cycle_sched which does the scheduling
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-bb(IxBlk,DAG) ->
- bb(length(IxBlk), IxBlk, DAG).
-
-bb(N,IxBlk,{DAG, Preds}) ->
- Earliest = init_earliest(N),
- BigArray = N*10, % "nothing" is this big :-)
- Ready = hipe_schedule_prio:init_ready(BigArray,Preds),
- I_res = init_instr_resources(N, IxBlk),
-
- Prio = hipe_schedule_prio:init_instr_prio(N,DAG),
- Rsrc = init_resources(BigArray),
- ?debug4("I_res: ~n~p~nPrio: ~n~p~nRsrc: ~n~p~n", [I_res,Prio,Rsrc]),
- ?debug('cycle 1~n',[]),
- Sch = empty_schedule(),
- cycle_sched(1,Ready,DAG,Preds,Earliest,Rsrc,I_res,Prio,Sch,N,IxBlk).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cycle_sched
-%% Argument : - C is current cycle, 1 or more.
-%% - Ready is an array (Cycle -> [Node])
-%% yielding the collection of nodes ready to be
-%% scheduled in a cycle.
-%% - DAG is an array (Instr -> [{Latency,Instr}])
-%% represents the dependence DAG.
-%% - Preds is an array (Instr -> NumPreds)
-%% counts the number of predecessors
-%% (0 preds = ready to be scheduled).
-%% - Earl is an array (Instr -> EarliestCycle)
-%% holds the earliest cycle an instruction can be scheduled.
-%% - Rsrc is a 'resource ADT' that handles scheduler resource
-%% management checks whether instruction can be scheduled
-%% this cycle without a stall.
-%% - I_res is an array (Instr -> Required_resources)
-%% holds the resources required to schedule an instruction.
-%% - Sch is the representation of the schedule current schedule.
-%% - N is the number of nodes remaining to be scheduled
-%% tells us when to stop the scheduler.
-%% - IxBlk is the indexed block with instrs
-%% Returns : present schedule
-%% Description : Scheduler main loop.
-%% Pick next ready node in priority order for cycle C until
-%% none remain.
-%% * check each node if it can be scheduled w/o stalling
-%% * if so, schedule it
-%% * otherwise, bump the node to the next cycle
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cycle_sched(C,Ready,DAG,Preds,Earl,Rsrc,I_res,Prio,Sch,N,IxBlk) ->
- case hipe_schedule_prio:next_ready(C,Ready,Prio,IxBlk,DAG,Preds,Earl) of
-% case hipe_schedule_prio:next_ready(C,Ready,Prio,IxBlk) of
- {next,I,Ready1} ->
- ?debug('try ~p~n==> ready = ~p~n',[I, Ready1]),
- case resources_available(C,I,Rsrc,I_res) of
- {yes,NewRsrc} ->
- ?debug(' scheduled~n==> Rscrs = ~p~n',[NewRsrc]),
- NewSch = add_to_schedule(I,C,Sch),
- {ReadyNs,NewDAG,NewPreds,NewEarl} =
- delete_node(C,I,DAG,Preds,Earl),
- ?debug("NewPreds : ~p~n",[Preds]),
- ?debug(' ReadyNs: ~p~n',[ReadyNs]),
- NewReady = hipe_schedule_prio:add_ready_nodes(ReadyNs,
- Ready1),
- ?debug(' New ready: ~p~n',[NewReady]),
- cycle_sched(C,NewReady,NewDAG,NewPreds,NewEarl,
- NewRsrc,I_res,Prio,NewSch,N-1, IxBlk);
- no ->
- ?debug(' resource conflict~n',[]),
- NewReady = hipe_schedule_prio:insert_node(C+1,I,Ready1),
- cycle_sched(C,NewReady,DAG,Preds,Earl,Rsrc,
- I_res,Prio,Sch,N,IxBlk)
- end;
- none -> % schedule next cycle if some node remains
- if
- N > 0 ->
- ?debug('cycle ~p~n',[C+1]),
- cycle_sched(C+1,Ready,DAG,Preds,Earl,
- advance_cycle(Rsrc),
- I_res,Prio,Sch,N, IxBlk);
- true ->
- present_schedule(Sch)
- end
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : init_earliest
-%% Argument : N - number of instrs
-%% Returns : An array (Instr -> EarliestCycle) with every entry set to 1.
-%% Description : Initializes the earliest-cycle array for the N instrs.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-init_earliest(N) ->
- hipe_vectors:new(N,1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Schedule is kept reversed until the end.
-
--define(present_node(I,Cycle),{{cycle,Cycle},{node,I}}).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_schedule
-%% Description : Returns an empty schedule.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_schedule() -> [].
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_to_schedule
-%% Argument : I - instr
-%% Cycle - cycle when I was placed
-%% Sch - schedule
-%% Description : Adds instr to schedule
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_to_schedule(I,Cycle,Sch) ->
- [?present_node(I,Cycle)|Sch].
-
-present_schedule(Sch) -> lists:reverse(Sch).
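A minimal sketch (not part of the module; the node numbers are invented) of how
the reversed accumulator is built and then presented in chronological order:

schedule_example() ->
    S0 = empty_schedule(),                   % []
    S1 = add_to_schedule(4, 1, S0),          % node 4 issued in cycle 1
    S2 = add_to_schedule(2, 1, S1),          % node 2 issued in cycle 1
    S3 = add_to_schedule(1, 2, S2),          % node 1 issued in cycle 2
    [{{cycle,1},{node,4}}, {{cycle,1},{node,2}}, {{cycle,2},{node,1}}] =
        present_schedule(S3),
    ok.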
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Interface to resource manager:
-%%
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : init_resources
-%% Description : Yields a 'big enough' array mapping (Cycle -> Resources);
-%% this array is called Rsrc below.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-init_resources(S) ->
- hipe_target_machine:init_resources(S).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : init_instr_resources
-%% Argument : Nodes - a list of the instructions
-%% N - the number of nodes
-%% Description : Returns a vector (NodeID -> Resource_requirements)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-init_instr_resources(N,Nodes) ->
- hipe_target_machine:init_instr_resources(N,Nodes).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : resources_available
-%% Argument : Cycle - the current cycle
-%% I - the current instruction (index = NodeID)
-%% Rsrc - a map (Cycle -> Resources)
-%% I_res - maps (NodeID -> Resource_requirements)
-%% Description : returns {yes,NewResTab} | no
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-resources_available(Cycle,I,Rsrc,I_res) ->
- hipe_target_machine:resources_available(Cycle,I,Rsrc,I_res).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : advance_cycle
-%% Argument : Rsrc - resources
-%% Description : Returns an empty resource state for the next cycle
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-advance_cycle(Rsrc) ->
- hipe_target_machine:advance_cycle(Rsrc).
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : delete_node
-%% Argument : Cycle - current cycle
-%% I - index of instr
-%% DAG - dependence dag
-%% Preds - array with number of predecessors for nodes
-%% Earl - array with earliest-times for nodes
-%% Returns : {ReadyNs,NewDAG,NewPreds,NewEarl}
-%% Description : Deletes node I and updates earliest times for the rest.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-delete_node(Cycle,I,DAG,Preds,Earl) ->
- Succ = hipe_vectors:get(DAG,I-1),
- NewDAG = hipe_vectors:set(DAG,I-1,scheduled), % provides debug 'support'
- {ReadyNs,NewPreds,NewEarl} = update_earliest(Succ,Cycle,Preds,Earl,[]),
- ?debug('earliest after ~p: ~p~n',[I,[{Ix+1,V} || {Ix,V} <- hipe_vectors:list(NewEarl)]]),
- {ReadyNs,NewDAG,NewPreds,NewEarl}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : update_earliest
-%% Argument : Succ - successor list
-%% Cycle - current cycle
-%% Preds - predecessors
-%% Earl - earliest times for nodes
-%% Ready - accumulated list of newly ready nodes
-%% Returns : {Ready,Preds,Earl}
-%% Description : Updates the earliest times and the number of predecessors
-%% for the successor nodes
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-update_earliest([],_Cycle,Preds,Earl,Ready) ->
- {Ready,Preds,Earl};
-update_earliest([{Lat,N}|Xs],Cycle,Preds,Earl,Ready) ->
- Old_earl = hipe_vectors:get(Earl,N-1),
- New_earl = erlang:max(Old_earl,Cycle+Lat),
- NewEarl = hipe_vectors:set(Earl,N-1,New_earl),
- Num_preds = hipe_vectors:get(Preds,N-1),
- NewPreds = hipe_vectors:set(Preds,N-1,Num_preds-1),
- if
- Num_preds =:= 0 ->
- ?debug('inconsistent DAG~n',[]),
- exit({update_earliest,N});
- Num_preds =:= 1 ->
- NewReady = [{New_earl,N}|Ready],
- NewPreds2 = hipe_vectors:set(NewPreds,N-1,0),
- update_earliest(Xs,Cycle,NewPreds2,NewEarl,NewReady);
- is_integer(Num_preds), Num_preds > 1 ->
- update_earliest(Xs,Cycle,NewPreds,NewEarl,Ready)
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Collect instruction dependences.
-%%
-%% Three forms:
-%% - data/register
-%% * insert RAW, WAR, WAW dependences
-%% - memory
-%% * stores serialize memory references
-%% * alias analysis may allow loads to bypass stores
-%% - control
-%% * unsafe operations are 'trapped' between branches
-%% * branches are ordered
-%%
-%% returns { [{Index,Instr}], DepDAG }
-%% DepDAG is defined below.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : deps
-%% Argument : BB - Basic block
-%% Returns : {IxBB,DAG} - indexed block and dependence graph. DAG consists
-%% of both Dag and Preds, where Preds is number
-%% of predecessors for nodes.
-%% Description : Collect instruction dependences.
-%%
-%% Three forms:
-%% - data/register
-%% * insert RAW, WAR, WAW dependences
-%% - memory
-%% * stores serialize memory references
-%% * alias analysis may allow loads to bypass stores
-%% - control
-%% * unsafe operations are 'trapped' between branches
-%% * branches are ordered
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-deps(IxBB) ->
- N = length(IxBB),
- DAG = empty_dag(N), % The DAG contains both dependence-arcs and
- % number of predecessors...
- {_DepTab,DAG1} = dd(IxBB, DAG),
- DAG2 = md(IxBB, DAG1),
- cd(IxBB, DAG2).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_dag
-%% Argument : N - number of nodes
-%% Returns : empty DAG
-%% Description : DAG consists of the dependence graph and predecessor counts
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_dag(N) ->
- {hipe_vectors:new(N, []), hipe_vectors:new(N, 0)}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : indexed_bb
-%% Argument : BB - basic block
-%% Returns : [{N, Instr}]
-%% Description : Assigns an index to every instr of a block and removes
-%% comments. NOPs are also removed because, if both the
-%% sparc_schedule and sparc_post_schedule options are used,
-%% the first pass will add NOPs before the branch if
-%% necessary, and these are removed before scheduling the
-%% second pass.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-indexed_bb(BB) ->
- indexed_bb(BB,1).
-
-indexed_bb([],_N) -> [];
-indexed_bb([X|Xs],N) ->
- case X of
- #comment{} ->
- indexed_bb(Xs,N);
- #nop{} ->
- indexed_bb(Xs,N);
- _Other ->
- [{N,X}|indexed_bb(Xs,N+1)]
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : dep_arc
-%% Argument : N - Current node
-%% Lat - Latency from current node to M
-%% M - The dependent node
-%% DAG - The dependence graph. Consists of both the DAG and
-%% the predecessor counts
-%% Returns : A new DAG with the arc added and the number of predecessors
-%% for M increased.
-%% Description : Adds a new arc to the graph; if an older arc already goes
-%% from N to M, it is replaced by {max(OldLat, NewLat), M}.
-%% The number of predecessors for node M is increased.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-dep_arc(N, Lat, M, {Dag,Preds}) ->
- OldDeps = hipe_vectors:get(Dag, N-1),
- %% io:format("{OldDeps} = {~p}~n",[OldDeps]),
- {NewDeps, Status} = add_arc(Lat, M, OldDeps),
- %% io:format("{NewDeps, Status} = {~p, ~p}~n",[NewDeps, Status]),
- NewDag = hipe_vectors:set(Dag, N-1, NewDeps),
- NewPreds = case Status of
- added -> % just increase preds if new arc was added
- OldPreds = hipe_vectors:get(Preds, M-1),
- hipe_vectors:set(Preds, M-1, OldPreds + 1);
- non_added ->
- Preds
- end,
- {NewDag, NewPreds}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_arc
-%% Argument : Lat - The latency from current node to To.
-%% To - The instr-id of the node which the dependence goes to
-%% Arcs - The dependencies that are already in the dep-graph
-%% Returns : {Arcs, added | non_added} - the arc list kept sorted by To.
-%% Description : A new arc is inserted at the right place; if there is
-%% already an arc between the two nodes, the one with the
-%% greater latency is kept.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_arc(Lat,To, []) -> {[{Lat, To}], added};
-add_arc(Lat1, To, [{Lat2, To} | Arcs]) ->
- {[{erlang:max(Lat1, Lat2), To} | Arcs], non_added};
-add_arc(Lat1,To1, [{Lat2, To2} | Arcs]) when To1 < To2 ->
- {[{Lat1, To1}, {Lat2, To2} | Arcs], added};
-add_arc(Lat1 ,To1, [{Lat2, To2} | Arcs]) ->
- {Arcs1, Status} = add_arc(Lat1, To1, Arcs),
- {[{Lat2, To2} | Arcs1], Status}.
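A minimal sketch (not part of the module; latencies and node ids are invented)
of the two add_arc cases, merging a duplicate arc and inserting a new one:

add_arc_example() ->
    %% a duplicate arc to node 5 keeps the larger latency and reports non_added
    {[{1,3}, {2,5}, {4,9}], non_added} = add_arc(2, 5, [{1,3}, {1,5}, {4,9}]),
    %% a genuinely new arc to node 4 is inserted in To-order and reported added
    {[{1,3}, {1,4}, {2,5}], added} = add_arc(1, 4, [{1,3}, {2,5}]),
    ok.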
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% The register/data dependence DAG of a block is represented
-%% as a mapping (Variable -> {NextWriter,NextReaders})
-%% where NextWriter is a pair {Ix,Type}
-%% and NextReaders is a list of pairs {Ix,Type}.
-%%
-%% Type is used to determine latencies of operations; on the UltraSparc,
-%% latencies of arcs (n -> m) are determined by both n and m. (E.g., if
-%% n is an integer op and m is a store, then latency is 0; if m is an
-%% integer op, it's 1.)
-
-dd([],DAG) -> { empty_deptab(), DAG };
-dd([{N,I}|Is],DAG0) ->
- {DepTab,DAG1} = dd(Is,DAG0),
- add_deps(N,I,DepTab,DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_deps
-%% Argument : N - current node
-%% Instr - current instr
-%% DepTab - hashtable with {next-writer, next-readers} for reg
-%% DAG - dependence graph
-%% Returns : {DepTab, DAG} - with new values
-%% Description : Adds dependencies for node N to the graph. The registers that
-%% node N defines and uses are used for computing the
-%% dependencies to the following nodes.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_deps(N,Instr,DepTab,DAG) ->
- {Ds,Us} = def_use(Instr),
- Type = dd_type(Instr),
- {DepTab1,DAG1} = add_write_deps(Ds,N,Type,DepTab,DAG),
- add_read_deps(Us,N,Type,DepTab1,DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Instructions are classified into symbolic categories,
-%% which are subsequently used to determine operation latencies
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-dd_type(Instr) ->
- case Instr of
- #b{} -> branch;
- %% #br{} -> branch;
- #call_link{} -> branch;
- #jmp_link{} -> branch;
- #jmp{} -> branch;
- #goto{} -> branch;
- #load{} -> load;
- #store{} -> store;
- #alu{} -> alu;
- #move{} -> alu;
- #multimove{} ->
- Src = hipe_sparc:multimove_src(Instr),
- Lat = round(length(Src)/2),
- {mmove,Lat};
- #sethi{} -> alu;
- #alu_cc{} -> alu_cc;
- %% #cmov_cc{} -> cmov_cc;
- %% #cmov_r{} -> alu;
- #load_atom{} -> alu;
- #load_address{} -> alu;
- #pseudo_enter{} -> pseudo;
- #pseudo_pop{} -> pseudo;
- #pseudo_return{} -> pseudo;
- #pseudo_spill{} -> pseudo;
- #pseudo_unspill{} -> pseudo
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_write_deps
-%% Argument : Defs - registers that node N defines.
-%% N - current node
-%% Ty - the type of current instr
-%% DepTab - Dependence-table
-%% DAG - The dependence graph.
-%% Returns : {DepTab,DAG} - with new values
-%% Description : Adds dependencies to the graph for nodes that depend on the
-%% registers that N defines.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_write_deps([],_N,_Ty,DepTab,DAG) -> {DepTab,DAG};
-add_write_deps([D|Ds],N,Ty,DepTab,DAG) ->
- {NewDepTab,NewDAG} = add_write_dep(D,N,Ty,DepTab,DAG),
- add_write_deps(Ds,N,Ty,NewDepTab,NewDAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_write_dep
-%% Description : Updates the dependence table with N as next writer, and
-%% updates the DAG with the dependencies from N to subsequent
-%% nodes.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_write_dep(X,N,Ty,DepTab,DAG) ->
- {NxtWriter,NxtReaders} = lookup(X,DepTab),
- NewDepTab = writer(X,N,Ty,DepTab),
- NewDAG = write_deps(N,Ty,NxtWriter,NxtReaders,DAG),
- {NewDepTab, NewDAG}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : write_deps
-%% Argument : Instr - Current instr
-%% Ty - Type of current instr
-%% NxtWriter - The node that is the next writer of the register
-%% that Instr defines.
-%% NxtReaders - The nodes that are subsequent readers of the
-%% register that N defines.
-%% DAG - The dependence graph
-%% Returns : A new DAG with the new dependence arcs added (via raw_deps).
-%% Description : If a next writer exists, a write-after-write arc for this
-%% node is added; raw_deps is then called to add the arcs for
-%% the read-after-write dependencies.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-write_deps(Instr,Ty,NxtWriter,NxtReaders,DAG) ->
- DAG1 = case NxtWriter of
- none ->
- DAG;
- {Instr,_} ->
- DAG;
- {Wr,WrTy} ->
- dep_arc(Instr,
- hipe_target_machine:waw_latency(Ty,WrTy),
- Wr, DAG)
- end,
- raw_deps(Instr,Ty,NxtReaders,DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : raw_deps
-%% Argument : Instr - current instr
-%% Type - type of instr
-%% Readers - subsequent readers
-%% DAG - dependence graph
-%% Returns : DAG - A new DAG with read-after-write dependencies added
-%% Description : Updates the DAG with the dependence-arcs from Instr to the
-%% subsequent readers, with the appropriate latencies.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-raw_deps(_Instr,_Type,[],DAG) -> DAG;
-raw_deps(Instr,Ty,[{Rd,RdTy}|Xs],DAG) ->
- raw_deps(Instr,Ty,Xs,
- dep_arc(Instr,hipe_target_machine:raw_latency(Ty,RdTy),
- Rd,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_read_deps
-%% Argument : Uses - The registers that node N uses.
-%% N - Index of the current node.
-%% Ty - Type of current node.
-%% DepTab - Dependence table
-%% DAG - Dependence graph
-%% Returns : {DepTab, DAG} - with updated values.
-%% Description : Adds the read dependencies from node N to subsequent ones,
-%% according to the registers that N uses.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_read_deps([],_N,_Ty,DepTab,DAG) -> {DepTab,DAG};
-add_read_deps([U|Us],N,Ty,DepTab,DAG) ->
- {NewDepTab,NewDAG} = add_read_dep(U,N,Ty,DepTab,DAG),
- add_read_deps(Us,N,Ty,NewDepTab,NewDAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_read_dep
-%% Argument : X - Used register
-%% N - Index of checked instr
-%% Ty - Type of checked instr
-%% DepTab - Hashtable with {next-writer, next-readers}
-%% DAG - Dependence graph
-%% Returns : {DepTab, DAG} - with updated values
-%% Description : Looks up what the next-writer/next-readers are, and adjusts
-%% the table with current node as new reader. Finally
-%% read-dependencies are added to the DAG.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_read_dep(X,N,Ty,DepTab,DAG) ->
- {NxtWriter,_NxtReaders} = lookup(X,DepTab),
- NewDepTab = reader(X,N,Ty,DepTab),
- NewDAG = read_deps(N,Ty,NxtWriter,DAG),
- {NewDepTab, NewDAG}.
-
-% If NxtWriter is 'none', then this var is not written subsequently
-% Add WAR from Instr to NxtWriter (if it exists)
-% *** UNFINISHED ***
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : read_deps
-%% Argument : N - Index of current node
-%% Ty - Type of current node
-%% Writer - tuple {NextWriter, WrType} where NextWriter is the
-%% subsequent instr that writes this register next time,
-%% and WrType is the type of that instr.
-%% DAG - The dependence graph
-%% Returns : DAG
-%% Description : Returns a new DAG if a next-writer exists, otherwise the old
-%% DAG is returned.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-read_deps(_Instr,_Ty,none,DAG) ->
- DAG;
-read_deps(_Instr,_Ty,{_Instr,_},DAG) ->
- DAG;
-read_deps(Instr,Ty,{NxtWr,NxtWrTy},DAG) ->
- dep_arc(Instr,hipe_target_machine:war_latency(Ty,NxtWrTy),NxtWr,
- DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_deptab
-%% Description : Creates an empty dependence table (a gb_tree)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_deptab() ->
- gb_trees:empty().
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : lookup
-%% Argument : X - key (register)
-%% DepTab - dependence table
-%% Returns : {NextWriter, NextReaders}
-%% Description : Returns next writer and a list of following readers on
-%% register X.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-lookup(X, DepTab) ->
- case gb_trees:lookup(X, DepTab) of
- none ->
- {none, []};
- {value, {W, Rs} = Val} ->
- Val
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : writer
-%% Argument : X - key (register)
-%% N - index of writer
-%% Ty - type of writer
-%% DepTab - dependence table to be updated
-%% Returns : DepTab - new dependence table
-%% Description : Sets N to be the next writer on X
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-writer(X, N, Ty, DepTab) ->
- gb_trees:enter(X, {{N, Ty}, []}, DepTab).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : reader
-%% Argument : X - key (register)
-%% N - index of reader
-%% Ty - type of reader
-%% DepTab - dependence table to be updated
-%% Returns : DepTab - new dependence table
-%% Description : Adds N to the dependence table as a reader.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-reader(X,N,Ty,DepTab) ->
- {W,Rs} = lookup(X,DepTab),
- gb_trees:enter(X,{W,[{N,Ty}|Rs]},DepTab).
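A minimal sketch (not part of the module; node ids, types and the register key
are invented) of how the table evolves when a block is traversed bottom-up:

deptab_example() ->
    T0 = empty_deptab(),
    %% instr 2 (a store) uses the register, so it is recorded as a pending reader
    T1 = reader(some_reg, 2, store, T0),
    {none, [{2,store}]} = lookup(some_reg, T1),
    %% instr 1 (an alu) defines the register; it becomes the next writer and
    %% the reader list is reset
    T2 = writer(some_reg, 1, alu, T1),
    {{1,alu}, []} = lookup(some_reg, T2),
    ok.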
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% The following version of md/2 separates heap- and stack operations,
-%% which allows for greater reordering.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md
-%% Argument : IxBB - indexed block
-%% DAG - dependence graph
-%% Returns : DAG - new dependence graph
-%% Description : Adds arcs for load/store dependencies to the DAG.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md(IxBB, DAG) ->
- md(IxBB,empty_md_state(),DAG).
-
-md([],_,DAG) -> DAG;
-md([{N,I}|Is],St,DAG) ->
- case md_type(I) of
- other ->
- md(Is,St,DAG);
- {st,T} ->
- { WAW_nodes, WAR_nodes, NewSt } = st_overlap(N,T,St),
- md(Is,NewSt,
- md_war_deps(WAR_nodes,N,md_waw_deps(WAW_nodes,N,DAG)));
- {ld,T} ->
- { RAW_nodes, NewSt } = ld_overlap(N,T,St),
- md(Is,NewSt,
- md_raw_deps(RAW_nodes,N,DAG))
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_war_deps
-%% Argument : WAR_nodes - nodes that N has write-after-read dependences on
-%% N - index of current instr
-%% DAG - dependence graph
-%% Returns : DAG - updated DAG
-%% Description : Adds arcs for write-after-read dependencies for N
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_war_deps([],_,DAG) -> DAG;
-md_war_deps([M|Ms],N,DAG) ->
- md_war_deps(Ms,N,dep_arc(M,hipe_target_machine:m_war_latency(),N,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_waw_deps
-%% Argument : WAW_nodes - nodes that N has write-after-write dependences on
-%% N - index of current instr
-%% DAG - dependence graph
-%% Returns : DAG - updated DAG
-%% Description : Adds arcs for write-after-write dependencies for N
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_waw_deps([],_,DAG) -> DAG;
-md_waw_deps([M|Ms],N,DAG) ->
- md_waw_deps(Ms,N,dep_arc(M,hipe_target_machine:m_waw_latency(),N,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_raw_deps
-%% Argument : RAW_nodes - nodes that N has read-after-write dependences on
-%% N - index of current instr
-%% DAG - dependence graph
-%% Returns : DAG - updated DAG
-%% Description : Adds arcs for read-after-write dependencies for N
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_raw_deps([],_,DAG) -> DAG;
-md_raw_deps([M|Ms],N,DAG) ->
- md_raw_deps(Ms,N,dep_arc(M,hipe_target_machine:m_raw_latency(),N,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_md_state
-%% Description : Returns an empty memory dependence state, i.e. a tuple of
-%% 4 lists {StackStores, HeapStores, StackLoads, HeapLoads}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_md_state() -> {[], [], [], []}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_type
-%% Argument : I - instr
-%% Description : Maps the instr to a simplified type telling whether it is a
-%% load or a store, and whether it refers to the heap or the stack.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_type(I) ->
- case I of
- #load{} ->
- Sp = hipe_sparc_registers:stack_pointer(),
- Src = hipe_sparc:load_src(I),
- N = hipe_sparc:reg_nr(Src),
- Off = hipe_sparc:load_off(I),
- if
- N =:= Sp -> % operation on stack
- {ld,{sp,Off}};
- true ->
- {ld,{hp,Src,Off}}
- end;
- #store{} ->
- Sp = hipe_sparc_registers:stack_pointer(),
- Dst = hipe_sparc:store_dest(I),
- N = hipe_sparc:reg_nr(Dst),
- Off = hipe_sparc:store_off(I),
- if
- N =:= Sp ->
- {st,{sp,Off}};
- true ->
- {st,{hp,Dst,Off}}
- end;
- _ ->
- other
- end.
-
-%% Given a memory operation and a 'memory op state',
-%% overlap(N,MemOp,State) returns { Preceding_Dependent_Ops, NewState },
-%% where Preceding_Dependent_Ops is either a tuple { WAW_deps, WAR_deps }
-%% or a list of RAW_deps.
-%%
-%% NOTES:
-%% Note that Erlang's semantics ("heap stores never overwrite existing data")
-%% means we can be quite free in reordering stores to the heap.
-%% Ld/St to the stack are simply handled by their offsets; since we do not
-%% rename the stack pointer, this is sufficient.
-%% *** We assume all memory ops have uniform size = 4 ***
-%%
-%% NOTES:
-%% The method mentioned above has now been changed because the assumption that
-%% "heap stores never overwrite existing data" caused a bug when the
-%% process-pointer was treated the same way as the heap. We were also told
-%% that the semantics can possibly change in the future, so it would be more
-%% safe to treat the heap store/loads as the stack.
-%% A future improvement can be to do an alias analysis to give more freedom
-%% in reordering stuff...
-%%
-%% Alias state:
-%% { [StackOp], [HeapOp], [StackOp], [HeapOp] }
-%% where StackOp = {InstrID, Offset}
-%% HeapOp = {InstrID, Reg, Offset}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : st_overlap
-%% Argument : N - Index of current node
-%% Type - {sp,Off} or {hp,Dst,Off}, store on stack or heap
-%% State - { [StackStrs], [HeapStrs], [StackLds], [HeapLds] }
-%% where StackStrs/StackLds = {InstrID, Offset}
-%% and HeapStrs/HeapLds = {InstrID, Reg, Offset}
-%% Returns : { DepStrs, DepLds, State } -
-%% where DepStrs/DepLds = [NodeId]
-%% and State is the new state
-%% Description : Finds the earlier stores and loads that overlap with this store.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-st_overlap(N, {sp, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- {DepSt, IndepSt_Sp} = st_sp_dep(St_Sp, Off),
- {DepLd, IndepLd_Sp} = ld_sp_dep(Ld_Sp, Off),
- {DepSt, DepLd, {[{N, Off}|IndepSt_Sp], St_Hp, IndepLd_Sp, Ld_Hp}};
-st_overlap(N, {hp, Dst, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- DstOff = {Dst, Off},
- {DepSt,_IndepSt_Hp} = st_hp_dep(St_Hp, DstOff),
- {DepLd, IndepLd_Hp} = ld_hp_dep(Ld_Hp, DstOff),
- {DepSt, DepLd, {St_Sp, [{N, Dst, Off}|St_Hp], Ld_Sp, IndepLd_Hp}}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : ld_overlap
-%% Argument : N - Index of current node
-%% Type - {sp,Off} or {hp,Src,Off}, load from stack or heap
-%% State - { [StackStrs], [HeapStrs], [StackLds], [HeapLds] }
-%% where StackStrs/StackLds = {InstrID, Offset}
-%% and HeapStrs/HeapLds = {InstrID, Reg, Offset}
-%% Returns : { DepStrs, State }
-%% Description : Finds the earlier stores that overlap with this load
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ld_overlap(N, {sp, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- DepSt = sp_dep_only(St_Sp, Off),
- {DepSt, {St_Sp, St_Hp, [{N, Off}|Ld_Sp], Ld_Hp}};
-ld_overlap(N, {hp, Src, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- DepSt = hp_dep_only(St_Hp, Src, Off),
- {DepSt, {St_Sp, St_Hp, Ld_Sp, [{N, Src, Off}|Ld_Hp]}}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : st_sp_dep
-%% Description : Partitions earlier stack stores into dependent and independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-st_sp_dep(Stores, Off) ->
- sp_dep(Stores, Off, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : ld_sp_dep
-%% Description : Partitions earlier stack loads into dependent and independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ld_sp_dep(Loads, Off) ->
- sp_dep(Loads, Off, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : st_hp_dep
-%% Description : Partitions earlier heap stores into dependent and independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-st_hp_dep(Stores, {_Reg, _Off} = RegOff) ->
- hp_dep(Stores, RegOff, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : ld_hp_dep
-%% Description : Partitions earlier heap loads into dependent and independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ld_hp_dep(Loads, {_Reg, _Off} = RegOff) ->
- hp_dep(Loads, RegOff, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : sp_dep
-%% Description : Returns {Dependent, Independent}, the recorded stack ops at
-%% the same offset and those at other offsets, respectively
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-sp_dep([], _Off, Dep, Indep) -> {Dep, Indep};
-sp_dep([{N,Off}|Xs], Off, Dep, Indep) ->
- sp_dep(Xs, Off, [N|Dep], Indep);
-sp_dep([X|Xs], Off, Dep, Indep) ->
- sp_dep(Xs, Off, Dep, [X|Indep]).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : hp_dep
-%% Description : Returns {Dependent, Independent}, the recorded heap ops that
-%% may overlap {Reg,Off} and those that cannot, respectively
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-hp_dep([], {_Reg,_Off}, Dep, Indep) -> {Dep,Indep};
-hp_dep([{N,Reg,Off1}|Xs], {Reg,Off}, Dep, Indep) when Off1 =/= Off ->
- hp_dep(Xs, {Reg,Off}, Dep, [{N,Reg,Off1}|Indep]);
-hp_dep([{N,_,_}|Xs], {Reg,Off}, Dep, Indep) ->
- hp_dep(Xs, {Reg,Off}, [N|Dep], Indep).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : sp_dep_only
-%% Description : Returns the earlier stack stores at Off that the current load depends on
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-sp_dep_only(Stores, Off) ->
- [N || {N,Off0} <- Stores, Off =:= Off0].
-
-%% Dependences from heap stores to heap loads.
-%% *** UNFINISHED ***
-%% - but works
-%% This is somewhat subtle:
-%% - a heap load can only bypass a heap store if we KNOW it won't
-%% load the stored value
-%% - unfortunately, we do not know the relationships between registers
-%% at this point, so we can't say that store(p+4) is independent of
-%% load(q+0).
-%% (OR CAN WE? A bit closer reasoning might show that it's possible?)
-%% - We can ONLY say that st(p+c) and ld(p+c') are independent when c /= c'
-%%
-%% (As said before, it might be possible to lighten this restriction?)
-
-hp_dep_only([], _Reg, _Off) -> [];
-hp_dep_only([{_N,Reg,Off_1}|Xs], Reg, Off) when Off_1 =/= Off ->
- hp_dep_only(Xs, Reg, Off);
-hp_dep_only([{N,_,_}|Xs], Reg, Off) ->
- [N|hp_dep_only(Xs, Reg, Off)].
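A minimal sketch (not part of the module; node ids, base registers and offsets
are invented) of the rule above, as implemented by hp_dep_only/3:

hp_dep_only_example() ->
    %% pending heap stores: node 1 at {p,0}, node 2 at {p,4}, node 3 at {q,0};
    %% a load from {p,4} may bypass node 1 (same base, different offset) but
    %% must wait for node 2 (same location) and node 3 (unknown relationship)
    [2,3] = hp_dep_only([{1,p,0}, {2,p,4}, {3,q,0}], p, 4),
    ok.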
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Control dependences:
-%% - add dependences so that
-%% * branches are performed in order
-%% * unsafe operations are 'fenced in' by surrounding branches
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd
-%% Argument : IxBB - indexed block
-%% DAG - dependence graph
-%% Returns : DAG - new dependence graph
-%% Description : Adds control dependencies to the DAG
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd(IxBB,DAG) ->
- cd(IxBB, DAG, none, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd
-%% Argument : IxBB - indexed block
-%% DAG - dependence graph
-%% PrevBr - previous branch
-%% PrevUnsafe - previous unsafe instr (mem-op)
-%% PrevOthers - previous other instrs, used to "fix" preceding
-%% instrs so they don't bypass a branch.
-%% Returns : DAG - new dependence graph
-%% Description : Adds control dependencies to the graph.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd([], DAG, _PrevBr, _PrevUnsafe, _PrevOthers) ->
- DAG;
-cd([{N,I}|Xs], DAG, PrevBr, PrevUnsafe, PrevOthers) ->
- case cd_type(I) of
- {branch,Ty} ->
- DAG1 = cd_branch_to_other_deps(N, PrevOthers, DAG),
- NewDAG = cd_branch_deps(PrevBr, PrevUnsafe, N, Ty, DAG1),
- cd(Xs,NewDAG,{N,Ty},[],[]);
- {unsafe,Ty} ->
- NewDAG = cd_unsafe_deps(PrevBr,N,Ty,DAG),
- cd(Xs, NewDAG, PrevBr, [{N,Ty}|PrevUnsafe], PrevOthers);
- {other,_Ty} ->
- cd(Xs, DAG, PrevBr, PrevUnsafe, [N|PrevOthers])
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_branch_to_other_deps
-%% Argument : N - index of branch
-%% Ms - list of indexes of "others" preceding instrs
-%% DAG - dependence graph
-%% Returns : DAG - new graph
-%% Description : Pins preceding instrs with zero-latency arcs so they cannot bypass the branch
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_branch_to_other_deps(_, [], DAG) ->
- DAG;
-cd_branch_to_other_deps(N, [M | Ms], DAG) ->
- cd_branch_to_other_deps(N, Ms, dep_arc(M, zero_latency(), N, DAG)).
-
-%% Is the operation a branch, an unspeculable op or something else?
-
-%% Returns
-%% {branch,BranchType}
-%% {unsafe,OpType}
-%% {other,OpType}
-
-%% *** UNFINISHED ***
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_type
-%% Argument : I - instr
-%% Description : Maps instrs to a simpler type.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_type(I) ->
- case I of
- #goto{} ->
- {branch,uncond};
- #br{} ->
- {branch,'cond'};
- #b{} ->
- {branch,'cond'};
- #call_link{} ->
- {branch,call};
- #jmp_link{} ->
- {branch,call};
- #jmp{} ->
- {branch,call};
- #load{} ->
- {unsafe,load};
- #store{} ->
- {unsafe,load};
- T ->
- {other,T}
- end.
-
-%% add dependences to keep order of branches + unspeculable ops:
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_branch_deps
-%% Argument : PrevBr - preceding branch
-%% PrevUnsafe - preceding unsafe ops, e.g. mem-ops
-%% N - current id.
-%% Ty - type of current instr
-%% DAG - dependence graph
-%% Returns : DAG - new DAG
-%% Description : Adds an arc from the previous branch, then calls
-%% deps_to_unsafe, which adds arcs from unsafe ops to the branch.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_branch_deps(PrevBr, PrevUnsafe, N, Ty, DAG) ->
- DAG1 = case PrevBr of
- none ->
- DAG;
- {Br,BrTy} ->
- dep_arc(Br,
- hipe_target_machine:br_br_latency(BrTy,Ty),
- N, DAG)
- end,
- deps_to_unsafe(PrevUnsafe, N, Ty, DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : deps_to_unsafe
-%% Description : Adds dependencies from unsafe ops to branches
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-deps_to_unsafe([], _, _, DAG) -> DAG;
-deps_to_unsafe([{M,UTy}|Us], N, Ty, DAG) ->
- deps_to_unsafe(Us,N,Ty,
- dep_arc(M, hipe_target_machine:unsafe_to_br_latency(UTy,Ty),
- N, DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_unsafe_deps
-%% Description : Adds dependencies from branches to unsafe ops
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_unsafe_deps(none, _, _, DAG) ->
- DAG;
-cd_unsafe_deps({Br,BrTy}, N, Ty, DAG) ->
- dep_arc(Br, hipe_target_machine:br_to_unsafe_latency(BrTy, Ty), N, DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : def_use
-%% Argument : Instr
-%% Description : Returns the registers that Instr defines and uses, as two lists
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-def_use(Instr) ->
- {hipe_sparc:defines(Instr), hipe_sparc:uses(Instr)}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : move_or_alu
-%% Description : True if the instruction is a move or an alu; false otherwise
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-move_or_alu(#move{}) -> true;
-move_or_alu(#alu{}) -> true;
-move_or_alu(_) -> false.
-
-%% Debugging stuff below %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
--ifdef(debug1).
-debug1_stuff(Blk, DAG, IxBlk, Sch, X) ->
- io:format("Blk: ~p~n",[Blk]),
- io:format("DAG: ~n~p~n~p",[DAG,IxBlk]),
- io:format("~n"),
- print_instrs(IxBlk),
- print_sch(Sch, IxBlk),
- print_instrs2(X).
-
-print_instrs([]) ->
- io:format("~n");
-print_instrs([{N,Instr} | Instrs]) ->
- io:format("(~p): ",[N]),
- hipe_sparc_pp:pp_instr(Instr),
- io:format("~p~n",[element(1,Instr)]),
- print_instrs(Instrs).
-
-print_instrs2([]) ->
- io:format("~n");
-print_instrs2([Instr | Instrs]) ->
- hipe_sparc_pp:pp_instr(Instr),
- print_instrs2(Instrs).
-
-print_sch([],_) -> io:format("~n");
-print_sch([{{cycle,Cycle},{node,I}} | Rest], IxBlk) ->
- io:format("{C~p, N~p} ",[Cycle,I]),
- print_node(I, IxBlk),
- print_sch(Rest, IxBlk).
-
-print_node(_, []) ->
- io:format("~n");
-print_node(I, [{I, Instr} | _]) ->
- hipe_sparc_pp:pp_instr(Instr);
-print_node(I, [_ | IxBlk]) ->
- print_node(I, IxBlk).
--else.
-debug1_stuff(_Blk, _DAG, _IxBlk, _Sch, _X) ->
- ok.
--endif.
diff --git a/lib/hipe/opt/hipe_schedule_prio.erl b/lib/hipe/opt/hipe_schedule_prio.erl
deleted file mode 100644
index 339bb82aab..0000000000
--- a/lib/hipe/opt/hipe_schedule_prio.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-%% -*- erlang-indent-level: 2 -*-
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% PRIORITY HANDLING AND PRIORITY CALCULATION
-%%
-%% Handling of ready nodes and priorities.
-%% - at present, all nodes have the same priority and so on.
-%%
-%% *** UNFINISHED ***
-%% - should compute a static priority estimate
-%% - should dynamically modify priorities + possibly insert NOPs
-%% (e.g., to separate branches, etc.)
-%% - thus, ought to be passed the current schedule and/or resources as well
-
--module(hipe_schedule_prio).
--export([init_ready/2,
- init_instr_prio/2,
- %% initial_ready_set/4,
- next_ready/7,
- add_ready_nodes/2,
- insert_node/3
- ]).
-
-init_ready(Size,Preds) ->
- hipe_ultra_prio:init_ready(Size,Preds).
-
-init_instr_prio(N,DAG) ->
- hipe_ultra_prio:init_instr_prio(N,DAG).
-
-%% initial_ready_set(M,N,Preds,Ready) ->
-%% hipe_ultra_prio:initial_ready_set(M,N,Preds,Ready).
-
-next_ready(C,Ready,Prio,Nodes,DAG,Preds,Earl) ->
- hipe_ultra_prio:next_ready(C,Ready,Prio,Nodes,DAG,Preds,Earl).
-
-add_ready_nodes(NodeLst,Ready) ->
- hipe_ultra_prio:add_ready_nodes(NodeLst,Ready).
-
-insert_node(C,I,Ready) ->
- hipe_ultra_prio:insert_node(C,I,Ready).
diff --git a/lib/hipe/opt/hipe_target_machine.erl b/lib/hipe/opt/hipe_target_machine.erl
deleted file mode 100644
index 75993cb95e..0000000000
--- a/lib/hipe/opt/hipe_target_machine.erl
+++ /dev/null
@@ -1,87 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% INTERFACE TO TARGET MACHINE MODEL
-%%
-%% Interfaces the instruction scheduler to the (resource) machine model.
-
--module(hipe_target_machine).
--export([init_resources/1,
- init_instr_resources/2,
- resources_available/4,
- advance_cycle/1
- ]).
--export([raw_latency/2,
- war_latency/2,
- waw_latency/2,
- %% m_raw_latency/2,
- %% m_war_latency/2,
- %% m_waw_latency/2,
- m_raw_latency/0,
- m_war_latency/0,
- m_waw_latency/0,
- br_to_unsafe_latency/2,
- unsafe_to_br_latency/2,
- br_br_latency/2
- ]).
-
--define(target,hipe_ultra_mod2).
-
-init_resources(X) ->
- ?target:init_resources(X).
-
-init_instr_resources(X,Y) ->
- ?target:init_instr_resources(X,Y).
-
-resources_available(X,Y,Z,W) ->
- ?target:resources_available(X,Y,Z,W).
-
-advance_cycle(X) ->
- ?target:advance_cycle(X).
-
-raw_latency(From,To) ->
- ?target:raw_latency(From,To).
-
-war_latency(From,To) ->
- ?target:war_latency(From,To).
-
-waw_latency(From,To) ->
- ?target:waw_latency(From,To).
-
-%% m_raw_latency(From,To) ->
-%% ?target:m_raw_latency(From,To).
-
-%% m_war_latency(From,To) ->
-%% ?target:m_war_latency(From,To).
-
-%% m_waw_latency(From,To) ->
-%% ?target:m_waw_latency(From,To).
-
-m_raw_latency() ->
- ?target:m_raw_latency().
-
-m_war_latency() ->
- ?target:m_war_latency().
-
-m_waw_latency() ->
- ?target:m_waw_latency().
-
-br_to_unsafe_latency(Br,U) ->
- ?target:br_to_unsafe_latency(Br,U).
-
-unsafe_to_br_latency(U,Br) ->
- ?target:unsafe_to_br_latency(U,Br).
-
-br_br_latency(Br1,Br2) ->
- ?target:br_br_latency(Br1,Br2).
diff --git a/lib/hipe/opt/hipe_ultra_mod2.erl b/lib/hipe/opt/hipe_ultra_mod2.erl
deleted file mode 100644
index cec9c56a1e..0000000000
--- a/lib/hipe/opt/hipe_ultra_mod2.erl
+++ /dev/null
@@ -1,233 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% ULTRASPARC MACHINE MODEL
-%%
-%% This module is used by the scheduler.
-%% The following interface is used:
-%% ...
-%%
-%% NOTES:
-%% - the machine model is simple (on the verge of simplistic)
-%% * all FUs are pipelined => model only one cycle at a time
-%% * instruction latencies are mostly 1
-%% * floating point is left for later (I _think_ it works, but ...)
-%% - conservative: instructions that require multiple resources are
-%% modelled as 'single'; instead, they could reserve IEU+BR or whatever
-%% - possibly inefficient: I think machine state model could be turned into
-%% a bitvector.
-
--module(hipe_ultra_mod2).
--export([init_resources/1,
- init_instr_resources/2,
- resources_available/4,
- advance_cycle/1
- ]).
--export([raw_latency/2,
- war_latency/2,
- waw_latency/2,
- %% m_raw_latency/2,
- %% m_war_latency/2,
- %% m_waw_latency/2,
- m_raw_latency/0,
- m_war_latency/0,
- m_waw_latency/0,
- br_to_unsafe_latency/2,
- unsafe_to_br_latency/2,
- br_br_latency/2
- ]).
-
--include("../sparc/hipe_sparc.hrl").
-
--define(debug(Str,Args),ok).
-%-define(debug(Str,Args),io:format(Str,Args)).
-
--define(debug_ultra(Str,Args),ok).
-%-define(debug_ultra(Str,Args),io:format(Str,Args)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Straightforward and somewhat simplistic model for UltraSparc:
-%% - only one cycle at a time is modelled
-%% - resources are simplified:
-%% * ieu0, ieu1, ieu, mem, br, single
-%% * per-cycle state = done | { I0, I1, NumI, X, Mem, Br }
-%% * unoptimized representation (could be bit vector)
-
-init_resources(_Size) ->
- ?debug_ultra('init res ~p~n',[_Size]),
- empty_state().
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-init_instr_resources(N,Nodes) ->
- ultra_instr_rsrcs(Nodes,hipe_vectors:new(N, '')).
-
-ultra_instr_rsrcs([],I_res) -> I_res;
-ultra_instr_rsrcs([N|Ns],I_res) ->
- ultra_instr_rsrcs(Ns,ultra_instr_type(N,I_res)).
-
-ultra_instr_type({N,I},I_res) ->
- hipe_vectors:set(I_res,N-1,instr_type(I)).
-
-instr_type(I) ->
- case I of
- #move{} ->
- ieu;
- #multimove{} -> %% TODO: expand multimoves before scheduling
- ieu;
- #alu{} ->
- case hipe_sparc:alu_operator(I) of
- '>>' -> ieu0;
- '<<' -> ieu0;
- _ -> ieu
- end;
- #alu_cc{} ->
- ieu1;
- #sethi{} ->
- ieu;
- #load{} ->
- mem;
- #store{} ->
- mem;
- #b{} ->
- br;
- #br{} ->
- br;
- #goto{} ->
- br;
- #jmp_link{} -> % imprecise; should be mem+br?
- single;
- #jmp{} -> % imprecise
- br;
- #call_link{} -> % imprecise; should be mem+br?
- single;
- #cmov_cc{} -> % imprecise
- single;
- #cmov_r{} -> % imprecise
- single;
- #load_atom{} -> % should be resolved to sethi/or
- single;
- #load_address{} -> % should be resolved to sethi/or
- single;
- #load_word_index{} -> % should be resolved to sethi/or
- single;
- %% uncommon types:
- #label{} ->
- none;
- #nop{} ->
- none;
- #comment{} ->
- none;
- _ ->
- exit({ultrasparc_instr_type,{cant_schedule,I}})
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-resources_available(_Cycle, I, Rsrc, I_res) ->
- res_avail(instruction_resource(I_res, I), Rsrc).
-
-instruction_resource(I_res, I) ->
- hipe_vectors:get(I_res, I-1).
-
-%% The following function checks resource availability.
-%% * all function units are assumed to be fully pipelined, so only
-%% one cycle at a time is modelled.
-%% * for IEU0 and IEU1, these must precede all generic IEU instructions
-%% (handled by X bit)
-%% * at most 2 integer instructions can issue in a cycle
-%% * mem is straightforward
-%% * br closes the cycle (= returns done).
-%% * single requires an entirely empty state and closes the cycle
-
-res_avail(ieu0, { free, I1, NumI, free, Mem, Br })
- when is_integer(NumI), NumI < 2 ->
- { yes, { occ, I1, NumI+1, free, Mem, Br }};
-res_avail(ieu1, { _I0, free, NumI, free, Mem, Br })
- when is_integer(NumI), NumI < 2 ->
- { yes, { free, occ, NumI+1, free, Mem, Br }};
-res_avail(ieu, { I0, I1, NumI, _X, Mem, Br })
- when is_integer(NumI), NumI < 2 ->
- { yes, { I0, I1, NumI+1, occ, Mem, Br }};
-res_avail(mem, { I0, I1, NumI, X, free, Br }) ->
- { yes, { I0, I1, NumI, X, occ, Br }};
-res_avail(br, { _I0, _I1, _NumI, _X, _Mem, free }) ->
- { yes, done };
-res_avail(single, { free, free, 0, free, free, free }) ->
- { yes, done };
-res_avail(_, _) ->
- no.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-advance_cycle(_Rsrc) ->
- empty_state().
-
-empty_state() -> { free, free, 0, free, free, free }.
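A minimal sketch (not part of the module; the instruction mix is invented) of
the per-cycle state transitions performed by res_avail/2:

res_avail_example() ->
    S0 = empty_state(),              % {free, free, 0, free, free, free}
    {yes, S1} = res_avail(ieu, S0),  % one generic integer op issued
    {yes, S2} = res_avail(mem, S1),  % the memory slot is taken
    {yes, done} = res_avail(br, S2), % a branch closes the cycle
    no = res_avail(single, S1),      % 'single' needs a completely empty state
    ok.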
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Latencies are taken from UltraSparc hardware manual
-%%
-%% *** UNFINISHED ***
-%% more precisely, they are taken from my memory of the US-manual
-%% at the moment.
-%%
-%% Note: all ld/st are assumed to hit in the L1 cache (D-cache),
-%% which is sort of imprecise.
-
-raw_latency(alu, store) -> 0;
-raw_latency(load, _) -> 2; % only if load is L1 hit
-raw_latency(alu_cc, b) -> 0;
-raw_latency(_I0, _I1) ->
- 1.
-
-war_latency(_I0, _I1) ->
- 0.
-
-waw_latency(_I0, _I1) ->
- 1.
-
-%% *** UNFINISHED ***
-%% At present, all load/stores are assumed to hit in the L1 cache,
-%% which isn't really satisfying.
-
-%% m_raw_latency(_St, _Ld) ->
-%% 1.
-%%
-%% m_war_latency(_Ld, _St) ->
-%% 1.
-%%
-%% m_waw_latency(_St1, _St2) ->
-%% 1.
-
-%% Use these for 'default latencies' = do not permit reordering.
-
-m_raw_latency() ->
- 1.
-
-m_war_latency() ->
- 1.
-
-m_waw_latency() ->
- 1.
-
-br_to_unsafe_latency(_BrTy, _UTy) ->
- 0.
-
-unsafe_to_br_latency(_UTy, _BrTy) ->
- 0.
-
-br_br_latency(_BrTy1, _BrTy2) ->
- 0.
diff --git a/lib/hipe/opt/hipe_ultra_prio.erl b/lib/hipe/opt/hipe_ultra_prio.erl
deleted file mode 100644
index 6dd240a33a..0000000000
--- a/lib/hipe/opt/hipe_ultra_prio.erl
+++ /dev/null
@@ -1,298 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% PRIORITY HANDLING AND PRIORITY CALCULATION
-%%
-%% Handling of ready nodes and priorities.
-%% Priorities are mainly derived from the critical path; further tie-breaking
-%% priorities are added.
-%% * One version adds priorities that depend only on the instr, so that,
-%% for example, loads get higher priority than stores and are ordered
-%% by register and offset for better cache performance.
-%% * The other version gives higher priority to a node that adds more new
-%% nodes to the ready list. This one is perhaps not implemented very
-%% efficiently, but it was added too late for a smarter solution.
-%% One of the versions is commented out.
-
--module(hipe_ultra_prio).
--export([init_ready/2,
- init_instr_prio/2,
- %% initial_ready_set/4,
- next_ready/7,
- add_ready_nodes/2,
- insert_node/3
- ]).
-
--include("../sparc/hipe_sparc.hrl").
-
-% At first, only nodes with no predecessors are selected.
-% - if R is empty, there is an error (unless BB itself is empty)
-
-%% Arguments : Size - size of ready-array
-%% Preds - array with number of predecessors for each node
-%% Returns : An array with list of ready-nodes for each cycle.
-
-init_ready(Size, Preds) ->
- P = hipe_vectors:size(Preds),
- Ready = hipe_vectors:new(Size, []),
- R = initial_ready_set(1, P, Preds, []),
- hipe_vectors:set(Ready, 0, R).
-
-init_instr_prio(N, DAG) ->
- critical_path(N, DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : initial_ready_set
-%% Argument : M - current node-index
-%% N - where to stop
-%% Preds - array with number of predecessors for each node
-%% Ready - list with ready-nodes
-%% Returns : Ready - list with ready-nodes
-%% Description : Finds all nodes with no predecessors and adds them to ready.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-initial_ready_set(M, N, Preds, Ready) ->
- if
- M > N ->
- Ready;
- true ->
- case hipe_vectors:get(Preds, M-1) of
- 0 ->
- initial_ready_set(M+1, N, Preds, [M|Ready]);
- V when is_integer(V), V > 0 ->
- initial_ready_set(M+1, N, Preds, Ready)
- end
- end.
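A minimal sketch (not part of the module; the predecessor counts are invented)
of initial_ready_set/4 over a three-node block where only node 2 has a
predecessor:

initial_ready_example() ->
    Preds0 = hipe_vectors:new(3, 0),
    Preds = hipe_vectors:set(Preds0, 1, 1),    % node 2 has one predecessor
    [3,1] = initial_ready_set(1, 3, Preds, []),
    ok.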
-
-%% The following handles the nodes ready to schedule:
-%% 1. select the ready queue of given cycle
-%% 2. if queue empty, return none
-%% 3. otherwise, remove entry with highest priority
-%% and return {next,Highest_Prio,NewReady}
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : next_ready
-%% Argument : C - current cycle
-%% Ready - array with ready nodes
-%% Prio - array with cpath-priorities for all nodes
-%% Nodes - indexed list [{N, Instr}]
-%% Returns : none / {next,Highest_Prio,NewReady}
-%% Description : 1. select the ready queue of given cycle
-%% 2. if queue empty, return none
-%% 3. otherwise, remove entry with highest priority
-%% and return {next,Highest_Prio,NewReady} where Highest_Prio
-%% = Id of instr and NewReady = updated ready-array.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-next_ready(C, Ready, Prio, Nodes, DAG, Preds, Earl) ->
- Curr = hipe_vectors:get(Ready, C-1),
- case Curr of
- [] ->
- none;
- Instrs ->
- {BestI,RestIs} =
- get_best_instr(Instrs, Prio, Nodes, DAG, Preds, Earl, C),
- {next,BestI,hipe_vectors:set(Ready,C-1,RestIs)}
- end.
-
-% next_ready(C,Ready,Prio,Nodes) ->
-% Curr = hipe_vectors:get(Ready,C-1),
-% case Curr of
-% [] ->
-% none;
-% Instrs ->
-% {BestInstr,RestInstrs} = get_best_instr(Instrs, Prio, Nodes),
-% {next,BestInstr,hipe_vectors:set(Ready,C-1,RestInstrs)}
-% end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_best_instr
-%% Argument : Instrs - list of node-id's
-%% Prio - array with cpath-priorities for the nodes
-%% Nodes - indexed list [{Id, Instr}]
-%% Returns : {BestSoFar, Rest} - Id of best instr and the rest of id's
-%% Description : Returns the id of the instr that is the best choice.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-get_best_instr([Instr|Instrs], Prio, Nodes, DAG, Preds, Earl, C) ->
- get_best_instr(Instrs, [], Instr, Prio, Nodes, DAG, Preds, Earl, C).
-
-get_best_instr([], Rest, BestSoFar, _Prio, _Nodes, _DAG, _Preds, _Earl, _C) ->
- {BestSoFar, Rest};
-get_best_instr([Instr|Instrs], PassedInstrs, BestSoFar, Prio, Nodes,
- DAG, Preds, Earl, C) ->
- case better(Instr, BestSoFar, Prio, Nodes, DAG, Preds, Earl, C) of
- true ->
- get_best_instr(Instrs, [BestSoFar|PassedInstrs],
- Instr, Prio, Nodes, DAG, Preds, Earl, C);
- false ->
- get_best_instr(Instrs, [Instr|PassedInstrs], BestSoFar, Prio,
- Nodes, DAG, Preds, Earl, C)
- end.
-
-% get_best_instr([Instr|Instrs], Prio, Nodes) ->
-% get_best_instr(Instrs, [], Instr, Prio, Nodes).
-
-% get_best_instr([], Rest, BestSoFar, Prio, Nodes) -> {BestSoFar, Rest};
-% get_best_instr([Instr|Instrs], PassedInstrs, BestSoFar, Prio, Nodes) ->
-% case better(Instr, BestSoFar, Prio, Nodes) of
-% true ->
-% get_best_instr(Instrs, [BestSoFar|PassedInstrs],
-% Instr, Prio, Nodes);
-% false ->
-% get_best_instr(Instrs, [Instr|PassedInstrs],BestSoFar, Prio, Nodes)
-% end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : better
-%% Argument : Instr1 - Id of instr 1
-%% Instr2 - Id of instr 2
-%% Prio - array with cpath-priorities for the nodes
-%% Nodes - indexed list [{Id, Instr}]
-%% Returns : true if Instr1 has higher priority than Instr2
-%% Description : Checks if Instr1 is a better choice than Instr2 for scheduling
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-better(Instr1, Instr2, Prio, Nodes, DAG, Preds, Earl, C) ->
- better_hlp(priority(Instr1, Prio, Nodes, DAG, Preds, Earl, C),
- priority(Instr2, Prio, Nodes, DAG, Preds, Earl, C)).
-
-better_hlp([], []) -> false;
-better_hlp([], [_|_]) -> false;
-better_hlp([_|_], []) -> true;
-better_hlp([X|Xs], [Y|Ys]) -> (X > Y) or ((X =:= Y) and better_hlp(Xs,Ys)).
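A minimal sketch (not part of the module; the priority vectors are invented) of
the lexicographic comparison performed by better_hlp/2:

better_hlp_example() ->
    true  = better_hlp([5,2,3], [5,1,9]),  % decided by the second element
    false = better_hlp([5,2], [5,2]),      % equal lists are not "better"
    true  = better_hlp([4,1], []),         % a longer list beats an empty one
    ok.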
-
-%%
-%% Returns the instr corresponding to id
-%%
-get_instr(InstrId, [{InstrId,Instr}|_]) -> Instr;
-get_instr(InstrId, [_|Xs]) -> get_instr(InstrId, Xs).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : priority
-%% Argument : InstrId - Id
-%% Prio - array with cpath-priorities for the nodes
-%% Nodes - indexed list [{Id, Instr}]
-%% Returns : PrioList - list of priorities [MostSignificant, LessSign, ...]
-%% Description : Returns a list of priorities where the first element is the
-%% cpath-priority, the second is the number of nodes that become
-%% ready, and the rest depend on what kind of instr it is. Used
-%% to order loads/stores sequentially; more components can be
-%% added as needed...
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-priority(InstrId, Prio, Nodes, DAG, Preds, Earl, C) ->
- {ReadyNodes,_,_,_} = hipe_schedule:delete_node(C,InstrId,DAG,Preds,Earl),
- Instr = get_instr(InstrId, Nodes),
- Prio1 = hipe_vectors:get(Prio, InstrId-1),
- Prio2 = length(ReadyNodes),
- PrioRest =
- case Instr of
- #load_atom{} ->
- [3];
- #move{} ->
- [3];
- #load{} ->
- Src = hipe_sparc:load_src(Instr),
- Off = hipe_sparc:load_off(Instr),
- case hipe_sparc:is_reg(Off) of
- false -> [3,
- -(hipe_sparc:reg_nr(Src)),
- -(hipe_sparc:imm_value(Off))];
- true -> [1]
- end;
- #store{} ->
- Src = hipe_sparc:store_dest(Instr),
- Off = hipe_sparc:store_off(Instr),
- case hipe_sparc:is_reg(Off) of
- false -> [2,
- -(hipe_sparc:reg_nr(Src)),
- -(hipe_sparc:imm_value(Off))];
- true -> [1]
- end;
- _ -> [0]
- end,
- [Prio1,Prio2|PrioRest].
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_ready_nodes
-%% Argument : Nodes - list of [{Cycle,Id}]
-%% Ready - array of ready nodes for all cycles
-%% Returns : NewReady - updated ready-array
-%% Description : Gets a list of instrs and adds each of them to the
-%% ready-array in its corresponding cycle.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-add_ready_nodes([], Ready) -> Ready;
-add_ready_nodes([{C,I}|Xs], Ready) ->
- add_ready_nodes(Xs, insert_node(C, I, Ready)).
-
-insert_node(C, I, Ready) ->
- Old = hipe_vectors:get(Ready, C-1),
- hipe_vectors:set(Ready, C-1, [I|Old]).
-
-%%
-%% Computes the latency of the "most expensive" path through the graph
-%% for every node. Returns an array of priorities for all nodes.
-%%
-critical_path(N, DAG) ->
- critical_path(1, N, DAG, hipe_vectors:new(N, -1)).
-
-critical_path(M, N, DAG, Prio) ->
- if
- M > N ->
- Prio;
- true ->
- critical_path(M+1, N, DAG, cpath(M, DAG, Prio))
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cpath
-%% Argument : M - current node id
-%% DAG - the dependence graph
-%% Prio - array of priorities for all nodes
-%% Returns : Prio - updated prio array
-%% Description : If node has prio -1, it has not been visited
-%% - otherwise, compute priority as max of priorities of
-%% successors (+ latency)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-cpath(M, DAG, Prio) ->
- InitPrio = hipe_vectors:get(Prio, M-1),
- if
- InitPrio =:= -1 ->
- cpath_node(M, DAG, Prio);
- true ->
- Prio
- end.
-
-cpath_node(N, DAG, Prio) ->
- SuccL = dag_succ(DAG, N),
- {Max, NewPrio} = cpath_succ(SuccL, DAG, Prio),
- hipe_vectors:set(NewPrio, N-1, Max).
-
-cpath_succ(SuccL, DAG, Prio) ->
- cpath_succ(SuccL, DAG, Prio, 0).
-
-%% performs an unnecessary lookup of priority of Succ, but that might
-%% not be such a big deal
-
-cpath_succ([], _DAG, Prio, NodePrio) -> {NodePrio,Prio};
-cpath_succ([{Lat,Succ}|Xs], DAG, Prio, NodePrio) ->
- NewPrio = cpath(Succ, DAG, Prio),
- NewNodePrio = erlang:max(hipe_vectors:get(NewPrio, Succ - 1) + Lat, NodePrio),
- cpath_succ(Xs, DAG, NewPrio, NewNodePrio).
-
-dag_succ(DAG, N) when is_integer(N) ->
- hipe_vectors:get(DAG, N-1).
-
diff --git a/lib/hipe/test/Makefile b/lib/hipe/test/Makefile
index 544888719f..efeb0887ab 100644
--- a/lib/hipe/test/Makefile
+++ b/lib/hipe/test/Makefile
@@ -7,7 +7,8 @@ include $(ERL_TOP)/make/$(TARGET)/otp.mk
MODULES= \
hipe_SUITE \
- opt_verify_SUITE
+ opt_verify_SUITE \
+ erl_types_SUITE
# .erl files for these modules are automatically generated
GEN_MODULES= \
diff --git a/lib/hipe/test/erl_types_SUITE.erl b/lib/hipe/test/erl_types_SUITE.erl
new file mode 100644
index 0000000000..7d7c144b69
--- /dev/null
+++ b/lib/hipe/test/erl_types_SUITE.erl
@@ -0,0 +1,197 @@
+%% -*- erlang-indent-level: 4 -*-
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+-module(erl_types_SUITE).
+
+-export([all/0,
+ consistency_and_to_string/1]).
+
+%% Simplify calls into erl_types and avoid importing the entire module.
+-define(M, erl_types).
+
+-include_lib("common_test/include/ct.hrl").
+
+all() ->
+ [consistency_and_to_string].
+
+consistency_and_to_string(_Config) ->
+ %% Check consistency of types
+ Atom1 = ?M:t_atom(),
+ Atom2 = ?M:t_atom(foo),
+ Atom3 = ?M:t_atom(bar),
+ true = ?M:t_is_atom(Atom2),
+
+ True = ?M:t_atom(true),
+ False = ?M:t_atom(false),
+ Bool = ?M:t_boolean(),
+ true = ?M:t_is_boolean(True),
+ true = ?M:t_is_boolean(Bool),
+ false = ?M:t_is_boolean(Atom1),
+
+ Binary = ?M:t_binary(),
+ true = ?M:t_is_binary(Binary),
+
+ Bitstr = ?M:t_bitstr(),
+ true = ?M:t_is_bitstr(Bitstr),
+
+ Bitstr1 = ?M:t_bitstr(7, 3),
+ true = ?M:t_is_bitstr(Bitstr1),
+ false = ?M:t_is_binary(Bitstr1),
+
+ Bitstr2 = ?M:t_bitstr(16, 8),
+ true = ?M:t_is_bitstr(Bitstr2),
+ true = ?M:t_is_binary(Bitstr2),
+
+ BitStr816 = ?M:t_bitstr(8,16),
+ BitStr816 = ?M:t_subtract(?M:t_bitstr(4, 12), ?M:t_bitstr(8, 12)),
+
+ Int1 = ?M:t_integer(),
+ Int2 = ?M:t_integer(1),
+ Int3 = ?M:t_integer(16#ffffffff),
+ true = ?M:t_is_integer(Int2),
+ true = ?M:t_is_byte(Int2),
+ false = ?M:t_is_byte(Int3),
+ false = ?M:t_is_byte(?M:t_from_range(-1, 1)),
+ true = ?M:t_is_byte(?M:t_from_range(1, 255)),
+
+ Tuple1 = ?M:t_tuple(),
+ Tuple2 = ?M:t_tuple(3),
+ Tuple3 = ?M:t_tuple([Atom1, Int1]),
+ Tuple4 = ?M:t_tuple([Tuple1, Tuple2]),
+ Tuple5 = ?M:t_tuple([Tuple3, Tuple4]),
+ Tuple6 = ?M:t_limit(Tuple5, 2),
+ Tuple7 = ?M:t_limit(Tuple5, 3),
+ true = ?M:t_is_tuple(Tuple1),
+
+ Port = ?M:t_port(),
+ Pid = ?M:t_pid(),
+ Ref = ?M:t_reference(),
+ Identifier = ?M:t_identifier(),
+ false = ?M:t_is_reference(Port),
+ true = ?M:t_is_identifier(Port),
+
+ Function1 = ?M:t_fun(),
+ Function2 = ?M:t_fun(Pid),
+ Function3 = ?M:t_fun([], Pid),
+ Function4 = ?M:t_fun([Port, Pid], Pid),
+ Function5 = ?M:t_fun([Pid, Atom1], Int2),
+ true = ?M:t_is_fun(Function3),
+
+ List1 = ?M:t_list(),
+ List2 = ?M:t_list(?M:t_boolean()),
+ List3 = ?M:t_cons(?M:t_boolean(), List2),
+ List4 = ?M:t_cons(?M:t_boolean(), ?M:t_atom()),
+ List5 = ?M:t_cons(?M:t_boolean(), ?M:t_nil()),
+ List6 = ?M:t_cons_tl(List5),
+ List7 = ?M:t_sup(List4, List5),
+ List8 = ?M:t_inf(List7, ?M:t_list()),
+ List9 = ?M:t_cons(),
+ List10 = ?M:t_cons_tl(List9),
+ true = ?M:t_is_boolean(?M:t_cons_hd(List5)),
+ true = ?M:t_is_list(List5),
+ false = ?M:t_is_list(List4),
+
+ Product1 = ?M:t_product([Atom1, Atom2]),
+ Product2 = ?M:t_product([Atom3, Atom1]),
+ Product3 = ?M:t_product([Atom3, Atom2]),
+
+ Union1 = ?M:t_sup(Atom2, Atom3),
+ Union2 = ?M:t_sup(Tuple2, Tuple3),
+ Union3 = ?M:t_sup(Int2, Atom3),
+ Union4 = ?M:t_sup(Port, Pid),
+ Union5 = ?M:t_sup(Union4, Int1),
+ Union6 = ?M:t_sup(Function1, Function2),
+ Union7 = ?M:t_sup(Function4, Function5),
+ Union8 = ?M:t_sup(True, False),
+ true = ?M:t_is_boolean(Union8),
+ Union9 = ?M:t_sup(Int2, ?M:t_integer(2)),
+ true = ?M:t_is_byte(Union9),
+ Union10 = ?M:t_sup(?M:t_tuple([?M:t_atom(true), ?M:t_any()]),
+ ?M:t_tuple([?M:t_atom(false), ?M:t_any()])),
+
+ Any = ?M:t_any(),
+ Any = ?M:t_sup(Product3, Function5),
+
+ Atom3 = ?M:t_inf(Union3, Atom1),
+ Union2 = ?M:t_inf(Union2, Tuple1),
+ Int2 = ?M:t_inf(Int1, Union3),
+ Union4 = ?M:t_inf(Union4, Identifier),
+ Port = ?M:t_inf(Union5, Port),
+ Function4 = ?M:t_inf(Union7, Function4),
+ None = ?M:t_none(),
+ None = ?M:t_inf(Product2, Atom1),
+ Product3 = ?M:t_inf(Product1, Product2),
+ Function5 = ?M:t_inf(Union7, Function5),
+ true = ?M:t_is_byte(?M:t_inf(Union9, ?M:t_number())),
+ true = ?M:t_is_char(?M:t_inf(Union9, ?M:t_number())),
+
+ RecDict = #{{record, foo} => {{?FILE, ?LINE}, [{2, [{bar, [], ?M:t_any()},
+ {baz, [], ?M:t_any()}]}]}},
+ Record1 = ?M:t_from_term({foo, [1,2], {1,2,3}}),
+
+ %% Check string representations
+ "atom()" = ?M:t_to_string(Atom1),
+ "'foo'" = ?M:t_to_string(Atom2),
+ "'bar'" = ?M:t_to_string(Atom3),
+
+ "binary()" = ?M:t_to_string(Binary),
+
+ "integer()" = ?M:t_to_string(Int1),
+ "1" = ?M:t_to_string(Int2),
+
+ "tuple()" = ?M:t_to_string(Tuple1),
+ "{_,_,_}" = ?M:t_to_string(Tuple2),
+ "{atom(),integer()}" = ?M:t_to_string(Tuple3),
+ "{tuple(),{_,_,_}}" = ?M:t_to_string(Tuple4),
+ "{{atom(),integer()},{tuple(),{_,_,_}}}" = ?M:t_to_string(Tuple5),
+ "{{_,_},{_,_}}" = ?M:t_to_string(Tuple6),
+ "{{atom(),integer()},{tuple(),{_,_,_}}}" = ?M:t_to_string(Tuple7),
+
+ "reference()" = ?M:t_to_string(Ref),
+ "port()" = ?M:t_to_string(Port),
+ "pid()" = ?M:t_to_string(Pid),
+ "identifier()" = ?M:t_to_string(Identifier),
+
+ "[any()]" = ?M:t_to_string(List1),
+ "[boolean()]" = ?M:t_to_string(List2),
+ "[boolean(),...]" = ?M:t_to_string(List3),
+ "nonempty_improper_list(boolean(),atom())" = ?M:t_to_string(List4),
+ "[boolean(),...]" = ?M:t_to_string(List5),
+ "[boolean()]" = ?M:t_to_string(List6),
+ "nonempty_maybe_improper_list(boolean(),atom() | [])" = ?M:t_to_string(List7),
+ "[boolean(),...]" = ?M:t_to_string(List8),
+ "nonempty_maybe_improper_list()" = ?M:t_to_string(List9),
+ "any()" = ?M:t_to_string(List10),
+
+ "fun()" = ?M:t_to_string(Function1),
+ "fun((...) -> pid())" = ?M:t_to_string(Function2),
+ "fun(() -> pid())" = ?M:t_to_string(Function3),
+ "fun((port(),pid()) -> pid())" = ?M:t_to_string(Function4),
+ "fun((pid(),atom()) -> 1)" = ?M:t_to_string(Function5),
+
+ "<atom(),'foo'>" = ?M:t_to_string(Product1),
+ "<'bar',atom()>" = ?M:t_to_string(Product2),
+
+ "#foo{bar::[1 | 2,...],baz::{1,2,3}}" = ?M:t_to_string(Record1, RecDict),
+
+ "'bar' | 'foo'" = ?M:t_to_string(Union1),
+ "{atom(),integer()} | {_,_,_}" = ?M:t_to_string(Union2),
+ "'bar' | 1" = ?M:t_to_string(Union3),
+ "pid() | port()" = ?M:t_to_string(Union4),
+ "pid() | port() | integer()" = ?M:t_to_string(Union5),
+ "fun()" = ?M:t_to_string(Union6),
+ "fun((pid() | port(),atom() | pid()) -> pid() | 1)" = ?M:t_to_string(Union7),
+ "boolean()" = ?M:t_to_string(Union8),
+ "{'false',_} | {'true',_}" = ?M:t_to_string(Union10),
+ "{'true',integer()}" = ?M:t_to_string(?M:t_inf(Union10, ?M:t_tuple([?M:t_atom(true), ?M:t_integer()]))).
diff --git a/lib/inets/src/http_server/mod_esi.erl b/lib/inets/src/http_server/mod_esi.erl
index 3206d957d9..b49b3a7093 100644
--- a/lib/inets/src/http_server/mod_esi.erl
+++ b/lib/inets/src/http_server/mod_esi.erl
@@ -561,7 +561,7 @@ eval(#mod{method = Method} = ModData, ESIBody, Modules)
end.
generate_webpage(ESIBody) ->
- (catch lib:eval_str(string:concat(ESIBody,". "))).
+ (catch eval_str(string:concat(ESIBody,". "))).
is_authorized(_ESIBody, [all]) ->
true;
@@ -573,3 +573,45 @@ is_authorized(ESIBody, Modules) ->
nomatch ->
false
end.
+
+%% eval_str(InStr) -> {ok, OutStr} | {error, ErrStr}
+%% InStr must represent a body
+%% Note: If InStr is a binary it has to be a Latin-1 string.
+%% If you have a UTF-8 encoded binary you have to call
+%% unicode:characters_to_list/1 before the call to eval_str().
+
+-define(result(F,D), lists:flatten(io_lib:format(F, D))).
+
+-spec eval_str(string()) ->
+ {'ok', string()} | {'error', string()}.
+
+eval_str(Str) when is_list(Str) ->
+ case erl_scan:tokens([], Str, 0) of
+ {more, _} ->
+ {error, "Incomplete form (missing .<cr>)??"};
+ {done, {ok, Toks, _}, Rest} ->
+ case all_white(Rest) of
+ true ->
+ case erl_parse:parse_exprs(Toks) of
+ {ok, Exprs} ->
+ case catch erl_eval:exprs(Exprs, erl_eval:new_bindings()) of
+ {value, Val, _} ->
+ {ok, Val};
+ Other ->
+ {error, ?result("*** eval: ~p", [Other])}
+ end;
+ {error, {_Line, Mod, Args}} ->
+ Msg = ?result("*** ~ts",[Mod:format_error(Args)]),
+ {error, Msg}
+ end;
+ false ->
+ {error, ?result("Non-white space found after "
+ "end-of-form :~ts", [Rest])}
+ end
+ end.
+
+all_white([$\s|T]) -> all_white(T);
+all_white([$\n|T]) -> all_white(T);
+all_white([$\t|T]) -> all_white(T);
+all_white([]) -> true;
+all_white(_) -> false.
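The eval_str/1 helper added above follows the usual scan/parse/evaluate pipeline. A minimal sketch of that pipeline in the Erlang shell, assuming the input string is terminated by ". " as the comment requires (illustration only, not part of the patch):

    Str = "1 + 2. ".
    {done, {ok, Toks, _EndLoc}, _Rest} = erl_scan:tokens([], Str, 0).
    {ok, Exprs} = erl_parse:parse_exprs(Toks).
    {value, Result, _Bindings} = erl_eval:exprs(Exprs, erl_eval:new_bindings()).
    %% Result is now bound to 3; eval_str/1 wraps the same steps with error handling.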
diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile
index 82869d7b15..29dc73a523 100644
--- a/lib/kernel/doc/src/Makefile
+++ b/lib/kernel/doc/src/Makefile
@@ -42,6 +42,7 @@ XML_REF3_FILES = application.xml \
disk_log.xml \
erl_boot_server.xml \
erl_ddll.xml \
+ erl_epmd.xml \
erl_prim_loader_stub.xml \
erlang_stub.xml \
error_handler.xml \
diff --git a/lib/kernel/doc/src/erl_epmd.xml b/lib/kernel/doc/src/erl_epmd.xml
new file mode 100644
index 0000000000..8b076cd2d7
--- /dev/null
+++ b/lib/kernel/doc/src/erl_epmd.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>erl_epmd</title>
+ <prepared>Timmo Verlaan</prepared>
+ <docno>1</docno>
+ <date>2018-02-19</date>
+ <rev>A</rev>
+ </header>
+ <module>erl_epmd</module>
+ <modulesummary>
+ Erlang interface towards epmd
+ </modulesummary>
+ <description>
+    <p>This module communicates with the EPMD daemon, see <seealso
+    marker="erts:epmd">epmd</seealso>. To implement your own epmd module, please
+    see <seealso marker="erts:alt_disco">ERTS User's Guide: How to Implement an
+    Alternative Service Discovery for Erlang Distribution</seealso>.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="start_link" arity="0"/>
+ <fsummary>Callback for erl_distribution supervisor.</fsummary>
+ <desc>
+ <p>This function is invoked as this module is added as a child of the
+ <c>erl_distribution</c> supervisor.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="register_node" arity="2"/>
+ <name name="register_node" arity="3"/>
+ <fsummary>Registers the node with <c>epmd</c>.</fsummary>
+ <desc>
+ <p>Registers the node with <c>epmd</c> and tells epmd what port will be
+ used for the current node. It returns a creation number. This number is
+ incremented on each register to help with identifying if a node is
+ reconnecting to epmd.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="port_please" arity="2"/>
+ <name name="port_please" arity="3"/>
+ <fsummary>Returns the port number for a given node.</fsummary>
+ <desc>
+ <p>Requests the distribution port for the given node of an EPMD
+ instance. Together with the port it returns a distribution protocol
+ version which has been 5 since Erlang/OTP R6.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="address_please" arity="3"/>
+ <fsummary>Returns address and port.</fsummary>
+ <desc>
+ <p>Called by the distribution module. Resolves the <c>Host</c> to an IP
+ address.</p>
+      <p>Another epmd module may also return the port and distribution
+        protocol version.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="names" arity="1"/>
+ <fsummary>Names of Erlang nodes at a host.</fsummary>
+ <desc>
+ <p>Called by <seealso marker="net_adm"><c>net_adm:names/0</c></seealso>.
+ <c>Host</c> defaults to the localhost. Returns the names and associated
+ port numbers of the Erlang nodes that <c>epmd</c> registered at the
+ specified host. Returns <c>{error, address}</c> if <c>epmd</c> is not
+ operational.</p>
+ <p><em>Example:</em></p>
+ <pre>
+(arne@dunn)1> <input>erl_epmd:names(localhost).</input>
+{ok,[{"arne",40262}]}</pre>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 554d675383..7894600c21 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -122,21 +122,6 @@
application. For more information about configuration parameters,
see file <seealso marker="app"><c>app(4)</c></seealso>.</p>
<taglist>
- <tag><c>browser_cmd = string() | {M,F,A}</c></tag>
- <item>
- <p>When pressing the <em>Help</em> button in a tool such as Debugger,
- the help text (an HTML file <c>File</c>) is by default
- displayed in a Netscape browser, which is required to be
- operational. This parameter can be used to change the command for
- how to display the help text if another browser than Netscape
- is preferred, or if another platform than Unix or Windows is
- used.</p>
- <p>If set to a string <c>Command</c>, the command
- <c>"Command File"</c> is evaluated using
- <seealso marker="os#cmd/1"><c>os:cmd/1</c></seealso>.</p>
- <p>If set to a module-function-args tuple, <c>{M,F,A}</c>,
- the call <c>apply(M,F,[File|A])</c> is evaluated.</p>
- </item>
<tag><c>distributed = [Distrib]</c></tag>
<item>
<p>Specifies which applications that are distributed and on which
@@ -226,7 +211,7 @@
<p>This configuration parameter is used both for the global
logger level, and for the standard handler started by
the Kernel application (see <c>logger_dest</c> variable above).</p>
- <p>The default value is <c>info</c></p>
+ <p>The default value is <c>info</c>.</p>
</item>
<tag><marker id="disk_log_vars"/>
<c>logger_disk_log_type = halt | wrap</c></tag>
@@ -251,14 +236,14 @@ logger_disk_log_maxbytes = 1048576</code>
<item>
<p>If this parameter is set to true, then the logger handler
started by kernel will not log any progress-, crash-, or
- supervisor reports. If the SASL application is starated,
+ supervisor reports. If the SASL application is started,
these log events will be sent to a second handler instance
- named sasl_h, according to values of the SASL environment
- variables <c>sasl_error_logger</c>
+ named <c>sasl_h</c>, according to values of the SASL
+ environment variables <c>sasl_error_logger</c>
and <c>sasl_errlog_type</c>, see
<seealso marker="sasl:sasl_app#configuration">SASL(6)
</seealso></p>
- <p>The default value is <c>false</c></p>
+ <p>The default value is <c>false</c>.</p>
<p>See chapter <seealso marker="logger_chapter#compatibility">Backwards
compatibility with error_logger</seealso> for more
information about handling of the so called SASL reports.</p>
@@ -271,7 +256,7 @@ logger_disk_log_maxbytes = 1048576</code>
reports from <c>supervisor</c>
and <c>application_controller</c> shall be logged or
not.</p>
- <p>If <c>logger_sasl_compatible = false</c>,
+ <p>If <c>logger_sasl_compatible = true</c>,
then <c>logger_log_progress</c> is ignored.</p>
</item>
<tag><marker id="logger_format_depth"/>
@@ -280,14 +265,6 @@ logger_disk_log_maxbytes = 1048576</code>
<p>Can be used to limit the size of the
formatted output from the logger handlers.</p>
- <note><p>This configuration parameter was introduced in OTP 18.1
- and is experimental. Based on user feedback, it
- can be changed or improved in future releases, for example,
- to gain better control over how to limit the size of the
- formatted output. We have no plans to remove this
- new feature entirely, unless it turns out to be
- useless.</p></note>
-
<p><c>Depth</c> is a positive integer representing the maximum
depth to which terms are printed by the logger
handlers included in OTP. This
@@ -312,11 +289,11 @@ logger_disk_log_maxbytes = 1048576</code>
</item>
<tag><c>logger_max_size = integer() | unlimited</c></tag>
<item>
- <p>This parameter specifies the maximum size (bytes) each
- log event can have when printed by the standard logger
- handler. If the resulting string after formatting an event
- is bigger than this, it will be truncated before printed
- to the handler's destination.</p>
+ <p>This parameter specifies a hard maximum size limit (number
+ of characters) each log event can have when printed by the
+ default logger formatter. If the resulting string after
+ formatting an event is bigger than this, it will be
+        truncated before being printed to the handler's destination.</p>
</item>
<tag><c>logger_utc = boolean()</c></tag>
<item>
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index 66e6e5c689..d901454e62 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -67,37 +67,86 @@
<datatype>
<name name="metadata"/>
<desc>
- <p>Metadata associated with the message to be logged.</p>
+ <p>Metadata for the log event.</p>
+ <p>Logger adds the following metadata to each log event:</p>
+ <list>
+ <item><c>pid => self()</c></item>
+ <item><c>gl => group_leader()</c></item>
+ <item><c>time => erlang:monotonic_time(microsecond)</c></item>
+ </list>
+ <p>When a log macro is used, Logger also inserts location
+ information:</p>
+ <list>
+ <item><c>mfa => {?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}</c></item>
+ <item><c>file => ?FILE</c></item>
+ <item><c>line => ?LINE</c></item>
+ </list>
+ <p>You can add custom metadata, either by specifying a map as
+ the last parameter to any of the log macros or the API
+ functions, or by setting process metadata
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>
+ or <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ <p>Logger merges all the metadata maps before forwarding the
+ log event to the handlers. If the same keys occur, values
+        from the log call overwrite process metadata, which in turn
+ overwrites values set by Logger.</p>
</desc>
</datatype>
<datatype>
<name name="config"/>
<desc>
- <p></p>
+ <p>Configuration data for the logger part of Logger, or for a handler.</p>
+ <p>The following default values apply:</p>
+ <list>
+ <item><c>level => info</c></item>
+ <item><c>filter_default => log</c></item>
+ <item><c>filters => []</c></item>
+        <item><c>formatter => {logger_formatter,DefaultFormatterConfig}</c></item>
+ </list>
+ <p>See the <seealso marker="logger_formatter#configuration">
+ <c>logger_formatter(3)</c></seealso> manual page for
+ information about the default configuration for this
+ formatter.</p>
</desc>
</datatype>
<datatype>
<name name="handler_id"/>
<desc>
- <p></p>
+ <p>A unique identifier for a handler instance.</p>
</desc>
</datatype>
<datatype>
<name name="filter_id"/>
<desc>
- <p></p>
+ <p>A unique identifier for a filter.</p>
</desc>
</datatype>
<datatype>
<name name="filter"/>
<desc>
- <p></p>
+ <p>A filter which can be installed for logger or for a handler.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_arg"/>
+ <desc>
+ <p>The second argument to the filter fun.</p>
</desc>
</datatype>
<datatype>
<name name="filter_return"/>
<desc>
- <p></p>
+ <p>The return value from the filter fun.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="timestamp"/>
+ <desc>
+ <p>A timestamp produced
+ with <seealso marker="erts:erlang#monotonic_time-1">
+ <c>erlang:monotonic_time(microsecond)</c></seealso>.</p>
</desc>
</datatype>
</datatypes>
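A minimal sketch of the metadata merging described above; it assumes kernel/include/logger.hrl is included so the three-argument log macro (format, arguments, metadata) is available, and all names are illustrative only, not part of the patch:

    %% Process metadata is merged into every event logged by this process.
    logger:set_process_metadata(#{request_id => "req-123"}),
    %% Metadata given in the call overrides the process metadata, which in
    %% turn overrides the pid/gl/time keys that Logger adds by itself.
    ?LOG_INFO("user ~p logged in", [some_user], #{domain => [my_app, auth]}).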
@@ -126,14 +175,10 @@
</list>
<p>All macros expand to a call to logger, where <c>Level</c> is
- taken from the macro name, and the following metadata is added,
- or merged with the given <c>Metadata</c>:</p>
-
- <code>
-#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
- file=>?FILE,
- line=>?LINE}
- </code>
+ taken from the macro name, and location data is added. See the
+ description of
+ the <seealso marker="#type-metadata"><c>metadata()</c></seealso>
+ type for more information about the location data.</p>
<p>The call is wrapped in a case statement and will be evaluated
only if <c>Level</c> is equal to or below the configured log
@@ -267,7 +312,7 @@
<func>
<name name="i" arity="0"/>
- <fsummary>Get information about all logger configurations</fsummary>
+ <fsummary>Get all logger configurations</fsummary>
<desc>
<p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p>
</desc>
@@ -277,27 +322,30 @@
<name name="i" arity="1" clause_i="1"/>
<name name="i" arity="1" clause_i="2"/>
<name name="i" arity="1" clause_i="3"/>
- <fsummary>Get information about all logger configurations</fsummary>
+ <fsummary>Get all logger configurations</fsummary>
<desc>
- <p>The <c>logger:i/1</c> function can be used to get all
- current logger configuration. The way that the information
- is returned depends on the <c><anno>Action</anno></c></p>
+ <p>Display or return all current logger configuration.</p>
<taglist>
- <tag>string</tag>
- <item>Return the pretty printed current logger configuration
- as iodata.</item>
- <tag>term</tag>
- <item>Return the current logger configuration as a term. The
- format of this term may change inbetween releases. For a
- stable format use <seealso marker="#get_handler_config/1">
+ <tag><c><anno>Action</anno> = string</c></tag>
+ <item>
+ <p>Return the pretty printed current logger configuration
+ as iodata.</p>
+ </item>
+ <tag><c><anno>Action</anno> = term</c></tag>
+ <item>
+ <p>Return the current logger configuration as a term. The
+            format of this term may change between releases. For a
+ stable format use <seealso marker="#get_handler_config/1">
<c>logger:get_handler_config/1</c></seealso>
- and <seealso marker="#get_logger_config/0">
+ and <seealso marker="#get_logger_config/0">
<c>logger:get_logger_config/0</c></seealso>.
- The same as calling <c>logger:i()</c>.</item>
- <tag>print</tag>
- <item>Pretty print all the current logger configuration to
- standard out. Example:
- <code><![CDATA[1> logger:i().
+ The same as calling <c>logger:i()</c>.</p>
+ </item>
+ <tag><c><anno>Action</anno> = print</c></tag>
+ <item>
+ <p>Pretty print all the current logger configuration to
+ standard out. Example:</p>
+ <code><![CDATA[1> logger:i(print).
Current logger configuration:
Level: info
FilterDefault: log
@@ -339,6 +387,39 @@ Current logger configuration:
<fsummary>Add a filter to the logger.</fsummary>
<desc>
<p>Add a filter to the logger.</p>
+ <p>The filter fun is called with the log event as the first
+        parameter, and the specified <c>filter_arg()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handlers:</p>
+ <taglist>
+ <tag><c>log()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next logger filter, if
+ any, is applied. If no more logger filters exist, the
+ log event is forwarded to the handler part of the
+ logger, where handler filters are applied.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ logger filter, if any, is applied. If no more logger
+ filters exist, the value of the <c>filter_default</c>
+ configuration parameter for the logger specifies if the
+ log event shall be discarded or forwarded to the handler
+ part.</p>
+ </item>
+ </taglist>
+ <p>See section <seealso marker="logger_chapter#Filter">
+ Filter</seealso> in the User's Guide for more information
+ about filters.</p>
+ <p>Some built-in filters exist. These are defined
+ in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
</desc>
</func>
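A minimal filter sketch following the return-value contract described above (the filter id and the chosen policy are illustrative, not part of the patch):

    %% Pass debug events unchanged, discard everything else.
    FilterFun = fun(#{level := debug} = LogEvent, _Extra) -> LogEvent;
                   (_LogEvent, _Extra) -> stop
                end,
    logger:add_logger_filter(debug_only, {FilterFun, []}).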
@@ -347,6 +428,39 @@ Current logger configuration:
<fsummary>Add a filter to the specified handler.</fsummary>
<desc>
<p>Add a filter to the specified handler.</p>
+ <p>The filter fun is called with the log event as the first
+        parameter, and the specified <c>filter_arg()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handler callback:</p>
+ <taglist>
+ <tag><c>log()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next handler filter, if
+ any, is applied. If no more filters exist for this
+ handler, the log event is forwarded to the handler
+ callback.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ handler filter, if any, is applied. If no more filters
+ exist for this handler, the value of
+ the <c>filter_default</c> configuration parameter for
+ the handler specifies if the log event shall be
+ discarded or forwarded to the handler callback.</p>
+ </item>
+ </taglist>
+ <p>See
+ section <seealso marker="logger_chapter#Filter">Filter</seealso>
+ in the User's Guide for more information about filters.</p>
+ <p>Some built-in filters exist. These are defined in
+ <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
</desc>
</func>
@@ -354,7 +468,8 @@ Current logger configuration:
<name name="remove_logger_filter" arity="1"/>
<fsummary>Remove a filter from the logger.</fsummary>
<desc>
- <p>Remove the filter with the specified identity from the logger.</p>
+ <p>Remove the filter identified
+ by <c><anno>FilterId</anno></c> from the logger.</p>
</desc>
</func>
@@ -362,7 +477,9 @@ Current logger configuration:
<name name="remove_handler_filter" arity="2"/>
<fsummary>Remove a filter from the specified handler.</fsummary>
<desc>
- <p>Remove the filter with the specified identity from the given handler.</p>
+ <p>Remove the filter identified
+ by <c><anno>FilterId</anno></c> from the handler identified
+ by <c><anno>HandlerId</anno></c>.</p>
</desc>
</func>
@@ -371,6 +488,9 @@ Current logger configuration:
<fsummary>Add a handler with the given configuration.</fsummary>
<desc>
<p>Add a handler with the given configuration.</p>
+ <p><c><anno>HandlerId</anno></c> is a unique identifier which
+        must be used in all subsequent calls referring to this
+ handler.</p>
</desc>
</func>
@@ -378,7 +498,7 @@ Current logger configuration:
<name name="remove_handler" arity="1"/>
<fsummary>Remove the handler with the specified identity.</fsummary>
<desc>
- <p>Remove the handler with the specified identity.</p>
+ <p>Remove the handler identified by <c><anno>HandlerId</anno></c>.</p>
</desc>
</func>
@@ -386,10 +506,37 @@ Current logger configuration:
<name name="set_module_level" arity="2"/>
<fsummary>Set the log level for the specified module.</fsummary>
<desc>
- <p>Set the log level for the specified module.</p>
- <p>To change the logging level globally, use
- <seealso marker="#set_logger_config/2"><c>logger:set_logger_config(level, Level)</c></seealso>.
- </p>
+ <p>Set the log level for the
+ specified <c><anno>Module</anno></c>.</p>
+ <p>The log level for a module overrides the global log level
+        of the logger for log events originating from the module in
+ question. Notice, however, that it does not override the
+ level configuration for any handler.</p>
+ <p>For example: Assume that the global log level for the
+ logger is <c>info</c>, and there is one handler, <c>h1</c>,
+ with level <c>info</c> and one handler, <c>h2</c>, with
+ level <c>debug</c>.</p>
+ <p>With this configuration, no debug messages will be logged,
+ since they are all stopped by the global log level.</p>
+      <p>If the level for <c>mymodule</c> is now set
+ to <c>debug</c>, then debug events from this module will be
+ logged by the handler <c>h2</c>, but not by
+ handler <c>h1</c>.</p>
+ <p>Debug events from other modules are still not logged.</p>
+ <p>To change the global log level for the logger, use
+ <seealso marker="#set_logger_config/2">
+ <c>logger:set_logger_config(level,Level)</c></seealso>.</p>
+ <p>To change the log level for a handler, use
+ <seealso marker="#set_handler_config/3">
+ <c>logger:set_handler_config(HandlerId,level,Level)</c></seealso>.</p>
+ <note>
+ <p>The originating module for a log event is only detected
+ if <c>mfa=>{Module,Function,Arity}</c> exists in the
+ metadata. When log macros are used, this association is
+ automatically added to all log events. If the logger API
+ is called directly, without using a macro, the logging
+ client must explicitly add this information.</p>
+ </note>
</desc>
</func>
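A sketch of the h1/h2 scenario described above (the handler ids and module name are illustrative; both handlers are assumed to be added already):

    logger:set_logger_config(level, info),
    logger:set_handler_config(h2, level, debug),
    %% Debug events from mymodule now reach h2; h1 still rejects them, and
    %% debug events from other modules are still stopped by the global level.
    logger:set_module_level(mymodule, debug).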
@@ -404,21 +551,62 @@ Current logger configuration:
<func>
<name name="set_logger_config" arity="1"/>
+ <fsummary>Set configuration data for the logger.</fsummary>
+ <desc>
+ <p>Set configuration data for the logger. This overwrites the
+ current logger configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#set_logger_config-2"><c>set_logger_config/2</c>
+ </seealso>, or read the current configuration
+ with <seealso marker="#get_logger_config-0"><c>get_logger_config/0</c>
+ </seealso>, then merge in your added or updated
+ associations before writing it back.</p>
+ <p>If a key is removed compared to the current configuration,
+ the default value is used.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="set_logger_config" arity="2"/>
<fsummary>Add or update configuration data for the logger.</fsummary>
<desc>
- <p>Add or update configuration data for the logger.</p>
+ <p>Add or update configuration data for the logger. If the
+ given <c><anno>Key</anno></c> already exists, its associated
+ value will be changed to <c><anno>Value</anno></c>. If it
+ doesn't exist, it will be added.</p>
</desc>
</func>
<func>
<name name="set_handler_config" arity="2"/>
+ <fsummary>Set configuration data for the specified handler.</fsummary>
+ <desc>
+ <p>Set configuration data for the specified handler. This
+ overwrites the current handler configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#set_handler_config-3"><c>set_handler_config/3</c>
+ </seealso>, or read the current configuration
+ with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c>
+ </seealso>, then merge in your added or updated
+ associations before writing it back.</p>
+ <p>If a key is removed compared to the current configuration,
+        and the key is known by Logger, the default value is used. If
+ it is a custom key, then it is up to the handler
+ implementation if the value is removed or a default value is
+ inserted.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="set_handler_config" arity="3"/>
<fsummary>Add or update configuration data for the specified
handler.</fsummary>
<desc>
<p>Add or update configuration data for the specified
- handler.</p>
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to <c><anno>Value</anno></c>. If it doesn't exist, it will
+ be added.</p>
</desc>
</func>
@@ -437,17 +625,37 @@ Current logger configuration:
<name name="set_process_metadata" arity="1"/>
<fsummary>Set metadata to use when logging from current process.</fsummary>
<desc>
- <p>Set metadata which <c>logger</c> automatically inserts it
- in all log events produced on the current
- process. Subsequent calls will overwrite previous data set
- by this function.</p>
- <p>When logging, location data produced by the log macros,
- and/or metadata given as argument to the log call (API
- function or macro), will be merged with the process
- metadata. If the same keys occur, values from the metadata
- argument to the log call will overwrite values in the
- process metadata, which in turn will overwrite values from
- the location data.</p>
+ <p>Set metadata which Logger shall automatically insert in
+ all log events produced on the current process.</p>
+ <p>Location data produced by the log macros, and/or metadata
+ given as argument to the log call (API function or macro),
+ are merged with the process metadata. If the same keys
+ occur, values from the metadata argument to the log call
+ overwrite values from the process metadata, which in turn
+ overwrite values from the location data.</p>
+      <p>Subsequent calls to this function overwrite previously
+        set data. To update existing data instead of overwriting it,
+ see <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_process_metadata" arity="1"/>
+ <fsummary>Set or update metadata to use when logging from
+ current process.</fsummary>
+ <desc>
+      <p>Set or update metadata to use when logging from the current
+        process.</p>
+ <p>If process metadata exists for the current process, this
+ function behaves as if it was implemented as follows:</p>
+ <code type="erl">
+logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta))
+ </code>
+ <p>If no process metadata exists, the function behaves as
+ <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c>
+ </seealso>.</p>
</desc>
</func>
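A short sketch of setting, updating and reading process metadata as described above (keys and values are illustrative):

    logger:set_process_metadata(#{request_id => "req-123"}),
    logger:update_process_metadata(#{user => "arne"}),
    %% Both associations are now present in the process metadata.
    #{request_id := "req-123", user := "arne"} = logger:get_process_metadata().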
@@ -457,7 +665,9 @@ Current logger configuration:
<desc>
<p>Retrieve data set
with <seealso marker="#set_process_metadata-1">
- <c>set_process_metadata/1</c></seealso>.</p>
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
</desc>
</func>
@@ -467,12 +677,103 @@ Current logger configuration:
<desc>
<p>Delete data set
with <seealso marker="#set_process_metadata-1">
- <c>set_process_metadata/1</c></seealso>.</p>
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="format_report" arity="1"/>
+ <fsummary>Convert a log message on report form to {Format,Args}.</fsummary>
+ <desc>
+ <p>Convert a log message on report form to <c>{Format,Args}</c>.</p>
+ <p>This is the default report callback used
+ by <seealso marker="logger_formatter">
+ <c>logger_formatter</c></seealso> when no custom report
+ callback is found.</p>
+ <p>The function produces lines of <c>Key: Value</c> from
+ key-value lists. Strings are printed with <c>~ts</c> and
+ other terms with <c>~tp</c>.</p>
+ <p>If the <c><anno>Report</anno></c> is a map, it is
+ converted to a key-value list before formatting as such.</p>
</desc>
</func>
</funcs>
+ <section>
+ <title>Callback Functions</title>
+ <p>The following functions are to be exported from a handler
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>Module:adding_handler(HandlerId,Config1) -> {ok,Config2} | {error,Reason}</name>
+ <fsummary>An instance of this handler is about to be added.</fsummary>
+ <type>
+ <v>HandlerId =
+ <seealso marker="#type-handler_id">handler_id()</seealso></v>
+ <v>Config1 = Config2 =
+ <seealso marker="#type-config">config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+      <p>The function is called when a new handler is about to be
+        added, and the purpose is to verify the configuration and
+        initiate all resources needed by the handler.</p>
+ <p>If everything succeeds, the callback function can add
+ possible default values or internal state values to the
+ configuration, and return the adjusted map
+ in <c>{ok,Config2}</c>.</p>
+ <p>If the configuration is faulty, or if the initiation fails,
+ the callback function must return <c>{error,Reason}</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>Module:removing_handler(HandlerId,Config) -> ok</name>
+ <fsummary>The given handler is about to be removed.</fsummary>
+ <type>
+ <v>HandlerId =
+ <seealso marker="#type-handler_id">handler_id()</seealso></v>
+ <v>Config =
+ <seealso marker="#type-config">config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when a handler is about to be
+ removed, and the purpose is to release all resources used by
+ the handler. The return value is ignored by Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>Module:changing_config(HandlerId,Config1,Config2) -> {ok,Config3} | {error,Reason}</name>
+ <fsummary>The configuration for this handler is about to change.</fsummary>
+ <type>
+ <v>HandlerId =
+ <seealso marker="#type-handler_id">handler_id()</seealso></v>
+ <v>Config1 = Config2 = Config3 =
+ <seealso marker="#type-config">config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when the configuration for a handler
+ is about to change, and the purpose is to verify and act on
+ the new configuration.</p>
+ <p><c>Config1</c> is the existing configuration
+ and <c>Config2</c> is the new configuration.</p>
+ <p>If everything succeeds, the callback function must return a
+ possibly adjusted configuration in <c>{ok,Config3}</c>.</p>
+ <p>If the configuration is faulty, the callback function must
+ return <c>{error,Reason}</c>.</p>
+ </desc>
+ </func>
+ </funcs>
</erlref>
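A sketch of the default report conversion performed by logger:format_report/1 as documented above (the report contents are illustrative):

    {Format, Args} = logger:format_report(#{user => "arne", action => login}),
    %% The result can be fed straight to io_lib:format/2, producing one
    %% "Key: Value" line per association in the report.
    Line = lists:flatten(io_lib:format(Format, Args)).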
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 88dcfbe8d9..519df2ba48 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -157,7 +157,7 @@
<p>A formatter is defined as a module exporting the following
function:</p>
- <code>format(Log,Extra) -> string()</code>
+ <code>format(Log,Extra) -> unicode:chardata()</code>
<p>The formatter plugin is called by each handler, and the
returned string can be printed to the handler's destination
@@ -322,19 +322,6 @@
return <c>ignore</c>.</p>
<p>Default is <c>log</c>.</p>
</item>
- <tag><c>depth = pos_integer() | unlimited</c></tag>
- <item>
- <p>Specifies if the depth of terms in the log events shall
- be limited by using control characters <c>~P</c>
- and <c>~W</c> instead of <c>~p</c> and <c>~w</c>,
- respectively. See
- <seealso marker="stdlib:io#format-1"><c>io:format</c></seealso>.</p>
- </item>
- <tag><c>max_size = pos_integer() | unlimited</c></tag>
- <item>
- <p>Specifies if the size of a log event shall be limited by
- truncating the formatted string.</p>
- </item>
<tag><c>formatter = {Module::module(),Extra::term()}</c></tag>
<item>
<p>See <seealso marker="#Formatter">Formatter</seealso> for more
@@ -347,10 +334,9 @@
<p>Note that <c>level</c> and <c>filters</c> are obeyed by
Logger itself before forwarding the log events to each
- handler, while <c>depth</c>, <c>max_size</c>
- and <c>formatter</c> are left to the handler
- implementation. All Logger's built-in handlers do apply these
- configuration parameters before printing.</p>
+ handler, while <c>formatter</c> is left to the handler
+ implementation. All Logger's built-in handlers will call the
+ given formatter before printing.</p>
</section>
</section>
@@ -488,8 +474,9 @@ error_logger:add_report_handler/1,2.
level => debug}
2> <input>logger:add_handler(debug_handler,logger_std_h,Config).</input>
ok</pre>
- <p>By default, the handler receives all events, so we need to add a filter
- to stop all non-debug events:</p>
+ <p>By default, the handler receives all events
+      (<c>filter_default=log</c>), so we need to add a filter to stop
+ all non-debug events:</p>
<pre>
3> <input>Fun = fun(#{level:=debug}=Log,_) -> Log; (_,_) -> stop end.</input>
#Fun&lt;erl_eval.12.98642416>
@@ -516,7 +503,7 @@ ok</pre>
<p>It may also implement the following callbacks:</p>
<code>
adding_handler(logger:handler_id(),logger:config()) -> {ok,logger:config()} | {error,term()}
-removing_handler(logger:handler_id()) -> ok
+removing_handler(logger:handler_id(),logger:config()) -> ok
changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()}
</code>
<p>When logger:add_handler(Id,Module,Config) is called, logger
@@ -526,7 +513,7 @@ changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logg
events as calls to Module:log/2.</p>
<p>A handler can be removed by calling
logger:remove_handler(Id). logger will call
- Module:removing_handler(Id), and then remove the handler's
+ Module:removing_handler(Id,Config), and then remove the handler's
configuration from the configuration database.</p>
<p>When logger:set_handler_config is called, logger calls
Module:changing_config(Id,OldConfig,NewConfig). If this function
@@ -547,7 +534,7 @@ log(Log,#{formatter:={FModule,FConfig}) ->
this:</p>
<code>
-module(myhandler).
--export([adding_handler/2, removing_handler/1, log/2]).
+-export([adding_handler/2, removing_handler/2, log/2]).
-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
adding_handler(Id,Config) ->
@@ -576,7 +563,7 @@ log(Log,#{myhandler_fd:=Fd,formatter:={FModule,FConfig}}) ->
single process.</p>
<code>
-module(myhandler).
--export([adding_handler/2, removing_handler/1, log/2]).
+-export([adding_handler/2, removing_handler/2, log/2]).
-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
adding_handler(Id,Config) ->
diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml
index d742391e35..c34ec7d14c 100644
--- a/lib/kernel/doc/src/logger_filters.xml
+++ b/lib/kernel/doc/src/logger_filters.xml
@@ -33,16 +33,20 @@
<file>logger_filters.xml</file>
</header>
<module>logger_filters</module>
- <modulesummary>Filters to use with logger.</modulesummary>
+ <modulesummary>Filters to use with Logger.</modulesummary>
<description>
- <p>Filters to use with logger. All functions exported from this
- module can be used as logger or handler
+ <p>All functions exported from this module can be used as logger
+ or handler
filters. See <seealso marker="logger#add_logger_filter-2">
<c>logger:add_logger_filter/2</c></seealso>
and <seealso marker="logger#add_handler_filter-3">
- <c>logger:add_handler_filter/3</c></seealso>
- for more information about how filters are added.</p>
+ <c>logger:add_handler_filter/3</c></seealso> for more information
+ about how filters are added.</p>
+ <p>Filters are removed with <seealso marker="logger#remove_logger_filter-1">
+ <c>logger:remove_logger_filter/1</c></seealso>
+ and <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>.</p>
</description>
<funcs>
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
index 6a17e3641f..7df4c88f40 100644
--- a/lib/kernel/doc/src/logger_formatter.xml
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -33,12 +33,187 @@
<file>logger_formatter.xml</file>
</header>
<module>logger_formatter</module>
- <modulesummary>Default formatter for the Logger application.</modulesummary>
+ <modulesummary>Default formatter for Logger.</modulesummary>
<description>
- <p>Default formatter for the Logger application.</p>
+ <p>Each log handler has a configured formatter specified as a
+ module and a configuration term. The purpose of the formatter is
+ to translate the log events to a final printable string
+ (<c>unicode:chardata()</c>) which can be written to the output
+ device of the handler.</p>
+ <p><c>logger_formatter</c> is the default formatter used by
+ Logger.</p>
</description>
+ <section>
+ <title>Configuration</title>
+ <p>The configuration term for <c>logger_formatter</c> is a map,
+ and the following keys can be set as configuration
+ parameters:</p>
+ <taglist>
+ <tag><c>chars_limit = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the value of the option
+ with the same name to be used when calling
+ <seealso marker="stdlib:io_lib#format-3">io_lib:format/3</seealso>.
+ This value limits the total number of characters printed
+ for each log event. Notice that this is a soft limit. For a
+ hard truncation limit, see option <c>max_size</c>.</p>
+ <p>Default is <c>unlimited</c>.</p>
+ <note>
+ <p><c>chars_limit</c> has no effect on log messages on
+ string form. These are expected to be short, but can still
+ be truncated by the <c>max_size</c> parameter.</p>
+ </note>
+ </item>
+ <tag><c>depth = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the maximum depth to
+ which terms shall be printed by this formatter. Format
+ strings passed to this formatter are rewritten. The format
+ controls ~p and ~w are replaced with ~P and ~W,
+ respectively, and the value is used as the depth
+ parameter. For details, see
+ <seealso marker="stdlib:io#format-2">io:format/2,3</seealso>
+ in STDLIB.</p>
+ <p>Default is <c>unlimited</c>.</p>
+ <note>
+ <p><c>depth</c> has no effect on log messages on string
+ form. These are expected to be short, but can still be
+ truncated by the <c>max_size</c> parameter.</p>
+ </note>
+ </item>
+ <tag><c>max_size = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the absolute maximum size a
+ string returned from this formatter can have. If the
+ formatted string is longer, after possibly being limited
+ by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p>
+ <p>Default is <c>unlimited</c>.</p>
+ </item>
+ <tag><c>single_line = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c>, all newlines in the message are
+ replaced with <c>", "</c>, and whitespaces following
+ directly after newlines are removed. Note that newlines
+ added by the <c>template</c> parameter are not replaced.</p>
+ <p>Default is <c>true</c>.</p>
+ </item>
+      <tag><c>legacy_header = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c> a header field is added to
+ logger_formatter's part of <c>Metadata</c>. The value of
+ this field is a string similar to the header created by the
+ old <c>error_logger</c> event handlers. It can be included
+ in the log event by adding the
+ tuple <c>{logger_formatter,header}</c> to the template. See
+ section <seealso marker="#default_templates">Default
+ Templates</seealso> for more information.</p>
+ <p>Default is <c>false</c>.</p>
+ </item>
+ <tag><c>report_cb = fun((</c><seealso marker="logger#type-report"><c>logger:report()</c></seealso><c>) -> {</c><seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso><c>,[term()]})</c></tag>
+ <item>
+ <p>A report callback is used by the formatter to transform log
+ messages on report form to a format string and
+ arguments. The report callback can be specified in the
+ metadata for the log event. If no report callback exist in
+ metadata, <c>logger_formatter</c> will
+ use <seealso marker="logger#format_report-1">
+ <c>logger:format_report/1</c></seealso> as default
+ callback.</p>
+ <p>If this configuration parameter is set, it replaces both
+ the default report callback, and any report callback found
+ in metadata. That is, all reports are converted by this
+ configured function.</p>
+ <p>The value must be a function with arity 1,
+ returning <c>{Format,Args}</c>, and it will be called with a
+          report as its only argument.</p>
+ </item>
+ <tag><c>template = </c><seealso marker="#type-template"><c>template()</c></seealso></tag>
+ <item>
+ <p>The template is a list of atoms, tuples and strings. The
+          atoms <c>level</c> and <c>msg</c> are treated as
+          placeholders for the severity level and the log message,
+          respectively. Other atoms or tuples are interpreted as
+ placeholders for metadata, where atoms are expected to match
+ top level keys, and tuples represent paths to sub keys when
+ the metadata is a nested map. For example the
+ tuple <c>{key1,key2}</c> is replaced by the value of
+ the <c>key2</c> field in the nested map below. The
+ atom <c>key1</c> on its own is replaced by the complete
+ value of the <c>key1</c> field. The values are converted to
+ strings.</p>
+
+<code>
+#{key1=>#{key2=>my_value,
+ ...}
+ ...}</code>
+
+ <p>Strings in the template are printed literally.</p>
+ <p>The default template differs depending on the values
+ of <c>legacy_header</c>
+ and <c>single_line</c>. See <seealso marker="#default_templates">Default
+      Templates</seealso> for more information.</p>
+ </item>
+ <tag><c>utc = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c>, all dates are displayed in Universal
+ Coordinated Time.</p>
+ <p>Default is <c>false</c>.</p>
+ </item>
+ </taglist>
+ </section>
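A sketch of a formatter configuration using the parameters described in the section above (the handler id and the chosen values are illustrative; the handler is assumed to be added already):

    logger:set_handler_config(logger_std_h, formatter,
        {logger_formatter, #{single_line => true,
                             chars_limit => 4096,   %% soft limit per event
                             max_size    => 8192,   %% hard truncation limit
                             template    => [time," ",level,": ",msg,"\n"]}}).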
+
+ <section>
+ <marker id="default_templates"/>
+ <title>Default templates</title>
+
+ <p>The default value for the <c>template</c> configuration
+ parameter depends on the value of <c>single_line</c>
+ and <c>legacy_header</c> as follows.</p>
+
+ <p>The log event used in the examples is:</p>
+ <code>
+?LOG_ERROR("name: ~p~nexit_reason: ~p",[my_reg_name,"It crashed"])</code>
+
+ <taglist>
+ <tag><c>legacy_header=true</c></tag>
+ <item>
+ <p>Default template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code>
+=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 ===
+name: my_reg_name
+exit_reason: "It crashed"</code>
+
+ <p>Notice that all eight levels might occur in the heading,
+ not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as the
+ old <c>error_logger</c> produced. And microseconds are
+ added at the end of the timestamp.</p>
+ </item>
+
+ <tag><c>single_line=true</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code>2017-12-29 13:31:49.640317 error: name: my_reg_name, exit_reason: "It crashed"</code>
+ </item>
+
+ <tag><c>legacy_header=false, single_line=false</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code>
+2017-12-29 13:32:25.191925 error:
+name: my_reg_name
+exit_reason: "It crashed"</code>
+ </item>
+ </taglist>
+ </section>
+
<datatypes>
<datatype>
<name name="template"/>
@@ -52,101 +227,22 @@
<name name="format" arity="2"/>
<fsummary>Formats the given message.</fsummary>
<desc>
- <p>Formats the given message.</p>
- <p>The template is a list of atoms, tuples and strings. Atoms
- can be <c>level</c> or <c>msg</c>, which are placeholders
- for the severity level and the log message,
- repectively. Tuples are interpreted as placeholders for
- metadata. Each element in the tuple must be an atom which
- matches a key in the nested metadata map, e.g. the
- tuple <c>{key1,key2}</c> will be replaced by the value of
- the key2 field in this nested map (the value vill be
- converted to a string):</p>
-
-<code>
-#{key1=>#{key2=>my_value,
- ...},
- ...}</code>
-
-
- <p> Strings are printed literally.</p>
-
- <p><c>depth</c> is a positive integer representing the maximum
- depth to which terms shall be printed by this
- formatter. Format strings passed to this formatter are
- rewritten. The format controls ~p and ~w are replaced with
- ~P and ~W, respectively, and the value is used as the depth
- parameter. For details, see
- <seealso marker="stdlib:io#format-2">io:format/2,3</seealso>
- in STDLIB.</p>
-
- <p><c>chars_limit</c> is a positive integer representing the
- value of the option with the same name to be used when calling
- <seealso marker="stdlib:io#format-3">io:format/3</seealso>. This
- value limits the total number of characters printed bu the
- formatter. Notes that this is a soft limit. For a hard
- truncation limit, see option <c>max_size</c>.</p>
-
- <p><c>max_size</c> is a positive integer representing the
- maximum size a string returned from this formatter can
- have. If the formatted string is longer, after possibly
- being limited by <c>depth</c> and/or <c>chars_limit</c>, it
- will be truncated.</p>
-
- <p><c>utc</c> is a boolean. If set to true, all dates are
- displayed in Universal Coordinated Time. Default
- is <c>false</c>.</p>
-
- <p><c>report_cb</c> must be a function with arity 1,
- returning <c>{Format,Args}</c>. This function will replace
- any <c>report_cb</c> found in metadata.</p>
-
- <p>If <c>single_line=true</c>, all newlines in the message are
- replaced with <c>", "</c>, and whitespaces following directly
- after newlines are removed. Note that newlines added by the
- formatter template are not replaced.</p>
-
- <p>If <c>legacy_header=true</c> a header field is added to
- logger_formatter's part of <c>Metadata</c>. The value of
- this field is a string similar to the header created by the
- old <c>error_logger</c> event handlers. It can be included
- in the log event by adding the
- tuple <c>{logger_formatter,header}</c> to the template.</p>
-
- <p>The default template when <c>legacy_header=true</c> is</p>
-
- <code>[{logger_formatter,header},"\n",msg,"\n"]</code>
-
- <p>which will cause log entries like this:</p>
-
- <code>=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 ===
- process: &lt;0.74.0&gt;
- exit_reason: "Something went wrong"</code>
-
- <p>Note that all eight levels might occur here, not
- only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c>. And also
- that micro seconds are added at the end of the
- timestamp.</p>
-
- <p>The default template when <c>single_line=true</c> is</p>
-
- <code>[time," ",level,": ",msg,"\n"]</code>
-
- <p>which will cause log entries like this:</p>
-
- <code>2017-12-29 13:31:49.640317 error: process: &lt;0.74.0&gt;, exit_reason: "Something went wrong"</code>
-
- <p>The default template when both <c>legacy_header</c> and
- <c>single_line</c> are set to false is:</p>
-
- <code>[time," ",level,":\n",msg,"\n"]</code>
-
- <p>which will cause log entries like this:</p>
-
- <code>2017-12-29 13:32:25.191925 error:
- process: &lt;0.74.0&gt;
- exit_reason: "Something went wrong"</code>
-
+      <p>This is the formatter callback function to be called from
+ handlers. The log event is processed as follows:</p>
+ <list>
+ <item>If the message is on report form, it is converted to
+ <c>{Format,Args}</c> by calling the report
+ callback.</item>
+ <item>The size is limited according to the values of
+ configuration parameters <c>chars_limit</c>
+ and <c>depth</c>. Notice that this does not apply to
+ messages on string form.</item>
+ <item>The full log entry is composed according to
+ the <c>template</c>.</item>
+ <item>If the final string is too long, it is truncated
+ according to the value of configuration
+ parameter <c>max_size</c>.</item>
+ </list>
</desc>
</func>
diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml
index c06914d23d..a633ae4832 100644
--- a/lib/kernel/doc/src/ref_man.xml
+++ b/lib/kernel/doc/src/ref_man.xml
@@ -38,6 +38,7 @@
<xi:include href="disk_log.xml"/>
<xi:include href="erl_boot_server.xml"/>
<xi:include href="erl_ddll.xml"/>
+ <xi:include href="erl_epmd.xml"/>
<xi:include href="erl_prim_loader_stub.xml"/>
<xi:include href="erlang_stub.xml"/>
<xi:include href="error_handler.xml"/>
diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml
index bcc422930e..b8c25ca53b 100644
--- a/lib/kernel/doc/src/specs.xml
+++ b/lib/kernel/doc/src/specs.xml
@@ -6,6 +6,7 @@
<xi:include href="../specs/specs_disk_log.xml"/>
<xi:include href="../specs/specs_erl_boot_server.xml"/>
<xi:include href="../specs/specs_erl_ddll.xml"/>
+ <xi:include href="../specs/specs_erl_epmd.xml"/>
<xi:include href="../specs/specs_erl_prim_loader_stub.xml"/>
<xi:include href="../specs/specs_erlang_stub.xml"/>
<xi:include href="../specs/specs_error_handler.xml"/>
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
index 702845512c..eeb8c6ab2f 100644
--- a/lib/kernel/src/Makefile
+++ b/lib/kernel/src/Makefile
@@ -146,7 +146,7 @@ HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \
../include/net_address.hrl ../include/logger.hrl
INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
- erl_epmd.hrl hipe_ext_format.hrl \
+ erl_epmd.hrl file_int.hrl hipe_ext_format.hrl \
inet_dns.hrl inet_res.hrl \
inet_boot.hrl inet_config.hrl inet_int.hrl \
inet_dns_record_adts.hrl \
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index b9cb722575..ff5df667b5 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -1272,9 +1272,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
NewEnv = merge_app_env(ApplEnv, ConfEnv),
CmdLineEnv = get_cmd_env(Name),
NewEnv2 = merge_app_env(NewEnv, CmdLineEnv),
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
- add_env(Name, NewEnv3),
+ add_env(Name, NewEnv2),
Appl = #appl{name = Name, descr = Descr, id = Id, vsn = Vsn,
appl_data = ApplData, inc_apps = IncApps, apps = Apps},
ets:insert(ac_tab, {{loaded, Name}, Appl}),
@@ -1292,7 +1290,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
{ok, NewS}.
unload(AppName, S) ->
- {ok, IncApps} = get_env(AppName, included_applications),
+ {ok, IncApps} = get_key(AppName, included_applications),
del_env(AppName),
ets:delete(ac_tab, {loaded, AppName}),
foldl(fun(App, S1) ->
@@ -1583,13 +1581,9 @@ do_change_appl({ok, {ApplData, Env, IncApps, Descr, Id, Vsn, Apps}},
CmdLineEnv = get_cmd_env(AppName),
NewEnv2 = merge_app_env(NewEnv1, CmdLineEnv),
- %% included_apps is made into an env parameter as well
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
-
%% Update ets table with new application env
del_env(AppName),
- add_env(AppName, NewEnv3),
+ add_env(AppName, NewEnv2),
OldAppl#appl{appl_data=ApplData,
descr=Descr,
diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl
index f96bc88913..9a0939972d 100644
--- a/lib/kernel/src/erl_epmd.erl
+++ b/lib/kernel/src/erl_epmd.erl
@@ -29,10 +29,20 @@
-define(port_please_failure2(Term), noop).
-endif.
+-ifndef(erlang_daemon_port).
+-define(erlang_daemon_port, 4369).
+-endif.
+-ifndef(epmd_dist_high).
+-define(epmd_dist_high, 4370).
+-endif.
+-ifndef(epmd_dist_low).
+-define(epmd_dist_low, 4370).
+-endif.
+
%% External exports
-export([start/0, start_link/0, stop/0, port_please/2,
port_please/3, names/0, names/1,
- register_node/2, register_node/3, open/0, open/1, open/2]).
+ register_node/2, register_node/3, address_please/3, open/0, open/1, open/2]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -53,7 +63,7 @@
start() ->
gen_server:start({local, erl_epmd}, ?MODULE, [], []).
-
+-spec start_link() -> {ok, pid()} | ignore | {error,term()}.
start_link() ->
gen_server:start_link({local, erl_epmd}, ?MODULE, [], []).
@@ -66,9 +76,22 @@ stop() ->
%% return {port, P, Version} | noport
%%
+-spec port_please(Name, Host) -> {ok, Port, Version} | noport when
+ Name :: string(),
+ Host :: inet:ip_address(),
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node, Host) ->
port_please(Node, Host, infinity).
+-spec port_please(Name, Host, Timeout) -> {ok, Port, Version} | noport when
+ Name :: string(),
+ Host :: inet:ip_address(),
+ Timeout :: non_neg_integer() | infinity,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node,HostName, Timeout) when is_atom(HostName) ->
port_please1(Node,atom_to_list(HostName), Timeout);
port_please(Node,HostName, Timeout) when is_list(HostName) ->
@@ -92,10 +115,21 @@ port_please1(Node,HostName, Timeout) ->
Else
end.
+-spec names() -> {ok, [{Name, Port}]} | {error, Reason} when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names() ->
{ok, H} = inet:gethostname(),
names(H).
+-spec names(Host) -> {ok, [{Name, Port}]} | {error, Reason} when
+ Host :: atom() | string() | inet:ip_address(),
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names(HostName) when is_atom(HostName); is_list(HostName) ->
case inet:gethostbyname(HostName) of
{ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
@@ -106,9 +140,22 @@ names(HostName) when is_atom(HostName); is_list(HostName) ->
names(EpmdAddr) ->
get_names(EpmdAddr).
+-spec register_node(Name, Port) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
register_node(Name, PortNo) ->
- register_node(Name, PortNo, inet).
+ register_node(Name, PortNo, inet).
+
+-spec register_node(Name, Port, Driver) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Driver :: inet_tcp | inet6_tcp | inet | inet6,
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
+
register_node(Name, PortNo, inet_tcp) ->
register_node(Name, PortNo, inet);
register_node(Name, PortNo, inet6_tcp) ->
@@ -116,6 +163,17 @@ register_node(Name, PortNo, inet6_tcp) ->
register_node(Name, PortNo, Family) ->
gen_server:call(erl_epmd, {register, Name, PortNo, Family}, infinity).
+-spec address_please(Name, Host, AddressFamily) -> Success | {error, term()} when
+ Name :: string(),
+ Host :: string() | inet:ip_address(),
+ AddressFamily :: inet | inet6,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer(),
+ Success :: {ok, inet:ip_address()} | {ok, inet:ip_address(), Port, Version}.
+
+address_please(_Name, Host, AddressFamily) ->
+ inet:getaddr(Host, AddressFamily).
+
%%%----------------------------------------------------------------------
%%% Callback functions from gen_server
%%%----------------------------------------------------------------------
diff --git a/lib/kernel/src/erl_signal_handler.erl b/lib/kernel/src/erl_signal_handler.erl
index 22f235d4e4..b76c2a217a 100644
--- a/lib/kernel/src/erl_signal_handler.erl
+++ b/lib/kernel/src/erl_signal_handler.erl
@@ -19,12 +19,21 @@
-module(erl_signal_handler).
-behaviour(gen_event).
--export([init/1, format_status/2,
+-export([start/0, init/1, format_status/2,
handle_event/2, handle_call/2, handle_info/2,
terminate/2, code_change/3]).
-record(state,{}).
+start() ->
+ %% add signal handler
+ case whereis(erl_signal_server) of
+ %% in case of minimal mode
+ undefined -> ok;
+ _ ->
+ gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
+ end.
+
init(_Args) ->
{ok, #state{}}.
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index 0706220a94..47d0ca5ea3 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -32,7 +32,7 @@
which_report_handlers/0]).
%% logger callbacks
--export([adding_handler/2, removing_handler/1, log/2]).
+-export([adding_handler/2, removing_handler/2, log/2]).
-export([get_format_depth/0, limit_term/1]).
@@ -111,8 +111,8 @@ adding_handler(?MODULE,Config) ->
Error
end.
--spec removing_handler(logger:handler_id()) -> ok.
-removing_handler(?MODULE) ->
+-spec removing_handler(logger:handler_id(),logger:config()) -> ok.
+removing_handler(?MODULE,_Config) ->
stop(),
ok.
diff --git a/lib/kernel/src/hipe_unified_loader.erl b/lib/kernel/src/hipe_unified_loader.erl
index fd06f0f7d8..5704cc79c2 100644
--- a/lib/kernel/src/hipe_unified_loader.erl
+++ b/lib/kernel/src/hipe_unified_loader.erl
@@ -453,7 +453,7 @@ make_beam_stub(Mod, LoaderState, MD5, Beam, FunDefs, ClosuresToPatch) ->
%%========================================================================
%% Patching
%% @spec patch(refs(), BaseAddress::integer(), ConstAndZone::term(),
-%% FunDefs::term(), TrampolineMap::term()) -> 'ok'.
+%% FunDefs::term(), TrampolineMap::term()) -> 'ok'
%% @type refs()=[{RefType::integer(), Reflist::reflist()} | refs()]
%%
%% @type reflist()= [{Data::term(), Offsets::offests()}|reflist()]
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
index e3fdb1bb22..b4b50899f7 100644
--- a/lib/kernel/src/inet_tcp_dist.erl
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -283,73 +283,22 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
[Name, Address] = splitnode(Driver, Node, LongOrShortNames),
AddressFamily = Driver:family(),
- case inet:getaddr(Address, AddressFamily) of
+ ErlEpmd = net_kernel:epmd_module(),
+ {ARMod, ARFun} = get_address_resolver(ErlEpmd),
+ Timer = dist_util:start_timer(SetupTime),
+ case ARMod:ARFun(Name, Address, AddressFamily) of
+ {ok, Ip, TcpPort, Version} ->
+ ?trace("address_please(~p) -> version ~p~n",
+ [Node,Version]),
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
{ok, Ip} ->
- Timer = dist_util:start_timer(SetupTime),
- ErlEpmd = net_kernel:epmd_module(),
case ErlEpmd:port_please(Name, Ip) of
{port, TcpPort, Version} ->
?trace("port_please(~p) -> version ~p~n",
[Node,Version]),
- dist_util:reset_timer(Timer),
- case
- Driver:connect(
- Ip, TcpPort,
- connect_options([{active, false}, {packet, 2}]))
- of
- {ok, Socket} ->
- HSData = #hs_data{
- kernel_pid = Kernel,
- other_node = Node,
- this_node = MyNode,
- socket = Socket,
- timer = Timer,
- this_flags = 0,
- other_version = Version,
- f_send = fun Driver:send/2,
- f_recv = fun Driver:recv/3,
- f_setopts_pre_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, false},
- {packet, 4},
- nodelay()])
- end,
- f_setopts_post_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, true},
- {deliver, port},
- {packet, 4},
- nodelay()])
- end,
-
- f_getll = fun inet:getll/1,
- f_address =
- fun(_,_) ->
- #net_address{
- address = {Ip,TcpPort},
- host = Address,
- protocol = tcp,
- family = AddressFamily}
- end,
- mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
- mf_getstat = fun ?MODULE:getstat/1,
- request_type = Type,
- mf_setopts = fun ?MODULE:setopts/2,
- mf_getopts = fun ?MODULE:getopts/2
- },
- dist_util:handshake_we_started(HSData);
- _ ->
- %% Other Node may have closed since
- %% port_please !
- ?trace("other node (~p) "
- "closed since port_please.~n",
- [Node]),
- ?shutdown(Node)
- end;
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
_ ->
?trace("port_please (~p) "
"failed.~n", [Node]),
@@ -361,6 +310,71 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?shutdown(Node)
end.
+%%
+%% Actual setup of connection
+%%
+do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer) ->
+ dist_util:reset_timer(Timer),
+ case
+ Driver:connect(
+ Ip, TcpPort,
+ connect_options([{active, false}, {packet, 2}]))
+ of
+ {ok, Socket} ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ f_send = fun Driver:send/2,
+ f_recv = fun Driver:recv/3,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+
+ f_getll = fun inet:getll/1,
+ f_address =
+ fun(_,_) ->
+ #net_address{
+ address = {Ip,TcpPort},
+ host = Address,
+ protocol = tcp,
+ family = AddressFamily}
+ end,
+ mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
+ mf_getstat = fun ?MODULE:getstat/1,
+ request_type = Type,
+ mf_setopts = fun ?MODULE:setopts/2,
+ mf_getopts = fun ?MODULE:getopts/2
+ },
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% Other Node may have closed since
+ %% discovery !
+ ?trace("other node (~p) "
+ "closed since discovery (port_please).~n",
+ [Node]),
+ ?shutdown(Node)
+ end.
+
connect_options(Opts) ->
case application:get_env(kernel, inet_dist_connect_options) of
{ok,ConnectOpts} ->
@@ -430,6 +444,16 @@ get_tcp_address(Driver, Socket) ->
}.
%% ------------------------------------------------------------
+%% Determine if EPMD module supports address resolving. Default
+%% is to use inet:getaddr/2.
+%% ------------------------------------------------------------
+get_address_resolver(EpmdModule) ->
+ case erlang:function_exported(EpmdModule, address_please, 3) of
+ true -> {EpmdModule, address_please};
+ _ -> {inet, getaddr}
+ end.
+
+%% ------------------------------------------------------------
%% Do only accept new connection attempts from nodes at our
%% own LAN, if the check_ip environment parameter is true.
%% ------------------------------------------------------------
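
The get_address_resolver/1 hunk above means an alternative epmd module (selected via net_kernel:epmd_module()) can export address_please/3 and, by returning the port and distribution version together with the address, skip the separate port_please lookup. A hypothetical sketch, where the module name, port and version are placeholders:

    -module(my_epmd).
    -export([address_please/3]).

    %% Returning {ok, Ip, Port, Version} lets inet_tcp_dist connect directly;
    %% returning just {ok, Ip} makes it fall back to port_please/2.
    address_please(_Name, Host, AddressFamily) ->
        case inet:getaddr(Host, AddressFamily) of
            {ok, Ip} -> {ok, Ip, 12345, 5};   %% placeholder port and version
            Error    -> Error
        end.
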
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index 20aa47f602..ae982c1741 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -32,13 +32,7 @@
start(_, []) ->
case supervisor:start_link({local, kernel_sup}, kernel, []) of
{ok, Pid} ->
- %% add signal handler
- case whereis(erl_signal_server) of
- %% in case of minimal mode
- undefined -> ok;
- _ ->
- ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
- end,
+ ok = erl_signal_handler:start(),
%% add error handler
case logger:setup_standard_handler() of
ok -> {ok, Pid, []};
diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl
index 535083ef27..c5ff1887c2 100644
--- a/lib/kernel/src/kernel_config.erl
+++ b/lib/kernel/src/kernel_config.erl
@@ -30,11 +30,8 @@
%%%-----------------------------------------------------------------
%%% This module implements a process that configures the kernel
%%% application.
-%%% Its purpose is that in the init phase add an error_logger
-%%% and when it dies (when the kernel application dies) deleting the
-%%% previously installed error_logger.
-%%% Also, this process waits for other nodes at startup, if
-%%% specified.
+%%% Its purpose is to wait for other nodes at startup during the init phase,
+%%% if specified.
%%%-----------------------------------------------------------------
start_link() -> gen_server:start_link(kernel_config, [], []).
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
index 943ef8c2d1..98a9937111 100644
--- a/lib/kernel/src/logger.erl
+++ b/lib/kernel/src/logger.erl
@@ -44,8 +44,8 @@
%% Misc
-export([compare_levels/2]).
--export([set_process_metadata/1, unset_process_metadata/0,
- get_process_metadata/0]).
+-export([set_process_metadata/1, update_process_metadata/1,
+ unset_process_metadata/0, get_process_metadata/0]).
-export([i/0, i/1]).
-export([setup_standard_handler/0, replace_simple_handler/3]).
-export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]).
@@ -60,27 +60,41 @@
%%%-----------------------------------------------------------------
%%% Types
--type log() :: #{level=>level(),
- msg=>{io:format(),[term()]} |
+-type log() :: #{level:=level(),
+ msg:={io:format(),[term()]} |
{report,report()} |
{string,unicode:chardata()},
- meta=>metadata()}.
+ meta:=metadata()}.
-type level() :: emergency | alert | critical | error |
warning | notice | info | debug.
-type report() :: map() | [{atom(),term()}].
-type msg_fun() :: fun((term()) -> {io:format(),[term()]} |
report() |
unicode:chardata()).
--type metadata() :: map().
-
+-type metadata() :: #{pid => pid(),
+ gl => pid(),
+ time => timestamp(),
+ mfa => {module(),atom(),non_neg_integer()},
+ file => file:filename(),
+ line => non_neg_integer(),
+ term() => term()}.
+-type location() :: #{mfa := {module(),atom(),non_neg_integer()},
+ file := file:filename(),
+ line := non_neg_integer()}.
-type handler_id() :: atom().
-type filter_id() :: atom().
--type filter() :: {fun((log(),term()) -> filter_return()),term()}.
+-type filter() :: {fun((log(),filter_arg()) -> filter_return()),filter_arg()}.
+-type filter_arg() :: term().
-type filter_return() :: stop | ignore | log().
--type config() :: map().
+-type config() :: #{level => level(),
+ filter_default => log | stop,
+ filters => [{filter_id(),filter()}],
+ formatter => {module(),term()},
+ term() => term()}.
+-type timestamp() :: integer().
-export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0,
- filter_id/0,filter/0,filter_return/0]).
+ filter_id/0,filter/0,filter_arg/0,filter_return/0]).
%%%-----------------------------------------------------------------
%%% API
@@ -185,24 +199,24 @@ allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) ->
-spec macro_log(Location,Level,StringOrReport) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
StringOrReport :: unicode:chardata() | report().
macro_log(Location,Level,StringOrReport) ->
log_allowed(Location,Level,StringOrReport,#{}).
-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
StringOrReport :: unicode:chardata() | report(),
Meta :: metadata();
(Location,Level,Format,Args) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Format :: io:format(),
Args ::[term()];
(Location,Level,Fun,FunArgs) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Fun :: msg_fun(),
FunArgs :: term().
@@ -213,13 +227,13 @@ macro_log(Location,Level,FunOrFormat,Args) ->
log_allowed(Location,Level,{FunOrFormat,Args},#{}).
-spec macro_log(Location,Level,Format,Args,Meta) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Format :: io:format(),
Args ::[term()],
Meta :: metadata();
(Location,Level,Fun,FunArgs,Meta) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Fun :: msg_fun(),
FunArgs :: term(),
@@ -390,6 +404,19 @@ set_process_metadata(Meta) when is_map(Meta) ->
set_process_metadata(Meta) ->
erlang:error(badarg,[Meta]).
+-spec update_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+update_process_metadata(Meta) when is_map(Meta) ->
+ case get_process_metadata() of
+ undefined ->
+ set_process_metadata(Meta);
+ Meta0 when is_map(Meta0) ->
+ set_process_metadata(maps:merge(Meta0,Meta)),
+ ok
+ end;
+update_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
-spec get_process_metadata() -> Meta | undefined when
Meta :: metadata().
get_process_metadata() ->
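
A brief usage sketch of the new update_process_metadata/1 (keys and values are illustrative); it merges the given map into any existing process metadata, as the logger_SUITE change below also exercises:

    ok = logger:set_process_metadata(#{user => "alice", request_id => 1}),
    ok = logger:update_process_metadata(#{request_id => 2, peer => "10.0.0.1"}),
    #{user := "alice", request_id := 2, peer := "10.0.0.1"} =
        logger:get_process_metadata().
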
@@ -699,7 +726,7 @@ do_log_1(Level,Msg,Meta) ->
end.
-spec log_allowed(Location,Level,Msg,Meta) -> ok when
- Location :: map(),
+ Location :: location() | #{},
Level :: level(),
Msg :: {msg_fun(),term()} |
{io:format(),[term()]} |
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index 3b71f936d8..0150fa781a 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -34,7 +34,7 @@
%% logger callbacks
-export([log/2,
- adding_handler/2, removing_handler/1,
+ adding_handler/2, removing_handler/2,
changing_config/3, swap_buffer/2]).
%%%===================================================================
@@ -223,7 +223,7 @@ check_my_config([]) ->
%%%-----------------------------------------------------------------
%%% Handler being removed
-removing_handler(Name) ->
+removing_handler(Name, _Config) ->
stop(Name).
%%%-----------------------------------------------------------------
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
index 386e7832e2..8e954f8d98 100644
--- a/lib/kernel/src/logger_formatter.erl
+++ b/lib/kernel/src/logger_formatter.erl
@@ -29,7 +29,7 @@
%%%-----------------------------------------------------------------
%%% API
--spec format(Log,Config) -> String when
+-spec format(Log,Config) -> unicode:chardata() when
Log :: logger:log(),
Config :: #{single_line=>boolean(),
legacy_header=>boolean(),
@@ -38,8 +38,7 @@
max_size=>pos_integer() | unlimited,
depth=>pos_integer() | unlimited,
template=>template(),
- utc=>boolean()},
- String :: string().
+ utc=>boolean()}.
format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
when is_map(Config0) ->
Config = add_default_config(Config0),
@@ -263,7 +262,7 @@ utcstr(_) -> "".
add_default_config(#{utc:=_}=Config0) ->
Default =
#{legacy_header=>false,
- single_line=>false,
+ single_line=>true,
chars_limit=>unlimited},
MaxSize = get_max_size(maps:get(max_size,Config0,false)),
Depth = get_depth(maps:get(depth,Config0,false)),
diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl
index 82df499c2b..8c0fc2725d 100644
--- a/lib/kernel/src/logger_internal.hrl
+++ b/lib/kernel/src/logger_internal.hrl
@@ -31,6 +31,7 @@
{no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]).
-define(DEFAULT_FORMATTER,logger_formatter).
-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true,
+ single_line=>false,
template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}).
-define(DEFAULT_FORMAT_TEMPLATE_HEADER,
[{logger_formatter,header},"\n",msg,"\n"]).
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
index 6ef3b8582a..a7f302ac8f 100644
--- a/lib/kernel/src/logger_server.erl
+++ b/lib/kernel/src/logger_server.erl
@@ -158,7 +158,7 @@ handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) ->
Handlers0 = maps:get(handlers,Config,[]),
Handlers = lists:delete(HandlerId,Handlers0),
%% inform the handler
- _ = call_h(Module,removing_handler,[HandlerId],ok),
+ _ = call_h(Module,removing_handler,[HandlerId,Config],ok),
do_set_config(Tid,logger,Config#{handlers=>Handlers}),
logger_config:delete(Tid,HandlerId),
ok;
@@ -234,7 +234,13 @@ handle_info({log,Level,Report,Meta}, State) ->
{noreply, State};
handle_info({Ref,_Reply},State) when is_reference(Ref) ->
%% Assuming this is a timed-out gen_server reply - ignoring
- {noreply, State}.
+ {noreply, State};
+handle_info(Unexpected,State) ->
+ ?LOG_INTERNAL(debug,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State}.
terminate(_Reason, _State) ->
ok.
diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple.erl
index 23ff6ccd2e..a1b427b96c 100644
--- a/lib/kernel/src/logger_simple.erl
+++ b/lib/kernel/src/logger_simple.erl
@@ -19,7 +19,7 @@
%%
-module(logger_simple).
--export([adding_handler/2, removing_handler/1, log/2]).
+-export([adding_handler/2, removing_handler/2, log/2]).
-export([get_buffer/0]).
%% This module implements a simple handler for logger. It is the
@@ -63,7 +63,7 @@ adding_handler(?MODULE,Config) ->
{error,{handler_process_name_already_exists,?MODULE}}
end.
-removing_handler(?MODULE) ->
+removing_handler(?MODULE,_Config) ->
case whereis(?MODULE) of
undefined ->
ok;
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index cbc9db372c..31edcfea8b 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -35,7 +35,7 @@
terminate/2, code_change/3]).
%% logger callbacks
--export([log/2, adding_handler/2, removing_handler/1,
+-export([log/2, adding_handler/2, removing_handler/2,
changing_config/3, swap_buffer/2]).
%%%===================================================================
@@ -207,7 +207,7 @@ check_my_config([]) ->
%%%-----------------------------------------------------------------
%%% Handler being removed
-removing_handler(Name) ->
+removing_handler(Name,_Config) ->
stop(Name).
%%%-----------------------------------------------------------------
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index c00fb44c46..988f26280f 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1603,8 +1603,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
rpc:call(Cp1, application, get_key, [appinc, start_phases]),
{ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
rpc:call(Cp1, application, get_key, [appinc, mod]),
@@ -1625,8 +1624,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
rpc:call(Cp1, application, get_all_key, [appinc]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, "Test of new app file, including appnew"} =
gen_server:call({global, {ch,41}}, {get_pid_key, description}),
@@ -1643,8 +1641,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}),
{ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} =
gen_server:call({global, {ch,41}}, {get_pid_key, modules}),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
@@ -1671,8 +1668,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
gen_server:call({global, {ch,41}}, get_pid_all_key),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
stop_node_nice(Cp1),
ok.
diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl
index 0470f09f29..9c6712ad74 100644
--- a/lib/kernel/test/erl_distribution_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_SUITE.erl
@@ -244,7 +244,7 @@ illegal(Name) ->
test_node(Name) ->
test_node(Name, false).
test_node(Name, Illigal) ->
- ProgName = atom_to_list(lib:progname()),
+ ProgName = ct:get_progname(),
Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++
" -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++
case Illigal of
diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl
index 22db24de5f..e95635b800 100644
--- a/lib/kernel/test/heart_SUITE.erl
+++ b/lib/kernel/test/heart_SUITE.erl
@@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
@@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "0"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "10"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) ->
clear_cmd(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 9a4578917d..a21020ff97 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -76,7 +76,7 @@ sync(Conf) when is_list(Conf) ->
%% Reset wall_clock
{T1,_} = erlang:statistics(wall_clock),
io:format("~p~n", [{t1, T1}]),
- Command = lists:concat([lib:progname(),
+ Command = lists:append([ct:get_progname(),
" -detached -sname cp1 ",
"-config ", Config,
" -env ERL_CRASH_DUMP erl_crash_dump.cp1"]),
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index 0edce3e34c..f311a9c7ed 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -666,6 +666,9 @@ process_metadata(_Config) ->
check_logged(info,S3,#{time=>Time,line=>0,custom=>func}),
ProcMeta = logger:get_process_metadata(),
+ ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}),
+ Expected = ProcMeta#{custom:=changed,custom2=>added},
+ Expected = logger:get_process_metadata(),
ok = logger:unset_process_metadata(),
undefined = logger:get_process_metadata(),
@@ -720,7 +723,7 @@ check_maps(Expected,Got,What) ->
adding_handler(_Id,Config) ->
maybe_send(add),
{ok,Config}.
-removing_handler(_Id) ->
+removing_handler(_Id,_Config) ->
maybe_send(remove),
ok.
changing_config(_Id,_Old,#{call:=Fun}) ->
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index c7c6137380..63e5b56021 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -497,24 +497,19 @@ disk_log_sync(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
- start_tracer([{?MODULE,format,2},
- {disk_log,blog,2},
+ start_tracer([{disk_log,blog,2},
{disk_log,sync,1}],
- [{formatter,"first"},
- {disk_log,blog},
+ [{disk_log,blog,<<"first\n">>},
{disk_log,sync}]),
logger:info("first", ?domain),
%% wait for automatic disk_log_sync
check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
- start_tracer([{?MODULE,format,2},
- {disk_log,blog,2},
+ start_tracer([{disk_log,blog,2},
{disk_log,sync,1}],
- [{formatter,"second"},
- {formatter,"third"},
- {disk_log,blog},
- {disk_log,blog},
+ [{disk_log,blog,<<"second\n">>},
+ {disk_log,blog,<<"third\n">>},
{disk_log,sync}]),
%% two log requests in fast succession will make the handler skip
%% an automatic disk log sync
@@ -531,13 +526,10 @@ disk_log_sync(Config) ->
no_repeat = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
- start_tracer([{?MODULE,format,2},
- {disk_log,blog,2},
+ start_tracer([{disk_log,blog,2},
{disk_log,sync,1}],
- [{formatter,"fourth"},
- {disk_log,blog},
- {formatter,"fifth"},
- {disk_log,blog},
+ [{disk_log,blog,<<"fourth\n">>},
+ {disk_log,blog,<<"fifth\n">>},
{disk_log,sync}]),
logger:info("fourth", ?domain),
@@ -566,6 +558,7 @@ disk_log_sync(Config) ->
check_tracer(100),
ok.
disk_log_sync(cleanup,_Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
disk_log_wrap(Config) ->
@@ -623,6 +616,7 @@ disk_log_wrap(Config) ->
ok.
disk_log_wrap(cleanup,_Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
disk_log_full(Config) ->
@@ -668,6 +662,7 @@ disk_log_full(Config) ->
{trace,{error_status,{error,{full,_}}}}] = Received,
ok.
disk_log_full(cleanup, _Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
disk_log_events(Config) ->
@@ -713,6 +708,7 @@ disk_log_events(Config) ->
end, Received),
ok.
disk_log_events(cleanup, _Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
write_failure(Config) ->
@@ -763,7 +759,7 @@ sync_failure(Config) ->
Dir = ?config(priv_dir, Config),
FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]),
File = filename:join(Dir, FileName),
- Log = lists:concat([File,".1"]),
+
Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File),
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
@@ -832,10 +828,10 @@ log_on_remote_node(Node,Msg) ->
ok.
%% functions for test hook macros to be called by rpc
-set_internal_log(Mod, Func) ->
- ?set_internal_log({Mod,Func}).
-set_result(Op, Result) ->
- ?set_result(Op, Result).
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
set_defaults() ->
?set_defaults().
@@ -885,7 +881,7 @@ op_switch_to_drop(cleanup, _Config) ->
ok = stop_handler(?MODULE).
op_switch_to_flush() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
op_switch_to_flush(Config) ->
{Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
@@ -988,7 +984,7 @@ kill_disabled(cleanup, _Config) ->
ok = stop_handler(?MODULE).
qlen_kill_new(Config) ->
- {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
Pid0 = whereis(?MODULE),
{_,Mem0} = process_info(Pid0, memory),
RestartAfter = 2000,
@@ -1025,7 +1021,7 @@ qlen_kill_new(cleanup, _Config) ->
ok = stop_handler(?MODULE).
mem_kill_new(Config) ->
- {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
Pid0 = whereis(?MODULE),
{_,Mem0} = process_info(Pid0, memory),
RestartAfter = 2000,
@@ -1111,7 +1107,7 @@ restart_after(cleanup, _Config) ->
%% during high load to verify that sync, dropping and flushing is
%% handled correctly.
handler_requests_under_load() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
handler_requests_under_load(Config) ->
{Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
NewHConfig =
@@ -1140,7 +1136,7 @@ handler_requests_under_load(Config) ->
NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
ok = file:delete(Log).
-handler_requests_under_load(cleanup, Config) ->
+handler_requests_under_load(cleanup, _Config) ->
ok = stop_handler(?MODULE).
send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
@@ -1369,7 +1365,6 @@ start_tracer(Trace,Expected) ->
Pid = self(),
dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
dbg:p(whereis(?MODULE),[c]),
- dbg:p(Pid,[c]),
tpl(Trace),
ok.
@@ -1387,13 +1382,14 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{?MODULE,format,[#{msg:={string,Msg}}|_]}}, {Pid,[{formatter,Msg}|Expected]}) ->
- maybe_tracer_done(Pid,Expected,{formatter,Msg});
tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]}}, {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func});
tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
Pid ! {tracer_got_unexpected,Call,Expected},
{Pid,Expected}.
@@ -1413,5 +1409,6 @@ check_tracer(T) ->
dbg:stop_clear(),
ct:fail({tracer_got_unexpected,Got,Expected})
after T ->
+ dbg:stop_clear(),
ct:fail({timeout,tracer})
end.
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
index ac1abba629..7d1f33746d 100644
--- a/lib/kernel/test/logger_formatter_SUITE.erl
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -73,18 +73,19 @@ all() ->
default(_Config) ->
String1 = format(info,{"~p",[term]},#{},#{}),
ct:log(String1),
- [_Date,_Time,"info:\nterm\n"] = string:lexemes(String1," "),
+ [_Date,_Time,"info:","term\n"] = string:lexemes(String1," "),
Time = timestamp(),
ExpectedTimestamp = default_time_format(Time),
String2 = format(info,{"~p",[term]},#{time=>Time},#{}),
ct:log(String2),
- " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+ " info: term\n" = string:prefix(String2,ExpectedTimestamp),
ok.
legacy_header(_Config) ->
Time = timestamp(),
- String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true}),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true,
+ single_line=>false}),
ct:log(String1),
"=INFO REPORT==== "++Rest = String1,
[Timestamp,"\nterm\n"] = string:lexemes(Rest," ="),
@@ -98,12 +99,14 @@ legacy_header(_Config) ->
true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun",
"Jul","Aug","Sep","Oct","Nov","Dec"]),
- String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false}),
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false,
+ single_line=>false}),
ct:log(String2),
ExpectedTimestamp = default_time_format(Time),
" info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
- String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad}),
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad,
+ single_line=>false}),
ct:log(String3),
String3 = String2,
@@ -114,7 +117,8 @@ legacy_header(_Config) ->
String4 = String1,
String5 = format(info,{"~p",[term]},#{}, % <--- no time
- #{legacy_header=>true}),
+ #{legacy_header=>true,
+ single_line=>false}),
ct:log(String5),
"=INFO REPORT==== "++_ = String5,
ok.
@@ -289,38 +293,36 @@ report_cb(_Config) ->
ok.
max_size(_Config) ->
- Template = [msg],
+ Cfg = #{template=>[msg],
+ single_line=>false},
"12345678901234567890" =
- format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
+ format(info,{"12345678901234567890",[]},#{},Cfg),
application:set_env(kernel,logger_max_size,11),
"12345678901234567890" = % min value is 50, so this is not limited
- format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
+ format(info,{"12345678901234567890",[]},#{},Cfg),
"12345678901234567890123456789012345678901234567..." = % 50
format(info,
{"123456789012345678901234567890123456789012345678901234567890",
[]},
#{},
- #{template=>Template}),
+ Cfg),
application:set_env(kernel,logger_max_size,53),
"12345678901234567890123456789012345678901234567890..." = %53
format(info,
{"123456789012345678901234567890123456789012345678901234567890",
[]},
#{},
- #{template=>Template}),
+ Cfg),
"123456789012..." =
- format(info,{"12345678901234567890",[]},#{},#{template=>Template,
- max_size=>15}),
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}),
"12345678901234567890" =
- format(info,{"12345678901234567890",[]},#{},#{template=>Template,
- max_size=>unlimited}),
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}),
%% Check that one newline at the end of the line is kept (if it exists)
"12345678901...\n" =
- format(info,{"12345678901234567890\n",[]},#{},#{template=>Template,
- max_size=>15}),
+ format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}),
"12345678901...\n" =
- format(info,{"12345678901234567890",[]},#{},#{template=>[msg,"\n"],
- max_size=>15}),
+ format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"],
+ max_size=>15}),
ok.
max_size(cleanup,_Config) ->
application:unset_env(kernel,logger_max_size),
@@ -441,20 +443,20 @@ format_time(_Config) ->
ExpectedTimestamp1 = default_time_format(Time1),
String1 = format(info,{"~p",[term]},#{time=>Time1},#{}),
ct:log(String1),
- " info:\nterm\n" = string:prefix(String1,ExpectedTimestamp1),
+ " info: term\n" = string:prefix(String1,ExpectedTimestamp1),
Time2 = timestamp(),
ExpectedTimestamp2 = default_time_format(Time2,true),
String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}),
ct:log(String2),
- " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp2),
+ " info: term\n" = string:prefix(String2,ExpectedTimestamp2),
application:set_env(kernel,logger_utc,true),
Time3 = timestamp(),
ExpectedTimestamp3 = default_time_format(Time3,true),
String3 = format(info,{"~p",[term]},#{time=>Time3},#{}),
ct:log(String3),
- " info:\nterm\n" = string:prefix(String3,ExpectedTimestamp3),
+ " info: term\n" = string:prefix(String3,ExpectedTimestamp3),
ok.
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index e940e0a026..7c8d63cbbd 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -499,86 +499,79 @@ filesync(Config) ->
#{logger_std_h => #{type => Type},
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
- formatter=>{?MODULE,self()}}),
- Tester = self(),
- TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
- Pid ! {trace,Mod,Func,Details},
- Pid;
- ({trace,TPid,'receive',Received}, Pid) ->
- Pid ! {trace,TPid,Received},
- Pid
- end,
- {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
- FileCtrlPid = maps:get(file_ctrl_pid , logger_std_h:info(?MODULE)),
- {ok,_} = dbg:p(FileCtrlPid, [c]),
- {ok,_} = dbg:tpl(logger_std_h, write_to_dev, 5, []),
- {ok,_} = dbg:tpl(logger_std_h, sync_dev, 4, []),
- {ok,_} = dbg:tp(file, datasync, 1, []),
+ formatter=>{?MODULE,nl}}),
+
+ %% check repeated filesync happens
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"first\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync}]),
logger:info("first", ?domain),
%% wait for automatic filesync
- timer:sleep(?FILESYNC_REP_INT),
- Expected1 = [{log,"first"}, {trace,logger_std_h,write_to_dev},
- {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
-
+ check_tracer(?FILESYNC_REP_INT*2),
+
+ %% check that explicit filesync is only done once
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"second\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync},
+ {no_more,500}
+ ]),
logger:info("second", ?domain),
%% do explicit filesync
logger_std_h:filesync(?MODULE),
%% a second filesync should be ignored
logger_std_h:filesync(?MODULE),
- Expected2 = [{log,"second"}, {trace,logger_std_h,write_to_dev},
- {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+ check_tracer(100),
%% check that if there's no repeated filesync active,
%% a filesync is still performed when handler goes idle
logger:set_handler_config(?MODULE, logger_std_h,
#{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ %% The following timer is to make sure the time from last log
+    %% ("second") to next ("third") is long enough, so that a flush is
+    %% triggered by the idle timeout between "third" and "fourth".
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"third\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync},
+ {logger_std_h, write_to_dev, <<"fourth\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync}]),
logger:info("third", ?domain),
+ %% wait for automatic filesync
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
logger:info("fourth", ?domain),
%% wait for automatic filesync
- timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
- Expected3 = [{log,"third"}, {trace,logger_std_h,write_to_dev},
- {log,"fourth"}, {trace,logger_std_h,write_to_dev},
- {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
-
- dbg:stop_clear(),
-
- %% verify that filesync has been performed as expected
- Received1 = lists:map(fun({trace,M,F,_}) -> {trace,M,F};
- (Other) -> Other
- end, test_server:messages_get()),
- ct:pal("Trace #1 =~n~p", [Received1]),
- Received1 = Expected1 ++ Expected2 ++ Expected3,
-
- try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\n">>}, 1000),
-
- {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
- {ok,_} = dbg:p(whereis(?MODULE), [c]),
- {ok,_} = dbg:tpl(logger_std_h, handle_cast, 2, []),
+ check_tracer(?IDLE_DETECT_TIME_MSEC*2),
%% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
+ OneSync = {logger_std_h,handle_cast,repeated_filesync},
+ %% receive 1 initial repeated_filesync, then 1 per sec
+ start_tracer([{logger_std_h,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+
logger:set_handler_config(?MODULE, logger_std_h,
#{filesync_repeat_interval => SyncInt}),
SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
timer:sleep(WaitT),
logger:set_handler_config(?MODULE, logger_std_h,
- #{filesync_repeat_interval => no_repeat}),
- dbg:stop_clear(),
-
- Received2 = lists:map(fun({trace,_M,handle_cast,[{Op,_},_]}) -> {trace,Op};
- (Other) -> Other
- end, test_server:messages_get()),
- ct:pal("Trace #2 =~n~p", [Received2]),
- OneSync = [{trace,repeated_filesync}],
- %% receive 1 initial repeated_filesync, then 1 per sec
- Received2 =
- lists:flatten([OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
ok.
filesync(cleanup, _Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
write_failure(Config) ->
@@ -785,7 +778,7 @@ op_switch_to_drop_tty(cleanup, _Config) ->
ok = stop_handler(?MODULE).
op_switch_to_flush_file() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
op_switch_to_flush_file(Config) ->
{Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
@@ -1052,7 +1045,7 @@ restart_after(cleanup, _Config) ->
%% during high load to verify that sync, dropping and flushing is
%% handled correctly.
handler_requests_under_load() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
handler_requests_under_load(Config) ->
{Log,HConfig,StdHConfig} =
start_handler(?MODULE, ?FUNCTION_NAME, Config),
@@ -1394,3 +1387,67 @@ analyse(Msgs) ->
From ! {result,self(),TestFun(Msgs)},
analyse(Msgs)
end.
+
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(whereis(?MODULE),[c]),
+ dbg:p(FileCtrlPid,[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,[]),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_std_h,handle_cast,[{Op,_}|_]}},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
+ {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
+tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func});
+tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,0},
+ {Pid,Expected};
+maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,T},
+ {Pid,Expected};
+maybe_tracer_done(Pid,Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ check_tracer(T,fun() -> ct:fail({timeout,tracer}) end).
+check_tracer(T,TimeoutFun) ->
+ receive
+ {tracer_done,Delay} ->
+ %% Possibly wait Delay ms to check that no unexpected
+ %% traces are received
+ check_tracer(Delay,fun() -> ok end);
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ TimeoutFun()
+ end.
diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl
index 591fbb2125..abbc301360 100644
--- a/lib/kernel/test/os_SUITE.erl
+++ b/lib/kernel/test/os_SUITE.erl
@@ -227,8 +227,8 @@ find_executable(Config) when is_list(Config) ->
DataDir = proplists:get_value(data_dir, Config),
%% Smoke test.
- case lib:progname() of
- erl ->
+ case ct:get_progname() of
+ "erl" ->
ErlPath = os:find_executable("erl"),
true = is_list(ErlPath),
true = filelib:is_regular(ErlPath);
@@ -388,7 +388,7 @@ comp(Expected, Got) ->
ct:fail(failed)
end.
-%% Like lib:nonl/1, but strips \r as well as \n.
+%% Strips \n and \r\n from the end of the string
strip_nl([$\r, $\n]) -> [];
strip_nl([$\n]) -> [];
diff --git a/lib/observer/src/observer_lib.erl b/lib/observer/src/observer_lib.erl
index 0678b64134..718ef91942 100644
--- a/lib/observer/src/observer_lib.erl
+++ b/lib/observer/src/observer_lib.erl
@@ -682,7 +682,7 @@ parse_string(Str) ->
{error, {_SLine, SMod, SError}, _} ->
throw(io_lib:format("~ts", [SMod:format_error(SError)]))
end,
- case lib:extended_parse_term(Tokens) of
+ case erl_eval:extended_parse_term(Tokens) of
{error, {_PLine, PMod, PError}} ->
throw(io_lib:format("~ts", [PMod:format_error(PError)]));
Res -> Res
diff --git a/lib/parsetools/src/yecc.erl b/lib/parsetools/src/yecc.erl
index b4e1cfe5e3..ce1b9468fd 100644
--- a/lib/parsetools/src/yecc.erl
+++ b/lib/parsetools/src/yecc.erl
@@ -455,10 +455,14 @@ os_process_size() ->
case os:type() of
{unix, sunos} ->
Size = os:cmd("ps -o vsz -p " ++ os:getpid() ++ " | tail -1"),
- list_to_integer(lib:nonl(Size));
+ list_to_integer(nonl(Size));
_ ->
0
- end.
+ end.
+
+nonl([$\n]) -> [];
+nonl([]) -> [];
+nonl([H|T]) -> [H|nonl(T)].
timeit(Name, Fun, St0) ->
Time = runtime,
diff --git a/lib/sasl/src/sasl.erl b/lib/sasl/src/sasl.erl
index 657eb6688a..2bf11bdcdf 100644
--- a/lib/sasl/src/sasl.erl
+++ b/lib/sasl/src/sasl.erl
@@ -130,6 +130,7 @@ add_sasl_logger(undefined, _Level) -> ok;
add_sasl_logger(std, undefined) -> ok;
add_sasl_logger(Dest, Level) ->
FC0 = #{legacy_header=>true,
+ single_line=>false,
template=>[{logger_formatter,header},"\n",msg,"\n"]},
FC = case application:get_env(sasl,utc_log) of
{ok,Bool} when is_boolean(Bool) ->
diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml
index d78309167a..d0ed674eee 100644
--- a/lib/ssh/doc/src/notes.xml
+++ b/lib/ssh/doc/src/notes.xml
@@ -30,6 +30,28 @@
<file>notes.xml</file>
</header>
+<section><title>Ssh 4.6.9</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Host key hash erroneously calculated for clients
+ following draft-00 of RFC 4419, for example PuTTY</p>
+ <p>
+ Own Id: OTP-15064</p>
+ </item>
+ <item>
+ <p>
+ Renegotiation could fail in some states</p>
+ <p>
+ Own Id: OTP-15066</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Ssh 4.6.8</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
@@ -487,6 +509,34 @@
</section>
+<section><title>Ssh 4.4.2.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix rare spurious shutdowns of ssh servers when receiving
+ <c>{'EXIT',_,normal}</c> messages.</p>
+ <p>
+ Own Id: OTP-15018</p>
+ </item>
+ <item>
+ <p>
+ Host key hash erroneously calculated for clients
+ following draft-00 of RFC 4419, for example PuTTY</p>
+ <p>
+ Own Id: OTP-15064</p>
+ </item>
+ <item>
+ <p>
+ Renegotiation could fail in some states</p>
+ <p>
+ Own Id: OTP-15066</p>
+ </item>
+ </list>
+ </section>
+
+</section>
<section><title>Ssh 4.4.2.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index 0223831cb1..407956cc6f 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -762,9 +762,23 @@
<datatype>
<name name="rekey_limit_common_option"/>
+ <name name="limit_bytes"/>
+ <name name="limit_time"/>
<desc>
- <p>Sets a limit, in bytes, when rekeying is to be initiated.
- Defaults to once per each GB and once per hour.</p>
+	  <p>Sets the limit at which rekeying is to be initiated. Both the maximum time and the maximum amount of data
+	  can be configured:
+ </p>
+ <list>
+	    <item><c>{Minutes, Bytes}</c> initiates rekeying when either of the limits is reached.</item>
+	    <item><c>Bytes</c> initiates rekeying when <c>Bytes</c> bytes have been transferred,
+	    or at the latest after one hour.</item>
+ </list>
+ <p>When a rekeying is done, both the timer and the byte counter are restarted.
+ Defaults to one hour and one GByte.</p>
+	  <p>If <c>Minutes</c> is set to <c>infinity</c>, no rekeying is ever initiated because the maximum time has passed.
+	  Setting <c>Bytes</c> to <c>infinity</c> inhibits rekeying triggered by the amount of data transferred.
+ If the option value is set to <c>{infinity, infinity}</c>, no rekeying will be initiated. Note that rekeying initiated
+ by the peer will still be performed.</p>
</desc>
</datatype>
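
A minimal sketch of the extended option on a client connection (host and limits are illustrative); ssh:connect/3 accepts both the new tuple form and the old byte-count form:

    {ok, Conn1} = ssh:connect("host.example.com", 22,
                              [{rekey_limit, {30, 500000000}}]),   % 30 minutes or ~500 MB
    {ok, Conn2} = ssh:connect("host.example.com", 22,
                              [{rekey_limit, 1000000000}]).        % bytes only, time defaults to one hour
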
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index a3d9a1b1cb..2efd239aae 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -29,7 +29,6 @@
-define(SSH_DEFAULT_PORT, 22).
-define(SSH_MAX_PACKET_SIZE, (256*1024)).
--define(REKEY_TIMOUT, 3600000).
-define(REKEY_DATA_TIMOUT, 60000).
-define(DEFAULT_PROFILE, default).
@@ -192,7 +191,12 @@
-type user_dir_common_option() :: {user_dir, false | string()}.
-type profile_common_option() :: {profile, atom() }.
-type max_idle_time_common_option() :: {idle_time, timeout()}.
--type rekey_limit_common_option() :: {rekey_limit, non_neg_integer() }.
+-type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() |
+ {Minutes::limit_time(), Bytes::limit_bytes()}
+ }.
+
+-type limit_bytes() :: non_neg_integer() | infinity . % non_neg_integer due to compatibility
+-type limit_time() :: pos_integer() | infinity .
-type key_cb_common_option() :: {key_cb, Module::atom() | {Module::atom(),Opts::[term()]} } .
-type disconnectfun_common_option() ::
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 57641cf74c..f1ff3a70e2 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -71,7 +71,7 @@
-export([init_connection_handler/3, % proc_lib:spawn needs this
init_ssh_record/3, % Export of this internal function
% intended for low-level protocol test suites
- renegotiate/1, renegotiate_data/1, alg/1 % Export intended for test cases
+ renegotiate/1, alg/1 % Export intended for test cases
]).
-export([dbg_trace/3]).
@@ -325,14 +325,7 @@ close(ConnectionHandler, ChannelId) ->
) -> ok.
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
renegotiate(ConnectionHandler) ->
- cast(ConnectionHandler, renegotiate).
-
-%%--------------------------------------------------------------------
--spec renegotiate_data(connection_ref()
- ) -> ok.
-%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
-renegotiate_data(ConnectionHandler) ->
- cast(ConnectionHandler, data_size).
+ cast(ConnectionHandler, force_renegotiate).
%%--------------------------------------------------------------------
alg(ConnectionHandler) ->
@@ -349,11 +342,6 @@ alg(ConnectionHandler) ->
connection_state :: #connection{},
latest_channel_id = 0 :: non_neg_integer()
| undefined,
- idle_timer_ref :: undefined
- | infinity
- | reference(),
- idle_timer_value = infinity :: infinity
- | pos_integer(),
transport_protocol :: atom()
| undefined, % ex: tcp
transport_cb :: atom()
@@ -429,20 +417,16 @@ init([Role,Socket,Opts]) ->
},
D = case Role of
client ->
- %% Start the renegotiation timers
- timer:apply_after(?REKEY_TIMOUT, gen_statem, cast, [self(), renegotiate]),
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]),
- cache_init_idle_timer(D0);
+ D0;
server ->
Sups = ?GET_INTERNAL_OPT(supervisors, Opts),
- cache_init_idle_timer(
- D0#data{connection_state =
- C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}),
- exec = ?GET_OPT(exec, Opts),
- system_supervisor = proplists:get_value(system_sup, Sups),
- sub_system_supervisor = proplists:get_value(subsystem_sup, Sups),
- connection_supervisor = proplists:get_value(connection_sup, Sups)
- }})
+ D0#data{connection_state =
+ C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}),
+ exec = ?GET_OPT(exec, Opts),
+ system_supervisor = proplists:get_value(system_sup, Sups),
+ sub_system_supervisor = proplists:get_value(subsystem_sup, Sups),
+ connection_supervisor = proplists:get_value(connection_sup, Sups)
+ }}
end,
{ok, {hello,Role}, D};
@@ -544,7 +528,7 @@ role({_,Role}) -> Role;
role({_,Role,_}) -> Role.
-spec renegotiation(state_name()) -> boolean().
-renegotiation({_,_,ReNeg}) -> ReNeg == renegotiation;
+renegotiation({_,_,ReNeg}) -> ReNeg == renegotiate;
renegotiation(_) -> false.
@@ -558,10 +542,15 @@ renegotiation(_) -> false.
#data{}
) -> gen_statem:event_handler_result(state_name()) .
+-define(CONNECTION_MSG(Msg),
+ [{next_event, internal, prepare_next_packet},
+ {next_event,internal,{conn_msg,Msg}}]).
+
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
callback_mode() ->
- handle_event_function.
+ [handle_event_function,
+ state_enter].
handle_event(_, _Event, {init_error,Error}=StateName, D) ->
@@ -1016,95 +1005,92 @@ handle_event(_, #ssh_msg_debug{} = Msg, _, D) ->
debug_fun(Msg, D),
keep_state_and_data;
-handle_event(internal, Msg=#ssh_msg_global_request{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_success{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_confirmation{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_window_adjust{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_data{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_extended_data{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_eof{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, {connected,server} = StateName, D) ->
- handle_connection_msg(Msg, StateName, cache_request_idle_timer_check(D));
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+handle_event(internal, {conn_msg,Msg}, StateName, #data{starter = User,
+ connection_state = Connection0,
+ event_queue = Qev0} = D0) ->
+ Role = role(StateName),
+    Renegotiation = renegotiation(StateName),
+ try ssh_connection:handle_msg(Msg, Connection0, Role) of
+ {disconnect, Reason0, RepliesConn} ->
+ {Repls, D} = send_replies(RepliesConn, D0),
+ case {Reason0,Role} of
+ {{_, Reason}, client} when ((StateName =/= {connected,client})
+                                         and (not Renegotiation)) ->
+ User ! {self(), not_connected, Reason};
+ _ ->
+ ok
+ end,
+ {stop_and_reply, {shutdown,normal}, Repls, D};
-handle_event(internal, Msg=#ssh_msg_channel_request{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+ {Replies, Connection} when is_list(Replies) ->
+ {Repls, D} =
+ case StateName of
+ {connected,_} ->
+ send_replies(Replies, D0#data{connection_state=Connection});
+ _ ->
+ {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies),
+ send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies})
+ end,
+ case {Msg, StateName} of
+ {#ssh_msg_channel_close{}, {connected,_}} ->
+ {keep_state, D, [cond_set_idle_timer(D)|Repls]};
+ {#ssh_msg_channel_success{}, _} ->
+ update_inet_buffers(D#data.socket),
+ {keep_state, D, Repls};
+ _ ->
+ {keep_state, D, Repls}
+ end
-handle_event(internal, Msg=#ssh_msg_channel_success{}, StateName, D) ->
- update_inet_buffers(D#data.socket),
- handle_connection_msg(Msg, StateName, D);
+ catch
+ Class:Error ->
+ {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0),
+ {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION,
+ io_lib:format("Internal error: ~p:~p",[Class,Error]),
+ StateName, D1),
+ {stop_and_reply, Shutdown, Repls, D}
+ end;
-handle_event(internal, Msg=#ssh_msg_channel_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+handle_event(enter, _OldState, {connected,_}=State, D) ->
+ %% Entering the state where re-negotiation is possible
+ init_renegotiate_timers(State, D);
+
+handle_event(enter, _OldState, {ext_info,_,renegotiate}=State, D) ->
+    %% Could be hanging in the ext_info state if nothing else arrives
+ init_renegotiate_timers(State, D);
+
+handle_event(enter, {connected,_}, State, D) ->
+ %% Exiting the state where re-negotiation is possible
+ pause_renegotiate_timers(State, D);
+
+handle_event(cast, force_renegotiate, StateName, D) ->
+ handle_event({timeout,renegotiate}, undefined, StateName, D);
+
+handle_event({timeout,renegotiate}, _, StateName, D0) ->
+ case StateName of
+ {connected,Role} ->
+ start_rekeying(Role, D0);
+ {ext_info,Role,renegotiate} ->
+ start_rekeying(Role, D0);
+ _ ->
+            %% Wrong state for starting a renegotiation; most likely already re-negotiating
+ keep_state_and_data
+ end;
-handle_event(cast, renegotiate, {connected,Role}, D) ->
- {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D#data.ssh_params),
- send_bytes(SshPacket, D),
- timer:apply_after(?REKEY_TIMOUT, gen_statem, cast, [self(), renegotiate]),
- {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
- key_exchange_init_msg = KeyInitMsg}};
+handle_event({timeout,check_data_size}, _, StateName, D0) ->
+ %% Rekey due to sent data limit reached? (Can't be in {ext_info,...} if data is sent)
+ case StateName of
+ {connected,Role} ->
+ check_data_rekeying(Role, D0);
+ _ ->
+            %% Wrong state for starting a renegotiation; most likely already re-negotiating
+ keep_state_and_data
+ end;
handle_event({call,From}, get_alg, _, D) ->
#ssh{algorithms=Algs} = D#data.ssh_params,
{keep_state_and_data, [{reply,From,Algs}]};
-handle_event(cast, renegotiate, _, _) ->
- %% Already in key-exchange so safe to ignore
- timer:apply_after(?REKEY_TIMOUT, gen_statem, cast, [self(), renegotiate]), % FIXME: not here in original
- keep_state_and_data;
-
-
-%% Rekey due to sent data limit reached?
-handle_event(cast, data_size, {connected,Role}, D) ->
- {ok, [{send_oct,Sent0}]} = inet:getstat(D#data.socket, [send_oct]),
- Sent = Sent0 - D#data.last_size_rekey,
- MaxSent = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]),
- case Sent >= MaxSent of
- true ->
- {KeyInitMsg, SshPacket, Ssh} =
- ssh_transport:key_exchange_init_msg(D#data.ssh_params),
- send_bytes(SshPacket, D),
- {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
- key_exchange_init_msg = KeyInitMsg,
- last_size_rekey = Sent0}};
- _ ->
- keep_state_and_data
- end;
-
-handle_event(cast, data_size, _, _) ->
- %% Already in key-exchange so safe to ignore
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]), % FIXME: not here in original
- keep_state_and_data;
-
-
-
handle_event(cast, _, StateName, _) when not ?CONNECTED(StateName) ->
{keep_state_and_data, [postpone]};
@@ -1218,7 +1204,7 @@ handle_event({call,From}, {request, ChannelPid, ChannelId, Type, Data, Timeout},
D ->
            %% Note: the reply to the channel is sent later, when the reply is received from the peer on the socket
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_request_idle_timer_check(D)}
+ {keep_state, D, cond_set_idle_timer(D)}
end;
handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1229,7 +1215,7 @@ handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName,
D ->
            %% Note: the reply to the channel is sent later, when the reply is received from the peer on the socket
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_request_idle_timer_check(D)}
+ {keep_state, D, cond_set_idle_timer(D)}
end;
handle_event({call,From}, {data, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1270,7 +1256,7 @@ handle_event({call,From},
}),
D = add_request(true, ChannelId, From, D2),
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_cancel_idle_timer(D)};
+ {keep_state, D, cond_set_idle_timer(D)};
handle_event({call,From}, {send_window, ChannelId}, StateName, D)
when ?CONNECTED(StateName) ->
@@ -1300,7 +1286,7 @@ handle_event({call,From}, {close, ChannelId}, StateName, D0)
#channel{remote_id = Id} = Channel ->
D1 = send_msg(ssh_connection:channel_close_msg(Id), D0),
ssh_client_channel:cache_update(cache(D1), Channel#channel{sent_close = true}),
- {keep_state, cache_request_idle_timer_check(D1), [{reply,From,ok}]};
+ {keep_state, D1, [cond_set_idle_timer(D1), {reply,From,ok}]};
undefined ->
{keep_state_and_data, [{reply,From,ok}]}
end;
@@ -1316,6 +1302,7 @@ handle_event(info, {Proto, Sock, Info}, {hello,_}, #data{socket = Sock,
{keep_state_and_data, [{next_event, internal, {info_line,Info}}]}
end;
+
handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
transport_protocol = Proto}) ->
try ssh_transport:handle_packet_part(
@@ -1333,13 +1320,29 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
try
ssh_message:decode(set_kex_overload_prefix(DecryptedBytes,D1))
of
- Msg = #ssh_msg_kexinit{} ->
+ #ssh_msg_kexinit{} = Msg ->
{keep_state, D1, [{next_event, internal, prepare_next_packet},
{next_event, internal, {Msg,DecryptedBytes}}
]};
+
+ #ssh_msg_global_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_request_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_request_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open_confirmation{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_window_adjust{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_extended_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_eof{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_close{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+
Msg ->
{keep_state, D1, [{next_event, internal, prepare_next_packet},
- {next_event, internal, Msg}
+ {next_event, internal, Msg}
]}
catch
C:E ->
@@ -1418,8 +1421,20 @@ handle_event(info, {timeout, {_, From} = Request}, _,
end;
%%% Handle that ssh channels user process goes down
-handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D0) ->
- {keep_state, handle_channel_down(ChannelPid, D0)};
+handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D) ->
+ Cache = cache(D),
+ ssh_client_channel:cache_foldl(
+ fun(#channel{user=U,
+ local_id=Id}, Acc) when U == ChannelPid ->
+ ssh_client_channel:cache_delete(Cache, Id),
+ Acc;
+ (_,Acc) ->
+ Acc
+ end, [], Cache),
+ {keep_state, D, cond_set_idle_timer(D)};
+
+handle_event({timeout,idle_time}, _Data, _StateName, _D) ->
+ {stop, {shutdown, "Timeout"}};
%%% So that terminate will be run when supervisor is shutdown
handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) ->
@@ -1439,7 +1454,7 @@ handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) ->
end;
handle_event(info, check_cache, _, D) ->
- {keep_state, cache_check_set_idle_timer(D)};
+ {keep_state, D, cond_set_idle_timer(D)};
handle_event(info, UnexpectedMessage, StateName, D = #data{ssh_params = Ssh}) ->
case unexpected_fun(UnexpectedMessage, D) of
@@ -1486,6 +1501,11 @@ handle_event(internal, {send_disconnect,Code,DetailedText,Module,Line}, StateNam
send_disconnect(Code, DetailedText, Module, Line, StateName, D0),
{stop, Shutdown, D};
+
+handle_event(enter, _OldState, State, D) ->
+    %% Nothing to do for enter events in other states; just skip them
+ {next_state, State, D};
+
handle_event(_Type, _Msg, {ext_info,Role,_ReNegFlag}, D) ->
    %% If something else arrives, go to the next state and handle the event there
{next_state, {connected,Role}, D, [postpone]};
@@ -1743,46 +1763,6 @@ call(FsmPid, Event, Timeout) ->
end.
-handle_connection_msg(Msg, StateName, D0 = #data{starter = User,
- connection_state = Connection0,
- event_queue = Qev0}) ->
- Renegotiation = renegotiation(StateName),
- Role = role(StateName),
- try ssh_connection:handle_msg(Msg, Connection0, Role) of
- {disconnect, Reason0, RepliesConn} ->
- {Repls, D} = send_replies(RepliesConn, D0),
- case {Reason0,Role} of
- {{_, Reason}, client} when ((StateName =/= {connected,client}) and (not Renegotiation)) ->
- User ! {self(), not_connected, Reason};
- _ ->
- ok
- end,
- {stop_and_reply, {shutdown,normal}, Repls, D};
-
- {[], Connection} ->
- {keep_state, D0#data{connection_state = Connection}};
-
- {Replies, Connection} when is_list(Replies) ->
- {Repls, D} =
- case StateName of
- {connected,_} ->
- send_replies(Replies, D0#data{connection_state=Connection});
- _ ->
- {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies),
- send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies})
- end,
- {keep_state, D, Repls}
-
- catch
- Class:Error ->
- {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0),
- {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION,
- io_lib:format("Internal error: ~p:~p",[Class,Error]),
- StateName, D1),
- {stop_and_reply, Shutdown, Repls, D}
- end.
-
-
set_kex_overload_prefix(Msg = <<?BYTE(Op),_/binary>>, #data{ssh_params=SshParams})
when Op == 30;
Op == 31
@@ -1888,19 +1868,6 @@ handle_request(ChannelId, Type, Data, WantReply, From, D) ->
end.
%%%----------------------------------------------------------------
-handle_channel_down(ChannelPid, D) ->
- Cache = cache(D),
- ssh_client_channel:cache_foldl(
- fun(#channel{user=U,
- local_id=Id}, Acc) when U == ChannelPid ->
- ssh_client_channel:cache_delete(Cache, Id),
- Acc;
- (_,Acc) ->
- Acc
- end, [], Cache),
- cache_check_set_idle_timer(D).
-
-
update_sys(Cache, Channel, Type, ChannelPid) ->
ssh_client_channel:cache_update(Cache,
Channel#channel{sys = Type, user = ChannelPid}).
@@ -1919,6 +1886,42 @@ new_channel_id(#data{connection_state = #connection{channel_id_seed = Id} =
{Id, State#data{connection_state =
Connection#connection{channel_id_seed = Id + 1}}}.
+
+%%%----------------------------------------------------------------
+start_rekeying(Role, D0) ->
+ {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params),
+ send_bytes(SshPacket, D0),
+ D = D0#data{ssh_params = Ssh,
+ key_exchange_init_msg = KeyInitMsg},
+ {next_state, {kexinit,Role,renegotiate}, D}.
+
+
+init_renegotiate_timers(State, D) ->
+ {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ {next_state, State, D, [{{timeout,renegotiate}, RekeyTimeout, none},
+ {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none} ]}.
+
+
+pause_renegotiate_timers(State, D) ->
+ {next_state, State, D, [{{timeout,renegotiate}, infinity, none},
+ {{timeout,check_data_size}, infinity, none} ]}.
+
+check_data_rekeying(Role, D) ->
+ {ok, [{send_oct,SocketSentTotal}]} = inet:getstat(D#data.socket, [send_oct]),
+ SentSinceRekey = SocketSentTotal - D#data.last_size_rekey,
+ {_RekeyTimeout,MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ case check_data_rekeying_dbg(SentSinceRekey, MaxSent) of
+ true ->
+ start_rekeying(Role, D#data{last_size_rekey = SocketSentTotal});
+ _ ->
+ %% Not enough data sent for a re-negotiation. Restart timer.
+ {keep_state, D, {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none}}
+ end.
+
+check_data_rekeying_dbg(SentSinceRekey, MaxSent) ->
+ %% This function is for the ssh_dbg to trace on. See dbg_trace/3 at the end.
+ SentSinceRekey >= MaxSent.
+
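The three helpers above replace timer:apply_after/4 with gen_statem's named generic timeouts. A minimal standalone sketch of the same pattern (not part of the patch; the module name and the 5000 ms interval are made up): a {{timeout,Name},Time,Content} action starts or restarts the named timer, Time == infinity pauses it, and expiry is delivered back as a {timeout,Name} event.

    -module(renego_timer_sketch).
    -behaviour(gen_statem).
    -export([start_link/0, pause/0]).
    -export([init/1, callback_mode/0, handle_event/4]).

    start_link() -> gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []).
    pause()      -> gen_statem:cast(?MODULE, pause).

    init([]) ->
        %% Arm the named timer when the statem starts
        {ok, running, undefined, [{{timeout,renegotiate}, 5000, none}]}.

    callback_mode() -> handle_event_function.

    handle_event({timeout,renegotiate}, none, running, Data) ->
        %% The timer fired; do the periodic work and re-arm it
        {keep_state, Data, [{{timeout,renegotiate}, 5000, none}]};
    handle_event(cast, pause, running, Data) ->
        %% 'infinity' cancels (pauses) the named timer
        {keep_state, Data, [{{timeout,renegotiate}, infinity, none}]}.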
%%%----------------------------------------------------------------
%%% This server/client has decided to disconnect via the state machine:
%%% The unused arguments are for debugging.
@@ -2131,60 +2134,12 @@ retry_fun(User, Reason, #data{ssh_params = #ssh{opts = Opts,
%%% Cache idle timer that closes the connection if there are no
%%% channels open for a while.
-cache_init_idle_timer(D) ->
- case ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts) of
- infinity ->
- D#data{idle_timer_value = infinity,
- idle_timer_ref = infinity % A flag used later...
- };
- IdleTime ->
- %% We dont want to set the timeout on first connect
- D#data{idle_timer_value = IdleTime}
- end.
-
-
-cache_check_set_idle_timer(D = #data{idle_timer_ref = undefined,
- idle_timer_value = IdleTime}) ->
- %% No timer set - shall we set one?
+cond_set_idle_timer(D) ->
case ssh_client_channel:cache_info(num_entries, cache(D)) of
- 0 when IdleTime == infinity ->
- %% No. Meaningless to set a timer that fires in an infinite time...
- D;
- 0 ->
- %% Yes, we'll set one since the cache is empty and it should not
- %% be that for a specified time
- D#data{idle_timer_ref =
- erlang:send_after(IdleTime, self(), {'EXIT',[],"Timeout"})};
- _ ->
- %% No - there are entries in the cache
- D
- end;
-cache_check_set_idle_timer(D) ->
- %% There is already a timer set or the timeout time is infinite
- D.
-
-
-cache_cancel_idle_timer(D) ->
- case D#data.idle_timer_ref of
- infinity ->
- %% The timer is not activated
- D;
- undefined ->
- %% The timer is already cancelled
- D;
- TimerRef ->
- %% The timer is active
- erlang:cancel_timer(TimerRef),
- D#data{idle_timer_ref = undefined}
+ 0 -> {{timeout,idle_time}, ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts), none};
+ _ -> {{timeout,idle_time}, infinity, none}
end.
-
-cache_request_idle_timer_check(D = #data{idle_timer_value = infinity}) ->
- D;
-cache_request_idle_timer_check(D = #data{idle_timer_value = IdleTime}) ->
- erlang:send_after(IdleTime, self(), check_cache),
- D.
-
%%%----------------------------------------------------------------
start_channel_request_timer(_,_, infinity) ->
ok;
@@ -2245,7 +2200,7 @@ update_inet_buffers(Socket) ->
%%%# Tracing
%%%#
-dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events];
+dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events, renegotiation];
dbg_trace(flags, connections, A) -> [c] ++ dbg_trace(flags, terminate, A);
dbg_trace(on, connections, A) -> dbg:tp(?MODULE, init_connection_handler, 3, x),
@@ -2288,6 +2243,33 @@ dbg_trace(format, connection_events, {return_from, {?MODULE,handle_event,4}, Ret
io_lib:format("~p~n", [event_handler_result(Ret)])
];
+dbg_trace(flags, renegotiation, _) -> [c];
+dbg_trace(on, renegotiation, _) -> dbg:tpl(?MODULE, init_renegotiate_timers, 2, x),
+ dbg:tpl(?MODULE, pause_renegotiate_timers, 2, x),
+ dbg:tpl(?MODULE, check_data_rekeying_dbg, 2, x),
+ dbg:tpl(?MODULE, start_rekeying, 2, x);
+dbg_trace(off, renegotiation, _) -> dbg:ctpl(?MODULE, init_renegotiate_timers, 2),
+ dbg:ctpl(?MODULE, pause_renegotiate_timers, 2),
+ dbg:ctpl(?MODULE, check_data_rekeying_dbg, 2),
+ dbg:ctpl(?MODULE, start_rekeying, 2);
+dbg_trace(format, renegotiation, {call, {?MODULE,init_renegotiate_timers,[_State,D]}}) ->
+ ["Renegotiation init\n",
+ io_lib:format("rekey_limit: ~p ({ms,bytes})~ncheck_data_size: ~p (ms)~n",
+ [?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ ?REKEY_DATA_TIMOUT])
+ ];
+dbg_trace(format, renegotiation, {call, {?MODULE,pause_renegotiate_timers,[_State,_D]}}) ->
+ ["Renegotiation pause\n"];
+dbg_trace(format, renegotiation, {call, {?MODULE,start_rekeying,[_Role,_D]}}) ->
+ ["Renegotiation start rekeying\n"];
+dbg_trace(format, renegotiation, {call, {?MODULE,check_data_rekeying_dbg,[SentSinceRekey, MaxSent]}}) ->
+ ["Renegotiation check data sent\n",
+ io_lib:format("TotalSentSinceRekey: ~p~nMaxBeforeRekey: ~p~nStartRekey: ~p~n",
+ [SentSinceRekey, MaxSent, SentSinceRekey >= MaxSent])
+ ];
+
+
+
dbg_trace(flags, terminate, _) -> [c];
dbg_trace(on, terminate, _) -> dbg:tp(?MODULE, terminate, 3, x);
dbg_trace(off, terminate, _) -> dbg:ctpg(?MODULE, terminate, 3);
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index 4dd9082250..fe95d2ac54 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -599,9 +599,24 @@ default(common) ->
class => user_options
},
- {rekey_limit, def} => % FIXME: Why not common?
- #{default => 1024000000,
- chk => fun check_non_neg_integer/1,
+ {rekey_limit, def} =>
+ #{default => {3600000, 1024000000}, % {1 hour, 1 GB}
+ chk => fun({infinity, infinity}) ->
+ true;
+ ({Mins, infinity}) when is_integer(Mins), Mins>0 ->
+ {true, {Mins*60*1000, infinity}};
+ ({infinity, Bytes}) when is_integer(Bytes), Bytes>=0 ->
+ true;
+ ({Mins, Bytes}) when is_integer(Mins), Mins>0,
+ is_integer(Bytes), Bytes>=0 ->
+ {true, {Mins*60*1000, Bytes}};
+ (infinity) ->
+ {true, {3600000, infinity}};
+ (Bytes) when is_integer(Bytes), Bytes>=0 ->
+ {true, {3600000, Bytes}};
+ (_) ->
+ false
+ end,
class => user_options
},
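For reference, the check fun above accepts both the old byte-count form and the new {Minutes, Bytes} form of rekey_limit, so all of the following client calls are valid after this change (host, port and limits are illustrative only):

    {ok, C1} = ssh:connect("host.example", 22, [{rekey_limit, 500000}]),                 % bytes only; time part defaults to 1 hour
    {ok, C2} = ssh:connect("host.example", 22, [{rekey_limit, {30, 500000}}]),           % every 30 minutes or 500 kB, whichever first
    {ok, C3} = ssh:connect("host.example", 22, [{rekey_limit, {infinity, 1024000000}}]), % never rekey on elapsed time
    {ok, C4} = ssh:connect("host.example", 22, [{rekey_limit, infinity}]).               % never rekey on sent volume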
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index f5bba9f824..631c4d0213 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -1808,9 +1808,10 @@ kex_alg_dependent({E, F, K}) ->
%% diffie-hellman and ec diffie-hellman (with E = Q_c, F = Q_s)
<<?Empint(E), ?Empint(F), ?Empint(K)>>;
-kex_alg_dependent({-1, _, -1, _, _, E, F, K}) ->
+kex_alg_dependent({-1, NBits, -1, Prime, Gen, E, F, K}) ->
%% ssh_msg_kex_dh_gex_request_old
- <<?Empint(E), ?Empint(F), ?Empint(K)>>;
+ <<?Euint32(NBits),
+ ?Empint(Prime), ?Empint(Gen), ?Empint(E), ?Empint(F), ?Empint(K)>>;
kex_alg_dependent({Min, NBits, Max, Prime, Gen, E, F, K}) ->
%% diffie-hellman group exchange
@@ -1849,9 +1850,6 @@ public_algo({#'ECPoint'{},{namedCurve,OID}}) ->
Curve = public_key:oid2ssh_curvename(OID),
list_to_atom("ecdsa-sha2-" ++ binary_to_list(Curve)).
-
-
-
sha('ssh-rsa') -> sha;
sha('rsa-sha2-256') -> sha256;
sha('rsa-sha2-384') -> sha384;
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 1fa94bef11..807e23ff01 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -32,7 +32,7 @@
-define(NEWLINE, <<"\r\n">>).
--define(REKEY_DATA_TMO, 65000).
+-define(REKEY_DATA_TMO, 1 * 60000). % Should be a multiple of 60000
%%--------------------------------------------------------------------
%% Common Test interface functions -----------------------------------
@@ -45,7 +45,6 @@ suite() ->
all() ->
[{group, all_tests}].
-
groups() ->
[{all_tests, [parallel], [{group, ssh_renegotiate_SUITE},
{group, ssh_basic_SUITE}
@@ -76,8 +75,17 @@ groups() ->
shell_exit_status
]},
- {ssh_renegotiate_SUITE, [parallel], [rekey,
- rekey_limit,
+ {ssh_renegotiate_SUITE, [parallel], [rekey0,
+ rekey1,
+ rekey2,
+ rekey3,
+ rekey4,
+ rekey_limit_client,
+ rekey_limit_daemon,
+ rekey_time_limit_client,
+ rekey_time_limit_daemon,
+ norekey_limit_client,
+ norekey_limit_daemon,
renegotiate1,
renegotiate2]},
@@ -1325,69 +1333,231 @@ shell_exit_status(Config) when is_list(Config) ->
ssh:stop_daemon(Pid).
+%%----------------------------------------------------------------------------
%%% Idle timeout test
-rekey() -> [{timetrap,{seconds,90}}].
+rekey0() -> [{timetrap,{seconds,90}}].
+rekey1() -> [{timetrap,{seconds,90}}].
+rekey2() -> [{timetrap,{seconds,90}}].
+rekey3() -> [{timetrap,{seconds,90}}].
+rekey4() -> [{timetrap,{seconds,90}}].
-rekey(Config) ->
- {Pid, Host, Port} =
- ssh_test_lib:std_daemon(Config,
- [{rekey_limit, 0}]),
- ConnectionRef =
- ssh_test_lib:std_connect(Config, Host, Port,
- [{rekey_limit, 0}]),
+rekey0(Config) -> rekey_chk(Config, 0, 0).
+rekey1(Config) -> rekey_chk(Config, infinity, 0).
+rekey2(Config) -> rekey_chk(Config, {infinity,infinity}, 0).
+rekey3(Config) -> rekey_chk(Config, 0, infinity).
+rekey4(Config) -> rekey_chk(Config, 0, {infinity,infinity}).
+
+rekey_chk(Config, RLdaemon, RLclient) ->
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config, [{rekey_limit, RLdaemon}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, RLclient}]),
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
- receive
- after ?REKEY_DATA_TMO ->
- %%By this time rekeying would have been done
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
- false = (Kex2 == Kex1),
- ssh:close(ConnectionRef),
- ssh:stop_daemon(Pid)
- end.
-%%--------------------------------------------------------------------
+ %% Make both sides send something:
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
-%%% Test rekeying by data volume
+ %% Check rekeying
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex1==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10),
-rekey_limit() -> [{timetrap,{seconds,400}}].
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
-rekey_limit(Config) ->
+%%--------------------------------------------------------------------
+%%% Test rekeying by data volume
+
+rekey_limit_client() -> [{timetrap,{seconds,400}}].
+rekey_limit_client(Config) ->
+ Limit = 6000,
UserDir = proplists:get_value(priv_dir, Config),
DataFile = filename:join(UserDir, "rekey.data"),
-
+ Data = lists:duplicate(Limit+10,1),
Algs = proplists:get_value(preferred_algorithms, Config),
{Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
{preferred_algorithms,Algs}]),
- ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, 6000},
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, Limit},
{max_random_length_padding,0}]),
{ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+ %% Check that it doesn't rekey without data transfer
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+    %% Check that data transfer triggers rekeying
+ ok = ssh_sftp:write_file(SftpPid, DataFile, Data),
timer:sleep(?REKEY_DATA_TMO),
- Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
- Data = lists:duplicate(159000,1),
+    %% Check that data transfer continues to trigger rekeying
ok = ssh_sftp:write_file(SftpPid, DataFile, Data),
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
- false = (Kex2 == Kex1),
+    %% Check that it doesn't rekey on a small data transfer
+ ok = ssh_sftp:write_file(SftpPid, DataFile, "hi\n"),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
- ok = ssh_sftp:write_file(SftpPid, DataFile, "hi\n"),
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+
+
+rekey_limit_daemon() -> [{timetrap,{seconds,400}}].
+rekey_limit_daemon(Config) ->
+ Limit = 6000,
+ UserDir = proplists:get_value(priv_dir, Config),
+ DataFile1 = filename:join(UserDir, "rekey1.data"),
+ DataFile2 = filename:join(UserDir, "rekey2.data"),
+ file:write_file(DataFile1, lists:duplicate(Limit+10,1)),
+ file:write_file(DataFile2, "hi\n"),
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, Limit},
+ {max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+
+ %% Check that it doesn't rekey without data transfer
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
- false = (Kex2 == Kex1),
+    %% Check that data transfer triggers rekeying
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile1),
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
+    %% Check that data transfer continues to trigger rekeying
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile1),
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10),
+
+ %% Check that it doesn't rekey without data transfer
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+    %% Check that it doesn't rekey on a small data transfer
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile2),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ %% Check that it doesn't rekey without data transfer
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+
+%%--------------------------------------------------------------------
+%% Check that data transfer in the other direction does not trigger re-keying
+norekey_limit_client() -> [{timetrap,{seconds,400}}].
+norekey_limit_client(Config) ->
+ Limit = 6000,
+ UserDir = proplists:get_value(priv_dir, Config),
+ DataFile = filename:join(UserDir, "rekey3.data"),
+ file:write_file(DataFile, lists:duplicate(Limit+10,1)),
+
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, Limit},
+ {max_random_length_padding,0}]),
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+%% Check that data transfer in the other direction does not trigger re-keying
+norekey_limit_daemon() -> [{timetrap,{seconds,400}}].
+norekey_limit_daemon(Config) ->
+ Limit = 6000,
+ UserDir = proplists:get_value(priv_dir, Config),
+ DataFile = filename:join(UserDir, "rekey4.data"),
+
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, Limit},
+ {max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ok = ssh_sftp:write_file(SftpPid, DataFile, lists:duplicate(Limit+10,1)),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+%%--------------------------------------------------------------------
+%%% Test rekeying by time
+
+rekey_time_limit_client() -> [{timetrap,{seconds,400}}].
+rekey_time_limit_client(Config) ->
+ Minutes = ?REKEY_DATA_TMO div 60000,
+ GB = 1024*1000*1000,
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, {Minutes, GB}},
+ {max_random_length_padding,0}]),
+ rekey_time_limit(Pid, ConnectionRef).
+
+rekey_time_limit_daemon() -> [{timetrap,{seconds,400}}].
+rekey_time_limit_daemon(Config) ->
+ Minutes = ?REKEY_DATA_TMO div 60000,
+ GB = 1024*1000*1000,
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, {Minutes, GB}},
+ {max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
+ rekey_time_limit(Pid, ConnectionRef).
+
+
+rekey_time_limit(Pid, ConnectionRef) ->
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+
+ timer:sleep(5000),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ %% Check that it rekeys when the max time + 30s has passed
+ timer:sleep(?REKEY_DATA_TMO + 30*1000),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
+
+    %% Check that it rekeys again by time, even when nothing is transferred
+ timer:sleep(?REKEY_DATA_TMO + 30*1000),
+ ?wait_match(false, Kex2==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10),
ssh_sftp:stop_channel(SftpPid),
ssh:close(ConnectionRef),
@@ -1395,7 +1565,7 @@ rekey_limit(Config) ->
%%--------------------------------------------------------------------
-%%% Test rekeying with simulataneous send request
+%%% Test rekeying with simultaneous send request
renegotiate1(Config) ->
UserDir = proplists:get_value(priv_dir, Config),
diff --git a/lib/ssh/test/ssh_test_lib.erl b/lib/ssh/test/ssh_test_lib.erl
index 57ae2dbac2..65970535f4 100644
--- a/lib/ssh/test/ssh_test_lib.erl
+++ b/lib/ssh/test/ssh_test_lib.erl
@@ -926,7 +926,7 @@ get_kex_init(Conn, Ref, TRef) ->
end;
false ->
- ct:log("Not in 'connected' state: ~p",[State]),
+ ct:log("~p:~p Not in 'connected' state: ~p",[?MODULE,?LINE,State]),
receive
{reneg_timeout,Ref} ->
ct:log("S = ~p", [S]),
diff --git a/lib/ssh/vsn.mk b/lib/ssh/vsn.mk
index f327d2ec11..f10e7aa96a 100644
--- a/lib/ssh/vsn.mk
+++ b/lib/ssh/vsn.mk
@@ -1,4 +1,5 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
-SSH_VSN = 4.6.8
+SSH_VSN = 4.6.9
+
APP_VSN = "ssh-$(SSH_VSN)"
diff --git a/lib/ssl/src/inet_tls_dist.erl b/lib/ssl/src/inet_tls_dist.erl
index 3e9828a2fe..d45f209838 100644
--- a/lib/ssl/src/inet_tls_dist.erl
+++ b/lib/ssl/src/inet_tls_dist.erl
@@ -518,51 +518,16 @@ gen_setup(Driver, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
{Name, Address} = split_node(Driver, Node, LongOrShortNames),
- case Driver:getaddr(Address) of
+ ErlEpmd = net_kernel:epmd_module(),
+ {ARMod, ARFun} = get_address_resolver(ErlEpmd, Driver),
+ Timer = trace(dist_util:start_timer(SetupTime)),
+ case ARMod:ARFun(Address) of
+ {ok, Ip, TcpPort, Version} ->
+ do_setup_connect(Driver, Kernel, Node, Address, Ip, TcpPort, Version, Type, MyNode, Timer);
{ok, Ip} ->
- Timer = trace(dist_util:start_timer(SetupTime)),
- ErlEpmd = net_kernel:epmd_module(),
case ErlEpmd:port_please(Name, Ip) of
{port, TcpPort, Version} ->
- Opts =
- trace(
- connect_options(
- %%
- %% Use verify_server/3 to verify that
- %% the server's certificate is for Node
- %%
- setup_verify_server(
- get_ssl_options(client), Node))),
- dist_util:reset_timer(Timer),
- case ssl:connect(
- Address, TcpPort,
- [binary, {active, false}, {packet, 4},
- Driver:family(), nodelay()] ++ Opts,
- net_kernel:connecttime()) of
- {ok, #sslsocket{pid = DistCtrl} = SslSocket} ->
- _ = monitor_pid(DistCtrl),
- ok = ssl:controlling_process(SslSocket, self()),
- HSData0 = hs_data_common(SslSocket),
- HSData =
- HSData0#hs_data{
- kernel_pid = Kernel,
- other_node = Node,
- this_node = MyNode,
- socket = DistCtrl,
- timer = Timer,
- this_flags = 0,
- other_version = Version,
- request_type = Type},
- link(DistCtrl),
- dist_util:handshake_we_started(trace(HSData));
- Other ->
- %% Other Node may have closed since
- %% port_please !
- ?shutdown2(
- Node,
- trace(
- {ssl_connect_failed, Ip, TcpPort, Other}))
- end;
+ do_setup_connect(Driver, Kernel, Node, Address, Ip, TcpPort, Version, Type, MyNode, Timer);
Other ->
?shutdown2(
Node,
@@ -575,6 +540,47 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
trace({getaddr_failed, Driver, Address, Other}))
end.
+do_setup_connect(Driver, Kernel, Node, Address, Ip, TcpPort, Version, Type, MyNode, Timer) ->
+ Opts =
+ trace(
+ connect_options(
+ %%
+ %% Use verify_server/3 to verify that
+ %% the server's certificate is for Node
+ %%
+ setup_verify_server(
+ get_ssl_options(client), Node))),
+ dist_util:reset_timer(Timer),
+ case ssl:connect(
+ Address, TcpPort,
+ [binary, {active, false}, {packet, 4},
+ Driver:family(), nodelay()] ++ Opts,
+ net_kernel:connecttime()) of
+ {ok, #sslsocket{pid = DistCtrl} = SslSocket} ->
+ _ = monitor_pid(DistCtrl),
+ ok = ssl:controlling_process(SslSocket, self()),
+ HSData0 = hs_data_common(SslSocket),
+ HSData =
+ HSData0#hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = DistCtrl,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ request_type = Type},
+ link(DistCtrl),
+ dist_util:handshake_we_started(trace(HSData));
+ Other ->
+ %% Other Node may have closed since
+ %% port_please !
+ ?shutdown2(
+ Node,
+ trace(
+ {ssl_connect_failed, Ip, TcpPort, Other}))
+ end.
+
close(Socket) ->
gen_close(inet, Socket).
@@ -644,6 +650,16 @@ verify_server(PeerCert, valid_peer, {CertNodesFun,Node} = S) ->
%% ------------------------------------------------------------
+%% Determine whether the EPMD module supports address resolution. The
+%% default is to fall back to the Driver's getaddr/2 (e.g. inet_tcp:getaddr/2).
+%% ------------------------------------------------------------
+get_address_resolver(EpmdModule, Driver) ->
+ case erlang:function_exported(EpmdModule, address_please, 3) of
+ true -> {EpmdModule, address_please};
+ _ -> {Driver, getaddr}
+ end.
+
+%% ------------------------------------------------------------
%% Do only accept new connection attempts from nodes at our
%% own LAN, if the check_ip environment parameter is true.
%% ------------------------------------------------------------
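A sketch of a custom epmd callback module (selected with the -epmd_module flag; the module name, port and version below are made up) that the resolver above would pick up. The two return shapes mirror the case clauses in do_setup/7: {ok, Ip, Port, Version} skips the port_please/2 lookup, while {ok, Ip} falls back to it. A real epmd module implements the rest of the erl_epmd API as well; only the new optional callback is shown.

    -module(my_epmd).
    -export([address_please/3]).

    %% Name is the node name part, Host the host part, AddressFamily is inet | inet6.
    address_please(_Name, Host, AddressFamily) ->
        case inet:getaddr(Host, AddressFamily) of
            {ok, Addr} ->
                %% Pin the distribution port and protocol version (illustrative values)
                {ok, Addr, 4370, 5};
            Error ->
                Error
        end.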
diff --git a/lib/stdlib/doc/src/Makefile b/lib/stdlib/doc/src/Makefile
index 508a4fa2de..5c6b714f80 100644
--- a/lib/stdlib/doc/src/Makefile
+++ b/lib/stdlib/doc/src/Makefile
@@ -71,7 +71,6 @@ XML_REF3_FILES = \
gen_statem.xml \
io.xml \
io_lib.xml \
- lib.xml \
lists.xml \
log_mf_h.xml \
maps.xml \
diff --git a/lib/stdlib/doc/src/lib.xml b/lib/stdlib/doc/src/lib.xml
deleted file mode 100644
index 58dad7c9e0..0000000000
--- a/lib/stdlib/doc/src/lib.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
-
-<erlref>
- <header>
- <copyright>
- <year>1996</year><year>2016</year>
- <holder>Ericsson AB. All Rights Reserved.</holder>
- </copyright>
- <legalnotice>
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- </legalnotice>
-
- <title>lib</title>
- <prepared></prepared>
- <docno></docno>
- <date></date>
- <rev></rev>
- </header>
- <module>lib</module>
- <modulesummary>Useful library functions.</modulesummary>
- <description>
- <warning>
- <p>This module is retained for backward compatibility. It can disappear
- without warning in a future Erlang/OTP release.</p>
- </warning>
- </description>
-
- <funcs>
- <func>
- <name name="error_message" arity="2"/>
- <fsummary>Print error message.</fsummary>
- <desc>
- <p>Prints error message <c><anno>Args</anno></c> in accordance with
- <c><anno>Format</anno></c>. Similar to
- <seealso marker="io#format/1"><c>io:format/2</c></seealso>.</p>
- </desc>
- </func>
-
- <func>
- <name name="flush_receive" arity="0"/>
- <fsummary>Flush messages.</fsummary>
- <desc>
- <p>Flushes the message buffer of the current process.</p>
- </desc>
- </func>
-
- <func>
- <name name="nonl" arity="1"/>
- <fsummary>Remove last newline.</fsummary>
- <desc>
- <p>Removes the last newline character, if any, in
- <c><anno>String1</anno></c>.</p>
- </desc>
- </func>
-
- <func>
- <name name="progname" arity="0"/>
- <fsummary>Return name of Erlang start script.</fsummary>
- <desc>
- <p>Returns the name of the script that started the current
- Erlang session.</p>
- </desc>
- </func>
-
- <func>
- <name name="send" arity="2"/>
- <fsummary>Send a message.</fsummary>
- <desc>
- <p>Makes it possible to send a message using the <c>apply/3</c> BIF.</p>
- </desc>
- </func>
-
- <func>
- <name name="sendw" arity="2"/>
- <fsummary>Send a message and wait for an answer.</fsummary>
- <desc>
- <p>As <seealso marker="#send/2"><c>send/2</c></seealso>,
- but waits for an answer. It is implemented as follows:</p>
- <code type="none">
-sendw(To, Msg) ->
- To ! {self(),Msg},
- receive
- Reply -> Reply
- end.</code>
- <p>The returned message is not necessarily a reply to the sent
- message.</p>
- </desc>
- </func>
- </funcs>
-</erlref>
-
diff --git a/lib/stdlib/doc/src/ref_man.xml b/lib/stdlib/doc/src/ref_man.xml
index 68bfddbc71..c6f30d272d 100644
--- a/lib/stdlib/doc/src/ref_man.xml
+++ b/lib/stdlib/doc/src/ref_man.xml
@@ -66,7 +66,6 @@
<xi:include href="gen_statem.xml"/>
<xi:include href="io.xml"/>
<xi:include href="io_lib.xml"/>
- <xi:include href="lib.xml"/>
<xi:include href="lists.xml"/>
<xi:include href="log_mf_h.xml"/>
<xi:include href="maps.xml"/>
diff --git a/lib/stdlib/doc/src/specs.xml b/lib/stdlib/doc/src/specs.xml
index d559adf9b6..fd2d625685 100644
--- a/lib/stdlib/doc/src/specs.xml
+++ b/lib/stdlib/doc/src/specs.xml
@@ -33,7 +33,6 @@
<xi:include href="../specs/specs_gen_statem.xml"/>
<xi:include href="../specs/specs_io.xml"/>
<xi:include href="../specs/specs_io_lib.xml"/>
- <xi:include href="../specs/specs_lib.xml"/>
<xi:include href="../specs/specs_lists.xml"/>
<xi:include href="../specs/specs_log_mf_h.xml"/>
<xi:include href="../specs/specs_maps.xml"/>
diff --git a/lib/stdlib/doc/src/string.xml b/lib/stdlib/doc/src/string.xml
index c7772d63a3..4a3d37dcb6 100644
--- a/lib/stdlib/doc/src/string.xml
+++ b/lib/stdlib/doc/src/string.xml
@@ -641,7 +641,7 @@ ÖÄÅ</pre>
<note><p>
The functions are kept for backward compatibility, but are
not recommended.
- They will be deprecated in Erlang/OTP 21.
+ They will be deprecated in a future release.
</p>
<p>Any undocumented functions in <c>string</c> are not to be used.</p>
</note>
diff --git a/lib/stdlib/src/Makefile b/lib/stdlib/src/Makefile
index dc3735055a..dfe6bf3e68 100644
--- a/lib/stdlib/src/Makefile
+++ b/lib/stdlib/src/Makefile
@@ -62,6 +62,7 @@ MODULES= \
erl_anno \
erl_bits \
erl_compile \
+ erl_error \
erl_eval \
erl_expand_records \
erl_internal \
@@ -91,7 +92,6 @@ MODULES= \
io_lib_format \
io_lib_fread \
io_lib_pretty \
- lib \
lists \
log_mf_h \
maps \
@@ -176,6 +176,7 @@ docs:
primary_bootstrap_compiler: \
$(BOOTSTRAP_COMPILER)/ebin/epp.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_anno.beam \
+ $(BOOTSTRAP_COMPILER)/ebin/erl_error.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_scan.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_parse.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_lint.beam \
diff --git a/lib/stdlib/src/lib.erl b/lib/stdlib/src/erl_error.erl
index 51e0c3f77e..fdcb9e824c 100644
--- a/lib/stdlib/src/lib.erl
+++ b/lib/stdlib/src/erl_error.erl
@@ -17,337 +17,12 @@
%%
%% %CopyrightEnd%
%%
--module(lib).
-
--export([flush_receive/0, error_message/2, progname/0, nonl/1, send/2,
- sendw/2, eval_str/1]).
-
--export([extended_parse_exprs/1, extended_parse_term/1,
- subst_values_for_vars/2]).
+-module(erl_error).
-export([format_exception/6, format_exception/7,
format_stacktrace/4, format_stacktrace/5,
format_call/4, format_call/5, format_fun/1, format_fun/2]).
--spec flush_receive() -> 'ok'.
-
-flush_receive() ->
- receive
- _Any ->
- flush_receive()
- after
- 0 ->
- ok
- end.
-
-%%
-%% Functions for doing standard system format i/o.
-%%
--spec error_message(Format, Args) -> 'ok' when
- Format :: io:format(),
- Args :: [term()].
-
-error_message(Format, Args) ->
- io:format(<<"** ~ts **\n">>, [io_lib:format(Format, Args)]).
-
-%% Return the name of the script that starts (this) erlang
-%%
--spec progname() -> atom().
-
-progname() ->
- case init:get_argument(progname) of
- {ok, [[Prog]]} ->
- list_to_atom(Prog);
- _Other ->
- no_prog_name
- end.
-
--spec nonl(String1) -> String2 when
- String1 :: string(),
- String2 :: string().
-
-nonl([10]) -> [];
-nonl([]) -> [];
-nonl([H|T]) -> [H|nonl(T)].
-
--spec send(To, Msg) -> Msg when
- To :: pid() | atom() | {atom(), node()},
- Msg :: term().
-
-send(To, Msg) -> To ! Msg.
-
--spec sendw(To, Msg) -> term() when
- To :: pid() | atom() | {atom(), node()},
- Msg :: term().
-
-sendw(To, Msg) ->
- To ! {self(), Msg},
- receive
- Reply -> Reply
- end.
-
-%% eval_str(InStr) -> {ok, OutStr} | {error, ErrStr'}
-%% InStr must represent a body
-%% Note: If InStr is a binary it has to be a Latin-1 string.
-%% If you have a UTF-8 encoded binary you have to call
-%% unicode:characters_to_list/1 before the call to eval_str().
-
--define(result(F,D), lists:flatten(io_lib:format(F, D))).
-
--spec eval_str(string() | unicode:latin1_binary()) ->
- {'ok', string()} | {'error', string()}.
-
-eval_str(Str) when is_list(Str) ->
- case erl_scan:tokens([], Str, 0) of
- {more, _} ->
- {error, "Incomplete form (missing .<cr>)??"};
- {done, {ok, Toks, _}, Rest} ->
- case all_white(Rest) of
- true ->
- case erl_parse:parse_exprs(Toks) of
- {ok, Exprs} ->
- case catch erl_eval:exprs(Exprs, erl_eval:new_bindings()) of
- {value, Val, _} ->
- {ok, Val};
- Other ->
- {error, ?result("*** eval: ~p", [Other])}
- end;
- {error, {_Line, Mod, Args}} ->
- Msg = ?result("*** ~ts",[Mod:format_error(Args)]),
- {error, Msg}
- end;
- false ->
- {error, ?result("Non-white space found after "
- "end-of-form :~ts", [Rest])}
- end
- end;
-eval_str(Bin) when is_binary(Bin) ->
- eval_str(binary_to_list(Bin)).
-
-all_white([$\s|T]) -> all_white(T);
-all_white([$\n|T]) -> all_white(T);
-all_white([$\t|T]) -> all_white(T);
-all_white([]) -> true;
-all_white(_) -> false.
-
-%% `Tokens' is assumed to have been scanned with the 'text' option.
-%% The annotations of the returned expressions are locations.
-%%
-%% Can handle pids, ports, references, and external funs ("items").
-%% Known items are represented by variables in the erl_parse tree, and
-%% the items themselves are stored in the returned bindings.
-
--spec extended_parse_exprs(Tokens) ->
- {'ok', ExprList, Bindings} | {'error', ErrorInfo} when
- Tokens :: [erl_scan:token()],
- ExprList :: [erl_parse:abstract_expr()],
- Bindings :: erl_eval:binding_struct(),
- ErrorInfo :: erl_parse:error_info().
-
-extended_parse_exprs(Tokens) ->
- Ts = tokens_fixup(Tokens),
- case erl_parse:parse_exprs(Ts) of
- {ok, Exprs0} ->
- {Exprs, Bs} = expr_fixup(Exprs0),
- {ok, reset_expr_anno(Exprs), Bs};
- _ErrorInfo ->
- erl_parse:parse_exprs(reset_token_anno(Ts))
- end.
-
-tokens_fixup([]) -> [];
-tokens_fixup([T|Ts]=Ts0) ->
- try token_fixup(Ts0) of
- {NewT, NewTs} ->
- [NewT|tokens_fixup(NewTs)]
- catch
- _:_ ->
- [T|tokens_fixup(Ts)]
- end.
-
-token_fixup(Ts) ->
- {AnnoL, NewTs, FixupTag} = unscannable(Ts),
- String = lists:append([erl_anno:text(A) || A <- AnnoL]),
- _ = (fixup_fun(FixupTag))(String),
- NewAnno = erl_anno:set_text(fixup_text(FixupTag), hd(AnnoL)),
- {{string, NewAnno, String}, NewTs}.
-
-unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
- {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
- {[A1, A2, A3, A4, A5, A6, A7], Ts, function};
-unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
- {'.', A5}, {atom, A6, _}, {'.', A7}, {integer, A8, _},
- {'>', A9}|Ts]) ->
- {[A1, A2, A3, A4, A5, A6, A7, A8, A9], Ts, function};
-unscannable([{'<', A1}, {float, A2, _}, {'.', A3}, {integer, A4, _},
- {'>', A5}|Ts]) ->
- {[A1, A2, A3, A4, A5], Ts, pid};
-unscannable([{'#', A1}, {var, A2, 'Port'}, {'<', A3}, {float, A4, _},
- {'>', A5}|Ts]) ->
- {[A1, A2, A3, A4, A5], Ts, port};
-unscannable([{'#', A1}, {var, A2, 'Ref'}, {'<', A3}, {float, A4, _},
- {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
- {[A1, A2, A3, A4, A5, A6, A7], Ts, reference}.
-
-expr_fixup(Expr0) ->
- {Expr, Bs, _} = expr_fixup(Expr0, erl_eval:new_bindings(), 1),
- {Expr, Bs}.
-
-expr_fixup({string,A,S}=T, Bs0, I) ->
- try string_fixup(A, S) of
- Value ->
- Var = new_var(I),
- Bs = erl_eval:add_binding(Var, Value, Bs0),
- {{var, A, Var}, Bs, I+1}
- catch
- _:_ ->
- {T, Bs0, I}
- end;
-expr_fixup(Tuple, Bs0, I0) when is_tuple(Tuple) ->
- {L, Bs, I} = expr_fixup(tuple_to_list(Tuple), Bs0, I0),
- {list_to_tuple(L), Bs, I};
-expr_fixup([E0|Es0], Bs0, I0) ->
- {E, Bs1, I1} = expr_fixup(E0, Bs0, I0),
- {Es, Bs, I} = expr_fixup(Es0, Bs1, I1),
- {[E|Es], Bs, I};
-expr_fixup(T, Bs, I) ->
- {T, Bs, I}.
-
-string_fixup(A, S) ->
- Text = erl_anno:text(A),
- FixupTag = fixup_tag(Text, S),
- (fixup_fun(FixupTag))(S).
-
-new_var(I) ->
- list_to_atom(lists:concat(['__ExtendedParseExprs_', I, '__'])).
-
-reset_token_anno(Tokens) ->
- [setelement(2, T, (reset_anno())(element(2, T))) || T <- Tokens].
-
-reset_expr_anno(Exprs) ->
- [erl_parse:map_anno(reset_anno(), E) || E <- Exprs].
-
-reset_anno() ->
- fun(A) -> erl_anno:new(erl_anno:location(A)) end.
-
-fixup_fun(function) -> fun function/1;
-fixup_fun(pid) -> fun erlang:list_to_pid/1;
-fixup_fun(port) -> fun erlang:list_to_port/1;
-fixup_fun(reference) -> fun erlang:list_to_ref/1.
-
-function(S) ->
- %% External function.
- {ok, [_, _, _,
- {atom, _, Module}, _,
- {atom, _, Function}, _,
- {integer, _, Arity}|_], _} = erl_scan:string(S),
- erlang:make_fun(Module, Function, Arity).
-
-fixup_text(function) -> "function";
-fixup_text(pid) -> "pid";
-fixup_text(port) -> "port";
-fixup_text(reference) -> "reference".
-
-fixup_tag("function", "#"++_) -> function;
-fixup_tag("pid", "<"++_) -> pid;
-fixup_tag("port", "#"++_) -> port;
-fixup_tag("reference", "#"++_) -> reference.
-
-%%% End of extended_parse_exprs.
-
-%% `Tokens' is assumed to have been scanned with the 'text' option.
-%%
-%% Can handle pids, ports, references, and external funs.
-
--spec extended_parse_term(Tokens) ->
- {'ok', Term} | {'error', ErrorInfo} when
- Tokens :: [erl_scan:token()],
- Term :: term(),
- ErrorInfo :: erl_parse:error_info().
-
-extended_parse_term(Tokens) ->
- case extended_parse_exprs(Tokens) of
- {ok, [Expr], Bindings} ->
- try normalise(Expr, Bindings) of
- Term ->
- {ok, Term}
- catch
- _:_ ->
- Loc = erl_anno:location(element(2, Expr)),
- {error,{Loc,?MODULE,"bad term"}}
- end;
- {ok, [_,Expr|_], _Bindings} ->
- Loc = erl_anno:location(element(2, Expr)),
- {error,{Loc,?MODULE,"bad term"}};
- {error, _} = Error ->
- Error
- end.
-
-%% From erl_parse.
-normalise({var, _, V}, Bs) ->
- {value, Value} = erl_eval:binding(V, Bs),
- Value;
-normalise({char,_,C}, _Bs) -> C;
-normalise({integer,_,I}, _Bs) -> I;
-normalise({float,_,F}, _Bs) -> F;
-normalise({atom,_,A}, _Bs) -> A;
-normalise({string,_,S}, _Bs) -> S;
-normalise({nil,_}, _Bs) -> [];
-normalise({bin,_,Fs}, Bs) ->
- {value, B, _} =
- eval_bits:expr_grp(Fs, [],
- fun(E, _) ->
- {value, normalise(E, Bs), []}
- end, [], true),
- B;
-normalise({cons,_,Head,Tail}, Bs) ->
- [normalise(Head, Bs)|normalise(Tail, Bs)];
-normalise({tuple,_,Args}, Bs) ->
- list_to_tuple(normalise_list(Args, Bs));
-normalise({map,_,Pairs}, Bs) ->
- maps:from_list(lists:map(fun
- %% only allow '=>'
- ({map_field_assoc,_,K,V}) ->
- {normalise(K, Bs),normalise(V, Bs)}
- end, Pairs));
-%% Special case for unary +/-.
-normalise({op,_,'+',{char,_,I}}, _Bs) -> I;
-normalise({op,_,'+',{integer,_,I}}, _Bs) -> I;
-normalise({op,_,'+',{float,_,F}}, _Bs) -> F;
-normalise({op,_,'-',{char,_,I}}, _Bs) -> -I; %Weird, but compatible!
-normalise({op,_,'-',{integer,_,I}}, _Bs) -> -I;
-normalise({op,_,'-',{float,_,F}}, _Bs) -> -F;
-normalise({'fun',_,{function,{atom,_,M},{atom,_,F},{integer,_,A}}}, _Bs) ->
- %% Since "#Fun<M.F.A>" is recognized, "fun M:F/A" should be too.
- fun M:F/A.
-
-normalise_list([H|T], Bs) ->
- [normalise(H, Bs)|normalise_list(T, Bs)];
-normalise_list([], _Bs) ->
- [].
-
-%% To be used on ExprList and Bindings returned from extended_parse_exprs().
-%% Substitute {value, A, Item} for {var, A, ExtendedParseVar}.
-%% {value, A, Item} is a shell/erl_eval convention, and for example
-%% the linter cannot handle it.
-
--spec subst_values_for_vars(ExprList, Bindings) -> [term()] when
- ExprList :: [erl_parse:abstract_expr()],
- Bindings :: erl_eval:binding_struct().
-
-subst_values_for_vars({var, A, V}=Var, Bs) ->
- case erl_eval:binding(V, Bs) of
- {value, Value} ->
- {value, A, Value};
- unbound ->
- Var
- end;
-subst_values_for_vars(L, Bs) when is_list(L) ->
- [subst_values_for_vars(E, Bs) || E <- L];
-subst_values_for_vars(T, Bs) when is_tuple(T) ->
- list_to_tuple(subst_values_for_vars(tuple_to_list(T), Bs));
-subst_values_for_vars(T, _Bs) ->
- T.
-
%%% Formatting of exceptions, mfa:s and funs.
%% -> iolist() (no \n at end)
diff --git a/lib/stdlib/src/erl_eval.erl b/lib/stdlib/src/erl_eval.erl
index 4ee11383da..0f6d48b9a3 100644
--- a/lib/stdlib/src/erl_eval.erl
+++ b/lib/stdlib/src/erl_eval.erl
@@ -27,7 +27,8 @@
-export([exprs/2,exprs/3,exprs/4,expr/2,expr/3,expr/4,expr/5,
expr_list/2,expr_list/3,expr_list/4]).
-export([new_bindings/0,bindings/1,binding/2,add_binding/3,del_binding/2]).
-
+-export([extended_parse_exprs/1, extended_parse_term/1,
+ subst_values_for_vars/2]).
-export([is_constant_expr/1, partial_eval/1]).
%% Is used by standalone Erlang (escript).
@@ -1286,6 +1287,224 @@ merge_bindings(Bs1, Bs2) ->
%% error -> Bs
%% end
%% end, Bs2, Bs1).
+
+%% Substitute {value, A, Item} for {var, A, Var}, preserving A.
+%% {value, A, Item} is a shell/erl_eval convention, and for example
+%% the linter cannot handle it.
+
+-spec subst_values_for_vars(ExprList, Bindings) -> [term()] when
+ ExprList :: [erl_parse:abstract_expr()],
+ Bindings :: binding_struct().
+
+subst_values_for_vars({var, A, V}=Var, Bs) ->
+ case erl_eval:binding(V, Bs) of
+ {value, Value} ->
+ {value, A, Value};
+ unbound ->
+ Var
+ end;
+subst_values_for_vars(L, Bs) when is_list(L) ->
+ [subst_values_for_vars(E, Bs) || E <- L];
+subst_values_for_vars(T, Bs) when is_tuple(T) ->
+ list_to_tuple(subst_values_for_vars(tuple_to_list(T), Bs));
+subst_values_for_vars(T, _Bs) ->
+ T.
+
+%% `Tokens' is assumed to have been scanned with the 'text' option.
+%% The annotations of the returned expressions are locations.
+%%
+%% Can handle pids, ports, references, and external funs ("items").
+%% Known items are represented by variables in the erl_parse tree, and
+%% the items themselves are stored in the returned bindings.
+
+-spec extended_parse_exprs(Tokens) ->
+ {'ok', ExprList, Bindings} | {'error', ErrorInfo} when
+ Tokens :: [erl_scan:token()],
+ ExprList :: [erl_parse:abstract_expr()],
+ Bindings :: erl_eval:binding_struct(),
+ ErrorInfo :: erl_parse:error_info().
+
+extended_parse_exprs(Tokens) ->
+ Ts = tokens_fixup(Tokens),
+ case erl_parse:parse_exprs(Ts) of
+ {ok, Exprs0} ->
+ {Exprs, Bs} = expr_fixup(Exprs0),
+ {ok, reset_expr_anno(Exprs), Bs};
+ _ErrorInfo ->
+ erl_parse:parse_exprs(reset_token_anno(Ts))
+ end.
+
+tokens_fixup([]) -> [];
+tokens_fixup([T|Ts]=Ts0) ->
+ try token_fixup(Ts0) of
+ {NewT, NewTs} ->
+ [NewT|tokens_fixup(NewTs)]
+ catch
+ _:_ ->
+ [T|tokens_fixup(Ts)]
+ end.
+
+token_fixup(Ts) ->
+ {AnnoL, NewTs, FixupTag} = unscannable(Ts),
+ String = lists:append([erl_anno:text(A) || A <- AnnoL]),
+ _ = (fixup_fun(FixupTag))(String),
+ NewAnno = erl_anno:set_text(fixup_text(FixupTag), hd(AnnoL)),
+ {{string, NewAnno, String}, NewTs}.
+
+unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
+ {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
+ {[A1, A2, A3, A4, A5, A6, A7], Ts, function};
+unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
+ {'.', A5}, {atom, A6, _}, {'.', A7}, {integer, A8, _},
+ {'>', A9}|Ts]) ->
+ {[A1, A2, A3, A4, A5, A6, A7, A8, A9], Ts, function};
+unscannable([{'<', A1}, {float, A2, _}, {'.', A3}, {integer, A4, _},
+ {'>', A5}|Ts]) ->
+ {[A1, A2, A3, A4, A5], Ts, pid};
+unscannable([{'#', A1}, {var, A2, 'Port'}, {'<', A3}, {float, A4, _},
+ {'>', A5}|Ts]) ->
+ {[A1, A2, A3, A4, A5], Ts, port};
+unscannable([{'#', A1}, {var, A2, 'Ref'}, {'<', A3}, {float, A4, _},
+ {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
+ {[A1, A2, A3, A4, A5, A6, A7], Ts, reference}.
+
+expr_fixup(Expr0) ->
+ {Expr, Bs, _} = expr_fixup(Expr0, erl_eval:new_bindings(), 1),
+ {Expr, Bs}.
+
+expr_fixup({string,A,S}=T, Bs0, I) ->
+ try string_fixup(A, S) of
+ Value ->
+ Var = new_var(I),
+ Bs = erl_eval:add_binding(Var, Value, Bs0),
+ {{var, A, Var}, Bs, I+1}
+ catch
+ _:_ ->
+ {T, Bs0, I}
+ end;
+expr_fixup(Tuple, Bs0, I0) when is_tuple(Tuple) ->
+ {L, Bs, I} = expr_fixup(tuple_to_list(Tuple), Bs0, I0),
+ {list_to_tuple(L), Bs, I};
+expr_fixup([E0|Es0], Bs0, I0) ->
+ {E, Bs1, I1} = expr_fixup(E0, Bs0, I0),
+ {Es, Bs, I} = expr_fixup(Es0, Bs1, I1),
+ {[E|Es], Bs, I};
+expr_fixup(T, Bs, I) ->
+ {T, Bs, I}.
+
+string_fixup(A, S) ->
+ Text = erl_anno:text(A),
+ FixupTag = fixup_tag(Text, S),
+ (fixup_fun(FixupTag))(S).
+
+new_var(I) ->
+ list_to_atom(lists:concat(['__ExtendedParseExprs_', I, '__'])).
+
+reset_token_anno(Tokens) ->
+ [setelement(2, T, (reset_anno())(element(2, T))) || T <- Tokens].
+
+reset_expr_anno(Exprs) ->
+ [erl_parse:map_anno(reset_anno(), E) || E <- Exprs].
+
+reset_anno() ->
+ fun(A) -> erl_anno:new(erl_anno:location(A)) end.
+
+fixup_fun(function) -> fun function/1;
+fixup_fun(pid) -> fun erlang:list_to_pid/1;
+fixup_fun(port) -> fun erlang:list_to_port/1;
+fixup_fun(reference) -> fun erlang:list_to_ref/1.
+
+function(S) ->
+ %% External function.
+ {ok, [_, _, _,
+ {atom, _, Module}, _,
+ {atom, _, Function}, _,
+ {integer, _, Arity}|_], _} = erl_scan:string(S),
+ erlang:make_fun(Module, Function, Arity).
+
+fixup_text(function) -> "function";
+fixup_text(pid) -> "pid";
+fixup_text(port) -> "port";
+fixup_text(reference) -> "reference".
+
+fixup_tag("function", "#"++_) -> function;
+fixup_tag("pid", "<"++_) -> pid;
+fixup_tag("port", "#"++_) -> port;
+fixup_tag("reference", "#"++_) -> reference.
+
+%%% End of extended_parse_exprs.
+
+%% `Tokens' is assumed to have been scanned with the 'text' option.
+%%
+%% Can handle pids, ports, references, and external funs.
+
+-spec extended_parse_term(Tokens) ->
+ {'ok', Term} | {'error', ErrorInfo} when
+ Tokens :: [erl_scan:token()],
+ Term :: term(),
+ ErrorInfo :: erl_parse:error_info().
+
+extended_parse_term(Tokens) ->
+ case extended_parse_exprs(Tokens) of
+ {ok, [Expr], Bindings} ->
+ try normalise(Expr, Bindings) of
+ Term ->
+ {ok, Term}
+ catch
+ _:_ ->
+ Loc = erl_anno:location(element(2, Expr)),
+ {error,{Loc,?MODULE,"bad term"}}
+ end;
+ {ok, [_,Expr|_], _Bindings} ->
+ Loc = erl_anno:location(element(2, Expr)),
+ {error,{Loc,?MODULE,"bad term"}};
+ {error, _} = Error ->
+ Error
+ end.
+
+%% From erl_parse.
+normalise({var, _, V}, Bs) ->
+ {value, Value} = erl_eval:binding(V, Bs),
+ Value;
+normalise({char,_,C}, _Bs) -> C;
+normalise({integer,_,I}, _Bs) -> I;
+normalise({float,_,F}, _Bs) -> F;
+normalise({atom,_,A}, _Bs) -> A;
+normalise({string,_,S}, _Bs) -> S;
+normalise({nil,_}, _Bs) -> [];
+normalise({bin,_,Fs}, Bs) ->
+ {value, B, _} =
+ eval_bits:expr_grp(Fs, [],
+ fun(E, _) ->
+ {value, normalise(E, Bs), []}
+ end, [], true),
+ B;
+normalise({cons,_,Head,Tail}, Bs) ->
+ [normalise(Head, Bs)|normalise(Tail, Bs)];
+normalise({tuple,_,Args}, Bs) ->
+ list_to_tuple(normalise_list(Args, Bs));
+normalise({map,_,Pairs}, Bs) ->
+ maps:from_list(lists:map(fun
+ %% only allow '=>'
+ ({map_field_assoc,_,K,V}) ->
+ {normalise(K, Bs),normalise(V, Bs)}
+ end, Pairs));
+%% Special case for unary +/-.
+normalise({op,_,'+',{char,_,I}}, _Bs) -> I;
+normalise({op,_,'+',{integer,_,I}}, _Bs) -> I;
+normalise({op,_,'+',{float,_,F}}, _Bs) -> F;
+normalise({op,_,'-',{char,_,I}}, _Bs) -> -I; %Weird, but compatible!
+normalise({op,_,'-',{integer,_,I}}, _Bs) -> -I;
+normalise({op,_,'-',{float,_,F}}, _Bs) -> -F;
+normalise({'fun',_,{function,{atom,_,M},{atom,_,F},{integer,_,A}}}, _Bs) ->
+ %% Since "#Fun<M.F.A>" is recognized, "fun M:F/A" should be too.
+ fun M:F/A.
+
+normalise_list([H|T], Bs) ->
+ [normalise(H, Bs)|normalise_list(T, Bs)];
+normalise_list([], _Bs) ->
+ [].
+
%%----------------------------------------------------------------------------
%%
%% Evaluate expressions:
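The new extended_parse_exprs/1 and extended_parse_term/1 above turn printed-but-unscannable terms (pids, ports, references, external funs) back into values by rewriting their token sequences into string tokens and then into bound variables. A minimal usage sketch, assuming the exported erl_eval API shown in this diff; the helper name and input string are illustrative only:

%% Sketch: parse a printed pid back into a term.
%% The tokens must carry text annotations, i.e. be scanned with [text].
parse_printed_term(String) ->
    {ok, Tokens, _End} = erl_scan:string(String ++ ".", 1, [text]),
    erl_eval:extended_parse_term(Tokens).

%% parse_printed_term("<0.80.0>") should return {ok, Pid} with
%% Pid =:= list_to_pid("<0.80.0>"); a plain erl_parse:parse_term/1
%% fails on the same input.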
diff --git a/lib/stdlib/src/erl_internal.erl b/lib/stdlib/src/erl_internal.erl
index 6d3d5baa23..dd509191ef 100644
--- a/lib/stdlib/src/erl_internal.erl
+++ b/lib/stdlib/src/erl_internal.erl
@@ -109,6 +109,7 @@ new_type_test(is_function, 2) -> true;
new_type_test(is_integer, 1) -> true;
new_type_test(is_list, 1) -> true;
new_type_test(is_map, 1) -> true;
+new_type_test(is_map_key, 2) -> true;
new_type_test(is_number, 1) -> true;
new_type_test(is_pid, 1) -> true;
new_type_test(is_port, 1) -> true;
@@ -315,6 +316,7 @@ bif(is_function, 2) -> true;
bif(is_integer, 1) -> true;
bif(is_list, 1) -> true;
bif(is_map, 1) -> true;
+bif(is_map_key, 2) -> true;
bif(is_number, 1) -> true;
bif(is_pid, 1) -> true;
bif(is_port, 1) -> true;
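The two erl_internal additions register is_map_key/2 both as a type test usable in guards and as a recognized BIF. A small illustrative fragment (the function name is made up):

%% is_map_key(Key, Map) can now sit directly in a guard, like the
%% other type tests in the lists above.
label(M) when is_map_key(id, M) -> tagged;
label(M) when is_map(M)         -> plain.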
diff --git a/lib/stdlib/src/escript.erl b/lib/stdlib/src/escript.erl
index beea9927d2..89a81684f5 100644
--- a/lib/stdlib/src/escript.erl
+++ b/lib/stdlib/src/escript.erl
@@ -882,7 +882,7 @@ format_exception(Class, Reason, StackTrace) ->
io_lib:format("~." ++ integer_to_list(I) ++ P, [Term, 50])
end,
StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
- lib:format_exception(1, Class, Reason, StackTrace, StackFun, PF, Enc).
+ erl_error:format_exception(1, Class, Reason, StackTrace, StackFun, PF, Enc).
encoding() ->
[{encoding, Encoding}] = enc(),
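escript now calls erl_error:format_exception/7, the new home of the formatting helpers from the removed lib module; the argument list is unchanged, as the one-line diff shows. A hedged sketch of a catch-and-format wrapper using the same arity (helper name and formatting choices are illustrative, not escript's own):

format_caught(Fun) ->
    try
        Fun()
    catch
        Class:Reason:StackTrace ->
            PF = fun(Term, I) -> io_lib:print(Term, I, 80, 30) end,
            StackFun = fun(_M, _F, _A) -> false end,
            io:put_chars(erl_error:format_exception(1, Class, Reason,
                                                    StackTrace, StackFun,
                                                    PF, unicode))
    end.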
diff --git a/lib/stdlib/src/gen_event.erl b/lib/stdlib/src/gen_event.erl
index 53042251cc..3ee2031d02 100644
--- a/lib/stdlib/src/gen_event.erl
+++ b/lib/stdlib/src/gen_event.erl
@@ -122,7 +122,7 @@
-type add_handler_ret() :: ok | term() | {'EXIT',term()}.
-type del_handler_ret() :: ok | term() | {'EXIT',term()}.
--type emgr_name() :: {'local', atom()} | {'global', atom()}
+-type emgr_name() :: {'local', atom()} | {'global', term()}
| {'via', atom(), term()}.
-type debug_flag() :: 'trace' | 'log' | 'statistics' | 'debug'
| {'logfile', string()}.
@@ -130,7 +130,7 @@
| {'debug', [debug_flag()]}
| {'spawn_opt', [proc_lib:spawn_option()]}
| {'hibernate_after', timeout()}.
--type emgr_ref() :: atom() | {atom(), atom()} | {'global', atom()}
+-type emgr_ref() :: atom() | {atom(), atom()} | {'global', term()}
| {'via', atom(), term()} | pid().
-type start_ret() :: {'ok', pid()} | {'error', term()}.
@@ -146,7 +146,7 @@
%% start_link()
%% start_link(MgrName | Options)
%% start_link(MgrName, Options)
-%% MgrName ::= {local, atom()} | {global, atom()} | {via, atom(), term()}
+%% MgrName ::= {local, atom()} | {global, term()} | {via, atom(), term()}
%% Options ::= [{timeout, Timeout} | {debug, [Flag]} | {spawn_opt,SOpts}]
%% Flag ::= trace | log | {logfile, File} | statistics | debug
%% (debug == log && statistics)
diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl
index f65ef78636..035dd871ff 100644
--- a/lib/stdlib/src/gen_server.erl
+++ b/lib/stdlib/src/gen_server.erl
@@ -166,7 +166,7 @@
%%% start(Name, Mod, Args, Options)
%%% start_link(Mod, Args, Options)
%%% start_link(Name, Mod, Args, Options) where:
-%%% Name ::= {local, atom()} | {global, atom()} | {via, atom(), term()}
+%%% Name ::= {local, atom()} | {global, term()} | {via, atom(), term()}
%%% Mod ::= atom(), callback module implementing the 'real' server
%%% Args ::= term(), init arguments (to Mod:init/1)
%%% Options ::= [{timeout, Timeout} | {debug, [Flag]}]
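Relaxing {'global', atom()} to {'global', term()} in gen_event (above) and gen_server (here) matches what global actually accepts: any term can be a globally registered name. A minimal callback module illustrating that (module name and state are made up):

-module(my_worker).
-behaviour(gen_server).
-export([start_link/1, init/1, handle_call/3, handle_cast/2]).

start_link(Id) ->
    %% A tuple, not an atom, used as the global name.
    gen_server:start_link({global, {my_worker, Id}}, ?MODULE, [], []).

init([]) -> {ok, #{}}.
handle_call(_Req, _From, State) -> {reply, ok, State}.
handle_cast(_Msg, State) -> {noreply, State}.

After start_link(1), global:whereis_name({my_worker, 1}) returns the pid from any connected node.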
diff --git a/lib/stdlib/src/ms_transform.erl b/lib/stdlib/src/ms_transform.erl
index ec8cfd56c2..428c23524b 100644
--- a/lib/stdlib/src/ms_transform.erl
+++ b/lib/stdlib/src/ms_transform.erl
@@ -929,6 +929,7 @@ bool_test(is_port,1) -> true;
bool_test(is_reference,1) -> true;
bool_test(is_tuple,1) -> true;
bool_test(is_map,1) -> true;
+bool_test(is_map_key, 2) -> true;
bool_test(is_binary,1) -> true;
bool_test(is_function,1) -> true;
bool_test(is_record,2) -> true;
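Adding is_map_key/2 to bool_test/2 lets ms_transform accept it in fun2ms guards, mirroring the guard-BIF change in erl_internal above. A minimal sketch (the table layout is hypothetical):

-include_lib("stdlib/include/ms_transform.hrl").

%% Select the ids of objects whose map payload has a 'color' key.
ids_with_color(Tab) ->
    MS = ets:fun2ms(fun({Id, Map}) when is_map_key(color, Map) -> Id end),
    ets:select(Tab, MS).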
diff --git a/lib/stdlib/src/proc_lib.erl b/lib/stdlib/src/proc_lib.erl
index 8d01840313..9094e0c0cd 100644
--- a/lib/stdlib/src/proc_lib.erl
+++ b/lib/stdlib/src/proc_lib.erl
@@ -841,8 +841,8 @@ format_exception(Class, Reason, StackTrace, {Enc,_}=Extra) ->
StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
%% EI = " exception: ",
EI = " ",
- [EI, lib:format_exception(1+length(EI), Class, Reason,
- StackTrace, StackFun, PF, Enc), "\n"].
+ [EI, erl_error:format_exception(1+length(EI), Class, Reason,
+ StackTrace, StackFun, PF, Enc), "\n"].
to_string(A, latin1) ->
io_lib:write_atom_as_latin1(A);
diff --git a/lib/stdlib/src/qlc.erl b/lib/stdlib/src/qlc.erl
index 3a66f6930b..4a0e976ba4 100644
--- a/lib/stdlib/src/qlc.erl
+++ b/lib/stdlib/src/qlc.erl
@@ -638,7 +638,7 @@ string_to_handle(Str, Options, Bindings) when is_list(Str) ->
case erl_scan:string(Str, 1, [text]) of
{ok, Tokens, _} ->
ScanRes =
- case lib:extended_parse_exprs(Tokens) of
+ case erl_eval:extended_parse_exprs(Tokens) of
{ok, [Expr0], SBs} ->
{ok, Expr0, SBs};
{ok, _ExprList, _SBs} ->
@@ -1196,8 +1196,8 @@ abstract1({table, TableDesc}, _NElements, _Depth, _A) ->
{ok, Tokens, _} =
erl_scan:string(lists:flatten(TableDesc++"."), 1, [text]),
{ok, Es, Bs} =
- lib:extended_parse_exprs(Tokens),
- [Expr] = lib:subst_values_for_vars(Es, Bs),
+ erl_eval:extended_parse_exprs(Tokens),
+ [Expr] = erl_eval:subst_values_for_vars(Es, Bs),
special(Expr);
false -> % abstract expression
TableDesc
@@ -3749,7 +3749,7 @@ maybe_error_logger(Name, Why) ->
expand_stacktrace(),
Trimmer = fun(M, _F, _A) -> M =:= erl_eval end,
Formater = fun(Term, I) -> io_lib:print(Term, I, 80, -1) end,
- X = lib:format_stacktrace(1, Stacktrace, Trimmer, Formater),
+ X = erl_error:format_stacktrace(1, Stacktrace, Trimmer, Formater),
error_logger:Name("qlc: temporary file was needed for ~w\n~ts\n",
[Why, lists:flatten(X)]).
diff --git a/lib/stdlib/src/shell.erl b/lib/stdlib/src/shell.erl
index 1be37672e7..c73cf22943 100644
--- a/lib/stdlib/src/shell.erl
+++ b/lib/stdlib/src/shell.erl
@@ -230,7 +230,7 @@ server_loop(N0, Eval_0, Bs00, RT, Ds00, History0, Results0) ->
{Res,Eval0} = get_command(Prompt, Eval_1, Bs0, RT, Ds0),
case Res of
{ok,Es0,XBs} ->
- Es1 = lib:subst_values_for_vars(Es0, XBs),
+ Es1 = erl_eval:subst_values_for_vars(Es0, XBs),
case expand_hist(Es1, N) of
{ok,Es} ->
{V,Eval,Bs,Ds} = shell_cmd(Es, Eval0, Bs0, RT, Ds0, cmd),
@@ -280,7 +280,7 @@ get_command(Prompt, Eval, Bs, RT, Ds) ->
io:scan_erl_exprs(group_leader(), Prompt, 1, [text])
of
{ok,Toks,_EndPos} ->
- lib:extended_parse_exprs(Toks);
+ erl_eval:extended_parse_exprs(Toks);
{eof,_EndPos} ->
eof;
{error,ErrorInfo,_EndPos} ->
@@ -589,7 +589,7 @@ report_exception(Class, Severity, {Reason,Stacktrace}, RT) ->
PF = fun(Term, I1) -> pp(Term, I1, RT) end,
SF = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
Enc = encoding(),
- Str = lib:format_exception(I, Class, Reason, Stacktrace, SF, PF, Enc),
+ Str = erl_error:format_exception(I, Class, Reason, Stacktrace, SF, PF, Enc),
io:requests([{put_chars, latin1, Tag},
{put_chars, unicode, Str},
nl]).
diff --git a/lib/stdlib/src/slave.erl b/lib/stdlib/src/slave.erl
index b3f3206d67..37c1f6bfd9 100644
--- a/lib/stdlib/src/slave.erl
+++ b/lib/stdlib/src/slave.erl
@@ -187,7 +187,7 @@ start_link(Host, Name, Args) ->
start(Host, Name, Args, self()).
start(Host0, Name, Args, LinkTo) ->
- Prog = lib:progname(),
+ Prog = progname(),
start(Host0, Name, Args, LinkTo, Prog).
start(Host0, Name, Args, LinkTo, Prog) ->
@@ -296,7 +296,6 @@ mk_cmd(Host, Name, Args, Waiter, Prog0) ->
" -s slave slave_start ", node(),
" ", Waiter,
" ", Args]),
-
case after_char($@, atom_to_list(node())) of
Host ->
{ok, BasicCmd};
@@ -309,6 +308,15 @@ mk_cmd(Host, Name, Args, Waiter, Prog0) ->
end
end.
+%% Return the name of the script that starts (this) Erlang.
+progname() ->
+ case init:get_argument(progname) of
+ {ok, [[Prog]]} ->
+ Prog;
+ _Other ->
+ "no_prog_name"
+ end.
+
%% This is an attempt to distinguish between spaces in the program
%% path and spaces that separate arguments. The program is quoted to
%% allow spaces in the path.
@@ -317,7 +325,7 @@ mk_cmd(Host, Name, Args, Waiter, Prog0) ->
%% (through start/5) or if the -program switch to beam is used and
%% includes arguments (typically done by cerl in OTP test environment
%% in order to ensure that slave/peer nodes are started with the same
-%% emulator and flags as the test node. The return from lib:progname()
+%% emulator and flags as the test node. The result from progname()
%% could then typically be '/<full_path_to>/cerl -gcov').
quote_progname(Progname) ->
do_quote_progname(string:lexemes(to_list(Progname)," ")).
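The local progname/0 added above just reads the -progname emulator argument, the same source lib:progname/0 used. A hedged illustration of the underlying call (the exact value depends on how the node was started):

1> init:get_argument(progname).
{ok,[["erl"]]}

For a plain 'erl' start the result is usually "erl"; in the OTP test environment it can be a full cerl command line, which is why quote_progname/1 below still has to cope with embedded spaces.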
diff --git a/lib/stdlib/src/stdlib.app.src b/lib/stdlib/src/stdlib.app.src
index 5fb48acfab..cd09872b87 100644
--- a/lib/stdlib/src/stdlib.app.src
+++ b/lib/stdlib/src/stdlib.app.src
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -43,6 +43,7 @@
erl_anno,
erl_bits,
erl_compile,
+ erl_error,
erl_eval,
erl_expand_records,
erl_internal,
@@ -71,7 +72,6 @@
io_lib_format,
io_lib_fread,
io_lib_pretty,
- lib,
lists,
log_mf_h,
maps,
diff --git a/lib/stdlib/test/io_SUITE.erl b/lib/stdlib/test/io_SUITE.erl
index 9f48fbf5e3..13f2cbd27b 100644
--- a/lib/stdlib/test/io_SUITE.erl
+++ b/lib/stdlib/test/io_SUITE.erl
@@ -1808,7 +1808,7 @@ rpc_call_max(Node, M, F, Args) ->
%% Make sure that a bad specification for a printable range is rejected.
bad_printable_range(Config) when is_list(Config) ->
- Cmd = lists:concat([lib:progname()," +pcunnnnnicode -run erlang halt"]),
+ Cmd = ct:get_progname() ++ " +pcunnnnnicode -run erlang halt",
P = open_port({spawn, Cmd}, [stderr_to_stdout, {line, 200}]),
ok = receive
{P, {data, {eol , "bad range of printable characters" ++ _}}} ->
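The test now builds the command with ct:get_progname/0, common_test's replacement for lib:progname/0; it returns the start command as a string, so it concatenates directly with the flags. A hedged illustration (the returned value depends on the environment):

1> ct:get_progname().
"erl"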
diff --git a/lib/stdlib/test/qlc_SUITE.erl b/lib/stdlib/test/qlc_SUITE.erl
index 8f8a0f6e73..5c189a6c73 100644
--- a/lib/stdlib/test/qlc_SUITE.erl
+++ b/lib/stdlib/test/qlc_SUITE.erl
@@ -7468,7 +7468,7 @@ strip_qlc_call(H) ->
strip_qlc_call2(H) ->
S = qlc:info(H, {flat, false}),
{ok, Tokens, _EndLine} = erl_scan:string(S++".", 1, [text]),
- {ok, [Expr], Bs} = lib:extended_parse_exprs(Tokens),
+ {ok, [Expr], Bs} = erl_eval:extended_parse_exprs(Tokens),
{case Expr of
{call,_,{remote,_,{atom,_,qlc},{atom,_,q}},[LC]} ->
{qlc, lists:flatten([erl_pp:expr(LC), "."]), []};
@@ -7489,7 +7489,7 @@ strip_qlc_call2(H) ->
join_info_count(H) ->
S = qlc:info(H, {flat, false}),
{ok, Tokens, _EndLine} = erl_scan:string(S++".", 1, [text]),
- {ok, [Expr], _Bs} = lib:extended_parse_exprs(Tokens),
+ {ok, [Expr], _Bs} = erl_eval:extended_parse_exprs(Tokens),
#ji{nmerge = Nmerge, nlookup = Nlookup,
nkeysort = NKeysort, nnested_loop = Nnested_loop} =
ji(Expr, #ji{}),
@@ -7533,7 +7533,7 @@ lookup_keys({generate,_,Q}, L) ->
lookup_keys(Q, L);
lookup_keys({table,Chars}, L) when is_list(Chars) ->
{ok, Tokens, _} = erl_scan:string(lists:flatten(Chars++"."), 1, [text]),
- {ok, [Expr], _Bs} = lib:extended_parse_exprs(Tokens),
+ {ok, [Expr], _Bs} = erl_eval:extended_parse_exprs(Tokens),
case Expr of
{call,_,_,[_fun,AKs]} ->
case erl_parse:normalise(AKs) of
diff --git a/lib/stdlib/test/shell_SUITE.erl b/lib/stdlib/test/shell_SUITE.erl
index ca85314775..22136d687c 100644
--- a/lib/stdlib/test/shell_SUITE.erl
+++ b/lib/stdlib/test/shell_SUITE.erl
@@ -2780,7 +2780,7 @@ otp_10302(Config) when is_list(Config) ->
rpc:call(Node,shell, prompt_func, [default]),
_ = shell:prompt_func(default),
- %% Test lib:format_exception() (cf. OTP-6554)
+ %% Test erl_error:format_exception() (cf. OTP-6554)
Test6 =
<<"begin
A = <<\"\\xaa\">>,
@@ -2967,10 +2967,10 @@ otp_14296(Config) when is_list(Config) ->
R = t(S)
end(),
- %% Test lib:extended_parse_term/1
+ %% Test erl_eval:extended_parse_term/1
TF = fun(S) ->
{ok, Ts, _} = erl_scan:string(S++".", 1, [text]),
- case lib:extended_parse_term(Ts) of
+ case erl_eval:extended_parse_term(Ts) of
{ok, Term} -> Term;
{error, _}=Error -> Error
end
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index fd51aca861..e08db0ea79 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -804,6 +804,7 @@ resulting regexp is surrounded by \\_< and \\_>."
"is_integer"
"is_list"
"is_map"
+ "is_map_key"
"is_number"
"is_pid"
"is_port"
diff --git a/lib/tools/src/lcnt.erl b/lib/tools/src/lcnt.erl
index d0152a4915..1db90c1d86 100644
--- a/lib/tools/src/lcnt.erl
+++ b/lib/tools/src/lcnt.erl
@@ -125,7 +125,7 @@
%% -------------------------------------------------------------------- %%
start() -> gen_server:start({local, ?MODULE}, ?MODULE, [], []).
-stop() -> gen_server:call(?MODULE, stop, infinity).
+stop() -> gen_server:stop(?MODULE, normal, infinity).
init([]) -> {ok, #state{ locks = [], duration = 0 } }.
start_internal() ->
@@ -442,9 +442,6 @@ handle_call({save, Filename}, _From, State) ->
{reply, {error, Error}, State}
end;
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State};
-
handle_call(Command, _From, State) ->
{reply, {error, {undefined, Command}}, State}.
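lcnt:stop/0 now goes through gen_server:stop/3, which shuts the server down via the standard gen_server machinery (Module:terminate/2 is still invoked), so the dedicated handle_call(stop, ...) clause removed above is no longer needed. A generic sketch of the same pattern:

%% Old style: a bespoke synchronous stop request.
%% handle_call(stop, _From, State) -> {stop, normal, ok, State}.

%% New style: no extra callback clause required.
stop(Server) ->
    gen_server:stop(Server, normal, infinity).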
diff --git a/lib/tools/src/xref.erl b/lib/tools/src/xref.erl
index 32efa36fa2..466ec7d331 100644
--- a/lib/tools/src/xref.erl
+++ b/lib/tools/src/xref.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -182,7 +182,9 @@ split_args(Opts) ->
end.
stop(Name) ->
- gen_server:call(Name, stop, infinity).
+ try gen_server:call(Name, stop, infinity)
+ after catch unregister(Name) % ensure the name is gone
+ end.
add_release(Name, Dir) ->
gen_server:call(Name, {add_release, Dir}, infinity).
diff --git a/lib/tools/test/eprof_SUITE_data/eed.erl b/lib/tools/test/eprof_SUITE_data/eed.erl
index 5f2a21aa60..9fe49c6f5c 100644
--- a/lib/tools/test/eprof_SUITE_data/eed.erl
+++ b/lib/tools/test/eprof_SUITE_data/eed.erl
@@ -54,7 +54,7 @@ edit(Name) ->
loop(St0) ->
{ok, St1, Cmd} = get_line(St0),
- case catch command(lib:nonl(Cmd), St1) of
+ case catch command(nonl(Cmd), St1) of
{'EXIT', Reason} ->
%% XXX Should clear outstanding global command here.
loop(print_error({'EXIT', Reason}, St1));
@@ -66,6 +66,10 @@ loop(St0) ->
loop(St2)
end.
+nonl([$\n]) -> [];
+nonl([]) -> [];
+nonl([H|T]) -> [H|nonl(T)].
+
command(Cmd, St) ->
case parse_command(Cmd, St) of
quit ->