Diffstat (limited to 'lib/compiler')
-rw-r--r--  lib/compiler/doc/src/compile.xml | 20
-rw-r--r--  lib/compiler/internal_doc/cerl-notes.md | 75
-rw-r--r--  lib/compiler/src/Makefile | 13
-rw-r--r--  lib/compiler/src/beam_a.erl | 14
-rw-r--r--  lib/compiler/src/beam_asm.erl | 57
-rw-r--r--  lib/compiler/src/beam_block.erl | 383
-rw-r--r--  lib/compiler/src/beam_bs.erl | 3
-rw-r--r--  lib/compiler/src/beam_bsm.erl | 22
-rw-r--r--  lib/compiler/src/beam_clean.erl | 58
-rw-r--r--  lib/compiler/src/beam_dead.erl | 72
-rw-r--r--  lib/compiler/src/beam_disasm.erl | 99
-rw-r--r--  lib/compiler/src/beam_disasm.hrl | 3
-rw-r--r--  lib/compiler/src/beam_except.erl | 3
-rw-r--r--  lib/compiler/src/beam_flatten.erl | 6
-rw-r--r--  lib/compiler/src/beam_jump.erl | 212
-rw-r--r--  lib/compiler/src/beam_listing.erl | 80
-rw-r--r--  lib/compiler/src/beam_peep.erl | 31
-rw-r--r--  lib/compiler/src/beam_receive.erl | 5
-rw-r--r--  lib/compiler/src/beam_record.erl | 129
-rw-r--r--  lib/compiler/src/beam_reorder.erl | 3
-rw-r--r--  lib/compiler/src/beam_type.erl | 288
-rw-r--r--  lib/compiler/src/beam_utils.erl | 592
-rw-r--r--  lib/compiler/src/beam_validator.erl | 46
-rw-r--r--  lib/compiler/src/beam_z.erl | 33
-rw-r--r--  lib/compiler/src/cerl_clauses.erl | 2
-rw-r--r--  lib/compiler/src/compile.erl | 92
-rw-r--r--  lib/compiler/src/compiler.app.src | 4
-rw-r--r--  lib/compiler/src/core_lint.erl | 6
-rw-r--r--  lib/compiler/src/core_pp.erl | 4
-rw-r--r--  lib/compiler/src/core_scan.erl | 4
-rwxr-xr-x  lib/compiler/src/genop.tab | 29
-rw-r--r--  lib/compiler/src/sys_core_alias.erl | 307
-rw-r--r--  lib/compiler/src/sys_core_bsm.erl | 206
-rw-r--r--  lib/compiler/src/sys_core_dsetel.erl | 3
-rw-r--r--  lib/compiler/src/sys_core_fold.erl | 273
-rw-r--r--  lib/compiler/src/v3_codegen.erl | 1678
-rw-r--r--  lib/compiler/src/v3_core.erl | 73
-rw-r--r--  lib/compiler/src/v3_kernel.erl | 188
-rw-r--r--  lib/compiler/src/v3_kernel_pp.erl | 4
-rw-r--r--  lib/compiler/src/v3_life.erl | 468
-rw-r--r--  lib/compiler/src/v3_life.hrl | 29
-rw-r--r--  lib/compiler/test/Makefile | 1
-rw-r--r--  lib/compiler/test/beam_block_SUITE.erl | 72
-rw-r--r--  lib/compiler/test/beam_type_SUITE.erl | 42
-rw-r--r--  lib/compiler/test/beam_utils_SUITE.erl | 135
-rw-r--r--  lib/compiler/test/beam_validator_SUITE.erl | 24
-rw-r--r--  lib/compiler/test/beam_validator_SUITE_data/bad_tuples.S | 88
-rw-r--r--  lib/compiler/test/bs_match_SUITE.erl | 128
-rw-r--r--  lib/compiler/test/compilation_SUITE_data/opt_crash.erl | 6
-rw-r--r--  lib/compiler/test/compile_SUITE.erl | 109
-rw-r--r--  lib/compiler/test/compile_SUITE_data/big.erl | 4
-rw-r--r--  lib/compiler/test/core_alias_SUITE.erl | 195
-rw-r--r--  lib/compiler/test/core_fold_SUITE.erl | 62
-rw-r--r--  lib/compiler/test/guard_SUITE.erl | 38
-rw-r--r--  lib/compiler/test/map_SUITE.erl | 20
-rw-r--r--  lib/compiler/test/match_SUITE.erl | 148
-rw-r--r--  lib/compiler/test/misc_SUITE.erl | 26
-rw-r--r--  lib/compiler/test/receive_SUITE.erl | 13
-rw-r--r--  lib/compiler/test/trycatch_SUITE.erl | 234
-rw-r--r--  lib/compiler/test/z_SUITE.erl | 3
60 files changed, 4793 insertions, 2172 deletions
diff --git a/lib/compiler/doc/src/compile.xml b/lib/compiler/doc/src/compile.xml
index 10164890f2..06afc55c07 100644
--- a/lib/compiler/doc/src/compile.xml
+++ b/lib/compiler/doc/src/compile.xml
@@ -123,6 +123,17 @@
in the Efficiency Guide.</p>
</item>
+ <tag><c>{compile_info, [{atom(), term()}]}</c></tag>
+ <item>
+ <p>Allows compilers built on top of <c>compile</c> to attach
+ extra compilation metadata to the <c>compile_info</c> chunk
+ in the generated beam file.</p>
+
+ <p>Compilers are advised to remove all non-deterministic
+ information if the <c>deterministic</c> option is supported and
+ was supplied by the user.</p>
+ </item>
+
<tag><c>compressed</c></tag>
<item>
<p>The compiler will compress the generated object code,
@@ -222,6 +233,15 @@ module.beam: module.erl \
header.hrl</code>
</item>
+ <tag><c>makedep_side_effect</c></tag>
+ <item>
+ <p>The dependencies are created as a side effect of the
+ normal compilation process. This means that the object
+ file will also be produced. This option overrides the
+ <c>makedep</c> option.
+ </p>
+ </item>
+
<tag><c>{makedep_output, Output}</c></tag>
<item>
<p>Writes generated rules to <c>Output</c> instead of the
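Taken together, the two new options documented above can be passed in an ordinary compile:file/2 call. A minimal sketch (the module name and metadata values are made up for illustration; only the option names come from this change):

    %% Attach extra metadata to the compile_info chunk and produce the
    %% dependency rules as a side effect of a normal compilation.
    {ok, demo} = compile:file("demo.erl",
                              [{compile_info, [{generator, "my_front_end"}]},
                               makedep_side_effect]).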
diff --git a/lib/compiler/internal_doc/cerl-notes.md b/lib/compiler/internal_doc/cerl-notes.md
new file mode 100644
index 0000000000..705fe8f42d
--- /dev/null
+++ b/lib/compiler/internal_doc/cerl-notes.md
@@ -0,0 +1,75 @@
+Some notes on the cerl modules
+==============================
+
+Maps in cerl_clauses:match/3
+----------------------------
+
+Not much optimization is done for maps in `cerl_clauses:match/3`.
+
+The reason is that the inliner (`cerl_inline`) was not designed for
+data types that depend on variables bound in the enclosing environment
+(for example, the keys for maps). If we attempt to extend the
+optimizations for maps similar to the optimizations for the other data
+types, the inliner will crash for certain code. Here is an example of
+code that would crash the inliner:
+
+ t() ->
+ f(key).
+
+ f(K) ->
+ case #{K => value} of
+ #{key := V} -> V
+ end.
+
+The reason for the crash is that the inliner works in several
+passes and calls `cerl_clauses:match/3` twice. The inliner will
+assume that the same result will be returned both times, but
+for this example different bindings will be returned.
+
+Here is the rough outline of what happens:
+
+* The first time `cerl_clauses:match/3` will be asked to match the
+pattern `#{K := V}` against `#{key => value}`. It cannot say more
+than that the pattern *may* match.
+
+* Now the inliner will add the bindings to the body of the clause (which
+is simply `V`). In this case, the bindings are still empty, so
+nothing is added.
+
+* The inliner will now do some substitutions and renaming. The
+variable `K` will be replaced with `key`.
+
+* The next time `cerl_clauses:match/3` is called, it will be asked to
+match the pattern `#{key := V}` against `#{key => value}`. In this
+case, there will be a match and the bindings can be extended with
+`{V,value}`.
+
+* The inliner will see that the entire case can be removed. Only
+the body for the clause needs to be kept.
+
+Thus, after inlining the function `t/0` will look like this:
+
+ t() ->
+ V.
+
+The problem here is that the inliner assumed that the bindings from
+the first and second call to `cerl_clauses:match/3` would be the same.
+It used the empty bindings from the first call for the body.
+
+The correct way would be to add the bindings from the second call:
+
+ t() ->
+ let V = value in V.
+
+### How to fix this ###
+
+The inliner will need to call `cerl_clauses:match/3` after doing
+all substitutions and renaming. It is not clear to me how difficult
+that would be. I assume that the inliner is written the way it is
+for a good reason. That means that switching the order things are
+done would lead to correctness and/or performance problems.
+
+### What must also be done to fix this ###
+
+`cerl_inline:make_template/3` must be extended to create a template
+for maps. That is relatively straightforward.
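For reference (my addition, not part of the note): a correct compilation of the example module at the top of this note must still make `t/0` return the value stored under `key`, which is exactly what the `let V = value in V` form preserves:

    1> t().
    value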
diff --git a/lib/compiler/src/Makefile b/lib/compiler/src/Makefile
index ef6db66ff6..9e96147787 100644
--- a/lib/compiler/src/Makefile
+++ b/lib/compiler/src/Makefile
@@ -83,6 +83,7 @@ MODULES = \
core_scan \
erl_bifs \
rec_env \
+ sys_core_alias \
sys_core_bsm \
sys_core_dsetel \
sys_core_fold \
@@ -92,16 +93,14 @@ MODULES = \
v3_codegen \
v3_core \
v3_kernel \
- v3_kernel_pp \
- v3_life
+ v3_kernel_pp
BEAM_H = $(wildcard ../priv/beam_h/*.h)
HRL_FILES= \
beam_disasm.hrl \
core_parse.hrl \
- v3_kernel.hrl \
- v3_life.hrl
+ v3_kernel.hrl
YRL_FILE = core_parse.yrl
@@ -186,7 +185,7 @@ release_docs_spec:
# ----------------------------------------------------
$(EBIN)/beam_disasm.beam: $(EGEN)/beam_opcodes.hrl beam_disasm.hrl
-$(EBIN)/beam_listing.beam: v3_life.hrl
+$(EBIN)/beam_listing.beam: core_parse.hrl v3_kernel.hrl
$(EBIN)/beam_validator.beam: beam_disasm.hrl
$(EBIN)/cerl.beam: core_parse.hrl
$(EBIN)/compile.beam: core_parse.hrl ../../stdlib/include/erl_compile.hrl
@@ -194,12 +193,12 @@ $(EBIN)/core_lib.beam: core_parse.hrl
$(EBIN)/core_lint.beam: core_parse.hrl
$(EBIN)/core_parse.beam: core_parse.hrl $(EGEN)/core_parse.erl
$(EBIN)/core_pp.beam: core_parse.hrl
+$(EBIN)/sys_core_alias.beam: core_parse.hrl
$(EBIN)/sys_core_dsetel.beam: core_parse.hrl
$(EBIN)/sys_core_fold.beam: core_parse.hrl
$(EBIN)/sys_core_fold_lists.beam: core_parse.hrl
$(EBIN)/sys_core_inline.beam: core_parse.hrl
-$(EBIN)/v3_codegen.beam: v3_life.hrl
+$(EBIN)/v3_codegen.beam: v3_kernel.hrl
$(EBIN)/v3_core.beam: core_parse.hrl
$(EBIN)/v3_kernel.beam: core_parse.hrl v3_kernel.hrl
$(EBIN)/v3_kernel_pp.beam: v3_kernel.hrl
-$(EBIN)/v3_life.beam: v3_kernel.hrl v3_life.hrl
diff --git a/lib/compiler/src/beam_a.erl b/lib/compiler/src/beam_a.erl
index cdb32d5d55..91acb19971 100644
--- a/lib/compiler/src/beam_a.erl
+++ b/lib/compiler/src/beam_a.erl
@@ -42,8 +42,7 @@ function({function,Name,Arity,CLabel,Is0}) ->
Is = beam_jump:remove_unused_labels(Is1),
{function,Name,Arity,CLabel,Is}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -59,8 +58,17 @@ rename_instrs([{call_only,A,F}|Is]) ->
rename_instrs([{call_ext_only,A,F}|Is]) ->
[{call_ext,A,F},return|rename_instrs(Is)];
rename_instrs([{'%live',_}|Is]) ->
- %% When compiling from old .S files.
+ %% Ignore old type of live annotation. Only happens when compiling
+ %% from very old .S files.
rename_instrs(Is);
+rename_instrs([{get_list,S,D1,D2}|Is]) ->
+ %% Only happens when compiling from old .S files.
+ if
+ D1 =:= S ->
+ [{get_tl,S,D2},{get_hd,S,D1}|rename_instrs(Is)];
+ true ->
+ [{get_hd,S,D1},{get_tl,S,D2}|rename_instrs(Is)]
+ end;
rename_instrs([I|Is]) ->
[rename_instr(I)|rename_instrs(Is)];
rename_instrs([]) -> [].
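A concrete instance (mine, not taken from the patch) of the ordering issue that the get_list rewrite above handles:

    %% Old-style instruction from a .S file, with the head destination
    %% equal to the source cons cell:
    {get_list,{x,0},{x,0},{x,1}}

    %% Reading the head first would overwrite {x,0} before the tail could
    %% be fetched, so the rewrite emits get_tl before get_hd:
    {get_tl,{x,0},{x,1}},
    {get_hd,{x,0},{x,0}}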
diff --git a/lib/compiler/src/beam_asm.erl b/lib/compiler/src/beam_asm.erl
index f7c838e392..fa919ca862 100644
--- a/lib/compiler/src/beam_asm.erl
+++ b/lib/compiler/src/beam_asm.erl
@@ -21,10 +21,10 @@
-module(beam_asm).
--export([module/5]).
+-export([module/4]).
-export([encode/2]).
--export_type([fail/0,label/0,reg/0,src/0,module_code/0,function_name/0]).
+-export_type([fail/0,label/0,reg/0,reg_num/0,src/0,module_code/0,function_name/0]).
-import(lists, [map/2,member/2,keymember/3,duplicate/2,splitwith/2]).
-include("beam_opcodes.hrl").
@@ -55,20 +55,20 @@
-type module_code() ::
{module(),[_],[_],[asm_function()],pos_integer()}.
--spec module(module_code(), [{binary(), binary()}], [_], [compile:option()], [compile:option()]) ->
+-spec module(module_code(), [{binary(), binary()}], [{atom(),term()}], [compile:option()]) ->
{'ok',binary()}.
-module(Code, ExtraChunks, SourceFile, Opts, CompilerOpts) ->
- {ok,assemble(Code, ExtraChunks, SourceFile, Opts, CompilerOpts)}.
+module(Code, ExtraChunks, CompileInfo, CompilerOpts) ->
+ {ok,assemble(Code, ExtraChunks, CompileInfo, CompilerOpts)}.
-assemble({Mod,Exp0,Attr0,Asm0,NumLabels}, ExtraChunks, SourceFile, Opts, CompilerOpts) ->
+assemble({Mod,Exp0,Attr0,Asm0,NumLabels}, ExtraChunks, CompileInfo, CompilerOpts) ->
{1,Dict0} = beam_dict:atom(Mod, beam_dict:new()),
{0,Dict1} = beam_dict:fname(atom_to_list(Mod) ++ ".erl", Dict0),
NumFuncs = length(Asm0),
{Asm,Attr} = on_load(Asm0, Attr0),
Exp = cerl_sets:from_list(Exp0),
{Code,Dict2} = assemble_1(Asm, Exp, Dict1, []),
- build_file(Code, Attr, Dict2, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts, CompilerOpts).
+ build_file(Code, Attr, Dict2, NumLabels, NumFuncs, ExtraChunks, CompileInfo, CompilerOpts).
on_load(Fs0, Attr0) ->
case proplists:get_value(on_load, Attr0) of
@@ -111,7 +111,7 @@ assemble_function([H|T], Acc, Dict0) ->
assemble_function([], Code, Dict) ->
{Code, Dict}.
-build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts, CompilerOpts) ->
+build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, CompileInfo, CompilerOpts) ->
%% Create the code chunk.
CodeChunk = chunk(<<"Code">>,
@@ -182,8 +182,7 @@ build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts,
Essentials1 = [iolist_to_binary(C) || C <- Essentials0],
MD5 = module_md5(Essentials1),
Essentials = finalize_fun_table(Essentials1, MD5),
- {Attributes,Compile} = build_attributes(Opts, CompilerOpts, SourceFile,
- Attr, MD5),
+ {Attributes,Compile} = build_attributes(Attr, CompileInfo, MD5),
AttrChunk = chunk(<<"Attr">>, Attributes),
CompileChunk = chunk(<<"CInf">>, Compile),
@@ -193,7 +192,7 @@ build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts,
%% Create IFF chunk.
- Chunks = case member(slim, Opts) of
+ Chunks = case member(slim, CompilerOpts) of
true ->
[Essentials,AttrChunk];
false ->
@@ -241,9 +240,7 @@ build_form(Id, Chunks0) when byte_size(Id) =:= 4, is_list(Chunks0) ->
chunk(Id, Contents) when byte_size(Id) =:= 4, is_binary(Contents) ->
Size = byte_size(Contents),
- [<<Id/binary,Size:32>>,Contents|pad(Size)];
-chunk(Id, Contents) when is_list(Contents) ->
- chunk(Id, list_to_binary(Contents)).
+ [<<Id/binary,Size:32>>,Contents|pad(Size)].
%% Build a correctly padded chunk (with a sub-header).
@@ -265,22 +262,10 @@ flatten_exports(Exps) ->
flatten_imports(Imps) ->
list_to_binary(map(fun({M,F,A}) -> <<M:32,F:32,A:32>> end, Imps)).
-build_attributes(Opts, CompilerOpts, SourceFile, Attr, MD5) ->
- Misc0 = case SourceFile of
- [] -> [];
- [_|_] -> [{source,SourceFile}]
- end,
- Misc = case member(slim, CompilerOpts) of
- false -> Misc0;
- true -> []
- end,
- Compile = case member(deterministic, CompilerOpts) of
- false ->
- [{options,Opts},{version,?COMPILER_VSN}|Misc];
- true ->
- [{version,?COMPILER_VSN}]
- end,
- {term_to_binary(set_vsn_attribute(Attr, MD5)),term_to_binary(Compile)}.
+build_attributes(Attr, Compile, MD5) ->
+ AttrBinary = term_to_binary(set_vsn_attribute(Attr, MD5)),
+ CompileBinary = term_to_binary([{version,?COMPILER_VSN}|Compile]),
+ {AttrBinary,CompileBinary}.
build_line_table(Dict) ->
{NumLineInstrs,NumFnames0,Fnames0,NumLines,Lines0} =
@@ -422,7 +407,17 @@ encode_arg({atom, Atom}, Dict0) when is_atom(Atom) ->
{Index, Dict} = beam_dict:atom(Atom, Dict0),
{encode(?tag_a, Index), Dict};
encode_arg({integer, N}, Dict) ->
- {encode(?tag_i, N), Dict};
+ %% Conservatively assume that all integers whose absolute
+ %% value is greater than 1 bsl 128 will be bignums in
+ %% the runtime system.
+ if
+ N >= 1 bsl 128 ->
+ encode_arg({literal, N}, Dict);
+ N =< -(1 bsl 128) ->
+ encode_arg({literal, N}, Dict);
+ true ->
+ {encode(?tag_i, N), Dict}
+ end;
encode_arg(nil, Dict) ->
{encode(?tag_a, 0), Dict};
encode_arg({f, W}, Dict) ->
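A small standalone sketch of the threshold test used in encode_arg/2 above; the function name is made up, but the check mirrors the two guards in the patch:

    %% Integers of this magnitude are conservatively assumed to be bignums
    %% in the runtime system, so they are encoded as literals rather than
    %% as tagged immediate integers.
    needs_literal_encoding(N) when is_integer(N) ->
        abs(N) >= 1 bsl 128.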
diff --git a/lib/compiler/src/beam_block.erl b/lib/compiler/src/beam_block.erl
index 6543e05e20..7183381334 100644
--- a/lib/compiler/src/beam_block.erl
+++ b/lib/compiler/src/beam_block.erl
@@ -23,30 +23,38 @@
-module(beam_block).
-export([module/2]).
--import(lists, [reverse/1,reverse/2,foldl/3,member/2]).
+-import(lists, [reverse/1,reverse/2,member/2]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
-module({Mod,Exp,Attr,Fs0,Lc}, _Opt) ->
- Fs = [function(F) || F <- Fs0],
+module({Mod,Exp,Attr,Fs0,Lc}, Opts) ->
+ Blockify = not member(no_blockify, Opts),
+ Fs = [function(F, Blockify) || F <- Fs0],
{ok,{Mod,Exp,Attr,Fs,Lc}}.
-function({function,Name,Arity,CLabel,Is0}) ->
+function({function,Name,Arity,CLabel,Is0}, Blockify) ->
try
%% Collect basic blocks and optimize them.
- Is1 = blockify(Is0),
- Is2 = embed_lines(Is1),
- Is3 = move_allocates(Is2),
- Is4 = beam_utils:live_opt(Is3),
- Is5 = opt_blocks(Is4),
- Is6 = beam_utils:delete_live_annos(Is5),
-
- %% Done.
- {function,Name,Arity,CLabel,Is6}
+ Is2 = case Blockify of
+ true ->
+ Is1 = blockify(Is0),
+ embed_lines(Is1);
+ false ->
+ Is0
+ end,
+ Is3 = local_cse(Is2),
+ Is4 = beam_utils:anno_defs(Is3),
+ Is5 = move_allocates(Is4),
+ Is6 = beam_utils:live_opt(Is5),
+ Is7 = opt_blocks(Is6),
+ Is8 = beam_utils:delete_annos(Is7),
+ Is = opt_allocs(Is8),
+
+ %% Done.
+ {function,Name,Arity,CLabel,Is}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -102,7 +110,8 @@ collect({put_tuple,A,D}) -> {set,[D],[],{put_tuple,A}};
collect({put,S}) -> {set,[],[S],put};
collect({get_tuple_element,S,I,D}) -> {set,[D],[S],{get_tuple_element,I}};
collect({set_tuple_element,S,D,I}) -> {set,[],[S,D],{set_tuple_element,I}};
-collect({get_list,S,D1,D2}) -> {set,[D1,D2],[S],get_list};
+collect({get_hd,S,D}) -> {set,[D],[S],get_hd};
+collect({get_tl,S,D}) -> {set,[D],[S],get_tl};
collect(remove_message) -> {set,[],[],remove_message};
collect({put_map,F,Op,S,D,R,{list,Puts}}) ->
{set,[D],[S|Puts],{alloc,R,{put_map,Op,F}}};
@@ -136,17 +145,16 @@ embed_lines([], Acc) -> Acc.
opt_blocks([{block,Bl0}|Is]) ->
%% The live annotation at the beginning is not useful.
- [{'%live',_,_}|Bl] = Bl0,
+ [{'%anno',_}|Bl] = Bl0,
[{block,opt_block(Bl)}|opt_blocks(Is)];
opt_blocks([I|Is]) ->
[I|opt_blocks(Is)];
opt_blocks([]) -> [].
opt_block(Is0) ->
- Is = find_fixpoint(fun(Is) ->
- opt_tuple_element(opt(Is))
- end, Is0),
- opt_alloc(Is).
+ find_fixpoint(fun(Is) ->
+ opt_tuple_element(opt(Is))
+ end, Is0).
find_fixpoint(OptFun, Is0) ->
case OptFun(Is0) of
@@ -173,7 +181,7 @@ find_fixpoint(OptFun, Is0) ->
%% safe to assume that if x(N) is initialized, then all lower-numbered
%% x registers are also initialized.
%%
-%% For example, in general it is not safe to transform the following
+%% For example, we must be careful when transforming the following
%% instructions:
%%
%% get_tuple_element x(0) Element => x(1)
@@ -185,13 +193,9 @@ find_fixpoint(OptFun, Is0) ->
%% get_tuple_element x(0) Element => x(1)
%%
%% The transformation is safe if and only if x(1) has been
-%% initialized previously. Unfortunately, beam_reorder may have moved
-%% a get_tuple_element instruction so that x(1) is not always
-%% initialized when this code is reached. To find whether or not x(1)
-%% is initialized, we would need to analyze all code preceding these
-%% two instructions (across branches). Since we currently don't have
-%% any practical mechanism for doing that, we will have to
-%% conservatively assume that the transformation is unsafe.
+%% initialized previously. We will use the annotations added by
+%% beam_utils:anno_defs/1 to determine whether x(a) has been
+%% initialized.
move_allocates([{block,Bl0}|Is]) ->
Bl = move_allocates_1(reverse(Bl0), []),
@@ -200,40 +204,77 @@ move_allocates([I|Is]) ->
[I|move_allocates(Is)];
move_allocates([]) -> [].
-move_allocates_1([I|Is], [{set,[],[],{alloc,Live0,Info}}|Acc]=Acc0) ->
- case {alloc_may_pass(I),alloc_live_regs(I, Live0)} of
- {false,_} ->
- move_allocates_1(Is, [I|Acc0]);
- {true,not_possible} ->
- move_allocates_1(Is, [I|Acc0]);
- {true,Live} when is_integer(Live) ->
- A = {set,[],[],{alloc,Live,Info}},
- move_allocates_1(Is, [A,I|Acc])
+move_allocates_1([{'%anno',_}|Is], Acc) ->
+ move_allocates_1(Is, Acc);
+move_allocates_1([I|Is], [{set,[],[],{alloc,Live0,Info0}}|Acc]=Acc0) ->
+ case alloc_may_pass(I) of
+ false ->
+ move_allocates_1(Is, [I|Acc0]);
+ true ->
+ case alloc_live_regs(I, Is, Live0) of
+ not_possible ->
+ move_allocates_1(Is, [I|Acc0]);
+ Live when is_integer(Live) ->
+ Info = safe_info(Info0),
+ A = {set,[],[],{alloc,Live,Info}},
+ move_allocates_1(Is, [A,I|Acc])
+ end
end;
move_allocates_1([I|Is], Acc) ->
move_allocates_1(Is, [I|Acc]);
move_allocates_1([], Acc) -> Acc.
+alloc_may_pass({set,_,[{fr,_}],fmove}) -> false;
alloc_may_pass({set,_,_,{alloc,_,_}}) -> false;
alloc_may_pass({set,_,_,{set_tuple_element,_}}) -> false;
alloc_may_pass({set,_,_,put_list}) -> false;
alloc_may_pass({set,_,_,put}) -> false;
alloc_may_pass({set,_,_,_}) -> true.
-
+
+safe_info({nozero,Stack,Heap,_}) ->
+ %% nozero is not safe if the allocation instruction is moved
+ %% upwards past an instruction that may throw an exception
+ %% (such as element/2).
+ {zero,Stack,Heap,[]};
+safe_info(Info) -> Info.
+
%% opt([Instruction]) -> [Instruction]
%% Optimize the instruction stream inside a basic block.
opt([{set,[X],[X],move}|Is]) -> opt(Is);
+opt([{set,[Dst],_,move},{set,[Dst],[Src],move}=I|Is]) when Dst =/= Src ->
+ opt([I|Is]);
+opt([{set,[{x,0}],[S1],move}=I1,{set,[D2],[{x,0}],move}|Is]) ->
+ opt([I1,{set,[D2],[S1],move}|Is]);
+opt([{set,[{x,0}],[S1],move}=I1,{set,[D2],[S2],move}|Is0]) when S1 =/= D2 ->
+ %% Place move S x0 at the end of move sequences so that
+ %% the loader can merge it with the following instruction.
+ {Ds,Is} = opt_moves([D2], Is0),
+ [{set,Ds,[S2],move}|opt([I1|Is])];
opt([{set,_,_,{line,_}}=Line1,
{set,[D1],[{integer,Idx1},Reg],{bif,element,{f,0}}}=I1,
{set,_,_,{line,_}}=Line2,
{set,[D2],[{integer,Idx2},Reg],{bif,element,{f,0}}}=I2|Is])
when Idx1 < Idx2, D1 =/= D2, D1 =/= Reg, D2 =/= Reg ->
opt([Line2,I2,Line1,I1|Is]);
+opt([{set,[D1],[{integer,Idx1},Reg],{bif,element,{f,L}}}=I1,
+ {set,[D2],[{integer,Idx2},Reg],{bif,element,{f,L}}}=I2|Is])
+ when Idx1 < Idx2, D1 =/= D2, D1 =/= Reg, D2 =/= Reg ->
+ opt([I2,I1|Is]);
+opt([{set,Hd0,Cons,get_hd}=GetHd,
+ {set,Tl0,Cons,get_tl}=GetTl|Is0]) ->
+ case {opt_moves(Hd0, [GetTl|Is0]),opt_moves(Tl0, [GetHd|Is0])} of
+ {{Hd0,Is},{Tl0,_}} ->
+ [GetHd|opt(Is)];
+ {{Hd,Is},{Tl0,_}} ->
+ [{set,Hd,Cons,get_hd}|opt(Is)];
+ {{_,_},{Tl,Is}} ->
+ [{set,Tl,Cons,get_tl}|opt(Is)]
+ end;
opt([{set,Ds0,Ss,Op}|Is0]) ->
{Ds,Is} = opt_moves(Ds0, Is0),
[{set,Ds,Ss,Op}|opt(Is)];
-opt([{'%live',_,_}=I|Is]) ->
+opt([{'%anno',_}=I|Is]) ->
[I|opt(Is)];
opt([]) -> [].
@@ -245,17 +286,6 @@ opt_moves([D0]=Ds, Is0) ->
case opt_move(D0, Is0) of
not_possible -> {Ds,Is0};
{D1,Is} -> {[D1],Is}
- end;
-opt_moves([X0,Y0], Is0) ->
- {X,Is2} = case opt_move(X0, Is0) of
- not_possible -> {X0,Is0};
- {Y0,_} -> {X0,Is0};
- {_X1,_Is1} = XIs1 -> XIs1
- end,
- case opt_move(Y0, Is2) of
- not_possible -> {[X,Y0],Is2};
- {X,_} -> {[X,Y0],Is2};
- {Y,Is} -> {[X,Y],Is}
end.
%% opt_move(Dest, [Instruction]) -> {UpdatedDest,[Instruction]} | not_possible
@@ -269,7 +299,7 @@ opt_move(Dest, Is) ->
opt_move_1(R, [{set,[D],[R],move}|Is0], Acc) ->
%% Provided that the source register is killed by instructions
%% that follow, the optimization is safe.
- case eliminate_use_of_from_reg(Is0, R, D, []) of
+ case eliminate_use_of_from_reg(Is0, R, D) of
{yes,Is} -> opt_move_rev(D, Acc, Is);
no -> not_possible
end;
@@ -327,7 +357,7 @@ opt_tuple_element_1([{set,_,_,{alloc,_,_}}|_], _, _, _) ->
opt_tuple_element_1([{set,_,_,{try_catch,_,_}}|_], _, _, _) ->
no;
opt_tuple_element_1([{set,[D],[S],move}|Is0], I0, {_,S}, Acc) ->
- case eliminate_use_of_from_reg(Is0, S, D, []) of
+ case eliminate_use_of_from_reg(Is0, S, D) of
no ->
no;
{yes,Is} ->
@@ -369,6 +399,14 @@ is_killed_or_used(R, {set,Ss,Ds,_}) ->
%% that FromRegister is still used and that the optimization is not
%% possible.
+eliminate_use_of_from_reg(Is, From, To) ->
+ try
+ eliminate_use_of_from_reg(Is, From, To, [])
+ catch
+ throw:not_possible ->
+ no
+ end.
+
eliminate_use_of_from_reg([{set,_,_,{alloc,Live,_}}|_]=Is0, {x,X}, _, Acc) ->
if
X < Live ->
@@ -377,21 +415,32 @@ eliminate_use_of_from_reg([{set,_,_,{alloc,Live,_}}|_]=Is0, {x,X}, _, Acc) ->
{yes,reverse(Acc, Is0)}
end;
eliminate_use_of_from_reg([{set,Ds,Ss0,Op}=I0|Is], From, To, Acc) ->
+ ensure_safe_tuple(I0, To),
I = case member(From, Ss0) of
- true ->
- Ss = [case S of
- From -> To;
- _ -> S
- end || S <- Ss0],
- {set,Ds,Ss,Op};
- false ->
- I0
- end,
+ true ->
+ Ss = [case S of
+ From -> To;
+ _ -> S
+ end || S <- Ss0],
+ {set,Ds,Ss,Op};
+ false ->
+ I0
+ end,
case member(From, Ds) of
- true ->
- {yes,reverse(Acc, [I|Is])};
- false ->
- eliminate_use_of_from_reg(Is, From, To, [I|Acc])
+ true ->
+ {yes,reverse(Acc, [I|Is])};
+ false ->
+ case member(To, Ds) of
+ true ->
+ case beam_utils:is_killed_block(From, Is) of
+ true ->
+ {yes,reverse(Acc, [I|Is])};
+ false ->
+ no
+ end;
+ false ->
+ eliminate_use_of_from_reg(Is, From, To, [I|Acc])
+ end
end;
eliminate_use_of_from_reg([I]=Is, From, _To, Acc) ->
case beam_utils:is_killed_block(From, [I]) of
@@ -401,31 +450,51 @@ eliminate_use_of_from_reg([I]=Is, From, _To, Acc) ->
no
end.
+ensure_safe_tuple({set,[To],[],{put_tuple,_}}, To) ->
+ throw(not_possible);
+ensure_safe_tuple(_, _) -> ok.
+
+%% opt_allocs(Instructions) -> Instructions. Optimize allocate
+%% instructions inside blocks. If safe, replace an allocate_zero
+%% instruction with the slightly cheaper allocate instruction.
+
+opt_allocs(Is) ->
+ D = beam_utils:index_labels(Is),
+ opt_allocs_1(Is, D).
+
+opt_allocs_1([{block,Bl0}|Is], D) ->
+ Bl = opt_alloc(Bl0, {D,Is}),
+ [{block,Bl}|opt_allocs_1(Is, D)];
+opt_allocs_1([I|Is], D) ->
+ [I|opt_allocs_1(Is, D)];
+opt_allocs_1([], _) -> [].
+
%% opt_alloc(Instructions) -> Instructions'
%% Optimises all allocate instructions.
opt_alloc([{set,[],[],{alloc,Live0,Info0}},
- {set,[],[],{alloc,Live,Info}}|Is]) ->
+ {set,[],[],{alloc,Live,Info}}|Is], D) ->
Live = Live0, %Assertion.
Alloc = combine_alloc(Info0, Info),
I = {set,[],[],{alloc,Live,Alloc}},
- opt_alloc([I|Is]);
-opt_alloc([{set,[],[],{alloc,R,{_,Ns,Nh,[]}}}|Is]) ->
- [{set,[],[],opt_alloc(Is, Ns, Nh, R)}|Is];
-opt_alloc([I|Is]) -> [I|opt_alloc(Is)];
-opt_alloc([]) -> [].
+ opt_alloc([I|Is], D);
+opt_alloc([{set,[],[],{alloc,R,{_,Ns,Nh,[]}}}|Is], D) ->
+ [{set,[],[],opt_alloc(Is, D, Ns, Nh, R)}|Is];
+opt_alloc([I|Is], D) -> [I|opt_alloc(Is, D)];
+opt_alloc([], _) -> [].
combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
{zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
-
+
%% opt_alloc(Instructions, FrameSize, HeapNeed, LivingRegs) -> [Instr]
%% Generates the optimal sequence of instructions for
%% allocating and initializing the stack frame and needed heap.
-opt_alloc(_Is, nostack, Nh, LivingRegs) ->
+opt_alloc(_Is, _D, nostack, Nh, LivingRegs) ->
{alloc,LivingRegs,{nozero,nostack,Nh,[]}};
-opt_alloc(Is, Ns, Nh, LivingRegs) ->
- InitRegs = init_yreg(Is, 0),
+opt_alloc(Bl, {D,OuterIs}, Ns, Nh, LivingRegs) ->
+ Is = [{block,Bl}|OuterIs],
+ InitRegs = init_yregs(Ns, Is, D),
case count_ones(InitRegs) of
N when N*2 > Ns ->
{alloc,LivingRegs,{nozero,Ns,Nh,gen_init(Ns, InitRegs)}};
@@ -441,19 +510,14 @@ gen_init(Fs, Regs, Y, Acc) when Regs band 1 =:= 0 ->
gen_init(Fs, Regs, Y, Acc) ->
gen_init(Fs, Regs bsr 1, Y+1, Acc).
-%% init_yreg(Instructions, RegSet) -> RegSetInitialized
-%% Calculate the set of initialized y registers.
-
-init_yreg([{set,_,_,{bif,_,_}}|_], Reg) -> Reg;
-init_yreg([{set,_,_,{alloc,_,{gc_bif,_,_}}}|_], Reg) -> Reg;
-init_yreg([{set,_,_,{alloc,_,{put_map,_,_}}}|_], Reg) -> Reg;
-init_yreg([{set,Ds,_,_}|Is], Reg) -> init_yreg(Is, add_yregs(Ds, Reg));
-init_yreg(_Is, Reg) -> Reg.
-
-add_yregs(Ys, Reg) -> foldl(fun(Y, R0) -> add_yreg(Y, R0) end, Reg, Ys).
-
-add_yreg({y,Y}, Reg) -> Reg bor (1 bsl Y);
-add_yreg(_, Reg) -> Reg.
+init_yregs(Y, Is, D) when Y >= 0 ->
+ case beam_utils:is_killed({y,Y}, Is, D) of
+ true ->
+ (1 bsl Y) bor init_yregs(Y-1, Is, D);
+ false ->
+ init_yregs(Y-1, Is, D)
+ end;
+init_yregs(_, _, _) -> 0.
count_ones(Bits) -> count_ones(Bits, 0).
count_ones(0, Acc) -> Acc;
@@ -463,16 +527,34 @@ count_ones(Bits, Acc) ->
%% Calculate the new number of live registers when we move an allocate
%% instruction upwards, passing a 'set' instruction.
-alloc_live_regs({set,Ds,Ss,_}, Regs0) ->
+alloc_live_regs({set,Ds,Ss,_}, Is, Regs0) ->
Rset = x_live(Ss, x_dead(Ds, (1 bsl Regs0)-1)),
- live_regs(0, Rset).
+ Live = live_regs(0, Rset),
+ case ensure_contiguous(Rset, Live) of
+ not_possible ->
+ %% Liveness information (looking forward in the
+ %% instruction stream) can't prove that moving this
+ %% allocation instruction is safe. Now use the annotation
+ %% of defined registers at the beginning of the current
+ %% block to see whether moving would be safe.
+ Def0 = defined_regs(Is, 0),
+ Def = Def0 band ((1 bsl Live) - 1),
+ ensure_contiguous(Rset bor Def, Live);
+ Live ->
+ %% Safe based on liveness information.
+ Live
+ end.
live_regs(N, 0) ->
N;
-live_regs(N, Regs) when Regs band 1 =:= 1 ->
- live_regs(N+1, Regs bsr 1);
-live_regs(_, _) ->
- not_possible.
+live_regs(N, Regs) ->
+ live_regs(N+1, Regs bsr 1).
+
+ensure_contiguous(Regs, Live) ->
+ case (1 bsl Live) - 1 of
+ Regs -> Live;
+ _ -> not_possible
+ end.
x_dead([{x,N}|Rs], Regs) -> x_dead(Rs, Regs band (bnot (1 bsl N)));
x_dead([_|Rs], Regs) -> x_dead(Rs, Regs);
@@ -481,3 +563,114 @@ x_dead([], Regs) -> Regs.
x_live([{x,N}|Rs], Regs) -> x_live(Rs, Regs bor (1 bsl N));
x_live([_|Rs], Regs) -> x_live(Rs, Regs);
x_live([], Regs) -> Regs.
+
+%% defined_regs(ReversedInstructions) -> RegBitmap.
+%% Given a reversed instruction stream, determine the
+%% registers that are defined.
+
+defined_regs([{'%anno',{def,Def}}|_], Regs) ->
+ Def bor Regs;
+defined_regs([{set,Ds,_,{alloc,Live,_}}|_], Regs) ->
+ x_live(Ds, Regs bor ((1 bsl Live) - 1));
+defined_regs([{set,Ds,_,_}|Is], Regs) ->
+ defined_regs(Is, x_live(Ds, Regs)).
+
+%%%
+%%% Do local common sub expression elimination (CSE) in each block.
+%%%
+
+local_cse([{block,Bl0}|Is]) ->
+ Bl = cse_block(Bl0, orddict:new(), []),
+ [{block,Bl}|local_cse(Is)];
+local_cse([I|Is]) ->
+ [I|local_cse(Is)];
+local_cse([]) -> [].
+
+cse_block([I|Is], Es0, Acc0) ->
+ Es1 = cse_clear(I, Es0),
+ case cse_expr(I) of
+ none ->
+ %% Instruction is not suitable for CSE.
+ cse_block(Is, Es1, [I|Acc0]);
+ {ok,D,Expr} ->
+ %% Suitable instruction. First update the dictionary of
+ %% suitable expressions for the next iteration.
+ Es = cse_add(D, Expr, Es1),
+
+ %% Search for a previous identical expression.
+ case cse_find(Expr, Es0) of
+ error ->
+ %% Nothing found
+ cse_block(Is, Es, [I|Acc0]);
+ Src ->
+ %% Use the previously calculated result.
+ %% Also eliminate any line instruction.
+ Move = {set,[D],[Src],move},
+ case Acc0 of
+ [{set,_,_,{line,_}}|Acc] ->
+ cse_block(Is, Es, [Move|Acc]);
+ [_|_] ->
+ cse_block(Is, Es, [Move|Acc0])
+ end
+ end
+ end;
+cse_block([], _, Acc) ->
+ reverse(Acc).
+
+%% cse_find(Expr, Expressions) -> error | Register.
+%% Find a previously evaluated expression whose result can be reused,
+%% or return 'error' if no such expression is found.
+
+cse_find(Expr, Es) ->
+ case orddict:find(Expr, Es) of
+ {ok,{Src,_}} -> Src;
+ error -> error
+ end.
+
+cse_expr({set,[D],Ss,{bif,N,_}}) ->
+ {ok,D,{{bif,N},Ss}};
+cse_expr({set,[D],Ss,{alloc,_,{gc_bif,N,_}}}) ->
+ {ok,D,{{gc_bif,N},Ss}};
+cse_expr({set,[D],Ss,put_list}) ->
+ {ok,D,{put_list,Ss}};
+cse_expr(_) -> none.
+
+%% cse_clear(Instr, Expressions0) -> Expressions.
+%% Remove all previous expressions that will become
+%% invalid when this instruction is executed. Basically,
+%% an expression is no longer safe to reuse when the
+%% register it has been stored to has been modified or killed,
+%% or when any of its source operands has changed.
+
+cse_clear({set,Ds,_,{alloc,Live,_}}, Es) ->
+ cse_clear_1(Es, Live, Ds);
+cse_clear({set,Ds,_,_}, Es) ->
+ cse_clear_1(Es, all, Ds).
+
+cse_clear_1(Es, Live, Ds0) ->
+ Ds = ordsets:from_list(Ds0),
+ [E || E <- Es, cse_is_safe(E, Live, Ds)].
+
+cse_is_safe({_,{Dst,Interfering}}, Live, Ds) ->
+ ordsets:is_disjoint(Interfering, Ds) andalso
+ case Dst of
+ {x,X} ->
+ X < Live;
+ _ ->
+ true
+ end.
+
+%% cse_add(Dest, Expr, Expressions0) -> Expressions.
+%% Provided that it is safe, add a new expression to the dictionary
+%% of already evaluated expressions.
+
+cse_add(D, {_,Ss}=Expr, Es) ->
+ case member(D, Ss) of
+ false ->
+ Interfering = ordsets:from_list([D|Ss]),
+ orddict:store(Expr, {D,Interfering}, Es);
+ true ->
+ %% Unsafe because the instruction overwrites one of
+ %% its source operands.
+ Es
+ end.
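A rough illustration (mine, not from the commit) of what the new local CSE pass does, written in the block instruction format matched by cse_expr/1 above; the register numbers and live counts are only plausible placeholders:

    %% Two identical gc_bif additions inside one block...
    [{set,[{x,1}],[{x,0},{integer,1}],{alloc,1,{gc_bif,'+',{f,0}}}},
     {set,[{x,2}],[{x,0},{integer,1}],{alloc,2,{gc_bif,'+',{f,0}}}}]

    %% ...become a single computation plus a move from the previously
    %% calculated result:
    [{set,[{x,1}],[{x,0},{integer,1}],{alloc,1,{gc_bif,'+',{f,0}}}},
     {set,[{x,2}],[{x,1}],move}]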
diff --git a/lib/compiler/src/beam_bs.erl b/lib/compiler/src/beam_bs.erl
index beb055b23d..14cedbb582 100644
--- a/lib/compiler/src/beam_bs.erl
+++ b/lib/compiler/src/beam_bs.erl
@@ -38,8 +38,7 @@ function({function,Name,Arity,CLabel,Is0}, Lc0) ->
{Is,Lc} = bsm_opt(Is1, Lc0),
{{function,Name,Arity,CLabel,Is},Lc}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
diff --git a/lib/compiler/src/beam_bsm.erl b/lib/compiler/src/beam_bsm.erl
index 9a4e7fb133..9f3b9d788f 100644
--- a/lib/compiler/src/beam_bsm.erl
+++ b/lib/compiler/src/beam_bsm.erl
@@ -105,8 +105,7 @@ function({function,Name,Arity,Entry,Is}, FIndex) ->
D = #btb{f=FIndex,index=Index},
{function,Name,Arity,Entry,btb_opt_1(Is, D, [])}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -125,20 +124,21 @@ btb_opt_1([{test,bs_get_binary2,F,_,[Reg,{atom,all},U,Fs],Reg}=I0|Is], D, Acc0)
end,
btb_opt_1(Is, D, Acc)
end;
-btb_opt_1([{test,bs_get_binary2,F,_,[Ctx,{atom,all},U,Fs],Dst}=I0|Is], D, Acc0) ->
- case btb_reaches_match(Is, [Ctx,Dst], D) of
+btb_opt_1([{test,bs_get_binary2,F,_,[Ctx,{atom,all},U,Fs],Dst}=I0|Is0], D, Acc0) ->
+ case btb_reaches_match(Is0, [Ctx,Dst], D) of
{error,Reason} ->
Comment = btb_comment_no_opt(Reason, Fs),
- btb_opt_1(Is, D, [Comment,I0|Acc0]);
+ btb_opt_1(Is0, D, [Comment,I0|Acc0]);
{ok,MustSave} when U =:= 1 ->
Comment = btb_comment_opt(Fs),
- Acc1 = btb_gen_save(MustSave, Ctx, [Comment|Acc0]),
- Acc = [{move,Ctx,Dst}|Acc1],
+ Acc = btb_gen_save(MustSave, Ctx, [Comment|Acc0]),
+ Is = prepend_move(Ctx, Dst, Is0),
btb_opt_1(Is, D, Acc);
{ok,MustSave} ->
Comment = btb_comment_opt(Fs),
Acc1 = btb_gen_save(MustSave, Ctx, [Comment|Acc0]),
- Acc = [{move,Ctx,Dst},{test,bs_test_unit,F,[Ctx,U]}|Acc1],
+ Acc = [{test,bs_test_unit,F,[Ctx,U]}|Acc1],
+ Is = prepend_move(Ctx, Dst, Is0),
btb_opt_1(Is, D, Acc)
end;
btb_opt_1([I|Is], D, Acc) ->
@@ -151,6 +151,12 @@ btb_gen_save(true, Reg, Acc) ->
[{bs_save2,Reg,{atom,start}}|Acc];
btb_gen_save(false, _, Acc) -> Acc.
+prepend_move(Ctx, Dst, [{block,Bl0}|Is]) ->
+ Bl = [{set,[Dst],[Ctx],move}|Bl0],
+ [{block,Bl}|Is];
+prepend_move(Ctx, Dst, Is) ->
+ [{move,Ctx,Dst}|Is].
+
%% btb_reaches_match([Instruction], [Register], D) ->
%% {ok,MustSave}|{error,Reason}
%%
diff --git a/lib/compiler/src/beam_clean.erl b/lib/compiler/src/beam_clean.erl
index b736d39f9c..e094c2c320 100644
--- a/lib/compiler/src/beam_clean.erl
+++ b/lib/compiler/src/beam_clean.erl
@@ -24,7 +24,7 @@
-export([module/2]).
-export([bs_clean_saves/1]).
-export([clean_labels/1]).
--import(lists, [map/2,foldl/3,reverse/1,filter/2]).
+-import(lists, [foldl/3,reverse/1,filter/2]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
@@ -118,7 +118,7 @@ add_to_work_list(F, {Fs,Used}=Sets) ->
clean_labels(Fs0) ->
St0 = #st{lmap=[],entry=1,lc=1},
{Fs1,#st{lmap=Lmap0,lc=Lc}} = function_renumber(Fs0, St0, []),
- Lmap = gb_trees:from_orddict(ordsets:from_list(Lmap0)),
+ Lmap = maps:from_list(Lmap0),
Fs = function_replace(Fs1, Lmap, []),
{Fs,Lc}.
@@ -187,7 +187,8 @@ is_record_tuple(_, _, _) -> no.
function_replace([{function,Name,Arity,Entry,Asm0}|Fs], Dict, Acc) ->
Asm = try
- replace(Asm0, [], Dict)
+ Fb = fun(Old) -> throw({error,{undefined_label,Old}}) end,
+ beam_utils:replace_labels(Asm0, [], Dict, Fb)
catch
throw:{error,{undefined_label,Lbl}=Reason} ->
io:format("Function ~s/~w refers to undefined label ~w\n",
@@ -197,57 +198,6 @@ function_replace([{function,Name,Arity,Entry,Asm0}|Fs], Dict, Acc) ->
function_replace(Fs, Dict, [{function,Name,Arity,Entry,Asm}|Acc]);
function_replace([], _, Acc) -> Acc.
-replace([{test,Test,{f,Lbl},Ops}|Is], Acc, D) ->
- replace(Is, [{test,Test,{f,label(Lbl, D)},Ops}|Acc], D);
-replace([{test,Test,{f,Lbl},Live,Ops,Dst}|Is], Acc, D) ->
- replace(Is, [{test,Test,{f,label(Lbl, D)},Live,Ops,Dst}|Acc], D);
-replace([{select,I,R,{f,Fail0},Vls0}|Is], Acc, D) ->
- Vls = map(fun ({f,L}) -> {f,label(L, D)};
- (Other) -> Other
- end, Vls0),
- Fail = label(Fail0, D),
- replace(Is, [{select,I,R,{f,Fail},Vls}|Acc], D);
-replace([{'try',R,{f,Lbl}}|Is], Acc, D) ->
- replace(Is, [{'try',R,{f,label(Lbl, D)}}|Acc], D);
-replace([{'catch',R,{f,Lbl}}|Is], Acc, D) ->
- replace(Is, [{'catch',R,{f,label(Lbl, D)}}|Acc], D);
-replace([{jump,{f,Lbl}}|Is], Acc, D) ->
- replace(Is, [{jump,{f,label(Lbl, D)}}|Acc], D);
-replace([{loop_rec,{f,Lbl},R}|Is], Acc, D) ->
- replace(Is, [{loop_rec,{f,label(Lbl, D)},R}|Acc], D);
-replace([{loop_rec_end,{f,Lbl}}|Is], Acc, D) ->
- replace(Is, [{loop_rec_end,{f,label(Lbl, D)}}|Acc], D);
-replace([{wait,{f,Lbl}}|Is], Acc, D) ->
- replace(Is, [{wait,{f,label(Lbl, D)}}|Acc], D);
-replace([{wait_timeout,{f,Lbl},To}|Is], Acc, D) ->
- replace(Is, [{wait_timeout,{f,label(Lbl, D)},To}|Acc], D);
-replace([{bif,Name,{f,Lbl},As,R}|Is], Acc, D) when Lbl =/= 0 ->
- replace(Is, [{bif,Name,{f,label(Lbl, D)},As,R}|Acc], D);
-replace([{gc_bif,Name,{f,Lbl},Live,As,R}|Is], Acc, D) when Lbl =/= 0 ->
- replace(Is, [{gc_bif,Name,{f,label(Lbl, D)},Live,As,R}|Acc], D);
-replace([{call,Ar,{f,Lbl}}|Is], Acc, D) ->
- replace(Is, [{call,Ar,{f,label(Lbl,D)}}|Acc], D);
-replace([{make_fun2,{f,Lbl},U1,U2,U3}|Is], Acc, D) ->
- replace(Is, [{make_fun2,{f,label(Lbl, D)},U1,U2,U3}|Acc], D);
-replace([{bs_init,{f,Lbl},Info,Live,Ss,Dst}|Is], Acc, D) when Lbl =/= 0 ->
- replace(Is, [{bs_init,{f,label(Lbl, D)},Info,Live,Ss,Dst}|Acc], D);
-replace([{bs_put,{f,Lbl},Info,Ss}|Is], Acc, D) when Lbl =/= 0 ->
- replace(Is, [{bs_put,{f,label(Lbl, D)},Info,Ss}|Acc], D);
-replace([{put_map=I,{f,Lbl},Op,Src,Dst,Live,List}|Is], Acc, D)
- when Lbl =/= 0 ->
- replace(Is, [{I,{f,label(Lbl, D)},Op,Src,Dst,Live,List}|Acc], D);
-replace([{get_map_elements=I,{f,Lbl},Src,List}|Is], Acc, D) when Lbl =/= 0 ->
- replace(Is, [{I,{f,label(Lbl, D)},Src,List}|Acc], D);
-replace([I|Is], Acc, D) ->
- replace(Is, [I|Acc], D);
-replace([], Acc, _) -> Acc.
-
-label(Old, D) ->
- case gb_trees:lookup(Old, D) of
- {value,Val} -> Val;
- none -> throw({error,{undefined_label,Old}})
- end.
-
%%%
%%% Final fixup of bs_start_match2/5,bs_save2/bs_restore2 instructions for
%%% new bit syntax matching (introduced in R11B).
diff --git a/lib/compiler/src/beam_dead.erl b/lib/compiler/src/beam_dead.erl
index d379fdc4eb..dbbaae05eb 100644
--- a/lib/compiler/src/beam_dead.erl
+++ b/lib/compiler/src/beam_dead.erl
@@ -56,8 +56,7 @@ function({function,Name,Arity,CLabel,Is0}, Lc0) ->
Is = move_move_into_block(Is3, []),
{{function,Name,Arity,CLabel,Is},Lc}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -272,7 +271,8 @@ backward([{jump,{f,To0}},{move,Src,Reg}=Move|Is], D, Acc) ->
end;
backward([{jump,{f,To}}=J|[{bif,Op,{f,BifFail},Ops,Reg}|Is]=Is0], D, Acc) ->
try replace_comp_op(To, Reg, Op, Ops, D) of
- I -> backward(Is, D, I++Acc)
+ {Test,Jump} ->
+ backward([Jump,Test|Is], D, Acc)
catch
throw:not_possible ->
case To =:= BifFail of
@@ -294,24 +294,25 @@ backward([{jump,{f,To}}=J|[{gc_bif,_,{f,To},_,_,_Dst}|Is]], D, Acc) ->
%% register is initialized, and it is therefore no need to test
%% for liveness of the destination register at label To.
backward([J|Is], D, Acc);
-backward([{test,bs_start_match2,F,Live,[R,_]=Args,Ctxt}|Is], D,
- [{test,bs_match_string,F,[Ctxt,Bs]},
- {test,bs_test_tail2,F,[Ctxt,0]}|Acc0]=Acc) ->
+backward([{test,bs_start_match2,F,Live,[Src,_]=Args,Ctxt}|Is], D, Acc0) ->
{f,To0} = F,
- case beam_utils:is_killed(Ctxt, Acc0, D) of
- true ->
- To = shortcut_bs_context_to_binary(To0, R, D),
- Eq = {test,is_eq_exact,{f,To},[R,{literal,Bs}]},
- backward(Is, D, [Eq|Acc0]);
- false ->
- To = shortcut_bs_start_match(To0, R, D),
- I = {test,bs_start_match2,{f,To},Live,Args,Ctxt},
- backward(Is, D, [I|Acc])
+ case test_bs_literal(F, Ctxt, D, Acc0) of
+ {none,Acc} ->
+ %% Ctxt killed immediately after bs_start_match2.
+ To = shortcut_bs_context_to_binary(To0, Src, D),
+ I = {test,is_bitstr,{f,To},[Src]},
+ backward(Is, D, [I|Acc]);
+ {Literal,Acc} ->
+ %% Ctxt killed after matching a literal.
+ To = shortcut_bs_context_to_binary(To0, Src, D),
+ Eq = {test,is_eq_exact,{f,To},[Src,{literal,Literal}]},
+ backward(Is, D, [Eq|Acc]);
+ not_killed ->
+ %% Ctxt not killed. Not much to do.
+ To = shortcut_bs_start_match(To0, Src, D),
+ I = {test,bs_start_match2,{f,To},Live,Args,Ctxt},
+ backward(Is, D, [I|Acc0])
end;
-backward([{test,bs_start_match2,{f,To0},Live,[Src|_]=Info,Dst}|Is], D, Acc) ->
- To = shortcut_bs_start_match(To0, Src, D),
- I = {test,bs_start_match2,{f,To},Live,Info,Dst},
- backward(Is, D, [I|Acc]);
backward([{test,Op,{f,To0},Ops0}|Is], D, Acc) ->
To1 = shortcut_bs_test(To0, Is, D),
To2 = shortcut_label(To1, D),
@@ -446,7 +447,7 @@ prune_redundant([], _) -> [].
replace_comp_op(To, Reg, Op, Ops, D) ->
False = comp_op_find_shortcut(To, Reg, {atom,false}, D),
True = comp_op_find_shortcut(To, Reg, {atom,true}, D),
- [bif_to_test(Op, Ops, False),{jump,{f,True}}].
+ {bif_to_test(Op, Ops, False),{jump,{f,True}}}.
comp_op_find_shortcut(To0, Reg, Val, D) ->
case shortcut_select_label(To0, Reg, Val, D) of
@@ -483,15 +484,22 @@ not_possible() -> throw(not_possible).
%% F1: is_eq_exact F2 Reg Lit2 F1: is_eq_exact F2 Reg Lit2
%% L2: .... L2:
%%
-combine_eqs(To, [Reg,{Type,_}=Lit1]=Ops, D, [{label,L1}|_])
- when Type =:= atom; Type =:= integer ->
+combine_eqs(To, [Reg,{Type,_}=Lit1]=Ops, D, Acc)
+ when Type =:= atom; Type =:= integer ->
+ Next = case Acc of
+ [{label,Lbl}|_] -> Lbl;
+ [{jump,{f,Lbl}}|_] -> Lbl
+ end,
case beam_utils:code_at(To, D) of
[{test,is_eq_exact,{f,F2},[Reg,{Type,_}=Lit2]},
{label,L2}|_] when Lit1 =/= Lit2 ->
- {select,select_val,Reg,{f,F2},[Lit1,{f,L1},Lit2,{f,L2}]};
+ {select,select_val,Reg,{f,F2},[Lit1,{f,Next},Lit2,{f,L2}]};
+ [{test,is_eq_exact,{f,F2},[Reg,{Type,_}=Lit2]},
+ {jump,{f,L2}}|_] when Lit1 =/= Lit2 ->
+ {select,select_val,Reg,{f,F2},[Lit1,{f,Next},Lit2,{f,L2}]};
[{select,select_val,Reg,{f,F2},[{Type,_}|_]=List0}|_] ->
List = remove_from_list(Lit1, List0),
- {select,select_val,Reg,{f,F2},[Lit1,{f,L1}|List]};
+ {select,select_val,Reg,{f,F2},[Lit1,{f,Next}|List]};
_Is ->
{test,is_eq_exact,{f,To},Ops}
end;
@@ -504,6 +512,22 @@ remove_from_list(Lit, [Val,{f,_}=Fail|T]) ->
[Val,Fail|remove_from_list(Lit, T)];
remove_from_list(_, []) -> [].
+
+test_bs_literal(F, Ctxt, D,
+ [{test,bs_match_string,F,[Ctxt,Bs]},
+ {test,bs_test_tail2,F,[Ctxt,0]}|Acc]) ->
+ test_bs_literal_1(Ctxt, Acc, D, Bs);
+test_bs_literal(F, Ctxt, D, [{test,bs_test_tail2,F,[Ctxt,0]}|Acc]) ->
+ test_bs_literal_1(Ctxt, Acc, D, <<>>);
+test_bs_literal(_, Ctxt, D, Acc) ->
+ test_bs_literal_1(Ctxt, Acc, D, none).
+
+test_bs_literal_1(Ctxt, Is, D, Literal) ->
+ case beam_utils:is_killed(Ctxt, Is, D) of
+ true -> {Literal,Is};
+ false -> not_killed
+ end.
+
%% shortcut_bs_test(TargetLabel, ReversedInstructions, D) -> TargetLabel'
%% Try to shortcut the failure label for bit syntax matching.
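A source-level illustration (mine, not from the commit) of the case that test_bs_literal/4 above recognizes: a clause that matches a binary literal and then requires an empty tail, for example

    f(<<"abc">>) -> ok;
    f(_) -> error.

Here the match context is killed right after the bs_match_string and bs_test_tail2 tests, so the whole bs_start_match2 sequence can be replaced by a single is_eq_exact test against the literal <<"abc">>, jumping to the (shortcut) failure label otherwise.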
diff --git a/lib/compiler/src/beam_disasm.erl b/lib/compiler/src/beam_disasm.erl
index 8fd0b36d05..a68c4b5367 100644
--- a/lib/compiler/src/beam_disasm.erl
+++ b/lib/compiler/src/beam_disasm.erl
@@ -163,8 +163,8 @@ pp_instr(I) ->
file(File) ->
try process_chunks(File)
- catch error:Reason ->
- {error,?MODULE,{internal,{Reason,erlang:get_stacktrace()}}}
+ catch error:Reason:Stack ->
+ {error,?MODULE,{internal,{Reason,Stack}}}
end.
%%-----------------------------------------------------------------------
@@ -719,42 +719,6 @@ resolve_inst({wait,[Lbl]},_,_,_) ->
{wait,Lbl};
resolve_inst({wait_timeout,[Lbl,Int]},_,_,_) ->
{wait_timeout,Lbl,resolve_arg(Int)};
-resolve_inst({m_plus,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'+',W,[SrcR1,SrcR2],DstR};
-resolve_inst({m_minus,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'-',W,[SrcR1,SrcR2],DstR};
-resolve_inst({m_times,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'*',W,[SrcR1,SrcR2],DstR};
-resolve_inst({m_div,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'/',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_div,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'div',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_rem,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'rem',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_band,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'band',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_bor,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'bor',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_bxor,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'bxor',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_bsl,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'bsl',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_bsr,Args},_,_,_) ->
- [W,SrcR1,SrcR2,DstR] = resolve_args(Args),
- {arithbif,'bsr',W,[SrcR1,SrcR2],DstR};
-resolve_inst({int_bnot,Args},_,_,_) ->
- [W,SrcR,DstR] = resolve_args(Args),
- {arithbif,'bnot',W,[SrcR],DstR};
resolve_inst({is_lt=I,Args0},_,_,_) ->
[L|Args] = resolve_args(Args0),
{test,I,L,Args};
@@ -800,9 +764,6 @@ resolve_inst({is_nil=I,Args0},_,_,_) ->
resolve_inst({is_binary=I,Args0},_,_,_) ->
[L|Args] = resolve_args(Args0),
{test,I,L,Args};
-resolve_inst({is_constant=I,Args0},_,_,_) ->
- [L|Args] = resolve_args(Args0),
- {test,I,L,Args};
resolve_inst({is_list=I,Args0},_,_,_) ->
[L|Args] = resolve_args(Args0),
{test,I,L,Args};
@@ -840,11 +801,6 @@ resolve_inst({get_tuple_element,[Src,{u,Off},Dst]},_,_,_) ->
{get_tuple_element,resolve_arg(Src),Off,resolve_arg(Dst)};
resolve_inst({set_tuple_element,[Src,Dst,{u,Off}]},_,_,_) ->
{set_tuple_element,resolve_arg(Src),resolve_arg(Dst),Off};
-resolve_inst({put_string,[{u,Len},{u,Off},Dst]},_,Strings,_) ->
- String = if Len > 0 -> binary_to_list(Strings, Off+1, Off+Len);
- true -> ""
- end,
- {put_string,Len,{string,String},Dst};
resolve_inst({put_list,[Src1,Src2,Dst]},_,_,_) ->
{put_list,resolve_arg(Src1),resolve_arg(Src2),Dst};
resolve_inst({put_tuple,[{u,Arity},Dst]},_,_,_) ->
@@ -859,9 +815,6 @@ resolve_inst({case_end,[X]},_,_,_) ->
{case_end,resolve_arg(X)};
resolve_inst({call_fun,[{u,N}]},_,_,_) ->
{call_fun,N};
-resolve_inst({make_fun,Args},_,_,Lbls) ->
- [{f,L},Magic,FreeVars] = resolve_args(Args),
- {make_fun,lookup(L,Lbls),Magic,FreeVars};
resolve_inst({is_function=I,Args0},_,_,_) ->
[L|Args] = resolve_args(Args0),
{test,I,L,Args};
@@ -870,30 +823,6 @@ resolve_inst({call_ext_only,[{u,N},{u,MFAix}]},Imports,_,_) ->
%%
%% Instructions for handling binaries added in R7A & R7B
%%
-resolve_inst({bs_start_match,[F,Reg]},_,_,_) ->
- {bs_start_match,F,Reg};
-resolve_inst({bs_get_integer=I,[Lbl,Arg2,{u,N},{u,U},Arg5]},_,_,_) ->
- [A2,A5] = resolve_args([Arg2,Arg5]),
- {test,I,Lbl,[A2,N,decode_field_flags(U),A5]};
-resolve_inst({bs_get_float=I,[Lbl,Arg2,{u,N},{u,U},Arg5]},_,_,_) ->
- [A2,A5] = resolve_args([Arg2,Arg5]),
- {test,I,Lbl,[A2,N,decode_field_flags(U),A5]};
-resolve_inst({bs_get_binary=I,[Lbl,Arg2,{u,N},{u,U},Arg5]},_,_,_) ->
- [A2,A5] = resolve_args([Arg2,Arg5]),
- {test,I,Lbl,[A2,N,decode_field_flags(U),A5]};
-resolve_inst({bs_skip_bits,[Lbl,Arg2,{u,N},{u,U}]},_,_,_) ->
- A2 = resolve_arg(Arg2),
- {test,bs_skip_bits,Lbl,[A2,N,decode_field_flags(U)]};
-resolve_inst({bs_test_tail,[F,{u,N}]},_,_,_) ->
- {test,bs_test_tail,F,[N]};
-resolve_inst({bs_save,[{u,N}]},_,_,_) ->
- {bs_save,N};
-resolve_inst({bs_restore,[{u,N}]},_,_,_) ->
- {bs_restore,N};
-resolve_inst({bs_init,[{u,N},{u,U}]},_,_,_) ->
- {bs_init,N,decode_field_flags(U)};
-resolve_inst({bs_final,[F,X]},_,_,_) ->
- {bs_final,F,X};
resolve_inst({bs_put_integer,[Lbl,Arg2,{u,N},{u,U},Arg5]},_,_,_) ->
[A2,A5] = resolve_args([Arg2,Arg5]),
{bs_put_integer,Lbl,A2,N,decode_field_flags(U),A5};
@@ -908,8 +837,6 @@ resolve_inst({bs_put_string,[{u,Len},{u,Off}]},_,Strings,_) ->
true -> ""
end,
{bs_put_string,Len,{string,String}};
-resolve_inst({bs_need_buf,[{u,N}]},_,_,_) ->
- {bs_need_buf,N};
%%
%% Instructions for handling floating point numbers added in June 2001 (R8).
@@ -961,9 +888,6 @@ resolve_inst({raise,[_Reg1,_Reg2]=Regs},_,_,_) ->
resolve_inst({bs_init2,[Lbl,Arg2,{u,W},{u,R},{u,F},Arg6]},_,_,_) ->
[A2,A6] = resolve_args([Arg2,Arg6]),
{bs_init2,Lbl,A2,W,R,decode_field_flags(F),A6};
-resolve_inst({bs_bits_to_bytes,[Lbl,Arg2,Arg3]},_,_,_) ->
- [A2,A3] = resolve_args([Arg2,Arg3]),
- {bs_bits_to_bytes,Lbl,A2,A3};
resolve_inst({bs_add=I,[Lbl,Arg2,Arg3,Arg4,Arg5]},_,_,_) ->
[A2,A3,A4,A5] = resolve_args([Arg2,Arg3,Arg4,Arg5]),
{I,Lbl,[A2,A3,A4],A5};
@@ -1041,12 +965,6 @@ resolve_inst({gc_bif3,Args},Imports,_,_) ->
{gc_bif,BifName,F,Live,[A1,A2,A3],Reg};
%%
-%% New instructions for creating non-byte aligned binaries.
-%%
-resolve_inst({bs_final2,[X,Y]},_,_,_) ->
- {bs_final2,X,Y};
-
-%%
%% R11B-5.
%%
resolve_inst({is_bitstr=I,Args0},_,_,_) ->
@@ -1165,6 +1083,19 @@ resolve_inst({get_map_elements,Args0},_,_,_) ->
{get_map_elements,FLbl,Src,{list,List}};
%%
+%% OTP 21.
+%%
+
+resolve_inst({build_stacktrace,[]},_,_,_) ->
+ build_stacktrace;
+resolve_inst({raw_raise,[]},_,_,_) ->
+ raw_raise;
+resolve_inst({get_hd,[Src,Dst]},_,_,_) ->
+ {get_hd,Src,Dst};
+resolve_inst({get_tl,[Src,Dst]},_,_,_) ->
+ {get_tl,Src,Dst};
+
+%%
%% Catches instructions that are not yet handled.
%%
resolve_inst(X,_,_,_) -> ?exit({resolve_inst,X}).
diff --git a/lib/compiler/src/beam_disasm.hrl b/lib/compiler/src/beam_disasm.hrl
index d968cd9587..c3326c15a0 100644
--- a/lib/compiler/src/beam_disasm.hrl
+++ b/lib/compiler/src/beam_disasm.hrl
@@ -26,7 +26,8 @@
%% IT SHOULD BE MOVED TO A FILE THAT DEFINES (AND EXPORTS)
%% PROPER TYPES FOR THE SET OF BEAM INSTRUCTIONS.
%%
--type beam_instr() :: 'bs_init_writable' | 'fclearerror' | 'if_end'
+-type beam_instr() :: 'bs_init_writable' | 'build_stacktrace'
+ | 'fclearerror' | 'if_end' | 'raw_raise'
| 'remove_message' | 'return' | 'send' | 'timeout'
| tuple(). %% XXX: Very underspecified - FIX THIS
diff --git a/lib/compiler/src/beam_except.erl b/lib/compiler/src/beam_except.erl
index 9801c68ee2..abd39c661d 100644
--- a/lib/compiler/src/beam_except.erl
+++ b/lib/compiler/src/beam_except.erl
@@ -45,8 +45,7 @@ function({function,Name,Arity,CLabel,Is0}) ->
Is = function_1(Is0),
{function,Name,Arity,CLabel,Is}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
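The identical changes in this and the surrounding passes all switch from the deprecated erlang:get_stacktrace/0 to the three-part catch clause introduced in OTP 21. A minimal standalone comparison (f/0 is just a placeholder):

    %% Before:
    try f()
    catch
        Class:Error ->
            Stack = erlang:get_stacktrace(),
            erlang:raise(Class, Error, Stack)
    end

    %% After (OTP 21):
    try f()
    catch
        Class:Error:Stack ->
            erlang:raise(Class, Error, Stack)
    end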
diff --git a/lib/compiler/src/beam_flatten.erl b/lib/compiler/src/beam_flatten.erl
index a4d45a4ca6..4045ab6dc5 100644
--- a/lib/compiler/src/beam_flatten.erl
+++ b/lib/compiler/src/beam_flatten.erl
@@ -50,6 +50,9 @@ norm_block([{set,[],[],{alloc,R,Alloc}}|Is], Acc0) ->
Acc ->
norm_block(Is, Acc)
end;
+norm_block([{set,[D1],[S],get_hd},{set,[D2],[S],get_tl}|Is], Acc) ->
+ I = {get_list,S,D1,D2},
+ norm_block(Is, [I|Acc]);
norm_block([I|Is], Acc) -> norm_block(Is, [norm(I)|Acc]);
norm_block([], Acc) -> Acc.
@@ -64,7 +67,8 @@ norm({set,[D],[],{put_tuple,A}}) -> {put_tuple,A,D};
norm({set,[],[S],put}) -> {put,S};
norm({set,[D],[S],{get_tuple_element,I}}) -> {get_tuple_element,S,I,D};
norm({set,[],[S,D],{set_tuple_element,I}}) -> {set_tuple_element,S,D,I};
-norm({set,[D1,D2],[S],get_list}) -> {get_list,S,D1,D2};
+norm({set,[D],[S],get_hd}) -> {get_hd,S,D};
+norm({set,[D],[S],get_tl}) -> {get_tl,S,D};
norm({set,[D],[S|Puts],{alloc,R,{put_map,Op,F}}}) ->
{put_map,F,Op,S,D,R,{list,Puts}};
norm({set,[],[],remove_message}) -> remove_message;
diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl
index 4365451356..c33de217bd 100644
--- a/lib/compiler/src/beam_jump.erl
+++ b/lib/compiler/src/beam_jump.erl
@@ -71,9 +71,9 @@
%%%
%%% jump L2
%%% . . .
-%%% L1:
%%% L2: ...
%%%
+%%% and all preceding uses of L1 renamed to L2.
%%% If the jump is unreachable, it will be removed according to (1).
%%%
%%% (5) In
@@ -156,41 +156,46 @@ function({function,Name,Arity,CLabel,Asm0}) ->
%%%
share(Is0) ->
- %% We will get more sharing if we never fall through to a label.
- Is = eliminate_fallthroughs(Is0, []),
- share_1(Is, #{}, [], []).
+ Is1 = eliminate_fallthroughs(Is0, []),
+ Is2 = find_fixpoint(fun(Is) ->
+ share_1(Is, #{}, #{}, [], [])
+ end, Is1),
+ reverse(Is2).
-share_1([{label,L}=Lbl|Is], Dict0, [_|_]=Seq, Acc) ->
+share_1([{label,L}=Lbl|Is], Dict0, Lbls0, [_|_]=Seq, Acc) ->
case maps:find(Seq, Dict0) of
error ->
Dict = maps:put(Seq, L, Dict0),
- share_1(Is, Dict, [], [Lbl|Seq ++ Acc]);
+ share_1(Is, Dict, Lbls0, [], [Lbl|Seq ++ Acc]);
{ok,Label} ->
- share_1(Is, Dict0, [], [Lbl,{jump,{f,Label}}|Acc])
+ Lbls = maps:put(L, Label, Lbls0),
+ share_1(Is, Dict0, Lbls, [], [Lbl,{jump,{f,Label}}|Acc])
end;
-share_1([{func_info,_,_,_}=I|Is], _, [], Acc) ->
- reverse(Is, [I|Acc]);
-share_1([{'catch',_,_}=I|Is], Dict0, Seq, Acc) ->
- Dict = clean_non_sharable(Dict0),
- share_1(Is, Dict, [I|Seq], Acc);
-share_1([{'try',_,_}=I|Is], Dict0, Seq, Acc) ->
- Dict = clean_non_sharable(Dict0),
- share_1(Is, Dict, [I|Seq], Acc);
-share_1([{try_case,_}=I|Is], Dict0, Seq, Acc) ->
- Dict = clean_non_sharable(Dict0),
- share_1(Is, Dict, [I|Seq], Acc);
-share_1([{catch_end,_}=I|Is], Dict0, Seq, Acc) ->
- Dict = clean_non_sharable(Dict0),
- share_1(Is, Dict, [I|Seq], Acc);
-share_1([I|Is], Dict, Seq, Acc) ->
+share_1([{func_info,_,_,_}|_]=Is, _, Lbls, [], Acc) when Lbls =/= #{} ->
+ beam_utils:replace_labels(Acc, Is, Lbls, fun(Old) -> Old end);
+share_1([{func_info,_,_,_}|_]=Is, _, Lbls, [], Acc) when Lbls =:= #{} ->
+ reverse(Acc, Is);
+share_1([{'catch',_,_}=I|Is], Dict0, Lbls0, Seq, Acc) ->
+ {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0),
+ share_1(Is, Dict, Lbls, [I|Seq], Acc);
+share_1([{'try',_,_}=I|Is], Dict0, Lbls0, Seq, Acc) ->
+ {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0),
+ share_1(Is, Dict, Lbls, [I|Seq], Acc);
+share_1([{try_case,_}=I|Is], Dict0, Lbls0, Seq, Acc) ->
+ {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0),
+ share_1(Is, Dict, Lbls, [I|Seq], Acc);
+share_1([{catch_end,_}=I|Is], Dict0, Lbls0, Seq, Acc) ->
+ {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0),
+ share_1(Is, Dict, Lbls, [I|Seq], Acc);
+share_1([I|Is], Dict, Lbls, Seq, Acc) ->
case is_unreachable_after(I) of
false ->
- share_1(Is, Dict, [I|Seq], Acc);
+ share_1(Is, Dict, Lbls, [I|Seq], Acc);
true ->
- share_1(Is, Dict, [I], Acc)
+ share_1(Is, Dict, Lbls, [I], Acc)
end.
-clean_non_sharable(Dict) ->
+clean_non_sharable(Dict0, Lbls0) ->
%% We are passing in or out of a 'catch' or 'try' block. Remove
%% sequences that should not be shared over the boundaries of the
%% block. Since the end of the sequence must match, the only
@@ -198,7 +203,17 @@ clean_non_sharable(Dict) ->
%% the 'catch'/'try' block is a sequence that ends with an
%% instruction that causes an exception. Any sequence that causes
%% an exception must contain a line/1 instruction.
- maps:filter(fun(K, _V) -> sharable_with_try(K) end, Dict).
+ Dict1 = maps:to_list(Dict0),
+ Lbls1 = maps:to_list(Lbls0),
+ {Dict2,Lbls2} = foldl(fun({K, V}, {Dict,Lbls}) ->
+ case sharable_with_try(K) of
+ true ->
+ {[{K,V}|Dict],lists:keydelete(V, 2, Lbls)};
+ false ->
+ {Dict,Lbls}
+ end
+ end, {[],Lbls1}, Dict1),
+ {maps:from_list(Dict2),maps:from_list(Lbls2)}.
sharable_with_try([{line,_}|_]) ->
%% This sequence may cause an exception and may potentially
@@ -275,14 +290,15 @@ extract_seq_1(_, _) -> no.
-record(st,
{
entry :: beam_asm:label(), %Entry label (must not be moved).
- mlbl :: #{beam_asm:label() := [beam_asm:label()]}, %Moved labels.
- labels :: cerl_sets:set() %Set of referenced labels.
+ replace :: #{beam_asm:label() := beam_asm:label()}, %Labels to replace.
+ labels :: cerl_sets:set(), %Set of referenced labels.
+ index :: beam_utils:code_index() | {lazy,[beam_utils:instruction()]} %Index built lazily only if needed
}).
opt(Is0, CLabel) ->
find_fixpoint(fun(Is) ->
Lbls = initial_labels(Is),
- St = #st{entry=CLabel,mlbl=#{},labels=Lbls},
+ St = #st{entry=CLabel,replace=#{},labels=Lbls,index={lazy,Is}},
opt(Is, [], St)
end, Is0).
@@ -292,7 +308,7 @@ find_fixpoint(OptFun, Is0) ->
Is -> find_fixpoint(OptFun, Is)
end.
-opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc, St) ->
+opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc0, St0) ->
%% We have
%% Test Label Ops
%% jump Label
@@ -301,10 +317,23 @@ opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc, St) ->
case beam_utils:is_pure_test(I) of
false ->
%% Test is not pure; we must keep it.
- opt(Is, [I|Acc], label_used(Lbl, St));
+ opt(Is, [I|Acc0], label_used(Lbl, St0));
true ->
%% The test is pure and its failure label is the same
%% as in the jump that follows -- thus it is not needed.
+ %% Check if any of the previous instructions could also be eliminated.
+ {Acc,St} = opt_useless_loads(Acc0, L, St0),
+ opt(Is, Acc, St)
+ end;
+opt([{test,_,{f,L}=Lbl,_}=I|[{label,L}|_]=Is], Acc0, St0) ->
+ %% Similar to the above, except we have a fall-through rather than jump
+ %% Test Label Ops
+ %% label Label
+ case beam_utils:is_pure_test(I) of
+ false ->
+ opt(Is, [I|Acc0], label_used(Lbl, St0));
+ true ->
+ {Acc,St} = opt_useless_loads(Acc0, L, St0),
opt(Is, Acc, St)
end;
opt([{test,Test0,{f,L}=Lbl,Ops}=I|[{jump,To}|Is]=Is0], Acc, St) ->
@@ -326,30 +355,16 @@ opt([{test,_,{f,_}=Lbl,_,_,_}=I|Is], Acc, St) ->
opt(Is, [I|Acc], label_used(Lbl, St));
opt([{select,_,_R,Fail,Vls}=I|Is], Acc, St) ->
skip_unreachable(Is, [I|Acc], label_used([Fail|Vls], St));
-opt([{label,Lbl}=I|Is], Acc, #st{mlbl=Mlbl}=St0) ->
- case maps:find(Lbl, Mlbl) of
- {ok,Lbls} ->
- %% Essential to remove the list of labels from the dictionary,
- %% since we will rescan the inserted labels. We MUST rescan.
- St = St0#st{mlbl=maps:remove(Lbl, Mlbl)},
- insert_labels([Lbl|Lbls], Is, Acc, St);
- error ->
- opt(Is, [I|Acc], St0)
- end;
+opt([{label,From}=I,{label,To}|Is], Acc, #st{replace=Replace}=St) ->
+ opt([I|Is], Acc, St#st{replace=Replace#{To => From}});
opt([{jump,{f,_}=X}|[{label,_},{jump,X}|_]=Is], Acc, St) ->
opt(Is, Acc, St);
opt([{jump,{f,Lbl}}|[{label,Lbl}|_]=Is], Acc, St) ->
opt(Is, Acc, St);
-opt([{jump,{f,L}=Lbl}=I|Is], Acc0, #st{mlbl=Mlbl0}=St0) ->
- %% All labels before this jump instruction should now be
- %% moved to the location of the jump's target.
- {Lbls,Acc} = collect_labels(Acc0, St0),
- St = case Lbls of
- [] -> St0;
- [_|_] ->
- Mlbl = maps_append_list(L, Lbls, Mlbl0),
- St0#st{mlbl=Mlbl}
- end,
+opt([{jump,{f,L}=Lbl}=I|Is], Acc0, St0) ->
+ %% Redirect all labels that precede this jump instruction to the
+ %% location of the jump's target.
+ {Acc,St} = collect_labels(Acc0, L, St0),
skip_unreachable(Is, [I|Acc], label_used(Lbl, St));
%% Optimization: quickly handle some common instructions that don't
%% have any failure labels and where is_unreachable_after(I) =:= false.
@@ -369,36 +384,72 @@ opt([I|Is], Acc, #st{labels=Used0}=St0) ->
true -> skip_unreachable(Is, [I|Acc], St);
false -> opt(Is, [I|Acc], St)
end;
-opt([], Acc, #st{mlbl=Mlbl}) ->
- Code = reverse(Acc),
- insert_fc_labels(Code, Mlbl).
-
-insert_fc_labels([{label,L}=I|Is0], Mlbl) ->
- case maps:find(L, Mlbl) of
- error ->
- [I|insert_fc_labels(Is0, Mlbl)];
- {ok,Lbls} ->
- Is = [{label,Lb} || Lb <- Lbls] ++ Is0,
- [I|insert_fc_labels(Is, maps:remove(L, Mlbl))]
+opt([], Acc, #st{replace=Replace0}) when Replace0 =/= #{} ->
+ Replace = normalize_replace(maps:to_list(Replace0), Replace0, []),
+ beam_utils:replace_labels(Acc, [], Replace, fun(Old) -> Old end);
+opt([], Acc, #st{replace=Replace}) when Replace =:= #{} ->
+ reverse(Acc).
+
+normalize_replace([{From,To0}|Rest], Replace, Acc) ->
+ case Replace of
+ #{To0 := To} ->
+ normalize_replace([{From,To}|Rest], Replace, Acc);
+ _ ->
+ normalize_replace(Rest, Replace, [{From,To0}|Acc])
end;
-insert_fc_labels([_|_]=Is, _) -> Is.
-
-maps_append_list(K,Vs,M) ->
- case M of
- #{K:=Vs0} -> M#{K:=Vs0++Vs}; % same order as dict
- _ -> M#{K => Vs}
- end.
+normalize_replace([], _Replace, Acc) ->
+ maps:from_list(Acc).
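
The replace map built here can contain chains: a label that is itself mapped to yet another label. normalize_replace/3 follows each chain until it reaches a final label. A minimal standalone sketch of that step (module and function names invented for illustration, and assuming the map contains no cycles):

    -module(label_chain_demo).
    -export([normalize/1]).

    %% Collapse replacement chains: if From maps to To0 and To0 is itself
    %% replaced, keep following the chain until a final label is reached.
    normalize(Replace) ->
        normalize(maps:to_list(Replace), Replace, []).

    normalize([{From,To0}|Rest], Replace, Acc) ->
        case Replace of
            #{To0 := To} -> normalize([{From,To}|Rest], Replace, Acc);
            _ -> normalize(Rest, Replace, [{From,To0}|Acc])
        end;
    normalize([], _Replace, Acc) ->
        maps:from_list(Acc).

For example, normalize(#{1 => 2, 2 => 3}) returns #{1 => 3, 2 => 3}.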
+
+%% After eliminating a test, it can turn out that a register was only used
+%% by that test. Check whether that is the case; if so, the load into the
+%% register can be eliminated completely.
+opt_useless_loads([{block,_}|_]=Is, L, #st{index={lazy,FIs}}=St) ->
+ opt_useless_loads(Is, L, St#st{index=beam_utils:index_labels(FIs)});
+opt_useless_loads([{block,Block0}|Is], L, #st{index=Index}=St) ->
+ case opt_useless_block_loads(Block0, L, Index) of
+ [] ->
+ opt_useless_loads(Is, L, St);
+ [_|_]=Block ->
+ {[{block,Block}|Is],St}
+ end;
+%% After eliminating the test and any useless loads, the previous test
+%% may also become eliminable.
+%% The label may already have been marked as used, even though ultimately
+%% it never will be; we cannot do much about that at this point.
+opt_useless_loads([{test,_,{f,L},_}=I|Is], L, St) ->
+ case beam_utils:is_pure_test(I) of
+ false ->
+ {[I|Is],St};
+ true ->
+ opt_useless_loads(Is, L, St)
+ end;
+opt_useless_loads(Is, _L, St) ->
+ {Is,St}.
+
+opt_useless_block_loads([{set,[Dst],_,_}=I|Is], L, Index) ->
+ BlockJump = [{block,Is},{jump,{f,L}}],
+ case beam_utils:is_killed(Dst, BlockJump, Index) of
+ true ->
+ %% The register is killed and never used, so the load can be removed
+ opt_useless_block_loads(Is, L, Index);
+ false ->
+ [I|opt_useless_block_loads(Is, L, Index)]
+ end;
+opt_useless_block_loads([I|Is], L, Index) ->
+ [I|opt_useless_block_loads(Is, L, Index)];
+opt_useless_block_loads([], _L, _Index) ->
+ [].
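
A much-simplified model of the idea behind opt_useless_block_loads/3: drop 'set' instructions whose destination register is already known to be dead. The real pass derives that knowledge from beam_utils:is_killed/3 over the rest of the block and the jump target; in this sketch the caller supplies the killed set directly, and the instructions are assumed to have no side effects. Module and helper names are invented:

    -module(dead_load_demo).
    -export([drop_dead_loads/2]).

    %% Remove 'set' instructions whose destination register is in the
    %% caller-supplied set of killed registers; keep everything else.
    drop_dead_loads([{set,[Dst],_Ss,_Op}=I|Is], Killed) ->
        case sets:is_element(Dst, Killed) of
            true -> drop_dead_loads(Is, Killed);
            false -> [I|drop_dead_loads(Is, Killed)]
        end;
    drop_dead_loads([I|Is], Killed) ->
        [I|drop_dead_loads(Is, Killed)];
    drop_dead_loads([], _Killed) ->
        [].

For instance, drop_dead_loads([{set,[{x,2}],[{y,0}],move}], sets:from_list([{x,2}])) returns [].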
-collect_labels(Is, #st{entry=Entry}) ->
- collect_labels_1(Is, Entry, []).
+collect_labels(Is, Label, #st{entry=Entry,replace=Replace} = St) ->
+ collect_labels_1(Is, Label, Entry, Replace, St).
-collect_labels_1([{label,Entry}|_]=Is, Entry, Acc) ->
+collect_labels_1([{label,Entry}|_]=Is, _Label, Entry, Acc, St) ->
%% Never move the entry label.
- {Acc,Is};
-collect_labels_1([{label,L}|Is], Entry, Acc) ->
- collect_labels_1(Is, Entry, [L|Acc]);
-collect_labels_1(Is, _Entry, Acc) ->
- {Acc,Is}.
+ {Is,St#st{replace=Acc}};
+collect_labels_1([{label,L}|Is], Label, Entry, Acc, St) ->
+ collect_labels_1(Is, Label, Entry, Acc#{L => Label}, St);
+collect_labels_1(Is, _Label, _Entry, Acc, St) ->
+ {Is,St#st{replace=Acc}}.
%% label_defined(Is, Label) -> true | false.
%% Test whether the label Label is defined at the start of the instruction
@@ -418,13 +469,6 @@ invert_test(is_eq_exact) -> is_ne_exact;
invert_test(is_ne_exact) -> is_eq_exact;
invert_test(_) -> not_possible.
-insert_labels([L|Ls], Is, [{jump,{f,L}}|Acc], St) ->
- insert_labels(Ls, [{label,L}|Is], Acc, St);
-insert_labels([L|Ls], Is, Acc, St) ->
- insert_labels(Ls, [{label,L}|Is], Acc, St);
-insert_labels([], Is, Acc, St) ->
- opt(Is, Acc, St).
-
%% skip_unreachable([Instruction], St).
%% Remove all instructions (including definitions of labels
%% that have not been referenced yet) up to the next
diff --git a/lib/compiler/src/beam_listing.erl b/lib/compiler/src/beam_listing.erl
index 836378727b..73c6501fe5 100644
--- a/lib/compiler/src/beam_listing.erl
+++ b/lib/compiler/src/beam_listing.erl
@@ -23,7 +23,6 @@
-include("core_parse.hrl").
-include("v3_kernel.hrl").
--include("v3_life.hrl").
-include("beam_disasm.hrl").
-import(lists, [foreach/2]).
@@ -31,7 +30,6 @@
-type code() :: cerl:c_module()
| beam_utils:module_code()
| #k_mdef{}
- | {module(),_,_,_} %v3_life
| [_]. %form-based format
-spec module(file:io_device(), code()) -> 'ok'.
@@ -43,13 +41,9 @@ module(File, #k_mdef{}=Kern) ->
%% This is a kernel module.
io:put_chars(File, v3_kernel_pp:format(Kern));
%%io:put_chars(File, io_lib:format("~p~n", [Kern]));
-module(File, {Mod,Exp,Attr,Kern}) ->
- %% This is output from beam_life (v3).
- io:fwrite(File, "~w.~n~p.~n~p.~n", [Mod,Exp,Attr]),
- foreach(fun (F) -> function(File, F) end, Kern);
module(Stream, {Mod,Exp,Attr,Code,NumLabels}) ->
- %% This is output from beam_codegen.
- io:format(Stream, "{module, ~p}. %% version = ~w\n",
+ %% This is output from v3_codegen.
+ io:format(Stream, "{module, ~p}. %% version = ~w\n",
[Mod, beam_opcodes:format_number()]),
io:format(Stream, "\n{exports, ~p}.\n", [Exp]),
io:format(Stream, "\n{attributes, ~p}.\n", [Attr]),
@@ -60,19 +54,6 @@ module(Stream, {Mod,Exp,Attr,Code,NumLabels}) ->
[Name, Arity, Entry]),
io:put_chars(Stream, format_asm(Asm))
end, Code);
-module(Stream, Code) when is_binary(Code) ->
- #beam_file{ module = Module, compile_info = CInfo } = beam_disasm:file(Code),
- Loaded = code:is_loaded(Module),
- Sticky = code:is_sticky(Module),
- [code:unstick_mod(Module) || Sticky],
-
- {module, Module} = code:load_binary(Module, proplists:get_value(source, CInfo), Code),
- ok = erts_debug:df(Stream, Module),
-
- %% Restore loaded module
- _ = [{module, Module} = code:load_file(Module) || Loaded =/= false],
- [code:stick_mod(Module) || Sticky],
- ok;
module(Stream, [_|_]=Fs) ->
%% Form-based abstract format.
foreach(fun (F) -> io:format(Stream, "~p.\n", [F]) end, Fs).
@@ -82,60 +63,3 @@ format_asm([{label,L}|Is]) ->
format_asm([I|Is]) ->
[io_lib:format(" ~p", [I]),".\n"|format_asm(Is)];
format_asm([]) -> [].
-
-function(File, {function,Name,Arity,Args,Body,Vdb,_Anno}) ->
- io:nl(File),
- io:format(File, "function ~p/~p.\n", [Name,Arity]),
- io:format(File, " ~p.\n", [Args]),
- print_vdb(File, Vdb),
- put(beam_listing_nl, false),
- nl(File),
- foreach(fun(F) -> format(File, F, []) end, Body),
- nl(File),
- erase(beam_listing_nl).
-
-format(File, #l{ke=Ke,i=I,vdb=Vdb}, Ind) ->
- nl(File),
- ind_format(File, Ind, "~p ", [I]),
- print_vdb(File, Vdb),
- nl(File),
- format(File, Ke, Ind);
-format(File, Tuple, Ind) when is_tuple(Tuple) ->
- ind_format(File, Ind, "{", []),
- format_list(File, tuple_to_list(Tuple), [$\s|Ind]),
- ind_format(File, Ind, "}", []);
-format(File, List, Ind) when is_list(List) ->
- ind_format(File, Ind, "[", []),
- format_list(File, List, [$\s|Ind]),
- ind_format(File, Ind, "]", []);
-format(File, F, Ind) ->
- ind_format(File, Ind, "~p", [F]).
-
-format_list(File, [F], Ind) ->
- format(File, F, Ind);
-format_list(File, [F|Fs], Ind) ->
- format(File, F, Ind),
- ind_format(File, Ind, ",", []),
- format_list(File, Fs, Ind);
-format_list(_, [], _) -> ok.
-
-
-print_vdb(File, [{Var,F,E}|Vs]) ->
- io:format(File, "~p:~p..~p ", [Var,F,E]),
- print_vdb(File, Vs);
-print_vdb(_, []) -> ok.
-
-ind_format(File, Ind, Format, Args) ->
- case get(beam_listing_nl) of
- true ->
- put(beam_listing_nl, false),
- io:put_chars(File, Ind);
- false -> ok
- end,
- io:format(File, Format, Args).
-
-nl(File) ->
- case put(beam_listing_nl, true) of
- true -> ok;
- false -> io:nl(File)
- end.
diff --git a/lib/compiler/src/beam_peep.erl b/lib/compiler/src/beam_peep.erl
index 6df5c02334..eb3192fe8f 100644
--- a/lib/compiler/src/beam_peep.erl
+++ b/lib/compiler/src/beam_peep.erl
@@ -41,8 +41,7 @@ function({function,Name,Arity,CLabel,Is0}) ->
Is = beam_jump:remove_unused_labels(Is1),
{function,Name,Arity,CLabel,Is}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -89,15 +88,37 @@ peep([{gc_bif,_,_,_,_,Dst}=I|Is], SeenTests0, Acc) ->
peep([{jump,{f,L}},{label,L}=I|Is], _, Acc) ->
%% Sometimes beam_jump has missed this optimization.
peep(Is, gb_sets:empty(), [I|Acc]);
-peep([{select,Op,R,F,Vls0}|Is], _, Acc) ->
+peep([{select,Op,R,F,Vls0}|Is], SeenTests0, Acc0) ->
case prune_redundant_values(Vls0, F) of
[] ->
%% No values left. Must convert to plain jump.
I = {jump,F},
- peep(Is, gb_sets:empty(), [I|Acc]);
+ peep([I|Is], gb_sets:empty(), Acc0);
+ [{atom,_}=Value,Lbl] when Op =:= select_val ->
+ %% Single value left. Convert to regular test and pop redundant tests.
+ Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is],
+ case Acc0 of
+ [{test,is_atom,F,[R]}|Acc] ->
+ peep(Is1, SeenTests0, Acc);
+ _ ->
+ peep(Is1, SeenTests0, Acc0)
+ end;
+ [{integer,_}=Value,Lbl] when Op =:= select_val ->
+ %% Single value left. Convert to regular test and pop redundant tests.
+ Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is],
+ case Acc0 of
+ [{test,is_integer,F,[R]}|Acc] ->
+ peep(Is1, SeenTests0, Acc);
+ _ ->
+ peep(Is1, SeenTests0, Acc0)
+ end;
+ [Arity,Lbl] when Op =:= select_tuple_arity ->
+ %% Single value left. Convert to regular test
+ Is1 = [{test,test_arity,F,[R,Arity]},{jump,Lbl}|Is],
+ peep(Is1, SeenTests0, Acc0);
[_|_]=Vls ->
I = {select,Op,R,F,Vls},
- peep(Is, gb_sets:empty(), [I|Acc])
+ peep(Is, gb_sets:empty(), [I|Acc0])
end;
peep([{test,Op,_,Ops}=I|Is], SeenTests0, Acc) ->
case beam_utils:is_pure_test(I) of
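
The single-value cases above turn a degenerate select_val into an ordinary equality test followed by a jump. A standalone sketch of just that rewrite (the module name is invented; the real code also pops a now-redundant is_atom/is_integer test, which this sketch omits):

    -module(select_demo).
    -export([to_test/1]).

    %% A select_val with a single value left is equivalent to an
    %% is_eq_exact test followed by a jump to that value's label.
    to_test({select,select_val,R,Fail,[Value,Lbl]}) ->
        [{test,is_eq_exact,Fail,[R,Value]},{jump,Lbl}].

For example, to_test({select,select_val,{x,0},{f,1},[{atom,ok},{f,2}]}) returns [{test,is_eq_exact,{f,1},[{x,0},{atom,ok}]},{jump,{f,2}}].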
diff --git a/lib/compiler/src/beam_receive.erl b/lib/compiler/src/beam_receive.erl
index 1403e1e05e..3c8efa577c 100644
--- a/lib/compiler/src/beam_receive.erl
+++ b/lib/compiler/src/beam_receive.erl
@@ -82,8 +82,7 @@ function({function,Name,Arity,Entry,Is}) ->
D = beam_utils:index_labels(Is),
{function,Name,Arity,Entry,opt(Is, D, [])}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -207,6 +206,8 @@ opt_update_regs({label,Lbl}, R, L) ->
%% A catch label for a previously seen catch instruction is OK.
{R,L}
end;
+opt_update_regs({'try',_,{f,Lbl}}, R, L) ->
+ {R,gb_sets:add(Lbl, L)};
opt_update_regs({try_end,_}, R, L) ->
{R,L};
opt_update_regs({line,_}, R, L) ->
diff --git a/lib/compiler/src/beam_record.erl b/lib/compiler/src/beam_record.erl
index 419089b1bc..58a6de6775 100644
--- a/lib/compiler/src/beam_record.erl
+++ b/lib/compiler/src/beam_record.erl
@@ -15,19 +15,12 @@
%%
%% %CopyrightEnd%
%%
-%% File: beam_record.erl
-%% Author: Björn-Egil Dahlberg
-%% Created: 2014-09-03
-%%
-
--module(beam_record).
--export([module/2]).
%% Rewrite the instruction stream on tagged tuple tests.
-%% Tagged tuples means a tuple of any arity with an atom as its first element.
-%% Typically records, ok-tuples and error-tuples.
-%%
-%% from:
+%% A tagged tuple is a tuple of any arity with an atom as its
+%% first element, such as a record or an error tuple.
+%%
+%% From:
%% ...
%% {test,is_tuple,Fail,[Src]}.
%% {test,test_arity,Fail,[Src,Sz]}.
@@ -36,13 +29,16 @@
%% ...
%% {test,is_eq_exact,Fail,[Dst,Atom]}.
%% ...
-%% to:
+%% To:
%% ...
%% {test,is_tagged_tuple,Fail,[Src,Sz,Atom]}.
%% ...
+%%
+-module(beam_record).
+-export([module/2]).
--import(lists, [reverse/1]).
+-import(lists, [reverse/1,reverse/2]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
@@ -51,56 +47,85 @@ module({Mod,Exp,Attr,Fs0,Lc}, _Opt) ->
Fs = [function(F) || F <- Fs0],
{ok,{Mod,Exp,Attr,Fs,Lc}}.
-function({function,Name,Arity,CLabel,Is}) ->
+function({function,Name,Arity,CLabel,Is0}) ->
try
- Idx = beam_utils:index_labels(Is),
- {function,Name,Arity,CLabel,rewrite(Is,Idx)}
+ Is1 = beam_utils:anno_defs(Is0),
+ Idx = beam_utils:index_labels(Is1),
+ Is = rewrite(reverse(Is1), Idx),
+ {function,Name,Arity,CLabel,Is}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
-rewrite(Is,Idx) ->
- rewrite(Is,Idx,[]).
+rewrite(Is, Idx) ->
+ rewrite(Is, Idx, 0, []).
-rewrite([{test,is_tuple,Fail,[Src]}=I1,
- {test,test_arity,Fail,[Src,N]}=I2|Is],Idx,Acc) ->
- case is_tagged_tuple(Is,Fail,Src,Idx) of
+rewrite([{test,test_arity,Fail,[Src,N]}=TA,
+ {test,is_tuple,Fail,[Src]}=TT|Is], Idx, Def, Acc0) ->
+ case is_tagged_tuple(Acc0, Def, Fail, Src, Idx) of
no ->
- rewrite(Is,Idx,[I2,I1|Acc]);
- {Atom,[{block,[]}|Is1]} ->
- rewrite(Is1,Idx,[{test,is_tagged_tuple,Fail,[Src,N,Atom]}|Acc]);
- {Atom,Is1} ->
- rewrite(Is1,Idx,[{test,is_tagged_tuple,Fail,[Src,N,Atom]}|Acc])
+ rewrite(Is, Idx, 0, [TT,TA|Acc0]);
+ {yes,Atom,Acc} ->
+ I = {test,is_tagged_tuple,Fail,[Src,N,Atom]},
+ rewrite(Is, Idx, Def, [I|Acc])
end;
-rewrite([I|Is],Idx,Acc) ->
- rewrite(Is,Idx,[I|Acc]);
-rewrite([],_,Acc) -> reverse(Acc).
-
-is_tagged_tuple([{block,[{set,[Dst],[Src],{get_tuple_element,0}}=B|Bs]},
- {test,is_eq_exact,Fail,[Dst,{atom,_}=Atom]}|Is],Fail,Src,Idx) ->
+rewrite([{block,[{'%anno',{def,Def}}|Bl]}|Is], Idx, _Def, Acc) ->
+ rewrite(Is, Idx, Def, [{block,Bl}|Acc]);
+rewrite([{label,L}=I|Is], Idx0, Def, Acc) ->
+ Idx = beam_utils:index_label(L, Acc, Idx0),
+ rewrite(Is, Idx, Def, [I|Acc]);
+rewrite([I|Is], Idx, Def, Acc) ->
+ rewrite(Is, Idx, Def, [I|Acc]);
+rewrite([], _, _, Acc) -> Acc.
- %% if Dst is killed in the instruction stream and at fail label,
- %% we can safely remove get_tuple_element.
- %%
- %% if Dst is not killed in the stream, we cannot remove get_tuple_element
- %% since it is referenced.
-
- case is_killed(Dst,Is,Fail,Idx) of
- true -> {Atom,[{block,Bs}|Is]};
- false -> {Atom,[{block,[B|Bs]}|Is]}
+is_tagged_tuple([{block,Bl},
+ {test,is_eq_exact,Fail,[Dst,{atom,_}=Atom]}|Is],
+ Def, Fail, Src, Idx) ->
+ case is_tagged_tuple_1(Bl, Is, Fail, Src, Dst, Idx, Def, []) of
+ no ->
+ no;
+ {yes,[]} ->
+ {yes,Atom,Is};
+ {yes,[_|_]=Block} ->
+ {yes,Atom,[{block,Block}|Is]}
end;
-is_tagged_tuple([{block,[{set,_,_,_}=B|Bs]},
- {test,is_eq_exact,_,_}=I|Is],Fail,Src,Idx) ->
- case is_tagged_tuple([{block,Bs},I|Is],Fail,Src,Idx) of
- {Atom,[{block,Bsr}|Isr]} -> {Atom,[{block,[B|Bsr]}|Isr]};
- no -> no
+is_tagged_tuple(_, _, _, _, _) ->
+ no.
+
+is_tagged_tuple_1([{set,[Dst],[Src],{get_tuple_element,0}}=I|Bl],
+ Is, Fail, Src, Dst, Idx, Def, Acc) ->
+ %% Check usage of Dst to find out whether the get_tuple_element
+ %% is needed.
+ case usage(Dst, Is, Fail, Idx) of
+ killed ->
+ %% Safe to remove the get_tuple_element instruction.
+ {yes,reverse(Acc, Bl)};
+ used ->
+ %% Actively used. Must keep instruction.
+ {yes,reverse(Acc, [I|Bl])};
+ not_used ->
+ %% Not actually used (but must be initialized).
+ case is_defined(Dst, Def) of
+ false ->
+ %% Dst must be initialized, but the
+ %% actual value does not matter.
+ Kill = {set,[Dst],[nil],move},
+ {yes,reverse(Acc, [Kill|Bl])};
+ true ->
+ %% The register is previously initialized.
+ %% We can remove the instruction.
+ {yes,reverse(Acc, Bl)}
+ end
end;
-is_tagged_tuple(_Is,_Fail,_Src,_Idx) ->
+is_tagged_tuple_1([I|Bl], Is, Fail, Src, Dst, Idx, Def, Acc) ->
+ is_tagged_tuple_1(Bl, Is, Fail, Src, Dst, Idx, Def, [I|Acc]);
+is_tagged_tuple_1(_, _, _, _, _, _, _, _) ->
no.
-is_killed(Dst,Is,{_,Lbl},Idx) ->
- beam_utils:is_killed(Dst,Is,Idx) andalso
- beam_utils:is_killed_at(Dst,Lbl,Idx).
+usage(Dst, Is, Fail, Idx) ->
+ beam_utils:usage(Dst, [{test,is_number,Fail,[nil]}|Is], Idx).
+
+is_defined({x,X}, Def) ->
+ (Def bsr X) band 1 =:= 1.
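
The def bitmap used here encodes one x register per bit, and is_defined/2 simply tests the bit for the register number. A small shell session illustrating the same test (here bits 0 and 2 are set, so {x,0} and {x,2} are known to be defined while {x,1} is not):

    1> Def = 2#101.
    5
    2> (Def bsr 2) band 1 =:= 1.
    true
    3> (Def bsr 1) band 1 =:= 1.
    false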
diff --git a/lib/compiler/src/beam_reorder.erl b/lib/compiler/src/beam_reorder.erl
index 910b7f6b0a..63bb57a1ac 100644
--- a/lib/compiler/src/beam_reorder.erl
+++ b/lib/compiler/src/beam_reorder.erl
@@ -35,8 +35,7 @@ function({function,Name,Arity,CLabel,Is0}) ->
Is = reorder(Is0),
{function,Name,Arity,CLabel,Is}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
diff --git a/lib/compiler/src/beam_type.erl b/lib/compiler/src/beam_type.erl
index fc2c7a991b..b83ed17b55 100644
--- a/lib/compiler/src/beam_type.erl
+++ b/lib/compiler/src/beam_type.erl
@@ -40,11 +40,10 @@ function({function,Name,Arity,CLabel,Asm0}) ->
Asm1 = beam_utils:live_opt(Asm0),
Asm2 = opt(Asm1, [], tdb_new()),
Asm3 = beam_utils:live_opt(Asm2),
- Asm = beam_utils:delete_live_annos(Asm3),
+ Asm = beam_utils:delete_annos(Asm3),
{function,Name,Arity,CLabel,Asm}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -81,80 +80,99 @@ simplify(Is0, TypeDb0) ->
%% Basic simplification, mostly tuples, no floating point optimizations.
simplify_basic(Is, Ts) ->
- simplify_basic_1(Is, Ts, []).
-
-simplify_basic_1([{set,[D],[{integer,Index},Reg],{bif,element,_}}=I0|Is], Ts0, Acc) ->
- I = case max_tuple_size(Reg, Ts0) of
- Sz when 0 < Index, Index =< Sz ->
- {set,[D],[Reg],{get_tuple_element,Index-1}};
- _Other -> I0
- end,
- Ts = update(I, Ts0),
- simplify_basic_1(Is, Ts, [I|Acc]);
-simplify_basic_1([{set,[D],[TupleReg],{get_tuple_element,0}}=I|Is0], Ts0, Acc) ->
- case tdb_find(TupleReg, Ts0) of
- {tuple,_,[Contents]} ->
- simplify_basic_1([{set,[D],[Contents],move}|Is0], Ts0, Acc);
- _ ->
- Ts = update(I, Ts0),
- simplify_basic_1(Is0, Ts, [I|Acc])
+ simplify_basic(Is, Ts, []).
+
+simplify_basic([I0|Is], Ts0, Acc) ->
+ case simplify_instr(I0, Ts0) of
+ [] ->
+ simplify_basic(Is, Ts0, Acc);
+ [I] ->
+ Ts = update(I, Ts0),
+ simplify_basic(Is, Ts, [I|Acc])
+ end;
+simplify_basic([], Ts, Acc) ->
+ {reverse(Acc),Ts}.
+
+simplify_instr({set,[D],[{integer,Index},Reg],{bif,element,_}}=I, Ts) ->
+ case max_tuple_size(Reg, Ts) of
+ Sz when 0 < Index, Index =< Sz ->
+ [{set,[D],[Reg],{get_tuple_element,Index-1}}];
+ _ -> [I]
end;
-simplify_basic_1([{set,_,_,{try_catch,_,_}}=I|Is], _Ts, Acc) ->
- simplify_basic_1(Is, tdb_new(), [I|Acc]);
-simplify_basic_1([{test,is_atom,_,[R]}=I|Is], Ts, Acc) ->
+simplify_instr({test,is_atom,_,[R]}=I, Ts) ->
case tdb_find(R, Ts) of
- boolean -> simplify_basic_1(Is, Ts, Acc);
- _ -> simplify_basic_1(Is, Ts, [I|Acc])
+ boolean -> [];
+ _ -> [I]
end;
-simplify_basic_1([{test,is_integer,_,[R]}=I|Is], Ts, Acc) ->
+simplify_instr({test,is_integer,_,[R]}=I, Ts) ->
case tdb_find(R, Ts) of
- integer -> simplify_basic_1(Is, Ts, Acc);
- {integer,_} -> simplify_basic_1(Is, Ts, Acc);
- _ -> simplify_basic_1(Is, Ts, [I|Acc])
+ integer -> [];
+ {integer,_} -> [];
+ _ -> [I]
end;
-simplify_basic_1([{test,is_tuple,_,[R]}=I|Is], Ts, Acc) ->
+simplify_instr({set,[D],[TupleReg],{get_tuple_element,0}}=I, Ts) ->
+ case tdb_find(TupleReg, Ts) of
+ {tuple,_,_,[Contents]} ->
+ [{set,[D],[Contents],move}];
+ _ ->
+ [I]
+ end;
+simplify_instr({test,is_tuple,_,[R]}=I, Ts) ->
case tdb_find(R, Ts) of
- {tuple,_,_} -> simplify_basic_1(Is, Ts, Acc);
- _ -> simplify_basic_1(Is, Ts, [I|Acc])
+ {tuple,_,_,_} -> [];
+ _ -> [I]
end;
-simplify_basic_1([{test,is_map,_,[R]}=I|Is], Ts0, Acc) ->
- case tdb_find(R, Ts0) of
- map -> simplify_basic_1(Is, Ts0, Acc);
- _Other ->
- Ts = update(I, Ts0),
- simplify_basic_1(Is, Ts, [I|Acc])
+simplify_instr({test,test_arity,_,[R,Arity]}=I, Ts) ->
+ case tdb_find(R, Ts) of
+ {tuple,exact_size,Arity,_} -> [];
+ _ -> [I]
end;
-simplify_basic_1([{test,is_nonempty_list,_,[R]}=I|Is], Ts0, Acc) ->
- case tdb_find(R, Ts0) of
- nonempty_list -> simplify_basic_1(Is, Ts0, Acc);
- _Other ->
- Ts = update(I, Ts0),
- simplify_basic_1(Is, Ts, [I|Acc])
- end;
-simplify_basic_1([{test,is_eq_exact,Fail,[R,{atom,_}=Atom]}=I|Is0], Ts0, Acc0) ->
- Acc = case tdb_find(R, Ts0) of
- {atom,_}=Atom -> Acc0;
- {atom,_} -> [{jump,Fail}|Acc0];
- _ -> [I|Acc0]
- end,
- Ts = update(I, Ts0),
- simplify_basic_1(Is0, Ts, Acc);
-simplify_basic_1([{select,select_val,Reg,_,_}=I0|Is], Ts, Acc) ->
- I = case tdb_find(Reg, Ts) of
- {integer,Range} ->
- simplify_select_val_int(I0, Range);
- boolean ->
- simplify_select_val_bool(I0);
- _ ->
- I0
- end,
- simplify_basic_1(Is, tdb_new(), [I|Acc]);
-simplify_basic_1([I|Is], Ts0, Acc) ->
- Ts = update(I, Ts0),
- simplify_basic_1(Is, Ts, [I|Acc]);
-simplify_basic_1([], Ts, Acc) ->
- Is = reverse(Acc),
- {Is,Ts}.
+simplify_instr({test,is_map,_,[R]}=I, Ts) ->
+ case tdb_find(R, Ts) of
+ map -> [];
+ _ -> [I]
+ end;
+simplify_instr({test,is_nonempty_list,_,[R]}=I, Ts) ->
+ case tdb_find(R, Ts) of
+ nonempty_list -> [];
+ _ -> [I]
+ end;
+simplify_instr({test,is_eq_exact,Fail,[R,{atom,_}=Atom]}=I, Ts) ->
+ case tdb_find(R, Ts) of
+ {atom,_}=Atom -> [];
+ {atom,_} -> [{jump,Fail}];
+ _ -> [I]
+ end;
+simplify_instr({test,is_record,_,[R,{atom,_}=Tag,{integer,Arity}]}=I, Ts) ->
+ case tdb_find(R, Ts) of
+ {tuple,exact_size,Arity,[Tag]} -> [];
+ _ -> [I]
+ end;
+simplify_instr({select,select_val,Reg,_,_}=I, Ts) ->
+ [case tdb_find(Reg, Ts) of
+ {integer,Range} ->
+ simplify_select_val_int(I, Range);
+ boolean ->
+ simplify_select_val_bool(I);
+ _ ->
+ I
+ end];
+simplify_instr({test,bs_test_unit,_,[Src,Unit]}=I, Ts) ->
+ case tdb_find(Src, Ts) of
+ {binary,U} when U rem Unit =:= 0 -> [];
+ _ -> [I]
+ end;
+simplify_instr({test,is_binary,_,[Src]}=I, Ts) ->
+ case tdb_find(Src, Ts) of
+ {binary,U} when U rem 8 =:= 0 -> [];
+ _ -> [I]
+ end;
+simplify_instr({test,is_bitstr,_,[Src]}=I, Ts) ->
+ case tdb_find(Src, Ts) of
+ {binary,_} -> [];
+ _ -> [I]
+ end;
+simplify_instr(I, _) -> [I].
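
Each simplify_instr/2 clause drops a test when the type database already proves the tested property, signalling removal by returning the empty list. A minimal sketch of the same idea with a plain map standing in for the tdb (module name invented, clauses reduced to two):

    -module(type_test_demo).
    -export([simplify/2]).

    %% Drop a test if the type map already proves what it checks;
    %% [] means "no instruction needed", [I] means "keep the test".
    simplify({test,is_integer,_,[R]}=I, Ts) ->
        case Ts of
            #{R := integer} -> [];
            _ -> [I]
        end;
    simplify({test,is_tuple,_,[R]}=I, Ts) ->
        case Ts of
            #{R := {tuple,_,_,_}} -> [];
            _ -> [I]
        end;
    simplify(I, _) -> [I].

For example, simplify({test,is_integer,{f,3},[{x,0}]}, #{{x,0} => integer}) returns [], meaning the test can be removed.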
simplify_select_val_int({select,select_val,R,_,L0}=I, {Min,Max}) ->
Vs = sort([V || {integer,V} <- L0]),
@@ -285,7 +303,7 @@ clearerror([], OrigIs) -> [{set,[],[],fclearerror}|OrigIs].
%% Combine two blocks and eliminate any move instructions that assign
%% to registers that are killed later in the block.
%%
-merge_blocks(B1, [{'%live',_,_}|B2]) ->
+merge_blocks(B1, [{'%anno',_}|B2]) ->
merge_blocks_1(B1++[{set,[],[],stop_here}|B2]).
merge_blocks_1([{set,[],_,stop_here}|Is]) -> Is;
@@ -334,29 +352,17 @@ flt_need_heap_2({set,_,_,{put_tuple,_}}, H, Fl) ->
{[],H+1,Fl};
flt_need_heap_2({set,_,_,put}, H, Fl) ->
{[],H+1,Fl};
-%% Then the "neutral" instructions. We just pass them.
-flt_need_heap_2({set,[{fr,_}],_,_}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,[],[],fclearerror}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,[],[],fcheckerror}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,_,_,{bif,_,_}}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,_,_,move}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,_,_,{get_tuple_element,_}}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,_,_,get_list}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,_,_,{try_catch,_,_}}, H, Fl) ->
- {[],H,Fl};
-flt_need_heap_2({set,_,_,init}, H, Fl) ->
- {[],H,Fl};
-%% All other instructions should cause the insertion of an allocation
+%% The following instructions cause the insertion of an allocation
%% instruction if needed.
+flt_need_heap_2({set,_,_,{alloc,_,_}}, H, Fl) ->
+ {flt_alloc(H, Fl),0,0};
+flt_need_heap_2({set,_,_,{set_tuple_element,_}}, H, Fl) ->
+ {flt_alloc(H, Fl),0,0};
+flt_need_heap_2({'%anno',_}, H, Fl) ->
+ {flt_alloc(H, Fl),0,0};
+%% All other instructions are "neutral". We just pass them.
flt_need_heap_2(_, H, Fl) ->
- {flt_alloc(H, Fl),0,0}.
+ {[],H,Fl}.
flt_alloc(0, 0) ->
[];
@@ -379,7 +385,7 @@ build_alloc(Words, Floats) -> {alloc,[{words,Words},{floats,Floats}]}.
%% is not continuous at an allocation function (e.g. if {x,0} and {x,2}
%% are live, but not {x,1}).
-flt_liveness([{'%live',_Live,Regs}=LiveInstr|Is]) ->
+flt_liveness([{'%anno',{used,Regs}}=LiveInstr|Is]) ->
flt_liveness_1(Is, Regs, [LiveInstr]).
flt_liveness_1([{set,Ds,Ss,{alloc,Live0,Alloc}}|Is], Regs0, Acc) ->
@@ -391,7 +397,7 @@ flt_liveness_1([{set,Ds,Ss,{alloc,Live0,Alloc}}|Is], Regs0, Acc) ->
flt_liveness_1([{set,Ds,_,_}=I|Is], Regs0, Acc) ->
Regs = x_live(Ds, Regs0),
flt_liveness_1(Is, Regs, [I|Acc]);
-flt_liveness_1([{'%live',_,_}], _Regs, Acc) ->
+flt_liveness_1([{'%anno',_}], _Regs, Acc) ->
reverse(Acc).
init_regs(Live) ->
@@ -415,13 +421,14 @@ x_live([], Regs) -> Regs.
%% Update the type database to account for executing an instruction.
%%
%% First the cases for instructions inside basic blocks.
-update({'%live',_,_}, Ts) -> Ts;
+update({'%anno',_}, Ts) ->
+ Ts;
update({set,[D],[S],move}, Ts) ->
tdb_copy(S, D, Ts);
update({set,[D],[{integer,I},Reg],{bif,element,_}}, Ts0) ->
- tdb_update([{Reg,{tuple,I,[]}},{D,kill}], Ts0);
+ tdb_update([{Reg,{tuple,min_size,I,[]}},{D,kill}], Ts0);
update({set,[D],[_Index,Reg],{bif,element,_}}, Ts0) ->
- tdb_update([{Reg,{tuple,0,[]}},{D,kill}], Ts0);
+ tdb_update([{Reg,{tuple,min_size,0,[]}},{D,kill}], Ts0);
update({set,[D],Args,{bif,N,_}}, Ts0) ->
Ar = length(Args),
BoolOp = erl_internal:new_type_test(N, Ar) orelse
@@ -470,8 +477,6 @@ update({set,[D],[S1,S2],{alloc,_,{gc_bif,Op,{f,0}}}}, Ts0) ->
update({set,[],_Src,_Op}, Ts0) -> Ts0;
update({set,[D],_Src,_Op}, Ts0) ->
tdb_update([{D,kill}], Ts0);
-update({set,[D1,D2],_Src,_Op}, Ts0) ->
- tdb_update([{D1,kill},{D2,kill}], Ts0);
update({kill,D}, Ts) ->
tdb_update([{D,kill}], Ts);
@@ -479,7 +484,7 @@ update({kill,D}, Ts) ->
update({test,is_float,_Fail,[Src]}, Ts0) ->
tdb_update([{Src,float}], Ts0);
update({test,test_arity,_Fail,[Src,Arity]}, Ts0) ->
- tdb_update([{Src,{tuple,Arity,[]}}], Ts0);
+ tdb_update([{Src,{tuple,exact_size,Arity,[]}}], Ts0);
update({test,is_map,_Fail,[Src]}, Ts0) ->
tdb_update([{Src,map}], Ts0);
update({get_map_elements,_,Src,{list,Elems0}}, Ts0) ->
@@ -493,15 +498,19 @@ update({test,is_eq_exact,_,[Reg,{atom,_}=Atom]}, Ts) ->
error ->
Ts;
{tuple_element,TupleReg,0} ->
- tdb_update([{TupleReg,{tuple,1,[Atom]}}], Ts);
+ tdb_update([{TupleReg,{tuple,min_size,1,[Atom]}}], Ts);
_ ->
Ts
end;
update({test,is_record,_Fail,[Src,Tag,{integer,Arity}]}, Ts) ->
- tdb_update([{Src,{tuple,Arity,[Tag]}}], Ts);
+ tdb_update([{Src,{tuple,exact_size,Arity,[Tag]}}], Ts);
-%% Binary matching
+%% Binaries and binary matching.
+update({test,is_binary,_Fail,[Src]}, Ts0) ->
+ tdb_update([{Src,{binary,8}}], Ts0);
+update({test,is_bitstr,_Fail,[Src]}, Ts0) ->
+ tdb_update([{Src,{binary,1}}], Ts0);
update({test,bs_get_integer2,_,_,Args,Dst}, Ts) ->
tdb_update([{Dst,get_bs_integer_type(Args)}], Ts);
update({test,bs_get_utf8,_,_,_,Dst}, Ts) ->
@@ -510,8 +519,10 @@ update({test,bs_get_utf16,_,_,_,Dst}, Ts) ->
tdb_update([{Dst,?UNICODE_INT}], Ts);
update({test,bs_get_utf32,_,_,_,Dst}, Ts) ->
tdb_update([{Dst,?UNICODE_INT}], Ts);
+update({bs_init,_,{bs_init2,_,_},_,_,Dst}, Ts) ->
+ tdb_update([{Dst,{binary,8}}], Ts);
update({bs_init,_,_,_,_,Dst}, Ts) ->
- tdb_update([{Dst,kill}], Ts);
+ tdb_update([{Dst,{binary,1}}], Ts);
update({bs_put,_,_,_}, Ts) ->
Ts;
update({bs_save2,_,_}, Ts) ->
@@ -520,12 +531,19 @@ update({bs_restore2,_,_}, Ts) ->
Ts;
update({bs_context_to_binary,Dst}, Ts) ->
tdb_update([{Dst,kill}], Ts);
-update({test,bs_start_match2,_,_,_,Dst}, Ts) ->
- tdb_update([{Dst,kill}], Ts);
-update({test,bs_get_binary2,_,_,_,Dst}, Ts) ->
- tdb_update([{Dst,kill}], Ts);
+update({test,bs_start_match2,_,_,[Src,_],Dst}, Ts) ->
+ Type = case tdb_find(Src, Ts) of
+ {binary,_}=Type0 -> Type0;
+ _ -> {binary,1}
+ end,
+ tdb_update([{Dst,Type}], Ts);
+update({test,bs_get_binary2,_,_,[_,_,Unit,_],Dst}, Ts) ->
+ true = is_integer(Unit), %Assertion.
+ tdb_update([{Dst,{binary,Unit}}], Ts);
update({test,bs_get_float2,_,_,_,Dst}, Ts) ->
tdb_update([{Dst,float}], Ts);
+update({test,bs_test_unit,_,[Src,Unit]}, Ts) ->
+ tdb_update([{Src,{binary,Unit}}], Ts);
update({test,_Test,_Fail,_Other}, Ts) ->
Ts;
@@ -540,7 +558,7 @@ update({call_ext,Ar,{extfunc,math,Math,Ar}}, Ts) ->
update({call_ext,3,{extfunc,erlang,setelement,3}}, Ts0) ->
Ts = tdb_kill_xregs(Ts0),
case tdb_find({x,1}, Ts0) of
- {tuple,Sz,_}=T0 ->
+ {tuple,SzKind,Sz,_}=T0 ->
T = case tdb_find({x,0}, Ts0) of
{integer,{I,I}} when I > 1 ->
%% First element is not changed. The result
@@ -549,7 +567,7 @@ update({call_ext,3,{extfunc,erlang,setelement,3}}, Ts0) ->
_ ->
%% Position is 1 or unknown. May change the
%% first element of the tuple.
- {tuple,Sz,[]}
+ {tuple,SzKind,Sz,[]}
end,
tdb_update([{{x,0},T}], Ts);
_ ->
@@ -562,6 +580,7 @@ update({call_fun, _}, Ts) -> tdb_kill_xregs(Ts);
update({apply, _}, Ts) -> tdb_kill_xregs(Ts);
update({line,_}, Ts) -> Ts;
+update({'%',_}, Ts) -> Ts;
%% The instruction is unknown. Kill all information.
update(_I, _Ts) -> tdb_new().
@@ -633,7 +652,7 @@ possibly_numeric(_) -> false.
max_tuple_size(Reg, Ts) ->
case tdb_find(Reg, Ts) of
- {tuple,Sz,_} -> Sz;
+ {tuple,_,Sz,_} -> Sz;
_Other -> 0
end.
@@ -789,16 +808,20 @@ checkerror_2(OrigIs) -> [{set,[],[],fcheckerror}|OrigIs].
%%% Routines for maintaining a type database. The type database
%%% associates type information with registers.
%%%
-%%% {tuple,Size,First} means that the corresponding register contains a
-%%% tuple with *at least* Size elements. An tuple with unknown
-%%% size is represented as {tuple,0,[]}. First is either [] (meaning that
-%%% the tuple's first element is unknown) or [FirstElement] (the contents
-%%% of the first element).
+%%% {tuple,min_size,Size,First} means that the corresponding register contains
+%%% a tuple with *at least* Size elements (conversely, exact_size means that it
+%%% contains a tuple with *exactly* Size elements). A tuple with unknown size
+%%% is represented as {tuple,min_size,0,[]}. First is either [] (meaning that
+%%% the tuple's first element is unknown) or [FirstElement] (the contents of
+%%% the first element).
%%%
%%% 'float' means that the register contains a float.
%%%
%%% 'integer' or {integer,{Min,Max}} means that the register contains an
%%% integer.
+%%%
+%%% {binary,Unit} means that the register contains a binary/bitstring aligned
+%%% to unit Unit.
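
To make the type terms above concrete, here is an invented helper that checks whether a run-time value is consistent with a type descriptor. The compiler never evaluates types this way (it only propagates them symbolically); the sketch is purely illustrative:

    -module(type_term_demo).
    -export([matches/2]).

    %% True if the concrete term agrees with the type descriptor.
    matches({tuple,min_size,Sz,_First}, T) when is_tuple(T) ->
        tuple_size(T) >= Sz;
    matches({tuple,exact_size,Sz,_First}, T) when is_tuple(T) ->
        tuple_size(T) =:= Sz;
    matches({binary,Unit}, B) when is_bitstring(B) ->
        bit_size(B) rem Unit =:= 0;
    matches(float, F) -> is_float(F);
    matches(integer, I) -> is_integer(I);
    matches({integer,{Min,Max}}, I) ->
        is_integer(I) andalso I >= Min andalso I =< Max;
    matches(_, _) -> false.

For example, matches({tuple,min_size,3,[]}, {a,b,c,d}) returns true.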
%% tdb_new() -> EmptyDataBase
%% Creates a new, empty type database.
@@ -837,7 +860,7 @@ tdb_copy(Literal, D, Ts) ->
{literal,#{}} -> map;
{literal,Tuple} when tuple_size(Tuple) >= 1 ->
Lit = tag_literal(element(1, Tuple)),
- {tuple,tuple_size(Tuple),[Lit]};
+ {tuple,exact_size,tuple_size(Tuple),[Lit]};
_ -> term
end,
if
@@ -859,14 +882,14 @@ tag_literal(Lit) -> {literal,Lit}.
%% Updates a type database. If a 'kill' operation is given, the type
%% information for that register will be removed from the database.
%% A kill operation takes precedence over other operations for the same
-%% register (i.e. [{{x,0},kill},{{x,0},{tuple,5,[]}}] means that the
+%% register (i.e. [{{x,0},kill},{{x,0},{tuple,min_size,5,[]}}] means that
 %% the existing type information, if any, will be discarded, and the
-%% the '{tuple,5,[]}' information ignored.
+%% '{tuple,min_size,5,[]}' information ignored.
%%
%% If NewInfo information is given and there exists information about
%% the register, the old and new type information will be merged.
-%% For instance, {tuple,5,_} and {tuple,10,_} will be merged to produce
-%% {tuple,10,_}.
+%% For instance, {tuple,min_size,5,_} and {tuple,min_size,10,_} will be merged
+%% to produce {tuple,min_size,10,_}.
tdb_update(Uis0, Ts0) ->
Uis1 = filter(fun ({{x,_},_Op}) -> true;
@@ -904,34 +927,41 @@ tdb_kill_xregs([]) -> [].
remove_key(Key, [{Key,_Op}|Ops]) -> remove_key(Key, Ops);
remove_key(_, Ops) -> Ops.
-
+
merge_type_info(I, I) -> I;
-merge_type_info({tuple,Sz1,Same}, {tuple,Sz2,Same}=Max) when Sz1 < Sz2 ->
+merge_type_info({tuple,min_size,Sz1,Same}, {tuple,min_size,Sz2,Same}=Max) when Sz1 < Sz2 ->
Max;
-merge_type_info({tuple,Sz1,Same}=Max, {tuple,Sz2,Same}) when Sz1 > Sz2 ->
+merge_type_info({tuple,min_size,Sz1,Same}=Max, {tuple,min_size,Sz2,Same}) when Sz1 > Sz2 ->
Max;
-merge_type_info({tuple,Sz1,[]}, {tuple,_Sz2,First}=Tuple2) ->
- merge_type_info({tuple,Sz1,First}, Tuple2);
-merge_type_info({tuple,_Sz1,First}=Tuple1, {tuple,Sz2,_}) ->
- merge_type_info(Tuple1, {tuple,Sz2,First});
+merge_type_info({tuple,exact_size,_,Same}=Exact, {tuple,_,_,Same}) ->
+ Exact;
+merge_type_info({tuple,_,_,Same},{tuple,exact_size,_,Same}=Exact) ->
+ Exact;
+merge_type_info({tuple,SzKind1,Sz1,[]}, {tuple,_SzKind2,_Sz2,First}=Tuple2) ->
+ merge_type_info({tuple,SzKind1,Sz1,First}, Tuple2);
+merge_type_info({tuple,_SzKind1,_Sz1,First}=Tuple1, {tuple,SzKind2,Sz2,_}) ->
+ merge_type_info(Tuple1, {tuple,SzKind2,Sz2,First});
merge_type_info(integer, {integer,_}) ->
integer;
merge_type_info({integer,_}, integer) ->
integer;
merge_type_info({integer,{Min1,Max1}}, {integer,{Min2,Max2}}) ->
{integer,{max(Min1, Min2),min(Max1, Max2)}};
+merge_type_info({binary,U1}, {binary,U2}) ->
+ {binary,max(U1, U2)};
merge_type_info(NewType, _) ->
verify_type(NewType),
NewType.
verify_type({atom,_}) -> ok;
+verify_type({binary,U}) when is_integer(U) -> ok;
verify_type(boolean) -> ok;
verify_type(integer) -> ok;
verify_type({integer,{Min,Max}})
when is_integer(Min), is_integer(Max) -> ok;
verify_type(map) -> ok;
verify_type(nonempty_list) -> ok;
-verify_type({tuple,Sz,[]}) when is_integer(Sz) -> ok;
-verify_type({tuple,Sz,[_]}) when is_integer(Sz) -> ok;
+verify_type({tuple,_,Sz,[]}) when is_integer(Sz) -> ok;
+verify_type({tuple,_,Sz,[_]}) when is_integer(Sz) -> ok;
verify_type({tuple_element,_,_}) -> ok;
verify_type(float) -> ok.
diff --git a/lib/compiler/src/beam_utils.erl b/lib/compiler/src/beam_utils.erl
index e39fbdc3b7..a57dbbbc2f 100644
--- a/lib/compiler/src/beam_utils.erl
+++ b/lib/compiler/src/beam_utils.erl
@@ -22,15 +22,23 @@
-module(beam_utils).
-export([is_killed_block/2,is_killed/3,is_killed_at/3,
- is_not_used/3,
- empty_label_index/0,index_label/3,index_labels/1,
+ is_not_used/3,usage/3,
+ empty_label_index/0,index_label/3,index_labels/1,replace_labels/4,
code_at/2,bif_to_test/3,is_pure_test/1,
- live_opt/1,delete_live_annos/1,combine_heap_needs/2,
- split_even/1]).
+ live_opt/1,delete_annos/1,combine_heap_needs/2,
+ anno_defs/1,
+ split_even/1
+ ]).
-export_type([code_index/0,module_code/0,instruction/0]).
--import(lists, [member/2,sort/1,reverse/1,splitwith/2]).
+-import(lists, [flatmap/2,map/2,member/2,sort/1,reverse/1,splitwith/2]).
+
+-define(is_const(Val), (Val =:= nil orelse
+ element(1, Val) =:= integer orelse
+ element(1, Val) =:= float orelse
+ element(1, Val) =:= atom orelse
+ element(1, Val) =:= literal)).
%% instruction() describes all instructions that are used during optimization
%% (from beam_a to beam_z).
@@ -54,6 +62,23 @@
{lbl :: code_index(), %Label to code index.
res :: result_cache()}). %Result cache for each label.
+%% usage(Register, [Instruction], State) -> killed|not_used|used.
+%% Determine the usage of Register in the instruction sequence.
+%% The return value is one of:
+%%
+%% killed - The register is not used in any way.
+%% not_used - The register is referenced only by an allocating instruction
+%% (the actual value does not matter).
+%% used - The register is used (its value does matter).
+
+-spec usage(beam_asm:reg(), [instruction()], code_index()) ->
+ 'killed' | 'not_used' | 'used'.
+
+usage(R, Is, D) ->
+ St = #live{lbl=D,res=gb_trees:empty()},
+ {Usage,_} = check_liveness(R, Is, St),
+ Usage.
+
%% is_killed_block(Register, [Instruction]) -> true|false
%% Determine whether a register is killed by the instruction sequence inside
@@ -70,7 +95,7 @@ is_killed_block({x,X}, [{set,_,_,{alloc,Live,_}}|_]) ->
X >= Live;
is_killed_block(R, [{set,Ds,Ss,_Op}|Is]) ->
not member(R, Ss) andalso (member(R, Ds) orelse is_killed_block(R, Is));
-is_killed_block(R, [{'%live',_,Regs}|Is]) ->
+is_killed_block(R, [{'%anno',{used,Regs}}|Is]) ->
case R of
{x,X} when (Regs bsr X) band 1 =:= 0 -> true;
_ -> is_killed_block(R, Is)
@@ -93,6 +118,7 @@ is_killed(R, Is, D) ->
St = #live{lbl=D,res=gb_trees:empty()},
case check_liveness(R, Is, St) of
{killed,_} -> true;
+ {exit_not_used,_} -> true;
{_,_} -> false
end.
@@ -105,6 +131,7 @@ is_killed_at(R, Lbl, D) when is_integer(Lbl) ->
St0 = #live{lbl=D,res=gb_trees:empty()},
case check_liveness_at(R, Lbl, St0) of
{killed,_} -> true;
+ {exit_not_used,_} -> true;
{_,_} -> false
end.
@@ -121,6 +148,7 @@ is_not_used(R, Is, D) ->
St = #live{lbl=D,res=gb_trees:empty()},
case check_liveness(R, Is, St) of
{used,_} -> false;
+ {exit_not_used,_} -> false;
{_,_} -> true
end.
@@ -160,6 +188,18 @@ index_label(Lbl, Is0, Acc) ->
code_at(L, Ll) ->
gb_trees:get(L, Ll).
+%% replace_labels(FunctionIs, Tail, ReplaceDb, Fallback) -> FunctionIs.
+%% Replace all labels in instructions according to the ReplaceDb.
+%% If a label is not found, the Fallback is called with the label to
+%% produce a new one.
+
+-spec replace_labels([instruction()],
+ [instruction()],
+ #{beam_asm:label() => beam_asm:label()},
+ fun((beam_asm:label()) -> term())) -> [instruction()].
+replace_labels(Is, Acc, D, Fb) ->
+ replace_labels_1(Is, Acc, D, Fb).
+
%% bif_to_test(Bif, [Op], Fail) -> {test,Test,Fail,[Op]}
%% Convert a BIF to a test. Fail if not possible.
@@ -185,10 +225,20 @@ bif_to_test('>', [A,B], Fail) -> {test,is_lt,Fail,[B,A]};
bif_to_test('<', [_,_]=Ops, Fail) -> {test,is_lt,Fail,Ops};
bif_to_test('>=', [_,_]=Ops, Fail) -> {test,is_ge,Fail,Ops};
bif_to_test('==', [A,nil], Fail) -> {test,is_nil,Fail,[A]};
+bif_to_test('==', [nil,A], Fail) -> {test,is_nil,Fail,[A]};
+bif_to_test('==', [C,A], Fail) when ?is_const(C) ->
+ {test,is_eq,Fail,[A,C]};
bif_to_test('==', [_,_]=Ops, Fail) -> {test,is_eq,Fail,Ops};
+bif_to_test('/=', [C,A], Fail) when ?is_const(C) ->
+ {test,is_ne,Fail,[A,C]};
bif_to_test('/=', [_,_]=Ops, Fail) -> {test,is_ne,Fail,Ops};
bif_to_test('=:=', [A,nil], Fail) -> {test,is_nil,Fail,[A]};
+bif_to_test('=:=', [nil,A], Fail) -> {test,is_nil,Fail,[A]};
+bif_to_test('=:=', [C,A], Fail) when ?is_const(C) ->
+ {test,is_eq_exact,Fail,[A,C]};
bif_to_test('=:=', [_,_]=Ops, Fail) -> {test,is_eq_exact,Fail,Ops};
+bif_to_test('=/=', [C,A], Fail) when ?is_const(C) ->
+ {test,is_ne_exact,Fail,[A,C]};
bif_to_test('=/=', [_,_]=Ops, Fail) -> {test,is_ne_exact,Fail,Ops};
bif_to_test(is_record, [_,_,_]=Ops, Fail) -> {test,is_record,Fail,Ops}.
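
The new clauses above canonicalize comparisons against a constant so that the constant ends up as the second operand of the generated test. Based on the clauses in this patch, both argument orders should produce the same test (beam_utils is an internal compiler module, so the shell calls are only illustrative):

    1> beam_utils:bif_to_test('=:=', [{atom,ok},{x,0}], {f,5}).
    {test,is_eq_exact,{f,5},[{x,0},{atom,ok}]}
    2> beam_utils:bif_to_test('=:=', [{x,0},{atom,ok}], {f,5}).
    {test,is_eq_exact,{f,5},[{x,0},{atom,ok}]}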
@@ -220,7 +270,7 @@ is_pure_test({test,Op,_,Ops}) ->
%% Go through the instruction sequence in reverse execution
%% order, keep track of liveness and remove 'move' instructions
%% whose destination is a register that will not be used.
-%% Also insert {'%live',Live,Regs} annotations at the beginning
+%% Also insert {used,Regs} annotations at the beginning
%% and end of each block.
-spec live_opt([instruction()]) -> [instruction()].
@@ -235,35 +285,52 @@ live_opt(Is0) ->
Bef ++ [Fi|live_opt(reverse(Is), 0, D, [])].
-%% delete_live_annos([Instruction]) -> [Instruction].
-%% Delete all live annotations.
+%% delete_annos([Instruction]) -> [Instruction].
+%% Delete all annotations.
--spec delete_live_annos([instruction()]) -> [instruction()].
+-spec delete_annos([instruction()]) -> [instruction()].
-delete_live_annos([{block,Bl0}|Is]) ->
- case delete_live_annos(Bl0) of
- [] -> delete_live_annos(Is);
- [_|_]=Bl -> [{block,Bl}|delete_live_annos(Is)]
+delete_annos([{block,Bl0}|Is]) ->
+ case delete_annos(Bl0) of
+ [] -> delete_annos(Is);
+ [_|_]=Bl -> [{block,Bl}|delete_annos(Is)]
end;
-delete_live_annos([{'%live',_,_}|Is]) ->
- delete_live_annos(Is);
-delete_live_annos([I|Is]) ->
- [I|delete_live_annos(Is)];
-delete_live_annos([]) -> [].
-
+delete_annos([{'%anno',_}|Is]) ->
+ delete_annos(Is);
+delete_annos([I|Is]) ->
+ [I|delete_annos(Is)];
+delete_annos([]) -> [].
+
%% combine_heap_needs(HeapNeed1, HeapNeed2) -> HeapNeed
%% Combine the heap need for two allocation instructions.
--spec combine_heap_needs(term(), term()) -> term().
+-type heap_need_tag() :: 'floats' | 'words'.
+-type heap_need() :: non_neg_integer() |
+ {'alloc',[{heap_need_tag(),non_neg_integer()}]}.
+-spec combine_heap_needs(heap_need(), heap_need()) -> heap_need().
-combine_heap_needs({alloc,Alloc1}, {alloc,Alloc2}) ->
- {alloc,combine_alloc_lists(Alloc1, Alloc2)};
-combine_heap_needs({alloc,Alloc}, Words) when is_integer(Words) ->
- {alloc,combine_alloc_lists(Alloc, [{words,Words}])};
-combine_heap_needs(Words, {alloc,Alloc}) when is_integer(Words) ->
- {alloc,combine_alloc_lists(Alloc, [{words,Words}])};
combine_heap_needs(H1, H2) when is_integer(H1), is_integer(H2) ->
- H1+H2.
+ H1 + H2;
+combine_heap_needs(H1, H2) ->
+ {alloc,combine_alloc_lists([H1,H2])}.
+
+
+%% anno_defs(Instructions) -> Instructions'
+%% Add {def,RegisterBitmap} annotations to the beginning of
+%% each block. Iff bit X is set in the bitmap, it means
+%% that {x,X} is defined when the block is entered.
+
+-spec anno_defs([instruction()]) -> [instruction()].
+
+anno_defs(Is0) ->
+ {Bef,[Fi|Is1]} =
+ splitwith(fun({func_info,_,_,_}) -> false;
+ (_) -> true
+ end, Is0),
+ {func_info,_,_,Arity} = Fi,
+ Regs = init_def_regs(Arity),
+ Is = defs(Is1, Regs, #{}),
+ Bef ++ [Fi|Is].
%% split_even/1
%% [1,2,3,4,5,6] -> {[1,3,5],[2,4,6]}
@@ -272,7 +339,6 @@ combine_heap_needs(H1, H2) when is_integer(H1), is_integer(H2) ->
split_even(Rs) -> split_even(Rs, [], []).
-
%%%
%%% Local functions.
%%%
@@ -284,12 +350,19 @@ split_even(Rs) -> split_even(Rs, [], []).
%%
%% killed - Reg is assigned or killed by an allocation instruction.
%% not_used - the value of Reg is not used, but Reg must not be garbage
+%% exit_not_used - the value of Reg is not used, but it must not be garbage,
+%% because an exit BIF will raise an exception and
+%% the stack will then be scanned
%% used - Reg is used
check_liveness(R, [{block,Blk}|Is], St0) ->
case check_liveness_block(R, Blk, St0) of
{transparent,St1} ->
check_liveness(R, Is, St1);
+ {alloc_used,St1} ->
+ %% Used by an allocating instruction, but value not referenced.
+ %% Must check the rest of the instructions.
+ not_used(check_liveness(R, Is, St1));
{Other,_}=Res when is_atom(Other) ->
Res
end;
@@ -303,6 +376,8 @@ check_liveness(R, [{test,_,{f,Fail},As}|Is], St0) ->
case check_liveness_at(R, Fail, St0) of
{killed,St1} ->
check_liveness(R, Is, St1);
+ {exit_not_used,St1} ->
+ check_liveness(R, Is, St1);
{not_used,St1} ->
not_used(check_liveness(R, Is, St1));
{used,_}=Used ->
@@ -322,6 +397,8 @@ check_liveness(R, [{jump,{f,F}}|_], St) ->
check_liveness_at(R, F, St);
check_liveness(R, [{case_end,Used}|_], St) ->
check_liveness_ret(R, Used, St);
+check_liveness(R, [{try_case_end,Used}|_], St) ->
+ check_liveness_ret(R, Used, St);
check_liveness(R, [{badmatch,Used}|_], St) ->
check_liveness_ret(R, Used, St);
check_liveness(_, [if_end|_], St) ->
@@ -348,17 +425,27 @@ check_liveness(R, [{bs_init,_,_,none,Ss,Dst}|Is], St) ->
check_liveness(R, [{bs_init,_,_,Live,Ss,Dst}|Is], St) ->
case R of
{x,X} ->
- case X < Live orelse member(R, Ss) of
- true -> {used,St};
- false -> {killed,St}
+ case member(R, Ss) of
+ true ->
+ {used,St};
+ false ->
+ if
+ X < Live ->
+ not_used(check_liveness(R, Is, St));
+ true ->
+ {killed,St}
+ end
end;
{y,_} ->
case member(R, Ss) of
true -> {used,St};
false ->
+ %% If the exception is taken, the stack may
+ %% be scanned. Therefore the register is not
+ %% guaranteed to be killed.
if
- R =:= Dst -> {killed,St};
- true -> check_liveness(R, Is, St)
+ R =:= Dst -> {not_used,St};
+ true -> not_used(check_liveness(R, Is, St))
end
end
end;
@@ -392,7 +479,7 @@ check_liveness(R, [{call_ext,Live,_}=I|Is], St) ->
%% We must make sure we don't check beyond this
%% instruction or we will fall through into random
%% unrelated code and get stuck in a loop.
- {killed,St}
+ {exit_not_used,St}
end
end;
check_liveness(R, [{call_fun,Live}|Is], St) ->
@@ -440,16 +527,12 @@ check_liveness(R, [{make_fun2,_,_,_,NumFree}|Is], St) ->
{x,_} -> {killed,St};
{y,_} -> not_used(check_liveness(R, Is, St))
end;
-check_liveness({x,_}=R, [{'catch',_,_}|Is], St) ->
- %% All x registers will be killed if an exception occurs.
- %% Therefore we only need to check the liveness for the
- %% instructions following the catch instruction.
- check_liveness(R, Is, St);
-check_liveness({x,_}=R, [{'try',_,_}|Is], St) ->
- %% All x registers will be killed if an exception occurs.
- %% Therefore we only need to check the liveness for the
- %% instructions inside the 'try' block.
- check_liveness(R, Is, St);
+check_liveness(R, [{'catch'=Op,Y,Fail}|Is], St) ->
+ Set = {set,[Y],[],{try_catch,Op,Fail}},
+ check_liveness(R, [{block,[Set]}|Is], St);
+check_liveness(R, [{'try'=Op,Y,Fail}|Is], St) ->
+ Set = {set,[Y],[],{try_catch,Op,Fail}},
+ check_liveness(R, [{block,[Set]}|Is], St);
check_liveness(R, [{try_end,Y}|Is], St) ->
case R of
Y ->
@@ -510,15 +593,34 @@ check_liveness(R, [{get_map_elements,{f,Fail},S,{list,L}}|Is], St0) ->
check_liveness(R, [{put_map,F,Op,S,D,Live,{list,Puts}}|Is], St) ->
Set = {set,[D],[S|Puts],{alloc,Live,{put_map,Op,F}}},
check_liveness(R, [{block,[Set]}||Is], St);
+check_liveness(R, [{put_tuple,Ar,D}|Is], St) ->
+ Set = {set,[D],[],{put_tuple,Ar}},
+ check_liveness(R, [{block,[Set]}|Is], St);
+check_liveness(R, [{put_list,S1,S2,D}|Is], St) ->
+ Set = {set,[D],[S1,S2],put_list},
+ check_liveness(R, [{block,[Set]}|Is], St);
check_liveness(R, [{test_heap,N,Live}|Is], St) ->
I = {block,[{set,[],[],{alloc,Live,{nozero,nostack,N,[]}}}]},
check_liveness(R, [I|Is], St);
check_liveness(R, [{allocate_zero,N,Live}|Is], St) ->
I = {block,[{set,[],[],{alloc,Live,{zero,N,0,[]}}}]},
check_liveness(R, [I|Is], St);
-check_liveness(R, [{get_list,S,D1,D2}|Is], St) ->
- I = {block,[{set,[D1,D2],[S],get_list}]},
+check_liveness(R, [{get_hd,S,D}|Is], St) ->
+ I = {block,[{set,[D],[S],get_hd}]},
check_liveness(R, [I|Is], St);
+check_liveness(R, [{get_tl,S,D}|Is], St) ->
+ I = {block,[{set,[D],[S],get_tl}]},
+ check_liveness(R, [I|Is], St);
+check_liveness(R, [remove_message|Is], St) ->
+ check_liveness(R, Is, St);
+check_liveness({x,X}, [build_stacktrace|_], St) when X > 0 ->
+ {killed,St};
+check_liveness(R, [{recv_mark,_}|Is], St) ->
+ check_liveness(R, Is, St);
+check_liveness(R, [{recv_set,_}|Is], St) ->
+ check_liveness(R, Is, St);
+check_liveness(R, [{'%',_}|Is], St) ->
+ check_liveness(R, Is, St);
check_liveness(_R, Is, St) when is_list(Is) ->
%% Not implemented. Conservatively assume that the register is used.
{used,St}.
@@ -553,6 +655,7 @@ check_liveness_at(R, Lbl, #live{lbl=Ll,res=ResMemorized}=St0) ->
{Res,St#live{res=gb_trees:insert(Lbl, Res, St#live.res)}}
end.
+not_used({exit_not_used,St}) -> {not_used,St};
not_used({killed,St}) -> {not_used,St};
not_used({_,_}=Res) -> Res.
@@ -560,7 +663,7 @@ check_liveness_ret(R, R, St) -> {used,St};
check_liveness_ret(_, _, St) -> {killed,St}.
%% check_liveness_block(Reg, [Instruction], State) ->
-%% {killed | not_used | used | transparent,State'}
+%% {killed | not_used | used | alloc_used | transparent,State'}
%% Finds out how Reg is used in the instruction sequence inside a block.
%% Returns one of:
%% killed - Reg is assigned a new value or killed by an
@@ -568,9 +671,10 @@ check_liveness_ret(_, _, St) -> {killed,St}.
%% not_used - The value is not used, but the register is referenced
%% e.g. by an allocation instruction
%% transparent - Reg is neither used nor killed
+%% alloc_used - Used only in an allocate instruction
%% used - Reg is explicitly used by an instruction
%%
-%% '%live' annotations are not allowed.
+%% Annotations are not allowed.
%%
%% (Unknown instructions will cause an exception.)
@@ -580,13 +684,30 @@ check_liveness_block({x,X}=R, [{set,Ds,Ss,{alloc,Live,Op}}|Is], St0) ->
{killed,St0};
true ->
case check_liveness_block_1(R, Ss, Ds, Op, Is, St0) of
- {killed,St} -> {not_used,St};
- {transparent,St} -> {not_used,St};
- {_,_}=Res -> Res
+ {transparent,St} -> {alloc_used,St};
+ {_,_}=Res -> not_used(Res)
end
end;
-check_liveness_block({y,_}=R, [{set,Ds,Ss,{alloc,_Live,Op}}|Is], St) ->
- check_liveness_block_1(R, Ss, Ds, Op, Is, St);
+check_liveness_block({y,_}=R, [{set,Ds,Ss,{alloc,_Live,Op}}|Is], St0) ->
+ case check_liveness_block_1(R, Ss, Ds, Op, Is, St0) of
+ {transparent,St} -> {alloc_used,St};
+ {_,_}=Res -> not_used(Res)
+ end;
+check_liveness_block({y,_}=R, [{set,Ds,Ss,{try_catch,_,Op}}|Is], St0) ->
+ case Ds of
+ [R] ->
+ {killed,St0};
+ _ ->
+ case check_liveness_block_1(R, Ss, Ds, Op, Is, St0) of
+ {exit_not_used,St} ->
+ {used,St};
+ {transparent,St} ->
+ %% Conservatively assume that it is used.
+ {used,St};
+ {_,_}=Res ->
+ Res
+ end
+ end;
check_liveness_block(R, [{set,Ds,Ss,Op}|Is], St) ->
check_liveness_block_1(R, Ss, Ds, Op, Is, St);
check_liveness_block(_, [], St) -> {transparent,St}.
@@ -602,6 +723,11 @@ check_liveness_block_1(R, Ss, Ds, Op, Is, St0) ->
true -> {killed,St};
false -> check_liveness_block(R, Is, St)
end;
+ {exit_not_used,St} ->
+ case member(R, Ds) of
+ true -> {exit_not_used,St};
+ false -> check_liveness_block(R, Is, St)
+ end;
{not_used,St} ->
not_used(case member(R, Ds) of
true -> {killed,St};
@@ -612,8 +738,8 @@ check_liveness_block_1(R, Ss, Ds, Op, Is, St0) ->
end
end.
-check_liveness_block_2(R, {gc_bif,_Op,{f,Lbl}}, _Ss, St) ->
- check_liveness_block_3(R, Lbl, St);
+check_liveness_block_2(R, {gc_bif,Op,{f,Lbl}}, Ss, St) ->
+ check_liveness_block_3(R, Lbl, {Op,length(Ss)}, St);
check_liveness_block_2(R, {bif,Op,{f,Lbl}}, Ss, St) ->
Arity = length(Ss),
case erl_internal:comp_op(Op, Arity) orelse
@@ -621,16 +747,23 @@ check_liveness_block_2(R, {bif,Op,{f,Lbl}}, Ss, St) ->
true ->
{killed,St};
false ->
- check_liveness_block_3(R, Lbl, St)
+ check_liveness_block_3(R, Lbl, {Op,length(Ss)}, St)
end;
check_liveness_block_2(R, {put_map,_Op,{f,Lbl}}, _Ss, St) ->
- check_liveness_block_3(R, Lbl, St);
+ check_liveness_block_3(R, Lbl, {unsafe,0}, St);
check_liveness_block_2(_, _, _, St) ->
{killed,St}.
-check_liveness_block_3(_, 0, St) ->
+check_liveness_block_3({x,_}, 0, _FA, St) ->
{killed,St};
-check_liveness_block_3(R, Lbl, St0) ->
+check_liveness_block_3({y,_}, 0, {F,A}, St) ->
+ %% If the exception is thrown, the stack may be scanned,
+ %% thus implicitly using the y register.
+ case erl_bifs:is_safe(erlang, F, A) of
+ true -> {killed,St};
+ false -> {used,St}
+ end;
+check_liveness_block_3(R, Lbl, _FA, St0) ->
check_liveness_at(R, Lbl, St0).
index_labels_1([{label,Lbl}|Is0], Acc) ->
@@ -643,22 +776,70 @@ index_labels_1([], Acc) -> gb_trees:from_orddict(sort(Acc)).
drop_labels([{label,_}|Is]) -> drop_labels(Is);
drop_labels(Is) -> Is.
-%% Help functions for combine_heap_needs.
-combine_alloc_lists(Al1, Al2) ->
- combine_alloc_lists_1(sort(Al1++Al2)).
+replace_labels_1([{test,Test,{f,Lbl},Ops}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{test,Test,{f,label(Lbl, D, Fb)},Ops}|Acc], D, Fb);
+replace_labels_1([{test,Test,{f,Lbl},Live,Ops,Dst}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{test,Test,{f,label(Lbl, D, Fb)},Live,Ops,Dst}|Acc], D, Fb);
+replace_labels_1([{select,I,R,{f,Fail0},Vls0}|Is], Acc, D, Fb) ->
+ Vls = map(fun ({f,L}) -> {f,label(L, D, Fb)};
+ (Other) -> Other
+ end, Vls0),
+ Fail = label(Fail0, D, Fb),
+ replace_labels_1(Is, [{select,I,R,{f,Fail},Vls}|Acc], D, Fb);
+replace_labels_1([{'try',R,{f,Lbl}}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{'try',R,{f,label(Lbl, D, Fb)}}|Acc], D, Fb);
+replace_labels_1([{'catch',R,{f,Lbl}}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{'catch',R,{f,label(Lbl, D, Fb)}}|Acc], D, Fb);
+replace_labels_1([{jump,{f,Lbl}}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{jump,{f,label(Lbl, D, Fb)}}|Acc], D, Fb);
+replace_labels_1([{loop_rec,{f,Lbl},R}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{loop_rec,{f,label(Lbl, D, Fb)},R}|Acc], D, Fb);
+replace_labels_1([{loop_rec_end,{f,Lbl}}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{loop_rec_end,{f,label(Lbl, D, Fb)}}|Acc], D, Fb);
+replace_labels_1([{wait,{f,Lbl}}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{wait,{f,label(Lbl, D, Fb)}}|Acc], D, Fb);
+replace_labels_1([{wait_timeout,{f,Lbl},To}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{wait_timeout,{f,label(Lbl, D, Fb)},To}|Acc], D, Fb);
+replace_labels_1([{bif,Name,{f,Lbl},As,R}|Is], Acc, D, Fb) when Lbl =/= 0 ->
+ replace_labels_1(Is, [{bif,Name,{f,label(Lbl, D, Fb)},As,R}|Acc], D, Fb);
+replace_labels_1([{gc_bif,Name,{f,Lbl},Live,As,R}|Is], Acc, D, Fb) when Lbl =/= 0 ->
+ replace_labels_1(Is, [{gc_bif,Name,{f,label(Lbl, D, Fb)},Live,As,R}|Acc], D, Fb);
+replace_labels_1([{call,Ar,{f,Lbl}}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{call,Ar,{f,label(Lbl, D, Fb)}}|Acc], D, Fb);
+replace_labels_1([{make_fun2,{f,Lbl},U1,U2,U3}|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [{make_fun2,{f,label(Lbl, D, Fb)},U1,U2,U3}|Acc], D, Fb);
+replace_labels_1([{bs_init,{f,Lbl},Info,Live,Ss,Dst}|Is], Acc, D, Fb) when Lbl =/= 0 ->
+ replace_labels_1(Is, [{bs_init,{f,label(Lbl, D, Fb)},Info,Live,Ss,Dst}|Acc], D, Fb);
+replace_labels_1([{bs_put,{f,Lbl},Info,Ss}|Is], Acc, D, Fb) when Lbl =/= 0 ->
+ replace_labels_1(Is, [{bs_put,{f,label(Lbl, D, Fb)},Info,Ss}|Acc], D, Fb);
+replace_labels_1([{put_map=I,{f,Lbl},Op,Src,Dst,Live,List}|Is], Acc, D, Fb)
+ when Lbl =/= 0 ->
+ replace_labels_1(Is, [{I,{f,label(Lbl, D, Fb)},Op,Src,Dst,Live,List}|Acc], D, Fb);
+replace_labels_1([{get_map_elements=I,{f,Lbl},Src,List}|Is], Acc, D, Fb) when Lbl =/= 0 ->
+ replace_labels_1(Is, [{I,{f,label(Lbl, D, Fb)},Src,List}|Acc], D, Fb);
+replace_labels_1([I|Is], Acc, D, Fb) ->
+ replace_labels_1(Is, [I|Acc], D, Fb);
+replace_labels_1([], Acc, _, _) -> Acc.
+
+label(Old, D, Fb) ->
+ case D of
+ #{Old := New} -> New;
+ _ -> Fb(Old)
+ end.
+
+%% Help function for combine_heap_needs.
-combine_alloc_lists_1([{words,W1},{words,W2}|T])
- when is_integer(W1), is_integer(W2) ->
- [{words,W1+W2}|combine_alloc_lists_1(T)];
-combine_alloc_lists_1([{floats,F1},{floats,F2}|T])
- when is_integer(F1), is_integer(F2) ->
- [{floats,F1+F2}|combine_alloc_lists_1(T)];
-combine_alloc_lists_1([{words,_}=W|T]) ->
- [W|combine_alloc_lists_1(T)];
-combine_alloc_lists_1([{floats,_}=F|T]) ->
- [F|combine_alloc_lists_1(T)];
-combine_alloc_lists_1([]) -> [].
+combine_alloc_lists(Al0) ->
+ Al1 = flatmap(fun(Words) when is_integer(Words) ->
+ [{words,Words}];
+ ({alloc,List}) ->
+ List
+ end, Al0),
+ Al2 = sofs:relation(Al1),
+ Al3 = sofs:relation_to_family(Al2),
+ Al4 = sofs:to_external(Al3),
+ [{Tag,lists:sum(L)} || {Tag,L} <- Al4].
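
The rewritten combine_alloc_lists/1 groups the tagged allocation amounts with sofs and then sums each group. A shell walk-through of the same pipeline on a small example:

    1> Al = [{words,2},{floats,1},{words,3}].
    [{words,2},{floats,1},{words,3}]
    2> Fam = sofs:to_external(sofs:relation_to_family(sofs:relation(Al))).
    [{floats,[1]},{words,[2,3]}]
    3> [{Tag,lists:sum(L)} || {Tag,L} <- Fam].
    [{floats,1},{words,5}]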
%% live_opt/4.
@@ -697,10 +878,14 @@ live_opt([{test,bs_start_match2,Fail,Live,[Src,_],_}=I|Is], _, D, Acc) ->
%% Other instructions.
live_opt([{block,Bl0}|Is], Regs0, D, Acc) ->
- Live0 = {'%live',live_regs(Regs0),Regs0},
+ Live0 = make_anno({used,Regs0}),
{Bl,Regs} = live_opt_block(reverse(Bl0), Regs0, D, [Live0]),
- Live = {'%live',live_regs(Regs),Regs},
+ Live = make_anno({used,Regs}),
live_opt(Is, Regs, D, [{block,[Live|Bl]}|Acc]);
+live_opt([build_stacktrace=I|Is], _, D, Acc) ->
+ live_opt(Is, live_call(1), D, [I|Acc]);
+live_opt([raw_raise=I|Is], _, D, Acc) ->
+ live_opt(Is, live_call(3), D, [I|Acc]);
live_opt([{label,L}=I|Is], Regs, D0, Acc) ->
D = gb_trees:insert(L, Regs, D0),
live_opt(Is, Regs, D, [I|Acc]);
@@ -742,12 +927,19 @@ live_opt([{test,_,Fail,Live,Ss,_}=I|Is], _, D, Acc) ->
Regs1 = x_live(Ss, Regs0),
Regs = live_join_label(Fail, D, Regs1),
live_opt(Is, Regs, D, [I|Acc]);
-live_opt([{select,_,Src,Fail,List}=I|Is], Regs0, D, Acc) ->
+live_opt([{select,_,Src,Fail,List}=I|Is], _, D, Acc) ->
+ Regs0 = 0,
Regs1 = x_live([Src], Regs0),
Regs = live_join_labels([Fail|List], D, Regs1),
live_opt(Is, Regs, D, [I|Acc]);
-live_opt([{try_case,_}=I|Is], _, D, Acc) ->
- live_opt(Is, live_call(1), D, [I|Acc]);
+live_opt([{try_case,Y}=I|Is], Regs0, D, Acc) ->
+ Regs = live_call(1),
+ case Regs0 of
+ 0 ->
+ live_opt(Is, Regs, D, [{try_end,Y}|Acc]);
+ _ ->
+ live_opt(Is, live_call(1), D, [I|Acc])
+ end;
live_opt([{loop_rec,_Fail,_Dst}=I|Is], _, D, Acc) ->
live_opt(Is, 0, D, [I|Acc]);
live_opt([timeout=I|Is], _, D, Acc) ->
@@ -759,6 +951,25 @@ live_opt([{get_map_elements,Fail,Src,{list,List}}=I|Is], Regs0, D, Acc) ->
Regs1 = x_live([Src|Ss], x_dead(Ds, Regs0)),
Regs = live_join_label(Fail, D, Regs1),
live_opt(Is, Regs, D, [I|Acc]);
+live_opt([{gc_bif,N,F,R,As,Dst}=I|Is], Regs0, D, Acc) ->
+ Bl = [{set,[Dst],As,{alloc,R,{gc_bif,N,F}}}],
+ {_,Regs} = live_opt_block(Bl, Regs0, D, []),
+ live_opt(Is, Regs, D, [I|Acc]);
+live_opt([{bif,N,F,As,Dst}=I|Is], Regs0, D, Acc) ->
+ Bl = [{set,[Dst],As,{bif,N,F}}],
+ {_,Regs} = live_opt_block(Bl, Regs0, D, []),
+ live_opt(Is, Regs, D, [I|Acc]);
+live_opt([{get_tuple_element,Src,Idx,Dst}=I|Is], Regs0, D, Acc) ->
+ Bl = [{set,[Dst],[Src],{get_tuple_element,Idx}}],
+ {_,Regs} = live_opt_block(Bl, Regs0, D, []),
+ live_opt(Is, Regs, D, [I|Acc]);
+live_opt([{move,Src,Dst}=I|Is], Regs0, D, Acc) ->
+ Regs = x_live([Src], x_dead([Dst], Regs0)),
+ live_opt(Is, Regs, D, [I|Acc]);
+live_opt([{put_map,F,Op,S,Dst,R,{list,Puts}}=I|Is], Regs0, D, Acc) ->
+ Bl = [{set,[Dst],[S|Puts],{alloc,R,{put_map,Op,F}}}],
+ {_,Regs} = live_opt_block(Bl, Regs0, D, []),
+ live_opt(Is, Regs, D, [I|Acc]);
%% Transparent instructions - they neither use nor modify x registers.
live_opt([{deallocate,_}=I|Is], Regs, D, Acc) ->
@@ -775,6 +986,10 @@ live_opt([{wait_timeout,_,{Tag,_}}=I|Is], Regs, D, Acc) when Tag =/= x ->
live_opt(Is, Regs, D, [I|Acc]);
live_opt([{line,_}=I|Is], Regs, D, Acc) ->
live_opt(Is, Regs, D, [I|Acc]);
+live_opt([{'catch',_,_}=I|Is], Regs, D, Acc) ->
+ live_opt(Is, Regs, D, [I|Acc]);
+live_opt([{'try',_,_}=I|Is], Regs, D, Acc) ->
+ live_opt(Is, Regs, D, [I|Acc]);
%% The following instructions can occur if the "compilation" has been
%% started from a .S file using the 'from_asm' option.
@@ -805,7 +1020,7 @@ live_opt_block([{set,Ds,Ss,Op0}|Is], Regs0, D, Acc) ->
_ ->
live_opt_block(Is, Regs, D, [I|Acc])
end;
-live_opt_block([{'%live',_,_}|Is], Regs, D, Acc) ->
+live_opt_block([{'%anno',_}|Is], Regs, D, Acc) ->
live_opt_block(Is, Regs, D, Acc);
live_opt_block([], Regs, _, Acc) -> {Acc,Regs}.
@@ -865,3 +1080,220 @@ split_even([], Ss, Ds) ->
{reverse(Ss),reverse(Ds)};
split_even([S,D|Rs], Ss, Ds) ->
split_even(Rs, [S|Ss], [D|Ds]).
+
+%%%
+%%% Add annotations for defined registers.
+%%%
+%%% This analysis is done by scanning the instructions in
+%%% execution order.
+%%%
+
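As a reading aid for the analysis below (editorial note, not part of the patch): the set of defined x registers is a plain integer bitmask, built and queried with the helpers init_def_regs/1, def_regs/2 and all_defined/2 defined further down.

    %% Illustration only:
    %% init_def_regs(3)         -> 2#111     %% {x,0},{x,1},{x,2} defined
    %% def_regs([{x,4}], 2#111) -> 2#10111   %% additionally mark {x,4}
    %% all_defined(2, 2#10111)  -> true      %% {x,0} and {x,1} are defined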
+defs([{apply,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, 1, D)];
+defs([{bif,_,{f,Fail},_Src,Dst}=I|Is], Regs0, D) ->
+ Regs = def_regs([Dst], Regs0),
+ [I|defs(Is, Regs, update_regs(Fail, Regs0, D))];
+defs([{block,Block0}|Is], Regs0, D0) ->
+ {Block,Regs,D} = defs_list(Block0, Regs0, D0),
+ [{block,[make_anno({def,Regs0})|Block]}|defs(Is, Regs, D)];
+defs([{bs_init,{f,L},_,_,_,Dst}=I|Is], Regs0, D) ->
+ Regs = def_regs([Dst], Regs0),
+ [I|defs(Is, Regs, update_regs(L, Regs, D))];
+defs([{bs_put,{f,L},_,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, update_regs(L, Regs, D))];
+defs([build_stacktrace=I|Is], _Regs, D) ->
+ [I|defs(Is, 1, D)];
+defs([{call,_,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, 1, D)];
+defs([{call_ext,_,{extfunc,M,F,A}}=I|Is], _Regs, D) ->
+ case erl_bifs:is_exit_bif(M, F, A) of
+ false ->
+ [I|defs(Is, 1, D)];
+ true ->
+ [I|defs_unreachable(Is, D)]
+ end;
+defs([{call_ext,_,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, 1, D)];
+defs([{call_fun,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, 1, D)];
+defs([{'catch',_,{f,L}}=I|Is], Regs, D) ->
+ RegsAtLabel = init_def_regs(1),
+ [I|defs(Is, Regs, update_regs(L, RegsAtLabel, D))];
+defs([{catch_end,_}=I|Is], _Regs, D) ->
+ Regs = init_def_regs(1),
+ [I|defs(Is, Regs, D)];
+defs([{gc_bif,_,{f,Fail},Live,_Src,Dst}=I|Is], Regs0, D) ->
+ true = all_defined(Live, Regs0), %Assertion.
+ Regs = def_regs([Dst], init_def_regs(Live)),
+ [I|defs(Is, Regs, update_regs(Fail, Regs0, D))];
+defs([{get_map_elements,{f,L},_Src,{list,DstList}}=I|Is], Regs0, D) ->
+ {_,Ds} = beam_utils:split_even(DstList),
+ Regs = def_regs(Ds, Regs0),
+ [I|defs(Is, Regs, update_regs(L, Regs0, D))];
+defs([{get_tuple_element,_,_,Dst}=I|Is], Regs0, D) ->
+ Regs = def_regs([Dst], Regs0),
+ [I|defs(Is, Regs, D)];
+defs([{jump,{f,L}}=I|Is], Regs, D) ->
+ [I|defs_unreachable(Is, update_regs(L, Regs, D))];
+defs([{label,L}=I|Is], Regs0, D) ->
+ case D of
+ #{L:=Regs1} ->
+ Regs = Regs0 band Regs1,
+ [I|defs(Is, Regs, D)];
+ #{} ->
+ [I|defs(Is, Regs0, D)]
+ end;
+defs([{loop_rec,{f,L},{x,0}}=I|Is], _Regs, D0) ->
+ RegsAtLabel = init_def_regs(0),
+ D = update_regs(L, RegsAtLabel, D0),
+ [I|defs(Is, init_def_regs(1), D)];
+defs([{loop_rec_end,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, 0, D)];
+defs([{make_fun2,_,_,_,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, 1, D)];
+defs([{move,_,Dst}=I|Is], Regs0, D) ->
+ Regs = def_regs([Dst], Regs0),
+ [I|defs(Is, Regs, D)];
+defs([{put_map,{f,Fail},_,_,Dst,_,_}=I|Is], Regs0, D) ->
+ Regs = def_regs([Dst], Regs0),
+ [I|defs(Is, Regs, update_regs(Fail, Regs0, D))];
+defs([raw_raise=I|Is], _Regs, D) ->
+ [I|defs(Is, 1, D)];
+defs([return=I|Is], _Regs, D) ->
+ [I|defs_unreachable(Is, D)];
+defs([{select,_,_Src,Fail,List}=I|Is], Regs, D0) ->
+ D = update_list([Fail|List], Regs, D0),
+ [I|defs_unreachable(Is, D)];
+defs([{test,_,{f,L},_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, update_regs(L, Regs, D))];
+defs([{test,_,{f,L},Live,_,Dst}=I|Is], Regs0, D) ->
+ true = all_defined(Live, Regs0), %Assertion.
+ Regs = def_regs([Dst], init_def_regs(Live)),
+ [I|defs(Is, Regs, update_regs(L, Regs0, D))];
+defs([{'try',_,{f,L}}=I|Is], Regs, D) ->
+ RegsAtLabel = init_def_regs(3),
+ [I|defs(Is, Regs, update_regs(L, RegsAtLabel, D))];
+defs([{try_case,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, init_def_regs(3), D)];
+defs([{wait,_}=I|Is], _Regs, D) ->
+ [I|defs_unreachable(Is, D)];
+defs([{wait_timeout,_,_}=I|Is], _Regs, D) ->
+ [I|defs(Is, 0, D)];
+
+%% Exceptions.
+defs([{badmatch,_}=I|Is], _Regs, D) ->
+ [I|defs_unreachable(Is, D)];
+defs([{case_end,_}=I|Is], _Regs, D) ->
+ [I|defs_unreachable(Is, D)];
+defs([if_end=I|Is], _Regs, D) ->
+ [I|defs_unreachable(Is, D)];
+defs([{try_case_end,_}=I|Is], _Regs, D) ->
+ [I|defs_unreachable(Is, D)];
+
+%% Neutral instructions
+defs([{bs_context_to_binary,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{bs_restore2,_,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{bs_save2,_,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{deallocate,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{kill,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{line,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{recv_mark,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{recv_set,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([timeout=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{trim,_,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{try_end,_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([{'%',_}=I|Is], Regs, D) ->
+ [I|defs(Is, Regs, D)];
+defs([], _, _) -> [].
+
+defs_unreachable([{label,L}=I|Is], D) ->
+ case D of
+ #{L:=Regs} ->
+ [I|defs(Is, Regs, D)];
+ #{} ->
+ defs_unreachable(Is, D)
+ end;
+defs_unreachable([_|Is], D) ->
+ defs_unreachable(Is, D);
+defs_unreachable([], _D) -> [].
+
+defs_list(Is, Regs, D) ->
+ defs_list(Is, Regs, D, []).
+
+defs_list([{set,Ds,_,{alloc,Live,Info}}=I|Is], Regs0, D0, Acc) ->
+ true = all_defined(Live, Regs0), %Assertion.
+ D = case Info of
+ {gc_bif,_,{f,Fail}} ->
+ update_regs(Fail, Regs0, D0);
+ {put_map,_,{f,Fail}} ->
+ update_regs(Fail, Regs0, D0);
+ _ ->
+ D0
+ end,
+ Regs = def_regs(Ds, init_def_regs(Live)),
+ defs_list(Is, Regs, D, [I|Acc]);
+defs_list([{set,Ds,_,Info}=I|Is], Regs0, D0, Acc) ->
+ D = case Info of
+ {bif,_,{f,Fail}} ->
+ update_regs(Fail, Regs0, D0);
+ {try_catch,'catch',{f,Fail}} ->
+ update_regs(Fail, init_def_regs(1), D0);
+ {try_catch,'try',{f,Fail}} ->
+ update_regs(Fail, init_def_regs(3), D0);
+ _ ->
+ D0
+ end,
+ Regs = def_regs(Ds, Regs0),
+ defs_list(Is, Regs, D, [I|Acc]);
+defs_list([], Regs, D, Acc) ->
+ {reverse(Acc),Regs,D}.
+
+init_def_regs(Arity) ->
+ (1 bsl Arity) - 1.
+
+def_regs([{x,X}|T], Regs) ->
+ def_regs(T, Regs bor (1 bsl X));
+def_regs([_|T], Regs) ->
+ def_regs(T, Regs);
+def_regs([], Regs) -> Regs.
+
+update_list([{f,L}|T], Regs, D0) ->
+ D = update_regs(L, Regs, D0),
+ update_list(T, Regs, D);
+update_list([_|T], Regs, D) ->
+ update_list(T, Regs, D);
+update_list([], _Regs, D) -> D.
+
+update_regs(L, Regs0, D) ->
+ case D of
+ #{L:=Regs1} ->
+ Regs = Regs0 band Regs1,
+ D#{L:=Regs};
+ #{} ->
+ D#{L=>Regs0}
+ end.
+
+all_defined(Live, Regs) ->
+ All = (1 bsl Live) - 1,
+ Regs band All =:= All.
+
+%%%
+%%% Utilities.
+%%%
+
+%% make_anno(Anno) -> WrappedAnno.
+%% Wrap an annotation term.
+
+make_anno(Anno) ->
+ {'%anno',Anno}.
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index ea38969814..9de773542e 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -85,8 +85,6 @@ format_error(Error) ->
%%% Things currently not checked. XXX
%%%
%%% - Heap allocation for binaries.
-%%% - That put_tuple is followed by the correct number of
-%%% put instructions.
%%%
%% validate(Module, [Function]) -> [] | [Error]
@@ -130,9 +128,8 @@ validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) ->
throw:Error ->
%% Controlled error.
[Error|validate_0(Module, Fs, Ft)];
- Class:Error ->
+ Class:Error:Stack ->
%% Crash.
- Stack = erlang:get_stacktrace(),
io:fwrite("Function: ~w/~w\n", [Name,Ar]),
erlang:raise(Class, Error, Stack)
end.
@@ -149,7 +146,8 @@ validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) ->
hf=0, %Available heap size for floats.
fls=undefined, %Floating point state.
ct=[], %List of hot catch/try labels
- setelem=false %Previous instruction was setelement/3.
+ setelem=false, %Previous instruction was setelement/3.
+ puts_left=none %put/1 instructions left.
}).
-type label() :: integer().
@@ -294,6 +292,8 @@ valfun_1({bs_context_to_binary,Ctx}, #vst{current=#st{x=Xs}}=Vst) ->
end;
valfun_1(bs_init_writable=I, Vst) ->
call(I, 1, Vst);
+valfun_1(build_stacktrace=I, Vst) ->
+ call(I, 1, Vst);
valfun_1({move,{y,_}=Src,{y,_}=Dst}, Vst) ->
%% The stack trimming optimization may generate a move from an initialized
%% but unassigned Y register to another Y register.
@@ -339,11 +339,25 @@ valfun_1({put_list,A,B,Dst}, Vst0) ->
Vst = eat_heap(2, Vst0),
set_type_reg(cons, Dst, Vst);
valfun_1({put_tuple,Sz,Dst}, Vst0) when is_integer(Sz) ->
+ Vst1 = eat_heap(1, Vst0),
+ Vst = set_type_reg(tuple_in_progress, Dst, Vst1),
+ #vst{current=St0} = Vst,
+ St = St0#st{puts_left={Sz,{Dst,{tuple,Sz}}}},
+ Vst#vst{current=St};
+valfun_1({put,Src}, Vst0) ->
+ assert_term(Src, Vst0),
Vst = eat_heap(1, Vst0),
- set_type_reg({tuple,Sz}, Dst, Vst);
-valfun_1({put,Src}, Vst) ->
- assert_term(Src, Vst),
- eat_heap(1, Vst);
+ #vst{current=St0} = Vst,
+ case St0 of
+ #st{puts_left=none} ->
+ error(not_building_a_tuple);
+ #st{puts_left={1,{Dst,Type}}} ->
+ St = St0#st{puts_left=none},
+ set_type_reg(Type, Dst, Vst#vst{current=St});
+ #st{puts_left={PutsLeft,Info}} when is_integer(PutsLeft) ->
+ St = St0#st{puts_left={PutsLeft-1,Info}},
+ Vst#vst{current=St}
+ end;
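To make the new puts_left bookkeeping concrete, here is a hedged sketch of the kind of BEAM sequence it validates (registers made up, not taken from the patch): put_tuple announces how many put/1 instructions must follow, and the destination only gets its tuple type once the last one has been seen.

    {put_tuple,3,{x,0}}.   %% puts_left = 3; {x,0} is tuple_in_progress
    {put,{x,1}}.           %% puts_left = 2
    {put,{x,2}}.           %% puts_left = 1
    {put,{y,0}}.           %% puts_left = none; {x,0} becomes {tuple,3}
    %% A put/1 outside such a sequence now fails with not_building_a_tuple.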
%% Instructions for optimization of selective receives.
valfun_1({recv_mark,{f,Fail}}, Vst) when is_integer(Fail) ->
Vst;
@@ -523,6 +537,8 @@ valfun_4({bif,element,{f,Fail},[Pos,Tuple],Dst}, Vst0) ->
valfun_4({bif,raise,{f,0},Src,_Dst}, Vst) ->
validate_src(Src, Vst),
kill_state(Vst);
+valfun_4(raw_raise=I, Vst) ->
+ call(I, 3, Vst);
valfun_4({bif,Op,{f,Fail},Src,Dst}, Vst0) ->
validate_src(Src, Vst0),
Vst = branch_state(Fail, Vst0),
@@ -575,6 +591,12 @@ valfun_4({get_list,Src,D1,D2}, Vst0) ->
assert_type(cons, Src, Vst0),
Vst = set_type_reg(term, D1, Vst0),
set_type_reg(term, D2, Vst);
+valfun_4({get_hd,Src,Dst}, Vst) ->
+ assert_type(cons, Src, Vst),
+ set_type_reg(term, Dst, Vst);
+valfun_4({get_tl,Src,Dst}, Vst) ->
+ assert_type(cons, Src, Vst),
+ set_type_reg(term, Dst, Vst);
valfun_4({get_tuple_element,Src,I,Dst}, Vst) ->
assert_type({tuple_element,I+1}, Src, Vst),
set_type_reg(term, Dst, Vst);
@@ -1271,6 +1293,7 @@ get_move_term_type(Src, Vst) ->
initialized -> error({unassigned,Src});
{catchtag,_} -> error({catchtag,Src});
{trytag,_} -> error({trytag,Src});
+ tuple_in_progress -> error({tuple_in_progress,Src});
Type -> Type
end.
@@ -1279,10 +1302,7 @@ get_move_term_type(Src, Vst) ->
%% a standard Erlang type (no catch/try tags or match contexts).
get_term_type(Src, Vst) ->
- case get_term_type_1(Src, Vst) of
- initialized -> error({unassigned,Src});
- {catchtag,_} -> error({catchtag,Src});
- {trytag,_} -> error({trytag,Src});
+ case get_move_term_type(Src, Vst) of
#ms{} -> error({match_context,Src});
Type -> Type
end.
diff --git a/lib/compiler/src/beam_z.erl b/lib/compiler/src/beam_z.erl
index 787e33c142..6c3a6995d7 100644
--- a/lib/compiler/src/beam_z.erl
+++ b/lib/compiler/src/beam_z.erl
@@ -24,22 +24,23 @@
-export([module/2]).
--import(lists, [dropwhile/2]).
+-import(lists, [dropwhile/2,map/2]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_asm:module_code()}.
-module({Mod,Exp,Attr,Fs0,Lc}, _Opt) ->
- Fs = [function(F) || F <- Fs0],
+module({Mod,Exp,Attr,Fs0,Lc}, Opts) ->
+ NoGetHdTl = proplists:get_bool(no_get_hd_tl, Opts),
+ Fs = [function(F, NoGetHdTl) || F <- Fs0],
{ok,{Mod,Exp,Attr,Fs,Lc}}.
-function({function,Name,Arity,CLabel,Is0}) ->
+function({function,Name,Arity,CLabel,Is0}, NoGetHdTl) ->
try
- Is = undo_renames(Is0),
+ Is1 = undo_renames(Is0),
+ Is = maybe_eliminate_get_hd_tl(Is1, NoGetHdTl),
{function,Name,Arity,CLabel,Is}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [Name,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -66,6 +67,10 @@ undo_renames([{bif,raise,_,_,_}=I|Is0]) ->
(_) -> true
end, Is0),
[I|undo_renames(Is)];
+undo_renames([{get_hd,Src,Dst1},{get_tl,Src,Dst2}|Is]) ->
+ [{get_list,Src,Dst1,Dst2}|undo_renames(Is)];
+undo_renames([{get_tl,Src,Dst2},{get_hd,Src,Dst1}|Is]) ->
+ [{get_list,Src,Dst1,Dst2}|undo_renames(Is)];
undo_renames([I|Is]) ->
[undo_rename(I)|undo_renames(Is)];
undo_renames([]) -> [].
@@ -108,3 +113,17 @@ undo_rename({get_map_elements,Fail,Src,{list,List}}) ->
undo_rename({select,I,Reg,Fail,List}) ->
{I,Reg,Fail,{list,List}};
undo_rename(I) -> I.
+
+%%%
+%%% Eliminate get_hd/get_tl instructions if requested by
+%%% the no_get_hd_tl option.
+%%%
+
+maybe_eliminate_get_hd_tl(Is, true) ->
+ map(fun({get_hd,Cons,Hd}) ->
+ {get_list,Cons,Hd,{x,1022}};
+ ({get_tl,Cons,Tl}) ->
+ {get_list,Cons,{x,1022},Tl};
+ (I) -> I
+ end, Is);
+maybe_eliminate_get_hd_tl(Is, false) -> Is.
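A hedged usage note: judging from the compile.erl hunk later in this diff, no_get_hd_tl is implied by the r16 through r20 compatibility options, so compiling with one of them (the module name here is made up) routes list matching through this fallback:

    %% Compile for an older runtime; get_hd/get_tl are rewritten back to
    %% get_list, with {x,1022} as a throw-away destination register.
    {ok,_} = compile:file(old_target_mod, [r20, report]).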
diff --git a/lib/compiler/src/cerl_clauses.erl b/lib/compiler/src/cerl_clauses.erl
index 7d6518c3c6..fa5104c01b 100644
--- a/lib/compiler/src/cerl_clauses.erl
+++ b/lib/compiler/src/cerl_clauses.erl
@@ -353,6 +353,8 @@ match(P, E, Bs) ->
map ->
%% The most we can do is to say "definitely no match" if a
%% map pattern is matched against non-map data.
+            %% (Note: See the document internal_doc/cerl-notes.md for
+            %% information about why we don't try to do more here.)
case E of
any ->
{false, Bs};
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index 50b0ba76f8..c6a0056a70 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -203,7 +203,12 @@ expand_opts(Opts0) ->
{_,_,undefined} -> [debug_info|Opts0];
{_,_,_} -> Opts0
end,
- foldr(fun expand_opt/2, [], Opts).
+    %% iff,unless processing is too complex...
+ Opts1 = case proplists:is_defined(makedep_side_effect,Opts) of
+ true -> proplists:delete(makedep,Opts);
+ false -> Opts
+ end,
+ foldr(fun expand_opt/2, [], Opts1).
expand_opt(basic_validation, Os) ->
[no_code_generation,to_pp,binary|Os];
@@ -214,13 +219,15 @@ expand_opt(report, Os) ->
expand_opt(return, Os) ->
[return_errors,return_warnings|Os];
expand_opt(r16, Os) ->
- [no_record_opt,no_utf8_atoms|Os];
+ [no_get_hd_tl,no_record_opt,no_utf8_atoms|Os];
expand_opt(r17, Os) ->
- [no_record_opt,no_utf8_atoms|Os];
+ [no_get_hd_tl,no_record_opt,no_utf8_atoms|Os];
expand_opt(r18, Os) ->
- [no_record_opt,no_utf8_atoms|Os];
+ [no_get_hd_tl,no_record_opt,no_utf8_atoms|Os];
expand_opt(r19, Os) ->
- [no_record_opt,no_utf8_atoms|Os];
+ [no_get_hd_tl,no_record_opt,no_utf8_atoms|Os];
+expand_opt(r20, Os) ->
+ [no_get_hd_tl,no_record_opt,no_utf8_atoms|Os];
expand_opt({debug_info_key,_}=O, Os) ->
[encrypt_debug_info,O|Os];
expand_opt(no_float_opt, Os) ->
@@ -674,6 +681,7 @@ select_list_passes_1([], _, Acc) ->
standard_passes() ->
[?pass(transform_module),
+ {iff,makedep_side_effect,?pass(makedep_and_output)},
{iff,makedep,[
?pass(makedep),
{unless,binary,?pass(makedep_output)}
@@ -706,14 +714,16 @@ core_passes() ->
[{unless,no_copt,
[{core_old_inliner,fun test_old_inliner/1,fun core_old_inliner/2},
{iff,doldinline,{listing,"oldinline"}},
- {pass,sys_core_fold},
+ {unless,no_fold,{pass,sys_core_fold}},
{iff,dcorefold,{listing,"corefold"}},
{core_inline_module,fun test_core_inliner/1,fun core_inline_module/2},
{iff,dinline,{listing,"inline"}},
{core_fold_after_inlining,fun test_any_inliner/1,
fun core_fold_module_after_inlining/2},
+ {iff,dcopt,{listing,"copt"}},
+ {unless,no_alias,{pass,sys_core_alias}},
+ {iff,dalias,{listing,"core_alias"}},
?pass(core_transforms)]},
- {iff,dcopt,{listing,"copt"}},
{iff,'to_core',{done,"core"}}]}
| kernel_passes()].
@@ -731,8 +741,6 @@ kernel_passes() ->
?pass(v3_kernel),
{iff,dkern,{listing,"kernel"}},
{iff,'to_kernel',{done,"kernel"}},
- {pass,v3_life},
- {iff,dlife,{listing,"life"}},
{pass,v3_codegen},
{iff,dcg,{listing,"codegen"}}
| asm_passes()].
@@ -769,6 +777,8 @@ asm_passes() ->
{iff,drecv,{listing,"recv"}},
{unless,no_record_opt,{pass,beam_record}},
{iff,drecord,{listing,"record"}},
+ {unless,no_blk2,?pass(block2)},
+ {iff,dblk2,{listing,"block2"}},
{unless,no_stack_trimming,{pass,beam_trim}},
{iff,dtrim,{listing,"trim"}},
{pass,beam_flatten}]},
@@ -787,7 +797,7 @@ asm_passes() ->
| binary_passes()].
binary_passes() ->
- [{iff,'to_dis',{listing,"dis"}},
+ [{iff,'to_dis',?pass(to_dis)},
{native_compile,fun test_native/1,fun native_compile/2},
{unless,binary,?pass(save_binary,not_werror)}
].
@@ -1128,6 +1138,16 @@ core_lint_module(Code, St) ->
errors=St#compile.errors ++ Es}}
end.
+%% Run the makedep pass, write its output, and continue the compilation.
+makedep_and_output(Code0, St) ->
+ {ok,DepCode,St1} = makedep(Code0,St),
+ case makedep_output(DepCode, St1) of
+ {ok,_IgnoreCode,St2} ->
+ {ok,Code0,St2};
+ {error,St2} ->
+ {error,St2}
+ end.
+
makedep(Code0, #compile{ifile=Ifile,ofile=Ofile,options=Opts}=St) ->
%% Get the target of the Makefile rule.
@@ -1334,6 +1354,10 @@ v3_kernel(Code0, #compile{options=Opts,warnings=Ws0}=St) ->
{ok,Code,St}
end.
+block2(Code0, #compile{options=Opts}=St) ->
+ {ok,Code} = beam_block:module(Code0, [no_blockify|Opts]),
+ {ok,Code,St}.
+
test_old_inliner(#compile{options=Opts}) ->
%% The point of this test is to avoid loading the old inliner
%% if we know that it will not be used.
@@ -1448,15 +1472,33 @@ save_core_code(Code, St) ->
beam_asm(Code0, #compile{ifile=File,extra_chunks=ExtraChunks,options=CompilerOpts}=St) ->
case debug_info(St) of
{ok,DebugInfo,Opts0} ->
- Source = paranoid_absname(File),
Opts1 = [O || O <- Opts0, effects_code_generation(O)],
Chunks = [{<<"Dbgi">>, DebugInfo} | ExtraChunks],
- {ok,Code} = beam_asm:module(Code0, Chunks, Source, Opts1, CompilerOpts),
+ CompileInfo = compile_info(File, CompilerOpts, Opts1),
+ {ok,Code} = beam_asm:module(Code0, Chunks, CompileInfo, CompilerOpts),
{ok,Code,St#compile{abstract_code=[]}};
{error,Es} ->
{error,St#compile{errors=St#compile.errors ++ [{File,Es}]}}
end.
+compile_info(File, CompilerOpts, Opts) ->
+ IsSlim = member(slim, CompilerOpts),
+ IsDeterministic = member(deterministic, CompilerOpts),
+ Info0 = proplists:get_value(compile_info, Opts, []),
+ Info1 =
+ case paranoid_absname(File) of
+ [_|_] = Source when not IsSlim, not IsDeterministic ->
+ [{source,Source} | Info0];
+ _ ->
+ Info0
+ end,
+ Info2 =
+ case IsDeterministic of
+ false -> [{options,proplists:delete(compile_info, Opts)} | Info1];
+ true -> Info1
+ end,
+ Info2.
+
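A rough sketch of what compile_info/3 produces, based only on the clauses above (paths and options are made up):

    %% compile_info("/src/foo.erl", [report], [debug_info])
    %%     -> [{options,[debug_info]},{source,"/src/foo.erl"}]
    %% compile_info("/src/foo.erl", [deterministic], [debug_info])
    %%     -> []
    %% i.e. with 'deterministic' both the source path and the compiler
    %% options are left out of the chunk.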
paranoid_absname(""=File) ->
File;
paranoid_absname(File) ->
@@ -1512,15 +1554,14 @@ native_compile_1(Code, St) ->
{error,St#compile{errors=St#compile.errors ++ Es}}
end
catch
- Class:R ->
- Stk = erlang:get_stacktrace(),
+ Class:R:Stack ->
case IgnoreErrors of
true ->
Ws = [{St#compile.ifile,
- [{none,?MODULE,{native_crash,R,Stk}}]}],
+ [{none,?MODULE,{native_crash,R,Stack}}]}],
{ok,St#compile{warnings=St#compile.warnings ++ Ws}};
false ->
- erlang:raise(Class, R, Stk)
+ erlang:raise(Class, R, Stack)
end
end.
@@ -1748,6 +1789,21 @@ listing(LFun, Ext, Code, St) ->
{error,St#compile{errors=St#compile.errors ++ Es}}
end.
+to_dis(Code, #compile{module=Module,ofile=Outfile}=St) ->
+ Loaded = code:is_loaded(Module),
+ Sticky = code:is_sticky(Module),
+ _ = [code:unstick_mod(Module) || Sticky],
+
+ {module,Module} = code:load_binary(Module, "", Code),
+ DestDir = filename:dirname(Outfile),
+ DisFile = filename:join(DestDir, atom_to_list(Module) ++ ".dis"),
+ ok = erts_debug:dis_to_file(Module, DisFile),
+
+ %% Restore loaded module
+ _ = [{module, Module} = code:load_file(Module) || Loaded =/= false],
+ [code:stick_mod(Module) || Sticky],
+ {ok,Code,St}.
+
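A brief usage sketch of the new to_dis pass (module name made up); it relies on the erts_debug:dis_to_file/2 call visible above:

    %% Writes "some_mod.dis" in the directory of the output file by
    %% loading the freshly compiled code and disassembling it:
    compile:file(some_mod, [to_dis]).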
output_encoding(F, #compile{encoding = none}) ->
ok = io:setopts(F, [{encoding, epp:default_encoding()}]);
output_encoding(F, #compile{encoding = Encoding}) ->
@@ -1923,12 +1979,12 @@ pre_load() ->
erl_lint,
erl_parse,
erl_scan,
+ sys_core_alias,
sys_core_bsm,
sys_core_dsetel,
sys_core_fold,
v3_codegen,
v3_core,
- v3_kernel,
- v3_life],
+ v3_kernel],
_ = code:ensure_modules_loaded(L),
ok.
diff --git a/lib/compiler/src/compiler.app.src b/lib/compiler/src/compiler.app.src
index 3139d68902..cf32fd251c 100644
--- a/lib/compiler/src/compiler.app.src
+++ b/lib/compiler/src/compiler.app.src
@@ -58,6 +58,7 @@
core_lib,
erl_bifs,
rec_env,
+ sys_core_alias,
sys_core_bsm,
sys_core_dsetel,
sys_core_fold,
@@ -67,8 +68,7 @@
v3_codegen,
v3_core,
v3_kernel,
- v3_kernel_pp,
- v3_life
+ v3_kernel_pp
]},
{registered, []},
{applications, [kernel, stdlib]},
diff --git a/lib/compiler/src/core_lint.erl b/lib/compiler/src/core_lint.erl
index 7d3513c0ba..6e2114be56 100644
--- a/lib/compiler/src/core_lint.erl
+++ b/lib/compiler/src/core_lint.erl
@@ -353,12 +353,6 @@ expr(#c_case{arg=Arg,clauses=Cs}, Def, Rt, St0) ->
Pc = case_patcount(Cs),
St1 = body(Arg, Def, Pc, St0),
clauses(Cs, Def, Pc, Rt, St1);
-expr(#c_receive{clauses=Cs,timeout=#c_literal{val=infinity},
- action=#c_literal{}},
- Def, Rt, St) ->
- %% If the timeout is 'infinity', the after code can never
- %% be reached. We don't care if the return count is wrong.
- clauses(Cs, Def, 1, Rt, St);
expr(#c_receive{clauses=Cs,timeout=T,action=A}, Def, Rt, St0) ->
St1 = expr(T, Def, 1, St0),
St2 = body(A, Def, Rt, St1),
diff --git a/lib/compiler/src/core_pp.erl b/lib/compiler/src/core_pp.erl
index cff6c7098b..2516a9a1e1 100644
--- a/lib/compiler/src/core_pp.erl
+++ b/lib/compiler/src/core_pp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -464,7 +464,7 @@ indent(#ctxt{indent=N}) ->
N =< 0 ->
"";
true ->
- string:chars($\t, N div ?TAB_WIDTH, spaces(N rem ?TAB_WIDTH))
+ lists:duplicate(N div ?TAB_WIDTH, $\t) ++ spaces(N rem ?TAB_WIDTH)
end.
nl_indent(Ctxt) -> [$\n|indent(Ctxt)].
diff --git a/lib/compiler/src/core_scan.erl b/lib/compiler/src/core_scan.erl
index 9f0676538f..a50a2ffa8d 100644
--- a/lib/compiler/src/core_scan.erl
+++ b/lib/compiler/src/core_scan.erl
@@ -200,8 +200,8 @@ pre_string(eof, Q, _, Sp, SoFar, Pos) ->
pre_string_error(Q, Sp, SoFar, Pos).
pre_string_error(Q, Sp, SoFar, Pos) ->
- S = reverse(string:substr(SoFar, 1, string:chr(SoFar, Q)-1)),
- pre_error({string,Q,string:substr(S, 1, 16)}, Sp, Pos).
+ [S,_] = string:split(SoFar, [Q]),
+ pre_error({string,Q,string:slice(string:reverse(S), 0, 16)}, Sp, Pos).
pre_char([C|Cs], SoFar) -> pre_char(C, Cs, SoFar);
pre_char([], _) -> more;
diff --git a/lib/compiler/src/genop.tab b/lib/compiler/src/genop.tab
index b5688de339..a47d4e8cf7 100755
--- a/lib/compiler/src/genop.tab
+++ b/lib/compiler/src/genop.tab
@@ -545,3 +545,32 @@ BEAM_FORMAT_NUMBER=0
## Test the arity of Reg and jumps to Lbl if it is not N.
## Test the first element of the tuple and jumps to Lbl if it is not Atom.
159: is_tagged_tuple/4
+
+# OTP 21
+
+## @spec build_stacktrace
+## @doc Given the raw stacktrace in x(0), build a cooked stacktrace suitable
+## for human consumption. Store it in x(0). Destroys all other registers.
+## Do a garbage collection if necessary to allocate space on the heap
+## for the result.
+160: build_stacktrace/0
+
+## @spec raw_raise
+## @doc This instruction works like the erlang:raise/3 BIF, except that the
+## stacktrace in x(2) must be a raw stacktrace.
+## x(0) is the class of the exception (error, exit, or throw),
+##      x(1) is the exception term, and x(2) is the raw stacktrace.
+## If x(0) is not a valid class, the instruction will not throw an
+## exception, but store the atom 'badarg' in x(0) and execute the
+## next instruction.
+161: raw_raise/0
+
+## @spec get_hd Source Head
+## @doc Get the head (or car) part of a list (a cons cell) from Source and
+## put it into the register Head.
+162: get_hd/2
+
+## @spec get_tl Source Tail
+## @doc Get the tail (or cdr) part of a list (a cons cell) from Source and
+## put it into the register Tail.
+163: get_tl/2
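As an illustration of why the two new list instructions exist (registers made up, not part of the file): a match that only needs the head no longer has to produce the tail.

    %% Matching [H|_]:
    {get_hd,{x,0},{x,1}}.
    %% versus the pre-OTP 21 form, which always extracts both parts:
    {get_list,{x,0},{x,1},{x,2}}.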
diff --git a/lib/compiler/src/sys_core_alias.erl b/lib/compiler/src/sys_core_alias.erl
new file mode 100644
index 0000000000..1bce1577d1
--- /dev/null
+++ b/lib/compiler/src/sys_core_alias.erl
@@ -0,0 +1,307 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% Purpose : Replace values by aliases from patterns optimisation for Core
+
+%% Replace expressions by aliases from patterns. For example:
+%%
+%% example({ok, Val}) ->
+%% {ok, Val}.
+%%
+%% will become:
+%%
+%% example({ok, Val} = Tuple) ->
+%% Tuple.
+%%
+%% Currently this pass aliases tuple and cons nodes made of literals,
+%% variables and other cons. The tuple/cons may appear anywhere in the
+%% pattern and it will be aliased if used later on.
+%%
+%% Note that a tuple/cons made only of literals is not aliased, as it
+%% may be part of the literal pool.
+
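A second, hypothetical example along the same lines (editorial illustration, not part of the module), showing the cons case mentioned above:

    %% example([H|T]) ->
    %%     {[H|T], T}.
    %%
    %% becomes, in effect:
    %%
    %% example([H|T] = Cons) ->
    %%     {Cons, T}.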
+-module(sys_core_alias).
+
+-export([module/2]).
+
+-include("core_parse.hrl").
+
+-define(NOTSET, 0).
+
+-record(sub, {p=#{} :: #{term() => ?NOTSET | atom()}, %% Found pattern substitutions
+ v=cerl_sets:new() :: cerl_sets:set(cerl:var_name()), %% Variables used by patterns
+ t=undefined :: term()}). %% Temporary information from pre to post
+
+-type sub() :: #sub{}.
+
+-spec module(cerl:c_module(), [compile:option()]) ->
+ {'ok',cerl:c_module(),[]}.
+
+module(#c_module{defs=Ds0}=Mod, _Opts) ->
+ Ds1 = [def(D) || D <- Ds0],
+ {ok,Mod#c_module{defs=Ds1},[]}.
+
+def({#c_var{name={F,Arity}}=Name,B0}) ->
+ try
+ put(new_var_num, 0),
+ {B1,_} = cerl_trees:mapfold(fun pre/2, fun post/2, sub_new(undefined), B0),
+ erase(new_var_num),
+ {Name,B1}
+ catch
+ Class:Error:Stack ->
+ io:fwrite("Function: ~w/~w\n", [F,Arity]),
+ erlang:raise(Class, Error, Stack)
+ end.
+
+pre(#c_let{vars=Vars}=Node, Sub) ->
+ {Node,sub_fold(get_variables(Vars), Sub)};
+
+pre(#c_fun{vars=Vars}=Node, Sub) ->
+ {Node,sub_fold(get_variables(Vars), Sub)};
+
+pre(#c_clause{pats=Pats}=Node, Sub0) ->
+ VarNames = get_variables(Pats),
+ Sub1 = sub_fold(VarNames, Sub0),
+ Keys = get_pattern_keys(Pats),
+ Sub2 = sub_add_keys(Keys, Sub1),
+
+ #sub{v=SubNames,t=Temp} = Sub2,
+ Sub3 = Sub2#sub{v=merge_variables(VarNames, SubNames),
+ t={clause,Pats,Keys,SubNames,Temp}},
+
+ {Node#c_clause{pats=[]},Sub3};
+
+pre(Node, Sub0) ->
+ %% We cache only tuples and cons.
+ case cerl:is_data(Node) andalso not cerl:is_literal(Node) of
+ false ->
+ {Node,Sub0};
+ true ->
+ Kind = cerl:data_type(Node),
+ Es = cerl:data_es(Node),
+ case sub_cache_nodes(Kind, Es, Sub0) of
+ {Name,Sub1} ->
+ {cerl:ann_c_var(cerl:get_ann(Node), Name),Sub1};
+ error ->
+ {Node,Sub0}
+ end
+ end.
+
+post(#c_let{}=Node, Sub) ->
+ {Node,sub_unfold(Sub)};
+
+post(#c_fun{}=Node, Sub) ->
+ {Node,sub_unfold(Sub)};
+
+post(#c_clause{}=Node, #sub{t={clause,Pats0,Keys,V,T}}=Sub0) ->
+ {Sub1,PostKeys} = sub_take_keys(Keys, Sub0),
+ Pats1 = put_pattern_keys(Pats0, PostKeys),
+ Sub2 = sub_unfold(Sub1#sub{v=V,t=T}),
+ {Node#c_clause{pats=Pats1},Sub2};
+
+post(Node, Sub) ->
+ {Node,Sub}.
+
+%% sub_new/1
+%% sub_add_keys/2
+%% sub_take_keys/3
+%% sub_cache_nodes/3
+%%
+%% Manages the substitutions record.
+
+%% Builds a new sub.
+-spec sub_new(term()) -> sub().
+sub_new(Temp) ->
+ #sub{t=Temp}.
+
+%% Folds the sub into a new one if the variables in nodes are not disjoint
+sub_fold(VarNames, #sub{v=SubNames}=Sub) ->
+ case is_disjoint_variables(VarNames, SubNames) of
+ true -> Sub#sub{t={temp,Sub#sub.t}};
+ false -> sub_new({sub,Sub})
+ end.
+
+%% Unfolds the sub in case one was folded in the previous step
+sub_unfold(#sub{t={temp,Temp}}=Sub) ->
+ Sub#sub{t=Temp};
+sub_unfold(#sub{t={sub,Sub}}) ->
+ Sub.
+
+%% Adds the keys extracted from patterns to the state.
+-spec sub_add_keys([term()], sub()) -> sub().
+sub_add_keys(Keys, #sub{p=Pat0}=Sub) ->
+ Pat1 =
+ lists:foldl(fun(Key, Acc) ->
+ false = maps:is_key(Key, Acc), %Assertion.
+ maps:put(Key, ?NOTSET, Acc)
+ end, Pat0, Keys),
+ Sub#sub{p=Pat1}.
+
+%% Take the keys from the map, taking into account the keys
+%% that have changed, as those must become aliases in the pattern.
+-spec sub_take_keys([term()], sub()) -> {sub(), [{term(), atom()}]}.
+sub_take_keys(Keys, #sub{p=Pat0}=Sub) ->
+ {Pat1,Acc} = sub_take_keys(Keys, Pat0, []),
+ {Sub#sub{p=Pat1},Acc}.
+
+sub_take_keys([K|T], Sub0, Acc) ->
+ case maps:take(K, Sub0) of
+ {?NOTSET,Sub1} ->
+ sub_take_keys(T, Sub1, Acc);
+ {Name,Sub1} ->
+ sub_take_keys(T, Sub1, [{K,Name}|Acc])
+ end;
+sub_take_keys([], Sub, Acc) ->
+ {Sub,Acc}.
+
+%% Check if the node can be cached based on the state information.
+%% If it can be cached and it does not have an alias for it, we
+%% build one.
+-spec sub_cache_nodes(atom(), [cerl:cerl()], sub()) -> {atom(), sub()} | error.
+sub_cache_nodes(Kind, Nodes, #sub{p=Pat}=Sub) ->
+ case nodes_to_key(Kind, Nodes) of
+ {ok, Key} ->
+ case Pat of
+ #{Key := ?NOTSET} ->
+ new_var_name(Key, Sub);
+ #{Key := Name} ->
+ {Name,Sub};
+ #{} ->
+ error
+ end;
+ error ->
+ error
+ end.
+
+new_var_name(Key, #sub{p=Pat}=Sub) ->
+ Counter = get(new_var_num),
+ Name = list_to_atom("@r" ++ integer_to_list(Counter)),
+ put(new_var_num, Counter + 1),
+ {Name,Sub#sub{p=maps:put(Key, Name, Pat)}}.
+
+%% get_variables/1
+%% is_disjoint_variables/2
+%% merge_variables/2
+
+get_variables(NodesList) ->
+ cerl_sets:from_list([Var || Node <- NodesList, Var <- cerl_trees:variables(Node)]).
+
+is_disjoint_variables(Vars1, Vars2) ->
+ cerl_sets:is_disjoint(Vars1, Vars2).
+
+merge_variables(Vars1, Vars2) ->
+ cerl_sets:union(Vars1, Vars2).
+
+%% get_pattern_keys/2
+%% put_pattern_keys/2
+%%
+%% Gets keys from patterns or add them as aliases.
+
+get_pattern_keys(Patterns) ->
+ lists:foldl(fun get_pattern_keys/2, [], Patterns).
+
+get_pattern_keys(#c_tuple{es=Es}, Acc0) ->
+ Acc1 = accumulate_pattern_keys(tuple, Es, Acc0),
+ lists:foldl(fun get_pattern_keys/2, Acc1, Es);
+get_pattern_keys(#c_cons{hd=Hd,tl=Tl}, Acc0) ->
+ Acc1 = accumulate_pattern_keys(cons, [Hd, Tl], Acc0),
+ get_pattern_keys(Tl, get_pattern_keys(Hd, Acc1));
+get_pattern_keys(#c_alias{pat=Pat}, Acc0) ->
+ get_pattern_keys(Pat, Acc0);
+get_pattern_keys(#c_map{es=Es}, Acc0) ->
+ lists:foldl(fun get_pattern_keys/2, Acc0, Es);
+get_pattern_keys(#c_map_pair{val=Val}, Acc0) ->
+ get_pattern_keys(Val, Acc0);
+get_pattern_keys(_, Acc) ->
+ Acc.
+
+accumulate_pattern_keys(Kind, Nodes, Acc) ->
+ case nodes_to_key(Kind, Nodes) of
+ {ok,Key} -> [Key|Acc];
+ error -> Acc
+ end.
+
+put_pattern_keys(Patterns, []) ->
+ Patterns;
+put_pattern_keys(Patterns, Keys) ->
+ {NewPatterns,Map} =
+ lists:mapfoldl(fun alias_pattern_keys/2, maps:from_list(Keys), Patterns),
+ %% Check all aliases have been consumed from the map.
+ 0 = map_size(Map),
+ NewPatterns.
+
+alias_pattern_keys(#c_tuple{anno=Anno,es=Es0}=Node, Acc0) ->
+ {Es1,Acc1} = lists:mapfoldl(fun alias_pattern_keys/2, Acc0, Es0),
+ nodes_to_alias(tuple, Es0, Anno, Node#c_tuple{es=Es1}, Acc1);
+alias_pattern_keys(#c_cons{anno=Anno,hd=Hd0,tl=Tl0}=Node, Acc0) ->
+ {Hd1,Acc1} = alias_pattern_keys(Hd0, Acc0),
+ {Tl1,Acc2} = alias_pattern_keys(Tl0, Acc1),
+ nodes_to_alias(cons, [Hd0, Tl0], Anno, Node#c_cons{hd=Hd1,tl=Tl1}, Acc2);
+alias_pattern_keys(#c_alias{pat=Pat0}=Node, Acc0) ->
+ {Pat1,Acc1} = alias_pattern_keys(Pat0, Acc0),
+ {Node#c_alias{pat=Pat1}, Acc1};
+alias_pattern_keys(#c_map{es=Es0}=Node, Acc0) ->
+ {Es1,Acc1} = lists:mapfoldl(fun alias_pattern_keys/2, Acc0, Es0),
+ {Node#c_map{es=Es1}, Acc1};
+alias_pattern_keys(#c_map_pair{val=Val0}=Node, Acc0) ->
+ {Val1,Acc1} = alias_pattern_keys(Val0, Acc0),
+ {Node#c_map_pair{val=Val1}, Acc1};
+alias_pattern_keys(Pattern, Acc) ->
+ {Pattern,Acc}.
+
+%% Check if a node must become an alias because
+%% its pattern was used later on as an expression.
+nodes_to_alias(Kind, Inner, Anno, Node, Keys0) ->
+ case nodes_to_key(Kind, Inner) of
+ {ok,Key} ->
+ case maps:take(Key, Keys0) of
+ {Name,Keys1} ->
+ Var = cerl:ann_c_var(Anno, Name),
+ {cerl:ann_c_alias(Anno, Var, Node), Keys1};
+ error ->
+ {Node,Keys0}
+ end;
+ error ->
+ {Node,Keys0}
+ end.
+
+%% Builds the key used to check if a value can be
+%% replaced by an alias. It considers literals,
+%% aliases, variables, tuples and cons recursively.
+nodes_to_key(Kind, Nodes) ->
+ nodes_to_key(Nodes, [], Kind).
+
+nodes_to_key([#c_alias{var=Var}|T], Acc, Kind) ->
+ nodes_to_key([Var|T], Acc, Kind);
+nodes_to_key([#c_var{name=Name}|T], Acc, Kind) ->
+ nodes_to_key(T, [[var,Name]|Acc], Kind);
+nodes_to_key([Node|T], Acc0, Kind) ->
+ case cerl:is_data(Node) of
+ false ->
+ error;
+ true ->
+ case nodes_to_key(cerl:data_es(Node), [], cerl:data_type(Node)) of
+ {ok,Key} ->
+ nodes_to_key(T, [Key|Acc0], Kind);
+ error ->
+ error
+ end
+ end;
+nodes_to_key([], Acc, Kind) ->
+ {ok,[Kind|Acc]}.
diff --git a/lib/compiler/src/sys_core_bsm.erl b/lib/compiler/src/sys_core_bsm.erl
index 3e04cc33df..65580f79e3 100644
--- a/lib/compiler/src/sys_core_bsm.erl
+++ b/lib/compiler/src/sys_core_bsm.erl
@@ -24,7 +24,7 @@
-export([module/2,format_error/1]).
-include("core_parse.hrl").
--import(lists, [member/2,nth/2,reverse/1,usort/1]).
+-import(lists, [member/2,reverse/1,usort/1]).
-spec module(cerl:c_module(), [compile:option()]) -> {'ok', cerl:c_module()}.
@@ -44,8 +44,7 @@ function([{#c_var{name={F,Arity}}=Name,B0}|Fs], FsAcc, Ws0) ->
{B,Ws} ->
function(Fs, [{Name,B}|FsAcc], Ws)
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [F,Arity]),
erlang:raise(Class, Error, Stack)
end;
@@ -60,13 +59,6 @@ format_error(bin_opt_alias) ->
format_error(bin_partition) ->
"INFO: matching non-variables after a previous clause matching a variable "
"will prevent delayed sub binary optimization";
-format_error(bin_left_var_used_in_guard) ->
- "INFO: a variable to the left of the binary pattern is used in a guard; "
- "will prevent delayed sub binary optimization";
-format_error(bin_argument_order) ->
- "INFO: matching anything else but a plain variable to the left of "
- "binary pattern will prevent delayed sub binary optimization; "
- "SUGGEST changing argument order";
format_error(bin_var_used) ->
"INFO: using a matched out sub binary will prevent "
"delayed sub binary optimization";
@@ -97,46 +89,41 @@ bsm_an(#c_case{arg=#c_values{es=Es}}=Case) ->
bsm_an(Other) ->
{ok,Other}.
-bsm_an_1(Vs, #c_case{clauses=Cs}=Case) ->
- case bsm_leftmost(Cs) of
- none -> {ok,Case};
- Pos -> bsm_an_2(Vs, Cs, Case, Pos)
- end.
-
-bsm_an_2(Vs, Cs, Case, Pos) ->
- case bsm_nonempty(Cs, Pos) of
- true -> bsm_an_3(Vs, Cs, Case, Pos);
- false -> {ok,Case}
+bsm_an_1(Vs0, #c_case{clauses=Cs0}=Case) ->
+ case bsm_leftmost(Cs0) of
+ none ->
+ {ok,Case};
+ 1 ->
+ bsm_an_2(Vs0, Cs0, Case);
+ Pos ->
+ Vs = move_from_col(Pos, Vs0),
+ Cs = [C#c_clause{pats=move_from_col(Pos, Ps)} ||
+ #c_clause{pats=Ps}=C <- Cs0],
+ bsm_an_2(Vs, Cs, Case)
end.
-bsm_an_3(Vs, Cs, Case, Pos) ->
+bsm_an_2(Vs, Cs, Case) ->
try
- bsm_ensure_no_partition(Cs, Pos),
- {ok,bsm_do_an(Vs, Pos, Cs, Case)}
+ bsm_ensure_no_partition(Cs),
+ {ok,bsm_do_an(Vs, Cs, Case)}
catch
- throw:{problem,Where,What} ->
- {ok,Case,{Where,What}}
+ throw:{problem,Where,What} ->
+ {ok,Case,{Where,What}}
end.
-bsm_do_an(Vs0, Pos, Cs0, Case) ->
- case nth(Pos, Vs0) of
- #c_var{name=Vname}=V0 ->
- Cs = bsm_do_an_var(Vname, Pos, Cs0, []),
- V = bsm_annotate_for_reuse(V0),
- Bef = lists:sublist(Vs0, Pos-1),
- Aft = lists:nthtail(Pos, Vs0),
- case Bef ++ [V|Aft] of
- [_] ->
- Case#c_case{arg=V,clauses=Cs};
- Vs ->
- Case#c_case{arg=#c_values{es=Vs},clauses=Cs}
- end;
- _ ->
- Case
- end.
+move_from_col(Pos, L) ->
+ {First,[Col|Rest]} = lists:split(Pos - 1, L),
+ [Col|First] ++ Rest.
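A small worked example of move_from_col/2 (values made up): the column that does the binary matching is rotated to the front, so the rest of the pass only ever inspects the first pattern.

    %% move_from_col(3, [a,b,c,d]) -> [c,a,b,d]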
-bsm_do_an_var(V, S, [#c_clause{pats=Ps,guard=G,body=B0}=C0|Cs], Acc) ->
- case nth(S, Ps) of
+bsm_do_an([#c_var{name=Vname}=V0|Vs0], Cs0, Case) ->
+ Cs = bsm_do_an_var(Vname, Cs0),
+ V = bsm_annotate_for_reuse(V0),
+ Vs = core_lib:make_values([V|Vs0]),
+ Case#c_case{arg=Vs,clauses=Cs};
+bsm_do_an(_Vs, _Cs, Case) -> Case.
+
+bsm_do_an_var(V, [#c_clause{pats=[P|_],guard=G,body=B0}=C0|Cs]) ->
+ case P of
#c_var{name=VarName} ->
case core_lib:is_var_used(V, G) of
true -> bsm_problem(C0, orig_bin_var_used_in_guard);
@@ -149,23 +136,23 @@ bsm_do_an_var(V, S, [#c_clause{pats=Ps,guard=G,body=B0}=C0|Cs], Acc) ->
B1 = bsm_maybe_ctx_to_binary(VarName, B0),
B = bsm_maybe_ctx_to_binary(V, B1),
C = C0#c_clause{body=B},
- bsm_do_an_var(V, S, Cs, [C|Acc]);
- #c_alias{}=P ->
+ [C|bsm_do_an_var(V, Cs)];
+ #c_alias{} ->
case bsm_could_match_binary(P) of
false ->
- bsm_do_an_var(V, S, Cs, [C0|Acc]);
+ [C0|bsm_do_an_var(V, Cs)];
true ->
bsm_problem(C0, bin_opt_alias)
end;
- P ->
+ _ ->
case bsm_could_match_binary(P) andalso bsm_is_var_used(V, G, B0) of
false ->
- bsm_do_an_var(V, S, Cs, [C0|Acc]);
+ [C0|bsm_do_an_var(V, Cs)];
true ->
bsm_problem(C0, bin_var_used)
end
end;
-bsm_do_an_var(_, _, [], Acc) -> reverse(Acc).
+bsm_do_an_var(_, []) -> [].
bsm_annotate_for_reuse(#c_var{anno=Anno}=Var) ->
Var#c_var{anno=[reuse_for_context|Anno]}.
@@ -193,131 +180,82 @@ previous_ctx_to_binary(V, Core) ->
end.
%% bsm_leftmost(Cs) -> none | ArgumentNumber
-%% Find the leftmost argument that does binary matching. Return
-%% the number of the argument (1-N).
+%% Find the leftmost argument that matches a nonempty binary.
+%% Return either 'none' or the argument number (1-N).
bsm_leftmost(Cs) ->
bsm_leftmost_1(Cs, none).
+bsm_leftmost_1([_|_], 1) ->
+ 1;
bsm_leftmost_1([#c_clause{pats=Ps}|Cs], Pos) ->
bsm_leftmost_2(Ps, Cs, 1, Pos);
bsm_leftmost_1([], Pos) -> Pos.
bsm_leftmost_2(_, Cs, Pos, Pos) ->
bsm_leftmost_1(Cs, Pos);
-bsm_leftmost_2([#c_binary{}|_], Cs, N, _) ->
+bsm_leftmost_2([#c_binary{segments=[_|_]}|_], Cs, N, _) ->
bsm_leftmost_1(Cs, N);
bsm_leftmost_2([_|Ps], Cs, N, Pos) ->
bsm_leftmost_2(Ps, Cs, N+1, Pos);
bsm_leftmost_2([], Cs, _, Pos) ->
bsm_leftmost_1(Cs, Pos).
-%% bsm_nonempty(Cs, Pos) -> true|false
-%% Check if at least one of the clauses matches a non-empty
-%% binary in the given argument position.
+%% bsm_ensure_no_partition(Cs) -> ok (exception if problem)
+%% There must only be a single bs_start_match2 instruction if we
+%% are to reuse the binary variable for the match context.
+%%
+%% To make sure that there is only a single bs_start_match2
+%% instruction, we will check for partitions such as:
%%
-bsm_nonempty([#c_clause{pats=Ps}|Cs], Pos) ->
- case nth(Pos, Ps) of
- #c_binary{segments=[_|_]} ->
- true;
- _ ->
- bsm_nonempty(Cs, Pos)
- end;
-bsm_nonempty([], _ ) -> false.
-
-%% bsm_ensure_no_partition(Cs, Pos) -> ok (exception if problem)
-%% We must make sure that matching is not partitioned between
-%% variables like this:
%% foo(<<...>>) -> ...
%% foo(<Variable>) when ... -> ...
-%% foo(<Any non-variable pattern>) ->
-%% If there is such partition, we are not allowed to reuse the binary variable
-%% for the match context.
+%% foo(<Non-variable pattern>) ->
%%
-%% Also, arguments to the left of the argument that is matched
-%% against a binary, are only allowed to be simple variables, not
-%% used in guards. The reason is that we must know that the binary is
-%% only matched in one place (i.e. there must be only one bs_start_match2
-%% instruction emitted).
+%% If there is such a partition, we reject the optimization.
-bsm_ensure_no_partition(Cs, Pos) ->
- bsm_ensure_no_partition_1(Cs, Pos, before).
+bsm_ensure_no_partition(Cs) ->
+ bsm_ensure_no_partition_1(Cs, before).
%% Loop through each clause.
-bsm_ensure_no_partition_1([#c_clause{pats=Ps,guard=G}|Cs], Pos, State0) ->
- State = bsm_ensure_no_partition_2(Ps, Pos, G, simple_vars, State0),
+bsm_ensure_no_partition_1([#c_clause{pats=Ps,guard=G}|Cs], State0) ->
+ State = bsm_ensure_no_partition_2(Ps, G, State0),
case State of
'after' ->
- bsm_ensure_no_partition_after(Cs, Pos);
+ bsm_ensure_no_partition_after(Cs);
_ ->
ok
end,
- bsm_ensure_no_partition_1(Cs, Pos, State);
-bsm_ensure_no_partition_1([], _, _) -> ok.
+ bsm_ensure_no_partition_1(Cs, State);
+bsm_ensure_no_partition_1([], _) -> ok.
-%% Loop through each pattern for this clause.
-bsm_ensure_no_partition_2([#c_binary{}=Where|_], 1, _, Vstate, State) ->
- case State of
- before when Vstate =:= simple_vars -> within;
- before -> bsm_problem(Where, Vstate);
- within when Vstate =:= simple_vars -> within;
- within -> bsm_problem(Where, Vstate)
- end;
-bsm_ensure_no_partition_2([#c_alias{}=Alias|_], 1, N, Vstate, State) ->
+bsm_ensure_no_partition_2([#c_binary{}|_], _, _State) ->
+ within;
+bsm_ensure_no_partition_2([#c_alias{}=Alias|_], N, State) ->
%% Retrieve the real pattern that the alias refers to and check that.
P = bsm_real_pattern(Alias),
- bsm_ensure_no_partition_2([P], 1, N, Vstate, State);
-bsm_ensure_no_partition_2([_|_], 1, _, _Vstate, before=State) ->
+ bsm_ensure_no_partition_2([P], N, State);
+bsm_ensure_no_partition_2([_|_], _, before=State) ->
%% No binary matching yet - therefore no partition.
State;
-bsm_ensure_no_partition_2([P|_], 1, _, Vstate, State) ->
+bsm_ensure_no_partition_2([P|_], _, State) ->
case bsm_could_match_binary(P) of
false ->
- %% If clauses can be freely arranged (Vstate =:= simple_vars),
- %% a clause that cannot match a binary will not partition the clause.
- %% Example:
- %%
- %% a(Var, <<>>) -> ...
- %% a(Var, []) -> ...
- %% a(Var, <<B>>) -> ...
- %%
- %% But if the clauses can't be freely rearranged, as in
- %%
- %% b(Var, <<X>>) -> ...
- %% b(1, 2) -> ...
- %%
- %% we do have a problem.
- %%
- case Vstate of
- simple_vars -> State;
- _ -> bsm_problem(P, Vstate)
- end;
+ State;
true ->
%% The pattern P *may* match a binary, so we must update the state.
%% (P must be a variable.)
- case State of
- within -> 'after';
- 'after' -> 'after'
- end
- end;
-bsm_ensure_no_partition_2([#c_var{name=V}|Ps], N, G, Vstate, S) ->
- case core_lib:is_var_used(V, G) of
- false ->
- bsm_ensure_no_partition_2(Ps, N-1, G, Vstate, S);
- true ->
- bsm_ensure_no_partition_2(Ps, N-1, G, bin_left_var_used_in_guard, S)
- end;
-bsm_ensure_no_partition_2([_|Ps], N, G, _, S) ->
- bsm_ensure_no_partition_2(Ps, N-1, G, bin_argument_order, S).
+ 'after'
+ end.
-bsm_ensure_no_partition_after([#c_clause{pats=Ps}=C|Cs], Pos) ->
- case nth(Pos, Ps) of
- #c_var{} ->
- bsm_ensure_no_partition_after(Cs, Pos);
- _ ->
- bsm_problem(C, bin_partition)
+bsm_ensure_no_partition_after([#c_clause{pats=Ps}=C|Cs]) ->
+ case Ps of
+ [#c_var{}|_] ->
+ bsm_ensure_no_partition_after(Cs);
+ _ ->
+ bsm_problem(C, bin_partition)
end;
-bsm_ensure_no_partition_after([], _) -> ok.
+bsm_ensure_no_partition_after([]) -> ok.
bsm_could_match_binary(#c_alias{pat=P}) -> bsm_could_match_binary(P);
bsm_could_match_binary(#c_cons{}) -> false;
diff --git a/lib/compiler/src/sys_core_dsetel.erl b/lib/compiler/src/sys_core_dsetel.erl
index bd3eeae238..9e2df69b33 100644
--- a/lib/compiler/src/sys_core_dsetel.erl
+++ b/lib/compiler/src/sys_core_dsetel.erl
@@ -81,8 +81,7 @@ visit_module_1([{Name,F0}|Fs], Env, Acc) ->
{F,_} ->
visit_module_1(Fs, Env, [{Name,F}|Acc])
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
#c_var{name={Func,Arity}} = Name,
io:fwrite("Function: ~w/~w\n", [Func,Arity]),
erlang:raise(Class, Error, Stack)
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index d73060fb7e..a9bd363ee1 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -125,8 +125,7 @@ function_1({#c_var{name={F,Arity}}=Name,B0}) ->
end, B0, 20),
{Name,B}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [F,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -146,14 +145,9 @@ find_fixpoint(OptFun, Core0, Max) ->
body(Body, Sub) ->
body(Body, value, Sub).
-body(#c_values{anno=A,es=Es0}, Ctxt, Sub) ->
- Es1 = expr_list(Es0, Ctxt, Sub),
- case Ctxt of
- value ->
- #c_values{anno=A,es=Es1};
- effect ->
- make_effect_seq(Es1, Sub)
- end;
+body(#c_values{anno=A,es=Es0}, value, Sub) ->
+ Es1 = expr_list(Es0, value, Sub),
+ #c_values{anno=A,es=Es1};
body(E, Ctxt, Sub) ->
?ASSERT(verify_scope(E, Sub)),
expr(E, Ctxt, Sub).
@@ -314,9 +308,15 @@ expr(#c_seq{arg=Arg0,body=B0}=Seq0, Ctxt, Sub) ->
false ->
%% Arg cannot be "values" here - only a single value
%% make sense here.
- case is_safe_simple(Arg, Sub) of
- true -> B1;
- false -> Seq0#c_seq{arg=Arg,body=B1}
+ case {Ctxt,is_safe_simple(Arg, Sub)} of
+ {effect,true} -> B1;
+ {effect,false} ->
+ case is_safe_simple(B1, Sub) of
+ true -> Arg;
+ false -> Seq0#c_seq{arg=Arg,body=B1}
+ end;
+ {value,true} -> B1;
+ {value,false} -> Seq0#c_seq{arg=Arg,body=B1}
end
end;
expr(#c_let{}=Let0, Ctxt, Sub) ->
@@ -380,10 +380,7 @@ expr(#c_case{}=Case0, Ctxt, Sub) ->
Case = Case1#c_case{arg=Arg2,clauses=Cs2},
warn_no_clause_match(Case1, Case),
Expr = eval_case(Case, Sub),
- case move_case_into_arg(Case, Sub) of
- impossible -> Expr;
- Other -> Other
- end;
+ move_case_into_arg(Expr, Sub);
Other ->
expr(Other, Ctxt, Sub)
end;
@@ -415,9 +412,20 @@ expr(#c_call{module=M0,name=N0}=Call0, Ctxt, Sub) ->
no -> call(Call, M1, N1, Sub);
{yes,Seq} -> expr(Seq, Ctxt, Sub)
end;
+expr(#c_primop{name=#c_literal{val=build_stacktrace}}, effect, _Sub) ->
+ void();
expr(#c_primop{args=As0}=Prim, _, Sub) ->
As1 = expr_list(As0, value, Sub),
Prim#c_primop{args=As1};
+expr(#c_catch{anno=Anno,body=B}, effect, Sub) ->
+ %% When the return value of the 'catch' is ignored, we can replace it
+ %% with a try/catch to avoid building a stack trace when an exception
+ %% occurs.
+ Var = #c_var{name='catch_value'},
+ Evs = [#c_var{name='Class'},#c_var{name='Reason'},#c_var{name='Stk'}],
+ Try = #c_try{anno=Anno,arg=B,vars=[Var],body=Var,
+ evars=Evs,handler=void()},
+ expr(Try, effect, Sub);
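Source-level intuition for the rewrite above, as a hedged sketch (the real transformation happens on Core Erlang):

    %% catch foo(X),          %% result ignored
    %% ...
    %%
    %% is treated as
    %%
    %% try foo(X) of _ -> ok
    %% catch _:_ -> ok
    %% end,
    %% ...
    %%
    %% which never needs to build the {'EXIT',{Reason,Stacktrace}} term.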
expr(#c_catch{body=B0}=Catch, _, Sub) ->
%% We can remove catch if the value is simple
B1 = body(B0, value, Sub),
@@ -2422,16 +2430,10 @@ move_let_into_expr(#c_let{vars=InnerVs0,body=InnerBody0}=Inner,
Outer#c_let{vars=OuterVs,arg=Arg,
body=Inner#c_let{vars=InnerVs,arg=OuterBody,body=InnerBody}};
move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let,
- #c_case{arg=Cexpr0,clauses=[Ca0,Cb0|Cs]}=Case, Sub0) ->
- %% Test if there are no more clauses than Ca0 and Cb0, or if
- %% Cb0 is guaranteed to match.
- TwoClauses = Cs =:= [] orelse
- case Cb0 of
- #c_clause{pats=[#c_var{}],guard=#c_literal{val=true}} -> true;
- _ -> false
- end,
- case {TwoClauses,is_failing_clause(Ca0),is_failing_clause(Cb0)} of
- {true,false,true} ->
+ #c_case{arg=Cexpr0,clauses=[Ca0|Cs0]}=Case, Sub0) ->
+ case not is_failing_clause(Ca0) andalso
+ are_all_failing_clauses(Cs0) of
+ true ->
%% let <Lvars> = case <Case-expr> of
%% <Cpats> -> <Clause-body>;
%% <OtherCpats> -> erlang:error(...)
@@ -2467,8 +2469,8 @@ move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let,
body=Lbody},
Ca = Ca0#c_clause{pats=CaPats,guard=G,body=B},
- Cb = clause(Cb0, Cexpr, value, Sub0),
- Case#c_case{arg=Cexpr,clauses=[Ca,Cb]}
+ Cs = [clause(C, Cexpr, value, Sub0) || C <- Cs0],
+ Case#c_case{arg=Cexpr,clauses=[Ca|Cs]}
catch
nomatch ->
%% This is not a defeat. The code will eventually
@@ -2476,7 +2478,7 @@ move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let,
%% optimizations done in this module.
impossible
end;
- {_,_,_} -> impossible
+ false -> impossible
end;
move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let,
#c_seq{arg=Sarg0,body=Sbody0}=Seq, Sub0) ->
@@ -2499,9 +2501,78 @@ move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let,
body=Lbody}};
move_let_into_expr(_Let, _Expr, _Sub) -> impossible.
+are_all_failing_clauses(Cs) ->
+ all(fun is_failing_clause/1, Cs).
+
is_failing_clause(#c_clause{body=B}) ->
will_fail(B).
+%% opt_build_stacktrace(Let) -> Core.
+%% If the stacktrace is *only* used in a call to erlang:raise/3,
+%%  there is no need to build a cooked stacktrace using build_stacktrace/1.
+
+opt_build_stacktrace(#c_let{vars=[#c_var{name=Cooked}],
+ arg=#c_primop{name=#c_literal{val=build_stacktrace},
+ args=[RawStk]},
+ body=Body}=Let) ->
+ case Body of
+ #c_call{module=#c_literal{val=erlang},
+ name=#c_literal{val=raise},
+ args=[Class,Exp,#c_var{name=Cooked}]} ->
+ %% The stacktrace is only used in a call to erlang:raise/3.
+ %% There is no need to build the stacktrace. Replace the
+            %% call to erlang:raise/3 with the raw_raise/3 instruction,
+ %% which will use a raw stacktrace.
+ #c_primop{name=#c_literal{val=raw_raise},
+ args=[Class,Exp,RawStk]};
+ #c_let{vars=[#c_var{name=V}],arg=Arg,body=B0} when V =/= Cooked ->
+ case core_lib:is_var_used(Cooked, Arg) of
+ false ->
+ %% The built stacktrace is not used in the argument,
+ %% so we can sink the building of the stacktrace into
+ %% the body of the let.
+ B = opt_build_stacktrace(Let#c_let{body=B0}),
+ Body#c_let{body=B};
+ true ->
+ Let
+ end;
+ #c_seq{arg=Arg,body=B0} ->
+ case core_lib:is_var_used(Cooked, Arg) of
+ false ->
+ %% The built stacktrace is not used in the argument,
+ %% so we can sink the building of the stacktrace into
+ %% the body of the sequence.
+ B = opt_build_stacktrace(Let#c_let{body=B0}),
+ Body#c_seq{body=B};
+ true ->
+ Let
+ end;
+ #c_case{arg=Arg,clauses=Cs0} ->
+ case core_lib:is_var_used(Cooked, Arg) orelse
+ is_used_in_any_guard(Cooked, Cs0) of
+ false ->
+ %% The built stacktrace is not used in the argument,
+ %% so we can sink the building of the stacktrace into
+ %% each arm of the case.
+ Cs = [begin
+ B = opt_build_stacktrace(Let#c_let{body=B0}),
+ C#c_clause{body=B}
+ end || #c_clause{body=B0}=C <- Cs0],
+ Body#c_case{clauses=Cs};
+ true ->
+ Let
+ end;
+ _ ->
+ Let
+ end;
+opt_build_stacktrace(Expr) ->
+ Expr.
+
+is_used_in_any_guard(V, Cs) ->
+ any(fun(#c_clause{guard=G}) ->
+ core_lib:is_var_used(V, G)
+ end, Cs).
+
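Roughly the source pattern that opt_build_stacktrace/1 is aimed at, as a hedged sketch (the function and its names are made up): because Stk only flows into erlang:raise/3, the cooked stacktrace is never needed and raw_raise can be used instead.

    log_and_reraise(F) ->
        try
            F()
        catch
            Class:Reason:Stk ->
                io:format("~p~n", [{Class,Reason}]),
                erlang:raise(Class, Reason, Stk)
        end.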
%% opt_case_in_let(Let) -> Let'
%% Try to avoid building tuples that are immediately matched.
%% A common pattern is:
@@ -2616,9 +2687,13 @@ delay_build_expr_1(#c_receive{clauses=Cs0,
timeout=Timeout,
action=A0}=Rec, TypeSig) ->
Cs = delay_build_cs(Cs0, TypeSig),
- A = case Timeout of
- #c_literal{val=infinity} -> A0;
- _ -> delay_build_expr(A0, TypeSig)
+ A = case {Timeout,A0} of
+ {#c_literal{val=infinity},#c_literal{}} ->
+ {_Type,Arity} = TypeSig,
+ Es = lists:duplicate(Arity, A0),
+ core_lib:make_values(Es);
+ _ ->
+ delay_build_expr(A0, TypeSig)
end,
Rec#c_receive{clauses=Cs,action=A};
delay_build_expr_1(#c_seq{body=B0}=Seq, TypeSig) ->
@@ -2653,53 +2728,94 @@ opt_simple_let_1(#c_let{vars=Vs0,body=B0}=Let, Arg0, Ctxt, Sub0) ->
%% Optimise let and add new substitutions.
{Vs,Args,Sub1} = let_substs(Vs0, Arg0, Sub0),
BodySub = update_let_types(Vs, Args, Sub1),
+ Sub = Sub1#sub{v=[],s=cerl_sets:new()},
B = body(B0, Ctxt, BodySub),
Arg = core_lib:make_values(Args),
- opt_simple_let_2(Let, Vs, Arg, B, B0, Ctxt, Sub1).
+ opt_simple_let_2(Let, Vs, Arg, B, B0, Sub).
+
+
+%% opt_simple_let_2(Let0, Vs0, Arg0, Body, PrevBody, Sub) -> Core.
+%% Do final simplifications of the let.
+%%
+%% Note that the substitutions and scope in Sub have been cleared
+%% and should not be used.
-opt_simple_let_2(Let0, Vs0, Arg0, Body, PrevBody, Ctxt, Sub) ->
+opt_simple_let_2(Let0, Vs0, Arg0, Body, PrevBody, Sub) ->
case {Vs0,Arg0,Body} of
- {[#c_var{name=N1}],Arg1,#c_var{name=N2}} ->
- case N1 =:= N2 of
- true ->
- %% let <Var> = Arg in <Var> ==> Arg
- Arg1;
- false ->
- %% let <Var> = Arg in <OtherVar> ==> seq Arg OtherVar
- Arg = maybe_suppress_warnings(Arg1, Vs0, PrevBody),
- #c_seq{arg=Arg,body=Body}
- end;
+ {[#c_var{name=V}],Arg1,#c_var{name=V}} ->
+ %% let <Var> = Arg in <Var> ==> Arg
+ Arg1;
{[],#c_values{es=[]},_} ->
%% No variables left.
Body;
- {Vs,Arg1,#c_literal{}} ->
- Arg = maybe_suppress_warnings(Arg1, Vs, PrevBody),
- case Ctxt of
- effect ->
- %% Throw away the literal body.
- Arg;
- value ->
- %% Since the variable is not used in the body, we
- %% can rewrite the let to a sequence.
- %% let <Var> = Arg in Literal ==> seq Arg Literal
- #c_seq{arg=Arg,body=Body}
- end;
- {Vs,Arg1,Body} ->
- %% If none of the variables are used in the body, we can
- %% rewrite the let to a sequence:
- %% let <Var> = Arg in BodyWithoutVar ==>
- %% seq Arg BodyWithoutVar
- case is_any_var_used(Vs, Body) of
- false ->
- Arg = maybe_suppress_warnings(Arg1, Vs, PrevBody),
- #c_seq{arg=Arg,body=Body};
- true ->
- Let1 = Let0#c_let{vars=Vs,arg=Arg1,body=Body},
- opt_bool_case_in_let(Let1, Sub)
+ {[#c_var{name=V}=Var|Vars]=Vars0,Arg1,Body} ->
+ case core_lib:is_var_used(V, Body) of
+ false when Vars =:= [] ->
+ %% If the variable is not used in the body, we can
+ %% rewrite the let to a sequence:
+ %% let <Var> = Arg in BodyWithoutVar ==>
+ %% seq Arg BodyWithoutVar
+ Arg = maybe_suppress_warnings(Arg1, Var, PrevBody),
+ #c_seq{arg=Arg,body=Body};
+ false ->
+ %% There are multiple values returned by the argument
+ %% and the first value is not used (this is a 'case'
+ %% with exported variables, but the return value is
+ %% ignored). We can remove the first variable and the
+            %% ignored). We can remove the first variable and the
+            %% first value returned from the 'let' argument.
+ Let1 = Let0#c_let{vars=Vars,arg=Arg2,body=Body},
+ post_opt_let(Let1, Sub);
+ true ->
+ Let1 = Let0#c_let{vars=Vars0,arg=Arg1,body=Body},
+ post_opt_let(Let1, Sub)
end
end.
-%% maybe_suppress_warnings(Arg, [#c_var{}], PreviousBody) -> Arg'
+%% post_opt_let(Let, Sub)
+%% Final optimizations of the let.
+%%
+%% Note that the substitutions and scope in Sub have been cleared
+%% and should not be used.
+
+post_opt_let(Let0, Sub) ->
+ Let1 = opt_bool_case_in_let(Let0, Sub),
+ opt_build_stacktrace(Let1).
+
+
+%% remove_first_value(Core0, Sub) -> Core.
+%% Core0 is an expression that returns at least two values.
+%% Remove the first value returned from Core0.
+
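A hedged illustration of remove_first_value/2 using Core Erlang-ish notation (shapes inferred from the clauses below):

    %% remove_first_value(#c_values{es=[A,B,C]}, Sub)
    %%   -> roughly <B,C>       %% if A is safe and simple
    %%   -> roughly do A <B,C>  %% otherwise, to preserve A's side effects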
+remove_first_value(#c_values{es=[V|Vs]}, Sub) ->
+ Values = core_lib:make_values(Vs),
+ case is_safe_simple(V, Sub) of
+ false ->
+ #c_seq{arg=V,body=Values};
+ true ->
+ Values
+ end;
+remove_first_value(#c_case{clauses=Cs0}=Core, Sub) ->
+ Cs = remove_first_value_cs(Cs0, Sub),
+ Core#c_case{clauses=Cs};
+remove_first_value(#c_receive{clauses=Cs0,action=Act0}=Core, Sub) ->
+ Cs = remove_first_value_cs(Cs0, Sub),
+ Act = remove_first_value(Act0, Sub),
+ Core#c_receive{clauses=Cs,action=Act};
+remove_first_value(#c_let{body=B}=Core, Sub) ->
+ Core#c_let{body=remove_first_value(B, Sub)};
+remove_first_value(#c_seq{body=B}=Core, Sub) ->
+ Core#c_seq{body=remove_first_value(B, Sub)};
+remove_first_value(#c_primop{}=Core, _Sub) ->
+ Core;
+remove_first_value(#c_call{}=Core, _Sub) ->
+ Core.
+
+remove_first_value_cs(Cs, Sub) ->
+ [C#c_clause{body=remove_first_value(B, Sub)} ||
+ #c_clause{body=B}=C <- Cs].
+
+%% maybe_suppress_warnings(Arg, #c_var{}, PreviousBody) -> Arg'
%% Try to suppress false warnings when a variable is not used.
%% For instance, we don't expect a warning for useless building in:
%%
@@ -2710,12 +2826,12 @@ opt_simple_let_2(Let0, Vs0, Arg0, Body, PrevBody, Ctxt, Sub) ->
%% referenced in the original unoptimized code. If they were, we will
%% consider the warning false and suppress it.
-maybe_suppress_warnings(Arg, Vs, PrevBody) ->
+maybe_suppress_warnings(Arg, #c_var{name=V}, PrevBody) ->
case should_suppress_warning(Arg) of
true ->
Arg; %Already suppressed.
false ->
- case is_any_var_used(Vs, PrevBody) of
+ case core_lib:is_var_used(V, PrevBody) of
true ->
suppress_warning([Arg]);
false ->
@@ -2804,7 +2920,7 @@ move_case_into_arg(#c_case{arg=#c_case{arg=OuterArg,
Outer#c_case{arg=OuterArg,
clauses=[OuterCa,OuterCb]};
false ->
- impossible
+ Inner0
end;
move_case_into_arg(#c_case{arg=#c_seq{arg=OuterArg,body=InnerArg}=Outer,
clauses=InnerClauses}=Inner, _Sub) ->
@@ -2820,15 +2936,8 @@ move_case_into_arg(#c_case{arg=#c_seq{arg=OuterArg,body=InnerArg}=Outer,
%%
Outer#c_seq{arg=OuterArg,
body=Inner#c_case{arg=InnerArg,clauses=InnerClauses}};
-move_case_into_arg(_, _) ->
- impossible.
-
-is_any_var_used([#c_var{name=V}|Vs], Expr) ->
- case core_lib:is_var_used(V, Expr) of
- false -> is_any_var_used(Vs, Expr);
- true -> true
- end;
-is_any_var_used([], _) -> false.
+move_case_into_arg(Expr, _) ->
+ Expr.
%%%
%%% Retrieving information about types.
diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl
index 47c1567f10..a8f4926e55 100644
--- a/lib/compiler/src/v3_codegen.erl
+++ b/lib/compiler/src/v3_codegen.erl
@@ -19,25 +19,6 @@
%%
%% Purpose : Code generator for Beam.
-%% The following assumptions have been made:
-%%
-%% 1. Matches, i.e. things with {match,M,Ret} wrappers, only return
-%% values; no variables are exported. If the match would have returned
-%% extra variables then these have been transformed to multiple return
-%% values.
-%%
-%% 2. All BIF's called in guards are gc-safe so there is no need to
-%% put thing on the stack in the guard. While this would in principle
-%% work it would be difficult to keep track of the stack depth when
-%% trimming.
-%%
-%% The code generation uses variable lifetime information added by
-%% the v3_life module to save variables, allocate registers and
-%% move registers to the stack when necessary.
-%%
-%% We try to use a consistent variable name scheme throughout. The
-%% StackReg record is always called Bef,Int<n>,Aft.
-
-module(v3_codegen).
%% The main interface.
@@ -45,12 +26,14 @@
-import(lists, [member/2,keymember/3,keysort/2,keydelete/3,
append/1,flatmap/2,filter/2,foldl/3,foldr/3,mapfoldl/3,
- sort/1,reverse/1,reverse/2]).
--import(v3_life, [vdb_find/2]).
+ sort/1,reverse/1,reverse/2,map/2]).
+-import(ordsets, [add_element/2,intersection/2,union/2]).
-%%-compile([export_all]).
+-include("v3_kernel.hrl").
--include("v3_life.hrl").
+%% These are not defined in v3_kernel.hrl.
+get_kanno(Kthing) -> element(2, Kthing).
+set_kanno(Kthing, Anno) -> setelement(2, Kthing, Anno).
%% Main codegen structure.
-record(cg, {lcount=1, %Label counter
@@ -61,38 +44,400 @@
functable=#{}, %Map of local functions: {Name,Arity}=>Label
in_catch=false, %Inside a catch or not.
need_frame, %Need a stack frame.
- ultimate_failure %Label for ultimate match failure.
- }).
+ ultimate_failure, %Label for ultimate match failure.
+ ctx %Match context.
+ }).
%% Stack/register state record.
-record(sr, {reg=[], %Register table
stk=[], %Stack table
- res=[]}). %Reserved regs: [{reserved,I,V}]
+ res=[]}). %Registers to reserve
+
+%% Internal records.
+-record(cg_need_heap, {anno=[] :: term(),
+ h=0 :: integer()}).
+-record(cg_block, {anno=[] :: term(),
+ es=[] :: [term()]}).
--type life_module() :: {module(),_,_,[_]}.
+-type vdb_entry() :: {atom(),non_neg_integer(),non_neg_integer()}.
--spec module(life_module(), [compile:option()]) -> {'ok',beam_asm:module_code()}.
+-record(l, {i=0 :: non_neg_integer(), %Op number
+ vdb=[] :: [vdb_entry()], %Variable database
+ a=[] :: [term()]}). %Core annotation
-module({Mod,Exp,Attr,Forms}, _Options) ->
- {Fs,St} = functions(Forms, {atom,Mod}),
- {ok,{Mod,Exp,Attr,Fs,St#cg.lcount}}.
+-spec module(#k_mdef{}, [compile:option()]) -> {'ok',beam_asm:module_code()}.
+
+module(#k_mdef{name=Mod,exports=Es,attributes=Attr,body=Forms}, _Opts) ->
+ {Asm,St} = functions(Forms, {atom,Mod}),
+ {ok,{Mod,Es,Attr,Asm,St#cg.lcount}}.
functions(Forms, AtomMod) ->
mapfoldl(fun (F, St) -> function(F, AtomMod, St) end, #cg{lcount=1}, Forms).
-function({function,Name,Arity,Asm0,Vb,Vdb,Anno}, AtomMod, St0) ->
+function(#k_fdef{anno=#k{a=Anno},func=Name,arity=Arity,
+ vars=As,body=Kb}, AtomMod, St0) ->
+ try
+ #k_match{} = Kb, %Assertion.
+
+ %% Try to suppress the stack frame unless it is
+ %% really needed.
+ Body0 = avoid_stack_frame(Kb),
+
+ %% Annotate kernel records with variable usage.
+ Vdb0 = init_vars(As),
+ {Body,_,Vdb} = body(Body0, 1, Vdb0),
+
+ %% Generate the BEAM assembly code.
+ {Asm,EntryLabel,St} = cg_fun(Body, As, Vdb, AtomMod,
+ {Name,Arity}, Anno, St0),
+ Func = {function,Name,Arity,EntryLabel,Asm},
+ {Func,St}
+ catch
+ Class:Error:Stack ->
+ io:fwrite("Function: ~w/~w\n", [Name,Arity]),
+ erlang:raise(Class, Error, Stack)
+ end.
+
+
+%% avoid_stack_frame(Kernel) -> Kernel'
+%% If possible, avoid setting up a stack frame. Functions
+%% that only do matching, calls to guard BIFs, and tail-recursive
+%% calls don't need a stack frame.
+
+avoid_stack_frame(#k_match{body=Body}=M) ->
try
- {Asm,EntryLabel,St} = cg_fun(Vb, Asm0, Vdb, AtomMod,
- {Name,Arity}, Anno, St0),
- Func = {function,Name,Arity,EntryLabel,Asm},
- {Func,St}
+ M#k_match{body=avoid_stack_frame_1(Body)}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
- io:fwrite("Function: ~w/~w\n", [Name,Arity]),
- erlang:raise(Class, Error, Stack)
+ impossible ->
+ M
end.
+avoid_stack_frame_1(#k_alt{first=First0,then=Then0}=Alt) ->
+ First = avoid_stack_frame_1(First0),
+ Then = avoid_stack_frame_1(Then0),
+ Alt#k_alt{first=First,then=Then};
+avoid_stack_frame_1(#k_bif{op=Op}=Bif) ->
+ case Op of
+ #k_internal{} ->
+ %% Most internal BIFs clobber the X registers.
+ throw(impossible);
+ _ ->
+ Bif
+ end;
+avoid_stack_frame_1(#k_break{anno=Anno,args=Args}) ->
+ #k_guard_break{anno=Anno,args=Args};
+avoid_stack_frame_1(#k_guard_break{}=Break) ->
+ Break;
+avoid_stack_frame_1(#k_enter{}=Enter) ->
+ %% Tail-recursive calls don't need a stack frame.
+ Enter;
+avoid_stack_frame_1(#k_guard{clauses=Cs0}=Guard) ->
+ Cs = avoid_stack_frame_list(Cs0),
+ Guard#k_guard{clauses=Cs};
+avoid_stack_frame_1(#k_guard_clause{guard=G0,body=B0}=C) ->
+ G = avoid_stack_frame_1(G0),
+ B = avoid_stack_frame_1(B0),
+ C#k_guard_clause{guard=G,body=B};
+avoid_stack_frame_1(#k_match{anno=A,vars=Vs,body=B0,ret=Ret}) ->
+ %% Use #k_guard_match{} instead to avoid saving the X registers
+ %% to the stack before matching.
+ B = avoid_stack_frame_1(B0),
+ #k_guard_match{anno=A,vars=Vs,body=B,ret=Ret};
+avoid_stack_frame_1(#k_guard_match{body=B0}=M) ->
+ B = avoid_stack_frame_1(B0),
+ M#k_guard_match{body=B};
+avoid_stack_frame_1(#k_protected{arg=Arg0}=Prot) ->
+ Arg = avoid_stack_frame_1(Arg0),
+ Prot#k_protected{arg=Arg};
+avoid_stack_frame_1(#k_put{}=Put) ->
+ Put;
+avoid_stack_frame_1(#k_return{}=Ret) ->
+ Ret;
+avoid_stack_frame_1(#k_select{var=#k_var{anno=Vanno},types=Types0}=Select) ->
+ case member(reuse_for_context, Vanno) of
+ false ->
+ Types = avoid_stack_frame_list(Types0),
+ Select#k_select{types=Types};
+ true ->
+ %% Including binary patterns that overwrite the register containing
+ %% the binary with the match context may not be safe. For example,
+ %% bs_match_SUITE:bin_tail_e/1 with inlining will be rejected by
+ %% beam_validator.
+ %%
+ %% Essentially the following code is produced:
+ %%
+ %% bs_match {x,0} => {x,0}
+ %% ...
+ %% bs_match {x,0} => {x,1} %% ILLEGAL
+ %%
+ %% A bs_match instruction will only accept a match context as the
+            %% source operand if the source and destination registers are
+ %% the same (as in the first bs_match instruction above).
+ %% The second bs_match instruction is therefore illegal.
+ %%
+ %% This situation is avoided if there is a stack frame:
+ %%
+ %% move {x,0} => {y,0}
+ %% bs_match {x,0} => {x,0}
+ %% ...
+ %% bs_match {y,0} => {x,1} %% LEGAL
+ %%
+ throw(impossible)
+ end;
+avoid_stack_frame_1(#k_seq{arg=#k_call{anno=Anno,op=Op}=Call,
+ body=#k_break{args=BrArgs0}}=Seq) ->
+ case Op of
+ #k_remote{mod=#k_atom{val=Mod},
+ name=#k_atom{val=Name},
+ arity=Arity} ->
+ case erl_bifs:is_exit_bif(Mod, Name, Arity) of
+ false ->
+ %% Will clobber X registers. Must have a stack frame.
+ throw(impossible);
+ true ->
+ %% The call to this BIF will never return. It is safe
+ %% to suppress the stack frame.
+ Bif = #k_bif{anno=Anno,
+ op=#k_internal{name=guard_error,arity=1},
+ args=[Call],ret=[]},
+ BrArgs = lists:duplicate(length(BrArgs0), #k_nil{}),
+ GB = #k_guard_break{anno=#k{us=[],ns=[],a=[]},args=BrArgs},
+ Seq#k_seq{arg=Bif,body=GB}
+ end;
+ _ ->
+ %% Will clobber X registers. Must have a stack frame.
+ throw(impossible)
+ end;
+avoid_stack_frame_1(#k_seq{arg=A0,body=B0}=Seq) ->
+ A = avoid_stack_frame_1(A0),
+ B = avoid_stack_frame_1(B0),
+ Seq#k_seq{arg=A,body=B};
+avoid_stack_frame_1(#k_test{}=Test) ->
+ Test;
+avoid_stack_frame_1(#k_type_clause{values=Values0}=TC) ->
+ Values = avoid_stack_frame_list(Values0),
+ TC#k_type_clause{values=Values};
+avoid_stack_frame_1(#k_val_clause{body=B0}=VC) ->
+ B = avoid_stack_frame_1(B0),
+ VC#k_val_clause{body=B};
+avoid_stack_frame_1(_Body) ->
+ throw(impossible).
+
+avoid_stack_frame_list([H|T]) ->
+ [avoid_stack_frame_1(H)|avoid_stack_frame_list(T)];
+avoid_stack_frame_list([]) -> [].
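For a rough, source-level intuition about which functions can avoid the frame (hypothetical functions, not taken from the patch): a function that only matches and ends in a tail call needs no stack frame, while a function that makes a body call and keeps using its X registers afterwards does.

    %% Only matching and a tail call: the stack frame can be avoided.
    no_frame([H|T]) -> no_frame_1(H, T);
    no_frame([]) -> empty.

    no_frame_1(H, _T) -> H.

    %% The body call to lists:reverse/1 clobbers the X registers and X
    %% is still needed afterwards, so a stack frame is required.
    with_frame(X) ->
        Y = lists:reverse(X),
        {X,Y}.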
+
+
+%% This pass annotates the kernel records with variable lifetime
+%% information. Each thing is given an index and for each variable we
+%% store the first and last index for its occurrence. The variable
+%% database, VDB, attached to each thing is only relevant internally
+%% for that thing.
+%%
+%% For nested things like matches the numbering continues locally and
+%% the VDB for that thing refers to the variable usage within that
+%% thing. Variables which live through such a thing are internally
+%% given a very large last index. Internally the indexes continue
+%% after the index of that thing. This creates no problems as the
+%% internal variable info never escapes and externally we only see
+%% variables which are alive both before and after.
+%%
+%% This means that variables never "escape" from a thing and the only
+%% way to get values from a thing is to "return" them, with 'break' or
+%% 'return'. Externally these values become the return values of the
+%% thing. This is no real limitation as most nested things have
+%% multiple threads so working out a common best variable usage is
+%% difficult.
+
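To make the shape of the variable database concrete (the indexes below are made up; the tuples follow the vdb_entry() type declared above): each entry gives the variable name together with the first and last operation index where it must be considered alive.

    Vdb = [{'X',0,4},   %argument; live until operation 4
           {'Y',1,3},   %bound at operation 1, last used at operation 3
           {'Z',2,3}].  %bound at operation 2, last used at operation 3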
+%% body(Kbody, I, Vdb) -> {[Expr],MaxI,Vdb}.
+%% Handle a body.
+
+body(#k_seq{arg=Ke,body=Kb}, I, Vdb0) ->
+ %%ok = io:fwrite("life ~w:~p~n", [?LINE,{Ke,I,Vdb0}]),
+ A = get_kanno(Ke),
+ Vdb1 = use_vars(union(A#k.us, A#k.ns), I, Vdb0),
+ {Es,MaxI,Vdb2} = body(Kb, I+1, Vdb1),
+ E = expr(Ke, I, Vdb2),
+ {[E|Es],MaxI,Vdb2};
+body(Ke, I, Vdb0) ->
+ %%ok = io:fwrite("life ~w:~p~n", [?LINE,{Ke,I,Vdb0}]),
+ A = get_kanno(Ke),
+ Vdb1 = use_vars(union(A#k.us, A#k.ns), I, Vdb0),
+ E = expr(Ke, I, Vdb1),
+ {[E],I,Vdb1}.
+
+%% expr(Kexpr, I, Vdb) -> Expr.
+
+expr(#k_test{anno=A}=Test, I, _Vdb) ->
+ Test#k_test{anno=#l{i=I,a=A#k.a}};
+expr(#k_call{anno=A}=Call, I, _Vdb) ->
+ Call#k_call{anno=#l{i=I,a=A#k.a}};
+expr(#k_enter{anno=A}=Enter, I, _Vdb) ->
+ Enter#k_enter{anno=#l{i=I,a=A#k.a}};
+expr(#k_bif{anno=A}=Bif, I, _Vdb) ->
+ Bif#k_bif{anno=#l{i=I,a=A#k.a}};
+expr(#k_match{anno=A,body=Kb,ret=Rs}, I, Vdb) ->
+ %% Work out imported variables which need to be locked.
+ Mdb = vdb_sub(I, I+1, Vdb),
+ M = match(Kb, A#k.us, I+1, Mdb),
+ L = #l{i=I,vdb=use_vars(A#k.us, I+1, Mdb),a=A#k.a},
+ #k_match{anno=L,body=M,ret=Rs};
+expr(#k_guard_match{anno=A,body=Kb,ret=Rs}, I, Vdb) ->
+ %% Work out imported variables which need to be locked.
+ Mdb = vdb_sub(I, I+1, Vdb),
+ M = match(Kb, A#k.us, I+1, Mdb),
+ L = #l{i=I,vdb=use_vars(A#k.us, I+1, Mdb),a=A#k.a},
+ #k_guard_match{anno=L,body=M,ret=Rs};
+expr(#k_protected{}=Protected, I, Vdb) ->
+ protected(Protected, I, Vdb);
+expr(#k_try{anno=A,arg=Ka,vars=Vs,body=Kb,evars=Evs,handler=Kh}=Try, I, Vdb) ->
+ %% Lock variables that are alive before the catch and used afterwards.
+ %% Don't lock variables that are only used inside the try.
+ Tdb0 = vdb_sub(I, I+1, Vdb),
+ %% This is the tricky bit. Lock variables in Arg that are used in
+ %% the body and handler. Add try tag 'variable'.
+ Ab = get_kanno(Kb),
+ Ah = get_kanno(Kh),
+ Tdb1 = use_vars(union(Ab#k.us, Ah#k.us), I+3, Tdb0),
+ Tdb2 = vdb_sub(I, I+2, Tdb1),
+ Vnames = fun (Kvar) -> Kvar#k_var.name end, %Get the variable names
+ {Aes,_,Adb} = body(Ka, I+2, add_var({catch_tag,I+1}, I+1, locked, Tdb2)),
+ {Bes,_,Bdb} = body(Kb, I+4, new_vars(sort(map(Vnames, Vs)), I+3, Tdb2)),
+ {Hes,_,Hdb} = body(Kh, I+4, new_vars(sort(map(Vnames, Evs)), I+3, Tdb2)),
+ L = #l{i=I,vdb=Tdb1,a=A#k.a},
+ Try#k_try{anno=L,
+ arg=#cg_block{es=Aes,anno=#l{i=I+1,vdb=Adb,a=[]}},
+ vars=Vs,body=#cg_block{es=Bes,anno=#l{i=I+3,vdb=Bdb,a=[]}},
+ evars=Evs,handler=#cg_block{es=Hes,anno=#l{i=I+3,vdb=Hdb,a=[]}}};
+expr(#k_try_enter{anno=A,arg=Ka,vars=Vs,body=Kb,evars=Evs,handler=Kh}, I, Vdb) ->
+ %% Lock variables that are alive before the catch and used afterwards.
+ %% Don't lock variables that are only used inside the try.
+ Tdb0 = vdb_sub(I, I+1, Vdb),
+ %% This is the tricky bit. Lock variables in Arg that are used in
+ %% the body and handler. Add try tag 'variable'.
+ Ab = get_kanno(Kb),
+ Ah = get_kanno(Kh),
+ Tdb1 = use_vars(union(Ab#k.us, Ah#k.us), I+3, Tdb0),
+ Tdb2 = vdb_sub(I, I+2, Tdb1),
+ Vnames = fun (Kvar) -> Kvar#k_var.name end, %Get the variable names
+ {Aes,_,Adb} = body(Ka, I+2, add_var({catch_tag,I+1}, I+1, 1000000, Tdb2)),
+ {Bes,_,Bdb} = body(Kb, I+4, new_vars(sort(map(Vnames, Vs)), I+3, Tdb2)),
+ {Hes,_,Hdb} = body(Kh, I+4, new_vars(sort(map(Vnames, Evs)), I+3, Tdb2)),
+ L = #l{i=I,vdb=Tdb1,a=A#k.a},
+ #k_try_enter{anno=L,
+ arg=#cg_block{es=Aes,anno=#l{i=I+1,vdb=Adb,a=[]}},
+ vars=Vs,body=#cg_block{es=Bes,anno=#l{i=I+3,vdb=Bdb,a=[]}},
+ evars=Evs,handler=#cg_block{es=Hes,anno=#l{i=I+3,vdb=Hdb,a=[]}}};
+expr(#k_catch{anno=A,body=Kb}=Catch, I, Vdb) ->
+ %% Lock variables that are alive before the catch and used afterwards.
+ %% Don't lock variables that are only used inside the catch.
+ %% Add catch tag 'variable'.
+ Cdb0 = vdb_sub(I, I+1, Vdb),
+ {Es,_,Cdb1} = body(Kb, I+1, add_var({catch_tag,I}, I, locked, Cdb0)),
+ L = #l{i=I,vdb=Cdb1,a=A#k.a},
+ Catch#k_catch{anno=L,body=#cg_block{es=Es}};
+expr(#k_receive{anno=A,var=V,body=Kb,action=Ka}=Recv, I, Vdb) ->
+ %% Work out imported variables which need to be locked.
+ Rdb = vdb_sub(I, I+1, Vdb),
+ M = match(Kb, add_element(V#k_var.name, A#k.us), I+1,
+ new_vars([V#k_var.name], I, Rdb)),
+ {Tes,_,Adb} = body(Ka, I+1, Rdb),
+ Le = #l{i=I,vdb=use_vars(A#k.us, I+1, Vdb),a=A#k.a},
+ Recv#k_receive{anno=Le,body=M,
+ action=#cg_block{anno=#l{i=I+1,vdb=Adb,a=[]},es=Tes}};
+expr(#k_receive_accept{anno=A}, I, _Vdb) ->
+ #k_receive_accept{anno=#l{i=I,a=A#k.a}};
+expr(#k_receive_next{anno=A}, I, _Vdb) ->
+ #k_receive_next{anno=#l{i=I,a=A#k.a}};
+expr(#k_put{anno=A}=Put, I, _Vdb) ->
+ Put#k_put{anno=#l{i=I,a=A#k.a}};
+expr(#k_break{anno=A}=Break, I, _Vdb) ->
+ Break#k_break{anno=#l{i=I,a=A#k.a}};
+expr(#k_guard_break{anno=A}=Break, I, _Vdb) ->
+ Break#k_guard_break{anno=#l{i=I,a=A#k.a}};
+expr(#k_return{anno=A}=Ret, I, _Vdb) ->
+ Ret#k_return{anno=#l{i=I,a=A#k.a}}.
+
+%% protected(Kprotected, I, Vdb) -> Protected.
+%% Only used in guards.
+
+protected(#k_protected{anno=A,arg=Ts}=Prot, I, Vdb) ->
+ %% Lock variables that are alive before try and used afterwards.
+ %% Don't lock variables that are only used inside the protected
+ %% expression.
+ Pdb0 = vdb_sub(I, I+1, Vdb),
+ {T,MaxI,Pdb1} = body(Ts, I+1, Pdb0),
+ Pdb2 = use_vars(A#k.ns, MaxI+1, Pdb1), %Save "return" values
+ Prot#k_protected{arg=T,anno=#l{i=I,a=A#k.a,vdb=Pdb2}}.
+
+%% match(Kexpr, [LockVar], I, Vdb) -> Expr.
+%% Convert match tree to old format.
+
+match(#k_alt{anno=A,first=Kf,then=Kt}, Ls, I, Vdb0) ->
+ Vdb1 = use_vars(union(A#k.us, Ls), I, Vdb0),
+ F = match(Kf, Ls, I+1, Vdb1),
+ T = match(Kt, Ls, I+1, Vdb1),
+ #k_alt{anno=[],first=F,then=T};
+match(#k_select{anno=A,types=Kts}=Select, Ls, I, Vdb0) ->
+ Vdb1 = use_vars(union(A#k.us, Ls), I, Vdb0),
+ Ts = [type_clause(Tc, Ls, I+1, Vdb1) || Tc <- Kts],
+ Select#k_select{anno=[],types=Ts};
+match(#k_guard{anno=A,clauses=Kcs}, Ls, I, Vdb0) ->
+ Vdb1 = use_vars(union(A#k.us, Ls), I, Vdb0),
+ Cs = [guard_clause(G, Ls, I+1, Vdb1) || G <- Kcs],
+ #k_guard{anno=[],clauses=Cs};
+match(Other, Ls, I, Vdb0) ->
+ Vdb1 = use_vars(Ls, I, Vdb0),
+ {B,_,Vdb2} = body(Other, I+1, Vdb1),
+ Le = #l{i=I,vdb=Vdb2,a=[]},
+ #cg_block{anno=Le,es=B}.
+
+type_clause(#k_type_clause{anno=A,type=T,values=Kvs}, Ls, I, Vdb0) ->
+ %%ok = io:format("life ~w: ~p~n", [?LINE,{T,Kvs}]),
+ Vdb1 = use_vars(union(A#k.us, Ls), I+1, Vdb0),
+ Vs = [val_clause(Vc, Ls, I+1, Vdb1) || Vc <- Kvs],
+ #k_type_clause{anno=[],type=T,values=Vs}.
+
+val_clause(#k_val_clause{anno=A,val=V,body=Kb}, Ls0, I, Vdb0) ->
+ New = (get_kanno(V))#k.ns,
+ Bus = (get_kanno(Kb))#k.us,
+ %%ok = io:format("Ls0 = ~p, Used=~p\n New=~p, Bus=~p\n", [Ls0,Used,New,Bus]),
+ Ls1 = union(intersection(New, Bus), Ls0), %Lock for safety
+ Vdb1 = use_vars(union(A#k.us, Ls1), I+1, new_vars(New, I, Vdb0)),
+ B = match(Kb, Ls1, I+1, Vdb1),
+ Le = #l{i=I,vdb=use_vars(Bus, I+1, Vdb1),a=A#k.a},
+ #k_val_clause{anno=Le,val=V,body=B}.
+
+guard_clause(#k_guard_clause{anno=A,guard=Kg,body=Kb}, Ls, I, Vdb0) ->
+ Vdb1 = use_vars(union(A#k.us, Ls), I+2, Vdb0),
+ Gdb = vdb_sub(I+1, I+2, Vdb1),
+ G = protected(Kg, I+1, Gdb),
+ B = match(Kb, Ls, I+2, Vdb1),
+ Le = #l{i=I,vdb=use_vars((get_kanno(Kg))#k.us, I+2, Vdb1),a=A#k.a},
+ #k_guard_clause{anno=Le,guard=G,body=B}.
+
+
+%% Here follows the code generator pass.
+%%
+%% The following assumptions have been made:
+%%
+%% 1. Matches, i.e. things with {match,M,Ret} wrappers, only return
+%% values; no variables are exported. If the match would have returned
+%% extra variables then these have been transformed to multiple return
+%% values.
+%%
+%% 2. All BIF's called in guards are gc-safe so there is no need to
+%% put things on the stack in the guard. While this would in principle
+%% work it would be difficult to keep track of the stack depth when
+%% trimming.
+%%
+%% The code generation uses variable lifetime information added by
+%% the previous pass to save variables, allocate registers and
+%% move registers to the stack when necessary.
+%%
+%% We try to use a consistent variable name scheme throughout. The
+%% StackReg record is always called Bef,Int<n>,Aft.
+
%% cg_fun([Lkexpr], [HeadVar], Vdb, State) -> {[Ainstr],State}
cg_fun(Les, Hvs, Vdb, AtomMod, NameArity, Anno, St0) ->
@@ -114,18 +459,18 @@ cg_fun(Les, Hvs, Vdb, AtomMod, NameArity, Anno, St0) ->
    %% Note that an 'if_end' instruction does not need any
%% live x registers, so it will always be safe to jump to
%% it. (We never ever expect the jump to be taken, and in
- %% must functions there will never be any references to
+ %% most functions there will never be any references to
%% the label in the first place.)
%%
{UltimateMatchFail,St3} = new_label(St2),
%% Create initial stack/register state, clear unused arguments.
- Bef = clear_dead(#sr{reg=foldl(fun ({var,V}, Reg) ->
+ Bef = clear_dead(#sr{reg=foldl(fun (#k_var{name=V}, Reg) ->
put_reg(V, Reg)
end, [], Hvs),
stk=[]}, 0, Vdb),
- {B,_Aft,St} = cg_list(Les, 0, Vdb, Bef,
+ {B,_Aft,St} = cg_list(Les, Vdb, Bef,
St3#cg{bfail=0,
ultimate_failure=UltimateMatchFail,
is_top_block=true}),
@@ -136,66 +481,64 @@ cg_fun(Les, Hvs, Vdb, AtomMod, NameArity, Anno, St0) ->
%% cg(Lkexpr, Vdb, StackReg, State) -> {[Ainstr],StackReg,State}.
%% Generate code for a kexpr.
-%% Split function into two steps for clarity, not efficiency.
-cg(Le, Vdb, Bef, St) ->
- cg(Le#l.ke, Le, Vdb, Bef, St).
-
-cg({block,Es}, Le, Vdb, Bef, St) ->
+cg(#cg_block{anno=Le,es=Es}, Vdb, Bef, St) ->
block_cg(Es, Le, Vdb, Bef, St);
-cg({match,M,Rs}, Le, Vdb, Bef, St) ->
+cg(#k_match{anno=Le,body=M,ret=Rs}, Vdb, Bef, St) ->
match_cg(M, Rs, Le, Vdb, Bef, St);
-cg({guard_match,M,Rs}, Le, Vdb, Bef, St) ->
+cg(#k_guard_match{anno=Le,body=M,ret=Rs}, Vdb, Bef, St) ->
guard_match_cg(M, Rs, Le, Vdb, Bef, St);
-cg({call,Func,As,Rs}, Le, Vdb, Bef, St) ->
+cg(#k_call{anno=Le,op=Func,args=As,ret=Rs}, Vdb, Bef, St) ->
call_cg(Func, As, Rs, Le, Vdb, Bef, St);
-cg({enter,Func,As}, Le, Vdb, Bef, St) ->
+cg(#k_enter{anno=Le,op=Func,args=As}, Vdb, Bef, St) ->
enter_cg(Func, As, Le, Vdb, Bef, St);
-cg({bif,Bif,As,Rs}, Le, Vdb, Bef, St) ->
- bif_cg(Bif, As, Rs, Le, Vdb, Bef, St);
-cg({gc_bif,Bif,As,Rs}, Le, Vdb, Bef, St) ->
- gc_bif_cg(Bif, As, Rs, Le, Vdb, Bef, St);
-cg({internal,Bif,As,Rs}, Le, Vdb, Bef, St) ->
- internal_cg(Bif, As, Rs, Le, Vdb, Bef, St);
-cg({receive_loop,Te,Rvar,Rm,Tes,Rs}, Le, Vdb, Bef, St) ->
+cg(#k_bif{anno=Le}=Bif, Vdb, Bef, St) ->
+ bif_cg(Bif, Le, Vdb, Bef, St);
+cg(#k_receive{anno=Le,timeout=Te,var=Rvar,body=Rm,action=Tes,ret=Rs},
+ Vdb, Bef, St) ->
recv_loop_cg(Te, Rvar, Rm, Tes, Rs, Le, Vdb, Bef, St);
-cg(receive_next, Le, Vdb, Bef, St) ->
+cg(#k_receive_next{anno=Le}, Vdb, Bef, St) ->
recv_next_cg(Le, Vdb, Bef, St);
-cg(receive_accept, _Le, _Vdb, Bef, St) -> {[remove_message],Bef,St};
-cg({'try',Ta,Vs,Tb,Evs,Th,Rs}, Le, Vdb, Bef, St) ->
+cg(#k_receive_accept{}, _Vdb, Bef, St) ->
+ {[remove_message],Bef,St};
+cg(#k_try{anno=Le,arg=Ta,vars=Vs,body=Tb,evars=Evs,handler=Th,ret=Rs},
+ Vdb, Bef, St) ->
try_cg(Ta, Vs, Tb, Evs, Th, Rs, Le, Vdb, Bef, St);
-cg({try_enter,Ta,Vs,Tb,Evs,Th}, Le, Vdb, Bef, St) ->
+cg(#k_try_enter{anno=Le,arg=Ta,vars=Vs,body=Tb,evars=Evs,handler=Th},
+ Vdb, Bef, St) ->
try_enter_cg(Ta, Vs, Tb, Evs, Th, Le, Vdb, Bef, St);
-cg({'catch',Cb,R}, Le, Vdb, Bef, St) ->
+cg(#k_catch{anno=Le,body=Cb,ret=[R]}, Vdb, Bef, St) ->
catch_cg(Cb, R, Le, Vdb, Bef, St);
-cg({set,Var,Con}, Le, Vdb, Bef, St) ->
- set_cg(Var, Con, Le, Vdb, Bef, St);
-cg({return,Rs}, Le, Vdb, Bef, St) -> return_cg(Rs, Le, Vdb, Bef, St);
-cg({break,Bs}, Le, Vdb, Bef, St) -> break_cg(Bs, Le, Vdb, Bef, St);
-cg({guard_break,Bs,N}, Le, Vdb, Bef, St) ->
- guard_break_cg(Bs, N, Le, Vdb, Bef, St);
-cg({need_heap,H}, _Le, _Vdb, Bef, St) ->
+cg(#k_put{anno=Le,arg=Con,ret=Var}, Vdb, Bef, St) ->
+ put_cg(Var, Con, Le, Vdb, Bef, St);
+cg(#k_return{anno=Le,args=Rs}, Vdb, Bef, St) ->
+ return_cg(Rs, Le, Vdb, Bef, St);
+cg(#k_break{anno=Le,args=Bs}, Vdb, Bef, St) ->
+ break_cg(Bs, Le, Vdb, Bef, St);
+cg(#k_guard_break{anno=Le,args=Bs}, Vdb, Bef, St) ->
+ guard_break_cg(Bs, Le, Vdb, Bef, St);
+cg(#cg_need_heap{h=H}, _Vdb, Bef, St) ->
{[{test_heap,H,max_reg(Bef#sr.reg)}],Bef,St}.
%% cg_list([Kexpr], FirstI, Vdb, StackReg, St) -> {[Ainstr],StackReg,St}.
-cg_list(Kes, I, Vdb, Bef, St0) ->
+cg_list(Kes, Vdb, Bef, St0) ->
{Keis,{Aft,St1}} =
flatmapfoldl(fun (Ke, {Inta,Sta}) ->
{Keis,Intb,Stb} = cg(Ke, Vdb, Inta, Sta),
{Keis,{Intb,Stb}}
- end, {Bef,St0}, need_heap(Kes, I)),
+ end, {Bef,St0}, need_heap(Kes)),
{Keis,Aft,St1}.
%% need_heap([Lkexpr], I, St) -> [Lkexpr].
%% Insert need_heap instructions in Kexpr list. Try to be smart and
%% collect them together as much as possible.
-need_heap(Kes0, I) ->
+need_heap(Kes0) ->
{Kes,H} = need_heap_0(reverse(Kes0), 0, []),
%% Prepend need_heap if necessary.
- need_heap_need(I, H) ++ Kes.
+ need_heap_need(H) ++ Kes.
need_heap_0([Ke|Kes], H0, Acc) ->
{Ns,H} = need_heap_1(Ke, H0),
@@ -203,27 +546,54 @@ need_heap_0([Ke|Kes], H0, Acc) ->
need_heap_0([], H, Acc) ->
{Acc,H}.
-need_heap_1(#l{ke={set,_,{binary,_}},i=I}, H) ->
- {need_heap_need(I, H),0};
-need_heap_1(#l{ke={set,_,{map,_,_,_}},i=I}, H) ->
- {need_heap_need(I, H),0};
-need_heap_1(#l{ke={set,_,Val}}, H) ->
+need_heap_1(#k_put{arg=#k_binary{}}, H) ->
+ {need_heap_need(H),0};
+need_heap_1(#k_put{arg=#k_map{}}, H) ->
+ {need_heap_need(H),0};
+need_heap_1(#k_put{arg=Val}, H) ->
%% Just pass through adding to needed heap.
{[],H + case Val of
- {cons,_} -> 2;
- {tuple,Es} -> 1 + length(Es);
+ #k_cons{} -> 2;
+ #k_tuple{es=Es} -> 1 + length(Es);
_Other -> 0
end};
-need_heap_1(#l{ke={bif,_Bif,_As,_Rs}}, H) ->
- {[],H};
-need_heap_1(#l{i=I}, H) ->
+need_heap_1(#k_bif{}=Bif, H) ->
+ case is_gc_bif(Bif) of
+ false ->
+ {[],H};
+ true ->
+ {need_heap_need(H),0}
+ end;
+need_heap_1(_Ke, H) ->
%% Call or call-like instruction such as set_tuple_element/3.
- {need_heap_need(I, H),0}.
-
-need_heap_need(_I, 0) -> [];
-need_heap_need(I, H) -> [#l{ke={need_heap,H},i=I}].
-
-%% match_cg(Match, [Ret], Le, Vdb, StackReg, State) ->
+ {need_heap_need(H),0}.
+
+need_heap_need(0) -> [];
+need_heap_need(H) -> [#cg_need_heap{h=H}].
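As a rough worked example of the bookkeeping above (heap_demo/2 and call/1 are made-up names): a cons cell costs 2 words and a 3-tuple costs 1+3 words, and both puts end up covered by a single heap test.

    heap_demo(A, B) ->
        L = [A|B],        %#k_put{} of a cons: 2 words
        T = {x,y,L},      %#k_put{} of a 3-tuple: 1+3 words
        call(T).          %ends the basic block
    %% Roughly, need_heap/1 places one #cg_need_heap{h=6} in front of
    %% the two puts, which cg/4 turns into a single {test_heap,6,Live}.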
+
+%% is_gc_bif(#k_bif{}) -> true|false.
+%% is_gc_bif(Name, Arity) -> true|false.
+%% Determines whether the BIF Name/Arity might do a GC.
+
+is_gc_bif(#k_bif{op=#k_remote{name=#k_atom{val=Name}},args=Args}) ->
+ is_gc_bif(Name, length(Args));
+is_gc_bif(#k_bif{op=#k_internal{}}) ->
+ true.
+
+is_gc_bif(hd, 1) -> false;
+is_gc_bif(tl, 1) -> false;
+is_gc_bif(self, 0) -> false;
+is_gc_bif(node, 0) -> false;
+is_gc_bif(node, 1) -> false;
+is_gc_bif(element, 2) -> false;
+is_gc_bif(get, 1) -> false;
+is_gc_bif(tuple_size, 1) -> false;
+is_gc_bif(Bif, Arity) ->
+ not (erl_internal:bool_op(Bif, Arity) orelse
+ erl_internal:new_type_test(Bif, Arity) orelse
+ erl_internal:comp_op(Bif, Arity)).
+
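A few concrete classifications that follow from the clauses above (illustrative, not an exhaustive list):

    %% is_gc_bif(tuple_size, 1) -> false    %explicitly listed as safe
    %% is_gc_bif(is_atom, 1)    -> false    %new-style type test
    %% is_gc_bif('=:=', 2)      -> false    %comparison operator
    %% is_gc_bif(length, 1)     -> true     %may garbage collect
    %% is_gc_bif(setelement, 3) -> true     %may garbage collect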
+%% match_cg(Match, [Ret], Le, Vdb, StackReg, State) ->
%% {[Ainstr],StackReg,State}.
%% Generate code for a match. First save all variables on the stack
%% that are to survive after the match. We leave saved variables in
@@ -244,7 +614,10 @@ match_cg(M, Rs, Le, Vdb, Bef, St0) ->
guard_match_cg(M, Rs, Le, Vdb, Bef, St0) ->
I = Le#l.i,
{B,St1} = new_label(St0),
- #cg{bfail=Fail} = St1,
+ Fail = case St0 of
+ #cg{bfail=0,ultimate_failure=Fail0} -> Fail0;
+ #cg{bfail=Fail0} -> Fail0
+ end,
{Mis,Aft,St2} = match_cg(M, Fail, Bef, St1#cg{break=B}),
%% Update the register descriptors for the return registers.
Reg = guard_match_regs(Aft#sr.reg, Rs),
@@ -252,7 +625,7 @@ guard_match_cg(M, Rs, Le, Vdb, Bef, St0) ->
clear_dead(Aft#sr{reg=Reg}, I, Vdb),
St2#cg{break=St1#cg.break}}.
-guard_match_regs([{I,gbreakvar}|Rs], [{var,V}|Vs]) ->
+guard_match_regs([{I,gbreakvar}|Rs], [#k_var{name=V}|Vs]) ->
[{I,V}|guard_match_regs(Rs, Vs)];
guard_match_regs([R|Rs], Vs) ->
[R|guard_match_regs(Rs, Vs)];
@@ -264,17 +637,14 @@ guard_match_regs([], []) -> [].
%% down as each level which uses this takes its own internal Vdb not
%% the outer one.
-match_cg(Le, Fail, Bef, St) ->
- match_cg(Le#l.ke, Le, Fail, Bef, St).
-
-match_cg({alt,F,S}, _Le, Fail, Bef, St0) ->
+match_cg(#k_alt{first=F,then=S}, Fail, Bef, St0) ->
{Tf,St1} = new_label(St0),
{Fis,Faft,St2} = match_cg(F, Tf, Bef, St1),
{Sis,Saft,St3} = match_cg(S, Fail, Bef, St2),
Aft = sr_merge(Faft, Saft),
{Fis ++ [{label,Tf}] ++ Sis,Aft,St3};
-match_cg({select,{var,Vname}=V,Scs0}, #l{a=Anno}, Fail, Bef, St) ->
- ReuseForContext = member(reuse_for_context, Anno) andalso
+match_cg(#k_select{var=#k_var{anno=Vanno,name=Vname}=V,types=Scs0}, Fail, Bef, St) ->
+ ReuseForContext = member(reuse_for_context, Vanno) andalso
find_reg(Vname, Bef#sr.reg) =/= error,
Scs = case ReuseForContext of
false -> Scs0;
@@ -283,10 +653,10 @@ match_cg({select,{var,Vname}=V,Scs0}, #l{a=Anno}, Fail, Bef, St) ->
match_fmf(fun (S, F, Sta) ->
select_cg(S, V, F, Fail, Bef, Sta) end,
Fail, St, Scs);
-match_cg({guard,Gcs}, _Le, Fail, Bef, St) ->
+match_cg(#k_guard{clauses=Gcs}, Fail, Bef, St) ->
match_fmf(fun (G, F, Sta) -> guard_clause_cg(G, F, Bef, Sta) end,
Fail, St, Gcs);
-match_cg({block,Es}, Le, _Fail, Bef, St) ->
+match_cg(#cg_block{anno=Le,es=Es}, _Fail, Bef, St) ->
%% Must clear registers and stack of dead variables.
Int = clear_dead(Bef, Le#l.i, Le#l.vdb),
block_cg(Es, Le, Int, St).
@@ -294,8 +664,8 @@ match_cg({block,Es}, Le, _Fail, Bef, St) ->
%% bsm_rename_ctx([Clause], Var) -> [Clause]
%% We know from an annotation that the register for a binary can
%% be reused for the match context because the two are not truly
-%% alive at the same time (even though the conservative life time
-%% information calculated by v3_life says so).
+%% alive at the same time (even though the life time information
+%% says so).
%%
%% The easiest way to have those variables share the same register is
%% to rename the variable with the shortest life-span (the match
@@ -306,12 +676,14 @@ match_cg({block,Es}, Le, _Fail, Bef, St) ->
%% We must also remove all information about the match context
%% variable from all life-time information databases (Vdb).
-bsm_rename_ctx([#l{ke={type_clause,binary,
- [#l{ke={val_clause,{binary,{var,Old}},Ke0}}=L2]}}=L1|Cs], New) ->
+bsm_rename_ctx([#k_type_clause{type=k_binary,values=Vcs}=TC|Cs], New) ->
+ [#k_val_clause{val=#k_binary{segs=#k_var{name=Old}}=Bin,
+ body=Ke0}=VC0] = Vcs,
Ke = bsm_rename_ctx(Ke0, Old, New, false),
- [L1#l{ke={type_clause,binary,
- [L2#l{ke={val_clause,{binary,{var,New}},Ke}}]}}|bsm_rename_ctx(Cs, New)];
-bsm_rename_ctx([C|Cs], New) ->
+ VC = VC0#k_val_clause{val=Bin#k_binary{segs=#k_var{name=New}},
+ body=Ke},
+ [TC#k_type_clause{values=[VC]}|bsm_rename_ctx(Cs, New)];
+bsm_rename_ctx([C|Cs], New) ->
[C|bsm_rename_ctx(Cs, New)];
bsm_rename_ctx([], _) -> [].
@@ -321,34 +693,24 @@ bsm_rename_ctx([], _) -> [].
%% only complicate things to recurse into blocks not in a protected
%% (the match context variable is not live inside them).
-bsm_rename_ctx(#l{ke={select,{var,V},Cs0}}=L, Old, New, InProt) ->
+bsm_rename_ctx(#k_select{var=#k_var{name=V},types=Cs0}=Sel,
+ Old, New, InProt) ->
Cs = bsm_rename_ctx_list(Cs0, Old, New, InProt),
- L#l{ke={select,{var,bsm_rename_var(V, Old, New)},Cs}};
-bsm_rename_ctx(#l{ke={type_clause,Type,Cs0}}=L, Old, New, InProt) ->
+ Sel#k_select{var=#k_var{name=bsm_rename_var(V, Old, New)},types=Cs};
+bsm_rename_ctx(#k_type_clause{values=Cs0}=TC, Old, New, InProt) ->
Cs = bsm_rename_ctx_list(Cs0, Old, New, InProt),
- L#l{ke={type_clause,Type,Cs}};
-bsm_rename_ctx(#l{ke={val_clause,{bin_end,V},Ke0}}=L, Old, New, InProt) ->
+ TC#k_type_clause{values=Cs};
+bsm_rename_ctx(#k_val_clause{body=Ke0}=VC, Old, New, InProt) ->
Ke = bsm_rename_ctx(Ke0, Old, New, InProt),
- L#l{ke={val_clause,{bin_end,bsm_rename_var(V, Old, New)},Ke}};
-bsm_rename_ctx(#l{ke={val_clause,{bin_seg,V,Sz,U,Type,Fl,Vs},Ke0}}=L,
- Old, New, InProt) ->
- Ke = bsm_rename_ctx(Ke0, Old, New, InProt),
- L#l{ke={val_clause,{bin_seg,bsm_rename_var(V, Old, New),Sz,U,Type,Fl,Vs},Ke}};
-bsm_rename_ctx(#l{ke={val_clause,{bin_int,V,Sz,U,Fl,Val,Vs},Ke0}}=L,
- Old, New, InProt) ->
- Ke = bsm_rename_ctx(Ke0, Old, New, InProt),
- L#l{ke={val_clause,{bin_int,bsm_rename_var(V, Old, New),Sz,U,Fl,Val,Vs},Ke}};
-bsm_rename_ctx(#l{ke={val_clause,Val,Ke0}}=L, Old, New, InProt) ->
- Ke = bsm_rename_ctx(Ke0, Old, New, InProt),
- L#l{ke={val_clause,Val,Ke}};
-bsm_rename_ctx(#l{ke={alt,F0,S0}}=L, Old, New, InProt) ->
+ VC#k_val_clause{body=Ke};
+bsm_rename_ctx(#k_alt{first=F0,then=S0}=Alt, Old, New, InProt) ->
F = bsm_rename_ctx(F0, Old, New, InProt),
S = bsm_rename_ctx(S0, Old, New, InProt),
- L#l{ke={alt,F,S}};
-bsm_rename_ctx(#l{ke={guard,Gcs0}}=L, Old, New, InProt) ->
+ Alt#k_alt{first=F,then=S};
+bsm_rename_ctx(#k_guard{clauses=Gcs0}=Guard, Old, New, InProt) ->
Gcs = bsm_rename_ctx_list(Gcs0, Old, New, InProt),
- L#l{ke={guard,Gcs}};
-bsm_rename_ctx(#l{ke={guard_clause,G0,B0}}=L, Old, New, InProt) ->
+ Guard#k_guard{clauses=Gcs};
+bsm_rename_ctx(#k_guard_clause{guard=G0,body=B0}=GC, Old, New, InProt) ->
G = bsm_rename_ctx(G0, Old, New, InProt),
B = bsm_rename_ctx(B0, Old, New, InProt),
%% A guard clause may cause unsaved variables to be saved on the stack.
@@ -356,49 +718,45 @@ bsm_rename_ctx(#l{ke={guard_clause,G0,B0}}=L, Old, New, InProt) ->
%% same register), it is neither in the stack nor register descriptor
%% lists and we would crash when we didn't find it unless we remove
%% it from the database.
- bsm_forget_var(L#l{ke={guard_clause,G,B}}, Old);
-bsm_rename_ctx(#l{ke={protected,Ts0,Rs}}=L, Old, New, _InProt) ->
+ bsm_forget_var(GC#k_guard_clause{guard=G,body=B}, Old);
+bsm_rename_ctx(#k_protected{arg=Ts0}=Prot, Old, New, _InProt) ->
InProt = true,
Ts = bsm_rename_ctx_list(Ts0, Old, New, InProt),
- bsm_forget_var(L#l{ke={protected,Ts,Rs}}, Old);
-bsm_rename_ctx(#l{ke={match,Ms0,Rs}}=L, Old, New, InProt) ->
+ bsm_forget_var(Prot#k_protected{arg=Ts}, Old);
+bsm_rename_ctx(#k_guard_match{body=Ms0}=Match, Old, New, InProt) ->
Ms = bsm_rename_ctx(Ms0, Old, New, InProt),
- L#l{ke={match,Ms,Rs}};
-bsm_rename_ctx(#l{ke={guard_match,Ms0,Rs}}=L, Old, New, InProt) ->
- Ms = bsm_rename_ctx(Ms0, Old, New, InProt),
- L#l{ke={guard_match,Ms,Rs}};
-bsm_rename_ctx(#l{ke={test,_,_,_}}=L, _, _, _) -> L;
-bsm_rename_ctx(#l{ke={bif,_,_,_}}=L, _, _, _) -> L;
-bsm_rename_ctx(#l{ke={gc_bif,_,_,_}}=L, _, _, _) -> L;
-bsm_rename_ctx(#l{ke={set,_,_}}=L, _, _, _) -> L;
-bsm_rename_ctx(#l{ke={call,_,_,_}}=L, _, _, _) -> L;
-bsm_rename_ctx(#l{ke={block,_}}=L, Old, _, false) ->
+ Match#k_guard_match{body=Ms};
+bsm_rename_ctx(#k_test{}=Test, _, _, _) -> Test;
+bsm_rename_ctx(#k_bif{}=Bif, _, _, _) -> Bif;
+bsm_rename_ctx(#k_put{}=Put, _, _, _) -> Put;
+bsm_rename_ctx(#k_call{}=Call, _, _, _) -> Call;
+bsm_rename_ctx(#cg_block{}=Block, Old, _, false) ->
%% This block is not inside a protected. The match context variable cannot
%% possibly be live inside the block.
- bsm_forget_var(L, Old);
-bsm_rename_ctx(#l{ke={block,Bl0}}=L, Old, New, true) ->
+ bsm_forget_var(Block, Old);
+bsm_rename_ctx(#cg_block{es=Es0}=Block, Old, New, true) ->
%% A block in a protected. We must recursively rename the variable
%% inside the block.
- Bl = bsm_rename_ctx_list(Bl0, Old, New, true),
- bsm_forget_var(L#l{ke={block,Bl}}, Old);
-bsm_rename_ctx(#l{ke={guard_break,Bs,Locked0}}=L0, Old, _New, _InProt) ->
- Locked = Locked0 -- [Old],
- L = L0#l{ke={guard_break,Bs,Locked}},
- bsm_forget_var(L, Old).
+ Es = bsm_rename_ctx_list(Es0, Old, New, true),
+ bsm_forget_var(Block#cg_block{es=Es}, Old);
+bsm_rename_ctx(#k_guard_break{}=Break, Old, _New, _InProt) ->
+ bsm_forget_var(Break, Old).
bsm_rename_ctx_list([C|Cs], Old, New, InProt) ->
[bsm_rename_ctx(C, Old, New, InProt)|
bsm_rename_ctx_list(Cs, Old, New, InProt)];
bsm_rename_ctx_list([], _, _, _) -> [].
-
+
bsm_rename_var(Old, Old, New) -> New;
bsm_rename_var(V, _, _) -> V.
%% bsm_forget_var(#l{}, Variable) -> #l{}
%% Remove a variable from the variable life-time database.
-bsm_forget_var(#l{vdb=Vdb}=L, V) ->
- L#l{vdb=keydelete(V, 1, Vdb)}.
+bsm_forget_var(Ke, V) ->
+ #l{vdb=Vdb} = L0 = get_kanno(Ke),
+ L = L0#l{vdb=keydelete(V, 1, Vdb)},
+ set_kanno(Ke, L).
%% block_cg([Kexpr], Le, Vdb, StackReg, St) -> {[Ainstr],StackReg,St}.
%% block_cg([Kexpr], Le, StackReg, St) -> {[Ainstr],StackReg,St}.
@@ -407,158 +765,227 @@ block_cg(Es, Le, _Vdb, Bef, St) ->
block_cg(Es, Le, Bef, St).
block_cg(Es, Le, Bef, #cg{is_top_block=false}=St) ->
- cg_block(Es, Le#l.i, Le#l.vdb, Bef, St);
-block_cg(Es, Le, Bef, St0) ->
- {Is0,Aft,St} = cg_block(Es, Le#l.i, Le#l.vdb, Bef,
- St0#cg{is_top_block=false,need_frame=false}),
- Is = top_level_block(Is0, Aft, max_reg(Bef#sr.reg), St),
- {Is,Aft,St#cg{is_top_block=true}}.
-
-cg_block([], _I, _Vdb, Bef, St0) ->
+ cg_block(Es, Le#l.vdb, Bef, St);
+block_cg(Es, Le, Bef, #cg{is_top_block=true}=St0) ->
+ %% No stack frame has been established yet. Do we need one?
+ case need_stackframe(Es) of
+ true ->
+ %% We need a stack frame. Generate the code and add the
+ %% code for creating and deallocating the stack frame.
+ {Is0,Aft,St} = cg_block(Es, Le#l.vdb, Bef,
+ St0#cg{is_top_block=false,need_frame=false}),
+ Is = top_level_block(Is0, Aft, max_reg(Bef#sr.reg), St),
+ {Is,Aft,St#cg{is_top_block=true}};
+ false ->
+ %% This sequence of instructions ending in a #k_match{} (a
+ %% 'case' or 'if') in the Erlang code does not need a
+ %% stack frame yet. Delay the creation (if a stack frame
+ %% is needed at all, it will be created inside the
+ %% #k_match{}).
+ cg_list(Es, Le#l.vdb, Bef, St0)
+ end.
+
+%% need_stackframe([Kexpr]) -> true|false.
+%% Does this list of instructions need a stack frame?
+%%
+%% A sequence of instructions that don't clobber the X registers
+%% followed by a single #k_match{} doesn't need a stack frame.
+
+need_stackframe([H|T]) ->
+ case H of
+ #k_bif{op=#k_internal{}} -> true;
+ #k_put{arg=#k_binary{}} -> true;
+ #k_bif{} -> need_stackframe(T);
+ #k_put{} -> need_stackframe(T);
+ #k_guard_match{} -> need_stackframe(T);
+ #k_match{} when T =:= [] -> false;
+ _ -> true
+ end;
+need_stackframe([]) -> false.
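A hypothetical source-level example of the last rule (a sequence that does not clobber the X registers followed by a single #k_match{}): the body of delay/1 is one tuple building followed by a 'case', so the creation of the stack frame can be delayed into the 'case'.

    delay(X) ->
        T = {tag,X},
        case T of
            {tag,ok} -> T;
            _ -> error
        end.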
+
+cg_block([], _Vdb, Bef, St0) ->
{[],Bef,St0};
-cg_block(Kes0, I, Vdb, Bef, St0) ->
+cg_block(Kes0, Vdb, Bef, St0) ->
{Kes2,Int1,St1} =
case basic_block(Kes0) of
{Kes1,LastI,Args,Rest} ->
- Ke = hd(Kes1),
- Fb = Ke#l.i,
- cg_basic_block(Kes1, Fb, LastI, Args, Vdb, Bef, St0);
+ cg_basic_block(Kes1, LastI, Args, Vdb, Bef, St0);
{Kes1,Rest} ->
- cg_list(Kes1, I, Vdb, Bef, St0)
+ cg_list(Kes1, Vdb, Bef, St0)
end,
- {Kes3,Int2,St2} = cg_block(Rest, I, Vdb, Int1, St1),
+ {Kes3,Int2,St2} = cg_block(Rest, Vdb, Int1, St1),
{Kes2 ++ Kes3,Int2,St2}.
basic_block(Kes) -> basic_block(Kes, []).
-basic_block([Le|Les], Acc) ->
- case collect_block(Le#l.ke) of
- include -> basic_block(Les, [Le|Acc]);
+basic_block([Ke|Kes], Acc) ->
+ case collect_block(Ke) of
+ include -> basic_block(Kes, [Ke|Acc]);
{block_end,As} ->
case Acc of
[] ->
- %% If the basic block does not contain any set instructions,
+ %% If the basic block does not contain any #k_put{} instructions,
%% it serves no useful purpose to do basic block optimizations.
- {[Le],Les};
+ {[Ke],Kes};
_ ->
- {reverse(Acc, [Le]),Le#l.i,As,Les}
+ #l{i=I} = get_kanno(Ke),
+ {reverse(Acc, [Ke]),I,As,Kes}
end;
- no_block -> {reverse(Acc, [Le]),Les}
+ no_block -> {reverse(Acc, [Ke]),Kes}
end.
-%% sets that may garbage collect are not allowed in basic blocks.
-
-collect_block({set,_,{binary,_}}) -> no_block;
-collect_block({set,_,{map,_,_,_}}) -> no_block;
-collect_block({set,_,_}) -> include;
-collect_block({call,{var,_}=Var,As,_Rs}) -> {block_end,As++[Var]};
-collect_block({call,Func,As,_Rs}) -> {block_end,As++func_vars(Func)};
-collect_block({enter,{var,_}=Var,As})-> {block_end,As++[Var]};
-collect_block({enter,Func,As}) -> {block_end,As++func_vars(Func)};
-collect_block({return,Rs}) -> {block_end,Rs};
-collect_block({break,Bs}) -> {block_end,Bs};
-collect_block(_) -> no_block.
-
-func_vars({remote,M,F}) when element(1, M) =:= var;
- element(1, F) =:= var ->
+collect_block(#k_put{arg=Arg}) ->
+ %% #k_put{} instructions that may garbage collect are not allowed
+ %% in basic blocks.
+ case Arg of
+ #k_binary{} -> no_block;
+ #k_map{} -> no_block;
+ _ -> include
+ end;
+collect_block(#k_call{op=Func,args=As}) ->
+ {block_end,As++func_vars(Func)};
+collect_block(#k_enter{op=Func,args=As}) ->
+ {block_end,As++func_vars(Func)};
+collect_block(#k_return{args=Rs}) ->
+ {block_end,Rs};
+collect_block(#k_break{args=Bs}) ->
+ {block_end,Bs};
+collect_block(_) -> no_block.
+
+func_vars(#k_var{}=Var) ->
+ [Var];
+func_vars(#k_remote{mod=M,name=F})
+ when is_record(M, k_var); is_record(F, k_var) ->
[M,F];
func_vars(_) -> [].
-%% cg_basic_block([Kexpr], FirstI, LastI, As, Vdb, StackReg, State) ->
+%% cg_basic_block([Kexpr], FirstI, LastI, Arguments, Vdb, StackReg, State) ->
%% {[Ainstr],StackReg,State}.
-
-cg_basic_block(Kes, Fb, Lf, As, Vdb, Bef, St0) ->
- Res = make_reservation(As, 0),
- Regs0 = reserve(Res, Bef#sr.reg, Bef#sr.stk),
- Stk = extend_stack(Bef, Lf, Lf+1, Vdb),
- Int0 = Bef#sr{reg=Regs0,stk=Stk,res=Res},
- X0_v0 = x0_vars(As, Fb, Lf, Vdb),
- {Keis,{Aft,_,St1}} =
+%%
+%% Do a specialized code generation for a basic block of #k_put{}
+%% instructions (that don't do any garbage collection) followed by a
+%% call, break, or return.
+%%
+%% 'Arguments' is a list of the variables that must be loaded into
+%% consecutive X registers before the last instruction in the block.
+%% The point of this specialized code generation is to try put the
+%% all of the variables in 'Arguments' into the correct X register
+%% to begin with, instead of putting them into the first available
+%% X register and having to move them to the correct X register
+%% later.
+%%
+%% To achieve that, we attempt to reserve the X registers that the
+%% variables in 'Arguments' will need to be in when the block ends.
+%%
+%% To make it more likely that reservations will be successful, we
+%% will try to save variables that need to be saved to the stack as
+%% early as possible (if an X register needed by a variable in
+%% Arguments is occupied by another variable, the value in the
+%% X register can be evicted if it is saved on the stack).
+%%
+%% We will take care not to increase the size of the stack frame compared
+%% to what the standard code generator would have done (that is, to
+%% save all X registers at the last possible moment). We will do that
+%% by extending the stack frame to the minimal size needed to save
+%% all that needs to be saved using extend_stack/4, and use
+%% save_carefully/4 during code generation to only save the variables
+%% that can be saved without growing the stack frame.
+
+cg_basic_block(Kes, Lf, As, Vdb, Bef, St0) ->
+ Int0 = reserve_arg_regs(As, Bef),
+ Int = extend_stack(Int0, Lf, Lf+1, Vdb),
+ {Keis,{Aft,St1}} =
flatmapfoldl(fun(Ke, St) -> cg_basic_block(Ke, St, Lf, Vdb) end,
- {Int0,X0_v0,St0}, need_heap(Kes, Fb)),
+ {Int,St0}, need_heap(Kes)),
{Keis,Aft,St1}.
-cg_basic_block(#l{ke={need_heap,_}}=Ke, {Inta,X0v,Sta}, _Lf, Vdb) ->
- {Keis,Intb,Stb} = cg(Ke, Vdb, Inta, Sta),
- {Keis, {Intb,X0v,Stb}};
-cg_basic_block(Ke, {Inta,X0_v1,Sta}, Lf, Vdb) ->
- {Sis,Intb} = save_carefully(Inta, Ke#l.i, Lf+1, Vdb),
- {X0_v2,Intc} = allocate_x0(X0_v1, Ke#l.i, Intb),
- Intd = reserve(Intc),
- {Keis,Inte,Stb} = cg(Ke, Vdb, Intd, Sta),
- {Sis ++ Keis, {Inte,X0_v2,Stb}}.
-
-make_reservation([], _) -> [];
-make_reservation([{var,V}|As], I) -> [{I,V}|make_reservation(As, I+1)];
-make_reservation([A|As], I) -> [{I,A}|make_reservation(As, I+1)].
-
-reserve(Sr) -> Sr#sr{reg=reserve(Sr#sr.res, Sr#sr.reg, Sr#sr.stk)}.
-
-reserve([{I,V}|Rs], [free|Regs], Stk) -> [{reserved,I,V}|reserve(Rs, Regs, Stk)];
-reserve([{I,V}|Rs], [{I,V}|Regs], Stk) -> [{I,V}|reserve(Rs, Regs, Stk)];
-reserve([{I,V}|Rs], [{I,Var}|Regs], Stk) ->
+cg_basic_block(#cg_need_heap{}=Ke, {Bef,St0}, _Lf, Vdb) ->
+ {Keis,Aft,St1} = cg(Ke, Vdb, Bef, St0),
+ {Keis,{Aft,St1}};
+cg_basic_block(Ke, {Bef,St0}, Lf, Vdb) ->
+ #l{i=I} = get_kanno(Ke),
+
+ %% Save all we can to increase the possibility that reserving
+ %% registers will succeed.
+ {Sis,Int0} = save_carefully(Bef, I, Lf+1, Vdb),
+ Int1 = reserve(Int0),
+ {Keis,Aft,St1} = cg(Ke, Vdb, Int1, St0),
+ {Sis ++ Keis,{Aft,St1}}.
+
+%% reserve_arg_regs([Argument], Bef) -> Aft.
+%% Try to reserve the X registers for all arguments. All registers
+%% that we wish to reserve will be saved in Bef#sr.res.
+
+reserve_arg_regs(As, Bef) ->
+ Res = reserve_arg_regs_1(As, 0),
+ reserve(Bef#sr{res=Res}).
+
+reserve_arg_regs_1([#k_var{name=V}|As], I) ->
+ [{I,V}|reserve_arg_regs_1(As, I+1)];
+reserve_arg_regs_1([A|As], I) ->
+ [{I,A}|reserve_arg_regs_1(As, I+1)];
+reserve_arg_regs_1([], _) -> [].
+
+%% reserve(Bef) -> Aft.
+%% Try to reserve more registers. The registers we wish to reserve
+%% are found in Bef#sr.res.
+
+reserve(#sr{reg=Regs,stk=Stk,res=Res}=Sr) ->
+ Sr#sr{reg=reserve_1(Res, Regs, Stk)}.
+
+reserve_1([{I,V}|Rs], [free|Regs], Stk) ->
+ [{reserved,I,V}|reserve_1(Rs, Regs, Stk)];
+reserve_1([{I,V}|Rs], [{I,V}|Regs], Stk) ->
+ [{I,V}|reserve_1(Rs, Regs, Stk)];
+reserve_1([{I,V}|Rs], [{I,Var}|Regs], Stk) ->
case on_stack(Var, Stk) of
- true -> [{reserved,I,V}|reserve(Rs, Regs, Stk)];
- false -> [{I,Var}|reserve(Rs, Regs, Stk)]
+ true -> [{reserved,I,V}|reserve_1(Rs, Regs, Stk)];
+ false -> [{I,Var}|reserve_1(Rs, Regs, Stk)]
end;
-reserve([{I,V}|Rs], [{reserved,I,_}|Regs], Stk) ->
- [{reserved,I,V}|reserve(Rs, Regs, Stk)];
-%reserve([{I,V}|Rs], [Other|Regs], Stk) -> [Other|reserve(Rs, Regs, Stk)];
-reserve([{I,V}|Rs], [], Stk) -> [{reserved,I,V}|reserve(Rs, [], Stk)];
-reserve([], Regs, _) -> Regs.
-
-extend_stack(Bef, Fb, Lf, Vdb) ->
- Stk0 = clear_dead_stk(Bef#sr.stk, Fb, Vdb),
- Saves = [V || {V,F,L} <- Vdb,
- F < Fb,
- L >= Lf,
- not on_stack(V, Stk0)],
- Stk1 = foldl(fun (V, Stk) -> put_stack(V, Stk) end, Stk0, Saves),
- Bef#sr.stk ++ lists:duplicate(length(Stk1) - length(Bef#sr.stk), free).
-
-save_carefully(Bef, Fb, Lf, Vdb) ->
- Stk = Bef#sr.stk,
- %% New variables that are in use but not on stack.
- New = [VFL || {V,F,L} = VFL <- Vdb,
- F < Fb,
- L >= Lf,
- not on_stack(V, Stk)],
- Saves = [V || {V,_,_} <- keysort(2, New)],
- save_carefully(Saves, Bef, []).
-
-save_carefully([], Bef, Acc) -> {reverse(Acc),Bef};
-save_carefully([V|Vs], Bef, Acc) ->
- case put_stack_carefully(V, Bef#sr.stk) of
- error -> {reverse(Acc),Bef};
+reserve_1([{I,V}|Rs], [{reserved,I,_}|Regs], Stk) ->
+ [{reserved,I,V}|reserve_1(Rs, Regs, Stk)];
+reserve_1([{I,V}|Rs], [], Stk) ->
+ [{reserved,I,V}|reserve_1(Rs, [], Stk)];
+reserve_1([], Regs, _) -> Regs.
+
+%% extend_stack(Bef, FirstBefore, LastFrom, Vdb) -> Aft.
+%% Extend the stack enough to fit all variables alive past LastFrom
+%% and not already on the stack.
+
+extend_stack(#sr{stk=Stk0}=Bef, Fb, Lf, Vdb) ->
+ Stk1 = clear_dead_stk(Stk0, Fb, Vdb),
+ New = new_not_on_stack(Stk1, Fb, Lf, Vdb),
+ Stk2 = foldl(fun ({V,_,_}, Stk) -> put_stack(V, Stk) end, Stk1, New),
+ Stk = Stk0 ++ lists:duplicate(length(Stk2) - length(Stk0), free),
+ Bef#sr{stk=Stk}.
+
+%% save_carefully(Bef, FirstBefore, LastFrom, Vdb) -> {[SaveVar],Aft}.
+%% Save variables which are used past the current point and which are not
+%% already on the stack, but only if the variables can be saved without
+%% growing the stack frame.
+
+save_carefully(#sr{stk=Stk}=Bef, Fb, Lf, Vdb) ->
+ New0 = new_not_on_stack(Stk, Fb, Lf, Vdb),
+ New = keysort(2, New0),
+ save_carefully_1(New, Bef, []).
+
+save_carefully_1([{V,_,_}|Vs], #sr{reg=Regs,stk=Stk0}=Bef, Acc) ->
+ case put_stack_carefully(V, Stk0) of
+ error ->
+ {reverse(Acc),Bef};
Stk1 ->
- SrcReg = fetch_reg(V, Bef#sr.reg),
+ SrcReg = fetch_reg(V, Regs),
Move = {move,SrcReg,fetch_stack(V, Stk1)},
{x,_} = SrcReg, %Assertion - must be X register.
- save_carefully(Vs, Bef#sr{stk=Stk1}, [Move|Acc])
- end.
+ save_carefully_1(Vs, Bef#sr{stk=Stk1}, [Move|Acc])
+ end;
+save_carefully_1([], Bef, Acc) ->
+ {reverse(Acc),Bef}.
-x0_vars([], _Fb, _Lf, _Vdb) -> [];
-x0_vars([{var,V}|_], Fb, _Lf, Vdb) ->
- {V,F,_L} = VFL = vdb_find(V, Vdb),
- x0_vars1([VFL], Fb, F, Vdb);
-x0_vars([X0|_], Fb, Lf, Vdb) ->
- x0_vars1([{X0,Lf,Lf}], Fb, Lf, Vdb).
-
-x0_vars1(X0, Fb, Xf, Vdb) ->
- Vs0 = [VFL || {_V,F,L}=VFL <- Vdb,
- F >= Fb,
- L < Xf],
- Vs1 = keysort(3, Vs0),
- keysort(2, X0++Vs1).
-
-allocate_x0([], _, Bef) -> {[],Bef#sr{res=[]}};
-allocate_x0([{_,_,L}|Vs], I, Bef) when L =< I ->
- allocate_x0(Vs, I, Bef);
-allocate_x0([{V,_F,_L}=VFL|Vs], _, Bef) ->
- {[VFL|Vs],Bef#sr{res=reserve_x0(V, Bef#sr.res)}}.
-
-reserve_x0(V, [_|Res]) -> [{0,V}|Res];
-reserve_x0(V, []) -> [{0,V}].
+%% top_level_block([Instruction], Bef, MaxRegs, St) -> [Instruction].
+%% For the top-level block, allocate a stack frame as necessary,
+%% adjust Y register numbering and instructions that return
+%% from the function.
top_level_block(Keis, #sr{stk=[]}, _MaxRegs, #cg{need_frame=false}) ->
Keis;
@@ -640,21 +1067,27 @@ turn_yreg(Other, _MaxY) ->
%% wrong. These are different as in the second case there is no need
%% to try the next type, it will always fail.
-select_cg(#l{ke={type_clause,cons,[S]}}, {var,V}, Tf, Vf, Bef, St) ->
+select_cg(#k_type_clause{type=Type,values=Vs}, Var, Tf, Vf, Bef, St) ->
+ #k_var{name=V} = Var,
+ select_cg(Type, Vs, V, Tf, Vf, Bef, St).
+
+select_cg(k_cons, [S], V, Tf, Vf, Bef, St) ->
select_cons(S, V, Tf, Vf, Bef, St);
-select_cg(#l{ke={type_clause,nil,[S]}}, {var,V}, Tf, Vf, Bef, St) ->
+select_cg(k_nil, [S], V, Tf, Vf, Bef, St) ->
select_nil(S, V, Tf, Vf, Bef, St);
-select_cg(#l{ke={type_clause,binary,[S]}}, {var,V}, Tf, Vf, Bef, St) ->
+select_cg(k_binary, [S], V, Tf, Vf, Bef, St) ->
select_binary(S, V, Tf, Vf, Bef, St);
-select_cg(#l{ke={type_clause,bin_seg,S}}, {var,V}, Tf, _Vf, Bef, St) ->
+select_cg(k_bin_seg, S, V, Tf, _Vf, Bef, St) ->
select_bin_segs(S, V, Tf, Bef, St);
-select_cg(#l{ke={type_clause,bin_int,S}}, {var,V}, Tf, _Vf, Bef, St) ->
+select_cg(k_bin_int, S, V, Tf, _Vf, Bef, St) ->
select_bin_segs(S, V, Tf, Bef, St);
-select_cg(#l{ke={type_clause,bin_end,[S]}}, {var,V}, Tf, _Vf, Bef, St) ->
+select_cg(k_bin_end, [S], V, Tf, _Vf, Bef, St) ->
select_bin_end(S, V, Tf, Bef, St);
-select_cg(#l{ke={type_clause,map,S}}, {var,V}, Tf, Vf, Bef, St) ->
+select_cg(k_map, S, V, Tf, Vf, Bef, St) ->
select_map(S, V, Tf, Vf, Bef, St);
-select_cg(#l{ke={type_clause,Type,Scs}}, {var,V}, Tf, Vf, Bef, St0) ->
+select_cg(k_literal, S, V, Tf, Vf, Bef, St) ->
+ select_literal(S, V, Tf, Vf, Bef, St);
+select_cg(Type, Scs, V, Tf, Vf, Bef, St0) ->
{Vis,{Aft,St1}} =
mapfoldl(fun (S, {Int,Sta}) ->
{Val,Is,Inta,Stb} = select_val(S, V, Vf, Bef, Sta),
@@ -664,22 +1097,29 @@ select_cg(#l{ke={type_clause,Type,Scs}}, {var,V}, Tf, Vf, Bef, St0) ->
{Vls,Sis,St2} = select_labels(OptVls, St1, [], []),
{select_val_cg(Type, fetch_var(V, Bef), Vls, Tf, Vf, Sis), Aft, St2}.
-select_val_cg(tuple, R, [Arity,{f,Lbl}], Tf, Vf, [{label,Lbl}|Sis]) ->
+select_val_cg(k_tuple, R, [Arity,{f,Lbl}], Tf, Vf, [{label,Lbl}|Sis]) ->
[{test,is_tuple,{f,Tf},[R]},{test,test_arity,{f,Vf},[R,Arity]}|Sis];
-select_val_cg(tuple, R, Vls, Tf, Vf, Sis) ->
+select_val_cg(k_tuple, R, Vls, Tf, Vf, Sis) ->
[{test,is_tuple,{f,Tf},[R]},{select_tuple_arity,R,{f,Vf},{list,Vls}}|Sis];
select_val_cg(Type, R, [Val, {f,Lbl}], Fail, Fail, [{label,Lbl}|Sis]) ->
- [{test,is_eq_exact,{f,Fail},[R,{Type,Val}]}|Sis];
+ [{test,is_eq_exact,{f,Fail},[R,{type(Type),Val}]}|Sis];
select_val_cg(Type, R, [Val, {f,Lbl}], Tf, Vf, [{label,Lbl}|Sis]) ->
[{test,select_type_test(Type),{f,Tf},[R]},
- {test,is_eq_exact,{f,Vf},[R,{Type,Val}]}|Sis];
+ {test,is_eq_exact,{f,Vf},[R,{type(Type),Val}]}|Sis];
select_val_cg(Type, R, Vls0, Tf, Vf, Sis) ->
- Vls1 = [case Value of {f,_Lbl} -> Value; _ -> {Type,Value} end || Value <- Vls0],
+ Vls1 = [case Value of
+ {f,_Lbl} -> Value;
+ _ -> {type(Type),Value}
+ end || Value <- Vls0],
[{test,select_type_test(Type),{f,Tf},[R]}, {select_val,R,{f,Vf},{list,Vls1}}|Sis].
-
-select_type_test(integer) -> is_integer;
-select_type_test(atom) -> is_atom;
-select_type_test(float) -> is_float.
+
+type(k_atom) -> atom;
+type(k_float) -> float;
+type(k_int) -> integer.
+
+select_type_test(k_int) -> is_integer;
+select_type_test(k_atom) -> is_atom;
+select_type_test(k_float) -> is_float.
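As a rough example of what the last select_val_cg/6 clause emits (register and label names are made up): selecting among the integers 1 and 2 held in {x,0} becomes a type test followed by a select_val over tagged values.

    %% {test,is_integer,{f,TypeFail},[{x,0}]}
    %% {select_val,{x,0},{f,ValueFail},
    %%             {list,[{integer,1},{f,Lbl1},{integer,2},{f,Lbl2}]}}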
combine([{Is,Vs1}, {Is,Vs2}|Vis]) -> combine([{Is,Vs1 ++ Vs2}|Vis]);
combine([V|Vis]) -> [V|combine(Vis)];
@@ -695,36 +1135,49 @@ add_vls([V|Vs], Lbl, Acc) ->
add_vls(Vs, Lbl, [V, {f,Lbl}|Acc]);
add_vls([], _, Acc) -> Acc.
-select_cons(#l{ke={val_clause,{cons,Es},B},i=I,vdb=Vdb}, V, Tf, Vf, Bef, St0) ->
+select_literal(S, V, Tf, Vf, Bef, St) ->
+ Reg = fetch_var(V, Bef),
+ F = fun(ValClause, Fail, St0) ->
+ {Val,Is,Aft,St1} = select_val(ValClause, V, Vf, Bef, St0),
+ Test = {test,is_eq_exact,{f,Fail},[Reg,{literal,Val}]},
+ {[Test|Is],Aft,St1}
+ end,
+ match_fmf(F, Tf, St, S).
+
+select_cons(#k_val_clause{val=#k_cons{hd=Hd,tl=Tl},body=B,anno=#l{i=I,vdb=Vdb}},
+ V, Tf, Vf, Bef, St0) ->
+ Es = [Hd,Tl],
{Eis,Int,St1} = select_extract_cons(V, Es, I, Vdb, Bef, St0),
{Bis,Aft,St2} = match_cg(B, Vf, Int, St1),
{[{test,is_nonempty_list,{f,Tf},[fetch_var(V, Bef)]}] ++ Eis ++ Bis,Aft,St2}.
-select_nil(#l{ke={val_clause,nil,B}}, V, Tf, Vf, Bef, St0) ->
+select_nil(#k_val_clause{val=#k_nil{},body=B}, V, Tf, Vf, Bef, St0) ->
{Bis,Aft,St1} = match_cg(B, Vf, Bef, St0),
{[{test,is_nil,{f,Tf},[fetch_var(V, Bef)]}] ++ Bis,Aft,St1}.
-select_binary(#l{ke={val_clause,{binary,{var,V}},B},i=I,vdb=Vdb},
- V, Tf, Vf, Bef, St0) ->
+select_binary(#k_val_clause{val=#k_binary{segs=#k_var{name=V}},body=B,
+ anno=#l{i=I,vdb=Vdb}}, V, Tf, Vf, Bef, St0) ->
+ #cg{ctx=OldCtx} = St0,
Int0 = clear_dead(Bef#sr{reg=Bef#sr.reg}, I, Vdb),
- {Bis0,Aft,St1} = match_cg(B, Vf, Int0, St0),
+ {Bis0,Aft,St1} = match_cg(B, Vf, Int0, St0#cg{ctx=V}),
CtxReg = fetch_var(V, Int0),
Live = max_reg(Bef#sr.reg),
Bis1 = [{test,bs_start_match2,{f,Tf},Live,[CtxReg,V],CtxReg},
{bs_save2,CtxReg,{V,V}}|Bis0],
Bis = finish_select_binary(Bis1),
- {Bis,Aft,St1};
-select_binary(#l{ke={val_clause,{binary,{var,Ivar}},B},i=I,vdb=Vdb},
- V, Tf, Vf, Bef, St0) ->
+ {Bis,Aft,St1#cg{ctx=OldCtx}};
+select_binary(#k_val_clause{val=#k_binary{segs=#k_var{name=Ivar}},body=B,
+ anno=#l{i=I,vdb=Vdb}}, V, Tf, Vf, Bef, St0) ->
+ #cg{ctx=OldCtx} = St0,
Regs = put_reg(Ivar, Bef#sr.reg),
Int0 = clear_dead(Bef#sr{reg=Regs}, I, Vdb),
- {Bis0,Aft,St1} = match_cg(B, Vf, Int0, St0),
+ {Bis0,Aft,St1} = match_cg(B, Vf, Int0, St0#cg{ctx=Ivar}),
CtxReg = fetch_var(Ivar, Int0),
Live = max_reg(Bef#sr.reg),
Bis1 = [{test,bs_start_match2,{f,Tf},Live,[fetch_var(V, Bef),Ivar],CtxReg},
{bs_save2,CtxReg,{Ivar,Ivar}}|Bis0],
Bis = finish_select_binary(Bis1),
- {Bis,Aft,St1}.
+ {Bis,Aft,St1#cg{ctx=OldCtx}}.
finish_select_binary([{bs_save2,R,Point}=I,{bs_restore2,R,Point}|Is]) ->
[I|finish_select_binary(Is)];
@@ -746,9 +1199,16 @@ select_bin_segs(Scs, Ivar, Tf, Bef, St) ->
select_bin_seg(S, Ivar, Fail, Bef, Sta) end,
Tf, St, Scs).
-select_bin_seg(#l{ke={val_clause,{bin_seg,Ctx,Size,U,T,Fs0,Es},B},i=I,vdb=Vdb,a=A},
- Ivar, Fail, Bef, St0) ->
+select_bin_seg(#k_val_clause{val=#k_bin_seg{size=Size,unit=U,type=T,
+ seg=Seg,flags=Fs0,next=Next},
+ body=B,
+ anno=#l{i=I,vdb=Vdb,a=A}}, Ivar, Fail, Bef, St0) ->
+ Ctx = St0#cg.ctx,
Fs = [{anno,A}|Fs0],
+ Es = case Next of
+ [] -> [Seg];
+ _ -> [Seg,Next]
+ end,
{Mis,Int,St1} = select_extract_bin(Es, Size, U, T, Fs, Fail,
I, Vdb, Bef, Ctx, B, St0),
{Bis,Aft,St2} = match_cg(B, Fail, Int, St1),
@@ -761,9 +1221,12 @@ select_bin_seg(#l{ke={val_clause,{bin_seg,Ctx,Size,U,T,Fs0,Es},B},i=I,vdb=Vdb,a=
[{bs_restore2,CtxReg,{Ctx,Ivar}}|Mis++Bis]
end,
{Is,Aft,St2};
-select_bin_seg(#l{ke={val_clause,{bin_int,Ctx,Sz,U,Fs,Val,Es},B},i=I,vdb=Vdb},
- Ivar, Fail, Bef, St0) ->
- {Mis,Int,St1} = select_extract_int(Es, Val, Sz, U, Fs, Fail,
+select_bin_seg(#k_val_clause{val=#k_bin_int{size=Sz,unit=U,flags=Fs,
+ val=Val,next=Next},
+ body=B,
+ anno=#l{i=I,vdb=Vdb}}, Ivar, Fail, Bef, St0) ->
+ Ctx = St0#cg.ctx,
+ {Mis,Int,St1} = select_extract_int(Next, Val, Sz, U, Fs, Fail,
I, Vdb, Bef, Ctx, St0),
{Bis,Aft,St2} = match_cg(B, Fail, Int, St1),
CtxReg = fetch_var(Ctx, Bef),
@@ -784,7 +1247,7 @@ select_bin_seg(#l{ke={val_clause,{bin_int,Ctx,Sz,U,Fs,Val,Es},B},i=I,vdb=Vdb},
end,
{[{bs_restore2,CtxReg,{Ctx,Ivar}}|Is],Aft,St2}.
-select_extract_int([{var,Tl}], Val, {integer,Sz}, U, Fs, Vf,
+select_extract_int(#k_var{name=Tl}, Val, #k_int{val=Sz}, U, Fs, Vf,
I, Vdb, Bef, Ctx, St) ->
Bits = U*Sz,
Bin = case member(big, Fs) of
@@ -805,7 +1268,7 @@ select_extract_int([{var,Tl}], Val, {integer,Sz}, U, Fs, Vf,
end,
{Is,clear_dead(Bef, I, Vdb),St}.
-select_extract_bin([{var,Hd},{var,Tl}], Size0, Unit, Type, Flags, Vf,
+select_extract_bin([#k_var{name=Hd},#k_var{name=Tl}], Size0, Unit, Type, Flags, Vf,
I, Vdb, Bef, Ctx, _Body, St) ->
SizeReg = get_bin_size_reg(Size0, Bef),
{Es,Aft} =
@@ -828,11 +1291,11 @@ select_extract_bin([{var,Hd},{var,Tl}], Size0, Unit, Type, Flags, Vf,
{bs_save2,CtxReg,{Ctx,Tl}}],Int1}
end,
{Es,clear_dead(Aft, I, Vdb),St};
-select_extract_bin([{var,Hd}], Size, Unit, binary, Flags, Vf,
+select_extract_bin([#k_var{name=Hd}], Size, Unit, binary, Flags, Vf,
I, Vdb, Bef, Ctx, Body, St) ->
%% Match the last segment of a binary. We KNOW that the size
%% must be 'all'.
- Size = {atom,all}, %Assertion.
+ #k_atom{val=all} = Size, %Assertion.
{Es,Aft} =
case vdb_find(Hd, Vdb) of
{_,_,Lhd} when Lhd =< I ->
@@ -857,7 +1320,7 @@ select_extract_bin([{var,Hd}], Size, Unit, binary, Flags, Vf,
Name = bs_get_binary2,
Live = max_reg(Bef#sr.reg),
{[{test,Name,{f,Vf},Live,
- [CtxReg,Size,Unit,{field_flags,Flags}],Rhd}],
+ [CtxReg,atomic(Size),Unit,{field_flags,Flags}],Rhd}],
Int1};
true ->
%% Since the matching context will not be used again,
@@ -872,36 +1335,42 @@ select_extract_bin([{var,Hd}], Size, Unit, binary, Flags, Vf,
Name = bs_get_binary2,
Live = max_reg(Int1#sr.reg),
{[{test,Name,{f,Vf},Live,
- [CtxReg,Size,Unit,{field_flags,Flags}],CtxReg}],
+ [CtxReg,atomic(Size),Unit,{field_flags,Flags}],CtxReg}],
Int1}
end
end,
{Es,clear_dead(Aft, I, Vdb),St}.
%% is_context_unused(Ke) -> true | false
-%% Simple heurististic to determine whether the code that follows will
-%% use the current matching context again. (The information of liveness
-%% calculcated by v3_life is too conservative to be useful for this purpose.)
-%% 'true' means that the code that follows will definitely not use the context
-%% again (because it is a block, not guard or matching code); 'false' that we
-%% are not sure (there is either a guard, or more matching, either which may
-%% reference the context again).
-
-is_context_unused(#l{ke=Ke}) -> is_context_unused(Ke);
-is_context_unused({block,_}) -> true;
-is_context_unused(_) -> false.
-
-select_bin_end(#l{ke={val_clause,{bin_end,Ctx},B}},
- Ivar, Tf, Bef, St0) ->
+%% Simple heuristic to determine whether the code that follows
+%% will use the current matching context again. (The liveness
+%% information is too conservative to be useful for this purpose.)
+%% 'true' means that the code that follows will definitely not use
+%% the context again (because it is a block, not guard or matching
+%% code); 'false' means that we are not sure (there could be more
+%% matching).
+
+is_context_unused(#k_alt{then=Then}) ->
+ %% #k_alt{} can be used for different purposes. If the Then part
+    %% is a block, matching has finished and the #k_alt{} is used for
+    %% a guard that chooses between the matched clauses.
+ is_context_unused(Then);
+is_context_unused(#cg_block{}) ->
+ true;
+is_context_unused(_) ->
+ false.
+
+select_bin_end(#k_val_clause{val=#k_bin_end{},body=B}, Ivar, Tf, Bef, St0) ->
+ Ctx = St0#cg.ctx,
{Bis,Aft,St2} = match_cg(B, Tf, Bef, St0),
CtxReg = fetch_var(Ctx, Bef),
{[{bs_restore2,CtxReg,{Ctx,Ivar}},
{test,bs_test_tail2,{f,Tf},[CtxReg,0]}|Bis],Aft,St2}.
-get_bin_size_reg({var,V}, Bef) ->
+get_bin_size_reg(#k_var{name=V}, Bef) ->
fetch_var(V, Bef);
get_bin_size_reg(Literal, _Bef) ->
- Literal.
+ atomic(Literal).
build_bs_instr(Type, Vf, CtxReg, Live, SizeReg, Unit, Flags, Rhd) ->
{Format,Name} = case Type of
@@ -935,11 +1404,18 @@ build_skip_instr(Type, Vf, CtxReg, Live, SizeReg, Unit, Flags) ->
{test,Name,{f,Vf},[CtxReg,Live,{field_flags,Flags}]}
end.
-select_val(#l{ke={val_clause,{tuple,Es},B},i=I,vdb=Vdb}, V, Vf, Bef, St0) ->
+select_val(#k_val_clause{val=#k_tuple{es=Es},body=B,anno=#l{i=I,vdb=Vdb}},
+ V, Vf, Bef, St0) ->
{Eis,Int,St1} = select_extract_tuple(V, Es, I, Vdb, Bef, St0),
{Bis,Aft,St2} = match_cg(B, Vf, Int, St1),
{length(Es),Eis ++ Bis,Aft,St2};
-select_val(#l{ke={val_clause,{_,Val},B}}, _V, Vf, Bef, St0) ->
+select_val(#k_val_clause{val=Val0,body=B}, _V, Vf, Bef, St0) ->
+ Val = case Val0 of
+ #k_atom{val=Lit} -> Lit;
+ #k_float{val=Lit} -> Lit;
+ #k_int{val=Lit} -> Lit;
+ #k_literal{val=Lit} -> Lit
+ end,
{Bis,Aft,St1} = match_cg(B, Vf, Bef, St0),
{Val,Bis,Aft,St1}.
@@ -948,7 +1424,7 @@ select_val(#l{ke={val_clause,{_,Val},B}}, _V, Vf, Bef, St0) ->
%% Extract tuple elements, but only if they do not immediately die.
select_extract_tuple(Src, Vs, I, Vdb, Bef, St) ->
- F = fun ({var,V}, {Int0,Elem}) ->
+ F = fun (#k_var{name=V}, {Int0,Elem}) ->
case vdb_find(V, Vdb) of
{V,_,L} when L =< I -> {[], {Int0,Elem+1}};
_Other ->
@@ -965,9 +1441,10 @@ select_extract_tuple(Src, Vs, I, Vdb, Bef, St) ->
select_map(Scs, V, Tf, Vf, Bef, St0) ->
Reg = fetch_var(V, Bef),
{Is,Aft,St1} =
- match_fmf(fun(#l{ke={val_clause,{map,exact,_,Es},B},i=I,vdb=Vdb}, Fail, St1) ->
- select_map_val(V, Es, B, Fail, I, Vdb, Bef, St1)
- end, Vf, St0, Scs),
+ match_fmf(fun(#k_val_clause{val=#k_map{op=exact,es=Es},
+ body=B,anno=#l{i=I,vdb=Vdb}}, Fail, St1) ->
+ select_map_val(V, Es, B, Fail, I, Vdb, Bef, St1)
+ end, Vf, St0, Scs),
{[{test,is_map,{f,Tf},[Reg]}|Is],Aft,St1}.
select_map_val(V, Es, B, Fail, I, Vdb, Bef, St0) ->
@@ -984,29 +1461,32 @@ select_extract_map(Src, Vs, Fail, I, Vdb, Bef, St) ->
%% Assume keys are term-sorted
Rsrc = fetch_var(Src, Bef),
- {{HasKs,GetVs,HasVarKs,GetVarVs},Aft} = lists:foldr(fun
- ({map_pair,{var,K},{var,V}},{{HasKsi,GetVsi,HasVarVsi,GetVarVsi},Int0}) ->
- case vdb_find(V, Vdb) of
- {V,_,L} when L =< I ->
- RK = fetch_var(K,Int0),
- {{HasKsi,GetVsi,[RK|HasVarVsi],GetVarVsi},Int0};
- _Other ->
- Reg1 = put_reg(V, Int0#sr.reg),
- Int1 = Int0#sr{reg=Reg1},
- RK = fetch_var(K,Int0),
- RV = fetch_reg(V,Reg1),
- {{HasKsi,GetVsi,HasVarVsi,[[RK,RV]|GetVarVsi]},Int1}
- end;
- ({map_pair,Key,{var,V}},{{HasKsi,GetVsi,HasVarVsi,GetVarVsi},Int0}) ->
- case vdb_find(V, Vdb) of
- {V,_,L} when L =< I ->
- {{[Key|HasKsi],GetVsi,HasVarVsi,GetVarVsi},Int0};
- _Other ->
- Reg1 = put_reg(V, Int0#sr.reg),
- Int1 = Int0#sr{reg=Reg1},
- {{HasKsi,[Key,fetch_reg(V, Reg1)|GetVsi],HasVarVsi,GetVarVsi},Int1}
- end
- end, {{[],[],[],[]},Bef}, Vs),
+ {{HasKs,GetVs,HasVarKs,GetVarVs},Aft} =
+ foldr(fun(#k_map_pair{key=#k_var{name=K},val=#k_var{name=V}},
+ {{HasKsi,GetVsi,HasVarVsi,GetVarVsi},Int0}) ->
+ case vdb_find(V, Vdb) of
+ {V,_,L} when L =< I ->
+ RK = fetch_var(K,Int0),
+ {{HasKsi,GetVsi,[RK|HasVarVsi],GetVarVsi},Int0};
+ _Other ->
+ Reg1 = put_reg(V, Int0#sr.reg),
+ Int1 = Int0#sr{reg=Reg1},
+ RK = fetch_var(K,Int0),
+ RV = fetch_reg(V,Reg1),
+ {{HasKsi,GetVsi,HasVarVsi,[[RK,RV]|GetVarVsi]},Int1}
+ end;
+ (#k_map_pair{key=Key,val=#k_var{name=V}},
+ {{HasKsi,GetVsi,HasVarVsi,GetVarVsi},Int0}) ->
+ case vdb_find(V, Vdb) of
+ {V,_,L} when L =< I ->
+ {{[atomic(Key)|HasKsi],GetVsi,HasVarVsi,GetVarVsi},Int0};
+ _Other ->
+ Reg1 = put_reg(V, Int0#sr.reg),
+ Int1 = Int0#sr{reg=Reg1},
+ {{HasKsi,[atomic(Key),fetch_reg(V, Reg1)|GetVsi],
+ HasVarVsi,GetVarVsi},Int1}
+ end
+ end, {{[],[],[],[]},Bef}, Vs),
Code = [{test,has_map_fields,{f,Fail},Rsrc,{list,HasKs}} || HasKs =/= []] ++
[{test,has_map_fields,{f,Fail},Rsrc,{list,[K]}} || K <- HasVarKs] ++
@@ -1015,30 +1495,36 @@ select_extract_map(Src, Vs, Fail, I, Vdb, Bef, St) ->
{Code, Aft, St}.
-select_extract_cons(Src, [{var,Hd}, {var,Tl}], I, Vdb, Bef, St) ->
- {Es,Aft} = case {vdb_find(Hd, Vdb), vdb_find(Tl, Vdb)} of
- {{_,_,Lhd}, {_,_,Ltl}} when Lhd =< I, Ltl =< I ->
- %% Both head and tail are dead. No need to generate
- %% any instruction.
- {[], Bef};
- _ ->
- %% At least one of head and tail will be used,
- %% but we must always fetch both. We will call
- %% clear_dead/2 to allow reuse of the register
- %% in case only of them is used.
-
- Reg0 = put_reg(Tl, put_reg(Hd, Bef#sr.reg)),
- Int0 = Bef#sr{reg=Reg0},
- Rsrc = fetch_var(Src, Int0),
- Rhd = fetch_reg(Hd, Reg0),
- Rtl = fetch_reg(Tl, Reg0),
- Int1 = clear_dead(Int0, I, Vdb),
- {[{get_list,Rsrc,Rhd,Rtl}], Int1}
- end,
- {Es,Aft,St}.
-
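+%% select_extract_cons(Src, [Hd,Tl], I, Vdb, Bef, St) -> {[Ainstr],Aft,St}.
+%%  Extract the head and tail of a cons cell, but only the parts that
+%%  are actually used afterwards. For example, if only the tail is
+%%  live after I, a single {get_tl,Rsrc,Rtl} instruction is emitted.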
+select_extract_cons(Src, [#k_var{name=Hd},#k_var{name=Tl}], I, Vdb, Bef, St) ->
+ Rsrc = fetch_var(Src, Bef),
+ Int = clear_dead(Bef, I, Vdb),
+ {{_,_,Lhd},{_,_,Ltl}} = {vdb_find(Hd, Vdb),vdb_find(Tl, Vdb)},
+ case {Lhd =< I, Ltl =< I} of
+ {true,true} ->
+ %% Both dead.
+ {[],Bef,St};
+ {true,false} ->
+ %% Head dead.
+ Reg0 = put_reg(Tl, Bef#sr.reg),
+ Aft = Int#sr{reg=Reg0},
+ Rtl = fetch_reg(Tl, Reg0),
+ {[{get_tl,Rsrc,Rtl}],Aft,St};
+ {false,true} ->
+ %% Tail dead.
+ Reg0 = put_reg(Hd, Bef#sr.reg),
+ Aft = Int#sr{reg=Reg0},
+ Rhd = fetch_reg(Hd, Reg0),
+ {[{get_hd,Rsrc,Rhd}],Aft,St};
+ {false,false} ->
+ %% Both used.
+ Reg0 = put_reg(Tl, put_reg(Hd, Bef#sr.reg)),
+ Aft = Bef#sr{reg=Reg0},
+ Rhd = fetch_reg(Hd, Reg0),
+ Rtl = fetch_reg(Tl, Reg0),
+ {[{get_hd,Rsrc,Rhd},{get_tl,Rsrc,Rtl}],Aft,St}
+ end.
-guard_clause_cg(#l{ke={guard_clause,G,B},vdb=Vdb}, Fail, Bef, St0) ->
+guard_clause_cg(#k_guard_clause{anno=#l{vdb=Vdb},guard=G,body=B}, Fail, Bef, St0) ->
{Gis,Int,St1} = guard_cg(G, Fail, Vdb, Bef, St0),
{Bis,Aft,St} = match_cg(B, Fail, Int, St1),
{Gis ++ Bis,Aft,St}.
@@ -1051,11 +1537,11 @@ guard_clause_cg(#l{ke={guard_clause,G,B},vdb=Vdb}, Fail, Bef, St0) ->
%% the correct exit point. Primops and tests all go to the next
%% instruction on success or jump to a failure label.
-guard_cg(#l{ke={protected,Ts,Rs},i=I,vdb=Pdb}, Fail, _Vdb, Bef, St) ->
- protected_cg(Ts, Rs, Fail, I, Pdb, Bef, St);
-guard_cg(#l{ke={block,Ts},i=I,vdb=Bdb}, Fail, _Vdb, Bef, St) ->
- guard_cg_list(Ts, Fail, I, Bdb, Bef, St);
-guard_cg(#l{ke={test,Test,As,Inverted},i=I,vdb=_Tdb}, Fail, Vdb, Bef, St0) ->
+guard_cg(#k_protected{arg=Ts,ret=Rs,anno=#l{vdb=Pdb}}, Fail, _Vdb, Bef, St) ->
+ protected_cg(Ts, Rs, Fail, Pdb, Bef, St);
+guard_cg(#k_test{anno=#l{i=I},op=Test0,args=As,inverted=Inverted},
+ Fail, Vdb, Bef, St0) ->
+ #k_remote{mod=#k_atom{val=erlang},name=#k_atom{val=Test}} = Test0,
case Inverted of
false ->
test_cg(Test, As, Fail, I, Vdb, Bef, St0);
@@ -1070,6 +1556,18 @@ guard_cg(G, _Fail, Vdb, Bef, St) ->
%%ok = io:fwrite("cg ~w: ~p~n", [?LINE,{Aft}]),
{Gis,Aft,St1}.
+%% guard_cg_list([Kexpr], Fail, Vdb, StackReg, St) ->
+%% {[Ainstr],StackReg,St}.
+
+guard_cg_list(Kes, Fail, Vdb, Bef, St0) ->
+ {Keis,{Aft,St1}} =
+ flatmapfoldl(fun (Ke, {Inta,Sta}) ->
+ {Keis,Intb,Stb} =
+ guard_cg(Ke, Fail, Vdb, Inta, Sta),
+ {Keis,{Intb,Stb}}
+ end, {Bef,St0}, need_heap(Kes)),
+ {Keis,Aft,St1}.
+
%% protected_cg([Kexpr], [Ret], Fail, I, Vdb, Bef, St) -> {[Ainstr],Aft,St}.
%% Do a protected. Protecteds without return values are just done
%% for effect, the return value is not checked, success passes on to
@@ -1077,19 +1575,18 @@ guard_cg(G, _Fail, Vdb, Bef, St) ->
%% return values then these must be set to 'false' on failure,
%% control always passes to the next instruction.
-protected_cg(Ts, [], Fail, I, Vdb, Bef, St0) ->
+protected_cg(Ts, [], Fail, Vdb, Bef, St0) ->
%% Protect these calls, revert when done.
- {Tis,Aft,St1} = guard_cg_list(Ts, Fail, I, Vdb, Bef,
- St0#cg{bfail=Fail}),
+ {Tis,Aft,St1} = guard_cg_list(Ts, Fail, Vdb, Bef, St0#cg{bfail=Fail}),
{Tis,Aft,St1#cg{bfail=St0#cg.bfail}};
-protected_cg(Ts, Rs, _Fail, I, Vdb, Bef, St0) ->
+protected_cg(Ts, Rs, _Fail, Vdb, Bef, St0) ->
{Pfail,St1} = new_label(St0),
{Psucc,St2} = new_label(St1),
- {Tis,Aft,St3} = guard_cg_list(Ts, Pfail, I, Vdb, Bef,
+ {Tis,Aft,St3} = guard_cg_list(Ts, Pfail, Vdb, Bef,
St2#cg{bfail=Pfail}),
%%ok = io:fwrite("cg ~w: ~p~n", [?LINE,{Rs,I,Vdb,Aft}]),
%% Set return values to false.
- Mis = [{move,{atom,false},fetch_var(V,Aft)}||{var,V} <- Rs],
+ Mis = [{move,{atom,false},fetch_var(V,Aft)}||#k_var{name=V} <- Rs],
{Tis ++ [{jump,{f,Psucc}},
{label,Pfail}] ++ Mis ++ [{label,Psucc}],
Aft,St3#cg{bfail=St0#cg.bfail}}.
@@ -1114,7 +1611,7 @@ test_cg(is_map, [A], Fail, I, Vdb, Bef, St) ->
Arg = cg_reg_arg_prefer_y(A, Bef),
Aft = clear_dead(Bef, I, Vdb),
{[{test,is_map,{f,Fail},[Arg]}],Aft,St};
-test_cg(is_boolean, [{atom,Val}], Fail, I, Vdb, Bef, St) ->
+test_cg(is_boolean, [#k_atom{val=Val}], Fail, I, Vdb, Bef, St) ->
Aft = clear_dead(Bef, I, Vdb),
Is = case is_boolean(Val) of
true -> [];
@@ -1126,18 +1623,6 @@ test_cg(Test, As, Fail, I, Vdb, Bef, St) ->
Aft = clear_dead(Bef, I, Vdb),
{[beam_utils:bif_to_test(Test, Args, {f,Fail})],Aft,St}.
-%% guard_cg_list([Kexpr], Fail, I, Vdb, StackReg, St) ->
-%% {[Ainstr],StackReg,St}.
-
-guard_cg_list(Kes, Fail, I, Vdb, Bef, St0) ->
- {Keis,{Aft,St1}} =
- flatmapfoldl(fun (Ke, {Inta,Sta}) ->
- {Keis,Intb,Stb} =
- guard_cg(Ke, Fail, Vdb, Inta, Sta),
- {Keis,{Intb,Stb}}
- end, {Bef,St0}, need_heap(Kes, I)),
- {Keis,Aft,St1}.
-
%% match_fmf(Fun, LastFail, State, [Clause]) -> {Is,Aft,State}.
%% This is a special flatmapfoldl for match code gen where we
%% generate a "failure" label for each clause. The last clause uses
@@ -1160,7 +1645,7 @@ match_fmf(F, LastFail, St0, [H|T]) ->
%% frame size. Finally the actual call is made. Call then needs the
%% return values filled in.
-call_cg({var,_V} = Var, As, Rs, Le, Vdb, Bef, St0) ->
+call_cg(#k_var{}=Var, As, Rs, Le, Vdb, Bef, St0) ->
{Sis,Int} = cg_setup_call(As++[Var], Bef, Le#l.i, Vdb),
%% Put return values in registers.
Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
@@ -1169,9 +1654,8 @@ call_cg({var,_V} = Var, As, Rs, Le, Vdb, Bef, St0) ->
{Frees,Aft} = free_dead(clear_dead(Int#sr{reg=Reg}, Le#l.i, Vdb)),
{Sis ++ Frees ++ [line(Le),{call_fun,Arity}],Aft,
need_stack_frame(St0)};
-call_cg({remote,Mod,Name}, As, Rs, Le, Vdb, Bef, St0)
- when element(1, Mod) =:= var;
- element(1, Name) =:= var ->
+call_cg(#k_remote{mod=Mod,name=Name}, As, Rs, Le, Vdb, Bef, St0)
+ when is_record(Mod, k_var); is_record(Name, k_var) ->
{Sis,Int} = cg_setup_call(As++[Mod,Name], Bef, Le#l.i, Vdb),
%% Put return values in registers.
Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
@@ -1189,8 +1673,9 @@ call_cg(Func, As, Rs, Le, Vdb, Bef, St0) ->
%%
%% move {atom,ok} DestReg
%% jump FailureLabel
- {remote,{atom,erlang},{atom,error}} = Func, %Assertion.
- [{var,DestVar}] = Rs,
+ #k_remote{mod=#k_atom{val=erlang},
+ name=#k_atom{val=error}} = Func, %Assertion.
+ [#k_var{name=DestVar}] = Rs,
Int0 = clear_dead(Bef, Le#l.i, Vdb),
Reg = put_reg(DestVar, Int0#sr.reg),
Int = Int0#sr{reg=Reg},
@@ -1209,11 +1694,11 @@ call_cg(Func, As, Rs, Le, Vdb, Bef, St0) ->
{Sis ++ Frees ++ [line(Le)|Call],Aft,St1}
end.
-build_call({remote,{atom,erlang},{atom,'!'}}, 2, St0) ->
+build_call(#k_remote{mod=#k_atom{val=erlang},name=#k_atom{val='!'}}, 2, St0) ->
{[send],need_stack_frame(St0)};
-build_call({remote,{atom,Mod},{atom,Name}}, Arity, St0) ->
+build_call(#k_remote{mod=#k_atom{val=Mod},name=#k_atom{val=Name}}, Arity, St0) ->
{[{call_ext,Arity,{extfunc,Mod,Name,Arity}}],need_stack_frame(St0)};
-build_call(Name, Arity, St0) when is_atom(Name) ->
+build_call(#k_local{name=Name}, Arity, St0) when is_atom(Name) ->
{Lbl,St1} = local_func_label(Name, Arity, need_stack_frame(St0)),
{[{call,Arity,{f,Lbl}}],St1}.
@@ -1229,16 +1714,15 @@ free_dead([Any|Stk], Y, Instr, StkAcc) ->
free_dead(Stk, Y+1, Instr, [Any|StkAcc]);
free_dead([], _, Instr, StkAcc) -> {Instr,reverse(StkAcc)}.
-enter_cg({var,_V} = Var, As, Le, Vdb, Bef, St0) ->
+enter_cg(#k_var{} = Var, As, Le, Vdb, Bef, St0) ->
{Sis,Int} = cg_setup_call(As++[Var], Bef, Le#l.i, Vdb),
%% Build complete code and final stack/register state.
Arity = length(As),
{Sis ++ [line(Le),{call_fun,Arity},return],
clear_dead(Int#sr{reg=clear_regs(Int#sr.reg)}, Le#l.i, Vdb),
need_stack_frame(St0)};
-enter_cg({remote,Mod,Name}, As, Le, Vdb, Bef, St0)
- when element(1, Mod) =:= var;
- element(1, Name) =:= var ->
+enter_cg(#k_remote{mod=Mod,name=Name}, As, Le, Vdb, Bef, St0)
+ when is_record(Mod, k_var); is_record(Name, k_var) ->
{Sis,Int} = cg_setup_call(As++[Mod,Name], Bef, Le#l.i, Vdb),
%% Build complete code and final stack/register state.
Arity = length(As),
@@ -1256,19 +1740,19 @@ enter_cg(Func, As, Le, Vdb, Bef, St0) ->
clear_dead(Int#sr{reg=clear_regs(Int#sr.reg)}, Le#l.i, Vdb),
St1}.
-build_enter({remote,{atom,erlang},{atom,'!'}}, 2, St0) ->
+build_enter(#k_remote{mod=#k_atom{val=erlang},name=#k_atom{val='!'}}, 2, St0) ->
{[send,return],need_stack_frame(St0)};
-build_enter({remote,{atom,Mod},{atom,Name}}, Arity, St0) ->
+build_enter(#k_remote{mod=#k_atom{val=Mod},name=#k_atom{val=Name}}, Arity, St0) ->
St1 = case trap_bif(Mod, Name, Arity) of
true -> need_stack_frame(St0);
false -> St0
end,
{[{call_ext_only,Arity,{extfunc,Mod,Name,Arity}}],St1};
-build_enter(Name, Arity, St0) when is_atom(Name) ->
+build_enter(#k_local{name=Name}, Arity, St0) when is_atom(Name) ->
{Lbl,St1} = local_func_label(Name, Arity, St0),
{[{call_only,Arity,{f,Lbl}}],St1}.
-enter_line({remote,{atom,Mod},{atom,Name}}, Arity, Le) ->
+enter_line(#k_remote{mod=#k_atom{val=Mod},name=#k_atom{val=Name}}, Arity, Le) ->
case erl_bifs:is_safe(Mod, Name, Arity) of
false ->
%% Tail-recursive call, possibly to a BIF.
@@ -1316,17 +1800,28 @@ trap_bif(erlang, group_leader, 2) -> true;
trap_bif(erlang, exit, 2) -> true;
trap_bif(_, _, _) -> false.
+%% bif_cg(#k_bif{}, Le, Vdb, StackReg, State) ->
+%% {[Ainstr],StackReg,State}.
+%%  Generate code for a BIF.
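+%%  An internal BIF (#k_internal{}) is passed on to internal_cg/7,
+%%  while an erlang BIF is generated either as a plain bif or as a
+%%  gc_bif, depending on whether it may do a garbage collection.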
+
+bif_cg(#k_bif{op=#k_internal{name=Name},args=As,ret=Rs}, Le, Vdb, Bef, St) ->
+ internal_cg(Name, As, Rs, Le, Vdb, Bef, St);
+bif_cg(#k_bif{op=#k_remote{mod=#k_atom{val=erlang},name=#k_atom{val=Name}},
+ args=As,ret=Rs}, Le, Vdb, Bef, St) ->
+ Ar = length(As),
+ case is_gc_bif(Name, Ar) of
+ false ->
+ bif_cg(Name, As, Rs, Le, Vdb, Bef, St);
+ true ->
+ gc_bif_cg(Name, As, Rs, Le, Vdb, Bef, St)
+ end.
+
%% internal_cg(Bif, [Arg], [Ret], Le, Vdb, StackReg, State) ->
%% {[Ainstr],StackReg,State}.
internal_cg(bs_context_to_binary=Instr, [Src0], [], Le, Vdb, Bef, St0) ->
[Src] = cg_reg_args([Src0], Bef),
- case is_register(Src) of
- false ->
- {[],clear_dead(Bef, Le#l.i, Vdb), St0};
- true ->
- {[{Instr,Src}],clear_dead(Bef, Le#l.i, Vdb), St0}
- end;
+ {[{Instr,Src}],clear_dead(Bef, Le#l.i, Vdb), St0};
internal_cg(dsetelement, [Index0,Tuple0,New0], _Rs, Le, Vdb, Bef, St0) ->
[New,Tuple,{integer,Index1}] = cg_reg_args([New0,Tuple0,Index0], Bef),
Index = Index1-1,
@@ -1334,8 +1829,8 @@ internal_cg(dsetelement, [Index0,Tuple0,New0], _Rs, Le, Vdb, Bef, St0) ->
clear_dead(Bef, Le#l.i, Vdb), St0};
internal_cg(make_fun, [Func0,Arity0|As], Rs, Le, Vdb, Bef, St0) ->
%% This behaves more like a function call.
- {atom,Func} = Func0,
- {integer,Arity} = Arity0,
+ #k_atom{val=Func} = Func0,
+ #k_int{val=Arity} = Arity0,
{Sis,Int} = cg_setup_call(As, Bef, Le#l.i, Vdb),
Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
{FuncLbl,St1} = local_func_label(Func, Arity, St0),
@@ -1348,14 +1843,35 @@ internal_cg(bs_init_writable=I, As, Rs, Le, Vdb, Bef, St) ->
{Sis,Int} = cg_setup_call(As, Bef, Le#l.i, Vdb),
Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
{Sis++[I],clear_dead(Int#sr{reg=Reg}, Le#l.i, Vdb),St};
+internal_cg(build_stacktrace=I, As, Rs, Le, Vdb, Bef, St) ->
+ %% This behaves like a function call.
+ {Sis,Int} = cg_setup_call(As, Bef, Le#l.i, Vdb),
+ Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
+ {Sis++[I],clear_dead(Int#sr{reg=Reg}, Le#l.i, Vdb),St};
internal_cg(raise, As, Rs, Le, Vdb, Bef, St) ->
%% raise can be treated like a guard BIF.
- bif_cg(raise, As, Rs, Le, Vdb, Bef, St).
+ bif_cg(raise, As, Rs, Le, Vdb, Bef, St);
+internal_cg(guard_error, [ExitCall], _Rs, Le, Vdb, Bef, St) ->
+    %% A call to an exit BIF from inside a #k_guard_match{}.
+ %% Generate a standard call, but leave the register descriptors
+ %% alone, effectively pretending that there was no call.
+ #k_call{op=#k_remote{mod=#k_atom{val=Mod},name=#k_atom{val=Name}},
+ args=As} = ExitCall,
+ Arity = length(As),
+ {Ms,_} = cg_call_args(As, Bef, Le#l.i, Vdb),
+ Call = {call_ext,Arity,{extfunc,Mod,Name,Arity}},
+ Is = Ms++[line(Le),Call],
+ {Is,Bef,St};
+internal_cg(raw_raise=I, As, Rs, Le, Vdb, Bef, St) ->
+ %% This behaves like a function call.
+ {Sis,Int} = cg_setup_call(As, Bef, Le#l.i, Vdb),
+ Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
+ {Sis++[I],clear_dead(Int#sr{reg=Reg}, Le#l.i, Vdb),St}.
%% bif_cg(Bif, [Arg], [Ret], Le, Vdb, StackReg, State) ->
%% {[Ainstr],StackReg,State}.
-bif_cg(Bif, As, [{var,V}], Le, Vdb, Bef, St0) ->
+bif_cg(Bif, As, [#k_var{name=V}], Le, Vdb, Bef, St0) ->
Ars = cg_reg_args(As, Bef),
%% If we are inside a catch and in a body (not in guard) and the
@@ -1393,7 +1909,7 @@ bif_cg(Bif, As, [{var,V}], Le, Vdb, Bef, St0) ->
%% gc_bif_cg(Bif, [Arg], [Ret], Le, Vdb, StackReg, State) ->
%% {[Ainstr],StackReg,State}.
-gc_bif_cg(Bif, As, [{var,V}], Le, Vdb, Bef, St0) ->
+gc_bif_cg(Bif, As, [#k_var{name=V}], Le, Vdb, Bef, St0) ->
Ars = cg_reg_args(As, Bef),
%% If we are inside a catch and in a body (not in guard) and the
@@ -1439,7 +1955,7 @@ recv_loop_cg(Te, Rvar, Rm, Tes, Rs, Le, Vdb, Bef, St0) ->
%% cg_recv_mesg( ) -> {[Ainstr],Aft,St}.
-cg_recv_mesg({var,R}, Rm, Tl, Bef, St0) ->
+cg_recv_mesg(#k_var{name=R}, Rm, Tl, Bef, St0) ->
Int0 = Bef#sr{reg=put_reg(R, Bef#sr.reg)},
Ret = fetch_reg(R, Int0#sr.reg),
%% Int1 = clear_dead(Int0, I, Rm#l.vdb),
@@ -1449,22 +1965,22 @@ cg_recv_mesg({var,R}, Rm, Tl, Bef, St0) ->
%% cg_recv_wait(Te, Tes, I, Vdb, Int2, St3) -> {[Ainstr],Aft,St}.
-cg_recv_wait({atom,infinity}, Tes, I, Bef, St0) ->
+cg_recv_wait(#k_atom{val=infinity}, #cg_block{anno=Le,es=Tes}, I, Bef, St0) ->
%% We know that the 'after' body will never be executed.
%% But to keep the stack and register information up to date,
%% we will generate the code for the 'after' body, and then discard it.
- Int1 = clear_dead(Bef, I, Tes#l.vdb),
- {_,Int2,St1} = cg_block(Tes#l.ke, Tes#l.i, Tes#l.vdb,
- Int1#sr{reg=clear_regs(Int1#sr.reg)}, St0),
+ Int1 = clear_dead(Bef, I, Le#l.vdb),
+ {_,Int2,St1} = cg_block(Tes, Le#l.vdb,
+ Int1#sr{reg=clear_regs(Int1#sr.reg)}, St0),
{[{wait,{f,St1#cg.recv}}],Int2,St1};
-cg_recv_wait({integer,0}, Tes, _I, Bef, St0) ->
- {Tis,Int,St1} = cg_block(Tes#l.ke, Tes#l.i, Tes#l.vdb, Bef, St0),
+cg_recv_wait(#k_int{val=0}, #cg_block{anno=Le,es=Tes}, _I, Bef, St0) ->
+ {Tis,Int,St1} = cg_block(Tes, Le#l.vdb, Bef, St0),
{[timeout|Tis],Int,St1};
-cg_recv_wait(Te, Tes, I, Bef, St0) ->
+cg_recv_wait(Te, #cg_block{anno=Le,es=Tes}, I, Bef, St0) ->
Reg = cg_reg_arg(Te, Bef),
%% Must have empty registers here! Bug if anything in registers.
- Int0 = clear_dead(Bef, I, Tes#l.vdb),
- {Tis,Int,St1} = cg_block(Tes#l.ke, Tes#l.i, Tes#l.vdb,
+ Int0 = clear_dead(Bef, I, Le#l.vdb),
+ {Tis,Int,St1} = cg_block(Tes, Le#l.vdb,
Int0#sr{reg=clear_regs(Int0#sr.reg)}, St0),
{[{wait_timeout,{f,St1#cg.recv},Reg},timeout] ++ Tis,Int,St1}.
@@ -1482,7 +1998,7 @@ try_cg(Ta, Vs, Tb, Evs, Th, Rs, Le, Vdb, Bef, St0) ->
{B,St1} = new_label(St0), %Body label
{H,St2} = new_label(St1), %Handler label
{E,St3} = new_label(St2), %End label
- TryTag = Ta#l.i,
+ #l{i=TryTag} = get_kanno(Ta),
Int1 = Bef#sr{stk=put_catch(TryTag, Bef#sr.stk)},
TryReg = fetch_stack({catch_tag,TryTag}, Int1#sr.stk),
{Ais,Int2,St4} = cg(Ta, Vdb, Int1, St3#cg{break=B,in_catch=true}),
@@ -1502,7 +2018,7 @@ try_cg(Ta, Vs, Tb, Evs, Th, Rs, Le, Vdb, Bef, St0) ->
try_enter_cg(Ta, Vs, Tb, Evs, Th, Le, Vdb, Bef, St0) ->
{B,St1} = new_label(St0), %Body label
{H,St2} = new_label(St1), %Handler label
- TryTag = Ta#l.i,
+ #l{i=TryTag} = get_kanno(Ta),
Int1 = Bef#sr{stk=put_catch(TryTag, Bef#sr.stk)},
TryReg = fetch_stack({catch_tag,TryTag}, Int1#sr.stk),
{Ais,Int2,St3} = cg(Ta, Vdb, Int1, St2#cg{break=B,in_catch=true}),
@@ -1520,12 +2036,12 @@ try_enter_cg(Ta, Vs, Tb, Evs, Th, Le, Vdb, Bef, St0) ->
%% catch_cg(CatchBlock, Ret, Le, Vdb, Bef, St) -> {[Ainstr],Aft,St}.
-catch_cg(C, {var,R}, Le, Vdb, Bef, St0) ->
+catch_cg(#cg_block{es=C}, #k_var{name=R}, Le, Vdb, Bef, St0) ->
{B,St1} = new_label(St0),
CatchTag = Le#l.i,
Int1 = Bef#sr{stk=put_catch(CatchTag, Bef#sr.stk)},
CatchReg = fetch_stack({catch_tag,CatchTag}, Int1#sr.stk),
- {Cis,Int2,St2} = cg_block(C, Le#l.i, Le#l.vdb, Int1,
+ {Cis,Int2,St2} = cg_block(C, Le#l.vdb, Int1,
St1#cg{break=B,in_catch=true}),
[] = Int2#sr.reg, %Assertion.
Aft = Int2#sr{reg=[{0,R}],stk=drop_catch(CatchTag, Int2#sr.stk)},
@@ -1534,8 +2050,8 @@ catch_cg(C, {var,R}, Le, Vdb, Bef, St0) ->
clear_dead(Aft, Le#l.i, Vdb),
St2#cg{break=St1#cg.break,in_catch=St1#cg.in_catch}}.
-%% set_cg([Var], Constr, Le, Vdb, Bef, St) -> {[Ainstr],Aft,St}.
-%% We have to be careful how a 'set' works. First the structure is
+%% put_cg([Var], Constr, Le, Vdb, Bef, St) -> {[Ainstr],Aft,St}.
+%% We have to be careful how a 'put' works. First the structure is
%% built, then it is filled and finally things can be cleared. The
%% annotation must reflect this and make sure that the return
%% variable is allocated first.
@@ -1543,13 +2059,14 @@ catch_cg(C, {var,R}, Le, Vdb, Bef, St0) ->
%% put_list and put_map are atomic instructions, both of
     %%  which can safely reuse one of the source registers as target.
-set_cg([{var,R}], {cons,Es}, Le, Vdb, Bef, St) ->
- [S1,S2] = cg_reg_args(Es, Bef),
+put_cg([#k_var{name=R}], #k_cons{hd=Hd,tl=Tl}, Le, Vdb, Bef, St) ->
+ [S1,S2] = cg_reg_args([Hd,Tl], Bef),
Int0 = clear_dead(Bef, Le#l.i, Vdb),
Int1 = Int0#sr{reg=put_reg(R, Int0#sr.reg)},
Ret = fetch_reg(R, Int1#sr.reg),
{[{put_list,S1,S2,Ret}], Int1, St};
-set_cg([{var,R}], {binary,Segs}, Le, Vdb, Bef, #cg{bfail=Bfail}=St) ->
+put_cg([#k_var{name=R}], #k_binary{segs=Segs}, Le, Vdb, Bef,
+ #cg{bfail=Bfail}=St) ->
%% At run-time, binaries are constructed in three stages:
%% 1) First the size of the binary is calculated.
%% 2) Then the binary is allocated.
@@ -1577,7 +2094,9 @@ set_cg([{var,R}], {binary,Segs}, Le, Vdb, Bef, #cg{bfail=Bfail}=St) ->
{Sis++Code,Aft,St};
%% Map: single variable key.
-set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef, St0) ->
+put_cg([#k_var{name=R}], #k_map{op=Op,var=Map,
+ es=[#k_map_pair{key=#k_var{}=K,val=V}]},
+ Le, Vdb, Bef, St0) ->
{Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0),
SrcReg = cg_reg_arg_prefer_y(Map, Int0),
@@ -1592,22 +2111,23 @@ set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef, St0) ->
Aft = Aft0#sr{reg=put_reg(R, Aft0#sr.reg)},
Target = fetch_reg(R, Aft#sr.reg),
- {Is,St1} = set_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
+ {Is,St1} = put_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
{Sis++Is,Aft,St1};
%% Map: (possibly) multiple literal keys.
-set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef, St0) ->
+put_cg([#k_var{name=R}], #k_map{op=Op,var=Map,es=Es}, Le, Vdb, Bef, St0) ->
%% assert key literals
- [] = [Var||{map_pair,{var,_}=Var,_} <- Es],
+ [] = [Var || #k_map_pair{key=#k_var{}=Var} <- Es],
{Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0),
SrcReg = cg_reg_arg_prefer_y(Map, Int0),
Line = line(Le#l.a),
%% fetch registers for values to be put into the map
- Pairs = [{K,V} || {_,K,V} <- Es],
- List = flatmap(fun({K,V}) -> [K,cg_reg_arg(V,Int0)] end, Pairs),
+ List = flatmap(fun(#k_map_pair{key=K,val=V}) ->
+ [atomic(K),cg_reg_arg(V, Int0)]
+ end, Es),
Live = max_reg(Bef#sr.reg),
@@ -1616,16 +2136,16 @@ set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef, St0) ->
Aft = Aft0#sr{reg=put_reg(R, Aft0#sr.reg)},
Target = fetch_reg(R, Aft#sr.reg),
- {Is,St1} = set_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
+ {Is,St1} = put_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
{Sis++Is,Aft,St1};
%% Everything else.
-set_cg([{var,R}], Con, Le, Vdb, Bef, St) ->
+put_cg([#k_var{name=R}], Con, Le, Vdb, Bef, St) ->
%% Find a place for the return register first.
Int = Bef#sr{reg=put_reg(R, Bef#sr.reg)},
Ret = fetch_reg(R, Int#sr.reg),
Ais = case Con of
- {tuple,Es} ->
+ #k_tuple{es=Es} ->
[{put_tuple,length(Es),Ret}] ++ cg_build_args(Es, Bef);
Other ->
[{move,cg_reg_arg(Other, Int),Ret}]
@@ -1633,7 +2153,7 @@ set_cg([{var,R}], Con, Le, Vdb, Bef, St) ->
{Ais,clear_dead(Int, Le#l.i, Vdb),St}.
-set_cg_map(Line, Op0, SrcReg, Target, Live, List, St0) ->
+put_cg_map(Line, Op0, SrcReg, Target, Live, List, St0) ->
Bfail = St0#cg.bfail,
Fail = {f,St0#cg.bfail},
Op = case Op0 of
@@ -1811,24 +2331,44 @@ cg_gen_binsize([], _, _, _, _, Acc) -> Acc.
%% cg_bin_opt(Code0) -> Code
%% Optimize the size calculations for binary construction.
-cg_bin_opt([{move,Size,D},{bs_append,Fail,D,Extra,Regs,U,Bin,Flags,D}|Is]) ->
- cg_bin_opt([{bs_append,Fail,Size,Extra,Regs,U,Bin,Flags,D}|Is]);
-cg_bin_opt([{move,Size,D},{bs_private_append,Fail,D,U,Bin,Flags,D}|Is]) ->
- cg_bin_opt([{bs_private_append,Fail,Size,U,Bin,Flags,D}|Is]);
-cg_bin_opt([{move,{integer,0},D},{bs_add,_,[D,{integer,_}=S,1],Dst}|Is]) ->
- cg_bin_opt([{move,S,Dst}|Is]);
-cg_bin_opt([{move,{integer,0},D},{bs_add,Fail,[D,S,U],Dst}|Is]) ->
- cg_bin_opt([{bs_add,Fail,[{integer,0},S,U],Dst}|Is]);
-cg_bin_opt([{move,{integer,Bytes},D},{Op,Fail,D,Extra,Regs,Flags,D}|Is])
+cg_bin_opt([{move,S1,{x,X}=D},{gc_bif,Op,Fail,Live0,As,Dst}|Is]) ->
+ Live = if
+ X + 1 =:= Live0 -> X;
+ true -> Live0
+ end,
+ [{gc_bif,Op,Fail,Live,As,D}|cg_bin_opt([{move,S1,Dst}|Is])];
+cg_bin_opt([{move,_,_}=I1,{Op,_,_,_}=I2|Is])
+ when Op =:= bs_utf8_size orelse Op =:= bs_utf16_size ->
+ [I2|cg_bin_opt([I1|Is])];
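+%% Adding a zero-size operand is the same as a move of the other
+%% operand; for example, {bs_add,Fail,[{integer,0},Src,1],Dst}
+%% becomes {move,Src,Dst}.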
+cg_bin_opt([{bs_add,_,[{integer,0},Src,1],Dst}|Is]) ->
+ cg_bin_opt_1([{move,Src,Dst}|Is]);
+cg_bin_opt([{bs_add,_,[Src,{integer,0},_],Dst}|Is]) ->
+ cg_bin_opt_1([{move,Src,Dst}|Is]);
+cg_bin_opt(Is) ->
+ cg_bin_opt_1(Is).
+
+cg_bin_opt_1([{move,Size,D},{bs_append,Fail,D,Extra,Regs,U,Bin,Flags,D}|Is]) ->
+ [{bs_append,Fail,Size,Extra,Regs,U,Bin,Flags,D}|cg_bin_opt(Is)];
+cg_bin_opt_1([{move,Size,D},{bs_private_append,Fail,D,U,Bin,Flags,D}|Is]) ->
+ [{bs_private_append,Fail,Size,U,Bin,Flags,D}|cg_bin_opt(Is)];
+cg_bin_opt_1([{move,Size,D},{Op,Fail,D,Extra,Regs,Flags,D}|Is])
when Op =:= bs_init2; Op =:= bs_init_bits ->
- cg_bin_opt([{Op,Fail,Bytes,Extra,Regs,Flags,D}|Is]);
-cg_bin_opt([{move,Src1,Dst},{bs_add,Fail,[Dst,Src2,U],Dst}|Is]) ->
- cg_bin_opt([{bs_add,Fail,[Src1,Src2,U],Dst}|Is]);
-cg_bin_opt([I|Is]) ->
+ Bytes = case Size of
+ {integer,Int} -> Int;
+ _ -> Size
+ end,
+ [{Op,Fail,Bytes,Extra,Regs,Flags,D}|cg_bin_opt(Is)];
+cg_bin_opt_1([{move,S1,D},{bs_add,Fail,[D,S2,U],Dst}|Is]) ->
+ cg_bin_opt([{bs_add,Fail,[S1,S2,U],Dst}|Is]);
+cg_bin_opt_1([{move,S1,D},{bs_add,Fail,[S2,D,U],Dst}|Is]) ->
+ cg_bin_opt([{bs_add,Fail,[S2,S1,U],Dst}|Is]);
+cg_bin_opt_1([I|Is]) ->
[I|cg_bin_opt(Is)];
-cg_bin_opt([]) -> [].
+cg_bin_opt_1([]) ->
+ [].
-cg_bin_put({bin_seg,[],S0,U,T,Fs,[E0,Next]}, Fail, Bef) ->
+cg_bin_put(#k_bin_seg{size=S0,unit=U,type=T,flags=Fs,seg=E0,next=Next},
+ Fail, Bef) ->
S1 = cg_reg_arg(S0, Bef),
E1 = cg_reg_arg(E0, Bef),
{Format,Op} = case T of
@@ -1845,7 +2385,7 @@ cg_bin_put({bin_seg,[],S0,U,T,Fs,[E0,Next]}, Fail, Bef) ->
utf ->
[{Op,Fail,{field_flags,Fs},E1}|cg_bin_put(Next, Fail, Bef)]
end;
-cg_bin_put({bin_end,[]}, _, _) -> [].
+cg_bin_put(#k_bin_end{}, _, _) -> [].
cg_build_args(As, Bef) ->
[{put,cg_reg_arg(A, Bef)} || A <- As].
@@ -1865,13 +2405,12 @@ break_cg(Bs, Le, Vdb, Bef, St) ->
{Ms ++ [{jump,{f,St#cg.break}}],
Int#sr{reg=clear_regs(Int#sr.reg)},St}.
-guard_break_cg(Bs, Locked, #l{i=I}, Vdb, #sr{reg=Reg0}=Bef, St) ->
- RegLocked = get_locked_regs(Reg0, Locked),
- #sr{reg=Reg1} = Int = clear_dead(Bef#sr{reg=RegLocked}, I, Vdb),
+guard_break_cg(Bs, #l{i=I}, Vdb, #sr{reg=Reg0}=Bef, St) ->
+ #sr{reg=Reg1} = Int = clear_dead(Bef, I, Vdb),
Reg2 = trim_free(Reg1),
NumLocked = length(Reg2),
Moves0 = gen_moves(Bs, Bef, NumLocked, []),
- Moves = order_moves(Moves0, find_scratch_reg(RegLocked)),
+ Moves = order_moves(Moves0, find_scratch_reg(Reg0)),
{BreakVars,_} = mapfoldl(fun(_, RegNum) ->
{{RegNum,gbreakvar},RegNum+1}
end, length(Reg2), Bs),
@@ -1879,31 +2418,17 @@ guard_break_cg(Bs, Locked, #l{i=I}, Vdb, #sr{reg=Reg0}=Bef, St) ->
Aft = Int#sr{reg=Reg},
{Moves ++ [{jump,{f,St#cg.break}}],Aft,St}.
-get_locked_regs([R|Rs0], Preserve) ->
- case {get_locked_regs(Rs0, Preserve),R} of
- {[],{_,V}} ->
- case lists:member(V, Preserve) of
- true -> [R];
- false -> []
- end;
- {[],_} ->
- [];
- {Rs,_} ->
- [R|Rs]
- end;
-get_locked_regs([], _) -> [].
-
%% cg_reg_arg(Arg0, Info) -> Arg
%% cg_reg_args([Arg0], Info) -> [Arg]
%% Convert argument[s] into registers. Literal values are returned unchanged.
cg_reg_args(As, Bef) -> [cg_reg_arg(A, Bef) || A <- As].
-cg_reg_arg({var,V}, Bef) -> fetch_var(V, Bef);
-cg_reg_arg(Literal, _) -> Literal.
+cg_reg_arg(#k_var{name=V}, Bef) -> fetch_var(V, Bef);
+cg_reg_arg(Literal, _) -> atomic(Literal).
-cg_reg_arg_prefer_y({var,V}, Bef) -> fetch_var_prefer_y(V, Bef);
-cg_reg_arg_prefer_y(Literal, _) -> Literal.
+cg_reg_arg_prefer_y(#k_var{name=V}, Bef) -> fetch_var_prefer_y(V, Bef);
+cg_reg_arg_prefer_y(Literal, _) -> atomic(Literal).
%% cg_setup_call([Arg], Bef, Cur, Vdb) -> {[Instr],Aft}.
%% Do the complete setup for a call/enter.
@@ -1941,9 +2466,9 @@ cg_call_args(As, Bef, I, Vdb) ->
load_arg_regs(Regs, As) -> load_arg_regs(Regs, As, 0).
-load_arg_regs([_|Rs], [{var,V}|As], I) -> [{I,V}|load_arg_regs(Rs, As, I+1)];
+load_arg_regs([_|Rs], [#k_var{name=V}|As], I) -> [{I,V}|load_arg_regs(Rs, As, I+1)];
load_arg_regs([_|Rs], [A|As], I) -> [{I,A}|load_arg_regs(Rs, As, I+1)];
-load_arg_regs([], [{var,V}|As], I) -> [{I,V}|load_arg_regs([], As, I+1)];
+load_arg_regs([], [#k_var{name=V}|As], I) -> [{I,V}|load_arg_regs([], As, I+1)];
load_arg_regs([], [A|As], I) -> [{I,A}|load_arg_regs([], As, I+1)];
load_arg_regs(Rs, [], _) -> Rs.
@@ -1979,12 +2504,13 @@ move_unsaved([], _, Regs, Acc) -> {Acc,Regs}.
gen_moves(As, Sr) -> gen_moves(As, Sr, 0, []).
-gen_moves([{var,V}|As], Sr, I, Acc) ->
+gen_moves([#k_var{name=V}|As], Sr, I, Acc) ->
case fetch_var(V, Sr) of
{x,I} -> gen_moves(As, Sr, I+1, Acc);
Reg -> gen_moves(As, Sr, I+1, [{move,Reg,{x,I}}|Acc])
end;
-gen_moves([A|As], Sr, I, Acc) ->
+gen_moves([A0|As], Sr, I, Acc) ->
+ A = atomic(A0),
gen_moves(As, Sr, I+1, [{move,A,{x,I}}|Acc]);
gen_moves([], _, _, Acc) -> lists:keysort(3, Acc).
@@ -2032,21 +2558,21 @@ break_up_cycle1(Dst, [M|Path], LastMove) ->
%% clear_dead(Sr, Until, Vdb) -> Aft.
%% Remove all variables in Sr which have died AT ALL so far.
-clear_dead(Sr, Until, Vdb) ->
- Sr#sr{reg=clear_dead_reg(Sr, Until, Vdb),
- stk=clear_dead_stk(Sr#sr.stk, Until, Vdb)}.
+clear_dead(#sr{stk=Stk}=Sr0, Until, Vdb) ->
+ Sr = Sr0#sr{reg=clear_dead_reg(Sr0, Until, Vdb),
+ stk=clear_dead_stk(Stk, Until, Vdb)},
+ reserve(Sr).
clear_dead_reg(Sr, Until, Vdb) ->
- Reg = [case R of
- {_I,V} = IV ->
- case vdb_find(V, Vdb) of
- {V,_,L} when L > Until -> IV;
- _ -> free %Remove anything else
- end;
- {reserved,_I,_V} = Reserved -> Reserved;
- free -> free
- end || R <- Sr#sr.reg],
- reserve(Sr#sr.res, Reg, Sr#sr.stk).
+ [case R of
+ {_I,V} = IV ->
+ case vdb_find(V, Vdb) of
+ {V,_,L} when L > Until -> IV;
+ _ -> free %Remove anything else
+ end;
+ {reserved,_I,_V}=Reserved -> Reserved;
+ free -> free
+ end || R <- Sr#sr.reg].
clear_dead_stk(Stk, Until, Vdb) ->
[case S of
@@ -2118,16 +2644,25 @@ adjust_stack(Bef, Fb, Lf, Vdb) ->
save_stack(Stk0, Fb, Lf, Vdb) ->
%% New variables that are in use but not on stack.
- New = [VFL || {V,F,L} = VFL <- Vdb,
- F < Fb,
- L >= Lf,
- not on_stack(V, Stk0)],
+ New = new_not_on_stack(Stk0, Fb, Lf, Vdb),
+
%% Add new variables that are not just dropped immediately.
%% N.B. foldr works backwards from the end!!
Saves = [V || {V,_,_} <- keysort(3, New)],
Stk1 = foldr(fun (V, Stk) -> put_stack(V, Stk) end, Stk0, Saves),
{Stk1,Saves}.
+%% new_not_on_stack(Stack, FirstBefore, LastFrom, Vdb) ->
+%% [{Variable,First,Last}]
+%%  Return information about all variables that are used past the
+%%  current point and that are not already on the stack.
+
+new_not_on_stack(Stk, Fb, Lf, Vdb) ->
+ [VFL || {V,F,L} = VFL <- Vdb,
+ F < Fb,
+ L >= Lf,
+ not on_stack(V, Stk)].
+
%% saves([SaveVar], Reg, Stk) -> [{move,Reg,Stk}].
%% Generate move instructions to save variables onto stack. The
%% stack/reg info used is that after the new stack has been made.
@@ -2153,7 +2688,7 @@ fetch_var_prefer_y(V, #sr{reg=Reg,stk=Stk}) ->
end.
load_vars(Vs, Regs) ->
- foldl(fun ({var,V}, Rs) -> put_reg(V, Rs) end, Regs, Vs).
+ foldl(fun (#k_var{name=V}, Rs) -> put_reg(V, Rs) end, Regs, Vs).
%% put_reg(Val, Regs) -> Regs.
%% find_reg(Val, Regs) -> {ok,r{R}} | error.
@@ -2233,10 +2768,6 @@ find_stack(_, [], _) -> error.
on_stack(V, Stk) -> keymember(V, 1, Stk).
-is_register({x,_}) -> true;
-is_register({yy,_}) -> true;
-is_register(_) -> false.
-
%% put_catch(CatchTag, Stack) -> Stack'
%% drop_catch(CatchTag, Stack) -> Stack'
%% Special interface for putting and removing catch tags, to ensure that
@@ -2254,6 +2785,16 @@ put_catch(Tag, [Other|Stk], Acc) ->
drop_catch(Tag, [{{catch_tag,Tag}}|Stk]) -> [free|Stk];
drop_catch(Tag, [Other|Stk]) -> [Other|drop_catch(Tag, Stk)].
+%% atomic(Klit) -> Lit.
+%% atomic_list([Klit]) -> [Lit].
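+%%  Convert a Kernel Erlang literal to the BEAM assembler format;
+%%  for example, #k_int{val=42} becomes {integer,42} and
+%%  #k_atom{val=ok} becomes {atom,ok}.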
+
+atomic(#k_literal{val=V}) -> {literal,V};
+atomic(#k_int{val=I}) -> {integer,I};
+atomic(#k_float{val=F}) -> {float,F};
+atomic(#k_atom{val=A}) -> {atom,A};
+%%atomic(#k_char{val=C}) -> {char,C};
+atomic(#k_nil{}) -> nil.
+
%% new_label(St) -> {L,St}.
new_label(#cg{lcount=Next}=St) ->
@@ -2296,3 +2837,86 @@ flatmapfoldl(F, Accu0, [Hd|Tail]) ->
{Rs,Accu2} = flatmapfoldl(F, Accu1, Tail),
{R++Rs,Accu2};
flatmapfoldl(_, Accu, []) -> {[],Accu}.
+
+%% Keep track of the lifetime of variables.
+%%
+%% init_vars([#k_var{}]) -> Vdb.
+%% new_vars([VarName], I, Vdb) -> Vdb.
+%% use_vars([VarName], I, Vdb) -> Vdb.
+%% add_var(VarName, F, L, Vdb) -> Vdb.
+%%
+%% The list of variable names for new_vars/3 and use_vars/3
+%% must be sorted.
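+%%
+%%  A Vdb is an ordset of {VarName,FirstIndex,LastIndex} entries;
+%%  for example, {'X',3,7} means that the variable is first
+%%  defined at index 3 and last used at index 7.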
+
+init_vars(Vs) ->
+ vdb_new(Vs).
+
+new_vars([], _, Vdb) -> Vdb;
+new_vars([V], I, Vdb) -> vdb_store_new(V, {V,I,I}, Vdb);
+new_vars(Vs, I, Vdb) -> vdb_update_vars(Vs, Vdb, I).
+
+use_vars([], _, Vdb) ->
+ Vdb;
+use_vars([V], I, Vdb) ->
+ case vdb_find(V, Vdb) of
+ {V,F,L} when I > L -> vdb_update(V, {V,F,I}, Vdb);
+ {V,_,_} -> Vdb;
+ error -> vdb_store_new(V, {V,I,I}, Vdb)
+ end;
+use_vars(Vs, I, Vdb) -> vdb_update_vars(Vs, Vdb, I).
+
+add_var(V, F, L, Vdb) ->
+ vdb_store_new(V, {V,F,L}, Vdb).
+
+%% vdb
+
+vdb_new(Vs) ->
+ ordsets:from_list([{V,0,0} || #k_var{name=V} <- Vs]).
+
+-type var() :: atom().
+
+-spec vdb_find(var(), [vdb_entry()]) -> 'error' | vdb_entry().
+
+vdb_find(V, Vdb) ->
+ case lists:keyfind(V, 1, Vdb) of
+ false -> error;
+ Vd -> Vd
+ end.
+
+vdb_update(V, Update, [{V,_,_}|Vdb]) ->
+ [Update|Vdb];
+vdb_update(V, Update, [Vd|Vdb]) ->
+ [Vd|vdb_update(V, Update, Vdb)].
+
+vdb_store_new(V, New, [{V1,_,_}=Vd|Vdb]) when V > V1 ->
+ [Vd|vdb_store_new(V, New, Vdb)];
+vdb_store_new(V, New, [{V1,_,_}|_]=Vdb) when V < V1 ->
+ [New|Vdb];
+vdb_store_new(_, New, []) -> [New].
+
+vdb_update_vars([V|_]=Vs, [{V1,_,_}=Vd|Vdb], I) when V > V1 ->
+ [Vd|vdb_update_vars(Vs, Vdb, I)];
+vdb_update_vars([V|Vs], [{V1,_,_}|_]=Vdb, I) when V < V1 ->
+ %% New variable.
+ [{V,I,I}|vdb_update_vars(Vs, Vdb, I)];
+vdb_update_vars([V|Vs], [{_,F,L}=Vd|Vdb], I) ->
+ %% Existing variable.
+ if
+ I > L -> [{V,F,I}|vdb_update_vars(Vs, Vdb, I)];
+ true -> [Vd|vdb_update_vars(Vs, Vdb, I)]
+ end;
+vdb_update_vars([V|Vs], [], I) ->
+ %% New variable.
+ [{V,I,I}|vdb_update_vars(Vs, [], I)];
+vdb_update_vars([], Vdb, _) -> Vdb.
+
+%% vdb_sub(Min, Max, Vdb) -> Vdb.
+%% Extract variables which are used before and after Min. Lock
+%% variables alive after Max.
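+%%
+%%  For example, with Min = 5 and Max = 8, the entry {'X',2,9}
+%%  becomes {'X',2,locked}, {'Y',3,6} is kept as it is, and
+%%  {'Z',6,7} is dropped because it is not used before Min.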
+
+vdb_sub(Min, Max, Vdb) ->
+ [ if L >= Max -> {V,F,locked};
+ true -> Vd
+ end || {V,F,L}=Vd <- Vdb,
+ F < Min,
+ L >= Min ].
diff --git a/lib/compiler/src/v3_core.erl b/lib/compiler/src/v3_core.erl
index ae650546e5..6029b91cdc 100644
--- a/lib/compiler/src/v3_core.erl
+++ b/lib/compiler/src/v3_core.erl
@@ -920,8 +920,9 @@ try_exception(Ecs0, St0) ->
%% Note that Tag is not needed for rethrow - it is already in Info.
{Evs,St1} = new_vars(3, St0), % Tag, Value, Info
{Ecs1,Ceps,St2} = clauses(Ecs0, St1),
+ Ecs2 = try_build_stacktrace(Ecs1, hd(Evs)),
[_,Value,Info] = Evs,
- LA = case Ecs1 of
+ LA = case Ecs2 of
[] -> [];
[C|_] -> get_lineno_anno(C)
end,
@@ -930,7 +931,7 @@ try_exception(Ecs0, St0) ->
body=[#iprimop{anno=#a{}, %Must have an #a{}
name=#c_literal{val=raise},
args=[Info,Value]}]},
- Hs = [#icase{anno=#a{anno=LA},args=[c_tuple(Evs)],clauses=Ecs1,fc=Ec}],
+ Hs = [#icase{anno=#a{anno=LA},args=[c_tuple(Evs)],clauses=Ecs2,fc=Ec}],
{Evs,Ceps++Hs,St2}.
try_after(As, St0) ->
@@ -946,6 +947,25 @@ try_after(As, St0) ->
Hs = [#icase{anno=#a{},args=[c_tuple(Evs)],clauses=[],fc=Ec}],
{Evs,Hs,St1}.
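+
+%% For each handler clause that actually binds the stacktrace
+%% variable, rewrite the pattern to match the raw stacktrace and
+%% prepend an #iset{} to the body that binds the variable to the
+%% result of the 'build_stacktrace' primop. Clauses that ignore
+%% the stacktrace are left unchanged.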
+try_build_stacktrace([#iclause{pats=Ps0,body=B0}=C0|Cs], RawStk) ->
+ [#c_tuple{es=[Class,Exc,Stk]}=Tup] = Ps0,
+ case Stk of
+ #c_var{name='_'} ->
+ %% Stacktrace variable is not used. Nothing to do.
+ [C0|try_build_stacktrace(Cs, RawStk)];
+ _ ->
+ %% Add code to build the stacktrace.
+ Ps = [Tup#c_tuple{es=[Class,Exc,RawStk]}],
+ Call = #iprimop{anno=#a{},
+ name=#c_literal{val=build_stacktrace},
+ args=[RawStk]},
+ Iset = #iset{var=Stk,arg=Call},
+ B = [Iset|B0],
+ C = C0#iclause{pats=Ps,body=B},
+ [C|try_build_stacktrace(Cs, RawStk)]
+ end;
+try_build_stacktrace([], _) -> [].
+
%% expr_bin([ArgExpr], St) -> {[Arg],[PreExpr],St}.
%% Flatten the arguments of a bin. Do this straight left to right!
%% Note that ibinary needs to have its annotation wrapped in a #a{}
@@ -2462,9 +2482,11 @@ cexpr(#icase{anno=A,args=Largs,clauses=Lcs,fc=Lfc}, As, St0) ->
cexpr(#ireceive1{anno=A,clauses=Lcs}, As, St0) ->
Exp = intersection(A#a.ns, As), %Exports
{Ccs,St1} = cclauses(Lcs, Exp, St0),
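+    %% The timeout is 'infinity', so the action can never be
+    %% executed. It must still return the correct number of values:
+    %% one for the result and one for each exported variable.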
+ True = #c_literal{val=true},
+ Action = core_lib:make_values(lists:duplicate(1+length(Exp), True)),
{#c_receive{anno=A#a.anno,
clauses=Ccs,
- timeout=#c_literal{val=infinity},action=#c_literal{val=true}},
+ timeout=#c_literal{val=infinity},action=Action},
Exp,A#a.us,St1};
cexpr(#ireceive2{anno=A,clauses=Lcs,timeout=Lto,action=Les}, As, St0) ->
Exp = intersection(A#a.ns, As), %Exports
@@ -2505,8 +2527,46 @@ cexpr(#ifun{anno=#a{us=Us0}=A0,name={named,Name},fc=#iclause{pats=Ps}}=Fun0,
end;
cexpr(#iapply{anno=A,op=Op,args=Args}, _As, St) ->
{#c_apply{anno=A#a.anno,op=Op,args=Args},[],A#a.us,St};
-cexpr(#icall{anno=A,module=Mod,name=Name,args=Args}, _As, St) ->
- {#c_call{anno=A#a.anno,module=Mod,name=Name,args=Args},[],A#a.us,St};
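+%% If the 'tuple_calls' option is given and the module is not a
+%% literal atom, generate a case that tests whether the module is a
+%% tuple of size greater than 0 and, if so, calls
+%% apply(element(1, Mod), Name, Args ++ [Mod]); for any other module
+%% term an ordinary remote call is generated.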
+cexpr(#icall{anno=A,module=Mod,name=Name,args=Args}, _As, St0) ->
+ Anno = A#a.anno,
+ case (not cerl:is_c_atom(Mod)) andalso member(tuple_calls, St0#core.opts) of
+ true ->
+ GenAnno = [compiler_generated|Anno],
+
+ %% Generate the clause that matches on the tuple
+ {TupleVar,St1} = new_var(GenAnno, St0),
+ {TupleSizeVar, St2} = new_var(GenAnno, St1),
+ {TupleModVar, St3} = new_var(GenAnno, St2),
+ {TupleArgsVar, St4} = new_var(GenAnno, St3),
+ TryVar = cerl:c_var('Try'),
+
+ TupleGuardExpr =
+ cerl:c_let([TupleSizeVar],
+ c_call_erl(tuple_size, [TupleVar]),
+ c_call_erl('>', [TupleSizeVar, cerl:c_int(0)])),
+
+ TupleGuard =
+ cerl:c_try(TupleGuardExpr, [TryVar], TryVar,
+ [cerl:c_var('T'),cerl:c_var('R')], cerl:c_atom(false)),
+
+ TupleApply =
+ cerl:c_let([TupleModVar],
+ c_call_erl(element, [cerl:c_int(1),TupleVar]),
+ cerl:c_let([TupleArgsVar],
+ cerl:make_list(Args ++ [TupleVar]),
+ c_call_erl(apply, [TupleModVar,Name,TupleArgsVar]))),
+
+ TupleClause = cerl:ann_c_clause(GenAnno, [TupleVar], TupleGuard, TupleApply),
+
+ %% Generate the fallback clause
+ {OtherVar,St5} = new_var(GenAnno, St4),
+ OtherApply = cerl:ann_c_call(GenAnno, OtherVar, Name, Args),
+ OtherClause = cerl:ann_c_clause(GenAnno, [OtherVar], OtherApply),
+
+ {cerl:ann_c_case(GenAnno, Mod, [TupleClause,OtherClause]),[],A#a.us,St5};
+ false ->
+ {#c_call{anno=Anno,module=Mod,name=Name,args=Args},[],A#a.us,St0}
+ end;
cexpr(#iprimop{anno=A,name=Name,args=Args}, _As, St) ->
{#c_primop{anno=A#a.anno,name=Name,args=Args},[],A#a.us,St};
cexpr(#iprotect{anno=A,body=Es}, _As, St0) ->
@@ -2536,6 +2596,9 @@ cfun(#ifun{anno=A,id=Id,vars=Args,clauses=Lcs,fc=Lfc}, _As, St0) ->
clauses=Ccs ++ [Cfc]}},
[],A#a.us,St2}.
+c_call_erl(Fun, Args) ->
+ cerl:c_call(cerl:c_atom(erlang), cerl:c_atom(Fun), Args).
+
%% lit_vars(Literal) -> [Var].
lit_vars(Lit) -> lit_vars(Lit, []).
diff --git a/lib/compiler/src/v3_kernel.erl b/lib/compiler/src/v3_kernel.erl
index 1fc05109c5..fd73e5a7dc 100644
--- a/lib/compiler/src/v3_kernel.erl
+++ b/lib/compiler/src/v3_kernel.erl
@@ -82,7 +82,8 @@
-export([module/2,format_error/1]).
-import(lists, [map/2,foldl/3,foldr/3,mapfoldl/3,splitwith/2,member/2,
- keymember/3,keyfind/3,partition/2,droplast/1,last/1,sort/1]).
+ keymember/3,keyfind/3,partition/2,droplast/1,last/1,sort/1,
+ reverse/1]).
-import(ordsets, [add_element/2,del_element/2,union/2,union/1,subtract/2]).
-import(cerl, [c_tuple/1]).
@@ -107,6 +108,7 @@ copy_anno(Kdst, Ksrc) ->
-record(iclause, {anno=[],isub,osub,pats,guard,body}).
-record(ireceive_accept, {anno=[],arg}).
-record(ireceive_next, {anno=[],arg}).
+-record(ignored, {anno=[]}).
-type warning() :: term(). % XXX: REFINE
@@ -159,11 +161,9 @@ function({#c_var{name={F,Arity}=FA},Body}, St0) ->
{#ifun{anno=Ab,vars=Kvs,body=B0},[],St2} = expr(Body, new_sub(), St1),
{B1,_,St3} = ubody(B0, return, St2),
%%B1 = B0, St3 = St2, %Null second pass
- {#k_fdef{anno=#k{us=[],ns=[],a=Ab},
- func=F,arity=Arity,vars=Kvs,body=B1},St3}
+ {make_fdef(#k{us=[],ns=[],a=Ab}, F, Arity, Kvs, B1),St3}
catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
+ Class:Error:Stack ->
io:fwrite("Function: ~w/~w\n", [F,Arity]),
erlang:raise(Class, Error, Stack)
end.
@@ -489,7 +489,7 @@ make_alt(First0, Then0) ->
Then1 = pre_seq(droplast(Then0), last(Then0)),
First2 = make_protected(First1),
Then2 = make_protected(Then1),
- Body = #k_atom{val=ignored},
+ Body = #ignored{},
First3 = #k_guard_clause{guard=First2,body=Body},
Then3 = #k_guard_clause{guard=Then2,body=Body},
First = #k_guard{clauses=[First3]},
@@ -1589,23 +1589,18 @@ match_var([U|Us], Cs0, Def, St) ->
%% according to type, the order is really irrelevant but tries to be
%% smart.
-match_con(Us, [C], Def, St) ->
- %% There is only one clause. We can keep literal tuples and
- %% lists, but we must convert []/integer/float/atom literals
- %% to the proper record (#k_nil{} and so on).
- Cs = [expand_pat_lit_clause(C, false)],
- match_con_1(Us, Cs, Def, St);
match_con(Us, Cs0, Def, St) ->
- %% More than one clause. Remove literals at the top level.
- Cs = [expand_pat_lit_clause(C, true) || C <- Cs0],
+ %% Expand literals at the top level.
+ Cs = [expand_pat_lit_clause(C) || C <- Cs0],
match_con_1(Us, Cs, Def, St).
match_con_1([U|_Us] = L, Cs, Def, St0) ->
%% Extract clauses for different constructors (types).
%%ok = io:format("match_con ~p~n", [Cs]),
- Ttcs = select_types([k_binary], Cs) ++ select_bin_con(Cs) ++
- select_types([k_cons,k_tuple,k_map,k_atom,k_float,k_int,
- k_nil,k_literal], Cs),
+ Ttcs0 = select_types([k_binary], Cs) ++ select_bin_con(Cs) ++
+ select_types([k_cons,k_tuple,k_map,k_atom,k_float,
+ k_int,k_nil], Cs),
+ Ttcs = opt_single_valued(Ttcs0),
%%ok = io:format("ttcs = ~p~n", [Ttcs]),
{Scs,St1} =
mapfoldl(fun ({T,Tcs}, St) ->
@@ -1618,28 +1613,14 @@ match_con_1([U|_Us] = L, Cs, Def, St0) ->
select_types(Types, Cs) ->
[{T,Tcs} || T <- Types, begin Tcs = select(T, Cs), Tcs =/= [] end].
-
-expand_pat_lit_clause(#iclause{pats=[#ialias{pat=#k_literal{anno=A,val=Val}}=Alias|Ps]}=C, B) ->
- P = case B of
- true -> expand_pat_lit(Val, A);
- false -> literal(Val, A)
- end,
+
+expand_pat_lit_clause(#iclause{pats=[#ialias{pat=#k_literal{anno=A,val=Val}}=Alias|Ps]}=C) ->
+ P = expand_pat_lit(Val, A),
C#iclause{pats=[Alias#ialias{pat=P}|Ps]};
-expand_pat_lit_clause(#iclause{pats=[#k_literal{anno=A,val=Val}|Ps]}=C, B) ->
- P = case B of
- true -> expand_pat_lit(Val, A);
- false -> literal(Val, A)
- end,
+expand_pat_lit_clause(#iclause{pats=[#k_literal{anno=A,val=Val}|Ps]}=C) ->
+ P = expand_pat_lit(Val, A),
C#iclause{pats=[P|Ps]};
-expand_pat_lit_clause(#iclause{pats=[#k_binary{anno=A,segs=#k_bin_end{}}|Ps]}=C, B) ->
- case B of
- true ->
- C;
- false ->
- P = #k_literal{anno=A,val = <<>>},
- C#iclause{pats=[P|Ps]}
- end;
-expand_pat_lit_clause(C, _) -> C.
+expand_pat_lit_clause(C) -> C.
expand_pat_lit([H|T], A) ->
#k_cons{anno=A,hd=literal(H, A),tl=literal(T, A)};
@@ -1659,6 +1640,107 @@ literal(Val, A) when is_atom(Val) ->
literal(Val, A) when is_list(Val); is_tuple(Val) ->
#k_literal{anno=A,val=Val}.
+%% opt_single_valued([{Type,Clauses}]) -> [{Type,Clauses}].
+%%  If a type has only one clause and the pattern is a literal,
+%%  the matching can be done more efficiently by comparing directly
+%%  with the literal (this is especially true for binaries).
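+%%
+%%  For example, the single pattern {ok,[fine]} can be combined into
+%%  the literal {ok,[fine]} and matched with a single comparison,
+%%  instead of first testing for a tuple and then matching each
+%%  element separately.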
+
+opt_single_valued(Ttcs) ->
+ opt_single_valued(Ttcs, [], []).
+
+opt_single_valued([{_,[#iclause{pats=[P0|Ps]}=Tc]}=Ttc|Ttcs], TtcAcc, LitAcc) ->
+ try combine_lit_pat(P0) of
+ P ->
+ LitTtc = Tc#iclause{pats=[P|Ps]},
+ opt_single_valued(Ttcs, TtcAcc, [LitTtc|LitAcc])
+ catch
+ not_possible ->
+ opt_single_valued(Ttcs, [Ttc|TtcAcc], LitAcc)
+ end;
+opt_single_valued([Ttc|Ttcs], TtcAcc, LitAcc) ->
+ opt_single_valued(Ttcs, [Ttc|TtcAcc], LitAcc);
+opt_single_valued([], TtcAcc, []) ->
+ reverse(TtcAcc);
+opt_single_valued([], TtcAcc, LitAcc) ->
+ Literals = {k_literal,reverse(LitAcc)},
+ %% Test the literals as early as possible.
+ case reverse(TtcAcc) of
+ [{k_binary,_}=Bin|Ttcs] ->
+ %% The delayed creation of sub binaries requires
+ %% bs_start_match2 to be the first instruction in the
+ %% function.
+ [Bin,Literals|Ttcs];
+ Ttcs ->
+ [Literals|Ttcs]
+ end.
+
+combine_lit_pat(#ialias{pat=Pat0}=Alias) ->
+ Pat = combine_lit_pat(Pat0),
+ Alias#ialias{pat=Pat};
+combine_lit_pat(Pat) ->
+ case do_combine_lit_pat(Pat) of
+ #k_literal{val=Val} when is_atom(Val) ->
+ throw(not_possible);
+ #k_literal{val=Val} when is_number(Val) ->
+ throw(not_possible);
+ #k_literal{val=[]} ->
+ throw(not_possible);
+ #k_literal{}=Lit ->
+ Lit
+ end.
+
+do_combine_lit_pat(#k_atom{anno=A,val=Val}) ->
+ #k_literal{anno=A,val=Val};
+do_combine_lit_pat(#k_float{anno=A,val=Val}) ->
+ #k_literal{anno=A,val=Val};
+do_combine_lit_pat(#k_int{anno=A,val=Val}) ->
+ #k_literal{anno=A,val=Val};
+do_combine_lit_pat(#k_nil{anno=A}) ->
+ #k_literal{anno=A,val=[]};
+do_combine_lit_pat(#k_binary{anno=A,segs=Segs}) ->
+ Bin = combine_bin_segs(Segs),
+ #k_literal{anno=A,val=Bin};
+do_combine_lit_pat(#k_cons{anno=A,hd=Hd0,tl=Tl0}) ->
+ #k_literal{val=Hd} = do_combine_lit_pat(Hd0),
+ #k_literal{val=Tl} = do_combine_lit_pat(Tl0),
+ #k_literal{anno=A,val=[Hd|Tl]};
+do_combine_lit_pat(#k_literal{}=Lit) ->
+ Lit;
+do_combine_lit_pat(#k_tuple{anno=A,es=Es0}) ->
+ Es = [begin
+ #k_literal{val=Lit} = do_combine_lit_pat(El),
+ Lit
+ end || El <- Es0],
+ #k_literal{anno=A,val=list_to_tuple(Es)};
+do_combine_lit_pat(_) ->
+ throw(not_possible).
+
+combine_bin_segs(#k_bin_seg{size=Size0,unit=Unit,type=integer,
+ flags=[unsigned,big],seg=Seg,next=Next}) ->
+ #k_literal{val=Size1} = do_combine_lit_pat(Size0),
+ #k_literal{val=Int} = do_combine_lit_pat(Seg),
+ Size = Size1 * Unit,
+ if
+ 0 < Size, Size < 64 ->
+ Bin = <<Int:Size>>,
+ case Bin of
+ <<Int:Size>> ->
+ NextBin = combine_bin_segs(Next),
+ <<Bin/bits,NextBin/bits>>;
+ _ ->
+ %% The integer Int does not fit in the segment,
+ %% thus it will not match.
+ throw(not_possible)
+ end;
+ true ->
+ %% Avoid creating huge binary literals.
+ throw(not_possible)
+ end;
+combine_bin_segs(#k_bin_end{}) ->
+ <<>>;
+combine_bin_segs(_) ->
+ throw(not_possible).
+
%% select_bin_con([Clause]) -> [{Type,[Clause]}].
%% Extract clauses for the k_bin_seg constructor. As k_bin_seg
%% matching can overlap, the k_bin_seg constructors cannot be
@@ -2143,7 +2225,9 @@ ubody(E, return, St0) ->
{Ea,Pa,St1} = force_atomic(E, St0),
ubody(pre_seq(Pa, #ivalues{args=[Ea]}), return, St1)
end;
-ubody(E, {break,_Rs} = Break, St0) ->
+ubody(#ignored{}, {break,_} = Break, St) ->
+ ubody(#ivalues{args=[]}, Break, St);
+ubody(E, {break,[_]} = Break, St0) ->
%%ok = io:fwrite("ubody ~w:~p~n", [?LINE,{E,Br}]),
%% Exiting expressions need no trailing break.
case is_exit_expr(E) of
@@ -2151,6 +2235,16 @@ ubody(E, {break,_Rs} = Break, St0) ->
false ->
{Ea,Pa,St1} = force_atomic(E, St0),
ubody(pre_seq(Pa, #ivalues{args=[Ea]}), Break, St1)
+ end;
+ubody(E, {break,Rs}=Break, St0) ->
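+    %% A break where the number of return values is not one.
+    %% Bind the results of the expression to new variables and
+    %% return them through an #ivalues{}.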
+ case is_exit_expr(E) of
+ true ->
+ uexpr(E, return, St0);
+ false ->
+ {Vs,St1} = new_vars(length(Rs), St0),
+ Iset = #iset{vars=Vs,arg=E},
+ PreSeq = pre_seq([Iset], #ivalues{args=Vs}),
+ ubody(PreSeq, Break, St1)
end.
iletrec_funs(#iletrec{defs=Fs}, St0) ->
@@ -2179,9 +2273,8 @@ iletrec_funs_gen(Fs, FreeVs, St) ->
Arity0 = length(Vs),
{Fb1,_,Lst1} = ubody(Fb0, return, Lst0#kern{ff={N,Arity0}}),
Arity = Arity0 + length(FreeVs),
- Fun = #k_fdef{anno=#k{us=[],ns=[],a=Fa},
- func=N,arity=Arity,
- vars=Vs ++ FreeVs,body=Fb1},
+ Fun = make_fdef(#k{us=[],ns=[],a=Fa}, N, Arity,
+ Vs++FreeVs, Fb1),
Lst1#kern{funs=[Fun|Lst1#kern.funs]}
end, St, Fs).
@@ -2325,8 +2418,7 @@ uexpr(#ifun{anno=A,vars=Vs,body=B0}, {break,Rs}, St0) ->
%% No id annotation. Must invent a fun name.
new_fun_name(St1)
end,
- Fun = #k_fdef{anno=#k{us=[],ns=[],a=A},func=Fname,arity=Arity,
- vars=Vs ++ Fvs,body=B1},
+ Fun = make_fdef(#k{us=[],ns=[],a=A}, Fname, Arity, Vs++Fvs, B1),
{#k_bif{anno=#k{us=Free,ns=lit_list_vars(Rs),a=A},
op=#k_internal{name=make_fun,arity=length(Free)+2},
args=[#k_atom{val=Fname},#k_int{val=Arity}|Fvs],
@@ -2343,6 +2435,16 @@ uexpr(Lit, {break,Rs0}, St0) ->
add_local_function(_, #kern{funs=ignore}=St) -> St;
add_local_function(F, #kern{funs=Funs}=St) -> St#kern{funs=[F|Funs]}.
+%% Make a #k_fdef{}, making sure that the body is always a #k_match{}.
+make_fdef(Anno, Name, Arity, Vs, #k_match{}=Body) ->
+ #k_fdef{anno=Anno,func=Name,arity=Arity,vars=Vs,body=Body};
+make_fdef(Anno, Name, Arity, Vs, Body) ->
+ Ka = get_kanno(Body),
+ Match = #k_match{anno=#k{us=Ka#k.us,ns=[],a=Ka#k.a},
+ vars=Vs,body=Body,ret=[]},
+ #k_fdef{anno=Anno,func=Name,arity=Arity,vars=Vs,body=Match}.
+
+
%% handle_reuse_annos([#k_var{}], State) -> State.
%% In general, it is only safe to reuse a variable for a match context
%% if the original value of the variable will no longer be needed.
diff --git a/lib/compiler/src/v3_kernel_pp.erl b/lib/compiler/src/v3_kernel_pp.erl
index 53097d0d7d..ac91039ae0 100644
--- a/lib/compiler/src/v3_kernel_pp.erl
+++ b/lib/compiler/src/v3_kernel_pp.erl
@@ -491,7 +491,7 @@ indent(Ctxt) -> indent(Ctxt#ctxt.indent, Ctxt).
indent(N, _Ctxt) when N =< 0 -> "";
indent(N, Ctxt) ->
T = Ctxt#ctxt.tab_width,
- string:chars($\t, N div T, string:chars($\s, N rem T)).
+ lists:duplicate(N div T, $\t) ++ lists:duplicate(N rem T, $\s).
nl_indent(Ctxt) -> [$\n|indent(Ctxt)].
@@ -508,7 +508,7 @@ unindent([$\t|T], N, Ctxt, C) ->
if N >= Tab ->
unindent(T, N - Tab, Ctxt, C);
true ->
- unindent([string:chars($\s, Tab - N)|T], 0, Ctxt, C)
+ unindent([lists:duplicate(Tab - N, $\s)|T], 0, Ctxt, C)
end;
unindent([L|T], N, Ctxt, C) when is_list(L) ->
unindent(L, N, Ctxt, [T|C]);
diff --git a/lib/compiler/src/v3_life.erl b/lib/compiler/src/v3_life.erl
deleted file mode 100644
index be3ade47ff..0000000000
--- a/lib/compiler/src/v3_life.erl
+++ /dev/null
@@ -1,468 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-%% Purpose : Convert annotated kernel expressions to annotated beam format.
-
-%% This module creates beam format annotated with variable lifetime
-%% information. Each thing is given an index and for each variable we
-%% store the first and last index for its occurrence. The variable
-%% database, VDB, attached to each thing is only relevant internally
-%% for that thing.
-%%
-%% For nested things like matches the numbering continues locally and
-%% the VDB for that thing refers to the variable usage within that
-%% thing. Variables which live through a such a thing are internally
-%% given a very large last index. Internally the indexes continue
-%% after the index of that thing. This creates no problems as the
-%% internal variable info never escapes and externally we only see
-%% variable which are alive both before or after.
-%%
-%% This means that variables never "escape" from a thing and the only
-%% way to get values from a thing is to "return" them, with 'break' or
-%% 'return'. Externally these values become the return values of the
-%% thing. This is no real limitation as most nested things have
-%% multiple threads so working out a common best variable usage is
-%% difficult.
-
--module(v3_life).
-
--export([module/2]).
-
--export([vdb_find/2]).
-
--import(lists, [member/2,map/2,reverse/1,sort/1]).
--import(ordsets, [add_element/2,intersection/2,union/2]).
-
--include("v3_kernel.hrl").
--include("v3_life.hrl").
-
--type fa() :: {atom(),arity()}.
-
-%% These are not defined in v3_kernel.hrl.
-get_kanno(Kthing) -> element(2, Kthing).
-%%set_kanno(Kthing, Anno) -> setelement(2, Kthing, Anno).
-
--spec module(#k_mdef{}, [compile:option()]) ->
- {'ok',{module(),[fa()],[_],[_]}}.
-
-module(#k_mdef{name=M,exports=Es,attributes=As,body=Fs0}, _Opts) ->
- Fs1 = functions(Fs0, []),
- {ok,{M,Es,As,Fs1}}.
-
-functions([F|Fs], Acc) ->
- functions(Fs, [function(F)|Acc]);
-functions([], Acc) -> reverse(Acc).
-
-%% function(Kfunc) -> Func.
-
-function(#k_fdef{anno=#k{a=Anno},func=F,arity=Ar,vars=Vs,body=Kb}) ->
- try
- As = var_list(Vs),
- Vdb0 = init_vars(As),
- %% Force a top-level match!
- B0 = case Kb of
- #k_match{} -> Kb;
- _ ->
- Ka = get_kanno(Kb),
- #k_match{anno=#k{us=Ka#k.us,ns=[],a=Ka#k.a},
- vars=Vs,body=Kb,ret=[]}
- end,
- {B1,_,Vdb1} = body(B0, 1, Vdb0),
- {function,F,Ar,As,B1,Vdb1,Anno}
- catch
- Class:Error ->
- Stack = erlang:get_stacktrace(),
- io:fwrite("Function: ~w/~w\n", [F,Ar]),
- erlang:raise(Class, Error, Stack)
- end.
-
-%% body(Kbody, I, Vdb) -> {[Expr],MaxI,Vdb}.
-%% Handle a body.
-
-body(#k_seq{arg=Ke,body=Kb}, I, Vdb0) ->
- %%ok = io:fwrite("life ~w:~p~n", [?LINE,{Ke,I,Vdb0}]),
- A = get_kanno(Ke),
- Vdb1 = use_vars(union(A#k.us, A#k.ns), I, Vdb0),
- {Es,MaxI,Vdb2} = body(Kb, I+1, Vdb1),
- E = expr(Ke, I, Vdb2),
- {[E|Es],MaxI,Vdb2};
-body(Ke, I, Vdb0) ->
- %%ok = io:fwrite("life ~w:~p~n", [?LINE,{Ke,I,Vdb0}]),
- A = get_kanno(Ke),
- Vdb1 = use_vars(union(A#k.us, A#k.ns), I, Vdb0),
- E = expr(Ke, I, Vdb1),
- {[E],I,Vdb1}.
-
-%% protected(Kprotected, I, Vdb) -> Protected.
-%% Only used in guards.
-
-protected(#k_protected{anno=A,arg=Ts,ret=Rs}, I, Vdb) ->
- %% Lock variables that are alive before try and used afterwards.
- %% Don't lock variables that are only used inside the protected
- %% expression.
- Pdb0 = vdb_sub(I, I+1, Vdb),
- {T,MaxI,Pdb1} = body(Ts, I+1, Pdb0),
- Pdb2 = use_vars(A#k.ns, MaxI+1, Pdb1), %Save "return" values
- #l{ke={protected,T,var_list(Rs)},i=I,a=A#k.a,vdb=Pdb2}.
-
-%% expr(Kexpr, I, Vdb) -> Expr.
-
-expr(#k_test{anno=A,op=Op,args=As,inverted=Inverted}, I, _Vdb) ->
- #l{ke={test,test_op(Op),atomic_list(As),Inverted},i=I,a=A#k.a};
-expr(#k_call{anno=A,op=Op,args=As,ret=Rs}, I, _Vdb) ->
- #l{ke={call,call_op(Op),atomic_list(As),var_list(Rs)},i=I,a=A#k.a};
-expr(#k_enter{anno=A,op=Op,args=As}, I, _Vdb) ->
- #l{ke={enter,call_op(Op),atomic_list(As)},i=I,a=A#k.a};
-expr(#k_bif{anno=A,op=Op,args=As,ret=Rs}, I, _Vdb) ->
- Bif = k_bif(A, Op, As, Rs),
- #l{ke=Bif,i=I,a=A#k.a};
-expr(#k_match{anno=A,body=Kb,ret=Rs}, I, Vdb) ->
- %% Work out imported variables which need to be locked.
- Mdb = vdb_sub(I, I+1, Vdb),
- M = match(Kb, A#k.us, I+1, [], Mdb),
- #l{ke={match,M,var_list(Rs)},i=I,vdb=use_vars(A#k.us, I+1, Mdb),a=A#k.a};
-expr(#k_guard_match{anno=A,body=Kb,ret=Rs}, I, Vdb) ->
- %% Work out imported variables which need to be locked.
- Mdb = vdb_sub(I, I+1, Vdb),
- M = match(Kb, A#k.us, I+1, [], Mdb),
- #l{ke={guard_match,M,var_list(Rs)},i=I,vdb=use_vars(A#k.us, I+1, Mdb),a=A#k.a};
-expr(#k_try{}=Try, I, Vdb) ->
- body_try(Try, I, Vdb);
-expr(#k_protected{}=Protected, I, Vdb) ->
- protected(Protected, I, Vdb);
-expr(#k_try_enter{anno=A,arg=Ka,vars=Vs,body=Kb,evars=Evs,handler=Kh}, I, Vdb) ->
- %% Lock variables that are alive before the catch and used afterwards.
- %% Don't lock variables that are only used inside the try.
- Tdb0 = vdb_sub(I, I+1, Vdb),
- %% This is the tricky bit. Lock variables in Arg that are used in
- %% the body and handler. Add try tag 'variable'.
- Ab = get_kanno(Kb),
- Ah = get_kanno(Kh),
- Tdb1 = use_vars(union(Ab#k.us, Ah#k.us), I+3, Tdb0),
- Tdb2 = vdb_sub(I, I+2, Tdb1),
- Vnames = fun (Kvar) -> Kvar#k_var.name end, %Get the variable names
- {Aes,_,Adb} = body(Ka, I+2, add_var({catch_tag,I+1}, I+1, 1000000, Tdb2)),
- {Bes,_,Bdb} = body(Kb, I+4, new_vars(sort(map(Vnames, Vs)), I+3, Tdb2)),
- {Hes,_,Hdb} = body(Kh, I+4, new_vars(sort(map(Vnames, Evs)), I+3, Tdb2)),
- #l{ke={try_enter,#l{ke={block,Aes},i=I+1,vdb=Adb,a=[]},
- var_list(Vs),#l{ke={block,Bes},i=I+3,vdb=Bdb,a=[]},
- var_list(Evs),#l{ke={block,Hes},i=I+3,vdb=Hdb,a=[]}},
- i=I,vdb=Tdb1,a=A#k.a};
-expr(#k_catch{anno=A,body=Kb,ret=[R]}, I, Vdb) ->
- %% Lock variables that are alive before the catch and used afterwards.
- %% Don't lock variables that are only used inside the catch.
- %% Add catch tag 'variable'.
- Cdb0 = vdb_sub(I, I+1, Vdb),
- {Es,_,Cdb1} = body(Kb, I+1, add_var({catch_tag,I}, I, locked, Cdb0)),
- #l{ke={'catch',Es,variable(R)},i=I,vdb=Cdb1,a=A#k.a};
-expr(#k_receive{anno=A,var=V,body=Kb,timeout=T,action=Ka,ret=Rs}, I, Vdb) ->
- %% Work out imported variables which need to be locked.
- Rdb = vdb_sub(I, I+1, Vdb),
- M = match(Kb, add_element(V#k_var.name, A#k.us), I+1, [],
- new_vars([V#k_var.name], I, Rdb)),
- {Tes,_,Adb} = body(Ka, I+1, Rdb),
- #l{ke={receive_loop,atomic(T),variable(V),M,
- #l{ke=Tes,i=I+1,vdb=Adb,a=[]},var_list(Rs)},
- i=I,vdb=use_vars(A#k.us, I+1, Vdb),a=A#k.a};
-expr(#k_receive_accept{anno=A}, I, _Vdb) ->
- #l{ke=receive_accept,i=I,a=A#k.a};
-expr(#k_receive_next{anno=A}, I, _Vdb) ->
- #l{ke=receive_next,i=I,a=A#k.a};
-expr(#k_put{anno=A,arg=Arg,ret=Rs}, I, _Vdb) ->
- #l{ke={set,var_list(Rs),literal(Arg, [])},i=I,a=A#k.a};
-expr(#k_break{anno=A,args=As}, I, _Vdb) ->
- #l{ke={break,atomic_list(As)},i=I,a=A#k.a};
-expr(#k_guard_break{anno=A,args=As}, I, Vdb) ->
- Locked = [V || {V,_,_} <- Vdb],
- #l{ke={guard_break,atomic_list(As),Locked},i=I,a=A#k.a};
-expr(#k_return{anno=A,args=As}, I, _Vdb) ->
- #l{ke={return,atomic_list(As)},i=I,a=A#k.a}.
-
-body_try(#k_try{anno=A,arg=Ka,vars=Vs,body=Kb,evars=Evs,handler=Kh,ret=Rs},
- I, Vdb) ->
- %% Lock variables that are alive before the catch and used afterwards.
- %% Don't lock variables that are only used inside the try.
- Tdb0 = vdb_sub(I, I+1, Vdb),
- %% This is the tricky bit. Lock variables in Arg that are used in
- %% the body and handler. Add try tag 'variable'.
- Ab = get_kanno(Kb),
- Ah = get_kanno(Kh),
- Tdb1 = use_vars(union(Ab#k.us, Ah#k.us), I+3, Tdb0),
- Tdb2 = vdb_sub(I, I+2, Tdb1),
- Vnames = fun (Kvar) -> Kvar#k_var.name end, %Get the variable names
- {Aes,_,Adb} = body(Ka, I+2, add_var({catch_tag,I+1}, I+1, locked, Tdb2)),
- {Bes,_,Bdb} = body(Kb, I+4, new_vars(sort(map(Vnames, Vs)), I+3, Tdb2)),
- {Hes,_,Hdb} = body(Kh, I+4, new_vars(sort(map(Vnames, Evs)), I+3, Tdb2)),
- #l{ke={'try',#l{ke={block,Aes},i=I+1,vdb=Adb,a=[]},
- var_list(Vs),#l{ke={block,Bes},i=I+3,vdb=Bdb,a=[]},
- var_list(Evs),#l{ke={block,Hes},i=I+3,vdb=Hdb,a=[]},
- var_list(Rs)},
- i=I,vdb=Tdb1,a=A#k.a}.
-
-%% call_op(Op) -> Op.
-%% test_op(Op) -> Op.
-%% Do any necessary name translations here to munge into beam format.
-
-call_op(#k_local{name=N}) -> N;
-call_op(#k_remote{mod=M,name=N}) -> {remote,atomic(M),atomic(N)};
-call_op(Other) -> variable(Other).
-
-test_op(#k_remote{mod=#k_atom{val=erlang},name=#k_atom{val=N}}) -> N.
-
-%% k_bif(Anno, Op, [Arg], [Ret]) -> Expr.
-%% Build bifs.
-
-k_bif(_A, #k_internal{name=Name}, As, Rs) ->
- {internal,Name,atomic_list(As),var_list(Rs)};
-k_bif(_A, #k_remote{mod=#k_atom{val=erlang},name=#k_atom{val=Name}}, As, Rs) ->
- Ar = length(As),
- case is_gc_bif(Name, Ar) of
- false ->
- {bif,Name,atomic_list(As),var_list(Rs)};
- true ->
- {gc_bif,Name,atomic_list(As),var_list(Rs)}
- end.
-
-%% match(Kexpr, [LockVar], I, Ctxt, Vdb) -> Expr.
-%% Convert match tree to old format.
-
-match(#k_alt{anno=A,first=Kf,then=Kt}, Ls, I, Ctxt, Vdb0) ->
- Vdb1 = use_vars(union(A#k.us, Ls), I, Vdb0),
- F = match(Kf, Ls, I+1, Ctxt, Vdb1),
- T = match(Kt, Ls, I+1, Ctxt, Vdb1),
- #l{ke={alt,F,T},i=I,vdb=Vdb1,a=A#k.a};
-match(#k_select{anno=A,var=V,types=Kts}, Ls0, I, Ctxt, Vdb0) ->
- Vanno = get_kanno(V),
- Ls1 = case member(no_usage, Vanno) of
- false -> add_element(V#k_var.name, Ls0);
- true -> Ls0
- end,
- Anno = case member(reuse_for_context, Vanno) of
- true -> [reuse_for_context|A#k.a];
- false -> A#k.a
- end,
- Vdb1 = use_vars(union(A#k.us, Ls1), I, Vdb0),
- Ts = [type_clause(Tc, Ls1, I+1, Ctxt, Vdb1) || Tc <- Kts],
- #l{ke={select,literal(V, Ctxt),Ts},i=I,vdb=Vdb1,a=Anno};
-match(#k_guard{anno=A,clauses=Kcs}, Ls, I, Ctxt, Vdb0) ->
- Vdb1 = use_vars(union(A#k.us, Ls), I, Vdb0),
- Cs = [guard_clause(G, Ls, I+1, Ctxt, Vdb1) || G <- Kcs],
- #l{ke={guard,Cs},i=I,vdb=Vdb1,a=A#k.a};
-match(Other, Ls, I, _Ctxt, Vdb0) ->
- Vdb1 = use_vars(Ls, I, Vdb0),
- {B,_,Vdb2} = body(Other, I+1, Vdb1),
- #l{ke={block,B},i=I,vdb=Vdb2,a=[]}.
-
-type_clause(#k_type_clause{anno=A,type=T,values=Kvs}, Ls, I, Ctxt, Vdb0) ->
- %%ok = io:format("life ~w: ~p~n", [?LINE,{T,Kvs}]),
- Vdb1 = use_vars(union(A#k.us, Ls), I+1, Vdb0),
- Vs = [val_clause(Vc, Ls, I+1, Ctxt, Vdb1) || Vc <- Kvs],
- #l{ke={type_clause,type(T),Vs},i=I,vdb=Vdb1,a=A#k.a}.
-
-val_clause(#k_val_clause{anno=A,val=V,body=Kb}, Ls0, I, Ctxt0, Vdb0) ->
- New = (get_kanno(V))#k.ns,
- Bus = (get_kanno(Kb))#k.us,
- %%ok = io:format("Ls0 = ~p, Used=~p\n New=~p, Bus=~p\n", [Ls0,Used,New,Bus]),
- Ls1 = union(intersection(New, Bus), Ls0), %Lock for safety
- Vdb1 = use_vars(union(A#k.us, Ls1), I+1, new_vars(New, I, Vdb0)),
- Ctxt = case V of
- #k_binary{segs=#k_var{name=C0}} -> C0;
- _ -> Ctxt0
- end,
- B = match(Kb, Ls1, I+1, Ctxt, Vdb1),
- #l{ke={val_clause,literal(V, Ctxt),B},i=I,vdb=use_vars(Bus, I+1, Vdb1),a=A#k.a}.
-
-guard_clause(#k_guard_clause{anno=A,guard=Kg,body=Kb}, Ls, I, Ctxt, Vdb0) ->
- Vdb1 = use_vars(union(A#k.us, Ls), I+2, Vdb0),
- Gdb = vdb_sub(I+1, I+2, Vdb1),
- G = protected(Kg, I+1, Gdb),
- B = match(Kb, Ls, I+2, Ctxt, Vdb1),
- #l{ke={guard_clause,G,B},
- i=I,vdb=use_vars((get_kanno(Kg))#k.us, I+2, Vdb1),
- a=A#k.a}.
-
-%% type(Ktype) -> Type.
-
-type(k_literal) -> literal;
-type(k_int) -> integer;
-%%type(k_char) -> integer; %Hhhmmm???
-type(k_float) -> float;
-type(k_atom) -> atom;
-type(k_nil) -> nil;
-type(k_cons) -> cons;
-type(k_tuple) -> tuple;
-type(k_binary) -> binary;
-type(k_bin_seg) -> bin_seg;
-type(k_bin_int) -> bin_int;
-type(k_bin_end) -> bin_end;
-type(k_map) -> map.
-
-%% variable(Klit) -> Lit.
-%% var_list([Klit]) -> [Lit].
-
-variable(#k_var{name=N}) -> {var,N}.
-
-var_list(Ks) -> [variable(K) || K <- Ks].
-
-%% atomic(Klit) -> Lit.
-%% atomic_list([Klit]) -> [Lit].
-
-atomic(#k_literal{val=V}) -> {literal,V};
-atomic(#k_var{name=N}) -> {var,N};
-atomic(#k_int{val=I}) -> {integer,I};
-atomic(#k_float{val=F}) -> {float,F};
-atomic(#k_atom{val=N}) -> {atom,N};
-%%atomic(#k_char{val=C}) -> {char,C};
-atomic(#k_nil{}) -> nil.
-
-atomic_list(Ks) -> [atomic(K) || K <- Ks].
-
-%% literal(Klit) -> Lit.
-%% literal_list([Klit]) -> [Lit].
-
-literal(#k_var{name=N}, _) -> {var,N};
-literal(#k_literal{val=I}, _) -> {literal,I};
-literal(#k_int{val=I}, _) -> {integer,I};
-literal(#k_float{val=F}, _) -> {float,F};
-literal(#k_atom{val=N}, _) -> {atom,N};
-%%literal(#k_char{val=C}, _) -> {char,C};
-literal(#k_nil{}, _) -> nil;
-literal(#k_cons{hd=H,tl=T}, Ctxt) ->
- {cons,[literal(H, Ctxt),literal(T, Ctxt)]};
-literal(#k_binary{segs=V}, Ctxt) ->
- {binary,literal(V, Ctxt)};
-literal(#k_bin_seg{size=S,unit=U,type=T,flags=Fs,seg=Seg,next=[]}, Ctxt) ->
- %% Only occurs in patterns.
- {bin_seg,Ctxt,literal(S, Ctxt),U,T,Fs,[literal(Seg, Ctxt)]};
-literal(#k_bin_seg{size=S,unit=U,type=T,flags=Fs,seg=Seg,next=N}, Ctxt) ->
- {bin_seg,Ctxt,literal(S, Ctxt),U,T,Fs,
- [literal(Seg, Ctxt),literal(N, Ctxt)]};
-literal(#k_bin_int{size=S,unit=U,flags=Fs,val=Int,next=N}, Ctxt) ->
- %% Only occurs in patterns.
- {bin_int,Ctxt,literal(S, Ctxt),U,Fs,Int,
- [literal(N, Ctxt)]};
-literal(#k_bin_end{}, Ctxt) ->
- {bin_end,Ctxt};
-literal(#k_tuple{es=Es}, Ctxt) ->
- {tuple,literal_list(Es, Ctxt)};
-literal(#k_map{op=Op,var=Var,es=Es0}, Ctxt) ->
- {map,Op,literal(Var, Ctxt),literal_list(Es0, Ctxt)};
-literal(#k_map_pair{key=K,val=V}, Ctxt) ->
- {map_pair,literal(K, Ctxt),literal(V, Ctxt)}.
-
-literal_list(Ks, Ctxt) ->
- [literal(K, Ctxt) || K <- Ks].
-
-
-%% is_gc_bif(Name, Arity) -> true|false
-%% Determines whether the BIF Name/Arity might do a GC.
-
-is_gc_bif(hd, 1) -> false;
-is_gc_bif(tl, 1) -> false;
-is_gc_bif(self, 0) -> false;
-is_gc_bif(node, 0) -> false;
-is_gc_bif(node, 1) -> false;
-is_gc_bif(element, 2) -> false;
-is_gc_bif(get, 1) -> false;
-is_gc_bif(tuple_size, 1) -> false;
-is_gc_bif(Bif, Arity) ->
- not (erl_internal:bool_op(Bif, Arity) orelse
- erl_internal:new_type_test(Bif, Arity) orelse
- erl_internal:comp_op(Bif, Arity)).
-
-%% Keep track of life time for variables.
-%%
-%% init_vars([{var,VarName}]) -> Vdb.
-%% new_vars([VarName], I, Vdb) -> Vdb.
-%% use_vars([VarName], I, Vdb) -> Vdb.
-%% add_var(VarName, F, L, Vdb) -> Vdb.
-%%
-%% The list of variable names for new_vars/3 and use_vars/3
-%% must be sorted.
-
-init_vars(Vs) ->
- vdb_new(Vs).
-
-new_vars([], _, Vdb) -> Vdb;
-new_vars([V], I, Vdb) -> vdb_store_new(V, {V,I,I}, Vdb);
-new_vars(Vs, I, Vdb) -> vdb_update_vars(Vs, Vdb, I).
-
-use_vars([], _, Vdb) ->
- Vdb;
-use_vars([V], I, Vdb) ->
- case vdb_find(V, Vdb) of
- {V,F,L} when I > L -> vdb_update(V, {V,F,I}, Vdb);
- {V,_,_} -> Vdb;
- error -> vdb_store_new(V, {V,I,I}, Vdb)
- end;
-use_vars(Vs, I, Vdb) -> vdb_update_vars(Vs, Vdb, I).
-
-add_var(V, F, L, Vdb) ->
- vdb_store_new(V, {V,F,L}, Vdb).
-
-%% vdb
-
-vdb_new(Vs) ->
- sort([{V,0,0} || {var,V} <- Vs]).
-
--type var() :: atom().
-
--spec vdb_find(var(), [vdb_entry()]) -> 'error' | vdb_entry().
-
-vdb_find(V, Vdb) ->
- case lists:keyfind(V, 1, Vdb) of
- false -> error;
- Vd -> Vd
- end.
-
-vdb_update(V, Update, [{V,_,_}|Vdb]) ->
- [Update|Vdb];
-vdb_update(V, Update, [Vd|Vdb]) ->
- [Vd|vdb_update(V, Update, Vdb)].
-
-vdb_store_new(V, New, [{V1,_,_}=Vd|Vdb]) when V > V1 ->
- [Vd|vdb_store_new(V, New, Vdb)];
-vdb_store_new(V, New, [{V1,_,_}|_]=Vdb) when V < V1 ->
- [New|Vdb];
-vdb_store_new(_, New, []) -> [New].
-
-vdb_update_vars([V|_]=Vs, [{V1,_,_}=Vd|Vdb], I) when V > V1 ->
- [Vd|vdb_update_vars(Vs, Vdb, I)];
-vdb_update_vars([V|Vs], [{V1,_,_}|_]=Vdb, I) when V < V1 ->
- %% New variable.
- [{V,I,I}|vdb_update_vars(Vs, Vdb, I)];
-vdb_update_vars([V|Vs], [{_,F,L}=Vd|Vdb], I) ->
- %% Existing variable.
- if
- I > L -> [{V,F,I}|vdb_update_vars(Vs, Vdb, I)];
- true -> [Vd|vdb_update_vars(Vs, Vdb, I)]
- end;
-vdb_update_vars([V|Vs], [], I) ->
- %% New variable.
- [{V,I,I}|vdb_update_vars(Vs, [], I)];
-vdb_update_vars([], Vdb, _) -> Vdb.
-
-%% vdb_sub(Min, Max, Vdb) -> Vdb.
-%% Extract variables which are used before and after Min. Lock
-%% variables alive after Max.
-
-vdb_sub(Min, Max, Vdb) ->
- [ if L >= Max -> {V,F,locked};
- true -> Vd
- end || {V,F,L}=Vd <- Vdb, F < Min, L >= Min ].
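The vdb helpers above are small enough to mirror in a standalone sketch. The module below is illustrative only (its name, the demo data and the use of lists:keyreplace/4 are not part of the compiler); it restates the update rule of use_vars/3 and the locking rule of vdb_sub/3:

-module(vdb_sketch).                  %Hypothetical module, for illustration only.
-export([demo/0]).

%% Mirror of the update rule in use_vars/3: extend Last when a variable
%% is used at a later index, add a fresh {V,I,I} entry otherwise.
use_var(V, I, Vdb) ->
    case lists:keyfind(V, 1, Vdb) of
        {V,F,L} when I > L -> lists:keyreplace(V, 1, Vdb, {V,F,I});
        {V,_,_} -> Vdb;
        false -> lists:sort([{V,I,I}|Vdb])
    end.

%% Mirror of vdb_sub/3: keep entries whose lifetime spans Min and lock
%% those still alive at Max or later.
sub(Min, Max, Vdb) ->
    [case L >= Max of
         true -> {V,F,locked};
         false -> Vd
     end || {V,F,L}=Vd <- Vdb, F < Min, L >= Min].

demo() ->
    Vdb0 = [{'A',0,1}],
    Vdb1 = use_var('X', 1, Vdb0),     %[{'A',0,1},{'X',1,1}]
    Vdb2 = use_var('X', 3, Vdb1),     %[{'A',0,1},{'X',1,3}]
    sub(2, 3, Vdb2).                  %[{'X',1,locked}]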
diff --git a/lib/compiler/src/v3_life.hrl b/lib/compiler/src/v3_life.hrl
deleted file mode 100644
index 5c76312067..0000000000
--- a/lib/compiler/src/v3_life.hrl
+++ /dev/null
@@ -1,29 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-%% This record contains variable life-time annotation for a
-%% kernel expression. Added by v3_life, used by v3_codegen.
-
--type vdb_entry() :: {atom(),non_neg_integer(),non_neg_integer()}.
-
--record(l, {ke, %Kernel expression
- i=0 :: non_neg_integer(), %Op number
- vdb=[] :: [vdb_entry()], %Variable database
- a=[] :: [term()]}). %Core annotation
-
diff --git a/lib/compiler/test/Makefile b/lib/compiler/test/Makefile
index 63763f31b2..da5d207db9 100644
--- a/lib/compiler/test/Makefile
+++ b/lib/compiler/test/Makefile
@@ -22,6 +22,7 @@ MODULES= \
bs_construct_SUITE \
bs_match_SUITE \
bs_utf_SUITE \
+ core_alias_SUITE \
core_fold_SUITE \
compile_SUITE \
compilation_SUITE \
diff --git a/lib/compiler/test/beam_block_SUITE.erl b/lib/compiler/test/beam_block_SUITE.erl
index 55d5f2dbe8..38ead96cc8 100644
--- a/lib/compiler/test/beam_block_SUITE.erl
+++ b/lib/compiler/test/beam_block_SUITE.erl
@@ -22,7 +22,7 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
get_map_elements/1,otp_7345/1,move_opt_across_gc_bif/1,
- erl_202/1,repro/1]).
+ erl_202/1,repro/1,local_cse/1,second_block_pass/1]).
%% The only test for the following functions is that
%% the code compiles and is accepted by beam_validator.
@@ -40,7 +40,9 @@ groups() ->
otp_7345,
move_opt_across_gc_bif,
erl_202,
- repro
+ repro,
+ local_cse,
+ second_block_pass
]}].
init_per_suite(Config) ->
@@ -237,6 +239,72 @@ find_operands(Cfg,XsiGraph,ActiveList,Count) ->
[Count+1, length(NewActiveList), length(digraph:vertices(XsiGraph))],
find_operands(NewCfg,XsiGraph,NewActiveList,Count+1).
+%% Some tests of local common subexpression elimination (CSE).
+
+local_cse(_Config) ->
+ {Self,{ok,Self}} = local_cse_1(),
+
+ local_cse_2([]),
+ local_cse_2(lists:seq(1, 512)),
+ local_cse_2(?MODULE:module_info()),
+
+ {[b],[a,b]} = local_cse_3(a, b),
+
+ {2000,Self,{Self,write_cache}} = local_cse_4(),
+
+ ok.
+
+local_cse_1() ->
+ %% Cover handling of unsafe tuple construction in
+ %% eliminate_use_of_from_reg/4. It became necessary to handle
+ %% unsafe tuples when local CSE was introduced.
+
+ {self(),{ok,self()}}.
+
+local_cse_2(Term) ->
+ case cse_make_binary(Term) of
+ <<Size:8,BinTerm:Size/binary>> ->
+ Term = binary_to_term(BinTerm);
+ <<Size:8,SizeTerm:Size/binary,BinTerm/binary>> ->
+ {'$size',TermSize} = binary_to_term(SizeTerm),
+ TermSize = byte_size(BinTerm),
+ Term = binary_to_term(BinTerm)
+ end.
+
+%% Copy of observer_backend:ttb_make_binary/1. During development of
+%% the local CSE optimization this function was incorrectly optimized.
+
+cse_make_binary(Term) ->
+ B = term_to_binary(Term),
+ SizeB = byte_size(B),
+ if SizeB > 255 ->
+ SB = term_to_binary({'$size',SizeB}),
+ <<(byte_size(SB)):8, SB/binary, B/binary>>;
+ true ->
+ <<SizeB:8, B/binary>>
+ end.
+
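As a schematic illustration of the redundancy that local CSE removes within a single block (the function below is invented for illustration and is not meant to show actual compiler output):

%% Both {tag,X} subterms evaluate to the same value inside one
%% straight-line block, so the compiler may construct the tuple once
%% and reuse the register holding it instead of building it twice.
cse_sketch(X) ->
    {wrapper, {tag,X}, {tag,X}}.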
+local_cse_3(X, Y) ->
+ %% The following expression was incorrectly transformed to {[X,Y],[X,Y]}
+ %% during development of the local CSE optimization.
+
+ {[Y],[X,Y]}.
+
+local_cse_4() ->
+ do_local_cse_4(2000, self(), {self(), write_cache}).
+
+do_local_cse_4(X, Y, Z) ->
+ {X,Y,Z}.
+
+%% Tests previously found bugs when running beam_block the second time.
+
+second_block_pass(_Config) ->
+ [#{dts:=5.0}] = second_1([#{dts => 10.0}], 2.0),
+ ok.
+
+second_1(Fs, TS) ->
+ [F#{dts=>DTS / TS} || #{dts:=DTS} = F <- Fs].
+
%%%
%%% Common functions.
%%%
diff --git a/lib/compiler/test/beam_type_SUITE.erl b/lib/compiler/test/beam_type_SUITE.erl
index d44fa60997..e33df809ff 100644
--- a/lib/compiler/test/beam_type_SUITE.erl
+++ b/lib/compiler/test/beam_type_SUITE.erl
@@ -23,7 +23,7 @@
init_per_group/2,end_per_group/2,
integers/1,coverage/1,booleans/1,setelement/1,cons/1,
tuple/1,record_float/1,binary_float/1,float_compare/1,
- arity_checks/1]).
+ arity_checks/1,elixir_binaries/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -42,7 +42,8 @@ groups() ->
record_float,
binary_float,
float_compare,
- arity_checks
+ arity_checks,
+ elixir_binaries
]}].
init_per_suite(Config) ->
@@ -232,5 +233,42 @@ do_tuple_arity_check(RGB) when is_tuple(RGB),
_ -> ok
end.
+elixir_binaries(_Config) ->
+ <<"foo blitzky baz">> = elixir_binary_1(<<"blitzky">>),
+ <<"foo * baz">> = elixir_binary_2($*),
+ <<7:4,755:10>> = elixir_bitstring_3(<<755:10>>),
+ ok.
+
+elixir_binary_1(Bar) when is_binary(Bar) ->
+ <<"foo ",
+ case Bar of
+ Rewrite when is_binary(Rewrite) ->
+ Rewrite;
+ Rewrite ->
+ list_to_binary(Rewrite)
+ end/binary,
+ " baz">>.
+
+elixir_binary_2(Arg) ->
+ Bin = <<Arg>>,
+ <<"foo ",
+ case Bin of
+ Rewrite when is_binary(Rewrite) ->
+ Rewrite;
+ Rewrite ->
+ list_to_binary:to_string(Rewrite)
+ end/binary,
+ " baz">>.
+
+elixir_bitstring_3(Bar) when is_bitstring(Bar) ->
+ <<7:4,
+ case Bar of
+ Rewrite when is_bitstring(Rewrite) ->
+ Rewrite;
+ Rewrite ->
+ list_to_bitstring(Rewrite)
+ end/bitstring>>.
+
+
id(I) ->
I.
diff --git a/lib/compiler/test/beam_utils_SUITE.erl b/lib/compiler/test/beam_utils_SUITE.erl
index 710cb050d4..7686e69b63 100644
--- a/lib/compiler/test/beam_utils_SUITE.erl
+++ b/lib/compiler/test/beam_utils_SUITE.erl
@@ -24,7 +24,8 @@
apply_fun/1,apply_mf/1,bs_init/1,bs_save/1,
is_not_killed/1,is_not_used_at/1,
select/1,y_catch/1,otp_8949_b/1,liveopt/1,coverage/1,
- y_registers/1]).
+ y_registers/1,user_predef/1,scan_f/1,cafu/1,
+ receive_label/1,read_size_file_version/1]).
-export([id/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -46,7 +47,11 @@ groups() ->
otp_8949_b,
liveopt,
coverage,
- y_registers
+ y_registers,
+ user_predef,
+ scan_f,
+ cafu,
+ read_size_file_version
]}].
init_per_suite(Config) ->
@@ -117,6 +122,15 @@ bs_init(_Config) ->
{'EXIT',{badarg,_}} = (catch do_bs_init_2([0.5])),
{'EXIT',{badarg,_}} = (catch do_bs_init_2([-1])),
{'EXIT',{badarg,_}} = (catch do_bs_init_2([1 bsl 32])),
+
+ <<>> = do_bs_init_3({tag,0}, 0, 0),
+ <<0>> = do_bs_init_3({tag,0}, 2, 1),
+
+ <<"_build/shared">> = do_bs_init_4([], false),
+ <<"abc/shared">> = do_bs_init_4(<<"abc">>, false),
+ <<"foo/foo">> = do_bs_init_4(<<"foo">>, true),
+ error = do_bs_init_4([], not_boolean),
+
ok.
do_bs_init_1([?MODULE], Sz) ->
@@ -134,6 +148,45 @@ do_bs_init_2(SigNos) ->
erlang:error(badarg)
>>.
+do_bs_init_3({tag,Pos}, Offset, Len) ->
+ N0 = Offset - Pos,
+ N = if N0 > Len -> Len;
+ true -> N0
+ end,
+ <<0:N/unit:8>>.
+
+do_bs_init_4(Arg1, Arg2) ->
+ Build =
+ case id(Arg1) of
+ X when X =:= [] orelse X =:= false -> <<"_build">>;
+ X -> X
+ end,
+ case id(Arg2) of
+ true ->
+ id(<<case Build of
+ Rewrite when is_binary(Rewrite) ->
+ Rewrite;
+ Rewrite ->
+ id(Rewrite)
+ end/binary,
+ "/",
+ case id(<<"foo">>) of
+ Rewrite when is_binary(Rewrite) ->
+ Rewrite;
+ Rewrite ->
+ id(Rewrite)
+ end/binary>>);
+ false ->
+ id(<<case Build of
+ Rewrite when is_binary(Rewrite) ->
+ Rewrite;
+ Rewrite ->
+ id(Rewrite)
+ end/binary,
+ "/shared">>);
+ Other ->
+ error
+ end.
bs_save(_Config) ->
{a,30,<<>>} = do_bs_save(<<1:1,30:5>>),
@@ -376,5 +429,83 @@ do(A, B) -> {A,B}.
appointment(#{"resolution" := Url}) ->
do(receive _ -> Url end, #{true => Url}).
+%% From epp.erl.
+user_predef(_Config) ->
+ #{key:="value"} = user_predef({key,"value"}, #{}),
+ #{key:="value"} = user_predef({key,"value"}, #{key=>defined}),
+ error = user_predef({key,"value"}, #{key=>[defined]}),
+ ok.
+
+user_predef({M,Val}, Ms) ->
+ case Ms of
+ #{M:=Defs} when is_list(Defs) ->
+ error;
+ _ ->
+ Ms#{M=>Val}
+ end.
+
+%% From disk_log_1.erl.
+scan_f(_Config) ->
+ {1,<<>>,[]} = scan_f(<<1:32>>, 1, []),
+ {1,<<>>,[<<156>>]} = scan_f(<<1:32,156,1:32>>, 1, []),
+ ok.
+
+scan_f(<<Size:32,Tail/binary>>, FSz, Acc) when Size =< FSz ->
+ case Tail of
+ <<BinTerm:Size/binary,Tail2/binary>> ->
+ scan_f(Tail2, FSz, [BinTerm | Acc]);
+ _ ->
+ {Size,Tail,Acc}
+ end.
+
+%% From file_io_server.erl.
+cafu(_Config) ->
+ error = cafu(<<42:32>>, -1, 0, {utf32,big}),
+ error = cafu(<<42:32>>, 10, 0, {utf32,big}),
+ error = cafu(<<42:32>>, -1, 0, {utf32,little}),
+ ok.
+
+cafu(<<_/big-utf32,Rest/binary>>, N, Count, {utf32,big}) when N < 0 ->
+ cafu(Rest, -1, Count+1, {utf32,big});
+cafu(<<_/big-utf32,Rest/binary>>, N, Count, {utf32,big}) ->
+ cafu(Rest, N-1, Count+1, {utf32,big});
+cafu(<<_/little-utf32,Rest/binary>>, N, Count, {utf32,little}) when N < 0 ->
+ cafu(Rest, -1, Count+1, {utf32,little});
+cafu(_, _, _, _) ->
+ error.
+
+-record(rec_label, {bool}).
+
+receive_label(_Config) ->
+ Pid = spawn_link(fun() -> do_receive_label(#rec_label{bool=true}) end),
+ Msg = {a,b,c},
+ Pid ! {self(),Msg},
+ receive
+ {ok,Msg} ->
+ unlink(Pid),
+ exit(Pid, die),
+ ok
+ end.
+
+do_receive_label(Rec) ->
+ receive
+ {From,Message} when Rec#rec_label.bool ->
+ From ! {ok,Message},
+ do_receive_label(Rec)
+ end.
+
+read_size_file_version(_Config) ->
+ ok = do_read_size_file_version({ok,<<42>>}),
+ {ok,7777} = do_read_size_file_version({ok,<<7777:32>>}),
+ ok.
+
+do_read_size_file_version(E) ->
+ case E of
+ {ok,<<Version>>} when Version =:= 42 ->
+ ok;
+ {ok,<<MaxFiles:32>>} ->
+ {ok,MaxFiles}
+ end.
+
%% The identity function.
id(I) -> I.
diff --git a/lib/compiler/test/beam_validator_SUITE.erl b/lib/compiler/test/beam_validator_SUITE.erl
index c23514b36b..63a13281a8 100644
--- a/lib/compiler/test/beam_validator_SUITE.erl
+++ b/lib/compiler/test/beam_validator_SUITE.erl
@@ -33,8 +33,8 @@
state_after_fault_in_catch/1,no_exception_in_catch/1,
undef_label/1,illegal_instruction/1,failing_gc_guard_bif/1,
map_field_lists/1,cover_bin_opt/1,
- val_dsetel/1]).
-
+ val_dsetel/1,bad_tuples/1]).
+
-include_lib("common_test/include/ct.hrl").
init_per_testcase(Case, Config) when is_atom(Case), is_list(Config) ->
@@ -61,7 +61,8 @@ groups() ->
freg_state,bad_bin_match,bad_dsetel,
state_after_fault_in_catch,no_exception_in_catch,
undef_label,illegal_instruction,failing_gc_guard_bif,
- map_field_lists,cover_bin_opt,val_dsetel]}].
+ map_field_lists,cover_bin_opt,val_dsetel,
+ bad_tuples]}].
init_per_suite(Config) ->
Config.
@@ -421,9 +422,9 @@ try_bin_opt(Mod) ->
try
do_bin_opt(Mod)
catch
- Class:Error ->
+ Class:Error:Stk ->
io:format("~p: ~p ~p\n~p\n",
- [Mod,Class,Error,erlang:get_stacktrace()]),
+ [Mod,Class,Error,Stk]),
error
end.
@@ -509,6 +510,19 @@ destroy_reg({Tag,N}) ->
{y,N+1}
end.
+bad_tuples(Config) ->
+ Errors = do_val(bad_tuples, Config),
+ [{{bad_tuples,heap_overflow,1},
+ {{put,{x,0}},8,{heap_overflow,{left,0},{wanted,1}}}},
+ {{bad_tuples,long,2},
+ {{put,{atom,too_long}},8,not_building_a_tuple}},
+ {{bad_tuples,self_referential,1},
+ {{put,{x,1}},7,{tuple_in_progress,{x,1}}}},
+ {{bad_tuples,short,1},
+ {{move,{x,1},{x,0}},7,{tuple_in_progress,{x,1}}}}] = Errors,
+
+ ok.
+
%%%-------------------------------------------------------------------------
transform_remove(Remove, Module) ->
diff --git a/lib/compiler/test/beam_validator_SUITE_data/bad_tuples.S b/lib/compiler/test/beam_validator_SUITE_data/bad_tuples.S
new file mode 100644
index 0000000000..7980241c37
--- /dev/null
+++ b/lib/compiler/test/beam_validator_SUITE_data/bad_tuples.S
@@ -0,0 +1,88 @@
+{module, bad_tuples}. %% version = 0
+
+{exports, [{heap_overflow,1},
+ {long,2},
+ {module_info,0},
+ {module_info,1},
+ {self_referential,1},
+ {short,1}]}.
+
+{attributes, []}.
+
+{labels, 13}.
+
+
+{function, short, 1, 2}.
+ {label,1}.
+ {line,[{location,"bad_tuples.erl",4}]}.
+ {func_info,{atom,bad_tuples},{atom,short},1}.
+ {label,2}.
+ {test_heap,3,1}.
+ {put_tuple,2,{x,1}}.
+ {put,{atom,ok}}.
+ {move,{x,1},{x,0}}.
+ return.
+
+
+{function, long, 2, 4}.
+ {label,3}.
+ {line,[{location,"bad_tuples.erl",7}]}.
+ {func_info,{atom,bad_tuples},{atom,long},2}.
+ {label,4}.
+ {test_heap,6,2}.
+ {put_tuple,2,{x,2}}.
+ {put,{x,0}}.
+ {put,{x,1}}.
+ {put,{atom,too_long}}.
+ {put_tuple,2,{x,0}}.
+ {put,{atom,ok}}.
+ {put,{x,2}}.
+ return.
+
+
+{function, heap_overflow, 1, 6}.
+ {label,5}.
+ {line,[{location,"bad_tuples.erl",10}]}.
+ {func_info,{atom,bad_tuples},{atom,heap_overflow},1}.
+ {label,6}.
+ {test_heap,3,1}.
+ {put_tuple,2,{x,1}}.
+ {put,{atom,ok}}.
+ {put,{x,0}}.
+ {put,{x,0}}.
+ {move,{x,1},{x,0}}.
+ return.
+
+
+{function, self_referential, 1, 8}.
+ {label,7}.
+ {line,[{location,"bad_tuples.erl",13}]}.
+ {func_info,{atom,bad_tuples},{atom,self_referential},1}.
+ {label,8}.
+ {test_heap,3,1}.
+ {put_tuple,2,{x,1}}.
+ {put,{atom,ok}}.
+ {put,{x,1}}.
+ {move,{x,1},{x,0}}.
+ return.
+
+
+{function, module_info, 0, 10}.
+ {label,9}.
+ {line,[]}.
+ {func_info,{atom,bad_tuples},{atom,module_info},0}.
+ {label,10}.
+ {move,{atom,bad_tuples},{x,0}}.
+ {line,[]}.
+ {call_ext_only,1,{extfunc,erlang,get_module_info,1}}.
+
+
+{function, module_info, 1, 12}.
+ {label,11}.
+ {line,[]}.
+ {func_info,{atom,bad_tuples},{atom,module_info},1}.
+ {label,12}.
+ {move,{x,0},{x,1}}.
+ {move,{atom,bad_tuples},{x,0}}.
+ {line,[]}.
+ {call_ext_only,2,{extfunc,erlang,get_module_info,2}}.
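Read together with the error terms expected by bad_tuples/1 above, the four functions fail validation for the following reasons (the notes below are explanatory annotations, not part of the assembly file):

%%   short/1            {put_tuple,2,{x,1}} announces two elements but only
%%                      one {put,...} follows, so {x,1} is still a
%%                      tuple_in_progress when it is moved to {x,0}.
%%   long/2             a third {put,{atom,too_long}} arrives after the two
%%                      announced elements: not_building_a_tuple.
%%   heap_overflow/1    {test_heap,3,1} reserves three words (enough for a
%%                      2-tuple), but the extra {put,{x,0}} wants one more:
%%                      heap_overflow.
%%   self_referential/1 {put,{x,1}} stores the tuple being built into itself
%%                      while it is still a tuple_in_progress.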
diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl
index 2fe8cd0cff..235956a714 100644
--- a/lib/compiler/test/bs_match_SUITE.erl
+++ b/lib/compiler/test/bs_match_SUITE.erl
@@ -39,7 +39,8 @@
match_string_opt/1,select_on_integer/1,
map_and_binary/1,unsafe_branch_caching/1,
bad_literals/1,good_literals/1,constant_propagation/1,
- parse_xml/1,get_payload/1,num_slots_different/1]).
+ parse_xml/1,get_payload/1,escape/1,num_slots_different/1,
+ beam_bsm/1,guard/1,is_ascii/1,non_opt_eq/1]).
-export([coverage_id/1,coverage_external_ignore/2]).
@@ -71,7 +72,8 @@ groups() ->
match_string_opt,select_on_integer,
map_and_binary,unsafe_branch_caching,
bad_literals,good_literals,constant_propagation,parse_xml,
- get_payload,num_slots_different]}].
+ get_payload,escape,num_slots_different,
+ beam_bsm,guard,is_ascii,non_opt_eq]}].
init_per_suite(Config) ->
@@ -676,6 +678,10 @@ coverage(Config) when is_list(Config) ->
<<>> = coverage_per_key(<<4:32>>),
<<$a,$b,$c>> = coverage_per_key(<<7:32,"abc">>),
+ binary = coverage_bitstring(<<>>),
+ binary = coverage_bitstring(<<7>>),
+ bitstring = coverage_bitstring(<<7:4>>),
+ other = coverage_bitstring([a]),
ok.
coverage_fold(Fun, Acc, <<H,T/binary>>) ->
@@ -766,6 +772,10 @@ coverage_per_key(<<BinSize:32,Bin/binary>> = B) ->
true = (byte_size(B) =:= BinSize),
Bin.
+coverage_bitstring(Bin) when is_binary(Bin) -> binary;
+coverage_bitstring(<<_/bitstring>>) -> bitstring;
+coverage_bitstring(_) -> other.
+
multiple_uses(Config) when is_list(Config) ->
{344,62879,345,<<245,159,1,89>>} = multiple_uses_1(<<1,88,245,159,1,89>>),
true = multiple_uses_2(<<0,0,197,18>>),
@@ -799,7 +809,7 @@ multiple_uses_cmp(<<_:16>>, <<_:16>>) -> false.
first_after(Data, Offset) ->
case byte_size(Data) > Offset of
false ->
- {First, Rest} = {ok, ok},
+ {_First, _Rest} = {ok, ok},
ok;
true ->
<<_:Offset/binary, Rest/binary>> = Data,
@@ -1513,7 +1523,7 @@ is_next_char_whitespace(<<C/utf8,_/binary>>) ->
{this_hdr = 17,
ext_hdr_opts}).
-get_payload(Config) ->
+get_payload(_Config) ->
<<3445:48>> = do_get_payload(#ext_header{ext_hdr_opts = <<3445:48>>}),
{'EXIT',_} = (catch do_get_payload(#ext_header{})),
ok.
@@ -1524,6 +1534,21 @@ do_get_payload(ExtHdr) ->
<<_:13,_:35>> = ExtHdr#ext_header.ext_hdr_opts,
ExtHdrOptions.
+escape(_Config) ->
+ 0 = escape(<<>>, 0),
+ 1 = escape(<<128>>, 0),
+ 2 = escape(<<128,255>>, 0),
+ 42 = escape(<<42>>, 0),
+ 50 = escape(<<42,8>>, 0),
+ ok.
+
+escape(<<Byte, Rest/bits>>, Pos) when Byte >= 127 ->
+ escape(Rest, Pos + 1);
+escape(<<Byte, Rest/bits>>, Pos) ->
+ escape(Rest, Pos + Byte);
+escape(<<_Rest/bits>>, Pos) ->
+ Pos.
+
%% ERL-490
num_slots_different(_Config) ->
Ts = [{<<"de">>, <<"default">>, <<"Remove">>, <<"a">>},
@@ -1557,6 +1582,101 @@ lgettext(<<"de">>, <<"navigation">>, <<"Results">>) ->
lgettext(<<"de">>, <<"navigation">>, <<"Resources">>) ->
{ok, <<"Ressourcen">>}.
+%% Test more code in beam_bsm.
+beam_bsm(_Config) ->
+ true = check_bitstring_list(<<1:1,0:1,1:1,1:1>>, [1,0,1,1]),
+ false = check_bitstring_list(<<1:1,0:1,1:1,1:1>>, [0]),
+
+ true = bsm_validate_scheme(<<>>),
+ true = bsm_validate_scheme(<<5,10>>),
+ false = bsm_validate_scheme(<<5,10,11,12>>),
+ true = bsm_validate_scheme([]),
+ true = bsm_validate_scheme([5,10]),
+ false = bsm_validate_scheme([5,6,7]),
+
+ <<1,2,3>> = bsm_must_save_and_not_save(<<1,2,3>>, []),
+ D = fun(N) -> 2*N end,
+ [2,4|<<3>>] = bsm_must_save_and_not_save(<<1,2,3>>, [D,D]),
+
+ ok.
+
+check_bitstring_list(<<H:1,T1/bitstring>>, [H|T2]) ->
+ check_bitstring_list(T1, T2);
+check_bitstring_list(<<>>, []) ->
+ true;
+check_bitstring_list(_, _) ->
+ false.
+
+bsm_validate_scheme([]) -> true;
+bsm_validate_scheme([H|T]) ->
+ case bsm_is_scheme(H) of
+ true -> bsm_validate_scheme(T);
+ false -> false
+ end;
+bsm_validate_scheme(<<>>) -> true;
+bsm_validate_scheme(<<H, Rest/binary>>) ->
+ case bsm_is_scheme(H) of
+ true -> bsm_validate_scheme(Rest);
+ false -> false
+ end.
+
+bsm_is_scheme(Int) ->
+ Int rem 5 =:= 0.
+
+%% NOT OPTIMIZED: different control paths use different positions in the binary
+bsm_must_save_and_not_save(Bin, []) ->
+ Bin;
+bsm_must_save_and_not_save(<<H,T/binary>>, [F|Fs]) ->
+ [F(H)|bsm_must_save_and_not_save(T, Fs)];
+bsm_must_save_and_not_save(<<>>, []) ->
+ [].
+
+guard(_Config) ->
+ _Tuple = id({a,b}),
+ ok = guard_1(<<1,2,3>>, {1,2,3}),
+ ok = guard_2(<<42>>, #{}),
+ ok.
+
+%% Cover handling of #k_put{} in v3_codegen:bsm_rename_ctx/4.
+guard_1(<<A,B,C>>, Tuple) when Tuple =:= {A,B,C} ->
+ ok.
+
+%% Cover handling of #k_call{} in v3_codegen:bsm_rename_ctx/4.
+guard_2(<<_>>, Healing) when Healing#{[] => Healing} =:= #{[] => #{}} ->
+ ok.
+
+is_ascii(_Config) ->
+ true = do_is_ascii(<<>>),
+ true = do_is_ascii(<<"string">>),
+ false = do_is_ascii(<<1024/utf8>>),
+ {'EXIT',{function_clause,_}} = (catch do_is_ascii(<<$A,0:3>>)),
+ {'EXIT',{function_clause,_}} = (catch do_is_ascii(<<16#80,0:3>>)),
+ ok.
+
+do_is_ascii(<<>>) ->
+ true;
+do_is_ascii(<<C,_/binary>>) when C >= 16#80 ->
+ %% This clause must fail to match if the size of the argument in
+ %% bits is not divisible by 8. Beware of unsafe optimizations.
+ false;
+do_is_ascii(<<_, T/binary>>) ->
+ do_is_ascii(T).
+
+non_opt_eq(_Config) ->
+ true = non_opt_eq([], <<>>),
+ true = non_opt_eq([$a], <<$a>>),
+ false = non_opt_eq([$a], <<$b>>),
+ ok.
+
+%% An example from the Efficiency Guide. It was previously not optimized,
+%% but it can now be optimized.
+
+non_opt_eq([H|T1], <<H,T2/binary>>) ->
+ non_opt_eq(T1, T2);
+non_opt_eq([_|_], <<_,_/binary>>) ->
+ false;
+non_opt_eq([], <<>>) ->
+ true.
check(F, R) ->
R = F().
diff --git a/lib/compiler/test/compilation_SUITE_data/opt_crash.erl b/lib/compiler/test/compilation_SUITE_data/opt_crash.erl
index f1607cca68..c65ec31593 100644
--- a/lib/compiler/test/compilation_SUITE_data/opt_crash.erl
+++ b/lib/compiler/test/compilation_SUITE_data/opt_crash.erl
@@ -33,7 +33,7 @@ test() ->
{userinfo,nil},
fun() -> nil end},
nil},
- {'query',nil}}},
+ {query,nil}}},
{absoluteURI,
{scheme,_},
@@ -43,7 +43,7 @@ test() ->
{userinfo,nil},
HostportBefore},
nil},
- {'query',nil}}} = URI_Before,
+ {query,nil}}} = URI_Before,
%% ... some funky code omitted, not relevant ...
@@ -55,7 +55,7 @@ test() ->
{userinfo,nil},
HostportAfter},
nil},
- {'query',nil}}} = URI_Before,
+ {query,nil}}} = URI_Before,
%% NOTE: I intended to write URI_After instead of URI_Before
%% but the accident revealed that when you add the line below,
%% it causes internal error in v3_codegen on compilation
diff --git a/lib/compiler/test/compile_SUITE.erl b/lib/compiler/test/compile_SUITE.erl
index 96897d612d..eee5bc733f 100644
--- a/lib/compiler/test/compile_SUITE.erl
+++ b/lib/compiler/test/compile_SUITE.erl
@@ -27,12 +27,12 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
app_test/1,appup_test/1,
- debug_info/4, custom_debug_info/1,
+ debug_info/4, custom_debug_info/1, custom_compile_info/1,
file_1/1, forms_2/1, module_mismatch/1, big_file/1, outdir/1,
binary/1, makedep/1, cond_and_ifdef/1, listings/1, listings_big/1,
other_output/1, kernel_listing/1, encrypted_abstr/1,
strict_record/1, utf8_atoms/1, utf8_functions/1, extra_chunks/1,
- cover/1, env/1, core_pp/1,
+ cover/1, env/1, core_pp/1, tuple_calls/1,
core_roundtrip/1, asm/1, optimized_guards/1,
sys_pre_attributes/1, dialyzer/1,
warnings/1, pre_load_check/1, env_compiler_options/1,
@@ -49,11 +49,12 @@ all() ->
test_lib:recompile(?MODULE),
[app_test, appup_test, file_1, forms_2, module_mismatch, big_file, outdir,
binary, makedep, cond_and_ifdef, listings, listings_big,
- other_output, kernel_listing, encrypted_abstr,
+ other_output, kernel_listing, encrypted_abstr, tuple_calls,
strict_record, utf8_atoms, utf8_functions, extra_chunks,
cover, env, core_pp, core_roundtrip, asm, optimized_guards,
sys_pre_attributes, dialyzer, warnings, pre_load_check,
- env_compiler_options, custom_debug_info, bc_options].
+ env_compiler_options, custom_debug_info, bc_options,
+ custom_compile_info].
groups() ->
[].
@@ -384,7 +385,6 @@ do_file_listings(DataDir, PrivDir, [File|Files]) ->
do_listing(Simple, TargetDir, dcbsm, ".core_bsm"),
do_listing(Simple, TargetDir, dsetel, ".dsetel"),
do_listing(Simple, TargetDir, dkern, ".kernel"),
- do_listing(Simple, TargetDir, dlife, ".life"),
do_listing(Simple, TargetDir, dcg, ".codegen"),
do_listing(Simple, TargetDir, dblk, ".block"),
do_listing(Simple, TargetDir, dexcept, ".except"),
@@ -500,9 +500,8 @@ do_kernel_listing({M,A}) ->
io:format("*** compilation failure '~p' for module ~s\n",
[Error,M]),
error;
- Class:Error ->
- io:format("~p: ~p ~p\n~p\n",
- [M,Class,Error,erlang:get_stacktrace()]),
+ Class:Error:Stk ->
+ io:format("~p: ~p ~p\n~p\n", [M,Class,Error,Stk]),
error
end.
@@ -661,6 +660,23 @@ custom_debug_info(Config) when is_list(Config) ->
{ok,{simple,[{debug_info,{debug_info_v1,?MODULE,error}}]}} =
beam_lib:chunks(ErrorBin, [debug_info]).
+custom_compile_info(Config) when is_list(Config) ->
+ Anno = erl_anno:new(1),
+ Forms = [{attribute,Anno,module,custom_compile_info}],
+ Opts = [binary,{compile_info,[{another,version}]}],
+
+ {ok,custom_compile_info,Bin} = compile:forms(Forms, Opts),
+ {ok,{custom_compile_info,[{compile_info,CompileInfo}]}} =
+ beam_lib:chunks(Bin, [compile_info]),
+ version = proplists:get_value(another, CompileInfo),
+ CompileOpts = proplists:get_value(options, CompileInfo),
+ undefined = proplists:get_value(compile_info, CompileOpts),
+
+ {ok,custom_compile_info,DetBin} = compile:forms(Forms, [deterministic|Opts]),
+ {ok,{custom_compile_info,[{compile_info,DetInfo}]}} =
+ beam_lib:chunks(DetBin, [compile_info]),
+ version = proplists:get_value(another, DetInfo).
+
cover(Config) when is_list(Config) ->
io:format("~p\n", [compile:options()]),
ok.
@@ -793,6 +809,37 @@ extra_chunks(Config) when is_list(Config) ->
{ok,{extra_chunks,[{"ExCh",<<"Contents">>}]}} =
beam_lib:chunks(ExtraChunksBinary, ["ExCh"]).
+tuple_calls(Config) when is_list(Config) ->
+ Anno = erl_anno:new(1),
+ Forms = [{attribute,Anno,export,[{size,1},{store,1}]},
+ {function,Anno,size,1,
+ [{clause,Anno,[{var,[],mod}],[],
+ [{call,[],{remote,[],{var,[],mod},{atom,[],size}},[]}]}]},
+ {function,Anno,store,1,
+ [{clause,Anno,[{var,[],mod}],[],
+ [{call,[],{remote,[],{var,[],mod},{atom,[],store}},[{atom,[],key},{atom,[],value}]}]}]}],
+
+ TupleCallsFalse = [{attribute,Anno,module,tuple_calls_false}|Forms],
+ {ok,_,TupleCallsFalseBinary} = compile:forms(TupleCallsFalse, [binary]),
+ code:load_binary(tuple_calls_false, "compile_SUITE.erl", TupleCallsFalseBinary),
+ {'EXIT',{badarg,_}} = (catch tuple_calls_false:store(dict())),
+ {'EXIT',{badarg,_}} = (catch tuple_calls_false:size(dict())),
+ {'EXIT',{badarg,_}} = (catch tuple_calls_false:size(empty_tuple())),
+
+ TupleCallsTrue = [{attribute,Anno,module,tuple_calls_true}|Forms],
+ {ok,_,TupleCallsTrueBinary} = compile:forms(TupleCallsTrue, [binary,tuple_calls]),
+ code:load_binary(tuple_calls_true, "compile_SUITE.erl", TupleCallsTrueBinary),
+ Dict = tuple_calls_true:store(dict()),
+ 1 = tuple_calls_true:size(Dict),
+ {'EXIT',{badarg,_}} = (catch tuple_calls_true:size(empty_tuple())),
+
+ ok.
+
+dict() ->
+ dict:new().
+empty_tuple() ->
+ {}.
+
env(Config) when is_list(Config) ->
{Simple,Target} = get_files(Config, simple, env),
{ok,Cwd} = file:get_cwd(),
@@ -854,9 +901,8 @@ do_core_pp({M,A}, Outdir) ->
io:format("*** compilation failure '~p' for module ~s\n",
[Error,M]),
error;
- Class:Error ->
- io:format("~p: ~p ~p\n~p\n",
- [M,Class,Error,erlang:get_stacktrace()]),
+ Class:Error:Stk ->
+ io:format("~p: ~p ~p\n~p\n", [M,Class,Error,Stk]),
error
end.
@@ -913,9 +959,8 @@ do_core_roundtrip(Beam, Outdir) ->
io:format("*** compilation failure '~p' for file ~s\n",
[Error,Beam]),
error;
- Class:Error ->
- io:format("~p: ~p ~p\n~p\n",
- [Beam,Class,Error,erlang:get_stacktrace()]),
+ Class:Error:Stk ->
+ io:format("~p: ~p ~p\n~p\n", [Beam,Class,Error,Stk]),
error
end.
@@ -1100,9 +1145,8 @@ do_asm(Beam, Outdir) ->
[Other,AsmFile]),
error
end
- catch Class:Error ->
- io:format("~p: ~p ~p\n~p\n",
- [M,Class,Error,erlang:get_stacktrace()]),
+ catch Class:Error:Stk ->
+ io:format("~p: ~p ~p\n~p\n", [M,Class,Error,Stk]),
error
end.
@@ -1119,9 +1163,8 @@ do_opt_guards(Beam) ->
try
{ok,M,Asm} = compile:forms(A, ['S']),
do_opt_guards_mod(Asm)
- catch Class:Error ->
- io:format("~p: ~p ~p\n~p\n",
- [M,Class,Error,erlang:get_stacktrace()]),
+ catch Class:Error:Stk ->
+ io:format("~p: ~p ~p\n~p\n", [M,Class,Error,Stk]),
error
end.
@@ -1282,10 +1325,13 @@ do_warnings_2([], Next, F) ->
%% pre-loads the modules that are used by a typical compilation.
pre_load_check(Config) ->
- case test_server:is_cover() of
- true ->
+ case {test_server:is_cover(),code:module_info(native)} of
+ {true,_} ->
{skip,"Cover is running"};
- false ->
+ {false,true} ->
+ %% Tracing won't work.
+ {skip,"'code' is native-compiled"};
+ {false,false} ->
try
do_pre_load_check(Config)
after
@@ -1409,19 +1455,21 @@ env_compiler_options(_Config) ->
bc_options(Config) ->
DataDir = proplists:get_value(data_dir, Config),
- 101 = highest_opcode(DataDir, small_float, [no_line_info]),
+ 101 = highest_opcode(DataDir, small_float, [no_get_hd_tl,no_line_info]),
103 = highest_opcode(DataDir, big,
- [no_record_opt,no_line_info,no_stack_trimming]),
+ [no_get_hd_tl,no_record_opt,
+ no_line_info,no_stack_trimming]),
- 125 = highest_opcode(DataDir, small_float, [no_line_info,no_float_opt]),
+ 125 = highest_opcode(DataDir, small_float,
+ [no_get_hd_tl,no_line_info,no_float_opt]),
132 = highest_opcode(DataDir, small,
- [no_record_opt,no_float_opt,no_line_info]),
+ [no_get_hd_tl,no_record_opt,no_float_opt,no_line_info]),
- 136 = highest_opcode(DataDir, big, [no_record_opt,no_line_info]),
+ 136 = highest_opcode(DataDir, big, [no_get_hd_tl,no_record_opt,no_line_info]),
- 153 = highest_opcode(DataDir, big, [no_record_opt]),
+ 153 = highest_opcode(DataDir, big, [no_get_hd_tl,no_record_opt]),
153 = highest_opcode(DataDir, big, [r16]),
153 = highest_opcode(DataDir, big, [r17]),
153 = highest_opcode(DataDir, big, [r18]),
@@ -1432,9 +1480,10 @@ bc_options(Config) ->
158 = highest_opcode(DataDir, small_maps, [r17]),
158 = highest_opcode(DataDir, small_maps, [r18]),
158 = highest_opcode(DataDir, small_maps, [r19]),
+ 158 = highest_opcode(DataDir, small_maps, [r20]),
158 = highest_opcode(DataDir, small_maps, []),
- 159 = highest_opcode(DataDir, big, []),
+ 163 = highest_opcode(DataDir, big, []),
ok.
diff --git a/lib/compiler/test/compile_SUITE_data/big.erl b/lib/compiler/test/compile_SUITE_data/big.erl
index 2e54ee8660..1db07755a1 100644
--- a/lib/compiler/test/compile_SUITE_data/big.erl
+++ b/lib/compiler/test/compile_SUITE_data/big.erl
@@ -741,3 +741,7 @@ snmp_access(suite) ->
debug_support(suite) ->
[ info, schema, schema, kill, lkill ].
+%% Cover translation of get_hd/2 to get_list/3 when option no_get_hd_tl
+%% is given.
+cover_get_hd([Hd|_]) ->
+ Hd.
diff --git a/lib/compiler/test/core_alias_SUITE.erl b/lib/compiler/test/core_alias_SUITE.erl
new file mode 100644
index 0000000000..f3f15ef0f8
--- /dev/null
+++ b/lib/compiler/test/core_alias_SUITE.erl
@@ -0,0 +1,195 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(core_alias_SUITE).
+
+-export([all/0, suite/0, groups/0,init_per_suite/1, end_per_suite/1,
+ init_per_group/2, end_per_group/2,
+ tuples/1, cons/1]).
+
+-include_lib("common_test/include/ct.hrl").
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ test_lib:recompile(?MODULE),
+ [{group,p}].
+
+groups() ->
+ [{p,[parallel],
+ [tuples, cons]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_GroupName, Config) ->
+ Config.
+
+end_per_group(_GroupName, Config) ->
+ Config.
+
+
+id(X) -> X.
+
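A minimal sketch of the property checked below, assuming the aliasing optimization performed by sys_core_alias (the function is illustrative and not part of this suite): when a clause rebuilds exactly the term it has matched, the result may be aliased to the input, so no new term is constructed, which is what the erts_debug:same/2 assertions verify.

%% With aliasing, this body can simply return the matched argument
%% instead of building a fresh {ok,X} tuple.
alias_sketch({ok,X}) ->
    {ok,X}.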
+tuples(Config) when is_list(Config) ->
+ Tuple = {ok,id(value)},
+
+ true = erts_debug:same(Tuple, simple_tuple(Tuple)),
+ true = erts_debug:same(Tuple, simple_tuple_in_map(#{hello => Tuple})),
+ true = erts_debug:same(Tuple, simple_tuple_case_repeated(Tuple, Tuple)),
+ true = erts_debug:same(Tuple, simple_tuple_fun_repeated(Tuple, Tuple)),
+ true = erts_debug:same(Tuple, simple_tuple_twice_head(Tuple, Tuple)),
+
+ {Tuple1, Tuple2} = simple_tuple_twice_body(Tuple),
+ true = erts_debug:same(Tuple, Tuple1),
+ true = erts_debug:same(Tuple, Tuple2),
+
+ Nested = {nested,Tuple},
+ true = erts_debug:same(Tuple, nested_tuple_part(Nested)),
+ true = erts_debug:same(Nested, nested_tuple_whole(Nested)),
+ true = erts_debug:same(Nested, nested_tuple_with_alias(Nested)),
+
+ true = erts_debug:same(Tuple, tuple_rebinding_after(Tuple)),
+
+ Tuple = unaliased_tuple_rebinding_before(Tuple),
+ false = erts_debug:same(Tuple, unaliased_tuple_rebinding_before(Tuple)),
+ Nested = unaliased_literal_tuple_head(Nested),
+ false = erts_debug:same(Nested, unaliased_literal_tuple_head(Nested)),
+ Nested = unaliased_literal_tuple_body(Nested),
+ false = erts_debug:same(Nested, unaliased_literal_tuple_body(Nested)),
+ Nested = unaliased_different_var_tuple(Nested, Tuple),
+ false = erts_debug:same(Nested, unaliased_different_var_tuple(Nested, Tuple)).
+
+simple_tuple({ok,X}) ->
+ {ok,X}.
+simple_tuple_twice_head({ok,X}, {ok,X}) ->
+ {ok,X}.
+simple_tuple_twice_body({ok,X}) ->
+ {{ok,X},{ok,X}}.
+simple_tuple_in_map(#{hello := {ok,X}}) ->
+ {ok,X}.
+simple_tuple_fun_repeated({ok,X}, Y) ->
+ io:format("~p~n", [X]),
+ (fun({ok,X}) -> {ok,X} end)(Y).
+simple_tuple_case_repeated({ok,X}, Y) ->
+ io:format("~p~n", [X]),
+ case Y of {ok,X} -> {ok,X} end.
+
+nested_tuple_part({nested,{ok,X}}) ->
+ {ok,X}.
+nested_tuple_whole({nested,{ok,X}}) ->
+ {nested,{ok,X}}.
+nested_tuple_with_alias({nested,{ok,_}=Y}) ->
+ {nested,Y}.
+
+tuple_rebinding_after(Y) ->
+ (fun(X) -> {ok,X} end)(Y),
+ case Y of {ok,X} -> {ok,X} end.
+unaliased_tuple_rebinding_before({ok,X}) ->
+ io:format("~p~n", [X]),
+ (fun(X) -> {ok,X} end)(value).
+unaliased_literal_tuple_head({nested,{ok,value}=X}) ->
+ io:format("~p~n", [X]),
+ {nested,{ok,value}}.
+unaliased_literal_tuple_body({nested,{ok,value}=X}) ->
+ Res = {nested,Y={ok,value}},
+ io:format("~p~n", [[X,Y]]),
+ Res.
+unaliased_different_var_tuple({nested,{ok,value}=X}, Y) ->
+ io:format("~p~n", [X]),
+ {nested,Y}.
+
+cons(Config) when is_list(Config) ->
+ Cons = [ok|id(value)],
+
+ true = erts_debug:same(Cons, simple_cons(Cons)),
+ true = erts_debug:same(Cons, simple_cons_in_map(#{hello => Cons})),
+ true = erts_debug:same(Cons, simple_cons_case_repeated(Cons, Cons)),
+ true = erts_debug:same(Cons, simple_cons_fun_repeated(Cons, Cons)),
+ true = erts_debug:same(Cons, simple_cons_twice_head(Cons, Cons)),
+
+ {Cons1,Cons2} = simple_cons_twice_body(Cons),
+ true = erts_debug:same(Cons, Cons1),
+ true = erts_debug:same(Cons, Cons2),
+
+ Nested = [nested,Cons],
+ true = erts_debug:same(Cons, nested_cons_part(Nested)),
+ true = erts_debug:same(Nested, nested_cons_whole(Nested)),
+ true = erts_debug:same(Nested, nested_cons_with_alias(Nested)),
+ true = erts_debug:same(Cons, cons_rebinding_after(Cons)),
+
+ Unstripped = id([a,b]),
+ Stripped = cons_with_binary([<<>>|Unstripped]),
+ true = erts_debug:same(Unstripped, Stripped),
+
+ Cons = unaliased_cons_rebinding_before(Cons),
+ false = erts_debug:same(Cons, unaliased_cons_rebinding_before(Cons)),
+ Nested = unaliased_literal_cons_head(Nested),
+ false = erts_debug:same(Nested, unaliased_literal_cons_head(Nested)),
+ Nested = unaliased_literal_cons_body(Nested),
+ false = erts_debug:same(Nested, unaliased_literal_cons_body(Nested)),
+ Nested = unaliased_different_var_cons(Nested, Cons),
+ false = erts_debug:same(Nested, unaliased_different_var_cons(Nested, Cons)).
+
+simple_cons([ok|X]) ->
+ [ok|X].
+simple_cons_twice_head([ok|X], [ok|X]) ->
+ [ok|X].
+simple_cons_twice_body([ok|X]) ->
+ {[ok|X],[ok|X]}.
+simple_cons_in_map(#{hello := [ok|X]}) ->
+ [ok|X].
+simple_cons_fun_repeated([ok|X], Y) ->
+ io:format("~p~n", [X]),
+ (fun([ok|X]) -> [ok|X] end)(Y).
+simple_cons_case_repeated([ok|X], Y) ->
+ io:format("~p~n", [X]),
+ case Y of [ok|X] -> [ok|X] end.
+
+nested_cons_part([nested,[ok|X]]) ->
+ [ok|X].
+nested_cons_whole([nested,[ok|X]]) ->
+ [nested,[ok|X]].
+nested_cons_with_alias([nested,[ok|_]=Y]) ->
+ [nested,Y].
+
+cons_with_binary([<<>>,X|Y]) ->
+ cons_with_binary([X|Y]);
+cons_with_binary(A) ->
+ A.
+
+cons_rebinding_after(Y) ->
+ (fun(X) -> [ok|X] end)(Y),
+ case Y of [ok|X] -> [ok|X] end.
+unaliased_cons_rebinding_before([ok|X]) ->
+ io:format("~p~n", [X]),
+ (fun(X) -> [ok|X] end)(value).
+unaliased_literal_cons_head([nested,[ok|value]=X]) ->
+ io:format("~p~n", [X]),
+ [nested,[ok|value]].
+unaliased_literal_cons_body([nested,[ok|value]=X]) ->
+ Res = [nested,Y=[ok|value]],
+ io:format("~p~n", [[X, Y]]),
+ Res.
+unaliased_different_var_cons([nested,[ok|value]=X], Y) ->
+ io:format("~p~n", [X]),
+ [nested,Y].
diff --git a/lib/compiler/test/core_fold_SUITE.erl b/lib/compiler/test/core_fold_SUITE.erl
index 0097e28d4d..4fd1f84569 100644
--- a/lib/compiler/test/core_fold_SUITE.erl
+++ b/lib/compiler/test/core_fold_SUITE.erl
@@ -26,7 +26,8 @@
unused_multiple_values_error/1,unused_multiple_values/1,
multiple_aliases/1,redundant_boolean_clauses/1,
mixed_matching_clauses/1,unnecessary_building/1,
- no_no_file/1,configuration/1,supplies/1]).
+ no_no_file/1,configuration/1,supplies/1,
+ redundant_stack_frame/1,export_from_case/1]).
-export([foo/0,foo/1,foo/2,foo/3]).
@@ -45,7 +46,8 @@ groups() ->
unused_multiple_values_error,unused_multiple_values,
multiple_aliases,redundant_boolean_clauses,
mixed_matching_clauses,unnecessary_building,
- no_no_file,configuration,supplies]}].
+ no_no_file,configuration,supplies,
+ redundant_stack_frame,export_from_case]}].
init_per_suite(Config) ->
@@ -527,4 +529,60 @@ supplies(_Config) ->
do_supplies(#{1 := Value}) when byte_size(Value), byte_size(kg) -> working.
+redundant_stack_frame(_Config) ->
+ {1,2} = do_redundant_stack_frame(#{x=>1,y=>2}),
+ {'EXIT',{{badkey,_,x},_}} = (catch do_redundant_stack_frame(#{y=>2})),
+ {'EXIT',{{badkey,_,y},_}} = (catch do_redundant_stack_frame(#{x=>1})),
+ ok.
+
+do_redundant_stack_frame(Map) ->
+ %% There should not be a stack frame for this function.
+ X = case Map of
+ #{x := X0} ->
+ X0;
+ #{} ->
+ erlang:error({badkey, Map, x})
+ end,
+ Y = case Map of
+ #{y := Y0} ->
+ Y0;
+ #{} ->
+ erlang:error({badkey, Map, y})
+ end,
+ {X, Y}.
+
+%% Cover some clauses in sys_core_fold:remove_first_value/2.
+
+-record(export_from_case, {val}).
+
+export_from_case(_Config) ->
+ a = export_from_case_1(true),
+ b = export_from_case_1(false),
+
+ R = #export_from_case{val=0},
+ {ok,R} = export_from_case_2(false, R),
+ {ok,#export_from_case{val=42}} = export_from_case_2(true, R),
+
+ ok.
+
+export_from_case_1(Bool) ->
+ case Bool of
+ true ->
+ id(42),
+ Result = a;
+ false ->
+ Result = b
+ end,
+ id(Result).
+
+export_from_case_2(Bool, Rec) ->
+ case Bool of
+ false ->
+ Result = Rec;
+ true ->
+ Result = Rec#export_from_case{val=42}
+ end,
+ {ok,Result}.
+
+
id(I) -> I.
diff --git a/lib/compiler/test/guard_SUITE.erl b/lib/compiler/test/guard_SUITE.erl
index ccb9b58225..0d6f8c6f98 100644
--- a/lib/compiler/test/guard_SUITE.erl
+++ b/lib/compiler/test/guard_SUITE.erl
@@ -35,7 +35,8 @@
basic_andalso_orelse/1,traverse_dcd/1,
check_qlc_hrl/1,andalso_semi/1,t_tuple_size/1,binary_part/1,
bad_constants/1,bad_guards/1,
- guard_in_catch/1,beam_bool_SUITE/1]).
+ guard_in_catch/1,beam_bool_SUITE/1,
+ cover_beam_dead/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -54,7 +55,8 @@ groups() ->
rel_ops,rel_op_combinations,
literal_type_tests,basic_andalso_orelse,traverse_dcd,
check_qlc_hrl,andalso_semi,t_tuple_size,binary_part,
- bad_constants,bad_guards,guard_in_catch,beam_bool_SUITE]}].
+ bad_constants,bad_guards,guard_in_catch,beam_bool_SUITE,
+ cover_beam_dead]}].
init_per_suite(Config) ->
Config.
@@ -1291,6 +1293,10 @@ rel_ops(Config) when is_list(Config) ->
true = any_atom /= id(42),
true = [] /= id(42),
+ %% Coverage of beam_utils:bif_to_test/3
+ Empty = id([]),
+ ?T(==, [], Empty),
+
ok.
-undef(TestOp).
@@ -1615,7 +1621,9 @@ type_tests() ->
is_reference,
is_port,
is_binary,
- is_function].
+ is_bitstring,
+ is_function,
+ is_map].
basic_andalso_orelse(Config) when is_list(Config) ->
T = id({type,integers,23,42}),
@@ -2198,7 +2206,31 @@ maps() ->
evidence(#{0 := Charge}) when 0; #{[] => Charge} == #{[] => 42} ->
ok.
+cover_beam_dead(_Config) ->
+ Mod = ?FUNCTION_NAME,
+ Attr = [],
+ Fs = [{function,test,1,2,
+ [{label,1},
+ {line,[]},
+ {func_info,{atom,Mod},{atom,test},1},
+ {label,2},
+ %% Cover beam_dead:turn_op/1 using swapped operand order.
+ {test,is_ne_exact,{f,3},[{integer,1},{x,0}]},
+ {test,is_eq_exact,{f,1},[{atom,a},{x,0}]},
+ {label,3},
+ {move,{atom,ok},{x,0}},
+ return]}],
+ Exp = [{test,1}],
+ Asm = {Mod,Exp,Attr,Fs,3},
+ {ok,Mod,Beam} = compile:forms(Asm, [from_asm,binary,report]),
+ {module,Mod} = code:load_binary(Mod, Mod, Beam),
+ ok = Mod:test(1),
+ ok = Mod:test(a),
+ {'EXIT',_} = (catch Mod:test(other)),
+ true = code:delete(Mod),
+ _ = code:purge(Mod),
+ ok.
%% Call this function to turn off constant propagation.
id(I) -> I.
diff --git a/lib/compiler/test/map_SUITE.erl b/lib/compiler/test/map_SUITE.erl
index 5e90b79aa2..f15917e3cb 100644
--- a/lib/compiler/test/map_SUITE.erl
+++ b/lib/compiler/test/map_SUITE.erl
@@ -695,8 +695,28 @@ t_is_map(Config) when is_list(Config) ->
if is_map(#{b=>1}) -> ok end,
if not is_map([1,2,3]) -> ok end,
if not is_map(x) -> ok end,
+
+ ok = do_t_is_map(map, #{}),
+ error = do_t_is_map(map, {a,b,c}),
+ ok = do_t_is_map(number, 42),
+ ok = do_t_is_map(number, 42.0),
+ error = do_t_is_map(number, {a,b,c}),
ok.
+do_t_is_map(What, X) ->
+ B = case What of
+ map ->
+ %% Cover conversion of is_map/1 BIF to test instruction
+ %% in beam_utils:bif_to_test/3.
+ is_map(X);
+ number ->
+ is_number(X)
+ end,
+ case B of
+ true -> ok;
+ false -> error
+ end.
+
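A rough sketch of the rewrite referred to by the comment in do_t_is_map/2 above; the instruction shown is schematic and not actual compiler output:

%% The result of is_map(X) is only used to branch on, so the BIF call
%% can be converted into a direct test instruction, roughly
%%
%%     {test,is_map,{f,FalseLabel},[{x,0}]}
%%
%% instead of calling the BIF and then comparing its boolean result.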
% test map updates without matching
t_update_literals(Config) when is_list(Config) ->
Map = #{x=>1,y=>2,z=>3,q=>4},
diff --git a/lib/compiler/test/match_SUITE.erl b/lib/compiler/test/match_SUITE.erl
index 52b2da05f7..4b26a8dcdc 100644
--- a/lib/compiler/test/match_SUITE.erl
+++ b/lib/compiler/test/match_SUITE.erl
@@ -23,8 +23,9 @@
init_per_group/2,end_per_group/2,
pmatch/1,mixed/1,aliases/1,non_matching_aliases/1,
match_in_call/1,untuplify/1,shortcut_boolean/1,letify_guard/1,
- selectify/1,underscore/1,match_map/1,map_vars_used/1,
- coverage/1,grab_bag/1,literal_binary/1]).
+ selectify/1,deselectify/1,underscore/1,match_map/1,map_vars_used/1,
+ coverage/1,grab_bag/1,literal_binary/1,
+ unary_op/1]).
-include_lib("common_test/include/ct.hrl").
@@ -38,9 +39,9 @@ groups() ->
[{p,[parallel],
[pmatch,mixed,aliases,non_matching_aliases,
match_in_call,untuplify,
- shortcut_boolean,letify_guard,selectify,
+ shortcut_boolean,letify_guard,selectify,deselectify,
underscore,match_map,map_vars_used,coverage,
- grab_bag,literal_binary]}].
+ grab_bag,literal_binary,unary_op]}].
init_per_suite(Config) ->
@@ -466,6 +467,66 @@ sel_same_value2(V) when V =:= 42; V =:= 43 ->
sel_same_value2(_) ->
error.
+%% Test deconstruction of select_val instructions in beam_peep into
+%% regular tests with just one possible value left. Hitting proper cases
+%% in beam_peep relies on unification of labels by beam_jump.
+
+deselectify(Config) when is_list(Config) ->
+ one_or_other = desel_tuple_arity({1}),
+ two = desel_tuple_arity({1,1}),
+ one_or_other = desel_tuple_arity({1,1,1}),
+
+ one_or_other = dsel_integer(1),
+ two = dsel_integer(2),
+ one_or_other = dsel_integer(3),
+
+ one_or_other = dsel_integer_typecheck(1),
+ two = dsel_integer_typecheck(2),
+ one_or_other = dsel_integer_typecheck(3),
+
+ one_or_other = dsel_atom(one),
+ two = dsel_atom(two),
+ one_or_other = dsel_atom(three),
+
+ one_or_other = dsel_atom_typecheck(one),
+ two = dsel_atom_typecheck(two),
+ one_or_other = dsel_atom_typecheck(three).
+
+desel_tuple_arity(Tuple) when is_tuple(Tuple) ->
+ case Tuple of
+ {_} -> one_or_other;
+ {_,_} -> two;
+ _ -> one_or_other
+ end.
+
+dsel_integer(Val) ->
+ case Val of
+ 1 -> one_or_other;
+ 2 -> two;
+ _ -> one_or_other
+ end.
+
+dsel_integer_typecheck(Val) when is_integer(Val) ->
+ case Val of
+ 1 -> one_or_other;
+ 2 -> two;
+ _ -> one_or_other
+ end.
+
+dsel_atom(Val) ->
+ case Val of
+ one -> one_or_other;
+ two -> two;
+ _ -> one_or_other
+ end.
+
+dsel_atom_typecheck(Val) when is_atom(Val) ->
+ case Val of
+ one -> one_or_other;
+ two -> two;
+ _ -> one_or_other
+ end.
+
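A rough sketch of the select_val deconstruction described above; the instructions are schematic and not actual compiler output:

%% After beam_jump has unified the labels of the 1-clause and the
%% default clause of dsel_integer/1, only the value 2 still has its own
%% destination, so a select_val along the lines of
%%
%%     {select_val,{x,0},{f,OneOrOther},{list,[{integer,2},{f,Two}]}}
%%
%% can be peephole-optimized by beam_peep into a single test, roughly
%%
%%     {test,is_eq_exact,{f,OneOrOther},[{x,0},{integer,2}]}
%%
%% followed by the code for the 'two' clause.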
underscore(Config) when is_list(Config) ->
case Config of
[] ->
@@ -557,6 +618,10 @@ grab_bag(_Config) ->
{bad,16#555555555555555555555555555555555555555555555555555}],
ok = grab_bag_remove_failure(L, unit, 0),
+ {42,<<43,44>>} = grab_bag_single_valued(<<42,43,44>>),
+ empty_list = grab_bag_single_valued([]),
+ empty_tuple = grab_bag_single_valued({}),
+
ok.
grab_bag_remove_failure([], _Unit, _MaxFailure) ->
@@ -574,6 +639,12 @@ grab_bag_remove_failure([{stretch,_,Mi}=Stretch | Specs], Unit, _MaxFailure) ->
ok
end.
+%% Cover a line in v3_kernel that places binary matching first.
+grab_bag_single_valued(<<H,T/bytes>>) -> {H,T};
+grab_bag_single_valued([]) -> empty_list;
+grab_bag_single_valued({}) -> empty_tuple.
+
+
%% Regression in 19.0, reported by Alexei Sholik
literal_binary(_Config) ->
3 = literal_binary_match(bar, <<"y">>),
@@ -592,5 +663,74 @@ literal_binary_match(_, <<"x">>) -> 2;
literal_binary_match(_, <<"y">>) -> 3;
literal_binary_match(_, _) -> fail.
+unary_op(Config) ->
+ %% ERL-514. This test case only verifies that the code
+ %% calculates the correct result, not that the generated
+ %% code is optimal.
+
+ {non_associative,30} = unary_op_1('&'),
+ {non_associative,300} = unary_op_1('^'),
+ {non_associative,300} = unary_op_1('not'),
+ {non_associative,300} = unary_op_1('+'),
+ {non_associative,300} = unary_op_1('-'),
+ {non_associative,300} = unary_op_1('~~~'),
+ {non_associative,300} = unary_op_1('!'),
+ {non_associative,320} = unary_op_1('@'),
+
+ error = unary_op_1(Config),
+ error = unary_op_1(abc),
+ error = unary_op_1(42),
+
+ ok.
+
+unary_op_1(Vop@1) ->
+ %% If all optimizations are working as they should, there should
+ %% be no stack frame and all '=:=' tests should be coalesced into
+ %% a single select_val instruction.
+
+ case Vop@1 =:= '&' of
+ true ->
+ {non_associative,30};
+ false ->
+ case
+ case Vop@1 =:= '^' of
+ true ->
+ true;
+ false ->
+ case Vop@1 =:= 'not' of
+ true ->
+ true;
+ false ->
+ case Vop@1 =:= '+' of
+ true ->
+ true;
+ false ->
+ case Vop@1 =:= '-' of
+ true ->
+ true;
+ false ->
+ case Vop@1 =:= '~~~' of
+ true ->
+ true;
+ false ->
+ Vop@1 =:= '!'
+ end
+ end
+ end
+ end
+ end
+ of
+ true ->
+ {non_associative,300};
+ false ->
+ case Vop@1 =:= '@' of
+ true ->
+ {non_associative,320};
+ false ->
+ error
+ end
+ end
+ end.
+
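The expectations in the comment at the top of unary_op_1/1 can be checked by hand by compiling the suite to BEAM assembly and reading the listing for that function. A sketch of one way to do it (file name assumed to be match_SUITE.erl; common_test include paths omitted):

    %% From the OS shell:
    %%   erlc -S match_SUITE.erl
    %% or from the Erlang shell. Both produce match_SUITE.S, where a
    %% single select_val over the operator atoms and the absence of an
    %% allocate instruction (no stack frame) in unary_op_1/1 indicate
    %% that the optimizations have kicked in.
    compile:file("match_SUITE.erl", ['S', report]).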
id(I) -> I.
diff --git a/lib/compiler/test/misc_SUITE.erl b/lib/compiler/test/misc_SUITE.erl
index 4bd884d86b..4e39f4663e 100644
--- a/lib/compiler/test/misc_SUITE.erl
+++ b/lib/compiler/test/misc_SUITE.erl
@@ -161,16 +161,17 @@ md5_1(Beam) ->
%% Cover some code that handles internal errors.
silly_coverage(Config) when is_list(Config) ->
- %% sys_core_fold, sys_core_bsm, sys_core_setel, v3_kernel
+ %% sys_core_fold, sys_core_alias, sys_core_bsm, sys_core_setel, v3_kernel
BadCoreErlang = {c_module,[],
name,[],[],
[{{c_var,[],{foo,2}},seriously_bad_body}]},
expect_error(fun() -> sys_core_fold:module(BadCoreErlang, []) end),
+ expect_error(fun() -> sys_core_alias:module(BadCoreErlang, []) end),
expect_error(fun() -> sys_core_bsm:module(BadCoreErlang, []) end),
expect_error(fun() -> sys_core_dsetel:module(BadCoreErlang, []) end),
expect_error(fun() -> v3_kernel:module(BadCoreErlang, []) end),
- %% v3_life
+ %% v3_codegen
BadKernel = {k_mdef,[],?MODULE,
[{foo,0}],
[],
@@ -178,11 +179,7 @@ silly_coverage(Config) when is_list(Config) ->
{k,[],[],[]},
f,0,[],
seriously_bad_body}]},
- expect_error(fun() -> v3_life:module(BadKernel, []) end),
-
- %% v3_codegen
- CodegenInput = {?MODULE,[{foo,0}],[],[{function,foo,0,[a|b],a,b,[]}]},
- expect_error(fun() -> v3_codegen:module(CodegenInput, []) end),
+ expect_error(fun() -> v3_codegen:module(BadKernel, []) end),
%% beam_a
BeamAInput = {?MODULE,[{foo,0}],[],
@@ -321,8 +318,7 @@ expect_error(Fun) ->
io:format("~p", [Any]),
ct:fail(call_was_supposed_to_fail)
catch
- Class:Reason ->
- Stk = erlang:get_stacktrace(),
+ Class:Reason:Stk ->
io:format("~p:~p\n~p\n", [Class,Reason,Stk]),
case {Class,Reason} of
{error,undef} ->
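The replacement above uses the three-part catch pattern that binds the stacktrace directly, instead of calling erlang:get_stacktrace/0 afterwards. A minimal standalone sketch of the construct (hypothetical function, not part of this diff):

    classify(Fun) when is_function(Fun, 0) ->
        try
            Fun()
        catch
            Class:Reason:Stacktrace ->
                %% Stacktrace is bound by the catch clause itself;
                %% no call to erlang:get_stacktrace/0 is needed.
                {Class, Reason, Stacktrace}
        end.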
@@ -363,9 +359,7 @@ integer_encoding_1(Config) ->
io:put_chars(Src, "t(Last) ->[\n"),
io:put_chars(Data, "[\n"),
- do_integer_encoding(-(id(1) bsl 10000), Src, Data),
- do_integer_encoding(id(1) bsl 10000, Src, Data),
- do_integer_encoding(1024, 0, Src, Data),
+ do_integer_encoding(137, 0, Src, Data),
_ = [begin
B = 1 bsl I,
do_integer_encoding(-B-1, Src, Data),
@@ -374,7 +368,7 @@ integer_encoding_1(Config) ->
do_integer_encoding(B-1, Src, Data),
do_integer_encoding(B, Src, Data),
do_integer_encoding(B+1, Src, Data)
- end || I <- lists:seq(1, 128)],
+ end || I <- lists:seq(1, 130)],
io:put_chars(Src, "Last].\n\n"),
ok = file:close(Src),
io:put_chars(Data, "0].\n\n"),
@@ -388,8 +382,6 @@ integer_encoding_1(Config) ->
%% Compare lists.
List = Mod:t(0),
{ok,[List]} = file:consult(DataFile),
- OneBsl10000 = id(1) bsl 10000,
- [-(1 bsl 10000),OneBsl10000|_] = List,
%% Cleanup.
file:delete(SrcFile),
@@ -408,7 +400,3 @@ do_integer_encoding(I, Src, Data) ->
Str = integer_to_list(I),
io:put_chars(Src, [Str,",\n"]),
io:put_chars(Data, [Str,",\n"]).
-
-
-id(I) -> I.
-
diff --git a/lib/compiler/test/receive_SUITE.erl b/lib/compiler/test/receive_SUITE.erl
index 8304672558..5e386790c0 100644
--- a/lib/compiler/test/receive_SUITE.erl
+++ b/lib/compiler/test/receive_SUITE.erl
@@ -222,9 +222,8 @@ do_ref_opt(Source, PrivDir) ->
collect_recv_opt_instrs(Code)
end,
ok
- catch Class:Error ->
- io:format("~s: ~p ~p\n~p\n",
- [Source,Class,Error,erlang:get_stacktrace()]),
+ catch Class:Error:Stk ->
+ io:format("~s: ~p ~p\n~p\n", [Source,Class,Error,Stk]),
error
end.
@@ -265,6 +264,10 @@ export(Config) when is_list(Config) ->
self() ! {result,Ref,42},
42 = export_1(Ref),
{error,timeout} = export_1(Ref),
+
+ self() ! {result,Ref},
+ {ok,Ref} = export_2(),
+
ok.
export_1(Reference) ->
@@ -281,6 +284,10 @@ export_1(Reference) ->
id({build,self()}),
Result.
+export_2() ->
+ receive {result,Result} -> ok end,
+ {ok,Result}.
+
wait(Config) when is_list(Config) ->
self() ! <<42>>,
<<42>> = wait_1(r, 1, 2),
diff --git a/lib/compiler/test/trycatch_SUITE.erl b/lib/compiler/test/trycatch_SUITE.erl
index a591d6cc93..d5a1dc642f 100644
--- a/lib/compiler/test/trycatch_SUITE.erl
+++ b/lib/compiler/test/trycatch_SUITE.erl
@@ -26,7 +26,8 @@
nested_of/1,nested_catch/1,nested_after/1,
nested_horrid/1,last_call_optimization/1,bool/1,
plain_catch_coverage/1,andalso_orelse/1,get_in_try/1,
- hockey/1,handle_info/1,catch_in_catch/1,grab_bag/1]).
+ hockey/1,handle_info/1,catch_in_catch/1,grab_bag/1,
+ stacktrace/1,nested_stacktrace/1,raise/1]).
-include_lib("common_test/include/ct.hrl").
@@ -42,7 +43,8 @@ groups() ->
after_oops,eclectic,rethrow,nested_of,nested_catch,
nested_after,nested_horrid,last_call_optimization,
bool,plain_catch_coverage,andalso_orelse,get_in_try,
- hockey,handle_info,catch_in_catch,grab_bag]}].
+ hockey,handle_info,catch_in_catch,grab_bag,
+ stacktrace,nested_stacktrace,raise]}].
init_per_suite(Config) ->
@@ -115,6 +117,16 @@ basic(Conf) when is_list(Conf) ->
catch nisse -> erro
end,
+ %% Unmatchable clauses.
+ try
+ throw(thrown)
+ catch
+ {a,b}={a,b,c} -> %Intentionally no match.
+ ok;
+ thrown ->
+ ok
+ end,
+
ok.
after_call() ->
@@ -324,11 +336,11 @@ eclectic(Conf) when is_list(Conf) ->
{{error,{exit,V},{'EXIT',V}},V} =
eclectic_1({foo,{error,{exit,V}}}, error, {value,V}),
{{value,{value,V},V},
- {'EXIT',{badarith,[{?MODULE,my_add,2,_}|_]}}} =
+ {'EXIT',{badarith,[{erlang,'+',[0,a],_},{?MODULE,my_add,2,_}|_]}}} =
eclectic_1({foo,{value,{value,V}}}, undefined, {'add',{0,a}}),
{{'EXIT',V},V} =
eclectic_1({catch_foo,{exit,V}}, undefined, {throw,V}),
- {{error,{'div',{1,0}},{'EXIT',{badarith,[{?MODULE,my_div,2,_}|_]}}},
+ {{error,{'div',{1,0}},{'EXIT',{badarith,[{erlang,'div',[1,0],_},{?MODULE,my_div,2,_}|_]}}},
{'EXIT',V}} =
eclectic_1({foo,{error,{'div',{1,0}}}}, error, {exit,V}),
{{{error,V},{'EXIT',{V,[{?MODULE,foo,1,_}|_]}}},
@@ -345,7 +357,7 @@ eclectic(Conf) when is_list(Conf) ->
eclectic_2({error,{value,V}}, throw, {error,V}),
{{caught,{'EXIT',{badarg,[{erlang,abs,[V],_}|_]}}},V} =
eclectic_2({value,{'abs',V}}, undefined, {value,V}),
- {{caught,{'EXIT',{badarith,[{?MODULE,my_add,2,_}|_]}}},V} =
+ {{caught,{'EXIT',{badarith,[{erlang,'+',[0,a],_},{?MODULE,my_add,2,_}|_]}}},V} =
eclectic_2({exit,{'add',{0,a}}}, exit, {value,V}),
{{caught,{'EXIT',V}},undefined} =
eclectic_2({value,{error,V}}, undefined, {exit,V}),
@@ -1039,5 +1051,217 @@ grab_bag(_Config) ->
ok.
+stacktrace(_Config) ->
+ V = [make_ref()|self()],
+ case ?MODULE:module_info(native) of
+ false ->
+ {value2,{caught1,badarg,[{erlang,abs,[V],_}|_]}} =
+ stacktrace_1({'abs',V}, error, {value,V}),
+ {caught2,{error,badarith},[{erlang,'+',[0,a],_},
+ {?MODULE,my_add,2,_}|_]} =
+ stacktrace_1({'div',{1,0}}, error, {'add',{0,a}});
+ true ->
+ {value2,{caught1,badarg,[{?MODULE,my_abs,1,_}|_]}} =
+ stacktrace_1({'abs',V}, error, {value,V}),
+ {caught2,{error,badarith},[{?MODULE,my_add,2,_}|_]} =
+ stacktrace_1({'div',{1,0}}, error, {'add',{0,a}})
+ end,
+ {caught2,{error,{try_clause,V}},[{?MODULE,stacktrace_1,3,_}|_]} =
+ stacktrace_1({value,V}, error, {value,V}),
+ {caught2,{throw,V},[{?MODULE,foo,1,_}|_]} =
+ stacktrace_1({value,V}, error, {throw,V}),
+
+ try
+ stacktrace_2()
+ catch
+ error:{badmatch,_}:Stk2 ->
+ [{?MODULE,stacktrace_2,0,_},
+ {?MODULE,stacktrace,1,_}|_] = Stk2,
+ Stk2 = erlang:get_stacktrace(),
+ ok
+ end,
+
+ try
+ stacktrace_3(a, b)
+ catch
+ error:function_clause:Stk3 ->
+ Stk3 = erlang:get_stacktrace(),
+ case lists:module_info(native) of
+ false ->
+ [{lists,prefix,[a,b],_}|_] = Stk3;
+ true ->
+ [{lists,prefix,2,_}|_] = Stk3
+ end
+ end,
+
+ try
+ throw(x)
+ catch
+ throw:x:IntentionallyUnused ->
+ ok
+ end.
+
+stacktrace_1(X, C1, Y) ->
+ try try foo(X) of
+ C1 -> value1
+ catch
+ C1:D1:Stk1 ->
+ Stk1 = erlang:get_stacktrace(),
+ {caught1,D1,Stk1}
+ after
+ foo(Y)
+ end of
+ V2 -> {value2,V2}
+ catch
+ C2:D2:Stk2 -> {caught2,{C2,D2},Stk2=erlang:get_stacktrace()}
+ end.
+
+stacktrace_2() ->
+ ok = erlang:process_info(self(), current_function),
+ ok.
+
+stacktrace_3(A, B) ->
+ {ok,lists:prefix(A, B)}.
+
+nested_stacktrace(_Config) ->
+ V = [{make_ref()}|[self()]],
+ value1 = nested_stacktrace_1({{value,{V,x1}},void,{V,x1}},
+ {void,void,void}),
+ case ?MODULE:module_info(native) of
+ false ->
+ {caught1,
+ [{erlang,'+',[V,x1],_},{?MODULE,my_add,2,_}|_],
+ value2} =
+ nested_stacktrace_1({{'add',{V,x1}},error,badarith},
+ {{value,{V,x2}},void,{V,x2}}),
+ {caught1,
+ [{erlang,'+',[V,x1],_},{?MODULE,my_add,2,_}|_],
+ {caught2,[{erlang,abs,[V],_}|_]}} =
+ nested_stacktrace_1({{'add',{V,x1}},error,badarith},
+ {{'abs',V},error,badarg});
+ true ->
+ {caught1,
+ [{?MODULE,my_add,2,_}|_],
+ value2} =
+ nested_stacktrace_1({{'add',{V,x1}},error,badarith},
+ {{value,{V,x2}},void,{V,x2}}),
+ {caught1,
+ [{?MODULE,my_add,2,_}|_],
+ {caught2,[{?MODULE,my_abs,1,_}|_]}} =
+ nested_stacktrace_1({{'add',{V,x1}},error,badarith},
+ {{'abs',V},error,badarg})
+ end,
+ ok.
+
+nested_stacktrace_1({X1,C1,V1}, {X2,C2,V2}) ->
+ try foo(X1) of
+ V1 -> value1
+ catch
+ C1:V1:S1 ->
+ S1 = erlang:get_stacktrace(),
+ T2 = try foo(X2) of
+ V2 -> value2
+ catch
+ C2:V2:S2 ->
+ S2 = erlang:get_stacktrace(),
+ {caught2,S2}
+ end,
+ {caught1,S1,T2}
+ end.
+
+raise(_Config) ->
+ test_raise(fun() -> exit({exit,tuple}) end),
+ test_raise(fun() -> abs(id(x)) end),
+ test_raise(fun() -> throw({was,thrown}) end),
+
+ badarg = bad_raise(fun() -> abs(id(x)) end),
+
+ ok.
+
+bad_raise(Expr) ->
+ try
+ Expr()
+ catch
+ _:E:Stk ->
+ erlang:raise(bad_class, E, Stk)
+ end.
+
+test_raise(Expr) ->
+ test_raise_1(Expr),
+ test_raise_2(Expr),
+ test_raise_3(Expr).
+
+test_raise_1(Expr) ->
+ erase(exception),
+ try
+ do_test_raise_1(Expr)
+ catch
+ C:E:Stk ->
+ {C,E,Stk} = erase(exception)
+ end.
+
+do_test_raise_1(Expr) ->
+ try
+ Expr()
+ catch
+ C:E:Stk ->
+ %% Here the stacktrace must be built.
+ put(exception, {C,E,Stk}),
+ erlang:raise(C, E, Stk)
+ end.
+
+test_raise_2(Expr) ->
+ erase(exception),
+ try
+ do_test_raise_2(Expr)
+ catch
+ C:E:Stk ->
+ {C,E} = erase(exception),
+ try
+ Expr()
+ catch
+ _:_:S ->
+ [StkTop|_] = S,
+ [StkTop|_] = Stk
+ end
+ end.
+
+do_test_raise_2(Expr) ->
+ try
+ Expr()
+ catch
+ C:E:Stk ->
+ %% Here it is possible to replace erlang:raise/3 with
+ %% the raw_raise/3 instruction since the stacktrace is
+ %% not actually used.
+ put(exception, {C,E}),
+ erlang:raise(C, E, Stk)
+ end.
+
+test_raise_3(Expr) ->
+ try
+ do_test_raise_3(Expr)
+ catch
+ exit:{exception,C,E}:Stk ->
+ try
+ Expr()
+ catch
+ C:E:S ->
+ [StkTop|_] = S,
+ [StkTop|_] = Stk
+ end
+ end.
+
+do_test_raise_3(Expr) ->
+ try
+ Expr()
+ catch
+ C:E:Stk ->
+ %% Here it is possible to replace erlang:raise/3 with
+ %% the raw_raise/3 instruction since the stacktrace is
+ %% not actually used.
+ erlang:raise(exit, {exception,C,E}, Stk)
+ end.
+
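A hedged restatement of the distinction drawn by the comments in do_test_raise_1/1 and do_test_raise_2/1 above (illustrative functions only, not the compiler's actual analysis): replacing erlang:raise/3 with the raw_raise instruction is only possible when the stacktrace variable bound in the catch clause is used for nothing but that erlang:raise/3 call.

    rethrow_only(Fun) ->
        try
            Fun()
        catch
            C:E:Stk ->
                %% Stk flows only into erlang:raise/3, so the
                %% stacktrace term never has to be materialized.
                erlang:raise(C, E, Stk)
        end.

    rethrow_and_log(Fun) ->
        try
            Fun()
        catch
            C:E:Stk ->
                %% Stk also escapes via io:format/2, so a real
                %% stacktrace term must be built before re-raising.
                io:format("~p~n", [Stk]),
                erlang:raise(C, E, Stk)
        end.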
id(I) -> I.
diff --git a/lib/compiler/test/z_SUITE.erl b/lib/compiler/test/z_SUITE.erl
index d864184f4c..cd95d0e733 100644
--- a/lib/compiler/test/z_SUITE.erl
+++ b/lib/compiler/test/z_SUITE.erl
@@ -54,8 +54,7 @@ do_loaded([{M,_}|Ms], E0) ->
_ = M:module_info(functions),
E0
catch
- C:Error ->
- Stk = erlang:get_stacktrace(),
+ C:Error:Stk ->
io:format("~p:~p\n~p\n", [C,Error,Stk]),
E0 + 1
end,