Diffstat (limited to 'lib/compiler')
-rw-r--r--  lib/compiler/doc/src/Makefile | 2
-rw-r--r--  lib/compiler/doc/src/compile.xml | 53
-rw-r--r--  lib/compiler/doc/src/notes.xml | 191
-rw-r--r--  lib/compiler/scripts/.gitignore | 1
-rwxr-xr-x  lib/compiler/scripts/smoke | 122
-rw-r--r--  lib/compiler/scripts/smoke-mix.exs | 95
-rw-r--r--  lib/compiler/src/Makefile | 12
-rw-r--r--  lib/compiler/src/beam_a.erl | 22
-rw-r--r--  lib/compiler/src/beam_asm.erl | 4
-rw-r--r--  lib/compiler/src/beam_block.erl | 54
-rw-r--r--  lib/compiler/src/beam_bs.erl | 183
-rw-r--r--  lib/compiler/src/beam_bsm.erl | 719
-rw-r--r--  lib/compiler/src/beam_clean.erl | 18
-rw-r--r--  lib/compiler/src/beam_dead.erl | 881
-rw-r--r--  lib/compiler/src/beam_dict.erl | 15
-rw-r--r--  lib/compiler/src/beam_disasm.erl | 27
-rw-r--r--  lib/compiler/src/beam_except.erl | 155
-rw-r--r--  lib/compiler/src/beam_flatten.erl | 73
-rw-r--r--  lib/compiler/src/beam_jump.erl | 348
-rw-r--r--  lib/compiler/src/beam_kernel_to_ssa.erl | 24
-rw-r--r--  lib/compiler/src/beam_listing.erl | 2
-rw-r--r--  lib/compiler/src/beam_peep.erl | 14
-rw-r--r--  lib/compiler/src/beam_split.erl | 90
-rw-r--r--  lib/compiler/src/beam_ssa.erl | 408
-rw-r--r--  lib/compiler/src/beam_ssa_bsm.erl | 1046
-rw-r--r--  lib/compiler/src/beam_ssa_codegen.erl | 334
-rw-r--r--  lib/compiler/src/beam_ssa_dead.erl | 1076
-rw-r--r--  lib/compiler/src/beam_ssa_funs.erl | 149
-rw-r--r--  lib/compiler/src/beam_ssa_opt.erl | 1650
-rw-r--r--  lib/compiler/src/beam_ssa_opt.hrl | 53
-rw-r--r--  lib/compiler/src/beam_ssa_pp.erl | 6
-rw-r--r--  lib/compiler/src/beam_ssa_pre_codegen.erl | 1084
-rw-r--r--  lib/compiler/src/beam_ssa_recv.erl | 39
-rw-r--r--  lib/compiler/src/beam_ssa_share.erl | 370
-rw-r--r--  lib/compiler/src/beam_ssa_type.erl | 1497
-rw-r--r--  lib/compiler/src/beam_trim.erl | 324
-rw-r--r--  lib/compiler/src/beam_utils.erl | 615
-rw-r--r--  lib/compiler/src/beam_validator.erl | 3146
-rw-r--r--  lib/compiler/src/beam_z.erl | 29
-rw-r--r--  lib/compiler/src/cerl_sets.erl | 2
-rw-r--r--  lib/compiler/src/compile.erl | 108
-rw-r--r--  lib/compiler/src/compiler.app.src | 9
-rw-r--r--  lib/compiler/src/erl_bifs.erl | 18
-rwxr-xr-x  lib/compiler/src/genop.tab | 23
-rw-r--r--  lib/compiler/src/sys_core_bsm.erl | 250
-rw-r--r--  lib/compiler/src/sys_core_dsetel.erl | 360
-rw-r--r--  lib/compiler/src/sys_core_fold.erl | 173
-rw-r--r--  lib/compiler/src/sys_core_fold_lists.erl | 101
-rw-r--r--  lib/compiler/src/sys_core_inline.erl | 3
-rw-r--r--  lib/compiler/src/v3_core.erl | 20
-rw-r--r--  lib/compiler/src/v3_kernel.erl | 123
-rw-r--r--  lib/compiler/test/Makefile | 37
-rw-r--r--  lib/compiler/test/andor_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/apply_SUITE.erl | 10
-rw-r--r--  lib/compiler/test/beam_except_SUITE.erl | 38
-rw-r--r--  lib/compiler/test/beam_jump_SUITE.erl | 142
-rw-r--r--  lib/compiler/test/beam_reorder_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/beam_ssa_SUITE.erl | 213
-rw-r--r--  lib/compiler/test/beam_type_SUITE.erl | 116
-rw-r--r--  lib/compiler/test/beam_utils_SUITE.erl | 27
-rw-r--r--  lib/compiler/test/beam_validator_SUITE.erl | 137
-rw-r--r--  lib/compiler/test/beam_validator_SUITE_data/bad_bin_match.S | 2
-rw-r--r--  lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S | 4
-rw-r--r--  lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S | 10
-rw-r--r--  lib/compiler/test/bif_SUITE.erl | 9
-rw-r--r--  lib/compiler/test/bs_bincomp_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/bs_bit_binaries_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/bs_construct_SUITE.erl | 5
-rw-r--r--  lib/compiler/test/bs_match_SUITE.erl | 216
-rw-r--r--  lib/compiler/test/bs_utf_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/compilation_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/compile_SUITE.erl | 311
-rw-r--r--  lib/compiler/test/compiler.cover | 5
-rw-r--r--  lib/compiler/test/core_alias_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/core_fold_SUITE.erl | 13
-rw-r--r--  lib/compiler/test/error_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/float_SUITE.erl | 17
-rw-r--r--  lib/compiler/test/fun_SUITE.erl | 7
-rw-r--r--  lib/compiler/test/guard_SUITE.erl | 96
-rw-r--r--  lib/compiler/test/guard_SUITE_data/guard_SUITE_tuple_size.S | 30
-rw-r--r--  lib/compiler/test/inline_SUITE.erl | 32
-rw-r--r--  lib/compiler/test/inline_SUITE_data/barnes2.erl | 2
-rw-r--r--  lib/compiler/test/map_SUITE.erl | 95
-rw-r--r--  lib/compiler/test/match_SUITE.erl | 183
-rw-r--r--  lib/compiler/test/misc_SUITE.erl | 47
-rw-r--r--  lib/compiler/test/overridden_bif_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/receive_SUITE.erl | 53
-rw-r--r--  lib/compiler/test/record_SUITE.erl | 2
-rw-r--r--  lib/compiler/test/regressions_SUITE.erl | 4
-rw-r--r--  lib/compiler/test/test_lib.erl | 63
-rw-r--r--  lib/compiler/test/trycatch_SUITE.erl | 26
-rw-r--r--  lib/compiler/test/warnings_SUITE.erl | 86
-rw-r--r--  lib/compiler/vsn.mk | 2
93 files changed, 11672 insertions, 6507 deletions
diff --git a/lib/compiler/doc/src/Makefile b/lib/compiler/doc/src/Makefile
index 661415899f..32f150eef8 100644
--- a/lib/compiler/doc/src/Makefile
+++ b/lib/compiler/doc/src/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2017. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/lib/compiler/doc/src/compile.xml b/lib/compiler/doc/src/compile.xml
index 1a71c83521..5219ba0f5d 100644
--- a/lib/compiler/doc/src/compile.xml
+++ b/lib/compiler/doc/src/compile.xml
@@ -29,7 +29,7 @@
<rev>A</rev>
<file>compile.sgml</file>
</header>
- <module>compile</module>
+ <module since="">compile</module>
<modulesummary>Erlang Compiler</modulesummary>
<description>
<p>This module provides an interface to the standard Erlang
@@ -40,7 +40,7 @@
<funcs>
<func>
- <name>env_compiler_options()</name>
+ <name since="OTP 19.0">env_compiler_options()</name>
<fsummary>
Compiler options defined via the environment variable
<c>ERL_COMPILER_OPTIONS</c>
@@ -53,7 +53,7 @@
</desc>
</func>
<func>
- <name>file(File)</name>
+ <name since="">file(File)</name>
<fsummary>Compiles a file.</fsummary>
<desc>
<p>Is the same as
@@ -63,7 +63,7 @@
</func>
<func>
- <name>file(File, Options) -> CompRet</name>
+ <name since="">file(File, Options) -> CompRet</name>
<fsummary>Compiles a file.</fsummary>
<type>
<v>CompRet = ModRet | BinRet | ErrRet</v>
@@ -203,7 +203,8 @@
<tag><c>deterministic</c></tag>
<item>
<p>Omit the <c>options</c> and <c>source</c> tuples in
- the list returned by <c>Module:module_info(compile)</c>.
+ the list returned by <c>Module:module_info(compile)</c>, and
+ reduce the paths in stack traces to the module name alone.
This option will make it easier to achieve reproducible builds.
</p>
</item>
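
As an illustrative aside (not part of the patch itself), using the deterministic option described above from code could look like the following sketch; the file name and the extra binary option are assumptions made for the example:

    %% Hypothetical usage: build without options/source metadata and with
    %% stack-trace paths reduced to the module name alone.
    {ok, _Module, _BeamBin} = compile:file("some_module.erl", [deterministic, binary]).
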
@@ -347,8 +348,8 @@ module.beam: module.erl \
<tag><c>{source,FileName}</c></tag>
<item>
- <p>Sets the value of the source, as returned by
- <c>module_info(compile)</c>.</p>
+ <p>Overrides the source file name as presented in
+ <c>module_info(compile)</c> and stack traces.</p>
</item>
<tag><c>{outdir,Dir}</c></tag>
@@ -415,6 +416,17 @@ module.beam: module.erl \
is not documented, and can change between releases.</p>
</item>
+ <tag><c>no_spawn_compiler_process</c></tag>
+ <item>
+ <p>By default, all code is compiled in a separate process
+ which is terminated at the end of compilation. However,
+ some tools, like Dialyzer or compilers for other BEAM languages,
+ may already manage their own worker processes and spawning
+ an extra process may slow the compilation down.
+ In such scenarios, you can pass this option to stop the
+ compiler from spawning an additional process.</p>
+ </item>
+
<tag><c>no_strict_record_tests</c></tag>
<item>
<p>This option is not recommended.</p>
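
As an illustrative aside (not part of the patch itself), a tool that already runs the compiler inside its own worker process could pass the no_spawn_compiler_process option described above as follows; the file name and the report option are assumptions made for the example:

    %% Hypothetical usage: compile in the calling process instead of having
    %% the compiler spawn (and terminate) a separate process.
    {ok, _Module} = compile:file("generated_mod.erl", [no_spawn_compiler_process, report]).
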
@@ -683,12 +695,13 @@ module.beam: module.erl \
</note>
<note>
- <p>The options <c>{nowarn_unused_function, FAs}</c>,
- <c>{nowarn_bif_clash, FAs}</c>, and
- <c>{nowarn_deprecated_function, MFAs}</c> are only
- recognized when given in files. They are not affected by
- options <c>warn_unused_function</c>, <c>warn_bif_clash</c>, or
- <c>warn_deprecated_function</c>.</p>
+ <p>Before OTP 22, the option <c>{nowarn_deprecated_function,
+ MFAs}</c> was only recognized when given in the file with
+ attribute <c>-compile()</c>. (The option
+ <c>{nowarn_unused_function,FAs}</c> was incorrectly documented
+ to only work in a file, but it also worked when given in the
+ option list.) Starting from OTP 22, all options that can be
+ given in the file can also be given in the option list.</p>
</note>
<p>For debugging of the compiler, or for pure curiosity,
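
As an illustrative aside (not part of the patch itself), the OTP 22 behaviour described in the note above means that a suppression option such as {nowarn_deprecated_function,MFAs} may be given in the option list as well as in a -compile() attribute; the module and function names below are hypothetical:

    %% Hypothetical usage (OTP 22 and later): suppress the deprecation warning
    %% for calls to old_mod:old_fun/1 directly from the option list.
    compile:file("caller.erl", [{nowarn_deprecated_function, [{old_mod, old_fun, 1}]}]).
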
@@ -717,7 +730,7 @@ module.beam: module.erl \
</func>
<func>
- <name>forms(Forms)</name>
+ <name since="">forms(Forms)</name>
<fsummary>Compiles a list of forms.</fsummary>
<desc>
<p>Is the same as
@@ -727,7 +740,7 @@ module.beam: module.erl \
</func>
<func>
- <name>forms(Forms, Options) -> CompRet</name>
+ <name since="">forms(Forms, Options) -> CompRet</name>
<fsummary>Compiles a list of forms.</fsummary>
<type>
<v>Forms = [Form]</v>
@@ -748,7 +761,7 @@ module.beam: module.erl \
</func>
<func>
- <name>format_error(ErrorDescriptor) -> chars()</name>
+ <name since="">format_error(ErrorDescriptor) -> chars()</name>
<fsummary>Formats an error descriptor.</fsummary>
<type>
<v>ErrorDescriptor = errordesc()</v>
@@ -763,7 +776,7 @@ module.beam: module.erl \
</func>
<func>
- <name>output_generated(Options) -> true | false</name>
+ <name since="">output_generated(Options) -> true | false</name>
<fsummary>Determines whether the compiler generates an output file.</fsummary>
<type>
<v>Options = [term()]</v>
@@ -778,7 +791,7 @@ module.beam: module.erl \
</func>
<func>
- <name>noenv_file(File, Options) -> CompRet</name>
+ <name since="">noenv_file(File, Options) -> CompRet</name>
<fsummary>Compiles a file (ignoring <c>ERL_COMPILER_OPTIONS)</c>.</fsummary>
<desc>
<p>Works like <seealso marker="#file/2">file/2</seealso>,
@@ -788,7 +801,7 @@ module.beam: module.erl \
</func>
<func>
- <name>noenv_forms(Forms, Options) -> CompRet</name>
+ <name since="">noenv_forms(Forms, Options) -> CompRet</name>
<fsummary>Compiles a list of forms (ignoring <c>ERL_COMPILER_OPTIONS)</c>.</fsummary>
<desc>
<p>Works like <seealso marker="#forms/2">forms/2</seealso>,
@@ -798,7 +811,7 @@ module.beam: module.erl \
</func>
<func>
- <name>noenv_output_generated(Options) -> true | false</name>
+ <name since="">noenv_output_generated(Options) -> true | false</name>
<fsummary>Determines whether the compiler generates an output file
(ignoring <c>ERL_COMPILER_OPTIONS)</c>.</fsummary>
<type>
diff --git a/lib/compiler/doc/src/notes.xml b/lib/compiler/doc/src/notes.xml
index 671126b73b..02e6203137 100644
--- a/lib/compiler/doc/src/notes.xml
+++ b/lib/compiler/doc/src/notes.xml
@@ -32,6 +32,181 @@
<p>This document describes the changes made to the Compiler
application.</p>
+<section><title>Compiler 7.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>An optimization that avoided allocation of a stack
+ frame for some <c>case</c> expressions was introduced in
+ OTP 21. (ERL-504/OTP-14808) It turns out that in rare
+ circumstances, this optimization is not safe. Therefore,
+ this optimization has been disabled.</p>
+ <p>A similar optimization will be included in OTP 22 in a
+ safe way.</p>
+ <p>
+ Own Id: OTP-15501 Aux Id: ERL-807, ERL-514, OTP-14808 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Compiler 7.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a rare internal consistency failure caused by a
+ bug in the <c>beam_jump</c> pass. (Thanks to Simon
+ Cornish for reporting this bug.)</p>
+ <p>
+ Own Id: OTP-15400 Aux Id: ERL-759 </p>
+ </item>
+ <item>
+ <p>The compiler could fail with an internal consistency
+ check failure when compiling code that used the
+ <c>is_function/2</c> BIF.</p>
+ <p>
+ Own Id: OTP-15435 Aux Id: ERL-778 </p>
+ </item>
+ <item>
+ <p>When an external fun was used, warnings for unused
+ variables could be suppressed.</p>
+ <p>
+ Own Id: OTP-15437 Aux Id: ERL-762 </p>
+ </item>
+ <item>
+ <p>The compiler would crash when compiling an
+ <c>after</c> block that called <c>erlang:raise/3</c> like
+ this: <c>erlang:raise(Class, Stacktrace,
+ Stacktrace)</c></p>
+ <p>
+ Own Id: OTP-15481</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>When specified, the <c>+{source,Name}</c> option will
+ now override the actual file name in stack traces,
+ instead of only affecting the return value of
+ <c>Mod:module_info()</c>.</p>
+ <p>The <c>+deterministic</c> flag will also affect stack
+ traces now, omitting all path information except the file
+ name, fixing a long-standing issue where deterministic
+ builds required deterministic paths.</p>
+ <p>
+ Own Id: OTP-15245 Aux Id: ERL-706 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Compiler 7.2.7</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a bug where incorrect code was generated
+ following a binary match guard.</p>
+ <p>
+ Own Id: OTP-15353 Aux Id: ERL-753 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Compiler 7.2.6</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>In rare circumstances, the matched out tail of a
+ binary could be the entire original binary. (There was
+ partial correction to this problem in version 7.2.5 of
+ the compiler application.)</p>
+ <p>
+ Own Id: OTP-15335 Aux Id: ERL-689, OTP-15219 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Compiler 7.2.5</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a bug that prevented certain variable-sized
+ binary comprehensions from compiling.</p>
+ <p>
+ Own Id: OTP-15186 Aux Id: ERL-665 </p>
+ </item>
+ <item>
+ <p>When compiling from Core Erlang, funs created in
+ certain expressions that were only used for their
+ side-effects were subtly broken.</p>
+ <p>
+ Own Id: OTP-15188 Aux Id: ERL-658 </p>
+ </item>
+ <item>
+ <p>There could be an internal consistency failure when a
+ <c>receive</c> was nested in a
+ <c>try</c>/<c>catch</c>.</p>
+ <p>
+ Own Id: OTP-15218 Aux Id: ERL-684 </p>
+ </item>
+ <item>
+ <p>In rare circumstances, the matched out tail of a
+ binary could be the entire original binary.</p>
+ <p>
+ Own Id: OTP-15219 Aux Id: ERL-689 </p>
+ </item>
+ <item>
+ <p>When <c>is_map_key/2</c> was used in a guard together
+ with the <c>not/1</c> or <c>or/2</c> operators, the error
+ behavior could be wrong when <c>is_map_key/2</c> was
+ passed a non-map as the second argument. </p>
+ <p>In rare circumstances, compiling code that uses
+ <c>is_map_key/2</c> could cause an internal consistency
+ check failure.</p>
+ <p>
+ Own Id: OTP-15227 Aux Id: ERL-699 </p>
+ </item>
+ <item>
+ <p>The compiler could crash when compiling a function
+ with multiple receives in multiple clauses.</p>
+ <p>
+ Own Id: OTP-15235 Aux Id: ERL-703 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Compiler 7.2.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fix a regression in OTP-15204 that removed
+ <c>.beam</c> file metadata that some external build tools
+ relied on.</p>
+ <p>
+ Own Id: OTP-15292</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Compiler 7.2.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -314,6 +489,22 @@
</section>
+<section><title>Compiler 7.1.5.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fix a regression in OTP-15204 that removed
+ <c>.beam</c> file metadata that some external build tools
+ relied on.</p>
+ <p>
+ Own Id: OTP-15292</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Compiler 7.1.5.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/compiler/scripts/.gitignore b/lib/compiler/scripts/.gitignore
new file mode 100644
index 0000000000..4e4eba766d
--- /dev/null
+++ b/lib/compiler/scripts/.gitignore
@@ -0,0 +1 @@
+/smoke-build
diff --git a/lib/compiler/scripts/smoke b/lib/compiler/scripts/smoke
new file mode 100755
index 0000000000..2429f104c0
--- /dev/null
+++ b/lib/compiler/scripts/smoke
@@ -0,0 +1,122 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+-mode(compile).
+
+main(_Args) ->
+ setup(),
+ clone_elixir(),
+ build_elixir(),
+ test_elixir(),
+ setup_mix(),
+ smoke(main),
+ smoke(rabbitmq),
+ halt(0).
+
+setup() ->
+ ScriptsDir = scripts_dir(),
+ SmokeBuildDir = filename:join(ScriptsDir, "smoke-build"),
+ _ = file:make_dir(SmokeBuildDir),
+ ok = file:set_cwd(SmokeBuildDir),
+ ok.
+
+clone_elixir() ->
+ {ok,SmokeDir} = file:get_cwd(),
+ DotGitDir = filename:join([SmokeDir,"elixir",".git"]),
+ ElixirRepo = "[email protected]:elixir-lang/elixir.git",
+ case filelib:is_dir(DotGitDir) of
+ false ->
+ cmd("git clone " ++ ElixirRepo);
+ true ->
+ GetHeadSHA1 = "cd elixir && git rev-parse --verify HEAD",
+ Before = os:cmd(GetHeadSHA1),
+ cmd("cd elixir && git pull --ff-only origin master"),
+ case os:cmd(GetHeadSHA1) of
+ Before ->
+ ok;
+ _After ->
+ %% There were some changes. Clean to force a re-build.
+ cmd("cd elixir && make clean")
+ end
+ end.
+
+build_elixir() ->
+ cmd("cd elixir && make compile").
+
+test_elixir() ->
+ cmd("cd elixir && make test_stdlib").
+
+setup_mix() ->
+ MixExsFile = filename:join(scripts_dir(), "smoke-mix.exs"),
+ {ok,MixExs} = file:read_file(MixExsFile),
+ ok = file:write_file("mix.exs", MixExs),
+
+ {ok,SmokeDir} = file:get_cwd(),
+ ElixirBin = filename:join([SmokeDir,"elixir","bin"]),
+ PATH = ElixirBin ++ ":" ++ os:getenv("PATH"),
+ os:putenv("PATH", PATH),
+ mix("local.rebar --force"),
+ ok.
+
+smoke(Set) ->
+ os:putenv("SMOKE_DEPS_SET", atom_to_list(Set)),
+ _ = file:delete("mix.lock"),
+ cmd("touch mix.exs"),
+ mix("deps.clean --all"),
+ mix("deps.get"),
+ mix("deps.compile"),
+ ok.
+
+scripts_dir() ->
+ Root = code:lib_dir(compiler),
+ filename:join(Root, "scripts").
+
+mix(Cmd) ->
+ cmd("mix " ++ Cmd).
+
+cmd(Cmd) ->
+ run("sh", ["-c",Cmd]).
+
+run(Program0, Args) ->
+ Program = case os:find_executable(Program0) of
+ Path when is_list(Path) ->
+ Path;
+ false ->
+ abort("Unable to find program: ~s\n", [Program0])
+ end,
+ Cmd = case {Program0,Args} of
+ {"sh",["-c"|ShCmd]} ->
+ ShCmd;
+ {_,_} ->
+ lists:join(" ", [Program0|Args])
+ end,
+ io:format("\n# ~s\n", [Cmd]),
+ Options = [{args,Args},binary,exit_status,stderr_to_stdout],
+ try open_port({spawn_executable,Program}, Options) of
+ Port ->
+ case run_loop(Port, <<>>) of
+ 0 ->
+ ok;
+ ExitCode ->
+ abort("*** Failed with exit code: ~p\n",
+ [ExitCode])
+ end
+ catch
+ error:_ ->
+ abort("Failed to execute ~s\n", [Program0])
+ end.
+
+run_loop(Port, Output) ->
+ receive
+ {Port,{exit_status,Status}} ->
+ Status;
+ {Port,{data,Bin}} ->
+ io:put_chars(Bin),
+ run_loop(Port, <<Output/binary,Bin/binary>>);
+ Msg ->
+ io:format("L: ~p~n", [Msg]),
+ run_loop(Port, Output)
+ end.
+
+abort(Format, Args) ->
+ io:format(Format, Args),
+ halt(1).
diff --git a/lib/compiler/scripts/smoke-mix.exs b/lib/compiler/scripts/smoke-mix.exs
new file mode 100644
index 0000000000..82ae3370fe
--- /dev/null
+++ b/lib/compiler/scripts/smoke-mix.exs
@@ -0,0 +1,95 @@
+defmodule Smoke.MixProject do
+ use Mix.Project
+
+ def project do
+ [
+ app: :smoke,
+ version: "0.1.0",
+ elixir: "~> 1.8",
+ start_permanent: Mix.env() == :prod,
+ deps: deps()
+ ]
+ end
+
+ # Run "mix help compile.app" to learn about applications.
+ def application do
+ [
+ extra_applications: [:logger]
+ ]
+ end
+
+ # Run "mix help deps" to learn about dependencies.
+ defp deps do
+ case :os.getenv('SMOKE_DEPS_SET') do
+ 'main' ->
+ [
+ {:bear, "~> 0.8.7"},
+ {:cloudi_core, "~> 1.7"},
+ {:concuerror, "~> 0.20.0"},
+ {:cowboy, "~> 2.6.1"},
+ {:ecto, "~> 3.0.6"},
+ {:ex_doc, "~> 0.19.3"},
+ {:distillery, "~> 2.0.12"},
+ {:erlydtl, "~> 0.12.1"},
+ {:gen_smtp, "~> 0.13.0"},
+ {:getopt, "~> 1.0.1"},
+ {:gettext, "~> 0.16.1"},
+ {:gpb, "~> 4.6"},
+ {:gproc, "~> 0.8.0"},
+ {:graphql, "~> 0.15.0", hex: :graphql_erl},
+ {:hackney, "~> 1.15.0"},
+ {:ibrowse, "~> 4.4.1"},
+ {:jose, "~> 1.9.0"},
+ {:lager, "~> 3.6"},
+ {:locus, "~> 1.6"},
+ {:nimble_parsec, "~> 0.5.0"},
+ {:phoenix, "~> 1.4.0"},
+ {:riak_pb, "~> 2.3"},
+ {:scalaris, git: "https://github.com/scalaris-team/scalaris",
+ compile: build_scalaris()},
+ {:tdiff, "~> 0.1.2"},
+ {:webmachine, "~> 1.11"},
+ {:wings, git: "https://github.com/dgud/wings.git",
+ compile: build_wings()},
+ {:zotonic_stdlib, "~> 1.0"},
+ ]
+ 'rabbitmq' ->
+ [{:rabbit_common, "~> 3.7"}]
+ _ ->
+ []
+ end
+ end
+
+ defp build_scalaris do
+ # Only compile the Erlang code.
+
+ """
+ echo '-include("rt_simple.hrl").' >include/rt.hrl
+ (cd src && erlc -W0 -I ../include -I ../contrib/log4erl/include -I ../contrib/yaws/include *.erl)
+ (cd src/comm_layer && erlc -W0 -I ../../include -I *.erl)
+ (cd src/cp && erlc -W0 -I ../../include -I *.erl)
+ (cd src/crdt && erlc -W0 -I ../../include -I *.erl)
+ (cd src/json && erlc -W0 -I ../../include -I *.erl)
+ (cd src/paxos && erlc -W0 -I ../../include -I *.erl)
+ (cd src/rbr && erlc -W0 -I ../../include -I *.erl)
+ (cd src/rrepair && erlc -W0 -I ../../include -I *.erl)
+ (cd src/time && erlc -W0 -I ../../include -I *.erl)
+ (cd src/transactions && erlc -W0 -I ../../include -I *.erl)
+ (cd src/tx && erlc -W0 -I ../../include -I *.erl)
+ """
+ end
+
+ defp build_wings do
+ # If the Erlang system is not installed, the build will
+ # crash in plugins_src/accel when attempting to build
+ # the accel driver. Since there is very little Erlang code in
+ # the directory, skip the entire directory.
+
+ """
+ echo "all:\n\t" >plugins_src/accel/Makefile
+ git commit -a -m'Disable for smoke testing'
+ git tag -a -m'Smoke test' vsmoke_test
+ make
+ """
+ end
+end
diff --git a/lib/compiler/src/Makefile b/lib/compiler/src/Makefile
index bd35f20442..c971e8844d 100644
--- a/lib/compiler/src/Makefile
+++ b/lib/compiler/src/Makefile
@@ -49,10 +49,7 @@ MODULES = \
beam_a \
beam_asm \
beam_block \
- beam_bs \
- beam_bsm \
beam_clean \
- beam_dead \
beam_dict \
beam_disasm \
beam_except \
@@ -61,14 +58,17 @@ MODULES = \
beam_listing \
beam_opcodes \
beam_peep \
- beam_split \
beam_ssa \
+ beam_ssa_bsm \
beam_ssa_codegen \
+ beam_ssa_dead \
+ beam_ssa_funs \
beam_ssa_lint \
beam_ssa_opt \
beam_ssa_pp \
beam_ssa_pre_codegen \
beam_ssa_recv \
+ beam_ssa_share \
beam_ssa_type \
beam_kernel_to_ssa \
beam_trim \
@@ -90,7 +90,6 @@ MODULES = \
rec_env \
sys_core_alias \
sys_core_bsm \
- sys_core_dsetel \
sys_core_fold \
sys_core_fold_lists \
sys_core_inline \
@@ -103,6 +102,7 @@ BEAM_H = $(wildcard ../priv/beam_h/*.h)
HRL_FILES= \
beam_disasm.hrl \
+ beam_ssa_opt.hrl \
beam_ssa.hrl \
core_parse.hrl \
v3_kernel.hrl
@@ -194,6 +194,7 @@ $(EBIN)/beam_listing.beam: core_parse.hrl v3_kernel.hrl beam_ssa.hrl
$(EBIN)/beam_kernel_to_ssa.beam: v3_kernel.hrl beam_ssa.hrl
$(EBIN)/beam_ssa.beam: beam_ssa.hrl
$(EBIN)/beam_ssa_codegen.beam: beam_ssa.hrl
+$(EBIN)/beam_ssa_dead.beam: beam_ssa.hrl
$(EBIN)/beam_ssa_lint.beam: beam_ssa.hrl
$(EBIN)/beam_ssa_opt.beam: beam_ssa.hrl
$(EBIN)/beam_ssa_pp.beam: beam_ssa.hrl
@@ -207,7 +208,6 @@ $(EBIN)/core_lint.beam: core_parse.hrl
$(EBIN)/core_parse.beam: core_parse.hrl $(EGEN)/core_parse.erl
$(EBIN)/core_pp.beam: core_parse.hrl
$(EBIN)/sys_core_alias.beam: core_parse.hrl
-$(EBIN)/sys_core_dsetel.beam: core_parse.hrl
$(EBIN)/sys_core_fold.beam: core_parse.hrl
$(EBIN)/sys_core_fold_lists.beam: core_parse.hrl
$(EBIN)/sys_core_inline.beam: core_parse.hrl
diff --git a/lib/compiler/src/beam_a.erl b/lib/compiler/src/beam_a.erl
index 266e8f46c8..0bccad1ecd 100644
--- a/lib/compiler/src/beam_a.erl
+++ b/lib/compiler/src/beam_a.erl
@@ -52,6 +52,16 @@ function({function,Name,Arity,CLabel,Is0}) ->
erlang:raise(Class, Error, Stack)
end.
+rename_instrs([{test,is_eq_exact,_,[Dst,Src]}=Test,
+ {move,Src,Dst}|Is]) ->
+ %% The move instruction is not needed.
+ rename_instrs([Test|Is]);
+rename_instrs([{test,is_eq_exact,_,[Same,Same]}|Is]) ->
+ %% Same literal or same register. Will always succeed.
+ rename_instrs(Is);
+rename_instrs([{loop_rec,{f,Fail},{x,0}},{loop_rec_end,_},{label,Fail}|Is]) ->
+ %% This instruction sequence does nothing.
+ rename_instrs(Is);
rename_instrs([{apply_last,A,N}|Is]) ->
[{apply,A},{deallocate,N},return|rename_instrs(Is)];
rename_instrs([{call_last,A,F,N}|Is]) ->
@@ -90,8 +100,12 @@ rename_instr({bs_put_utf16=I,F,Fl,Src}) ->
{bs_put,F,{I,Fl},[Src]};
rename_instr({bs_put_utf32=I,F,Fl,Src}) ->
{bs_put,F,{I,Fl},[Src]};
-rename_instr({bs_put_string,_,_}=I) ->
- {bs_put,{f,0},I,[]};
+rename_instr({bs_put_string,_,{string,String}}) ->
+ %% Only happens when compiling from .S files. In old
+ %% .S files, String is a list. In .S in OTP 22 and later,
+ %% String is a binary.
+ {bs_put,{f,0},{bs_put_binary,8,{field_flags,[unsigned,big]}},
+ [{atom,all},{literal,iolist_to_binary([String])}]};
rename_instr({bs_add=I,F,[Src1,Src2,U],Dst}) when is_integer(U) ->
{bif,I,F,[Src1,Src2,{integer,U}],Dst};
rename_instr({bs_utf8_size=I,F,Src,Dst}) ->
@@ -108,10 +122,6 @@ rename_instr({bs_private_append=I,F,Sz,U,Src,Flags,Dst}) ->
{bs_init,F,{I,U,Flags},none,[Sz,Src],Dst};
rename_instr(bs_init_writable=I) ->
{bs_init,{f,0},I,1,[{x,0}],{x,0}};
-rename_instr({test,Op,F,[Ctx,Bits,{string,Str}]}) ->
- %% When compiling from a .S file.
- <<Bs:Bits/bits,_/bits>> = list_to_binary(Str),
- {test,Op,F,[Ctx,Bs]};
rename_instr({put_map_assoc,Fail,S,D,R,L}) ->
{put_map,Fail,assoc,S,D,R,L};
rename_instr({put_map_exact,Fail,S,D,R,L}) ->
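
As an illustrative aside (not part of the patch itself), the first new rename_instrs/1 clause above drops a move that copies a value onto a register that the preceding is_eq_exact test has just shown to be equal to it; the registers and fail label below are hypothetical:

    %% Before the pass:
    {test,is_eq_exact,{f,5},[{x,1},{y,0}]}, {move,{y,0},{x,1}}
    %% After the pass (the move is redundant once the test has succeeded):
    {test,is_eq_exact,{f,5},[{x,1},{y,0}]}
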
diff --git a/lib/compiler/src/beam_asm.erl b/lib/compiler/src/beam_asm.erl
index df0321e85a..bc1290f6fd 100644
--- a/lib/compiler/src/beam_asm.erl
+++ b/lib/compiler/src/beam_asm.erl
@@ -424,8 +424,8 @@ encode_arg({f, W}, Dict) ->
{encode(?tag_f, W), Dict};
%% encode_arg({'char', C}, Dict) ->
%% {encode(?tag_h, C), Dict};
-encode_arg({string, String}, Dict0) ->
- {Offset, Dict} = beam_dict:string(String, Dict0),
+encode_arg({string, BinString}, Dict0) when is_binary(BinString) ->
+ {Offset, Dict} = beam_dict:string(BinString, Dict0),
{encode(?tag_u, Offset), Dict};
encode_arg({extfunc, M, F, A}, Dict0) ->
{Index, Dict} = beam_dict:import(M, F, A, Dict0),
diff --git a/lib/compiler/src/beam_block.erl b/lib/compiler/src/beam_block.erl
index c928fc7187..707974b2c1 100644
--- a/lib/compiler/src/beam_block.erl
+++ b/lib/compiler/src/beam_block.erl
@@ -22,7 +22,7 @@
-module(beam_block).
-export([module/2]).
--import(lists, [reverse/1,splitwith/2]).
+-import(lists, [keysort/2,reverse/1,splitwith/2]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
@@ -49,14 +49,12 @@ function({function,Name,Arity,CLabel,Is0}) ->
blockify(Is) ->
blockify(Is, []).
-blockify([{loop_rec,{f,Fail},{x,0}},{loop_rec_end,_Lbl},{label,Fail}|Is], Acc) ->
- %% Useless instruction sequence.
- blockify(Is, Acc);
blockify([I|Is0]=IsAll, Acc) ->
case collect(I) of
error -> blockify(Is0, [I|Acc]);
Instr when is_tuple(Instr) ->
- {Block,Is} = collect_block(IsAll),
+ {Block0,Is} = collect_block(IsAll),
+ Block = sort_moves(Block0),
blockify(Is, [{block,Block}|Acc])
end;
blockify([], Acc) -> reverse(Acc).
@@ -83,23 +81,20 @@ collect({allocate_heap,Ns,Nh,R}) -> {set,[],[],{alloc,R,{nozero,Ns,Nh,[]}}};
collect({allocate_heap_zero,Ns,Nh,R}) -> {set,[],[],{alloc,R,{zero,Ns,Nh,[]}}};
collect({init,D}) -> {set,[D],[],init};
collect({test_heap,N,R}) -> {set,[],[],{alloc,R,{nozero,nostack,N,[]}}};
-collect({bif,N,F,As,D}) -> {set,[D],As,{bif,N,F}};
-collect({gc_bif,N,F,R,As,D}) -> {set,[D],As,{alloc,R,{gc_bif,N,F}}};
+collect({bif,N,{f,0},As,D}) -> {set,[D],As,{bif,N,{f,0}}};
+collect({gc_bif,N,{f,0},R,As,D}) -> {set,[D],As,{alloc,R,{gc_bif,N,{f,0}}}};
collect({move,S,D}) -> {set,[D],[S],move};
collect({put_list,S1,S2,D}) -> {set,[D],[S1,S2],put_list};
collect({put_tuple,A,D}) -> {set,[D],[],{put_tuple,A}};
collect({put,S}) -> {set,[],[S],put};
+collect({put_tuple2,D,{list,Els}}) -> {set,[D],Els,put_tuple2};
collect({get_tuple_element,S,I,D}) -> {set,[D],[S],{get_tuple_element,I}};
collect({set_tuple_element,S,D,I}) -> {set,[],[S,D],{set_tuple_element,I}};
collect({get_hd,S,D}) -> {set,[D],[S],get_hd};
collect({get_tl,S,D}) -> {set,[D],[S],get_tl};
collect(remove_message) -> {set,[],[],remove_message};
-collect({put_map,F,Op,S,D,R,{list,Puts}}) ->
- {set,[D],[S|Puts],{alloc,R,{put_map,Op,F}}};
-collect({'catch'=Op,R,L}) ->
- {set,[R],[],{try_catch,Op,L}};
-collect({'try'=Op,R,L}) ->
- {set,[R],[],{try_catch,Op,L}};
+collect({put_map,{f,0},Op,S,D,R,{list,Puts}}) ->
+ {set,[D],[S|Puts],{alloc,R,{put_map,Op,{f,0}}}};
collect(fclearerror) -> {set,[],[],fclearerror};
collect({fcheckerror,{f,0}}) -> {set,[],[],fcheckerror};
collect({fmove,S,D}) -> {set,[D],[S],fmove};
@@ -123,3 +118,36 @@ embed_lines([{block,B1},{line,_}=Line|T], Acc) ->
embed_lines([I|Is], Acc) ->
embed_lines(Is, [I|Acc]);
embed_lines([], Acc) -> Acc.
+
+%% sort_moves([Instruction]) -> [Instruction].
+%% Sort move instructions on the Y register to give the loader
+%% more opportunities for combining instructions.
+
+sort_moves([{set,[{x,_}],[{y,_}],move}=I|Is0]) ->
+ {Moves,Is} = sort_moves_1(Is0, x, y, [I]),
+ Moves ++ sort_moves(Is);
+sort_moves([{set,[{y,_}],[{x,_}],move}=I|Is0]) ->
+ {Moves,Is} = sort_moves_1(Is0, y, x, [I]),
+ Moves ++ sort_moves(Is);
+sort_moves([I|Is]) ->
+ [I|sort_moves(Is)];
+sort_moves([]) -> [].
+
+sort_moves_1([{set,[{x,0}],[_],move}=I|Is], _DTag, _STag, Acc) ->
+ %% The loader sometimes combines a move to x0 with the
+ %% instruction that follows, producing, for example, a move_call
+ %% instruction. Therefore, we don't want include this move
+ %% instruction in the sorting.
+ {sort_on_yreg(Acc)++[I],Is};
+sort_moves_1([{set,[{DTag,_}],[{STag,_}],move}=I|Is], DTag, STag, Acc) ->
+ sort_moves_1(Is, DTag, STag, [I|Acc]);
+sort_moves_1(Is, _DTag, _STag, Acc) ->
+ {sort_on_yreg(Acc),Is}.
+
+sort_on_yreg([{set,[Dst],[Src],move}|_]=Moves) ->
+ case {Dst,Src} of
+ {{y,_},{x,_}} ->
+ keysort(2, Moves);
+ {{x,_},{y,_}} ->
+ keysort(3, Moves)
+ end.
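
As an illustrative aside (not part of the patch itself), the new sort_moves/1 pass above reorders a run of moves between x and y registers by the y register, giving the loader more chances to combine them; the register numbers below are hypothetical:

    %% Block entering sort_moves/1:
    [{set,[{y,2}],[{x,0}],move},
     {set,[{y,0}],[{x,1}],move},
     {set,[{y,1}],[{x,2}],move}]
    %% Result (keysorted on the y destination):
    [{set,[{y,0}],[{x,1}],move},
     {set,[{y,1}],[{x,2}],move},
     {set,[{y,2}],[{x,0}],move}]
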
diff --git a/lib/compiler/src/beam_bs.erl b/lib/compiler/src/beam_bs.erl
deleted file mode 100644
index 15d8d687fc..0000000000
--- a/lib/compiler/src/beam_bs.erl
+++ /dev/null
@@ -1,183 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-%% Purpose: Peephole optimization of binary syntax instructions.
-
--module(beam_bs).
-
--export([module/2]).
--import(lists, [reverse/1]).
-
--spec module(beam_utils:module_code(), [compile:option()]) ->
- {'ok',beam_utils:module_code()}.
-
-module({Mod,Exp,Attr,Fs0,Lc}, _Opt) ->
- Fs = [function(F) || F <- Fs0],
- {ok,{Mod,Exp,Attr,Fs,Lc}}.
-
-function({function,Name,Arity,CLabel,Is0}) ->
- try
- Is = bs_opt(Is0),
- {function,Name,Arity,CLabel,Is}
- catch
- Class:Error:Stack ->
- io:fwrite("Function: ~w/~w\n", [Name,Arity]),
- erlang:raise(Class, Error, Stack)
- end.
-
-%%%
-%%% Evaluate construction of constant bit fields.
-%%% Combine bs_skip_bits2 and bs_test_tail2 instructions.
-%%%
-
-bs_opt([{bs_put,_,_,_}=I|Is0]) ->
- {BsPuts0,Is} = collect_bs_puts(Is0, [I]),
- BsPuts = opt_bs_puts(BsPuts0),
- BsPuts ++ bs_opt(Is);
-bs_opt([{test,bs_skip_bits2,F,[Ctx,{integer,I},Unit,_Flags]},
- {test,bs_test_tail2,F,[Ctx,Bits]}|Is]) ->
- [{test,bs_test_tail2,F,[Ctx,Bits+I*Unit]}|bs_opt(Is)];
-bs_opt([{test,bs_skip_bits2,F,[Ctx,{integer,I1},Unit1,Flags]},
- {test,bs_skip_bits2,F,[Ctx,{integer,I2},Unit2,_]}|Is]) ->
- I = {test,bs_skip_bits2,F,
- [Ctx,{integer,I1*Unit1+I2*Unit2},1,Flags]},
- bs_opt([I|Is]);
-bs_opt([I|Is]) ->
- [I|bs_opt(Is)];
-bs_opt([]) -> [].
-
-collect_bs_puts([{bs_put,_,_,_}=I|Is], Acc) ->
- collect_bs_puts(Is, [I|Acc]);
-collect_bs_puts([_|_]=Is, Acc) ->
- {reverse(Acc),Is}.
-
-opt_bs_puts(Is) ->
- opt_bs_1(Is, []).
-
-opt_bs_1([{bs_put,Fail,
- {bs_put_float,1,Flags0},[{integer,Sz},Src]}=I0|Is], Acc) ->
- try eval_put_float(Src, Sz, Flags0) of
- <<Int:Sz>> ->
- Flags = force_big(Flags0),
- I = {bs_put,Fail,{bs_put_integer,1,Flags},
- [{integer,Sz},{integer,Int}]},
- opt_bs_1([I|Is], Acc)
- catch
- error:_ ->
- opt_bs_1(Is, [I0|Acc])
- end;
-opt_bs_1([{bs_put,_,{bs_put_integer,1,_},[{integer,8},{integer,_}]}|_]=IsAll,
- Acc0) ->
- {Is,Acc} = bs_collect_string(IsAll, Acc0),
- opt_bs_1(Is, Acc);
-opt_bs_1([{bs_put,Fail,{bs_put_integer,1,F},[{integer,Sz},{integer,N}]}=I|Is0],
- Acc) when Sz > 8 ->
- case field_endian(F) of
- big ->
- %% We can do this optimization for any field size without
- %% risk for code explosion.
- case bs_split_int(N, Sz, Fail, Is0) of
- no_split -> opt_bs_1(Is0, [I|Acc]);
- Is -> opt_bs_1(Is, Acc)
- end;
- little when Sz < 128 ->
- %% We only try to optimize relatively small fields, to
- %% avoid an explosion in code size.
- <<Int:Sz>> = <<N:Sz/little>>,
- Flags = force_big(F),
- Is = [{bs_put,Fail,{bs_put_integer,1,Flags},
- [{integer,Sz},{integer,Int}]}|Is0],
- opt_bs_1(Is, Acc);
- _ -> %native or too wide little field
- opt_bs_1(Is0, [I|Acc])
- end;
-opt_bs_1([{bs_put,Fail,{Op,U,F},[{integer,Sz},Src]}|Is], Acc) when U > 1 ->
- opt_bs_1([{bs_put,Fail,{Op,1,F},[{integer,U*Sz},Src]}|Is], Acc);
-opt_bs_1([I|Is], Acc) ->
- opt_bs_1(Is, [I|Acc]);
-opt_bs_1([], Acc) -> reverse(Acc).
-
-eval_put_float(Src, Sz, Flags) when Sz =< 256 ->
- %%Only evaluate if Sz is reasonable.
- Val = value(Src),
- case field_endian(Flags) of
- little -> <<Val:Sz/little-float-unit:1>>;
- big -> <<Val:Sz/big-float-unit:1>>
- %% native intentionally not handled here - we can't optimize
- %% it.
- end.
-
-value({integer,I}) -> I;
-value({float,F}) -> F.
-
-bs_collect_string(Is, [{bs_put,_,{bs_put_string,Len,{string,Str}},[]}|Acc]) ->
- bs_coll_str_1(Is, Len, reverse(Str), Acc);
-bs_collect_string(Is, Acc) ->
- bs_coll_str_1(Is, 0, [], Acc).
-
-bs_coll_str_1([{bs_put,_,{bs_put_integer,U,_},[{integer,Sz},{integer,V}]}|Is],
- Len, StrAcc, IsAcc) when U*Sz =:= 8 ->
- Byte = V band 16#FF,
- bs_coll_str_1(Is, Len+1, [Byte|StrAcc], IsAcc);
-bs_coll_str_1(Is, Len, StrAcc, IsAcc) ->
- {Is,[{bs_put,{f,0},{bs_put_string,Len,{string,reverse(StrAcc)}},[]}|IsAcc]}.
-
-field_endian({field_flags,F}) -> field_endian_1(F).
-
-field_endian_1([big=E|_]) -> E;
-field_endian_1([little=E|_]) -> E;
-field_endian_1([native=E|_]) -> E;
-field_endian_1([_|Fs]) -> field_endian_1(Fs).
-
-force_big({field_flags,F}) ->
- {field_flags,force_big_1(F)}.
-
-force_big_1([big|_]=Fs) -> Fs;
-force_big_1([little|Fs]) -> [big|Fs];
-force_big_1([F|Fs]) -> [F|force_big_1(Fs)].
-
-bs_split_int(0, Sz, _, _) when Sz > 64 ->
- %% We don't want to split in this case because the
- %% string will consist of only zeroes.
- no_split;
-bs_split_int(-1, Sz, _, _) when Sz > 64 ->
- %% We don't want to split in this case because the
- %% string will consist of only 255 bytes.
- no_split;
-bs_split_int(N, Sz, Fail, Acc) ->
- FirstByteSz = case Sz rem 8 of
- 0 -> 8;
- Rem -> Rem
- end,
- bs_split_int_1(N, FirstByteSz, Sz, Fail, Acc).
-
-bs_split_int_1(-1, _, Sz, Fail, Acc) when Sz > 64 ->
- I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}},
- [{integer,Sz},{integer,-1}]},
- [I|Acc];
-bs_split_int_1(0, _, Sz, Fail, Acc) when Sz > 64 ->
- I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}},
- [{integer,Sz},{integer,0}]},
- [I|Acc];
-bs_split_int_1(N, ByteSz, Sz, Fail, Acc) when Sz > 0 ->
- Mask = (1 bsl ByteSz) - 1,
- I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}},
- [{integer,ByteSz},{integer,N band Mask}]},
- bs_split_int_1(N bsr ByteSz, 8, Sz-ByteSz, Fail, [I|Acc]);
-bs_split_int_1(_, _, _, _, Acc) -> Acc.
diff --git a/lib/compiler/src/beam_bsm.erl b/lib/compiler/src/beam_bsm.erl
deleted file mode 100644
index abc6e96c85..0000000000
--- a/lib/compiler/src/beam_bsm.erl
+++ /dev/null
@@ -1,719 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2018. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-
--module(beam_bsm).
--export([module/2,format_error/1]).
-
--import(lists, [member/2,foldl/3,reverse/1,sort/1,all/2]).
-
-%%%
-%%% We optimize bit syntax matching where the tail end of a binary is
-%%% matched out and immediately passed on to a bs_start_match2 instruction,
-%%% such as in this code sequence:
-%%%
-%%% func_info ...
-%%% L1 test bs_start_match2 {f,...} {x,0} Live SavePositions {x,0}
-%%% . . .
-%%% test bs_get_binary2 {f,...} {x,0} all 1 Flags {x,0}
-%%% . . .
-%%% call_only 2 L1
-%%%
-%%% The sequence can be optimized simply by removing the bs_get_binary2
-%%% instruction. Another example:
-%%%
-%%% func_info ...
-%%% L1 test bs_start_match2 {f,...} {x,0} Live SavePositions {x,0}
-%%% . . .
-%%% test bs_get_binary2 {f,...} {x,0} all 8 Flags {x,1}
-%%% . . .
-%%% move {x,1} {x,0}
-%%% call_only 2 L1
-%%%
-%%% In this case, the bs_get_binary2 instruction must be replaced by
-%%%
-%%% test bs_unit {x,1} 8
-%%%
-%%% to ensure that the match fail if the length of the binary in bits
-%%% is not evenly divisible by 8.
-%%%
-%%% Note that the bs_start_match2 instruction doesn't need to be in the same
-%%% function as the caller. It can be in the beginning of any function, or
-%%% follow the bs_get_binary2 instruction in the same function. The important
-%%% thing is that the match context register is not copied or built into
-%%% data structures or passed to BIFs.
-%%%
-
--type label() :: beam_asm:label().
--type func_info() :: {beam_asm:reg(),boolean()}.
-
--record(btb,
- {f :: gb_trees:tree(label(), func_info()),
- index :: beam_utils:code_index(), %{Label,Code} index (for liveness).
- ok_br=gb_sets:empty() :: gb_sets:set(label()), %Labels that are OK.
- must_not_save=false :: boolean(), %Must not save position when
- % optimizing (reaches
- % bs_context_to_binary).
- must_save=false :: boolean() %Must save position when optimizing.
- }).
-
-
--spec module(beam_utils:module_code(), [compile:option()]) ->
- {'ok',beam_utils:module_code()}.
-
-module({Mod,Exp,Attr,Fs0,Lc}, Opts) ->
- FIndex = btb_index(Fs0),
- Fs = [function(F, FIndex) || F <- Fs0],
- Code = {Mod,Exp,Attr,Fs,Lc},
- case proplists:get_bool(bin_opt_info, Opts) of
- true ->
- {ok,Code,collect_warnings(Fs)};
- false ->
- {ok,Code}
- end.
-
--spec format_error('bin_opt' | {'no_bin_opt', term()}) -> nonempty_string().
-
-format_error(bin_opt) ->
- "OPTIMIZED: creation of sub binary delayed";
-format_error({no_bin_opt,Reason}) ->
- lists:flatten(["NOT OPTIMIZED: "|format_error_1(Reason)]).
-
-%%%
-%%% Local functions.
-%%%
-
-function({function,Name,Arity,Entry,Is}, FIndex) ->
- try
- Index = beam_utils:index_labels(Is),
- D = #btb{f=FIndex,index=Index},
- {function,Name,Arity,Entry,btb_opt_1(Is, D, [])}
- catch
- Class:Error:Stack ->
- io:fwrite("Function: ~w/~w\n", [Name,Arity]),
- erlang:raise(Class, Error, Stack)
- end.
-
-btb_opt_1([{test,bs_get_binary2,F,_,[Reg,{atom,all},U,Fs],Reg}=I0|Is], D, Acc0) ->
- case btb_reaches_match(Is, [Reg], D) of
- {error,Reason} ->
- Comment = btb_comment_no_opt(Reason, Fs),
- btb_opt_1(Is, D, [Comment,I0|Acc0]);
- {ok,MustSave} ->
- Comment = btb_comment_opt(Fs),
- Acc1 = btb_gen_save(MustSave, Reg, [Comment|Acc0]),
- Acc = case U of
- 1 -> Acc1;
- _ -> [{test,bs_test_unit,F,[Reg,U]}|Acc1]
- end,
- btb_opt_1(Is, D, Acc)
- end;
-btb_opt_1([{test,bs_get_binary2,F,_,[Ctx,{atom,all},U,Fs],Dst}=I0|Is0], D, Acc0) ->
- case btb_reaches_match(Is0, [Ctx,Dst], D) of
- {error,Reason} ->
- Comment = btb_comment_no_opt(Reason, Fs),
- btb_opt_1(Is0, D, [Comment,I0|Acc0]);
- {ok,MustSave} when U =:= 1 ->
- Comment = btb_comment_opt(Fs),
- Acc = btb_gen_save(MustSave, Ctx, [Comment|Acc0]),
- Is = prepend_move(Ctx, Dst, Is0),
- btb_opt_1(Is, D, Acc);
- {ok,MustSave} ->
- Comment = btb_comment_opt(Fs),
- Acc1 = btb_gen_save(MustSave, Ctx, [Comment|Acc0]),
- Acc = [{test,bs_test_unit,F,[Ctx,U]}|Acc1],
- Is = prepend_move(Ctx, Dst, Is0),
- btb_opt_1(Is, D, Acc)
- end;
-btb_opt_1([I|Is], D, Acc) ->
- %%io:format("~p\n", [I]),
- btb_opt_1(Is, D, [I|Acc]);
-btb_opt_1([], _, Acc) ->
- reverse(Acc).
-
-btb_gen_save(true, Reg, Acc) ->
- [{bs_save2,Reg,{atom,start}}|Acc];
-btb_gen_save(false, _, Acc) -> Acc.
-
-prepend_move(Ctx, Dst, [{block,Bl0}|Is]) ->
- Bl = [{set,[Dst],[Ctx],move}|Bl0],
- [{block,Bl}|Is];
-prepend_move(Ctx, Dst, Is) ->
- [{move,Ctx,Dst}|Is].
-
-%% btb_reaches_match([Instruction], [Register], D) ->
-%% {ok,MustSave}|{error,Reason}
-%%
-%% The list of Registers should be a list of registers referencing a
-%% match context. The Register may contain one element if the
-%% bs_get_binary2 instruction looks like
-%%
-%% test bs_get_binary2 {f,...} Ctx all _ _ Ctx
-%%
-%% or two elements if the instruction looks like
-%%
-%% test bs_get_binary2 {f,...} Ctx all _ _ Dst
-%%
-%% This function determines whether the bs_get_binary2 instruction
-%% can be omitted (retaining the match context instead of creating
-%% a sub binary).
-%%
-%% The rule is that the match context ultimately must end up at a
-%% bs_start_match2 instruction and nowhere else. That it, it must not
-%% be passed to BIFs, or copied or put into data structures. There
-%% must only be one copy alive when the match context reaches the
-%% bs_start_match2 instruction.
-%%
-%% At a branch, we must follow all branches and make sure that the above
-%% rule is followed (or that the branch kills the match context).
-%%
-%% The MustSave return value will be true if control may end up at
-%% bs_context_to_binary instruction. Since that instruction uses the
-%% saved start position, we must use "bs_save2 Ctx start" to
-%% update the saved start position. An additional complication is that
-%% "bs_save2 Ctx start" must not be used if Dst and Ctx are
-%% different registers and both registers may be passed to
-%% a bs_context_to_binary instruction.
-%%
-
-btb_reaches_match(Is, RegList, D) ->
- try
- Regs = btb_regs_from_list(RegList),
- #btb{must_not_save=MustNotSave,must_save=MustSave} =
- btb_reaches_match_1(Is, Regs, D),
- case MustNotSave andalso MustSave of
- true -> btb_error(must_and_must_not_save);
- false -> {ok,MustSave}
- end
- catch
- throw:{error,_}=Error -> Error
- end.
-
-btb_reaches_match_1(Is, Regs, D) ->
- case btb_are_registers_empty(Regs) of
- false ->
- btb_reaches_match_2(Is, Regs, D);
- true ->
- %% The context was killed, which is OK.
- D
- end.
-
-btb_reaches_match_2([{block,Bl}|Is], Regs0, D) ->
- Regs = btb_reaches_match_block(Bl, Regs0),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{call,Arity,{f,Lbl}}|Is], Regs0, D) ->
- case is_tail_call(Is) of
- true ->
- Regs1 = btb_kill_not_live(Arity, Regs0),
- Regs = btb_kill_yregs(Regs1),
- btb_tail_call(Lbl, Regs, D);
- false ->
- btb_call(Arity, Lbl, Regs0, Is, D)
- end;
-btb_reaches_match_2([{apply,Arity}|Is], Regs, D) ->
- btb_call(Arity+2, apply, Regs, Is, D);
-btb_reaches_match_2([{call_fun,Live}=I|Is], Regs, D) ->
- btb_ensure_not_used([{x,Live}], I, Regs),
- btb_call(Live, I, Regs, Is, D);
-btb_reaches_match_2([{make_fun2,_,_,_,Live}|Is], Regs, D) ->
- btb_call(Live, make_fun2, Regs, Is, D);
-btb_reaches_match_2([{call_ext,Arity,Func}=I|Is], Regs0, D) ->
- %% Allow us scanning beyond the call in case the match
- %% context is saved on the stack.
- case beam_jump:is_exit_instruction(I) of
- false ->
- btb_call(Arity, Func, Regs0, Is, D);
- true ->
- Regs = btb_kill_not_live(Arity, Regs0),
- btb_tail_call(Func, Regs, D)
- end;
-btb_reaches_match_2([{kill,Y}|Is], Regs, D) ->
- btb_reaches_match_1(Is, btb_kill([Y], Regs), D);
-btb_reaches_match_2([{deallocate,_}|Is], Regs0, D) ->
- Regs = btb_kill_yregs(Regs0),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([return=I|_], Regs0, D) ->
- btb_ensure_not_used([{x,0}], I, Regs0),
- D;
-btb_reaches_match_2([{gc_bif,_,{f,F},Live,Ss,Dst}=I|Is], Regs0, D0) ->
- btb_ensure_not_used(Ss, I, Regs0),
- Regs1 = btb_kill_not_live(Live, Regs0),
- Regs = btb_kill([Dst], Regs1),
- D = btb_follow_branch(F, Regs, D0),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{bif,_,{f,F},Ss,Dst}=I|Is], Regs0, D0) ->
- btb_ensure_not_used(Ss, I, Regs0),
- Regs = btb_kill([Dst], Regs0),
- D = btb_follow_branch(F, Regs, D0),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{get_map_elements,{f,F},Src,{list,Ls}}=I|Is], Regs0, D0) ->
- {Ss,Ds} = beam_utils:split_even(Ls),
- btb_ensure_not_used([Src|Ss], I, Regs0),
- Regs = btb_kill(Ds, Regs0),
- D = btb_follow_branch(F, Regs, D0),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{test,bs_start_match2,{f,F},Live,[Ctx,_],Ctx}=I|Is],
- Regs0, D0) ->
- CtxRegs = btb_context_regs(Regs0),
- case member(Ctx, CtxRegs) of
- false ->
- %% This bs_start_match2 instruction does not use "our"
- %% match state. Therefore we can continue the search
- %% for another bs_start_match2 instruction.
- D = btb_follow_branch(F, Regs0, D0),
- Regs = btb_kill_not_live(Live, Regs0),
- btb_reaches_match_2(Is, Regs, D);
- true ->
- %% OK. This instruction will use "our" match state,
- %% but we must make sure that all other copies of the
- %% match state are killed in the code that follows
- %% the instruction. (We know that the fail branch cannot
- %% be taken in this case.)
- OtherCtxRegs = CtxRegs -- [Ctx],
- case btb_are_all_unused(OtherCtxRegs, Is, D0) of
- false -> btb_error({OtherCtxRegs,not_all_unused_after,I});
- true -> D0
- end
- end;
-btb_reaches_match_2([{test,bs_start_match2,{f,F},Live,[Bin,_],Ctx}|Is],
- Regs0, D0) ->
- CtxRegs = btb_context_regs(Regs0),
- case member(Bin, CtxRegs) orelse member(Ctx, CtxRegs) of
- false ->
- %% This bs_start_match2 does not reference any copy of the
- %% match state. Therefore it can safely be passed on the
- %% way to another (perhaps more suitable) bs_start_match2
- %% instruction.
- D = btb_follow_branch(F, Regs0, D0),
- Regs = btb_kill_not_live(Live, Regs0),
- btb_reaches_match_2(Is, Regs, D);
- true ->
- %% This variant of the bs_start_match2 instruction does
- %% not accept a match state as source.
- btb_error(unsuitable_bs_start_match)
- end;
-btb_reaches_match_2([{test,_,{f,F},Ss}=I|Is], Regs, D0) ->
- btb_ensure_not_used(Ss, I, Regs),
- D1 = btb_follow_branch(F, Regs, D0),
- D = case Is of
- [{bs_context_to_binary,_}|_] ->
- %% bs_context_to_binary following a test instruction
- %% probably needs the current position to be saved as
- %% the new start position, but we can't be sure.
- %% Therefore, conservatively disable the optimization
- %% (instead of forcing a saving of the position).
- D1#btb{must_save=true,must_not_save=true};
- _ ->
- D1
- end,
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{test,_,{f,F},_,Ss,_}=I|Is], Regs, D0) ->
- btb_ensure_not_used(Ss, I, Regs),
- D = btb_follow_branch(F, Regs, D0),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{select,_,Src,{f,F},Conds}=I|Is], Regs, D0) ->
- btb_ensure_not_used([Src], I, Regs),
- D1 = btb_follow_branch(F, Regs, D0),
- D = btb_follow_branches(Conds, Regs, D1),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{jump,{f,Lbl}}|_], Regs, #btb{index=Li}=D) ->
- Is = fetch_code_at(Lbl, Li),
- btb_reaches_match_2(Is, Regs, D);
-btb_reaches_match_2([{label,_}|Is], Regs, D) ->
- btb_reaches_match_2(Is, Regs, D);
-btb_reaches_match_2([{bs_init,{f,0},_,_,Ss,Dst}=I|Is], Regs, D) ->
- btb_ensure_not_used(Ss, I, Regs),
- btb_reaches_match_1(Is, btb_kill([Dst], Regs), D);
-btb_reaches_match_2([{bs_put,{f,0},_,Ss}=I|Is], Regs, D) ->
- btb_ensure_not_used(Ss, I, Regs),
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{bs_restore2,Src,_}=I|Is], Regs0, D) ->
- case btb_contains_context(Src, Regs0) of
- false ->
- btb_reaches_match_1(Is, Regs0, D);
- true ->
- %% Check that all other copies of the context registers
- %% are unused by the following instructions.
- Regs = btb_kill([Src], Regs0),
- CtxRegs = btb_context_regs(Regs),
- case btb_are_all_unused(CtxRegs, Is, D) of
- false -> btb_error({CtxRegs,not_all_unused_after,I});
- true -> D#btb{must_not_save=true}
- end
- end;
-btb_reaches_match_2([{bs_context_to_binary,Src}=I|Is], Regs0, D) ->
- case btb_contains_context(Src, Regs0) of
- false ->
- btb_reaches_match_1(Is, Regs0, D);
- true ->
- %% Check that all other copies of the context registers
- %% are unused by the following instructions.
- Regs = btb_kill([Src], Regs0),
- CtxRegs = btb_context_regs(Regs),
- case btb_are_all_unused(CtxRegs, Is, D) of
- false -> btb_error({CtxRegs,not_all_unused_after,I});
- true -> D#btb{must_not_save=true}
- end
- end;
-btb_reaches_match_2([{badmatch,Src}=I|_], Regs, D) ->
- btb_ensure_not_used([Src], I, Regs),
- D;
-btb_reaches_match_2([{case_end,Src}=I|_], Regs, D) ->
- btb_ensure_not_used([Src], I, Regs),
- D;
-btb_reaches_match_2([if_end|_], _Regs, D) ->
- D;
-btb_reaches_match_2([{func_info,_,_,Arity}=I|_], Regs0, D) ->
- Regs = btb_kill_yregs(btb_kill_not_live(Arity, Regs0)),
- case btb_context_regs(Regs) of
- [] -> D;
- _ -> {binary_used_in,I}
- end;
-btb_reaches_match_2([{line,_}|Is], Regs, D) ->
- btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([I|_], Regs, _) ->
- btb_error({btb_context_regs(Regs),I,not_handled}).
-
-is_tail_call([{deallocate,_}|_]) -> true;
-is_tail_call([return|_]) -> true;
-is_tail_call(_) -> false.
-
-btb_call(Arity, Lbl, Regs0, Is, D0) ->
- Regs = btb_kill_not_live(Arity, Regs0),
- case btb_are_x_registers_empty(Regs) of
- false ->
- %% There is a match context in one of the x registers.
- %% First handle the call as if it were a tail call.
- D = btb_tail_call(Lbl, Regs, D0),
-
- %% No problem so far (the called function can handle a
- %% match context). Now we must make sure that we don't
- %% have any copies of the match context tucked away in an
- %% y register.
- RegList = btb_context_regs(Regs),
- case [R || {y,_}=R <- RegList] of
- [] ->
- D;
- [_|_] ->
- btb_error({multiple_uses,RegList})
- end;
- true ->
- %% No match context in any x register. It could have been
- %% saved to an y register, so continue to scan the code following
- %% the call.
- btb_reaches_match_1(Is, Regs, D0)
- end.
-
-btb_tail_call(Lbl, Regs, #btb{f=Ftree,must_save=MustSave0}=D) ->
- %% Ignore any y registers here.
- case [R || {x,_}=R <- btb_context_regs(Regs)] of
- [] ->
- D;
- [{x,_}=Reg] ->
- case gb_trees:lookup(Lbl, Ftree) of
- {value,{Reg,MustSave}} ->
- D#btb{must_save=MustSave0 or MustSave};
- _ when is_integer(Lbl) ->
- btb_error({{label,Lbl},no_suitable_bs_start_match});
- _ ->
- btb_error({binary_used_in,Lbl})
- end;
- [_|_] when not is_integer(Lbl) ->
- btb_error({binary_used_in,Lbl});
- [_|_]=RegList ->
- btb_error({multiple_uses,RegList})
- end.
-
-%% btb_follow_branches([Cond], Regs, D) -> D'
-%% Recursively follow all the branches.
-
-btb_follow_branches([{f,Lbl}|T], Regs, D0) ->
- D = btb_follow_branch(Lbl, Regs, D0),
- btb_follow_branches(T, Regs, D);
-btb_follow_branches([_|T], Regs, D) ->
- btb_follow_branches(T, Regs, D);
-btb_follow_branches([], _, D) -> D.
-
-%% btb_follow_branch(Lbl, Regs, D) -> D'
-%% Recursively follow the branch.
-
-btb_follow_branch(0, _Regs, D) -> D;
-btb_follow_branch(Lbl, Regs, #btb{ok_br=Br0,index=Li}=D) ->
- Key = {Lbl,Regs},
- case gb_sets:is_member(Key, Br0) of
- true ->
- %% We have already followed this branch and it was OK.
- D;
- false ->
- %% New branch. Try it.
- Is = fetch_code_at(Lbl, Li),
- #btb{ok_br=Br,must_not_save=MustNotSave,must_save=MustSave} =
- btb_reaches_match_1(Is, Regs, D),
-
- %% Since we got back, this branch is OK.
- D#btb{ok_br=gb_sets:insert(Key, Br),must_not_save=MustNotSave,
- must_save=MustSave}
- end.
-
-btb_reaches_match_block([{set,Ds,Ss,{alloc,Live,_}}=I|Is], Regs0) ->
- %% An allocation instruction or a GC bif. We'll kill all registers
- %% if any copy of the context is used as the source to the BIF.
- btb_ensure_not_used(Ss, I, Regs0),
- Regs1 = btb_kill_not_live(Live, Regs0),
- Regs = btb_kill(Ds, Regs1),
- btb_reaches_match_block(Is, Regs);
-btb_reaches_match_block([{set,[Dst]=Ds,[Src],move}|Is], Regs0) ->
- Regs1 = btb_kill(Ds, Regs0),
- Regs = case btb_contains_context(Src, Regs1) of
- false -> Regs1;
- true -> btb_set_context(Dst, Regs1)
- end,
- btb_reaches_match_block(Is, Regs);
-btb_reaches_match_block([{set,Ds,Ss,_}=I|Is], Regs0) ->
- btb_ensure_not_used(Ss, I, Regs0),
- Regs = btb_kill(Ds, Regs0),
- btb_reaches_match_block(Is, Regs);
-btb_reaches_match_block([], Regs) ->
- Regs.
-
-%% btb_are_all_killed([Register], [Instruction], D) -> true|false
-%% Test whether all of the register are unused in the instruction stream.
-
-btb_are_all_unused(RegList, Is, #btb{index=Li}) ->
- all(fun(R) ->
- beam_utils:is_not_used(R, Is, Li)
- end, RegList).
-
-%% btp_regs_from_list([Register]) -> RegisterSet.
-%% Create a register set from a list of registers.
-
-btb_regs_from_list(L) ->
- foldl(fun(R, Regs) ->
- btb_set_context(R, Regs)
- end, {0,0}, L).
-
-%% btb_set_context(Register, RegisterSet) -> RegisterSet'
-%% Update RegisterSet to indicate that Register contains the matching context.
-
-btb_set_context({x,N}, {Xregs,Yregs}) ->
- {Xregs bor (1 bsl N),Yregs};
-btb_set_context({y,N}, {Xregs,Yregs}) ->
- {Xregs,Yregs bor (1 bsl N)}.
-
-%% btb_ensure_not_used([Register], Instruction, RegisterSet) -> ok
-%% If any register in RegisterSet (the register(s) known to contain
-%% the match context) is used in the list of registers, generate an error.
-
-btb_ensure_not_used(Rs, I, Regs) ->
- case lists:any(fun(R) -> btb_contains_context(R, Regs) end, Rs) of
- true -> btb_error({binary_used_in,I});
- false -> ok
- end.
-
-%% btb_kill([Register], RegisterSet) -> RegisterSet'
-%% Kill all registers mentioned in the list of registers.
-
-btb_kill([{x,N}|Rs], {Xregs,Yregs}) ->
- btb_kill(Rs, {Xregs band (bnot (1 bsl N)),Yregs});
-btb_kill([{y,N}|Rs], {Xregs,Yregs}) ->
- btb_kill(Rs, {Xregs,Yregs band (bnot (1 bsl N))});
-btb_kill([{fr,_}|Rs], Regs) ->
- btb_kill(Rs, Regs);
-btb_kill([], Regs) -> Regs.
-
-%% btb_kill_not_live(Live, RegisterSet) -> RegisterSet'
-%% Kill all registers indicated not live by Live.
-
-btb_kill_not_live(Live, {Xregs,Yregs}) ->
- {Xregs band ((1 bsl Live)-1),Yregs}.
-
-%% btb_kill(Regs0) -> Regs
-%% Kill all y registers.
-
-btb_kill_yregs({Xregs,_}) -> {Xregs,0}.
-
-%% btb_are_registers_empty(RegisterSet) -> true|false
-%% Test whether the register set is empty.
-
-btb_are_registers_empty({0,0}) -> true;
-btb_are_registers_empty({_,_}) -> false.
-
-%% btb_are_x_registers_empty(Regs) -> true|false
-%% Test whether the x registers are empty.
-
-btb_are_x_registers_empty({0,_}) -> true;
-btb_are_x_registers_empty({_,_}) -> false.
-
-%% btb_contains_context(Register, RegisterSet) -> true|false
-%% Test whether Register contains the context.
-
-btb_contains_context({x,N}, {Regs,_}) -> Regs band (1 bsl N) =/= 0;
-btb_contains_context({y,N}, {_,Regs}) -> Regs band (1 bsl N) =/= 0;
-btb_contains_context(_, _) -> false.
-
-%% btb_context_regs(RegisterSet) -> [Register]
-%% Convert the register set to an explicit list of registers.
-btb_context_regs({Xregs,Yregs}) ->
- btb_context_regs_1(Xregs, 0, x, btb_context_regs_1(Yregs, 0, y, [])).
-
-btb_context_regs_1(0, _, _, Acc) ->
- Acc;
-btb_context_regs_1(Regs, N, Tag, Acc) when (Regs band 1) =:= 1 ->
- btb_context_regs_1(Regs bsr 1, N+1, Tag, [{Tag,N}|Acc]);
-btb_context_regs_1(Regs, N, Tag, Acc) ->
- btb_context_regs_1(Regs bsr 1, N+1, Tag, Acc).
-
-%% btb_index([Function]) -> GbTree({EntryLabel,{Register,MustSave}})
-%% Build an index of functions that accept a match context instead of
-%% a binary. MustSave is true if the function may pass the match
-%% context to the bs_context_to_binary instruction (in which case
-%% the current position in the binary must have saved into the
-%% start position using "bs_save_2 Ctx start").
-
-btb_index(Fs) ->
- btb_index_1(Fs, []).
-
-btb_index_1([{function,_,_,Entry,Is0}|Fs], Acc0) ->
- Is = drop_to_label(Is0, Entry),
- Acc = btb_index_2(Is, Entry, false, Acc0),
- btb_index_1(Fs, Acc);
-btb_index_1([], Acc) -> gb_trees:from_orddict(sort(Acc)).
-
-btb_index_2([{test,bs_start_match2,{f,_},_,[Reg,_],Reg}|_],
- Entry, MustSave, Acc) ->
- [{Entry,{Reg,MustSave}}|Acc];
-btb_index_2(Is0, Entry, _, Acc) ->
- try btb_index_find_start_match(Is0) of
- Is -> btb_index_2(Is, Entry, true, Acc)
- catch
- throw:none -> Acc
- end.
-
-drop_to_label([{label,L}|Is], L) -> Is;
-drop_to_label([_|Is], L) -> drop_to_label(Is, L).
-
-btb_index_find_start_match([{test,_,{f,F},_},{bs_context_to_binary,_}|Is]) ->
- btb_index_find_label(Is, F);
-btb_index_find_start_match(_) ->
- throw(none).
-
-btb_index_find_label([{label,L}|Is], L) -> Is;
-btb_index_find_label([_|Is], L) -> btb_index_find_label(Is, L).
-
-btb_error(Error) ->
- throw({error,Error}).
-
-fetch_code_at(Lbl, Li) ->
- case beam_utils:code_at(Lbl, Li) of
- Is when is_list(Is) -> Is
- end.
-
-%%%
-%%% Compilation information warnings.
-%%%
-
-btb_comment_opt({field_flags,[{anno,A}|_]}) ->
- {'%',{bin_opt,A}};
-btb_comment_opt(_) ->
- {'%',{bin_opt,[]}}.
-
-btb_comment_no_opt(Reason, {field_flags,[{anno,A}|_]}) ->
- {'%',{no_bin_opt,Reason,A}};
-btb_comment_no_opt(Reason, _) ->
- {'%',{no_bin_opt,Reason,[]}}.
-
-collect_warnings(Fs) ->
- D = warning_index_functions(Fs),
- foldl(fun(F, A) -> collect_warnings_fun(F, D, A) end, [], Fs).
-
-collect_warnings_fun({function,_,_,_,Is}, D, A) ->
- collect_warnings_instr(Is, D, A).
-
-collect_warnings_instr([{'%',{bin_opt,Where}}|Is], D, Acc0) ->
- Acc = add_warning(bin_opt, Where, Acc0),
- collect_warnings_instr(Is, D, Acc);
-collect_warnings_instr([{'%',{no_bin_opt,Reason0,Where}}|Is], D, Acc0) ->
- Reason = warning_translate_label(Reason0, D),
- Acc = add_warning({no_bin_opt,Reason}, Where, Acc0),
- collect_warnings_instr(Is, D, Acc);
-collect_warnings_instr([_|Is], D, Acc) ->
- collect_warnings_instr(Is, D, Acc);
-collect_warnings_instr([], _, Acc) -> Acc.
-
-add_warning(Term, Anno, Ws) ->
- Line = get_line(Anno),
- File = get_file(Anno),
- [{File,[{Line,?MODULE,Term}]}|Ws].
-
-warning_translate_label(Term, D) when is_tuple(Term) ->
- case element(1, Term) of
- {label,F} ->
- FA = gb_trees:get(F, D),
- setelement(1, Term, FA);
- _ -> Term
- end;
-warning_translate_label(Term, _) -> Term.
-
-get_line([Line|_]) when is_integer(Line) -> Line;
-get_line([_|T]) -> get_line(T);
-get_line([]) -> none.
-
-get_file([{file,File}|_]) -> File;
-get_file([_|T]) -> get_file(T);
-get_file([]) -> "no_file". % should not happen
-
-warning_index_functions(Fs) ->
- D = [{Entry,{F,A}} || {function,F,A,Entry,_} <- Fs],
- gb_trees:from_orddict(sort(D)).
-
-format_error_1({binary_used_in,{extfunc,M,F,A}}) ->
- [io_lib:format("sub binary used by ~p:~p/~p", [M,F,A])|
- case {M,F,A} of
- {erlang,split_binary,2} ->
- "; SUGGEST using binary matching instead of split_binary/2";
- _ ->
- ""
- end];
-format_error_1({binary_used_in,_}) ->
- "sub binary is used or returned";
-format_error_1({multiple_uses,_}) ->
- "sub binary is matched or used in more than one place";
-format_error_1(unsuitable_bs_start_match) ->
- "the binary matching instruction that follows in the same function "
- "have problems that prevent delayed sub binary optimization "
- "(probably indicated by INFO warnings)";
-format_error_1({{F,A},no_suitable_bs_start_match}) ->
- io_lib:format("called function ~p/~p does not begin with a suitable "
- "binary matching instruction", [F,A]);
-format_error_1(must_and_must_not_save) ->
- "different control paths use different positions in the binary";
-format_error_1({_,I,not_handled}) ->
- case I of
- {'catch',_,_} ->
- "the compiler currently does not attempt the delayed sub binary "
- "optimization when catch is used";
- {'try',_,_} ->
- "the compiler currently does not attempt the delayed sub binary "
- "optimization when try/catch is used";
- _ ->
- io_lib:format("compiler limitation: instruction ~p prevents "
- "delayed sub binary optimization", [I])
- end;
-format_error_1(Term) ->
- io_lib:format("~w", [Term]).
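The SUGGEST text above refers to rewriting a split_binary/2 call as a binary match. A minimal sketch (the function names are made up, and whether the delayed sub binary optimization then applies still depends on how the results are used afterwards):

    %% Equivalent ways to split a binary at byte offset N; the second
    %% form uses binary matching, which is what the warning suggests.
    with_split(Bin, N) ->
        split_binary(Bin, N).

    with_match(Bin, N) ->
        <<Prefix:N/binary,Suffix/binary>> = Bin,
        {Prefix,Suffix}.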
diff --git a/lib/compiler/src/beam_clean.erl b/lib/compiler/src/beam_clean.erl
index f5f0ac2218..7299654476 100644
--- a/lib/compiler/src/beam_clean.erl
+++ b/lib/compiler/src/beam_clean.erl
@@ -23,17 +23,15 @@
-export([module/2]).
-export([clean_labels/1]).
--import(lists, [foldl/3]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
module({Mod,Exp,Attr,Fs0,_}, Opts) ->
Order = [Lbl || {function,_,_,Lbl,_} <- Fs0],
- All = foldl(fun({function,_,_,Lbl,_}=Func,D) -> dict:store(Lbl, Func, D) end,
- dict:new(), Fs0),
+ All = maps:from_list([{Lbl,Func} || {function,_,_,Lbl,_}=Func <- Fs0]),
WorkList = rootset(Fs0, Exp, Attr),
- Used = find_all_used(WorkList, All, sets:from_list(WorkList)),
+ Used = find_all_used(WorkList, All, cerl_sets:from_list(WorkList)),
Fs1 = remove_unused(Order, Used, All),
{Fs2,Lc} = clean_labels(Fs1),
Fs = maybe_remove_lines(Fs2, Opts),
@@ -55,16 +53,16 @@ rootset(Fs, Root0, Attr) ->
%% Remove the unused functions.
remove_unused([F|Fs], Used, All) ->
- case sets:is_element(F, Used) of
+ case cerl_sets:is_element(F, Used) of
false -> remove_unused(Fs, Used, All);
- true -> [dict:fetch(F, All)|remove_unused(Fs, Used, All)]
+ true -> [map_get(F, All)|remove_unused(Fs, Used, All)]
end;
remove_unused([], _, _) -> [].
-
+
%% Find all used functions.
find_all_used([F|Fs0], All, Used0) ->
- {function,_,_,_,Code} = dict:fetch(F, All),
+ {function,_,_,_,Code} = map_get(F, All),
{Fs,Used} = update_work_list(Code, {Fs0,Used0}),
find_all_used(Fs, All, Used);
find_all_used([], _All, Used) -> Used.
@@ -78,9 +76,9 @@ update_work_list([_|Is], Sets) ->
update_work_list([], Sets) -> Sets.
add_to_work_list(F, {Fs,Used}=Sets) ->
- case sets:is_element(F, Used) of
+ case cerl_sets:is_element(F, Used) of
true -> Sets;
- false -> {[F|Fs],sets:add_element(F, Used)}
+ false -> {[F|Fs],cerl_sets:add_element(F, Used)}
end.
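The rewritten pass above keeps a map from entry label to function and a cerl_sets set of reached labels. A simplified sketch of that worklist pattern (a hypothetical function, not taken from the patch):

    %% Index maps entry labels to function tuples; Seen is a cerl_sets
    %% set of labels already visited.
    reachable([L|Ls], Index, Seen0) ->
        case cerl_sets:is_element(L, Seen0) of
            true ->
                reachable(Ls, Index, Seen0);
            false ->
                {function,_,_,_,_Code} = map_get(L, Index),
                %% A real pass would scan _Code here and add any
                %% referenced labels to the worklist.
                reachable(Ls, Index, cerl_sets:add_element(L, Seen0))
        end;
    reachable([], _Index, Seen) -> Seen.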
diff --git a/lib/compiler/src/beam_dead.erl b/lib/compiler/src/beam_dead.erl
deleted file mode 100644
index 546f0461b9..0000000000
--- a/lib/compiler/src/beam_dead.erl
+++ /dev/null
@@ -1,881 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2002-2018. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-
--module(beam_dead).
-
--export([module/2]).
-
-%%% Dead code is code that is executed but has no effect. This
-%%% optimization pass either removes dead code or jumps around it,
-%%% potentially making it unreachable and a target for the
-%%% beam_jump pass.
-
--import(lists, [mapfoldl/3,reverse/1]).
-
-
--spec module(beam_utils:module_code(), [compile:option()]) ->
- {'ok',beam_utils:module_code()}.
-
-module({Mod,Exp,Attr,Fs0,_}, _Opts) ->
- {Fs1,Lc1} = beam_clean:clean_labels(Fs0),
- {Fs,Lc} = mapfoldl(fun function/2, Lc1, Fs1),
- %%{Fs,Lc} = {Fs1,Lc1},
- {ok,{Mod,Exp,Attr,Fs,Lc}}.
-
-function({function,Name,Arity,CLabel,Is0}, Lc0) ->
- try
- Is1 = beam_jump:remove_unused_labels(Is0),
-
- %% Initialize label information with the code
- %% for the func_info label. Without it, a register
- %% may seem to be live when it is not.
- [{label,L}|FiIs] = Is1,
- D0 = beam_utils:empty_label_index(),
- D = beam_utils:index_label(L, FiIs, D0),
-
- %% Optimize away dead code.
- {Is2,Lc} = forward(Is1, Lc0),
- Is3 = backward(Is2, D),
- Is = move_move_into_block(Is3, []),
- {{function,Name,Arity,CLabel,Is},Lc}
- catch
- Class:Error:Stack ->
- io:fwrite("Function: ~w/~w\n", [Name,Arity]),
- erlang:raise(Class, Error, Stack)
- end.
-
-%% 'move' instructions outside of blocks may thwart the jump optimizer.
-%% Move them back into the block.
-
-move_move_into_block([{block,Bl0},{move,S,D}|Is], Acc) ->
- Bl = Bl0 ++ [{set,[D],[S],move}],
- move_move_into_block([{block,Bl}|Is], Acc);
-move_move_into_block([{move,S,D}|Is], Acc) ->
- Bl = [{set,[D],[S],move}],
- move_move_into_block([{block,Bl}|Is], Acc);
-move_move_into_block([I|Is], Acc) ->
- move_move_into_block(Is, [I|Acc]);
-move_move_into_block([], Acc) -> reverse(Acc).
-
-%%%
-%%% Scan instructions in execution order and remove redundant 'move'
-%%% instructions. 'move' instructions are redundant if we know that
-%%% the register already contains the value being assigned, as in the
-%%% following code:
-%%%
-%%% test is_eq_exact SomeLabel Src Dst
-%%% move Src Dst
-%%%
-%%% or in:
-%%%
-%%% select_val Register FailLabel [... Literal => L1...]
-%%% .
-%%% .
-%%% .
-%%% L1: move Literal Register
-%%%
-%%% Also add extra labels to help the second backward pass.
-%%%
-
-forward(Is, Lc) ->
- forward(Is, #{}, Lc, []).
-
-forward([{move,_,_}=Move|[{label,L}|_]=Is], D, Lc, Acc) ->
- %% move/2 followed by jump/1 is optimized by backward/3.
- forward([Move,{jump,{f,L}}|Is], D, Lc, Acc);
-forward([{bif,_,_,_,_}=Bif|[{label,L}|_]=Is], D, Lc, Acc) ->
- %% bif/4 followed by jump/1 is optimized by backward/3.
- forward([Bif,{jump,{f,L}}|Is], D, Lc, Acc);
-forward([{block,[]}|Is], D, Lc, Acc) ->
- %% Empty blocks can prevent optimizations.
- forward(Is, D, Lc, Acc);
-forward([{select,select_val,Reg,_,List}=I|Is], D0, Lc, Acc) ->
- D = update_value_dict(List, Reg, D0),
- forward(Is, D, Lc, [I|Acc]);
-forward([{label,Lbl}=LblI,{block,[{set,[Dst],[Lit],move}|BlkIs]}=Blk|Is], D, Lc, Acc) ->
- %% Assumption: The target labels in a select_val/3 instruction
- %% cannot be reached in any other way than through the select_val/3
- %% instruction (i.e. there can be no fallthrough to such label and
- %% it cannot be referenced by, for example, a jump/1 instruction).
- Key = {Lbl,Dst},
- Block = case D of
- #{Key := Lit} -> {block,BlkIs}; %Safe to remove move instruction.
- _ -> Blk %Must keep move instruction.
- end,
- forward([Block|Is], D, Lc, [LblI|Acc]);
-forward([{label,Lbl}=LblI|[{move,Lit,Dst}|Is1]=Is0], D, Lc, Acc) ->
- %% Assumption: The target labels in a select_val/3 instruction
- %% cannot be reached in any other way than through the select_val/3
- %% instruction (i.e. there can be no fallthrough to such label and
- %% it cannot be referenced by, for example, a jump/1 instruction).
- Is = case maps:find({Lbl,Dst}, D) of
- {ok,Lit} -> Is1; %Safe to remove move instruction.
- _ -> Is0 %Keep move instruction.
- end,
- forward(Is, D, Lc, [LblI|Acc]);
-forward([{test,is_eq_exact,_,[Same,Same]}|Is], D, Lc, Acc) ->
- forward(Is, D, Lc, Acc);
-forward([{test,is_eq_exact,_,[Dst,Src]}=I,
- {block,[{set,[Dst],[Src],move}|Bl]}|Is], D, Lc, Acc) ->
- forward([I,{block,Bl}|Is], D, Lc, Acc);
-forward([{test,is_eq_exact,_,[Dst,Src]}=I,{move,Src,Dst}|Is], D, Lc, Acc) ->
- forward([I|Is], D, Lc, Acc);
-forward([{test,_,_,_}=I|Is]=Is0, D, Lc, Acc) ->
- %% Help the second, backward pass by inserting labels after
- %% relational operators so that they can be skipped if they are
- %% known to be true.
- case useful_to_insert_label(Is0) of
- false -> forward(Is, D, Lc, [I|Acc]);
- true -> forward(Is, D, Lc+1, [{label,Lc},I|Acc])
- end;
-forward([I|Is], D, Lc, Acc) ->
- forward(Is, D, Lc, [I|Acc]);
-forward([], _, Lc, Acc) -> {Acc,Lc}.
-
-update_value_dict([Lit,{f,Lbl}|T], Reg, D0) ->
- Key = {Lbl,Reg},
- D = case D0 of
- #{Key := inconsistent} -> D0;
- #{Key := _} -> D0#{Key := inconsistent};
- _ -> D0#{Key => Lit}
- end,
- update_value_dict(T, Reg, D);
-update_value_dict([], _, D) -> D.
-
-useful_to_insert_label([_,{label,_}|_]) ->
- false;
-useful_to_insert_label([{test,Op,_,_}|_]) ->
- case Op of
- is_lt -> true;
- is_ge -> true;
- is_eq_exact -> true;
- is_ne_exact -> true;
- _ -> false
- end.
-
-%%%
-%%% Scan instructions in reverse execution order and try to
-%%% shortcut branch instructions.
-%%%
-%%% For example, in this code:
-%%%
-%%% move Literal Register
-%%% jump L1
-%%% .
-%%% .
-%%% .
-%%% L1: test is_{integer,atom} FailLabel Register
-%%% select_val {x,0} FailLabel [... Literal => L2...]
-%%% .
-%%% .
-%%% .
-%%% L2: ...
-%%%
-%%% the 'selectval' instruction will always transfer control to L2,
-%%% so we can just as well jump to L2 directly by rewriting the
-%%% first part of the sequence like this:
-%%%
-%%% move Literal Register
-%%% jump L2
-%%%
-%%% If register Register is killed at label L2, we can remove the
-%%% 'move' instruction, leaving just the 'jump' instruction:
-%%%
-%%% jump L2
-%%%
-%%% These transformations may leave parts of the code unreachable.
-%%% The beam_jump pass will remove the unreachable code.
-
-backward(Is, D) ->
- backward(Is, D, []).
-
-backward([{test,is_eq_exact,Fail,[Dst,{integer,Arity}]}=I|
- [{bif,tuple_size,Fail,[Reg],Dst}|Is]=Is0], D, Acc) ->
- %% Provided that Dst is killed following this sequence,
- %% we can rewrite the instructions like this:
- %%
- %% bif tuple_size Fail Reg Dst ==> is_tuple Fail Reg
- %% is_eq_exact Fail Dst Integer test_arity Fail Reg Integer
- %%
- %% (still two instructions, but they will be combined into
- %% one by the loader).
- case beam_utils:is_killed(Dst, Acc, D) andalso (Arity bsr 32) =:= 0 of
- false ->
- %% Not safe because the register Dst is not killed
- %% (probably cannot happen in practice) or the arity
- %% does not fit in 32 bits (the loader will fail to load
- %% the module). We must move the first instruction to the
- %% accumulator to avoid an infinite loop.
- backward(Is0, D, [I|Acc]);
- true ->
- %% Safe.
- backward([{test,test_arity,Fail,[Reg,Arity]},
- {test,is_tuple,Fail,[Reg]}|Is], D, Acc)
- end;
-backward([{label,Lbl}=L|Is], D, Acc) ->
- backward(Is, beam_utils:index_label(Lbl, Acc, D), [L|Acc]);
-backward([{select,select_val,Reg,{f,Fail0},List0}|Is], D, Acc) ->
- List1 = shortcut_select_list(List0, Reg, D, []),
- Fail = shortcut_label(Fail0, D),
- List = prune_redundant(List1, Fail),
- case List of
- [] ->
- Jump = {jump,{f,Fail}},
- backward([Jump|Is], D, Acc);
- [V,F] ->
- Test = {test,is_eq_exact,{f,Fail},[Reg,V]},
- Jump = {jump,F},
- backward([Jump,Test|Is], D, Acc);
- [{atom,B1},F,{atom,B2},F] when B1 =:= not B2 ->
- Test = {test,is_boolean,{f,Fail},[Reg]},
- Jump = {jump,F},
- backward([Jump,Test|Is], D, Acc);
- [_|_] ->
- Sel = {select,select_val,Reg,{f,Fail},List},
- backward(Is, D, [Sel|Acc])
- end;
-backward([{jump,{f,To0}},{move,Src,Reg}=Move|Is], D, Acc) ->
- To = shortcut_select_label(To0, Reg, Src, D),
- Jump = {jump,{f,To}},
- case is_killed_at(Reg, To, D) of
- false -> backward([Move|Is], D, [Jump|Acc]);
- true -> backward([Jump|Is], D, Acc)
- end;
-backward([{jump,{f,To}}=J|[{bif,Op,{f,BifFail},Ops,Reg}|Is]=Is0], D, Acc) ->
- try replace_comp_op(To, Reg, Op, Ops, D) of
- {Test,Jump} ->
- backward([Jump,Test|Is], D, Acc)
- catch
- throw:not_possible ->
- case To =:= BifFail of
- true ->
- %% The bif instruction is redundant. See the comment
- %% in the next clause for why there is no need to
- %% test for liveness of Reg at label To.
- backward([J|Is], D, Acc);
- false ->
- backward(Is0, D, [J|Acc])
- end
- end;
-backward([{jump,{f,To}}=J|[{gc_bif,_,{f,To},_,_,_Dst}|Is]], D, Acc) ->
- %% The gc_bif instruction is redundant, since either the gc_bif
- %% instruction itself or the jump instruction will transfer control
- %% to label To. Note that a gc_bif instruction does not assign its
- %% destination register if the failure branch is taken; therefore,
- %% the code at label To is not allowed to assume that the destination
- %% register is initialized, so there is no need to test
- %% for liveness of the destination register at label To.
- backward([J|Is], D, Acc);
-backward([{test,bs_start_match2,F,Live,[Src,_]=Args,Ctxt}|Is], D, Acc0) ->
- {f,To0} = F,
- case test_bs_literal(F, Ctxt, D, Acc0) of
- {none,Acc} ->
- %% Ctxt killed immediately after bs_start_match2.
- To = shortcut_bs_context_to_binary(To0, Src, D),
- I = {test,is_bitstr,{f,To},[Src]},
- backward(Is, D, [I|Acc]);
- {Literal,Acc} ->
- %% Ctxt killed after matching a literal.
- To = shortcut_bs_context_to_binary(To0, Src, D),
- Eq = {test,is_eq_exact,{f,To},[Src,{literal,Literal}]},
- backward(Is, D, [Eq|Acc]);
- not_killed ->
- %% Ctxt not killed. Not much to do.
- To = shortcut_bs_start_match(To0, Src, D),
- I = {test,bs_start_match2,{f,To},Live,Args,Ctxt},
- backward(Is, D, [I|Acc0])
- end;
-backward([{test,Op,{f,To0},Ops0}|Is], D, Acc) ->
- To1 = shortcut_label(To0, D),
- To2 = shortcut_rel_op(To1, Op, Ops0, D),
-
- %% Try to shortcut a repeated test:
- %%
- %% test Op {f,Fail1} Operands test Op {f,Fail2} Operands
- %% . . . ==> ...
- %% Fail1: test Op {f,Fail2} Operands Fail1: test Op {f,Fail2} Operands
- %%
- To = case beam_utils:code_at(To2, D) of
- [{test,Op,{f,To3},Ops}|_] ->
- case equal_ops(Ops0, Ops) of
- true -> To3;
- false -> To2
- end;
- _Code ->
- To2
- end,
- I = case Op of
- is_eq_exact -> combine_eqs(To, Ops0, D, Acc);
- _ -> {test,Op,{f,To},Ops0}
- end,
- case I of
- {test,_,_,_} ->
- %% Still a test instruction. Done.
- backward(Is, D, [I|Acc]);
- _ ->
- %% Rewritten to a select_val. Rescan.
- backward([I|Is], D, Acc)
- end;
-backward([{test,Op,{f,To0},Live,Ops0,Dst}|Is], D, Acc) ->
- To1 = shortcut_label(To0, D),
-
- %% Try to shortcut a repeated test:
- %%
- %% test Op {f,Fail1} _ Ops _ test Op {f,Fail2} _ Ops _
- %% . . . ==> ...
- %% Fail1: test Op {f,Fail2} _ Ops _ Fail1: test Op {f,Fail2} _ Ops _
- %%
- To = case beam_utils:code_at(To1, D) of
- [{test,Op,{f,To2},_,Ops,_}|_] ->
- case equal_ops(Ops0, Ops) of
- true -> To2;
- false -> To1
- end;
- _Code ->
- To1
- end,
- I = {test,Op,{f,To},Live,Ops0,Dst},
- backward(Is, D, [I|Acc]);
-backward([{kill,_}=I|Is], D, [{line,_},Exit|_]=Acc) ->
- case beam_jump:is_exit_instruction(Exit) of
- false -> backward(Is, D, [I|Acc]);
- true -> backward(Is, D, Acc)
- end;
-backward([{bif,'or',{f,To0},[Dst,{atom,false}],Dst}=I|Is], D,
- [{test,is_eq_exact,{f,To},[Dst,{atom,true}]}|_]=Acc) ->
- case shortcut_label(To0, D) of
- To ->
- backward(Is, D, Acc);
- _ ->
- backward(Is, D, [I|Acc])
- end;
-backward([{bif,map_get,{f,FF},[Key,Map],_}=I0,
- {test,has_map_fields,{f,FT}=F,[Map|Keys0]}=I1|Is], D, Acc) when FF =/= 0 ->
- case shortcut_label(FF, D) of
- FT ->
- case lists:delete(Key, Keys0) of
- [] ->
- backward([I0|Is], D, Acc);
- Keys ->
- Test = {test,has_map_fields,F,[Map|Keys]},
- backward([Test|Is], D, [I0|Acc])
- end;
- _ ->
- backward([I1|Is], D, [I0|Acc])
- end;
-backward([{bif,map_get,{f,FF},[_,Map],_}=I0,
- {test,is_map,{f,FT},[Map]}=I1|Is], D, Acc) when FF =/= 0 ->
- case shortcut_label(FF, D) of
- FT -> backward([I0|Is], D, Acc);
- _ -> backward([I1|Is], D, [I0|Acc])
- end;
-backward([I|Is], D, Acc) ->
- backward(Is, D, [I|Acc]);
-backward([], _D, Acc) -> Acc.
-
-equal_ops([{field_flags,FlA0}|T0], [{field_flags,FlB0}|T1]) ->
- FlA = lists:keydelete(anno, 1, FlA0),
- FlB = lists:keydelete(anno, 1, FlB0),
- FlA =:= FlB andalso equal_ops(T0, T1);
-equal_ops([Op|T0], [Op|T1]) ->
- equal_ops(T0, T1);
-equal_ops([], []) -> true;
-equal_ops(_, _) -> false.
-
-shortcut_select_list([Lit,{f,To0}|T], Reg, D, Acc) ->
- To = shortcut_select_label(To0, Reg, Lit, D),
- shortcut_select_list(T, Reg, D, [{f,To},Lit|Acc]);
-shortcut_select_list([], _, _, Acc) -> reverse(Acc).
-
-shortcut_label(0, _) ->
- 0;
-shortcut_label(To0, D) ->
- case beam_utils:code_at(To0, D) of
- [{jump,{f,To}}|_] -> shortcut_label(To, D);
- _ -> To0
- end.
-
-shortcut_select_label(To, Reg, Lit, D) ->
- shortcut_rel_op(To, is_ne_exact, [Reg,Lit], D).
-
-prune_redundant([_,{f,Fail}|T], Fail) ->
- prune_redundant(T, Fail);
-prune_redundant([V,F|T], Fail) ->
- [V,F|prune_redundant(T, Fail)];
-prune_redundant([], _) -> [].
-
-%% Replace a comparison operator with a test instruction and a jump.
-%% For example, if we have this code:
-%%
-%% bif '=:=' Fail Src1 Src2 {x,0}
-%% jump L1
-%% .
-%% .
-%% .
-%% L1: select_val {x,0} FailLabel [... true => L2..., ...false => L3...]
-%%
-%% the first two instructions can be replaced with
-%%
-%% test is_eq_exact L3 Src1 Src2
-%% jump L2
-%%
-%% provided that {x,0} is killed at both L2 and L3.
-
-replace_comp_op(To, Reg, Op, Ops, D) ->
- False = comp_op_find_shortcut(To, Reg, {atom,false}, D),
- True = comp_op_find_shortcut(To, Reg, {atom,true}, D),
- {bif_to_test(Op, Ops, False),{jump,{f,True}}}.
-
-comp_op_find_shortcut(To0, Reg, Val, D) ->
- case shortcut_select_label(To0, Reg, Val, D) of
- To0 ->
- not_possible();
- To ->
- case is_killed_at(Reg, To, D) of
- false -> not_possible();
- true -> To
- end
- end.
-
-bif_to_test(Name, Args, Fail) ->
- try
- beam_utils:bif_to_test(Name, Args, {f,Fail})
- catch
- error:_ -> not_possible()
- end.
-
-not_possible() -> throw(not_possible).
-
-%% combine_eqs(To, Operands, Acc) -> Instruction.
-%% Combine two is_eq_exact instructions or (an is_eq_exact
-%% instruction and a select_val instruction) to a select_val
-%% instruction if possible.
-%%
-%% Example:
-%%
-%% is_eq_exact F1 Reg Lit1 select_val Reg F2 [ Lit1 L1
-%% L1: . Lit2 L2 ]
-%% .
-%% . ==>
-%% .
-%% F1: is_eq_exact F2 Reg Lit2 F1: is_eq_exact F2 Reg Lit2
-%% L2: .... L2:
-%%
-combine_eqs(To, [Reg,{Type,_}=Lit1]=Ops, D, Acc)
- when Type =:= atom; Type =:= integer ->
- Next = case Acc of
- [{label,Lbl}|_] -> Lbl;
- [{jump,{f,Lbl}}|_] -> Lbl
- end,
- case beam_utils:code_at(To, D) of
- [{test,is_eq_exact,{f,F2},[Reg,{Type,_}=Lit2]},
- {label,L2}|_] when Lit1 =/= Lit2 ->
- {select,select_val,Reg,{f,F2},[Lit1,{f,Next},Lit2,{f,L2}]};
- [{test,is_eq_exact,{f,F2},[Reg,{Type,_}=Lit2]},
- {jump,{f,L2}}|_] when Lit1 =/= Lit2 ->
- {select,select_val,Reg,{f,F2},[Lit1,{f,Next},Lit2,{f,L2}]};
- [{select,select_val,Reg,{f,F2},[{Type,_}|_]=List0}|_] ->
- List = remove_from_list(Lit1, List0),
- {select,select_val,Reg,{f,F2},[Lit1,{f,Next}|List]};
- _Is ->
- {test,is_eq_exact,{f,To},Ops}
- end;
-combine_eqs(To, Ops, _D, _Acc) ->
- {test,is_eq_exact,{f,To},Ops}.
-
-remove_from_list(Lit, [Lit,{f,_}|T]) ->
- T;
-remove_from_list(Lit, [Val,{f,_}=Fail|T]) ->
- [Val,Fail|remove_from_list(Lit, T)];
-remove_from_list(_, []) -> [].
-
-
-test_bs_literal(F, Ctxt, D,
- [{test,bs_match_string,F,[Ctxt,Bs]},
- {test,bs_test_tail2,F,[Ctxt,0]}|Acc]) ->
- test_bs_literal_1(Ctxt, Acc, D, Bs);
-test_bs_literal(F, Ctxt, D, [{test,bs_test_tail2,F,[Ctxt,0]}|Acc]) ->
- test_bs_literal_1(Ctxt, Acc, D, <<>>);
-test_bs_literal(_, Ctxt, D, Acc) ->
- test_bs_literal_1(Ctxt, Acc, D, none).
-
-test_bs_literal_1(Ctxt, Is, D, Literal) ->
- case beam_utils:is_killed(Ctxt, Is, D) of
- true -> {Literal,Is};
- false -> not_killed
- end.
-
-%% shortcut_bs_start_match(TargetLabel, Reg) -> TargetLabel
-%% A failing bs_start_match2 instruction means that the source (Reg)
-%% cannot be a binary. That means that it is safe to skip
-%% bs_context_to_binary instructions operating on Reg, and
-%% bs_start_match2 instructions operating on Reg.
-
-shortcut_bs_start_match(To, Reg, D) ->
- shortcut_bs_start_match_1(beam_utils:code_at(To, D), Reg, To, D).
-
-shortcut_bs_start_match_1([{bs_context_to_binary,Reg}|Is], Reg, To, D) ->
- shortcut_bs_start_match_1(Is, Reg, To, D);
-shortcut_bs_start_match_1([{jump,{f,To}}|_], Reg, _, D) ->
- Code = beam_utils:code_at(To, D),
- shortcut_bs_start_match_1(Code, Reg, To, D);
-shortcut_bs_start_match_1([{test,bs_start_match2,{f,To},_,[Reg|_],_}|_],
- Reg, _, D) ->
- Code = beam_utils:code_at(To, D),
- shortcut_bs_start_match_1(Code, Reg, To, D);
-shortcut_bs_start_match_1(_, _, To, _) ->
- To.
-
-%% shortcut_bs_context_to_binary(TargetLabel, Reg) -> TargetLabel
-%% If a bs_start_match2 instruction has been eliminated, the
-%% bs_context_to_binary instruction can be eliminated too.
-
-shortcut_bs_context_to_binary(To, Reg, D) ->
- shortcut_bs_ctb_1(beam_utils:code_at(To, D), Reg, To, D).
-
-shortcut_bs_ctb_1([{bs_context_to_binary,Reg}|Is], Reg, To, D) ->
- shortcut_bs_ctb_1(Is, Reg, To, D);
-shortcut_bs_ctb_1([{jump,{f,To}}|_], Reg, _, D) ->
- Code = beam_utils:code_at(To, D),
- shortcut_bs_ctb_1(Code, Reg, To, D);
-shortcut_bs_ctb_1(_, _, To, _) ->
- To.
-
-%% shortcut_rel_op(FailLabel, Operator, [Operand], D) -> FailLabel'
-%% Try to shortcut the given test instruction. Example:
-%%
-%% is_ge L1 {x,0} 48
-%% .
-%% .
-%% .
-%% L1: is_ge L2 {x,0} 65
-%%
-%% The first test instruction can be rewritten to "is_ge L2 {x,0} 48"
-%% since the instruction at L1 will also fail.
-%%
-%% If there are instructions between L1 and the other test instruction
-%% it may still be possible to do the shortcut. For example:
-%%
-%% L1: is_eq_exact L3 {x,0} 92
-%% is_ge L2 {x,0} 65
-%%
-%% Since the first test instruction failed, we know that {x,0} must
-%% be less than 48; therefore, we know that {x,0} cannot be equal to
-%% 92 and the jump to L3 cannot happen.
-
-shortcut_rel_op(To, Op, Ops, D) ->
- case normalize_op({test,Op,{f,To},Ops}) of
- {{NormOp,A,B},_} ->
- Normalized = {negate_op(NormOp),A,B},
- shortcut_rel_op_fp(To, Normalized, D);
- {_,_} ->
- To;
- error ->
- To
- end.
-
-shortcut_rel_op_fp(To0, Normalized, D) ->
- Code = beam_utils:code_at(To0, D),
- case shortcut_any_label(Code, Normalized) of
- error ->
- To0;
- To ->
- shortcut_rel_op_fp(To, Normalized, D)
- end.
-
-%% shortcut_any_label([Instruction], PrevCondition) -> FailLabel | error
-%% Using PrevCondition (a previous condition known to be true),
-%% try to shortcut to another failure label.
-
-shortcut_any_label([{jump,{f,Lbl}}|_], _Prev) ->
- Lbl;
-shortcut_any_label([{label,Lbl}|_], _Prev) ->
- Lbl;
-shortcut_any_label([{select,select_val,R,{f,Fail},L}|_], Prev) ->
- shortcut_selectval(L, R, Fail, Prev);
-shortcut_any_label([I|Is], Prev) ->
- case normalize_op(I) of
- error ->
- error;
- {Normalized,Fail} ->
- %% We have a relational operator.
- case will_succeed(Prev, Normalized) of
- no ->
- %% This test instruction will always branch
- %% to Fail.
- Fail;
- yes ->
- %% This test instruction will never branch,
- %% so we will look at the next instruction.
- shortcut_any_label(Is, Prev);
- maybe ->
- %% May or may not branch. From now on, we can only
- %% shortcut to this specific failure label
- %% Fail.
- shortcut_specific_label(Is, Fail, Prev)
- end
- end.
-
-%% shortcut_specific_label([Instruction], FailLabel, PrevCondition) ->
-%% FailLabel | error
-%% We have previously encountered a test instruction that may or
-%% may not branch to FailLabel. Therefore we are only allowed
-%% to do the shortcut to the same fail label (FailLabel).
-
-shortcut_specific_label([{label,_}|Is], Fail, Prev) ->
- shortcut_specific_label(Is, Fail, Prev);
-shortcut_specific_label([{select,select_val,R,{f,F},L}|_], Fail, Prev) ->
- case shortcut_selectval(L, R, F, Prev) of
- Fail -> Fail;
- _ -> error
- end;
-shortcut_specific_label([I|Is], Fail, Prev) ->
- case normalize_op(I) of
- error ->
- error;
- {Normalized,Fail} ->
- case will_succeed(Prev, Normalized) of
- no ->
- %% Will branch to FailLabel.
- Fail;
- yes ->
- %% Will definitely never branch.
- shortcut_specific_label(Is, Fail, Prev);
- maybe ->
- %% May branch, but still OK since it will branch
- %% to FailLabel.
- shortcut_specific_label(Is, Fail, Prev)
- end;
- {Normalized,_} ->
- %% This test instruction will branch to a different
- %% fail label, if it branches at all.
- case will_succeed(Prev, Normalized) of
- yes ->
- %% Still OK, since the branch will never be
- %% taken.
- shortcut_specific_label(Is, Fail, Prev);
- no ->
- %% Give up. The branch will definitely be taken
- %% to a different fail label.
- error;
- maybe ->
- %% Give up. If the branch is taken, it will be
- %% to a different fail label.
- error
- end
- end.
-
-
-%% shortcut_selectval(List, Reg, Fail, PrevCond) -> FailLabel | error
-%% Try to shortcut a selectval instruction. A selectval instruction
-%% is equivalent to the following instruction sequence:
-%%
-%% is_ne_exact L1 Reg Value1
-%% .
-%% .
-%% .
-%% is_ne_exact LN Reg ValueN
-%% jump DefaultFailLabel
-%%
-shortcut_selectval([Val,{f,Lbl}|T], R, Fail, Prev) ->
- case will_succeed(Prev, {'=/=',R,get_literal(Val)}) of
- yes -> shortcut_selectval(T, R, Fail, Prev);
- no -> Lbl;
- maybe -> error
- end;
-shortcut_selectval([], _, Fail, _) -> Fail.
-
-%% will_succeed(PrevCondition, Condition) -> yes | no | maybe
-%% PrevCondition is a condition known to be true. This function
-%% will tell whether Condition will succeed.
-
-will_succeed({Op1,Reg,A}, {Op2,Reg,B}) ->
- will_succeed_1(Op1, A, Op2, B);
-will_succeed({'=:=',Reg,{literal,A}}, {TypeTest,Reg}) ->
- case erlang:TypeTest(A) of
- false -> no;
- true -> yes
- end;
-will_succeed({_,_,_}, maybe) ->
- maybe;
-will_succeed({_,_,_}, Test) when is_tuple(Test) ->
- maybe.
-
-will_succeed_1('=:=', A, '<', B) ->
- if
- B =< A -> no;
- true -> yes
- end;
-will_succeed_1('=:=', A, '=<', B) ->
- if
- B < A -> no;
- true -> yes
- end;
-will_succeed_1('=:=', A, '=:=', B) ->
- if
- A =:= B -> yes;
- true -> no
- end;
-will_succeed_1('=:=', A, '=/=', B) ->
- if
- A =:= B -> no;
- true -> yes
- end;
-will_succeed_1('=:=', A, '>=', B) ->
- if
- B > A -> no;
- true -> yes
- end;
-will_succeed_1('=:=', A, '>', B) ->
- if
- B >= A -> no;
- true -> yes
- end;
-
-will_succeed_1('=/=', A, '=/=', B) when A =:= B -> yes;
-will_succeed_1('=/=', A, '=:=', B) when A =:= B -> no;
-
-will_succeed_1('<', A, '=:=', B) when B >= A -> no;
-will_succeed_1('<', A, '=/=', B) when B >= A -> yes;
-will_succeed_1('<', A, '<', B) when B >= A -> yes;
-will_succeed_1('<', A, '=<', B) when B > A -> yes;
-will_succeed_1('<', A, '>=', B) when B > A -> no;
-will_succeed_1('<', A, '>', B) when B >= A -> no;
-
-will_succeed_1('=<', A, '=:=', B) when B > A -> no;
-will_succeed_1('=<', A, '=/=', B) when B > A -> yes;
-will_succeed_1('=<', A, '<', B) when B > A -> yes;
-will_succeed_1('=<', A, '=<', B) when B >= A -> yes;
-will_succeed_1('=<', A, '>=', B) when B > A -> no;
-will_succeed_1('=<', A, '>', B) when B >= A -> no;
-
-will_succeed_1('>=', A, '=:=', B) when B < A -> no;
-will_succeed_1('>=', A, '=/=', B) when B < A -> yes;
-will_succeed_1('>=', A, '<', B) when B =< A -> no;
-will_succeed_1('>=', A, '=<', B) when B < A -> no;
-will_succeed_1('>=', A, '>=', B) when B =< A -> yes;
-will_succeed_1('>=', A, '>', B) when B < A -> yes;
-
-will_succeed_1('>', A, '=:=', B) when B =< A -> no;
-will_succeed_1('>', A, '=/=', B) when B =< A -> yes;
-will_succeed_1('>', A, '<', B) when B =< A -> no;
-will_succeed_1('>', A, '=<', B) when B < A -> no;
-will_succeed_1('>', A, '>=', B) when B =< A -> yes;
-will_succeed_1('>', A, '>', B) when B < A -> yes;
-
-will_succeed_1(_, _, _, _) -> maybe.
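A worked reading of the clauses above, with hypothetical operands: once {x,0} < 48 is known to hold, some later comparisons are already decided.

    %% will_succeed({'<',{x,0},{literal,48}}, {'=:=',{x,0},{literal,92}})
    %%     -> no      (a value below 48 cannot be exactly 92)
    %% will_succeed({'<',{x,0},{literal,48}}, {'=/=',{x,0},{literal,92}})
    %%     -> yes     (it is certain to differ from 92)
    %% will_succeed({'<',{x,0},{literal,48}}, {'>=',{x,0},{literal,40}})
    %%     -> maybe   (both outcomes are still possible)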
-
-%% normalize_op(Instruction) -> {Normalized,FailLabel} | error
-%% Normalized = {Operator,Register,Literal} |
-%% {TypeTest,Register} |
-%% maybe
-%% Operation = '<' | '=<' | '=:=' | '=/=' | '>=' | '>'
-%% TypeTest = is_atom | is_integer ...
-%% Literal = {literal,Term}
-%%
-%% Normalize a relational operator to facilitate further
-%% comparisons between operators. Always make the register
-%% operand the first operand. Thus the following instruction:
-%%
-%% {test,is_ge,{f,99},{integer,13},{x,0}}
-%%
-%% will be normalized to:
-%%
-%% {'=<',{x,0},{literal,13}}
-%%
-%% NOTE: Bit syntax test instructions are scary. They may change the
-%% state of match contexts and update registers, so we don't dare
-%% mess with them.
-
-normalize_op({test,is_ge,{f,Fail},Ops}) ->
- normalize_op_1('>=', Ops, Fail);
-normalize_op({test,is_lt,{f,Fail},Ops}) ->
- normalize_op_1('<', Ops, Fail);
-normalize_op({test,is_eq_exact,{f,Fail},Ops}) ->
- normalize_op_1('=:=', Ops, Fail);
-normalize_op({test,is_ne_exact,{f,Fail},Ops}) ->
- normalize_op_1('=/=', Ops, Fail);
-normalize_op({test,Op,{f,Fail},[R]}) ->
- case erl_internal:new_type_test(Op, 1) of
- true -> {{Op,R},Fail};
- false -> {maybe,Fail}
- end;
-normalize_op({test,_,{f,Fail},_}=I) ->
- case beam_utils:is_pure_test(I) of
- true -> {maybe,Fail};
- false -> error
- end;
-normalize_op(_) ->
- error.
-
-normalize_op_1(Op, [Op1,Op2], Fail) ->
- case {get_literal(Op1),get_literal(Op2)} of
- {error,error} ->
- %% Both operands are registers.
- {maybe,Fail};
- {error,Lit} ->
- {{Op,Op1,Lit},Fail};
- {Lit,error} ->
- {{turn_op(Op),Op2,Lit},Fail};
- {_,_} ->
- %% Both operands are literals. Can probably only
- %% happen if the Core Erlang optimization passes were
- %% turned off, so don't bother trying to do something
- %% smart here.
- {maybe,Fail}
- end.
-
-turn_op('<') -> '>';
-turn_op('>=') -> '=<';
-turn_op('=:='=Op) -> Op;
-turn_op('=/='=Op) -> Op.
-
-negate_op('>=') -> '<';
-negate_op('<') -> '>=';
-negate_op('=<') -> '>';
-negate_op('>') -> '=<';
-negate_op('=:=') -> '=/=';
-negate_op('=/=') -> '=:='.
-
-get_literal({atom,Val}) ->
- {literal,Val};
-get_literal({integer,Val}) ->
- {literal,Val};
-get_literal({float,Val}) ->
- {literal,Val};
-get_literal(nil) ->
- {literal,[]};
-get_literal({literal,_}=Lit) ->
- Lit;
-get_literal({_,_}) -> error.
-
-
-%%%
-%%% Removing stores to Y registers is not always safe
-%%% if there is an instruction that causes an exception
-%%% within a catch. In practice, there are few or no
-%%% opportunities for removing stores to Y registers anyway
-%%% if sys_core_fold has been run.
-%%%
-
-is_killed_at({x,_}=Reg, Lbl, D) ->
- beam_utils:is_killed_at(Reg, Lbl, D);
-is_killed_at({y,_}, _, _) ->
- false.
diff --git a/lib/compiler/src/beam_dict.erl b/lib/compiler/src/beam_dict.erl
index 990e86062a..b2056332e6 100644
--- a/lib/compiler/src/beam_dict.erl
+++ b/lib/compiler/src/beam_dict.erl
@@ -126,18 +126,17 @@ import(Mod0, Name0, Arity, #asm{imports=Imp0,next_import=NextIndex}=D0)
{NextIndex,D2#asm{imports=Imp,next_import=NextIndex+1}}
end.
-%% Returns the index for a string in the string table (adding the string to the
-%% table if necessary).
+%% Returns the index for a binary string in the string table (adding
+%% the string to the table if necessary).
%% string(String, Dict) -> {Offset, Dict'}
--spec string(string(), bdict()) -> {non_neg_integer(), bdict()}.
+-spec string(binary(), bdict()) -> {non_neg_integer(), bdict()}.
-string(Str, Dict) when is_list(Str) ->
+string(BinString, Dict) when is_binary(BinString) ->
#asm{strings=Strings,string_offset=NextOffset} = Dict,
- StrBin = list_to_binary(Str),
- case old_string(StrBin, Strings) of
+ case old_string(BinString, Strings) of
none ->
- NewDict = Dict#asm{strings = <<Strings/binary,StrBin/binary>>,
- string_offset=NextOffset+byte_size(StrBin)},
+ NewDict = Dict#asm{strings = <<Strings/binary,BinString/binary>>,
+ string_offset=NextOffset+byte_size(BinString)},
{NextOffset,NewDict};
Offset when is_integer(Offset) ->
{NextOffset-Offset,Dict}
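With the change above, callers of beam_dict:string/2 must now pass the string as a binary. A usage sketch (the variable names are made up; beam_dict:new/0 is the existing constructor used by the assembler):

    Dict0 = beam_dict:new(),
    {_Offset,_Dict} = beam_dict:string(<<"abc">>, Dict0).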
diff --git a/lib/compiler/src/beam_disasm.erl b/lib/compiler/src/beam_disasm.erl
index 6cee9acae4..7d048716e4 100644
--- a/lib/compiler/src/beam_disasm.erl
+++ b/lib/compiler/src/beam_disasm.erl
@@ -373,6 +373,8 @@ disasm_instr(B, Bs, Atoms, Literals) ->
disasm_map_inst(get_map_elements, Arity, Bs, Atoms, Literals);
has_map_fields ->
disasm_map_inst(has_map_fields, Arity, Bs, Atoms, Literals);
+ put_tuple2 ->
+ disasm_put_tuple2(Bs, Atoms, Literals);
_ ->
try decode_n_args(Arity, Bs, Atoms, Literals) of
{Args, RestBs} ->
@@ -413,6 +415,14 @@ disasm_map_inst(Inst, Arity, Bs0, Atoms, Literals) ->
{List, RestBs} = decode_n_args(Len, Bs2, Atoms, Literals),
{{Inst, Args ++ [{Z,U,List}]}, RestBs}.
+disasm_put_tuple2(Bs, Atoms, Literals) ->
+ {X, Bs1} = decode_arg(Bs, Atoms, Literals),
+ {Z, Bs2} = decode_arg(Bs1, Atoms, Literals),
+ {U, Bs3} = decode_arg(Bs2, Atoms, Literals),
+ {u, Len} = U,
+ {List, RestBs} = decode_n_args(Len, Bs3, Atoms, Literals),
+ {{put_tuple2, [X,{Z,U,List}]}, RestBs}.
+
%%-----------------------------------------------------------------------
%% decode_arg([Byte]) -> {Arg, [Byte]}
%%
@@ -1095,6 +1105,23 @@ resolve_inst({get_hd,[Src,Dst]},_,_,_) ->
resolve_inst({get_tl,[Src,Dst]},_,_,_) ->
{get_tl,Src,Dst};
+%% OTP 22
+resolve_inst({bs_start_match3,[Fail,Bin,Live,Dst]},_,_,_) ->
+ {bs_start_match3,Fail,Bin,Live,Dst};
+resolve_inst({bs_get_tail,[Src,Dst,Live]},_,_,_) ->
+ {bs_get_tail,Src,Dst,Live};
+resolve_inst({bs_get_position,[Src,Dst,Live]},_,_,_) ->
+ {bs_get_position,Src,Dst,Live};
+resolve_inst({bs_set_position,[Src,Dst]},_,_,_) ->
+ {bs_set_position,Src,Dst};
+
+%%
+%% OTP 22.
+%%
+resolve_inst({put_tuple2,[Dst,{{z,1},{u,_},List0}]},_,_,_) ->
+ List = resolve_args(List0),
+ {put_tuple2,Dst,{list,List}};
+
%%
%% Catches instructions that are not yet handled.
%%
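The resolved form produced above for put_tuple2 carries the destination register and an explicit element list. A hypothetical helper (not part of the patch) showing that shape:

    %% Accepts terms such as {put_tuple2,{x,0},{list,[{atom,ok},{x,1}]}}.
    put_tuple2_parts({put_tuple2,Dst,{list,Els}}) ->
        {Dst,Els}.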
diff --git a/lib/compiler/src/beam_except.erl b/lib/compiler/src/beam_except.erl
index 366ec6cd44..28c89782c9 100644
--- a/lib/compiler/src/beam_except.erl
+++ b/lib/compiler/src/beam_except.erl
@@ -31,7 +31,7 @@
%%% erlang:error(function_clause, Args) => jump FuncInfoLabel
%%%
--import(lists, [reverse/1,seq/2]).
+-import(lists, [reverse/1,reverse/2,seq/2,splitwith/2]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
@@ -53,7 +53,7 @@ function({function,Name,Arity,CLabel,Is0}) ->
-record(st,
{lbl :: beam_asm:label(), %func_info label
loc :: [_], %location for func_info
- arity :: arity() %arity for function
+ arity :: arity() %arity for function
}).
function_1(Is0) ->
@@ -74,27 +74,33 @@ translate([I|Is], St, Acc) ->
translate([], _, Acc) ->
reverse(Acc).
-translate_1(Ar, I, Is, St, [{line,_}=Line|Acc1]=Acc0) ->
- case dig_out(Ar, Acc1) of
+translate_1(Ar, I, Is, #st{arity=Arity}=St, [{line,_}=Line|Acc1]=Acc0) ->
+ case dig_out(Ar, Arity, Acc1) of
no ->
translate(Is, St, [I|Acc0]);
- {yes,{function_clause,Arity},Acc2} ->
- case {Line,St} of
- {{line,Loc},#st{lbl=Fi,loc=Loc,arity=Arity}} ->
+ {yes,function_clause,Acc2} ->
+ case {Is,Line,St} of
+ {[return|_],{line,Loc},#st{lbl=Fi,loc=Loc}} ->
Instr = {jump,{f,Fi}},
translate(Is, St, [Instr|Acc2]);
- {_,_} ->
- %% This must be "error(function_clause, Args)" in
- %% the Erlang source code or a fun. Don't translate.
+ {_,_,_} ->
+ %% Not a call_only instruction, or not the same
+ %% location information as in the line instruction
+ %% before the func_info instruction. Not safe
+ %% to translate to a jump.
translate(Is, St, [I|Acc0])
end;
{yes,Instr,Acc2} ->
translate(Is, St, [Instr,Line|Acc2])
end.
-dig_out(Ar, [{kill,_}|Is]) ->
- dig_out(Ar, Is);
-dig_out(1, [{block,Bl0}|Is]) ->
+dig_out(1, _Arity, Is) ->
+ dig_out(Is);
+dig_out(2, Arity, Is) ->
+ dig_out_fc(Arity, Is);
+dig_out(_, _, _) -> no.
+
+dig_out([{block,Bl0}|Is]) ->
case dig_out_block(reverse(Bl0)) of
no -> no;
{yes,What,[]} ->
@@ -102,25 +108,13 @@ dig_out(1, [{block,Bl0}|Is]) ->
{yes,What,Bl} ->
{yes,What,[{block,Bl}|Is]}
end;
-dig_out(2, [{block,Bl}|Is]) ->
- case dig_out_block_fc(Bl) of
- no -> no;
- {yes,What} -> {yes,What,Is}
- end;
-dig_out(_, _) -> no.
+dig_out(_) -> no.
dig_out_block([{set,[{x,0}],[{atom,if_clause}],move}]) ->
{yes,if_end,[]};
dig_out_block([{set,[{x,0}],[{literal,{Exc,Value}}],move}|Is]) ->
translate_exception(Exc, {literal,Value}, Is, 0);
-dig_out_block([{set,[{x,0}],[Tuple],move},
- {set,[],[Value],put},
- {set,[],[{atom,Exc}],put},
- {set,[Tuple],[],{put_tuple,2}}|Is]) ->
- translate_exception(Exc, Value, Is, 3);
-dig_out_block([{set,[],[Value],put},
- {set,[],[{atom,Exc}],put},
- {set,[{x,0}],[],{put_tuple,2}}|Is]) ->
+dig_out_block([{set,[{x,0}],[{atom,Exc},Value],put_tuple2}|Is]) ->
translate_exception(Exc, Value, Is, 3);
dig_out_block(_) -> no.
@@ -148,36 +142,103 @@ fix_block_1([{set,[],[],{alloc,Live,{F1,F2,Needed0,F3}}}|Is], Words) ->
fix_block_1([I|Is], Words) ->
[I|fix_block_1(Is, Words)].
-dig_out_block_fc([{set,[],[],{alloc,Live,_}}|Bl]) ->
- Regs = maps:from_list([{{x,X},{arg,X}} || X <- seq(0, Live-1)]),
- dig_out_fc(Bl, Regs);
-dig_out_block_fc(_) -> no.
-dig_out_fc([{set,[Dst],[Hd,Tl],put_list}|Is], Regs0) ->
- Regs = Regs0#{Dst=>{cons,get_reg(Hd, Regs0),get_reg(Tl, Regs0)}},
- dig_out_fc(Is, Regs);
-dig_out_fc([{set,[Dst],[Src],move}|Is], Regs0) ->
- Regs = Regs0#{Dst=>get_reg(Src, Regs0)},
- dig_out_fc(Is, Regs);
-dig_out_fc([{set,_,_,_}|_], _Regs) ->
- %% Unknown instruction. It is not a function_clause error.
- no;
-dig_out_fc([], Regs) ->
+dig_out_fc(Arity, Is0) ->
+ Regs0 = maps:from_list([{{x,X},{arg,X}} || X <- seq(0, Arity-1)]),
+ {Is,Acc0} = splitwith(fun({label,_}) -> false;
+ ({test,_,_,_}) -> false;
+ (_) -> true
+ end, Is0),
+ {Regs,Acc} = dig_out_fc_1(reverse(Is), Regs0, Acc0),
case Regs of
#{{x,0}:={atom,function_clause},{x,1}:=Args} ->
- dig_out_fc_1(Args, 0);
+ case moves_from_stack(Args, 0, []) of
+ {Moves,Arity} ->
+ {yes,function_clause,reverse(Moves, Acc)};
+ {_,_} ->
+ no
+ end;
#{} ->
no
end.
-dig_out_fc_1({cons,{arg,I},T}, I) ->
- dig_out_fc_1(T, I+1);
-dig_out_fc_1(nil, I) ->
- {yes,{function_clause,I}};
-dig_out_fc_1(_, _) -> no.
+dig_out_fc_1([{block,Bl}|Is], Regs0, Acc) ->
+ Regs = dig_out_fc_block(Bl, Regs0),
+ dig_out_fc_1(Is, Regs, Acc);
+dig_out_fc_1([{bs_set_position,_,_}=I|Is], Regs, Acc) ->
+ dig_out_fc_1(Is, Regs, [I|Acc]);
+dig_out_fc_1([{bs_get_tail,Src,Dst,Live0}|Is], Regs0, Acc) ->
+ Regs = prune_xregs(Live0, Regs0),
+ Live = dig_out_stack_live(Regs, Live0),
+ I = {bs_get_tail,Src,Dst,Live},
+ dig_out_fc_1(Is, Regs, [I|Acc]);
+dig_out_fc_1([_|_], _Regs, _Acc) ->
+ {#{},[]};
+dig_out_fc_1([], Regs, Acc) ->
+ {Regs,Acc}.
+
+dig_out_fc_block([{set,[],[],{alloc,Live,_}}|Is], Regs0) ->
+ Regs = prune_xregs(Live, Regs0),
+ dig_out_fc_block(Is, Regs);
+dig_out_fc_block([{set,[Dst],[Hd,Tl],put_list}|Is], Regs0) ->
+ Regs = Regs0#{Dst=>{cons,get_reg(Hd, Regs0),get_reg(Tl, Regs0)}},
+ dig_out_fc_block(Is, Regs);
+dig_out_fc_block([{set,[Dst],[Src],move}|Is], Regs0) ->
+ Regs = Regs0#{Dst=>get_reg(Src, Regs0)},
+ dig_out_fc_block(Is, Regs);
+dig_out_fc_block([{set,_,_,_}|_], _Regs) ->
+ %% Unknown instruction. Fail.
+ #{};
+dig_out_fc_block([], Regs) -> Regs.
+
+dig_out_stack_live(Regs, Default) ->
+ Reg = {x,2},
+ case Regs of
+ #{Reg:=List} ->
+ dig_out_stack_live_1(List, Default);
+ #{} ->
+ Default
+ end.
+
+dig_out_stack_live_1({cons,{arg,N},T}, Live) ->
+ dig_out_stack_live_1(T, max(N + 1, Live));
+dig_out_stack_live_1({cons,_,T}, Live) ->
+ dig_out_stack_live_1(T, Live);
+dig_out_stack_live_1(nil, Live) ->
+ Live;
+dig_out_stack_live_1(_, Live) -> Live.
+
+prune_xregs(Live, Regs) ->
+ maps:filter(fun({x,X}, _) -> X < Live end, Regs).
+
+moves_from_stack({cons,{arg,N},_}, I, _Acc) when N =/= I ->
+ %% Wrong argument. Give up.
+ {[],-1};
+moves_from_stack({cons,H,T}, I, Acc) ->
+ case H of
+ {arg,I} ->
+ moves_from_stack(T, I+1, Acc);
+ _ ->
+ moves_from_stack(T, I+1, [{move,H,{x,I}}|Acc])
+ end;
+moves_from_stack(nil, I, Acc) ->
+ {reverse(Acc),I};
+moves_from_stack({literal,[H|T]}, I, Acc) ->
+ Cons = {cons,tag_literal(H),tag_literal(T)},
+ moves_from_stack(Cons, I, Acc);
+moves_from_stack(_, _, _) ->
+ %% Not understood. Give up.
+ {[],-1}.
+
get_reg(R, Regs) ->
case Regs of
#{R:=Val} -> Val;
#{} -> R
end.
+
+tag_literal([]) -> nil;
+tag_literal(T) when is_atom(T) -> {atom,T};
+tag_literal(T) when is_float(T) -> {float,T};
+tag_literal(T) when is_integer(T) -> {integer,T};
+tag_literal(T) -> {literal,T}.
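tag_literal/1 above turns plain terms back into tagged BEAM operands. A small self-check (hypothetical; it assumes being placed in this module next to tag_literal/1):

    tag_literal_examples() ->
        nil = tag_literal([]),
        {atom,badarg} = tag_literal(badarg),
        {integer,42} = tag_literal(42),
        {float,2.5} = tag_literal(2.5),
        {literal,{a,b}} = tag_literal({a,b}),
        ok.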
diff --git a/lib/compiler/src/beam_flatten.erl b/lib/compiler/src/beam_flatten.erl
index 9a1c5a1021..3e6bc1b1ed 100644
--- a/lib/compiler/src/beam_flatten.erl
+++ b/lib/compiler/src/beam_flatten.erl
@@ -32,8 +32,7 @@ module({Mod,Exp,Attr,Fs,Lc}, _Opt) ->
{ok,{Mod,Exp,Attr,[function(F) || F <- Fs],Lc}}.
function({function,Name,Arity,CLabel,Is0}) ->
- Is1 = block(Is0),
- Is = opt(Is1),
+ Is = block(Is0),
{function,Name,Arity,CLabel,Is}.
block(Is) ->
@@ -43,21 +42,12 @@ block([{block,Is0}|Is1], Acc) -> block(Is1, norm_block(Is0, Acc));
block([I|Is], Acc) -> block(Is, [I|Acc]);
block([], Acc) -> reverse(Acc).
-norm_block([{set,[],[],{alloc,R,{_,nostack,_,_}=Alloc}}|Is], Acc0) ->
- case insert_alloc_in_bs_init(Acc0, Alloc) of
- impossible ->
- norm_block(Is, reverse(norm_allocate(Alloc, R), Acc0));
- Acc ->
- norm_block(Is, Acc)
- end;
norm_block([{set,[],[],{alloc,R,Alloc}}|Is], Acc0) ->
norm_block(Is, reverse(norm_allocate(Alloc, R), Acc0));
-norm_block([{set,[D1],[S],get_hd},{set,[D2],[S],get_tl}|Is], Acc) ->
- I = {get_list,S,D1,D2},
- norm_block(Is, [I|Acc]);
-norm_block([I|Is], Acc) -> norm_block(Is, [norm(I)|Acc]);
+norm_block([I|Is], Acc) ->
+ norm_block(Is, [norm(I)|Acc]);
norm_block([], Acc) -> Acc.
-
+
norm({set,[D],As,{bif,N,F}}) -> {bif,N,F,As,D};
norm({set,[D],As,{alloc,R,{gc_bif,N,F}}}) -> {gc_bif,N,F,R,As,D};
norm({set,[D],[],init}) -> {init,D};
@@ -65,6 +55,7 @@ norm({set,[D],[S],move}) -> {move,S,D};
norm({set,[D],[S],fmove}) -> {fmove,S,D};
norm({set,[D],[S],fconv}) -> {fconv,S,D};
norm({set,[D],[S1,S2],put_list}) -> {put_list,S1,S2,D};
+norm({set,[D],Els,put_tuple2}) -> {put_tuple2,D,{list,Els}};
norm({set,[D],[],{put_tuple,A}}) -> {put_tuple,A,D};
norm({set,[],[S],put}) -> {put,S};
norm({set,[D],[S],{get_tuple_element,I}}) -> {get_tuple_element,S,I,D};
@@ -90,57 +81,3 @@ norm_allocate({nozero,Ns,0,Inits}, Regs) ->
[{allocate,Ns,Regs}|Inits];
norm_allocate({nozero,Ns,Nh,Inits}, Regs) ->
[{allocate_heap,Ns,Nh,Regs}|Inits].
-
-%% insert_alloc_in_bs_init(ReverseInstructionStream, AllocationInfo) ->
-%% impossible | ReverseInstructionStream'
-%% A bs_init/6 instruction should not be followed by a test heap instruction.
-%% Given the AllocationInfo from a test heap instruction, merge the
-%% allocation amounts into the previous bs_init/6 instruction (if any).
-%%
-insert_alloc_in_bs_init([{bs_put,_,_,_}=I|Is], Alloc) ->
- %% The instruction sequence ends with a bs_put/4 instruction.
- %% We'll need to search backwards for the bs_init/6 instruction.
- insert_alloc_1(Is, Alloc, [I]);
-insert_alloc_in_bs_init(_, _) -> impossible.
-
-insert_alloc_1([{bs_init=Op,Fail,Info0,Live,Ss,Dst}|Is],
- {_,nostack,Ws2,[]}, Acc) when is_integer(Live) ->
- %% The number of extra heap words is always in the second position
- %% in the Info tuple.
- Ws1 = element(2, Info0),
- Al = beam_utils:combine_heap_needs(Ws1, Ws2),
- Info = setelement(2, Info0, Al),
- I = {Op,Fail,Info,Live,Ss,Dst},
- reverse(Acc, [I|Is]);
-insert_alloc_1([{bs_put,_,_,_}=I|Is], Alloc, Acc) ->
- insert_alloc_1(Is, Alloc, [I|Acc]).
-
-%% opt(Is0) -> Is
-%% Simple peep-hole optimization to move a {move,Any,{x,0}} past
-%% any kill up to the next call instruction. (To give the loader
-%% an opportunity to combine the 'move' and the 'call' instructions.)
-%%
-opt(Is) ->
- opt_1(Is, []).
-
-opt_1([{move,_,{x,0}}=I|Is0], Acc0) ->
- case move_past_kill(Is0, I, Acc0) of
- impossible -> opt_1(Is0, [I|Acc0]);
- {Is,Acc} -> opt_1(Is, Acc)
- end;
-opt_1([I|Is], Acc) ->
- opt_1(Is, [I|Acc]);
-opt_1([], Acc) -> reverse(Acc).
-
-move_past_kill([{kill,Src}|_], {move,Src,_}, _) ->
- impossible;
-move_past_kill([{kill,_}=I|Is], Move, Acc) ->
- move_past_kill(Is, Move, [I|Acc]);
-move_past_kill([{trim,N,_}=I|Is], {move,Src,Dst}=Move, Acc) ->
- case Src of
- {y,Y} when Y < N-> impossible;
- {y,Y} -> {Is,[{move,{y,Y-N},Dst},I|Acc]};
- _ -> {Is,[Move,I|Acc]}
- end;
-move_past_kill(Is, Move, Acc) ->
- {Is,[Move|Acc]}.
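Worked example for the new put_tuple2 clause of norm/1 above (register and operands are made up):

    %% norm({set,[{x,0}],[{atom,ok},{x,1}],put_tuple2})
    %%     => {put_tuple2,{x,0},{list,[{atom,ok},{x,1}]}}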
diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl
index 42b4cdaf4f..74f80ca70e 100644
--- a/lib/compiler/src/beam_jump.erl
+++ b/lib/compiler/src/beam_jump.erl
@@ -22,7 +22,7 @@
-module(beam_jump).
-export([module/2,
- is_unreachable_after/1,is_exit_instruction/1,
+ is_exit_instruction/1,
remove_unused_labels/1]).
%%% The following optimisations are done:
@@ -101,6 +101,10 @@
%%% always keep the label. (beam_clean will remove any unused
%%% labels.)
%%%
+%%% (7) Replace a jump to a return instruction with a return instruction.
+%%% Similarly, replace a jump to deallocate + return with those
+%%% instructions.
+%%%
%%% Note: This module depends on (almost) all branches and jumps only
%%% going forward, so that we can remove instructions (including definition
%%% of labels) after any label that has not been referenced by the code
@@ -128,27 +132,136 @@
%%% on the program state.
%%%
--import(lists, [reverse/1,reverse/2,foldl/3]).
+-import(lists, [foldl/3,mapfoldl/3,reverse/1,reverse/2]).
-type instruction() :: beam_utils:instruction().
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
-module({Mod,Exp,Attr,Fs0,Lc}, _Opt) ->
- Fs = [function(F) || F <- Fs0],
+module({Mod,Exp,Attr,Fs0,Lc0}, _Opt) ->
+ {Fs,Lc} = mapfoldl(fun function/2, Lc0, Fs0),
{ok,{Mod,Exp,Attr,Fs,Lc}}.
%% function(Function) -> Function'
%% Optimize jumps and branches.
%%
%% NOTE: This function assumes that there are no labels inside blocks.
-function({function,Name,Arity,CLabel,Asm0}) ->
- Asm1 = share(Asm0),
- Asm2 = move(Asm1),
- Asm3 = opt(Asm2, CLabel),
- Asm = remove_unused_labels(Asm3),
- {function,Name,Arity,CLabel,Asm}.
+function({function,Name,Arity,CLabel,Asm0}, Lc0) ->
+ try
+ Asm1 = eliminate_moves(Asm0),
+ {Asm2,Lc} = insert_labels(Asm1, Lc0, []),
+ Asm3 = share(Asm2),
+ Asm4 = move(Asm3),
+ Asm5 = opt(Asm4, CLabel),
+ Asm6 = unshare(Asm5),
+ Asm = remove_unused_labels(Asm6),
+ {{function,Name,Arity,CLabel,Asm},Lc}
+ catch
+ Class:Error:Stack ->
+ io:fwrite("Function: ~w/~w\n", [Name,Arity]),
+ erlang:raise(Class, Error, Stack)
+ end.
+
+%%%
+%%% Scan instructions in execution order and remove redundant 'move'
+%%% instructions. 'move' instructions are redundant if we know that
+%%% the register already contains the value being assigned, as in the
+%%% following code:
+%%%
+%%% select_val Register FailLabel [... Literal => L1...]
+%%% .
+%%% .
+%%% .
+%%% L1: move Literal Register
+%%%
+
+eliminate_moves(Is) ->
+ eliminate_moves(Is, #{}, []).
+
+eliminate_moves([{select,select_val,Reg,{f,Fail},List}=I|Is], D0, Acc) ->
+ D1 = add_unsafe_label(Fail, D0),
+ D = update_value_dict(List, Reg, D1),
+ eliminate_moves(Is, D, [I|Acc]);
+eliminate_moves([{test,is_eq_exact,_,[Reg,Val]}=I,
+ {block,BlkIs0}|Is], D0, Acc) ->
+ D = update_unsafe_labels(I, D0),
+ RegVal = {Reg,Val},
+ BlkIs = eliminate_moves_blk(BlkIs0, RegVal),
+ eliminate_moves([{block,BlkIs}|Is], D, [I|Acc]);
+eliminate_moves([{label,Lbl},{block,BlkIs0}=Blk|Is], D, Acc0) ->
+ Acc = [{label,Lbl}|Acc0],
+ case {no_fallthrough(Acc0),D} of
+ {true,#{Lbl:={_,_}=RegVal}} ->
+ BlkIs = eliminate_moves_blk(BlkIs0, RegVal),
+ eliminate_moves([{block,BlkIs}|Is], D, Acc);
+ {_,_} ->
+ eliminate_moves([Blk|Is], D, Acc)
+ end;
+eliminate_moves([{block,[]}|Is], D, Acc) ->
+ %% Empty blocks can prevent further jump optimizations.
+ eliminate_moves(Is, D, Acc);
+eliminate_moves([I|Is], D0, Acc) ->
+ D = update_unsafe_labels(I, D0),
+ eliminate_moves(Is, D, [I|Acc]);
+eliminate_moves([], _, Acc) -> reverse(Acc).
+
+eliminate_moves_blk([{set,[Dst],[_],move}|_]=Is, {_,Dst}) ->
+ Is;
+eliminate_moves_blk([{set,[Dst],[Lit],move}|Is], {Dst,Lit}) ->
+ %% Remove redundant 'move' instruction.
+ Is;
+eliminate_moves_blk([{set,[Dst],[_],move}|_]=Is, {Dst,_}) ->
+ Is;
+eliminate_moves_blk([{set,[_],[_],move}=I|Is], {_,_}=RegVal) ->
+ [I|eliminate_moves_blk(Is, RegVal)];
+eliminate_moves_blk(Is, _) -> Is.
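A worked example of the elimination (labels and registers are hypothetical): if a select_val dispatches {x,0} =:= true to label 100, the block at that label can only be entered with 'true' already in {x,0}, so a leading move of 'true' into {x,0} is dropped:

    %% eliminate_moves_blk([{set,[{x,0}],[{atom,true}],move}|More],
    %%                     {{x,0},{atom,true}})
    %%     -> More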
+
+no_fallthrough([I|_]) ->
+ is_unreachable_after(I).
+
+update_value_dict([Lit,{f,Lbl}|T], Reg, D0) ->
+ D = case D0 of
+ #{Lbl:=unsafe} -> D0;
+ #{Lbl:={Reg,Lit}} -> D0;
+ #{Lbl:=_} -> D0#{Lbl:=unsafe};
+ #{} -> D0#{Lbl=>{Reg,Lit}}
+ end,
+ update_value_dict(T, Reg, D);
+update_value_dict([], _, D) -> D.
+
+add_unsafe_label(L, D) ->
+ D#{L=>unsafe}.
+
+update_unsafe_labels(I, D) ->
+ Ls = instr_labels(I),
+ update_unsafe_labels_1(Ls, D).
+
+update_unsafe_labels_1([L|Ls], D) ->
+ update_unsafe_labels_1(Ls, D#{L=>unsafe});
+update_unsafe_labels_1([], D) -> D.
+
+%%%
+%%% It seems to be useful to insert extra labels after certain
+%%% test instructions. This used to be done by beam_dead.
+%%%
+
+insert_labels([{test,Op,_,_}=I|Is], Lc, Acc) ->
+ Useful = case Op of
+ is_lt -> true;
+ is_ge -> true;
+ is_eq_exact -> true;
+ is_ne_exact -> true;
+ _ -> false
+ end,
+ case Useful of
+ false -> insert_labels(Is, Lc, [I|Acc]);
+ true -> insert_labels(Is, Lc+1, [{label,Lc},I|Acc])
+ end;
+insert_labels([I|Is], Lc, Acc) ->
+ insert_labels(Is, Lc, [I|Acc]);
+insert_labels([], Lc, Acc) ->
+ {reverse(Acc),Lc}.
%%%
%%% (1) We try to share the code for identical code segments by replacing all
@@ -271,8 +384,6 @@ extract_seq([{line,_}=Line|Is], Acc) ->
extract_seq(Is, [Line|Acc]);
extract_seq([{block,_}=Bl|Is], Acc) ->
extract_seq_1(Is, [Bl|Acc]);
-extract_seq([{bs_context_to_binary,_}=I|Is], Acc) ->
- extract_seq_1(Is, [I|Acc]);
extract_seq([{label,_}|_]=Is, Acc) ->
extract_seq_1(Is, Acc);
extract_seq(_, _) -> no.
@@ -296,14 +407,13 @@ extract_seq_1(_, _) -> no.
{
entry :: beam_asm:label(), %Entry label (must not be moved).
replace :: #{beam_asm:label() := beam_asm:label()}, %Labels to replace.
- labels :: cerl_sets:set(), %Set of referenced labels.
- index :: beam_utils:code_index() | {lazy,[beam_utils:instruction()]} %Index built lazily only if needed
+ labels :: cerl_sets:set() %Set of referenced labels.
}).
opt(Is0, CLabel) ->
find_fixpoint(fun(Is) ->
Lbls = initial_labels(Is),
- St = #st{entry=CLabel,replace=#{},labels=Lbls,index={lazy,Is}},
+ St = #st{entry=CLabel,replace=#{},labels=Lbls},
opt(Is, [], St)
end, Is0).
@@ -313,7 +423,7 @@ find_fixpoint(OptFun, Is0) ->
Is -> find_fixpoint(OptFun, Is)
end.
-opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc0, St0) ->
+opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc, St) ->
%% We have
%% Test Label Ops
%% jump Label
@@ -322,23 +432,10 @@ opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc0, St0) ->
case beam_utils:is_pure_test(I) of
false ->
%% Test is not pure; we must keep it.
- opt(Is, [I|Acc0], label_used(Lbl, St0));
+ opt(Is, [I|Acc], label_used(Lbl, St));
true ->
%% The test is pure and its failure label is the same
%% as in the jump that follows -- thus it is not needed.
- %% Check if any of the previous instructions could also be eliminated.
- {Acc,St} = opt_useless_loads(Acc0, L, St0),
- opt(Is, Acc, St)
- end;
-opt([{test,_,{f,L}=Lbl,_}=I|[{label,L}|_]=Is], Acc0, St0) ->
- %% Similar to the above, except we have a fall-through rather than jump
- %% Test Label Ops
- %% label Label
- case beam_utils:is_pure_test(I) of
- false ->
- opt(Is, [I|Acc0], label_used(Lbl, St0));
- true ->
- {Acc,St} = opt_useless_loads(Acc0, L, St0),
opt(Is, Acc, St)
end;
opt([{test,Test0,{f,L}=Lbl,Ops}=I|[{jump,To}|Is]=Is0], Acc, St) ->
@@ -405,46 +502,6 @@ normalize_replace([{From,To0}|Rest], Replace, Acc) ->
normalize_replace([], _Replace, Acc) ->
maps:from_list(Acc).
-%% After eliminating a test, it might happen, that a register was only used
-%% in this test. Let's check if that was the case and if it was so, we can
-%% eliminate the load into the register completely.
-opt_useless_loads([{block,_}|_]=Is, L, #st{index={lazy,FIs}}=St) ->
- opt_useless_loads(Is, L, St#st{index=beam_utils:index_labels(FIs)});
-opt_useless_loads([{block,Block0}|Is], L, #st{index=Index}=St) ->
- case opt_useless_block_loads(Block0, L, Index) of
- [] ->
- opt_useless_loads(Is, L, St);
- [_|_]=Block ->
- {[{block,Block}|Is],St}
- end;
-%% After eliminating the test and useless blocks, it might happen,
-%% that the previous test could also be eliminated.
-%% It might be that the label was already marked as used, even if ultimately,
-%% it never will be - we can't do much about it at that point, though
-opt_useless_loads([{test,_,{f,L},_}=I|Is], L, St) ->
- case beam_utils:is_pure_test(I) of
- false ->
- {[I|Is],St};
- true ->
- opt_useless_loads(Is, L, St)
- end;
-opt_useless_loads(Is, _L, St) ->
- {Is,St}.
-
-opt_useless_block_loads([{set,[Dst],_,_}=I|Is], L, Index) ->
- BlockJump = [{block,Is},{jump,{f,L}}],
- case beam_utils:is_killed(Dst, BlockJump, Index) of
- true ->
- %% The register is killed and not used, we can remove the load
- opt_useless_block_loads(Is, L, Index);
- false ->
- [I|opt_useless_block_loads(Is, L, Index)]
- end;
-opt_useless_block_loads([I|Is], L, Index) ->
- [I|opt_useless_block_loads(Is, L, Index)];
-opt_useless_block_loads([], _L, _Index) ->
- [].
-
collect_labels(Is, Label, #st{entry=Entry,replace=Replace} = St) ->
collect_labels_1(Is, Label, Entry, Replace, St).
@@ -571,58 +628,109 @@ drop_upto_label([{label,_}|_]=Is) -> Is;
drop_upto_label([_|Is]) -> drop_upto_label(Is);
drop_upto_label([]) -> [].
-%% ulbl(Instruction, UsedGbSet) -> UsedGbSet'
-%% Update the gb_set UsedGbSet with any function-local labels
+%% unshare([Instruction]) -> [Instruction].
+%% Replace a jump to a return sequence (a `return` instruction
+%% optionally preceded by a `deallocate` instruction) with the return
+%% sequence. This always saves execution time and may also save code
+%% space (depending on the architecture). Eliminating `jump`
+%% instructions also gives beam_trim more opportunities to trim the
+%% stack.
+
+unshare(Is) ->
+ Short = unshare_collect_short(Is, #{}),
+ unshare_short(Is, Short).
+
+unshare_collect_short([{label,L},return|Is], Map) ->
+ unshare_collect_short(Is, Map#{L=>[return]});
+unshare_collect_short([{label,L},{deallocate,_}=D,return|Is], Map) ->
+ %% `deallocate` and `return` are combined into one instruction by
+ %% the loader.
+ unshare_collect_short(Is, Map#{L=>[D,return]});
+unshare_collect_short([_|Is], Map) ->
+ unshare_collect_short(Is, Map);
+unshare_collect_short([], Map) -> Map.
+
+unshare_short([{jump,{f,F}}=I|Is], Map) ->
+ case Map of
+ #{F:=Seq} ->
+ Seq ++ unshare_short(Is, Map);
+ #{} ->
+ [I|unshare_short(Is, Map)]
+ end;
+unshare_short([I|Is], Map) ->
+ [I|unshare_short(Is, Map)];
+unshare_short([], _Map) -> [].
+
+%% ulbl(Instruction, UsedCerlSet) -> UsedCerlSet'
+%% Update the cerl_set UsedCerlSet with any function-local labels
%% (i.e. not with labels in call instructions) referenced by
%% the instruction Instruction.
%%
%% NOTE: This function does NOT look for labels inside blocks.
-ulbl({test,_,Fail,_}, Used) ->
- mark_used(Fail, Used);
-ulbl({test,_,Fail,_,_,_}, Used) ->
- mark_used(Fail, Used);
-ulbl({select,_,_,Fail,Vls}, Used) ->
- mark_used_list(Vls, mark_used(Fail, Used));
-ulbl({'try',_,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl({'catch',_,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl({jump,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl({loop_rec,Lbl,_}, Used) ->
- mark_used(Lbl, Used);
-ulbl({loop_rec_end,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl({wait,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl({wait_timeout,Lbl,_To}, Used) ->
- mark_used(Lbl, Used);
-ulbl({bif,_Name,Lbl,_As,_R}, Used) ->
- mark_used(Lbl, Used);
-ulbl({gc_bif,_Name,Lbl,_Live,_As,_R}, Used) ->
- mark_used(Lbl, Used);
-ulbl({bs_init,Lbl,_,_,_,_}, Used) ->
- mark_used(Lbl, Used);
-ulbl({bs_put,Lbl,_,_}, Used) ->
- mark_used(Lbl, Used);
-ulbl({put_map,Lbl,_Op,_Src,_Dst,_Live,_List}, Used) ->
- mark_used(Lbl, Used);
-ulbl({get_map_elements,Lbl,_Src,_List}, Used) ->
- mark_used(Lbl, Used);
-ulbl({recv_mark,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl({recv_set,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl({fcheckerror,Lbl}, Used) ->
- mark_used(Lbl, Used);
-ulbl(_, Used) -> Used.
-
-mark_used({f,0}, Used) -> Used;
-mark_used({f,L}, Used) -> cerl_sets:add_element(L, Used).
-
-mark_used_list([{f,L}|T], Used) ->
- mark_used_list(T, cerl_sets:add_element(L, Used));
-mark_used_list([_|T], Used) ->
- mark_used_list(T, Used);
-mark_used_list([], Used) -> Used.
+ulbl(I, Used) ->
+ case instr_labels(I) of
+ [] ->
+ Used;
+ [Lbl] ->
+ cerl_sets:add_element(Lbl, Used);
+ [_|_]=L ->
+ ulbl_list(L, Used)
+ end.
+
+ulbl_list([L|Ls], Used) ->
+ ulbl_list(Ls, cerl_sets:add_element(L, Used));
+ulbl_list([], Used) -> Used.
+
+-spec instr_labels(Instruction) -> Labels when
+ Instruction :: instruction(),
+ Labels :: [beam_asm:label()].
+
+instr_labels({test,_,Fail,_}) ->
+ do_instr_labels(Fail);
+instr_labels({test,_,Fail,_,_,_}) ->
+ do_instr_labels(Fail);
+instr_labels({select,_,_,Fail,Vls}) ->
+ do_instr_labels_list(Vls, do_instr_labels(Fail));
+instr_labels({'try',_,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels({'catch',_,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels({jump,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels({loop_rec,Lbl,_}) ->
+ do_instr_labels(Lbl);
+instr_labels({loop_rec_end,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels({wait,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels({wait_timeout,Lbl,_To}) ->
+ do_instr_labels(Lbl);
+instr_labels({bif,_Name,Lbl,_As,_R}) ->
+ do_instr_labels(Lbl);
+instr_labels({gc_bif,_Name,Lbl,_Live,_As,_R}) ->
+ do_instr_labels(Lbl);
+instr_labels({bs_init,Lbl,_,_,_,_}) ->
+ do_instr_labels(Lbl);
+instr_labels({bs_put,Lbl,_,_}) ->
+ do_instr_labels(Lbl);
+instr_labels({put_map,Lbl,_Op,_Src,_Dst,_Live,_List}) ->
+ do_instr_labels(Lbl);
+instr_labels({get_map_elements,Lbl,_Src,_List}) ->
+ do_instr_labels(Lbl);
+instr_labels({recv_mark,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels({recv_set,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels({fcheckerror,Lbl}) ->
+ do_instr_labels(Lbl);
+instr_labels(_) -> [].
+
+do_instr_labels({f,0}) -> [];
+do_instr_labels({f,F}) -> [F].
+
+do_instr_labels_list([{f,F}|T], Acc) ->
+ do_instr_labels_list(T, [F|Acc]);
+do_instr_labels_list([_|T], Acc) ->
+ do_instr_labels_list(T, Acc);
+do_instr_labels_list([], Acc) -> Acc.
diff --git a/lib/compiler/src/beam_kernel_to_ssa.erl b/lib/compiler/src/beam_kernel_to_ssa.erl
index c55a57ab32..df95749fb3 100644
--- a/lib/compiler/src/beam_kernel_to_ssa.erl
+++ b/lib/compiler/src/beam_kernel_to_ssa.erl
@@ -276,12 +276,11 @@ select_nil(#k_val_clause{val=#k_nil{},body=B}, V, Tf, Vf, St0) ->
{Is ++ Bis,St}.
select_binary(#k_val_clause{val=#k_binary{segs=#k_var{name=Ctx0}},body=B},
- #k_var{anno=Anno0}=Src, Tf, Vf, St0) ->
- Anno = #{reuse_for_context=>member(reuse_for_context, Anno0)},
+ #k_var{}=Src, Tf, Vf, St0) ->
{Ctx,St1} = new_ssa_var(Ctx0, St0),
{Bis0,St2} = match_cg(B, Vf, St1),
{TestIs,St} = make_cond_branch(succeeded, [Ctx], Tf, St2),
- Bis1 = [#b_set{anno=Anno,op=bs_start_match,dst=Ctx,
+ Bis1 = [#b_set{op=bs_start_match,dst=Ctx,
args=[ssa_arg(Src, St)]}] ++ TestIs ++ Bis0,
Bis = finish_bs_matching(Bis1),
{Bis,St}.
@@ -328,7 +327,7 @@ select_bin_seg(#k_val_clause{val=#k_bin_seg{size=Size,unit=U,type=T,
{Mis,St1} = select_extract_bin(Next, Size, U, T, Fs, Fail,
Ctx, LineAnno, St0),
{Extracted,St2} = new_ssa_var(Seg#k_var.name, St1),
- {Bis,St} = bin_match_cg(Size, B, Fail, St2),
+ {Bis,St} = match_cg(B, Fail, St2),
BsGet = #b_set{op=bs_extract,dst=Extracted,args=[ssa_arg(Next, St)]},
Is = Mis ++ [BsGet] ++ Bis,
{Is,St};
@@ -363,14 +362,6 @@ select_bin_seg(#k_val_clause{val=#k_bin_int{size=Sz,unit=U,flags=Fs,
end,
{Is,St}.
-bin_match_cg(#k_atom{val=all}, B0, Fail, St) ->
- #k_select{types=Types} = B0,
- [#k_type_clause{type=k_bin_end,values=Values}] = Types,
- [#k_val_clause{val=#k_bin_end{},body=B}] = Values,
- match_cg(B, Fail, St);
-bin_match_cg(_, B, Fail, St) ->
- match_cg(B, Fail, St).
-
get_context(#k_var{}=Var, St) ->
ssa_arg(Var, St).
@@ -708,15 +699,6 @@ bif_cg(#k_bif{op=#k_remote{mod=#k_atom{val=erlang},name=#k_atom{val=Name}},
%% internal_cg(Bif, [Arg], [Ret], Le, State) ->
%% {[Ainstr],State}.
-internal_cg(bs_context_to_binary, [Src0], [], _Le, St) ->
- Src = ssa_arg(Src0, St),
- Set = #b_set{op=context_to_binary,args=[Src]},
- {[Set],St};
-internal_cg(dsetelement, [Index0,Tuple0,New0], _Rs, _Le, St) ->
- [New,Tuple,#b_literal{val=Index1}] = ssa_args([New0,Tuple0,Index0], St),
- Index = #b_literal{val=Index1-1},
- Set = #b_set{op=set_tuple_element,args=[New,Tuple,Index]},
- {[Set],St};
internal_cg(make_fun, [Name0,Arity0|As], Rs, _Le, St0) ->
#k_atom{val=Name} = Name0,
#k_int{val=Arity} = Arity0,
diff --git a/lib/compiler/src/beam_listing.erl b/lib/compiler/src/beam_listing.erl
index 8a0ce5b50a..6121593b11 100644
--- a/lib/compiler/src/beam_listing.erl
+++ b/lib/compiler/src/beam_listing.erl
@@ -66,7 +66,7 @@ module(Stream, [_|_]=Fs) ->
foreach(fun (F) -> io:format(Stream, "~p.\n", [F]) end, Fs).
format_asm([{label,L}|Is]) ->
- [" {label,",integer_to_list(L),"}.\n"|format_asm(Is)];
+ [io_lib:format(" {label,~p}.\n", [L])|format_asm(Is)];
format_asm([I|Is]) ->
[io_lib:format(" ~p", [I]),".\n"|format_asm(Is)];
format_asm([]) -> [].
diff --git a/lib/compiler/src/beam_peep.erl b/lib/compiler/src/beam_peep.erl
index 74da6aa704..5730e9704e 100644
--- a/lib/compiler/src/beam_peep.erl
+++ b/lib/compiler/src/beam_peep.erl
@@ -94,26 +94,26 @@ peep([{gc_bif,_,_,_,_,Dst}=I|Is], SeenTests0, Acc) ->
peep([{jump,{f,L}},{label,L}=I|Is], _, Acc) ->
%% Sometimes beam_jump has missed this optimization.
peep(Is, gb_sets:empty(), [I|Acc]);
-peep([{select,Op,R,F,Vls0}|Is], SeenTests0, Acc0) ->
+peep([{select,select_val,R,F,Vls0}|Is], SeenTests0, Acc0) ->
case prune_redundant_values(Vls0, F) of
[] ->
%% No values left. Must convert to plain jump.
I = {jump,F},
peep([I|Is], gb_sets:empty(), Acc0);
- [{atom,_}=Value,Lbl] when Op =:= select_val ->
+ [{atom,_}=Value,Lbl] ->
%% Single value left. Convert to regular test.
Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is],
peep(Is1, SeenTests0, Acc0);
- [{integer,_}=Value,Lbl] when Op =:= select_val ->
+ [{integer,_}=Value,Lbl] ->
%% Single value left. Convert to regular test.
Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is],
peep(Is1, SeenTests0, Acc0);
- [Arity,Lbl] when Op =:= select_tuple_arity ->
- %% Single value left. Convert to regular test
- Is1 = [{test,test_arity,F,[R,Arity]},{jump,Lbl}|Is],
+ [{atom,B1},Lbl,{atom,B2},Lbl] when B1 =:= not B2 ->
+ %% Replace with is_boolean test.
+ Is1 = [{test,is_boolean,F,[R]},{jump,Lbl}|Is],
peep(Is1, SeenTests0, Acc0);
[_|_]=Vls ->
- I = {select,Op,R,F,Vls},
+ I = {select,select_val,R,F,Vls},
peep(Is, gb_sets:empty(), [I|Acc0])
end;
peep([{get_map_elements,Fail,Src,List}=I|Is], _SeenTests, Acc0) ->
diff --git a/lib/compiler/src/beam_split.erl b/lib/compiler/src/beam_split.erl
deleted file mode 100644
index 7b18946ae0..0000000000
--- a/lib/compiler/src/beam_split.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2011-2018. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-
--module(beam_split).
--export([module/2]).
-
--import(lists, [reverse/1]).
-
--spec module(beam_utils:module_code(), [compile:option()]) ->
- {'ok',beam_utils:module_code()}.
-
-module({Mod,Exp,Attr,Fs0,Lc}, _Opts) ->
- Fs = [split_blocks(F) || F <- Fs0],
- {ok,{Mod,Exp,Attr,Fs,Lc}}.
-
-%% We must split the basic block when we encounter instructions with labels,
-%% such as catches and BIFs. All labels must be visible outside the blocks.
-
-split_blocks({function,Name,Arity,CLabel,Is0}) ->
- Is = split_blocks(Is0, []),
- {function,Name,Arity,CLabel,Is}.
-
-split_blocks([{block,Bl}|Is], Acc0) ->
- Acc = split_block(Bl, [], Acc0),
- split_blocks(Is, Acc);
-split_blocks([I|Is], Acc) ->
- split_blocks(Is, [I|Acc]);
-split_blocks([], Acc) -> reverse(Acc).
-
-split_block([{set,[R],As,{bif,N,{f,Lbl}=Fail}}|Is], Bl, Acc) when Lbl =/= 0 ->
- split_block(Is, [], [{bif,N,Fail,As,R}|make_block(Bl, Acc)]);
-split_block([{set,[],[],{line,_}=Line},
- {set,[R],As,{bif,raise,{f,_}=Fail}}|Is], Bl, Acc) ->
- split_block(Is, [], [{bif,raise,Fail,As,R},Line|make_block(Bl, Acc)]);
-split_block([{set,[R],As,{alloc,Live,{gc_bif,N,{f,Lbl}=Fail}}}|Is], Bl, Acc)
- when Lbl =/= 0 ->
- split_block(Is, [], [{gc_bif,N,Fail,Live,As,R}|make_block(Bl, Acc)]);
-split_block([{set,[D],[S|Puts],{alloc,R,{put_map,Op,{f,Lbl}=Fail}}}|Is],
- Bl, Acc) when Lbl =/= 0 ->
- split_block(Is, [], [{put_map,Fail,Op,S,D,R,{list,Puts}}|
- make_block(Bl, Acc)]);
-split_block([{set,[R],[],{try_catch,Op,L}}|Is], Bl, Acc) ->
- split_block(Is, [], [{Op,R,L}|make_block(Bl, Acc)]);
-split_block([I|Is], Bl, Acc) ->
- split_block(Is, [I|Bl], Acc);
-split_block([], Bl, Acc) -> make_block(Bl, Acc).
-
-make_block([], Acc) -> Acc;
-make_block([{set,[D],Ss,{bif,Op,Fail}}|Bl]=Bl0, Acc) ->
- %% If the last instruction in the block is a comparison or boolean operator
- %% (such as '=:='), move it out of the block to facilitate further
- %% optimizations.
- Arity = length(Ss),
- case erl_internal:comp_op(Op, Arity) orelse
- erl_internal:new_type_test(Op, Arity) orelse
- erl_internal:bool_op(Op, Arity) of
- false ->
- [{block,reverse(Bl0)}|Acc];
- true ->
- I = {bif,Op,Fail,Ss,D},
- case Bl =:= [] of
- true -> [I|Acc];
- false -> [I,{block,reverse(Bl)}|Acc]
- end
- end;
-make_block([{set,[Dst],[Src],move}|Bl], Acc) ->
- %% Make optimization of {move,Src,Dst}, {jump,...} possible.
- I = {move,Src,Dst},
- case Bl =:= [] of
- true -> [I|Acc];
- false -> [I,{block,reverse(Bl)}|Acc]
- end;
-make_block(Bl, Acc) -> [{block,reverse(Bl)}|Acc].
diff --git a/lib/compiler/src/beam_ssa.erl b/lib/compiler/src/beam_ssa.erl
index a2766a0501..a9977b0b1d 100644
--- a/lib/compiler/src/beam_ssa.erl
+++ b/lib/compiler/src/beam_ssa.erl
@@ -20,26 +20,35 @@
%% Purpose: Type definitions and utilities for the SSA format.
-module(beam_ssa).
--export([add_anno/3,get_anno/2,
- clobbers_xregs/1,def/2,def_used/2,dominators/1,
+-export([add_anno/3,get_anno/2,get_anno/3,
+ clobbers_xregs/1,def/2,def_used/2,
+ definitions/1,
+ dominators/1,common_dominators/3,
flatmapfold_instrs_rpo/4,
fold_po/3,fold_po/4,fold_rpo/3,fold_rpo/4,
fold_instrs_rpo/4,
linearize/1,
+ mapfold_blocks_rpo/4,
mapfold_instrs_rpo/4,
+ normalize/1,
+ no_side_effect/1,
predecessors/1,
rename_vars/3,
rpo/1,rpo/2,
split_blocks/3,
successors/1,successors/2,
- update_phi_labels/4,used/1]).
+ trim_unreachable/1,
+ update_phi_labels/4,used/1,
+ uses/1,uses/2]).
-export_type([b_module/0,b_function/0,b_blk/0,b_set/0,
b_ret/0,b_br/0,b_switch/0,terminator/0,
b_var/0,b_literal/0,b_remote/0,b_local/0,
value/0,argument/0,label/0,
var_name/0,var_base/0,literal_value/0,
- op/0,anno/0,block_map/0]).
+ op/0,anno/0,block_map/0,dominator_map/0,
+ rename_map/0,rename_proplist/0,usage_map/0,
+ definition_map/0]).
-include("beam_ssa.hrl").
@@ -53,6 +62,9 @@
-type b_switch() :: #b_switch{}.
-type terminator() :: b_br() | b_ret() | b_switch().
+-type construct() :: b_module() | b_function() | b_blk() | b_set() |
+ terminator().
+
-type b_var() :: #b_var{}.
-type b_literal() :: #b_literal{}.
-type b_remote() :: #b_remote{}.
@@ -73,6 +85,12 @@
-type anno() :: #{atom() := any()}.
-type block_map() :: #{label():=b_blk()}.
+-type dominator_map() :: #{label():=[label()]}.
+-type numbering_map() :: #{label():=non_neg_integer()}.
+-type usage_map() :: #{b_var():=[{label(),b_set() | terminator()}]}.
+-type definition_map() :: #{b_var():=b_set()}.
+-type rename_map() :: #{b_var():=value()}.
+-type rename_proplist() :: [{b_var(),value()}].
%% Note: By default, dialyzer will collapse this type to atom().
%% To avoid the collapsing, change the value of SET_LIMIT to 50 in the
@@ -81,7 +99,7 @@
-type prim_op() :: 'bs_add' | 'bs_extract' | 'bs_init' | 'bs_init_writable' |
'bs_match' | 'bs_put' | 'bs_start_match' | 'bs_test_tail' |
'bs_utf16_size' | 'bs_utf8_size' | 'build_stacktrace' |
- 'call' | 'catch_end' | 'context_to_binary' |
+ 'call' | 'catch_end' |
'extract' |
'get_hd' | 'get_map_element' | 'get_tl' | 'get_tuple_element' |
'has_map_field' |
@@ -91,7 +109,7 @@
'make_fun' | 'new_try_tag' |
'peek_message' | 'phi' | 'put_list' | 'put_map' | 'put_tuple' |
'raw_raise' | 'recv_next' | 'remove_message' | 'resume' |
- 'set_tuple_element' | 'succeeded' |
+ 'succeeded' |
'timeout' |
'wait' | 'wait_timeout'.
@@ -100,14 +118,15 @@
%% Primops only used internally during code generation.
-type cg_prim_op() :: 'bs_get' | 'bs_match_string' | 'bs_restore' | 'bs_skip' |
-'copy' | 'put_tuple_arity' | 'put_tuple_element'.
+ 'copy' | 'put_tuple_arity' | 'put_tuple_element' |
+ 'set_tuple_element'.
--import(lists, [foldl/3,mapfoldl/3,reverse/1]).
+-import(lists, [foldl/3,keyfind/3,mapfoldl/3,member/2,reverse/1]).
-spec add_anno(Key, Value, Construct) -> Construct when
Key :: atom(),
Value :: any(),
- Construct :: b_function() | b_blk() | b_set() | terminator().
+ Construct :: construct().
add_anno(Key, Val, #b_function{anno=Anno}=Bl) ->
Bl#b_function{anno=Anno#{Key=>Val}};
@@ -122,11 +141,17 @@ add_anno(Key, Val, #b_ret{anno=Anno}=Bl) ->
add_anno(Key, Val, #b_switch{anno=Anno}=Bl) ->
Bl#b_switch{anno=Anno#{Key=>Val}}.
--spec get_anno(atom(), b_blk()|b_set()|terminator()) -> any().
+-spec get_anno(atom(), construct()) -> any().
get_anno(Key, Construct) ->
- maps:get(Key, get_anno(Construct)).
+ map_get(Key, get_anno(Construct)).
+
+-spec get_anno(atom(), construct(), any()) -> any().
+
+get_anno(Key, Construct, Default) ->
+    maps:get(Key, get_anno(Construct), Default).
+
+get_anno(#b_function{anno=Anno}) -> Anno;
get_anno(#b_blk{anno=Anno}) -> Anno;
get_anno(#b_set{anno=Anno}) -> Anno;
get_anno(#b_br{anno=Anno}) -> Anno;
@@ -150,6 +175,40 @@ clobbers_xregs(#b_set{op=Op}) ->
_ -> false
end.
+%% no_side_effect(#b_set{}) -> true|false.
+%% Test whether this instruction has no side effect and thus is safe
+%% not to execute if its value is not used. Note that even if `true`
+%% is returned, the instruction could still be impure (e.g. bif:get).
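+%%
+%% A minimal usage sketch (the variables below are placeholders):
+%%
+%%     I = #b_set{op=get_hd,dst=Hd,args=[List]},
+%%     true = no_side_effect(I),
+%%     %% if Hd is never used, I can safely be removed.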
+
+-spec no_side_effect(b_set()) -> boolean().
+
+no_side_effect(#b_set{op=Op}) ->
+ case Op of
+ {bif,_} -> true;
+ {float,get} -> true;
+ bs_init -> true;
+ bs_extract -> true;
+ bs_match -> true;
+ bs_start_match -> true;
+ bs_test_tail -> true;
+ bs_get_tail -> true;
+ bs_put -> true;
+ extract -> true;
+ get_hd -> true;
+ get_tl -> true;
+ get_map_element -> true;
+ get_tuple_element -> true;
+ has_map_field -> true;
+ is_nonempty_list -> true;
+ is_tagged_tuple -> true;
+ make_fun -> true;
+ put_map -> true;
+ put_list -> true;
+ put_tuple -> true;
+ succeeded -> true;
+ _ -> false
+ end.
+
-spec predecessors(Blocks) -> #{BlockNumber:=[Predecessor]} when
Blocks :: block_map(),
BlockNumber :: label(),
@@ -180,10 +239,73 @@ successors(#b_blk{last=Terminator}) ->
[]
end.
+%% normalize(Instr0) -> Instr.
+%% Normalize instructions to help optimizations.
+%%
+%% For commutative operators (such as '+' and 'or'), always
+%% place a variable operand before a literal operand.
+%%
+%% Normalize #b_br{} to one of the following forms:
+%%
+%% #b_br{b_literal{val=true},succ=Label,fail=Label}
+%% #b_br{b_var{},succ=Label1,fail=Label2} where Label1 =/= Label2
+%%
+%% Simplify a #b_switch{} with a literal argument to a #b_br{}.
+%%
+%% Simplify a #b_switch{} with a variable argument and an empty
+%% switch list to a #b_br{}.
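+%%
+%% For example (a sketch; Var is a #b_var{}, Lit is a #b_literal{} and
+%% Dst is a placeholder), the literal operand of a commutative BIF is
+%% moved after the variable operand:
+%%
+%%     Set0 = #b_set{op={bif,'+'},dst=Dst,args=[Lit,Var]},
+%%     #b_set{args=[Var,Lit]} = normalize(Set0).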
+
+-spec normalize(b_set() | terminator()) ->
+ b_set() | terminator().
+
+normalize(#b_set{op={bif,Bif},args=Args}=Set) ->
+ case {is_commutative(Bif),Args} of
+ {false,_} ->
+ Set;
+ {true,[#b_literal{}=Lit,#b_var{}=Var]} ->
+ Set#b_set{args=[Var,Lit]};
+ {true,_} ->
+ Set
+ end;
+normalize(#b_set{}=Set) ->
+ Set;
+normalize(#b_br{}=Br) ->
+ case Br of
+ #b_br{bool=Bool,succ=Same,fail=Same} ->
+ case Bool of
+ #b_literal{val=true} ->
+ Br;
+ _ ->
+ Br#b_br{bool=#b_literal{val=true}}
+ end;
+ #b_br{bool=#b_literal{val=true},succ=Succ} ->
+ Br#b_br{fail=Succ};
+ #b_br{bool=#b_literal{val=false},fail=Fail} ->
+ Br#b_br{bool=#b_literal{val=true},succ=Fail};
+ #b_br{} ->
+ Br
+ end;
+normalize(#b_switch{arg=Arg,fail=Fail,list=List}=Sw) ->
+ case Arg of
+ #b_literal{} ->
+ case keyfind(Arg, 1, List) of
+ false ->
+ #b_br{bool=#b_literal{val=true},succ=Fail,fail=Fail};
+ {Arg,L} ->
+ #b_br{bool=#b_literal{val=true},succ=L,fail=L}
+ end;
+ #b_var{} when List =:= [] ->
+ #b_br{bool=#b_literal{val=true},succ=Fail,fail=Fail};
+ #b_var{} ->
+ Sw
+ end;
+normalize(#b_ret{}=Ret) ->
+ Ret.
+
-spec successors(label(), block_map()) -> [label()].
successors(L, Blocks) ->
- successors(maps:get(L, Blocks)).
+ successors(map_get(L, Blocks)).
-spec def(Ls, Blocks) -> Def when
Ls :: [label()],
@@ -192,7 +314,7 @@ successors(L, Blocks) ->
def(Ls, Blocks) ->
Top = rpo(Ls, Blocks),
- Blks = [maps:get(L, Blocks) || L <- Top],
+ Blks = [map_get(L, Blocks) || L <- Top],
def_1(Blks, []).
-spec def_used(Ls, Blocks) -> {Def,Used} when
@@ -203,22 +325,45 @@ def(Ls, Blocks) ->
def_used(Ls, Blocks) ->
Top = rpo(Ls, Blocks),
- Blks = [maps:get(L, Blocks) || L <- Top],
- Preds = gb_sets:from_list(Top),
- def_used_1(Blks, Preds, [], gb_sets:empty()).
+ Blks = [map_get(L, Blocks) || L <- Top],
+ Preds = cerl_sets:from_list(Top),
+ def_used_1(Blks, Preds, [], []).
+
+%% dominators(BlockMap) -> {Dominators,Numbering}.
+%% Calculate the dominator tree, returning a map where each entry
+%% in the map is a list that gives the path from that block to
+%% the top of the dominator tree. (Note that the suffixes of the
+%% paths are shared with each other, which makes the representation
+%% of the dominator tree highly memory-efficient.)
+%%
+%% The implementation is based on:
+%%
+%% http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+%% Cooper, Keith D.; Harvey, Timothy J; Kennedy, Ken (2001).
+%% A Simple, Fast Dominance Algorithm.
-spec dominators(Blocks) -> Result when
Blocks :: block_map(),
- Result :: #{label():=ordsets:ordset(label())}.
-
+ Result :: {dominator_map(), numbering_map()}.
dominators(Blocks) ->
Preds = predecessors(Blocks),
Top0 = rpo(Blocks),
- Top = [{L,maps:get(L, Preds)} || L <- Top0],
+ Df = maps:from_list(number(Top0, 0)),
+ [{0,[]}|Top] = [{L,map_get(L, Preds)} || L <- Top0],
%% The flow graph for an Erlang function is reducible, and
%% therefore one traversal in reverse postorder is sufficient.
- iter_dominators(Top, #{}).
+ Acc = #{0=>[0]},
+ {dominators_1(Top, Df, Acc),Df}.
+
+%% common_dominators([Label], Dominators, Numbering) -> [Label].
+%% Calculate the common dominators for the given list of blocks
+%% and Dominators and Numbering as returned from dominators/1.
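+%%
+%% A minimal usage sketch (L1 and L2 are labels of reachable blocks):
+%%
+%%     {Dom,Numbering} = dominators(Blocks),
+%%     [Nearest|_] = common_dominators([L1,L2], Dom, Numbering),
+%%     %% Nearest is the closest block that dominates both L1 and L2.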
+
+-spec common_dominators([label()], dominator_map(), numbering_map()) -> [label()].
+common_dominators(Ls, Dom, Numbering) ->
+ Doms = [map_get(L, Dom) || L <- Ls],
+ dom_intersection(Doms, Numbering).
-spec fold_instrs_rpo(Fun, From, Acc0, Blocks) -> any() when
Fun :: fun((b_blk()|terminator(), any()) -> any()),
@@ -230,6 +375,26 @@ fold_instrs_rpo(Fun, From, Acc0, Blocks) ->
Top = rpo(From, Blocks),
fold_instrs_rpo_1(Top, Fun, Blocks, Acc0).
+%% Like mapfold_instrs_rpo but at the block level to support lookahead and
+%% scope-dependent transformations.
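+%%
+%% A sketch (the `visited` annotation key is made up for this example):
+%%
+%%     {Blocks,N} =
+%%         mapfold_blocks_rpo(
+%%           fun(_Lbl, Blk, Count) ->
+%%                   {add_anno(visited, Count, Blk), Count + 1}
+%%           end, [0], 0, Blocks0),
+%%     %% N is the number of blocks reachable from block 0.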
+-spec mapfold_blocks_rpo(Fun, From, Acc, Blocks) -> Result when
+ Fun :: fun((label(), b_blk(), any()) -> {b_blk(), any()}),
+ From :: [label()],
+ Acc :: any(),
+ Blocks :: block_map(),
+ Result :: {block_map(), any()}.
+mapfold_blocks_rpo(Fun, From, Acc, Blocks) ->
+ Successors = rpo(From, Blocks),
+ foldl(fun(Lbl, A) ->
+ mapfold_blocks_rpo_1(Fun, Lbl, A)
+ end, {Blocks, Acc}, Successors).
+
+mapfold_blocks_rpo_1(Fun, Lbl, {Blocks0, Acc0}) ->
+ Block0 = map_get(Lbl, Blocks0),
+ {Block, Acc} = Fun(Lbl, Block0, Acc0),
+ Blocks = Blocks0#{Lbl:=Block},
+ {Blocks, Acc}.
+
-spec mapfold_instrs_rpo(Fun, From, Acc0, Blocks0) -> {Blocks,Acc} when
Fun :: fun((b_blk()|terminator(), any()) -> any()),
From :: [label()],
@@ -312,14 +477,19 @@ fold_po(Fun, From, Acc0, Blocks) ->
%% linearize(Blocks) -> [{BlockLabel,#b_blk{}}].
%% Linearize the intermediate representation of the code.
+%% Unreachable blocks will be discarded, and phi nodes will
+%% be adjusted so that they no longer refer to discarded
+%% blocks or to blocks that are no longer predecessors of
+%% the phi node block.
-spec linearize(Blocks) -> Linear when
Blocks :: block_map(),
Linear :: [{label(),b_blk()}].
linearize(Blocks) ->
- Seen = gb_sets:empty(),
- {Linear,_} = linearize_1([0], Blocks, Seen, []),
+ Seen = cerl_sets:new(),
+ {Linear0,_} = linearize_1([0], Blocks, Seen, []),
+ Linear = fix_phis(Linear0, #{}),
Linear.
-spec rpo(Blocks) -> [Label] when
@@ -335,18 +505,18 @@ rpo(Blocks) ->
Labels :: [label()].
rpo(From, Blocks) ->
- Seen = gb_sets:empty(),
+ Seen = cerl_sets:new(),
{Ls,_} = rpo_1(From, Blocks, Seen, []),
Ls.
-spec rename_vars(Rename, [label()], block_map()) -> block_map() when
- Rename :: [{var_name(),value()}] | #{var_name():=value()}.
+ Rename :: rename_map() | rename_proplist().
rename_vars(Rename, From, Blocks) when is_list(Rename) ->
rename_vars(maps:from_list(Rename), From, Blocks);
rename_vars(Rename, From, Blocks) when is_map(Rename)->
Top = rpo(From, Blocks),
- Preds = gb_sets:from_list(Top),
+ Preds = cerl_sets:from_list(Top),
F = fun(#b_set{op=phi,args=Args0}=Set) ->
Args = rename_phi_vars(Args0, Preds, Rename),
Set#b_set{args=Args};
@@ -379,6 +549,19 @@ split_blocks(P, Blocks, Count) ->
Ls = beam_ssa:rpo(Blocks),
split_blocks_1(Ls, P, Blocks, Count).
+-spec trim_unreachable(Blocks0) -> Blocks when
+ Blocks0 :: block_map(),
+ Blocks :: block_map().
+
+%% trim_unreachable(Blocks0) -> Blocks.
+%% Remove all unreachable blocks. Adjust all phi nodes so
+%% they don't refer to blocks that have been removed or that
+%% no longer branch to the phi node in question.
+
+trim_unreachable(Blocks) ->
+ %% Could perhaps be optimized if there is any need.
+ maps:from_list(linearize(Blocks)).
+
%% update_phi_labels([BlockLabel], Old, New, Blocks0) -> Blocks.
%% In the given blocks, replace label Old in with New in all
%% phi nodes. This is useful after merging or splitting
@@ -408,42 +591,83 @@ update_phi_labels([], _, _, Blocks) -> Blocks.
used(#b_blk{is=Is,last=Last}) ->
used_1([Last|Is], ordsets:new());
-used(#b_br{bool=#b_var{name=V}}) ->
+used(#b_br{bool=#b_var{}=V}) ->
[V];
-used(#b_ret{arg=#b_var{name=V}}) ->
+used(#b_ret{arg=#b_var{}=V}) ->
[V];
used(#b_set{op=phi,args=Args}) ->
- ordsets:from_list([V || {#b_var{name=V},_} <- Args]);
+ ordsets:from_list([V || {#b_var{}=V,_} <- Args]);
used(#b_set{args=Args}) ->
ordsets:from_list(used_args(Args));
-used(#b_switch{arg=#b_var{name=V}}) ->
+used(#b_switch{arg=#b_var{}=V}) ->
[V];
used(_) -> [].
+-spec definitions(Blocks :: block_map()) -> definition_map().
+definitions(Blocks) ->
+ fold_instrs_rpo(fun(#b_set{ dst = Var }=I, Acc) ->
+ Acc#{Var => I};
+ (_Terminator, Acc) ->
+ Acc
+ end, [0], #{}, Blocks).
+
+-spec uses(Blocks :: block_map()) -> usage_map().
+uses(Blocks) ->
+ uses([0], Blocks).
+
+-spec uses(From, Blocks) -> usage_map() when
+ From :: [label()],
+ Blocks :: block_map().
+uses(From, Blocks) ->
+ fold_rpo(fun fold_uses_block/3, From, #{}, Blocks).
+
+fold_uses_block(Lbl, #b_blk{is=Is,last=Last}, UseMap0) ->
+ F = fun(I, UseMap) ->
+ foldl(fun(Var, Acc) ->
+ Uses0 = maps:get(Var, Acc, []),
+ Uses = [{Lbl, I} | Uses0],
+ maps:put(Var, Uses, Acc)
+ end, UseMap, used(I))
+ end,
+ F(Last, foldl(F, UseMap0, Is)).
+
%%%
%%% Internal functions.
%%%
+is_commutative('and') -> true;
+is_commutative('or') -> true;
+is_commutative('xor') -> true;
+is_commutative('band') -> true;
+is_commutative('bor') -> true;
+is_commutative('bxor') -> true;
+is_commutative('+') -> true;
+is_commutative('*') -> true;
+is_commutative('=:=') -> true;
+is_commutative('==') -> true;
+is_commutative('=/=') -> true;
+is_commutative('/=') -> true;
+is_commutative(_) -> false.
+
def_used_1([#b_blk{is=Is,last=Last}|Bs], Preds, Def0, Used0) ->
{Def,Used1} = def_used_is(Is, Preds, Def0, Used0),
- Used = gb_sets:union(gb_sets:from_list(used(Last)), Used1),
+ Used = ordsets:union(used(Last), Used1),
def_used_1(Bs, Preds, Def, Used);
def_used_1([], _Preds, Def, Used) ->
- {ordsets:from_list(Def),gb_sets:to_list(Used)}.
+ {ordsets:from_list(Def),Used}.
-def_used_is([#b_set{op=phi,dst=#b_var{name=Dst},args=Args}|Is],
+def_used_is([#b_set{op=phi,dst=Dst,args=Args}|Is],
Preds, Def0, Used0) ->
Def = [Dst|Def0],
%% We must be careful to only include variables that will
%% be used when arriving from one of the predecessor blocks
%% in Preds.
- Used1 = [V || {#b_var{name=V},L} <- Args,
- gb_sets:is_member(L, Preds)],
- Used = gb_sets:union(gb_sets:from_list(Used1), Used0),
+ Used1 = [V || {#b_var{}=V,L} <- Args, cerl_sets:is_element(L, Preds)],
+ Used = ordsets:union(ordsets:from_list(Used1), Used0),
def_used_is(Is, Preds, Def, Used);
-def_used_is([#b_set{dst=#b_var{name=Dst}}=I|Is], Preds, Def0, Used0) ->
+def_used_is([#b_set{dst=Dst}=I|Is], Preds, Def0, Used0) ->
Def = [Dst|Def0],
- Used = gb_sets:union(gb_sets:from_list(used(I)), Used0),
+ Used = ordsets:union(used(I), Used0),
def_used_is(Is, Preds, Def, Used);
def_used_is([], _Preds, Def, Used) ->
{Def,Used}.
@@ -454,59 +678,82 @@ def_1([#b_blk{is=Is}|Bs], Def0) ->
def_1([], Def) ->
ordsets:from_list(Def).
-def_is([#b_set{dst=#b_var{name=Dst}}|Is], Def) ->
+def_is([#b_set{dst=Dst}|Is], Def) ->
def_is(Is, [Dst|Def]);
def_is([], Def) -> Def.
-iter_dominators([{0,[]}|Ls], _Doms) ->
- Dom = [0],
- iter_dominators(Ls, #{0=>Dom});
-iter_dominators([{L,Preds}|Ls], Doms) ->
- DomPreds = [maps:get(P, Doms) || P <- Preds, maps:is_key(P, Doms)],
- Dom = ordsets:add_element(L, ordsets:intersection(DomPreds)),
- iter_dominators(Ls, Doms#{L=>Dom});
-iter_dominators([], Doms) -> Doms.
+dominators_1([{L,Preds}|Ls], Df, Doms) ->
+ DomPreds = [map_get(P, Doms) || P <- Preds, is_map_key(P, Doms)],
+ Dom = [L|dom_intersection(DomPreds, Df)],
+ dominators_1(Ls, Df, Doms#{L=>Dom});
+dominators_1([], _Df, Doms) -> Doms.
+
+dom_intersection([S], _Df) ->
+ S;
+dom_intersection([S|Ss], Df) ->
+ dom_intersection(S, Ss, Df).
+
+dom_intersection(S1, [S2|Ss], Df) ->
+ dom_intersection(dom_intersection_1(S1, S2, Df), Ss, Df);
+dom_intersection(S, [], _Df) -> S.
+
+dom_intersection_1([E1|Es1]=Set1, [E2|Es2]=Set2, Df) ->
+ %% Blocks are numbered in the order they are found in
+ %% reverse postorder.
+ #{E1:=Df1,E2:=Df2} = Df,
+ if Df1 > Df2 ->
+ dom_intersection_1(Es1, Set2, Df);
+ Df2 > Df1 ->
+ dom_intersection_1(Es2, Set1, Df); %switch arguments!
+ true -> %Set1 == Set2
+ %% The common suffix of the sets is the intersection.
+ Set1
+ end.
+
+number([L|Ls], N) ->
+ [{L,N}|number(Ls, N+1)];
+number([], _) -> [].
fold_rpo_1([L|Ls], Fun, Blocks, Acc0) ->
- Block = maps:get(L, Blocks),
+ Block = map_get(L, Blocks),
Acc = Fun(L, Block, Acc0),
fold_rpo_1(Ls, Fun, Blocks, Acc);
fold_rpo_1([], _, _, Acc) -> Acc.
fold_instrs_rpo_1([L|Ls], Fun, Blocks, Acc0) ->
- #b_blk{is=Is,last=Last} = maps:get(L, Blocks),
+ #b_blk{is=Is,last=Last} = map_get(L, Blocks),
Acc1 = foldl(Fun, Acc0, Is),
Acc = Fun(Last, Acc1),
fold_instrs_rpo_1(Ls, Fun, Blocks, Acc);
fold_instrs_rpo_1([], _, _, Acc) -> Acc.
mapfold_instrs_rpo_1([L|Ls], Fun, Blocks0, Acc0) ->
- #b_blk{is=Is0,last=Last0} = Block0 = maps:get(L, Blocks0),
+ #b_blk{is=Is0,last=Last0} = Block0 = map_get(L, Blocks0),
{Is,Acc1} = mapfoldl(Fun, Acc0, Is0),
{Last,Acc} = Fun(Last0, Acc1),
Block = Block0#b_blk{is=Is,last=Last},
- Blocks = maps:put(L, Block, Blocks0),
+ Blocks = Blocks0#{L:=Block},
mapfold_instrs_rpo_1(Ls, Fun, Blocks, Acc);
mapfold_instrs_rpo_1([], _, Blocks, Acc) ->
{Blocks,Acc}.
flatmapfold_instrs_rpo_1([L|Ls], Fun, Blocks0, Acc0) ->
- #b_blk{is=Is0,last=Last0} = Block0 = maps:get(L, Blocks0),
+ #b_blk{is=Is0,last=Last0} = Block0 = map_get(L, Blocks0),
{Is,Acc1} = flatmapfoldl(Fun, Acc0, Is0),
{[Last],Acc} = Fun(Last0, Acc1),
Block = Block0#b_blk{is=Is,last=Last},
- Blocks = maps:put(L, Block, Blocks0),
+ Blocks = Blocks0#{L:=Block},
flatmapfold_instrs_rpo_1(Ls, Fun, Blocks, Acc);
flatmapfold_instrs_rpo_1([], _, Blocks, Acc) ->
{Blocks,Acc}.
linearize_1([L|Ls], Blocks, Seen0, Acc0) ->
- case gb_sets:is_member(L, Seen0) of
+ case cerl_sets:is_element(L, Seen0) of
true ->
linearize_1(Ls, Blocks, Seen0, Acc0);
false ->
- Seen1 = gb_sets:insert(L, Seen0),
- Block = maps:get(L, Blocks),
+ Seen1 = cerl_sets:add_element(L, Seen0),
+ Block = map_get(L, Blocks),
Successors = successors(Block),
{Acc,Seen} = linearize_1(Successors, Blocks, Seen1, Acc0),
linearize_1(Ls, Blocks, Seen, [{L,Block}|Acc])
@@ -514,13 +761,40 @@ linearize_1([L|Ls], Blocks, Seen0, Acc0) ->
linearize_1([], _, Seen, Acc) ->
{Acc,Seen}.
+fix_phis([{L,Blk0}|Bs], S) ->
+ Blk = case Blk0 of
+ #b_blk{is=[#b_set{op=phi}|_]=Is0} ->
+ Is = fix_phis_1(Is0, L, S),
+ Blk0#b_blk{is=Is};
+ #b_blk{} ->
+ Blk0
+ end,
+ Successors = successors(Blk),
+ [{L,Blk}|fix_phis(Bs, S#{L=>Successors})];
+fix_phis([], _) -> [].
+
+fix_phis_1([#b_set{op=phi,args=Args0}=I|Is], L, S) ->
+ Args = [{Val,Pred} || {Val,Pred} <- Args0,
+ is_successor(L, Pred, S)],
+ [I#b_set{args=Args}|fix_phis_1(Is, L, S)];
+fix_phis_1(Is, _, _) -> Is.
+
+is_successor(L, Pred, S) ->
+ case S of
+ #{Pred:=Successors} ->
+ member(L, Successors);
+ #{} ->
+ %% This block has been removed.
+ false
+ end.
+
rpo_1([L|Ls], Blocks, Seen0, Acc0) ->
- case gb_sets:is_member(L, Seen0) of
+ case cerl_sets:is_element(L, Seen0) of
true ->
rpo_1(Ls, Blocks, Seen0, Acc0);
false ->
- Block = maps:get(L, Blocks),
- Seen1 = gb_sets:insert(L, Seen0),
+ Block = map_get(L, Blocks),
+ Seen1 = cerl_sets:add_element(L, Seen0),
Successors = successors(Block),
{Acc,Seen} = rpo_1(Successors, Blocks, Seen1, Acc0),
rpo_1(Ls, Blocks, Seen, [L|Acc])
@@ -528,10 +802,10 @@ rpo_1([L|Ls], Blocks, Seen0, Acc0) ->
rpo_1([], _, Seen, Acc) ->
{Acc,Seen}.
-rename_var(#b_var{name=Old}=V, Rename) ->
+rename_var(#b_var{}=Old, Rename) ->
case Rename of
#{Old:=New} -> New;
- #{} -> V
+ #{} -> Old
end;
rename_var(#b_remote{mod=Mod0,name=Name0}=Remote, Rename) ->
Mod = rename_var(Mod0, Rename),
@@ -540,7 +814,7 @@ rename_var(#b_remote{mod=Mod0,name=Name0}=Remote, Rename) ->
rename_var(Old, _) -> Old.
rename_phi_vars([{Var,L}|As], Preds, Ren) ->
- case gb_sets:is_member(L, Preds) of
+ case cerl_sets:is_element(L, Preds) of
true ->
[{rename_var(Var, Ren),L}|rename_phi_vars(As, Preds, Ren)];
false ->
@@ -549,11 +823,11 @@ rename_phi_vars([{Var,L}|As], Preds, Ren) ->
rename_phi_vars([], _, _) -> [].
map_instrs_1([L|Ls], Fun, Blocks0) ->
- #b_blk{is=Is0,last=Last0} = Blk0 = maps:get(L, Blocks0),
+ #b_blk{is=Is0,last=Last0} = Blk0 = map_get(L, Blocks0),
Is = [Fun(I) || I <- Is0],
Last = Fun(Last0),
Blk = Blk0#b_blk{is=Is,last=Last},
- Blocks = maps:put(L, Blk, Blocks0),
+ Blocks = Blocks0#{L:=Blk},
map_instrs_1(Ls, Fun, Blocks);
map_instrs_1([], _, Blocks) -> Blocks.
@@ -564,7 +838,7 @@ flatmapfoldl(F, Accu0, [Hd|Tail]) ->
flatmapfoldl(_, Accu, []) -> {[],Accu}.
split_blocks_1([L|Ls], P, Blocks0, Count0) ->
- #b_blk{is=Is0} = Blk = maps:get(L, Blocks0),
+ #b_blk{is=Is0} = Blk = map_get(L, Blocks0),
case split_blocks_is(Is0, P, []) of
{yes,Bef,Aft} ->
NewLbl = Count0,
@@ -573,8 +847,8 @@ split_blocks_1([L|Ls], P, Blocks0, Count0) ->
BefBlk = Blk#b_blk{is=Bef,last=Br},
NewBlk = Blk#b_blk{is=Aft},
Blocks1 = Blocks0#{L:=BefBlk,NewLbl=>NewBlk},
- Successors = beam_ssa:successors(NewBlk),
- Blocks = beam_ssa:update_phi_labels(Successors, L, NewLbl, Blocks1),
+ Successors = successors(NewBlk),
+ Blocks = update_phi_labels(Successors, L, NewLbl, Blocks1),
split_blocks_1([NewLbl|Ls], P, Blocks, Count);
no ->
split_blocks_1(Ls, P, Blocks0, Count0)
@@ -602,7 +876,7 @@ update_phi_labels_is(Is, _, _) -> Is.
rename_label(Old, Old, New) -> New;
rename_label(Lbl, _Old, _New) -> Lbl.
-used_args([#b_var{name=V}|As]) ->
+used_args([#b_var{}=V|As]) ->
[V|used_args(As)];
used_args([#b_remote{mod=Mod,name=Name}|As]) ->
used_args([Mod,Name|As]);
diff --git a/lib/compiler/src/beam_ssa_bsm.erl b/lib/compiler/src/beam_ssa_bsm.erl
new file mode 100644
index 0000000000..382e6f635e
--- /dev/null
+++ b/lib/compiler/src/beam_ssa_bsm.erl
@@ -0,0 +1,1046 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%
+%%% This pass optimizes bit syntax matching, and is centered around the concept
+%%% of "match context reuse" which is best explained through example. To put it
+%%% shortly we attempt to turn this:
+%%%
+%%% <<0,B/bits>> = A,
+%%% <<1,C/bits>> = B,
+%%% <<D,_/bits>> = C,
+%%% D.
+%%%
+%%% ... Into this:
+%%%
+%%% <<0,1,D,_/bits>>=A,
+%%% D.
+%%%
+%%% Which is much faster as it avoids the creation of intermediate terms. This
+%%% is especially noticeable in loops where such garbage is generated on each
+%%% iteration.
+%%%
+%%% The optimization itself is very simple and can be applied whenever there's
+%%% matching on the tail end of a binary; instead of creating a new binary and
+%%% starting a new match context on it, we reuse the match context used to
+%%% extract the tail and avoid the creation of both objects.
+%%%
+%%% The catch is that a match context isn't a proper type and nothing outside
+%%% of bit syntax match operations can handle one. We therefore need to make
+%%% sure that match contexts never "leak" into other instructions, and most
+%%% of the pass deals with working around that limitation.
+%%%
+%%% Unlike most other passes we look at the whole module so we can combine
+%%% matches across function boundaries, greatly increasing the performance of
+%%% complex matches and loops.
+%%%
+
+-module(beam_ssa_bsm).
+
+-export([module/2, format_error/1]).
+
+-include("beam_ssa.hrl").
+
+-import(lists, [member/2, reverse/1, splitwith/2, map/2, foldl/3, mapfoldl/3,
+ nth/2, max/1, unzip/1]).
+
+-spec format_error(term()) -> nonempty_string().
+
+format_error(OptInfo) ->
+ format_opt_info(OptInfo).
+
+-spec module(Module, Options) -> Result when
+ Module :: beam_ssa:b_module(),
+ Options :: [compile:option()],
+ Result :: {ok, beam_ssa:b_module(), list()}.
+
+-define(PASS(N), {N,fun N/1}).
+
+module(#b_module{body=Fs0}=Module, Opts) ->
+ ModInfo = analyze_module(Module),
+
+ %% combine_matches is repeated after accept_context_args as the control
+ %% flow changes can enable further optimizations, as in the example below:
+ %%
+ %% a(<<0,X/binary>>) -> a(X);
+ %% a(A) when bit_size(A) =:= 52 -> bar;
+    %% a(<<1,X/binary>>) -> X. %% Match context will be reused here when
+    %%                         %% combine_matches is repeated.
+
+ {Fs, _} = compile:run_sub_passes(
+ [?PASS(combine_matches),
+ ?PASS(accept_context_args),
+ ?PASS(combine_matches),
+ ?PASS(allow_context_passthrough),
+ ?PASS(skip_outgoing_tail_extraction),
+ ?PASS(annotate_context_parameters)],
+ {Fs0, ModInfo}),
+
+ Ws = case proplists:get_bool(bin_opt_info, Opts) of
+ true -> collect_opt_info(Fs);
+ false -> []
+ end,
+
+ {ok, Module#b_module{body=Fs}, Ws}.
+
+-type module_info() :: #{ func_id() => func_info() }.
+
+-type func_id() :: {Name :: atom(), Arity :: non_neg_integer()}.
+
+-type func_info() :: #{ has_bsm_ops => boolean(),
+ parameters => [#b_var{}],
+ parameter_info => #{ #b_var{} => param_info() } }.
+
+-type param_info() :: suitable_for_reuse |
+ {Problem :: atom(), Where :: term()}.
+
+-spec analyze_module(#b_module{}) -> module_info().
+
+analyze_module(#b_module{body=Fs}) ->
+ foldl(fun(#b_function{args=Parameters}=F, I) ->
+ FuncInfo = #{ has_bsm_ops => has_bsm_ops(F),
+ parameters => Parameters,
+ parameter_info => #{} },
+ FuncId = get_fa(F),
+ I#{ FuncId => FuncInfo }
+ end, #{}, Fs).
+
+has_bsm_ops(#b_function{bs=Blocks}) ->
+ hbo_blocks(maps:to_list(Blocks)).
+
+hbo_blocks([{_,#b_blk{is=Is}} | Blocks]) ->
+ case hbo_is(Is) of
+ false -> hbo_blocks(Blocks);
+ true -> true
+ end;
+hbo_blocks([]) ->
+ false.
+
+hbo_is([#b_set{op=bs_start_match} | _]) -> true;
+hbo_is([_I | Is]) -> hbo_is(Is);
+hbo_is([]) -> false.
+
+%% Checks whether it's legal to make a call with the given argument as a match
+%% context, returning the param_info() of the relevant parameter.
+-spec check_context_call(#b_set{}, Arg, CtxChain, ModInfo) -> param_info() when
+ Arg :: #b_var{},
+ CtxChain :: [#b_var{}],
+ ModInfo :: module_info().
+check_context_call(#b_set{args=Args}, Arg, CtxChain, ModInfo) ->
+ Aliases = [Arg | CtxChain],
+ ccc_1(Args, Arg, Aliases, ModInfo).
+
+ccc_1([#b_local{}=Call | Args], Ctx, Aliases, ModInfo) ->
+ %% Matching operations assume that their context isn't aliased (as in
+ %% pointer aliasing), so we must reject calls whose arguments contain more
+ %% than one reference to the context.
+ %%
+ %% TODO: Try to fall back to passing binaries in these cases. Partial reuse
+ %% is better than nothing.
+ UseCount = foldl(fun(Arg, C) ->
+ case member(Arg, Aliases) of
+ true -> C + 1;
+ false -> C
+ end
+ end, 0, Args),
+ if
+ UseCount =:= 1 ->
+ #b_local{name=#b_literal{val=Name},arity=Arity} = Call,
+ Callee = {Name, Arity},
+
+ ParamInfo = funcinfo_get(Callee, parameter_info, ModInfo),
+ Parameters = funcinfo_get(Callee, parameters, ModInfo),
+ Parameter = nth(1 + arg_index(Ctx, Args), Parameters),
+
+ case maps:find(Parameter, ParamInfo) of
+ {ok, suitable_for_reuse} ->
+ suitable_for_reuse;
+ {ok, Other} ->
+ {unsuitable_call, {Call, Other}};
+ error ->
+ {no_match_on_entry, Call}
+ end;
+ UseCount > 1 ->
+ {multiple_uses_in_call, Call}
+ end;
+ccc_1([#b_remote{}=Call | _Args], _Ctx, _CtxChain, _ModInfo) ->
+ {remote_call, Call};
+ccc_1([Fun | _Args], _Ctx, _CtxChain, _ModInfo) ->
+ %% TODO: It may be possible to support this in the future for locally
+ %% defined funs, including ones with free variables.
+ {fun_call, Fun}.
+
+%% Returns the index of Var in Args.
+arg_index(Var, Args) -> arg_index_1(Var, Args, 0).
+
+arg_index_1(Var, [Var | _Args], Index) -> Index;
+arg_index_1(Var, [_Arg | Args], Index) -> arg_index_1(Var, Args, Index + 1).
+
+is_tail_binary(#b_set{op=bs_match,args=[#b_literal{val=binary} | Rest]}) ->
+ member(#b_literal{val=all}, Rest);
+is_tail_binary(#b_set{op=bs_get_tail}) ->
+ true;
+is_tail_binary(_) ->
+ false.
+
+is_tail_binary(#b_var{}=Var, Defs) ->
+ case find_match_definition(Var, Defs) of
+ {ok, Def} -> is_tail_binary(Def);
+ _ -> false
+ end;
+is_tail_binary(_Literal, _Defs) ->
+ false.
+
+assert_match_context(#b_var{}=Var, Defs) ->
+ case maps:find(Var, Defs) of
+ {ok, #b_set{op=bs_match,args=[_,#b_var{}=Ctx|_]}} ->
+ assert_match_context(Ctx, Defs);
+ {ok, #b_set{op=bs_start_match}} ->
+ ok
+ end.
+
+find_match_definition(#b_var{}=Var, Defs) ->
+ case maps:find(Var, Defs) of
+ {ok, #b_set{op=bs_extract,args=[Ctx]}} -> maps:find(Ctx, Defs);
+ {ok, #b_set{op=bs_get_tail}=Def} -> {ok, Def};
+ _ -> error
+ end.
+
+%% Returns a list of all contexts that were used to extract Var.
+context_chain_of(#b_var{}=Var, Defs) ->
+ case maps:find(Var, Defs) of
+ {ok, #b_set{op=bs_match,args=[_,#b_var{}=Ctx|_]}} ->
+ [Ctx | context_chain_of(Ctx, Defs)];
+ {ok, #b_set{op=bs_get_tail,args=[Ctx]}} ->
+ [Ctx | context_chain_of(Ctx, Defs)];
+ {ok, #b_set{op=bs_extract,args=[Ctx]}} ->
+ [Ctx | context_chain_of(Ctx, Defs)];
+ _ ->
+ []
+ end.
+
+%% Grabs the match context used to produce the given variable.
+match_context_of(#b_var{}=Var, Defs) ->
+ Ctx = match_context_of_1(Var, Defs),
+ assert_match_context(Ctx, Defs),
+ Ctx.
+
+match_context_of_1(Var, Defs) ->
+ case maps:get(Var, Defs) of
+ #b_set{op=bs_extract,args=[#b_var{}=Ctx0]} ->
+ #b_set{op=bs_match,
+ args=[_,#b_var{}=Ctx|_]} = maps:get(Ctx0, Defs),
+ Ctx;
+ #b_set{op=bs_get_tail,args=[#b_var{}=Ctx]} ->
+ Ctx
+ end.
+
+funcinfo_get(#b_function{}=F, Attribute, ModInfo) ->
+ funcinfo_get(get_fa(F), Attribute, ModInfo);
+funcinfo_get({_,_}=Key, Attribute, ModInfo) ->
+ FuncInfo = maps:get(Key, ModInfo),
+ maps:get(Attribute, FuncInfo).
+
+funcinfo_set(#b_function{}=F, Attribute, Value, ModInfo) ->
+ funcinfo_set(get_fa(F), Attribute, Value, ModInfo);
+funcinfo_set(Key, Attribute, Value, ModInfo) ->
+ FuncInfo = maps:put(Attribute, Value, maps:get(Key, ModInfo, #{})),
+ maps:put(Key, FuncInfo, ModInfo).
+
+get_fa(#b_function{ anno = Anno }) ->
+ {_,Name,Arity} = maps:get(func_info, Anno),
+ {Name,Arity}.
+
+%% Replaces matched-out binaries with aliases that are lazily converted to
+%% binary form when used, allowing us to keep the "match path" free of binary
+%% creation.
+
+-spec alias_matched_binaries(Blocks, Counter, AliasMap) -> Result when
+ Blocks :: beam_ssa:block_map(),
+ Counter :: non_neg_integer(),
+ AliasMap :: match_alias_map(),
+ Result :: {Blocks, Counter}.
+
+-type match_alias_map() ::
+ #{ Binary :: #b_var{} =>
+ { %% Replace all uses of Binary with an alias after this
+ %% label.
+ AliasAfter :: beam_ssa:label(),
+ %% The match context whose tail is equal to Binary.
+ Context :: #b_var{} } }.
+
+%% Keeps track of the promotions we need to insert. They're partially keyed by
+%% location because they may not be valid on all execution paths and we may
+%% need to add redundant promotions in some cases.
+-type promotion_map() ::
+ #{ { PromoteAt :: beam_ssa:label(),
+ Variable :: #b_var{} } =>
+ Instruction :: #b_set{} }.
+
+-record(amb, { dominators :: beam_ssa:dominator_map(),
+ match_aliases :: match_alias_map(),
+ cnt :: non_neg_integer(),
+ promotions = #{} :: promotion_map() }).
+
+alias_matched_binaries(Blocks0, Counter, AliasMap) when AliasMap =/= #{} ->
+ {Dominators, _} = beam_ssa:dominators(Blocks0),
+ State0 = #amb{ dominators = Dominators,
+ match_aliases = AliasMap,
+ cnt = Counter },
+ {Blocks, State} = beam_ssa:mapfold_blocks_rpo(fun amb_1/3, [0], State0,
+ Blocks0),
+ {amb_insert_promotions(Blocks, State), State#amb.cnt};
+alias_matched_binaries(Blocks, Counter, _AliasMap) ->
+ {Blocks, Counter}.
+
+amb_1(Lbl, #b_blk{is=Is0,last=Last0}=Block, State0) ->
+ {Is, State1} = mapfoldl(fun(I, State) ->
+ amb_assign_set(I, Lbl, State)
+ end, State0, Is0),
+ {Last, State} = amb_assign_last(Last0, Lbl, State1),
+ {Block#b_blk{is=Is,last=Last}, State}.
+
+amb_assign_set(#b_set{op=phi,args=Args0}=I, _Lbl, State0) ->
+ %% Phi node aliases are relative to their source block, not their
+ %% containing block.
+ {Args, State} =
+ mapfoldl(fun({Arg0, Lbl}, Acc) ->
+ {Arg, State} = amb_get_alias(Arg0, Lbl, Acc),
+ {{Arg, Lbl}, State}
+ end, State0, Args0),
+ {I#b_set{args=Args}, State};
+amb_assign_set(#b_set{args=Args0}=I, Lbl, State0) ->
+ {Args, State} = mapfoldl(fun(Arg0, Acc) ->
+ amb_get_alias(Arg0, Lbl, Acc)
+ end, State0, Args0),
+ {I#b_set{args=Args}, State}.
+
+amb_assign_last(#b_ret{arg=Arg0}=T, Lbl, State0) ->
+ {Arg, State} = amb_get_alias(Arg0, Lbl, State0),
+ {T#b_ret{arg=Arg}, State};
+amb_assign_last(#b_switch{arg=Arg0}=T, Lbl, State0) ->
+ {Arg, State} = amb_get_alias(Arg0, Lbl, State0),
+ {T#b_switch{arg=Arg}, State};
+amb_assign_last(#b_br{bool=Arg0}=T, Lbl, State0) ->
+ {Arg, State} = amb_get_alias(Arg0, Lbl, State0),
+ {T#b_br{bool=Arg}, State}.
+
+amb_get_alias(#b_var{}=Arg, Lbl, State) ->
+ case maps:find(Arg, State#amb.match_aliases) of
+ {ok, {AliasAfter, Context}} ->
+ %% Our context may not have been created yet, so we skip assigning
+ %% an alias unless the given block is among our dominators.
+ Dominators = maps:get(Lbl, State#amb.dominators),
+ case member(AliasAfter, Dominators) of
+ true -> amb_create_alias(Arg, Context, Lbl, State);
+ false -> {Arg, State}
+ end;
+ error ->
+ {Arg, State}
+ end;
+amb_get_alias(#b_remote{mod=Mod0,name=Name0}=Arg0, Lbl, State0) ->
+ {Mod, State1} = amb_get_alias(Mod0, Lbl, State0),
+ {Name, State} = amb_get_alias(Name0, Lbl, State1),
+ Arg = Arg0#b_remote{mod=Mod,name=Name},
+ {Arg, State};
+amb_get_alias(Arg, _Lbl, State) ->
+ {Arg, State}.
+
+amb_create_alias(#b_var{}=Arg0, Context, Lbl, State0) ->
+ Dominators = maps:get(Lbl, State0#amb.dominators),
+ Promotions0 = State0#amb.promotions,
+
+ PrevPromotions =
+ [maps:get({Dom, Arg0}, Promotions0)
+ || Dom <- Dominators, is_map_key({Dom, Arg0}, Promotions0)],
+
+ case PrevPromotions of
+ [_|_] ->
+ %% We've already created an alias prior to this block, so we'll
+ %% grab the most recent one to minimize stack use.
+
+ #b_set{dst=Alias} = max(PrevPromotions),
+ {Alias, State0};
+ [] ->
+ %% If we haven't created an alias we need to do so now. The
+ %% promotion will be inserted later by amb_insert_promotions/2.
+
+ Counter = State0#amb.cnt,
+ Alias = #b_var{name={'@ssa_bsm_alias', Counter}},
+ Promotion = #b_set{op=bs_get_tail,dst=Alias,args=[Context]},
+
+ Promotions = maps:put({Lbl, Arg0}, Promotion, Promotions0),
+ State = State0#amb{ promotions=Promotions, cnt=Counter+1 },
+
+ {Alias, State}
+ end.
+
+amb_insert_promotions(Blocks0, State) ->
+ F = fun({Lbl, #b_var{}}, Promotion, Blocks) ->
+ Block = maps:get(Lbl, Blocks),
+
+ Alias = Promotion#b_set.dst,
+ {Before, After} = splitwith(
+ fun(#b_set{args=Args}) ->
+ not is_var_in_args(Alias, Args)
+ end, Block#b_blk.is),
+ Is = Before ++ [Promotion | After],
+
+ maps:put(Lbl, Block#b_blk{is=Is}, Blocks)
+ end,
+ maps:fold(F, Blocks0, State#amb.promotions).
+
+is_var_in_args(Var, [Var | _]) -> true;
+is_var_in_args(Var, [#b_remote{name=Var} | _]) -> true;
+is_var_in_args(Var, [#b_remote{mod=Var} | _]) -> true;
+is_var_in_args(Var, [_ | Args]) -> is_var_in_args(Var, Args);
+is_var_in_args(_Var, []) -> false.
+
+%%%
+%%% Subpasses
+%%%
+
+%% Removes superfluous chained bs_start_match instructions in the same
+%% function. When matching on an extracted tail binary, or on a binary we've
+%% already matched on, we reuse the original match context.
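+%%
+%% For example (a source-level sketch):
+%%
+%%     f(Bin) ->
+%%         <<_:8,Tail/binary>> = Bin,
+%%         <<X:8,_/binary>> = Tail,   %% reuses the match context of Bin
+%%         X.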
+%%
+%% This pass runs first since it makes subsequent optimizations more effective
+%% by removing spots where promotion would be required.
+
+-type prior_match_map() ::
+ #{ Binary :: #b_var{} =>
+ [{ %% The context and success label of a previous
+ %% bs_start_match made on this binary.
+ ValidAfter :: beam_ssa:label(),
+ Context :: #b_var{} }] }.
+
+-record(cm, { definitions :: beam_ssa:definition_map(),
+ dominators :: beam_ssa:dominator_map(),
+ blocks :: beam_ssa:block_map(),
+ match_aliases = #{} :: match_alias_map(),
+ prior_matches = #{} :: prior_match_map(),
+ renames = #{} :: beam_ssa:rename_map() }).
+
+combine_matches({Fs0, ModInfo}) ->
+ Fs = map(fun(F) -> combine_matches(F, ModInfo) end, Fs0),
+ {Fs, ModInfo}.
+
+combine_matches(#b_function{bs=Blocks0,cnt=Counter0}=F, ModInfo) ->
+ case funcinfo_get(F, has_bsm_ops, ModInfo) of
+ true ->
+ {Dominators, _} = beam_ssa:dominators(Blocks0),
+ {Blocks1, State} =
+ beam_ssa:mapfold_blocks_rpo(
+ fun(Lbl, #b_blk{is=Is0}=Block0, State0) ->
+ {Is, State} = cm_1(Is0, [], Lbl, State0),
+ {Block0#b_blk{is=Is}, State}
+ end, [0],
+ #cm{ definitions = beam_ssa:definitions(Blocks0),
+ dominators = Dominators,
+ blocks = Blocks0 },
+ Blocks0),
+
+ Blocks2 = beam_ssa:rename_vars(State#cm.renames, [0], Blocks1),
+
+ {Blocks, Counter} = alias_matched_binaries(Blocks2, Counter0,
+ State#cm.match_aliases),
+
+ F#b_function{ bs=Blocks, cnt=Counter };
+ false ->
+ F
+ end.
+
+cm_1([#b_set{ op=bs_start_match,
+ dst=Ctx,
+ args=[Src] },
+ #b_set{ op=succeeded,
+ dst=Bool,
+ args=[Ctx] }]=MatchSeq, Acc0, Lbl, State0) ->
+ Acc = reverse(Acc0),
+ case is_tail_binary(Src, State0#cm.definitions) of
+ true -> cm_combine_tail(Src, Ctx, Bool, Acc, State0);
+ false -> cm_handle_priors(Src, Ctx, Bool, Acc, MatchSeq, Lbl, State0)
+ end;
+cm_1([I | Is], Acc, Lbl, State) ->
+ cm_1(Is, [I | Acc], Lbl, State);
+cm_1([], Acc, _Lbl, State) ->
+ {reverse(Acc), State}.
+
+%% If we're dominated by at least one match on the same source, we can reuse
+%% the context created by that match.
+cm_handle_priors(Src, DstCtx, Bool, Acc, MatchSeq, Lbl, State0) ->
+ PriorCtxs = case maps:find(Src, State0#cm.prior_matches) of
+ {ok, Priors} ->
+ %% We've seen other match contexts on this source, but
+                        %% we can only consider the ones whose success path
+                        %% dominates us.
+ Dominators = maps:get(Lbl, State0#cm.dominators, []),
+ [Ctx || {ValidAfter, Ctx} <- Priors,
+ member(ValidAfter, Dominators)];
+ error ->
+ []
+ end,
+ case PriorCtxs of
+ [Ctx|_] ->
+ Renames0 = State0#cm.renames,
+ Renames = Renames0#{ Bool => #b_literal{val=true}, DstCtx => Ctx },
+ {Acc, State0#cm{ renames = Renames }};
+ [] ->
+ %% Since we lack a prior match, we need to register this one in
+ %% case we dominate another.
+ State = cm_register_prior(Src, DstCtx, Lbl, State0),
+ {Acc ++ MatchSeq, State}
+ end.
+
+cm_register_prior(Src, DstCtx, Lbl, State) ->
+ Block = maps:get(Lbl, State#cm.blocks),
+ #b_br{succ=ValidAfter} = Block#b_blk.last,
+
+ Priors0 = maps:get(Src, State#cm.prior_matches, []),
+ Priors = [{ValidAfter, DstCtx} | Priors0],
+
+ PriorMatches = maps:put(Src, Priors, State#cm.prior_matches),
+ State#cm{ prior_matches = PriorMatches }.
+
+cm_combine_tail(Src, DstCtx, Bool, Acc, State0) ->
+ SrcCtx = match_context_of(Src, State0#cm.definitions),
+
+ %% We replace the source with a context alias as it normally won't be used
+ %% on the happy path after being matched, and the added cost of conversion
+ %% is negligible if it is.
+ Aliases = maps:put(Src, {0, SrcCtx}, State0#cm.match_aliases),
+
+ Renames0 = State0#cm.renames,
+ Renames = Renames0#{ Bool => #b_literal{val=true}, DstCtx => SrcCtx },
+
+ State = State0#cm{ match_aliases = Aliases, renames = Renames },
+
+ {Acc, State}.
+
+%% Lets functions accept match contexts as arguments. The parameter must be
+%% unused before the bs_start_match instruction, and it must be matched in the
+%% first block.
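+%%
+%% For example (a source-level sketch with made-up names):
+%%
+%%     get_tag(Bin) ->
+%%         <<Tag:8,_/binary>> = Bin,
+%%         Tag.
+%%
+%% Bin is matched in the first block and is not used before the match,
+%% so callers may pass a match context instead of a binary (see
+%% skip_outgoing_tail_extraction).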
+
+-record(aca, { unused_parameters :: ordsets:ordset(#b_var{}),
+ counter :: non_neg_integer(),
+ parameter_info = #{} :: #{ #b_var{} => param_info() },
+ match_aliases = #{} :: match_alias_map() }).
+
+accept_context_args({Fs, ModInfo}) ->
+ mapfoldl(fun accept_context_args/2, ModInfo, Fs).
+
+accept_context_args(#b_function{bs=Blocks0}=F, ModInfo0) ->
+ case funcinfo_get(F, has_bsm_ops, ModInfo0) of
+ true ->
+ Parameters = ordsets:from_list(funcinfo_get(F, parameters, ModInfo0)),
+ State0 = #aca{ unused_parameters = Parameters,
+ counter = F#b_function.cnt },
+
+ {Blocks1, State} = aca_1(Blocks0, State0),
+ {Blocks, Counter} = alias_matched_binaries(Blocks1,
+ State#aca.counter,
+ State#aca.match_aliases),
+
+ ModInfo = funcinfo_set(F, parameter_info, State#aca.parameter_info,
+ ModInfo0),
+
+ {F#b_function{bs=Blocks,cnt=Counter}, ModInfo};
+ false ->
+ {F, ModInfo0}
+ end.
+
+aca_1(Blocks, State) ->
+ %% We only handle block 0 as we don't yet support starting a match after a
+ %% test. This is generally good enough as the sys_core_bsm pass makes the
+ %% match instruction come first if possible, and it's rare for a function
+ %% to binary-match several parameters at once.
+ EntryBlock = maps:get(0, Blocks),
+ aca_enable_reuse(EntryBlock#b_blk.is, EntryBlock, Blocks, [], State).
+
+aca_enable_reuse([#b_set{op=bs_start_match,args=[Src]}=I0 | Rest],
+ EntryBlock, Blocks0, Acc, State0) ->
+ case aca_is_reuse_safe(Src, State0) of
+ true ->
+ {I, Last, Blocks1, State} =
+ aca_reuse_context(I0, EntryBlock, Blocks0, State0),
+
+ Is = reverse([I|Acc]) ++ Rest,
+ Blocks = maps:put(0, EntryBlock#b_blk{is=Is,last=Last}, Blocks1),
+
+ {Blocks, State};
+ false ->
+ {Blocks0, State0}
+ end;
+aca_enable_reuse([I | Is], EntryBlock, Blocks, Acc, State0) ->
+ UnusedParams0 = State0#aca.unused_parameters,
+ case ordsets:intersection(UnusedParams0, beam_ssa:used(I)) of
+ [] ->
+ aca_enable_reuse(Is, EntryBlock, Blocks, [I | Acc], State0);
+ PrematureUses ->
+ UnusedParams = ordsets:subtract(UnusedParams0, PrematureUses),
+
+ %% Mark the offending parameters as unsuitable for context reuse.
+ ParamInfo = foldl(fun(A, Ps) ->
+ maps:put(A, {used_before_match, I}, Ps)
+ end, State0#aca.parameter_info, PrematureUses),
+
+ State = State0#aca{ unused_parameters = UnusedParams,
+ parameter_info = ParamInfo },
+ aca_enable_reuse(Is, EntryBlock, Blocks, [I | Acc], State)
+ end;
+aca_enable_reuse([], _EntryBlock, Blocks, _Acc, State) ->
+ {Blocks, State}.
+
+aca_is_reuse_safe(Src, State) ->
+ %% Context reuse is unsafe unless all uses are dominated by the start_match
+ %% instruction. Since we only process block 0 it's enough to check if
+ %% they're unused so far.
+ ordsets:is_element(Src, State#aca.unused_parameters).
+
+aca_reuse_context(#b_set{dst=Dst, args=[Src]}=I0, Block, Blocks0, State0) ->
+ %% When matching fails on a reused context it needs to be converted back
+ %% to a binary. We only need to do this on the success path since it can't
+ %% be a context on the type failure path, but it's very common for these
+    %% to converge, which requires special handling.
+ {State1, Last, Blocks} =
+ aca_handle_convergence(Src, State0, Block#b_blk.last, Blocks0),
+
+ Aliases = maps:put(Src, {Last#b_br.succ, Dst}, State1#aca.match_aliases),
+ ParamInfo = maps:put(Src, suitable_for_reuse, State1#aca.parameter_info),
+
+ State = State1#aca{ match_aliases = Aliases,
+ parameter_info = ParamInfo },
+
+ I = beam_ssa:add_anno(accepts_match_contexts, true, I0),
+
+ {I, Last, Blocks, State}.
+
+aca_handle_convergence(Src, State0, Last0, Blocks0) ->
+ #b_br{fail=Fail0,succ=Succ0} = Last0,
+
+ SuccPath = beam_ssa:rpo([Succ0], Blocks0),
+ FailPath = beam_ssa:rpo([Fail0], Blocks0),
+
+ %% The promotion logic in alias_matched_binaries breaks down if the source
+ %% is used after the fail/success paths converge, as we have no way to tell
+ %% whether the source is a match context or something else past that point.
+ %%
+ %% We could handle this through clever insertion of phi nodes but it's
+ %% far simpler to copy either branch in its entirety. It doesn't matter
+ %% which one as long as they become disjoint.
+ ConvergedPaths = ordsets:intersection(
+ ordsets:from_list(SuccPath),
+ ordsets:from_list(FailPath)),
+
+ case maps:is_key(Src, beam_ssa:uses(ConvergedPaths, Blocks0)) of
+ true ->
+ case shortest(SuccPath, FailPath) of
+ left ->
+ {Succ, Blocks, Counter} =
+ aca_copy_successors(Succ0, Blocks0, State0#aca.counter),
+ State = State0#aca{ counter = Counter },
+ {State, Last0#b_br{succ=Succ}, Blocks};
+ right ->
+ {Fail, Blocks, Counter} =
+ aca_copy_successors(Fail0, Blocks0, State0#aca.counter),
+ State = State0#aca{ counter = Counter },
+ {State, Last0#b_br{fail=Fail}, Blocks}
+ end;
+ false ->
+ {State0, Last0, Blocks0}
+ end.
+
+shortest([_|As], [_|Bs]) -> shortest(As, Bs);
+shortest([], _) -> left;
+shortest(_, []) -> right.
+
+%% Copies all successor blocks of Lbl, returning the label to the entry block
+%% of this copy. Since the copied blocks aren't referenced anywhere else, they
+%% are all guaranteed to be dominated by Lbl.
+aca_copy_successors(Lbl0, Blocks0, Counter0) ->
+ %% Building the block rename map up front greatly simplifies phi node
+ %% handling.
+ Path = beam_ssa:rpo([Lbl0], Blocks0),
+ {BRs, Counter1} = aca_cs_build_brs(Path, Counter0, #{}),
+ {Blocks, Counter} = aca_cs_1(Path, Blocks0, Counter1, #{}, BRs, #{}),
+ Lbl = maps:get(Lbl0, BRs),
+ {Lbl, Blocks, Counter}.
+
+aca_cs_build_brs([Lbl | Path], Counter0, Acc) ->
+ aca_cs_build_brs(Path, Counter0 + 1, maps:put(Lbl, Counter0, Acc));
+aca_cs_build_brs([], Counter, Acc) ->
+ {Acc, Counter}.
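+
+%% For example, aca_cs_build_brs([4,5,7], 100, #{}) returns
+%% {#{4=>100,5=>101,7=>102},103}: every block on the path gets a fresh
+%% label and the counter is advanced past the last one used.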
+
+aca_cs_1([Lbl0 | Path], Blocks, Counter0, VRs0, BRs, Acc0) ->
+ Block0 = maps:get(Lbl0, Blocks),
+ Lbl = maps:get(Lbl0, BRs),
+ {VRs, Block, Counter} = aca_cs_block(Block0, Counter0, VRs0, BRs),
+ Acc = maps:put(Lbl, Block, Acc0),
+ aca_cs_1(Path, Blocks, Counter, VRs, BRs, Acc);
+aca_cs_1([], Blocks, Counter, _VRs, _BRs, Acc) ->
+ {maps:merge(Blocks, Acc), Counter}.
+
+aca_cs_block(#b_blk{is=Is0,last=Last0}=Block0, Counter0, VRs0, BRs) ->
+ {VRs, Is, Counter} = aca_cs_is(Is0, Counter0, VRs0, BRs, []),
+ Last = aca_cs_last(Last0, VRs, BRs),
+ Block = Block0#b_blk{is=Is,last=Last},
+ {VRs, Block, Counter}.
+
+aca_cs_is([#b_set{op=Op,
+ dst=Dst0,
+ args=Args0}=I0 | Is],
+ Counter0, VRs0, BRs, Acc) ->
+ Args = case Op of
+ phi -> aca_cs_args_phi(Args0, VRs0, BRs);
+ _ -> aca_cs_args(Args0, VRs0)
+ end,
+ Counter = Counter0 + 1,
+ Dst = #b_var{name={'@ssa_bsm_aca',Counter}},
+ I = I0#b_set{dst=Dst,args=Args},
+ VRs = maps:put(Dst0, Dst, VRs0),
+ aca_cs_is(Is, Counter, VRs, BRs, [I | Acc]);
+aca_cs_is([], Counter, VRs, _BRs, Acc) ->
+ {VRs, reverse(Acc), Counter}.
+
+aca_cs_last(#b_switch{arg=Arg0,list=Switch0,fail=Fail0}=Sw, VRs, BRs) ->
+ Switch = [{Literal, maps:get(Lbl, BRs)} || {Literal, Lbl} <- Switch0],
+ Sw#b_switch{arg=aca_cs_arg(Arg0, VRs),
+ fail=maps:get(Fail0, BRs),
+ list=Switch};
+aca_cs_last(#b_br{bool=Arg0,succ=Succ0,fail=Fail0}=Br, VRs, BRs) ->
+ Br#b_br{bool=aca_cs_arg(Arg0, VRs),
+ succ=maps:get(Succ0, BRs),
+ fail=maps:get(Fail0, BRs)};
+aca_cs_last(#b_ret{arg=Arg0}=Ret, VRs, _BRs) ->
+ Ret#b_ret{arg=aca_cs_arg(Arg0, VRs)}.
+
+aca_cs_args_phi([{Arg, Lbl} | Args], VRs, BRs) ->
+ case BRs of
+ #{ Lbl := New } ->
+ [{aca_cs_arg(Arg, VRs), New} | aca_cs_args_phi(Args, VRs, BRs)];
+ #{} ->
+ aca_cs_args_phi(Args, VRs, BRs)
+ end;
+aca_cs_args_phi([], _VRs, _BRs) ->
+ [].
+
+aca_cs_args([Arg | Args], VRs) ->
+ [aca_cs_arg(Arg, VRs) | aca_cs_args(Args, VRs)];
+aca_cs_args([], _VRs) ->
+ [].
+
+aca_cs_arg(#b_remote{mod=Mod0,name=Name0}=Rem, VRs) ->
+ Mod = aca_cs_arg(Mod0, VRs),
+ Name = aca_cs_arg(Name0, VRs),
+ Rem#b_remote{mod=Mod,name=Name};
+aca_cs_arg(Arg, VRs) ->
+ case VRs of
+ #{ Arg := New } -> New;
+ #{} -> Arg
+ end.
+
+%% Allows contexts to pass through "wrapper functions" where the context is
+%% passed directly to a function that accepts match contexts (including other
+%% wrappers).
+%%
+%% This does not alter the function in any way; it only changes parameter info
+%% so that skip_outgoing_tail_extraction is aware that it's safe to pass
+%% contexts to us.
+
+allow_context_passthrough({Fs, ModInfo0}) ->
+ ModInfo =
+ acp_forward_params([{F, beam_ssa:uses(F#b_function.bs)} || F <- Fs],
+ ModInfo0),
+ {Fs, ModInfo}.
+
+acp_forward_params(FsUses, ModInfo0) ->
+ F = fun({#b_function{args=Parameters}=Func, UseMap}, ModInfo) ->
+ ParamInfo =
+ foldl(fun(Param, ParamInfo) ->
+ Uses = maps:get(Param, UseMap, []),
+ acp_1(Param, Uses, ModInfo, ParamInfo)
+ end,
+ funcinfo_get(Func, parameter_info, ModInfo),
+ Parameters),
+ funcinfo_set(Func, parameter_info, ParamInfo, ModInfo)
+ end,
+ %% Allowing context passthrough on one function may make it possible to
+ %% enable it on another, so it needs to be repeated for maximum effect.
+ case foldl(F, ModInfo0, FsUses) of
+ ModInfo0 -> ModInfo0;
+ Changed -> acp_forward_params(FsUses, Changed)
+ end.
+
+%% We have no way to know whether an argument is a context, so it's only safe
+%% to forward it if it's passed exactly once in the first block. Any other
+%% uses are unsafe, including function_clause errors.
+acp_1(Param, [{0, #b_set{op=call}=I}], ModInfo, ParamInfo) ->
+ %% We don't need to provide a context chain as our callers make sure that
+ %% multiple arguments never reference the same context.
+ case check_context_call(I, Param, [], ModInfo) of
+ {no_match_on_entry, _} -> ParamInfo;
+ Other -> maps:put(Param, Other, ParamInfo)
+ end;
+acp_1(_Param, _Uses, _ModInfo, ParamInfo) ->
+ ParamInfo.
+
+%% This is conceptually similar to combine_matches but operates across
+%% functions. Whenever a tail binary is passed to a parameter that accepts
+%% match contexts we'll pass the context instead, improving performance by
+%% avoiding the creation of a new match context in the callee.
+%%
+%% We also create an alias to delay extraction until it's needed as an actual
+%% binary, which is rarely the case on the happy path. The cost of being wrong
+%% is negligible (`bs_test_unit + bs_get_tail` vs `bs_get_binary`), so we apply
+%% it unconditionally to keep things simple.
+
+-record(sote, { definitions :: beam_ssa:definition_map(),
+ mod_info :: module_info(),
+ match_aliases = #{} :: match_alias_map() }).
+
+skip_outgoing_tail_extraction({Fs0, ModInfo}) ->
+ Fs = map(fun(F) -> skip_outgoing_tail_extraction(F, ModInfo) end, Fs0),
+ {Fs, ModInfo}.
+
+skip_outgoing_tail_extraction(#b_function{bs=Blocks0}=F, ModInfo) ->
+ case funcinfo_get(F, has_bsm_ops, ModInfo) of
+ true ->
+ State0 = #sote{ definitions = beam_ssa:definitions(Blocks0),
+ mod_info = ModInfo },
+
+ {Blocks1, State} = beam_ssa:mapfold_instrs_rpo(
+ fun sote_rewrite_calls/2, [0], State0, Blocks0),
+
+ {Blocks, Counter} = alias_matched_binaries(Blocks1,
+ F#b_function.cnt,
+ State#sote.match_aliases),
+
+ F#b_function{bs=Blocks,cnt=Counter};
+ false ->
+ F
+ end.
+
+sote_rewrite_calls(#b_set{op=call,args=Args}=Call, State) ->
+ sote_rewrite_call(Call, Args, [], State);
+sote_rewrite_calls(I, State) ->
+ {I, State}.
+
+sote_rewrite_call(Call, [], ArgsOut, State) ->
+ {Call#b_set{args=reverse(ArgsOut)}, State};
+sote_rewrite_call(Call0, [Arg | ArgsIn], ArgsOut, State0) ->
+ case is_tail_binary(Arg, State0#sote.definitions) of
+ true ->
+ CtxChain = context_chain_of(Arg, State0#sote.definitions),
+ case check_context_call(Call0, Arg, CtxChain, State0#sote.mod_info) of
+ suitable_for_reuse ->
+ Ctx = match_context_of(Arg, State0#sote.definitions),
+
+ MatchAliases0 = State0#sote.match_aliases,
+ MatchAliases = maps:put(Arg, {0, Ctx}, MatchAliases0),
+ State = State0#sote{ match_aliases = MatchAliases },
+
+ Call = beam_ssa:add_anno(bsm_info, context_reused, Call0),
+ sote_rewrite_call(Call, ArgsIn, [Ctx | ArgsOut], State);
+ Other ->
+ Call = beam_ssa:add_anno(bsm_info, Other, Call0),
+ sote_rewrite_call(Call, ArgsIn, [Arg | ArgsOut], State0)
+ end;
+ false ->
+ sote_rewrite_call(Call0, ArgsIn, [Arg | ArgsOut], State0)
+ end.
+
+%% Adds parameter_type_info annotations to help the validator determine whether
+%% our optimizations were safe.
+
+annotate_context_parameters({Fs, ModInfo}) ->
+ mapfoldl(fun annotate_context_parameters/2, ModInfo, Fs).
+
+annotate_context_parameters(F, ModInfo) ->
+ ParamInfo = funcinfo_get(F, parameter_info, ModInfo),
+ TypeAnno0 = beam_ssa:get_anno(parameter_type_info, F, #{}),
+ TypeAnno = maps:fold(fun(K, _V, Acc) when is_map_key(K, Acc) ->
+ %% Assertion.
+ error(conflicting_parameter_types);
+ (K, suitable_for_reuse, Acc) ->
+ T = beam_validator:type_anno(match_context),
+ Acc#{ K => T };
+ (_K, _V, Acc) ->
+ Acc
+ end, TypeAnno0, ParamInfo),
+ {beam_ssa:add_anno(parameter_type_info, TypeAnno, F), ModInfo}.
+
+%%%
+%%% +bin_opt_info
+%%%
+
+collect_opt_info(Fs) ->
+ foldl(fun(#b_function{bs=Blocks}=F, Acc0) ->
+ UseMap = beam_ssa:uses(Blocks),
+ Where = beam_ssa:get_anno(location, F, []),
+ beam_ssa:fold_instrs_rpo(
+ fun(I, Acc) ->
+ collect_opt_info_1(I, Where, UseMap, Acc)
+ end, [0], Acc0, Blocks)
+ end, [], Fs).
+
+collect_opt_info_1(#b_set{op=Op,anno=Anno,dst=Dst}=I, Where, UseMap, Acc0) ->
+ case is_tail_binary(I) of
+ true when Op =:= bs_match ->
+            %% The uses include cases where the context is passed raw, so we discard
+ %% everything but the bs_extract instruction to limit warnings to
+ %% unoptimized uses.
+ Uses0 = maps:get(Dst, UseMap, []),
+ case [E || {_, #b_set{op=bs_extract}=E} <- Uses0] of
+ [Use] -> add_unopt_binary_info(Use, false, Where, UseMap, Acc0);
+ [] -> Acc0
+ end;
+ true ->
+ %% Add a warning for each use. Note that we don't do anything
+            %% special if it's unused, as a later pass will remove this instruction
+ %% anyway.
+ Uses = maps:get(Dst, UseMap, []),
+ foldl(fun({_Lbl, Use}, Acc) ->
+ add_unopt_binary_info(Use, false, Where, UseMap, Acc)
+ end, Acc0, Uses);
+ false ->
+ add_opt_info(Anno, Where, Acc0)
+ end;
+collect_opt_info_1(#b_ret{anno=Anno}, Where, _UseMap, Acc) ->
+ add_opt_info(Anno, Where, Acc);
+collect_opt_info_1(_I, _Where, _Uses, Acc) ->
+ Acc.
+
+add_opt_info(Anno, Where, Acc) ->
+ case maps:find(bsm_info, Anno) of
+ {ok, Term} -> [make_warning(Term, Anno, Where) | Acc];
+ error -> Acc
+ end.
+
+%% When an alias is promoted we need to figure out where it goes, both to
+%% ignore warnings for compiler-generated things and to provide more useful
+%% warnings in general.
+%%
+%% We track whether the binary has been used to build another term because it
+%% can be helpful when there's no line information.
+
+add_unopt_binary_info(#b_set{op=Follow,dst=Dst}, _Nested, Where, UseMap, Acc0)
+ when Follow =:= put_tuple;
+ Follow =:= put_list;
+ Follow =:= put_map ->
+ %% Term-building instructions.
+ {_, Uses} = unzip(maps:get(Dst, UseMap, [])),
+ foldl(fun(Use, Acc) ->
+ add_unopt_binary_info(Use, true, Where, UseMap, Acc)
+ end, Acc0, Uses);
+add_unopt_binary_info(#b_set{op=Follow,dst=Dst}, Nested, Where, UseMap, Acc0)
+ when Follow =:= bs_extract;
+ Follow =:= phi ->
+ %% Non-building instructions that need to be followed.
+ {_, Uses} = unzip(maps:get(Dst, UseMap, [])),
+ foldl(fun(Use, Acc) ->
+ add_unopt_binary_info(Use, Nested, Where, UseMap, Acc)
+ end, Acc0, Uses);
+add_unopt_binary_info(#b_set{op=call,
+ args=[#b_remote{mod=#b_literal{val=erlang},
+ name=#b_literal{val=error}} |
+ _Ignored]},
+ _Nested, _Where, _UseMap, Acc) ->
+ %% There's no nice way to tell compiler-generated exceptions apart from
+    %% user ones, so we ignore them all. I doubt anyone cares.
+ Acc;
+add_unopt_binary_info(#b_switch{anno=Anno}=I, Nested, Where, _UseMap, Acc) ->
+ [make_promotion_warning(I, Nested, Anno, Where) | Acc];
+add_unopt_binary_info(#b_set{anno=Anno}=I, Nested, Where, _UseMap, Acc) ->
+ [make_promotion_warning(I, Nested, Anno, Where) | Acc];
+add_unopt_binary_info(#b_ret{anno=Anno}=I, Nested, Where, _UseMap, Acc) ->
+ [make_promotion_warning(I, Nested, Anno, Where) | Acc];
+add_unopt_binary_info(#b_br{anno=Anno}=I, Nested, Where, _UseMap, Acc) ->
+ [make_promotion_warning(I, Nested, Anno, Where) | Acc].
+
+make_promotion_warning(I, Nested, Anno, Where) ->
+ make_warning({binary_created, I, Nested}, Anno, Where).
+
+make_warning(Term, Anno, Where) ->
+ {File, Line} = maps:get(location, Anno, Where),
+ {File,[{Line,?MODULE,Term}]}.
+
+format_opt_info(context_reused) ->
+ "OPTIMIZED: match context reused";
+format_opt_info({binary_created, _, _}=Promotion) ->
+ io_lib:format("BINARY CREATED: ~s", [format_opt_info_1(Promotion)]);
+format_opt_info(Other) ->
+ io_lib:format("NOT OPTIMIZED: ~s", [format_opt_info_1(Other)]).
+
+format_opt_info_1({binary_created, #b_set{op=call,args=[Call|_]}, false}) ->
+ io_lib:format("binary is used in call to ~s which doesn't support "
+ "context reuse", [format_call(Call)]);
+format_opt_info_1({binary_created, #b_set{op=call,args=[Call|_]}, true}) ->
+ io_lib:format("binary is used in term passed to ~s",
+ [format_call(Call)]);
+format_opt_info_1({binary_created, #b_set{op={bif, BIF},args=Args}, false}) ->
+ io_lib:format("binary is used in ~p/~p which doesn't support context "
+ "reuse", [BIF, length(Args)]);
+format_opt_info_1({binary_created, #b_set{op={bif, BIF},args=Args}, true}) ->
+ io_lib:format("binary is used in term passed to ~p/~p",
+ [BIF, length(Args)]);
+format_opt_info_1({binary_created, #b_set{op=Op}, false}) ->
+ io_lib:format("binary is used in '~p' which doesn't support context "
+ "reuse", [Op]);
+format_opt_info_1({binary_created, #b_set{op=Op}, true}) ->
+ io_lib:format("binary is used in term passed to '~p'", [Op]);
+format_opt_info_1({binary_created, #b_ret{}, false}) ->
+ io_lib:format("binary is returned from the function", []);
+format_opt_info_1({binary_created, #b_ret{}, true}) ->
+ io_lib:format("binary is used in a term that is returned from the "
+ "function", []);
+format_opt_info_1({unsuitable_call, {Call, Inner}}) ->
+ io_lib:format("binary used in call to ~s, where ~s",
+ [format_call(Call), format_opt_info_1(Inner)]);
+format_opt_info_1({remote_call, Call}) ->
+ io_lib:format("binary is used in remote call to ~s", [format_call(Call)]);
+format_opt_info_1({fun_call, Call}) ->
+ io_lib:format("binary is used in fun call (~s)",
+ [format_call(Call)]);
+format_opt_info_1({multiple_uses_in_call, Call}) ->
+ io_lib:format("binary is passed as multiple arguments to ~s",
+ [format_call(Call)]);
+format_opt_info_1({no_match_on_entry, Call}) ->
+ io_lib:format("binary is used in call to ~s which does not begin with a "
+ "suitable binary match", [format_call(Call)]);
+format_opt_info_1({used_before_match, #b_set{op=call,args=[Call|_]}}) ->
+ io_lib:format("binary is used in call to ~s before being matched",
+ [format_call(Call)]);
+format_opt_info_1({used_before_match, #b_set{op={bif, BIF},args=Args}}) ->
+ io_lib:format("binary is used in ~p/~p before being matched",
+ [BIF, length(Args)]);
+format_opt_info_1({used_before_match, #b_set{op=phi}}) ->
+ io_lib:format("binary is returned from an expression before being "
+ "matched", []);
+format_opt_info_1({used_before_match, #b_set{op=Op}}) ->
+ io_lib:format("binary is used in '~p' before being matched",[Op]);
+format_opt_info_1(Term) ->
+ io_lib:format("~w", [Term]).
+
+format_call(#b_local{name=#b_literal{val=F},arity=A}) ->
+ io_lib:format("~p/~p", [F, A]);
+format_call(#b_remote{mod=#b_literal{val=M},name=#b_literal{val=F},arity=A}) ->
+ io_lib:format("~p:~p/~p", [M, F, A]);
+format_call(Fun) ->
+ io_lib:format("~p", [Fun]).
diff --git a/lib/compiler/src/beam_ssa_codegen.erl b/lib/compiler/src/beam_ssa_codegen.erl
index 357352269c..c2d5035b19 100644
--- a/lib/compiler/src/beam_ssa_codegen.erl
+++ b/lib/compiler/src/beam_ssa_codegen.erl
@@ -108,7 +108,8 @@ module(#b_module{name=Mod,exports=Es,attributes=Attrs,body=Fs}, _Opts) ->
-type ssa_register() :: xreg() | yreg() | {'fr',reg_num()} | {'z',reg_num()}.
functions(Forms, AtomMod) ->
- mapfoldl(fun (F, St) -> function(F, AtomMod, St) end, #cg{lcount=1}, Forms).
+ mapfoldl(fun (F, St) -> function(F, AtomMod, St) end,
+ #cg{lcount=1}, Forms).
function(#b_function{anno=Anno,bs=Blocks}, AtomMod, St0) ->
#{func_info:={_,Name,Arity}} = Anno,
@@ -125,8 +126,9 @@ function(#b_function{anno=Anno,bs=Blocks}, AtomMod, St0) ->
ultimate_fail=Ult},
{Body,St} = cg_fun(Blocks, St5),
Asm = [{label,Fi},line(Anno),
- {func_info,AtomMod,{atom,Name},Arity}] ++ Body ++
- [{label,Ult},if_end],
+ {func_info,AtomMod,{atom,Name},Arity}] ++
+ add_parameter_annos(Body, Anno) ++
+ [{label,Ult},if_end],
Func = {function,Name,Arity,Entry,Asm},
{Func,St}
catch
@@ -150,6 +152,17 @@ assert_badarg_block(Blocks) ->
ok
end.
+add_parameter_annos([{label, _}=Entry | Body], Anno) ->
+ ParamInfo = maps:get(parameter_type_info, Anno, #{}),
+ Annos = maps:fold(
+ fun(K, V, Acc) when is_map_key(K, ParamInfo) ->
+ TypeInfo = maps:get(K, ParamInfo),
+ [{'%', {type_info, V, TypeInfo}} | Acc];
+ (_K, _V, Acc) ->
+ Acc
+ end, [], maps:get(registers, Anno)),
+ [Entry | sort(Annos)] ++ Body.
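+
+%% Each annotation has the form {'%',{type_info,Reg,Type}}, where Reg is
+%% the BEAM register assigned to the parameter and Type comes from the
+%% parameter_type_info annotation added by beam_ssa_bsm.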
+
cg_fun(Blocks, St0) ->
Linear0 = linearize(Blocks),
St = collect_catch_labels(Linear0, St0),
@@ -218,7 +231,7 @@ need_heap_never(_) -> false.
need_heap_blks([{L,#cg_blk{is=Is0}=Blk0}|Bs], H0, Acc) ->
{Is1,H1} = need_heap_is(reverse(Is0), H0, []),
- {Ns,H} = need_heap_terminator(Bs, H1),
+ {Ns,H} = need_heap_terminator(Bs, L, H1),
Is = Ns ++ Is1,
Blk = Blk0#cg_blk{is=Is},
need_heap_blks(Bs, H, [{L,Blk}|Acc]);
@@ -228,6 +241,13 @@ need_heap_blks([], H, Acc) ->
need_heap_is([#cg_alloc{words=Words}=Alloc0|Is], N, Acc) ->
Alloc = Alloc0#cg_alloc{words=add_heap_words(N, Words)},
need_heap_is(Is, #need{}, [Alloc|Acc]);
+need_heap_is([#cg_set{anno=Anno,op=bs_init}=I0|Is], N, Acc) ->
+ Alloc = case need_heap_need(N) of
+ [#cg_alloc{words=Need}] -> alloc(Need);
+ [] -> 0
+ end,
+ I = I0#cg_set{anno=Anno#{alloc=>Alloc}},
+ need_heap_is(Is, #need{}, [I|Acc]);
need_heap_is([#cg_set{op=Op,args=Args}=I|Is], N, Acc) ->
case classify_heap_need(Op, Args) of
{put,Words} ->
@@ -243,11 +263,31 @@ need_heap_is([#cg_set{op=Op,args=Args}=I|Is], N, Acc) ->
need_heap_is([], N, Acc) ->
{Acc,N}.
-need_heap_terminator([{_,#cg_blk{last=#cg_br{succ=Same,fail=Same}}}|_], N) ->
+need_heap_terminator([{_,#cg_blk{last=#cg_br{succ=L,fail=L}}}|_], L, N) ->
+ %% Fallthrough.
{[],N};
-need_heap_terminator([{_,#cg_blk{}}|_], N) ->
+need_heap_terminator([{_,#cg_blk{is=Is,last=#cg_br{succ=L}}}|_], L, N) ->
+ case need_heap_need(N) of
+ [] ->
+ {[],#need{}};
+ [_|_]=Alloc ->
+ %% If the preceding instructions are a binary construction,
+                %% hoist the allocation and incorporate it into the
+                %% bs_init instruction.
+ case reverse(Is) of
+ [#cg_set{op=succeeded},#cg_set{op=bs_init}|_] ->
+ {[],N};
+ [#cg_set{op=bs_put}|_] ->
+ {[],N};
+ _ ->
+ %% Not binary construction. Must emit an allocation
+ %% instruction in this block.
+ {Alloc,#need{}}
+ end
+ end;
+need_heap_terminator([{_,#cg_blk{}}|_], _, N) ->
{need_heap_need(N),#need{}};
-need_heap_terminator([], H) ->
+need_heap_terminator([], _, H) ->
{need_heap_need(H),#need{}}.
need_heap_need(#need{h=0,f=0}) -> [];
@@ -286,6 +326,8 @@ classify_heap_need(put_list, _) ->
{put,2};
classify_heap_need(put_tuple_arity, [#b_literal{val=Words}]) ->
{put,Words+1};
+classify_heap_need(put_tuple, Elements) ->
+ {put,length(Elements)+1};
classify_heap_need({bif,Name}, Args) ->
case is_gc_bif(Name, Args) of
false -> neutral;
@@ -313,12 +355,15 @@ classify_heap_need(Name, _Args) ->
classify_heap_need(bs_add) -> gc;
classify_heap_need(bs_get) -> gc;
+classify_heap_need(bs_get_tail) -> gc;
classify_heap_need(bs_init) -> gc;
classify_heap_need(bs_init_writable) -> gc;
classify_heap_need(bs_match_string) -> gc;
classify_heap_need(bs_put) -> neutral;
classify_heap_need(bs_restore) -> neutral;
classify_heap_need(bs_save) -> neutral;
+classify_heap_need(bs_get_position) -> gc;
+classify_heap_need(bs_set_position) -> neutral;
classify_heap_need(bs_skip) -> gc;
classify_heap_need(bs_start_match) -> neutral;
classify_heap_need(bs_test_tail) -> neutral;
@@ -327,7 +372,6 @@ classify_heap_need(bs_utf8_size) -> neutral;
classify_heap_need(build_stacktrace) -> gc;
classify_heap_need(call) -> gc;
classify_heap_need(catch_end) -> gc;
-classify_heap_need(context_to_binary) -> gc;
classify_heap_need(copy) -> neutral;
classify_heap_need(extract) -> gc;
classify_heap_need(get_hd) -> neutral;
@@ -590,8 +634,7 @@ liveness_successors(Terminator) ->
liveness_is([#cg_alloc{}=I0|Is], Regs, Live, Acc) ->
I = I0#cg_alloc{live=num_live(Live, Regs)},
liveness_is(Is, Regs, Live, [I|Acc]);
-liveness_is([#cg_set{dst=Dst0,args=Args}=I0|Is], Regs, Live0, Acc) ->
- #b_var{name=Dst} = Dst0,
+liveness_is([#cg_set{dst=Dst,args=Args}=I0|Is], Regs, Live0, Acc) ->
Live1 = liveness_clobber(I0, Live0, Regs),
I1 = liveness_yregs_anno(I0, Live1, Regs),
Live2 = liveness_args(Args, Live1),
@@ -608,7 +651,7 @@ liveness_terminator(#cg_switch{arg=Arg}, Live) ->
liveness_terminator(#cg_ret{arg=Arg}, Live) ->
liveness_terminator_1(Arg, Live).
-liveness_terminator_1(#b_var{name=V}, Live) ->
+liveness_terminator_1(#b_var{}=V, Live) ->
ordsets:add_element(V, Live);
liveness_terminator_1(#b_literal{}, Live) ->
Live;
@@ -616,7 +659,7 @@ liveness_terminator_1(Reg, Live) ->
_ = verify_beam_register(Reg),
ordsets:add_element(Reg, Live).
-liveness_args([#b_var{name=V}|As], Live) ->
+liveness_args([#b_var{}=V|As], Live) ->
liveness_args(As, ordsets:add_element(V, Live));
liveness_args([#b_remote{mod=Mod,name=Name}|As], Live) ->
liveness_args([Mod,Name|As], Live);
@@ -639,7 +682,7 @@ liveness_anno(#cg_set{op=Op}=I, Live, Regs) ->
I
end.
-liveness_yregs_anno(#cg_set{op=Op,dst=#b_var{name=Dst}}=I, Live0, Regs) ->
+liveness_yregs_anno(#cg_set{op=Op,dst=Dst}=I, Live0, Regs) ->
case need_live_anno(Op) of
true ->
Live = ordsets:del_element(Dst, Live0),
@@ -694,6 +737,8 @@ need_live_anno(Op) ->
{bif,_} -> true;
bs_get -> true;
bs_init -> true;
+ bs_get_position -> true;
+ bs_get_tail -> true;
bs_start_match -> true;
bs_skip -> true;
call -> true;
@@ -702,7 +747,16 @@ need_live_anno(Op) ->
end.
%%%
-%%% Add annotations for defined Y registers.
+%%% Add the following annotations for Y registers:
+%%%
+%%% def_yregs An ordset with variables that refer to live Y registers.
+%%%                That is, Y registers that have been killed
+%%% are not included. This annotation is added to all
+%%% instructions that require Y registers to be initialized.
+%%%
+%%% kill_yregs This annotation is added to call instructions. It is
+%%% an ordset containing variables referring to Y registers
+%%% that will no longer be used after the call instruction.
%%%
defined(Linear, #cg{regs=Regs}) ->
@@ -726,13 +780,13 @@ def_get(L, DefMap) ->
def_is([#cg_alloc{anno=Anno0}=I0|Is], Regs, Def, Acc) ->
I = I0#cg_alloc{anno=Anno0#{def_yregs=>Def}},
def_is(Is, Regs, Def, [I|Acc]);
-def_is([#cg_set{op=kill_try_tag,args=[#b_var{name=Tag}]}=I|Is], Regs, Def0, Acc) ->
+def_is([#cg_set{op=kill_try_tag,args=[#b_var{}=Tag]}=I|Is], Regs, Def0, Acc) ->
Def = ordsets:del_element(Tag, Def0),
def_is(Is, Regs, Def, [I|Acc]);
-def_is([#cg_set{op=catch_end,args=[#b_var{name=Tag}|_]}=I|Is], Regs, Def0, Acc) ->
+def_is([#cg_set{op=catch_end,args=[#b_var{}=Tag|_]}=I|Is], Regs, Def0, Acc) ->
Def = ordsets:del_element(Tag, Def0),
def_is(Is, Regs, Def, [I|Acc]);
-def_is([#cg_set{anno=Anno0,op=call,dst=#b_var{name=Dst}}=I0|Is],
+def_is([#cg_set{anno=Anno0,op=call,dst=Dst}=I0|Is],
Regs, Def0, Acc) ->
#{live_yregs:=LiveYregVars} = Anno0,
LiveRegs = gb_sets:from_list([maps:get(V, Regs) || V <- LiveYregVars]),
@@ -747,7 +801,7 @@ def_is([#cg_set{anno=Anno0,op=call,dst=#b_var{name=Dst}}=I0|Is],
Def1 = ordsets:subtract(Def0, Kill),
Def = def_add_yreg(Dst, Def1, Regs),
def_is(Is, Regs, Def, [I|Acc]);
-def_is([#cg_set{anno=Anno0,op={bif,Bif},dst=#b_var{name=Dst},args=Args}=I0|Is],
+def_is([#cg_set{anno=Anno0,op={bif,Bif},dst=Dst,args=Args}=I0|Is],
Regs, Def0, Acc) ->
Arity = length(Args),
I = case is_gc_bif(Bif, Args) orelse not erl_bifs:is_safe(erlang, Bif, Arity) of
@@ -758,7 +812,7 @@ def_is([#cg_set{anno=Anno0,op={bif,Bif},dst=#b_var{name=Dst},args=Args}=I0|Is],
end,
Def = def_add_yreg(Dst, Def0, Regs),
def_is(Is, Regs, Def, [I|Acc]);
-def_is([#cg_set{anno=Anno0,dst=#b_var{name=Dst}}=I0|Is], Regs, Def0, Acc) ->
+def_is([#cg_set{anno=Anno0,dst=Dst}=I0|Is], Regs, Def0, Acc) ->
I = case need_y_init(I0) of
true ->
I0#cg_set{anno=Anno0#{def_yregs=>Def0}};
@@ -793,6 +847,8 @@ def_successors([], _, DefMap) -> DefMap.
need_y_init(#cg_set{anno=#{clobbers:=Clobbers}}) -> Clobbers;
need_y_init(#cg_set{op=bs_get}) -> true;
+need_y_init(#cg_set{op=bs_get_position}) -> true;
+need_y_init(#cg_set{op=bs_get_tail}) -> true;
need_y_init(#cg_set{op=bs_init}) -> true;
need_y_init(#cg_set{op=bs_skip,args=[#b_literal{val=Type}|_]}) ->
case Type of
@@ -816,13 +872,35 @@ opt_allocate(Linear, #cg{regs=Regs}) ->
opt_allocate_1([{L,#cg_blk{is=[#cg_alloc{stack=Stk}=I0|Is]}=Blk0}|Bs]=Bs0, Regs)
when is_integer(Stk) ->
- Yregs = opt_alloc_def(Bs0, gb_sets:singleton(L), []),
- I = I0#cg_alloc{def_yregs=Yregs},
- [{L,Blk0#cg_blk{is=[I|Is]}}|opt_allocate_1(Bs, Regs)];
+ %% Collect the variables that are initialized by copy
+    %% instructions in this block.
+ case ordsets:from_list(opt_allocate_defs(Is, Regs)) of
+ Yregs when length(Yregs) =:= Stk ->
+ %% Those copy instructions are sufficient to fully
+ %% initialize the stack frame.
+ I = I0#cg_alloc{def_yregs=Yregs},
+ [{L,Blk0#cg_blk{is=[I|Is]}}|opt_allocate_1(Bs, Regs)];
+ Yregs0 ->
+ %% Determine a conservative approximation of the Y
+ %% registers that are guaranteed to be initialized by all
+            %% successors of this block, and add to it the variables
+ %% initialized by copy instructions in this block.
+ Yregs1 = opt_alloc_def(Bs0, gb_sets:singleton(L), []),
+ Yregs = ordsets:union(Yregs0, Yregs1),
+ I = I0#cg_alloc{def_yregs=Yregs},
+ [{L,Blk0#cg_blk{is=[I|Is]}}|opt_allocate_1(Bs, Regs)]
+ end;
opt_allocate_1([B|Bs], Regs) ->
[B|opt_allocate_1(Bs, Regs)];
opt_allocate_1([], _) -> [].
+opt_allocate_defs([#cg_set{op=copy,dst=Dst}|Is], Regs) ->
+ case is_yreg(Dst, Regs) of
+ true -> [Dst|opt_allocate_defs(Is, Regs)];
+ false -> []
+ end;
+opt_allocate_defs(_, _Regs) -> [].
+
opt_alloc_def([{L,#cg_blk{is=Is,last=Last}}|Bs], Ws0, Def0) ->
case gb_sets:is_member(L, Ws0) of
false ->
@@ -993,8 +1071,8 @@ cg_block([#cg_set{op={bif,Name},dst=Dst0,args=Args0}]=Is0, {Dst0,Fail}, St0) ->
{z,_} ->
%% The result of the BIF call will only be used once. Convert to
%% a test instruction.
- Test = bif_to_test(Name, Args, ensure_label(Fail, St0)),
- {Test,St0};
+ {Test,St1} = bif_to_test(Name, Args, ensure_label(Fail, St0), St0),
+ {Test,St1};
_ ->
%% Must explicitly call the BIF since the result will be used
%% more than once.
@@ -1021,12 +1099,13 @@ cg_block([#cg_set{op=bs_init,dst=Dst0,args=Args0,anno=Anno}=I,
#cg_set{op=succeeded,dst=Bool}], {Bool,Fail0}, St) ->
Fail = bif_fail(Fail0),
Line = line(Anno),
+ Alloc = map_get(alloc, Anno),
[#b_literal{val=Kind}|Args1] = Args0,
case Kind of
new ->
[Dst,Size,{integer,Unit}] = beam_args([Dst0|Args1], St),
Live = get_live(I),
- {[Line|cg_bs_init(Dst, Size, Unit, Live, Fail)],St};
+ {[Line|cg_bs_init(Dst, Size, Alloc, Unit, Live, Fail)],St};
private_append ->
[Dst,Src,Bits,{integer,Unit}] = beam_args([Dst0|Args1], St),
Flags = {field_flags,[]},
@@ -1036,17 +1115,23 @@ cg_block([#cg_set{op=bs_init,dst=Dst0,args=Args0,anno=Anno}=I,
[Dst,Src,Bits,{integer,Unit}] = beam_args([Dst0|Args1], St),
Flags = {field_flags,[]},
Live = get_live(I),
- Is = [Line,{bs_append,Fail,Bits,0,Live,Unit,Src,Flags,Dst}],
+ Is = [Line,{bs_append,Fail,Bits,Alloc,Live,Unit,Src,Flags,Dst}],
{Is,St}
end;
cg_block([#cg_set{anno=Anno,op=bs_start_match,dst=Ctx0,args=[Bin0]}=I,
#cg_set{op=succeeded,dst=Bool}], {Bool,Fail}, St) ->
- #{num_slots:=Slots} = Anno,
[Dst,Bin1] = beam_args([Ctx0,Bin0], St),
{Bin,Pre} = force_reg(Bin1, Dst),
Live = get_live(I),
- Is = Pre ++ [{test,bs_start_match2,Fail,Live,[Bin,Slots],Dst}],
- {Is,St};
+ %% num_slots is only set when using the old instructions.
+ case maps:find(num_slots, Anno) of
+ {ok, Slots} ->
+ Is = Pre ++ [{test,bs_start_match2,Fail,Live,[Bin,Slots],Dst}],
+ {Is,St};
+ error ->
+ Is = Pre ++ [{test,bs_start_match3,Fail,Live,[Bin],Dst}],
+ {Is,St}
+ end;
cg_block([#cg_set{op=bs_get}=Set,
#cg_set{op=succeeded,dst=Bool}], {Bool,Fail}, St) ->
{cg_bs_get(Fail, Set, St),St};
@@ -1178,18 +1263,119 @@ cg_copy_1([#cg_set{dst=Dst0,args=Args}|T], St) ->
end;
cg_copy_1([], _St) -> [].
+-define(IS_LITERAL(Val), (Val =:= nil orelse
+ element(1, Val) =:= integer orelse
+ element(1, Val) =:= float orelse
+ element(1, Val) =:= atom orelse
+ element(1, Val) =:= literal)).
+
+bif_to_test('or', [V1,V2], {f,Lbl}=Fail, St0) when Lbl =/= 0 ->
+ {SuccLabel,St} = new_label(St0),
+ {[{test,is_eq_exact,{f,SuccLabel},[V1,{atom,false}]},
+ {test,is_eq_exact,Fail,[V2,{atom,true}]},
+ {label,SuccLabel}],St};
+bif_to_test(Op, Args, Fail, St) ->
+ {bif_to_test(Op, Args, Fail),St}.
+
+bif_to_test('and', [V1,V2], Fail) ->
+ [{test,is_eq_exact,Fail,[V1,{atom,true}]},
+ {test,is_eq_exact,Fail,[V2,{atom,true}]}];
bif_to_test('not', [Var], Fail) ->
[{test,is_eq_exact,Fail,[Var,{atom,false}]}];
bif_to_test(Name, Args, Fail) ->
- [beam_utils:bif_to_test(Name, Args, Fail)].
+ [bif_to_test_1(Name, Args, Fail)].
+
+bif_to_test_1(is_atom, [_]=Ops, Fail) ->
+ {test,is_atom,Fail,Ops};
+bif_to_test_1(is_boolean, [_]=Ops, Fail) ->
+ {test,is_boolean,Fail,Ops};
+bif_to_test_1(is_binary, [_]=Ops, Fail) ->
+ {test,is_binary,Fail,Ops};
+bif_to_test_1(is_bitstring,[_]=Ops, Fail) ->
+ {test,is_bitstr,Fail,Ops};
+bif_to_test_1(is_float, [_]=Ops, Fail) ->
+ {test,is_float,Fail,Ops};
+bif_to_test_1(is_function, [_]=Ops, Fail) ->
+ {test,is_function,Fail,Ops};
+bif_to_test_1(is_function, [_,_]=Ops, Fail) ->
+ {test,is_function2,Fail,Ops};
+bif_to_test_1(is_integer, [_]=Ops, Fail) ->
+ {test,is_integer,Fail,Ops};
+bif_to_test_1(is_list, [_]=Ops, Fail) ->
+ {test,is_list,Fail,Ops};
+bif_to_test_1(is_map, [_]=Ops, Fail) ->
+ {test,is_map,Fail,Ops};
+bif_to_test_1(is_number, [_]=Ops, Fail) ->
+ {test,is_number,Fail,Ops};
+bif_to_test_1(is_pid, [_]=Ops, Fail) ->
+ {test,is_pid,Fail,Ops};
+bif_to_test_1(is_port, [_]=Ops, Fail) ->
+ {test,is_port,Fail,Ops};
+bif_to_test_1(is_reference, [_]=Ops, Fail) ->
+ {test,is_reference,Fail,Ops};
+bif_to_test_1(is_tuple, [_]=Ops, Fail) ->
+ {test,is_tuple,Fail,Ops};
+bif_to_test_1('=<', [A,B], Fail) ->
+ {test,is_ge,Fail,[B,A]};
+bif_to_test_1('>', [A,B], Fail) ->
+ {test,is_lt,Fail,[B,A]};
+bif_to_test_1('<', [_,_]=Ops, Fail) ->
+ {test,is_lt,Fail,Ops};
+bif_to_test_1('>=', [_,_]=Ops, Fail) ->
+ {test,is_ge,Fail,Ops};
+bif_to_test_1('==', [C,A], Fail) when ?IS_LITERAL(C) ->
+ {test,is_eq,Fail,[A,C]};
+bif_to_test_1('==', [_,_]=Ops, Fail) ->
+ {test,is_eq,Fail,Ops};
+bif_to_test_1('/=', [C,A], Fail) when ?IS_LITERAL(C) ->
+ {test,is_ne,Fail,[A,C]};
+bif_to_test_1('/=', [_,_]=Ops, Fail) ->
+ {test,is_ne,Fail,Ops};
+bif_to_test_1('=:=', [C,A], Fail) when ?IS_LITERAL(C) ->
+ {test,is_eq_exact,Fail,[A,C]};
+bif_to_test_1('=:=', [_,_]=Ops, Fail) ->
+ {test,is_eq_exact,Fail,Ops};
+bif_to_test_1('=/=', [C,A], Fail) when ?IS_LITERAL(C) ->
+ {test,is_ne_exact,Fail,[A,C]};
+bif_to_test_1('=/=', [_,_]=Ops, Fail) ->
+ {test,is_ne_exact,Fail,Ops}.
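+
+%% Note that there are no is_gt or is_le test instructions, so '>' and
+%% '=<' are turned into is_lt and is_ge tests with swapped operands.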
opt_call_moves(Is0, Arity) ->
{Moves0,Is} = splitwith(fun({move,_,_}) -> true;
+ ({kill,_}) -> true;
(_) -> false
end, Is0),
Moves = opt_call_moves_1(Moves0, Arity),
Moves ++ Is.
+opt_call_moves_1([{move,Src,{x,_}=Tmp}=M1|[{kill,_}|_]=Is], Arity) ->
+ %% There could be a {move,Tmp,{x,0}} instruction after the
+    %% kill/1 instructions (moved there by opt_move_to_x0/1).
+ case splitwith(fun({kill,_}) -> true;
+ (_) -> false
+ end, Is) of
+ {Kills,[{move,{x,_}=Tmp,{x,0}}=M2]} ->
+ %% The two move/2 instructions (M1 and M2) can be combined
+            %% into one. The question, though, is whether it is safe to
+            %% place them after the kill/1 instructions.
+ case is_killed(Src, Kills, Arity) of
+ true ->
+ %% Src (a Y register) is killed by one of the
+ %% kill/1 instructions. Thus M1 and M2
+ %% must be placed before the kill/1 instructions
+ %% (essentially undoing what opt_move_to_x0/1
+ %% did, which turned out to be a pessimization
+ %% in this case).
+ opt_call_moves_1([M1,M2|Kills], Arity);
+ false ->
+ %% Src is not killed by any of the kill/1
+ %% instructions. Thus it is safe to place
+ %% M1 and M2 after the kill/1 instructions.
+ opt_call_moves_1(Kills++[M1,M2], Arity)
+ end;
+ {_,_} ->
+ [M1|Is]
+ end;
opt_call_moves_1([{move,Src,{x,_}=Tmp}=M1,{move,Tmp,Dst}=M2|Is], Arity) ->
case is_killed(Tmp, Is, Arity) of
true ->
@@ -1203,6 +1389,10 @@ opt_call_moves_1([M|Ms], Arity) ->
[M|opt_call_moves_1(Ms, Arity)];
opt_call_moves_1([], _Arity) -> [].
+is_killed(Y, [{kill,Y}|_], _) ->
+ true;
+is_killed(R, [{kill,_}|Is], Arity) ->
+ is_killed(R, Is, Arity);
is_killed(R, [{move,R,_}|_], _) ->
false;
is_killed(R, [{move,_,R}|_], _) ->
@@ -1210,7 +1400,9 @@ is_killed(R, [{move,_,R}|_], _) ->
is_killed(R, [{move,_,_}|Is], Arity) ->
is_killed(R, Is, Arity);
is_killed({x,X}, [], Arity) ->
- X >= Arity.
+ X >= Arity;
+is_killed({y,_}, [], _) ->
+ false.
cg_alloc(#cg_alloc{stack=none,words=#need{h=0,f=0}}, _St) ->
[];
@@ -1257,22 +1449,35 @@ cg_call(#cg_set{anno=Anno,op=call,dst=Dst0,args=[#b_local{}=Func0|Args0]},
Line = call_line(Where, local, Anno),
Call = build_call(call, Arity, {f,FuncLbl}, Context, Dst),
Is = setup_args(Args, Anno, Context, St) ++ Line ++ Call,
- {Is,St};
-cg_call(#cg_set{anno=Anno,op=call,dst=Dst0,args=[#b_remote{}=Func0|Args0]},
+ case Anno of
+ #{ result_type := Info } ->
+ {Is ++ [{'%', {type_info, Dst, Info}}], St};
+ #{} ->
+ {Is, St}
+ end;
+cg_call(#cg_set{anno=Anno0,op=call,dst=Dst0,args=[#b_remote{}=Func0|Args0]},
Where, Context, St) ->
[Dst|Args] = beam_args([Dst0|Args0], St),
#b_remote{mod=Mod0,name=Name0,arity=Arity} = Func0,
case {beam_arg(Mod0, St),beam_arg(Name0, St)} of
{{atom,Mod},{atom,Name}} ->
Func = {extfunc,Mod,Name,Arity},
- Line = call_line(Where, Func, Anno),
+ Line = call_line(Where, Func, Anno0),
Call = build_call(call_ext, Arity, Func, Context, Dst),
+ Anno = case erl_bifs:is_exit_bif(Mod, Name, Arity) of
+ true ->
+ %% There is no need to kill Y registers
+ %% before calling an exit BIF.
+ maps:remove(kill_yregs, Anno0);
+ false ->
+ Anno0
+ end,
Is = setup_args(Args, Anno, Context, St) ++ Line ++ Call,
{Is,St};
{Mod,Name} ->
Apply = build_apply(Arity, Context, Dst),
- Is = setup_args(Args++[Mod,Name], Anno, Context, St) ++
- [line(Anno)] ++ Apply,
+ Is = setup_args(Args++[Mod,Name], Anno0, Context, St) ++
+ [line(Anno0)] ++ Apply,
{Is,St}
end;
cg_call(#cg_set{anno=Anno,op=call,dst=Dst0,args=Args0},
@@ -1325,6 +1530,12 @@ build_apply(Arity, none, Dst) ->
cg_instr(put_map, [{atom,assoc},SrcMap|Ss], Dst, Set) ->
Live = get_live(Set),
[{put_map_assoc,{f,0},SrcMap,Dst,Live,{list,Ss}}];
+cg_instr(bs_get_tail, [Src], Dst, Set) ->
+ Live = get_live(Set),
+ [{bs_get_tail,Src,Dst,Live}];
+cg_instr(bs_get_position, [Ctx], Dst, Set) ->
+ Live = get_live(Set),
+ [{bs_get_position,Ctx,Dst,Live}];
cg_instr(Op, Args, Dst, _Set) ->
cg_instr(Op, Args, Dst).
@@ -1340,10 +1551,10 @@ cg_instr(bs_restore, [Ctx,Slot], _Dst) ->
cg_instr(bs_save, [Ctx,Slot], _Dst) ->
{integer,N} = Slot,
[{bs_save2,Ctx,N}];
+cg_instr(bs_set_position, [Ctx,Pos], _Dst) ->
+ [{bs_set_position,Ctx,Pos}];
cg_instr(build_stacktrace, Args, Dst) ->
setup_args(Args) ++ [build_stacktrace|copy({x,0}, Dst)];
-cg_instr(context_to_binary, [Src], _Dst) ->
- [{bs_context_to_binary,Src}];
cg_instr(set_tuple_element=Op, [New,Tuple,{integer,Index}], _Dst) ->
[{Op,New,Tuple,Index}];
cg_instr({float,clearerror}, [], _Dst) ->
@@ -1360,6 +1571,8 @@ cg_instr(get_tuple_element=Op, [Src,{integer,N}], Dst) ->
[{Op,Src,N,Dst}];
cg_instr(put_list=Op, [Hd,Tl], Dst) ->
[{Op,Hd,Tl,Dst}];
+cg_instr(put_tuple, Elements, Dst) ->
+ [{put_tuple2,Dst,{list,Elements}}];
cg_instr(put_tuple_arity, [{integer,Arity}], Dst) ->
[{put_tuple,Arity,Dst}];
cg_instr(put_tuple_elements, Elements, _Dst) ->
@@ -1475,13 +1688,13 @@ cg_bs_put(Fail, [{atom,Type},{literal,Flags}|Args]) ->
[{Op,Fail,{field_flags,Flags},Src}]
end.
-cg_bs_init(Dst, Size0, Unit, Live, Fail) ->
+cg_bs_init(Dst, Size0, Alloc, Unit, Live, Fail) ->
Op = case Unit of
1 -> bs_init_bits;
8 -> bs_init2
end,
Size = cg_bs_init_size(Size0),
- [{Op,Fail,Size,0,Live,{field_flags,[]},Dst}].
+ [{Op,Fail,Size,Alloc,Live,{field_flags,[]},Dst}].
cg_bs_init_size({x,_}=R) -> R;
cg_bs_init_size({y,_}=R) -> R;
@@ -1517,6 +1730,14 @@ copy(Src, Dst) -> [{move,Src,Dst}].
force_reg({literal,_}=Lit, Reg) ->
{Reg,[{move,Lit,Reg}]};
+force_reg({integer,_}=Lit, Reg) ->
+ {Reg,[{move,Lit,Reg}]};
+force_reg({atom,_}=Lit, Reg) ->
+ {Reg,[{move,Lit,Reg}]};
+force_reg({float,_}=Lit, Reg) ->
+ {Reg,[{move,Lit,Reg}]};
+force_reg(nil=Lit, Reg) ->
+ {Reg,[{move,Lit,Reg}]};
force_reg({Kind,_}=R, _) when Kind =:= x; Kind =:= y ->
{R,[]}.
@@ -1600,12 +1821,41 @@ phi_copies([#b_set{dst=Dst,args=PhiArgs}|Sets], L) ->
[#cg_set{op=copy,dst=Dst,args=CopyArgs}|phi_copies(Sets, L)];
phi_copies([], _) -> [].
+%% opt_move_to_x0([Instruction]) -> [Instruction].
+%%  Simple peephole optimization to move a {move,Any,{x,0}} instruction
+%%  past any kill instructions, up to the next call instruction. (This
+%%  gives the loader an opportunity to combine the 'move' and the 'call'
+%%  instructions.)
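+%%
+%%  For example (the registers here are only illustrative):
+%%
+%%    [{move,{y,2},{x,0}},{kill,{y,0}},{kill,{y,1}}|Call]
+%%
+%%  becomes:
+%%
+%%    [{kill,{y,0}},{kill,{y,1}},{move,{y,2},{x,0}}|Call]
+%%
+%%  unless one of the kill instructions refers to {y,2} itself, in which
+%%  case the move must stay where it is.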
+
+opt_move_to_x0(Moves) ->
+ opt_move_to_x0(Moves, []).
+
+opt_move_to_x0([{move,_,{x,0}}=I|Is0], Acc0) ->
+ case move_past_kill(Is0, I, Acc0) of
+ impossible -> opt_move_to_x0(Is0, [I|Acc0]);
+ {Is,Acc} -> opt_move_to_x0(Is, Acc)
+ end;
+opt_move_to_x0([I|Is], Acc) ->
+ opt_move_to_x0(Is, [I|Acc]);
+opt_move_to_x0([], Acc) -> reverse(Acc).
+
+move_past_kill([{kill,Src}|_], {move,Src,_}, _) ->
+ impossible;
+move_past_kill([{kill,_}=I|Is], Move, Acc) ->
+ move_past_kill(Is, Move, [I|Acc]);
+move_past_kill(Is, Move, Acc) ->
+ {Is,[Move|Acc]}.
+
%% setup_args(Args, Anno, Context) -> [Instruction].
%% setup_args(Args) -> [Instruction].
%% Set up X registers for a call.
setup_args(Args, Anno, none, St) ->
- setup_args(Args) ++ kill_yregs(Anno, St);
+ case {setup_args(Args),kill_yregs(Anno, St)} of
+ {Moves,[]} ->
+ Moves;
+ {Moves,Kills} ->
+ opt_move_to_x0(Moves ++ Kills)
+ end;
setup_args(Args, _, _, _) ->
setup_args(Args).
@@ -1694,7 +1944,7 @@ get_register(V, Regs) ->
beam_args(As, St) ->
[beam_arg(A, St) || A <- As].
-beam_arg(#b_var{name=Name}, #cg{regs=Regs}) ->
+beam_arg(#b_var{}=Name, #cg{regs=Regs}) ->
maps:get(Name, Regs);
beam_arg(#b_literal{val=Val}, _) ->
if
diff --git a/lib/compiler/src/beam_ssa_dead.erl b/lib/compiler/src/beam_ssa_dead.erl
new file mode 100644
index 0000000000..bb43a550ae
--- /dev/null
+++ b/lib/compiler/src/beam_ssa_dead.erl
@@ -0,0 +1,1076 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% Dead code is code that is executed but has no effect. This
+%% optimization pass either removes dead code or jumps around it,
+%% potentially making it unreachable so that it can be dropped
+%% the next time beam_ssa:linearize/1 is called.
+%%
+
+-module(beam_ssa_dead).
+-export([opt/1]).
+
+-include("beam_ssa.hrl").
+-import(lists, [append/1,keymember/3,last/1,member/2,
+ takewhile/2,reverse/1]).
+
+-type used_vars() :: #{beam_ssa:label():=ordsets:ordset(beam_ssa:var_name())}.
+
+-type basic_type_test() :: atom() | {'is_tagged_tuple',pos_integer(),atom()}.
+-type type_test() :: basic_type_test() | {'not',basic_type_test()}.
+-type op_name() :: atom().
+-type basic_rel_op() :: {op_name(),beam_ssa:b_var(),beam_ssa:value()} |
+ {basic_type_test(),beam_ssa:value()}.
+-type rel_op() :: {op_name(),beam_ssa:b_var(),beam_ssa:value()} |
+ {type_test(),beam_ssa:value()}.
+
+-record(st,
+ {bs :: beam_ssa:block_map(),
+ us :: used_vars(),
+ skippable :: #{beam_ssa:label():='true'},
+ rel_op=none :: 'none' | rel_op(),
+ target=any :: 'any' | 'one_way' | beam_ssa:label()
+ }).
+
+-spec opt([{Label0,Block0}]) -> [{Label,Block}] when
+ Label0 :: beam_ssa:label(),
+ Block0 :: beam_ssa:b_blk(),
+ Label :: beam_ssa:label(),
+ Block :: beam_ssa:b_blk().
+
+opt(Linear) ->
+ {Used,Skippable} = used_vars(Linear),
+ Blocks0 = maps:from_list(Linear),
+ St0 = #st{bs=Blocks0,us=Used,skippable=Skippable},
+ St = shortcut_opt(St0),
+ #st{bs=Blocks} = combine_eqs(St#st{us=#{}}),
+ beam_ssa:linearize(Blocks).
+
+%%%
+%%% Shortcut br/switch targets.
+%%%
+%%% A br/switch may branch to another br/switch that in turn always
+%%% branches to another target. Rewrite br/switch to refer to the
+%%% ultimate targets directly. That will save execution time, but
+%%% could also reduce the size of the code if some of the original
+%%% targets become unreachable and can be deleted.
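+%%%
+%%% For example, if block 1 ends in a br whose success label is block 2,
+%%% and block 2 contains no instructions and unconditionally branches to
+%%% block 7, the br in block 1 is rewritten to branch directly to
+%%% block 7.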
+%%%
+%%% When rewriting branches, we must be careful not to skip instructions
+%%% that have side effects or that bind variables that will be used
+%%% at the new target.
+%%%
+%%% We must also avoid branching to phi nodes. The reason is
+%%% twofold. First, we might create a critical edge, which is strictly
+%%% forbidden. Second, there will be a branch from a block that is not
+%%% listed in the list of predecessors in the phi node. Those
+%%% limitations could probably be overcome, but it is not clear how
+%%% much that would improve the code.
+%%%
+
+shortcut_opt(#st{bs=Blocks}=St) ->
+ %% Processing the blocks in reverse post order seems to give more
+ %% opportunities for optimizations compared to post order. (Based on
+ %% running scripts/diffable with both PO and RPO and looking at
+ %% the diff.)
+ %%
+ %% Unfortunately, processing the blocks in reverse post order
+ %% potentially makes the time complexity quadratic or even cubic if
+ %% the ordset of unset variables grows large, instead of
+    %% the linear complexity of post order processing. We still try to
+    %% get reasonable compilation times through optimizations that keep
+    %% the constant factor as low as possible, and we try to avoid the
+    %% cubic time complexity by keeping the set of unset variables as small
+ %% as possible.
+
+ Ls = beam_ssa:rpo(Blocks),
+ shortcut_opt(Ls, #{}, St).
+
+shortcut_opt([L|Ls], Bs, #st{bs=Blocks0}=St) ->
+ #b_blk{is=Is,last=Last0} = Blk0 = get_block(L, St),
+ case shortcut_terminator(Last0, Is, L, Bs, St) of
+ Last0 ->
+ %% No change. No need to update the block.
+ shortcut_opt(Ls, Bs, St);
+ Last ->
+ %% The terminator was simplified in some way.
+ %% Update the block.
+ Blk = Blk0#b_blk{last=Last},
+ Blocks = Blocks0#{L=>Blk},
+ shortcut_opt(Ls, Bs, St#st{bs=Blocks})
+ end;
+shortcut_opt([], _, St) -> St.
+
+shortcut_terminator(#b_br{bool=#b_literal{val=true},succ=Succ0},
+ _Is, From, Bs, St0) ->
+ St = St0#st{rel_op=none},
+ shortcut(Succ0, From, Bs, St);
+shortcut_terminator(#b_br{bool=#b_var{}=Bool,succ=Succ0,fail=Fail0}=Br,
+ Is, From, Bs, St0) ->
+ St = St0#st{target=one_way},
+ RelOp = get_rel_op(Bool, Is),
+ SuccBs = bind_var(Bool, #b_literal{val=true}, Bs),
+ BrSucc = shortcut(Succ0, From, SuccBs, St#st{rel_op=RelOp}),
+ FailBs = bind_var(Bool, #b_literal{val=false}, Bs),
+ BrFail = shortcut(Fail0, From, FailBs, St#st{rel_op=invert_op(RelOp)}),
+ case {BrSucc,BrFail} of
+ {#b_br{bool=#b_literal{val=true},succ=Succ},
+ #b_br{bool=#b_literal{val=true},succ=Fail}}
+ when Succ =/= Succ0; Fail =/= Fail0 ->
+ %% One or both of the targets were cut short.
+ beam_ssa:normalize(Br#b_br{succ=Succ,fail=Fail});
+ {_,_} ->
+ %% No change.
+ Br
+ end;
+shortcut_terminator(#b_switch{arg=Bool,list=List0}=Sw, _Is, From, Bs, St) ->
+ List = shortcut_switch(List0, Bool, From, Bs, St),
+ beam_ssa:normalize(Sw#b_switch{list=List});
+shortcut_terminator(Last, _Is, _From, _Bs, _St) ->
+ Last.
+
+shortcut_switch([{Lit,L0}|T], Bool, From, Bs, St0) ->
+ RelOp = {'=:=',Bool,Lit},
+ St = St0#st{rel_op=RelOp},
+ #b_br{bool=#b_literal{val=true},succ=L} =
+ shortcut(L0, From, bind_var(Bool, Lit, Bs), St#st{target=one_way}),
+ [{Lit,L}|shortcut_switch(T, Bool, From, Bs, St0)];
+shortcut_switch([], _, _, _, _) -> [].
+
+shortcut(L, From, Bs, St) ->
+ shortcut_1(L, From, Bs, ordsets:new(), St).
+
+shortcut_1(L, From, Bs0, UnsetVars0, St) ->
+ case shortcut_2(L, From, Bs0, UnsetVars0, St) of
+ none ->
+ %% No more shortcuts found. Package up the previous
+ %% label in an unconditional branch.
+ #b_br{bool=#b_literal{val=true},succ=L,fail=L};
+ {#b_br{bool=#b_var{}}=Br,_,_} ->
+ %% This is a two-way branch. We can't do any better.
+ Br;
+ {#b_br{bool=#b_literal{val=true},succ=Succ},Bs,UnsetVars} ->
+ %% This is a safe `br`, but try to find a better one.
+ shortcut_1(Succ, L, Bs, UnsetVars, St)
+ end.
+
+%% Try to shortcut this block, branching to a successor.
+shortcut_2(L, From, Bs0, UnsetVars0, St) ->
+ #b_blk{is=Is,last=Last} = get_block(L, St),
+ case eval_is(Is, From, Bs0, St) of
+ none ->
+ %% It is not safe to avoid this block because it
+ %% has instructions with potential side effects.
+ none;
+ Bs ->
+ %% The instructions in the block (if any) don't
+ %% have any side effects and can be skipped.
+ %% Evaluate the terminator.
+ case eval_terminator(Last, Bs, St) of
+ none ->
+ %% The terminator is not suitable (could be
+ %% because it is a switch that can't be simplified
+ %% or it is a ret instruction).
+ none;
+ #b_br{}=Br ->
+ %% We have a potentially suitable br.
+ %% Now update the set of variables that will never
+ %% be set if this block will be skipped.
+ case update_unset_vars(L, Is, Br, UnsetVars0, St) of
+ unsafe ->
+ %% It is unsafe to use this br,
+ %% because it refers to a variable defined
+ %% in this block.
+ shortcut_unsafe_br(Br, L, Bs, UnsetVars0, St);
+ UnsetVars ->
+ %% Continue checking whether this br is
+ %% suitable.
+ shortcut_test_br(Br, L, Bs, UnsetVars, St)
+ end
+ end
+ end.
+
+shortcut_test_br(Br, From, Bs, UnsetVars, St) ->
+ case is_br_safe(UnsetVars, Br, St) of
+ false ->
+ shortcut_unsafe_br(Br, From, Bs, UnsetVars, St);
+ true ->
+ shortcut_safe_br(Br, From, Bs, UnsetVars, St)
+ end.
+
+shortcut_unsafe_br(Br, From, Bs, UnsetVars, #st{target=Target}=St) ->
+ %% Branching using this `br` is unsafe, either because it
+ %% is an unconditional branch to a phi node, or because
+ %% one or more of the variables that are not set will be
+ %% used. Try to follow branches of this `br`, to find a
+ %% safe `br`.
+ case Br of
+ #b_br{bool=#b_literal{val=true},succ=L} ->
+ case Target of
+ L ->
+ %% We have reached the forced target, and it
+ %% is unsafe. Give up.
+ none;
+ _ ->
+ %% Try following this branch to see whether it
+ %% leads to a safe `br`.
+ shortcut_2(L, From, Bs, UnsetVars, St)
+ end;
+ #b_br{bool=#b_var{},succ=Succ,fail=Fail} ->
+ case {Succ,Fail} of
+ {L,Target} ->
+ %% The failure label is the forced target.
+ %% Try following the success label to see
+ %% whether it also ultimately ends up at the
+ %% forced target.
+ shortcut_2(L, From, Bs, UnsetVars, St);
+ {Target,L} ->
+ %% The success label is the forced target.
+ %% Try following the failure label to see
+ %% whether it also ultimately ends up at the
+ %% forced target.
+ shortcut_2(L, From, Bs, UnsetVars, St);
+ {_,_} ->
+ case Target of
+ any ->
+ %% This two-way branch is unsafe. Try
+ %% reducing it to a one-way branch.
+ shortcut_two_way(Br, From, Bs, UnsetVars, St);
+ one_way ->
+ %% This two-way branch is unsafe. Try
+ %% reducing it to a one-way branch.
+ shortcut_two_way(Br, From, Bs, UnsetVars, St);
+ _ when is_integer(Target) ->
+ %% This two-way branch is unsafe, and
+ %% there already is a forced target.
+ %% Give up.
+ none
+ end
+ end
+ end.
+
+shortcut_safe_br(Br, From, Bs, UnsetVars, #st{target=Target}=St) ->
+ %% This `br` instruction is safe. It does not branch to a phi
+ %% node, and all variables that will be used are guaranteed to be
+ %% defined.
+ case Br of
+ #b_br{bool=#b_literal{val=true},succ=L} ->
+ %% This is a one-way branch.
+ case Target of
+ any ->
+ %% No forced target. Success!
+ {Br,Bs,UnsetVars};
+ one_way ->
+ %% The target must be a one-way branch, which this
+ %% `br` is. Success!
+ {Br,Bs,UnsetVars};
+ L when is_integer(Target) ->
+ %% The forced target is L. Success!
+ {Br,Bs,UnsetVars};
+ _ when is_integer(Target) ->
+ %% Wrong forced target. Try following this branch
+ %% to see if it ultimately ends up at the forced
+ %% target.
+ shortcut_2(L, From, Bs, UnsetVars, St)
+ end;
+ #b_br{bool=#b_var{}} ->
+ %% This is a two-way branch.
+ if
+ Target =:= any; Target =:= one_way ->
+ %% No specific forced target. Try to reduce the
+                    %% two-way branch to a one-way branch.
+ case shortcut_two_way(Br, From, Bs, UnsetVars, St) of
+ none when Target =:= any ->
+ %% This `br` can't be reduced to a one-way
+ %% branch. Return the `br` as-is.
+ {Br,Bs,UnsetVars};
+ none when Target =:= one_way ->
+ %% This `br` can't be reduced to a one-way
+ %% branch. The caller wants a one-way
+ %% branch. Give up.
+ none;
+ {_,_,_}=Res ->
+ %% This `br` was successfully reduced to a
+ %% one-way branch.
+ Res
+ end;
+ is_integer(Target) ->
+ %% There is a forced target, which can't
+ %% be reached because this `br` is a two-way
+ %% branch. Give up.
+ none
+ end
+ end.
+
+update_unset_vars(L, Is, Br, UnsetVars, #st{skippable=Skippable}) ->
+ case is_map_key(L, Skippable) of
+ true ->
+            %% None of the variables set in this block are used in
+ %% the successors. Thus, there is no need to add the
+ %% variables to the set of unset variables.
+ case Br of
+ #b_br{bool=#b_var{}=Bool} ->
+ case keymember(Bool, #b_set.dst, Is) of
+ true ->
+ %% Bool is a variable defined in this
+ %% block. Using the br instruction from
+ %% this block (and skipping the body of
+ %% the block) is unsafe.
+ unsafe;
+ false ->
+ %% Bool is either a variable not defined
+ %% in this block or a literal. Adding it
+ %% to the UnsetVars set would not change
+ %% the outcome of the tests in
+ %% is_br_safe/2.
+ UnsetVars
+ end;
+ #b_br{} ->
+ UnsetVars
+ end;
+ false ->
+ %% Some variables defined in this block are used by
+ %% successors. We must update the set of unset variables.
+ SetInThisBlock = [V || #b_set{dst=V} <- Is],
+ ordsets:union(UnsetVars, ordsets:from_list(SetInThisBlock))
+ end.
+
+shortcut_two_way(#b_br{succ=Succ,fail=Fail}, From, Bs0, UnsetVars0, St0) ->
+ case shortcut_2(Succ, From, Bs0, UnsetVars0, St0#st{target=Fail}) of
+ {#b_br{bool=#b_literal{},succ=Fail},_,_}=Res ->
+ Res;
+ none ->
+ St = St0#st{target=Succ},
+ case shortcut_2(Fail, From, Bs0, UnsetVars0, St) of
+ {#b_br{bool=#b_literal{},succ=Succ},_,_}=Res ->
+ Res;
+ none ->
+ none
+ end
+ end.
+
+get_block(L, St) ->
+ #st{bs=#{L:=Blk}} = St,
+ Blk.
+
+is_br_safe(UnsetVars, Br, #st{us=Us}=St) ->
+ %% Check that none of the unset variables will be used.
+ case Br of
+ #b_br{bool=#b_var{}=V,succ=Succ,fail=Fail} ->
+ #{Succ:=Used0,Fail:=Used1} = Us,
+
+ %% A two-way branch never branches to a phi node, so there
+ %% is no need to check for phi nodes here.
+ not member(V, UnsetVars) andalso
+ ordsets:is_disjoint(Used0, UnsetVars) andalso
+ ordsets:is_disjoint(Used1, UnsetVars);
+ #b_br{succ=Same,fail=Same} ->
+ %% An unconditional branch must not jump to
+ %% a phi node.
+ not is_forbidden(Same, St) andalso
+ ordsets:is_disjoint(map_get(Same, Us), UnsetVars)
+ end.
+
+is_forbidden(L, St) ->
+ case get_block(L, St) of
+ #b_blk{is=[#b_set{op=phi}|_]} -> true;
+ #b_blk{is=[#b_set{op=peek_message}|_]} -> true;
+ #b_blk{} -> false
+ end.
+
+
+%% Evaluate the instructions in the block.
+%% Return the updated bindings, or 'none' if there is
+%% any instruction with potential side effects.
+
+eval_is([#b_set{op=phi,dst=Dst,args=Args}|Is], From, Bs0, St) ->
+ Val = get_phi_arg(Args, From),
+ Bs = bind_var(Dst, Val, Bs0),
+ eval_is(Is, From, Bs, St);
+eval_is([#b_set{op={bif,_},dst=Dst}=I0|Is], From, Bs, St) ->
+ I = sub(I0, Bs),
+ case eval_bif(I, St) of
+ #b_literal{}=Val ->
+ eval_is(Is, From, bind_var(Dst, Val, Bs), St);
+ none ->
+ eval_is(Is, From, Bs, St)
+ end;
+eval_is([#b_set{op=Op,dst=Dst}=I|Is], From, Bs, St)
+ when Op =:= is_tagged_tuple; Op =:= is_nonempty_list ->
+ #b_set{args=Args} = sub(I, Bs),
+ case eval_rel_op(Op, Args, St) of
+ #b_literal{}=Val ->
+ eval_is(Is, From, bind_var(Dst, Val, Bs), St);
+ none ->
+ eval_is(Is, From, Bs, St)
+ end;
+eval_is([#b_set{}=I|Is], From, Bs, St) ->
+ case beam_ssa:no_side_effect(I) of
+ true ->
+ %% This instruction has no side effects. It can
+ %% safely be omitted.
+ eval_is(Is, From, Bs, St);
+ false ->
+ %% This instruction may have some side effect.
+ %% It is not safe to avoid this instruction.
+ none
+ end;
+eval_is([], _From, Bs, _St) -> Bs.
+
+get_phi_arg([{Val,From}|_], From) -> Val;
+get_phi_arg([_|As], From) -> get_phi_arg(As, From).
+
+eval_terminator(#b_br{bool=#b_var{}=Bool}=Br, Bs, _St) ->
+ Val = get_value(Bool, Bs),
+ beam_ssa:normalize(Br#b_br{bool=Val});
+eval_terminator(#b_br{bool=#b_literal{}}=Br, _Bs, _St) ->
+ beam_ssa:normalize(Br);
+eval_terminator(#b_switch{arg=Arg,fail=Fail,list=List}=Sw, Bs, St) ->
+ case get_value(Arg, Bs) of
+ #b_literal{}=Val ->
+ %% Literal argument. Simplify to a `br`.
+ beam_ssa:normalize(Sw#b_switch{arg=Val});
+ #b_var{} ->
+ %% Try optimizing the switch.
+ case eval_switch(List, Arg, St, Fail) of
+ none ->
+ none;
+ To when is_integer(To) ->
+ %% Either one of the values in the switch
+ %% matched a previous value in a '=:=' test, or
+ %% none of the values matched a previous test.
+ #b_br{bool=#b_literal{val=true},succ=To,fail=To}
+ end
+ end;
+eval_terminator(#b_ret{}, _Bs, _St) ->
+ none.
+
+eval_switch(List, Arg, #st{rel_op={_,Arg,_}=PrevOp}, Fail) ->
+ %% There is a previous relational operator testing the same variable.
+ %% Optimization may be possible.
+ eval_switch_1(List, Arg, PrevOp, Fail);
+eval_switch(_, _, _, _) ->
+ %% There is either no previous relational operator, or it tests
+ %% a different variable. Nothing to optimize.
+ none.
+
+eval_switch_1([{Lit,Lbl}|T], Arg, PrevOp, Fail) ->
+ RelOp = {'=:=',Arg,Lit},
+ case will_succeed(PrevOp, RelOp) of
+ yes ->
+ %% Success. This branch will always be taken.
+ Lbl;
+ no ->
+ %% This branch will never be taken.
+ eval_switch_1(T, Arg, PrevOp, Fail);
+ maybe ->
+ %% This label could be reached.
+ eval_switch_1(T, Arg, PrevOp, none)
+ end;
+eval_switch_1([], _Arg, _PrevOp, Fail) ->
+ %% Fail is now either the failure label or 'none'.
+ Fail.
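+
+%% For example, if the previous test established {'=:=',Arg,#b_literal{val=a}},
+%% the switch list [{#b_literal{val=a},4},{#b_literal{val=b},7}] evaluates
+%% to label 4, since the first value is known to match.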
+
+bind_var(Var, Val0, Bs) ->
+ Val = get_value(Val0, Bs),
+ Bs#{Var=>Val}.
+
+get_value(#b_var{}=Var, Bs) ->
+ case Bs of
+ #{Var:=Val} -> get_value(Val, Bs);
+ #{} -> Var
+ end;
+get_value(#b_literal{}=Lit, _Bs) -> Lit.
+
+eval_bif(#b_set{op={bif,Bif},args=Args}, St) ->
+ Arity = length(Args),
+ case erl_bifs:is_pure(erlang, Bif, Arity) of
+ false ->
+ none;
+ true ->
+ case get_lit_args(Args) of
+ none ->
+ %% Not literal arguments. Try to evaluate
+ %% it based on a previous relational operator.
+ eval_rel_op({bif,Bif}, Args, St);
+ LitArgs ->
+ try apply(erlang, Bif, LitArgs) of
+ Val -> #b_literal{val=Val}
+ catch
+ error:_ -> none
+ end
+ end
+ end.
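+
+%% For example, a pure BIF applied only to literal arguments is folded at
+%% compile time: {bif,'+'} with the arguments #b_literal{val=1} and
+%% #b_literal{val=2} evaluates to #b_literal{val=3}.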
+
+get_lit_args([#b_literal{val=Lit1}]) ->
+ [Lit1];
+get_lit_args([#b_literal{val=Lit1},
+ #b_literal{val=Lit2}]) ->
+ [Lit1,Lit2];
+get_lit_args([#b_literal{val=Lit1},
+ #b_literal{val=Lit2},
+ #b_literal{val=Lit3}]) ->
+ [Lit1,Lit2,Lit3];
+get_lit_args(_) -> none.
+
+%%%
+%%% Handling of relational operators.
+%%%
+
+get_rel_op(Bool, [_|_]=Is) ->
+ case last(Is) of
+ #b_set{op=Op,dst=Bool,args=Args} ->
+ normalize_op(Op, Args);
+ #b_set{} ->
+ none
+ end;
+get_rel_op(_, []) -> none.
+
+%% normalize_op(Op, Args) -> NormalizedOp | 'none'
+%%    NormalizedOp = {Operator,Variable,Variable|Literal} |
+%%                   {TypeTest,Variable}
+%%    Operator = '<' | '=<' | '=:=' | '=/=' | '==' | '/=' | '>=' | '>'
+%%    TypeTest = is_atom | is_integer ...
+%%    Variable = #b_var{}
+%%    Literal = #b_literal{}
+%%
+%% Normalize a relational operator to facilitate further
+%% comparisons between operators. Always make the variable
+%% operand the first operand. If there are two variables,
+%% order them in lexical order.
+%%
+%% For example, this instruction:
+%%
+%%    #b_set{op={bif,'=<'},args=[#b_literal{},#b_var{}]}
+%%
+%% will be normalized to:
+%%
+%%    {'>=',#b_var{},#b_literal{}}
+
+-spec normalize_op(Op, Args) -> NormalizedOp | 'none' when
+ Op :: beam_ssa:op(),
+ Args :: [beam_ssa:value()],
+ NormalizedOp :: basic_rel_op().
+
+normalize_op(is_tagged_tuple, [Arg,#b_literal{val=Size},#b_literal{val=Tag}])
+ when is_integer(Size), is_atom(Tag) ->
+ {{is_tagged_tuple,Size,Tag},Arg};
+normalize_op(is_nonempty_list, [Arg]) ->
+ {is_nonempty_list,Arg};
+normalize_op({bif,Bif}, [Arg]) ->
+ case erl_internal:new_type_test(Bif, 1) of
+ true -> {Bif,Arg};
+ false -> none
+ end;
+normalize_op({bif,Bif}, [_,_]=Args) ->
+ case erl_internal:comp_op(Bif, 2) of
+ true ->
+ normalize_op_1(Bif, Args);
+ false ->
+ none
+ end;
+normalize_op(_, _) -> none.
+
+normalize_op_1(Bif, Args) ->
+ case Args of
+ [#b_literal{}=Arg1,#b_var{}=Arg2] ->
+ {turn_op(Bif),Arg2,Arg1};
+ [#b_var{}=Arg1,#b_literal{}=Arg2] ->
+ {Bif,Arg1,Arg2};
+ [#b_var{}=A,#b_var{}=B] ->
+ if A < B -> {Bif,A,B};
+ true -> {turn_op(Bif),B,A}
+ end;
+ [#b_literal{},#b_literal{}] ->
+ none
+ end.
+
+-spec invert_op(basic_rel_op() | 'none') -> rel_op() | 'none'.
+
+invert_op({Op,Arg1,Arg2}) ->
+ {invert_op_1(Op),Arg1,Arg2};
+invert_op({TypeTest,Arg}) ->
+ {{'not',TypeTest},Arg};
+invert_op(none) -> none.
+
+invert_op_1('>=') -> '<';
+invert_op_1('<') -> '>=';
+invert_op_1('=<') -> '>';
+invert_op_1('>') -> '=<';
+invert_op_1('=:=') -> '=/=';
+invert_op_1('=/=') -> '=:=';
+invert_op_1('==') -> '/=';
+invert_op_1('/=') -> '=='.
+
+turn_op('<') -> '>';
+turn_op('=<') -> '>=';
+turn_op('>') -> '<';
+turn_op('>=') -> '=<';
+turn_op('=:='=Op) -> Op;
+turn_op('=/='=Op) -> Op;
+turn_op('=='=Op) -> Op;
+turn_op('/='=Op) -> Op.
+
+eval_rel_op(_Bif, _Args, #st{rel_op=none}) ->
+ none;
+eval_rel_op(Bif, Args, #st{rel_op=Prev}) ->
+ case normalize_op(Bif, Args) of
+ none ->
+ none;
+ RelOp ->
+ case will_succeed(Prev, RelOp) of
+ yes -> #b_literal{val=true};
+ no -> #b_literal{val=false};
+ maybe -> none
+ end
+ end.
+
+%% will_succeed(PrevCondition, Condition) -> yes | no | maybe
+%% PrevCondition is a condition known to be true. Tell whether
+%% Condition is certain to succeed ('yes'), certain to fail ('no'),
+%% or whether the outcome cannot be determined ('maybe').
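+%%
+%% A hypothetical illustration (written in Erlang source notation;
+%% in the actual code both conditions are normalized operator
+%% tuples such as {'>',Var,Literal}):
+%%
+%% known: X > 10, tested: X > 5 -> yes (implied by the known condition)
+%% known: X > 10, tested: X < 7 -> no (contradicts the known condition)
+%% known: X > 10, tested: X > 20 -> maybe (cannot be decided)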
+
+will_succeed({_Op,_Var,_Value}=Same, {_Op,_Var,_Value}=Same) ->
+ %% Repeated test.
+ yes;
+will_succeed({Op1,Var,#b_literal{val=A}}, {Op2,Var,#b_literal{val=B}}) ->
+ will_succeed_1(Op1, A, Op2, B);
+will_succeed({Op1,Var,#b_var{}=A}, {Op2,Var,#b_var{}=B}) ->
+ will_succeed_vars(Op1, A, Op2, B);
+will_succeed({'=:=',Var,#b_literal{val=A}}, {TypeTest,Var}) ->
+ eval_type_test(TypeTest, A);
+will_succeed({_,_}=Same, {_,_}=Same) ->
+ %% Repeated type test.
+ yes;
+will_succeed({Test1,Var}, {Test2,Var}) ->
+ will_succeed_test(Test1, Test2);
+will_succeed({_,_}, {_,_}) ->
+ maybe;
+will_succeed({_,_}, {_,_,_}) ->
+ maybe;
+will_succeed({_,_,_}, {_,_}) ->
+ maybe;
+will_succeed({_,_,_}, {_,_,_}) ->
+ maybe.
+
+will_succeed_test({'not',Test1}, Test2) ->
+ case Test1 =:= Test2 of
+ true -> no;
+ false -> maybe
+ end;
+will_succeed_test(is_tuple, {is_tagged_tuple,_,_}) ->
+ maybe;
+will_succeed_test({is_tagged_tuple,_,_}, is_tuple) ->
+ yes;
+will_succeed_test(is_list, is_nonempty_list) ->
+ maybe;
+will_succeed_test(is_nonempty_list, is_list) ->
+ yes;
+will_succeed_test(T1, T2) ->
+ case is_numeric_test(T1) andalso is_numeric_test(T2) of
+ true -> maybe;
+ false -> no
+ end.
+
+will_succeed_1('=:=', A, '<', B) ->
+ if
+ B =< A -> no;
+ true -> yes
+ end;
+will_succeed_1('=:=', A, '=<', B) ->
+ if
+ B < A -> no;
+ true -> yes
+ end;
+will_succeed_1('=:=', A, '=:=', B) when A =/= B ->
+ no;
+will_succeed_1('=:=', A, '=/=', B) ->
+ if
+ A =:= B -> no;
+ true -> yes
+ end;
+will_succeed_1('=:=', A, '>=', B) ->
+ if
+ B > A -> no;
+ true -> yes
+ end;
+will_succeed_1('=:=', A, '>', B) ->
+ if
+ B >= A -> no;
+ true -> yes
+ end;
+
+will_succeed_1('=/=', A, '=:=', B) when A =:= B -> no;
+
+will_succeed_1('<', A, '=:=', B) when B >= A -> no;
+will_succeed_1('<', A, '=/=', B) when B >= A -> yes;
+will_succeed_1('<', A, '<', B) when B >= A -> yes;
+will_succeed_1('<', A, '=<', B) when B > A -> yes;
+will_succeed_1('<', A, '>=', B) when B > A -> no;
+will_succeed_1('<', A, '>', B) when B >= A -> no;
+
+will_succeed_1('=<', A, '=:=', B) when B > A -> no;
+will_succeed_1('=<', A, '=/=', B) when B > A -> yes;
+will_succeed_1('=<', A, '<', B) when B > A -> yes;
+will_succeed_1('=<', A, '=<', B) when B >= A -> yes;
+will_succeed_1('=<', A, '>=', B) when B > A -> no;
+will_succeed_1('=<', A, '>', B) when B >= A -> no;
+
+will_succeed_1('>=', A, '=:=', B) when B < A -> no;
+will_succeed_1('>=', A, '=/=', B) when B < A -> yes;
+will_succeed_1('>=', A, '<', B) when B =< A -> no;
+will_succeed_1('>=', A, '=<', B) when B < A -> no;
+will_succeed_1('>=', A, '>=', B) when B =< A -> yes;
+will_succeed_1('>=', A, '>', B) when B < A -> yes;
+
+will_succeed_1('>', A, '=:=', B) when B =< A -> no;
+will_succeed_1('>', A, '=/=', B) when B =< A -> yes;
+will_succeed_1('>', A, '<', B) when B =< A -> no;
+will_succeed_1('>', A, '=<', B) when B < A -> no;
+will_succeed_1('>', A, '>=', B) when B =< A -> yes;
+will_succeed_1('>', A, '>', B) when B < A -> yes;
+
+will_succeed_1('==', A, '==', B) ->
+ if
+ A == B -> yes;
+ true -> no
+ end;
+will_succeed_1('==', A, '/=', B) ->
+ if
+ A == B -> no;
+ true -> yes
+ end;
+will_succeed_1('/=', A, '/=', B) when A == B -> yes;
+will_succeed_1('/=', A, '==', B) when A == B -> no;
+
+will_succeed_1(_, _, _, _) -> maybe.
+
+will_succeed_vars('=/=', Val, '=:=', Val) -> no;
+will_succeed_vars('=:=', Val, '=/=', Val) -> no;
+will_succeed_vars('=:=', Val, '>=', Val) -> yes;
+will_succeed_vars('=:=', Val, '=<', Val) -> yes;
+
+will_succeed_vars('/=', Val1, '==', Val2) when Val1 == Val2 -> no;
+will_succeed_vars('==', Val1, '/=', Val2) when Val1 == Val2 -> no;
+
+will_succeed_vars(_, _, _, _) -> maybe.
+
+is_numeric_test(is_float) -> true;
+is_numeric_test(is_integer) -> true;
+is_numeric_test(is_number) -> true;
+is_numeric_test(_) -> false.
+
+eval_type_test(Test, Arg) ->
+ case eval_type_test_1(Test, Arg) of
+ true -> yes;
+ false -> no
+ end.
+
+eval_type_test_1(is_nonempty_list, Arg) ->
+ case Arg of
+ [_|_] -> true;
+ _ -> false
+ end;
+eval_type_test_1({is_tagged_tuple,Sz,Tag}, Arg) ->
+ if
+ tuple_size(Arg) =:= Sz, element(1, Arg) =:= Tag ->
+ true;
+ true ->
+ false
+ end;
+eval_type_test_1(Test, Arg) ->
+ erlang:Test(Arg).
+
+%%%
+%%% Combine bif:'=:=' and switch instructions
+%%% to switch instructions.
+%%%
+%%% Consider this code:
+%%%
+%%% 0:
+%%% @ssa_bool = bif:'=:=' Var, literal 1
+%%% br @ssa_bool, label 2, label 3
+%%%
+%%% 2:
+%%% ret literal a
+%%%
+%%% 3:
+%%% @ssa_bool:7 = bif:'=:=' Var, literal 2
+%%% br @ssa_bool:7, label 4, label 999
+%%%
+%%% 4:
+%%% ret literal b
+%%%
+%%% 999:
+%%% .
+%%% .
+%%% .
+%%%
+%%% The two bif:'=:=' instructions can be combined
+%%% to a switch:
+%%%
+%%% 0:
+%%% switch Var, label 999, [ { literal 1, label 2 },
+%%% { literal 2, label 3 } ]
+%%%
+%%% 2:
+%%% ret literal a
+%%%
+%%% 4:
+%%% ret literal b
+%%%
+%%% 999:
+%%% .
+%%% .
+%%% .
+%%%
+
+combine_eqs(#st{bs=Blocks}=St) ->
+ Ls = reverse(beam_ssa:rpo(Blocks)),
+ combine_eqs_1(Ls, St).
+
+combine_eqs_1([L|Ls], #st{bs=Blocks0}=St0) ->
+ case comb_get_sw(L, St0) of
+ none ->
+ combine_eqs_1(Ls, St0);
+ {_,Arg,_,Fail0,List0} ->
+ case comb_get_sw(Fail0, St0) of
+ {true,Arg,Fail1,Fail,List1} ->
+ %% Another switch/br with the same arguments was
+ %% found. Try combining them.
+ case combine_lists(Fail1, List0, List1, Blocks0) of
+ none ->
+ %% Different types of literals in the lists,
+ %% or the success cases in the first switch
+ %% could branch to the second switch
+ %% (increasing code size and repeating tests).
+ combine_eqs_1(Ls, St0);
+ List ->
+ %% Everything OK! Combine the lists.
+ Sw0 = #b_switch{arg=Arg,fail=Fail,list=List},
+ Sw = beam_ssa:normalize(Sw0),
+ Blk0 = map_get(L, Blocks0),
+ Blk = Blk0#b_blk{last=Sw},
+ Blocks = Blocks0#{L:=Blk},
+ St = St0#st{bs=Blocks},
+ combine_eqs_1(Ls, St)
+ end;
+ {true,_OtherArg,_,_,_} ->
+ %% The other switch/br uses a different Arg.
+ combine_eqs_1(Ls, St0);
+ {false,_,_,_,_} ->
+ %% Not safe: combining would skip bindings of variables
+ %% that are used later, or skip instructions with
+ %% potential side effects.
+ combine_eqs_1(Ls, St0);
+ none ->
+ %% No switch/br at this label.
+ combine_eqs_1(Ls, St0)
+ end
+ end;
+combine_eqs_1([], St) -> St.
+
+comb_get_sw(L, St) ->
+ comb_get_sw(L, true, St).
+
+comb_get_sw(L, Safe0, #st{bs=Blocks,skippable=Skippable}) ->
+ #b_blk{is=Is,last=Last} = map_get(L, Blocks),
+ Safe1 = Safe0 andalso is_map_key(L, Skippable),
+ case Last of
+ #b_ret{} ->
+ none;
+ #b_br{bool=#b_var{}=Bool,succ=Succ,fail=Fail} ->
+ case comb_is(Is, Bool, Safe1) of
+ {none,_} ->
+ none;
+ {#b_set{op={bif,'=:='},args=[#b_var{}=Arg,#b_literal{}=Lit]},Safe} ->
+ {Safe,Arg,L,Fail,[{Lit,Succ}]};
+ {#b_set{},_} ->
+ none
+ end;
+ #b_br{} ->
+ none;
+ #b_switch{arg=#b_var{}=Arg,fail=Fail,list=List} ->
+ {none,Safe} = comb_is(Is, none, Safe1),
+ {Safe,Arg,L,Fail,List}
+ end.
+
+comb_is([#b_set{dst=#b_var{}=Bool}=I], Bool, Safe) ->
+ {I,Safe};
+comb_is([#b_set{}=I|Is], Bool, Safe0) ->
+ Safe = Safe0 andalso beam_ssa:no_side_effect(I),
+ comb_is(Is, Bool, Safe);
+comb_is([], _Bool, Safe) ->
+ {none,Safe}.
+
+%% combine_lists(Fail, List1, List2, Blocks) -> List|none.
+%% Try to combine two switch lists, returning the combined
+%% list or 'none' if it is not possible.
+%%
+%% The values in the two lists must all be of the same type.
+%%
+%% The code reached from the labels in the first list must
+%% not reach the failure label (if it does, tests could
+%% be repeated).
+%%
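+%% For example (hypothetical values), a list of atom literals such
+%% as [{literal a, label 4}] will not be combined with a list of
+%% integer literals such as [{literal 1, label 5}], since
+%% are_lists_compatible/2 requires all values to be of the same type.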
+
+combine_lists(Fail, L1, L2, Blocks) ->
+ Ls = beam_ssa:rpo([Lbl || {_,Lbl} <- L1], Blocks),
+ case member(Fail, Ls) of
+ true ->
+ %% One or more of the labels in the first list
+ %% could reach the failure label. That
+ %% means that the second switch/br instruction
+ %% will be retained, increasing code size and
+ %% potentially also execution time.
+ none;
+ false ->
+ %% The combined switch will replace both original
+ %% br/switch instructions, leading to a reduction in code
+ %% size and potentially also in execution time.
+ combine_lists_1(L1, L2)
+ end.
+
+combine_lists_1(List0, List1) ->
+ case are_lists_compatible(List0, List1) of
+ true ->
+ First = maps:from_list(List0),
+ List0 ++ [{Val,Lbl} || {Val,Lbl} <- List1,
+ not is_map_key(Val, First)];
+ false ->
+ none
+ end.
+
+are_lists_compatible([{#b_literal{val=Val1},_}|_],
+ [{#b_literal{val=Val2},_}|_]) ->
+ case lit_type(Val1) of
+ none -> false;
+ Type -> Type =:= lit_type(Val2)
+ end.
+
+lit_type(Val) ->
+ if
+ is_atom(Val) -> atom;
+ is_float(Val) -> float;
+ is_integer(Val) -> integer;
+ true -> none
+ end.
+
+%%%
+%%% Calculate used variables for each block.
+%%%
+
+used_vars(Linear) ->
+ used_vars(reverse(Linear), #{}, #{}).
+
+used_vars([{L,#b_blk{is=Is}=Blk}|Bs], UsedVars0, Skip0) ->
+ %% Calculate the variables used by each block and its
+ %% successors. This information is used by
+ %% shortcut_opt/1.
+
+ Successors = beam_ssa:successors(Blk),
+ Used0 = used_vars_succ(Successors, L, UsedVars0, []),
+ Used = used_vars_blk(Blk, Used0),
+ UsedVars = used_vars_phis(Is, L, Used, UsedVars0),
+
+ %% combine_eqs/1 needs different variable usage information than
+ %% shortcut_opt/1. The Skip map will have an entry for each block
+ %% that can be skipped (that is, one that does not bind any
+ %% variable used in a successor). This information is also
+ %% useful for speeding up shortcut_opt/1.
+
+ Defined0 = [Def || #b_set{dst=Def} <- Is],
+ Defined = ordsets:from_list(Defined0),
+ MaySkip = ordsets:is_disjoint(Defined, Used0),
+ case MaySkip of
+ true ->
+ Skip = Skip0#{L=>true},
+ used_vars(Bs, UsedVars, Skip);
+ false ->
+ used_vars(Bs, UsedVars, Skip0)
+ end;
+used_vars([], UsedVars, Skip) ->
+ {UsedVars,Skip}.
+
+used_vars_succ([S|Ss], L, LiveMap, Live0) ->
+ Key = {S,L},
+ case LiveMap of
+ #{Key:=Live} ->
+ %% The successor has a phi node, and the value for
+ %% this block in the phi node is a variable.
+ used_vars_succ(Ss, L, LiveMap, ordsets:union(Live, Live0));
+ #{S:=Live} ->
+ %% No phi node in the successor, or the value for
+ %% this block in the phi node is a literal.
+ used_vars_succ(Ss, L, LiveMap, ordsets:union(Live, Live0));
+ #{} ->
+ %% A peek_message block which has not been processed yet.
+ used_vars_succ(Ss, L, LiveMap, Live0)
+ end;
+used_vars_succ([], _, _, Acc) -> Acc.
+
+used_vars_phis(Is, L, Live0, UsedVars0) ->
+ UsedVars = UsedVars0#{L=>Live0},
+ Phis = takewhile(fun(#b_set{op=Op}) -> Op =:= phi end, Is),
+ case Phis of
+ [] ->
+ UsedVars;
+ [_|_] ->
+ PhiArgs = append([Args || #b_set{args=Args} <- Phis]),
+ case [{P,V} || {#b_var{}=V,P} <- PhiArgs] of
+ [_|_]=PhiVars ->
+ PhiLive0 = rel2fam(PhiVars),
+ PhiLive = [{{L,P},ordsets:union(ordsets:from_list(Vs), Live0)} ||
+ {P,Vs} <- PhiLive0],
+ maps:merge(UsedVars, maps:from_list(PhiLive));
+ [] ->
+ %% There were only literals in the phi node(s).
+ UsedVars
+ end
+ end.
+
+used_vars_blk(#b_blk{is=Is,last=Last}, Used0) ->
+ Used = ordsets:union(Used0, beam_ssa:used(Last)),
+ used_vars_is(reverse(Is), Used).
+
+used_vars_is([#b_set{op=phi}|Is], Used) ->
+ used_vars_is(Is, Used);
+used_vars_is([#b_set{dst=Dst}=I|Is], Used0) ->
+ Used1 = ordsets:union(Used0, beam_ssa:used(I)),
+ Used = ordsets:del_element(Dst, Used1),
+ used_vars_is(Is, Used);
+used_vars_is([], Used) ->
+ Used.
+
+%%%
+%%% Common utilities.
+%%%
+
+sub(#b_set{args=Args}=I, Sub) ->
+ I#b_set{args=[sub_arg(A, Sub) || A <- Args]}.
+
+sub_arg(#b_var{}=Old, Sub) ->
+ case Sub of
+ #{Old:=New} -> New;
+ #{} -> Old
+ end;
+sub_arg(Old, _Sub) -> Old.
+
+rel2fam(S0) ->
+ S1 = sofs:relation(S0),
+ S = sofs:rel2fam(S1),
+ sofs:to_external(S).
diff --git a/lib/compiler/src/beam_ssa_funs.erl b/lib/compiler/src/beam_ssa_funs.erl
new file mode 100644
index 0000000000..e77c00fa89
--- /dev/null
+++ b/lib/compiler/src/beam_ssa_funs.erl
@@ -0,0 +1,149 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%
+%%% If a fun is defined locally and only used for calls, it can be replaced
+%%% with direct calls to the relevant function. This greatly speeds up "named
+%%% functions" (which rely on make_fun to recreate themselves) and macros that
+%%% wrap their body in a fun.
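+%%%
+%%% A minimal sketch in Erlang source (hypothetical names): given
+%%%
+%%% inc(X) -> X + 1.
+%%% f(Y) -> F = fun inc/1, F(Y).
+%%%
+%%% the fun in f/1 is only used as the callee of a call, so the call
+%%% can be rewritten into a direct call to inc/1 and the make_fun
+%%% instruction becomes dead. Had the fun also been passed to another
+%%% function, the optimization would not apply.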
+%%%
+
+-module(beam_ssa_funs).
+
+-export([module/2]).
+
+-include("beam_ssa.hrl").
+
+-import(lists, [foldl/3]).
+
+-spec module(Module, Options) -> Result when
+ Module :: beam_ssa:b_module(),
+ Options :: [compile:option()],
+ Result :: {ok, beam_ssa:b_module()}.
+
+module(#b_module{body=Fs0}=Module, _Opts) ->
+ Trampolines = foldl(fun find_trampolines/2, #{}, Fs0),
+ Fs = [lfo(F, Trampolines) || F <- Fs0],
+ {ok, Module#b_module{body=Fs}}.
+
+%% If a function does absolutely nothing beyond calling another function with
+%% the same arguments in the same order, we can shave off a call by short-
+%% circuiting it.
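+%% A trampoline in Erlang source might look like (hypothetical names):
+%%
+%% f(X, Y) -> g(X, Y).
+%%
+%% A reference to f/2 found in a make_fun can then be short-circuited
+%% to a direct use of g/2.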
+find_trampolines(#b_function{args=Args,bs=Blocks}=F, Trampolines) ->
+ case map_get(0, Blocks) of
+ #b_blk{is=[#b_set{op=call,
+ args=[#b_local{}=Actual | Args],
+ dst=Dst}],
+ last=#b_ret{arg=Dst}} ->
+ {_, Name, Arity} = beam_ssa:get_anno(func_info, F),
+ Trampoline = #b_local{name=#b_literal{val=Name},arity=Arity},
+ Trampolines#{Trampoline => Actual};
+ _ ->
+ Trampolines
+ end.
+
+lfo(#b_function{bs=Blocks0}=F, Trampolines) ->
+ Linear0 = beam_ssa:linearize(Blocks0),
+ Linear = lfo_optimize(Linear0, lfo_analyze(Linear0, #{}), Trampolines),
+ F#b_function{bs=maps:from_list(Linear)}.
+
+%% Gather a map of the locally defined funs that are only used for calls.
+lfo_analyze([{_L,#b_blk{is=Is,last=Last}}|Bs], LFuns0) ->
+ LFuns = lfo_analyze_last(Last, lfo_analyze_is(Is, LFuns0)),
+ lfo_analyze(Bs, LFuns);
+lfo_analyze([], LFuns) ->
+ LFuns.
+
+lfo_analyze_is([#b_set{op=make_fun,
+ dst=Dst,
+ args=[#b_local{} | FreeVars]}=Def | Is],
+ LFuns0) ->
+ LFuns = maps:put(Dst, Def, maps:without(FreeVars, LFuns0)),
+ lfo_analyze_is(Is, LFuns);
+lfo_analyze_is([#b_set{op=call,
+ args=[Fun | CallArgs]} | Is],
+ LFuns) when is_map_key(Fun, LFuns) ->
+ #b_set{args=[#b_local{arity=Arity} | FreeVars]} = map_get(Fun, LFuns),
+ case length(CallArgs) + length(FreeVars) of
+ Arity ->
+ lfo_analyze_is(Is, maps:without(CallArgs, LFuns));
+ _ ->
+ %% This will `badarity` at runtime, and it's easier to disable the
+ %% optimization than to simulate it.
+ lfo_analyze_is(Is, maps:without([Fun | CallArgs], LFuns))
+ end;
+lfo_analyze_is([#b_set{args=Args} | Is], LFuns) when map_size(LFuns) =/= 0 ->
+ %% We disqualify funs that are used outside calls because this forces them
+ %% to be created anyway, and the slight performance gain from direct calls
+ %% is not enough to offset the potential increase in stack frame size (the
+ %% free variables need to be kept alive until the call).
+ %%
+ %% This is also a kludge to make HiPE work, as the latter will generate
+ %% code with the assumption that the functions referenced in a make_fun
+ %% will only be used by funs, which will not be the case if we mix it with
+ %% direct calls. See cerl_cconv.erl for details.
+ %%
+ %% Future optimizations like delaying fun creation until use may require us
+ %% to copy affected functions so that HiPE gets its own to play with (until
+ %% HiPE is fixed anyway).
+ lfo_analyze_is(Is, maps:without(Args, LFuns));
+lfo_analyze_is([_ | Is], LFuns) ->
+ lfo_analyze_is(Is, LFuns);
+lfo_analyze_is([], LFuns) ->
+ LFuns.
+
+lfo_analyze_last(#b_switch{arg=Arg}, LFuns) ->
+ maps:remove(Arg, LFuns);
+lfo_analyze_last(#b_ret{arg=Arg}, LFuns) ->
+ maps:remove(Arg, LFuns);
+lfo_analyze_last(_, LFuns) ->
+ LFuns.
+
+%% Replace all calls of suitable funs with a direct call to their
+%% implementation. Liveness optimization will get rid of the make_fun
+%% instruction.
+lfo_optimize(Linear, LFuns, _Trampolines) when map_size(LFuns) =:= 0 ->
+ Linear;
+lfo_optimize(Linear, LFuns, Trampolines) ->
+ lfo_optimize_1(Linear, LFuns, Trampolines).
+
+lfo_optimize_1([{L,#b_blk{is=Is0}=Blk}|Bs], LFuns, Trampolines) ->
+ Is = lfo_optimize_is(Is0, LFuns, Trampolines),
+ [{L,Blk#b_blk{is=Is}} | lfo_optimize_1(Bs, LFuns, Trampolines)];
+lfo_optimize_1([], _LFuns, _Trampolines) ->
+ [].
+
+lfo_optimize_is([#b_set{op=call,
+ args=[Fun | CallArgs]}=Call0 | Is],
+ LFuns, Trampolines) when is_map_key(Fun, LFuns) ->
+ #b_set{args=[Local | FreeVars]} = map_get(Fun, LFuns),
+ Args = [lfo_short_circuit(Local, Trampolines) | CallArgs ++ FreeVars],
+ Call = beam_ssa:add_anno(local_fun_opt, Fun, Call0#b_set{args=Args}),
+ [Call | lfo_optimize_is(Is, LFuns, Trampolines)];
+lfo_optimize_is([I | Is], LFuns, Trampolines) ->
+ [I | lfo_optimize_is(Is, LFuns, Trampolines)];
+lfo_optimize_is([], _LFuns, _Trampolines) ->
+ [].
+
+lfo_short_circuit(Call, Trampolines) ->
+ case maps:find(Call, Trampolines) of
+ {ok, Other} -> lfo_short_circuit(Other, Trampolines);
+ error -> Call
+ end.
diff --git a/lib/compiler/src/beam_ssa_opt.erl b/lib/compiler/src/beam_ssa_opt.erl
index da466a3316..bcf55f3fda 100644
--- a/lib/compiler/src/beam_ssa_opt.erl
+++ b/lib/compiler/src/beam_ssa_opt.erl
@@ -18,49 +18,168 @@
%% %CopyrightEnd%
%%
+%%%
+%%% This is a collection of various optimizations that don't need a separate
+%%% pass by themselves and/or are mutually beneficial to other passes.
+%%%
+%%% The optimizations are applied in "phases," each with a list of sub-passes
+%%% to run. These sub-passes are applied on all functions in a module before
+%%% moving on to the next phase, which lets us gather module-level information
+%%% in one phase and then apply it in the next without having to risk working
+%%% with incomplete information.
+%%%
+%%% Each sub-pass operates on a #st{} record and a func_info_db(), where the
+%%% former is just a #b_function{} whose blocks can be represented either in
+%%% linear or map form, and the latter is a map with information about all
+%%% functions in the module (see beam_ssa_opt.hrl for more details).
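+%%%
+%%% As a rough sketch (the actual pass lists are defined in
+%%% prologue_passes/1, repeated_passes/1, and epilogue_passes/1
+%%% below), the phases built in module/2 look like:
+%%%
+%%% [{Order, prologue passes},
+%%% {Order, repeated passes}, %% possibly repeated several times
+%%% {Order, epilogue passes}]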
+%%%
+
-module(beam_ssa_opt).
-export([module/2]).
--include("beam_ssa.hrl").
--import(lists, [all/2,append/1,foldl/3,keyfind/3,member/2,reverse/1,reverse/2,
- splitwith/2,takewhile/2,unzip/1]).
+-include("beam_ssa_opt.hrl").
+
+-import(lists, [all/2,append/1,duplicate/2,foldl/3,keyfind/3,member/2,
+ reverse/1,reverse/2,
+ splitwith/2,sort/1,takewhile/2,unzip/1]).
+
+-define(DEFAULT_REPETITIONS, 2).
-spec module(beam_ssa:b_module(), [compile:option()]) ->
{'ok',beam_ssa:b_module()}.
-module(#b_module{body=Fs0}=Module, Opts) ->
- Ps = passes(Opts),
- Fs = functions(Fs0, Ps),
- {ok,Module#b_module{body=Fs}}.
+-record(st, {ssa :: [{beam_ssa:label(),beam_ssa:b_blk()}] |
+ beam_ssa:block_map(),
+ args :: [beam_ssa:b_var()],
+ cnt :: beam_ssa:label(),
+ anno :: beam_ssa:anno()}).
+-type st_map() :: #{ func_id() => #st{} }.
+
+module(Module, Opts) ->
+ FuncDb0 = case proplists:get_value(no_module_opt, Opts, false) of
+ false -> build_func_db(Module);
+ true -> #{}
+ end,
+
+ %% Passes that perform module-level optimizations are often aided by
+ %% optimizing callers before callees and vice versa, so we optimize all
+ %% functions in call order, flipping it as required.
+ StMap0 = build_st_map(Module),
+ Order = get_call_order_po(StMap0, FuncDb0),
+
+ Phases =
+ [{Order, prologue_passes(Opts)}] ++
+ repeat(Opts, repeated_passes(Opts), Order) ++
+ [{Order, epilogue_passes(Opts)}],
+
+ {StMap, _FuncDb} = foldl(fun({FuncIds, Ps}, {StMap, FuncDb}) ->
+ phase(FuncIds, Ps, StMap, FuncDb)
+ end, {StMap0, FuncDb0}, Phases),
+
+ {ok, finish(Module, StMap)}.
+
+phase([FuncId | Ids], Ps, StMap, FuncDb0) ->
+ try compile:run_sub_passes(Ps, {map_get(FuncId, StMap), FuncDb0}) of
+ {St, FuncDb} ->
+ phase(Ids, Ps, StMap#{ FuncId => St }, FuncDb)
+ catch
+ Class:Error:Stack ->
+ #b_local{name=#b_literal{val=Name},arity=Arity} = FuncId,
+ io:fwrite("Function: ~w/~w\n", [Name,Arity]),
+ erlang:raise(Class, Error, Stack)
+ end;
+phase([], _Ps, StMap, FuncDb) ->
+ {StMap, FuncDb}.
+
+%% Repeats the given passes, alternating the order between runs to make the
+%% type pass more efficient.
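+%%
+%% For example, with the default of two repetitions this returns
+%% [{OrderA, Ps}, {OrderB, Ps}], i.e. one round in call order and one
+%% round in the reversed order.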
+repeat(Opts, Ps, OrderA) ->
+ Repeat = proplists:get_value(ssa_opt_repeat, Opts, ?DEFAULT_REPETITIONS),
+ OrderB = reverse(OrderA),
+ repeat_1(Repeat, Ps, OrderA, OrderB).
+
+repeat_1(0, _Ps, _OrderA, _OrderB) ->
+ [];
+repeat_1(N, Ps, OrderA, OrderB) when N > 0, N rem 2 =:= 0 ->
+ [{OrderA, Ps} | repeat_1(N - 1, Ps, OrderA, OrderB)];
+repeat_1(N, Ps, OrderA, OrderB) when N > 0, N rem 2 =:= 1 ->
+ [{OrderB, Ps} | repeat_1(N - 1, Ps, OrderA, OrderB)].
+
+%%
+
+get_func_id(F) ->
+ {_Mod, Name, Arity} = beam_ssa:get_anno(func_info, F),
+ #b_local{name=#b_literal{val=Name}, arity=Arity}.
-functions([F|Fs], Ps) ->
- [function(F, Ps)|functions(Fs, Ps)];
-functions([], _Ps) -> [].
+-spec build_st_map(#b_module{}) -> st_map().
+build_st_map(#b_module{body=Fs}) ->
+ build_st_map_1(Fs, #{}).
--type b_blk() :: beam_ssa:b_blk().
--type b_var() :: beam_ssa:b_var().
--type label() :: beam_ssa:label().
+build_st_map_1([F | Fs], Map) ->
+ #b_function{anno=Anno,args=Args,cnt=Counter,bs=Bs} = F,
+ St = #st{anno=Anno,args=Args,cnt=Counter,ssa=Bs},
+ build_st_map_1(Fs, Map#{ get_func_id(F) => St });
+build_st_map_1([], Map) ->
+ Map.
+
+-spec finish(#b_module{}, st_map()) -> #b_module{}.
+finish(#b_module{body=Fs0}=Module, StMap) ->
+ Module#b_module{body=finish_1(Fs0, StMap)}.
+
+finish_1([F0 | Fs], StMap) ->
+ #st{anno=Anno,cnt=Counter,ssa=Blocks} = map_get(get_func_id(F0), StMap),
+ F = F0#b_function{anno=Anno,bs=Blocks,cnt=Counter},
+ [F | finish_1(Fs, StMap)];
+finish_1([], _StMap) ->
+ [].
+
+%%
--record(st, {ssa :: beam_ssa:block_map() | [{label(),b_blk()}],
- args :: [b_var()],
- cnt :: label()}).
-define(PASS(N), {N,fun N/1}).
-passes(Opts0) ->
+prologue_passes(Opts) ->
Ps = [?PASS(ssa_opt_split_blocks),
+ ?PASS(ssa_opt_coalesce_phis),
+ ?PASS(ssa_opt_tail_phis),
?PASS(ssa_opt_element),
?PASS(ssa_opt_linearize),
+ ?PASS(ssa_opt_tuple_size),
?PASS(ssa_opt_record),
+ ?PASS(ssa_opt_cse), %Helps the first type pass.
+ ?PASS(ssa_opt_type_start)],
+ passes_1(Ps, Opts).
+
+%% These passes all benefit from each other (in roughly this order), so they
+%% are repeated as required.
+repeated_passes(Opts) ->
+ Ps = [?PASS(ssa_opt_live),
+ ?PASS(ssa_opt_bs_puts),
+ ?PASS(ssa_opt_dead),
?PASS(ssa_opt_cse),
- ?PASS(ssa_opt_type),
+ ?PASS(ssa_opt_tail_phis),
+ ?PASS(ssa_opt_type_continue)], %Must run after ssa_opt_dead to
+ %clean up phi nodes.
+ passes_1(Ps, Opts).
+
+epilogue_passes(Opts) ->
+ Ps = [?PASS(ssa_opt_type_finish),
?PASS(ssa_opt_float),
+ ?PASS(ssa_opt_sw),
+
+ %% Run live one more time to clean up after the float and sw
+ %% passes.
?PASS(ssa_opt_live),
?PASS(ssa_opt_bsm),
+ ?PASS(ssa_opt_bsm_units),
?PASS(ssa_opt_bsm_shortcut),
- ?PASS(ssa_opt_misc),
?PASS(ssa_opt_blockify),
?PASS(ssa_opt_sink),
- ?PASS(ssa_opt_merge_blocks)],
+ ?PASS(ssa_opt_merge_blocks),
+ ?PASS(ssa_opt_get_tuple_element),
+ ?PASS(ssa_opt_trim_unreachable)],
+ passes_1(Ps, Opts).
+
+passes_1(Ps, Opts0) ->
Negations = [{list_to_atom("no_"++atom_to_list(N)),N} ||
{N,_} <- Ps],
Opts = proplists:substitute_negations(Negations, Opts0),
@@ -72,30 +191,127 @@ passes(Opts0) ->
{NoName,fun(S) -> S end}
end || {Name,_}=P <- Ps].
-function(#b_function{anno=Anno,bs=Blocks0,args=Args,cnt=Count0}=F, Ps) ->
+%% Builds a function information map with basic information about incoming and
+%% outgoing local calls, as well as whether the function is exported.
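+%%
+%% As a rough illustration (hypothetical functions), for an exported
+%% f/1 that only calls g/2, the entry for f/1 would look something
+%% like #func_info{in=[],out=[g/2],exported=true,arg_types=[#{}]},
+%% with g/2 represented as a #b_local{} record.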
+-spec build_func_db(#b_module{}) -> func_info_db().
+build_func_db(#b_module{body=Fs,exports=Exports}) ->
try
- St = #st{ssa=Blocks0,args=Args,cnt=Count0},
- #st{ssa=Blocks,cnt=Count} = compile:run_sub_passes(Ps, St),
- F#b_function{bs=Blocks,cnt=Count}
+ fdb_1(Fs, gb_sets:from_list(Exports), #{})
catch
- Class:Error:Stack ->
- #{func_info:={_,Name,Arity}} = Anno,
- io:fwrite("Function: ~w/~w\n", [Name,Arity]),
- erlang:raise(Class, Error, Stack)
+ %% All module-level optimizations are invalid when a NIF can override a
+ %% function, so we have to bail out.
+ throw:load_nif -> #{}
end.
+fdb_1([#b_function{ args=Args,bs=Bs }=F | Fs], Exports, FuncDb0) ->
+ Id = get_func_id(F),
+
+ #b_local{name=#b_literal{val=Name}, arity=Arity} = Id,
+ Exported = gb_sets:is_element({Name, Arity}, Exports),
+ ArgTypes = duplicate(length(Args), #{}),
+
+ FuncDb1 = case FuncDb0 of
+ %% We may have an entry already if someone's called us.
+ #{ Id := Info } ->
+ FuncDb0#{ Id := Info#func_info{ exported=Exported,
+ arg_types=ArgTypes }};
+ #{} ->
+ FuncDb0#{ Id => #func_info{ exported=Exported,
+ arg_types=ArgTypes }}
+ end,
+
+ FuncDb = beam_ssa:fold_rpo(fun(_L, #b_blk{is=Is}, FuncDb) ->
+ fdb_is(Is, Id, FuncDb)
+ end, FuncDb1, Bs),
+
+ fdb_1(Fs, Exports, FuncDb);
+fdb_1([], _Exports, FuncDb) ->
+ FuncDb.
+
+fdb_is([#b_set{op=call,
+ args=[#b_local{}=Callee | _]} | Is],
+ Caller, FuncDb) ->
+ fdb_is(Is, Caller, fdb_update(Caller, Callee, FuncDb));
+fdb_is([#b_set{op=call,
+ args=[#b_remote{mod=#b_literal{val=erlang},
+ name=#b_literal{val=load_nif}},
+ _Path, _LoadInfo]} | _Is], _Caller, _FuncDb) ->
+ throw(load_nif);
+fdb_is([_ | Is], Caller, FuncDb) ->
+ fdb_is(Is, Caller, FuncDb);
+fdb_is([], _Caller, FuncDb) ->
+ FuncDb.
+
+fdb_update(Caller, Callee, FuncDb) ->
+ CallerVertex = maps:get(Caller, FuncDb, #func_info{}),
+ CalleeVertex = maps:get(Callee, FuncDb, #func_info{}),
+
+ Calls = ordsets:add_element(Callee, CallerVertex#func_info.out),
+ CalledBy = ordsets:add_element(Caller, CalleeVertex#func_info.in),
+
+ FuncDb#{ Caller => CallerVertex#func_info{out=Calls},
+ Callee => CalleeVertex#func_info{in=CalledBy} }.
+
+%% Returns the post-order of all local calls in this module. That is,
+%% called functions will be ordered before the functions calling them.
+%%
+%% Functions where module-level optimization is disabled are added last in
+%% arbitrary order.
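+%%
+%% For example (hypothetical call graph): if a/0 calls b/0 and b/0
+%% calls c/0, the returned order is [c/0, b/0, a/0], so callees are
+%% optimized before their callers.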
+
+get_call_order_po(StMap, FuncDb) ->
+ Order = gco_po(FuncDb),
+ Order ++ maps:fold(fun(K, _V, Acc) ->
+ case is_map_key(K, FuncDb) of
+ false -> [K | Acc];
+ true -> Acc
+ end
+ end, [], StMap).
+
+gco_po(FuncDb) ->
+ All = sort(maps:keys(FuncDb)),
+ {RPO,_} = gco_rpo(All, FuncDb, cerl_sets:new(), []),
+ reverse(RPO).
+
+gco_rpo([Id|Ids], FuncDb, Seen0, Acc0) ->
+ case cerl_sets:is_element(Id, Seen0) of
+ true ->
+ gco_rpo(Ids, FuncDb, Seen0, Acc0);
+ false ->
+ #func_info{out=Successors} = map_get(Id, FuncDb),
+ Seen1 = cerl_sets:add_element(Id, Seen0),
+ {Acc,Seen} = gco_rpo(Successors, FuncDb, Seen1, Acc0),
+ gco_rpo(Ids, FuncDb, Seen, [Id|Acc])
+ end;
+gco_rpo([], _, Seen, Acc) ->
+ {Acc,Seen}.
+
%%%
%%% Trivial sub passes.
%%%
-ssa_opt_linearize(#st{ssa=Blocks}=St) ->
- St#st{ssa=beam_ssa:linearize(Blocks)}.
+ssa_opt_dead({#st{ssa=Linear}=St, FuncDb}) ->
+ {St#st{ssa=beam_ssa_dead:opt(Linear)}, FuncDb}.
+
+ssa_opt_linearize({#st{ssa=Blocks}=St, FuncDb}) ->
+ {St#st{ssa=beam_ssa:linearize(Blocks)}, FuncDb}.
+
+ssa_opt_type_start({#st{ssa=Linear0,args=Args,anno=Anno}=St0, FuncDb0}) ->
+ {Linear, FuncDb} = beam_ssa_type:opt_start(Linear0, Args, Anno, FuncDb0),
+ {St0#st{ssa=Linear}, FuncDb}.
+
+ssa_opt_type_continue({#st{ssa=Linear0,args=Args,anno=Anno}=St0, FuncDb0}) ->
+ {Linear, FuncDb} = beam_ssa_type:opt_continue(Linear0, Args, Anno, FuncDb0),
+ {St0#st{ssa=Linear}, FuncDb}.
-ssa_opt_type(#st{ssa=Linear,args=Args}=St) ->
- St#st{ssa=beam_ssa_type:opt(Linear, Args)}.
+ssa_opt_type_finish({#st{args=Args,anno=Anno0}=St0, FuncDb0}) ->
+ {Anno, FuncDb} = beam_ssa_type:opt_finish(Args, Anno0, FuncDb0),
+ {St0#st{anno=Anno}, FuncDb}.
-ssa_opt_blockify(#st{ssa=Linear}=St) ->
- St#st{ssa=maps:from_list(Linear)}.
+ssa_opt_blockify({#st{ssa=Linear}=St, FuncDb}) ->
+ {St#st{ssa=maps:from_list(Linear)}, FuncDb}.
+
+ssa_opt_trim_unreachable({#st{ssa=Blocks}=St, FuncDb}) ->
+ {St#st{ssa=beam_ssa:trim_unreachable(Blocks)}, FuncDb}.
%%%
%%% Split blocks before certain instructions to enable more optimizations.
@@ -107,14 +323,264 @@ ssa_opt_blockify(#st{ssa=Linear}=St) ->
%%% for sinking get_tuple_element instructions.
%%%
-ssa_opt_split_blocks(#st{ssa=Blocks0,cnt=Count0}=St) ->
+ssa_opt_split_blocks({#st{ssa=Blocks0,cnt=Count0}=St, FuncDb}) ->
P = fun(#b_set{op={bif,element}}) -> true;
(#b_set{op=call}) -> true;
(#b_set{op=make_fun}) -> true;
(_) -> false
end,
{Blocks,Count} = beam_ssa:split_blocks(P, Blocks0, Count0),
- St#st{ssa=Blocks,cnt=Count}.
+ {St#st{ssa=Blocks,cnt=Count}, FuncDb}.
+
+%%%
+%%% Coalesce phi nodes.
+%%%
+%%% Nested cases can lead to code such as this:
+%%%
+%%% 10:
+%%% _1 = phi {literal value1, label 8}, {Var, label 9}
+%%% br 11
+%%%
+%%% 11:
+%%% _2 = phi {_1, label 10}, {literal false, label 3}
+%%%
+%%% The phi nodes can be coalesced like this:
+%%%
+%%% 11:
+%%% _2 = phi {literal value1, label 8}, {Var, label 9}, {literal false, label 3}
+%%%
+%%% Coalescing can help other optimizations, and can in some cases reduce register
+%%% shuffling (if the phi variables for two phi nodes happen to be allocated to
+%%% different registers).
+%%%
+
+ssa_opt_coalesce_phis({#st{ssa=Blocks0}=St, FuncDb}) ->
+ Ls = beam_ssa:rpo(Blocks0),
+ Blocks = c_phis_1(Ls, Blocks0),
+ {St#st{ssa=Blocks}, FuncDb}.
+
+c_phis_1([L|Ls], Blocks0) ->
+ case map_get(L, Blocks0) of
+ #b_blk{is=[#b_set{op=phi}|_]}=Blk ->
+ Blocks = c_phis_2(L, Blk, Blocks0),
+ c_phis_1(Ls, Blocks);
+ #b_blk{} ->
+ c_phis_1(Ls, Blocks0)
+ end;
+c_phis_1([], Blocks) -> Blocks.
+
+c_phis_2(L, #b_blk{is=Is0}=Blk0, Blocks0) ->
+ case c_phis_args(Is0, Blocks0) of
+ none ->
+ Blocks0;
+ {_,_,Preds}=Info ->
+ Is = c_rewrite_phis(Is0, Info),
+ Blk = Blk0#b_blk{is=Is},
+ Blocks = Blocks0#{L:=Blk},
+ c_fix_branches(Preds, L, Blocks)
+ end.
+
+c_phis_args([#b_set{op=phi,args=Args0}|Is], Blocks) ->
+ case c_phis_args_1(Args0, Blocks) of
+ none ->
+ c_phis_args(Is, Blocks);
+ Res ->
+ Res
+ end;
+c_phis_args(_, _Blocks) -> none.
+
+c_phis_args_1([{Var,Pred}|As], Blocks) ->
+ case c_get_pred_vars(Var, Pred, Blocks) of
+ none ->
+ c_phis_args_1(As, Blocks);
+ Result ->
+ Result
+ end;
+c_phis_args_1([], _Blocks) -> none.
+
+c_get_pred_vars(Var, Pred, Blocks) ->
+ case map_get(Pred, Blocks) of
+ #b_blk{is=[#b_set{op=phi,dst=Var,args=Args}]} ->
+ {Var,Pred,Args};
+ #b_blk{} ->
+ none
+ end.
+
+c_rewrite_phis([#b_set{op=phi,args=Args0}=I|Is], Info) ->
+ Args = c_rewrite_phi(Args0, Info),
+ [I#b_set{args=Args}|c_rewrite_phis(Is, Info)];
+c_rewrite_phis(Is, _Info) -> Is.
+
+c_rewrite_phi([{Var,Pred}|As], {Var,Pred,Values}) ->
+ Values ++ As;
+c_rewrite_phi([{Value,Pred}|As], {_,Pred,Values}) ->
+ [{Value,P} || {_,P} <- Values] ++ As;
+c_rewrite_phi([A|As], Info) ->
+ [A|c_rewrite_phi(As, Info)];
+c_rewrite_phi([], _Info) -> [].
+
+c_fix_branches([{_,Pred}|As], L, Blocks0) ->
+ #b_blk{last=Last0} = Blk0 = map_get(Pred, Blocks0),
+ #b_br{bool=#b_literal{val=true}} = Last0, %Assertion.
+ Last = Last0#b_br{bool=#b_literal{val=true},succ=L,fail=L},
+ Blk = Blk0#b_blk{last=Last},
+ Blocks = Blocks0#{Pred:=Blk},
+ c_fix_branches(As, L, Blocks);
+c_fix_branches([], _, Blocks) -> Blocks.
+
+%%%
+%%% Eliminate phi nodes in the tail of a function.
+%%%
+%%% Try to eliminate short blocks that start with a phi node
+%%% and end in a return. For example:
+%%%
+%%% Result = phi { Res1, 4 }, { literal true, 5 }
+%%% Ret = put_tuple literal ok, Result
+%%% ret Ret
+%%%
+%%% The code in this block can be inserted at the end of blocks 4 and
+%%% 5. Thus, the following code can be inserted into block 4:
+%%%
+%%% Ret:1 = put_tuple literal ok, Res1
+%%% ret Ret:1
+%%%
+%%% And the following code into block 5:
+%%%
+%%% Ret:2 = put_tuple literal ok, literal true
+%%% ret Ret:2
+%%%
+%%% Which can be further simplified to:
+%%%
+%%% ret literal {ok, true}
+%%%
+%%% This transformation may lead to more code improvements:
+%%%
+%%% - Stack trimming
+%%% - Fewer test_heap instructions
+%%% - Smaller stack frames
+%%%
+
+ssa_opt_tail_phis({#st{ssa=SSA0,cnt=Count0}=St, FuncDb}) ->
+ {SSA,Count} = opt_tail_phis(SSA0, Count0),
+ {St#st{ssa=SSA,cnt=Count}, FuncDb}.
+
+opt_tail_phis(Blocks, Count) when is_map(Blocks) ->
+ opt_tail_phis(maps:values(Blocks), Blocks, Count);
+opt_tail_phis(Linear0, Count0) when is_list(Linear0) ->
+ Blocks0 = maps:from_list(Linear0),
+ {Blocks,Count} = opt_tail_phis(Blocks0, Count0),
+ {beam_ssa:linearize(Blocks),Count}.
+
+opt_tail_phis([#b_blk{is=Is0,last=Last}|Bs], Blocks0, Count0) ->
+ case {Is0,Last} of
+ {[#b_set{op=phi,args=[_,_|_]}|_],#b_ret{arg=#b_var{}}=Ret} ->
+ {Phis,Is} = splitwith(fun(#b_set{op=Op}) -> Op =:= phi end, Is0),
+ case suitable_tail_ops(Is) of
+ true ->
+ {Blocks,Count} = opt_tail_phi(Phis, Is, Ret,
+ Blocks0, Count0),
+ opt_tail_phis(Bs, Blocks, Count);
+ false ->
+ opt_tail_phis(Bs, Blocks0, Count0)
+ end;
+ {_,_} ->
+ opt_tail_phis(Bs, Blocks0, Count0)
+ end;
+opt_tail_phis([], Blocks, Count) ->
+ {Blocks,Count}.
+
+opt_tail_phi(Phis0, Is, Ret, Blocks0, Count0) ->
+ Phis = rel2fam(reduce_phis(Phis0)),
+ {Blocks,Count,Cost} =
+ foldl(fun(PhiArg, Acc) ->
+ opt_tail_phi_arg(PhiArg, Is, Ret, Acc)
+ end, {Blocks0,Count0,0}, Phis),
+ MaxCost = length(Phis) * 3 + 2,
+ if
+ Cost =< MaxCost ->
+ %% The transformation would cause at most a slight
+ %% increase in code size if no more optimizations
+ %% can be applied.
+ {Blocks,Count};
+ true ->
+ %% The code size would be increased too much.
+ {Blocks0,Count0}
+ end.
+
+reduce_phis([#b_set{dst=PhiDst,args=PhiArgs}|Is]) ->
+ [{L,{PhiDst,Val}} || {Val,L} <- PhiArgs] ++ reduce_phis(Is);
+reduce_phis([]) -> [].
+
+opt_tail_phi_arg({PredL,Sub0}, Is0, Ret0, {Blocks0,Count0,Cost0}) ->
+ Blk0 = map_get(PredL, Blocks0),
+ #b_blk{is=IsPrefix,last=#b_br{succ=Next,fail=Next}} = Blk0,
+ case is_exit_bif(IsPrefix) of
+ false ->
+ Sub1 = maps:from_list(Sub0),
+ {Is1,Count,Sub} = new_names(Is0, Sub1, Count0, []),
+ Is2 = [sub(I, Sub) || I <- Is1],
+ Cost = build_cost(Is2, Cost0),
+ Is = IsPrefix ++ Is2,
+ Ret = sub(Ret0, Sub),
+ Blk = Blk0#b_blk{is=Is,last=Ret},
+ Blocks = Blocks0#{PredL:=Blk},
+ {Blocks,Count,Cost};
+ true ->
+ %% The block ends in a call to a function that
+ %% will cause an exception.
+ {Blocks0,Count0,Cost0+3}
+ end.
+
+is_exit_bif([#b_set{op=call,
+ args=[#b_remote{mod=#b_literal{val=Mod},
+ name=#b_literal{val=Name}}|Args]}]) ->
+ erl_bifs:is_exit_bif(Mod, Name, length(Args));
+is_exit_bif(_) -> false.
+
+new_names([#b_set{dst=Dst}=I|Is], Sub0, Count0, Acc) ->
+ {NewDst,Count} = new_var(Dst, Count0),
+ Sub = Sub0#{Dst=>NewDst},
+ new_names(Is, Sub, Count, [I#b_set{dst=NewDst}|Acc]);
+new_names([], Sub, Count, Acc) ->
+ {reverse(Acc),Count,Sub}.
+
+suitable_tail_ops(Is) ->
+ all(fun(#b_set{op=Op}) ->
+ is_suitable_tail_op(Op)
+ end, Is).
+
+is_suitable_tail_op({bif,_}) -> true;
+is_suitable_tail_op(put_list) -> true;
+is_suitable_tail_op(put_tuple) -> true;
+is_suitable_tail_op(_) -> false.
+
+build_cost([#b_set{op=put_list,args=Args}|Is], Cost) ->
+ case are_all_literals(Args) of
+ true ->
+ build_cost(Is, Cost);
+ false ->
+ build_cost(Is, Cost + 1)
+ end;
+build_cost([#b_set{op=put_tuple,args=Args}|Is], Cost) ->
+ case are_all_literals(Args) of
+ true ->
+ build_cost(Is, Cost);
+ false ->
+ build_cost(Is, Cost + length(Args) + 1)
+ end;
+build_cost([#b_set{op={bif,_},args=Args}|Is], Cost) ->
+ case are_all_literals(Args) of
+ true ->
+ build_cost(Is, Cost);
+ false ->
+ build_cost(Is, Cost + 1)
+ end;
+build_cost([], Cost) -> Cost.
+
+are_all_literals(Args) ->
+ all(fun(#b_literal{}) -> true;
+ (_) -> false
+ end, Args).
%%%
%%% Order element/2 calls.
@@ -125,7 +591,7 @@ ssa_opt_split_blocks(#st{ssa=Blocks0,cnt=Count0}=St) ->
%%% be replaced with get_tuple_element/3 instructions.
%%%
-ssa_opt_element(#st{ssa=Blocks}=St) ->
+ssa_opt_element({#st{ssa=Blocks}=St, FuncDb}) ->
%% Collect the information about element instructions in this
%% function.
GetEls = collect_element_calls(beam_ssa:linearize(Blocks)),
@@ -137,7 +603,7 @@ ssa_opt_element(#st{ssa=Blocks}=St) ->
%% For each chain, swap the first element call with the
%% element call with the highest index.
- St#st{ssa=swap_element_calls(Chains, Blocks)}.
+ {St#st{ssa=swap_element_calls(Chains, Blocks)}, FuncDb}.
collect_element_calls([{L,#b_blk{is=Is0,last=Last}}|Bs]) ->
case {Is0,Last} of
@@ -198,9 +664,9 @@ swap_element_calls_1([], _, Blocks) ->
%%% when applicable.
%%%
-ssa_opt_record(#st{ssa=Linear}=St) ->
+ssa_opt_record({#st{ssa=Linear}=St, FuncDb}) ->
Blocks = maps:from_list(Linear),
- St#st{ssa=record_opt(Linear, Blocks)}.
+ {St#st{ssa=record_opt(Linear, Blocks)}, FuncDb}.
record_opt([{L,#b_blk{is=Is0,last=Last}=Blk0}|Bs], Blocks) ->
Is = record_opt_is(Is0, Last, Blocks),
@@ -208,8 +674,7 @@ record_opt([{L,#b_blk{is=Is0,last=Last}=Blk0}|Bs], Blocks) ->
[{L,Blk}|record_opt(Bs, Blocks)];
record_opt([], _Blocks) -> [].
-record_opt_is([#b_set{op={bif,is_tuple},dst=#b_var{name=Bool},
- args=[Tuple]}=Set],
+record_opt_is([#b_set{op={bif,is_tuple},dst=Bool,args=[Tuple]}=Set],
Last, Blocks) ->
case is_tagged_tuple(Tuple, Bool, Last, Blocks) of
{yes,Size,Tag} ->
@@ -218,59 +683,67 @@ record_opt_is([#b_set{op={bif,is_tuple},dst=#b_var{name=Bool},
no ->
[Set]
end;
+record_opt_is([I|Is]=Is0, #b_br{bool=Bool}=Last, Blocks) ->
+ case is_tagged_tuple_1(Is0, Last, Blocks) of
+ {yes,_Fail,Tuple,Arity,Tag} ->
+ Args = [Tuple,Arity,Tag],
+ [I#b_set{op=is_tagged_tuple,dst=Bool,args=Args}];
+ no ->
+ [I|record_opt_is(Is, Last, Blocks)]
+ end;
record_opt_is([I|Is], Last, Blocks) ->
[I|record_opt_is(Is, Last, Blocks)];
record_opt_is([], _Last, _Blocks) -> [].
-is_tagged_tuple(#b_var{name=Tuple}, Bool,
- #b_br{bool=#b_var{name=Bool},succ=Succ,fail=Fail},
+is_tagged_tuple(#b_var{}=Tuple, Bool,
+ #b_br{bool=Bool,succ=Succ,fail=Fail},
Blocks) ->
- SuccBlk = maps:get(Succ, Blocks),
- is_tagged_tuple_1(SuccBlk, Tuple, Fail, Blocks);
+ #b_blk{is=Is,last=Last} = map_get(Succ, Blocks),
+ case is_tagged_tuple_1(Is, Last, Blocks) of
+ {yes,Fail,Tuple,Arity,Tag} ->
+ {yes,Arity,Tag};
+ _ ->
+ no
+ end;
is_tagged_tuple(_, _, _, _) -> no.
-is_tagged_tuple_1(#b_blk{is=Is,last=Last}, Tuple, Fail, Blocks) ->
- case Is of
- [#b_set{op={bif,tuple_size},dst=#b_var{name=ArityVar},
- args=[#b_var{name=Tuple}]},
- #b_set{op={bif,'=:='},
- dst=#b_var{name=Bool},
- args=[#b_var{name=ArityVar},
- #b_literal{val=ArityVal}=Arity]}]
- when is_integer(ArityVal) ->
- case Last of
- #b_br{bool=#b_var{name=Bool},succ=Succ,fail=Fail} ->
- SuccBlk = maps:get(Succ, Blocks),
- case is_tagged_tuple_2(SuccBlk, Tuple, Fail) of
- no ->
- no;
- {yes,Tag} ->
- {yes,Arity,Tag}
- end;
- _ ->
- no
+is_tagged_tuple_1(Is, Last, Blocks) ->
+ case {Is,Last} of
+ {[#b_set{op={bif,tuple_size},dst=ArityVar,
+ args=[#b_var{}=Tuple]},
+ #b_set{op={bif,'=:='},
+ dst=Bool,
+ args=[ArityVar, #b_literal{val=ArityVal}=Arity]}],
+ #b_br{bool=Bool,succ=Succ,fail=Fail}}
+ when is_integer(ArityVal) ->
+ SuccBlk = map_get(Succ, Blocks),
+ case is_tagged_tuple_2(SuccBlk, Tuple, Fail) of
+ no ->
+ no;
+ {yes,Tag} ->
+ {yes,Fail,Tuple,Arity,Tag}
end;
_ ->
no
end.
is_tagged_tuple_2(#b_blk{is=Is,
- last=#b_br{bool=#b_var{name=Bool},fail=Fail}},
+ last=#b_br{bool=#b_var{}=Bool,fail=Fail}},
Tuple, Fail) ->
is_tagged_tuple_3(Is, Bool, Tuple);
is_tagged_tuple_2(#b_blk{}, _, _) -> no.
is_tagged_tuple_3([#b_set{op=get_tuple_element,
- dst=#b_var{name=TagVar},
- args=[#b_var{name=Tuple},#b_literal{val=0}]}|Is],
+ dst=TagVar,
+ args=[#b_var{}=Tuple,#b_literal{val=0}]}|Is],
Bool, Tuple) ->
is_tagged_tuple_4(Is, Bool, TagVar);
is_tagged_tuple_3([_|Is], Bool, Tuple) ->
is_tagged_tuple_3(Is, Bool, Tuple);
is_tagged_tuple_3([], _, _) -> no.
-is_tagged_tuple_4([#b_set{op={bif,'=:='},dst=#b_var{name=Bool},
- args=[#b_var{name=TagVar},
+is_tagged_tuple_4([#b_set{op={bif,'=:='},dst=Bool,
+ args=[#b_var{}=TagVar,
#b_literal{val=TagVal}=Tag]}],
Bool, TagVar) when is_atom(TagVal) ->
{yes,Tag};
@@ -286,12 +759,12 @@ is_tagged_tuple_4([], _, _) -> no.
%%% subexpressions across instructions that clobber the X registers.
%%%
-ssa_opt_cse(#st{ssa=Linear}=St) ->
+ssa_opt_cse({#st{ssa=Linear}=St, FuncDb}) ->
M = #{0=>#{}},
- St#st{ssa=cse(Linear, #{}, M)}.
+ {St#st{ssa=cse(Linear, #{}, M)}, FuncDb}.
cse([{L,#b_blk{is=Is0,last=Last0}=Blk}|Bs], Sub0, M0) ->
- Es0 = maps:get(L, M0),
+ Es0 = map_get(L, M0),
{Is1,Es,Sub} = cse_is(Is0, Es0, Sub0, []),
Last = sub(Last0, Sub),
M = cse_successors(Is1, Blk, Es, M0),
@@ -318,7 +791,13 @@ cse_successors(_Is, Blk, Es, M) ->
cse_successors_1([L|Ls], Es0, M) ->
case M of
+ #{L:=Es1} when map_size(Es1) =:= 0 ->
+ %% The map is already empty. No need to do anything
+ %% since the intersection will be empty.
+ cse_successors_1(Ls, Es0, M);
#{L:=Es1} ->
+ %% Calculate the intersection of the two maps.
+ %% Both keys and values must match.
Es = maps:filter(fun(Key, Value) ->
case Es1 of
#{Key:=Value} -> true;
@@ -380,12 +859,22 @@ cse_expr(#b_set{op=Op,args=Args}=I) ->
cse_suitable(#b_set{op=get_hd}) -> true;
cse_suitable(#b_set{op=get_tl}) -> true;
cse_suitable(#b_set{op=put_list}) -> true;
+cse_suitable(#b_set{op=get_tuple_element}) -> true;
cse_suitable(#b_set{op=put_tuple}) -> true;
-cse_suitable(#b_set{op={bif,Name},args=Args}) ->
+cse_suitable(#b_set{op={bif,tuple_size}}) ->
+ %% Doing CSE for tuple_size/1 can prevent the
+ %% creation of test_arity and select_tuple_arity
+ %% instructions. That could decrease performance
+ %% and beam_validator could fail to understand
+ %% that tuple operations that follow are safe.
+ false;
+cse_suitable(#b_set{anno=Anno,op={bif,Name},args=Args}) ->
+ %% Doing CSE for floating point operators is unsafe.
%% Doing CSE for comparison operators would prevent
%% creation of 'test' instructions.
Arity = length(Args),
- not (erl_internal:new_type_test(Name, Arity) orelse
+ not (is_map_key(float_op, Anno) orelse
+ erl_internal:new_type_test(Name, Arity) orelse
erl_internal:comp_op(Name, Arity) orelse
erl_internal:bool_op(Name, Arity));
cse_suitable(#b_set{}) -> false.
@@ -410,16 +899,17 @@ cse_suitable(#b_set{}) -> false.
{s=undefined :: 'undefined' | 'cleared',
regs=#{} :: #{beam_ssa:b_var():=beam_ssa:b_var()},
fail=none :: 'none' | beam_ssa:label(),
- ren=#{} :: #{beam_ssa:label():=beam_ssa:label()},
- non_guards :: gb_sets:set(beam_ssa:label())
+ non_guards :: gb_sets:set(beam_ssa:label()),
+ bs :: beam_ssa:block_map()
}).
-ssa_opt_float(#st{ssa=Linear0,cnt=Count0}=St) ->
+ssa_opt_float({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) ->
NonGuards0 = float_non_guards(Linear0),
NonGuards = gb_sets:from_list(NonGuards0),
- Fs = #fs{non_guards=NonGuards},
+ Blocks = maps:from_list(Linear0),
+ Fs = #fs{non_guards=NonGuards,bs=Blocks},
{Linear,Count} = float_opt(Linear0, Count0, Fs),
- St#st{ssa=Linear,cnt=Count}.
+ {St#st{ssa=Linear,cnt=Count}, FuncDb}.
float_non_guards([{L,#b_blk{is=Is}}|Bs]) ->
case Is of
@@ -430,42 +920,35 @@ float_non_guards([{L,#b_blk{is=Is}}|Bs]) ->
end;
float_non_guards([]) -> [?BADARG_BLOCK].
-float_opt([{L,Blk0}|Bs], Count, Fs) ->
- Blk = float_rename_phis(Blk0, Fs),
- case float_need_flush(Blk, Fs) of
- true ->
- float_flush(L, Blk, Bs, Count, Fs);
- false ->
- float_opt_1(L, Blk, Bs, Count, Fs)
- end;
-float_opt([], Count, _Fs) ->
- {[],Count}.
-
-float_opt_1(L, #b_blk{last=#b_br{fail=F}}=Blk, Bs0,
+float_opt([{L,#b_blk{last=#b_br{fail=F}}=Blk}|Bs0],
Count0, #fs{non_guards=NonGuards}=Fs) ->
case gb_sets:is_member(F, NonGuards) of
true ->
%% This block is not inside a guard.
%% We can do the optimization.
- float_opt_2(L, Blk, Bs0, Count0, Fs);
+ float_opt_1(L, Blk, Bs0, Count0, Fs);
false ->
%% This block is inside a guard. Don't do
%% any floating point optimizations.
{Bs,Count} = float_opt(Bs0, Count0, Fs),
{[{L,Blk}|Bs],Count}
end;
-float_opt_1(L, Blk, Bs, Count, Fs) ->
- float_opt_2(L, Blk, Bs, Count, Fs).
+float_opt([{L,Blk}|Bs], Count, Fs) ->
+ float_opt_1(L, Blk, Bs, Count, Fs);
+float_opt([], Count, _Fs) ->
+ {[],Count}.
-float_opt_2(L, #b_blk{is=Is0}=Blk0, Bs0, Count0, Fs0) ->
+float_opt_1(L, #b_blk{is=Is0}=Blk0, Bs0, Count0, Fs0) ->
case float_opt_is(Is0, Fs0, Count0, []) of
{Is1,Fs1,Count1} ->
- Fs = float_fail_label(Blk0, Fs1),
- Split = float_split_conv(Is1, Blk0),
- {Blks0,Count2} = float_number(Split, L, Count1),
- {Blks,Count3} = float_conv(Blks0, Fs#fs.fail, Count2),
- {Bs,Count} = float_opt(Bs0, Count3, Fs),
- {Blks++Bs,Count};
+ Fs2 = float_fail_label(Blk0, Fs1),
+ Fail = Fs2#fs.fail,
+ {Flush,Blk,Fs,Count2} = float_maybe_flush(Blk0, Fs2, Count1),
+ Split = float_split_conv(Is1, Blk),
+ {Blks0,Count3} = float_number(Split, L, Count2),
+ {Blks,Count4} = float_conv(Blks0, Fail, Count3),
+ {Bs,Count} = float_opt(Bs0, Count4, Fs),
+ {Blks++Flush++Bs,Count};
none ->
{Bs,Count} = float_opt(Bs0, Count0, Fs0),
{[{L,Blk0}|Bs],Count}
@@ -525,14 +1008,42 @@ float_conv([{L,#b_blk{is=Is0}=Blk0}|Bs0], Fail, Count0) ->
end
end.
-float_need_flush(#b_blk{is=Is}, #fs{s=cleared}) ->
+float_maybe_flush(Blk0, #fs{s=cleared,fail=Fail,bs=Blocks}=Fs0, Count0) ->
+ #b_blk{last=#b_br{bool=#b_var{},succ=Succ}=Br} = Blk0,
+ #b_blk{is=Is} = map_get(Succ, Blocks),
case Is of
[#b_set{anno=#{float_op:=_}}|_] ->
- false;
+ %% The next operation is also a floating point operation.
+ %% No flush needed.
+ {[],Blk0,Fs0,Count0};
_ ->
- true
+ %% Flush needed.
+ {Bool0,Count1} = new_reg('@ssa_bool', Count0),
+ Bool = #b_var{name=Bool0},
+
+ %% Allocate block numbers.
+ CheckL = Count1, %For checkerror.
+ FlushL = Count1 + 1, %For flushing of float regs.
+ Count = Count1 + 2,
+ Blk = Blk0#b_blk{last=Br#b_br{succ=CheckL}},
+
+ %% Build the block with the checkerror instruction.
+ CheckIs = [#b_set{op={float,checkerror},dst=Bool}],
+ CheckBr = #b_br{bool=Bool,succ=FlushL,fail=Fail},
+ CheckBlk = #b_blk{is=CheckIs,last=CheckBr},
+
+ %% Build the block that flushes all registers.
+ FlushIs = float_flush_regs(Fs0),
+ FlushBr = #b_br{bool=#b_literal{val=true},succ=Succ,fail=Succ},
+ FlushBlk = #b_blk{is=FlushIs,last=FlushBr},
+
+ %% Update state and blocks.
+ Fs = Fs0#fs{s=undefined,regs=#{},fail=none},
+ FlushBs = [{CheckL,CheckBlk},{FlushL,FlushBlk}],
+ {FlushBs,Blk,Fs,Count}
end;
-float_need_flush(_, _) -> false.
+float_maybe_flush(Blk, Fs, Count) ->
+ {[],Blk,Fs,Count}.
float_opt_is([#b_set{op=succeeded,args=[Src]}=I0],
#fs{regs=Rs}=Fs, Count, Acc) ->
@@ -557,27 +1068,6 @@ float_opt_is([], Fs, _Count, _Acc) ->
#fs{s=undefined} = Fs, %Assertion.
none.
-float_rename_phis(#b_blk{is=Is}=Blk, #fs{ren=Ren}) ->
- if
- map_size(Ren) =:= 0 ->
- Blk;
- true ->
- Blk#b_blk{is=float_rename_phis_1(Is, Ren)}
- end.
-
-float_rename_phis_1([#b_set{op=phi,args=Args0}=I|Is], Ren) ->
- Args = [float_phi_arg(Arg, Ren) || Arg <- Args0],
- [I#b_set{args=Args}|float_rename_phis_1(Is, Ren)];
-float_rename_phis_1(Is, _) -> Is.
-
-float_phi_arg({Var,OldLbl}, Ren) ->
- case Ren of
- #{OldLbl:=NewLbl} ->
- {Var,NewLbl};
- #{} ->
- {Var,OldLbl}
- end.
-
float_make_op(#b_set{op={bif,Op},dst=Dst,args=As0}=I0,
Ts, #fs{s=S,regs=Rs0}=Fs, Count0) ->
{As1,Rs1,Count1} = float_load(As0, Ts, Rs0, Count0, []),
@@ -634,37 +1124,6 @@ new_reg(Base, Count) ->
Fr = {Base,Count},
{Fr,Count+1}.
-float_flush(L, Blk, Bs0, Count0, #fs{s=cleared,fail=Fail,ren=Ren0}=Fs0) ->
- {Bool0,Count1} = new_reg('@ssa_bool', Count0),
- Bool = #b_var{name=Bool0},
-
- %% Insert two blocks before the current block. First allocate
- %% block numbers.
- FirstL = L, %For checkerror.
- MiddleL = Count1, %For flushed float regs.
- LastL = Count1 + 1, %For original block.
- Count2 = Count1 + 2,
-
- %% Build the block with the checkerror instruction.
- CheckIs = [#b_set{op={float,checkerror},dst=Bool}],
- FirstBlk = #b_blk{is=CheckIs,last=#b_br{bool=Bool,succ=MiddleL,fail=Fail}},
-
- %% Build the block that flushes all registers. Note that this must be a
- %% separate block in case the original block begins with a phi instruction,
- %% to avoid embedding a phi instruction in the middle of a block.
- FlushIs = float_flush_regs(Fs0),
- MiddleBlk = #b_blk{is=FlushIs,last=#b_br{bool=#b_literal{val=true},
- succ=LastL,fail=LastL}},
-
- %% The last block is the original unmodified block.
- LastBlk = Blk,
-
- %% Update state and blocks.
- Ren = Ren0#{L=>LastL},
- Fs = Fs0#fs{s=undefined,regs=#{},fail=none,ren=Ren},
- Bs1 = [{FirstL,FirstBlk},{MiddleL,MiddleBlk},{LastL,LastBlk}|Bs0],
- float_opt(Bs1, Count2, Fs).
-
float_fail_label(#b_blk{last=Last}, Fs) ->
case Last of
#b_br{bool=#b_var{},fail=Fail} ->
@@ -688,30 +1147,38 @@ float_flush_regs(#fs{regs=Rs}) ->
%%% with a cheaper instruction
%%%
-ssa_opt_live(#st{ssa=Linear}=St) ->
- St#st{ssa=live_opt(reverse(Linear), #{}, [])}.
-
-live_opt([{L,Blk0}|Bs], LiveMap0, Acc) ->
- Successors = beam_ssa:successors(Blk0),
- Live0 = live_opt_succ(Successors, L, LiveMap0),
- {Blk,Live} = live_opt_blk(Blk0, Live0),
+ssa_opt_live({#st{ssa=Linear0}=St, FuncDb}) ->
+ RevLinear = reverse(Linear0),
+ Blocks0 = maps:from_list(RevLinear),
+ Blocks = live_opt(RevLinear, #{}, Blocks0),
+ Linear = beam_ssa:linearize(Blocks),
+ {St#st{ssa=Linear}, FuncDb}.
+
+live_opt([{L,Blk0}|Bs], LiveMap0, Blocks) ->
+ Blk1 = beam_ssa_share:block(Blk0, Blocks),
+ Successors = beam_ssa:successors(Blk1),
+ Live0 = live_opt_succ(Successors, L, LiveMap0, gb_sets:empty()),
+ {Blk,Live} = live_opt_blk(Blk1, Live0),
LiveMap = live_opt_phis(Blk#b_blk.is, L, Live, LiveMap0),
- live_opt(Bs, LiveMap, [{L,Blk}|Acc]);
+ live_opt(Bs, LiveMap, Blocks#{L:=Blk});
live_opt([], _, Acc) -> Acc.
-live_opt_succ([S|Ss], L, LiveMap) ->
- Live0 = live_opt_succ(Ss, L, LiveMap),
+live_opt_succ([S|Ss], L, LiveMap, Live0) ->
Key = {S,L},
case LiveMap of
#{Key:=Live} ->
- gb_sets:union(Live, Live0);
+ %% The successor has a phi node, and the value for
+ %% this block in the phi node is a variable.
+ live_opt_succ(Ss, L, LiveMap, gb_sets:union(Live, Live0));
#{S:=Live} ->
- gb_sets:union(Live, Live0);
+ %% No phi node in the successor, or the value for
+ %% this block in the phi node is a literal.
+ live_opt_succ(Ss, L, LiveMap, gb_sets:union(Live, Live0));
#{} ->
- Live0
+ %% A peek_message block which has not been processed yet.
+ live_opt_succ(Ss, L, LiveMap, Live0)
end;
-live_opt_succ([], _, _) ->
- gb_sets:empty().
+live_opt_succ([], _, _, Acc) -> Acc.
live_opt_phis(Is, L, Live0, LiveMap0) ->
LiveMap = LiveMap0#{L=>Live0},
@@ -721,11 +1188,16 @@ live_opt_phis(Is, L, Live0, LiveMap0) ->
LiveMap;
[_|_] ->
PhiArgs = append([Args || #b_set{args=Args} <- Phis]),
- PhiVars = [{P,V} || {#b_var{name=V},P} <- PhiArgs],
- PhiLive0 = rel2fam(PhiVars),
- PhiLive = [{{L,P},gb_sets:union(gb_sets:from_list(Vs), Live0)} ||
- {P,Vs} <- PhiLive0],
- maps:merge(LiveMap, maps:from_list(PhiLive))
+ case [{P,V} || {#b_var{}=V,P} <- PhiArgs] of
+ [_|_]=PhiVars ->
+ PhiLive0 = rel2fam(PhiVars),
+ PhiLive = [{{L,P},gb_sets:union(gb_sets:from_list(Vs), Live0)} ||
+ {P,Vs} <- PhiLive0],
+ maps:merge(LiveMap, maps:from_list(PhiLive));
+ [] ->
+ %% There were only literals in the phi node(s).
+ LiveMap
+ end
end.
live_opt_blk(#b_blk{is=Is0,last=Last}=Blk, Live0) ->
@@ -733,26 +1205,21 @@ live_opt_blk(#b_blk{is=Is0,last=Last}=Blk, Live0) ->
{Is,Live} = live_opt_is(reverse(Is0), Live1, []),
{Blk#b_blk{is=Is},Live}.
-live_opt_is([#b_set{op=phi,dst=#b_var{name=Dst}}=I|Is], Live, Acc) ->
+live_opt_is([#b_set{op=phi,dst=Dst}=I|Is], Live, Acc) ->
case gb_sets:is_member(Dst, Live) of
true ->
live_opt_is(Is, Live, [I|Acc]);
false ->
live_opt_is(Is, Live, Acc)
end;
-live_opt_is([#b_set{op=succeeded,dst=#b_var{name=SuccDst}=SuccDstVar,
- args=[#b_var{name=Dst}]}=SuccI,
- #b_set{dst=#b_var{name=Dst}}=I|Is], Live0, Acc) ->
+live_opt_is([#b_set{op=succeeded,dst=SuccDst=SuccDstVar,
+ args=[Dst]}=SuccI,
+ #b_set{dst=Dst}=I|Is], Live0, Acc) ->
case gb_sets:is_member(Dst, Live0) of
true ->
- case gb_sets:is_member(SuccDst, Live0) of
- true ->
- Live1 = gb_sets:add(Dst, Live0),
- Live = gb_sets:delete_any(SuccDst, Live1),
- live_opt_is([I|Is], Live, [SuccI|Acc]);
- false ->
- live_opt_is([I|Is], Live0, Acc)
- end;
+ Live1 = gb_sets:add(Dst, Live0),
+ Live = gb_sets:delete_any(SuccDst, Live1),
+ live_opt_is([I|Is], Live, [SuccI|Acc]);
false ->
case live_opt_unused(I) of
{replace,NewI0} ->
@@ -762,21 +1229,21 @@ live_opt_is([#b_set{op=succeeded,dst=#b_var{name=SuccDst}=SuccDstVar,
case gb_sets:is_member(SuccDst, Live0) of
true ->
Live1 = gb_sets:add(Dst, Live0),
- Live = gb_sets:delete_any(SuccDst, Live1),
+ Live = gb_sets:delete(SuccDst, Live1),
live_opt_is([I|Is], Live, [SuccI|Acc]);
false ->
live_opt_is([I|Is], Live0, Acc)
end
end
end;
-live_opt_is([#b_set{op=Op,dst=#b_var{name=Dst}}=I|Is], Live0, Acc) ->
+live_opt_is([#b_set{dst=Dst}=I|Is], Live0, Acc) ->
case gb_sets:is_member(Dst, Live0) of
true ->
Live1 = gb_sets:union(Live0, gb_sets:from_ordset(beam_ssa:used(I))),
- Live = gb_sets:delete_any(Dst, Live1),
+ Live = gb_sets:delete(Dst, Live1),
live_opt_is(Is, Live, [I|Acc]);
false ->
- case is_pure(Op) of
+ case beam_ssa:no_side_effect(I) of
true ->
live_opt_is(Is, Live0, Acc);
false ->
@@ -787,42 +1254,37 @@ live_opt_is([#b_set{op=Op,dst=#b_var{name=Dst}}=I|Is], Live0, Acc) ->
live_opt_is([], Live, Acc) ->
{Acc,Live}.
-is_pure({bif,_}) -> true;
-is_pure({float,get}) -> true;
-is_pure(bs_extract) -> true;
-is_pure(extract) -> true;
-is_pure(get_hd) -> true;
-is_pure(get_tl) -> true;
-is_pure(get_tuple_element) -> true;
-is_pure(is_nonempty_list) -> true;
-is_pure(is_tagged_tuple) -> true;
-is_pure(put_list) -> true;
-is_pure(put_tuple) -> true;
-is_pure(_) -> false.
-
live_opt_unused(#b_set{op=get_map_element}=Set) ->
{replace,Set#b_set{op=has_map_field}};
live_opt_unused(_) -> keep.
%%%
-%%% Optimize binary matching instructions.
+%%% Optimize binary matching.
+%%%
+%%% * If the value of a segment is never extracted, rewrite the
+%%% match to a bs_skip instruction.
+%%%
+%%% * Coalesce adjacent bs_skip instructions, and combine a bs_skip
+%%% with a following bs_test_tail.
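+%%%
+%%% For example (hypothetical segment sizes), two adjacent skips of
+%%% 8 and 16 bits can be coalesced into a single 24-bit skip, and a
+%%% skip followed by a bs_test_tail can be folded into the tail test.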
%%%
-ssa_opt_bsm(#st{ssa=Linear}=St) ->
+ssa_opt_bsm({#st{ssa=Linear}=St, FuncDb}) ->
Extracted0 = bsm_extracted(Linear),
Extracted = cerl_sets:from_list(Extracted0),
- St#st{ssa=bsm_skip(Linear, Extracted)}.
+ {St#st{ssa=bsm_skip(Linear, Extracted)}, FuncDb}.
-bsm_skip([{L,#b_blk{is=Is0}=Blk}|Bs], Extracted) ->
+bsm_skip([{L,#b_blk{is=Is0}=Blk}|Bs0], Extracted) ->
+ Bs = bsm_skip(Bs0, Extracted),
Is = bsm_skip_is(Is0, Extracted),
- [{L,Blk#b_blk{is=Is}}|bsm_skip(Bs, Extracted)];
+ coalesce_skips({L,Blk#b_blk{is=Is}}, Bs);
bsm_skip([], _) -> [].
bsm_skip_is([I0|Is], Extracted) ->
case I0 of
- #b_set{op=bs_match,args=[#b_literal{val=string}|_]} ->
- [I0|bsm_skip_is(Is, Extracted)];
- #b_set{op=bs_match,dst=Ctx,args=[Type,PrevCtx|Args0]} ->
+ #b_set{op=bs_match,
+ dst=Ctx,
+ args=[#b_literal{val=T}=Type,PrevCtx|Args0]}
+ when T =/= string, T =/= skip ->
I = case cerl_sets:is_element(Ctx, Extracted) of
true ->
I0;
@@ -846,18 +1308,75 @@ bsm_extracted([{_,#b_blk{is=Is}}|Bs]) ->
end;
bsm_extracted([]) -> [].
+coalesce_skips({L,#b_blk{is=[#b_set{op=bs_extract}=Extract|Is0],
+ last=Last0}=Blk0}, Bs0) ->
+ case coalesce_skips_is(Is0, Last0, Bs0) of
+ not_possible ->
+ [{L,Blk0}|Bs0];
+ {Is,Last,Bs} ->
+ Blk = Blk0#b_blk{is=[Extract|Is],last=Last},
+ [{L,Blk}|Bs]
+ end;
+coalesce_skips({L,#b_blk{is=Is0,last=Last0}=Blk0}, Bs0) ->
+ case coalesce_skips_is(Is0, Last0, Bs0) of
+ not_possible ->
+ [{L,Blk0}|Bs0];
+ {Is,Last,Bs} ->
+ Blk = Blk0#b_blk{is=Is,last=Last},
+ [{L,Blk}|Bs]
+ end.
+
+coalesce_skips_is([#b_set{op=bs_match,
+ args=[#b_literal{val=skip},
+ Ctx0,Type,Flags,
+ #b_literal{val=Size0},
+ #b_literal{val=Unit0}]}=Skip0,
+ #b_set{op=succeeded}],
+ #b_br{succ=L2,fail=Fail}=Br0,
+ Bs0) when is_integer(Size0) ->
+ case Bs0 of
+ [{L2,#b_blk{is=[#b_set{op=bs_match,
+ dst=SkipDst,
+ args=[#b_literal{val=skip},_,_,_,
+ #b_literal{val=Size1},
+ #b_literal{val=Unit1}]},
+ #b_set{op=succeeded}=Succeeded],
+ last=#b_br{fail=Fail}=Br}}|Bs] when is_integer(Size1) ->
+ SkipBits = Size0 * Unit0 + Size1 * Unit1,
+ Skip = Skip0#b_set{dst=SkipDst,
+ args=[#b_literal{val=skip},Ctx0,
+ Type,Flags,
+ #b_literal{val=SkipBits},
+ #b_literal{val=1}]},
+ Is = [Skip,Succeeded],
+ {Is,Br,Bs};
+ [{L2,#b_blk{is=[#b_set{op=bs_test_tail,
+ args=[_Ctx,#b_literal{val=TailSkip}]}],
+ last=#b_br{succ=NextSucc,fail=Fail}}}|Bs] ->
+ SkipBits = Size0 * Unit0,
+ TestTail = Skip0#b_set{op=bs_test_tail,
+ args=[Ctx0,#b_literal{val=SkipBits+TailSkip}]},
+ Br = Br0#b_br{bool=TestTail#b_set.dst,succ=NextSucc},
+ Is = [TestTail],
+ {Is,Br,Bs};
+ _ ->
+ not_possible
+ end;
+coalesce_skips_is(_, _, _) ->
+ not_possible.
+
%%%
%%% Short-cutting binary matching instructions.
%%%
-ssa_opt_bsm_shortcut(#st{ssa=Linear}=St) ->
+ssa_opt_bsm_shortcut({#st{ssa=Linear}=St, FuncDb}) ->
Positions = bsm_positions(Linear, #{}),
case map_size(Positions) of
0 ->
%% No binary matching instructions.
- St;
+ {St, FuncDb};
_ ->
- St#st{ssa=bsm_shortcut(Linear, Positions)}
+ {St#st{ssa=bsm_shortcut(Linear, Positions)}, FuncDb}
end.
bsm_positions([{L,#b_blk{is=Is,last=Last}}|Bs], PosMap0) ->
@@ -865,7 +1384,7 @@ bsm_positions([{L,#b_blk{is=Is,last=Last}}|Bs], PosMap0) ->
case {Is,Last} of
{[#b_set{op=bs_test_tail,dst=Bool,args=[Ctx,#b_literal{val=Bits0}]}],
#b_br{bool=Bool,fail=Fail}} ->
- Bits = Bits0 + maps:get(Ctx, PosMap0),
+ Bits = Bits0 + map_get(Ctx, PosMap0),
bsm_positions(Bs, PosMap#{L=>{Bits,Fail}});
{_,_} ->
bsm_positions(Bs, PosMap)
@@ -916,70 +1435,461 @@ bsm_shortcut([{L,#b_blk{is=Is,last=Last0}=Blk}|Bs], PosMap) ->
bsm_shortcut([], _PosMap) -> [].
%%%
-%%% Miscellanous optimizations in execution order.
+%%% Eliminate redundant bs_test_unit2 instructions.
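+%%%
+%%% For example, once a match context has passed a unit:16 tail test,
+%%% a later unit:8 test on that context (or on one derived from it)
+%%% can never fail, since 16 rem 8 =:= 0, and is therefore removed.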
+%%%
+
+ssa_opt_bsm_units({#st{ssa=Linear}=St, FuncDb}) ->
+ {St#st{ssa=bsm_units(Linear, #{})}, FuncDb}.
+
+bsm_units([{L,#b_blk{last=#b_br{succ=Succ,fail=Fail}}=Block0} | Bs], UnitMaps0) ->
+ UnitsIn = maps:get(L, UnitMaps0, #{}),
+ {Block, UnitsOut} = bsm_units_skip(Block0, UnitsIn),
+ UnitMaps1 = bsm_units_join(Succ, UnitsOut, UnitMaps0),
+ UnitMaps = bsm_units_join(Fail, UnitsIn, UnitMaps1),
+ [{L, Block} | bsm_units(Bs, UnitMaps)];
+bsm_units([{L,#b_blk{last=#b_switch{fail=Fail,list=Switch}}=Block} | Bs], UnitMaps0) ->
+ UnitsIn = maps:get(L, UnitMaps0, #{}),
+ Labels = [Fail | [Lbl || {_Arg, Lbl} <- Switch]],
+ UnitMaps = foldl(fun(Lbl, UnitMaps) ->
+ bsm_units_join(Lbl, UnitsIn, UnitMaps)
+ end, UnitMaps0, Labels),
+ [{L, Block} | bsm_units(Bs, UnitMaps)];
+bsm_units([{L, Block} | Bs], UnitMaps) ->
+ [{L, Block} | bsm_units(Bs, UnitMaps)];
+bsm_units([], _UnitMaps) ->
+ [].
+
+bsm_units_skip(Block, Units) ->
+ bsm_units_skip_1(Block#b_blk.is, Block, Units).
+
+bsm_units_skip_1([#b_set{op=bs_start_match,dst=New}|_], Block, Units) ->
+ %% We bail early since there can't be more than one match per block.
+ {Block, Units#{ New => 1 }};
+bsm_units_skip_1([#b_set{op=bs_match,
+ dst=New,
+ args=[#b_literal{val=skip},
+ Ctx,
+ #b_literal{val=binary},
+ _Flags,
+ #b_literal{val=all},
+ #b_literal{val=OpUnit}]}=Skip | Test],
+ Block0, Units) ->
+ [#b_set{op=succeeded,dst=Bool,args=[New]}] = Test, %Assertion.
+ #b_br{bool=Bool} = Last0 = Block0#b_blk.last, %Assertion.
+ CtxUnit = map_get(Ctx, Units),
+ if
+ CtxUnit rem OpUnit =:= 0 ->
+ Is = takewhile(fun(I) -> I =/= Skip end, Block0#b_blk.is),
+ Last = Last0#b_br{bool=#b_literal{val=true}},
+ Block = Block0#b_blk{is=Is,last=Last},
+ {Block, Units#{ New => CtxUnit }};
+ CtxUnit rem OpUnit =/= 0 ->
+ {Block0, Units#{ New => OpUnit, Ctx => OpUnit }}
+ end;
+bsm_units_skip_1([#b_set{op=bs_match,dst=New,args=Args}|_], Block, Units) ->
+ [_,Ctx|_] = Args,
+ CtxUnit = map_get(Ctx, Units),
+ OpUnit = bsm_op_unit(Args),
+ {Block, Units#{ New => gcd(OpUnit, CtxUnit) }};
+bsm_units_skip_1([_I | Is], Block, Units) ->
+ bsm_units_skip_1(Is, Block, Units);
+bsm_units_skip_1([], Block, Units) ->
+ {Block, Units}.
+
+bsm_op_unit([_,_,_,Size,#b_literal{val=U}]) ->
+ case Size of
+ #b_literal{val=Sz} when is_integer(Sz) -> Sz*U;
+ _ -> U
+ end;
+bsm_op_unit([#b_literal{val=string},_,#b_literal{val=String}]) ->
+ bit_size(String);
+bsm_op_unit([#b_literal{val=utf8}|_]) ->
+ 8;
+bsm_op_unit([#b_literal{val=utf16}|_]) ->
+ 16;
+bsm_op_unit([#b_literal{val=utf32}|_]) ->
+ 32;
+bsm_op_unit(_) ->
+ 1.
+
+%% Several paths can lead to the same match instruction and the inferred units
+%% may differ between them, so we can only keep the information that is common
+%% to all paths.
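+%% For example, if the unit inferred for a context is 16 on one path
+%% and 24 on another, only a unit of gcd(16, 24) = 8 can be assumed
+%% after the join.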
+bsm_units_join(Lbl, MapA, UnitMaps0) when is_map_key(Lbl, UnitMaps0) ->
+ MapB = map_get(Lbl, UnitMaps0),
+ Merged = if
+ map_size(MapB) =< map_size(MapA) ->
+ bsm_units_join_1(maps:keys(MapB), MapA, MapB);
+ map_size(MapB) > map_size(MapA) ->
+ bsm_units_join_1(maps:keys(MapA), MapB, MapA)
+ end,
+ UnitMaps0#{Lbl := Merged};
+bsm_units_join(Lbl, MapA, UnitMaps0) when MapA =/= #{} ->
+ UnitMaps0#{Lbl => MapA};
+bsm_units_join(_Lbl, _MapA, UnitMaps0) ->
+ UnitMaps0.
+
+bsm_units_join_1([Key | Keys], Left, Right) when is_map_key(Key, Left) ->
+ UnitA = map_get(Key, Left),
+ UnitB = map_get(Key, Right),
+ bsm_units_join_1(Keys, Left, Right#{Key := gcd(UnitA, UnitB)});
+bsm_units_join_1([Key | Keys], Left, Right) ->
+ bsm_units_join_1(Keys, Left, maps:remove(Key, Right));
+bsm_units_join_1([], _MapA, Right) ->
+ Right.
+
+%%%
+%%% Optimize binary construction.
+%%%
+%%% If an integer segment or a float segment has a literal size and
+%%% a literal value, convert to a binary segment. Coalesce adjacent
+%%% literal binary segments. Literal binary segments will be converted
+%%% to bs_put_string instructions in a later pass.
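+%%%
+%%% For example (hypothetical source, for illustration), in
+%%%
+%%%    header(Rest) -> <<1:8,2:16,Rest/binary>>.
+%%%
+%%% the two leading segments have literal sizes and values, so they are
+%%% converted to literal binary segments and then coalesced into the
+%%% single segment <<1,0,2>>.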
%%%
-ssa_opt_misc(#st{ssa=Linear}=St) ->
- St#st{ssa=misc_opt(Linear, #{})}.
+ssa_opt_bs_puts({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) ->
+ {Linear,Count} = opt_bs_puts(Linear0, Count0, []),
+ {St#st{ssa=Linear,cnt=Count}, FuncDb}.
-misc_opt([{L,#b_blk{is=Is0,last=Last0}=Blk0}|Bs], Sub0) ->
- {Is,Sub} = misc_opt_is(Is0, Sub0, []),
- Last = sub(Last0, Sub),
- Blk = Blk0#b_blk{is=Is,last=Last},
- [{L,Blk}|misc_opt(Bs, Sub)];
-misc_opt([], _) -> [].
+opt_bs_puts([{L,#b_blk{is=Is}=Blk0}|Bs], Count0, Acc0) ->
+ case Is of
+ [#b_set{op=bs_put}=I0] ->
+ case opt_bs_put(L, I0, Blk0, Count0, Acc0) of
+ not_possible ->
+ opt_bs_puts(Bs, Count0, [{L,Blk0}|Acc0]);
+ {Count,Acc1} ->
+ Acc = opt_bs_puts_merge(Acc1),
+ opt_bs_puts(Bs, Count, Acc)
+ end;
+ _ ->
+ opt_bs_puts(Bs, Count0, [{L,Blk0}|Acc0])
+ end;
+opt_bs_puts([], Count, Acc) ->
+ {reverse(Acc),Count}.
+
+opt_bs_puts_merge([{L1,#b_blk{is=Is}=Blk0},{L2,#b_blk{is=AccIs}}=BAcc|Acc]) ->
+ case {AccIs,Is} of
+ {[#b_set{op=bs_put,
+ args=[#b_literal{val=binary},
+ #b_literal{},
+ #b_literal{val=Bin0},
+ #b_literal{val=all},
+ #b_literal{val=1}]}],
+ [#b_set{op=bs_put,
+ args=[#b_literal{val=binary},
+ #b_literal{},
+ #b_literal{val=Bin1},
+ #b_literal{val=all},
+ #b_literal{val=1}]}=I0]} ->
+ %% Coalesce the two segments to one.
+ Bin = <<Bin0/bitstring,Bin1/bitstring>>,
+ I = I0#b_set{args=bs_put_args(binary, Bin, all)},
+ Blk = Blk0#b_blk{is=[I]},
+ [{L2,Blk}|Acc];
+ {_,_} ->
+ [{L1,Blk0},BAcc|Acc]
+ end.
+
+opt_bs_put(L, I0, #b_blk{last=Br0}=Blk0, Count0, Acc) ->
+ case opt_bs_put(I0) of
+ [Bin] when is_bitstring(Bin) ->
+ Args = bs_put_args(binary, Bin, all),
+ I = I0#b_set{args=Args},
+ Blk = Blk0#b_blk{is=[I]},
+ {Count0,[{L,Blk}|Acc]};
+ [{int,Int,Size},Bin] when is_bitstring(Bin) ->
+ %% Construct a bs_put_integer instruction followed
+ %% by a bs_put_binary instruction.
+ IntArgs = bs_put_args(integer, Int, Size),
+ BinArgs = bs_put_args(binary, Bin, all),
+ {BinL,BinVarNum} = {Count0,Count0+1},
+ Count = Count0 + 2,
+ BinVar = #b_var{name={'@ssa_bool',BinVarNum}},
+ BinI = I0#b_set{dst=BinVar,args=BinArgs},
+ BinBlk = Blk0#b_blk{is=[BinI],last=Br0#b_br{bool=BinVar}},
+ IntI = I0#b_set{args=IntArgs},
+ IntBlk = Blk0#b_blk{is=[IntI],last=Br0#b_br{succ=BinL}},
+ {Count,[{BinL,BinBlk},{L,IntBlk}|Acc]};
+ not_possible ->
+ not_possible
+ end.
-misc_opt_is([#b_set{op=phi}=I0|Is], Sub0, Acc) ->
- #b_set{dst=Dst,args=Args} = I = sub(I0, Sub0),
- case all_same(Args) of
+opt_bs_put(#b_set{args=[#b_literal{val=binary},_,#b_literal{val=Val},
+ #b_literal{val=all},#b_literal{val=Unit}]})
+ when is_bitstring(Val) ->
+ if
+ bit_size(Val) rem Unit =:= 0 ->
+ [Val];
true ->
- %% Eliminate the phi node if there is just one source
- %% value or if the values are identical.
- [{Val,_}|_] = Args,
- Sub = Sub0#{Dst=>Val},
- misc_opt_is(Is, Sub, Acc);
- false ->
- misc_opt_is(Is, Sub0, [I|Acc])
+ not_possible
end;
-misc_opt_is([#b_set{}=I0|Is], Sub, Acc) ->
- #b_set{op=Op,dst=Dst,args=Args} = I = sub(I0, Sub),
- case make_literal(Op, Args) of
- #b_literal{}=Literal ->
- misc_opt_is(Is, Sub#{Dst=>Literal}, Acc);
- error ->
- misc_opt_is(Is, Sub, [I|Acc])
+opt_bs_put(#b_set{args=[#b_literal{val=Type},#b_literal{val=Flags},
+ #b_literal{val=Val},#b_literal{val=Size},
+ #b_literal{val=Unit}]}=I0) when is_integer(Size) ->
+ EffectiveSize = Size * Unit,
+ if
+ EffectiveSize > 0 ->
+ case {Type,opt_bs_put_endian(Flags)} of
+ {integer,big} when is_integer(Val) ->
+ if
+ EffectiveSize < 64 ->
+ [<<Val:EffectiveSize>>];
+ true ->
+ opt_bs_put_split_int(Val, EffectiveSize)
+ end;
+ {integer,little} when is_integer(Val), EffectiveSize < 128 ->
+ %% To avoid an explosion in code size, we only try
+ %% to optimize relatively small fields.
+ <<Int:EffectiveSize>> = <<Val:EffectiveSize/little>>,
+ Args = bs_put_args(Type, Int, EffectiveSize),
+ I = I0#b_set{args=Args},
+ opt_bs_put(I);
+ {binary,_} when is_bitstring(Val) ->
+ <<Bitstring:EffectiveSize/bits,_/bits>> = Val,
+ [Bitstring];
+ {float,Endian} ->
+ try
+ [opt_bs_put_float(Val, EffectiveSize, Endian)]
+ catch error:_ ->
+ not_possible
+ end;
+ {_,_} ->
+ not_possible
+ end;
+ true ->
+ not_possible
end;
-misc_opt_is([], Sub, Acc) ->
- {reverse(Acc),Sub}.
-
-all_same([{H,_}|T]) ->
- all(fun({E,_}) -> E =:= H end, T).
-
-make_literal(put_tuple, Args) ->
- case make_literal_list(Args, []) of
- error ->
- error;
- List ->
- #b_literal{val=list_to_tuple(List)}
+opt_bs_put(#b_set{}) -> not_possible.
+
+opt_bs_put_float(N, Sz, Endian) ->
+ case Endian of
+ big -> <<N:Sz/big-float-unit:1>>;
+ little -> <<N:Sz/little-float-unit:1>>
+ end.
+
+bs_put_args(Type, Val, Size) ->
+ [#b_literal{val=Type},
+ #b_literal{val=[unsigned,big]},
+ #b_literal{val=Val},
+ #b_literal{val=Size},
+ #b_literal{val=1}].
+
+opt_bs_put_endian([big=E|_]) -> E;
+opt_bs_put_endian([little=E|_]) -> E;
+opt_bs_put_endian([native=E|_]) -> E;
+opt_bs_put_endian([_|Fs]) -> opt_bs_put_endian(Fs).
+
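+%% Sketch of the effect: <<7:1000>> has 992 leading zero bits; rather
+%% than emitting a 125-byte literal that is almost entirely zeroes, the
+%% construction is split into a put of the integer 0 over 992 bits
+%% followed by the 8-bit literal binary <<7>>.
+%%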
+opt_bs_put_split_int(Int, Size) ->
+ Pos = opt_bs_put_split_int_1(Int, 0, Size - 1),
+ UpperSize = Size - Pos,
+ if
+ Pos =:= 0 ->
+ %% Value is 0 or -1 -- keep the original instruction.
+ not_possible;
+ UpperSize < 64 ->
+ %% No or few leading zeroes or ones.
+ [<<Int:Size>>];
+ true ->
+ %% There are 64 or more leading ones or zeroes in
+ %% the resulting binary. Split into two separate
+ %% segments to avoid an explosion in code size.
+ [{int,Int bsr Pos,UpperSize},<<Int:Pos>>]
+ end.
+
+opt_bs_put_split_int_1(_Int, L, R) when L > R ->
+ 8 * ((L + 7) div 8);
+opt_bs_put_split_int_1(Int, L, R) ->
+ Mid = (L + R) div 2,
+ case Int bsr Mid of
+ Upper when Upper =:= 0; Upper =:= -1 ->
+ opt_bs_put_split_int_1(Int, L, Mid - 1);
+ _ ->
+ opt_bs_put_split_int_1(Int, Mid + 1, R)
+ end.
+
+%%%
+%%% Optimize expressions such as "tuple_size(Var) =:= 2".
+%%%
+%%% Consider this code:
+%%%
+%%% 0:
+%%% .
+%%% .
+%%% .
+%%% Size = bif:tuple_size Var
+%%% BoolVar1 = succeeded Size
+%%% br BoolVar1, label 4, label 3
+%%%
+%%% 4:
+%%% BoolVar2 = bif:'=:=' Size, literal 2
+%%% br BoolVar2, label 6, label 3
+%%%
+%%% 6: ... %% OK
+%%%
+%%% 3: ... %% Not a tuple of size 2
+%%%
+%%% The BEAM code will look like this:
+%%%
+%%% {bif,tuple_size,{f,3},[{x,0}],{x,0}}.
+%%% {test,is_eq_exact,{f,3},[{x,0},{integer,2}]}.
+%%%
+%%% Better BEAM code will be produced if we transform the
+%%% code like this:
+%%%
+%%% 0:
+%%% .
+%%% .
+%%% .
+%%% br label 10
+%%%
+%%% 10:
+%%% NewBoolVar = bif:is_tuple Var
+%%% br NewBoolVar, label 11, label 3
+%%%
+%%% 11:
+%%% Size = bif:tuple_size Var
+%%% br label 4
+%%%
+%%% 4:
+%%% BoolVar2 = bif:'=:=' Size, literal 2
+%%% br BoolVar2, label 6, label 3
+%%%
+%%% (The key part of the transformation is the removal of
+%%% the 'succeeded' instruction to signal to the code generator
+%%% that the call to tuple_size/1 can't fail.)
+%%%
+%%% The BEAM code will look like:
+%%%
+%%% {test,is_tuple,{f,3},[{x,0}]}.
+%%% {test_arity,{f,3},[{x,0},2]}.
+%%%
+%%% Those two instructions will be combined into a single
+%%% is_tuple_of_arity instruction by the loader.
+%%%
+
+ssa_opt_tuple_size({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) ->
+ {Linear,Count} = opt_tup_size(Linear0, Count0, []),
+ {St#st{ssa=Linear,cnt=Count}, FuncDb}.
+
+opt_tup_size([{L,#b_blk{is=Is,last=Last}=Blk}|Bs], Count0, Acc0) ->
+ case {Is,Last} of
+ {[#b_set{op={bif,'=:='},dst=Bool,args=[#b_var{}=Tup,#b_literal{val=Arity}]}],
+ #b_br{bool=Bool}} when is_integer(Arity), Arity >= 0 ->
+ {Acc,Count} = opt_tup_size_1(Tup, L, Count0, Acc0),
+ opt_tup_size(Bs, Count, [{L,Blk}|Acc]);
+ {_,_} ->
+ opt_tup_size(Bs, Count0, [{L,Blk}|Acc0])
end;
-make_literal(put_list, [#b_literal{val=H},#b_literal{val=T}]) ->
- #b_literal{val=[H|T]};
-make_literal(_, _) -> error.
+opt_tup_size([], Count, Acc) ->
+ {reverse(Acc),Count}.
-make_literal_list([#b_literal{val=H}|T], Acc) ->
- make_literal_list(T, [H|Acc]);
-make_literal_list([_|_], _) ->
- error;
-make_literal_list([], Acc) ->
- reverse(Acc).
+opt_tup_size_1(Size, EqL, Count0, [{L,Blk0}|Acc]) ->
+ case Blk0 of
+ #b_blk{is=Is0,last=#b_br{bool=Bool,succ=EqL,fail=Fail}} ->
+ case opt_tup_size_is(Is0, Bool, Size, []) of
+ none ->
+ {[{L,Blk0}|Acc],Count0};
+ {PreIs,TupleSizeIs,Tuple} ->
+ opt_tup_size_2(PreIs, TupleSizeIs, L, EqL,
+ Tuple, Fail, Count0, Acc)
+ end;
+ #b_blk{} ->
+ {[{L,Blk0}|Acc],Count0}
+ end;
+opt_tup_size_1(_, _, Count, Acc) ->
+ {Acc,Count}.
+
+opt_tup_size_2(PreIs, TupleSizeIs, PreL, EqL, Tuple, Fail, Count0, Acc) ->
+ IsTupleL = Count0,
+ TupleSizeL = Count0 + 1,
+ Bool = #b_var{name={'@ssa_bool',Count0+2}},
+ Count = Count0 + 3,
+
+ True = #b_literal{val=true},
+ PreBr = #b_br{bool=True,succ=IsTupleL,fail=IsTupleL},
+ PreBlk = #b_blk{is=PreIs,last=PreBr},
+
+ IsTupleIs = [#b_set{op={bif,is_tuple},dst=Bool,args=[Tuple]}],
+ IsTupleBr = #b_br{bool=Bool,succ=TupleSizeL,fail=Fail},
+ IsTupleBlk = #b_blk{is=IsTupleIs,last=IsTupleBr},
+
+ TupleSizeBr = #b_br{bool=True,succ=EqL,fail=EqL},
+ TupleSizeBlk = #b_blk{is=TupleSizeIs,last=TupleSizeBr},
+ {[{TupleSizeL,TupleSizeBlk},
+ {IsTupleL,IsTupleBlk},
+ {PreL,PreBlk}|Acc],Count}.
+
+opt_tup_size_is([#b_set{op={bif,tuple_size},dst=Size,args=[Tuple]}=I,
+ #b_set{op=succeeded,dst=Bool,args=[Size]}],
+ Bool, Size, Acc) ->
+ {reverse(Acc),[I],Tuple};
+opt_tup_size_is([I|Is], Bool, Size, Acc) ->
+ opt_tup_size_is(Is, Bool, Size, [I|Acc]);
+opt_tup_size_is([], _, _, _Acc) -> none.
+
+%%%
+%%% Optimize #b_switch{} instructions.
+%%%
+%%% If the argument for a #b_switch{} comes from a phi node with all
+%%% literals, any values in the switch list which are not in the phi
+%%% node can be removed.
+%%%
+%%% If the values in the phi node and the switch list are the same,
+%%% the failure label can't be reached and can therefore be eliminated.
+%%%
+%%% A #b_switch{} with only one value can be rewritten to
+%%% a #b_br{}. A switch that only verifies that the argument
+%%% is 'true' or 'false' can be rewritten to an is_boolean test.
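+%%%
+%%% Sketches, in the style of the listings above (labels are made up):
+%%%
+%%%   switch Arg, label 3, [ { literal ok, label 4 } ]
+%%%
+%%% becomes
+%%%
+%%%   Bool = bif:'=:=' Arg, literal ok
+%%%   br Bool, label 4, label 3
+%%%
+%%% and a switch whose only two values are 'true' and 'false', both
+%%% branching to the same label, becomes a bif:is_boolean test followed
+%%% by a two-way br.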
+%%%
+
+ssa_opt_sw({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) ->
+ {Linear,Count} = opt_sw(Linear0, Count0, []),
+ {St#st{ssa=Linear,cnt=Count}, FuncDb}.
+
+opt_sw([{L,#b_blk{is=Is,last=#b_switch{}=Sw0}=Blk0}|Bs], Count0, Acc) ->
+ %% Ensure that no label in the switch list is the same
+ %% as the failure label.
+ #b_switch{fail=Fail,list=List0} = Sw0,
+ List = [{Val,Lbl} || {Val,Lbl} <- List0, Lbl =/= Fail],
+ Sw1 = beam_ssa:normalize(Sw0#b_switch{list=List}),
+ case Sw1 of
+ #b_switch{arg=Arg,fail=Fail,list=[{Lit,Lbl}]} ->
+ %% Rewrite a single value switch to a br.
+ Bool = #b_var{name={'@ssa_bool',Count0}},
+ Count = Count0 + 1,
+ IsEq = #b_set{op={bif,'=:='},dst=Bool,args=[Arg,Lit]},
+ Br = #b_br{bool=Bool,succ=Lbl,fail=Fail},
+ Blk = Blk0#b_blk{is=Is++[IsEq],last=Br},
+ opt_sw(Bs, Count, [{L,Blk}|Acc]);
+ #b_switch{arg=Arg,fail=Fail,
+ list=[{#b_literal{val=B1},Lbl},{#b_literal{val=B2},Lbl}]}
+ when B1 =:= not B2 ->
+ %% Replace with is_boolean test.
+ Bool = #b_var{name={'@ssa_bool',Count0}},
+ Count = Count0 + 1,
+ IsBool = #b_set{op={bif,is_boolean},dst=Bool,args=[Arg]},
+ Br = #b_br{bool=Bool,succ=Lbl,fail=Fail},
+ Blk = Blk0#b_blk{is=Is++[IsBool],last=Br},
+ opt_sw(Bs, Count, [{L,Blk}|Acc]);
+ Sw0 ->
+ opt_sw(Bs, Count0, [{L,Blk0}|Acc]);
+ Sw ->
+ Blk = Blk0#b_blk{last=Sw},
+ opt_sw(Bs, Count0, [{L,Blk}|Acc])
+ end;
+opt_sw([{L,#b_blk{}=Blk}|Bs], Count, Acc) ->
+ opt_sw(Bs, Count, [{L,Blk}|Acc]);
+opt_sw([], Count, Acc) ->
+ {reverse(Acc),Count}.
%%%
%%% Merge blocks.
%%%
-ssa_opt_merge_blocks(#st{ssa=Blocks}=St) ->
+ssa_opt_merge_blocks({#st{ssa=Blocks}=St, FuncDb}) ->
Preds = beam_ssa:predecessors(Blocks),
- St#st{ssa=merge_blocks_1(beam_ssa:rpo(Blocks), Preds, Blocks)}.
+ Merged = merge_blocks_1(beam_ssa:rpo(Blocks), Preds, Blocks),
+ {St#st{ssa=Merged}, FuncDb}.
merge_blocks_1([L|Ls], Preds0, Blocks0) ->
case Preds0 of
@@ -989,10 +1899,11 @@ merge_blocks_1([L|Ls], Preds0, Blocks0) ->
true ->
#b_blk{is=Is0} = Blk0,
#b_blk{is=Is1} = Blk1,
+ verify_merge_is(Is1),
Is = Is0 ++ Is1,
Blk = Blk1#b_blk{is=Is},
Blocks1 = maps:remove(L, Blocks0),
- Blocks2 = maps:put(P, Blk, Blocks1),
+ Blocks2 = Blocks1#{P:=Blk},
Successors = beam_ssa:successors(Blk),
Blocks = beam_ssa:update_phi_labels(Successors, L, P, Blocks2),
Preds = merge_update_preds(Successors, L, P, Preds0),
@@ -1006,21 +1917,32 @@ merge_blocks_1([L|Ls], Preds0, Blocks0) ->
merge_blocks_1([], _Preds, Blocks) -> Blocks.
merge_update_preds([L|Ls], From, To, Preds0) ->
- Ps = [rename_label(P, From, To) || P <- maps:get(L, Preds0)],
- Preds = maps:put(L, Ps, Preds0),
+ Ps = [rename_label(P, From, To) || P <- map_get(L, Preds0)],
+ Preds = Preds0#{L:=Ps},
merge_update_preds(Ls, From, To, Preds);
merge_update_preds([], _, _, Preds) -> Preds.
rename_label(From, From, To) -> To;
rename_label(Lbl, _, _) -> Lbl.
-is_merge_allowed(_, _, #b_blk{is=[#b_set{op=peek_message}|_]}) ->
+verify_merge_is([#b_set{op=Op}|_]) ->
+ %% The merged block has only one predecessor, so it should not have any phi
+ %% nodes.
+ true = Op =/= phi; %Assertion.
+verify_merge_is(_) ->
+ ok.
+
+is_merge_allowed(_, #b_blk{}, #b_blk{is=[#b_set{op=peek_message}|_]}) ->
false;
-is_merge_allowed(L, Blk0, #b_blk{}) ->
- case beam_ssa:successors(Blk0) of
+is_merge_allowed(L, #b_blk{last=#b_br{}}=Blk, #b_blk{}) ->
+ %% The predecessor block must have exactly one successor (L) for
+ %% the merge to be safe.
+ case beam_ssa:successors(Blk) of
[L] -> true;
[_|_] -> false
- end.
+ end;
+is_merge_allowed(_, #b_blk{last=#b_switch{}}, #b_blk{}) ->
+ false.
%%%
%%% When a tuple is matched, the pattern matching compiler generates a
@@ -1038,19 +1960,27 @@ is_merge_allowed(L, Blk0, #b_blk{}) ->
%%% extracted values.
%%%
-ssa_opt_sink(#st{ssa=Blocks0}=St) ->
+ssa_opt_sink({#st{ssa=Blocks0}=St, FuncDb}) ->
Linear = beam_ssa:linearize(Blocks0),
%% Create a map with all variables that define get_tuple_element
%% instructions. The variable name maps to the block it is defined in.
- Defs = maps:from_list(def_blocks(Linear)),
+ case def_blocks(Linear) of
+ [] ->
+ %% No get_tuple_element instructions, so there is nothing to do.
+ {St, FuncDb};
+ [_|_]=Defs0 ->
+ Defs = maps:from_list(Defs0),
+ {do_ssa_opt_sink(Linear, Defs, St), FuncDb}
+ end.
+do_ssa_opt_sink(Linear, Defs, #st{ssa=Blocks0}=St) ->
%% Now find all the blocks that use variables defined by get_tuple_element
%% instructions.
Used = used_blocks(Linear, Defs, []),
%% Calculate dominators.
- Dom0 = beam_ssa:dominators(Blocks0),
+ {Dom,Numbering} = beam_ssa:dominators(Blocks0),
%% It is not safe to move get_tuple_element instructions to blocks
%% that begin with certain instructions. It is also unsafe to move
@@ -1058,25 +1988,15 @@ ssa_opt_sink(#st{ssa=Blocks0}=St) ->
%% unsafe moves, pretend that the unsuitable blocks are not
%% dominators.
Unsuitable = unsuitable(Linear, Blocks0),
- Dom = case gb_sets:is_empty(Unsuitable) of
- true ->
- Dom0;
- false ->
- F = fun(_, DomBy) ->
- [L || L <- DomBy,
- not gb_sets:is_element(L, Unsuitable)]
- end,
- maps:map(F, Dom0)
- end,
%% Calculate new positions for get_tuple_element instructions. The new
%% position is a block that dominates all uses of the variable.
- DefLoc = new_def_locations(Used, Defs, Dom),
+ DefLoc = new_def_locations(Used, Defs, Dom, Numbering, Unsuitable),
%% Now move all suitable get_tuple_element instructions to their
%% new blocks.
Blocks = foldl(fun({V,To}, A) ->
- From = maps:get(V, Defs),
+ From = map_get(V, Defs),
move_defs(V, From, To, A)
end, Blocks0, DefLoc),
St#st{ssa=Blocks}.
@@ -1085,7 +2005,7 @@ def_blocks([{L,#b_blk{is=Is}}|Bs]) ->
def_blocks_is(Is, L, def_blocks(Bs));
def_blocks([]) -> [].
-def_blocks_is([#b_set{op=get_tuple_element,dst=#b_var{name=Dst}}|Is], L, Acc) ->
+def_blocks_is([#b_set{op=get_tuple_element,dst=Dst}|Is], L, Acc) ->
def_blocks_is(Is, L, [{Dst,L}|Acc]);
def_blocks_is([_|Is], L, Acc) ->
def_blocks_is(Is, L, Acc);
@@ -1146,11 +2066,11 @@ unsuitable_loop(L, Blocks, Predecessors) ->
unsuitable_loop(L, Blocks, Predecessors, []).
unsuitable_loop(L, Blocks, Predecessors, Acc) ->
- Ps = maps:get(L, Predecessors),
+ Ps = map_get(L, Predecessors),
unsuitable_loop_1(Ps, Blocks, Predecessors, Acc).
unsuitable_loop_1([P|Ps], Blocks, Predecessors, Acc0) ->
- case maps:get(P, Blocks) of
+ case map_get(P, Blocks) of
#b_blk{is=[#b_set{op=peek_message}|_]} ->
unsuitable_loop_1(Ps, Blocks, Predecessors, Acc0);
#b_blk{} ->
@@ -1165,50 +2085,42 @@ unsuitable_loop_1([P|Ps], Blocks, Predecessors, Acc0) ->
end;
unsuitable_loop_1([], _, _, Acc) -> Acc.
-%% new_def_locations([{Variable,[UsedInBlock]}|Vs], Defs, Dominators) ->
-%% [{Variable,NewDefinitionBlock}]
-%% Calculate new locations for get_tuple_element instructions. For each
-%% variable, the new location is a block that dominates all uses of
-%% variable and as near to the uses of as possible. If no such block
-%% distinct from the block where the instruction currently is, the
-%% variable will not be included in the result list.
-
-new_def_locations([{V,UsedIn}|Vs], Defs, Dom) ->
- DefIn = maps:get(V, Defs),
- case common_dom(UsedIn, DefIn, Dom) of
- [] ->
- new_def_locations(Vs, Defs, Dom);
- [_|_]=BetterDef ->
- L = most_dominated(BetterDef, Dom),
- [{V,L}|new_def_locations(Vs, Defs, Dom)]
- end;
-new_def_locations([], _, _) -> [].
-
-common_dom([L|Ls], DefIn, Dom) ->
- DomBy0 = maps:get(L, Dom),
- DomBy = ordsets:subtract(DomBy0, maps:get(DefIn, Dom)),
- common_dom_1(Ls, Dom, DomBy).
-
-common_dom_1(_, _, []) ->
- [];
-common_dom_1([L|Ls], Dom, [_|_]=DomBy0) ->
- DomBy1 = maps:get(L, Dom),
- DomBy = ordsets:intersection(DomBy0, DomBy1),
- common_dom_1(Ls, Dom, DomBy);
-common_dom_1([], _, DomBy) -> DomBy.
-
-most_dominated([L|Ls], Dom) ->
- most_dominated(Ls, L, maps:get(L, Dom), Dom).
-
-most_dominated([L|Ls], L0, DomBy, Dom) ->
- case member(L, DomBy) of
+%% new_def_locations([{Variable,[UsedInBlock]}|Vs], Defs,
+%% Dominators, Numbering, Unsuitable) ->
+%% [{Variable,NewDefinitionBlock}]
+%%
+%% Calculate new locations for get_tuple_element instructions. For
+%% each variable, the new location is a block that dominates all uses
+%% of the variable and is as near to the uses as possible.
+
+new_def_locations([{V,UsedIn}|Vs], Defs, Dom, Numbering, Unsuitable) ->
+ DefIn = map_get(V, Defs),
+ Common = common_dominator(UsedIn, Dom, Numbering, Unsuitable),
+ case member(Common, map_get(DefIn, Dom)) of
true ->
- most_dominated(Ls, L0, DomBy, Dom);
+ %% The common dominator is either DefIn or an
+ %% ancestor of DefIn.
+ new_def_locations(Vs, Defs, Dom, Numbering, Unsuitable);
false ->
- most_dominated(Ls, L, maps:get(L, Dom), Dom)
+ %% We have found a suitable descendant of DefIn,
+ %% to which the get_tuple_element instruction can
+ %% be sunk.
+ [{V,Common}|new_def_locations(Vs, Defs, Dom, Numbering, Unsuitable)]
end;
-most_dominated([], L, _, _) -> L.
+new_def_locations([], _, _, _, _) -> [].
+common_dominator(Ls0, Dom, Numbering, Unsuitable) ->
+ [Common|_] = beam_ssa:common_dominators(Ls0, Dom, Numbering),
+ case gb_sets:is_member(Common, Unsuitable) of
+ true ->
+ %% It is not allowed to place the instruction here. Try
+ %% to find another suitable dominating block by going up
+ %% one step in the dominator tree.
+ [Common,OneUp|_] = map_get(Common, Dom),
+ common_dominator([OneUp], Dom, Numbering, Unsuitable);
+ false ->
+ Common
+ end.
%% Move get_tuple_element instructions to their new locations.
@@ -1228,7 +2140,7 @@ remove_def(V, #b_blk{is=Is0}=Blk) ->
{Def,Is} = remove_def_is(Is0, V, []),
{Def,Blk#b_blk{is=Is}}.
-remove_def_is([#b_set{dst=#b_var{name=Dst}}=Def|Is], Dst, Acc) ->
+remove_def_is([#b_set{dst=Dst}=Def|Is], Dst, Acc) ->
{Def,reverse(Acc, Is)};
remove_def_is([I|Is], Dst, Acc) ->
remove_def_is(Is, Dst, [I|Acc]).
@@ -1248,7 +2160,6 @@ insert_def_is([#b_set{op=Op}=I|Is]=Is0, V, Def) ->
Action0 = case Op of
call -> beyond;
'catch_end' -> beyond;
- set_tuple_element -> beyond;
timeout -> beyond;
_ -> here
end,
@@ -1273,30 +2184,79 @@ insert_def_is([#b_set{op=Op}=I|Is]=Is0, V, Def) ->
insert_def_is([], _V, Def) ->
[Def].
+%%%
+%%% Order consecutive get_tuple_element instructions in ascending
+%%% position order. This will give the loader more opportunities
+%%% for combining get_tuple_element instructions.
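+%%%
+%%% For example, the sequence
+%%%
+%%%   C = get_tuple_element T, literal 2
+%%%   A = get_tuple_element T, literal 0
+%%%   B = get_tuple_element T, literal 1
+%%%
+%%% is reordered so that positions 0, 1, and 2 are extracted in that
+%%% order, making it possible for the loader to fuse the instructions.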
+%%%
+
+ssa_opt_get_tuple_element({#st{ssa=Blocks0}=St, FuncDb}) ->
+ Blocks = opt_get_tuple_element(maps:to_list(Blocks0), Blocks0),
+ {St#st{ssa=Blocks}, FuncDb}.
+
+opt_get_tuple_element([{L,#b_blk{is=Is0}=Blk0}|Bs], Blocks) ->
+ case opt_get_tuple_element_is(Is0, false, []) of
+ {yes,Is} ->
+ Blk = Blk0#b_blk{is=Is},
+ opt_get_tuple_element(Bs, Blocks#{L:=Blk});
+ no ->
+ opt_get_tuple_element(Bs, Blocks)
+ end;
+opt_get_tuple_element([], Blocks) -> Blocks.
+
+opt_get_tuple_element_is([#b_set{op=get_tuple_element,
+ args=[#b_var{}=Src,_]}=I0|Is0],
+ _AnyChange, Acc) ->
+ {GetIs0,Is} = collect_get_tuple_element(Is0, Src, [I0]),
+ GetIs1 = sort([{Pos,I} || #b_set{args=[_,Pos]}=I <- GetIs0]),
+ GetIs = [I || {_,I} <- GetIs1],
+ opt_get_tuple_element_is(Is, true, reverse(GetIs, Acc));
+opt_get_tuple_element_is([I|Is], AnyChange, Acc) ->
+ opt_get_tuple_element_is(Is, AnyChange, [I|Acc]);
+opt_get_tuple_element_is([], AnyChange, Acc) ->
+ case AnyChange of
+ true -> {yes,reverse(Acc)};
+ false -> no
+ end.
+
+collect_get_tuple_element([#b_set{op=get_tuple_element,
+ args=[Src,_]}=I|Is], Src, Acc) ->
+ collect_get_tuple_element(Is, Src, [I|Acc]);
+collect_get_tuple_element(Is, _Src, Acc) ->
+ {Acc,Is}.
%%%
%%% Common utilities.
%%%
+gcd(A, B) ->
+ case A rem B of
+ 0 -> B;
+ X -> gcd(B, X)
+ end.
+
rel2fam(S0) ->
S1 = sofs:relation(S0),
S = sofs:rel2fam(S1),
sofs:to_external(S).
-sub(#b_set{op=phi,args=Args}=I, Sub) ->
+sub(I, Sub) ->
+ beam_ssa:normalize(sub_1(I, Sub)).
+
+sub_1(#b_set{op=phi,args=Args}=I, Sub) ->
I#b_set{args=[{sub_arg(A, Sub),P} || {A,P} <- Args]};
-sub(#b_set{args=Args}=I, Sub) ->
+sub_1(#b_set{args=Args}=I, Sub) ->
I#b_set{args=[sub_arg(A, Sub) || A <- Args]};
-sub(#b_br{bool=#b_var{}=Old}=Br, Sub) ->
+sub_1(#b_br{bool=#b_var{}=Old}=Br, Sub) ->
New = sub_arg(Old, Sub),
Br#b_br{bool=New};
-sub(#b_switch{arg=#b_var{}=Old}=Sw, Sub) ->
+sub_1(#b_switch{arg=#b_var{}=Old}=Sw, Sub) ->
New = sub_arg(Old, Sub),
Sw#b_switch{arg=New};
-sub(#b_ret{arg=#b_var{}=Old}=Ret, Sub) ->
+sub_1(#b_ret{arg=#b_var{}=Old}=Ret, Sub) ->
New = sub_arg(Old, Sub),
Ret#b_ret{arg=New};
-sub(Last, _) -> Last.
+sub_1(Last, _) -> Last.
sub_arg(#b_remote{mod=Mod,name=Name}=Rem, Sub) ->
Rem#b_remote{mod=sub_arg(Mod, Sub),name=sub_arg(Name, Sub)};
@@ -1305,3 +2265,9 @@ sub_arg(Old, Sub) ->
#{Old:=New} -> New;
#{} -> Old
end.
+
+new_var(#b_var{name={Base,N}}, Count) ->
+ true = is_integer(N), %Assertion.
+ {#b_var{name={Base,Count}},Count+1};
+new_var(#b_var{name=Base}, Count) ->
+ {#b_var{name={Base,Count}},Count+1}.
diff --git a/lib/compiler/src/beam_ssa_opt.hrl b/lib/compiler/src/beam_ssa_opt.hrl
new file mode 100644
index 0000000000..37711a6f48
--- /dev/null
+++ b/lib/compiler/src/beam_ssa_opt.hrl
@@ -0,0 +1,53 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2019. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-include("beam_ssa.hrl").
+
+-record(func_info,
+ {%% Local calls going in/out of this function.
+ in = ordsets:new() :: ordsets:ordset(func_id()),
+ out = ordsets:new() :: ordsets:ordset(func_id()),
+
+ %% Whether the function is exported or not; some optimizations may
+ %% need to be suppressed if it is.
+ exported = true :: boolean(),
+
+ %% The inferred types of each argument (as opposed to parameter),
+ %% indexed by call site.
+ %%
+ %% This is more effective than the naive approach of joining into a
+ %% "parameter_type" as we go as it lets us narrow parameter types
+ %% without having to visit all callers on each pass, which helps a lot
+ %% when dealing with co-recursive functions.
+ arg_types = [] :: list(arg_type_map()),
+
+ %% The inferred return type of this function; this is either [type()]
+ %% or [] to denote absence.
+ ret_type = [] :: list()}).
+
+-type arg_key() :: {CallerId :: func_id(),
+ CallDst :: beam_ssa:b_var()}.
+-type arg_type_map() :: #{ arg_key() => term() }.
+
+%% Per-function metadata used by various optimization passes to perform
+%% module-level optimization. If a function is absent it means that
+%% module-level optimization has been turned off for said function.
+-type func_id() :: beam_ssa:b_local().
+-type func_info_db() :: #{ func_id() => #func_info{} }.
diff --git a/lib/compiler/src/beam_ssa_pp.erl b/lib/compiler/src/beam_ssa_pp.erl
index 9daa2c2523..34ac08b32e 100644
--- a/lib/compiler/src/beam_ssa_pp.erl
+++ b/lib/compiler/src/beam_ssa_pp.erl
@@ -158,7 +158,7 @@ format_op({Prefix,Name}) ->
format_op(Name) ->
io_lib:format("~p", [Name]).
-format_register(#b_var{name=V}, #{registers:=Regs}) ->
+format_register(#b_var{}=V, #{registers:=Regs}) ->
{Tag,N} = maps:get(V, Regs),
io_lib:format("~p~p", [Tag,N]);
format_register(_, #{}) -> "".
@@ -224,9 +224,9 @@ format_anno_1(Anno) ->
[io_lib:format(" %% Anno: ~p\n", [Anno])]
end.
-format_live_interval(#b_var{name=V}=Dst, #{live_intervals:=Intervals}) ->
+format_live_interval(#b_var{}=Dst, #{live_intervals:=Intervals}) ->
case Intervals of
- #{V:=Rs0} ->
+ #{Dst:=Rs0} ->
Rs1 = [io_lib:format("~p..~p", [Start,End]) ||
{Start,End} <- Rs0],
Rs = lists:join(" ", Rs1),
diff --git a/lib/compiler/src/beam_ssa_pre_codegen.erl b/lib/compiler/src/beam_ssa_pre_codegen.erl
index bbc3739eb5..bad43a9c4e 100644
--- a/lib/compiler/src/beam_ssa_pre_codegen.erl
+++ b/lib/compiler/src/beam_ssa_pre_codegen.erl
@@ -23,11 +23,10 @@
%% it has been annotated and transformed to help the code generator.
%%
%% * Some instructions are translated to other instructions closer to
-%% the BEAM instructions. For example, the put_tuple instruction is
-%% broken apart into the put_tuple_arity and put_tuple_elements
-%% instructions. Similary, the binary matching instructions are
-%% transformed from the optimization-friendly internal format to
-%% instruction more similar to the actual BEAM instructions.
+%% the BEAM instructions. For example, the binary matching
+%% instructions are transformed from the optimization-friendly
+%% internal format to instructions more similar to the actual BEAM
+%% instructions.
%%
%% * Blocks that will need an instruction for allocating a stack frame
%% are annotated with a {frame_size,Size} annotation.
@@ -73,20 +72,20 @@
-import(lists, [all/2,any/2,append/1,duplicate/2,
foldl/3,last/1,map/2,member/2,partition/2,
- reverse/1,reverse/2,sort/1,zip/2]).
+ reverse/1,reverse/2,sort/1,splitwith/2,zip/2]).
-spec module(beam_ssa:b_module(), [compile:option()]) ->
{'ok',beam_ssa:b_module()}.
module(#b_module{body=Fs0}=Module, Opts) ->
- ExtraAnnos = proplists:get_bool(dprecg, Opts),
- Ps = passes(ExtraAnnos),
- Fs = functions(Fs0, Ps),
+ UseBSM3 = not proplists:get_bool(no_bsm3, Opts),
+ Ps = passes(Opts),
+ Fs = functions(Fs0, Ps, UseBSM3),
{ok,Module#b_module{body=Fs}}.
-functions([F|Fs], Ps) ->
- [function(F, Ps)|functions(Fs, Ps)];
-functions([], _Ps) -> [].
+functions([F|Fs], Ps, UseBSM3) ->
+ [function(F, Ps, UseBSM3)|functions(Fs, Ps, UseBSM3)];
+functions([], _Ps, _UseBSM3) -> [].
-type b_var() :: beam_ssa:b_var().
-type var_name() :: beam_ssa:var_name().
@@ -104,22 +103,28 @@ functions([], _Ps) -> [].
-record(st, {ssa :: beam_ssa:block_map(),
args :: [b_var()],
cnt :: beam_ssa:label(),
+ use_bsm3 :: boolean(),
frames=[] :: [beam_ssa:label()],
intervals=[] :: [{b_var(),[range()]}],
- aliases=[] :: [{b_var(),b_var()}],
res=[] :: [{b_var(),reservation()}] | #{b_var():=reservation()},
regs=#{} :: #{b_var():=ssa_register()},
extra_annos=[] :: [{atom(),term()}]
}).
-define(PASS(N), {N,fun N/1}).
-passes(ExtraAnnos) ->
+passes(Opts) ->
+ AddPrecgAnnos = proplists:get_bool(dprecg, Opts),
+ FixTuples = proplists:get_bool(no_put_tuple2, Opts),
Ps = [?PASS(assert_no_critical_edges),
%% Preliminaries.
?PASS(fix_bs),
?PASS(sanitize),
- ?PASS(fix_tuples),
+ case FixTuples of
+ false -> ignore;
+ true -> ?PASS(fix_tuples)
+ end,
+ ?PASS(use_set_tuple_element),
?PASS(place_frames),
?PASS(fix_receives),
@@ -127,6 +132,10 @@ passes(ExtraAnnos) ->
?PASS(find_yregs),
?PASS(reserve_yregs),
+ %% Handle legacy binary match instructions that don't
+ %% accept a Y register as destination.
+ ?PASS(legacy_bs),
+
%% Improve reuse of Y registers to potentially
%% reduce the size of the stack frame.
?PASS(copy_retval),
@@ -135,30 +144,25 @@ passes(ExtraAnnos) ->
%% Calculate live intervals.
?PASS(number_instructions),
?PASS(live_intervals),
- ?PASS(remove_unsuitable_aliases),
?PASS(reserve_regs),
- ?PASS(merge_intervals),
%% If needed for a .precg file, save the live intervals
%% so they can be included in an annotation.
- case ExtraAnnos of
+ case AddPrecgAnnos of
false -> ignore;
true -> ?PASS(save_live_intervals)
end,
%% Allocate registers.
?PASS(linear_scan),
- ?PASS(fix_aliased_regs),
?PASS(frame_size),
?PASS(turn_yregs)],
- case ExtraAnnos of
- false -> [P || P <- Ps, P =/= ignore];
- true -> Ps
- end.
+ [P || P <- Ps, P =/= ignore].
-function(#b_function{anno=Anno,args=Args,bs=Blocks0,cnt=Count0}=F0, Ps) ->
+function(#b_function{anno=Anno,args=Args,bs=Blocks0,cnt=Count0}=F0,
+ Ps, UseBSM3) ->
try
- St0 = #st{ssa=Blocks0,args=Args,cnt=Count0},
+ St0 = #st{ssa=Blocks0,args=Args,use_bsm3=UseBSM3,cnt=Count0},
St = compile:run_sub_passes(Ps, St0),
#st{ssa=Blocks,cnt=Count,regs=Regs,extra_annos=ExtraAnnos} = St,
F1 = add_extra_annos(F0, ExtraAnnos),
@@ -174,14 +178,6 @@ function(#b_function{anno=Anno,args=Args,bs=Blocks0,cnt=Count0}=F0, Ps) ->
save_live_intervals(#st{intervals=Intervals}=St) ->
St#st{extra_annos=[{live_intervals,Intervals}]}.
-fix_aliased_regs(#st{aliases=Aliases,regs=Regs}=St) ->
- St#st{regs=fix_aliased_regs(Aliases, Regs)}.
-
-fix_aliased_regs([{Alias,V}|Aliases], Regs) ->
- #{V:=Reg} = Regs,
- fix_aliased_regs(Aliases, Regs#{Alias=>Reg});
-fix_aliased_regs([], Regs) -> Regs.
-
%% Add extra annotations when a .precg listing file is being produced.
add_extra_annos(F, Annos) ->
foldl(fun({Name,Value}, Acc) ->
@@ -214,7 +210,7 @@ assert_no_ces(_, _, Blocks) -> Blocks.
%% * Combine bs_match and bs_extract instructions to bs_get
%% instructions.
-fix_bs(#st{ssa=Blocks,cnt=Count0}=St) ->
+fix_bs(#st{ssa=Blocks,cnt=Count0,use_bsm3=UseBSM3}=St) ->
F = fun(#b_set{op=bs_start_match,dst=Dst}, A) ->
%% Mark the root of the match context list.
[{Dst,{context,Dst}}|A];
@@ -232,8 +228,13 @@ fix_bs(#st{ssa=Blocks,cnt=Count0}=St) ->
CtxChain = maps:from_list(M),
Linear0 = beam_ssa:linearize(Blocks),
- %% Insert bs_save / bs_restore instructions where needed.
- {Linear1,Count} = bs_save_restore(Linear0, CtxChain, Count0),
+ %% Insert position instructions where needed.
+ {Linear1,Count} = case UseBSM3 of
+ true ->
+ bs_pos_bsm3(Linear0, CtxChain, Count0);
+ false ->
+ bs_pos_bsm2(Linear0, CtxChain, Count0)
+ end,
%% Rename instructions.
Linear = bs_instrs(Linear1, CtxChain, []),
@@ -241,10 +242,54 @@ fix_bs(#st{ssa=Blocks,cnt=Count0}=St) ->
St#st{ssa=maps:from_list(Linear),cnt=Count}
end.
+%% Insert bs_get_position and bs_set_position instructions as needed.
+bs_pos_bsm3(Linear0, CtxChain, Count0) ->
+ Rs0 = bs_restores(Linear0, CtxChain, #{}, #{}),
+ Rs = maps:values(Rs0),
+ S0 = sofs:relation(Rs, [{context,save_point}]),
+ S1 = sofs:relation_to_family(S0),
+ S = sofs:to_external(S1),
+
+ {SavePoints,Count1} = make_bs_pos_dict(S, Count0, []),
+ {Gets,Count2} = make_bs_setpos_map(Rs, SavePoints, Count1, []),
+ {Sets,Count} = make_bs_getpos_map(maps:to_list(Rs0), SavePoints, Count2, []),
+
+ %% Now insert all saves and restores.
+ {bs_insert_bsm3(Linear0, Gets, Sets, SavePoints),Count}.
+
+make_bs_setpos_map([{Ctx,Save}=Ps|T], SavePoints, Count, Acc) ->
+ SavePoint = get_savepoint(Ps, SavePoints),
+ I = #b_set{op=bs_get_position,dst=SavePoint,args=[Ctx]},
+ make_bs_setpos_map(T, SavePoints, Count+1, [{Save,I}|Acc]);
+make_bs_setpos_map([], _, Count, Acc) ->
+ {maps:from_list(Acc),Count}.
+
+make_bs_getpos_map([{Bef,{Ctx,_}=Ps}|T], SavePoints, Count, Acc) ->
+ Ignored = #b_var{name={'@ssa_ignored',Count}},
+ Args = [Ctx, get_savepoint(Ps, SavePoints)],
+ I = #b_set{op=bs_set_position,dst=Ignored,args=Args},
+ make_bs_getpos_map(T, SavePoints, Count+1, [{Bef,I}|Acc]);
+make_bs_getpos_map([], _, Count, Acc) ->
+ {maps:from_list(Acc),Count}.
+
+get_savepoint({_,_}=Ps, SavePoints) ->
+ Name = {'@ssa_bs_position', map_get(Ps, SavePoints)},
+ #b_var{name=Name}.
-%% Insert bs_save and bs_restore instructions as needed.
+make_bs_pos_dict([{Ctx,Pts}|T], Count0, Acc0) ->
+ {Acc, Count} = make_bs_pos_dict_1(Pts, Ctx, Count0, Acc0),
+ make_bs_pos_dict(T, Count, Acc);
+make_bs_pos_dict([], Count, Acc) ->
+ {maps:from_list(Acc), Count}.
-bs_save_restore(Linear0, CtxChain, Count0) ->
+make_bs_pos_dict_1([H|T], Ctx, I, Acc) ->
+ make_bs_pos_dict_1(T, Ctx, I+1, [{{Ctx,H},I}|Acc]);
+make_bs_pos_dict_1([], Ctx, I, Acc) ->
+ {[{Ctx,I}|Acc], I}.
+
+%% Same as bs_pos_bsm3/3, but without using the new OTP 22 position
+%% instructions. This is only used when cross-compiling to older versions.
+bs_pos_bsm2(Linear0, CtxChain, Count0) ->
Rs0 = bs_restores(Linear0, CtxChain, #{}, #{}),
Rs = maps:values(Rs0),
S0 = sofs:relation(Rs, [{context,save_point}]),
@@ -255,7 +300,7 @@ bs_save_restore(Linear0, CtxChain, Count0) ->
{Restores,Count} = make_restore_map(maps:to_list(Rs0), Slots, Count1, []),
%% Now insert all saves and restores.
- {bs_insert(Linear0, Saves, Restores, Slots),Count}.
+ {bs_insert_bsm2(Linear0, Saves, Restores, Slots),Count}.
make_save_map([{Ctx,Save}=Ps|T], Slots, Count, Acc) ->
Ignored = #b_var{name={'@ssa_ignored',Count}},
@@ -279,7 +324,7 @@ make_restore_map([], _, Count, Acc) ->
make_slot({Same,Same}, _Slots) ->
#b_literal{val=start};
make_slot({_,_}=Ps, Slots) ->
- #b_literal{val=maps:get(Ps, Slots)}.
+ #b_literal{val=map_get(Ps, Slots)}.
make_save_point_dict([{Ctx,Pts}|T], Acc0) ->
Acc = make_save_point_dict_1(Pts, Ctx, 0, Acc0),
@@ -308,8 +353,7 @@ bs_restores([], _, _, Rs) -> Rs.
bs_update_successors(#b_br{succ=Succ,fail=Fail}, SPos, FPos, D) ->
join_positions([{Succ,SPos},{Fail,FPos}], D);
-bs_update_successors(#b_switch{fail=Fail,list=List}, SPos, FPos, D) ->
- SPos = FPos, %Assertion.
+bs_update_successors(#b_switch{fail=Fail,list=List}, SPos, _FPos, D) ->
Update = [{L,SPos} || {_,L} <- List] ++ [{Fail,SPos}],
join_positions(Update, D);
bs_update_successors(#b_ret{}, _, _, D) -> D.
@@ -388,10 +432,19 @@ bs_restores_is([#b_set{op=bs_extract,args=[FromPos|_]}|Is],
Start = bs_subst_ctx(FromPos, CtxChain),
#{Start:=FromPos} = PosMap, %Assertion.
bs_restores_is(Is, CtxChain, PosMap, Rs);
+bs_restores_is([#b_set{op=call,dst=Dst,args=Args}|Is],
+ CtxChain, PosMap0, Rs0) ->
+ {Rs,PosMap1} = bs_restore_args(Args, PosMap0, CtxChain, Dst, Rs0),
+ PosMap = bs_invalidate_pos(Args, PosMap1, CtxChain),
+ bs_restores_is(Is, CtxChain, PosMap, Rs);
+bs_restores_is([#b_set{op=landingpad}|Is], CtxChain, PosMap0, Rs) ->
+ %% We can land here from any point, so all positions are invalid.
+ PosMap = maps:map(fun(_Start,_Pos) -> unknown end, PosMap0),
+ bs_restores_is(Is, CtxChain, PosMap, Rs);
bs_restores_is([#b_set{op=Op,dst=Dst,args=Args}|Is],
CtxChain, PosMap0, Rs0)
when Op =:= bs_test_tail;
- Op =:= call ->
+ Op =:= bs_get_tail ->
{Rs,PosMap} = bs_restore_args(Args, PosMap0, CtxChain, Dst, Rs0),
bs_restores_is(Is, CtxChain, PosMap, Rs);
bs_restores_is([_|Is], CtxChain, PosMap, Rs) ->
@@ -399,7 +452,6 @@ bs_restores_is([_|Is], CtxChain, PosMap, Rs) ->
bs_restores_is([], _CtxChain, PosMap, Rs) ->
{PosMap,Rs}.
-
bs_match_type(#b_set{args=[#b_literal{val=skip},_Ctx,
#b_literal{val=binary},_Flags,
#b_literal{val=all},#b_literal{val=U}]}) ->
@@ -410,6 +462,23 @@ bs_match_type(#b_set{args=[#b_literal{val=skip},_Ctx,
bs_match_type(_) ->
plain.
+%% Call instructions leave the match position in an undefined state,
+%% requiring us to invalidate each affected argument.
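+%% For example, if a match context Ctx is passed as an argument in a
+%% call, its position is treated as unknown afterwards, so the next
+%% bs_match or bs_test_tail on Ctx will be preceded by an instruction
+%% that restores the saved position.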
+bs_invalidate_pos([#b_var{}=Arg|Args], PosMap0, CtxChain) ->
+ Start = bs_subst_ctx(Arg, CtxChain),
+ case PosMap0 of
+ #{Start:=_} ->
+ PosMap = PosMap0#{Start:=unknown},
+ bs_invalidate_pos(Args, PosMap, CtxChain);
+ #{} ->
+ %% Not a match context.
+ bs_invalidate_pos(Args, PosMap0, CtxChain)
+ end;
+bs_invalidate_pos([_|Args], PosMap, CtxChain) ->
+ bs_invalidate_pos(Args, PosMap, CtxChain);
+bs_invalidate_pos([], PosMap, _CtxChain) ->
+ PosMap.
+
bs_restore_args([#b_var{}=Arg|Args], PosMap0, CtxChain, Dst, Rs0) ->
Start = bs_subst_ctx(Arg, CtxChain),
case PosMap0 of
@@ -432,33 +501,45 @@ bs_restore_args([], PosMap, _CtxChain, _Dst, Rs) ->
%% Insert all bs_save and bs_restore instructions.
-bs_insert([{L,#b_blk{is=Is0}=Blk}|Bs0], Saves, Restores, Slots) ->
- Is = bs_insert_is_1(Is0, Restores, Slots),
+bs_insert_bsm3(Blocks, Saves, Restores, SavePoints) ->
+ bs_insert_1(Blocks, Saves, Restores, SavePoints, fun(I) -> I end).
+
+bs_insert_bsm2(Blocks, Saves, Restores, SavePoints) ->
+ %% The old instructions require bs_start_match to be annotated with the
+ %% number of position slots it needs.
+ bs_insert_1(Blocks, Saves, Restores, SavePoints,
+ fun(#b_set{op=bs_start_match,dst=Dst}=I0) ->
+ NumSlots = case SavePoints of
+ #{Dst:=NumSlots0} -> NumSlots0;
+ #{} -> 0
+ end,
+ beam_ssa:add_anno(num_slots, NumSlots, I0);
+ (I) ->
+ I
+ end).
+
+bs_insert_1([{L,#b_blk{is=Is0}=Blk}|Bs0], Saves, Restores, Slots, XFrm) ->
+ Is = bs_insert_is_1(Is0, Restores, Slots, XFrm),
Bs = bs_insert_saves(Is, Bs0, Saves),
- [{L,Blk#b_blk{is=Is}}|bs_insert(Bs, Saves, Restores, Slots)];
-bs_insert([], _, _, _) -> [].
+ [{L,Blk#b_blk{is=Is}}|bs_insert_1(Bs, Saves, Restores, Slots, XFrm)];
+bs_insert_1([], _, _, _, _) -> [].
-bs_insert_is_1([#b_set{op=Op,dst=Dst}=I0|Is], Restores, Slots) ->
+bs_insert_is_1([#b_set{op=Op,dst=Dst}=I0|Is], Restores, SavePoints, XFrm) ->
+ I = XFrm(I0),
if
Op =:= bs_test_tail;
+ Op =:= bs_get_tail;
Op =:= bs_match;
Op =:= call ->
Rs = case Restores of
#{Dst:=R} -> [R];
#{} -> []
end,
- Rs ++ [I0|bs_insert_is_1(Is, Restores, Slots)];
- Op =:= bs_start_match ->
- NumSlots = case Slots of
- #{Dst:=NumSlots0} -> NumSlots0;
- #{} -> 0
- end,
- I = beam_ssa:add_anno(num_slots, NumSlots, I0),
- [I|bs_insert_is_1(Is, Restores, Slots)];
+ Rs ++ [I|bs_insert_is_1(Is, Restores, SavePoints, XFrm)];
true ->
- [I0|bs_insert_is_1(Is, Restores, Slots)]
+ [I|bs_insert_is_1(Is, Restores, SavePoints, XFrm)]
end;
-bs_insert_is_1([], _, _) -> [].
+bs_insert_is_1([], _, _, _) -> [].
bs_insert_saves([#b_set{dst=Dst}|Is], Bs, Saves) ->
case Saves of
@@ -504,6 +585,8 @@ bs_instrs_is([#b_set{op=Op,args=Args0}=I0|Is], CtxChain, Acc) ->
I1#b_set{op=bs_skip,args=[Type,Ctx|As]};
{bs_match,[#b_literal{val=string},Ctx|As]} ->
I1#b_set{op=bs_match_string,args=[Ctx|As]};
+ {bs_get_tail,[Ctx|As]} ->
+ I1#b_set{op=bs_get_tail,args=[Ctx|As]};
{_,_} ->
I1
end,
@@ -534,6 +617,59 @@ bs_subst_ctx(#b_var{}=Var, CtxChain) ->
bs_subst_ctx(Other, _CtxChain) ->
Other.
+%% legacy_bs(St0) -> St.
+%% Binary matching instructions in OTP 21 and earlier don't support
+%% a Y register as destination. If St#st.use_bsm3 is false,
+%% we will need to rewrite those instructions so that the result
+%% is first put in an X register and then moved to a Y register
+%% if the operation succeeded.
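+%%
+%% Roughly, for a destination Dst that has been allocated to a Y
+%% register:
+%%
+%%   Dst = bs_get ...
+%%   Bool = succeeded Dst
+%%
+%% becomes
+%%
+%%   Tmp = bs_get ...            (Tmp is a new '@bs_temp_dst' variable)
+%%   Bool = succeeded Tmp
+%%
+%% with "Dst = copy Tmp" inserted first in the success block.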
+
+legacy_bs(#st{use_bsm3=false,ssa=Blocks0,cnt=Count0,res=Res}=St) ->
+ IsYreg = maps:from_list([{V,true} || {V,{y,_}} <- Res]),
+ Linear0 = beam_ssa:linearize(Blocks0),
+ {Linear,Count} = legacy_bs(Linear0, IsYreg, Count0, #{}, []),
+ Blocks = maps:from_list(Linear),
+ St#st{ssa=Blocks,cnt=Count};
+legacy_bs(#st{use_bsm3=true}=St) -> St.
+
+legacy_bs([{L,Blk}|Bs], IsYreg, Count0, Copies0, Acc) ->
+ #b_blk{is=Is0,last=Last} = Blk,
+ Is1 = case Copies0 of
+ #{L:=Copy} -> [Copy|Is0];
+ #{} -> Is0
+ end,
+ {Is,Count,Copies} = legacy_bs_is(Is1, Last, IsYreg, Count0, Copies0, []),
+ legacy_bs(Bs, IsYreg, Count, Copies, [{L,Blk#b_blk{is=Is}}|Acc]);
+legacy_bs([], _IsYreg, Count, _Copies, Acc) ->
+ {Acc,Count}.
+
+legacy_bs_is([#b_set{op=Op,dst=Dst}=I0,
+ #b_set{op=succeeded,dst=SuccDst,args=[Dst]}=SuccI0],
+ Last, IsYreg, Count0, Copies0, Acc) ->
+ NeedsFix = is_map_key(Dst, IsYreg) andalso
+ case Op of
+ bs_get -> true;
+ bs_init -> true;
+ _ -> false
+ end,
+ case NeedsFix of
+ true ->
+ TempDst = #b_var{name={'@bs_temp_dst',Count0}},
+ Count = Count0 + 1,
+ I = I0#b_set{dst=TempDst},
+ SuccI = SuccI0#b_set{args=[TempDst]},
+ Copy = #b_set{op=copy,dst=Dst,args=[TempDst]},
+ #b_br{bool=SuccDst,succ=SuccL} = Last,
+ Copies = Copies0#{SuccL=>Copy},
+ legacy_bs_is([], Last, IsYreg, Count, Copies, [SuccI,I|Acc]);
+ false ->
+ legacy_bs_is([], Last, IsYreg, Count0, Copies0, [SuccI0,I0|Acc])
+ end;
+legacy_bs_is([I|Is], Last, IsYreg, Count, Copies, Acc) ->
+ legacy_bs_is(Is, Last, IsYreg, Count, Copies, [I|Acc]);
+legacy_bs_is([], _Last, _IsYreg, Count, Copies, Acc) ->
+ {reverse(Acc),Count,Copies}.
+
%% sanitize(St0) -> St.
%% Remove constructs that can cause problems later:
%%
@@ -549,7 +685,7 @@ sanitize(#st{ssa=Blocks0,cnt=Count0}=St) ->
St#st{ssa=Blocks,cnt=Count}.
sanitize([L|Ls], Count0, Blocks0, Values0) ->
- #b_blk{is=Is0} = Blk0 = maps:get(L, Blocks0),
+ #b_blk{is=Is0} = Blk0 = map_get(L, Blocks0),
case sanitize_is(Is0, Count0, Values0, false, []) of
no_change ->
sanitize(Ls, Count0, Blocks0, Values0);
@@ -574,23 +710,24 @@ sanitize([], Count, Blocks0, Values) ->
false -> remove_unreachable(Ls, Blocks, Reachable, [])
end,Count}.
-sanitize_is([#b_set{op=get_map_element,
- args=[#b_literal{}=Map,Key]}=I0|Is],
- Count0, Values, _Changed, Acc) ->
- {MapVarName,Count} = new_var_name('@ssa_map', Count0),
- MapVar = #b_var{name=MapVarName},
- I = I0#b_set{args=[MapVar,Key]},
- Copy = #b_set{op=copy,dst=MapVar,args=[Map]},
- sanitize_is(Is, Count, Values, true, [I,Copy|Acc]);
-sanitize_is([#b_set{op=Op,dst=#b_var{name=Dst},args=Args0}=I0|Is0],
- Count, Values, Changed, Acc) ->
- Args = map(fun(#b_var{name=V}=Var) ->
- case Values of
- #{V:=New} -> New;
- #{} -> Var
- end;
- (Lit) -> Lit
- end, Args0),
+sanitize_is([#b_set{op=get_map_element,args=Args0}=I0|Is],
+ Count0, Values, Changed, Acc) ->
+ case sanitize_args(Args0, Values) of
+ [#b_literal{}=Map,Key] ->
+ %% Bind the literal map to a variable.
+ {MapVar,Count} = new_var('@ssa_map', Count0),
+ I = I0#b_set{args=[MapVar,Key]},
+ Copy = #b_set{op=copy,dst=MapVar,args=[Map]},
+ sanitize_is(Is, Count, Values, true, [I,Copy|Acc]);
+ [_,_]=Args0 ->
+ sanitize_is(Is, Count0, Values, Changed, [I0|Acc]);
+ [_,_]=Args ->
+ I = I0#b_set{args=Args},
+ sanitize_is(Is, Count0, Values, Changed, [I|Acc])
+ end;
+sanitize_is([#b_set{op=Op,dst=Dst,args=Args0}=I0|Is0],
+ Count, Values, Changed0, Acc) ->
+ Args = sanitize_args(Args0, Values),
case sanitize_instr(Op, Args, I0) of
{value,Value0} ->
Value = #b_literal{val=Value0},
@@ -598,7 +735,9 @@ sanitize_is([#b_set{op=Op,dst=#b_var{name=Dst},args=Args0}=I0|Is0],
{ok,I} ->
sanitize_is(Is0, Count, Values, true, [I|Acc]);
ok ->
- sanitize_is(Is0, Count, Values, Changed, [I0|Acc])
+ I = I0#b_set{args=Args},
+ Changed = Changed0 orelse Args =/= Args0,
+ sanitize_is(Is0, Count, Values, Changed, [I|Acc])
end;
sanitize_is([], Count, Values, Changed, Acc) ->
case Changed of
@@ -608,6 +747,14 @@ sanitize_is([], Count, Values, Changed, Acc) ->
no_change
end.
+sanitize_args(Args, Values) ->
+ map(fun(Var) ->
+ case Values of
+ #{Var:=New} -> New;
+ #{} -> Var
+ end
+ end, Args).
+
sanitize_instr({bif,Bif}, [#b_literal{val=Lit}], _I) ->
case erl_bifs:is_pure(erlang, Bif, 1) of
false ->
@@ -671,7 +818,7 @@ sanitize_badarg(I) ->
I#b_set{op=call,args=[Func,#b_literal{val=badarg}]}.
remove_unreachable([L|Ls], Blocks, Reachable, Acc) ->
- #b_blk{is=Is0} = Blk0 = maps:get(L, Blocks),
+ #b_blk{is=Is0} = Blk0 = map_get(L, Blocks),
case split_phis(Is0) of
{[_|_]=Phis,Rest} ->
Is = [prune_phi(Phi, Reachable) || Phi <- Phis] ++ Rest,
@@ -693,15 +840,16 @@ prune_phi(#b_set{args=Args0}=Phi, Reachable) ->
%%%
%% fix_tuples(St0) -> St.
-%% We must split tuple creation into two instruction to mirror the
-%% the way tuples are created in BEAM. Each put_tuple instruction is
-%% split into put_tuple_arity followed by put_tuple_elements.
+%% If compatibility with a previous version of Erlang has been
+%% requested, tuple creation must be split into two instruction to
+%% mirror the the way tuples are created in BEAM prior to OTP 22.
+%% Each put_tuple instruction is split into put_tuple_arity followed
+%% by put_tuple_elements.
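+%%
+%% Sketch of the split:
+%%
+%%   Tuple = put_tuple literal a, Var
+%%
+%% becomes
+%%
+%%   Tuple = put_tuple_arity literal 2
+%%   _Ignore = put_tuple_elements literal a, Var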
fix_tuples(#st{ssa=Blocks0,cnt=Count0}=St) ->
F = fun (#b_set{op=put_tuple,args=Args}=Put, C0) ->
Arity = #b_literal{val=length(Args)},
- {VarName,C} = new_var_name('@ssa_ignore', C0),
- Ignore = #b_var{name=VarName},
+ {Ignore,C} = new_var('@ssa_ignore', C0),
{[Put#b_set{op=put_tuple_arity,args=[Arity]},
#b_set{dst=Ignore,op=put_tuple_elements,args=Args}],C};
(I, C) -> {[I],C}
@@ -710,6 +858,202 @@ fix_tuples(#st{ssa=Blocks0,cnt=Count0}=St) ->
St#st{ssa=Blocks,cnt=Count}.
%%%
+%%% Introduce the set_tuple_element instructions to make
+%%% multiple-field record updates faster.
+%%%
+%%% The expansion of record field updates, when more than one field is
+%%% updated, but not a majority of the fields, will create a sequence of
+%%% calls to `erlang:setelement(Index, Value, Tuple)` where Tuple in the
+%%% first call is the original record tuple, and in the subsequent calls
+%%% Tuple is the result of the previous call. Furthermore, all Index
+%%% values are constant positive integers, and the first call to
+%%% `setelement` will have the greatest index. Thus all the following
+%%% calls do not actually need to test at run-time whether Tuple has type
+%%% tuple, nor that the index is within the tuple bounds.
+%%%
+%%% Since this optimization introduces destructive updates, it used to
+%%% be done as the very last Core Erlang pass before going to
+%%% lower-level code. However, it turns out that this kind of destructive
+%%% updates are awkward also in SSA code and can prevent or complicate
+%%% type analysis and aggressive optimizations.
+%%%
+%%% NOTE: Because there are no write barriers in the system, this kind of
+%%% optimization can only be done when we are sure that garbage
+%%% collection will not be triggered between the creation of the tuple
+%%% and the destructive updates - otherwise we might insert pointers
+%%% from an older generation to a newer.
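+%%%
+%%% For example (hypothetical record and update, for illustration):
+%%%
+%%%   -record(r, {a,b,c,d,e}).
+%%%   update(R, X, Y) -> R#r{c=X,e=Y}.
+%%%
+%%% expands into roughly
+%%%
+%%%   T = erlang:setelement(6, R, Y),
+%%%   erlang:setelement(4, T, X)
+%%%
+%%% and the second call can be turned into a destructive
+%%% set_tuple_element on the tuple T built by the first call.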
+%%%
+
+use_set_tuple_element(#st{ssa=Blocks0}=St) ->
+ Uses = count_uses(Blocks0),
+ RPO = reverse(beam_ssa:rpo(Blocks0)),
+ Blocks = use_ste_1(RPO, Uses, Blocks0),
+ St#st{ssa=Blocks}.
+
+use_ste_1([L|Ls], Uses, Blocks0) ->
+ {Blk0,Blocks} = use_ste_across(L, Uses, Blocks0),
+ #b_blk{is=Is0} = Blk0,
+ case use_ste_is(Is0, Uses) of
+ Is0 ->
+ use_ste_1(Ls, Uses, Blocks);
+ Is ->
+ Blk = Blk0#b_blk{is=Is},
+ use_ste_1(Ls, Uses, Blocks#{L:=Blk})
+ end;
+use_ste_1([], _, Blocks) -> Blocks.
+
+%%% Optimize within a single block.
+
+use_ste_is([#b_set{}=I|Is0], Uses) ->
+ Is = use_ste_is(Is0, Uses),
+ case extract_ste(I) of
+ none ->
+ [I|Is];
+ Extracted ->
+ use_ste_call(Extracted, I, Is, Uses)
+ end;
+use_ste_is([], _Uses) -> [].
+
+use_ste_call({Dst0,Pos0,_Var0,_Val0}, Call1, Is0, Uses) ->
+ case get_ste_call(Is0, []) of
+ {Prefix,{Dst1,Pos1,Dst0,Val1},Call2,Is}
+ when Pos1 > 0, Pos0 > Pos1 ->
+ case is_single_use(Dst0, Uses) of
+ true ->
+ Call = Call1#b_set{dst=Dst1},
+ Args = [Val1,Dst1,#b_literal{val=Pos1-1}],
+ Dsetel = Call2#b_set{op=set_tuple_element,
+ dst=Dst0,
+ args=Args},
+ [Call|Prefix] ++ [Dsetel|Is];
+ false ->
+ [Call1|Is0]
+ end;
+ _ ->
+ [Call1|Is0]
+ end.
+
+get_ste_call([#b_set{op=get_tuple_element}=I|Is], Acc) ->
+ get_ste_call(Is, [I|Acc]);
+get_ste_call([#b_set{op=call}=I|Is], Acc) ->
+ case extract_ste(I) of
+ none ->
+ none;
+ Extracted ->
+ {reverse(Acc),Extracted,I,Is}
+ end;
+get_ste_call(_, _) -> none.
+
+extract_ste(#b_set{op=call,dst=Dst,
+ args=[#b_remote{mod=#b_literal{val=M},
+ name=#b_literal{val=F}}|Args]}) ->
+ case {M,F,Args} of
+ {erlang,setelement,[#b_literal{val=Pos},Tuple,Val]} ->
+ {Dst,Pos,Tuple,Val};
+ {_,_,_} ->
+ none
+ end;
+extract_ste(#b_set{}) -> none.
+
+%%% Optimize across blocks within a try/catch block.
+
+use_ste_across(L, Uses, Blocks) ->
+ case map_get(L, Blocks) of
+ #b_blk{last=#b_br{bool=#b_var{}}}=Blk ->
+ try
+ use_ste_across_1(L, Blk, Uses, Blocks)
+ catch
+ throw:not_possible ->
+ {Blk,Blocks}
+ end;
+ #b_blk{}=Blk ->
+ {Blk,Blocks}
+ end.
+
+use_ste_across_1(L, Blk0, Uses, Blocks0) ->
+ #b_blk{is=IsThis,last=#b_br{bool=Bool,succ=Next}} = Blk0,
+ case reverse(IsThis) of
+ [#b_set{op=succeeded,dst=Bool,args=[Result]}=Succ0,
+ #b_set{op=call,args=[#b_remote{}|_],dst=Result}=Call1|Prefix] ->
+ case is_single_use(Bool, Uses) andalso
+ is_n_uses(2, Result, Uses) of
+ true -> ok;
+ false -> throw(not_possible)
+ end,
+ Call2 = use_ste_across_next(Next, Uses, Blocks0),
+ Is = [Call1,Call2],
+ case use_ste_is(Is, decrement_uses(Result, Uses)) of
+ [#b_set{}=Call,#b_set{op=set_tuple_element}=Ste] ->
+ Blocks1 = use_ste_fix_next(Ste, Next, Blocks0),
+ Succ = Succ0#b_set{args=[Call#b_set.dst]},
+ Blk = Blk0#b_blk{is=reverse(Prefix, [Call,Succ])},
+ Blocks = Blocks1#{L:=Blk},
+ {Blk,Blocks};
+ _ ->
+ throw(not_possible)
+ end;
+ _ ->
+ throw(not_possible)
+ end.
+
+use_ste_across_next(Next, Uses, Blocks) ->
+ case map_get(Next, Blocks) of
+ #b_blk{is=[#b_set{op=call,dst=Result,args=[#b_remote{}|_]}=Call,
+ #b_set{op=succeeded,dst=Bool,args=[Result]}],
+ last=#b_br{bool=Bool}} ->
+ case is_single_use(Bool, Uses) andalso
+ is_n_uses(2, Result, Uses) of
+ true -> ok;
+ false -> throw(not_possible)
+ end,
+ Call;
+ #b_blk{} ->
+ throw(not_possible)
+ end.
+
+use_ste_fix_next(Ste, Next, Blocks) ->
+ Blk0 = map_get(Next, Blocks),
+ #b_blk{is=[#b_set{op=call},#b_set{op=succeeded}],last=Br0} = Blk0,
+ Br = beam_ssa:normalize(Br0#b_br{bool=#b_literal{val=true}}),
+ Blk = Blk0#b_blk{is=[Ste],last=Br},
+ Blocks#{Next:=Blk}.
+
+%% Count how many times each variable is used.
+
+count_uses(Blocks) ->
+ count_uses_blk(maps:values(Blocks), #{}).
+
+count_uses_blk([#b_blk{is=Is,last=Last}|Bs], CountMap0) ->
+ F = fun(I, CountMap) ->
+ foldl(fun(Var, Acc) ->
+ case Acc of
+ #{Var:=3} -> Acc;
+ #{Var:=C} -> Acc#{Var:=C+1};
+ #{} -> Acc#{Var=>1}
+ end
+ end, CountMap, beam_ssa:used(I))
+ end,
+ CountMap = F(Last, foldl(F, CountMap0, Is)),
+ count_uses_blk(Bs, CountMap);
+count_uses_blk([], CountMap) -> CountMap.
+
+decrement_uses(V, Uses) ->
+ #{V:=C} = Uses,
+ Uses#{V:=C-1}.
+
+is_n_uses(N, V, Uses) ->
+ case Uses of
+ #{V:=N} -> true;
+ #{} -> false
+ end.
+
+is_single_use(V, Uses) ->
+ case Uses of
+ #{V:=1} -> true;
+ #{} -> false
+ end.
+
+%%%
%%% Find out where frames should be placed.
%%%
@@ -727,7 +1071,7 @@ fix_tuples(#st{ssa=Blocks0,cnt=Count0}=St) ->
%% a stack frame or set up a stack frame with a different size.
place_frames(#st{ssa=Blocks}=St) ->
- Doms = beam_ssa:dominators(Blocks),
+ {Doms,_} = beam_ssa:dominators(Blocks),
Ls = beam_ssa:rpo(Blocks),
Tried = gb_sets:empty(),
Frames0 = [],
@@ -735,7 +1079,7 @@ place_frames(#st{ssa=Blocks}=St) ->
St#st{frames=Frames}.
place_frames_1([L|Ls], Blocks, Doms, Tried0, Frames0) ->
- Blk = maps:get(L, Blocks),
+ Blk = map_get(L, Blocks),
case need_frame(Blk) of
true ->
%% This block needs a frame. Try to place it here.
@@ -846,15 +1190,15 @@ place_frame_here(L, Blocks, Doms, Frames) ->
%% Return all predecessors referenced in phi nodes.
phi_predecessors(L, Blocks) ->
- #b_blk{is=Is} = maps:get(L, Blocks),
+ #b_blk{is=Is} = map_get(L, Blocks),
[P || #b_set{op=phi,args=Args} <- Is, {_,P} <- Args].
%% is_dominated_by(Label, DominatedBy, Dominators) -> true|false.
%% Test whether block Label is dominated by block DominatedBy.
is_dominated_by(L, DomBy, Doms) ->
- DominatedBy = maps:get(L, Doms),
- ordsets:is_element(DomBy, DominatedBy).
+ DominatedBy = map_get(L, Doms),
+ member(DomBy, DominatedBy).
%% need_frame(#b_blk{}) -> true|false.
%% Test whether any of the instructions in the block requires a stack frame.
@@ -864,12 +1208,12 @@ need_frame(#b_blk{is=Is,last=#b_ret{arg=Ret}}) ->
need_frame(#b_blk{is=Is}) ->
need_frame_1(Is, body).
-need_frame_1([#b_set{op=make_fun,dst=#b_var{name=Fun}}|Is], {return,_}=Context) ->
+need_frame_1([#b_set{op=make_fun,dst=Fun}|Is], {return,_}=Context) ->
%% Since make_fun clobbers X registers, a stack frame is needed if
%% any of the following instructions use any other variable than
%% the one holding the reference to the created fun.
need_frame_1(Is, Context) orelse
- case beam_ssa:used(#b_blk{is=Is,last=#b_ret{arg=#b_var{name=Fun}}}) of
+ case beam_ssa:used(#b_blk{is=Is,last=#b_ret{arg=Fun}}) of
[Fun] -> false;
[_|_] -> true
end;
@@ -884,7 +1228,7 @@ need_frame_1([#b_set{op=call,args=[Func|_]}|Is], Context) ->
case Func of
#b_remote{mod=#b_literal{val=Mod},
name=#b_literal{val=Name},
- arity=Arity} ->
+ arity=Arity} when is_atom(Mod), is_atom(Name) ->
case erl_bifs:is_exit_bif(Mod, Name, Arity) of
true ->
false;
@@ -896,11 +1240,11 @@ need_frame_1([#b_set{op=call,args=[Func|_]}|Is], Context) ->
#b_remote{} ->
%% This is an apply(), which always needs a frame.
true;
- #b_var{} ->
- %% A fun call always needs a frame.
- true;
+ #b_local{} ->
+ Context =:= body orelse Is =/= [];
_ ->
- Context =:= body orelse Is =/= []
+ %% A fun call always needs a frame.
+ true
end;
need_frame_1([I|Is], Context) ->
beam_ssa:clobbers_xregs(I) orelse need_frame_1(Is, Context);
@@ -932,11 +1276,11 @@ is_trap_bif(_, _, _) -> false.
%%% used during matching.
%%%
%%% Depending on where variables are defined and used, they must
-%%% be handling in two different ways.
+%%% be handled in two different ways.
%%%
%%% Variables that are always defined in the receive (before branching
%%% out into the different clauses of the receive) and used after the
-%%% receive, must be handled in the following way: Before each
+%%% receive must be handled in the following way: Before each
%%% remove_message instruction, each such variable must be copied, and
%%% all variables must be consolidated using a phi node in the
%%% common exit block for the receive.
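A hypothetical source-level example (not taken from the patch; names are made up) of code that exercises this receive fix-up: Value is matched out of the message in every clause and used after the receive, so the pass inserts copy instructions for it (and, for values defined before the clauses branch out, merges the copies with a phi node in the common exit block):

-module(recv_sketch).
-export([wait/1]).

wait(Ref) ->
    receive
        {Ref, ok, Value} -> ok;
        {Ref, retry, Value} -> ok
    end,
    %% Value is live across the end of the receive, which is what
    %% forces the copy treatment described above.
    {done, Value}.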
@@ -984,15 +1328,13 @@ recv_common(Defs, Exit, Blocks) ->
%% in the exit block following the receive.
recv_fix_common([Msg0|T], Exit, Rm, Blocks0, Count0) ->
- {Msg1,Count1} = new_var_name('@recv', Count0),
- Msg = #b_var{name=Msg1},
+ {Msg,Count1} = new_var('@recv', Count0),
Blocks1 = beam_ssa:rename_vars(#{Msg0=>Msg}, [Exit], Blocks0),
N = length(Rm),
- {MsgVars0,Count} = new_var_names(duplicate(N, '@recv'), Count1),
- MsgVars = [#b_var{name=V} || V <- MsgVars0],
+ {MsgVars,Count} = new_vars(duplicate(N, '@recv'), Count1),
PhiArgs = fix_exit_phi_args(MsgVars, Rm, Exit, Blocks1),
Phi = #b_set{op=phi,dst=Msg,args=PhiArgs},
- ExitBlk0 = maps:get(Exit, Blocks1),
+ ExitBlk0 = map_get(Exit, Blocks1),
ExitBlk = ExitBlk0#b_blk{is=[Phi|ExitBlk0#b_blk.is]},
Blocks2 = Blocks1#{Exit:=ExitBlk},
Blocks = recv_fix_common_1(MsgVars, Rm, Msg0, Blocks2),
@@ -1003,8 +1345,8 @@ recv_fix_common([], _, _, Blocks, Count) ->
recv_fix_common_1([V|Vs], [Rm|Rms], Msg, Blocks0) ->
Ren = #{Msg=>V},
Blocks1 = beam_ssa:rename_vars(Ren, [Rm], Blocks0),
- #b_blk{is=Is0} = Blk0 = maps:get(Rm, Blocks1),
- Copy = #b_set{op=copy,dst=V,args=[#b_var{name=Msg}]},
+ #b_blk{is=Is0} = Blk0 = map_get(Rm, Blocks1),
+ Copy = #b_set{op=copy,dst=V,args=[Msg]},
Is = insert_after_phis(Is0, [Copy]),
Blk = Blk0#b_blk{is=Is},
Blocks = Blocks1#{Rm:=Blk},
@@ -1013,14 +1355,19 @@ recv_fix_common_1([], [], _Msg, Blocks) -> Blocks.
fix_exit_phi_args([V|Vs], [Rm|Rms], Exit, Blocks) ->
Path = beam_ssa:rpo([Rm], Blocks),
- Pred = exit_predecessor(Path, Exit),
- [{V,Pred}|fix_exit_phi_args(Vs, Rms, Exit, Blocks)];
+ Preds = exit_predecessors(Path, Exit, Blocks),
+ [{V,Pred} || Pred <- Preds] ++ fix_exit_phi_args(Vs, Rms, Exit, Blocks);
fix_exit_phi_args([], [], _, _) -> [].
-exit_predecessor([Pred,Exit|_], Exit) ->
- Pred;
-exit_predecessor([_|Bs], Exit) ->
- exit_predecessor(Bs, Exit).
+exit_predecessors([L|Ls], Exit, Blocks) ->
+ Blk = map_get(L, Blocks),
+ case member(Exit, beam_ssa:successors(Blk)) of
+ true ->
+ [L|exit_predecessors(Ls, Exit, Blocks)];
+ false ->
+ exit_predecessors(Ls, Exit, Blocks)
+ end;
+exit_predecessors([], _Exit, _Blocks) -> [].
%% fix_receive([Label], Defs, Blocks0, Count0) -> {Blocks,Count}.
%% Add a copy instruction for all variables that are matched out and
@@ -1030,16 +1377,14 @@ fix_receive([L|Ls], Defs, Blocks0, Count0) ->
{RmDefs,Used0} = beam_ssa:def_used([L], Blocks0),
Def = ordsets:subtract(Defs, RmDefs),
Used = ordsets:intersection(Def, Used0),
- {NewVs,Count} = new_var_names(Used, Count0),
- NewVars = [#b_var{name=V} || V <- NewVs],
+ {NewVars,Count} = new_vars([Base || #b_var{name=Base} <- Used], Count0),
Ren = zip(Used, NewVars),
Blocks1 = beam_ssa:rename_vars(Ren, [L], Blocks0),
- #b_blk{is=Is0} = Blk1 = maps:get(L, Blocks1),
- CopyIs = [#b_set{op=copy,dst=New,args=[#b_var{name=Old}]} ||
- {Old,New} <- Ren],
+ #b_blk{is=Is0} = Blk1 = map_get(L, Blocks1),
+ CopyIs = [#b_set{op=copy,dst=New,args=[Old]} || {Old,New} <- Ren],
Is = insert_after_phis(Is0, CopyIs),
Blk = Blk1#b_blk{is=Is},
- Blocks = maps:put(L, Blk, Blocks1),
+ Blocks = Blocks1#{L:=Blk},
fix_receive(Ls, Defs, Blocks, Count);
fix_receive([], _Defs, Blocks, Count) ->
{Blocks,Count}.
@@ -1064,7 +1409,7 @@ find_loop_exit_1(_, _, Exit) -> Exit.
find_rm_blocks(L, Blocks) ->
Seen = gb_sets:singleton(L),
- Blk = maps:get(L, Blocks),
+ Blk = map_get(L, Blocks),
Succ = beam_ssa:successors(Blk),
find_rm_blocks_1(Succ, Seen, Blocks).
@@ -1074,7 +1419,7 @@ find_rm_blocks_1([L|Ls], Seen0, Blocks) ->
find_rm_blocks_1(Ls, Seen0, Blocks);
false ->
Seen = gb_sets:insert(L, Seen0),
- Blk = maps:get(L, Blocks),
+ Blk = map_get(L, Blocks),
case find_rm_act(Blk#b_blk.is) of
prune ->
%% Looping back. Don't look at any successors.
@@ -1126,7 +1471,7 @@ find_rm_act([]) ->
find_yregs(#st{frames=[]}=St) ->
St;
find_yregs(#st{frames=[_|_]=Frames,args=Args,ssa=Blocks0}=St) ->
- FrameDefs = find_defs(Frames, Blocks0, [V || #b_var{name=V} <- Args]),
+ FrameDefs = find_defs(Frames, Blocks0, [V || #b_var{}=V <- Args]),
Blocks = find_yregs_1(FrameDefs, Blocks0),
St#st{ssa=Blocks}.
@@ -1136,16 +1481,16 @@ find_yregs_1([{F,Defs}|Fs], Blocks0) ->
Ls = beam_ssa:rpo([F], Blocks0),
Yregs0 = [],
Yregs = find_yregs_2(Ls, Blocks0, D0, Yregs0),
- Blk0 = maps:get(F, Blocks0),
+ Blk0 = map_get(F, Blocks0),
Blk = beam_ssa:add_anno(yregs, Yregs, Blk0),
Blocks = Blocks0#{F:=Blk},
find_yregs_1(Fs, Blocks);
find_yregs_1([], Blocks) -> Blocks.
find_yregs_2([L|Ls], Blocks0, D0, Yregs0) ->
- Blk0 = maps:get(L, Blocks0),
+ Blk0 = map_get(L, Blocks0),
#b_blk{is=Is,last=Last} = Blk0,
- Ys0 = maps:get(L, D0),
+ Ys0 = map_get(L, D0),
{Yregs1,Ys} = find_yregs_is(Is, Ys0, Yregs0),
Yregs = find_yregs_terminator(Last, Ys, Yregs1),
Successors = beam_ssa:successors(Blk0),
@@ -1172,7 +1517,7 @@ find_defs_1([L|Ls], Blocks, Frames, Seen0, Defs0, Acc0) ->
false ->
Seen1 = gb_sets:insert(L, Seen0),
{Acc,Seen} = find_defs_1(Ls, Blocks, Frames, Seen1, Defs0, Acc0),
- #b_blk{is=Is} = Blk = maps:get(L, Blocks),
+ #b_blk{is=Is} = Blk = map_get(L, Blocks),
Defs = find_defs_is(Is, Defs0),
Successors = beam_ssa:successors(Blk),
find_defs_1(Successors, Blocks, Frames, Seen, Defs, Acc)
@@ -1181,7 +1526,7 @@ find_defs_1([L|Ls], Blocks, Frames, Seen0, Defs0, Acc0) ->
find_defs_1([], _, _, Seen, _, Acc) ->
{Acc,Seen}.
-find_defs_is([#b_set{dst=#b_var{name=Dst}}|Is], Acc) ->
+find_defs_is([#b_set{dst=Dst}|Is], Acc) ->
find_defs_is(Is, [Dst|Acc]);
find_defs_is([], Acc) -> Acc.
@@ -1191,15 +1536,15 @@ find_update_succ([S|Ss], #dk{d=Defs0,k=Killed0}=DK0, D0) ->
Defs = ordsets:intersection(Defs0, Defs1),
Killed = ordsets:union(Killed0, Killed1),
DK = #dk{d=Defs,k=Killed},
- D = maps:put(S, DK, D0),
+ D = D0#{S:=DK},
find_update_succ(Ss, DK0, D);
#{} ->
- D = maps:put(S, DK0, D0),
+ D = D0#{S=>DK0},
find_update_succ(Ss, DK0, D)
end;
find_update_succ([], _, D) -> D.
-find_yregs_is([#b_set{dst=#b_var{name=Dst}}=I|Is], #dk{d=Defs0,k=Killed0}=Ys, Yregs0) ->
+find_yregs_is([#b_set{dst=Dst}=I|Is], #dk{d=Defs0,k=Killed0}=Ys, Yregs0) ->
Used = beam_ssa:used(I),
Yregs1 = ordsets:intersection(Used, Killed0),
Yregs = ordsets:union(Yregs0, Yregs1),
@@ -1284,7 +1629,7 @@ copy_retval(#st{frames=Frames,ssa=Blocks0,cnt=Count0}=St) ->
St#st{ssa=Blocks,cnt=Count}.
copy_retval_1([F|Fs], Blocks0, Count0) ->
- #b_blk{anno=#{yregs:=Yregs0},is=Is} = maps:get(F, Blocks0),
+ #b_blk{anno=#{yregs:=Yregs0},is=Is} = map_get(F, Blocks0),
Yregs1 = gb_sets:from_list(Yregs0),
Yregs = collect_yregs(Is, Yregs1),
Ls = beam_ssa:rpo([F], Blocks0),
@@ -1293,7 +1638,7 @@ copy_retval_1([F|Fs], Blocks0, Count0) ->
copy_retval_1([], Blocks, Count) ->
{Blocks,Count}.
-collect_yregs([#b_set{op=copy,dst=#b_var{name=Y},args=[#b_var{name=X}]}|Is],
+collect_yregs([#b_set{op=copy,dst=Y,args=[#b_var{}=X]}|Is],
Yregs0) ->
true = gb_sets:is_member(X, Yregs0), %Assertion.
Yregs = gb_sets:insert(Y, gb_sets:delete(X, Yregs0)),
@@ -1303,7 +1648,7 @@ collect_yregs([#b_set{}|Is], Yregs) ->
collect_yregs([], Yregs) -> Yregs.
copy_retval_2([L|Ls], Yregs, Copy0, Blocks0, Count0) ->
- #b_blk{is=Is0,last=Last} = Blk = maps:get(L, Blocks0),
+ #b_blk{is=Is0,last=Last} = Blk = map_get(L, Blocks0),
RC = case {Last,Ls} of
{#b_br{succ=Succ,fail=?BADARG_BLOCK},[Succ|_]} ->
true;
@@ -1330,17 +1675,20 @@ copy_retval_is([#b_set{op=put_tuple_elements,args=Args0}=I0], false, _Yregs,
Copy, Count, Acc) ->
I = I0#b_set{args=copy_sub_args(Args0, Copy)},
{reverse(Acc, [I|acc_copy([], Copy)]),Count};
+copy_retval_is([#b_set{op=Op}=I0], false, Yregs, Copy, Count0, Acc0)
+ when Op =:= call; Op =:= make_fun ->
+ {I,Count,Acc} = place_retval_copy(I0, Yregs, Copy, Count0, Acc0),
+ {reverse(Acc, [I]),Count};
copy_retval_is([#b_set{}]=Is, false, _Yregs, Copy, Count, Acc) ->
{reverse(Acc, acc_copy(Is, Copy)),Count};
copy_retval_is([#b_set{},#b_set{op=succeeded}]=Is, false, _Yregs, Copy, Count, Acc) ->
{reverse(Acc, acc_copy(Is, Copy)),Count};
-copy_retval_is([#b_set{op=Op,dst=#b_var{name=RetVal}=Dst}=I0|Is], RC, Yregs,
+copy_retval_is([#b_set{op=Op,dst=#b_var{name=RetName}=Dst}=I0|Is], RC, Yregs,
Copy0, Count0, Acc0) when Op =:= call; Op =:= make_fun ->
{I1,Count1,Acc} = place_retval_copy(I0, Yregs, Copy0, Count0, Acc0),
- case gb_sets:is_member(RetVal, Yregs) of
+ case gb_sets:is_member(Dst, Yregs) of
true ->
- {NewVarName,Count} = new_var_name(RetVal, Count1),
- NewVar = #b_var{name=NewVarName},
+ {NewVar,Count} = new_var(RetName, Count1),
Copy = #b_set{op=copy,dst=Dst,args=[NewVar]},
I = I1#b_set{dst=NewVar},
copy_retval_is(Is, RC, Yregs, Copy, Count, [I|Acc]);
@@ -1390,16 +1738,15 @@ copy_retval_is([], RC, _, Copy, Count, Acc) ->
place_retval_copy(I, _Yregs, none, Count, Acc) ->
{I,Count,Acc};
place_retval_copy(#b_set{args=[F|Args0]}=I, Yregs, Copy, Count0, Acc0) ->
- #b_set{dst=#b_var{name=Avoid}} = Copy,
+ #b_set{dst=Avoid} = Copy,
{Args,Acc1,Count} = copy_func_args(Args0, Yregs, Avoid, Acc0, [], Count0),
Acc = [Copy|Acc1],
{I#b_set{args=[F|Args]},Count,Acc}.
-copy_func_args([#b_var{name=V}=A|As], Yregs, Avoid, CopyAcc, Acc, Count0) ->
- case gb_sets:is_member(V, Yregs) of
- true when V =/= Avoid ->
- {NewVarName,Count} = new_var_name(V, Count0),
- NewVar = #b_var{name=NewVarName},
+copy_func_args([#b_var{name=AName}=A|As], Yregs, Avoid, CopyAcc, Acc, Count0) ->
+ case gb_sets:is_member(A, Yregs) of
+ true when A =/= Avoid ->
+ {NewVar,Count} = new_var(AName, Count0),
Copy = #b_set{op=copy,dst=NewVar,args=[A]},
copy_func_args(As, Yregs, Avoid, [Copy|CopyAcc], [NewVar|Acc], Count);
_ ->
@@ -1443,7 +1790,7 @@ opt_get_list(#st{ssa=Blocks,res=Res}=St) ->
St#st{ssa=opt_get_list_1(Ls, ResMap, Blocks)}.
opt_get_list_1([L|Ls], Res, Blocks0) ->
- #b_blk{is=Is0} = Blk = maps:get(L, Blocks0),
+ #b_blk{is=Is0} = Blk = map_get(L, Blocks0),
case opt_get_list_is(Is0, Res, [], false) of
no ->
opt_get_list_1(Ls, Res, Blocks0);
@@ -1453,9 +1800,9 @@ opt_get_list_1([L|Ls], Res, Blocks0) ->
end;
opt_get_list_1([], _, Blocks) -> Blocks.
-opt_get_list_is([#b_set{op=get_hd,dst=#b_var{name=Hd},
+opt_get_list_is([#b_set{op=get_hd,dst=Hd,
args=[Cons]}=GetHd,
- #b_set{op=get_tl,dst=#b_var{name=Tl},
+ #b_set{op=get_tl,dst=Tl,
args=[Cons]}=GetTl|Is],
Res, Acc, Changed) ->
%% Note that when this pass is run, only Y registers have
@@ -1497,12 +1844,12 @@ number_instructions(#st{ssa=Blocks0}=St) ->
St#st{ssa=number_is_1(Ls, 1, Blocks0)}.
number_is_1([L|Ls], N0, Blocks0) ->
- #b_blk{is=Is0,last=Last0} = Bl0 = maps:get(L, Blocks0),
+ #b_blk{is=Is0,last=Last0} = Bl0 = map_get(L, Blocks0),
{Is,N1} = number_is_2(Is0, N0, []),
Last = beam_ssa:add_anno(n, N1, Last0),
N = N1 + 2,
Bl = Bl0#b_blk{is=Is,last=Last},
- Blocks = maps:put(L, Bl, Blocks0),
+ Blocks = Blocks0#{L:=Bl},
number_is_1(Ls, N, Blocks);
number_is_1([], _, Blocks) -> Blocks.
@@ -1519,13 +1866,13 @@ number_is_2([], N, Acc) ->
%%%
live_intervals(#st{args=Args,ssa=Blocks}=St) ->
- Vars0 = [{V,{0,1}} || #b_var{name=V} <- Args],
+ Vars0 = [{V,{0,1}} || #b_var{}=V <- Args],
F = fun(L, _, A) -> live_interval_blk(L, Blocks, A) end,
LiveMap0 = #{},
- Acc0 = {[],[],LiveMap0},
- {Vars,Aliases,_} = beam_ssa:fold_po(F, Acc0, Blocks),
+ Acc0 = {[],LiveMap0},
+ {Vars,_} = beam_ssa:fold_po(F, Acc0, Blocks),
Intervals = merge_ranges(rel2fam(Vars0++Vars)),
- St#st{intervals=Intervals,aliases=Aliases}.
+ St#st{intervals=Intervals}.
merge_ranges([{V,Rs}|T]) ->
[{V,merge_ranges_1(Rs)}|merge_ranges(T)];
@@ -1537,20 +1884,19 @@ merge_ranges_1([R|Rs]) ->
[R|merge_ranges_1(Rs)];
merge_ranges_1([]) -> [].
-live_interval_blk(L, Blocks, {Vars0,Aliases0,LiveMap0}) ->
+live_interval_blk(L, Blocks, {Vars0,LiveMap0}) ->
Live0 = [],
Successors = beam_ssa:successors(L, Blocks),
Live1 = update_successors(Successors, L, Blocks, LiveMap0, Live0),
%% Add ranges for all variables that are live in the successors.
- #b_blk{is=Is,last=Last} = maps:get(L, Blocks),
+ #b_blk{is=Is,last=Last} = map_get(L, Blocks),
End = beam_ssa:get_anno(n, Last),
Use = [{V,{use,End+1}} || V <- Live1],
%% Determine used and defined variables in this block.
FirstNumber = first_number(Is, Last),
- {UseDef0,Aliases} = live_interval_blk_1([Last|reverse(Is)],
- FirstNumber, Aliases0, Use),
+ UseDef0 = live_interval_blk_1([Last|reverse(Is)], FirstNumber, Use),
UseDef = rel2fam(UseDef0),
%% Update what is live at the beginning of this block and
@@ -1563,7 +1909,7 @@ live_interval_blk(L, Blocks, {Vars0,Aliases0,LiveMap0}) ->
%% Construct the ranges for this block.
Vars = make_block_ranges(UseDef, FirstNumber, Vars0),
- {Vars,Aliases,LiveMap}.
+ {Vars,LiveMap}.
make_block_ranges([{V,[{def,Def}]}|Vs], First, Acc) ->
make_block_ranges(Vs, First, [{V,{Def,Def}}|Acc]);
@@ -1575,37 +1921,29 @@ make_block_ranges([{V,[{use,_}|_]=Uses}|Vs], First, Acc) ->
make_block_ranges(Vs, First, [{V,{First,Last}}|Acc]);
make_block_ranges([], _, Acc) -> Acc.
-live_interval_blk_1([#b_set{op=phi,dst=#b_var{name=Dst}}|Is],
- FirstNumber, Aliases, Acc0) ->
+live_interval_blk_1([#b_set{op=phi,dst=Dst}|Is], FirstNumber, Acc0) ->
Acc = [{Dst,{def,FirstNumber}}|Acc0],
- live_interval_blk_1(Is, FirstNumber, Aliases, Acc);
-live_interval_blk_1([#b_set{op=bs_start_match}=I|Is], FirstNumber,
- Aliases0, Acc0) ->
+ live_interval_blk_1(Is, FirstNumber, Acc);
+live_interval_blk_1([#b_set{op=bs_start_match}=I|Is],
+ FirstNumber, Acc0) ->
N = beam_ssa:get_anno(n, I),
- #b_set{dst=#b_var{name=Dst}} = I,
+ #b_set{dst=Dst} = I,
Acc1 = [{Dst,{def,N}}|Acc0],
- Aliases = case beam_ssa:get_anno(reuse_for_context, I) of
- true ->
- #b_set{args=[#b_var{name=Src}]} = I,
- [{Dst,Src}|Aliases0];
- false ->
- Aliases0
- end,
Acc = [{V,{use,N}} || V <- beam_ssa:used(I)] ++ Acc1,
- live_interval_blk_1(Is, FirstNumber, Aliases, Acc);
-live_interval_blk_1([I|Is], FirstNumber, Aliases, Acc0) ->
+ live_interval_blk_1(Is, FirstNumber, Acc);
+live_interval_blk_1([I|Is], FirstNumber, Acc0) ->
N = beam_ssa:get_anno(n, I),
Acc1 = case I of
- #b_set{dst=#b_var{name=Dst}} ->
+ #b_set{dst=Dst} ->
[{Dst,{def,N}}|Acc0];
_ ->
Acc0
end,
Used = beam_ssa:used(I),
Acc = [{V,{use,N}} || V <- Used] ++ Acc1,
- live_interval_blk_1(Is, FirstNumber, Aliases, Acc);
-live_interval_blk_1([], _FirstNumber, Aliases, Acc) ->
- {Acc,Aliases}.
+ live_interval_blk_1(Is, FirstNumber, Acc);
+live_interval_blk_1([], _FirstNumber, Acc) ->
+ Acc.
%% first_number([#b_set{}]) -> InstructionNumber.
%% Return the number for the first instruction for the block.
@@ -1621,7 +1959,7 @@ first_number([], Last) ->
update_successors([L|Ls], Pred, Blocks, LiveMap, Live0) ->
Live1 = ordsets:union(Live0, get_live(L, LiveMap)),
- #b_blk{is=Is} = maps:get(L, Blocks),
+ #b_blk{is=Is} = map_get(L, Blocks),
Live = update_live_phis(Is, Pred, Live1),
update_successors(Ls, Pred, Blocks, LiveMap, Live);
update_successors([], _, _, _, Live) -> Live.
@@ -1632,9 +1970,9 @@ get_live(L, LiveMap) ->
#{} -> []
end.
-update_live_phis([#b_set{op=phi,dst=#b_var{name=Killed},args=Args}|Is],
+update_live_phis([#b_set{op=phi,dst=Killed,args=Args}|Is],
Pred, Live0) ->
- Used = [V || {#b_var{name=V},L} <- Args, L =:= Pred],
+ Used = [V || {#b_var{}=V,L} <- Args, L =:= Pred],
Live1 = ordsets:union(ordsets:from_list(Used), Live0),
Live = ordsets:del_element(Killed, Live1),
update_live_phis(Is, Pred, Live);
@@ -1647,7 +1985,7 @@ update_live_phis(_, _, Live) -> Live.
%% reserve_yregs(St0) -> St.
%% In each block that allocates a stack frame, insert instructions
%% that copy variables that must be in Y registers (given by
-%% YRegisters) to new variables.
+%% the `yregs` annotation) to new variables.
%%
%% Also allocate specific Y registers for try and catch tags.
%% The outermost try/catch tag is placed in y0, any directly
@@ -1659,7 +1997,7 @@ reserve_yregs(#st{frames=Frames}=St0) ->
foldl(fun reserve_yregs_1/2, St0, Frames).
reserve_yregs_1(L, #st{ssa=Blocks0,cnt=Count0,res=Res0}=St) ->
- Blk = maps:get(L, Blocks0),
+ Blk = map_get(L, Blocks0),
Yregs = beam_ssa:get_anno(yregs, Blk),
{Def,Used} = beam_ssa:def_used([L], Blocks0),
UsedYregs = ordsets:intersection(Yregs, Used),
@@ -1685,7 +2023,7 @@ reserve_try_tags_1([L|Ls], Blocks, Seen0, ActMap0) ->
reserve_try_tags_1(Ls, Blocks, Seen0, ActMap0);
false ->
Seen1 = gb_sets:insert(L, Seen0),
- #b_blk{is=Is} = Blk = maps:get(L, Blocks),
+ #b_blk{is=Is} = Blk = map_get(L, Blocks),
Active0 = get_active(L, ActMap0),
Active = reserve_try_tags_is(Is, Active0),
Successors = beam_ssa:successors(Blk),
@@ -1702,10 +2040,10 @@ get_active(L, ActMap) ->
#{} -> #{}
end.
-reserve_try_tags_is([#b_set{op=new_try_tag,dst=#b_var{name=V}}|Is], Active) ->
+reserve_try_tags_is([#b_set{op=new_try_tag,dst=V}|Is], Active) ->
N = map_size(Active),
reserve_try_tags_is(Is, Active#{V=>N});
-reserve_try_tags_is([#b_set{op=kill_try_tag,args=[#b_var{name=Tag}]}|Is], Active) ->
+reserve_try_tags_is([#b_set{op=kill_try_tag,args=[Tag]}|Is], Active) ->
reserve_try_tags_is(Is, maps:remove(Tag, Active));
reserve_try_tags_is([_|Is], Active) ->
reserve_try_tags_is(Is, Active);
@@ -1725,17 +2063,15 @@ update_act_map([], _, ActMap) -> ActMap.
rename_vars([], _, Blocks, Count) ->
{[],Blocks,Count};
rename_vars(Vs, L, Blocks0, Count0) ->
- {NewVs,Count} = new_var_names(Vs, Count0),
- NewVars = [#b_var{name=V} || V <- NewVs],
+ {NewVars,Count} = new_vars([Base || #b_var{name=Base} <- Vs], Count0),
Ren = zip(Vs, NewVars),
Blocks1 = beam_ssa:rename_vars(Ren, [L], Blocks0),
- #b_blk{is=Is0} = Blk0 = maps:get(L, Blocks1),
- CopyIs = [#b_set{op=copy,dst=New,args=[#b_var{name=Old}]} ||
- {Old,New} <- Ren],
+ #b_blk{is=Is0} = Blk0 = map_get(L, Blocks1),
+ CopyIs = [#b_set{op=copy,dst=New,args=[Old]} || {Old,New} <- Ren],
Is = insert_after_phis(Is0, CopyIs),
Blk = Blk0#b_blk{is=Is},
- Blocks = maps:put(L, Blk, Blocks1),
- {NewVs,Blocks,Count}.
+ Blocks = Blocks1#{L:=Blk},
+ {NewVars,Blocks,Count}.
insert_after_phis([#b_set{op=phi}=I|Is], InsertIs) ->
[I|insert_after_phis(Is, InsertIs)];
@@ -1756,7 +2092,7 @@ frame_size(#st{frames=Frames,regs=Regs,ssa=Blocks0}=St) ->
frame_size_1(L, Regs, Blocks0) ->
Def = beam_ssa:def([L], Blocks0),
- Yregs0 = [maps:get(V, Regs) || V <- Def, is_yreg(maps:get(V, Regs))],
+ Yregs0 = [map_get(V, Regs) || V <- Def, is_yreg(map_get(V, Regs))],
Yregs = ordsets:from_list(Yregs0),
FrameSize = length(ordsets:from_list(Yregs)),
if
@@ -1768,17 +2104,17 @@ frame_size_1(L, Regs, Blocks0) ->
true ->
ok
end,
- Blk0 = maps:get(L, Blocks0),
+ Blk0 = map_get(L, Blocks0),
Blk = beam_ssa:add_anno(frame_size, FrameSize, Blk0),
%% Insert an annotation for frame deallocation on
%% each #b_ret{}.
- Blocks = maps:put(L, Blk, Blocks0),
+ Blocks = Blocks0#{L:=Blk},
Reachable = beam_ssa:rpo([L], Blocks),
frame_deallocate(Reachable, FrameSize, Blocks).
frame_deallocate([L|Ls], Size, Blocks0) ->
- Blk0 = maps:get(L, Blocks0),
+ Blk0 = map_get(L, Blocks0),
Blk = case Blk0 of
#b_blk{last=#b_ret{}=Ret0} ->
Ret = beam_ssa:add_anno(deallocate, Size, Ret0),
@@ -1786,7 +2122,7 @@ frame_deallocate([L|Ls], Size, Blocks0) ->
#b_blk{} ->
Blk0
end,
- Blocks = maps:put(L, Blk, Blocks0),
+ Blocks = Blocks0#{L:=Blk},
frame_deallocate(Ls, Size, Blocks);
frame_deallocate([], _, Blocks) -> Blocks.
@@ -1799,7 +2135,7 @@ frame_deallocate([], _, Blocks) -> Blocks.
turn_yregs(#st{frames=Frames,regs=Regs0,ssa=Blocks}=St) ->
Regs1 = foldl(fun(L, A) ->
- Blk = maps:get(L, Blocks),
+ Blk = map_get(L, Blocks),
FrameSize = beam_ssa:get_anno(frame_size, Blk),
Def = beam_ssa:def([L], Blocks),
[turn_yregs_1(Def, FrameSize, Regs0)|A]
@@ -1808,7 +2144,7 @@ turn_yregs(#st{frames=Frames,regs=Regs0,ssa=Blocks}=St) ->
St#st{regs=Regs}.
turn_yregs_1(Def, FrameSize, Regs) ->
- Yregs0 = [{maps:get(V, Regs),V} || V <- Def, is_yreg(maps:get(V, Regs))],
+ Yregs0 = [{map_get(V, Regs),V} || V <- Def, is_yreg(map_get(V, Regs))],
Yregs1 = rel2fam(Yregs0),
FrameSize = length(Yregs1),
Yregs2 = [{{y,FrameSize-Y-1},Vs} || {{y,Y},Vs} <- Yregs1],
@@ -1842,7 +2178,7 @@ reserve_regs(#st{args=Args,ssa=Blocks,intervals=Intervals,res=Res0}=St) ->
Res = maps:from_list(Res3),
St#st{res=reserve_xregs(Blocks, Res)}.
-reserve_arg_regs([#b_var{name=Arg}|Is], N, Acc) ->
+reserve_arg_regs([#b_var{}=Arg|Is], N, Acc) ->
reserve_arg_regs(Is, N+1, [{Arg,{x,N}}|Acc]);
reserve_arg_regs([], _, Acc) -> Acc.
@@ -1854,25 +2190,42 @@ reserve_zregs(Blocks, Intervals, Res) ->
end,
beam_ssa:fold_rpo(F, [0], Res, Blocks).
+reserve_zreg([#b_set{op=Op,dst=Dst}],
+ #b_br{bool=Dst}, _ShortLived, A) when Op =:= call;
+ Op =:= get_tuple_element ->
+ %% If type optimization has determined that the result of these
+ %% instructions can be used directly in a branch, we must avoid reserving a
+ %% z register or code generation will fail.
+ A;
reserve_zreg([#b_set{op={bif,tuple_size},dst=Dst},
- #b_set{op={bif,'=:='},args=[Dst,Val]}], _Last, ShortLived, A0) ->
- case Val of
- #b_literal{val=Arity} when Arity bsr 32 =:= 0 ->
+ #b_set{op={bif,'=:='},args=[Dst,Val]}], Last, ShortLived, A0) ->
+ case {Val,Last} of
+ {#b_literal{val=Arity},#b_br{bool=#b_var{}}} when Arity bsr 32 =:= 0 ->
%% These two instructions can be combined to a test_arity
%% instruction provided that the arity variable is short-lived.
reserve_zreg_1(Dst, ShortLived, A0);
- _ ->
+ {_,_} ->
+ %% Either the arity is too big, or the boolean value is not
+ %% used in a conditional branch.
A0
end;
reserve_zreg([#b_set{op={bif,tuple_size},dst=Dst}],
#b_switch{}, ShortLived, A) ->
reserve_zreg_1(Dst, ShortLived, A);
-reserve_zreg([#b_set{op=Op,dst=#b_var{name=Dst}}|Is], Last, ShortLived, A0) ->
+reserve_zreg([#b_set{op={bif,'xor'}}], _Last, _ShortLived, A) ->
+ %% There is no short, easy way to rewrite 'xor' to a series of
+ %% test instructions.
+ A;
+reserve_zreg([#b_set{op={bif,is_record}}], _Last, _ShortLived, A) ->
+ %% There is no short, easy way to rewrite is_record/2 to a series of
+ %% test instructions.
+ A;
+reserve_zreg([#b_set{op=Op,dst=Dst}|Is], Last, ShortLived, A0) ->
IsZReg = case Op of
- context_to_binary -> true;
bs_match_string -> true;
- bs_restore -> true;
bs_save -> true;
+ bs_restore -> true;
+ bs_set_position -> true;
{float,clearerror} -> true;
kill_try_tag -> true;
landingpad -> true;
@@ -1893,7 +2246,7 @@ reserve_zreg([], #b_br{bool=Bool}, ShortLived, A) ->
reserve_zreg_1(Bool, ShortLived, A);
reserve_zreg([], _, _, A) -> A.
-reserve_zreg_1(#b_var{name=V}, ShortLived, A) ->
+reserve_zreg_1(#b_var{}=V, ShortLived, A) ->
case cerl_sets:is_element(V, ShortLived) of
true -> [{V,z}|A];
false -> A
@@ -1906,7 +2259,7 @@ reserve_fregs(Blocks, Res) ->
end,
beam_ssa:fold_rpo(F, [0], Res, Blocks).
-reserve_freg([#b_set{op={float,Op},dst=#b_var{name=V}}|Is], Res) ->
+reserve_freg([#b_set{op={float,Op},dst=V}|Is], Res) ->
case Op of
get ->
reserve_freg(Is, Res);
@@ -1931,23 +2284,95 @@ reserve_freg([], Res) -> Res.
%% will allocate the lowest free X register for the variable.
reserve_xregs(Blocks, Res) ->
- F = fun(L, #b_blk{is=Is,last=Last}, R) ->
- {Xs0,Used0} = reserve_terminator(L, Last, Blocks, R),
- reserve_xregs_is(reverse(Is), R, Xs0, Used0)
- end,
- beam_ssa:fold_po(F, Res, Blocks).
-
-reserve_xregs_is([#b_set{op=Op,dst=#b_var{name=Dst},args=Args}=I|Is], Res0, Xs0, Used0) ->
- Xs1 = case is_gc_safe(I) of
- true ->
- Xs0;
- false ->
- %% There may be a garbage collection after executing this
- %% instruction. We will need prune the list of preferred
- %% X registers.
- res_xregs_prune(Xs0, Used0, Res0)
- end,
- Res = reserve_xreg(Dst, Xs1, Res0),
+ Ls = reverse(beam_ssa:rpo(Blocks)),
+ reserve_xregs(Ls, Blocks, #{}, Res).
+
+reserve_xregs([L|Ls], Blocks, XsMap0, Res0) ->
+ #b_blk{anno=Anno,is=Is0,last=Last} = map_get(L, Blocks),
+
+ %% Calculate mapping from variable name to the preferred
+ %% register.
+ Xs0 = reserve_terminator(L, Is0, Last, Blocks, XsMap0, Res0),
+
+ %% We need to figure out where the code generator will
+ %% place instructions that will do a garbage collection.
+ %% Insert 'gc' markers as pseudo-instructions in the
+ %% instruction sequence.
+ Is1 = reverse(Is0),
+ Is2 = res_place_gc_instrs(Is1, []),
+ Is = res_place_allocate(Anno, Is2),
+
+ %% Add register hints for variables that are defined
+ %% in the (reversed) instruction sequence.
+ {Res,Xs} = reserve_xregs_is(Is, Res0, Xs0, []),
+
+ XsMap = XsMap0#{L=>Xs},
+ reserve_xregs(Ls, Blocks, XsMap, Res);
+reserve_xregs([], _, _, Res) -> Res.
+
+%% Insert explicit 'gc' markers at the points where there will
+%% be a garbage collection. (Note that the instruction
+%% sequence passed to this function is reversed.)
+
+res_place_gc_instrs([#b_set{op=phi}=I|Is], Acc) ->
+ res_place_gc_instrs(Is, [I|Acc]);
+res_place_gc_instrs([#b_set{op=Op}=I|Is], Acc)
+ when Op =:= call; Op =:= make_fun ->
+ case Acc of
+ [] ->
+ res_place_gc_instrs(Is, [I|Acc]);
+ [GC|_] when GC =:= gc; GC =:= test_heap ->
+ res_place_gc_instrs(Is, [I,gc|Acc]);
+ [_|_] ->
+ res_place_gc_instrs(Is, [I,gc|Acc])
+ end;
+res_place_gc_instrs([#b_set{op=Op,args=Args}=I|Is], Acc0) ->
+ case beam_ssa_codegen:classify_heap_need(Op, Args) of
+ neutral ->
+ case Acc0 of
+ [test_heap|Acc] ->
+ res_place_gc_instrs(Is, [test_heap,I|Acc]);
+ Acc ->
+ res_place_gc_instrs(Is, [I|Acc])
+ end;
+ {put,_} ->
+ case Acc0 of
+ [test_heap|Acc] ->
+ res_place_gc_instrs(Is, [test_heap,I|Acc]);
+ Acc ->
+ res_place_gc_instrs(Is, [test_heap,I|Acc])
+ end;
+ _ ->
+ res_place_gc_instrs(Is, [gc,I|Acc0])
+ end;
+res_place_gc_instrs([], Acc) ->
+ %% Reverse and replace 'test_heap' markers with 'gc'.
+ %% (The distinction is no longer useful.)
+ res_place_gc_instrs_rev(Acc, []).
+
+res_place_gc_instrs_rev([test_heap|Is], [gc|_]=Acc) ->
+ res_place_gc_instrs_rev(Is, Acc);
+res_place_gc_instrs_rev([test_heap|Is], Acc) ->
+ res_place_gc_instrs_rev(Is, [gc|Acc]);
+res_place_gc_instrs_rev([gc|Is], [gc|_]=Acc) ->
+ res_place_gc_instrs_rev(Is, Acc);
+res_place_gc_instrs_rev([I|Is], Acc) ->
+ res_place_gc_instrs_rev(Is, [I|Acc]);
+res_place_gc_instrs_rev([], Acc) -> Acc.
+
+res_place_allocate(#{yregs:=_}, Is) ->
+ %% There will be an 'allocate' instruction inserted here.
+ Is ++ [gc];
+res_place_allocate(#{}, Is) -> Is.
+
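A standalone sketch (not part of the patch; collapse/2 and the atoms i1..i3 are made up for illustration) of what res_place_gc_instrs_rev/2 above does: while the reversed sequence is reversed back, test_heap markers become gc markers and adjacent markers collapse into one:

-module(gc_marker_sketch).
-export([collapse/2]).

collapse([M | Is], [gc | _] = Acc) when M =:= gc; M =:= test_heap ->
    collapse(Is, Acc);                          %Merge adjacent markers.
collapse([M | Is], Acc) when M =:= gc; M =:= test_heap ->
    collapse(Is, [gc | Acc]);
collapse([I | Is], Acc) ->
    collapse(Is, [I | Acc]);
collapse([], Acc) -> Acc.

%% collapse([i3,test_heap,gc,i2,i1], []) yields [i1,i2,gc,i3].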
+reserve_xregs_is([gc|Is], Res, Xs0, Used) ->
+ %% At this point, the code generator will place an instruction
+ %% that does a garbage collection. We must prune the remembered
+ %% registers.
+ Xs = res_xregs_prune(Xs0, Used, Res),
+ reserve_xregs_is(Is, Res, Xs, Used);
+reserve_xregs_is([#b_set{op=Op,dst=Dst,args=Args}=I|Is], Res0, Xs0, Used0) ->
+ Res = reserve_xreg(Dst, Xs0, Res0),
Used1 = ordsets:union(Used0, beam_ssa:used(I)),
Used = ordsets:del_element(Dst, Used1),
case Op of
@@ -1958,28 +2383,74 @@ reserve_xregs_is([#b_set{op=Op,dst=#b_var{name=Dst},args=Args}=I|Is], Res0, Xs0,
Xs = reserve_call_args(tl(Args)),
reserve_xregs_is(Is, Res, Xs, Used);
_ ->
- reserve_xregs_is(Is, Res, Xs1, Used)
+ reserve_xregs_is(Is, Res, Xs0, Used)
end;
-reserve_xregs_is([], Res, _Xs, _Used) -> Res.
-
-reserve_terminator(L, #b_br{bool=#b_literal{val=true},succ=Succ}, Blocks, Res) ->
- case maps:get(Succ, Blocks) of
+reserve_xregs_is([], Res, Xs, _Used) ->
+ {Res,Xs}.
+
+%% Pick up register hints from the successors of this block.
+reserve_terminator(_L, _Is, #b_br{bool=#b_var{},succ=Succ,fail=?BADARG_BLOCK},
+ _Blocks, XsMap, _Res) ->
+ %% We know that no variables are used at ?BADARG_BLOCK, so
+ %% any register hints from the success blocks are safe to use.
+ map_get(Succ, XsMap);
+reserve_terminator(L, Is, #b_br{bool=#b_var{},succ=Succ,fail=Fail},
+ Blocks, XsMap, Res) when Succ =/= Fail ->
+ #{Succ:=SuccBlk,Fail:=FailBlk} = Blocks,
+ case {SuccBlk,FailBlk} of
+ {#b_blk{is=[],last=#b_br{succ=PhiL,fail=PhiL}},
+ #b_blk{is=[],last=#b_br{succ=PhiL,fail=PhiL}}} ->
+ %% Both branches ultimately transfer to the same
+ %% block (via two blocks with no instructions).
+ %% Pick up register hints from the phi nodes
+ %% in the common block.
+ #{PhiL:=#b_blk{is=PhiIs}} = Blocks,
+ Xs = res_xregs_from_phi(PhiIs, Succ, Res, #{}),
+ res_xregs_from_phi(PhiIs, Fail, Res, Xs);
+ {_,_} when Is =/= [] ->
+ case last(Is) of
+ #b_set{op=succeeded,args=[Arg]} ->
+ %% We know that Arg will not be used at the failure
+ %% label, so we can pick up register hints from the
+ %% success label.
+ Br = #b_br{bool=#b_literal{val=true},succ=Succ,fail=Succ},
+ case reserve_terminator(L, [], Br, Blocks, XsMap, Res) of
+ #{Arg:=Reg} -> #{Arg=>Reg};
+ #{} -> #{}
+ end;
+ _ ->
+ %% Register hints from the success block may not
+ %% be safe at the failure block, and vice versa.
+ #{}
+ end;
+ {_,_} ->
+ %% Register hints from the success block may not
+ %% be safe at the failure block, and vice versa.
+ #{}
+ end;
+reserve_terminator(L, Is, #b_br{bool=#b_literal{val=true},succ=Succ},
+ Blocks, XsMap, Res) ->
+ case map_get(Succ, Blocks) of
#b_blk{is=[],last=Last} ->
- reserve_terminator(Succ, Last, Blocks, Res);
- #b_blk{is=[_|_]=Is} ->
- {res_xregs_from_phi(Is, L, Res, #{}),[]}
+ reserve_terminator(Succ, Is, Last, Blocks, XsMap, Res);
+ #b_blk{is=[_|_]=PhiIs} ->
+ res_xregs_from_phi(PhiIs, L, Res, #{})
end;
-reserve_terminator(_, Last, _, _) ->
- {#{},beam_ssa:used(Last)}.
+reserve_terminator(_, _, _, _, _, _) -> #{}.
-res_xregs_from_phi([#b_set{op=phi,dst=#b_var{name=Dst},args=Args}|Is],
+%% Pick up a reservation from a phi node.
+res_xregs_from_phi([#b_set{op=phi,dst=Dst,args=Args}|Is],
Pred, Res, Acc) ->
- case [V || {#b_var{name=V},L} <- Args, L =:= Pred] of
+ case [V || {#b_var{}=V,L} <- Args, L =:= Pred] of
[] ->
+ %% The value of the phi node for this predecessor
+ %% is a literal. Nothing to do here.
res_xregs_from_phi(Is, Pred, Res, Acc);
[V] ->
case Res of
#{Dst:={prefer,Reg}} ->
+ %% Try placing V in the same register as for
+ %% the phi node.
res_xregs_from_phi(Is, Pred, Res, Acc#{V=>Reg});
#{Dst:=_} ->
res_xregs_from_phi(Is, Pred, Res, Acc)
@@ -1990,8 +2461,8 @@ res_xregs_from_phi(_, _, _, Acc) -> Acc.
reserve_call_args(Args) ->
reserve_call_args(Args, 0, #{}).
-reserve_call_args([#b_var{name=Name}|As], X, Xs) ->
- reserve_call_args(As, X+1, Xs#{Name=>{x,X}});
+reserve_call_args([#b_var{}=Var|As], X, Xs) ->
+ reserve_call_args(As, X+1, Xs#{Var=>{x,X}});
reserve_call_args([#b_literal{}|As], X, Xs) ->
reserve_call_args(As, X+1, Xs);
reserve_call_args([], _, Xs) -> Xs.
@@ -1999,12 +2470,12 @@ reserve_call_args([], _, Xs) -> Xs.
reserve_xreg(V, Xs, Res) ->
case Res of
#{V:=_} ->
- %% Already reserved.
+ %% Already reserved (but not as an X register).
Res;
#{} ->
case Xs of
#{V:=X} ->
- %% Add a hint that a specific X register is
+ %% Add a hint that this specific X register is
%% preferred, unless it is already in use.
Res#{V=>{prefer,X}};
#{} ->
@@ -2013,23 +2484,15 @@ reserve_xreg(V, Xs, Res) ->
end
end.
-is_gc_safe(#b_set{op=phi}) ->
- false;
-is_gc_safe(#b_set{op=Op,args=Args}) ->
- case beam_ssa_codegen:classify_heap_need(Op, Args) of
- neutral -> true;
- {put,_} -> true;
- _ -> false
- end.
-
%% res_xregs_prune(PreferredRegs, Used, Res) -> PreferredRegs.
-%% Prune the list of preferred to only include X registers that
-%% are guaranteed to survice a garbage collection.
+%% Prune the list of preferred registers, to make sure that
+%% there are no "holes" (uninitialized X registers) when
+%% invoking the garbage collector.
-res_xregs_prune(Xs, Used, Res) ->
+res_xregs_prune(Xs, Used, Res) when map_size(Xs) =/= 0 ->
%% The number of safe registers is the number of the X registers
%% used after this point. The actual number of safe registers may
- %% be highter than this number, but this is a conservative safe
+ %% be higher than this number, but this is a conservative safe
%% estimate.
NumSafe = foldl(fun(V, N) ->
case Res of
@@ -2041,83 +2504,8 @@ res_xregs_prune(Xs, Used, Res) ->
%% Remove unsafe registers from the list of potential
%% preferred registers.
- maps:filter(fun(_, {x,X}) -> X < NumSafe end, Xs).
-
-%%%
-%%% Remove unsuitable aliases.
-%%%
-%%% If a binary is matched more than once, we must not put the
-%%% the match context in the same register as the binary to
-%%% avoid the following situation:
-%%%
-%%% {test,bs_start_match2,{f,3},1,[{x,0},0],{x,0}}.
-%%% .
-%%% .
-%%% .
-%%% {test,bs_start_match2,{f,6},1,[{x,0},0],{x,1}}. %% ILLEGAL!
-%%%
-%%% The second instruction is illegal because a match context source
-%%% is only allowed if source and destination registers are identical.
-%%%
-
-remove_unsuitable_aliases(#st{aliases=[_|_]=Aliases0,ssa=Blocks}=St) ->
- R = rem_unsuitable(maps:values(Blocks)),
- Unsuitable0 = [V || {V,[_,_|_]} <- rel2fam(R)],
- Unsuitable = gb_sets:from_list(Unsuitable0),
- Aliases =[P || {_,V}=P <- Aliases0,
- not gb_sets:is_member(V, Unsuitable)],
- St#st{aliases=Aliases};
-remove_unsuitable_aliases(#st{aliases=[]}=St) -> St.
-
-rem_unsuitable([#b_blk{is=Is}|Bs]) ->
- Vs = [{V,Dst} ||
- #b_set{op=bs_start_match,dst=#b_var{name=Dst},
- args=[#b_var{name=V}]} <- Is],
- Vs ++ rem_unsuitable(Bs);
-rem_unsuitable([]) -> [].
-
-%%%
-%%% Merge intervals.
-%%%
-
-merge_intervals(#st{aliases=Aliases0,intervals=Intervals0,
- res=Reserved}=St) ->
- Aliases1 = [A || A <- Aliases0,
- is_suitable_alias(A, Reserved)],
- case Aliases1 of
- [] ->
- St#st{aliases=Aliases1};
- [_|_] ->
- Intervals1 = maps:from_list(Intervals0),
- {Intervals,Aliases} =
- merge_intervals_1(Aliases1, Intervals1, []),
- St#st{aliases=Aliases,intervals=Intervals}
- end.
-
-merge_intervals_1([{Alias,V}|Vs], Intervals0, Acc) ->
- #{Alias:=Int1,V:=Int2} = Intervals0,
- Int3 = lists:merge(Int1, Int2),
- Int = merge_intervals_2(Int3),
- Intervals1 = maps:remove(Alias, Intervals0),
- Intervals = Intervals1#{V:=Int},
- merge_intervals_1(Vs, Intervals, [{Alias,V}|Acc]);
-merge_intervals_1([], Intervals, Acc) ->
- {maps:to_list(Intervals),Acc}.
-
-merge_intervals_2([{A1,B1},{A2,B2}|Is]) when A2 =< B1 ->
- merge_intervals_2([{min(A1, A2),max(B1, B2)}|Is]);
-merge_intervals_2([{_A1,B1}=R|[{A2,_B2}|_]=Is]) when B1 < A2 ->
- [R|merge_intervals_2(Is)];
-merge_intervals_2([_]=Is) -> Is.
-
-is_suitable_alias({V1,V2}, Reserved) ->
- #{V1:=Res1,V2:=Res2} = Reserved,
- case {Res1,Res2} of
- {x,x} -> true;
- {x,{x,_}} -> true;
- {{x,_},x} -> true;
- {_,_} -> false
- end.
+ maps:filter(fun(_, {x,X}) -> X < NumSafe end, Xs);
+res_xregs_prune(Xs, _Used, _Res) -> Xs.
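For illustration (not part of the patch; prune_sketch is a made-up module), once the number of known-safe X registers has been computed, the pruning step reduces to a single filter over the hint map:

-module(prune_sketch).
-export([prune/2]).

%% Keep only hints for X registers below NumSafe, i.e. registers that
%% are guaranteed to be initialized when the garbage collector runs.
prune(Hints, NumSafe) ->
    maps:filter(fun(_, {x,X}) -> X < NumSafe end, Hints).

%% prune(#{a => {x,0}, b => {x,3}}, 1) returns #{a => {x,0}}.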
%%%
%%% Register allocation using linear scan.
@@ -2152,7 +2540,13 @@ linear_scan(#st{intervals=Intervals0,res=Res}=St0) ->
Free = init_free(maps:to_list(Res)),
Intervals1 = [init_interval(Int, Res) || Int <- Intervals0],
Intervals = sort(Intervals1),
- IsReserved = fun (#i{reg=Reg}) -> Reg =/= none end,
+ IsReserved = fun(#i{reg=Reg}) ->
+ case Reg of
+ none -> false;
+ {prefer,{_,_}} -> false;
+ {_,_} -> true
+ end
+ end,
{UnhandledRes,Unhandled} = partition(IsReserved, Intervals),
L = #l{unhandled_res=UnhandledRes,
unhandled_any=Unhandled,free=Free},
@@ -2160,7 +2554,7 @@ linear_scan(#st{intervals=Intervals0,res=Res}=St0) ->
St#st{regs=maps:from_list(Regs)}.
init_interval({V,[{Start,_}|_]=Rs}, Res) ->
- Info = maps:get(V, Res),
+ Info = map_get(V, Res),
Pool = case Info of
{prefer,{x,_}} -> x;
x -> x;
@@ -2361,16 +2755,16 @@ free_reg(#i{reg={_,_}=Reg}=I, L) ->
update_pool(I, FreeRegs, L).
get_pool(#i{pool=Pool}, #l{free=Free}) ->
- maps:get(Pool, Free).
+ map_get(Pool, Free).
update_pool(#i{pool=Pool}, New, #l{free=Free0}=L) ->
- Free = maps:put(Pool, New, Free0),
+ Free = Free0#{Pool:=New},
L#l{free=Free}.
get_next_free(#i{pool=Pool}, #l{free=Free0}=L0) ->
K = {next,Pool},
- N = maps:get(K, Free0),
- Free = maps:put(K, N+1, Free0),
+ N = map_get(K, Free0),
+ Free = Free0#{K:=N+1},
L = L0#l{free=Free},
if
is_integer(Pool) -> {{y,N},L};
@@ -2406,7 +2800,7 @@ are_overlapping_1({_,_}, []) -> false.
is_loop_header(L, Blocks) ->
%% We KNOW that a loop header must start with a peek_message
%% instruction.
- case maps:get(L, Blocks) of
+ case map_get(L, Blocks) of
#b_blk{is=[#b_set{op=peek_message}|_]} -> true;
_ -> false
end.
@@ -2417,21 +2811,21 @@ rel2fam(S0) ->
sofs:to_external(S).
split_phis(Is) ->
- partition(fun(#b_set{op=Op}) -> Op =:= phi end, Is).
+ splitwith(fun(#b_set{op=Op}) -> Op =:= phi end, Is).
is_yreg({y,_}) -> true;
is_yreg({x,_}) -> false;
is_yreg({z,_}) -> false;
is_yreg({fr,_}) -> false.
-new_var_names([V0|Vs0], Count0) ->
- {V,Count1} = new_var_name(V0, Count0),
- {Vs,Count} = new_var_names(Vs0, Count1),
+new_vars([Base|Vs0], Count0) ->
+ {V,Count1} = new_var(Base, Count0),
+ {Vs,Count} = new_vars(Vs0, Count1),
{[V|Vs],Count};
-new_var_names([], Count) -> {[],Count}.
+new_vars([], Count) -> {[],Count}.
-new_var_name({Base,Int}, Count) ->
+new_var({Base,Int}, Count) ->
true = is_integer(Int), %Assertion.
- {{Base,Count},Count+1};
-new_var_name(Base, Count) ->
- {{Base,Count},Count+1}.
+ {#b_var{name={Base,Count}},Count+1};
+new_var(Base, Count) ->
+ {#b_var{name={Base,Count}},Count+1}.
diff --git a/lib/compiler/src/beam_ssa_recv.erl b/lib/compiler/src/beam_ssa_recv.erl
index 82fe006487..1e0e1ecac2 100644
--- a/lib/compiler/src/beam_ssa_recv.erl
+++ b/lib/compiler/src/beam_ssa_recv.erl
@@ -101,7 +101,7 @@ opt([{L,#b_blk{is=[#b_set{op=peek_message}|_]}=Blk0}|Bs], Blocks0, Preds) ->
case recv_opt(Preds, L, Blocks0) of
{yes,Blocks1} ->
Blk = beam_ssa:add_anno(recv_set, L, Blk0),
- Blocks = maps:put(L, Blk, Blocks1),
+ Blocks = Blocks1#{L:=Blk},
opt(Bs, Blocks, []);
no ->
opt(Bs, Blocks0, [])
@@ -111,11 +111,11 @@ opt([{L,_}|Bs], Blocks, Preds) ->
opt([], Blocks, _) -> Blocks.
recv_opt([L|Ls], RecvLbl, Blocks) ->
- #b_blk{is=Is0} = Blk0 = maps:get(L, Blocks),
+ #b_blk{is=Is0} = Blk0 = map_get(L, Blocks),
case recv_opt_is(Is0, RecvLbl, Blocks, []) of
{yes,Is} ->
Blk = Blk0#b_blk{is=Is},
- {yes,maps:put(L, Blk, Blocks)};
+ {yes,Blocks#{L:=Blk}};
no ->
recv_opt(Ls, RecvLbl, Blocks)
end;
@@ -138,7 +138,7 @@ recv_opt_is([I|Is], RecvLbl, Blocks, Acc) ->
recv_opt_is(Is, RecvLbl, Blocks, [I|Acc]);
recv_opt_is([], _, _, _) -> no.
-makes_ref(#b_set{dst=#b_var{name=Dst},args=[Func0|_]}, Blocks) ->
+makes_ref(#b_set{dst=Dst,args=[Func0|_]}, Blocks) ->
Func = case Func0 of
#b_remote{mod=#b_literal{val=erlang},
name=#b_literal{val=Name},arity=A0} ->
@@ -158,15 +158,15 @@ makes_ref(#b_set{dst=#b_var{name=Dst},args=[Func0|_]}, Blocks) ->
end.
ref_in_tuple(Tuple, Blocks) ->
- F = fun(#b_set{op=get_tuple_element,dst=#b_var{name=Ref},
- args=[#b_var{name=Tup},#b_literal{val=1}]}, no)
+ F = fun(#b_set{op=get_tuple_element,dst=Ref,
+ args=[#b_var{}=Tup,#b_literal{val=1}]}, no)
when Tup =:= Tuple -> {yes,Ref};
(_, A) -> A
end,
beam_ssa:fold_instrs_rpo(F, [0], no, Blocks).
opt_ref_used(RecvLbl, Ref, Blocks) ->
- Vs = #{{var,Ref}=>ref,ref=>Ref,ref_matched=>false},
+ Vs = #{Ref=>ref,ref=>Ref,ref_matched=>false},
case opt_ref_used_1(RecvLbl, Vs, Blocks) of
used -> true;
not_used -> false;
@@ -174,7 +174,7 @@ opt_ref_used(RecvLbl, Ref, Blocks) ->
end.
opt_ref_used_1(L, Vs0, Blocks) ->
- #b_blk{is=Is} = Blk = maps:get(L, Blocks),
+ #b_blk{is=Is} = Blk = map_get(L, Blocks),
case opt_ref_used_is(Is, Vs0) of
#{}=Vs ->
opt_ref_used_last(Blk, Vs, Blocks);
@@ -182,10 +182,10 @@ opt_ref_used_1(L, Vs0, Blocks) ->
Result
end.
-opt_ref_used_is([#b_set{op=peek_message,dst=#b_var{name=M}}|Is], Vs0) ->
- Vs = Vs0#{{var,M}=>message},
+opt_ref_used_is([#b_set{op=peek_message,dst=Msg}|Is], Vs0) ->
+ Vs = Vs0#{Msg=>message},
opt_ref_used_is(Is, Vs);
-opt_ref_used_is([#b_set{op={bif,Bif},args=Args,dst=#b_var{name=B}}=I|Is],
+opt_ref_used_is([#b_set{op={bif,Bif},args=Args,dst=Dst}=I|Is],
Vs0) ->
S = case Bif of
'=:=' -> true;
@@ -199,7 +199,7 @@ opt_ref_used_is([#b_set{op={bif,Bif},args=Args,dst=#b_var{name=B}}=I|Is],
Bool when is_boolean(Bool) ->
case is_ref_msg_comparison(Args, Vs0) of
true ->
- Vs = Vs0#{B=>{is_ref,Bool}},
+ Vs = Vs0#{Dst=>{is_ref,Bool}},
opt_ref_used_is(Is, Vs);
false ->
opt_ref_used_is(Is, Vs0)
@@ -225,7 +225,7 @@ opt_ref_used_is([], Vs) -> Vs.
opt_ref_used_last(#b_blk{last=Last}=Blk, Vs, Blocks) ->
case Last of
- #b_br{bool=#b_var{name=Bool},succ=Succ,fail=Fail} ->
+ #b_br{bool=#b_var{}=Bool,succ=Succ,fail=Fail} ->
case Vs of
#{Bool:={is_ref,Matched}} ->
ref_used_in([{Succ,Vs#{ref_matched:=Matched}},
@@ -252,17 +252,16 @@ ref_used_in([{L,Vs0}|Ls], Blocks) ->
end;
ref_used_in([], _) -> done.
-update_vars(#b_set{args=Args,dst=#b_var{name=B}}, Vs) ->
- Vars = [V || #b_var{name=V} <- Args],
- All = all(fun(V) ->
- Var = {var,V},
+update_vars(#b_set{args=Args,dst=Dst}, Vs) ->
+ Vars = [V || #b_var{}=V <- Args],
+ All = all(fun(Var) ->
case Vs of
#{Var:=message} -> true;
#{} -> false
end
end, Vars),
case All of
- true -> Vs#{{var,B}=>message};
+ true -> Vs#{Dst=>message};
false -> Vs
end.
@@ -270,9 +269,7 @@ update_vars(#b_set{args=Args,dst=#b_var{name=B}}, Vs) ->
%% Return 'true' if Args denotes a comparison between the
%% reference and message or part of the message.
-is_ref_msg_comparison([#b_var{name=A1},#b_var{name=A2}], Vs) ->
- V1 = {var,A1},
- V2 = {var,A2},
+is_ref_msg_comparison([#b_var{}=V1,#b_var{}=V2], Vs) ->
case Vs of
#{V1:=ref,V2:=message} -> true;
#{V1:=message,V2:=ref} -> true;
diff --git a/lib/compiler/src/beam_ssa_share.erl b/lib/compiler/src/beam_ssa_share.erl
new file mode 100644
index 0000000000..426efa2cc9
--- /dev/null
+++ b/lib/compiler/src/beam_ssa_share.erl
@@ -0,0 +1,370 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Share code for semantically equivalent blocks referred to
+%% by `br` and `switch` instructions.
+%%
+%% A similar optimization is done in beam_jump, but doing it here as
+%% well is beneficial as it may enable other optimizations. If there
+%% are many semantically equivalent clauses, this optimization can
+%% substantially decrease compilation times.
+%%
+%% block/2 is called from the liveness optimization pass in
+%% beam_ssa_opt, as code sharing helps the liveness pass and vice
+%% versa.
+%%
+
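A hypothetical source-level example (not from the patch; share_sketch is a made-up module) of code that benefits: the first two clauses compile to semantically equivalent blocks that both return the literal number, so this pass can share them:

-module(share_sketch).
-export([classify/1]).

classify(X) when is_integer(X) -> number;
classify(X) when is_float(X) -> number;
classify(_) -> other.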
+-module(beam_ssa_share).
+-export([module/2,block/2]).
+
+-include("beam_ssa.hrl").
+
+-import(lists, [keyfind/3,reverse/1,sort/1]).
+
+-spec module(beam_ssa:b_module(), [compile:option()]) ->
+ {'ok',beam_ssa:b_module()}.
+
+module(#b_module{body=Fs0}=Module, _Opts) ->
+ Fs = [function(F) || F <- Fs0],
+ {ok,Module#b_module{body=Fs}}.
+
+-spec block(Blk0, Blocks0) -> Blk when
+ Blk0 :: beam_ssa:b_blk(),
+ Blocks0 :: beam_ssa:block_map(),
+ Blk :: beam_ssa:b_blk().
+
+block(#b_blk{last=Last0}=Blk, Blocks) ->
+ case share_terminator(Last0, Blocks) of
+ none -> Blk;
+ Last -> Blk#b_blk{last=beam_ssa:normalize(Last)}
+ end.
+
+%%%
+%%% Local functions.
+%%%
+
+function(#b_function{anno=Anno,bs=Blocks0}=F) ->
+ try
+ PO = reverse(beam_ssa:rpo(Blocks0)),
+ {Blocks1,Changed} = blocks(PO, Blocks0, false),
+ Blocks = case Changed of
+ true ->
+ beam_ssa:trim_unreachable(Blocks1);
+ false ->
+ Blocks0
+ end,
+ F#b_function{bs=Blocks}
+ catch
+ Class:Error:Stack ->
+ #{func_info:={_,Name,Arity}} = Anno,
+ io:fwrite("Function: ~w/~w\n", [Name,Arity]),
+ erlang:raise(Class, Error, Stack)
+ end.
+
+blocks([L|Ls], Blocks, Changed) ->
+ #b_blk{last=Last0} = Blk0 = map_get(L, Blocks),
+ case block(Blk0, Blocks) of
+ #b_blk{last=Last0} ->
+ blocks(Ls, Blocks, Changed);
+ #b_blk{}=Blk ->
+ blocks(Ls, Blocks#{L:=Blk}, true)
+ end;
+blocks([], Blocks, Changed) ->
+ {Blocks,Changed}.
+
+share_terminator(#b_br{bool=#b_var{},succ=Succ0,fail=Fail0}=Br, Blocks) ->
+ {Succ,SuccBlk} = shortcut_nonempty_block(Succ0, Blocks),
+ {Fail,FailBlk} = shortcut_nonempty_block(Fail0, Blocks),
+ case are_equivalent(Succ, SuccBlk, Fail, FailBlk, Blocks) of
+ true ->
+ %% The blocks are semantically equivalent.
+ Br#b_br{succ=Succ,fail=Succ};
+ false ->
+ if
+ Succ =:= Succ0, Fail =:= Fail0 ->
+ %% None of the blocks were cut short.
+ none;
+ true ->
+ %% One or both labels were cut short
+ %% to avoid jumping to an empty block.
+ Br#b_br{succ=Succ,fail=Fail}
+ end
+ end;
+share_terminator(#b_switch{}=Sw, Blocks) ->
+ share_switch(Sw, Blocks);
+share_terminator(_Last, _Blocks) -> none.
+
+%% Test whether the two blocks are semantically equivalent. This
+%% function is specially optimized to return `false` as fast as
+%% possible if the blocks are not equivalent, as that is the common
+%% case.
+
+are_equivalent(_Succ, _, ?BADARG_BLOCK, _, _Blocks) ->
+ %% ?BADARG_BLOCK is special. Sharing could be incorrect.
+ false;
+are_equivalent(_Succ, #b_blk{is=Is1,last=#b_ret{arg=RetVal1}=Ret1},
+ _Fail, #b_blk{is=Is2,last=#b_ret{arg=RetVal2}=Ret2}, _Blocks) ->
+ case {RetVal1,RetVal2} of
+ {#b_literal{},#b_literal{}} ->
+ case RetVal1 =:= RetVal2 of
+ true ->
+ %% The return values are identical literals. We
+ %% only need to compare the canonicalized bodies.
+ Can1 = canonical_is(Is1),
+ Can2 = canonical_is(Is2),
+ Can1 =:= Can2;
+ false ->
+ %% Non-equal literals.
+ false
+ end;
+ {#b_var{},#b_var{}} ->
+ %% The return values are variables. We must canonicalize
+ %% the blocks (including returns) and compare them.
+ Can1 = canonical_is(Is1 ++ [Ret1]),
+ Can2 = canonical_is(Is2 ++ [Ret2]),
+ Can1 =:= Can2;
+ {_,_} ->
+ %% One literal and one variable.
+ false
+ end;
+are_equivalent(Succ,
+ #b_blk{is=Is1,
+ last=#b_br{bool=#b_literal{val=true},
+ succ=Target}},
+ Fail,
+ #b_blk{is=Is2,
+ last=#b_br{bool=#b_literal{val=true},
+ succ=Target}},
+ Blocks) ->
+ %% Both blocks end with an unconditional branch to the
+ %% same target block. If the target block has phi nodes,
+ %% we must pick up the values from the phi nodes and
+ %% compare them.
+ #b_blk{is=Is} = map_get(Target, Blocks),
+ Phis1 = canonical_terminator_phis(Is, Succ),
+ Phis2 = canonical_terminator_phis(Is, Fail),
+ case {Phis1,Phis2} of
+ {[#b_set{args=[#b_literal{}]}|_],_} when Phis1 =/= Phis2 ->
+ %% Different values are used in the phi nodes.
+ false;
+ {_,[#b_set{args=[#b_literal{}]}|_]} when Phis1 =/= Phis2 ->
+ %% Different values are used in the phi nodes.
+ false;
+ {_,_} ->
+ %% The values in the phi nodes are variables or identical
+ %% literals. We must canonicalize the blocks and compare
+ %% them.
+ Can1 = canonical_is(Is1 ++ Phis1),
+ Can2 = canonical_is(Is2 ++ Phis2),
+ Can1 =:= Can2
+ end;
+are_equivalent(Succ0, #b_blk{is=Is1,last=#b_br{bool=#b_var{},fail=Same}},
+ Fail0, #b_blk{is=Is2,last=#b_br{bool=#b_var{},fail=Same}},
+ Blocks) ->
+ %% Two-way branches with identical failure labels. First compare the
+ %% canonicalized bodies of the blocks.
+ case canonical_is(Is1) =:= canonical_is(Is2) of
+ false ->
+ %% Different bodies.
+ false;
+ true ->
+ %% Bodies were equal. That is fairly uncommon, so to keep
+ %% the code simple we will rewrite the `br` to a `switch`
+ %% and let share_switch/2 do the work of following the
+ %% branches.
+ Sw = #b_switch{arg=#b_var{name=not_used},fail=Fail0,
+ list=[{#b_literal{},Succ0}]},
+ #b_switch{fail=Fail,list=[{_,Succ}]} = share_switch(Sw, Blocks),
+ Fail =:= Succ
+ end;
+are_equivalent(_, _, _, _, _) -> false.
+
+share_switch(#b_switch{fail=Fail0,list=List0}=Sw, Blocks) ->
+ Prep = share_prepare_sw([{value,Fail0}|List0], Blocks, 0, []),
+ Res = do_share_switch(Prep, Blocks, []),
+ [{_,Fail}|List] = [VL || {_,VL} <- sort(Res)],
+ Sw#b_switch{fail=Fail,list=List}.
+
+share_prepare_sw([{V,L0}|T], Blocks, N, Acc) ->
+ {L,_Blk} = shortcut_nonempty_block(L0, Blocks),
+ share_prepare_sw(T, Blocks, N+1, [{{L,#{}},{N,{V,L}}}|Acc]);
+share_prepare_sw([], _, _, Acc) -> Acc.
+
+do_share_switch(Prep, Blocks, Acc) ->
+ Map = share_switch_1(Prep, Blocks, #{}),
+ share_switch_2(maps:values(Map), Blocks, Acc).
+
+share_switch_1([{Next0,Res}|T], Blocks, Map) ->
+ {Can,Next} = canonical_block(Next0, Blocks),
+ case Map of
+ #{Can:=Ls} ->
+ share_switch_1(T, Blocks, Map#{Can:=[{Next,Res}|Ls]});
+ #{} ->
+ share_switch_1(T, Blocks, Map#{Can=>[{Next,Res}]})
+ end;
+share_switch_1([], _Blocks, Map) -> Map.
+
+share_switch_2([[{_,{N,Res}}]|T], Blocks, Acc) ->
+ %% This block is not equivalent to any other block.
+ share_switch_2(T, Blocks, [{N,Res}|Acc]);
+share_switch_2([[{done,{_,{_,Common}}}|_]=Eqs|T], Blocks, Acc0) ->
+ %% Two or more blocks are semantically equivalent, and all blocks
+ %% are either terminated with a `ret` or a `br` to the same target
+ %% block. Replace the labels in the `switch` for all of those
+ %% blocks with the label for the first of the blocks.
+ Acc = [{N,{V,Common}} || {done,{N,{V,_}}} <- Eqs] ++ Acc0,
+ share_switch_2(T, Blocks, Acc);
+share_switch_2([[{_,_}|_]=Prep|T], Blocks, Acc0) ->
+ %% Two or more blocks are semantically equivalent, but they have
+ %% different successful successor blocks. Now we must check
+ %% recursively whether the successor blocks are equivalent too.
+ Acc = do_share_switch(Prep, Blocks, Acc0),
+ share_switch_2(T, Blocks, Acc);
+share_switch_2([], _, Acc) -> Acc.
+
+canonical_block({L,VarMap0}, Blocks) ->
+ #b_blk{is=Is,last=Last0} = map_get(L, Blocks),
+ case canonical_terminator(L, Last0, Blocks) of
+ none ->
+ %% The block has a terminator that we don't handle.
+ {{none,L},done};
+ {Last,done} ->
+ %% The block ends with a `ret` or an unconditional `br` to
+ %% another block.
+ {Can,_VarMap} = canonical_is(Is ++ Last, VarMap0, []),
+ {Can,done};
+ {Last,Next} ->
+ %% The block ends with a conditional branch.
+ {Can,VarMap} = canonical_is(Is ++ Last, VarMap0, []),
+ {Can,{Next,VarMap}}
+ end.
+
+%% Translate a sequence of instructions to a canonical representation. If the
+%% canonical representation of two blocks compare equal, the blocks are
+%% semantically equivalent. The following translations are done:
+%%
+%% * Variables defined in the instruction sequence are replaced with
+%% {var,0}, {var,1}, and so on. Free variables are not changed.
+%%
+%% * `location` annotations that would produce a `line` instruction are
+%% kept. All other annotations are cleared.
+%%
+%% * Instructions are repackaged into tuples instead of into the
+%% usual records. The main reason is to avoid violating the types for
+%% the SSA records. We can simplify things a little by linking the
+%% instructions directly instead of putting them into a list.
+
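A simplified sketch (not part of the patch; canon_sketch and the bare {set,Dst,Args} triples are made up) of just the variable-renaming aspect described above: variables defined by the sequence map to {var,0}, {var,1}, and so on, while free variables pass through unchanged, so alpha-equivalent sequences yield identical canonical terms:

-module(canon_sketch).
-export([canon/1]).

canon(Is) -> canon(Is, #{}, []).

canon([{set,Dst,Args} | Is], VarMap, Acc) ->
    CanArgs = [maps:get(A, VarMap, A) || A <- Args],
    CanDst = {var, map_size(VarMap)},
    canon(Is, VarMap#{Dst => CanDst}, [{set,CanDst,CanArgs} | Acc]);
canon([], _VarMap, Acc) ->
    lists:reverse(Acc).

%% Both canon([{set,a,[x]},{set,b,[a,y]}]) and canon([{set,p,[x]},{set,q,[p,y]}])
%% return [{set,{var,0},[x]},{set,{var,1},[{var,0},y]}].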
+canonical_is(Is) ->
+ {Can,_} = canonical_is(Is, #{}, []),
+ Can.
+
+canonical_is([#b_set{op=Op,dst=Dst,args=Args0}=I|Is], VarMap0, Acc) ->
+ Args = [canonical_arg(Arg, VarMap0) || Arg <-Args0],
+ Var = {var,map_size(VarMap0)},
+ VarMap = VarMap0#{Dst=>Var},
+ LineAnno = case Op of
+ bs_match ->
+ %% The location annotation for a bs_match instruction
+ %% is only used in warnings, never to emit a `line`
+ %% instruction. Therefore, it should not be included.
+ [];
+ _ ->
+ %% The location annotation will be used in a `line`
+ %% instruction. It must be included.
+ beam_ssa:get_anno(location, I, none)
+ end,
+ canonical_is(Is, VarMap, {Op,LineAnno,Var,Args,Acc});
+canonical_is([#b_ret{arg=Arg}], VarMap, Acc0) ->
+ Acc1 = case Acc0 of
+ {call,_Anno,Var,[#b_local{}|_]=Args,PrevAcc} ->
+ %% This is a tail-recursive call to a local function.
+ %% There will be no line instruction generated;
+ %% thus, the annotation is not significant.
+ {call,[],Var,Args,PrevAcc};
+ _ ->
+ Acc0
+ end,
+ {{ret,canonical_arg(Arg, VarMap),Acc1},VarMap};
+canonical_is([#b_br{bool=#b_var{},fail=Fail}], VarMap, Acc) ->
+ {{br,succ,Fail,Acc},VarMap};
+canonical_is([#b_br{succ=Succ}], VarMap, Acc) ->
+ {{br,Succ,Acc},VarMap};
+canonical_is([], VarMap, Acc) ->
+ {Acc,VarMap}.
+
+canonical_terminator(_L, #b_ret{}=Ret, _Blocks) ->
+ {[Ret],done};
+canonical_terminator(L, #b_br{bool=#b_literal{val=true},succ=Succ}=Br, Blocks) ->
+ #b_blk{is=Is} = map_get(Succ, Blocks),
+ case canonical_terminator_phis(Is, L) of
+ [] ->
+ {[],Succ};
+ [_|_]=Phis ->
+ {Phis ++ [Br],done}
+ end;
+canonical_terminator(_L, #b_br{bool=#b_var{},succ=Succ}=Br, _Blocks) ->
+ {[Br],Succ};
+canonical_terminator(_, _, _) -> none.
+
+canonical_terminator_phis([#b_set{op=phi,args=PhiArgs}=Phi|Is], L) ->
+ {Value,L} = keyfind(L, 2, PhiArgs),
+ [Phi#b_set{op=copy,args=[Value]}|canonical_terminator_phis(Is, L)];
+canonical_terminator_phis([#b_set{op=peek_message}=I|_], L) ->
+ %% We could get stuck in an infinite loop if we allowed the
+ %% comparisons to continue into this block. Force an unequal
+ %% compare with all other predecessors of this block.
+ [I#b_set{op=copy,args=[#b_literal{val=L}]}];
+canonical_terminator_phis(_, _) -> [].
+
+canonical_arg(#b_var{}=Var, VarMap) ->
+ case VarMap of
+ #{Var:=CanonicalVar} ->
+ CanonicalVar;
+ #{} ->
+ Var
+ end;
+canonical_arg(#b_remote{mod=Mod,name=Name}, VarMap) ->
+ {remote,canonical_arg(Mod, VarMap),
+ canonical_arg(Name, VarMap)};
+canonical_arg(Other, _VarMap) -> Other.
+
+%% Shortcut branches to empty blocks if safe.
+
+shortcut_nonempty_block(L, Blocks) ->
+ case map_get(L, Blocks) of
+ #b_blk{is=[],last=#b_br{bool=#b_literal{val=true},succ=Succ}}=Blk ->
+ %% This block is empty.
+ case is_forbidden(Succ, Blocks) of
+ false ->
+ shortcut_nonempty_block(Succ, Blocks);
+ true ->
+ {L,Blk}
+ end;
+ #b_blk{}=Blk ->
+ {L,Blk}
+ end.
+
+is_forbidden(L, Blocks) ->
+ case map_get(L, Blocks) of
+ #b_blk{is=[#b_set{op=phi}|_]} -> true;
+ #b_blk{is=[#b_set{op=peek_message}|_]} -> true;
+ #b_blk{} -> false
+ end.
diff --git a/lib/compiler/src/beam_ssa_type.erl b/lib/compiler/src/beam_ssa_type.erl
index e5f15da836..c01ea4af91 100644
--- a/lib/compiler/src/beam_ssa_type.erl
+++ b/lib/compiler/src/beam_ssa_type.erl
@@ -19,16 +19,23 @@
%%
-module(beam_ssa_type).
--export([opt/2]).
+-export([opt_start/4, opt_continue/4, opt_finish/3]).
--include("beam_ssa.hrl").
--import(lists, [any/2,droplast/1,foldl/3,last/1,member/2,
- reverse/1,search/2,sort/1]).
+-include("beam_ssa_opt.hrl").
+-import(lists, [all/2,any/2,droplast/1,foldl/3,last/1,member/2,
+ keyfind/3,partition/2,reverse/1,reverse/2,
+ seq/2,sort/1,split/2]).
-define(UNICODE_INT, #t_integer{elements={0,16#10FFFF}}).
--record(d, {ds :: #{beam_ssa:var_name():=beam_ssa:b_set()},
- ls :: #{beam_ssa:label():=type_db()}}).
+-record(d,
+ {ds :: #{beam_ssa:b_var():=beam_ssa:b_set()},
+ ls :: #{beam_ssa:label():=type_db()},
+ once :: cerl_sets:set(beam_ssa:b_var()),
+ func_id :: func_id(),
+ func_db :: func_info_db(),
+ sub = #{} :: #{beam_ssa:b_var():=beam_ssa:value()},
+ ret_type = [] :: [type()]}).
-define(ATOM_SET_SIZE, 5).
@@ -38,121 +45,462 @@
-record(t_bs_match, {type :: type()}).
-record(t_tuple, {size=0 :: integer(),
exact=false :: boolean(),
- elements=[] :: [any()]
- }).
+ %% Known element types (1-based index); unknown elements
+ %% are assumed to be 'any'.
+ elements=#{} :: #{ non_neg_integer() => type() }}).
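+
+%% As a rough illustration (using the t_atom/1 and t_integer/1
+%% representations seen elsewhere in this module), the literal tuple
+%% {ok,42} would be described as:
+%%
+%%    #t_tuple{size=2,exact=true,
+%%             elements=#{1 => #t_atom{elements=[ok]},
+%%                        2 => #t_integer{elements={42,42}}}}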
-type type() :: 'any' | 'none' |
#t_atom{} | #t_integer{} | #t_bs_match{} | #t_tuple{} |
- {'binary',pos_integer()} | 'cons' | 'float' | 'list' | 'map' | 'nil' |'number'.
+ {'binary',pos_integer()} | 'cons' | 'float' | 'list' | 'map' | 'nil' | 'number'.
-type type_db() :: #{beam_ssa:var_name():=type()}.
--spec opt([{Label0,Block0}], Args) -> [{Label,Block}] when
- Label0 :: beam_ssa:label(),
- Block0 :: beam_ssa:b_blk(),
+-spec opt_start(Linear, Args, Anno, FuncDb) -> {Linear, FuncDb} when
+ Linear :: [{non_neg_integer(), beam_ssa:b_blk()}],
Args :: [beam_ssa:b_var()],
- Label :: beam_ssa:label(),
- Block :: beam_ssa:b_blk().
+ Anno :: beam_ssa:anno(),
+ FuncDb :: func_info_db().
+opt_start(Linear, Args, Anno, FuncDb) ->
+    %% This is the first run through the module, so our arg_types can be
+    %% incomplete as we may not yet have visited every call site.
+ Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]),
+ opt_continue_1(Linear, Args, get_func_id(Anno), Ts, FuncDb).
+
+-spec opt_continue(Linear, Args, Anno, FuncDb) -> {Linear, FuncDb} when
+ Linear :: [{non_neg_integer(), beam_ssa:b_blk()}],
+ Args :: [beam_ssa:b_var()],
+ Anno :: beam_ssa:anno(),
+ FuncDb :: func_info_db().
+opt_continue(Linear, Args, Anno, FuncDb) ->
+ Id = get_func_id(Anno),
+ case FuncDb of
+ #{ Id := #func_info{exported=false,arg_types=ArgTypes} } ->
+ %% This is a local function and we're guaranteed to have visited
+ %% every call site at least once, so we know that the parameter
+ %% types are at least as narrow as the join of all argument types.
+ Ts = join_arg_types(Args, ArgTypes, Anno),
+ opt_continue_1(Linear, Args, Id, Ts, FuncDb);
+ #{} ->
+ %% We can't infer the parameter types of exported functions, nor
+ %% the ones where module-level optimization is disabled, but
+ %% running the pass again could still help other functions.
+ Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]),
+ opt_continue_1(Linear, Args, Id, Ts, FuncDb)
+ end.
-opt(Linear, Args) ->
- Ts = maps:from_list([{V,any} || #b_var{name=V} <- Args]),
+join_arg_types(Args, ArgTypes, Anno) ->
+    %% We suppress type optimization for parameters that have already been
+    %% optimized by another pass, as that pass may have done things we have
+    %% no idea how to interpret, and optimizing on top of them could
+    %% generate incorrect code.
+ ParamTypes = maps:get(parameter_type_info, Anno, #{}),
+ Ts0 = join_arg_types_1(Args, ArgTypes, #{}),
+ maps:fold(fun(Arg, _V, Ts) ->
+ maps:put(Arg, any, Ts)
+ end, Ts0, ParamTypes).
+
+join_arg_types_1([Arg | Args], [TM | TMs], Ts) when map_size(TM) =/= 0 ->
+ join_arg_types_1(Args, TMs, Ts#{ Arg => join(maps:values(TM))});
+join_arg_types_1([Arg | Args], [_TM | TMs], Ts) ->
+ join_arg_types_1(Args, TMs, Ts#{ Arg => any });
+join_arg_types_1([], [], Ts) ->
+ Ts.
+
+-spec opt_continue_1(Linear, Args, Id, Ts, FuncDb) -> Result when
+ Linear :: [{non_neg_integer(), beam_ssa:b_blk()}],
+ Args :: [beam_ssa:b_var()],
+ Id :: func_id(),
+ Ts :: type_db(),
+ FuncDb :: func_info_db(),
+ Result :: {Linear, FuncDb}.
+opt_continue_1(Linear0, Args, Id, Ts, FuncDb0) ->
+ UsedOnce = used_once(Linear0, Args),
FakeCall = #b_set{op=call,args=[#b_remote{mod=#b_literal{val=unknown},
name=#b_literal{val=unknown},
arity=0}]},
- Defs = maps:from_list([{V,FakeCall#b_set{dst=Var}} ||
- #b_var{name=V}=Var <- Args]),
- D = #d{ds=Defs,ls=#{0=>Ts}},
- opt_1(Linear, D).
+ Defs = maps:from_list([{Var,FakeCall#b_set{dst=Var}} ||
+ #b_var{}=Var <- Args]),
+
+ D = #d{ func_db=FuncDb0,
+ func_id=Id,
+ ds=Defs,
+ ls=#{0=>Ts,?BADARG_BLOCK=>#{}},
+ once=UsedOnce },
+
+ {Linear, FuncDb, NewRet} = opt(Linear0, D, []),
+
+ case FuncDb of
+ #{ Id := Entry0 } ->
+ Entry = Entry0#func_info{ret_type=NewRet},
+ {Linear, FuncDb#{ Id := Entry }};
+ #{} ->
+ %% Module-level optimizations have been turned off for this
+ %% function.
+ {Linear, FuncDb}
+ end.
+
+-spec opt_finish(Args, Anno, FuncDb) -> {Anno, FuncDb} when
+ Args :: [beam_ssa:b_var()],
+ Anno :: beam_ssa:anno(),
+ FuncDb :: func_info_db().
+opt_finish(Args, Anno, FuncDb) ->
+ Id = get_func_id(Anno),
+ case FuncDb of
+ #{ Id := #func_info{exported=false,arg_types=ArgTypes} } ->
+ ParamInfo0 = maps:get(parameter_type_info, Anno, #{}),
+ ParamInfo = opt_finish_1(Args, ArgTypes, ParamInfo0),
+ {Anno#{ parameter_type_info => ParamInfo }, FuncDb};
+ #{} ->
+ {Anno, FuncDb}
+ end.
+
+opt_finish_1([Arg | Args], [TypeMap | TypeMaps], ParamInfo)
+ when is_map_key(Arg, ParamInfo); %% See join_arg_types/3
+ map_size(TypeMap) =:= 0 ->
+ opt_finish_1(Args, TypeMaps, ParamInfo);
+opt_finish_1([Arg | Args], [TypeMap | TypeMaps], ParamInfo0) ->
+ case join(maps:values(TypeMap)) of
+ any ->
+ opt_finish_1(Args, TypeMaps, ParamInfo0);
+ JoinedType ->
+ JoinedType = verified_type(JoinedType),
+ ParamInfo = ParamInfo0#{ Arg => validator_anno(JoinedType) },
+ opt_finish_1(Args, TypeMaps, ParamInfo)
+ end;
+opt_finish_1([], [], ParamInfo) ->
+ ParamInfo.
+
+validator_anno(#t_tuple{size=Size,exact=Exact,elements=Elements0}) ->
+ Elements = maps:fold(fun(Index, Type, Acc) ->
+ Key = beam_validator:type_anno(integer, Index),
+ Acc#{ Key => validator_anno(Type) }
+ end, #{}, Elements0),
+ beam_validator:type_anno(tuple, Size, Exact, Elements);
+validator_anno(#t_integer{elements={Same,Same}}) ->
+ beam_validator:type_anno(integer, Same);
+validator_anno(#t_integer{}) ->
+ beam_validator:type_anno(integer);
+validator_anno(float) ->
+ beam_validator:type_anno(float);
+validator_anno(#t_atom{elements=[Val]}) ->
+ beam_validator:type_anno(atom, Val);
+validator_anno(#t_atom{}=A) ->
+ case t_is_boolean(A) of
+ true -> beam_validator:type_anno(bool);
+ false -> beam_validator:type_anno(atom)
+ end;
+validator_anno(T) ->
+ beam_validator:type_anno(T).
+
+get_func_id(Anno) ->
+ #{func_info:={_Mod, Name, Arity}} = Anno,
+ #b_local{name=#b_literal{val=Name}, arity=Arity}.
-opt_1([{L,Blk}|Bs], #d{ls=Ls}=D) ->
+opt([{L,Blk}|Bs], #d{ls=Ls}=D, Acc) ->
case Ls of
#{L:=Ts} ->
- opt_2(L, Blk, Bs, Ts, D);
+ opt_1(L, Blk, Bs, Ts, D, Acc);
#{} ->
%% This block is never reached. Discard it.
- opt_1(Bs, D)
+ opt(Bs, D, Acc)
end;
-opt_1([], _) -> [].
-
-opt_2(L, #b_blk{is=Is0}=Blk0, Bs, Ts, D0) ->
- case Is0 of
- [#b_set{op=call,dst=Dst,
- args=[#b_remote{mod=#b_literal{val=Mod},
- name=#b_literal{val=Name}}=Rem|Args0]}=I0] ->
- case erl_bifs:is_exit_bif(Mod, Name, length(Args0)) of
- true ->
- %% This call will never reach the successor block.
- %% Rewrite the terminator to a 'ret', and remove
- %% all type information for this label. That will
- %% simplify the phi node in the former successor.
- Args = [simplify_arg(Arg, Ts) || Arg <- Args0],
- I = I0#b_set{args=[Rem|Args]},
- Ret = #b_ret{arg=Dst},
- Blk = Blk0#b_blk{is=[I],last=Ret},
- Ls = maps:remove(L, D0#d.ls),
- D = D0#d{ls=Ls},
- [{L,Blk}|opt_1(Bs, D)];
- false ->
- opt_3(L, Blk0, Bs, Ts, D0)
- end;
- _ ->
- opt_3(L, Blk0, Bs, Ts, D0)
+opt([], D, Acc) ->
+ #d{func_db=FuncDb,ret_type=NewRet} = D,
+ {reverse(Acc), FuncDb, NewRet}.
+
+opt_1(L, #b_blk{is=Is0,last=Last0}=Blk0, Bs, Ts0,
+ #d{ds=Ds0,sub=Sub0,func_db=Fdb0}=D0, Acc) ->
+ case opt_is(Is0, Ts0, Ds0, Fdb0, D0, Sub0, []) of
+ {Is,Ts,Ds,Fdb,Sub} ->
+ D1 = D0#d{ds=Ds,sub=Sub,func_db=Fdb},
+ Last1 = simplify_terminator(Last0, Sub, Ts, Ds),
+ Last = opt_terminator(Last1, Ts, Ds),
+ D = update_successors(Last, Ts, D1),
+ Blk = Blk0#b_blk{is=Is,last=Last},
+ opt(Bs, D, [{L,Blk}|Acc]);
+ {no_return,Ret,Is,Ds,Fdb,Sub} ->
+ %% This call will never reach the successor block.
+ %% Rewrite the terminator to a 'ret', and remove
+ %% all type information for this label. That can
+ %% potentially narrow the type of the phi node
+ %% in the former successor.
+ Ls = maps:remove(L, D0#d.ls),
+ RetType = join([none|D0#d.ret_type]),
+ D = D0#d{ds=Ds,ls=Ls,sub=Sub,
+ func_db=Fdb,ret_type=[RetType]},
+ Blk = Blk0#b_blk{is=Is,last=Ret},
+ opt(Bs, D, [{L,Blk}|Acc])
end.
-opt_3(L, #b_blk{is=Is0,last=Last0}=Blk0, Bs, Ts0, #d{ds=Ds0,ls=Ls0}=D0) ->
- {Is,Ts,Ds} = opt_is(Is0, Ts0, Ds0, Ls0, []),
- D1 = D0#d{ds=Ds},
- Last = opt_terminator(Last0, Ts, Ds),
- D = update_successors(Last, Ts, D1),
- Blk = Blk0#b_blk{is=Is,last=Last},
- [{L,Blk}|opt_1(Bs, D)].
+simplify_terminator(#b_br{bool=Bool}=Br, Sub, Ts, _Ds) ->
+ Br#b_br{bool=simplify_arg(Bool, Sub, Ts)};
+simplify_terminator(#b_switch{arg=Arg}=Sw, Sub, Ts, _Ds) ->
+ Sw#b_switch{arg=simplify_arg(Arg, Sub, Ts)};
+simplify_terminator(#b_ret{arg=Arg}=Ret, Sub, Ts, Ds) ->
+ %% Reducing the result of a call to a literal (fairly common for 'ok')
+ %% breaks tail call optimization.
+ case Ds of
+ #{ Arg := #b_set{op=call}} -> Ret;
+ #{} -> Ret#b_ret{arg=simplify_arg(Arg, Sub, Ts)}
+ end.
-opt_is([#b_set{op=phi,dst=#b_var{name=Dst},args=Args0}=I0|Is], Ts0, Ds0, Ls, Acc) ->
+opt_is([#b_set{op=phi,dst=Dst,args=Args0}=I0|Is],
+ Ts0, Ds0, Fdb, #d{ls=Ls}=D, Sub0, Acc) ->
%% Simplify the phi node by removing all predecessor blocks that no
%% longer exist or no longer branch to this block.
- Args = [P || {_,From}=P <- Args0, maps:is_key(From, Ls)],
- I = I0#b_set{args=Args},
- Ts = update_types(I, Ts0, Ds0),
- Ds = Ds0#{Dst=>I},
- opt_is(Is, Ts, Ds, Ls, [I|Acc]);
-opt_is([#b_set{dst=#b_var{name=Dst}}=I0|Is], Ts0, Ds0, Ls, Acc) ->
- I = simplify(I0, Ts0),
+ Args = [{simplify_arg(Arg, Sub0, Ts0),From} ||
+ {Arg,From} <- Args0, maps:is_key(From, Ls)],
+ case all_same(Args) of
+ true ->
+ %% Eliminate the phi node if there is just one source
+ %% value or if the values are identical.
+ [{Val,_}|_] = Args,
+ Sub = Sub0#{Dst=>Val},
+ opt_is(Is, Ts0, Ds0, Fdb, D, Sub, Acc);
+ false ->
+ I = I0#b_set{args=Args},
+ Ts = update_types(I, Ts0, Ds0),
+ Ds = Ds0#{Dst=>I},
+ opt_is(Is, Ts, Ds, Fdb, D, Sub0, [I|Acc])
+ end;
+opt_is([#b_set{op=call,args=Args0,dst=Dst}=I0|Is],
+ Ts0, Ds0, Fdb0, D, Sub0, Acc) ->
+ Args = simplify_args(Args0, Sub0, Ts0),
+ I1 = beam_ssa:normalize(I0#b_set{args=Args}),
+ {Ts1,Ds,Fdb,I2} = opt_call(I1, D, Ts0, Ds0, Fdb0),
+ case {map_get(Dst, Ts1),Is} of
+ {_,[#b_set{op=succeeded}]} ->
+ %% This call instruction is inside a try/catch
+ %% block. Don't attempt to optimize it.
+ opt_is(Is, Ts1, Ds, Fdb, D, Sub0, [I2|Acc]);
+ {none,_} ->
+ %% This call never returns. The rest of the
+ %% instructions will not be executed.
+ Ret = #b_ret{arg=Dst},
+ {no_return,Ret,reverse(Acc, [I2]),Ds,Fdb,Sub0};
+ {_,_} ->
+ case simplify_call(I2) of
+ #b_set{}=I ->
+ opt_is(Is, Ts1, Ds, Fdb, D, Sub0, [I|Acc]);
+ #b_literal{}=Lit ->
+ Sub = Sub0#{Dst=>Lit},
+ Ts = maps:remove(Dst, Ts1),
+ opt_is(Is, Ts, Ds0, Fdb, D, Sub, Acc);
+ #b_var{}=Var ->
+ Ts = maps:remove(Dst, Ts1),
+ Sub = Sub0#{Dst=>Var},
+ opt_is(Is, Ts, Ds0, Fdb, D, Sub, Acc)
+ end
+ end;
+opt_is([#b_set{op=succeeded,args=[Arg],dst=Dst}=I],
+ Ts0, Ds0, Fdb, D, Sub0, Acc) ->
+ case Ds0 of
+ #{ Arg := #b_set{op=call} } ->
+ %% The success check of a call is part of exception handling and
+ %% must not be optimized away. We still have to update its type
+ %% though.
+ Ts = update_types(I, Ts0, Ds0),
+ Ds = Ds0#{Dst=>I},
+
+ opt_is([], Ts, Ds, Fdb, D, Sub0, [I|Acc]);
+ #{} ->
+ Args = simplify_args([Arg], Sub0, Ts0),
+ Type = type(succeeded, Args, Ts0, Ds0),
+ case get_literal_from_type(Type) of
+ #b_literal{}=Lit ->
+ Sub = Sub0#{Dst=>Lit},
+ opt_is([], Ts0, Ds0, Fdb, D, Sub, Acc);
+ none ->
+ Ts = Ts0#{Dst=>Type},
+ Ds = Ds0#{Dst=>I},
+ opt_is([], Ts, Ds, Fdb, D, Sub0, [I|Acc])
+ end
+ end;
+opt_is([#b_set{args=Args0,dst=Dst}=I0|Is],
+ Ts0, Ds0, Fdb, D, Sub0, Acc) ->
+ Args = simplify_args(Args0, Sub0, Ts0),
+ I1 = beam_ssa:normalize(I0#b_set{args=Args}),
+ case simplify(I1, Ts0) of
+ #b_set{}=I2 ->
+ I = beam_ssa:normalize(I2),
+ Ts = update_types(I, Ts0, Ds0),
+ Ds = Ds0#{Dst=>I},
+ opt_is(Is, Ts, Ds, Fdb, D, Sub0, [I|Acc]);
+ #b_literal{}=Lit ->
+ Sub = Sub0#{Dst=>Lit},
+ opt_is(Is, Ts0, Ds0, Fdb, D, Sub, Acc);
+ #b_var{}=Var ->
+ case Is of
+ [#b_set{op=succeeded,dst=SuccDst,args=[Dst]}] ->
+ %% We must remove this 'succeeded' instruction.
+ Sub = Sub0#{Dst=>Var,SuccDst=>#b_literal{val=true}},
+ opt_is([], Ts0, Ds0, Fdb, D, Sub, Acc);
+ _ ->
+ Sub = Sub0#{Dst=>Var},
+ opt_is(Is, Ts0, Ds0, Fdb, D, Sub, Acc)
+ end
+ end;
+opt_is([], Ts, Ds, Fdb, _D, Sub, Acc) ->
+ {reverse(Acc), Ts, Ds, Fdb, Sub}.
+
+simplify_call(#b_set{op=call,args=[#b_remote{}=Rem|Args]}=I) ->
+ case Rem of
+ #b_remote{mod=#b_literal{val=Mod},
+ name=#b_literal{val=Name}} ->
+ case erl_bifs:is_pure(Mod, Name, length(Args)) of
+ true ->
+ simplify_remote_call(Mod, Name, Args, I);
+ false ->
+ I
+ end;
+ #b_remote{} ->
+ I
+ end;
+simplify_call(I) -> I.
+
+%% Simplify a remote call to a pure BIF.
+simplify_remote_call(erlang, '++', [#b_literal{val=[]},Tl], _I) ->
+ Tl;
+simplify_remote_call(erlang, setelement,
+ [#b_literal{val=Pos},
+ #b_literal{val=Tuple},
+ #b_var{}=Value], I)
+ when is_integer(Pos), 1 =< Pos, Pos =< tuple_size(Tuple) ->
+ %% Position is a literal integer and the shape of the
+ %% tuple is known.
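+    %% For example, setelement(2, {a,b,c}, Var) is rewritten to a
+    %% put_tuple instruction that builds {a,Var,c}.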
+ Els0 = [#b_literal{val=El} || El <- tuple_to_list(Tuple)],
+ {Bef,[_|Aft]} = split(Pos - 1, Els0),
+ Els = Bef ++ [Value|Aft],
+ I#b_set{op=put_tuple,args=Els};
+simplify_remote_call(Mod, Name, Args0, I) ->
+ case make_literal_list(Args0) of
+ none ->
+ I;
+ Args ->
+ %% The arguments are literals. Try to evaluate the BIF.
+ try apply(Mod, Name, Args) of
+ Val ->
+ case cerl:is_literal_term(Val) of
+ true ->
+ #b_literal{val=Val};
+ false ->
+ %% The value can't be expressed as a literal
+ %% (e.g. a pid).
+ I
+ end
+ catch
+ _:_ ->
+ %% Failed. Don't bother trying to optimize
+ %% the call.
+ I
+ end
+ end.
+
+opt_call(#b_set{dst=Dst,args=[#b_local{}=Callee|Args]}=I0, D, Ts0, Ds0, Fdb0) ->
+ {Ts, Ds, I} = opt_local_call(I0, Ts0, Ds0, Fdb0),
+ case Fdb0 of
+ #{ Callee := #func_info{exported=false,arg_types=ArgTypes0}=Info } ->
+            %% Update the argument types of *this exact call*; the types
+            %% will be joined later when the callee is optimized.
+ CallId = {D#d.func_id, Dst},
+ ArgTypes = update_arg_types(Args, ArgTypes0, CallId, Ts0),
+
+ Fdb = Fdb0#{ Callee => Info#func_info{arg_types=ArgTypes} },
+ {Ts, Ds, Fdb, I};
+ #{} ->
+ %% We can't narrow the argument types of exported functions as they
+ %% can receive anything as part of an external call.
+ {Ts, Ds, Fdb0, I}
+ end;
+opt_call(#b_set{dst=Dst}=I, _D, Ts0, Ds0, Fdb) ->
Ts = update_types(I, Ts0, Ds0),
- Ds = Ds0#{Dst=>I},
- opt_is(Is, Ts, Ds, Ls, [I|Acc]);
-opt_is([], Ts, Ds, _Ls, Acc) ->
- {reverse(Acc),Ts,Ds}.
+ Ds = Ds0#{ Dst => I },
+ {Ts, Ds, Fdb, I}.
+
+opt_local_call(#b_set{dst=Dst,args=[Id|_]}=I0, Ts0, Ds0, Fdb) ->
+ Type = case Fdb of
+ #{ Id := #func_info{ret_type=[T]} } -> T;
+ #{} -> any
+ end,
+ I = case Type of
+ any -> I0;
+ none -> I0;
+ _ -> beam_ssa:add_anno(result_type, validator_anno(Type), I0)
+ end,
+ Ts = Ts0#{ Dst => Type },
+ Ds = Ds0#{ Dst => I },
+ {Ts, Ds, I}.
+
+update_arg_types([Arg | Args], [TypeMap0 | TypeMaps], CallId, Ts) ->
+ %% Match contexts are treated as bitstrings when optimizing arguments, as
+ %% we don't yet support removing the "bs_start_match3" instruction.
+ NewType = case get_type(Arg, Ts) of
+ #t_bs_match{} -> {binary, 1};
+ Type -> Type
+ end,
+ TypeMap = TypeMap0#{ CallId => NewType },
+ [TypeMap | update_arg_types(Args, TypeMaps, CallId, Ts)];
+update_arg_types([], [], _CallId, _Ts) ->
+ [].
-simplify(#b_set{op={bif,element},args=[#b_literal{val=Index},Tuple]}=I, Ts) ->
+simplify(#b_set{op={bif,'and'},args=Args}=I, Ts) ->
+ case is_safe_bool_op(Args, Ts) of
+ true ->
+ case Args of
+ [_,#b_literal{val=false}=Res] -> Res;
+ [Res,#b_literal{val=true}] -> Res;
+ _ -> eval_bif(I, Ts)
+ end;
+ false ->
+ I
+ end;
+simplify(#b_set{op={bif,'or'},args=Args}=I, Ts) ->
+ case is_safe_bool_op(Args, Ts) of
+ true ->
+ case Args of
+ [Res,#b_literal{val=false}] -> Res;
+ [_,#b_literal{val=true}=Res] -> Res;
+ _ -> eval_bif(I, Ts)
+ end;
+ false ->
+ I
+ end;
+simplify(#b_set{op={bif,element},args=[#b_literal{val=Index},Tuple]}=I0, Ts) ->
case t_tuple_size(get_type(Tuple, Ts)) of
{_,Size} when is_integer(Index), 1 =< Index, Index =< Size ->
- I#b_set{op=get_tuple_element,args=[Tuple,#b_literal{val=Index-1}]};
+ I = I0#b_set{op=get_tuple_element,
+ args=[Tuple,#b_literal{val=Index-1}]},
+ simplify(I, Ts);
_ ->
- I
+ eval_bif(I0, Ts)
end;
simplify(#b_set{op={bif,hd},args=[List]}=I, Ts) ->
case get_type(List, Ts) of
cons ->
I#b_set{op=get_hd};
_ ->
- I
+ eval_bif(I, Ts)
end;
simplify(#b_set{op={bif,tl},args=[List]}=I, Ts) ->
case get_type(List, Ts) of
cons ->
I#b_set{op=get_tl};
_ ->
- I
+ eval_bif(I, Ts)
end;
simplify(#b_set{op={bif,size},args=[Term]}=I, Ts) ->
case get_type(Term, Ts) of
#t_tuple{} ->
- I#b_set{op={bif,tuple_size}};
+ simplify(I#b_set{op={bif,tuple_size}}, Ts);
+ _ ->
+ eval_bif(I, Ts)
+ end;
+simplify(#b_set{op={bif,tuple_size},args=[Term]}=I, Ts) ->
+ case get_type(Term, Ts) of
+ #t_tuple{size=Size,exact=true} ->
+ #b_literal{val=Size};
_ ->
I
end;
-simplify(#b_set{op={bif,'=='},args=Args0}=I0, Ts) ->
- Args = [simplify_arg(Arg, Ts) || Arg <- Args0],
- I = I0#b_set{args=Args},
+simplify(#b_set{op={bif,'=='},args=Args}=I, Ts) ->
Types = get_types(Args, Ts),
EqEq = case {meet(Types),join(Types)} of
{none,any} -> true;
@@ -164,50 +512,146 @@ simplify(#b_set{op={bif,'=='},args=Args0}=I0, Ts) ->
end,
case EqEq of
true ->
- I#b_set{op={bif,'=:='}};
+ simplify(I#b_set{op={bif,'=:='}}, Ts);
false ->
- I
+ eval_bif(I, Ts)
end;
-simplify(#b_set{op={bif,Op},args=Args0}=I0, Ts) ->
- Args = [simplify_arg(Arg, Ts) || Arg <- Args0],
- I = I0#b_set{args=Args},
+simplify(#b_set{op={bif,'=:='},args=[Same,Same]}, _Ts) ->
+ #b_literal{val=true};
+simplify(#b_set{op={bif,'=:='},args=[A1,_A2]=Args}=I, Ts) ->
+ [T1,T2] = get_types(Args, Ts),
+ case meet(T1, T2) of
+ none ->
+ #b_literal{val=false};
+ _ ->
+ case {t_is_boolean(T1),T2} of
+ {true,#t_atom{elements=[true]}} ->
+ %% Bool =:= true ==> Bool
+ A1;
+ {_,_} ->
+ eval_bif(I, Ts)
+ end
+ end;
+simplify(#b_set{op={bif,Op},args=Args}=I, Ts) ->
Types = get_types(Args, Ts),
case is_float_op(Op, Types) of
false ->
- I;
+ eval_bif(I, Ts);
true ->
AnnoArgs = [anno_float_arg(A) || A <- Types],
- beam_ssa:add_anno(float_op, AnnoArgs, I)
+ eval_bif(beam_ssa:add_anno(float_op, AnnoArgs, I), Ts)
end;
-simplify(#b_set{op=wait_timeout,args=[Timeout0]}=I, Ts) ->
- case simplify_arg(Timeout0, Ts) of
- #b_literal{val=infinity} ->
- I#b_set{op=wait,args=[]};
- Timeout ->
- I#b_set{args=[Timeout]}
+simplify(#b_set{op=get_tuple_element,args=[Tuple,#b_literal{val=N}]}=I, Ts) ->
+ case get_type(Tuple, Ts) of
+ #t_tuple{size=Size,elements=Es} when Size > N ->
+ ElemType = get_element_type(N + 1, Es),
+ case get_literal_from_type(ElemType) of
+ #b_literal{}=Lit -> Lit;
+ none -> I
+ end;
+ none ->
+ %% Will never be executed because of type conflict.
+ %% #b_literal{val=ignored};
+ I
end;
-simplify(#b_set{op=Op,args=Args0}=I, Ts) ->
- Safe = case Op of
- call -> true;
- put_list -> true;
- put_tuple -> true;
- _ -> false
- end,
- case Safe of
- true ->
- Args = [simplify_arg(Arg, Ts) || Arg <- Args0],
- I#b_set{args=Args};
+simplify(#b_set{op=is_nonempty_list,args=[Src]}=I, Ts) ->
+ case get_type(Src, Ts) of
+ any -> I;
+ list -> I;
+ cons -> #b_literal{val=true};
+ _ -> #b_literal{val=false}
+ end;
+simplify(#b_set{op=is_tagged_tuple,
+ args=[Src,#b_literal{val=Size},#b_literal{}=Tag]}=I, Ts) ->
+ simplify_is_record(I, get_type(Src, Ts), Size, Tag, Ts);
+simplify(#b_set{op=put_list,args=[#b_literal{val=H},
+ #b_literal{val=T}]}, _Ts) ->
+ #b_literal{val=[H|T]};
+simplify(#b_set{op=put_tuple,args=Args}=I, _Ts) ->
+ case make_literal_list(Args) of
+ none -> I;
+ List -> #b_literal{val=list_to_tuple(List)}
+ end;
+simplify(#b_set{op=wait_timeout,args=[#b_literal{val=0}]}, _Ts) ->
+ #b_literal{val=true};
+simplify(#b_set{op=wait_timeout,args=[#b_literal{val=infinity}]}=I, _Ts) ->
+ I#b_set{op=wait,args=[]};
+simplify(I, _Ts) -> I.
+
+make_literal_list(Args) ->
+ make_literal_list(Args, []).
+
+make_literal_list([#b_literal{val=H}|T], Acc) ->
+ make_literal_list(T, [H|Acc]);
+make_literal_list([_|_], _) ->
+ none;
+make_literal_list([], Acc) ->
+ reverse(Acc).
+
+is_safe_bool_op(Args, Ts) ->
+ [T1,T2] = get_types(Args, Ts),
+ t_is_boolean(T1) andalso t_is_boolean(T2).
+
+all_same([{H,_}|T]) ->
+ all(fun({E,_}) -> E =:= H end, T).
+
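+%% Evaluate a BIF at compile time when it is pure and enough is known
+%% about its arguments. For example, length/1 applied to a literal
+%% list folds to the literal length, and a type test such as
+%% is_list/1 folds to a boolean literal when the argument type makes
+%% the outcome certain.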
+eval_bif(#b_set{op={bif,Bif},args=Args}=I, Ts) ->
+ Arity = length(Args),
+ case erl_bifs:is_pure(erlang, Bif, Arity) of
false ->
- I
+ I;
+ true ->
+ case make_literal_list(Args) of
+ none ->
+ case get_types(Args, Ts) of
+ [any] ->
+ I;
+ [Type] ->
+ case will_succeed(Bif, Type) of
+ yes ->
+ #b_literal{val=true};
+ no ->
+ #b_literal{val=false};
+ maybe ->
+ I
+ end;
+ _ ->
+ I
+ end;
+ LitArgs ->
+ try apply(erlang, Bif, LitArgs) of
+ Val -> #b_literal{val=Val}
+ catch
+ error:_ -> I
+ end
+
+ end
end.
-simplify_arg(#b_var{}=Arg, Ts) ->
- Type = get_type(Arg, Ts),
- case get_literal_from_type(Type) of
- none -> Arg;
- #b_literal{}=Lit -> Lit
+simplify_args(Args, Sub, Ts) ->
+ [simplify_arg(Arg, Sub, Ts) || Arg <- Args].
+
+simplify_arg(#b_var{}=Arg0, Sub, Ts) ->
+ case sub_arg(Arg0, Sub) of
+ #b_literal{}=LitArg ->
+ LitArg;
+ #b_var{}=Arg ->
+ Type = get_type(Arg, Ts),
+ case get_literal_from_type(Type) of
+ none -> Arg;
+ #b_literal{}=Lit -> Lit
+ end
end;
-simplify_arg(Arg, _Ts) -> Arg.
+simplify_arg(#b_remote{mod=Mod,name=Name}=Rem, Sub, Ts) ->
+ Rem#b_remote{mod=simplify_arg(Mod, Sub, Ts),
+ name=simplify_arg(Name, Sub, Ts)};
+simplify_arg(Arg, _Sub, _Ts) -> Arg.
+
+sub_arg(#b_var{}=Old, Sub) ->
+ case Sub of
+ #{Old:=New} -> New;
+ #{} -> Old
+ end.
is_float_op('-', [float]) ->
true;
@@ -228,75 +672,130 @@ anno_float_arg(float) -> float;
anno_float_arg(_) -> convert.
opt_terminator(#b_br{bool=#b_literal{}}=Br, _Ts, _Ds) ->
- Br;
-opt_terminator(#b_br{bool=#b_var{name=V}=Var}=Br, Ts, Ds) ->
- BoolType = get_type(Var, Ts),
- case get_literal_from_type(BoolType) of
- #b_literal{}=BoolLit ->
- Br#b_br{bool=BoolLit};
- none ->
- #{V:=Set} = Ds,
- case Set of
- #b_set{op={bif,'=:='},args=[Bool,#b_literal{val=true}]} ->
- case t_is_boolean(get_type(Bool, Ts)) of
- true ->
- %% Bool =:= true ==> Bool
- simplify_not(Br#b_br{bool=Bool}, Ts, Ds);
- false ->
- Br
- end;
- #b_set{} ->
- simplify_not(Br, Ts, Ds)
- end
+ beam_ssa:normalize(Br);
+opt_terminator(#b_br{bool=#b_var{}}=Br, Ts, Ds) ->
+ simplify_not(Br, Ts, Ds);
+opt_terminator(#b_switch{arg=#b_literal{}}=Sw, _Ts, _Ds) ->
+ beam_ssa:normalize(Sw);
+opt_terminator(#b_switch{arg=#b_var{}=V}=Sw, Ts, Ds) ->
+ case get_type(V, Ts) of
+ any ->
+ beam_ssa:normalize(Sw);
+ Type ->
+ beam_ssa:normalize(opt_switch(Sw, Type, Ts, Ds))
end;
-opt_terminator(#b_switch{arg=#b_literal{val=Val0}=Arg,fail=Fail,list=List},
- _Ts, _Ds) ->
- {value,{_,L}} = search(fun({#b_literal{val=Val1},_}) ->
- Val1 =:= Val0
- end, List ++ [{Arg,Fail}]),
- #b_br{bool=#b_literal{val=true},succ=L,fail=L};
-opt_terminator(#b_switch{arg=V}=Sw0, Ts, Ds) ->
- case get_literal_from_type(Ts) of
- #b_literal{}=Lit ->
- Sw = Sw0#b_switch{arg=Lit},
- opt_terminator(Sw, Ts, Ds);
+opt_terminator(#b_ret{}=Ret, _Ts, _Ds) -> Ret.
+
+
+opt_switch(#b_switch{fail=Fail,list=List0}=Sw0, Type, Ts, Ds) ->
+ List = prune_switch_list(List0, Fail, Type, Ts),
+ Sw1 = Sw0#b_switch{list=List},
+ case Type of
+ #t_integer{elements={_,_}=Range} ->
+ simplify_switch_int(Sw1, Range);
+ #t_atom{elements=[_|_]} ->
+ case t_is_boolean(Type) of
+ true ->
+ #b_br{} = Br = simplify_switch_bool(Sw1, Ts, Ds),
+ opt_terminator(Br, Ts, Ds);
+ false ->
+ simplify_switch_atom(Type, Sw1)
+ end;
+ _ ->
+ Sw1
+ end.
+
+prune_switch_list([{_,Fail}|T], Fail, Type, Ts) ->
+ prune_switch_list(T, Fail, Type, Ts);
+prune_switch_list([{Arg,_}=Pair|T], Fail, Type, Ts) ->
+ case meet(get_type(Arg, Ts), Type) of
none ->
- case get_type(V, Ts) of
- #t_integer{elements={_,_}=Range} ->
- simplify_switch_int(Sw0, Range);
- Type ->
- case t_is_boolean(Type) of
- true ->
- case simplify_switch_bool(Sw0, Ts, Ds) of
- #b_br{}=Br ->
- opt_terminator(Br, Ts, Ds);
- Sw ->
- Sw
- end;
- false ->
- Sw0
- end
- end
+ %% Different types. This value can never match.
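+            %% For example, if the switch argument is known to be an
+            %% integer, a clause matching an atom literal is dropped.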
+ prune_switch_list(T, Fail, Type, Ts);
+ _ ->
+ [Pair|prune_switch_list(T, Fail, Type, Ts)]
end;
-opt_terminator(#b_ret{}=Ret, _Ts, _Ds) ->
- Ret.
+prune_switch_list([], _, _, _) -> [].
-update_successors(#b_br{bool=#b_literal{val=false},fail=S}, Ts, D) ->
- update_successor(S, Ts, D);
update_successors(#b_br{bool=#b_literal{val=true},succ=S}, Ts, D) ->
update_successor(S, Ts, D);
-update_successors(#b_br{bool=#b_var{name=V},succ=Succ,fail=Fail}, Ts, D0) ->
- D = update_successor(Fail, Ts#{V:=t_atom(false)}, D0),
- SuccTs = infer_types(V, Ts, D0),
- update_successor(Succ, SuccTs#{V:=t_atom(true)}, D);
-update_successors(#b_switch{arg=#b_var{name=V},fail=Fail,list=List}, Ts, D0) ->
- D = update_successor(Fail, Ts, D0),
- foldl(fun({Val,S}, A) ->
- T = get_type(Val, Ts),
- update_successor(S, Ts#{V=>T}, A)
- end, D, List);
-update_successors(#b_ret{}, _Ts, D) -> D.
+update_successors(#b_br{bool=#b_var{}=Bool,succ=Succ,fail=Fail}, Ts0, D0) ->
+ case cerl_sets:is_element(Bool, D0#d.once) of
+ true ->
+ %% This variable is defined in this block and is only
+ %% referenced by this br terminator. Therefore, there is
+ %% no need to include it in the type database passed on to
+ %% the successors of this block.
+ Ts = maps:remove(Bool, Ts0),
+ {SuccTs,FailTs} = infer_types_br(Bool, Ts, D0),
+ D = update_successor(Fail, FailTs, D0),
+ update_successor(Succ, SuccTs, D);
+ false ->
+ {SuccTs,FailTs} = infer_types_br(Bool, Ts0, D0),
+ D = update_successor_bool(Bool, false, Fail, FailTs, D0),
+ update_successor_bool(Bool, true, Succ, SuccTs, D)
+ end;
+update_successors(#b_switch{arg=#b_var{}=V,fail=Fail,list=List}, Ts, D0) ->
+ case cerl_sets:is_element(V, D0#d.once) of
+ true ->
+ %% This variable is defined in this block and is only
+ %% referenced by this switch terminator. Therefore, there is
+ %% no need to include it in the type database passed on to
+ %% the successors of this block.
+ D = update_successor(Fail, Ts, D0),
+ F = fun({Val,S}, A) ->
+ SuccTs0 = infer_types_switch(V, Val, Ts, D),
+ SuccTs = maps:remove(V, SuccTs0),
+ update_successor(S, SuccTs, A)
+ end,
+ foldl(F, D, List);
+ false ->
+            %% V cannot be equal to any of the values in List at the fail
+            %% block.
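+            %% For example, if V is known to be a boolean and List
+            %% contains the literal 'true', V can only be 'false' at
+            %% the fail block.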
+ FailTs = subtract_sw_list(V, List, Ts),
+ D = update_successor(Fail, FailTs, D0),
+ F = fun({Val,S}, A) ->
+ SuccTs = infer_types_switch(V, Val, Ts, D),
+ update_successor(S, SuccTs, A)
+ end,
+ foldl(F, D, List)
+ end;
+update_successors(#b_ret{arg=Arg}, Ts, D) ->
+ FuncId = D#d.func_id,
+ case D#d.ds of
+ #{ Arg := #b_set{op=call,args=[FuncId | _]} } ->
+ %% Returning a call to ourselves doesn't affect our own return
+ %% type.
+ D;
+ #{} ->
+ RetType = join([get_type(Arg, Ts) | D#d.ret_type]),
+ D#d{ret_type=[RetType]}
+ end.
+
+subtract_sw_list(V, List, Ts) ->
+ Ts#{ V := sub_sw_list_1(get_type(V, Ts), List, Ts) }.
+
+sub_sw_list_1(Type, [{Val,_}|T], Ts) ->
+ ValType = get_type(Val, Ts),
+ sub_sw_list_1(subtract(Type, ValType), T, Ts);
+sub_sw_list_1(Type, [], _Ts) ->
+ Type.
+update_successor_bool(#b_var{}=Var, BoolValue, S, Ts, D) ->
+ case t_is_boolean(get_type(Var, Ts)) of
+ true ->
+ update_successor(S, Ts#{Var:=t_atom(BoolValue)}, D);
+ false ->
+            %% The `br` terminator is preceded by an instruction that
+            %% does not produce a boolean value, such as `new_try_tag`.
+ update_successor(S, Ts, D)
+ end.
+
+update_successor(?BADARG_BLOCK, _Ts, #d{}=D) ->
+ %% We KNOW that no variables are used in the ?BADARG_BLOCK,
+ %% so there is no need to update the type information. That
+ %% can be a huge timesaver for huge functions.
+ D;
update_successor(S, Ts0, #d{ls=Ls}=D) ->
case Ls of
#{S:=Ts1} ->
@@ -306,48 +805,15 @@ update_successor(S, Ts0, #d{ls=Ls}=D) ->
D#d{ls=Ls#{S=>Ts0}}
end.
-update_types(#b_set{op=Op,dst=#b_var{name=Dst},args=Args}, Ts, Ds) ->
+update_types(#b_set{op=Op,dst=Dst,args=Args}, Ts, Ds) ->
T = type(Op, Args, Ts, Ds),
Ts#{Dst=>T}.
type(phi, Args, Ts, _Ds) ->
Types = [get_type(A, Ts) || {A,_} <- Args],
join(Types);
-type({bif,'=:='}, [Same,Same], _Ts, _Ds) ->
- t_atom(true);
-type({bif,'=:='}, [_,_]=Args, Ts, _Ds) ->
- case get_literals(Args, Ts) of
- [#b_literal{val=Lit1},#b_literal{val=Lit2}] ->
- t_atom(Lit1 =:= Lit2);
- [_,_] ->
- case meet(get_types(Args, Ts)) of
- none -> t_atom(false);
- _ -> t_boolean()
- end
- end;
-type({bif,tuple_size}, [Src], Ts, _Ds) ->
- case t_tuple_size(get_type(Src, Ts)) of
- {exact,Size} ->
- t_integer(Size);
- _ ->
- t_integer()
- end;
type({bif,'band'}, Args, Ts, _Ds) ->
band_type(Args, Ts);
-type({bif,Bif}, [Src]=Args, Ts, _Ds) ->
- case get_type(Src, Ts) of
- any ->
- bif_type(Bif, Args);
- Type ->
- case will_succeed(Bif, Type) of
- yes ->
- t_atom(true);
- no ->
- t_atom(false);
- maybe ->
- bif_type(Bif, Args)
- end
- end;
type({bif,Bif}, Args, Ts, _Ds) ->
case bif_type(Bif, Args) of
number ->
@@ -369,25 +835,59 @@ type(bs_extract, [Ctx], Ts, _Ds) ->
Type;
type(bs_match, Args, _Ts, _Ds) ->
#t_bs_match{type=bs_match_type(Args)};
+type(bs_get_tail, _Args, _Ts, _Ds) ->
+ {binary, 1};
type(call, [#b_remote{mod=#b_literal{val=Mod},
name=#b_literal{val=Name}}|Args], Ts, _Ds) ->
case {Mod,Name,Args} of
- {erlang,setelement,[Pos,Tuple,_]} ->
+ {erlang,setelement,[Pos,Tuple,Arg]} ->
case {get_type(Pos, Ts),get_type(Tuple, Ts)} of
- {#t_integer{elements={MinIndex,_}},#t_tuple{}=T}
- when MinIndex > 1 ->
- %% First element is not updated. The result
- %% will have the same type.
- T;
+ {#t_integer{elements={Index,Index}},
+ #t_tuple{elements=Es0,size=Size}=T} ->
+ %% This is an exact index, update the type of said element
+ %% or return 'none' if it's known to be out of bounds.
+ Es = set_element_type(Index, get_type(Arg, Ts), Es0),
+ case T#t_tuple.exact of
+ false ->
+ T#t_tuple{size=max(Index, Size),elements=Es};
+ true when Index =< Size ->
+ T#t_tuple{elements=Es};
+ true ->
+ none
+ end;
+ {#t_integer{elements={Min,Max}},
+ #t_tuple{elements=Es0,size=Size}=T} ->
+ %% We know this will land between Min and Max, so kill the
+ %% types for those indexes.
+ Es = maps:without(seq(Min, Max), Es0),
+ case T#t_tuple.exact of
+ false ->
+ T#t_tuple{elements=Es,size=max(Min, Size)};
+ true when Min =< Size ->
+ T#t_tuple{elements=Es,size=Size};
+ true ->
+ none
+ end;
{_,#t_tuple{}=T} ->
- %% Position is 1 or unknown. May update the first
- %% element of the tuple.
- T#t_tuple{elements=[]};
- {#t_integer{elements={MinIndex,_}},_} ->
- #t_tuple{size=MinIndex};
+ %% Position unknown, so we have to discard all element
+ %% information.
+ T#t_tuple{elements=#{}};
+ {#t_integer{elements={Min,_Max}},_} ->
+ #t_tuple{size=Min};
{_,_} ->
#t_tuple{}
end;
+ {erlang,'++',[List1,List2]} ->
+ case get_type(List1, Ts) =:= cons orelse
+ get_type(List2, Ts) =:= cons of
+ true -> cons;
+ false -> list
+ end;
+ {erlang,'--',[_,_]} ->
+ list;
+ {lists,F,Args} ->
+ Types = get_types(Args, Ts),
+ lists_function_type(F, Types);
{math,_,_} ->
case is_math_bif(Name, length(Args)) of
false -> any;
@@ -399,56 +899,36 @@ type(call, [#b_remote{mod=#b_literal{val=Mod},
false -> any
end
end;
-type(get_tuple_element, [Tuple,#b_literal{val=0}], Ts, _Ds) ->
- case get_type(Tuple, Ts) of
- #t_tuple{elements=[First]} ->
- get_type(#b_literal{val=First}, Ts);
- #t_tuple{} ->
- any
- end;
-type(is_nonempty_list, [Src], Ts, _Ds) ->
- case get_type(Src, Ts) of
- any ->
- t_boolean();
- list ->
- t_boolean();
- cons ->
- t_atom(true);
- _ ->
- t_atom(false)
- end;
-type(is_tagged_tuple, [Src,#b_literal{val=Size},#b_literal{val=Tag}], Ts, _Ds) ->
- case get_type(Src, Ts) of
- #t_tuple{exact=true,size=Size,elements=[Tag]} ->
- t_atom(true);
- #t_tuple{exact=true,size=ActualSize,elements=[]} ->
- if
- Size =/= ActualSize ->
- t_atom(false);
- true ->
- t_boolean()
- end;
- #t_tuple{exact=false} ->
- t_boolean();
- any ->
- t_boolean();
- _ ->
- t_atom(false)
- end;
+type(get_tuple_element, [Tuple, Offset], Ts, _Ds) ->
+ #t_tuple{size=Size,elements=Es} = get_type(Tuple, Ts),
+ #b_literal{val=N} = Offset,
+ true = Size > N, %Assertion.
+ get_element_type(N + 1, Es);
+type(is_nonempty_list, [_], _Ts, _Ds) ->
+ t_boolean();
+type(is_tagged_tuple, [_,#b_literal{},#b_literal{}], _Ts, _Ds) ->
+ t_boolean();
+type(put_map, _Args, _Ts, _Ds) ->
+ map;
type(put_list, _Args, _Ts, _Ds) ->
cons;
-type(put_tuple, Args, _Ts, _Ds) ->
- case Args of
- [#b_literal{val=First}|_] ->
- #t_tuple{exact=true,size=length(Args),elements=[First]};
- _ ->
- #t_tuple{exact=true,size=length(Args)}
- end;
-type(succeeded, [#b_var{name=Src}], Ts, Ds) ->
+type(put_tuple, Args, Ts, _Ds) ->
+ {Es, _} = foldl(fun(Arg, {Es0, Index}) ->
+ Type = get_type(Arg, Ts),
+ Es = set_element_type(Index, Type, Es0),
+ {Es, Index + 1}
+ end, {#{}, 1}, Args),
+ #t_tuple{exact=true,size=length(Args),elements=Es};
+type(succeeded, [#b_var{}=Src], Ts, Ds) ->
case maps:get(Src, Ds) of
#b_set{op={bif,Bif},args=BifArgs} ->
Types = get_types(BifArgs, Ts),
case {Bif,Types} of
+ {BoolOp,[T1,T2]} when BoolOp =:= 'and'; BoolOp =:= 'or' ->
+ case t_is_boolean(T1) andalso t_is_boolean(T2) of
+ true -> t_atom(true);
+ false -> t_boolean()
+ end;
{byte_size,[{binary,_}]} ->
t_atom(true);
{bit_size,[{binary,_}]} ->
@@ -478,6 +958,8 @@ type(succeeded, [#b_var{name=Src}], Ts, Ds) ->
#b_set{} ->
t_boolean()
end;
+type(succeeded, [#b_literal{}], _Ts, _Ds) ->
+ t_atom(true);
type(_, _, _, _) -> any.
arith_op_type(Args, Ts) ->
@@ -493,11 +975,74 @@ arith_op_type(Args, Ts) ->
(number, #t_integer{}) -> number;
(number, float) -> float;
(any, _) -> number;
- (_, any) -> number;
(Same, Same) -> Same;
(_, _) -> none
end, unknown, Types).
+lists_function_type(F, Types) ->
+ case {F,Types} of
+ %% Functions that return booleans.
+ {all,[_,_]} ->
+ t_boolean();
+ {any,[_,_]} ->
+ t_boolean();
+ {keymember,[_,_,_]} ->
+ t_boolean();
+ {member,[_,_]} ->
+ t_boolean();
+ {prefix,[_,_]} ->
+ t_boolean();
+ {suffix,[_,_]} ->
+ t_boolean();
+
+ %% Functions that return lists.
+ {dropwhile,[_,_]} ->
+ list;
+ {duplicate,[_,_]} ->
+ list;
+ {filter,[_,_]} ->
+ list;
+ {flatten,[_]} ->
+ list;
+ {map,[_Fun,List]} ->
+ same_length_type(List);
+ {MapFold,[_Fun,_Acc,List]} when MapFold =:= mapfoldl;
+ MapFold =:= mapfoldr ->
+ #t_tuple{size=2,exact=true,
+ elements=#{1=>same_length_type(List)}};
+ {partition,[_,_]} ->
+ t_two_tuple(list, list);
+ {reverse,[List]} ->
+ same_length_type(List);
+ {sort,[List]} ->
+ same_length_type(List);
+ {splitwith,[_,_]} ->
+ t_two_tuple(list, list);
+ {takewhile,[_,_]} ->
+ list;
+ {unzip,[List]} ->
+ ListType = same_length_type(List),
+ t_two_tuple(ListType, ListType);
+ {usort,[List]} ->
+ same_length_type(List);
+ {zip,[_,_]} ->
+ list;
+ {zipwith,[_,_,_]} ->
+ list;
+ {_,_} ->
+ any
+ end.
+
+%% For a lists function that returns a list of the same
+%% length as the input list, return the type of the list.
+same_length_type(cons) -> cons;
+same_length_type(nil) -> nil;
+same_length_type(_) -> list.
+
+t_two_tuple(Type1, Type2) ->
+ #t_tuple{size=2,exact=true,
+ elements=#{1=>Type1,2=>Type2}}.
+
%% will_succeed(TestOperation, Type) -> yes|no|maybe.
%% Test whether TestOperation applied to an argument of type Type
%% will succeed. Return yes, no, or maybe.
@@ -554,7 +1099,6 @@ will_succeed(is_list, Type) ->
case Type of
list -> yes;
cons -> yes;
- nil -> yes;
_ -> no
end;
will_succeed(is_map, Type) ->
@@ -577,8 +1121,6 @@ will_succeed(is_tuple, Type) ->
will_succeed(_, _) -> maybe.
-band_type([#b_literal{val=Int},Other], Ts) when is_integer(Int) ->
- band_type_1(Int, Other, Ts);
band_type([Other,#b_literal{val=Int}], Ts) when is_integer(Int) ->
band_type_1(Int, Other, Ts);
band_type([_,_], _) -> t_integer().
@@ -637,6 +1179,17 @@ bs_match_type(utf16, _) ->
bs_match_type(utf32, _) ->
?UNICODE_INT.
+simplify_switch_atom(#t_atom{elements=Atoms}, #b_switch{list=List0}=Sw) ->
+ case sort([A || {#b_literal{val=A},_} <- List0]) of
+ Atoms ->
+ %% All possible atoms are included in the list. The
+ %% failure label will never be used.
+ [{_,Fail}|List] = List0,
+ Sw#b_switch{fail=Fail,list=List};
+ _ ->
+ Sw
+ end.
+
simplify_switch_int(#b_switch{list=List0}=Sw, {Min,Max}) ->
List1 = sort(List0),
Vs = [V || {#b_literal{val=V},_} <- List1],
@@ -653,37 +1206,120 @@ eq_ranges([H], H, H) -> true;
eq_ranges([H|T], H, Max) -> eq_ranges(T, H+1, Max);
eq_ranges(_, _, _) -> false.
-simplify_switch_bool(#b_switch{arg=B,list=List0}=Sw, Ts, Ds) ->
- List = sort(List0),
- case List of
- [{#b_literal{val=false},Fail},{#b_literal{val=true},Succ}] ->
- simplify_not(#b_br{bool=B,succ=Succ,fail=Fail}, Ts, Ds);
- [_|_] ->
- Sw
- end.
-
-simplify_not(#b_br{bool=#b_var{name=V},succ=Succ,fail=Fail}=Br, Ts, Ds) ->
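+%% Simplify an is_tagged_tuple test when the type of the tuple is
+%% known. For example, the test folds to 'true' when the argument is
+%% known to be an exact tuple of the tested size whose first element
+%% is the tested tag, and to 'false' when the size or the tag can
+%% never match.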
+simplify_is_record(I, #t_tuple{exact=Exact,
+ size=Size,
+ elements=Es},
+ RecSize, RecTag, Ts) ->
+ TagType = maps:get(1, Es, any),
+ TagMatch = case get_literal_from_type(TagType) of
+ #b_literal{}=RecTag -> yes;
+ #b_literal{} -> no;
+ none ->
+ %% Is it at all possible for the tag to match?
+ case meet(get_type(RecTag, Ts), TagType) of
+ none -> no;
+ _ -> maybe
+ end
+ end,
+ if
+ Size =/= RecSize, Exact; Size > RecSize; TagMatch =:= no ->
+ #b_literal{val=false};
+ Size =:= RecSize, Exact, TagMatch =:= yes ->
+ #b_literal{val=true};
+ true ->
+ I
+ end;
+simplify_is_record(I, any, _Size, _Tag, _Ts) ->
+ I;
+simplify_is_record(_I, _Type, _Size, _Tag, _Ts) ->
+ #b_literal{val=false}.
+
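+%% Rewrite a switch on a known boolean into a two-way br. For example,
+%% a switch whose list is [{false,L1},{true,L2}] becomes a br with
+%% success label L2 and failure label L1; a boolean value missing from
+%% the list falls back to the switch's fail label.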
+simplify_switch_bool(#b_switch{arg=B,fail=Fail,list=List0}, Ts, Ds) ->
+ FalseVal = #b_literal{val=false},
+ TrueVal = #b_literal{val=true},
+ List1 = List0 ++ [{FalseVal,Fail},{TrueVal,Fail}],
+ {_,FalseLbl} = keyfind(FalseVal, 1, List1),
+ {_,TrueLbl} = keyfind(TrueVal, 1, List1),
+ Br = beam_ssa:normalize(#b_br{bool=B,succ=TrueLbl,fail=FalseLbl}),
+ simplify_not(Br, Ts, Ds).
+
+simplify_not(#b_br{bool=#b_var{}=V,succ=Succ,fail=Fail}=Br0, Ts, Ds) ->
case Ds of
#{V:=#b_set{op={bif,'not'},args=[Bool]}} ->
case t_is_boolean(get_type(Bool, Ts)) of
true ->
- Br#b_br{bool=Bool,succ=Fail,fail=Succ};
+ Br = Br0#b_br{bool=Bool,succ=Fail,fail=Succ},
+ beam_ssa:normalize(Br);
false ->
- Br
+ Br0
end;
#{} ->
- Br
- end.
+ Br0
+ end;
+simplify_not(#b_br{bool=#b_literal{}}=Br, _Ts, _Ds) -> Br.
+
+%%%
+%%% Calculate the set of variables that are only used once in the
+%%% terminator of the block that defines them. That will allow us to
+%%% discard type information for variables that will never be
+%%% referenced by the successor blocks, potentially improving
+%%% compilation times.
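+%%%
+%%% As a rough example, a variable defined as
+%%%
+%%%    Bool = is_nonempty_list L
+%%%
+%%% and referenced only by the br terminator of its own block ends up
+%%% in the set, so its type can be dropped before the type map is
+%%% passed on to the successor blocks.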
+%%%
+
+used_once(Linear, Args) ->
+ Map0 = used_once_1(reverse(Linear), #{}),
+ Map = maps:without(Args, Map0),
+ cerl_sets:from_list(maps:keys(Map)).
+
+used_once_1([{L,#b_blk{is=Is,last=Last}}|Bs], Uses0) ->
+ Uses1 = used_once_last_uses(beam_ssa:used(Last), L, Uses0),
+ Uses = used_once_2(reverse(Is), L, Uses1),
+ used_once_1(Bs, Uses);
+used_once_1([], Uses) -> Uses.
+
+used_once_2([#b_set{dst=Dst}=I|Is], L, Uses0) ->
+ Uses = used_once_uses(beam_ssa:used(I), L, Uses0),
+ case Uses of
+ #{Dst:=[L]} ->
+ used_once_2(Is, L, Uses);
+ #{} ->
+            %% Used more than once, or used once in
+            %% another block.
+ used_once_2(Is, L, maps:remove(Dst, Uses))
+ end;
+used_once_2([], _, Uses) -> Uses.
+
+used_once_uses([V|Vs], L, Uses) ->
+ case Uses of
+ #{V:=more_than_once} ->
+ used_once_uses(Vs, L, Uses);
+ #{} ->
+ %% Already used or first use is not in
+ %% a terminator.
+ used_once_uses(Vs, L, Uses#{V=>more_than_once})
+ end;
+used_once_uses([], _, Uses) -> Uses.
+
+used_once_last_uses([V|Vs], L, Uses) ->
+ case Uses of
+ #{V:=[_]} ->
+ %% Second time this variable is used.
+ used_once_last_uses(Vs, L, Uses#{V:=more_than_once});
+ #{V:=more_than_once} ->
+ %% Used at least twice before.
+ used_once_last_uses(Vs, L, Uses);
+ #{} ->
+ %% First time this variable is used.
+ used_once_last_uses(Vs, L, Uses#{V=>[L]})
+ end;
+used_once_last_uses([], _, Uses) -> Uses.
-get_literals(Values, Ts) ->
- [get_literal_from_type(get_type(Val, Ts)) || Val <- Values].
get_types(Values, Ts) ->
[get_type(Val, Ts) || Val <- Values].
-
-spec get_type(beam_ssa:value(), type_db()) -> type().
-get_type(#b_var{name=V}, Ts) ->
+get_type(#b_var{}=V, Ts) ->
#{V:=T} = Ts,
T;
get_type(#b_literal{val=Val}, _Ts) ->
@@ -701,48 +1337,144 @@ get_type(#b_literal{val=Val}, _Ts) ->
Val =:= {} ->
#t_tuple{exact=true};
is_tuple(Val) ->
- #t_tuple{exact=true,size=tuple_size(Val),
- elements=[element(1, Val)]};
+ {Es, _} = foldl(fun(E, {Es0, Index}) ->
+ Type = get_type(#b_literal{val=E}, #{}),
+ Es = set_element_type(Index, Type, Es0),
+ {Es, Index + 1}
+ end, {#{}, 1}, tuple_to_list(Val)),
+ #t_tuple{exact=true,size=tuple_size(Val),elements=Es};
Val =:= [] ->
nil;
true ->
any
end.
-infer_types(V, Ts, #d{ds=Ds}) ->
+%% infer_types(Var, Types, #d{}) -> {SuccTypes,FailTypes}
+%% Looking at the expression that defines the variable Var, infer
+%% the types for the variables in the arguments. Return the updated
+%% type database for the case that the expression evaluates to
+%%  true, and for the case that it evaluates to false.
+%%
+%% Here is an example. The variable being asked about is
+%% the variable Bool, which is defined like this:
+%%
+%% Bool = is_nonempty_list L
+%%
+%% If 'is_nonempty_list L' evaluates to 'true', L must be cons. The
+%% meet of the previously known type of L and 'cons'
+%% will be added to SuccTypes.
+%%
+%% On the other hand, if 'is_nonempty_list L' evaluates to false, L
+%% is not cons and cons can be subtracted from the previously known
+%% type for L. For example, if L was known to be 'list', subtracting
+%% 'cons' would give 'nil' as the only possible type. The result of the
+%% subtraction for L will be added to FailTypes.
+%%
+%% Here is another example, asking about the variable Bool:
+%%
+%% Head = bif:hd L
+%% Bool = succeeded Head
+%%
+%% 'succeeded Head' will evaluate to 'true' if the instruction that
+%% defined Head succeeded. In this case, it is the 'bif:hd L'
+%% instruction, which will succeed if L is 'cons'. Thus, the meet of
+%% the previous type for L and 'cons' will be added to SuccTypes.
+%%
+%% If 'succeeded Head' evaluates to 'false', it means that 'bif:hd L'
+%% failed and that L is not 'cons'. 'cons' can be subtracted from the
+%% previously known type for L and the result put in FailTypes.
+
+infer_types_br(#b_var{}=V, Ts, #d{ds=Ds}) ->
#{V:=#b_set{op=Op,args=Args}} = Ds,
- Types = infer_type(Op, Args, Ds),
+ Types0 = infer_type(Op, Args, Ds),
+
+ %% We must be careful with types inferred from '=:='.
+ %%
+ %% If we have seen L =:= [a], we know that L is 'cons' if the
+ %% comparison succeeds. However, if the comparison fails, L could
+ %% still be 'cons'. Therefore, we must not subtract 'cons' from the
+ %% previous type of L.
+ %%
+ %% However, it is safe to subtract a type inferred from '=:=' if
+ %% it is single-valued, e.g. if it is [] or the atom 'true'.
+ EqTypes0 = infer_eq_type(Op, Args, Ts, Ds),
+ {Types1,EqTypes} = partition(fun({_,T}) ->
+ is_singleton_type(T)
+ end, EqTypes0),
+
+ Types = Types1 ++ Types0,
+ {meet_types(EqTypes++Types, Ts),subtract_types(Types, Ts)}.
+
+infer_types_switch(V, Lit, Ts, #d{ds=Ds}) ->
+ Types = infer_eq_type({bif,'=:='}, [V, Lit], Ts, Ds),
meet_types(Types, Ts).
-infer_type({bif,element}, [#b_literal{val=Pos},#b_var{name=Tuple}], _Ds) ->
+infer_eq_type({bif,'=:='}, [#b_var{}=Src,#b_literal{}=Lit], Ts, Ds) ->
+ Def = maps:get(Src, Ds),
+ Type = get_type(Lit, Ts),
+ [{Src,Type} | infer_eq_lit(Def, Lit)];
+infer_eq_type({bif,'=:='}, [#b_var{}=Arg0,#b_var{}=Arg1], Ts, _Ds) ->
+ %% As an example, assume that L1 is known to be 'list', and L2 is
+ %% known to be 'cons'. Then if 'L1 =:= L2' evaluates to 'true', it can
+ %% be inferred that L1 is 'cons' (the meet of 'cons' and 'list').
+ Type0 = get_type(Arg0, Ts),
+ Type1 = get_type(Arg1, Ts),
+ Type = meet(Type0, Type1),
+ [{V,MeetType} ||
+ {V,OrigType,MeetType} <-
+ [{Arg0,Type0,Type},{Arg1,Type1,Type}],
+ OrigType =/= MeetType];
+infer_eq_type(_Op, _Args, _Ts, _Ds) ->
+ [].
+
+infer_eq_lit(#b_set{op={bif,tuple_size},args=[#b_var{}=Tuple]},
+ #b_literal{val=Size}) when is_integer(Size) ->
+ [{Tuple,#t_tuple{exact=true,size=Size}}];
+infer_eq_lit(#b_set{op=get_tuple_element,
+ args=[#b_var{}=Tuple,#b_literal{val=N}]},
+ #b_literal{}=Lit) ->
+ Index = N + 1,
+ Es = set_element_type(Index, get_type(Lit, #{}), #{}),
+ [{Tuple,#t_tuple{size=Index,elements=Es}}];
+infer_eq_lit(_, _) -> [].
+
+infer_type({bif,element}, [#b_literal{val=Pos},#b_var{}=Tuple], _Ds) ->
if
is_integer(Pos), 1 =< Pos ->
[{Tuple,#t_tuple{size=Pos}}];
true ->
[]
end;
-infer_type({bif,'=:='}, [#b_var{name=Src},#b_literal{}=Lit], Ds) ->
- Def = maps:get(Src, Ds),
- Type = get_type(Lit, #{}),
- [{Src,Type}|infer_tuple_size(Def, Lit) ++
- infer_first_element(Def, Lit)];
-infer_type({bif,Bif}, [#b_var{name=Src}]=Args, _Ds) ->
+infer_type({bif,element}, [#b_var{}=Position,#b_var{}=Tuple], _Ds) ->
+ [{Position,t_integer()},{Tuple,#t_tuple{}}];
+infer_type({bif,Bif}, [#b_var{}=Src]=Args, _Ds) ->
case inferred_bif_type(Bif, Args) of
any -> [];
T -> [{Src,T}]
end;
-infer_type({bif,is_map_key}, [_,#b_var{name=Src}], _Ds) ->
+infer_type({bif,binary_part}, [#b_var{}=Src,_], _Ds) ->
+ [{Src,{binary,8}}];
+infer_type({bif,is_map_key}, [_,#b_var{}=Src], _Ds) ->
[{Src,map}];
-infer_type({bif,map_get}, [_,#b_var{name=Src}], _Ds) ->
+infer_type({bif,map_get}, [_,#b_var{}=Src], _Ds) ->
[{Src,map}];
-infer_type(bs_start_match, [#b_var{name=Bin}], _Ds) ->
+infer_type({bif,Bif}, [_,_]=Args, _Ds) ->
+ case inferred_bif_type(Bif, Args) of
+ any -> [];
+ T -> [{A,T} || #b_var{}=A <- Args]
+ end;
+infer_type({bif,binary_part}, [#b_var{}=Src,Pos,Len], _Ds) ->
+ [{Src,{binary,8}}|
+ [{V,t_integer()} || #b_var{}=V <- [Pos,Len]]];
+infer_type(bs_start_match, [#b_var{}=Bin], _Ds) ->
[{Bin,{binary,1}}];
-infer_type(is_nonempty_list, [#b_var{name=Src}], _Ds) ->
+infer_type(is_nonempty_list, [#b_var{}=Src], _Ds) ->
[{Src,cons}];
-infer_type(is_tagged_tuple, [#b_var{name=Src},#b_literal{val=Size},
- #b_literal{val=Tag}], _Ds) ->
- [{Src,#t_tuple{exact=true,size=Size,elements=[Tag]}}];
-infer_type(succeeded, [#b_var{name=Src}], Ds) ->
+infer_type(is_tagged_tuple, [#b_var{}=Src,#b_literal{val=Size},
+ #b_literal{}=Tag], _Ds) ->
+ Es = set_element_type(1, get_type(Tag, #{}), #{}),
+ [{Src,#t_tuple{exact=true,size=Size,elements=Es}}];
+infer_type(succeeded, [#b_var{}=Src], Ds) ->
#b_set{op=Op,args=Args} = maps:get(Src, Ds),
infer_type(Op, Args, Ds);
infer_type(_Op, _Args, _Ds) ->
@@ -755,7 +1487,6 @@ infer_type(_Op, _Args, _Ds) ->
%% Note that the following BIFs are handled elsewhere:
%%
%% band/2
-%% tuple_size/1
bif_type(abs, [_]) -> number;
bif_type(bit_size, [_]) -> t_integer();
@@ -766,9 +1497,12 @@ bif_type(floor, [_]) -> t_integer();
bif_type(is_map_key, [_,_]) -> t_boolean();
bif_type(length, [_]) -> t_integer();
bif_type(map_size, [_]) -> t_integer();
+bif_type(node, []) -> #t_atom{};
+bif_type(node, [_]) -> #t_atom{};
bif_type(round, [_]) -> t_integer();
bif_type(size, [_]) -> t_integer();
bif_type(trunc, [_]) -> t_integer();
+bif_type(tuple_size, [_]) -> t_integer();
bif_type('bnot', [_]) -> t_integer();
bif_type('bor', [_,_]) -> t_integer();
bif_type('bsl', [_,_]) -> t_integer();
@@ -803,26 +1537,35 @@ inferred_bif_type(is_number, [_]) -> number;
inferred_bif_type(is_tuple, [_]) -> #t_tuple{};
inferred_bif_type(abs, [_]) -> number;
inferred_bif_type(bit_size, [_]) -> {binary,1};
+inferred_bif_type('bnot', [_]) -> t_integer();
inferred_bif_type(byte_size, [_]) -> {binary,1};
inferred_bif_type(ceil, [_]) -> number;
inferred_bif_type(float, [_]) -> number;
inferred_bif_type(floor, [_]) -> number;
+inferred_bif_type(hd, [_]) -> cons;
+inferred_bif_type(length, [_]) -> list;
+inferred_bif_type(map_size, [_]) -> map;
+inferred_bif_type('not', [_]) -> t_boolean();
inferred_bif_type(round, [_]) -> number;
inferred_bif_type(trunc, [_]) -> number;
+inferred_bif_type(tl, [_]) -> cons;
inferred_bif_type(tuple_size, [_]) -> #t_tuple{};
+inferred_bif_type('and', [_,_]) -> t_boolean();
+inferred_bif_type('or', [_,_]) -> t_boolean();
+inferred_bif_type('xor', [_,_]) -> t_boolean();
+inferred_bif_type('band', [_,_]) -> t_integer();
+inferred_bif_type('bor', [_,_]) -> t_integer();
+inferred_bif_type('bsl', [_,_]) -> t_integer();
+inferred_bif_type('bsr', [_,_]) -> t_integer();
+inferred_bif_type('bxor', [_,_]) -> t_integer();
+inferred_bif_type('div', [_,_]) -> t_integer();
+inferred_bif_type('rem', [_,_]) -> t_integer();
+inferred_bif_type('+', [_,_]) -> number;
+inferred_bif_type('-', [_,_]) -> number;
+inferred_bif_type('*', [_,_]) -> number;
+inferred_bif_type('/', [_,_]) -> number;
inferred_bif_type(_, _) -> any.
-infer_tuple_size(#b_set{op={bif,tuple_size},args=[#b_var{name=Tuple}]},
- #b_literal{val=Size}) when is_integer(Size) ->
- [{Tuple,#t_tuple{exact=true,size=Size}}];
-infer_tuple_size(_, _) -> [].
-
-infer_first_element(#b_set{op=get_tuple_element,
- args=[#b_var{name=Tuple},#b_literal{val=0}]},
- #b_literal{val=First}) ->
- [{Tuple,#t_tuple{size=1,elements=[First]}}];
-infer_first_element(_, _) -> [].
-
is_math_bif(cos, 1) -> true;
is_math_bif(cosh, 1) -> true;
is_math_bif(sin, 1) -> true;
@@ -918,6 +1661,22 @@ t_tuple_size(#t_tuple{size=Size,exact=true}) ->
t_tuple_size(_) ->
none.
+is_singleton_type(Type) ->
+ get_literal_from_type(Type) =/= none.
+
+get_element_type(Index, Es) ->
+ case Es of
+ #{ Index := T } -> T;
+ #{} -> any
+ end.
+
+set_element_type(_Key, none, Es) ->
+ Es;
+set_element_type(Key, any, Es) ->
+ maps:remove(Key, Es);
+set_element_type(Key, Type, Es) ->
+ Es#{ Key => Type }.
+
%% join(Type1, Type2) -> Type
%% Return the "join" of Type1 and Type2. The join is a more general
%% type than Type1 and Type2. For example:
@@ -965,15 +1724,41 @@ join(#t_integer{}, number) -> number;
join(number, #t_integer{}) -> number;
join(float, number) -> number;
join(number, float) -> number;
-join(#t_tuple{size=Sz,exact=Exact1}, #t_tuple{size=Sz,exact=Exact2}) ->
- Exact = Exact1 and Exact2,
- #t_tuple{size=Sz,exact=Exact};
-join(#t_tuple{size=Sz1}, #t_tuple{size=Sz2}) ->
- #t_tuple{size=min(Sz1, Sz2)};
+join(#t_tuple{size=Sz,exact=ExactA,elements=EsA},
+ #t_tuple{size=Sz,exact=ExactB,elements=EsB}) ->
+ Exact = ExactA and ExactB,
+ Es = join_tuple_elements(Sz, EsA, EsB),
+ #t_tuple{size=Sz,exact=Exact,elements=Es};
+join(#t_tuple{size=SzA,elements=EsA}, #t_tuple{size=SzB,elements=EsB}) ->
+ Sz = min(SzA, SzB),
+ Es = join_tuple_elements(Sz, EsA, EsB),
+ #t_tuple{size=Sz,elements=Es};
join(_T1, _T2) ->
%%io:format("~p ~p\n", [_T1,_T2]),
any.
+join_tuple_elements(MinSize, EsA, EsB) ->
+ Es0 = join_elements(EsA, EsB),
+ maps:filter(fun(Index, _Type) -> Index =< MinSize end, Es0).
+
+join_elements(Es1, Es2) ->
+ Keys = if
+ map_size(Es1) =< map_size(Es2) -> maps:keys(Es1);
+ map_size(Es1) > map_size(Es2) -> maps:keys(Es2)
+ end,
+ join_elements_1(Keys, Es1, Es2, #{}).
+
+join_elements_1([Key | Keys], Es1, Es2, Acc0) ->
+ case {Es1, Es2} of
+ {#{ Key := Type1 }, #{ Key := Type2 }} ->
+ Acc = set_element_type(Key, join(Type1, Type2), Acc0),
+ join_elements_1(Keys, Es1, Es2, Acc);
+ {#{}, #{}} ->
+ join_elements_1(Keys, Es1, Es2, Acc0)
+ end;
+join_elements_1([], _Es1, _Es2, Acc) ->
+ Acc.
+
gcd(A, B) ->
case A rem B of
0 -> B;
@@ -982,14 +1767,40 @@ gcd(A, B) ->
meet_types([{V,T0}|Vs], Ts) ->
#{V:=T1} = Ts,
- T = meet(T0, T1),
- meet_types(Vs, Ts#{V:=T});
+ case meet(T0, T1) of
+ T1 -> meet_types(Vs, Ts);
+ T -> meet_types(Vs, Ts#{V:=T})
+ end;
meet_types([], Ts) -> Ts.
meet([T1,T2|Ts]) ->
meet([meet(T1, T2)|Ts]);
meet([T]) -> T.
+subtract_types([{V,T0}|Vs], Ts) ->
+ #{V:=T1} = Ts,
+ case subtract(T1, T0) of
+ T1 -> subtract_types(Vs, Ts);
+ T -> subtract_types(Vs, Ts#{V:=T})
+ end;
+subtract_types([], Ts) -> Ts.
+
+%% subtract(Type1, Type2) -> Type.
+%% Subtract Type2 from Type1. Example:
+%%
+%% subtract(list, cons) -> nil
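+%%
+%%  and for atom sets:
+%%
+%%    subtract(#t_atom{elements=[a,b]}, #t_atom{elements=[a]}) ->
+%%        #t_atom{elements=[b]}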
+
+subtract(#t_atom{elements=[_|_]=Set0}, #t_atom{elements=[_|_]=Set1}) ->
+ case ordsets:subtract(Set0, Set1) of
+ [] -> none;
+ [_|_]=Set -> #t_atom{elements=Set}
+ end;
+subtract(number, float) -> #t_integer{};
+subtract(number, #t_integer{elements=any}) -> float;
+subtract(list, cons) -> nil;
+subtract(list, nil) -> cons;
+subtract(T, _) -> T.
+
%% meet(Type1, Type2) -> Type
%% Return the "meet" of Type1 and Type2. The meet is a narrower
%% type than Type1 and Type2. For example:
@@ -1025,13 +1836,9 @@ meet(#t_integer{elements={Min1,Max1}},
#t_integer{elements={Min2,Max2}}) ->
#t_integer{elements={max(Min1, Min2),min(Max1, Max2)}};
meet(#t_integer{}=T, number) -> T;
-meet(float, number) -> float;
-meet(#t_integer{}=T, number) -> T;
-meet(float, number) -> float;
+meet(float=T, number) -> T;
meet(number, #t_integer{}=T) -> T;
-meet(#t_integer{}=T, number) -> T;
meet(number, float=T) -> T;
-meet(float=T, number) -> T;
meet(list, cons) -> cons;
meet(list, nil) -> nil;
meet(cons, list) -> cons;
@@ -1048,9 +1855,6 @@ meet(_, _) ->
%% Inconsistent types. There will be an exception at runtime.
none.
-meet_tuples(#t_tuple{elements=[E1]}, #t_tuple{elements=[E2]})
- when E1 =/= E2 ->
- none;
meet_tuples(#t_tuple{size=Sz1,exact=true},
#t_tuple{size=Sz2,exact=true}) when Sz1 =/= Sz2 ->
none;
@@ -1058,12 +1862,31 @@ meet_tuples(#t_tuple{size=Sz1,exact=Ex1,elements=Es1},
#t_tuple{size=Sz2,exact=Ex2,elements=Es2}) ->
Size = max(Sz1, Sz2),
Exact = Ex1 or Ex2,
- Es = case {Es1,Es2} of
- {[],[_|_]} -> Es2;
- {[_|_],[]} -> Es1;
- {_,_} -> Es1
- end,
- #t_tuple{size=Size,exact=Exact,elements=Es}.
+ case meet_elements(Es1, Es2) of
+ none ->
+ none;
+ Es ->
+ #t_tuple{size=Size,exact=Exact,elements=Es}
+ end.
+
+meet_elements(Es1, Es2) ->
+ Keys = maps:keys(Es1) ++ maps:keys(Es2),
+ meet_elements_1(Keys, Es1, Es2, #{}).
+
+meet_elements_1([Key | Keys], Es1, Es2, Acc) ->
+ case {Es1, Es2} of
+ {#{ Key := Type1 }, #{ Key := Type2 }} ->
+ case meet(Type1, Type2) of
+ none -> none;
+ Type -> meet_elements_1(Keys, Es1, Es2, Acc#{ Key => Type })
+ end;
+ {#{ Key := Type1 }, _} ->
+ meet_elements_1(Keys, Es1, Es2, Acc#{ Key => Type1 });
+ {_, #{ Key := Type2 }} ->
+ meet_elements_1(Keys, Es1, Es2, Acc#{ Key => Type2 })
+ end;
+meet_elements_1([], _Es1, _Es2, Acc) ->
+ Acc.
%% verified_type(Type) -> Type
%% Returns the passed in type if it is one of the defined types.
@@ -1102,5 +1925,13 @@ verified_type(map=T) -> T;
verified_type(nil=T) -> T;
verified_type(cons=T) -> T;
verified_type(number=T) -> T;
-verified_type(#t_tuple{}=T) -> T;
+verified_type(#t_tuple{size=Size,elements=Es}=T) ->
+ %% All known elements must have a valid index and type. 'any' is prohibited
+ %% since it's implicit and should never be present in the map.
+ maps:fold(fun(Index, Element, _) when is_integer(Index),
+ 1 =< Index, Index =< Size,
+ Element =/= any, Element =/= none ->
+ verified_type(Element)
+ end, [], Es),
+ T;
verified_type(float=T) -> T.
diff --git a/lib/compiler/src/beam_trim.erl b/lib/compiler/src/beam_trim.erl
index 1acbedd45b..acf3838da4 100644
--- a/lib/compiler/src/beam_trim.erl
+++ b/lib/compiler/src/beam_trim.erl
@@ -21,12 +21,11 @@
-module(beam_trim).
-export([module/2]).
--import(lists, [reverse/1,reverse/2,splitwith/2,sort/1]).
+-import(lists, [any/2,member/2,reverse/1,reverse/2,splitwith/2,sort/1]).
-record(st,
- {safe :: gb_sets:set(beam_asm:label()), %Safe labels.
- lbl :: beam_utils:code_index() %Code at each label.
- }).
+ {safe :: cerl_sets:set(beam_asm:label()) %Safe labels.
+ }).
-spec module(beam_utils:module_code(), [compile:option()]) ->
{'ok',beam_utils:module_code()}.
@@ -36,10 +35,15 @@ module({Mod,Exp,Attr,Fs0,Lc}, _Opts) ->
{ok,{Mod,Exp,Attr,Fs,Lc}}.
function({function,Name,Arity,CLabel,Is0}) ->
- %%ok = io:fwrite("~w: ~p\n", [?LINE,{Name,Arity}]),
- St = #st{safe=safe_labels(Is0, []),lbl=beam_utils:index_labels(Is0)},
- Is = trim(Is0, St, []),
- {function,Name,Arity,CLabel,Is}.
+ try
+ St = #st{safe=safe_labels(Is0, [])},
+ Is = trim(Is0, St, []),
+ {function,Name,Arity,CLabel,Is}
+ catch
+ Class:Error:Stack ->
+ io:fwrite("Function: ~w/~w\n", [Name,Arity]),
+ erlang:raise(Class, Error, Stack)
+ end.
trim([{kill,_}|_]=Is0, St, Acc) ->
{Kills0,Is1} = splitwith(fun({kill,_}) -> true;
@@ -47,14 +51,33 @@ trim([{kill,_}|_]=Is0, St, Acc) ->
end, Is0),
Kills = sort(Kills0),
try
- {FrameSize,Layout} = frame_layout(Is1, Kills, St),
- Configs = trim_instructions(Layout),
- try_remap(Configs, Is1, FrameSize)
- of
+ %% Find out the size and layout of the stack frame.
+ %% Example of a layout:
+ %%
+            %% [{kill,{y,0}},{dead,{y,1}},{live,{y,2}},{kill,{y,3}}]
+ %%
+ %% That means that y0 and y3 are to be killed, that y1
+ %% has been killed previously, and that y2 is live.
+ {FrameSize,Layout} = frame_layout(Is1, Kills, St),
+
+ %% Calculate all recipes that are not worse in terms
+ %% of estimated execution time. The recipes are ordered
+            %% in descending order of how much they trim.
+ Recipes = trim_recipes(Layout),
+
+ %% Try the recipes in order. A recipe may not work out because
+ %% a register that was previously killed may be
+ %% resurrected. If that happens, the next recipe, which trims
+ %% less, will be tried.
+ try_remap(Recipes, Is1, FrameSize)
+ of
{Is,TrimInstr} ->
+ %% One of the recipes was applied.
trim(Is, St, reverse(TrimInstr)++Acc)
catch
not_possible ->
+ %% No recipe worked out. Use the original kill
+ %% instructions.
trim(Is1, St, reverse(Kills, Acc))
end;
trim([I|Is], St, Acc) ->
@@ -62,34 +85,42 @@ trim([I|Is], St, Acc) ->
trim([], _, Acc) ->
reverse(Acc).
-%% trim_instructions([{kill,R}|{live,R}|{dead,R}]) -> {[Instruction],MapFun}
-%% Figure out the sequence of moves and trim to use.
+%% trim_recipes([{kill,R}|{live,R}|{dead,R}]) -> [Recipe].
+%% Recipe = {Kills,NumberToTrim,Moves}
+%% Kills = [{kill,Y}]
+%% Moves = [{move,SrcY,DstY}]
+%%
+%% Calculate how to best trim the stack and kill the correct
+%% Y registers. Return a list of possible recipes. The best
+%% recipe (the one that trims the most) is first in the list.
+%% All of the recipes are no worse in estimated execution time
+%% than the original sequences of kill instructions.
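+%%
+%%  For example, a layout such as
+%%
+%%      [{kill,{y,0}},{kill,{y,1}},{live,{y,2}}]
+%%
+%%  gives recipes like
+%%
+%%      [{[{live,{y,2}}],2,[]},
+%%       {[{kill,{y,1}},{live,{y,2}}],1,[]}]
+%%
+%%  that is, either trim away both y0 and y1 without any moves, or
+%%  trim away only y0 and keep the kill instruction for y1.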
-trim_instructions(Layout) ->
+trim_recipes(Layout) ->
Cost = length([I || {kill,_}=I <- Layout]),
- trim_instructions_1(Layout, 0, [], {Cost,[]}).
+ trim_recipes_1(Layout, 0, [], {Cost,[]}).
-trim_instructions_1([{kill,{y,Trim0}}|Ks], Trim0, Moves, Config0) ->
+trim_recipes_1([{kill,{y,Trim0}}|Ks], Trim0, Moves, Recipes0) ->
Trim = Trim0 + 1,
- Config = save_config(Ks, Trim, Moves, Config0),
- trim_instructions_1(Ks, Trim, Moves, Config);
-trim_instructions_1([{dead,{y,Trim0}}|Ks], Trim0, Moves, Config0) ->
+ Recipes = save_recipe(Ks, Trim, Moves, Recipes0),
+ trim_recipes_1(Ks, Trim, Moves, Recipes);
+trim_recipes_1([{dead,{y,Trim0}}|Ks], Trim0, Moves, Recipes0) ->
Trim = Trim0 + 1,
- Config = save_config(Ks, Trim, Moves, Config0),
- trim_instructions_1(Ks, Trim, Moves, Config);
-trim_instructions_1([{live,{y,Trim0}=Src}|Ks0], Trim0, Moves0, Config0) ->
+ Recipes = save_recipe(Ks, Trim, Moves, Recipes0),
+ trim_recipes_1(Ks, Trim, Moves, Recipes);
+trim_recipes_1([{live,{y,Trim0}=Src}|Ks0], Trim0, Moves0, Recipes0) ->
case take_last_dead(Ks0) of
none ->
- {_,ConfigList} = Config0,
- ConfigList;
+ {_,RecipesList} = Recipes0,
+ RecipesList;
{Dst,Ks} ->
Trim = Trim0 + 1,
Moves = [{move,Src,Dst}|Moves0],
- Config = save_config(Ks, Trim, Moves, Config0),
- trim_instructions_1(Ks, Trim, Moves, Config)
+ Recipes = save_recipe(Ks, Trim, Moves, Recipes0),
+ trim_recipes_1(Ks, Trim, Moves, Recipes)
end;
-trim_instructions_1([], _, _, {_,ConfigList}) ->
- ConfigList.
+trim_recipes_1([], _, _, {_,RecipesList}) ->
+ RecipesList.
take_last_dead(L) ->
take_last_dead_1(reverse(L)).
@@ -100,28 +131,48 @@ take_last_dead_1([{dead,Reg}|Is]) ->
{Reg,reverse(Is)};
take_last_dead_1(_) -> none.
-save_config(Ks, Trim, Moves, {MaxCost,Acc}=Config) ->
- case config_cost(Ks, Moves) of
- Cost when Cost =< MaxCost ->
- {MaxCost,[{Ks,Trim,Moves}|Acc]};
+save_recipe(Ks, Trim, Moves, {MaxCost,Acc}=Recipes) ->
+ case recipe_cost(Ks, Moves) of
+ Cost when Cost =< MaxCost ->
+ %% The price is right.
+ {MaxCost,[{Ks,Trim,Moves}|Acc]};
_Cost ->
- Config
+ %% Too expensive.
+ Recipes
end.
-config_cost(Ks, Moves) ->
+recipe_cost(Ks, Moves) ->
%% We estimate that a {move,{y,_},{y,_}} instruction is roughly twice as
%% expensive as a {kill,{y,_}} instruction. A {trim,_} instruction is
%% roughly as expensive as a {kill,{y,_}} instruction.
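+    %% For example, a recipe with one move and one remaining kill
+    %% instruction costs 1 (trim) + 2 (move) + 1 (kill) = 4 units.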
- config_cost_1(Ks, 1+2*length(Moves)).
+ recipe_cost_1(Ks, 1+2*length(Moves)).
-config_cost_1([{kill,_}|Ks], Cost) ->
- config_cost_1(Ks, Cost+1);
-config_cost_1([_|Ks], Cost) ->
- config_cost_1(Ks, Cost);
-config_cost_1([], Cost) -> Cost.
+recipe_cost_1([{kill,_}|Ks], Cost) ->
+ recipe_cost_1(Ks, Cost+1);
+recipe_cost_1([_|Ks], Cost) ->
+ recipe_cost_1(Ks, Cost);
+recipe_cost_1([], Cost) -> Cost.
-expand_config({Layout,Trim,Moves}, FrameSize) ->
+%% try_remap([Recipe], [Instruction], FrameSize) ->
+%% {[Instruction],[TrimInstruction]}.
+%% Try to renumber Y registers in the instruction stream. The
+%%  first recipe that works is used.
+%%
+%%  This function throws a `not_possible` exception if none of
+%%  the recipes can be applied.
+
+try_remap([R|Rs], Is, FrameSize) ->
+ {TrimInstr,Map} = expand_recipe(R, FrameSize),
+ try
+ {remap(Is, Map, []),TrimInstr}
+ catch
+ throw:not_possible ->
+ try_remap(Rs, Is, FrameSize)
+ end;
+try_remap([], _, _) -> throw(not_possible).
+
+expand_recipe({Layout,Trim,Moves}, FrameSize) ->
Kills = [Kill || {kill,_}=Kill <- Layout],
{Kills++reverse(Moves, [{trim,Trim,FrameSize-Trim}]),create_map(Trim, Moves)}.
@@ -132,16 +183,16 @@ create_map(Trim, []) ->
(Any) -> Any
end;
create_map(Trim, Moves) ->
- GbTree0 = [{Src,Dst-Trim} || {move,{y,Src},{y,Dst}} <- Moves],
- GbTree = gb_trees:from_orddict(sort(GbTree0)),
- IllegalTargets = gb_sets:from_list([Dst || {move,_,{y,Dst}} <- Moves]),
+ Map0 = [{Src,Dst-Trim} || {move,{y,Src},{y,Dst}} <- Moves],
+ Map = maps:from_list(Map0),
+ IllegalTargets = cerl_sets:from_list([Dst || {move,_,{y,Dst}} <- Moves]),
fun({y,Y0}) when Y0 < Trim ->
- case gb_trees:lookup(Y0, GbTree) of
- {value,Y} -> {y,Y};
- none -> throw(not_possible)
- end;
+ case Map of
+ #{Y0:=Y} -> {y,Y};
+ #{} -> throw(not_possible)
+ end;
({y,Y}) ->
- case gb_sets:is_element(Y, IllegalTargets) of
+ case cerl_sets:is_element(Y, IllegalTargets) of
true -> throw(not_possible);
false -> {y,Y-Trim}
end;
@@ -149,19 +200,17 @@ create_map(Trim, Moves) ->
(Any) -> Any
end.
-try_remap([C|Cs], Is, FrameSize) ->
- {TrimInstr,Map} = expand_config(C, FrameSize),
- try
- {remap(Is, Map, []),TrimInstr}
- catch
- throw:not_possible ->
- try_remap(Cs, Is, FrameSize)
- end;
-try_remap([], _, _) -> throw(not_possible).
-
+remap([{'%',_}=I|Is], Map, Acc) ->
+ remap(Is, Map, [I|Acc]);
remap([{block,Bl0}|Is], Map, Acc) ->
Bl = remap_block(Bl0, Map, []),
remap(Is, Map, [{block,Bl}|Acc]);
+remap([{bs_get_tail,Src,Dst,Live}|Is], Map, Acc) ->
+ I = {bs_get_tail,Map(Src),Map(Dst),Live},
+ remap(Is, Map, [I|Acc]);
+remap([{bs_set_position,Src1,Src2}|Is], Map, Acc) ->
+ I = {bs_set_position,Map(Src1),Map(Src2)},
+ remap(Is, Map, [I|Acc]);
remap([{call_fun,_}=I|Is], Map, Acc) ->
remap(Is, Map, [I|Acc]);
remap([{call,_,_}=I|Is], Map, Acc) ->
@@ -205,35 +254,68 @@ remap([return|_]=Is, _, Acc) ->
reverse(Acc, Is);
remap([{line,_}=I|Is], Map, Acc) ->
remap(Is, Map, [I|Acc]).
-
+
remap_block([{set,Ds0,Ss0,Info}|Is], Map, Acc) ->
Ds = [Map(D) || D <- Ds0],
Ss = [Map(S) || S <- Ss0],
remap_block(Is, Map, [{set,Ds,Ss,Info}|Acc]);
remap_block([], _, Acc) -> reverse(Acc).
-
-safe_labels([{label,L},{line,_},{badmatch,{Tag,_}}|Is], Acc) when Tag =/= y ->
- safe_labels(Is, [L|Acc]);
-safe_labels([{label,L},{line,_},{case_end,{Tag,_}}|Is], Acc) when Tag =/= y ->
- safe_labels(Is, [L|Acc]);
-safe_labels([{label,L},{line,_},if_end|Is], Acc) ->
- safe_labels(Is, [L|Acc]);
-safe_labels([{label,L},
- {block,[{set,[{x,0}],[{Tag,_}],move}]},
- {line,_},
- {call_ext,1,{extfunc,erlang,error,1}}|Is], Acc) when Tag =/= y ->
- safe_labels(Is, [L|Acc]);
+
+%% safe_labels([Instruction], Accumulator) -> cerl_sets:set()
+%%  Build a cerl_sets set of safe labels. The code at a safe
+%%  label does not depend on the values in any specific
+%%  Y register; it only requires that all Y registers are
+%%  initialized so that it is safe to scan the stack when an exception
+%% is generated.
+%%
+%% In other words, code at a safe label will continue
+%% to work if Y registers have been renumbered and
+%% the size of the stack frame has changed.
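+%%
+%%  For example, a label followed by
+%%
+%%      [{line,Loc},{badmatch,{x,0}}]
+%%
+%%  is safe (only an X register is referenced), while a label
+%%  followed by [{badmatch,{y,0}}] is not.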
+
+safe_labels([{label,L}|Is], Acc) ->
+ case is_safe_label(Is) of
+ true -> safe_labels(Is, [L|Acc]);
+ false -> safe_labels(Is, Acc)
+ end;
safe_labels([_|Is], Acc) ->
safe_labels(Is, Acc);
-safe_labels([], Acc) -> gb_sets:from_list(Acc).
+safe_labels([], Acc) -> cerl_sets:from_list(Acc).
+
+is_safe_label([{'%',_}|Is]) ->
+ is_safe_label(Is);
+is_safe_label([{line,_}|Is]) ->
+ is_safe_label(Is);
+is_safe_label([{badmatch,{Tag,_}}|_]) ->
+ Tag =/= y;
+is_safe_label([{case_end,{Tag,_}}|_]) ->
+ Tag =/= y;
+is_safe_label([{try_case_end,{Tag,_}}|_]) ->
+ Tag =/= y;
+is_safe_label([if_end|_]) ->
+ true;
+is_safe_label([{block,Bl}|Is]) ->
+ is_safe_label_block(Bl) andalso is_safe_label(Is);
+is_safe_label([{call_ext,_,{extfunc,M,F,A}}|_]) ->
+ erl_bifs:is_exit_bif(M, F, A);
+is_safe_label(_) -> false.
+
+is_safe_label_block([{set,Ds,Ss,_}|Is]) ->
+ IsYreg = fun({y,_}) -> true;
+ (_) -> false
+ end,
+    %% The instruction is safe if it neither reads
+    %% nor writes any Y registers.
+ not (any(IsYreg, Ss) orelse any(IsYreg, Ds)) andalso
+ is_safe_label_block(Is);
+is_safe_label_block([]) -> true.
%% frame_layout([Instruction], [{kill,_}], St) ->
%% [{kill,Reg} | {live,Reg} | {dead,Reg}]
%% Figure out the layout of the stack frame.
-frame_layout(Is, Kills, #st{safe=Safe,lbl=D}) ->
+frame_layout(Is, Kills, #st{safe=Safe}) ->
N = frame_size(Is, Safe),
- IsKilled = fun(R) -> beam_utils:is_not_used(R, Is, D) end,
+ IsKilled = fun(R) -> is_not_used(R, Is) end,
{N,frame_layout_1(Kills, 0, N, IsKilled, [])}.
frame_layout_1([{kill,{y,Y}}=I|Ks], Y, N, IsKilled, Acc) ->
@@ -253,7 +335,14 @@ frame_layout_2(Is) -> reverse(Is).
%% frame_size([Instruction], SafeLabels) -> FrameSize
%% Find out the frame size by looking at the code that follows.
+%%
+%%  Implicitly, also check that the instructions form a straight
+%%  sequence of code that ends in a return. Any branches must be
+%%  to safe labels (i.e., the code at those labels doesn't depend
+%%  on the contents of any Y register).
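+%%
+%%  For example, the frame size of
+%%
+%%      [{block,Bl},{deallocate,3},return]
+%%
+%%  is 3, while a sequence containing a branch to an unsafe label
+%%  makes frame_size/2 throw not_possible.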
+frame_size([{'%',_}|Is], Safe) ->
+ frame_size(Is, Safe);
frame_size([{block,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{call_fun,_}|Is], Safe) ->
@@ -285,15 +374,94 @@ frame_size([{make_fun2,_,_,_,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{get_map_elements,{f,L},_,_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
-frame_size([{deallocate,N}|_], _) -> N;
+frame_size([{deallocate,N}|_], _) ->
+ N;
frame_size([{line,_}|Is], Safe) ->
frame_size(Is, Safe);
+frame_size([{bs_set_position,_,_}|Is], Safe) ->
+ frame_size(Is, Safe);
+frame_size([{bs_get_tail,_,_,_}|Is], Safe) ->
+ frame_size(Is, Safe);
frame_size(_, _) -> throw(not_possible).
frame_size_branch(0, Is, Safe) ->
frame_size(Is, Safe);
frame_size_branch(L, Is, Safe) ->
- case gb_sets:is_member(L, Safe) of
+ case cerl_sets:is_element(L, Safe) of
false -> throw(not_possible);
true -> frame_size(Is, Safe)
end.
+
+%% is_not_used(Y, [Instruction]) -> true|false.
+%%  Test whether the value of Y is used in the instruction sequence.
+%%  Return true if it is never used and false if it is used.
+%%
+%% This function handles the same instructions as frame_size/2. It
+%% assumes that any labels in the instructions are safe labels.
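+%%
+%%  For example,
+%%
+%%      is_not_used({y,1}, [{block,[{set,[{y,1}],[{x,0}],move}]}|Is])
+%%
+%%  returns true, since {y,1} is overwritten before it is ever read.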
+
+is_not_used(Y, [{'%',_}|Is]) ->
+ is_not_used(Y, Is);
+is_not_used(Y, [{apply,_}|Is]) ->
+ is_not_used(Y, Is);
+is_not_used(Y, [{bif,_,{f,_},Ss,Dst}|Is]) ->
+ is_not_used_ss_dst(Y, Ss, Dst, Is);
+is_not_used(Y, [{block,Bl}|Is]) ->
+ case is_not_used_block(Y, Bl) of
+ used -> false;
+ killed -> true;
+ transparent -> is_not_used(Y, Is)
+ end;
+is_not_used(Y, [{bs_get_tail,Src,Dst,_}|Is]) ->
+ is_not_used_ss_dst(Y, [Src], Dst, Is);
+is_not_used(Y, [{bs_init,_,_,_,Ss,Dst}|Is]) ->
+ is_not_used_ss_dst(Y, Ss, Dst, Is);
+is_not_used(Y, [{bs_put,{f,_},_,Ss}|Is]) ->
+ not member(Y, Ss) andalso is_not_used(Y, Is);
+is_not_used(Y, [{bs_set_position,Src1,Src2}|Is]) ->
+ Y =/= Src1 andalso Y =/= Src2 andalso
+ is_not_used(Y, Is);
+is_not_used(Y, [{call,_,_}|Is]) ->
+ is_not_used(Y, Is);
+is_not_used(Y, [{call_ext,_,_}=I|Is]) ->
+ beam_jump:is_exit_instruction(I) orelse is_not_used(Y, Is);
+is_not_used(Y, [{call_fun,_}|Is]) ->
+ is_not_used(Y, Is);
+is_not_used(_Y, [{deallocate,_}|_]) ->
+ true;
+is_not_used(Y, [{gc_bif,_,{f,_},_Live,Ss,Dst}|Is]) ->
+ is_not_used_ss_dst(Y, Ss, Dst, Is);
+is_not_used(Y, [{get_map_elements,{f,_},S,{list,List}}|Is]) ->
+ {Ss,Ds} = beam_utils:split_even(List),
+ case member(Y, [S|Ss]) of
+ true ->
+ false;
+ false ->
+ member(Y, Ds) orelse is_not_used(Y, Is)
+ end;
+is_not_used(Y, [{kill,Yreg}|Is]) ->
+ Y =:= Yreg orelse is_not_used(Y, Is);
+is_not_used(Y, [{line,_}|Is]) ->
+ is_not_used(Y, Is);
+is_not_used(Y, [{make_fun2,_,_,_,_}|Is]) ->
+ is_not_used(Y, Is);
+is_not_used(Y, [{test,_,_,Ss}|Is]) ->
+ not member(Y, Ss) andalso is_not_used(Y, Is);
+is_not_used(Y, [{test,_Op,{f,_},_Live,Ss,Dst}|Is]) ->
+ is_not_used_ss_dst(Y, Ss, Dst, Is).
+
+is_not_used_block(Y, [{set,Ds,Ss,_}|Is]) ->
+ case member(Y, Ss) of
+ true ->
+ used;
+ false ->
+ case member(Y, Ds) of
+ true ->
+ killed;
+ false ->
+ is_not_used_block(Y, Is)
+ end
+ end;
+is_not_used_block(_Y, []) -> transparent.
+
+is_not_used_ss_dst(Y, Ss, Dst, Is) ->
+ not member(Y, Ss) andalso (Y =:= Dst orelse is_not_used(Y, Is)).
diff --git a/lib/compiler/src/beam_utils.erl b/lib/compiler/src/beam_utils.erl
index 686d314c2d..6e6574c0b3 100644
--- a/lib/compiler/src/beam_utils.erl
+++ b/lib/compiler/src/beam_utils.erl
@@ -18,27 +18,16 @@
%% %CopyrightEnd%
%%
%% Purpose : Common utilities used by several optimization passes.
-%%
+%%
-module(beam_utils).
--export([is_killed/3,is_killed_at/3,is_not_used/3,
- empty_label_index/0,index_label/3,index_labels/1,replace_labels/4,
- code_at/2,bif_to_test/3,is_pure_test/1,
- combine_heap_needs/2,
- split_even/1
- ]).
+-export([replace_labels/4,is_pure_test/1,split_even/1]).
-export_type([code_index/0,module_code/0,instruction/0]).
--import(lists, [flatmap/2,map/2,member/2,sort/1,reverse/1]).
-
--define(is_const(Val), (Val =:= nil orelse
- element(1, Val) =:= integer orelse
- element(1, Val) =:= float orelse
- element(1, Val) =:= atom orelse
- element(1, Val) =:= literal)).
+-import(lists, [map/2,reverse/1]).
-%% instruction() describes all instructions that are used during optimzation
+%% instruction() describes all instructions that are used during optimization
%% (from beam_a to beam_z).
-type instruction() :: atom() | tuple().
@@ -54,97 +43,6 @@
-type fail() :: beam_asm:fail() | 'fail'.
-type test() :: {'test',atom(),fail(),[beam_asm:src()]} |
{'test',atom(),fail(),integer(),list(),beam_asm:reg()}.
--type result_cache() :: gb_trees:tree(beam_asm:label(), 'killed' | 'used').
-
--record(live,
- {lbl :: code_index(), %Label to code index.
- res :: result_cache()}). %Result cache for each label.
-
-%% is_killed(Register, [Instruction], State) -> true|false
-%% Determine whether a register is killed by the instruction sequence.
-%% If true is returned, it means that the register will not be
-%% referenced in ANY way (not even indirectly by an allocate instruction);
-%% i.e. it is OK to enter the instruction sequence with Register
-%% containing garbage.
-%%
-%% The state (constructed by index_instructions/1) is used to allow us
-%% to determine the kill state across branches.
-
--spec is_killed(beam_asm:reg(), [instruction()], code_index()) -> boolean().
-
-is_killed(R, Is, D) ->
- St = #live{lbl=D,res=gb_trees:empty()},
- case check_liveness(R, Is, St) of
- {killed,_} -> true;
- {exit_not_used,_} -> false;
- {_,_} -> false
- end.
-
-%% is_killed_at(Reg, Lbl, State) -> true|false
-%% Determine whether Reg is killed at label Lbl.
-
--spec is_killed_at(beam_asm:reg(), beam_asm:label(), code_index()) -> boolean().
-
-is_killed_at(R, Lbl, D) when is_integer(Lbl) ->
- St0 = #live{lbl=D,res=gb_trees:empty()},
- case check_liveness_at(R, Lbl, St0) of
- {killed,_} -> true;
- {exit_not_used,_} -> false;
- {_,_} -> false
- end.
-
-%% is_not_used(Register, [Instruction], State) -> true|false
-%% Determine whether a register is never used in the instruction sequence
-%% (it could still be referenced by an allocate instruction, meaning that
-%% it MUST be initialized, but that its value does not matter).
-%% The state is used to allow us to determine the usage state
-%% across branches.
-
--spec is_not_used(beam_asm:reg(), [instruction()], code_index()) -> boolean().
-
-is_not_used(R, Is, D) ->
- St = #live{lbl=D,res=gb_trees:empty()},
- case check_liveness(R, Is, St) of
- {used,_} -> false;
- {exit_not_used,_} -> true;
- {_,_} -> true
- end.
-
-%% index_labels(FunctionIs) -> State
-%% Index the instruction sequence so that we can quickly
-%% look up the instruction following a specific label.
-
--spec index_labels([instruction()]) -> code_index().
-
-index_labels(Is) ->
- index_labels_1(Is, []).
-
-%% empty_label_index() -> State
-%% Create an empty label index.
-
--spec empty_label_index() -> code_index().
-
-empty_label_index() ->
- gb_trees:empty().
-
-%% index_label(Label, [Instruction], State) -> State
-%% Add an index for a label.
-
--spec index_label(beam_asm:label(), [instruction()], code_index()) ->
- code_index().
-
-index_label(Lbl, Is0, Acc) ->
- Is = drop_labels(Is0),
- gb_trees:enter(Lbl, Is, Acc).
-
-
-%% code_at(Label, State) -> [I].
-%% Retrieve the code at the given label.
-
--spec code_at(beam_asm:label(), code_index()) -> [instruction()].
-
-code_at(L, Ll) ->
- gb_trees:get(L, Ll).
%% replace_labels(FunctionIs, Tail, ReplaceDb, Fallback) -> FunctionIs.
%% Replace all labels in instructions according to the ReplaceDb.
@@ -158,44 +56,6 @@ code_at(L, Ll) ->
replace_labels(Is, Acc, D, Fb) ->
replace_labels_1(Is, Acc, D, Fb).
-%% bif_to_test(Bif, [Op], Fail) -> {test,Test,Fail,[Op]}
-%% Convert a BIF to a test. Fail if not possible.
-
--spec bif_to_test(atom(), list(), fail()) -> test().
-
-bif_to_test(is_atom, [_]=Ops, Fail) -> {test,is_atom,Fail,Ops};
-bif_to_test(is_boolean, [_]=Ops, Fail) -> {test,is_boolean,Fail,Ops};
-bif_to_test(is_binary, [_]=Ops, Fail) -> {test,is_binary,Fail,Ops};
-bif_to_test(is_bitstring,[_]=Ops, Fail) -> {test,is_bitstr,Fail,Ops};
-bif_to_test(is_float, [_]=Ops, Fail) -> {test,is_float,Fail,Ops};
-bif_to_test(is_function, [_]=Ops, Fail) -> {test,is_function,Fail,Ops};
-bif_to_test(is_function, [_,_]=Ops, Fail) -> {test,is_function2,Fail,Ops};
-bif_to_test(is_integer, [_]=Ops, Fail) -> {test,is_integer,Fail,Ops};
-bif_to_test(is_list, [_]=Ops, Fail) -> {test,is_list,Fail,Ops};
-bif_to_test(is_map, [_]=Ops, Fail) -> {test,is_map,Fail,Ops};
-bif_to_test(is_number, [_]=Ops, Fail) -> {test,is_number,Fail,Ops};
-bif_to_test(is_pid, [_]=Ops, Fail) -> {test,is_pid,Fail,Ops};
-bif_to_test(is_port, [_]=Ops, Fail) -> {test,is_port,Fail,Ops};
-bif_to_test(is_reference, [_]=Ops, Fail) -> {test,is_reference,Fail,Ops};
-bif_to_test(is_tuple, [_]=Ops, Fail) -> {test,is_tuple,Fail,Ops};
-bif_to_test('=<', [A,B], Fail) -> {test,is_ge,Fail,[B,A]};
-bif_to_test('>', [A,B], Fail) -> {test,is_lt,Fail,[B,A]};
-bif_to_test('<', [_,_]=Ops, Fail) -> {test,is_lt,Fail,Ops};
-bif_to_test('>=', [_,_]=Ops, Fail) -> {test,is_ge,Fail,Ops};
-bif_to_test('==', [C,A], Fail) when ?is_const(C) ->
- {test,is_eq,Fail,[A,C]};
-bif_to_test('==', [_,_]=Ops, Fail) -> {test,is_eq,Fail,Ops};
-bif_to_test('/=', [C,A], Fail) when ?is_const(C) ->
- {test,is_ne,Fail,[A,C]};
-bif_to_test('/=', [_,_]=Ops, Fail) -> {test,is_ne,Fail,Ops};
-bif_to_test('=:=', [C,A], Fail) when ?is_const(C) ->
- {test,is_eq_exact,Fail,[A,C]};
-bif_to_test('=:=', [_,_]=Ops, Fail) -> {test,is_eq_exact,Fail,Ops};
-bif_to_test('=/=', [C,A], Fail) when ?is_const(C) ->
- {test,is_ne_exact,Fail,[A,C]};
-bif_to_test('=/=', [_,_]=Ops, Fail) -> {test,is_ne_exact,Fail,Ops}.
-
-
%% is_pure_test({test,Op,Fail,Ops}) -> true|false.
%% Return 'true' if the test instruction does not modify any
%% registers and/or bit syntax matching state.
@@ -215,22 +75,9 @@ is_pure_test({test,test_arity,_,[_,_]}) -> true;
is_pure_test({test,has_map_fields,_,[_|_]}) -> true;
is_pure_test({test,is_bitstr,_,[_]}) -> true;
is_pure_test({test,is_function2,_,[_,_]}) -> true;
-is_pure_test({test,Op,_,Ops}) ->
+is_pure_test({test,Op,_,Ops}) ->
erl_internal:new_type_test(Op, length(Ops)).
-%% combine_heap_needs(HeapNeed1, HeapNeed2) -> HeapNeed
-%% Combine the heap need for two allocation instructions.
-
--type heap_need_tag() :: 'floats' | 'words'.
--type heap_need() :: non_neg_integer() |
- {'alloc',[{heap_need_tag(),non_neg_integer()}]}.
--spec combine_heap_needs(heap_need(), heap_need()) -> heap_need().
-
-combine_heap_needs(H1, H2) when is_integer(H1), is_integer(H2) ->
- H1 + H2;
-combine_heap_needs(H1, H2) ->
- {alloc,combine_alloc_lists([H1,H2])}.
-
%% split_even/1
%% [1,2,3,4,5,6] -> {[1,3,5],[2,4,6]}
@@ -242,443 +89,6 @@ split_even(Rs) -> split_even(Rs, [], []).
%%% Local functions.
%%%
-
-%% check_liveness(Reg, [Instruction], #live{}) ->
-%% {killed | not_used | used, #live{}}
-%% Find out whether Reg is used or killed in instruction sequence.
-%%
-%% killed - Reg is assigned or killed by an allocation instruction.
-%% not_used - the value of Reg is not used, but Reg must not be garbage
-%% exit_not_used - the value of Reg is not used, but must not be garbage
-%% because the stack will be scanned because an
-%% exit BIF will raise an exception
-%% used - Reg is used
-
-check_liveness({fr,_}, _, St) ->
- %% Conservatively always consider the floating point register used.
- {used,St};
-check_liveness(R, [{block,Blk}|Is], St0) ->
- case check_liveness_block(R, Blk, St0) of
- {transparent,St1} ->
- check_liveness(R, Is, St1);
- {alloc_used,St1} ->
- %% Used by an allocating instruction, but value not referenced.
- %% Must check the rest of the instructions.
- not_used(check_liveness(R, Is, St1));
- {Other,_}=Res when is_atom(Other) ->
- Res
- end;
-check_liveness(R, [{label,_}|Is], St) ->
- check_liveness(R, Is, St);
-check_liveness(R, [{test,_,{f,Fail},As}|Is], St0) ->
- case member(R, As) of
- true ->
- {used,St0};
- false ->
- case check_liveness_at(R, Fail, St0) of
- {killed,St1} ->
- check_liveness(R, Is, St1);
- {exit_not_used,St1} ->
- not_used(check_liveness(R, Is, St1));
- {not_used,St1} ->
- not_used(check_liveness(R, Is, St1));
- {used,_}=Used ->
- Used
- end
- end;
-check_liveness(R, [{test,Op,Fail,Live,Ss,Dst}|Is], St) ->
- %% Check this instruction as a block to get a less conservative
- %% result if the caller is is_not_used/3.
- Block = [{set,[Dst],Ss,{alloc,Live,{bif,Op,Fail}}}],
- check_liveness(R, [{block,Block}|Is], St);
-check_liveness(R, [{select,_,R,_,_}|_], St) ->
- {used,St};
-check_liveness(R, [{select,_,_,Fail,Branches}|_], St) ->
- check_liveness_everywhere(R, [Fail|Branches], St);
-check_liveness(R, [{jump,{f,F}}|_], St) ->
- check_liveness_at(R, F, St);
-check_liveness(R, [{case_end,Used}|_], St) ->
- check_liveness_exit(R, Used, St);
-check_liveness(R, [{try_case_end,Used}|_], St) ->
- check_liveness_exit(R, Used, St);
-check_liveness(R, [{badmatch,Used}|_], St) ->
- check_liveness_exit(R, Used, St);
-check_liveness(R, [if_end|_], St) ->
- check_liveness_exit(R, ignore, St);
-check_liveness(R, [{func_info,_,_,Ar}|_], St) ->
- case R of
- {x,X} when X < Ar -> {used,St};
- _ -> {killed,St}
- end;
-check_liveness(R, [{kill,R}|_], St) ->
- {killed,St};
-check_liveness(R, [{kill,_}|Is], St) ->
- check_liveness(R, Is, St);
-check_liveness(R, [{bs_init,_,_,none,Ss,Dst}|Is], St) ->
- case member(R, Ss) of
- true ->
- {used,St};
- false ->
- if
- R =:= Dst -> {killed,St};
- true -> check_liveness(R, Is, St)
- end
- end;
-check_liveness(R, [{bs_init,_,_,Live,Ss,Dst}|Is], St) ->
- case R of
- {x,X} ->
- case member(R, Ss) of
- true ->
- {used,St};
- false ->
- if
- X < Live ->
- not_used(check_liveness(R, Is, St));
- true ->
- {killed,St}
- end
- end;
- {y,_} ->
- case member(R, Ss) of
- true -> {used,St};
- false ->
- %% If the exception is taken, the stack may
- %% be scanned. Therefore the register is not
- %% guaranteed to be killed.
- if
- R =:= Dst -> {not_used,St};
- true -> not_used(check_liveness(R, Is, St))
- end
- end
- end;
-check_liveness(R, [{deallocate,_}|Is], St) ->
- case R of
- {y,_} -> {killed,St};
- _ -> check_liveness(R, Is, St)
- end;
-check_liveness({x,_}=R, [return|_], St) ->
- case R of
- {x,0} -> {used,St};
- {x,_} -> {killed,St}
- end;
-check_liveness(R, [{call,Live,_}|Is], St) ->
- case R of
- {x,X} when X < Live -> {used,St};
- {x,_} -> {killed,St};
- {y,_} -> not_used(check_liveness(R, Is, St))
- end;
-check_liveness(R, [{call_ext,Live,_}=I|Is], St) ->
- case R of
- {x,X} when X < Live ->
- {used,St};
- {x,_} ->
- {killed,St};
- {y,_} ->
- case beam_jump:is_exit_instruction(I) of
- false ->
- not_used(check_liveness(R, Is, St));
- true ->
- %% We must make sure we don't check beyond this
- %% instruction or we will fall through into random
- %% unrelated code and get stuck in a loop.
- {exit_not_used,St}
- end
- end;
-check_liveness(R, [{call_fun,Live}|Is], St) ->
- case R of
- {x,X} when X =< Live -> {used,St};
- {x,_} -> {killed,St};
- {y,_} -> not_used(check_liveness(R, Is, St))
- end;
-check_liveness(R, [{apply,Args}|Is], St) ->
- case R of
- {x,X} when X < Args+2 -> {used,St};
- {x,_} -> {killed,St};
- {y,_} -> not_used(check_liveness(R, Is, St))
- end;
-check_liveness(R, [{bif,Op,Fail,Ss,D}|Is], St) ->
- Set = {set,[D],Ss,{bif,Op,Fail}},
- check_liveness(R, [{block,[Set]}|Is], St);
-check_liveness(R, [{gc_bif,Op,{f,Fail},Live,Ss,D}|Is], St) ->
- Set = {set,[D],Ss,{alloc,Live,{gc_bif,Op,Fail}}},
- check_liveness(R, [{block,[Set]}|Is], St);
-check_liveness(R, [{bs_put,{f,0},_,Ss}|Is], St) ->
- case member(R, Ss) of
- true -> {used,St};
- false -> check_liveness(R, Is, St)
- end;
-check_liveness(R, [{bs_restore2,S,_}|Is], St) ->
- case R of
- S -> {used,St};
- _ -> check_liveness(R, Is, St)
- end;
-check_liveness(R, [{bs_save2,S,_}|Is], St) ->
- case R of
- S -> {used,St};
- _ -> check_liveness(R, Is, St)
- end;
-check_liveness(R, [{move,S,D}|Is], St) ->
- case R of
- S -> {used,St};
- D -> {killed,St};
- _ -> check_liveness(R, Is, St)
- end;
-check_liveness(R, [{make_fun2,_,_,_,NumFree}|Is], St) ->
- case R of
- {x,X} when X < NumFree -> {used,St};
- {x,_} -> {killed,St};
- {y,_} -> not_used(check_liveness(R, Is, St))
- end;
-check_liveness(R, [{'catch'=Op,Y,Fail}|Is], St) ->
- Set = {set,[Y],[],{try_catch,Op,Fail}},
- check_liveness(R, [{block,[Set]}|Is], St);
-check_liveness(R, [{'try'=Op,Y,Fail}|Is], St) ->
- Set = {set,[Y],[],{try_catch,Op,Fail}},
- check_liveness(R, [{block,[Set]}|Is], St);
-check_liveness(R, [{try_end,Y}|Is], St) ->
- case R of
- Y ->
- {killed,St};
- {y,_} ->
- %% y registers will be used if an exception occurs and
- %% control transfers to the label given in the previous
- %% try/2 instruction.
- {used,St};
- _ ->
- check_liveness(R, Is, St)
- end;
-check_liveness(R, [{catch_end,Y}|Is], St) ->
- case R of
- Y -> {killed,St};
- _ -> check_liveness(R, Is, St)
- end;
-check_liveness(R, [{get_tuple_element,S,_,D}|Is], St) ->
- case R of
- S -> {used,St};
- D -> {killed,St};
- _ -> check_liveness(R, Is, St)
- end;
-check_liveness(R, [{bs_context_to_binary,S}|Is], St) ->
- case R of
- S -> {used,St};
- _ -> check_liveness(R, Is, St)
- end;
-check_liveness(R, [{loop_rec,{f,_},{x,0}}|_], St) ->
- case R of
- {x,_} ->
- {killed,St};
- _ ->
- %% y register. Rarely happens. Be very conversative and
- %% assume it's used.
- {used,St}
- end;
-check_liveness(R, [{loop_rec_end,{f,Fail}}|_], St) ->
- check_liveness_at(R, Fail, St);
-check_liveness(R, [{line,_}|Is], St) ->
- check_liveness(R, Is, St);
-check_liveness(R, [{get_map_elements,{f,Fail},S,{list,L}}|Is], St0) ->
- {Ss,Ds} = split_even(L),
- case member(R, [S|Ss]) of
- true ->
- {used,St0};
- false ->
- case check_liveness_at(R, Fail, St0) of
- {killed,St}=Killed ->
- case member(R, Ds) of
- true -> Killed;
- false -> check_liveness(R, Is, St)
- end;
- Other ->
- Other
- end
- end;
-check_liveness(R, [{put_map,F,Op,S,D,Live,{list,Puts}}|Is], St) ->
- Set = {set,[D],[S|Puts],{alloc,Live,{put_map,Op,F}}},
- check_liveness(R, [{block,[Set]}||Is], St);
-check_liveness(R, [{put_tuple,Ar,D}|Is], St) ->
- Set = {set,[D],[],{put_tuple,Ar}},
- check_liveness(R, [{block,[Set]}||Is], St);
-check_liveness(R, [{put_list,S1,S2,D}|Is], St) ->
- Set = {set,[D],[S1,S2],put_list},
- check_liveness(R, [{block,[Set]}||Is], St);
-check_liveness(R, [{test_heap,N,Live}|Is], St) ->
- I = {block,[{set,[],[],{alloc,Live,{nozero,nostack,N,[]}}}]},
- check_liveness(R, [I|Is], St);
-check_liveness(R, [{allocate_zero,N,Live}|Is], St) ->
- I = {block,[{set,[],[],{alloc,Live,{zero,N,0,[]}}}]},
- check_liveness(R, [I|Is], St);
-check_liveness(R, [{get_hd,S,D}|Is], St) ->
- I = {block,[{set,[D],[S],get_hd}]},
- check_liveness(R, [I|Is], St);
-check_liveness(R, [{get_tl,S,D}|Is], St) ->
- I = {block,[{set,[D],[S],get_tl}]},
- check_liveness(R, [I|Is], St);
-check_liveness(R, [remove_message|Is], St) ->
- check_liveness(R, Is, St);
-check_liveness({x,X}, [build_stacktrace|_], St) when X > 0 ->
- {killed,St};
-check_liveness(R, [{recv_mark,_}|Is], St) ->
- check_liveness(R, Is, St);
-check_liveness(R, [{recv_set,_}|Is], St) ->
- check_liveness(R, Is, St);
-check_liveness(R, [{'%',_}|Is], St) ->
- check_liveness(R, Is, St);
-check_liveness(_R, Is, St) when is_list(Is) ->
- %% Not implemented. Conservatively assume that the register is used.
- {used,St}.
-
-check_liveness_everywhere(R, Lbls, St0) ->
- check_liveness_everywhere_1(R, Lbls, killed, St0).
-
-check_liveness_everywhere_1(R, [{f,Lbl}|T], Res0, St0) ->
- {Res1,St} = check_liveness_at(R, Lbl, St0),
- Res = case Res1 of
- killed -> Res0;
- _ -> Res1
- end,
- case Res of
- used -> {used,St};
- _ -> check_liveness_everywhere_1(R, T, Res, St)
- end;
-check_liveness_everywhere_1(R, [_|T], Res, St) ->
- check_liveness_everywhere_1(R, T, Res, St);
-check_liveness_everywhere_1(_, [], Res, St) ->
- {Res,St}.
-
-check_liveness_at(R, Lbl, #live{lbl=Ll,res=ResMemorized}=St0) ->
- case gb_trees:lookup(Lbl, ResMemorized) of
- {value,Res} ->
- {Res,St0};
- none ->
- {Res,St} = case gb_trees:lookup(Lbl, Ll) of
- {value,Is} -> check_liveness(R, Is, St0);
- none -> {used,St0}
- end,
- {Res,St#live{res=gb_trees:insert(Lbl, Res, St#live.res)}}
- end.
-
-not_used({used,_}=Res) -> Res;
-not_used({_,St}) -> {not_used,St}.
-
-check_liveness_exit(R, R, St) -> {used,St};
-check_liveness_exit({x,_}, _, St) -> {killed,St};
-check_liveness_exit({y,_}, _, St) -> {exit_not_used,St}.
-
-%% check_liveness_block(Reg, [Instruction], State) ->
-%% {killed | not_used | used | alloc_used | transparent,State'}
-%% Finds out how Reg is used in the instruction sequence inside a block.
-%% Returns one of:
-%% killed - Reg is assigned a new value or killed by an
-%% allocation instruction
-%% not_used - The value is not used, but the register is referenced
-%% e.g. by an allocation instruction
-%% transparent - Reg is neither used nor killed
-%% alloc_used - Used only in an allocate instruction
-%% used - Reg is explicitly used by an instruction
-%%
-%% Annotations are not allowed.
-%%
-%% (Unknown instructions will cause an exception.)
-
-check_liveness_block({x,X}=R, [{set,Ds,Ss,{alloc,Live,Op}}|Is], St0) ->
- if
- X >= Live ->
- {killed,St0};
- true ->
- case check_liveness_block_1(R, Ss, Ds, Op, Is, St0) of
- {transparent,St} -> {alloc_used,St};
- {_,_}=Res -> not_used(Res)
- end
- end;
-check_liveness_block({y,_}=R, [{set,Ds,Ss,{alloc,_Live,Op}}|Is], St0) ->
- case check_liveness_block_1(R, Ss, Ds, Op, Is, St0) of
- {transparent,St} -> {alloc_used,St};
- {_,_}=Res -> not_used(Res)
- end;
-check_liveness_block({y,_}=R, [{set,Ds,Ss,{try_catch,_,Op}}|Is], St0) ->
- case Ds of
- [R] ->
- {killed,St0};
- _ ->
- case check_liveness_block_1(R, Ss, Ds, Op, Is, St0) of
- {exit_not_used,St} ->
- {used,St};
- {transparent,St} ->
- %% Conservatively assumed that it is used.
- {used,St};
- {_,_}=Res ->
- Res
- end
- end;
-check_liveness_block(R, [{set,Ds,Ss,Op}|Is], St) ->
- check_liveness_block_1(R, Ss, Ds, Op, Is, St);
-check_liveness_block(_, [], St) -> {transparent,St}.
-
-check_liveness_block_1(R, Ss, Ds, Op, Is, St0) ->
- case member(R, Ss) of
- true ->
- {used,St0};
- false ->
- case check_liveness_block_2(R, Op, Ss, St0) of
- {killed,St} ->
- case member(R, Ds) of
- true -> {killed,St};
- false -> check_liveness_block(R, Is, St)
- end;
- {exit_not_used,St} ->
- case member(R, Ds) of
- true -> {exit_not_used,St};
- false -> check_liveness_block(R, Is, St)
- end;
- {not_used,St} ->
- not_used(case member(R, Ds) of
- true -> {killed,St};
- false -> check_liveness_block(R, Is, St)
- end);
- {used,St} ->
- {used,St}
- end
- end.
-
-check_liveness_block_2(R, {gc_bif,Op,{f,Lbl}}, Ss, St) ->
- check_liveness_block_3(R, Lbl, {Op,length(Ss)}, St);
-check_liveness_block_2(R, {bif,Op,{f,Lbl}}, Ss, St) ->
- Arity = length(Ss),
- case erl_internal:comp_op(Op, Arity) orelse
- erl_internal:new_type_test(Op, Arity) of
- true ->
- {killed,St};
- false ->
- check_liveness_block_3(R, Lbl, {Op,length(Ss)}, St)
- end;
-check_liveness_block_2(R, {put_map,_Op,{f,Lbl}}, _Ss, St) ->
- check_liveness_block_3(R, Lbl, {unsafe,0}, St);
-check_liveness_block_2(_, _, _, St) ->
- {killed,St}.
-
-check_liveness_block_3({x,_}, 0, _FA, St) ->
- {killed,St};
-check_liveness_block_3({y,_}, 0, {F,A}, St) ->
- %% If the exception is thrown, the stack may be scanned,
- %% thus implicitly using the y register.
- case erl_bifs:is_safe(erlang, F, A) of
- true -> {killed,St};
- false -> {used,St}
- end;
-check_liveness_block_3(R, Lbl, _FA, St0) ->
- check_liveness_at(R, Lbl, St0).
-
-index_labels_1([{label,Lbl}|Is0], Acc) ->
- Is = drop_labels(Is0),
- index_labels_1(Is0, [{Lbl,Is}|Acc]);
-index_labels_1([_|Is], Acc) ->
- index_labels_1(Is, Acc);
-index_labels_1([], Acc) -> gb_trees:from_orddict(sort(Acc)).
-
-drop_labels([{label,_}|Is]) -> drop_labels(Is);
-drop_labels(Is) -> Is.
-
-
replace_labels_1([{test,Test,{f,Lbl},Ops}|Is], Acc, D, Fb) ->
replace_labels_1(Is, [{test,Test,{f,label(Lbl, D, Fb)},Ops}|Acc], D, Fb);
replace_labels_1([{test,Test,{f,Lbl},Live,Ops,Dst}|Is], Acc, D, Fb) ->
@@ -734,21 +144,6 @@ label(Old, D, Fb) ->
_ -> Fb(Old)
end.
-%% Help function for combine_heap_needs.
-
-combine_alloc_lists(Al0) ->
- Al1 = flatmap(fun(Words) when is_integer(Words) ->
- [{words,Words}];
- ({alloc,List}) ->
- List
- end, Al0),
- Al2 = sofs:relation(Al1),
- Al3 = sofs:relation_to_family(Al2),
- Al4 = sofs:to_external(Al3),
- [{Tag,lists:sum(L)} || {Tag,L} <- Al4].
-
-%% live_opt/4.
-
split_even([], Ss, Ds) ->
{reverse(Ss),reverse(Ds)};
split_even([S,D|Rs], Ss, Ds) ->
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index b44771d8a9..ab8caa1a0d 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -26,8 +26,9 @@
%% Interface for compiler.
-export([module/2, format_error/1]).
+-export([type_anno/1, type_anno/2, type_anno/4]).
--import(lists, [any/2,dropwhile/2,foldl/3,map/2,foreach/2,reverse/1]).
+-import(lists, [dropwhile/2,foldl/3,member/2,reverse/1,sort/1,zip/2]).
%% To be called by the compiler.
@@ -44,6 +45,34 @@ module({Mod,Exp,Attr,Fs,Lc}=Code, _Opts)
{error,[{atom_to_list(Mod),Es}]}
end.
+%% Provides a stable interface for type annotations, used by certain passes to
+%% indicate that we can safely assume that a register has a given type.
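+%%
+%% For example, type_anno(integer, 42) returns {integer,42}, and
+%% type_anno(tuple, 2, true, #{}) returns {tuple,2,#{}} (a tuple known
+%% to have exactly two elements).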
+-spec type_anno(term()) -> term().
+type_anno(atom) -> {atom,[]};
+type_anno(bool) -> bool;
+type_anno({binary,_}) -> binary;
+type_anno(cons) -> cons;
+type_anno(float) -> {float,[]};
+type_anno(integer) -> {integer,[]};
+type_anno(list) -> list;
+type_anno(map) -> map;
+type_anno(match_context) -> match_context;
+type_anno(number) -> number;
+type_anno(nil) -> nil.
+
+-spec type_anno(term(), term()) -> term().
+type_anno(atom, Value) when is_atom(Value) -> {atom, Value};
+type_anno(float, Value) when is_float(Value) -> {float, Value};
+type_anno(integer, Value) when is_integer(Value) -> {integer, Value}.
+
+-spec type_anno(term(), term(), term(), term()) -> term().
+type_anno(tuple, Size, Exact, Elements) when is_integer(Size), Size >= 0,
+ is_map(Elements) ->
+ case Exact of
+ true -> {tuple, Size, Elements};
+ false -> {tuple, [Size], Elements}
+ end.
+
-spec format_error(term()) -> iolist().
format_error({{_M,F,A},{I,Off,limit}}) ->
@@ -90,34 +119,9 @@ format_error(Error) ->
%% format as used in the compiler and in .S files.
validate(Module, Fs) ->
- Ft = index_bs_start_match(Fs, []),
+ Ft = index_parameter_types(Fs, []),
validate_0(Module, Fs, Ft).
-index_bs_start_match([{function,_,_,Entry,Code0}|Fs], Acc0) ->
- Code = dropwhile(fun({label,L}) when L =:= Entry -> false;
- (_) -> true
- end, Code0),
- case Code of
- [{label,Entry}|Is] ->
- Acc = index_bs_start_match_1(Is, Entry, Acc0),
- index_bs_start_match(Fs, Acc);
- _ ->
- %% Something serious is wrong. Ignore it for now.
- %% It will be detected and diagnosed later.
- index_bs_start_match(Fs, Acc0)
- end;
-index_bs_start_match([], Acc) ->
- gb_trees:from_orddict(lists:sort(Acc)).
-
-index_bs_start_match_1([{test,bs_start_match2,_,_,_,_}=I|_], Entry, Acc) ->
- [{Entry,[I]}|Acc];
-index_bs_start_match_1([{test,_,{f,F},_},{bs_context_to_binary,_}|Is0], Entry, Acc) ->
- [{label,F}|Is] = dropwhile(fun({label,L}) when L =:= F -> false;
- (_) -> true
- end, Is0),
- index_bs_start_match_1(Is, Entry, Acc);
-index_bs_start_match_1(_, _, Acc) -> Acc.
-
validate_0(_Module, [], _) -> [];
validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) ->
try validate_1(Code, Name, Ar, Entry, Ft) of
@@ -132,43 +136,126 @@ validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) ->
erlang:raise(Class, Error, Stack)
end.
+-record(value_ref, {id :: index()}).
+-record(value, {op :: term(), args :: [argument()], type :: type()}).
+
+-type argument() :: #value_ref{} | literal().
+
-type index() :: non_neg_integer().
--type reg_tab() :: gb_trees:tree(index(), 'none' | {'value', _}).
-
--record(st, %Emulation state
- {x=init_regs(0, term) :: reg_tab(),%x register info.
- y=init_regs(0, initialized) :: reg_tab(),%y register info.
- f=init_fregs(), %
- numy=none, %Number of y registers.
- h=0, %Available heap size.
- hf=0, %Available heap size for floats.
- fls=undefined, %Floating point state.
- ct=[], %List of hot catch/try labels
- setelem=false, %Previous instruction was setelement/3.
- puts_left=none, %put/1 instructions left.
- defs=#{}, %Defining expression for each register.
- aliases=#{}
- }).
+
+-type literal() :: {atom, [] | atom()} |
+ {float, [] | float()} |
+ {integer, [] | integer()} |
+ {literal, term()} |
+ nil.
+
+-type tuple_sz() :: [non_neg_integer()] | %% Inexact
+ non_neg_integer(). %% Exact.
+
+%% Match context type.
+-record(ms,
+ {id=make_ref() :: reference(), %Unique ID.
+ valid=0 :: non_neg_integer(), %Valid slots
+ slots=0 :: non_neg_integer() %Number of slots
+ }).
+
+-type type() :: binary |
+ cons |
+ list |
+ map |
+ nil |
+ #ms{} |
+ ms_position |
+ none |
+ number |
+ term |
+ tuple_in_progress |
+ {tuple, tuple_sz(), #{ literal() => type() }} |
+ literal().
+
+-type tag() :: initialized |
+ uninitialized |
+ {catchtag, [label()]} |
+ {trytag, [label()]}.
+
+-type x_regs() :: #{ {x, index()} => #value_ref{} }.
+-type y_regs() :: #{ {y, index()} => tag() | #value_ref{} }.
+
+%% Emulation state
+-record(st,
+ {%% All known values.
+ vs=#{} :: #{ #value_ref{} => #value{} },
+ %% Register states.
+ xs=#{} :: x_regs(),
+ ys=#{} :: y_regs(),
+ f=init_fregs(),
+ %% A set of all registers containing "fragile" terms. That is, terms
+ %% that don't exist on our process heap and would be destroyed by a
+ %% GC.
+ fragile=cerl_sets:new() :: cerl_sets:set(),
+ %% Number of Y registers.
+ %%
+ %% Note that this may be 0 if there's a frame without saved values,
+ %% such as on a body-recursive call.
+ numy=none :: none | undecided | index(),
+ %% Available heap size.
+ h=0,
+       %% Available heap size for floats.
+ hf=0,
+ %% Floating point state.
+ fls=undefined,
+ %% List of hot catch/try labels
+ ct=[],
+ %% Previous instruction was setelement/3.
+ setelem=false,
+ %% put/1 instructions left.
+ puts_left=none
+ }).
-type label() :: integer().
-type label_set() :: gb_sets:set(label()).
-type branched_tab() :: gb_trees:tree(label(), #st{}).
-type ft_tab() :: gb_trees:tree().
--record(vst, %Validator state
- {current=none :: #st{} | 'none', %Current state
- branched=gb_trees:empty() :: branched_tab(), %States at jumps
- labels=gb_sets:empty() :: label_set(), %All defined labels
- ft=gb_trees:empty() :: ft_tab() %Some other functions
- % in the module (those that start with bs_start_match2).
- }).
-
-%% Match context type.
--record(ms,
- {id=make_ref() :: reference(), %Unique ID.
- valid=0 :: non_neg_integer(), %Valid slots
- slots=0 :: non_neg_integer() %Number of slots
- }).
+%% Validator state
+-record(vst,
+ {%% Current state
+ current=none :: #st{} | 'none',
+ %% States at labels
+ branched=gb_trees:empty() :: branched_tab(),
+ %% All defined labels
+ labels=gb_sets:empty() :: label_set(),
+ %% Argument information of other functions in the module
+ ft=gb_trees:empty() :: ft_tab(),
+ %% Counter for #value_ref{} creation
+ ref_ctr=0 :: index()
+ }).
+
+index_parameter_types([{function,_,_,Entry,Code0}|Fs], Acc0) ->
+ Code = dropwhile(fun({label,L}) when L =:= Entry -> false;
+ (_) -> true
+ end, Code0),
+ case Code of
+ [{label,Entry}|Is] ->
+ Acc = index_parameter_types_1(Is, Entry, Acc0),
+ index_parameter_types(Fs, Acc);
+ _ ->
+ %% Something serious is wrong. Ignore it for now.
+ %% It will be detected and diagnosed later.
+ index_parameter_types(Fs, Acc0)
+ end;
+index_parameter_types([], Acc) ->
+ gb_trees:from_orddict(sort(Acc)).
+
+index_parameter_types_1([{'%', {type_info, Reg, Type0}} | Is], Entry, Acc) ->
+ Type = case Type0 of
+ match_context -> #ms{};
+ _ -> Type0
+ end,
+ Key = {Entry, Reg},
+ index_parameter_types_1(Is, Entry, [{Key, Type} | Acc]);
+index_parameter_types_1(_, _, Acc) ->
+ Acc.
validate_1(Is, Name, Arity, Entry, Ft) ->
validate_2(labels(Is), Name, Arity, Entry, Ft).
@@ -181,14 +268,10 @@ validate_2({Ls1,Is}, Name, Arity, _Entry, _Ft) ->
validate_3({Ls2,Is}, Name, Arity, Entry, Mod, Ls1, Ft) ->
Offset = 1 + length(Ls1) + 1 + length(Ls2),
- EntryOK = lists:member(Entry, Ls2),
+ EntryOK = member(Entry, Ls2),
if
EntryOK ->
- St = init_state(Arity),
- Vst0 = #vst{current=St,
- branched=gb_trees_from_list([{L,St} || L <- Ls1]),
- labels=gb_sets:from_list(Ls1++Ls2),
- ft=Ft},
+ Vst0 = init_vst(Arity, Ls1, Ls2, Ft),
MFA = {Mod,Name,Arity},
Vst = valfun(Is, MFA, Offset, Vst0),
validate_fun_info_branches(Ls1, MFA, Vst);
@@ -211,7 +294,7 @@ validate_fun_info_branches_1(X, {Mod,Name,Arity}=MFA, Vst) ->
#vst{current=#st{numy=Size}} ->
error({unexpected_stack_frame,Size})
end,
- get_term_type({x,X}, Vst)
+ assert_term({x,X}, Vst)
catch Error ->
I = {func_info,{atom,Mod},{atom,Name},Arity},
Offset = 2,
@@ -232,19 +315,22 @@ labels_1([{line,_}|Is], R) ->
labels_1(Is, R) ->
{reverse(R),Is}.
-init_state(Arity) ->
- Xs = init_regs(Arity, term),
- Ys = init_regs(0, initialized),
- kill_heap_allocation(#st{x=Xs,y=Ys,numy=none,ct=[]}).
+init_vst(Arity, Ls1, Ls2, Ft) ->
+ Vst0 = init_function_args(Arity - 1, #vst{current=#st{}}),
+ Branches = gb_trees_from_list([{L,Vst0#vst.current} || L <- Ls1]),
+ Labels = gb_sets:from_list(Ls1++Ls2),
+ Vst0#vst{branched=Branches,
+ labels=Labels,
+ ft=Ft}.
+
+init_function_args(-1, Vst) ->
+ Vst;
+init_function_args(X, Vst) ->
+ init_function_args(X - 1, create_term(term, argument, [], {x,X}, Vst)).
kill_heap_allocation(St) ->
St#st{h=0,hf=0}.
-init_regs(0, _) ->
- gb_trees:empty();
-init_regs(N, Type) ->
- gb_trees_from_list([{R,Type} || R <- lists:seq(0, N-1)]).
-
valfun([], MFA, _Offset, #vst{branched=Targets0,labels=Labels0}=Vst) ->
Targets = gb_trees:keys(Targets0),
Labels = gb_sets:to_list(Labels0),
@@ -265,20 +351,25 @@ valfun([I|Is], MFA, Offset, Vst0) ->
%% Instructions that are allowed in dead code or when failing,
%% that is while the state is undecided in some way.
-valfun_1({label,Lbl}, #vst{current=St0,branched=B,labels=Lbls}=Vst) ->
- St = merge_states(Lbl, St0, B),
- Vst#vst{current=St,branched=gb_trees:enter(Lbl, St, B),
- labels=gb_sets:add(Lbl, Lbls)};
+valfun_1({label,Lbl}, #vst{current=St0,
+ ref_ctr=Counter0,
+ branched=B,
+ labels=Lbls}=Vst) ->
+ {St, Counter} = merge_states(Lbl, St0, B, Counter0),
+ Vst#vst{current=St,
+ ref_ctr=Counter,
+ branched=gb_trees:enter(Lbl, St, B),
+ labels=gb_sets:add(Lbl, Lbls)};
valfun_1(_I, #vst{current=none}=Vst) ->
%% Ignore instructions after erlang:error/1,2, which
%% the original R10B compiler thought would return.
Vst;
valfun_1({badmatch,Src}, Vst) ->
- assert_term(Src, Vst),
+ assert_durable_term(Src, Vst),
verify_y_init(Vst),
kill_state(Vst);
valfun_1({case_end,Src}, Vst) ->
- assert_term(Src, Vst),
+ assert_durable_term(Src, Vst),
verify_y_init(Vst),
kill_state(Vst);
valfun_1(if_end, Vst) ->
@@ -286,36 +377,21 @@ valfun_1(if_end, Vst) ->
kill_state(Vst);
valfun_1({try_case_end,Src}, Vst) ->
verify_y_init(Vst),
- assert_term(Src, Vst),
+ assert_durable_term(Src, Vst),
kill_state(Vst);
%% Instructions that cannot cause exceptions
-valfun_1({bs_context_to_binary,Ctx}, #vst{current=#st{x=Xs}}=Vst) ->
- case Ctx of
- {Tag,X} when Tag =:= x; Tag =:= y ->
- Type = case gb_trees:lookup(X, Xs) of
- {value,#ms{}} -> term;
- _ -> get_term_type(Ctx, Vst)
- end,
- set_type_reg(Type, Ctx, Vst);
- _ ->
- error({bad_source,Ctx})
- end;
+valfun_1({bs_get_tail,Ctx,Dst,Live}, Vst0) ->
+ bsm_validate_context(Ctx, Vst0),
+ verify_live(Live, Vst0),
+ verify_y_init(Vst0),
+ Vst = prune_x_regs(Live, Vst0),
+ extract_term(binary, bs_get_tail, [Ctx], Dst, Vst, Vst0);
valfun_1(bs_init_writable=I, Vst) ->
call(I, 1, Vst);
valfun_1(build_stacktrace=I, Vst) ->
call(I, 1, Vst);
-valfun_1({move,{y,_}=Src,{y,_}=Dst}, Vst) ->
- %% The stack trimming optimization may generate a move from an initialized
- %% but unassigned Y register to another Y register.
- case get_term_type_1(Src, Vst) of
- {catchtag,_} -> error({catchtag,Src});
- {trytag,_} -> error({trytag,Src});
- Type -> set_type_reg(Type, Dst, Vst)
- end;
-valfun_1({move,Src,Dst}, Vst0) ->
- Type = get_move_term_type(Src, Vst0),
- Vst = set_type_reg(Type, Dst, Vst0),
- set_alias(Src, Dst, Vst);
+valfun_1({move,Src,Dst}, Vst) ->
+ assign(Src, Dst, Vst);
valfun_1({fmove,Src,{fr,_}=Dst}, Vst) ->
assert_type(float, Src, Vst),
set_freg(Dst, Vst);
@@ -323,15 +399,15 @@ valfun_1({fmove,{fr,_}=Src,Dst}, Vst0) ->
assert_freg_set(Src, Vst0),
assert_fls(checked, Vst0),
Vst = eat_heap_float(Vst0),
- set_type_reg({float,[]}, Dst, Vst);
-valfun_1({kill,{y,_}=Reg}, Vst) ->
- set_type_y(initialized, Reg, Vst);
-valfun_1({init,{y,_}=Reg}, Vst) ->
- set_type_y(initialized, Reg, Vst);
+ create_term({float,[]}, fmove, [], Dst, Vst);
+valfun_1({kill,Reg}, Vst) ->
+ create_tag(initialized, kill, [], Reg, Vst);
+valfun_1({init,Reg}, Vst) ->
+ create_tag(initialized, init, [], Reg, Vst);
valfun_1({test_heap,Heap,Live}, Vst) ->
test_heap(Heap, Live, Vst);
-valfun_1({bif,Op,{f,_},Src,Dst}=I, Vst) ->
- case is_bif_safe(Op, length(Src)) of
+valfun_1({bif,Op,{f,_},Ss,Dst}=I, Vst) ->
+ case is_bif_safe(Op, length(Ss)) of
false ->
%% Since the BIF can fail, make sure that any catch state
%% is updated.
@@ -339,21 +415,32 @@ valfun_1({bif,Op,{f,_},Src,Dst}=I, Vst) ->
true ->
%% It can't fail, so we finish handling it here (not updating
%% catch state).
- validate_src(Src, Vst),
- Type = bif_type(Op, Src, Vst),
- set_type_reg_expr(Type, I, Dst, Vst)
+ validate_src(Ss, Vst),
+ Type = bif_return_type(Op, Ss, Vst),
+ extract_term(Type, {bif,Op}, Ss, Dst, Vst)
end;
%% Put instructions.
valfun_1({put_list,A,B,Dst}, Vst0) ->
assert_term(A, Vst0),
assert_term(B, Vst0),
Vst = eat_heap(2, Vst0),
- set_type_reg(cons, Dst, Vst);
+ create_term(cons, put_list, [A, B], Dst, Vst);
+valfun_1({put_tuple2,Dst,{list,Elements}}, Vst0) ->
+ _ = [assert_term(El, Vst0) || El <- Elements],
+ Size = length(Elements),
+ Vst = eat_heap(Size+1, Vst0),
+ {Es,_} = foldl(fun(Val, {Es0, Index}) ->
+ Type = get_term_type(Val, Vst0),
+ Es = set_element_type({integer,Index}, Type, Es0),
+ {Es, Index + 1}
+ end, {#{}, 1}, Elements),
+ Type = {tuple,Size,Es},
+ create_term(Type, put_tuple2, [], Dst, Vst);
valfun_1({put_tuple,Sz,Dst}, Vst0) when is_integer(Sz) ->
Vst1 = eat_heap(1, Vst0),
- Vst = set_type_reg(tuple_in_progress, Dst, Vst1),
+ Vst = create_term(tuple_in_progress, put_tuple, [], Dst, Vst1),
#vst{current=St0} = Vst,
- St = St0#st{puts_left={Sz,{Dst,{tuple,Sz}}}},
+ St = St0#st{puts_left={Sz,{Dst,Sz,#{}}}},
Vst#vst{current=St};
valfun_1({put,Src}, Vst0) ->
assert_term(Src, Vst0),
@@ -362,11 +449,14 @@ valfun_1({put,Src}, Vst0) ->
case St0 of
#st{puts_left=none} ->
error(not_building_a_tuple);
- #st{puts_left={1,{Dst,Type}}} ->
+ #st{puts_left={1,{Dst,Sz,Es0}}} ->
+ Es = Es0#{ {integer,Sz} => get_term_type(Src, Vst0) },
St = St0#st{puts_left=none},
- set_type_reg(Type, Dst, Vst#vst{current=St});
- #st{puts_left={PutsLeft,Info}} when is_integer(PutsLeft) ->
- St = St0#st{puts_left={PutsLeft-1,Info}},
+ create_term({tuple,Sz,Es}, put_tuple, [], Dst, Vst#vst{current=St});
+ #st{puts_left={PutsLeft,{Dst,Sz,Es0}}} when is_integer(PutsLeft) ->
+ Index = Sz - PutsLeft + 1,
+ Es = Es0#{ {integer,Index} => get_term_type(Src, Vst0) },
+ St = St0#st{puts_left={PutsLeft-1,{Dst,Sz,Es}}},
Vst#vst{current=St}
end;
%% Instructions for optimization of selective receives.
@@ -379,13 +469,28 @@ valfun_1(remove_message, Vst) ->
%% The message term is no longer fragile. It can be used
%% without restrictions.
remove_fragility(Vst);
+valfun_1({'%', {type_info, Reg, match_context}}, Vst) ->
+ update_type(fun meet/2, #ms{}, Reg, Vst);
+valfun_1({'%', {type_info, Reg, Type}}, Vst) ->
+ %% Explicit type information inserted by optimization passes to indicate
+ %% that Reg has a certain type, so that we can accept cross-function type
+ %% optimizations.
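+    %% For example, {'%',{type_info,{x,0},{atom,[]}}} tells the validator
+    %% that {x,0} is known to contain an atom.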
+ update_type(fun meet/2, Type, Reg, Vst);
+valfun_1({'%', {remove_fragility, Reg}}, Vst) ->
+ %% This is a hack to make prim_eval:'receive'/2 work.
+ %%
+    %% Normally it's illegal to pass fragile terms as function arguments, as
+    %% we have no way of knowing what the callee will do with them, but we
+    %% know that prim_eval:'receive'/2 won't leak the term or cause a GC,
+    %% since GC is disabled while matching messages.
+ remove_fragility(Reg, Vst);
valfun_1({'%',_}, Vst) ->
Vst;
valfun_1({line,_}, Vst) ->
Vst;
%% Exception generating calls
valfun_1({call_ext,Live,Func}=I, Vst) ->
- case return_type(Func, Vst) of
+ case call_return_type(Func, Vst) of
exception ->
verify_live(Live, Vst),
%% The stack will be scanned, so Y registers
@@ -400,100 +505,122 @@ valfun_1(_I, #vst{current=#st{ct=undecided}}) ->
%%
%% Allocate and deallocate, et.al
valfun_1({allocate,Stk,Live}, Vst) ->
- allocate(false, Stk, 0, Live, Vst);
+ allocate(uninitialized, Stk, 0, Live, Vst);
valfun_1({allocate_heap,Stk,Heap,Live}, Vst) ->
- allocate(false, Stk, Heap, Live, Vst);
+ allocate(uninitialized, Stk, Heap, Live, Vst);
valfun_1({allocate_zero,Stk,Live}, Vst) ->
- allocate(true, Stk, 0, Live, Vst);
+ allocate(initialized, Stk, 0, Live, Vst);
valfun_1({allocate_heap_zero,Stk,Heap,Live}, Vst) ->
- allocate(true, Stk, Heap, Live, Vst);
+ allocate(initialized, Stk, Heap, Live, Vst);
valfun_1({deallocate,StkSize}, #vst{current=#st{numy=StkSize}}=Vst) ->
verify_no_ct(Vst),
deallocate(Vst);
valfun_1({deallocate,_}, #vst{current=#st{numy=NumY}}) ->
error({allocated,NumY});
-valfun_1({trim,N,Remaining}, #vst{current=#st{y=Yregs0,numy=NumY}=St}=Vst) ->
+valfun_1({trim,N,Remaining}, #vst{current=St0}=Vst) ->
+ #st{numy=NumY} = St0,
if
- N =< NumY, N+Remaining =:= NumY ->
- Yregs1 = [{Y-N,Type} || {Y,Type} <- gb_trees:to_list(Yregs0), Y >= N],
- Yregs = gb_trees_from_list(Yregs1),
- Vst#vst{current=St#st{y=Yregs,numy=NumY-N,aliases=#{}}};
- true ->
- error({trim,N,Remaining,allocated,NumY})
+ N =< NumY, N+Remaining =:= NumY ->
+ Vst#vst{current=trim_stack(N, 0, NumY, St0)};
+ N > NumY; N+Remaining =/= NumY ->
+ error({trim,N,Remaining,allocated,NumY})
end;
%% Catch & try.
valfun_1({'catch',Dst,{f,Fail}}, Vst) when Fail =/= none ->
init_try_catch_branch(catchtag, Dst, Fail, Vst);
valfun_1({'try',Dst,{f,Fail}}, Vst) when Fail =/= none ->
init_try_catch_branch(trytag, Dst, Fail, Vst);
-valfun_1({catch_end,Reg}, #vst{current=#st{ct=[Fail|Fails]}}=Vst0) ->
- case get_special_y_type(Reg, Vst0) of
- {catchtag,Fail} ->
- Vst = #vst{current=St} = set_catch_end(Reg, Vst0),
- Xregs = gb_trees:enter(0, term, St#st.x),
- Vst#vst{current=St#st{x=Xregs,ct=Fails,fls=undefined,aliases=#{}}};
- Type ->
- error({bad_type,Type})
+valfun_1({catch_end,Reg}, #vst{current=#st{ct=[Fail|_]}}=Vst0) ->
+ case get_tag_type(Reg, Vst0) of
+ {catchtag,Fail} ->
+ %% {x,0} contains the caught term, if any.
+ create_term(term, catch_end, [], {x,0}, kill_catch_tag(Reg, Vst0));
+ Type ->
+ error({wrong_tag_type,Type})
end;
-valfun_1({try_end,Reg}, #vst{current=#st{ct=[Fail|Fails]}=St0}=Vst) ->
- case get_special_y_type(Reg, Vst) of
- {trytag,Fail} ->
- St = St0#st{ct=Fails,fls=undefined},
- set_catch_end(Reg, Vst#vst{current=St});
- Type ->
- error({bad_type,Type})
+valfun_1({try_end,Reg}, #vst{current=#st{ct=[Fail|_]}}=Vst) ->
+ case get_tag_type(Reg, Vst) of
+ {trytag,Fail} ->
+            %% Kill the catch tag; note that x registers are unaffected.
+ kill_catch_tag(Reg, Vst);
+ Type ->
+ error({wrong_tag_type,Type})
end;
-valfun_1({try_case,Reg}, #vst{current=#st{ct=[Fail|Fails]}}=Vst0) ->
- case get_special_y_type(Reg, Vst0) of
- {trytag,Fail} ->
- Vst = #vst{current=St} = set_catch_end(Reg, Vst0),
- Xs = gb_trees_from_list([{0,{atom,[]}},{1,term},{2,term}]),
- Vst#vst{current=St#st{x=Xs,ct=Fails,fls=undefined,aliases=#{}}};
- Type ->
- error({bad_type,Type})
+valfun_1({try_case,Reg}, #vst{current=#st{ct=[Fail|_]}}=Vst0) ->
+ case get_tag_type(Reg, Vst0) of
+ {trytag,Fail} ->
+ %% Kill the catch tag and all x registers.
+ Vst1 = prune_x_regs(0, kill_catch_tag(Reg, Vst0)),
+
+ %% Class:Error:Stacktrace
+ Vst2 = create_term({atom,[]}, try_case, [], {x,0}, Vst1),
+ Vst = create_term(term, try_case, [], {x,1}, Vst2),
+ create_term(term, try_case, [], {x,2}, Vst);
+ Type ->
+ error({wrong_tag_type,Type})
end;
valfun_1({get_list,Src,D1,D2}, Vst0) ->
+ assert_not_literal(Src),
assert_type(cons, Src, Vst0),
- Vst = set_type_reg(term, Src, D1, Vst0),
- set_type_reg(term, Src, D2, Vst);
+ Vst = extract_term(term, get_hd, [Src], D1, Vst0),
+ extract_term(term, get_tl, [Src], D2, Vst);
valfun_1({get_hd,Src,Dst}, Vst) ->
+ assert_not_literal(Src),
assert_type(cons, Src, Vst),
- set_type_reg(term, Src, Dst, Vst);
+ extract_term(term, get_hd, [Src], Dst, Vst);
valfun_1({get_tl,Src,Dst}, Vst) ->
+ assert_not_literal(Src),
assert_type(cons, Src, Vst),
- set_type_reg(term, Src, Dst, Vst);
-valfun_1({get_tuple_element,Src,I,Dst}, Vst) ->
- assert_type({tuple_element,I+1}, Src, Vst),
- set_type_reg(term, Src, Dst, Vst);
+ extract_term(term, get_tl, [Src], Dst, Vst);
+valfun_1({get_tuple_element,Src,N,Dst}, Vst) ->
+ assert_not_literal(Src),
+ assert_type({tuple_element,N+1}, Src, Vst),
+ Index = {integer,N+1},
+ Type = get_element_type(Index, Src, Vst),
+ extract_term(Type, {bif,element}, [Index, Src], Dst, Vst);
valfun_1({jump,{f,Lbl}}, Vst) ->
- kill_state(branch_state(Lbl, Vst));
+ branch(Lbl, Vst,
+ fun(SuccVst) ->
+ %% The next instruction is never executed.
+ kill_state(SuccVst)
+ end);
valfun_1(I, Vst) ->
valfun_2(I, Vst).
init_try_catch_branch(Tag, Dst, Fail, Vst0) ->
- Vst1 = set_type_y({Tag,[Fail]}, Dst, Vst0),
+ Vst1 = create_tag({Tag,[Fail]}, 'try_catch', [], Dst, Vst0),
#vst{current=#st{ct=Fails}=St0} = Vst1,
- CurrentSt = St0#st{ct=[[Fail]|Fails]},
-
- %% Set the initial state at the try/catch label.
- %% Assume that Y registers contain terms or try/catch
- %% tags.
- Yregs0 = map(fun({Y,uninitialized}) -> {Y,term};
- ({Y,initialized}) -> {Y,term};
- (E) -> E
- end, gb_trees:to_list(CurrentSt#st.y)),
- Yregs = gb_trees:from_orddict(Yregs0),
- BranchSt = CurrentSt#st{y=Yregs},
-
- Vst = branch_state(Fail, Vst1#vst{current=BranchSt}),
- Vst#vst{current=CurrentSt}.
-
-%% Update branched state if necessary and try next set of instructions.
-valfun_2(I, #vst{current=#st{ct=[]}}=Vst) ->
- valfun_3(I, Vst);
+ St = St0#st{ct=[[Fail]|Fails]},
+ Vst = Vst0#vst{current=St},
+
+ branch(Fail, Vst,
+ fun(CatchVst) ->
+ #vst{current=#st{ys=Ys}} = CatchVst,
+ maps:fold(fun init_catch_handler_1/3, CatchVst, Ys)
+ end,
+ fun(SuccVst) ->
+ %% All potentially-throwing instructions after this
+ %% one will implicitly branch to the fail label;
+ %% see valfun_2/2
+ SuccVst
+ end).
+
+%% Set the initial state at the try/catch label. Assume that Y registers
+%% contain terms or try/catch tags.
+init_catch_handler_1(Reg, initialized, Vst) ->
+ create_term(term, 'catch_handler', [], Reg, Vst);
+init_catch_handler_1(Reg, uninitialized, Vst) ->
+ create_term(term, 'catch_handler', [], Reg, Vst);
+init_catch_handler_1(_, _, Vst) ->
+ Vst.
+
valfun_2(I, #vst{current=#st{ct=[[Fail]|_]}}=Vst) when is_integer(Fail) ->
- %% Update branched state.
+ %% We have an active try/catch tag and we can jump there from this
+ %% instruction, so we need to update the branched state of the try/catch
+ %% handler.
valfun_3(I, branch_state(Fail, Vst));
+valfun_2(I, #vst{current=#st{ct=[]}}=Vst) ->
+ valfun_3(I, Vst);
valfun_2(_, _) ->
error(ambiguous_catch_try_state).
@@ -501,17 +628,23 @@ valfun_2(_, _) ->
%% Floating point.
valfun_3({fconv,Src,{fr,_}=Dst}, Vst) ->
assert_term(Src, Vst),
- set_freg(Dst, Vst);
-valfun_3({bif,fadd,_,[_,_]=Src,Dst}, Vst) ->
- float_op(Src, Dst, Vst);
-valfun_3({bif,fdiv,_,[_,_]=Src,Dst}, Vst) ->
- float_op(Src, Dst, Vst);
-valfun_3({bif,fmul,_,[_,_]=Src,Dst}, Vst) ->
- float_op(Src, Dst, Vst);
-valfun_3({bif,fnegate,_,[_]=Src,Dst}, Vst) ->
- float_op(Src, Dst, Vst);
-valfun_3({bif,fsub,_,[_,_]=Src,Dst}, Vst) ->
- float_op(Src, Dst, Vst);
+
+ %% An exception is raised on error, hence branching to 0.
+ branch(0, Vst,
+ fun(SuccVst0) ->
+ SuccVst = update_type(fun meet/2, number, Src, SuccVst0),
+ set_freg(Dst, SuccVst)
+ end);
+valfun_3({bif,fadd,_,[_,_]=Ss,Dst}, Vst) ->
+ float_op(Ss, Dst, Vst);
+valfun_3({bif,fdiv,_,[_,_]=Ss,Dst}, Vst) ->
+ float_op(Ss, Dst, Vst);
+valfun_3({bif,fmul,_,[_,_]=Ss,Dst}, Vst) ->
+ float_op(Ss, Dst, Vst);
+valfun_3({bif,fnegate,_,[_]=Ss,Dst}, Vst) ->
+ float_op(Ss, Dst, Vst);
+valfun_3({bif,fsub,_,[_,_]=Ss,Dst}, Vst) ->
+ float_op(Ss, Dst, Vst);
valfun_3(fclearerror, Vst) ->
case get_fls(Vst) of
undefined -> ok;
@@ -562,65 +695,87 @@ valfun_4({call_ext_last,_,_,_}, #vst{current=#st{numy=NumY}}) ->
valfun_4({make_fun2,_,_,_,Live}, Vst) ->
call(make_fun, Live, Vst);
%% Other BIFs
-valfun_4({bif,tuple_size,{f,Fail},[Tuple],Dst}=I, Vst0) ->
- TupleType0 = get_term_type(Tuple, Vst0),
- Vst1 = branch_state(Fail, Vst0),
- TupleType = upgrade_tuple_type({tuple,[0]}, TupleType0),
- Vst = set_type(TupleType, Tuple, Vst1),
- set_type_reg_expr({integer,[]}, I, Dst, Vst);
-valfun_4({bif,element,{f,Fail},[Pos,Tuple],Dst}, Vst0) ->
- TupleType0 = get_term_type(Tuple, Vst0),
- PosType = get_term_type(Pos, Vst0),
- Vst1 = branch_state(Fail, Vst0),
- TupleType = upgrade_tuple_type({tuple,[get_tuple_size(PosType)]}, TupleType0),
- Vst = set_aliased_type(TupleType, Tuple, Vst1),
- set_type_reg(term, Tuple, Dst, Vst);
+valfun_4({bif,element,{f,Fail},[Pos,Src],Dst}, Vst) ->
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ PosType = get_term_type(Pos, SuccVst0),
+ TupleType = {tuple,[get_tuple_size(PosType)],#{}},
+
+ SuccVst1 = update_type(fun meet/2, TupleType,
+ Src, SuccVst0),
+ SuccVst = update_type(fun meet/2, {integer,[]},
+ Pos, SuccVst1),
+
+ ElementType = get_element_type(PosType, Src, SuccVst),
+ extract_term(ElementType, {bif,element}, [Pos,Src],
+ Dst, SuccVst)
+ end);
valfun_4({bif,raise,{f,0},Src,_Dst}, Vst) ->
validate_src(Src, Vst),
kill_state(Vst);
valfun_4(raw_raise=I, Vst) ->
call(I, 3, Vst);
-valfun_4({bif,map_get,{f,Fail},[_Key,Map]=Src,Dst}, Vst0) ->
- validate_src(Src, Vst0),
- Vst1 = branch_state(Fail, Vst0),
- Vst = set_type(map, Map, Vst1),
- Type = propagate_fragility(term, Src, Vst),
- set_type_reg(Type, Dst, Vst);
-valfun_4({bif,is_map_key,{f,Fail},[_Key,Map]=Src,Dst}, Vst0) ->
- validate_src(Src, Vst0),
- Vst1 = branch_state(Fail, Vst0),
- Vst = set_type(map, Map, Vst1),
- Type = propagate_fragility(bool, Src, Vst),
- set_type_reg(Type, Dst, Vst);
-valfun_4({bif,Op,{f,Fail},Src,Dst}, Vst0) ->
- validate_src(Src, Vst0),
- Vst = branch_state(Fail, Vst0),
- Type0 = bif_type(Op, Src, Vst),
- Type = propagate_fragility(Type0, Src, Vst),
- set_type_reg(Type, Dst, Vst);
-valfun_4({gc_bif,Op,{f,Fail},Live,Src,Dst}, #vst{current=St0}=Vst0) ->
+valfun_4({bif,Op,{f,Fail},[Src]=Ss,Dst}, Vst) when Op =:= hd; Op =:= tl ->
+ assert_term(Src, Vst),
+ branch(Fail, Vst,
+ fun(FailVst) ->
+ update_type(fun subtract/2, cons, Src, FailVst)
+ end,
+ fun(SuccVst0) ->
+ SuccVst = update_type(fun meet/2, cons, Src, SuccVst0),
+ extract_term(term, {bif,Op}, Ss, Dst, SuccVst)
+ end);
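+%% A minimal sketch of the branching above: if Src is known to be 'list'
+%% ([] or [_|_]) when hd/tl is reached, then
+%%
+%%    success path:  meet(list, cons)     -> cons
+%%    failure path:  subtract(list, cons) -> nil
+%%
+%% so the only way to reach the fail label is with the empty list.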
+valfun_4({bif,Op,{f,Fail},Ss,Dst}, Vst) ->
+ validate_src(Ss, Vst),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ %% Infer argument types. Note that we can't subtract
+ %% types as the BIF could fail for reasons other than
+ %% bad argument types.
+ ArgTypes = bif_arg_types(Op, Ss),
+ SuccVst = foldl(fun({Arg, T}, V) ->
+ update_type(fun meet/2, T, Arg, V)
+ end, SuccVst0, zip(Ss, ArgTypes)),
+ Type = bif_return_type(Op, Ss, SuccVst),
+ extract_term(Type, {bif,Op}, Ss, Dst, SuccVst)
+ end);
+valfun_4({gc_bif,Op,{f,Fail},Live,Ss,Dst}, #vst{current=St0}=Vst0) ->
+ validate_src(Ss, Vst0),
verify_live(Live, Vst0),
verify_y_init(Vst0),
+
+ %% Heap allocations and X registers are killed regardless of whether we
+ %% fail or not, as we may fail after GC.
St = kill_heap_allocation(St0),
- Vst1 = Vst0#vst{current=St},
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- validate_src(Src, Vst),
- Type0 = bif_type(Op, Src, Vst),
- Type = propagate_fragility(Type0, Src, Vst),
- set_type_reg(Type, Dst, Vst);
+ Vst = prune_x_regs(Live, Vst0#vst{current=St}),
+
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ ArgTypes = bif_arg_types(Op, Ss),
+ SuccVst = foldl(fun({Arg, T}, V) ->
+ update_type(fun meet/2, T, Arg, V)
+ end, SuccVst0, zip(Ss, ArgTypes)),
+
+ Type = bif_return_type(Op, Ss, SuccVst),
+
+ %% We're passing Vst0 as the original because the
+ %% registers were pruned before the branch.
+ extract_term(Type, {gc_bif,Op}, Ss, Dst, SuccVst, Vst0)
+ end);
valfun_4(return, #vst{current=#st{numy=none}}=Vst) ->
- assert_term({x,0}, Vst),
+ assert_durable_term({x,0}, Vst),
kill_state(Vst);
valfun_4(return, #vst{current=#st{numy=NumY}}) ->
error({stack_frame,NumY});
-valfun_4({loop_rec,{f,Fail},Dst}, Vst0) ->
- Vst = branch_state(Fail, Vst0),
- %% This term may not be part of the root set until
- %% remove_message/0 is executed. If control transfers
- %% to the loop_rec_end/1 instruction, no part of
- %% this term must be stored in a Y register.
- set_type_reg({fragile,term}, Dst, Vst);
+valfun_4({loop_rec,{f,Fail},Dst}, Vst) ->
+ %% This term may not be part of the root set until remove_message/0 is
+ %% executed. If control transfers to the loop_rec_end/1 instruction, no
+ %% part of this term must be stored in a Y register.
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ {Ref, SuccVst} = new_value(term, loop_rec, [], SuccVst0),
+ mark_fragile(Dst, set_reg_vref(Ref, Dst, SuccVst))
+ end);
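+%% A small worked example of the rule above (a sketch; the remove_message
+%% handling that eventually makes the term durable is not part of this
+%% clause):
+%%
+%%    {loop_rec,{f,Fail},{x,0}}.   %% {x,0} becomes a fresh, fragile 'term'
+%%    return.                      %% rejected: assert_durable_term({x,0}, _)
+%%
+%% That is, the received term may be inspected, but it must not escape the
+%% receive loop while it may still be outside the root set.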
valfun_4({wait,_}, Vst) ->
verify_y_init(Vst),
kill_state(Vst);
@@ -631,154 +786,169 @@ valfun_4({wait_timeout,_,Src}, Vst) ->
valfun_4({loop_rec_end,_}, Vst) ->
verify_y_init(Vst),
kill_state(Vst);
-valfun_4(timeout, #vst{current=St}=Vst) ->
- Vst#vst{current=St#st{x=init_regs(0, term)}};
+valfun_4(timeout, Vst) ->
+ prune_x_regs(0, Vst);
valfun_4(send, Vst) ->
call(send, 2, Vst);
-valfun_4({set_tuple_element,Src,Tuple,I}, Vst) ->
+valfun_4({set_tuple_element,Src,Tuple,N}, Vst) ->
+ I = N + 1,
assert_term(Src, Vst),
- assert_type({tuple_element,I+1}, Tuple, Vst),
- Vst;
+ assert_type({tuple_element,I}, Tuple, Vst),
+ %% Manually update the tuple type; we can't rely on the ordinary update
+ %% helpers as we must support overwriting (rather than just widening or
+ %% narrowing) known elements, and we can't use extract_term either since
+ %% the source tuple may be aliased.
+ {tuple, Sz, Es0} = get_term_type(Tuple, Vst),
+ Es = set_element_type({integer,I}, get_term_type(Src, Vst), Es0),
+ override_type({tuple, Sz, Es}, Tuple, Vst);
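+%% A sketch of the manual update above, assuming set_element_type/3 simply
+%% stores the new type under the given index: with Tuple known to be
+%% {tuple,3,#{ {integer,1} => {atom,[]} }}, Src of type {integer,[]} and
+%% N = 0, element 1 ends up as
+%%
+%%    Es = #{ {integer,1} => {integer,[]} }
+%%
+%% and Tuple is overridden to {tuple,3,Es}. A meet/2-based update could not
+%% express this, as meet({atom,[]}, {integer,[]}) is 'none'.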
%% Match instructions.
-valfun_4({select_val,Src,{f,Fail},{list,Choices}}, Vst0) ->
- assert_term(Src, Vst0),
- Vst = branch_state(Fail, Vst0),
- kill_state(select_val_branches(Src, Choices, Vst));
+valfun_4({select_val,Src,{f,Fail},{list,Choices}}, Vst) ->
+ assert_term(Src, Vst),
+ assert_choices(Choices),
+ validate_select_val(Fail, Choices, Src, Vst);
valfun_4({select_tuple_arity,Tuple,{f,Fail},{list,Choices}}, Vst) ->
assert_type(tuple, Tuple, Vst),
- TupleType = case get_term_type(Tuple, Vst) of
- {fragile,TupleType0} -> TupleType0;
- TupleType0 -> TupleType0
- end,
- kill_state(branch_arities(Choices, Tuple, TupleType,
- branch_state(Fail, Vst)));
+ assert_arities(Choices),
+ validate_select_tuple_arity(Fail, Choices, Tuple, Vst);
%% New bit syntax matching instructions.
-valfun_4({test,bs_start_match2,{f,Fail},Live,[Ctx,NeedSlots],Ctx}, Vst0) ->
- %% If source and destination registers are the same, match state
- %% is OK as input.
- CtxType = get_move_term_type(Ctx, Vst0),
- verify_live(Live, Vst0),
- verify_y_init(Vst0),
- Vst1 = prune_x_regs(Live, Vst0),
- BranchVst = case CtxType of
- #ms{} ->
- %% The failure branch will never be taken when Ctx
- %% is a match context. Therefore, the type for Ctx
- %% at the failure label must not be match_context
- %% (or we could reject legal code).
- set_type_reg(term, Ctx, Vst1);
- _ ->
- Vst1
- end,
- Vst = branch_state(Fail, BranchVst),
- set_type_reg(bsm_match_state(NeedSlots), Ctx, Vst);
-valfun_4({test,bs_start_match2,{f,Fail},Live,[Src,Slots],Dst}, Vst0) ->
- assert_term(Src, Vst0),
- verify_live(Live, Vst0),
- verify_y_init(Vst0),
- Vst1 = prune_x_regs(Live, Vst0),
- Vst = branch_state(Fail, Vst1),
- set_type_reg(bsm_match_state(Slots), Src, Dst, Vst);
+valfun_4({test,bs_start_match3,{f,Fail},Live,[Src],Dst}, Vst) ->
+ validate_bs_start_match(Fail, Live, bsm_match_state(), Src, Dst, Vst);
+valfun_4({test,bs_start_match2,{f,Fail},Live,[Src,Slots],Dst}, Vst) ->
+ validate_bs_start_match(Fail, Live, bsm_match_state(Slots), Src, Dst, Vst);
valfun_4({test,bs_match_string,{f,Fail},[Ctx,_,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_skip_bits2,{f,Fail},[Ctx,Src,_,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_test_tail2,{f,Fail},[Ctx,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_test_unit,{f,Fail},[Ctx,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_skip_utf8,{f,Fail},[Ctx,Live,_]}, Vst) ->
validate_bs_skip_utf(Fail, Ctx, Live, Vst);
valfun_4({test,bs_skip_utf16,{f,Fail},[Ctx,Live,_]}, Vst) ->
validate_bs_skip_utf(Fail, Ctx, Live, Vst);
valfun_4({test,bs_skip_utf32,{f,Fail},[Ctx,Live,_]}, Vst) ->
validate_bs_skip_utf(Fail, Ctx, Live, Vst);
-valfun_4({test,bs_get_integer2,{f,Fail},Live,[Ctx,_,_,_],Dst}, Vst) ->
- validate_bs_get(Fail, Ctx, Live, {integer, []}, Dst, Vst);
-valfun_4({test,bs_get_float2,{f,Fail},Live,[Ctx,_,_,_],Dst}, Vst) ->
- validate_bs_get(Fail, Ctx, Live, {float, []}, Dst, Vst);
-valfun_4({test,bs_get_binary2,{f,Fail},Live,[Ctx,_,_,_],Dst}, Vst) ->
- Type = propagate_fragility(term, [Ctx], Vst),
- validate_bs_get(Fail, Ctx, Live, Type, Dst, Vst);
-valfun_4({test,bs_get_utf8,{f,Fail},Live,[Ctx,_],Dst}, Vst) ->
- validate_bs_get(Fail, Ctx, Live, {integer, []}, Dst, Vst);
-valfun_4({test,bs_get_utf16,{f,Fail},Live,[Ctx,_],Dst}, Vst) ->
- validate_bs_get(Fail, Ctx, Live, {integer, []}, Dst, Vst);
-valfun_4({test,bs_get_utf32,{f,Fail},Live,[Ctx,_],Dst}, Vst) ->
- validate_bs_get(Fail, Ctx, Live, {integer, []}, Dst, Vst);
+valfun_4({test,bs_get_integer2=Op,{f,Fail},Live,[Ctx,_,_,_],Dst}, Vst) ->
+ validate_bs_get(Op, Fail, Ctx, Live, {integer, []}, Dst, Vst);
+valfun_4({test,bs_get_float2=Op,{f,Fail},Live,[Ctx,_,_,_],Dst}, Vst) ->
+ validate_bs_get(Op, Fail, Ctx, Live, {float, []}, Dst, Vst);
+valfun_4({test,bs_get_binary2=Op,{f,Fail},Live,[Ctx,_,_,_],Dst}, Vst) ->
+ validate_bs_get(Op, Fail, Ctx, Live, binary, Dst, Vst);
+valfun_4({test,bs_get_utf8=Op,{f,Fail},Live,[Ctx,_],Dst}, Vst) ->
+ validate_bs_get(Op, Fail, Ctx, Live, {integer, []}, Dst, Vst);
+valfun_4({test,bs_get_utf16=Op,{f,Fail},Live,[Ctx,_],Dst}, Vst) ->
+ validate_bs_get(Op, Fail, Ctx, Live, {integer, []}, Dst, Vst);
+valfun_4({test,bs_get_utf32=Op,{f,Fail},Live,[Ctx,_],Dst}, Vst) ->
+ validate_bs_get(Op, Fail, Ctx, Live, {integer, []}, Dst, Vst);
valfun_4({bs_save2,Ctx,SavePoint}, Vst) ->
bsm_save(Ctx, SavePoint, Vst);
valfun_4({bs_restore2,Ctx,SavePoint}, Vst) ->
bsm_restore(Ctx, SavePoint, Vst);
+valfun_4({bs_get_position, Ctx, Dst, Live}, Vst0) ->
+ bsm_validate_context(Ctx, Vst0),
+ verify_live(Live, Vst0),
+ verify_y_init(Vst0),
+ Vst = prune_x_regs(Live, Vst0),
+ create_term(ms_position, bs_get_position, [Ctx], Dst, Vst, Vst0);
+valfun_4({bs_set_position, Ctx, Pos}, Vst) ->
+ bsm_validate_context(Ctx, Vst),
+ assert_type(ms_position, Pos, Vst),
+ Vst;
%% Other test instructions.
-valfun_4({test,is_float,{f,Lbl},[Float]}, Vst) ->
- assert_term(Float, Vst),
- set_type({float,[]}, Float, branch_state(Lbl, Vst));
-valfun_4({test,is_tuple,{f,Lbl},[Tuple]}, Vst) ->
- Type0 = get_term_type(Tuple, Vst),
- Type = upgrade_tuple_type({tuple,[0]}, Type0),
- set_aliased_type(Type, Tuple, branch_state(Lbl, Vst));
-valfun_4({test,is_nonempty_list,{f,Lbl},[Cons]}, Vst) ->
- assert_term(Cons, Vst),
- Type = cons,
- set_aliased_type(Type, Cons, branch_state(Lbl, Vst));
-valfun_4({test,test_arity,{f,Lbl},[Tuple,Sz]}, Vst) when is_integer(Sz) ->
- assert_type(tuple, Tuple, Vst),
- Type = {tuple,Sz},
- set_aliased_type(Type, Tuple, branch_state(Lbl, Vst));
-valfun_4({test,is_tagged_tuple,{f,Lbl},[Src,Sz,_Atom]}, Vst) ->
- validate_src([Src], Vst),
- Type = {tuple,Sz},
- set_aliased_type(Type, Src, branch_state(Lbl, Vst));
valfun_4({test,has_map_fields,{f,Lbl},Src,{list,List}}, Vst) ->
assert_type(map, Src, Vst),
assert_unique_map_keys(List),
- branch_state(Lbl, Vst);
-valfun_4({test,is_map,{f,Lbl},[Src]}, Vst0) ->
- Vst = branch_state(Lbl, Vst0),
- case Src of
- {Tag,_} when Tag =:= x; Tag =:= y ->
- Type = map,
- set_aliased_type(Type, Src, Vst);
- {literal,Map} when is_map(Map) ->
- Vst0;
- _ ->
- kill_state(Vst0)
- end;
-valfun_4({test,is_eq_exact,{f,Lbl},[Src,Val]=Ss}, Vst0) ->
- validate_src(Ss, Vst0),
- Infer = infer_types(Src, Vst0),
- Vst1 = Infer(Val, Vst0),
- Vst = branch_state(Lbl, Vst1),
- case Val of
- {literal,Tuple} when is_tuple(Tuple) ->
- Type0 = get_term_type(Val, Vst),
- Type = upgrade_tuple_type({tuple,tuple_size(Tuple)},
- Type0),
- set_aliased_type(Type, Src, Vst);
- _ ->
- Vst
- end;
+ branch(Lbl, Vst, fun(V) -> V end);
+valfun_4({test,is_atom,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, {atom,[]}, Src, Vst);
+valfun_4({test,is_binary,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, binary, Src, Vst);
+valfun_4({test,is_bitstr,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, binary, Src, Vst);
+valfun_4({test,is_boolean,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, bool, Src, Vst);
+valfun_4({test,is_float,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, {float,[]}, Src, Vst);
+valfun_4({test,is_tuple,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, {tuple,[0],#{}}, Src, Vst);
+valfun_4({test,is_integer,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, {integer,[]}, Src, Vst);
+valfun_4({test,is_nonempty_list,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, cons, Src, Vst);
+valfun_4({test,is_number,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, number, Src, Vst);
+valfun_4({test,is_list,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, list, Src, Vst);
+valfun_4({test,is_map,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, map, Src, Vst);
+valfun_4({test,is_nil,{f,Lbl},[Src]}, Vst) ->
+ %% is_nil is an exact check against the 'nil' value, and should not be
+ %% treated as a simple type test.
+ assert_term(Src, Vst),
+ branch(Lbl, Vst,
+ fun(FailVst) ->
+ update_ne_types(Src, nil, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_eq_types(Src, nil, SuccVst)
+ end);
+valfun_4({test,test_arity,{f,Lbl},[Tuple,Sz]}, Vst) when is_integer(Sz) ->
+ assert_type(tuple, Tuple, Vst),
+ Type = {tuple, Sz, #{}},
+ type_test(Lbl, Type, Tuple, Vst);
+valfun_4({test,is_tagged_tuple,{f,Lbl},[Src,Sz,Atom]}, Vst) ->
+ assert_term(Src, Vst),
+ Type = {tuple, Sz, #{ {integer,1} => Atom }},
+ type_test(Lbl, Type, Src, Vst);
+valfun_4({test,is_eq_exact,{f,Lbl},[Src,Val]=Ss}, Vst) ->
+ validate_src(Ss, Vst),
+ branch(Lbl, Vst,
+ fun(FailVst) ->
+ update_ne_types(Src, Val, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_eq_types(Src, Val, SuccVst)
+ end);
+valfun_4({test,is_ne_exact,{f,Lbl},[Src,Val]=Ss}, Vst) ->
+ validate_src(Ss, Vst),
+ branch(Lbl, Vst,
+ fun(FailVst) ->
+ update_eq_types(Src, Val, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_ne_types(Src, Val, SuccVst)
+ end);
valfun_4({test,_Op,{f,Lbl},Src}, Vst) ->
+ %% is_pid, is_reference, et cetera.
validate_src(Src, Vst),
- branch_state(Lbl, Vst);
+ branch(Lbl, Vst, fun(V) -> V end);
valfun_4({bs_add,{f,Fail},[A,B,_],Dst}, Vst) ->
assert_term(A, Vst),
assert_term(B, Vst),
- set_type_reg({integer,[]}, Dst, branch_state(Fail, Vst));
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term({integer,[]}, bs_add, [A, B], Dst, SuccVst)
+ end);
valfun_4({bs_utf8_size,{f,Fail},A,Dst}, Vst) ->
assert_term(A, Vst),
- set_type_reg({integer,[]}, Dst, branch_state(Fail, Vst));
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term({integer,[]}, bs_utf8_size, [A], Dst, SuccVst)
+ end);
valfun_4({bs_utf16_size,{f,Fail},A,Dst}, Vst) ->
assert_term(A, Vst),
- set_type_reg({integer,[]}, Dst, branch_state(Fail, Vst));
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term({integer,[]}, bs_utf16_size, [A], Dst, SuccVst)
+ end);
valfun_4({bs_init2,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
verify_live(Live, Vst0),
verify_y_init(Vst0),
@@ -788,10 +958,12 @@ valfun_4({bs_init2,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
true ->
assert_term(Sz, Vst0)
end,
- Vst1 = heap_alloc(Heap, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- set_type_reg(binary, Dst, Vst);
+ Vst = heap_alloc(Heap, Vst0),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ create_term(binary, bs_init2, [], Dst, SuccVst, SuccVst0)
+ end);
valfun_4({bs_init_bits,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
verify_live(Live, Vst0),
verify_y_init(Vst0),
@@ -801,116 +973,203 @@ valfun_4({bs_init_bits,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
true ->
assert_term(Sz, Vst0)
end,
- Vst1 = heap_alloc(Heap, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- set_type_reg(binary, Dst, Vst);
+ Vst = heap_alloc(Heap, Vst0),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ create_term(binary, bs_init_bits, [], Dst, SuccVst)
+ end);
valfun_4({bs_append,{f,Fail},Bits,Heap,Live,_Unit,Bin,_Flags,Dst}, Vst0) ->
verify_live(Live, Vst0),
verify_y_init(Vst0),
assert_term(Bits, Vst0),
assert_term(Bin, Vst0),
- Vst1 = heap_alloc(Heap, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- set_type_reg(binary, Dst, Vst);
-valfun_4({bs_private_append,{f,Fail},Bits,_Unit,Bin,_Flags,Dst}, Vst0) ->
- assert_term(Bits, Vst0),
- assert_term(Bin, Vst0),
- Vst = branch_state(Fail, Vst0),
- set_type_reg(binary, Dst, Vst);
+ Vst = heap_alloc(Heap, Vst0),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ create_term(binary, bs_append, [Bin], Dst, SuccVst, SuccVst0)
+ end);
+valfun_4({bs_private_append,{f,Fail},Bits,_Unit,Bin,_Flags,Dst}, Vst) ->
+ assert_term(Bits, Vst),
+ assert_term(Bin, Vst),
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term(binary, bs_private_append, [Bin], Dst, SuccVst)
+ end);
valfun_4({bs_put_string,Sz,_}, Vst) when is_integer(Sz) ->
Vst;
valfun_4({bs_put_binary,{f,Fail},Sz,_,_,Src}, Vst) ->
assert_term(Sz, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, binary, Src, SuccVst)
+ end);
valfun_4({bs_put_float,{f,Fail},Sz,_,_,Src}, Vst) ->
assert_term(Sz, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {float,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_integer,{f,Fail},Sz,_,_,Src}, Vst) ->
assert_term(Sz, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_utf8,{f,Fail},_,Src}, Vst) ->
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_utf16,{f,Fail},_,Src}, Vst) ->
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_utf32,{f,Fail},_,Src}, Vst) ->
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
%% Map instructions.
-valfun_4({put_map_assoc,{f,Fail},Src,Dst,Live,{list,List}}, Vst) ->
- verify_put_map(Fail, Src, Dst, Live, List, Vst);
-valfun_4({put_map_exact,{f,Fail},Src,Dst,Live,{list,List}}, Vst) ->
- verify_put_map(Fail, Src, Dst, Live, List, Vst);
+valfun_4({put_map_assoc=Op,{f,Fail},Src,Dst,Live,{list,List}}, Vst) ->
+ verify_put_map(Op, Fail, Src, Dst, Live, List, Vst);
+valfun_4({put_map_exact=Op,{f,Fail},Src,Dst,Live,{list,List}}, Vst) ->
+ verify_put_map(Op, Fail, Src, Dst, Live, List, Vst);
valfun_4({get_map_elements,{f,Fail},Src,{list,List}}, Vst) ->
verify_get_map(Fail, Src, List, Vst);
valfun_4(_, _) ->
error(unknown_instruction).
verify_get_map(Fail, Src, List, Vst0) ->
+ assert_not_literal(Src), %OTP 22.
assert_type(map, Src, Vst0),
- Vst1 = foldl(fun(D, Vsti) ->
- case is_reg_defined(D,Vsti) of
- true -> set_type_reg(term,D,Vsti);
- false -> Vsti
- end
- end, Vst0, extract_map_vals(List)),
- Vst2 = branch_state(Fail, Vst1),
- Keys = extract_map_keys(List),
- assert_unique_map_keys(Keys),
- verify_get_map_pair(List, Src, Vst0, Vst2).
-
-extract_map_vals([_Key,Val|T]) ->
- [Val|extract_map_vals(T)];
-extract_map_vals([]) -> [].
+
+ branch(Fail, Vst0,
+ fun(FailVst) ->
+ clobber_map_vals(List, Src, FailVst)
+ end,
+ fun(SuccVst) ->
+ Keys = extract_map_keys(List),
+ assert_unique_map_keys(Keys),
+ extract_map_vals(List, Src, SuccVst, SuccVst)
+ end).
+
+%% get_map_elements may leave its destinations in an inconsistent state when
+%% the fail label is taken. Consider the following:
+%%
+%% {get_map_elements,{f,7},{x,1},{list,[{atom,a},{x,1},{atom,b},{x,2}]}}.
+%%
+%% If 'a' exists but not 'b', {x,1} is overwritten when we jump to {f,7}.
+clobber_map_vals([Key,Dst|T], Map, Vst0) ->
+ case is_reg_defined(Dst, Vst0) of
+ true ->
+ Vst = extract_term(term, {bif,map_get}, [Key, Map], Dst, Vst0),
+ clobber_map_vals(T, Map, Vst);
+ false ->
+ clobber_map_vals(T, Map, Vst0)
+ end;
+clobber_map_vals([], _Map, Vst) ->
+ Vst.
extract_map_keys([Key,_Val|T]) ->
[Key|extract_map_keys(T)];
extract_map_keys([]) -> [].
-verify_get_map_pair([Src,Dst|Vs], Map, Vst0, Vsti0) ->
- assert_term(Src, Vst0),
- Vsti = set_type_reg(term, Map, Dst, Vsti0),
- verify_get_map_pair(Vs, Map, Vst0, Vsti);
-verify_get_map_pair([], _Map, _Vst0, Vst) -> Vst.
+extract_map_vals([Key,Dst|Vs], Map, Vst0, Vsti0) ->
+ assert_term(Key, Vst0),
+ Vsti = extract_term(term, {bif,map_get}, [Key, Map], Dst, Vsti0),
+ extract_map_vals(Vs, Map, Vst0, Vsti);
+extract_map_vals([], _Map, _Vst0, Vst) ->
+ Vst.
-verify_put_map(Fail, Src, Dst, Live, List, Vst0) ->
+verify_put_map(Op, Fail, Src, Dst, Live, List, Vst0) ->
assert_type(map, Src, Vst0),
verify_live(Live, Vst0),
verify_y_init(Vst0),
- foreach(fun (Term) -> assert_term(Term, Vst0) end, List),
- Vst1 = heap_alloc(0, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- Keys = extract_map_keys(List),
- assert_unique_map_keys(Keys),
- set_type_reg(map, Dst, Vst).
+ _ = [assert_term(Term, Vst0) || Term <- List],
+ Vst = heap_alloc(0, Vst0),
+
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ Keys = extract_map_keys(List),
+ assert_unique_map_keys(Keys),
+ create_term(map, Op, [Src], Dst, SuccVst, SuccVst0)
+ end).
+
+%%
+%% Common code for validating bs_start_match* instructions.
+%%
+
+validate_bs_start_match(Fail, Live, Type, Src, Dst, Vst) ->
+ verify_live(Live, Vst),
+ verify_y_init(Vst),
+
+ %% #ms{} can represent either a match context or a term, so we have to mark
+ %% the source as a term if it fails with a match context as an input. This
+ %% hack is only needed until we get proper union types.
+ branch(Fail, Vst,
+ fun(FailVst) ->
+ case get_movable_term_type(Src, FailVst) of
+ #ms{} -> override_type(term, Src, FailVst);
+ _ -> FailVst
+ end
+ end,
+ fun(SuccVst0) ->
+ SuccVst1 = update_type(fun meet/2, binary,
+ Src, SuccVst0),
+ SuccVst = prune_x_regs(Live, SuccVst1),
+ extract_term(Type, bs_start_match, [Src], Dst,
+ SuccVst, SuccVst0)
+ end).
%%
%% Common code for validating bs_get* instructions.
%%
-validate_bs_get(Fail, Ctx, Live, Type, Dst, Vst0) ->
- bsm_validate_context(Ctx, Vst0),
- verify_live(Live, Vst0),
- verify_y_init(Vst0),
- Vst1 = prune_x_regs(Live, Vst0),
- Vst = branch_state(Fail, Vst1),
- set_type_reg(Type, Dst, Vst).
+validate_bs_get(Op, Fail, Ctx, Live, Type, Dst, Vst) ->
+ bsm_validate_context(Ctx, Vst),
+ verify_live(Live, Vst),
+ verify_y_init(Vst),
+
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ extract_term(Type, Op, [Ctx], Dst, SuccVst, SuccVst0)
+ end).
%%
%% Common code for validating bs_skip_utf* instructions.
%%
-validate_bs_skip_utf(Fail, Ctx, Live, Vst0) ->
- bsm_validate_context(Ctx, Vst0),
- verify_y_init(Vst0),
- verify_live(Live, Vst0),
- Vst = prune_x_regs(Live, Vst0),
- branch_state(Fail, Vst).
+validate_bs_skip_utf(Fail, Ctx, Live, Vst) ->
+ bsm_validate_context(Ctx, Vst),
+ verify_y_init(Vst),
+ verify_live(Live, Vst),
+
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ prune_x_regs(Live, SuccVst)
+ end).
+
+%%
+%% Common code for is_$type instructions.
+%%
+type_test(Fail, Type, Reg, Vst) ->
+ assert_term(Reg, Vst),
+ branch(Fail, Vst,
+ fun(FailVst) ->
+ update_type(fun subtract/2, Type, Reg, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_type(fun meet/2, Type, Reg, SuccVst)
+ end).
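+
+%% For example (a sketch), if Reg is currently known to be 'number', then
+%%
+%%    {test,is_integer,{f,Fail},[Reg]}.
+%%
+%% leaves Reg as {integer,[]} on the success path (meet(number, {integer,[]}))
+%% and as {float,[]} on the failure path (subtract(number, {integer,[]})).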
%%
%% Special state handling for setelement/3 and set_tuple_element/3 instructions.
@@ -941,14 +1200,15 @@ kill_state(Vst) ->
%% A "plain" call.
%% The stackframe must be initialized.
%% The instruction will return to the instruction following the call.
-call(Name, Live, #vst{current=St}=Vst) ->
- verify_call_args(Name, Live, Vst),
- verify_y_init(Vst),
- case return_type(Name, Vst) of
- Type when Type =/= exception ->
- %% Type is never 'exception' because it has been handled earlier.
- Xs = gb_trees_from_list([{0,Type}]),
- Vst#vst{current=St#st{x=Xs,f=init_fregs(),aliases=#{}}}
+call(Name, Live, #vst{current=St0}=Vst0) ->
+ verify_call_args(Name, Live, Vst0),
+ verify_y_init(Vst0),
+ case call_return_type(Name, Vst0) of
+ Type when Type =/= exception ->
+ %% Type is never 'exception' because it has been handled earlier.
+ St = St0#st{f=init_fregs()},
+ Vst = prune_x_regs(0, Vst0#vst{current=St}),
+ create_term(Type, call, [], {x,0}, Vst)
end.
%% Tail call.
@@ -964,79 +1224,131 @@ tail_call(Name, Live, Vst0) ->
verify_call_args(_, 0, #vst{}) ->
ok;
verify_call_args({f,Lbl}, Live, Vst) when is_integer(Live) ->
- verify_local_call(Lbl, Live, Vst);
+ verify_local_args(Live - 1, Lbl, #{}, Vst);
verify_call_args(_, Live, Vst) when is_integer(Live)->
- verify_call_args_1(Live, Vst);
+ verify_remote_args_1(Live - 1, Vst);
verify_call_args(_, Live, _) ->
error({bad_number_of_live_regs,Live}).
-verify_call_args_1(0, _) -> ok;
-verify_call_args_1(N, Vst) ->
- X = N - 1,
- get_term_type({x,X}, Vst),
- verify_call_args_1(X, Vst).
-
-verify_local_call(Lbl, Live, Vst) ->
- case all_ms_in_x_regs(Live, Vst) of
- [{R,Ctx}] ->
- %% Verify that there is a suitable bs_start_match2 instruction.
- verify_call_match_context(Lbl, R, Vst),
-
- %% Since the callee has consumed the match context,
- %% there must be no additional copies in Y registers.
- #ms{id=Id} = Ctx,
- case ms_in_y_regs(Id, Vst) of
- [] ->
- ok;
- [_|_]=Ys ->
- error({multiple_match_contexts,[R|Ys]})
- end;
- [_,_|_]=Xs0 ->
- Xs = [R || {R,_} <- Xs0],
- error({multiple_match_contexts,Xs});
- [] ->
- ok
+verify_remote_args_1(-1, _) ->
+ ok;
+verify_remote_args_1(X, Vst) ->
+ assert_durable_term({x, X}, Vst),
+ verify_remote_args_1(X - 1, Vst).
+
+verify_local_args(-1, _Lbl, _CtxIds, _Vst) ->
+ ok;
+verify_local_args(X, Lbl, CtxIds, Vst) ->
+ Reg = {x, X},
+ assert_not_fragile(Reg, Vst),
+ case get_movable_term_type(Reg, Vst) of
+ #ms{id=Id}=Type ->
+ case CtxIds of
+ #{ Id := Other } ->
+ error({multiple_match_contexts, [Reg, Other]});
+ #{} ->
+ verify_arg_type(Lbl, Reg, Type, Vst),
+ verify_local_args(X - 1, Lbl, CtxIds#{ Id => Reg }, Vst)
+ end;
+ Type ->
+ verify_arg_type(Lbl, Reg, Type, Vst),
+ verify_local_args(X - 1, Lbl, CtxIds, Vst)
end.
-all_ms_in_x_regs(0, _Vst) ->
- [];
-all_ms_in_x_regs(Live0, Vst) ->
- Live = Live0 - 1,
- R = {x,Live},
- case get_move_term_type(R, Vst) of
- #ms{}=M ->
- [{R,M}|all_ms_in_x_regs(Live, Vst)];
- _ ->
- all_ms_in_x_regs(Live, Vst)
+%% Verifies that the given argument narrows to what the function expects.
+verify_arg_type(Lbl, Reg, #ms{}, #vst{ft=Ft}) ->
+ %% Match contexts require explicit support, and may not be passed to a
+ %% function that accepts arbitrary terms.
+ case gb_trees:lookup({Lbl, Reg}, Ft) of
+ {value, #ms{}} -> ok;
+ _ -> error(no_bs_start_match2)
+ end;
+verify_arg_type(Lbl, Reg, GivenType, #vst{ft=Ft}) ->
+ case gb_trees:lookup({Lbl, Reg}, Ft) of
+ {value, #ms{}} ->
+ %% Functions that accept match contexts also accept all other
+ %% terms. This will change once we support union types.
+ ok;
+ {value, RequiredType} ->
+ case vat_1(GivenType, RequiredType) of
+ true -> ok;
+ false -> error({bad_arg_type, Reg, GivenType, RequiredType})
+ end;
+ none ->
+ ok
end.
-ms_in_y_regs(Id, #vst{current=#st{y=Ys0}}) ->
- Ys = gb_trees:to_list(Ys0),
- [{y,Y} || {Y,#ms{id=OtherId}} <- Ys, OtherId =:= Id].
+%% Checks whether the Given argument is compatible with the Required one. This
+%% is essentially a relaxed version of 'meet(Given, Req) =:= Given', where we
+%% accept that the Given value has the right type but not necessarily the exact
+%% same value; if {atom,gurka} is required, we'll consider {atom,[]} valid.
+%%
+%% This will catch all problems that could crash the emulator, like passing a
+%% 1-tuple when the callee expects a 3-tuple, but some value errors might slip
+%% through.
+vat_1(Same, Same) -> true;
+vat_1({atom,A}, {atom,B}) -> A =:= B orelse is_list(A) orelse is_list(B);
+vat_1({atom,A}, bool) -> is_boolean(A) orelse is_list(A);
+vat_1(bool, {atom,B}) -> is_boolean(B) orelse is_list(B);
+vat_1(cons, list) -> true;
+vat_1({float,A}, {float,B}) -> A =:= B orelse is_list(A) orelse is_list(B);
+vat_1({float,_}, number) -> true;
+vat_1({integer,A}, {integer,B}) -> A =:= B orelse is_list(A) orelse is_list(B);
+vat_1({integer,_}, number) -> true;
+vat_1(_, {literal,_}) -> false;
+vat_1({literal,_}=Lit, Required) -> vat_1(get_literal_type(Lit), Required);
+vat_1(nil, list) -> true;
+vat_1({tuple,SzA,EsA}, {tuple,SzB,EsB}) ->
+ if
+ is_list(SzB) ->
+ tuple_sz(SzA) >= tuple_sz(SzB) andalso vat_elements(EsA, EsB);
+ SzA =:= SzB ->
+ vat_elements(EsA, EsB);
+ SzA =/= SzB ->
+ false
+ end;
+vat_1(_, _) -> false.
-verify_call_match_context(Lbl, Ctx, #vst{ft=Ft}) ->
- case gb_trees:lookup(Lbl, Ft) of
- none ->
- error(no_bs_start_match2);
- {value,[{test,bs_start_match2,_,_,[Ctx,_],Ctx}|_]} ->
- ok;
- {value,[{test,bs_start_match2,_,_,_,_}=I|_]} ->
- error({unsuitable_bs_start_match2,I})
- end.
+vat_elements(EsA, EsB) ->
+ maps:fold(fun(Key, Req, Acc) ->
+ case EsA of
+ #{ Key := Given } -> Acc andalso vat_1(Given, Req);
+ #{} -> false
+ end
+ end, true, EsB).
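+
+%% A few examples of the relaxation (sketch):
+%%
+%%    vat_1({atom,[]}, {atom,gurka})      -> true   (right type, unknown value)
+%%    vat_1(cons, list)                   -> true   (more specific than required)
+%%    vat_1({tuple,1,#{}}, {tuple,3,#{}}) -> false  (1-tuple where a 3-tuple
+%%                                                   is expected)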
-allocate(Zero, Stk, Heap, Live, #vst{current=#st{numy=none}}=Vst0) ->
+allocate(Tag, Stk, Heap, Live, #vst{current=#st{numy=none}=St}=Vst0) ->
verify_live(Live, Vst0),
- Vst = #vst{current=St} = prune_x_regs(Live, Vst0),
- Ys = init_regs(Stk, case Zero of
- true -> initialized;
- false -> uninitialized
- end),
- heap_alloc(Heap, Vst#vst{current=St#st{y=Ys,numy=Stk}});
+ Vst1 = Vst0#vst{current=St#st{numy=Stk}},
+ Vst2 = prune_x_regs(Live, Vst1),
+ Vst = init_stack(Tag, Stk - 1, Vst2),
+ heap_alloc(Heap, Vst);
allocate(_, _, _, _, #vst{current=#st{numy=Numy}}) ->
error({existing_stack_frame,{size,Numy}}).
deallocate(#vst{current=St}=Vst) ->
- Vst#vst{current=St#st{y=init_regs(0, initialized),numy=none}}.
+ Vst#vst{current=St#st{ys=#{},numy=none}}.
+
+init_stack(_Tag, -1, Vst) ->
+ Vst;
+init_stack(Tag, Y, Vst) ->
+ init_stack(Tag, Y - 1, create_tag(Tag, allocate, [], {y,Y}, Vst)).
+
+trim_stack(From, To, Top, #st{ys=Ys0}=St) when From =:= Top ->
+ Ys = maps:filter(fun({y,Y}, _) -> Y < To end, Ys0),
+ St#st{numy=To,ys=Ys};
+trim_stack(From, To, Top, St0) ->
+ Src = {y, From},
+ Dst = {y, To},
+
+ #st{ys=Ys0} = St0,
+ Ys = case Ys0 of
+ #{ Src := Ref } -> Ys0#{ Dst => Ref };
+ #{} -> error({invalid_shift,Src,Dst})
+ end,
+ St = St0#st{ys=Ys},
+
+ trim_stack(From + 1, To + 1, Top, St).
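+
+%% A worked example (sketch): trim_stack(2, 0, 3, St) on a frame holding
+%% {y,0}, {y,1} and {y,2} copies the value reference of {y,2} into {y,0},
+%% then hits the From =:= Top clause, which drops {y,1} and {y,2} and sets
+%% numy to 1, leaving a one-slot frame whose only value is the old {y,2}.
+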
test_heap(Heap, Live, Vst0) ->
verify_live(Live, Vst0),
@@ -1062,26 +1374,42 @@ heap_alloc_2([{floats,Floats}|T], St0) ->
heap_alloc_2(T, St);
heap_alloc_2([], St) -> St.
-prune_x_regs(Live, #vst{current=St0}=Vst)
- when is_integer(Live) ->
- #st{x=Xs0,defs=Defs0,aliases=Aliases0} = St0,
- Xs1 = gb_trees:to_list(Xs0),
- Xs = [P || {R,_}=P <- Xs1, R < Live],
- Defs = maps:filter(fun({x,X}, _) -> X < Live;
- ({y,_}, _) -> true
- end, Defs0),
- Aliases = maps:filter(fun({x,X1}, {x,X2}) ->
- X1 < Live andalso X2 < Live;
- ({x,X}, _) ->
- X < Live;
- (_, {x,X}) ->
- X < Live;
- (_, _) ->
- true
- end, Aliases0),
- St = St0#st{x=gb_trees:from_orddict(Xs),defs=Defs,aliases=Aliases},
+prune_x_regs(Live, #vst{current=St0}=Vst) when is_integer(Live) ->
+ #st{fragile=Fragile0,xs=Xs0} = St0,
+ Fragile = cerl_sets:filter(fun({x,X}) ->
+ X < Live;
+ ({y,_}) ->
+ true
+ end, Fragile0),
+ Xs = maps:filter(fun({x,X}, _) ->
+ X < Live
+ end, Xs0),
+ St = St0#st{fragile=Fragile,xs=Xs},
Vst#vst{current=St}.
+%% All choices in a select_val list must be integers, floats, or atoms.
+%% All must be of the same type.
+assert_choices([{Tag,_},{f,_}|T]) ->
+ if
+ Tag =:= atom; Tag =:= float; Tag =:= integer ->
+ assert_choices_1(T, Tag);
+ true ->
+ error(bad_select_list)
+ end;
+assert_choices([]) -> ok.
+
+assert_choices_1([{Tag,_},{f,_}|T], Tag) ->
+ assert_choices_1(T, Tag);
+assert_choices_1([_,{f,_}|_], _Tag) ->
+ error(bad_select_list);
+assert_choices_1([], _Tag) -> ok.
+
+assert_arities([Arity,{f,_}|T]) when is_integer(Arity) ->
+ assert_arities(T);
+assert_arities([]) -> ok;
+assert_arities(_) -> error(bad_tuple_arity_list).
+
+
%%%
%%% Floating point checking.
%%%
@@ -1101,8 +1429,8 @@ prune_x_regs(Live, #vst{current=St0}=Vst)
%%% fmove Src {fr,_} %% Move INTO floating point register.
%%%
-float_op(Src, Dst, Vst0) ->
- foreach (fun(S) -> assert_freg_set(S, Vst0) end, Src),
+float_op(Ss, Dst, Vst0) ->
+ _ = [assert_freg_set(S, Vst0) || S <- Ss],
assert_fls(cleared, Vst0),
Vst = set_fls(cleared, Vst0),
set_freg(Dst, Vst).
@@ -1120,8 +1448,7 @@ get_fls(#vst{current=#st{fls=Fls}}) when is_atom(Fls) -> Fls.
init_fregs() -> 0.
-set_freg({fr,Fr}=Freg, #vst{current=#st{f=Fregs0}=St}=Vst)
- when is_integer(Fr), 0 =< Fr ->
+set_freg({fr,Fr}=Freg, #vst{current=#st{f=Fregs0}=St}=Vst) ->
check_limit(Freg),
Bit = 1 bsl Fr,
if
@@ -1157,7 +1484,10 @@ assert_unique_map_keys([]) ->
assert_unique_map_keys([_]) ->
ok;
assert_unique_map_keys([_,_|_]=Ls) ->
- Vs = [get_literal(L) || L <- Ls],
+ Vs = [begin
+ assert_literal(L),
+ L
+ end || L <- Ls],
case length(Vs) =:= sets:size(sets:from_list(Vs)) of
true -> ok;
false -> error(keys_not_unique)
@@ -1167,6 +1497,8 @@ assert_unique_map_keys([_,_|_]=Ls) ->
%%% New binary matching instructions.
%%%
+bsm_match_state() ->
+ #ms{}.
bsm_match_state(Slots) ->
#ms{slots=Slots}.
@@ -1174,13 +1506,13 @@ bsm_validate_context(Reg, Vst) ->
_ = bsm_get_context(Reg, Vst),
ok.
-bsm_get_context({x,X}=Reg, #vst{current=#st{x=Xs}}=_Vst) when is_integer(X) ->
- case gb_trees:lookup(X, Xs) of
- {value,#ms{}=Ctx} -> Ctx;
- {value,{fragile,#ms{}=Ctx}} -> Ctx;
- _ -> error({no_bsm_context,Reg})
+bsm_get_context({Kind,_}=Reg, Vst) when Kind =:= x; Kind =:= y ->
+ case get_movable_term_type(Reg, Vst) of
+ #ms{}=Ctx -> Ctx;
+ _ -> error({no_bsm_context,Reg})
end;
-bsm_get_context(Reg, _) -> error({bad_source,Reg}).
+bsm_get_context(Reg, _) ->
+ error({bad_source,Reg}).
bsm_save(Reg, {atom,start}, Vst) ->
%% Save point referring to where the match started.
@@ -1191,7 +1523,7 @@ bsm_save(Reg, SavePoint, Vst) ->
case bsm_get_context(Reg, Vst) of
#ms{valid=Bits,slots=Slots}=Ctxt0 when SavePoint < Slots ->
Ctx = Ctxt0#ms{valid=Bits bor (1 bsl SavePoint),slots=Slots},
- set_type_reg(Ctx, Reg, Vst);
+ override_type(Ctx, Reg, Vst);
_ -> error({illegal_save,SavePoint})
end.
@@ -1210,178 +1542,363 @@ bsm_restore(Reg, SavePoint, Vst) ->
_ -> error({illegal_restore,SavePoint,range})
end.
-select_val_branches(Src, Choices, Vst) ->
- Infer = infer_types(Src, Vst),
- select_val_branches_1(Choices, Infer, Vst).
-
-select_val_branches_1([Val,{f,L}|T], Infer, Vst0) ->
- Vst = branch_state(L, Infer(Val, Vst0)),
- select_val_branches_1(T, Infer, Vst);
-select_val_branches_1([], _, Vst) -> Vst.
-
-infer_types(Src, Vst) ->
- case get_def(Src, Vst) of
- {bif,is_map,{f,_},[Map],_} ->
- fun({atom,true}, S) -> set_type_reg(map, Map, S);
- (_, S) -> S
- end;
- {bif,tuple_size,{f,_},[Tuple],_} ->
- fun({integer,Arity}, S) ->
- Type0 = get_term_type(Tuple, S),
- Type = upgrade_tuple_type({tuple,Arity}, Type0),
- set_type(Type, Tuple, S);
- (_, S) -> S
- end;
- {bif,'=:=',{f,_},[ArityReg,{integer,_}=Val],_} when ArityReg =/= Src ->
- fun({atom,true}, S) ->
- Infer = infer_types(ArityReg, S),
- Infer(Val, S);
- (_, S) -> S
- end;
- _ ->
- fun(_, S) -> S end
+validate_select_val(_Fail, _Choices, _Src, #vst{current=none}=Vst) ->
+ %% We've already branched on all of Src's possible values, so we know we
+ %% can't reach the fail label or any of the remaining choices.
+ Vst;
+validate_select_val(Fail, [Val,{f,L}|T], Src, Vst0) ->
+ Vst = branch(L, Vst0,
+ fun(BranchVst) ->
+ update_eq_types(Src, Val, BranchVst)
+ end,
+ fun(FailVst) ->
+ update_ne_types(Src, Val, FailVst)
+ end),
+ validate_select_val(Fail, T, Src, Vst);
+validate_select_val(Fail, [], _, Vst) ->
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ %% The next instruction is never executed.
+ kill_state(SuccVst)
+ end).
+
+validate_select_tuple_arity(_Fail, _Choices, _Src, #vst{current=none}=Vst) ->
+ %% We've already branched on all of Src's possible values, so we know we
+ %% can't reach the fail label or any of the remaining choices.
+ Vst;
+validate_select_tuple_arity(Fail, [Arity,{f,L}|T], Tuple, Vst0) ->
+ Type = {tuple, Arity, #{}},
+ Vst = branch(L, Vst0,
+ fun(BranchVst) ->
+ update_type(fun meet/2, Type, Tuple, BranchVst)
+ end,
+ fun(FailVst) ->
+ update_type(fun subtract/2, Type, Tuple, FailVst)
+ end),
+ validate_select_tuple_arity(Fail, T, Tuple, Vst);
+validate_select_tuple_arity(Fail, [], _, #vst{}=Vst) ->
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ %% The next instruction is never executed.
+ kill_state(SuccVst)
+ end).
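+
+%% For example (a sketch), with Src known to be 'bool':
+%%
+%%    {select_val,Src,{f,Fail},{list,[{atom,true},{f,1},{atom,false},{f,2}]}}.
+%%
+%% The branch to {f,1} sees Src as {atom,true} and the branch to {f,2} sees
+%% it as {atom,false}; subtracting both values leaves no possible value, so
+%% the fall-through state becomes 'none' (handled by the current=none
+%% clauses above) and the fail label is never reached.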
+
+infer_types({Kind,_}=Reg, Vst) when Kind =:= x; Kind =:= y ->
+ infer_types(get_reg_vref(Reg, Vst), Vst);
+infer_types(#value_ref{}=Ref, #vst{current=#st{vs=Vs}}) ->
+ case Vs of
+ #{ Ref := Entry } -> infer_types_1(Entry);
+ #{} -> fun(_, S) -> S end
+ end;
+infer_types(_, #vst{}) ->
+ fun(_, S) -> S end.
+
+infer_types_1(#value{op={bif,'=:='},args=[LHS,RHS]}) ->
+ fun({atom,true}, S) ->
+ Infer = infer_types(RHS, S),
+ Infer(LHS, S);
+ (_, S) -> S
+ end;
+infer_types_1(#value{op={bif,element},args=[{integer,Index}=Key,Tuple]}) ->
+ fun(Val, S) ->
+ Type = get_term_type(Val, S),
+ update_type(fun meet/2,{tuple,[Index],#{ Key => Type }}, Tuple, S)
+ end;
+infer_types_1(#value{op={bif,is_atom},args=[Src]}) ->
+ infer_type_test_bif({atom,[]}, Src);
+infer_types_1(#value{op={bif,is_boolean},args=[Src]}) ->
+ infer_type_test_bif(bool, Src);
+infer_types_1(#value{op={bif,is_binary},args=[Src]}) ->
+ infer_type_test_bif(binary, Src);
+infer_types_1(#value{op={bif,is_bitstring},args=[Src]}) ->
+ infer_type_test_bif(binary, Src);
+infer_types_1(#value{op={bif,is_float},args=[Src]}) ->
+ infer_type_test_bif({float,[]}, Src);
+infer_types_1(#value{op={bif,is_integer},args=[Src]}) ->
+ infer_type_test_bif({integer,[]}, Src);
+infer_types_1(#value{op={bif,is_list},args=[Src]}) ->
+ infer_type_test_bif(list, Src);
+infer_types_1(#value{op={bif,is_map},args=[Src]}) ->
+ infer_type_test_bif(map, Src);
+infer_types_1(#value{op={bif,is_number},args=[Src]}) ->
+ infer_type_test_bif(number, Src);
+infer_types_1(#value{op={bif,is_tuple},args=[Src]}) ->
+ infer_type_test_bif({tuple,[0],#{}}, Src);
+infer_types_1(#value{op={bif,tuple_size}, args=[Tuple]}) ->
+ fun({integer,Arity}, S) ->
+ update_type(fun meet/2, {tuple,Arity,#{}}, Tuple, S);
+ (_, S) -> S
+ end;
+infer_types_1(_) ->
+ fun(_, S) -> S end.
+
+infer_type_test_bif(Type, Src) ->
+ fun({atom,true}, S) ->
+ update_type(fun meet/2, Type, Src, S);
+ (_, S) ->
+ S
end.
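+
+%% A sketch of how these inferences are typically used: given
+%%
+%%    {bif,tuple_size,{f,0},[Tuple],SizeReg}.
+%%    {test,is_eq_exact,{f,Fail},[SizeReg,{integer,3}]}.
+%%
+%% update_eq_types/3 (further down) calls infer_types(SizeReg, Vst); the
+%% tuple_size clause above is picked from SizeReg's value entry and the
+%% returned fun narrows Tuple to {tuple,3,#{}} on the success path.
+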
%%%
%%% Keeping track of types.
%%%
-set_alias(Reg1, Reg2, #vst{current=St0}=Vst) ->
- case Reg1 of
- {Kind,_} when Kind =:= x; Kind =:= y ->
- #st{aliases=Aliases0} = St0,
- Aliases = Aliases0#{Reg1=>Reg2,Reg2=>Reg1},
- St = St0#st{aliases=Aliases},
- Vst#vst{current=St};
+%% Assigns Src to Dst and marks them as aliasing each other.
+assign({y,_}=Src, {y,_}=Dst, Vst) ->
+ %% The stack trimming optimization may generate a move from an initialized
+ %% but unassigned Y register to another Y register.
+ case get_raw_type(Src, Vst) of
+ initialized -> create_tag(initialized, init, [], Dst, Vst);
+ _ -> assign_1(Src, Dst, Vst)
+ end;
+assign({Kind,_}=Src, Dst, Vst) when Kind =:= x; Kind =:= y ->
+ assign_1(Src, Dst, Vst);
+assign(Literal, Dst, Vst) ->
+ Type = get_literal_type(Literal),
+ create_term(Type, move, [Literal], Dst, Vst).
+
+%% Creates a special tag value that isn't a regular term, such as 'initialized'
+%% or 'catchtag'
+create_tag(Tag, _Op, _Ss, {y,_}=Dst, #vst{current=#st{ys=Ys0}=St0}=Vst) ->
+ case maps:get(Dst, Ys0, uninitialized) of
+ {catchtag,_}=Prev ->
+ error(Prev);
+ {trytag,_}=Prev ->
+ error(Prev);
_ ->
- Vst
+ check_try_catch_tags(Tag, Dst, Vst),
+ Ys = Ys0#{ Dst => Tag },
+ St = St0#st{ys=Ys},
+ remove_fragility(Dst, Vst#vst{current=St})
+ end;
+create_tag(_Tag, _Op, _Ss, Dst, _Vst) ->
+ error({invalid_tag_register, Dst}).
+
+%% Wipes a special tag, leaving the register initialized but empty.
+kill_tag({y,_}=Reg, #vst{current=#st{ys=Ys0}=St0}=Vst) ->
+ _ = get_tag_type(Reg, Vst), %Assertion.
+ Ys = Ys0#{ Reg => initialized },
+ Vst#vst{current=St0#st{ys=Ys}}.
+
+%% Creates a completely new term with the given type.
+create_term(Type, Op, Ss0, Dst, Vst0) ->
+ create_term(Type, Op, Ss0, Dst, Vst0, Vst0).
+
+%% As create_term/4, but uses the incoming Vst for argument resolution in
+%% case x-regs have been pruned and the sources can no longer be found.
+create_term(Type, Op, Ss0, Dst, Vst0, OrigVst) ->
+ {Ref, Vst1} = new_value(Type, Op, resolve_args(Ss0, OrigVst), Vst0),
+ Vst = remove_fragility(Dst, Vst1),
+ set_reg_vref(Ref, Dst, Vst).
+
+%% Extracts a term from Ss, propagating fragility.
+extract_term(Type, Op, Ss0, Dst, Vst0) ->
+ extract_term(Type, Op, Ss0, Dst, Vst0, Vst0).
+
+%% As extract_term/4, but uses the incoming Vst for argument resolution in
+%% case x-regs have been pruned and the sources can no longer be found.
+extract_term(Type, Op, Ss0, Dst, Vst0, OrigVst) ->
+ {Ref, Vst1} = new_value(Type, Op, resolve_args(Ss0, OrigVst), Vst0),
+ Vst = propagate_fragility(Dst, Ss0, Vst1),
+ set_reg_vref(Ref, Dst, Vst).
+
+%% Translates instruction arguments into the argument() type, decoupling them
+%% from their registers, allowing us to infer their types after they've been
+%% clobbered or moved.
+resolve_args([{Kind,_}=Src | Args], Vst) when Kind =:= x; Kind =:= y ->
+ [get_reg_vref(Src, Vst) | resolve_args(Args, Vst)];
+resolve_args([Lit | Args], Vst) ->
+ assert_literal(Lit),
+ [Lit | resolve_args(Args, Vst)];
+resolve_args([], _Vst) ->
+ [].
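+
+%% For example (a sketch),
+%%
+%%    extract_term(term, get_hd, [{x,0}], {x,1}, Vst)
+%%
+%% records a new value whose argument is the #value_ref{} currently held by
+%% {x,0} rather than the register itself, so the inferences in
+%% infer_types_1/1 keep working even after {x,0} has been clobbered or the
+%% value has been moved elsewhere.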
+
+%% Overrides the type of Reg. This is ugly but a necessity for certain
+%% destructive operations.
+override_type(Type, Reg, Vst) ->
+ update_type(fun(_, T) -> T end, Type, Reg, Vst).
+
+%% This is used when linear code finds out more and more information about a
+%% type, so that the type gets more specialized.
+update_type(Merge, With, #value_ref{}=Ref, Vst) ->
+ %% If the old type can't be merged with the new one, the type information
+ %% is inconsistent and we know that some instructions will never be
+ %% executed at run-time. For example:
+ %%
+ %% {test,is_list,Fail,[Reg]}.
+ %% {test,is_tuple,Fail,[Reg]}.
+ %% {test,test_arity,Fail,[Reg,5]}.
+ %%
+ %% Note that the test_arity instruction can never be reached, so we need to
+ %% kill the state to avoid raising an error when we encounter it.
+ %%
+ %% Simply returning `kill_state(Vst)` is unsafe however as we might be in
+ %% the middle of an instruction, and altering the rest of the validator
+ %% (e.g. prune_x_regs/2) to no-op on dead states is prone to error.
+ %%
+ %% We therefore throw a 'type_conflict' error instead, which causes
+ %% validation to fail unless we're in a context where such errors can be
+ %% handled, such as in a branch handler.
+ Current = get_raw_type(Ref, Vst),
+ case Merge(Current, With) of
+ none -> throw({type_conflict, Current, With});
+ Type -> set_type(Type, Ref, Vst)
+ end;
+update_type(Merge, With, {Kind,_}=Reg, Vst) when Kind =:= x; Kind =:= y ->
+ update_type(Merge, With, get_reg_vref(Reg, Vst), Vst);
+update_type(Merge, With, Literal, Vst) ->
+ assert_literal(Literal),
+ %% Literals always retain their type, but we still need to bail on type
+ %% conflicts.
+ case Merge(Literal, With) of
+ none -> throw({type_conflict, Literal, With});
+ _Type -> Vst
end.
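+
+%% A worked instance of the conflict described above: after is_list/1
+%% succeeds, Reg is at most 'list'; when is_tuple/1 then meets it with
+%% {tuple,[0],#{}}, meet(list, {tuple,[0],#{}}) is 'none', so the
+%% 'type_conflict' throw lets the enclosing branch handler treat that
+%% success path as unreachable instead of failing validation outright.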
-set_aliased_type(Type, Reg, #vst{current=#st{aliases=Aliases}}=Vst0) ->
- Vst1 = set_type(Type, Reg, Vst0),
- case Aliases of
- #{Reg:=OtherReg} ->
- Vst = set_type_reg(Type, OtherReg, Vst1),
- #vst{current=St} = Vst,
- Vst#vst{current=St#st{aliases=Aliases}};
- #{} ->
- Vst1
+update_ne_types(LHS, RHS, Vst) ->
+ %% While updating types on equality is fairly straightforward, inequality
+ %% is a bit trickier since all we know is that the *value* of LHS differs
+ %% from RHS, so we can't blindly subtract their types.
+ %%
+ %% Consider `number =/= {integer,[]}`; all we know is that LHS isn't equal
+ %% to some *specific integer* of unknown value, and if we were to subtract
+ %% {integer,[]} we would erroneously infer that the new type is {float,[]}.
+ %%
+ %% Therefore, we only subtract when we know that RHS has a specific value.
+ RType = get_term_type(RHS, Vst),
+ case is_literal(RType) of
+ true -> update_type(fun subtract/2, RType, LHS, Vst);
+ false -> Vst
end.
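+
+%% For example (a sketch): if LHS is 'bool' and RHS is the literal
+%% {atom,false}, subtraction narrows LHS to {atom,true} on the not-equal
+%% path; but if RHS is merely known to be {integer,[]} (no specific value),
+%% LHS is left untouched for the reason given above.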
-kill_aliases(Reg, #st{aliases=Aliases0}=St) ->
- case Aliases0 of
- #{Reg:=OtherReg} ->
- Aliases = maps:without([Reg,OtherReg], Aliases0),
- St#st{aliases=Aliases};
+update_eq_types(LHS, RHS, Vst0) ->
+ Infer = infer_types(LHS, Vst0),
+ Vst1 = Infer(RHS, Vst0),
+
+ T1 = get_term_type(LHS, Vst1),
+ T2 = get_term_type(RHS, Vst1),
+
+ Vst = update_type(fun meet/2, T2, LHS, Vst1),
+ update_type(fun meet/2, T1, RHS, Vst).
+
+%% Helper functions for the above.
+
+assign_1(Src, Dst, Vst0) ->
+ assert_movable(Src, Vst0),
+ Vst = propagate_fragility(Dst, [Src], Vst0),
+ set_reg_vref(get_reg_vref(Src, Vst), Dst, Vst).
+
+set_reg_vref(Ref, {x,_}=Dst, Vst) ->
+ check_limit(Dst),
+ #vst{current=#st{xs=Xs0}=St0} = Vst,
+ St = St0#st{xs=Xs0#{ Dst => Ref }},
+ Vst#vst{current=St};
+set_reg_vref(Ref, {y,_}=Dst, #vst{current=#st{ys=Ys0}=St0} = Vst) ->
+ check_limit(Dst),
+ case Ys0 of
+ #{ Dst := {catchtag,_}=Tag } ->
+ error(Tag);
+ #{ Dst := {trytag,_}=Tag } ->
+ error(Tag);
+ #{ Dst := _ } ->
+ St = St0#st{ys=Ys0#{ Dst => Ref }},
+ Vst#vst{current=St};
#{} ->
- St
+ %% Storing into a non-existent Y register means that we haven't set
+ %% up a (sufficiently large) stack.
+ error({invalid_store, Dst})
end.
-set_type(Type, {x,_}=Reg, Vst) -> set_type_reg(Type, Reg, Vst);
-set_type(Type, {y,_}=Reg, Vst) -> set_type_y(Type, Reg, Vst);
-set_type(_, _, #vst{}=Vst) -> Vst.
+get_reg_vref({x,_}=Src, #vst{current=#st{xs=Xs}}) ->
+ check_limit(Src),
+ case Xs of
+ #{ Src := #value_ref{}=Ref } ->
+ Ref;
+ #{} ->
+ error({uninitialized_reg, Src})
+ end;
+get_reg_vref({y,_}=Src, #vst{current=#st{ys=Ys}}) ->
+ check_limit(Src),
+ case Ys of
+ #{ Src := #value_ref{}=Ref } ->
+ Ref;
+ #{ Src := initialized } ->
+ error({unassigned, Src});
+ #{ Src := Tag } when Tag =/= uninitialized ->
+ error(Tag);
+ #{} ->
+ error({uninitialized_reg, Src})
+ end.
-set_type_reg(Type, Src, Dst, Vst) ->
- case get_term_type_1(Src, Vst) of
- {fragile,_} ->
- set_type_reg(make_fragile(Type), Dst, Vst);
- _ ->
- set_type_reg(Type, Dst, Vst)
+set_type(Type, #value_ref{}=Ref, #vst{current=#st{vs=Vs0}=St}=Vst) ->
+ case Vs0 of
+ #{ Ref := #value{}=Entry } ->
+ Vs = Vs0#{ Ref => Entry#value{type=Type} },
+ Vst#vst{current=St#st{vs=Vs}};
+ #{} ->
+ %% Dead references may happen during type inference and are not an
+ %% error in and of themselves. If a problem were to arise from this
+ %% it'll explode elsewhere.
+ Vst
end.
-set_type_reg(Type, Reg, Vst) ->
- set_type_reg_expr(Type, none, Reg, Vst).
-
-set_type_reg_expr(Type, Expr, {x,_}=Reg, Vst) ->
- set_type_x(Type, Expr, Reg, Vst);
-set_type_reg_expr(Type, Expr, Reg, Vst) ->
- set_type_y(Type, Expr, Reg, Vst).
-
-set_type_y(Type, Reg, Vst) ->
- set_type_y(Type, none, Reg, Vst).
-
-set_type_x(Type, Expr, {x,X}=Reg, #vst{current=#st{x=Xs0,defs=Defs0}=St0}=Vst)
- when is_integer(X), 0 =< X ->
- check_limit(Reg),
- Xs = case gb_trees:lookup(X, Xs0) of
- none ->
- gb_trees:insert(X, Type, Xs0);
- {value,{fragile,_}} ->
- gb_trees:update(X, make_fragile(Type), Xs0);
- {value,_} ->
- gb_trees:update(X, Type, Xs0)
- end,
- Defs = Defs0#{Reg=>Expr},
- St = kill_aliases(Reg, St0),
- Vst#vst{current=St#st{x=Xs,defs=Defs}};
-set_type_x(Type, _Expr, Reg, #vst{}) ->
- error({invalid_store,Reg,Type}).
-
-set_type_y(Type, Expr, {y,Y}=Reg, #vst{current=#st{y=Ys0,defs=Defs0}=St0}=Vst)
- when is_integer(Y), 0 =< Y ->
- check_limit(Reg),
- Ys = case gb_trees:lookup(Y, Ys0) of
- none ->
- error({invalid_store,Reg,Type});
- {value,{catchtag,_}=Tag} ->
- error(Tag);
- {value,{trytag,_}=Tag} ->
- error(Tag);
- {value,_} ->
- gb_trees:update(Y, Type, Ys0)
- end,
- check_try_catch_tags(Type, Y, Ys0),
- Defs = Defs0#{Reg=>Expr},
- St = kill_aliases(Reg, St0),
- Vst#vst{current=St#st{y=Ys,defs=Defs}};
-set_type_y(Type, _Expr, Reg, #vst{}) ->
- error({invalid_store,Reg,Type}).
-
-make_fragile({fragile,_}=Type) -> Type;
-make_fragile(Type) -> {fragile,Type}.
-
-set_catch_end({y,Y}, #vst{current=#st{y=Ys0}=St}=Vst) ->
- Ys = gb_trees:update(Y, initialized, Ys0),
- Vst#vst{current=St#st{y=Ys}}.
-
-check_try_catch_tags(Type, LastY, Ys) ->
+new_value(Type, Op, Ss, #vst{current=#st{vs=Vs0}=St,ref_ctr=Counter}=Vst) ->
+ Ref = #value_ref{id=Counter},
+ Vs = Vs0#{ Ref => #value{op=Op,args=Ss,type=Type} },
+
+ {Ref, Vst#vst{current=St#st{vs=Vs},ref_ctr=Counter+1}}.
+
+kill_catch_tag(Reg, #vst{current=#st{ct=[Fail|Fails]}=St}=Vst0) ->
+ Vst = Vst0#vst{current=St#st{ct=Fails,fls=undefined}},
+ {_, Fail} = get_tag_type(Reg, Vst), %Assertion.
+ kill_tag(Reg, Vst).
+
+check_try_catch_tags(Type, {y,N}=Reg, Vst) ->
+ %% Every catch or try/catch must use a lower Y register number than any
+ %% enclosing catch or try/catch. That will ensure that when the stack is
+ %% scanned when an exception occurs, the innermost try/catch tag is found
+ %% first.
case is_try_catch_tag(Type) of
- false ->
- ok;
true ->
- %% Every catch or try/catch must use a lower Y register
- %% number than any enclosing catch or try/catch. That will
- %% ensure that when the stack is scanned when an
- %% exception occurs, the innermost try/catch tag is found
- %% first.
- Bad = [{{y,Y},Tag} || {Y,Tag} <- gb_trees:to_list(Ys),
- Y < LastY, is_try_catch_tag(Tag)],
- case Bad of
- [] ->
- ok;
- [_|_] ->
- error({bad_try_catch_nesting,{y,LastY},Bad})
- end
+ case collect_try_catch_tags(N - 1, Vst, []) of
+ [_|_]=Bad -> error({bad_try_catch_nesting, Reg, Bad});
+ [] -> ok
+ end;
+ false ->
+ ok
end.
-is_try_catch_tag({catchtag,_}) -> true;
-is_try_catch_tag({trytag,_}) -> true;
-is_try_catch_tag(_) -> false.
-
-is_reg_defined({x,_}=Reg, Vst) -> is_type_defined_x(Reg, Vst);
-is_reg_defined({y,_}=Reg, Vst) -> is_type_defined_y(Reg, Vst);
+is_reg_defined({x,_}=Reg, #vst{current=#st{xs=Xs}}) -> is_map_key(Reg, Xs);
+is_reg_defined({y,_}=Reg, #vst{current=#st{ys=Ys}}) -> is_map_key(Reg, Ys);
is_reg_defined(V, #vst{}) -> error({not_a_register, V}).
-is_type_defined_x({x,X}, #vst{current=#st{x=Xs}}) ->
- gb_trees:is_defined(X,Xs).
-
-is_type_defined_y({y,Y}, #vst{current=#st{y=Ys}}) ->
- gb_trees:is_defined(Y,Ys).
-
assert_term(Src, Vst) ->
- get_term_type(Src, Vst),
+ _ = get_term_type(Src, Vst),
+ ok.
+
+assert_movable(Src, Vst) ->
+ _ = get_movable_term_type(Src, Vst),
ok.
+assert_literal(Src) ->
+ case is_literal(Src) of
+ true -> ok;
+ false -> error({literal_required,Src})
+ end.
+
+assert_not_literal(Src) ->
+ case is_literal(Src) of
+ true -> error({literal_not_allowed,Src});
+ false -> ok
+ end.
+
+is_literal(nil) -> true;
+is_literal({atom,A}) when is_atom(A) -> true;
+is_literal({float,F}) when is_float(F) -> true;
+is_literal({integer,I}) when is_integer(I) -> true;
+is_literal({literal,_L}) -> true;
+is_literal(_) -> false.
+
%% The possible types.
%%
%% First non-term types:
@@ -1400,10 +1917,10 @@ assert_term(Src, Vst) ->
%% used by the catch instructions; NOT safe to use in other
%% instructions.
%%
-%% exception Can only be used as a type returned by return_type/2
-%% (which gives the type of the value returned by a BIF).
-%% Thus 'exception' is never stored as type descriptor
-%% for a register.
+%% exception Can only be used as a type returned by
+%% call_return_type/2 (which gives the type of the value
+%% returned by a call). Thus 'exception' is never stored
+%% as type descriptor for a register.
%%
%% #ms{} A match context for bit syntax matching. We do allow
%% it to moved/to from stack, but otherwise it must only
@@ -1414,17 +1931,22 @@ assert_term(Src, Vst) ->
%%
%% term Any valid Erlang (but not of the special types above).
%%
+%% binary Binary or bitstring.
+%%
%% bool The atom 'true' or the atom 'false'.
%%
%% cons Cons cell: [_|_]
%%
%% nil Empty list: []
%%
-%% {tuple,[Sz]} Tuple. An element has been accessed using
-%% element/2 or setelement/3 so that it is known that
-%% the type is a tuple of size at least Sz.
+%% list List: [] or [_|_]
+%%
+%% {tuple,[Sz],Es} Tuple. An element has been accessed using
+%% element/2 or setelement/3 so that it is known that
+%% the type is a tuple of size at least Sz. Es is a map
+%% containing known types by tuple index.
%%
-%% {tuple,Sz} Tuple. A test_arity instruction has been seen
+%% {tuple,Sz,Es} Tuple. A test_arity instruction has been seen
%% so that it is known that the size is exactly Sz.
%%
%% {atom,[]} Atom.
@@ -1440,35 +1962,214 @@ assert_term(Src, Vst) ->
%%
%% map Map.
%%
+%% none A conflict in types. There will be an exception at runtime.
%%
-%%
-%% FRAGILITY
-%% ---------
-%%
-%% The loop_rec/2 instruction may return a reference to a term that is
-%% not part of the root set. That term or any part of it must not be
-%% included in a garbage collection. Therefore, the term (or any part
-%% of it) must not be stored in an Y register.
-%%
-%% Such terms are wrapped in a {fragile,Type} tuple, where Type is one
-%% of the types described above.
-assert_type(WantedType, Term, Vst) ->
- case get_term_type(Term, Vst) of
- {fragile,Type} ->
- assert_type(WantedType, Type);
- Type ->
- assert_type(WantedType, Type)
+%% join(Type1, Type2) -> Type
+%% Return the most specific type that covers both Type1 and Type2.
+join(Same, Same) ->
+ Same;
+join(none, Other) ->
+ Other;
+join(Other, none) ->
+ Other;
+join({literal,_}=T1, T2) ->
+ join_literal(T1, T2);
+join(T1, {literal,_}=T2) ->
+ join_literal(T2, T1);
+join({tuple,Size,EsA}, {tuple,Size,EsB}) ->
+ Es = join_tuple_elements(tuple_sz(Size), EsA, EsB),
+ {tuple, Size, Es};
+join({tuple,A,EsA}, {tuple,B,EsB}) ->
+ Size = min(tuple_sz(A), tuple_sz(B)),
+ Es = join_tuple_elements(Size, EsA, EsB),
+ {tuple, [Size], Es};
+join({Type,A}, {Type,B})
+ when Type =:= atom; Type =:= integer; Type =:= float ->
+ if A =:= B -> {Type,A};
+ true -> {Type,[]}
+ end;
+join({Type,_}, number)
+ when Type =:= integer; Type =:= float ->
+ number;
+join(number, {Type,_})
+ when Type =:= integer; Type =:= float ->
+ number;
+join({integer,_}, {float,_}) ->
+ number;
+join({float,_}, {integer,_}) ->
+ number;
+join(bool, {atom,A}) ->
+ join_bool(A);
+join({atom,A}, bool) ->
+ join_bool(A);
+join({atom,A}, {atom,B}) when is_boolean(A), is_boolean(B) ->
+ bool;
+join({atom,_}, {atom,_}) ->
+ {atom,[]};
+join(#ms{id=Id1,valid=B1,slots=Slots1},
+ #ms{id=Id2,valid=B2,slots=Slots2}) ->
+ Id = if
+ Id1 =:= Id2 -> Id1;
+ true -> make_ref()
+ end,
+ #ms{id=Id,valid=B1 band B2,slots=min(Slots1, Slots2)};
+join(T1, T2) when T1 =/= T2 ->
+ %% We've exhausted all other options, so the type must either be a list or
+ %% a 'term'.
+ join_list(T1, T2).
+
+join_tuple_elements(Limit, EsA, EsB) ->
+ Es0 = join_elements(EsA, EsB),
+ maps:filter(fun({integer,Index}, _Type) -> Index =< Limit end, Es0).
+
+join_elements(Es1, Es2) ->
+ Keys = if
+ map_size(Es1) =< map_size(Es2) -> maps:keys(Es1);
+ map_size(Es1) > map_size(Es2) -> maps:keys(Es2)
+ end,
+ join_elements_1(Keys, Es1, Es2, #{}).
+
+join_elements_1([Key | Keys], Es1, Es2, Acc0) ->
+ Type = case {Es1, Es2} of
+ {#{ Key := Same }, #{ Key := Same }} -> Same;
+ {#{ Key := Type1 }, #{ Key := Type2 }} -> join(Type1, Type2);
+ {#{}, #{}} -> term
+ end,
+ Acc = set_element_type(Key, Type, Acc0),
+ join_elements_1(Keys, Es1, Es2, Acc);
+join_elements_1([], _Es1, _Es2, Acc) ->
+ Acc.
+
+%% Joins types of literals; note that the left argument must either be a
+%% literal or exactly equal to the second argument.
+join_literal(Same, Same) ->
+ Same;
+join_literal({literal,_}=Lit, T) ->
+ join_literal(T, get_literal_type(Lit));
+join_literal(T1, T2) ->
+ %% We're done extracting the types, try merging them again.
+ join(T1, T2).
+
+join_list(nil, cons) -> list;
+join_list(nil, list) -> list;
+join_list(cons, list) -> list;
+join_list(T, nil) -> join_list(nil, T);
+join_list(T, cons) -> join_list(cons, T);
+join_list(_, _) ->
+ %% Not a list, so it must be a term.
+ term.
+
+join_bool([]) -> {atom,[]};
+join_bool(true) -> bool;
+join_bool(false) -> bool;
+join_bool(_) -> {atom,[]}.
+
+%% meet(Type1, Type2) -> Type
+%%   Return the meet of two types: the most specific type that is a
+%%   subtype of both. It will be 'none' if the types are in conflict.
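+%%   Examples (illustrative):
+%%      meet(term, {atom,[]})         -> {atom,[]}
+%%      meet(list, cons)              -> cons
+%%      meet({atom,[]}, {integer,[]}) -> none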
+
+meet(Same, Same) ->
+ Same;
+meet(term, Other) ->
+ Other;
+meet(Other, term) ->
+ Other;
+meet(#ms{}, binary) ->
+ #ms{};
+meet(binary, #ms{}) ->
+ #ms{};
+meet({literal,_}, {literal,_}) ->
+ none;
+meet(T1, {literal,_}=T2) ->
+ meet(T2, T1);
+meet({literal,_}=T1, T2) ->
+ case meet(get_literal_type(T1), T2) of
+ none -> none;
+ _ -> T1
+ end;
+meet(T1, T2) ->
+ case {erlang:min(T1, T2),erlang:max(T1, T2)} of
+ {{atom,_}=A,{atom,[]}} -> A;
+ {bool,{atom,B}=Atom} when is_boolean(B) -> Atom;
+ {bool,{atom,[]}} -> bool;
+ {cons,list} -> cons;
+ {{float,_}=T,{float,[]}} -> T;
+ {{integer,_}=T,{integer,[]}} -> T;
+ {list,nil} -> nil;
+ {number,{integer,_}=T} -> T;
+ {number,{float,_}=T} -> T;
+ {{tuple,Size1,Es1},{tuple,Size2,Es2}} ->
+ Es = meet_elements(Es1, Es2),
+ case {Size1,Size2,Es} of
+ {_, _, none} ->
+ none;
+ {[Sz1],[Sz2],_} ->
+ Sz = erlang:max(Sz1, Sz2),
+ assert_tuple_elements(Sz, Es),
+ {tuple,[Sz],Es};
+ {Sz1,[Sz2],_} when Sz2 =< Sz1 ->
+ assert_tuple_elements(Sz1, Es),
+ {tuple,Sz1,Es};
+ {Sz,Sz,_} ->
+ assert_tuple_elements(Sz, Es),
+ {tuple,Sz,Es};
+ {_,_,_} ->
+ none
+ end;
+ {_,_} -> none
end.
+meet_elements(Es1, Es2) ->
+ Keys = maps:keys(Es1) ++ maps:keys(Es2),
+ meet_elements_1(Keys, Es1, Es2, #{}).
+
+meet_elements_1([Key | Keys], Es1, Es2, Acc) ->
+ case {Es1, Es2} of
+ {#{ Key := Type1 }, #{ Key := Type2 }} ->
+ case meet(Type1, Type2) of
+ none -> none;
+ Type -> meet_elements_1(Keys, Es1, Es2, Acc#{ Key => Type })
+ end;
+ {#{ Key := Type1 }, _} ->
+ meet_elements_1(Keys, Es1, Es2, Acc#{ Key => Type1 });
+ {_, #{ Key := Type2 }} ->
+ meet_elements_1(Keys, Es1, Es2, Acc#{ Key => Type2 })
+ end;
+meet_elements_1([], _Es1, _Es2, Acc) ->
+ Acc.
+
+%% No tuple elements may have an index above the known size.
+assert_tuple_elements(Limit, Es) ->
+ true = maps:fold(fun({integer,Index}, _T, true) ->
+ Index =< Limit
+ end, true, Es). %Assertion.
+
+%% subtract(Type1, Type2) -> Type
+%%  Subtract Type2 from Type1. Example:
+%% subtract(list, nil) -> cons
+
+subtract(Same, Same) -> none;
+subtract(list, nil) -> cons;
+subtract(list, cons) -> nil;
+subtract(number, {integer,[]}) -> {float,[]};
+subtract(number, {float,[]}) -> {integer,[]};
+subtract(bool, {atom,false}) -> {atom, true};
+subtract(bool, {atom,true}) -> {atom, false};
+subtract(Type, _) -> Type.
+
+assert_type(WantedType, Term, Vst) ->
+ Type = get_term_type(Term, Vst),
+ assert_type(WantedType, Type).
+
assert_type(Correct, Correct) -> ok;
assert_type(float, {float,_}) -> ok;
-assert_type(tuple, {tuple,_}) -> ok;
+assert_type(tuple, {tuple,_,_}) -> ok;
assert_type(tuple, {literal,Tuple}) when is_tuple(Tuple) -> ok;
-assert_type({tuple_element,I}, {tuple,[Sz]})
+assert_type({tuple_element,I}, {tuple,[Sz],_})
when 1 =< I, I =< Sz ->
ok;
-assert_type({tuple_element,I}, {tuple,Sz})
+assert_type({tuple_element,I}, {tuple,Sz,_})
when is_integer(Sz), 1 =< I, I =< Sz ->
ok;
assert_type({tuple_element,I}, {literal,Lit}) when I =< tuple_size(Lit) ->
@@ -1478,156 +2179,297 @@ assert_type(cons, {literal,[_|_]}) ->
assert_type(Needed, Actual) ->
error({bad_type,{needed,Needed},{actual,Actual}}).
-%% upgrade_tuple_type(NewTupleType, OldType) -> TupleType.
-%% upgrade_tuple_type/2 is used when linear code finds out more and
-%% more information about a tuple type, so that the type gets more
-%% specialized. If OldType is not a tuple type, the type information
-%% is inconsistent, and we know that some instructions will never
-%% be executed at run-time.
-
-upgrade_tuple_type(NewType, {fragile,OldType}) ->
- make_fragile(upgrade_tuple_type_1(NewType, OldType));
-upgrade_tuple_type(NewType, OldType) ->
- upgrade_tuple_type_1(NewType, OldType).
-
-upgrade_tuple_type_1({tuple,[Sz]}, {tuple,[OldSz]}=T) when Sz < OldSz ->
- %% The old type has a higher value for the least tuple size.
- T;
-upgrade_tuple_type_1({tuple,[Sz]}, {tuple,OldSz}=T)
- when is_integer(Sz), is_integer(OldSz), Sz =< OldSz ->
- %% The old size is exact, and the new size is smaller than the old size.
- T;
-upgrade_tuple_type_1({tuple,_}=T, _) ->
- %% The new type information is exact or has a higher value for
- %% the least tuple size.
- %% Note that inconsistencies are also handled in this
- %% clause, e.g. if the old type was an integer or a tuple accessed
- %% outside its size; inconsistences will generally cause an exception
- %% at run-time but are safe from our point of view.
- T.
+get_element_type(Key, Src, Vst) ->
+ get_element_type_1(Key, get_term_type(Src, Vst)).
+
+get_element_type_1({integer,_}=Key, {tuple,_Sz,Es}) ->
+ case Es of
+ #{ Key := Type } -> Type;
+ #{} -> term
+ end;
+get_element_type_1(_Index, _Type) ->
+ term.
+
+set_element_type(_Key, none, Es) ->
+ Es;
+set_element_type(Key, term, Es) ->
+ maps:remove(Key, Es);
+set_element_type(Key, Type, Es) ->
+ Es#{ Key => Type }.
get_tuple_size({integer,[]}) -> 0;
get_tuple_size({integer,Sz}) -> Sz;
get_tuple_size(_) -> 0.
validate_src(Ss, Vst) when is_list(Ss) ->
- foreach(fun(S) -> get_term_type(S, Vst) end, Ss).
+ _ = [assert_term(S, Vst) || S <- Ss],
+ ok.
-%% get_move_term_type(Src, ValidatorState) -> Type
+%% get_term_type(Src, ValidatorState) -> Type
%% Get the type of the source Src. The returned type Type will be
-%% a standard Erlang type (no catch/try tags). Match contexts are OK.
+%% a standard Erlang type (no catch/try tags or match contexts).
-get_move_term_type(Src, Vst) ->
- case get_term_type_1(Src, Vst) of
- initialized -> error({unassigned,Src});
- {catchtag,_} -> error({catchtag,Src});
- {trytag,_} -> error({trytag,Src});
- tuple_in_progress -> error({tuple_in_progress,Src});
- Type -> Type
+get_term_type(Src, Vst) ->
+ case get_movable_term_type(Src, Vst) of
+ #ms{} -> error({match_context,Src});
+ Type -> Type
end.
-%% get_term_type(Src, ValidatorState) -> Type
+%% get_movable_term_type(Src, ValidatorState) -> Type
%% Get the type of the source Src. The returned type Type will be
-%% a standard Erlang type (no catch/try tags or match contexts).
+%% a standard Erlang type (no catch/try tags). Match contexts are OK.
-get_term_type(Src, Vst) ->
- case get_move_term_type(Src, Vst) of
- #ms{} -> error({match_context,Src});
- Type -> Type
+get_movable_term_type(Src, Vst) ->
+ case get_raw_type(Src, Vst) of
+ initialized -> error({unassigned,Src});
+ uninitialized -> error({uninitialized_reg,Src});
+ {catchtag,_} -> error({catchtag,Src});
+ {trytag,_} -> error({trytag,Src});
+ tuple_in_progress -> error({tuple_in_progress,Src});
+ {literal,_}=Lit -> get_literal_type(Lit);
+ Type -> Type
end.
-%% get_special_y_type(Src, ValidatorState) -> Type
-%% Return the type for the Y register without doing any validity checks.
-
-get_special_y_type({y,_}=Reg, Vst) -> get_term_type_1(Reg, Vst);
-get_special_y_type(Src, _) -> error({source_not_y_reg,Src}).
-
-get_term_type_1(nil=T, _) -> T;
-get_term_type_1({atom,A}=T, _) when is_atom(A) -> T;
-get_term_type_1({float,F}=T, _) when is_float(F) -> T;
-get_term_type_1({integer,I}=T, _) when is_integer(I) -> T;
-get_term_type_1({literal,Map}, _) when is_map(Map) -> map;
-get_term_type_1({literal,Tuple}, _) when is_tuple(Tuple) ->
- {tuple,tuple_size(Tuple)};
-get_term_type_1({literal,_}=T, _) -> T;
-get_term_type_1({x,X}=Reg, #vst{current=#st{x=Xs}}) when is_integer(X) ->
- case gb_trees:lookup(X, Xs) of
- {value,Type} -> Type;
- none -> error({uninitialized_reg,Reg})
+%% get_tag_type(Src, ValidatorState) -> Type
+%% Return the tag type of a Y register, erroring out if it contains a term.
+
+get_tag_type({y,_}=Src, Vst) ->
+ case get_raw_type(Src, Vst) of
+ {catchtag, _}=Tag -> Tag;
+ {trytag, _}=Tag -> Tag;
+ uninitialized=Tag -> Tag;
+ initialized=Tag -> Tag;
+ Other -> error({invalid_tag,Src,Other})
end;
-get_term_type_1({y,Y}=Reg, #vst{current=#st{y=Ys}}) when is_integer(Y) ->
- case gb_trees:lookup(Y, Ys) of
- none -> error({uninitialized_reg,Reg});
- {value,uninitialized} -> error({uninitialized_reg,Reg});
- {value,Type} -> Type
+get_tag_type(Src, _) ->
+ error({invalid_tag_register,Src}).
+
+%% get_raw_type(Src, ValidatorState) -> Type
+%% Return the type of a register without doing any validity checks or
+%% conversions.
+get_raw_type({x,X}=Src, #vst{current=#st{xs=Xs}}=Vst) when is_integer(X) ->
+ check_limit(Src),
+ case Xs of
+ #{ Src := #value_ref{}=Ref } -> get_raw_type(Ref, Vst);
+ #{} -> uninitialized
end;
-get_term_type_1(Src, _) -> error({bad_source,Src}).
-
-get_def(Src, #vst{current=#st{defs=Defs}}) ->
- case Defs of
- #{Src:=Def} -> Def;
+get_raw_type({y,Y}=Src, #vst{current=#st{ys=Ys}}=Vst) when is_integer(Y) ->
+ check_limit(Src),
+ case Ys of
+ #{ Src := #value_ref{}=Ref } -> get_raw_type(Ref, Vst);
+ #{ Src := Tag } -> Tag;
+ #{} -> uninitialized
+ end;
+get_raw_type(#value_ref{}=Ref, #vst{current=#st{vs=Vs}}) ->
+ case Vs of
+ #{ Ref := #value{type=Type} } -> Type;
#{} -> none
+ end;
+get_raw_type(Src, #vst{}) ->
+ get_literal_type(Src).
+
+get_literal_type(nil=T) -> T;
+get_literal_type({atom,A}=T) when is_atom(A) -> T;
+get_literal_type({float,F}=T) when is_float(F) -> T;
+get_literal_type({integer,I}=T) when is_integer(I) -> T;
+get_literal_type({literal,[_|_]}) -> cons;
+get_literal_type({literal,Bitstring}) when is_bitstring(Bitstring) -> binary;
+get_literal_type({literal,Map}) when is_map(Map) -> map;
+get_literal_type({literal,Tuple}) when is_tuple(Tuple) -> glt_1(Tuple);
+get_literal_type({literal,_}) -> term;
+get_literal_type(T) -> error({not_literal,T}).
+
+glt_1([]) -> nil;
+glt_1(A) when is_atom(A) -> {atom, A};
+glt_1(F) when is_float(F) -> {float, F};
+glt_1(I) when is_integer(I) -> {integer, I};
+glt_1(T) when is_tuple(T) ->
+ {Es,_} = foldl(fun(Val, {Es0, Index}) ->
+ Type = glt_1(Val),
+ Es = set_element_type({integer,Index}, Type, Es0),
+ {Es, Index + 1}
+ end, {#{}, 1}, tuple_to_list(T)),
+ {tuple, tuple_size(T), Es};
+glt_1(L) ->
+ {literal, L}.
+
+%%%
+%%% Branch tracking
+%%%
+
+%% Forks the execution flow, with the provided funs returning the new state of
+%% their respective branch; the "fail" fun returns the state where the branch
+%% is taken, and the "success" fun returns the state where it's not.
+%%
+%% If either path is known not to be taken at runtime (e.g. due to a type
+%% conflict), it will simply be discarded.
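+%%
+%% A usage sketch (RefineTuple is a hypothetical helper, not part of this
+%% module): an is_tuple test with fail label Fail could be validated as
+%%
+%%    branch(Fail, Vst, fun(SuccVst) -> RefineTuple(Src, SuccVst) end)
+%%
+%% i.e. the success fun narrows the type of Src, while the state at the
+%% branch target Fail is recorded unchanged.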
+-spec branch(Lbl :: label(),
+ Original :: #vst{},
+ FailFun :: BranchFun,
+ SuccFun :: BranchFun) -> #vst{} when
+ BranchFun :: fun((#vst{}) -> #vst{}).
+branch(Lbl, Vst0, FailFun, SuccFun) ->
+ #vst{current=St0} = Vst0,
+ try FailFun(Vst0) of
+ Vst1 ->
+ Vst2 = branch_state(Lbl, Vst1),
+ Vst = Vst2#vst{current=St0},
+ try SuccFun(Vst) of
+ V -> V
+ catch
+ {type_conflict, _, _} ->
+ %% The instruction is guaranteed to fail; kill the state.
+ kill_state(Vst)
+ end
+ catch
+ {type_conflict, _, _} ->
+ %% This instruction is guaranteed not to fail, so we run the
+ %% success branch *without* catching type conflicts to avoid hiding
+ %% errors in the validator itself; one of the branches must
+ %% succeed.
+ SuccFun(Vst0)
end.
-%% get_literal(Src) -> literal_value().
-get_literal(nil) -> [];
-get_literal({atom,A}) when is_atom(A) -> A;
-get_literal({float,F}) when is_float(F) -> F;
-get_literal({integer,I}) when is_integer(I) -> I;
-get_literal({literal,L}) -> L;
-get_literal(T) -> error({not_literal,T}).
-
-branch_arities([Sz,{f,L}|T], Tuple, {tuple,[_]}=Type0, Vst0) when is_integer(Sz) ->
- Vst1 = set_aliased_type({tuple,Sz}, Tuple, Vst0),
- Vst = branch_state(L, Vst1),
- branch_arities(T, Tuple, Type0, Vst);
-branch_arities([Sz,{f,L}|T], Tuple, {tuple,Sz}=Type, Vst0) when is_integer(Sz) ->
- %% The type is already correct. (This test is redundant.)
- Vst = branch_state(L, Vst0),
- branch_arities(T, Tuple, Type, Vst);
-branch_arities([Sz0,{f,_}|T], Tuple, {tuple,Sz}=Type, Vst)
- when is_integer(Sz), Sz0 =/= Sz ->
- %% We already have an established different exact size for the tuple.
- %% This label can't possibly be reached.
- branch_arities(T, Tuple, Type, Vst);
-branch_arities([], _, _, #vst{}=Vst) -> Vst.
+%% A shorthand version of branch/4 for when the state is only altered on
+%% success.
+branch(Fail, Vst, SuccFun) ->
+ branch(Fail, Vst, fun(V) -> V end, SuccFun).
+%% Directly branches off the state. This is an "internal" operation that should
+%% be used sparingly.
branch_state(0, #vst{}=Vst) ->
- %% If the instruction fails, the stack may be scanned
- %% looking for a catch tag. Therefore the Y registers
- %% must be initialized at this point.
+ %% If the instruction fails, the stack may be scanned looking for a catch
+ %% tag. Therefore the Y registers must be initialized at this point.
verify_y_init(Vst),
Vst;
-branch_state(L, #vst{current=St,branched=B}=Vst) ->
- Vst#vst{
- branched=case gb_trees:is_defined(L, B) of
- false ->
- gb_trees:insert(L, St, B);
- true ->
- MergedSt = merge_states(L, St, B),
- gb_trees:update(L, MergedSt, B)
- end}.
-
-%% merge_states/3 is used when there are more than one way to arrive
-%% at this point, and the type states for the different paths has
-%% to be merged. The type states are downgraded to the least common
-%% subset for the subsequent code.
-
-merge_states(L, St, Branched) when L =/= 0 ->
+branch_state(L, #vst{current=St,branched=B,ref_ctr=Counter0}=Vst) ->
+ case gb_trees:is_defined(L, B) of
+ true ->
+ {MergedSt, Counter} = merge_states(L, St, B, Counter0),
+ Branched = gb_trees:update(L, MergedSt, B),
+ Vst#vst{branched=Branched,ref_ctr=Counter};
+ false ->
+ Vst#vst{branched=gb_trees:insert(L, St, B)}
+ end.
+
+%% merge_states/4 is used when there's more than one way to arrive at a
+%% certain point, requiring the states to be merged down to the least
+%% common subset for the subsequent code.
+
+merge_states(L, St, Branched, Counter) when L =/= 0 ->
case gb_trees:lookup(L, Branched) of
- none -> St;
- {value,OtherSt} when St =:= none -> OtherSt;
- {value,OtherSt} -> merge_states_1(St, OtherSt)
+ none ->
+ {St, Counter};
+ {value,OtherSt} when St =:= none ->
+ {OtherSt, Counter};
+ {value,OtherSt} ->
+ merge_states_1(St, OtherSt, Counter)
end.
-merge_states_1(#st{x=Xs0,y=Ys0,numy=NumY0,h=H0,ct=Ct0,aliases=Aliases0},
- #st{x=Xs1,y=Ys1,numy=NumY1,h=H1,ct=Ct1,aliases=Aliases1}) ->
- NumY = merge_stk(NumY0, NumY1),
- Xs = merge_regs(Xs0, Xs1),
- Ys = merge_y_regs(Ys0, Ys1),
- Ct = merge_ct(Ct0, Ct1),
- Aliases = merge_aliases(Aliases0, Aliases1),
- #st{x=Xs,y=Ys,numy=NumY,h=min(H0, H1),ct=Ct,aliases=Aliases}.
+merge_states_1(#st{xs=XsA,ys=YsA,vs=VsA,fragile=FragA,numy=NumYA,h=HA,ct=CtA},
+ #st{xs=XsB,ys=YsB,vs=VsB,fragile=FragB,numy=NumYB,h=HB,ct=CtB},
+ Counter0) ->
+ %% When merging registers we drop all registers that aren't defined in both
+ %% states, and resolve conflicts by creating new values (similar to phi
+ %% nodes in SSA).
+ %%
+ %% While doing this we build a "merge map" detailing which values need to
+ %% be kept and which new values need to be created to resolve conflicts.
+ %% This map is then used to create a new value database where the types of
+ %% all values have been joined.
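+    %%
+    %% For example, if both states map {x,0} to the same value reference,
+    %% that reference is kept and its types are joined; if they map {x,0}
+    %% to two different references, a fresh reference is created whose type
+    %% is the join of the two (see merge_vrefs/4 and merge_values_1/5).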
+ {Xs, Merge0, Counter1} = merge_regs(XsA, XsB, #{}, Counter0),
+ {Ys, Merge, Counter} = merge_regs(YsA, YsB, Merge0, Counter1),
+ Vs = merge_values(Merge, VsA, VsB),
+
+ Fragile = merge_fragility(FragA, FragB),
+ NumY = merge_stk(NumYA, NumYB),
+ Ct = merge_ct(CtA, CtB),
+
+ St = #st{xs=Xs,ys=Ys,vs=Vs,fragile=Fragile,numy=NumY,h=min(HA, HB),ct=Ct},
+ {St, Counter}.
+
+%% Merges the contents of two register maps, returning the updated "merge map"
+%% and the new registers.
+merge_regs(RsA, RsB, Merge, Counter) ->
+ Keys = if
+ map_size(RsA) =< map_size(RsB) -> maps:keys(RsA);
+ map_size(RsA) > map_size(RsB) -> maps:keys(RsB)
+ end,
+ merge_regs_1(Keys, RsA, RsB, #{}, Merge, Counter).
+
+merge_regs_1([Reg | Keys], RsA, RsB, Regs, Merge0, Counter0) ->
+ case {RsA, RsB} of
+ {#{ Reg := #value_ref{}=RefA }, #{ Reg := #value_ref{}=RefB }} ->
+ {Ref, Merge, Counter} = merge_vrefs(RefA, RefB, Merge0, Counter0),
+ merge_regs_1(Keys, RsA, RsB, Regs#{ Reg => Ref }, Merge, Counter);
+ {#{ Reg := TagA }, #{ Reg := TagB }} ->
+ %% Tags describe the state of the register rather than the value it
+ %% contains, so if a register contains a tag in one state we have
+ %% to merge it as a tag regardless of whether the other state says
+ %% it's a value.
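+            %% For example, if {y,0} holds a catchtag in one state and a
+            %% term (value reference) in the other, merge_tags/2 leaves the
+            %% merged register merely 'initialized'.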
+ {y, _} = Reg, %Assertion.
+ merge_regs_1(Keys, RsA, RsB, Regs#{ Reg => merge_tags(TagA,TagB) },
+ Merge0, Counter0);
+ {#{}, #{}} ->
+ merge_regs_1(Keys, RsA, RsB, Regs, Merge0, Counter0)
+ end;
+merge_regs_1([], _, _, Regs, Merge, Counter) ->
+ {Regs, Merge, Counter}.
+
+merge_tags(Same, Same) ->
+ Same;
+merge_tags(uninitialized, _) ->
+ uninitialized;
+merge_tags(_, uninitialized) ->
+ uninitialized;
+merge_tags({catchtag,T0}, {catchtag,T1}) ->
+ {catchtag, ordsets:from_list(T0 ++ T1)};
+merge_tags({trytag,T0}, {trytag,T1}) ->
+ {trytag, ordsets:from_list(T0 ++ T1)};
+merge_tags(_A, _B) ->
+ %% All other combinations leave the register initialized. Errors arising
+ %% from this will be caught later on.
+ initialized.
+
+merge_vrefs(Ref, Ref, Merge, Counter) ->
+ %% We have two (potentially) different versions of the same value, so we
+ %% should join their types into the same value.
+ {Ref, Merge#{ Ref => Ref }, Counter};
+merge_vrefs(RefA, RefB, Merge, Counter) ->
+ %% We have two different values, so we need to create a new value from
+ %% their joined type if we haven't already done so.
+ Key = {RefA, RefB},
+ case Merge of
+ #{ Key := Ref } ->
+ {Ref, Merge, Counter};
+ #{} ->
+ Ref = #value_ref{id=Counter},
+ {Ref, Merge#{ Key => Ref }, Counter + 1}
+ end.
+
+merge_values(Merge, VsA, VsB) ->
+ maps:fold(fun(Spec, New, Acc) ->
+ merge_values_1(Spec, New, VsA, VsB, Acc)
+ end, #{}, Merge).
+
+merge_values_1(Same, Same, VsA, VsB, Acc) ->
+ %% We're merging different versions of the same value, so it's safe to
+ %% reuse old entries if the type's unchanged.
+ #value{type=TypeA}=EntryA = map_get(Same, VsA),
+ #value{type=TypeB}=EntryB = map_get(Same, VsB),
+ Entry = case join(TypeA, TypeB) of
+ TypeA -> EntryA;
+ TypeB -> EntryB;
+ JoinedType -> EntryA#value{type=JoinedType}
+ end,
+ Acc#{ Same => Entry };
+merge_values_1({RefA, RefB}, New, VsA, VsB, Acc) ->
+ #value{type=TypeA} = map_get(RefA, VsA),
+ #value{type=TypeB} = map_get(RefB, VsB),
+ Acc#{ New => #value{op=join,args=[],type=join(TypeA, TypeB)} }.
+
+merge_fragility(FragileA, FragileB) ->
+ cerl_sets:union(FragileA, FragileB).
merge_stk(S, S) -> S;
merge_stk(_, _) -> undecided.
@@ -1640,151 +2482,70 @@ merge_ct_1([C0|Ct0], [C1|Ct1]) ->
merge_ct_1([], []) -> [];
merge_ct_1(_, _) -> undecided.
-merge_regs(Rs0, Rs1) ->
- Rs = merge_regs_1(gb_trees:to_list(Rs0), gb_trees:to_list(Rs1)),
- gb_trees_from_list(Rs).
-
-merge_regs_1([Same|Rs1], [Same|Rs2]) ->
- [Same|merge_regs_1(Rs1, Rs2)];
-merge_regs_1([{R1,_}|Rs1], [{R2,_}|_]=Rs2) when R1 < R2 ->
- merge_regs_1(Rs1, Rs2);
-merge_regs_1([{R1,_}|_]=Rs1, [{R2,_}|Rs2]) when R1 > R2 ->
- merge_regs_1(Rs1, Rs2);
-merge_regs_1([{R,Type1}|Rs1], [{R,Type2}|Rs2]) ->
- [{R,merge_types(Type1, Type2)}|merge_regs_1(Rs1, Rs2)];
-merge_regs_1([], []) -> [];
-merge_regs_1([], [_|_]) -> [];
-merge_regs_1([_|_], []) -> [].
-
-merge_y_regs(Rs0, Rs1) ->
- case {gb_trees:size(Rs0),gb_trees:size(Rs1)} of
- {Sz0,Sz1} when Sz0 < Sz1 ->
- merge_y_regs_1(Sz0-1, Rs1, Rs0);
- {_,Sz1} ->
- merge_y_regs_1(Sz1-1, Rs0, Rs1)
- end.
+tuple_sz([Sz]) -> Sz;
+tuple_sz(Sz) -> Sz.
-merge_y_regs_1(Y, S, Regs0) when Y >= 0 ->
- Type0 = gb_trees:get(Y, Regs0),
- case gb_trees:get(Y, S) of
- Type0 ->
- merge_y_regs_1(Y-1, S, Regs0);
- Type1 ->
- Type = merge_types(Type0, Type1),
- Regs = gb_trees:update(Y, Type, Regs0),
- merge_y_regs_1(Y-1, S, Regs)
- end;
-merge_y_regs_1(_, _, Regs) -> Regs.
+verify_y_init(#vst{current=#st{numy=NumY,ys=Ys}}=Vst) when is_integer(NumY) ->
+ HighestY = maps:fold(fun({y,Y}, _, Acc) -> max(Y, Acc) end, -1, Ys),
+ true = NumY > HighestY, %Assertion.
+ verify_y_init_1(NumY - 1, Vst),
+ ok;
+verify_y_init(#vst{current=#st{numy=undecided,ys=Ys}}=Vst) ->
+ HighestY = maps:fold(fun({y,Y}, _, Acc) -> max(Y, Acc) end, -1, Ys),
+ verify_y_init_1(HighestY, Vst);
+verify_y_init(#vst{}) ->
+ ok.
-%% merge_types(Type1, Type2) -> Type
-%% Return the most specific type possible.
-%% Note: Type1 must NOT be the same as Type2.
-merge_types({fragile,Same}=Type, Same) ->
- Type;
-merge_types({fragile,T1}, T2) ->
- make_fragile(merge_types(T1, T2));
-merge_types(Same, {fragile,Same}=Type) ->
- Type;
-merge_types(T1, {fragile,T2}) ->
- make_fragile(merge_types(T1, T2));
-merge_types(uninitialized=I, _) -> I;
-merge_types(_, uninitialized=I) -> I;
-merge_types(initialized=I, _) -> I;
-merge_types(_, initialized=I) -> I;
-merge_types({catchtag,T0},{catchtag,T1}) ->
- {catchtag,ordsets:from_list(T0++T1)};
-merge_types({trytag,T0},{trytag,T1}) ->
- {trytag,ordsets:from_list(T0++T1)};
-merge_types({tuple,A}, {tuple,B}) ->
- {tuple,[min(tuple_sz(A), tuple_sz(B))]};
-merge_types({Type,A}, {Type,B})
- when Type =:= atom; Type =:= integer; Type =:= float ->
- if A =:= B -> {Type,A};
- true -> {Type,[]}
- end;
-merge_types({Type,_}, number)
- when Type =:= integer; Type =:= float ->
- number;
-merge_types(number, {Type,_})
- when Type =:= integer; Type =:= float ->
- number;
-merge_types(bool, {atom,A}) ->
- merge_bool(A);
-merge_types({atom,A}, bool) ->
- merge_bool(A);
-merge_types(cons, {literal,[_|_]}) ->
- cons;
-merge_types({literal,[_|_]}, cons) ->
- cons;
-merge_types({literal,[_|_]}, {literal,[_|_]}) ->
- cons;
-merge_types(#ms{id=Id1,valid=B1,slots=Slots1},
- #ms{id=Id2,valid=B2,slots=Slots2}) ->
- Id = if
- Id1 =:= Id2 -> Id1;
- true -> make_ref()
- end,
- #ms{id=Id,valid=B1 band B2,slots=min(Slots1, Slots2)};
-merge_types(T1, T2) when T1 =/= T2 ->
- %% Too different. All we know is that the type is a 'term'.
- term.
+verify_y_init_1(-1, _Vst) ->
+ ok;
+verify_y_init_1(Y, Vst) ->
+ Reg = {y, Y},
+ assert_not_fragile(Reg, Vst),
+ case get_raw_type(Reg, Vst) of
+ uninitialized -> error({uninitialized_reg,Reg});
+ _ -> verify_y_init_1(Y - 1, Vst)
+ end.
-tuple_sz([Sz]) -> Sz;
-tuple_sz(Sz) -> Sz.
+verify_live(0, _Vst) ->
+ ok;
+verify_live(Live, Vst) when is_integer(Live), 0 < Live, Live =< 1023 ->
+ verify_live_1(Live - 1, Vst);
+verify_live(Live, _Vst) ->
+ error({bad_number_of_live_regs,Live}).
-merge_bool([]) -> {atom,[]};
-merge_bool(true) -> bool;
-merge_bool(false) -> bool;
-merge_bool(_) -> {atom,[]}.
-
-merge_aliases(Al0, Al1) when map_size(Al0) =< map_size(Al1) ->
- maps:filter(fun(K, V) ->
- case Al1 of
- #{K:=V} -> true;
- #{} -> false
- end
- end, Al0);
-merge_aliases(Al0, Al1) ->
- merge_aliases(Al1, Al0).
-
-verify_y_init(#vst{current=#st{y=Ys}}) ->
- verify_y_init_1(gb_trees:to_list(Ys)).
-
-verify_y_init_1([]) -> ok;
-verify_y_init_1([{Y,uninitialized}|_]) ->
- error({uninitialized_reg,{y,Y}});
-verify_y_init_1([{Y,{fragile,_}}|_]) ->
- %% Unsafe. This term may be outside any heap belonging
- %% to the process and would be corrupted by a GC.
- error({fragile_message_reference,{y,Y}});
-verify_y_init_1([{_,_}|Ys]) ->
- verify_y_init_1(Ys).
-
-verify_live(0, #vst{}) -> ok;
-verify_live(N, #vst{current=#st{x=Xs}}) ->
- verify_live_1(N, Xs).
-
-verify_live_1(0, _) -> ok;
-verify_live_1(N, Xs) when is_integer(N) ->
- X = N-1,
- case gb_trees:is_defined(X, Xs) of
- false -> error({{x,X},not_live});
- true -> verify_live_1(X, Xs)
- end;
-verify_live_1(N, _) -> error({bad_number_of_live_regs,N}).
+verify_live_1(-1, _) ->
+ ok;
+verify_live_1(X, Vst) when is_integer(X) ->
+ Reg = {x, X},
+ case get_raw_type(Reg, Vst) of
+ uninitialized -> error({Reg, not_live});
+ _ -> verify_live_1(X - 1, Vst)
+ end.
-verify_no_ct(#vst{current=#st{numy=none}}) -> ok;
+verify_no_ct(#vst{current=#st{numy=none}}) ->
+ ok;
verify_no_ct(#vst{current=#st{numy=undecided}}) ->
error(unknown_size_of_stackframe);
-verify_no_ct(#vst{current=#st{y=Ys}}) ->
- case [Y || Y <- gb_trees:to_list(Ys), verify_no_ct_1(Y)] of
- [] -> ok;
- CT -> error({unfinished_catch_try,CT})
+verify_no_ct(#vst{current=St}=Vst) ->
+ case collect_try_catch_tags(St#st.numy - 1, Vst, []) of
+ [_|_]=Bad -> error({unfinished_catch_try,Bad});
+ [] -> ok
end.
-verify_no_ct_1({_, {catchtag, _}}) -> true;
-verify_no_ct_1({_, {trytag, _}}) -> true;
-verify_no_ct_1({_, _}) -> false.
+%% Collects all try/catch tags, walking down from the Nth stack position.
+collect_try_catch_tags(-1, _Vst, Acc) ->
+ Acc;
+collect_try_catch_tags(Y, Vst, Acc0) ->
+ Tag = get_raw_type({y, Y}, Vst),
+ Acc = case is_try_catch_tag(Tag) of
+ true -> [{{y, Y}, Tag} | Acc0];
+ false -> Acc0
+ end,
+ collect_try_catch_tags(Y - 1, Vst, Acc).
+
+is_try_catch_tag({catchtag,_}) -> true;
+is_try_catch_tag({trytag,_}) -> true;
+is_try_catch_tag(_) -> false.
eat_heap(N, #vst{current=#st{h=Heap0}=St}=Vst) ->
case Heap0-N of
@@ -1802,89 +2563,190 @@ eat_heap_float(#vst{current=#st{hf=HeapFloats0}=St}=Vst) ->
Vst#vst{current=St#st{hf=HeapFloats}}
end.
-remove_fragility(#vst{current=#st{x=Xs0,y=Ys0}=St0}=Vst) ->
- F = fun(_, {fragile,Type}) -> Type;
- (_, Type) -> Type
- end,
- Xs = gb_trees:map(F, Xs0),
- Ys = gb_trees:map(F, Ys0),
- St = St0#st{x=Xs,y=Ys},
+%%% FRAGILITY
+%%%
+%%% The loop_rec/2 instruction may return a reference to a term that is not
+%%% part of the root set. That term or any part of it must not be included in a
+%%% garbage collection. Therefore, the term (or any part of it) must not be
+%%% passed to another function, placed in another term, or live in a Y register
+%%% over an instruction that may GC.
+%%%
+%%% Fragility is marked on a per-register (rather than per-value) basis.
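+%%%
+%%% For example, when loop_rec/2 places a message reference in {x,0}, that
+%%% register is marked fragile (mark_fragile/2); copying it to another
+%%% register propagates the mark (propagate_fragility/3), and
+%%% assert_not_fragile/2 rejects such a register wherever a durable term
+%%% is required.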
+
+%% Marks Reg as fragile.
+mark_fragile(Reg, Vst) ->
+ #vst{current=#st{fragile=Fragile0}=St0} = Vst,
+ Fragile = cerl_sets:add_element(Reg, Fragile0),
+ St = St0#st{fragile=Fragile},
Vst#vst{current=St}.
-propagate_fragility(Type, Ss, Vst) ->
- F = fun(S) ->
- case get_term_type_1(S, Vst) of
- {fragile,_} -> true;
- _ -> false
- end
- end,
- case any(F, Ss) of
- true -> make_fragile(Type);
- false -> Type
+propagate_fragility(Reg, Args, #vst{current=St0}=Vst) ->
+ #st{fragile=Fragile0} = St0,
+
+ Sources = cerl_sets:from_list(Args),
+ Fragile = case cerl_sets:is_disjoint(Sources, Fragile0) of
+ true -> cerl_sets:del_element(Reg, Fragile0);
+ false -> cerl_sets:add_element(Reg, Fragile0)
+ end,
+
+ St = St0#st{fragile=Fragile},
+ Vst#vst{current=St}.
+
+%% Marks Reg as durable; this must be used when assigning a newly created
+%% value to a register.
+remove_fragility(Reg, Vst) ->
+ #vst{current=#st{fragile=Fragile0}=St0} = Vst,
+ case cerl_sets:is_element(Reg, Fragile0) of
+ true ->
+ Fragile = cerl_sets:del_element(Reg, Fragile0),
+ St = St0#st{fragile=Fragile},
+ Vst#vst{current=St};
+ false ->
+ Vst
end.
-bif_type('-', Src, Vst) ->
- arith_type(Src, Vst);
-bif_type('+', Src, Vst) ->
- arith_type(Src, Vst);
-bif_type('*', Src, Vst) ->
- arith_type(Src, Vst);
-bif_type(abs, [Num], Vst) ->
+%% Marks all registers as durable.
+remove_fragility(#vst{current=St0}=Vst) ->
+ St = St0#st{fragile=cerl_sets:new()},
+ Vst#vst{current=St}.
+
+assert_durable_term(Src, Vst) ->
+ assert_term(Src, Vst),
+ assert_not_fragile(Src, Vst).
+
+assert_not_fragile({Kind,_}=Src, Vst) when Kind =:= x; Kind =:= y ->
+ check_limit(Src),
+ #vst{current=#st{fragile=Fragile}} = Vst,
+ case cerl_sets:is_element(Src, Fragile) of
+ true -> error({fragile_message_reference, Src});
+ false -> ok
+ end;
+assert_not_fragile(Lit, #vst{}) ->
+ assert_literal(Lit),
+ ok.
+
+%%%
+%%% Return/argument types of BIFs
+%%%
+
+bif_return_type('-', Src, Vst) ->
+ arith_return_type(Src, Vst);
+bif_return_type('+', Src, Vst) ->
+ arith_return_type(Src, Vst);
+bif_return_type('*', Src, Vst) ->
+ arith_return_type(Src, Vst);
+bif_return_type(abs, [Num], Vst) ->
case get_term_type(Num, Vst) of
- {float,_}=T -> T;
- {integer,_}=T -> T;
- _ -> number
+ {float,_}=T -> T;
+ {integer,_}=T -> T;
+ _ -> number
end;
-bif_type(float, _, _) -> {float,[]};
-bif_type('/', _, _) -> {float,[]};
+bif_return_type(float, _, _) -> {float,[]};
+bif_return_type('/', _, _) -> {float,[]};
+%% Binary operations
+bif_return_type('binary_part', [_,_], _) -> binary;
+bif_return_type('binary_part', [_,_,_], _) -> binary;
+bif_return_type('bit_size', [_], _) -> {integer,[]};
+bif_return_type('byte_size', [_], _) -> {integer,[]};
%% Integer operations.
-bif_type(ceil, [_], _) -> {integer,[]};
-bif_type('div', [_,_], _) -> {integer,[]};
-bif_type(floor, [_], _) -> {integer,[]};
-bif_type('rem', [_,_], _) -> {integer,[]};
-bif_type(length, [_], _) -> {integer,[]};
-bif_type(size, [_], _) -> {integer,[]};
-bif_type(trunc, [_], _) -> {integer,[]};
-bif_type(round, [_], _) -> {integer,[]};
-bif_type('band', [_,_], _) -> {integer,[]};
-bif_type('bor', [_,_], _) -> {integer,[]};
-bif_type('bxor', [_,_], _) -> {integer,[]};
-bif_type('bnot', [_], _) -> {integer,[]};
-bif_type('bsl', [_,_], _) -> {integer,[]};
-bif_type('bsr', [_,_], _) -> {integer,[]};
+bif_return_type(ceil, [_], _) -> {integer,[]};
+bif_return_type('div', [_,_], _) -> {integer,[]};
+bif_return_type(floor, [_], _) -> {integer,[]};
+bif_return_type('rem', [_,_], _) -> {integer,[]};
+bif_return_type(length, [_], _) -> {integer,[]};
+bif_return_type(size, [_], _) -> {integer,[]};
+bif_return_type(trunc, [_], _) -> {integer,[]};
+bif_return_type(round, [_], _) -> {integer,[]};
+bif_return_type('band', [_,_], _) -> {integer,[]};
+bif_return_type('bor', [_,_], _) -> {integer,[]};
+bif_return_type('bxor', [_,_], _) -> {integer,[]};
+bif_return_type('bnot', [_], _) -> {integer,[]};
+bif_return_type('bsl', [_,_], _) -> {integer,[]};
+bif_return_type('bsr', [_,_], _) -> {integer,[]};
%% Booleans.
-bif_type('==', [_,_], _) -> bool;
-bif_type('/=', [_,_], _) -> bool;
-bif_type('=<', [_,_], _) -> bool;
-bif_type('<', [_,_], _) -> bool;
-bif_type('>=', [_,_], _) -> bool;
-bif_type('>', [_,_], _) -> bool;
-bif_type('=:=', [_,_], _) -> bool;
-bif_type('=/=', [_,_], _) -> bool;
-bif_type('not', [_], _) -> bool;
-bif_type('and', [_,_], _) -> bool;
-bif_type('or', [_,_], _) -> bool;
-bif_type('xor', [_,_], _) -> bool;
-bif_type(is_atom, [_], _) -> bool;
-bif_type(is_boolean, [_], _) -> bool;
-bif_type(is_binary, [_], _) -> bool;
-bif_type(is_float, [_], _) -> bool;
-bif_type(is_function, [_], _) -> bool;
-bif_type(is_integer, [_], _) -> bool;
-bif_type(is_list, [_], _) -> bool;
-bif_type(is_map, [_], _) -> bool;
-bif_type(is_number, [_], _) -> bool;
-bif_type(is_pid, [_], _) -> bool;
-bif_type(is_port, [_], _) -> bool;
-bif_type(is_reference, [_], _) -> bool;
-bif_type(is_tuple, [_], _) -> bool;
+bif_return_type('==', [_,_], _) -> bool;
+bif_return_type('/=', [_,_], _) -> bool;
+bif_return_type('=<', [_,_], _) -> bool;
+bif_return_type('<', [_,_], _) -> bool;
+bif_return_type('>=', [_,_], _) -> bool;
+bif_return_type('>', [_,_], _) -> bool;
+bif_return_type('=:=', [_,_], _) -> bool;
+bif_return_type('=/=', [_,_], _) -> bool;
+bif_return_type('not', [_], _) -> bool;
+bif_return_type('and', [_,_], _) -> bool;
+bif_return_type('or', [_,_], _) -> bool;
+bif_return_type('xor', [_,_], _) -> bool;
+bif_return_type(is_atom, [_], _) -> bool;
+bif_return_type(is_boolean, [_], _) -> bool;
+bif_return_type(is_binary, [_], _) -> bool;
+bif_return_type(is_float, [_], _) -> bool;
+bif_return_type(is_function, [_], _) -> bool;
+bif_return_type(is_function, [_,_], _) -> bool;
+bif_return_type(is_integer, [_], _) -> bool;
+bif_return_type(is_list, [_], _) -> bool;
+bif_return_type(is_map, [_], _) -> bool;
+bif_return_type(is_number, [_], _) -> bool;
+bif_return_type(is_pid, [_], _) -> bool;
+bif_return_type(is_port, [_], _) -> bool;
+bif_return_type(is_reference, [_], _) -> bool;
+bif_return_type(is_tuple, [_], _) -> bool;
%% Misc.
-bif_type(node, [], _) -> {atom,[]};
-bif_type(node, [_], _) -> {atom,[]};
-bif_type(hd, [_], _) -> term;
-bif_type(tl, [_], _) -> term;
-bif_type(get, [_], _) -> term;
-bif_type(Bif, _, _) when is_atom(Bif) -> term.
+bif_return_type(tuple_size, [_], _) -> {integer,[]};
+bif_return_type(map_size, [_], _) -> {integer,[]};
+bif_return_type(node, [], _) -> {atom,[]};
+bif_return_type(node, [_], _) -> {atom,[]};
+bif_return_type(hd, [_], _) -> term;
+bif_return_type(tl, [_], _) -> term;
+bif_return_type(get, [_], _) -> term;
+bif_return_type(Bif, _, _) when is_atom(Bif) -> term.
+
+%% Generic
+bif_arg_types(tuple_size, [_]) -> [{tuple,[0],#{}}];
+bif_arg_types(map_size, [_]) -> [map];
+bif_arg_types(is_map_key, [_,_]) -> [term, map];
+bif_arg_types(map_get, [_,_]) -> [term, map];
+bif_arg_types(length, [_]) -> [list];
+bif_arg_types(hd, [_]) -> [cons];
+bif_arg_types(tl, [_]) -> [cons];
+%% Boolean
+bif_arg_types('not', [_]) -> [bool];
+bif_arg_types('and', [_,_]) -> [bool, bool];
+bif_arg_types('or', [_,_]) -> [bool, bool];
+bif_arg_types('xor', [_,_]) -> [bool, bool];
+%% Binary
+bif_arg_types('binary_part', [_,_]) ->
+ PosLen = {tuple, 2, #{ {integer,1} => {integer,[]},
+ {integer,2} => {integer,[]} }},
+ [binary, PosLen];
+bif_arg_types('binary_part', [_,_,_]) ->
+ [binary, {integer,[]}, {integer,[]}];
+bif_arg_types('bit_size', [_]) -> [binary];
+bif_arg_types('byte_size', [_]) -> [binary];
+%% Numerical
+bif_arg_types('-', [_]) -> [number];
+bif_arg_types('-', [_,_]) -> [number,number];
+bif_arg_types('+', [_]) -> [number];
+bif_arg_types('+', [_,_]) -> [number,number];
+bif_arg_types('*', [_,_]) -> [number, number];
+bif_arg_types('/', [_,_]) -> [number, number];
+bif_arg_types(abs, [_]) -> [number];
+bif_arg_types(ceil, [_]) -> [number];
+bif_arg_types(float, [_]) -> [number];
+bif_arg_types(floor, [_]) -> [number];
+bif_arg_types(trunc, [_]) -> [number];
+bif_arg_types(round, [_]) -> [number];
+%% Integer-specific
+bif_arg_types('div', [_,_]) -> [{integer,[]}, {integer,[]}];
+bif_arg_types('rem', [_,_]) -> [{integer,[]}, {integer,[]}];
+bif_arg_types('band', [_,_]) -> [{integer,[]}, {integer,[]}];
+bif_arg_types('bor', [_,_]) -> [{integer,[]}, {integer,[]}];
+bif_arg_types('bxor', [_,_]) -> [{integer,[]}, {integer,[]}];
+bif_arg_types('bnot', [_]) -> [{integer,[]}];
+bif_arg_types('bsl', [_,_]) -> [{integer,[]}, {integer,[]}];
+bif_arg_types('bsr', [_,_]) -> [{integer,[]}, {integer,[]}];
+%% Unsafe type tests that may fail if an argument doesn't have the right type.
+bif_arg_types(is_function, [_,_]) -> [term, {integer,[]}];
+bif_arg_types(_, Args) -> [term || _Arg <- Args].
is_bif_safe('/=', 2) -> true;
is_bif_safe('<', 2) -> true;
@@ -1913,86 +2775,200 @@ is_bif_safe(self, 0) -> true;
is_bif_safe(node, 0) -> true;
is_bif_safe(_, _) -> false.
-arith_type([A,B], Vst) ->
- case {get_term_type(A, Vst),get_term_type(B, Vst)} of
+arith_return_type([A], Vst) ->
+ %% Unary '+' or '-'.
+ case get_term_type(A, Vst) of
+ {integer,_} -> {integer,[]};
+ {float,_} -> {float,[]};
+ _ -> number
+ end;
+arith_return_type([A,B], Vst) ->
+ TypeA = get_term_type(A, Vst),
+ TypeB = get_term_type(B, Vst),
+ case {TypeA, TypeB} of
+ {{integer,_},{integer,_}} -> {integer,[]};
{{float,_},_} -> {float,[]};
{_,{float,_}} -> {float,[]};
{_,_} -> number
end;
-arith_type(_, _) -> number.
+arith_return_type(_, _) -> number.
+
+%%%
+%%% Return/argument types of calls
+%%%
-return_type({extfunc,M,F,A}, Vst) -> return_type_1(M, F, A, Vst);
-return_type(_, _) -> term.
+call_return_type({extfunc,M,F,A}, Vst) -> call_return_type_1(M, F, A, Vst);
+call_return_type(_, _) -> term.
-return_type_1(erlang, setelement, 3, Vst) ->
- Tuple = {x,1},
+call_return_type_1(erlang, setelement, 3, Vst) ->
+ IndexType = get_term_type({x,0}, Vst),
TupleType =
- case get_term_type(Tuple, Vst) of
- {tuple,_}=TT ->
- TT;
- {literal,Lit} when is_tuple(Lit) ->
- {tuple,tuple_size(Lit)};
- _ ->
- {tuple,[0]}
- end,
- case get_term_type({x,0}, Vst) of
- {integer,[]} -> TupleType;
- {integer,I} -> upgrade_tuple_type({tuple,[I]}, TupleType);
- _ -> TupleType
+ case get_term_type({x,1}, Vst) of
+ {literal,Tuple}=Lit when is_tuple(Tuple) -> get_literal_type(Lit);
+ {tuple,_,_}=TT -> TT;
+ _ -> {tuple,[0],#{}}
+ end,
+ case IndexType of
+ {integer,I} when is_integer(I) ->
+ case meet({tuple,[I],#{}}, TupleType) of
+ {tuple, Sz, Es0} ->
+ ValueType = get_term_type({x,2}, Vst),
+ Es = set_element_type({integer,I}, ValueType, Es0),
+ {tuple, Sz, Es};
+ none ->
+ TupleType
+ end;
+ _ ->
+ %% The index could point anywhere, so we must discard all element
+ %% information.
+ setelement(3, TupleType, #{})
+ end;
+call_return_type_1(erlang, '++', 2, Vst) ->
+ case get_term_type({x,0}, Vst) =:= cons orelse
+ get_term_type({x,1}, Vst) =:= cons of
+ true -> cons;
+ false -> list
end;
-return_type_1(erlang, F, A, _) ->
- return_type_erl(F, A);
-return_type_1(math, F, A, _) ->
- return_type_math(F, A);
-return_type_1(M, F, A, _) when is_atom(M), is_atom(F), is_integer(A), A >= 0 ->
+call_return_type_1(erlang, '--', 2, _Vst) ->
+ list;
+call_return_type_1(erlang, F, A, _) ->
+ erlang_mod_return_type(F, A);
+call_return_type_1(lists, F, A, Vst) ->
+ lists_mod_return_type(F, A, Vst);
+call_return_type_1(math, F, A, _) ->
+ math_mod_return_type(F, A);
+call_return_type_1(M, F, A, _) when is_atom(M), is_atom(F), is_integer(A), A >= 0 ->
term.
-return_type_erl(exit, 1) -> exception;
-return_type_erl(throw, 1) -> exception;
-return_type_erl(error, 1) -> exception;
-return_type_erl(error, 2) -> exception;
-return_type_erl(F, A) when is_atom(F), is_integer(A), A >= 0 -> term.
-
-return_type_math(cos, 1) -> {float,[]};
-return_type_math(cosh, 1) -> {float,[]};
-return_type_math(sin, 1) -> {float,[]};
-return_type_math(sinh, 1) -> {float,[]};
-return_type_math(tan, 1) -> {float,[]};
-return_type_math(tanh, 1) -> {float,[]};
-return_type_math(acos, 1) -> {float,[]};
-return_type_math(acosh, 1) -> {float,[]};
-return_type_math(asin, 1) -> {float,[]};
-return_type_math(asinh, 1) -> {float,[]};
-return_type_math(atan, 1) -> {float,[]};
-return_type_math(atanh, 1) -> {float,[]};
-return_type_math(erf, 1) -> {float,[]};
-return_type_math(erfc, 1) -> {float,[]};
-return_type_math(exp, 1) -> {float,[]};
-return_type_math(log, 1) -> {float,[]};
-return_type_math(log2, 1) -> {float,[]};
-return_type_math(log10, 1) -> {float,[]};
-return_type_math(sqrt, 1) -> {float,[]};
-return_type_math(atan2, 2) -> {float,[]};
-return_type_math(pow, 2) -> {float,[]};
-return_type_math(ceil, 1) -> {float,[]};
-return_type_math(floor, 1) -> {float,[]};
-return_type_math(fmod, 2) -> {float,[]};
-return_type_math(pi, 0) -> {float,[]};
-return_type_math(F, A) when is_atom(F), is_integer(A), A >= 0 -> term.
-
-check_limit({x,X}) when is_integer(X), X < 1023 ->
- %% Note: x(1023) is reserved for use by the BEAM loader.
- ok;
-check_limit({y,Y}) when is_integer(Y), Y < 1024 ->
- ok;
-check_limit({fr,Fr}) when is_integer(Fr), Fr < 1024 ->
- ok;
-check_limit(_) ->
- error(limit).
+erlang_mod_return_type(exit, 1) -> exception;
+erlang_mod_return_type(throw, 1) -> exception;
+erlang_mod_return_type(error, 1) -> exception;
+erlang_mod_return_type(error, 2) -> exception;
+erlang_mod_return_type(F, A) when is_atom(F), is_integer(A), A >= 0 -> term.
+
+math_mod_return_type(cos, 1) -> {float,[]};
+math_mod_return_type(cosh, 1) -> {float,[]};
+math_mod_return_type(sin, 1) -> {float,[]};
+math_mod_return_type(sinh, 1) -> {float,[]};
+math_mod_return_type(tan, 1) -> {float,[]};
+math_mod_return_type(tanh, 1) -> {float,[]};
+math_mod_return_type(acos, 1) -> {float,[]};
+math_mod_return_type(acosh, 1) -> {float,[]};
+math_mod_return_type(asin, 1) -> {float,[]};
+math_mod_return_type(asinh, 1) -> {float,[]};
+math_mod_return_type(atan, 1) -> {float,[]};
+math_mod_return_type(atanh, 1) -> {float,[]};
+math_mod_return_type(erf, 1) -> {float,[]};
+math_mod_return_type(erfc, 1) -> {float,[]};
+math_mod_return_type(exp, 1) -> {float,[]};
+math_mod_return_type(log, 1) -> {float,[]};
+math_mod_return_type(log2, 1) -> {float,[]};
+math_mod_return_type(log10, 1) -> {float,[]};
+math_mod_return_type(sqrt, 1) -> {float,[]};
+math_mod_return_type(atan2, 2) -> {float,[]};
+math_mod_return_type(pow, 2) -> {float,[]};
+math_mod_return_type(ceil, 1) -> {float,[]};
+math_mod_return_type(floor, 1) -> {float,[]};
+math_mod_return_type(fmod, 2) -> {float,[]};
+math_mod_return_type(pi, 0) -> {float,[]};
+math_mod_return_type(F, A) when is_atom(F), is_integer(A), A >= 0 -> term.
+
+lists_mod_return_type(all, 2, _Vst) ->
+ bool;
+lists_mod_return_type(any, 2, _Vst) ->
+ bool;
+lists_mod_return_type(keymember, 3, _Vst) ->
+ bool;
+lists_mod_return_type(member, 2, _Vst) ->
+ bool;
+lists_mod_return_type(prefix, 2, _Vst) ->
+ bool;
+lists_mod_return_type(suffix, 2, _Vst) ->
+ bool;
+lists_mod_return_type(dropwhile, 2, _Vst) ->
+ list;
+lists_mod_return_type(duplicate, 2, _Vst) ->
+ list;
+lists_mod_return_type(filter, 2, _Vst) ->
+ list;
+lists_mod_return_type(flatten, 1, _Vst) ->
+ list;
+lists_mod_return_type(flatten, 2, _Vst) ->
+ list;
+lists_mod_return_type(map, 2, Vst) ->
+ same_length_type({x,1}, Vst);
+lists_mod_return_type(MF, 3, Vst) when MF =:= mapfoldl; MF =:= mapfoldr ->
+ ListType = same_length_type({x,2}, Vst),
+ {tuple,2,#{ {integer,1} => ListType} };
+lists_mod_return_type(partition, 2, _Vst) ->
+ two_tuple(list, list);
+lists_mod_return_type(reverse, 1, Vst) ->
+ same_length_type({x,0}, Vst);
+lists_mod_return_type(seq, 2, _Vst) ->
+ list;
+lists_mod_return_type(seq, 3, _Vst) ->
+ list;
+lists_mod_return_type(sort, 1, Vst) ->
+ same_length_type({x,0}, Vst);
+lists_mod_return_type(sort, 2, Vst) ->
+ same_length_type({x,1}, Vst);
+lists_mod_return_type(splitwith, 2, _Vst) ->
+ two_tuple(list, list);
+lists_mod_return_type(takewhile, 2, _Vst) ->
+ list;
+lists_mod_return_type(unzip, 1, Vst) ->
+ ListType = same_length_type({x,0}, Vst),
+ two_tuple(ListType, ListType);
+lists_mod_return_type(usort, 1, Vst) ->
+ same_length_type({x,0}, Vst);
+lists_mod_return_type(usort, 2, Vst) ->
+ same_length_type({x,1}, Vst);
+lists_mod_return_type(zip, 2, _Vst) ->
+ list;
+lists_mod_return_type(zip3, 3, _Vst) ->
+ list;
+lists_mod_return_type(zipwith, 3, _Vst) ->
+ list;
+lists_mod_return_type(zipwith3, 4, _Vst) ->
+ list;
+lists_mod_return_type(_, _, _) ->
+ term.
+
+two_tuple(Type1, Type2) ->
+ {tuple,2,#{ {integer,1} => Type1,
+ {integer,2} => Type2 }}.
+
+same_length_type(Reg, Vst) ->
+ case get_term_type(Reg, Vst) of
+ {literal,[_|_]} -> cons;
+ cons -> cons;
+ nil -> nil;
+ _ -> list
+ end.
+
+check_limit({x,X}=Src) when is_integer(X) ->
+ if
+ %% Note: x(1023) is reserved for use by the BEAM loader.
+ 0 =< X, X < 1023 -> ok;
+ 1023 =< X -> error(limit);
+ X < 0 -> error({bad_register, Src})
+ end;
+check_limit({y,Y}=Src) when is_integer(Y) ->
+ if
+ 0 =< Y, Y < 1024 -> ok;
+ 1024 =< Y -> error(limit);
+ Y < 0 -> error({bad_register, Src})
+ end;
+check_limit({fr,Fr}=Src) when is_integer(Fr) ->
+ if
+ 0 =< Fr, Fr < 1023 -> ok;
+ 1023 =< Fr -> error(limit);
+ Fr < 0 -> error({bad_register, Src})
+ end.
min(A, B) when is_integer(A), is_integer(B), A < B -> A;
min(A, B) when is_integer(A), is_integer(B) -> B.
-gb_trees_from_list(L) -> gb_trees:from_orddict(lists:sort(L)).
+gb_trees_from_list(L) -> gb_trees:from_orddict(sort(L)).
error(Error) -> throw(Error).
diff --git a/lib/compiler/src/beam_z.erl b/lib/compiler/src/beam_z.erl
index 677094b3cd..415b579240 100644
--- a/lib/compiler/src/beam_z.erl
+++ b/lib/compiler/src/beam_z.erl
@@ -71,6 +71,31 @@ undo_renames([{get_hd,Src,Dst1},{get_tl,Src,Dst2}|Is]) ->
[{get_list,Src,Dst1,Dst2}|undo_renames(Is)];
undo_renames([{get_tl,Src,Dst2},{get_hd,Src,Dst1}|Is]) ->
[{get_list,Src,Dst1,Dst2}|undo_renames(Is)];
+undo_renames([{bs_put,_,{bs_put_binary,1,_},
+ [{atom,all},{literal,<<>>}]}|Is]) ->
+ undo_renames(Is);
+undo_renames([{bs_put,Fail,{bs_put_binary,1,_Flags},
+ [{atom,all},{literal,BinString}]}|Is0]) ->
+ Bits = bit_size(BinString),
+ Bytes = Bits div 8,
+ case Bits rem 8 of
+ 0 ->
+ I = {bs_put_string,byte_size(BinString),
+ {string,BinString}},
+ [undo_rename(I)|undo_renames(Is0)];
+ Rem ->
+ <<Binary:Bytes/bytes,Int:Rem>> = BinString,
+ PutInt = {bs_put_integer,Fail,{integer,Rem},1,
+ {field_flags,[unsigned,big]},{integer,Int}},
+ Is = [PutInt|undo_renames(Is0)],
+ case Binary of
+ <<>> ->
+ Is;
+ _ ->
+ [{bs_put_string,byte_size(Binary),
+ {string,Binary}}|Is]
+ end
+ end;
undo_renames([I|Is]) ->
[undo_rename(I)|undo_renames(Is)];
undo_renames([]) -> [].
@@ -79,8 +104,6 @@ undo_rename({bs_put,F,{I,U,Fl},[Sz,Src]}) ->
{I,F,Sz,U,Fl,Src};
undo_rename({bs_put,F,{I,Fl},[Src]}) ->
{I,F,Fl,Src};
-undo_rename({bs_put,{f,0},{bs_put_string,_,_}=I,[]}) ->
- I;
undo_rename({bif,bs_add=I,F,[Src1,Src2,{integer,U}],Dst}) ->
{I,F,[Src1,Src2,U],Dst};
undo_rename({bif,bs_utf8_size=I,F,[Src],Dst}) ->
@@ -101,7 +124,7 @@ undo_rename({test,bs_match_string=Op,F,[Ctx,Bin0]}) ->
0 -> Bin0;
Rem -> <<Bin0/bitstring,0:(8-Rem)>>
end,
- {test,Op,F,[Ctx,Bits,{string,binary_to_list(Bin)}]};
+ {test,Op,F,[Ctx,Bits,{string,Bin}]};
undo_rename({put_map,Fail,assoc,S,D,R,L}) ->
{put_map_assoc,Fail,S,D,R,L};
undo_rename({put_map,Fail,exact,S,D,R,L}) ->
diff --git a/lib/compiler/src/cerl_sets.erl b/lib/compiler/src/cerl_sets.erl
index 0361186713..f489baf238 100644
--- a/lib/compiler/src/cerl_sets.erl
+++ b/lib/compiler/src/cerl_sets.erl
@@ -204,4 +204,4 @@ fold(F, Init, D) ->
Set2 :: set(Element).
filter(F, D) ->
- maps:from_list(lists:filter(fun({K,_}) -> F(K) end, maps:to_list(D))).
+ maps:filter(fun(K,_) -> F(K) end, D).
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index 3a835cfb2f..11dea9524b 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -210,8 +210,11 @@ do_compile(Input, Opts0) ->
{error,Reason}
end
end,
- %% Dialyzer has already spawned workers.
- case lists:member(dialyzer, Opts) of
+    %% Some tools, like Dialyzer, have already spawned workers,
+    %% and spawning extra workers actually slows the compilation
+    %% down instead of speeding it up, so we provide a mechanism
+    %% to bypass the compiler process.
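+    %% (Usage sketch: compile:file("foo.erl", [no_spawn_compiler_process])
+    %% runs the whole compilation in the calling process.)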
+ case lists:member(no_spawn_compiler_process, Opts) of
true ->
IntFun();
false ->
@@ -248,6 +251,9 @@ expand_opt(report, Os) ->
[report_errors,report_warnings|Os];
expand_opt(return, Os) ->
[return_errors,return_warnings|Os];
+expand_opt(no_bsm3, Os) ->
+    %% The new bsm optimization pass requires the bsm3 instructions, so
+    %% disabling them also disables that pass.
+ [no_bsm3,no_bsm_opt|Os];
expand_opt(r16, Os) ->
expand_opt_before_21(Os);
expand_opt(r17, Os) ->
@@ -259,13 +265,18 @@ expand_opt(r19, Os) ->
expand_opt(r20, Os) ->
expand_opt_before_21(Os);
expand_opt(r21, Os) ->
- Os;
+ [no_put_tuple2 | expand_opt(no_bsm3, Os)];
expand_opt({debug_info_key,_}=O, Os) ->
[encrypt_debug_info,O|Os];
+expand_opt(no_type_opt, Os) ->
+ [no_ssa_opt_type_start,
+ no_ssa_opt_type_continue,
+ no_ssa_opt_type_finish | Os];
expand_opt(O, Os) -> [O|Os].
expand_opt_before_21(Os) ->
- [no_get_hd_tl,no_ssa_opt_record,no_utf8_atoms|Os].
+ [no_put_tuple2, no_get_hd_tl, no_ssa_opt_record,
+ no_utf8_atoms | expand_opt(no_bsm3, Os)].
%% format_error(ErrorDescriptor) -> string()
@@ -803,8 +814,6 @@ kernel_passes() ->
%% Optimizations that must be done after all other optimizations.
[{pass,sys_core_bsm},
{iff,dcbsm,{listing,"core_bsm"}},
- {pass,sys_core_dsetel},
- {iff,dsetel,{listing,"dsetel"}},
{iff,clint,?pass(core_lint_module)},
{iff,core,?pass(save_core_code)},
@@ -816,11 +825,21 @@ kernel_passes() ->
{pass,beam_kernel_to_ssa},
{iff,dssa,{listing,"ssa"}},
{iff,ssalint,{pass,beam_ssa_lint}},
- {unless,no_ssa_opt,{pass,beam_ssa_opt}},
- {iff,dssaopt,{listing,"ssaopt"}},
- {iff,ssalint,{pass,beam_ssa_lint}},
- {unless,no_recv_opt,{pass,beam_ssa_recv}},
- {iff,drecv,{listing,"recv"}},
+ {delay,
+ [{unless,no_share_opt,{pass,beam_ssa_share}},
+ {iff,dssashare,{listing,"ssashare"}},
+ {iff,ssalint,{pass,beam_ssa_lint}},
+ {unless,no_bsm_opt,{pass,beam_ssa_bsm}},
+ {iff,dssabsm,{listing,"ssabsm"}},
+ {iff,ssalint,{pass,beam_ssa_lint}},
+ {unless,no_fun_opt,{pass,beam_ssa_funs}},
+ {iff,dssafuns,{listing,"ssafuns"}},
+ {iff,ssalint,{pass,beam_ssa_lint}},
+ {unless,no_ssa_opt,{pass,beam_ssa_opt}},
+ {iff,dssaopt,{listing,"ssaopt"}},
+ {iff,ssalint,{pass,beam_ssa_lint}},
+ {unless,no_recv_opt,{pass,beam_ssa_recv}},
+ {iff,drecv,{listing,"recv"}}]},
{pass,beam_ssa_pre_codegen},
{iff,dprecg,{listing,"precodegen"}},
{iff,ssalint,{pass,beam_ssa_lint}},
@@ -839,20 +858,12 @@ asm_passes() ->
{iff,dblk,{listing,"block"}},
{unless,no_except,{pass,beam_except}},
{iff,dexcept,{listing,"except"}},
- {unless,no_bs_opt,{pass,beam_bs}},
- {iff,dbs,{listing,"bs"}},
- {pass,beam_split},
- {iff,dsplit,{listing,"split"}},
- {unless,no_dead,{pass,beam_dead}},
- {iff,ddead,{listing,"dead"}},
{unless,no_jopt,{pass,beam_jump}},
{iff,djmp,{listing,"jump"}},
{unless,no_peep_opt,{pass,beam_peep}},
{iff,dpeep,{listing,"peep"}},
{pass,beam_clean},
{iff,dclean,{listing,"clean"}},
- {unless,no_bsm_opt,{pass,beam_bsm}},
- {iff,dbsm,{listing,"bsm"}},
{unless,no_stack_trimming,{pass,beam_trim}},
{iff,dtrim,{listing,"trim"}},
{pass,beam_flatten}]},
@@ -861,7 +872,9 @@ asm_passes() ->
%% need to do a few clean-ups to code.
{iff,no_postopt,[{pass,beam_clean}]},
+ {iff,diffable,?pass(diffable)},
{pass,beam_z},
+ {iff,diffable,{listing,"S"}},
{iff,dz,{listing,"z"}},
{iff,dopt,{listing,"optimize"}},
{iff,'S',{listing,"S"}},
@@ -1005,11 +1018,17 @@ parse_module(_Code, St0) ->
end.
do_parse_module(DefEncoding, #compile{ifile=File,options=Opts,dir=Dir}=St) ->
+ SourceName0 = proplists:get_value(source, Opts, File),
+ SourceName = case member(deterministic, Opts) of
+ true -> filename:basename(SourceName0);
+ false -> SourceName0
+ end,
R = epp:parse_file(File,
- [{includes,[".",Dir|inc_paths(Opts)]},
- {macros,pre_defs(Opts)},
- {default_encoding,DefEncoding},
- extra]),
+ [{includes,[".",Dir|inc_paths(Opts)]},
+ {source_name, SourceName},
+ {macros,pre_defs(Opts)},
+ {default_encoding,DefEncoding},
+ extra]),
case R of
{ok,Forms,Extra} ->
Encoding = proplists:get_value(encoding, Extra),
@@ -1673,7 +1692,6 @@ effects_code_generation(Option) ->
binary -> false;
verbose -> false;
{cwd,_} -> false;
- {i,_} -> false;
{outdir, _} -> false;
_ -> true
end.
@@ -1914,6 +1932,39 @@ restore_expand_module([F|Fs]) ->
[F|restore_expand_module(Fs)];
restore_expand_module([]) -> [].
+%%%
+%%% Transform the BEAM code to make it more friendly for diffing: local
+%%% calls use function names instead of labels, and labels are numbered
+%%% relative to each function.
+%%%
+
+diffable(Code0, St) ->
+ {Mod,Exp,Attr,Fs0,NumLabels} = Code0,
+ EntryLabels0 = [{Entry,{Name,Arity}} ||
+ {function,Name,Arity,Entry,_} <- Fs0],
+ EntryLabels = maps:from_list(EntryLabels0),
+ Fs = [diffable_fix_function(F, EntryLabels) || F <- Fs0],
+ Code = {Mod,Exp,Attr,Fs,NumLabels},
+ {ok,Code,St}.
+
+diffable_fix_function({function,Name,Arity,Entry0,Is0}, LabelMap0) ->
+ Entry = maps:get(Entry0, LabelMap0),
+ {Is1,LabelMap} = diffable_label_map(Is0, 1, LabelMap0, []),
+ Fb = fun(Old) -> error({no_fb,Old}) end,
+ Is = beam_utils:replace_labels(Is1, [], LabelMap, Fb),
+ {function,Name,Arity,Entry,Is}.
+
+diffable_label_map([{label,Old}|Is], New, Map, Acc) ->
+ case Map of
+ #{Old:=NewLabel} ->
+ diffable_label_map(Is, New, Map, [{label,NewLabel}|Acc]);
+ #{} ->
+ diffable_label_map(Is, New+1, Map#{Old=>New}, [{label,New}|Acc])
+ end;
+diffable_label_map([I|Is], New, Map, Acc) ->
+ diffable_label_map(Is, New, Map, [I|Acc]);
+diffable_label_map([], _New, Map, Acc) ->
+ {Acc,Map}.
-spec options() -> 'ok'.
@@ -2034,10 +2085,7 @@ pre_load() ->
L = [beam_a,
beam_asm,
beam_block,
- beam_bs,
- beam_bsm,
beam_clean,
- beam_dead,
beam_dict,
beam_except,
beam_flatten,
@@ -2046,12 +2094,15 @@ pre_load() ->
beam_opcodes,
beam_peep,
beam_ssa,
+ beam_ssa_bsm,
beam_ssa_codegen,
+ beam_ssa_dead,
+ beam_ssa_funs,
beam_ssa_opt,
beam_ssa_pre_codegen,
beam_ssa_recv,
+ beam_ssa_share,
beam_ssa_type,
- beam_split,
beam_trim,
beam_utils,
beam_validator,
@@ -2069,7 +2120,6 @@ pre_load() ->
erl_scan,
sys_core_alias,
sys_core_bsm,
- sys_core_dsetel,
sys_core_fold,
v3_core,
v3_kernel],
diff --git a/lib/compiler/src/compiler.app.src b/lib/compiler/src/compiler.app.src
index 74529f7fef..a086a3a8d3 100644
--- a/lib/compiler/src/compiler.app.src
+++ b/lib/compiler/src/compiler.app.src
@@ -24,10 +24,7 @@
beam_a,
beam_asm,
beam_block,
- beam_bs,
- beam_bsm,
beam_clean,
- beam_dead,
beam_dict,
beam_disasm,
beam_except,
@@ -38,14 +35,17 @@
beam_opcodes,
beam_peep,
beam_ssa,
+ beam_ssa_bsm,
beam_ssa_codegen,
+ beam_ssa_dead,
+ beam_ssa_funs,
beam_ssa_lint,
beam_ssa_opt,
beam_ssa_pp,
beam_ssa_pre_codegen,
beam_ssa_recv,
+ beam_ssa_share,
beam_ssa_type,
- beam_split,
beam_trim,
beam_utils,
beam_validator,
@@ -65,7 +65,6 @@
rec_env,
sys_core_alias,
sys_core_bsm,
- sys_core_dsetel,
sys_core_fold,
sys_core_fold_lists,
sys_core_inline,
diff --git a/lib/compiler/src/erl_bifs.erl b/lib/compiler/src/erl_bifs.erl
index 71ab0e872a..94a5dfe012 100644
--- a/lib/compiler/src/erl_bifs.erl
+++ b/lib/compiler/src/erl_bifs.erl
@@ -32,6 +32,22 @@
%% Returns `true' if the function `Module:Name/Arity' does not
%% affect the state, nor depend on the state, although its
%% evaluation is not guaranteed to complete normally for all input.
+%%
+%% NOTE: There is no need to include every new pure BIF
+%% here. Including it here means that the value of the function
+%% will be evaluated at compile-time if the arguments are
+%% constant. If that optimization is not useful/desired, there is
+%% no need to include the new BIF here.
+%%
+%% Functions whose return value could conceivably change in a
+%% future version of the runtime system must NOT be included here.
+%%
+%%     Here are some examples of functions that should not be
+%% included: `term_to_binary/1', hashing functions, non-trivial
+%% encode/decode functions.
+%%
+%% When unsure whether a new BIF should be included here, the
+%% conservative safe choice is NOT to include it.
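+%%
+%%     For example, since max/2 is listed as pure below, a call such as
+%%     max(1, 2) with constant arguments can be folded to 2 at compile
+%%     time.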
-spec is_pure(atom(), atom(), arity()) -> boolean().
@@ -108,6 +124,7 @@ is_pure(erlang, list_to_atom, 1) -> true;
is_pure(erlang, list_to_binary, 1) -> true;
is_pure(erlang, list_to_float, 1) -> true;
is_pure(erlang, list_to_integer, 1) -> true;
+is_pure(erlang, list_to_integer, 2) -> true;
is_pure(erlang, list_to_pid, 1) -> true;
is_pure(erlang, list_to_tuple, 1) -> true;
is_pure(erlang, max, 2) -> true;
@@ -194,6 +211,7 @@ is_safe(erlang, is_float, 1) -> true;
is_safe(erlang, is_function, 1) -> true;
is_safe(erlang, is_integer, 1) -> true;
is_safe(erlang, is_list, 1) -> true;
+is_safe(erlang, is_map, 1) -> true;
is_safe(erlang, is_number, 1) -> true;
is_safe(erlang, is_pid, 1) -> true;
is_safe(erlang, is_port, 1) -> true;
diff --git a/lib/compiler/src/genop.tab b/lib/compiler/src/genop.tab
index f35ae09fe7..86590fad87 100755
--- a/lib/compiler/src/genop.tab
+++ b/lib/compiler/src/genop.tab
@@ -573,3 +573,26 @@ BEAM_FORMAT_NUMBER=0
## @doc Get the tail (or cdr) part of a list (a cons cell) from Source and
## put it into the register Tail.
163: get_tl/2
+
+# OTP 22
+
+## @spec put_tuple2 Destination Elements
+## @doc Build a tuple with the elements in the list Elements and put it
+## into the register Destination.
+164: put_tuple2/2
+
+## @spec bs_get_tail Ctx Dst Live
+## @doc Sets Dst to the tail of Ctx at the current position
+165: bs_get_tail/3
+
+## @spec bs_start_match3 Fail Bin Live Dst
+## @doc Starts a binary match sequence
+166: bs_start_match3/4
+
+## @spec bs_get_position Ctx Dst Live
+## @doc Sets Dst to the current position of Ctx
+167: bs_get_position/3
+
+## @spec bs_set_position Ctx Pos
+## @doc Sets the current position of Ctx to Pos
+168: bs_set_position/2
diff --git a/lib/compiler/src/sys_core_bsm.erl b/lib/compiler/src/sys_core_bsm.erl
index d7b26c3a56..685e807e65 100644
--- a/lib/compiler/src/sys_core_bsm.erl
+++ b/lib/compiler/src/sys_core_bsm.erl
@@ -24,161 +24,52 @@
-export([module/2,format_error/1]).
-include("core_parse.hrl").
--import(lists, [member/2,reverse/1,usort/1]).
-spec module(cerl:c_module(), [compile:option()]) -> {'ok', cerl:c_module()}.
-module(#c_module{defs=Ds0}=Mod, Opts) ->
- {Ds,Ws0} = function(Ds0, [], []),
- case member(bin_opt_info, Opts) of
- false ->
- {ok,Mod#c_module{defs=Ds}};
- true ->
- Ws1 = [make_warning(Where, What) || {Where,What} <- Ws0],
- Ws = usort(Ws1),
- {ok,Mod#c_module{defs=Ds},Ws}
- end.
+module(#c_module{defs=Ds}=Mod, _Opts) ->
+ {ok,Mod#c_module{defs=function(Ds)}}.
-function([{#c_var{name={F,Arity}}=Name,B0}|Fs], FsAcc, Ws0) ->
- try cerl_trees:mapfold(fun bsm_an/2, Ws0, B0) of
- {B,Ws} ->
- function(Fs, [{Name,B}|FsAcc], Ws)
+function([{#c_var{name={F,Arity}}=Name,B0}|Fs]) ->
+ try cerl_trees:map(fun bsm_reorder/1, B0) of
+ B -> [{Name,B} | function(Fs)]
catch
Class:Error:Stack ->
- io:fwrite("Function: ~w/~w\n", [F,Arity]),
- erlang:raise(Class, Error, Stack)
+ io:fwrite("Function: ~w/~w\n", [F,Arity]),
+ erlang:raise(Class, Error, Stack)
end;
-function([], Fs, Ws) ->
- {reverse(Fs),Ws}.
+function([]) ->
+ [].
-type error() :: atom().
-spec format_error(error()) -> nonempty_string().
-format_error(bin_opt_alias) ->
- "INFO: the '=' operator will prevent delayed sub binary optimization";
-format_error(bin_partition) ->
- "INFO: matching non-variables after a previous clause matching a variable "
- "will prevent delayed sub binary optimization";
-format_error(bin_var_used) ->
- "INFO: using a matched out sub binary will prevent "
- "delayed sub binary optimization";
-format_error(orig_bin_var_used_in_guard) ->
- "INFO: using the original binary variable in a guard will prevent "
- "delayed sub binary optimization";
-format_error(bin_var_used_in_guard) ->
- "INFO: using a matched out sub binary in a guard will prevent "
- "delayed sub binary optimization".
-
+format_error(_) -> error(badarg).
-%%%
-%%% Annotate bit syntax matching to faciliate optimization in further passes.
-%%%
+%%% Reorder bit syntax matching to facilitate optimization in further passes.
-bsm_an(Core0, Ws0) ->
- case bsm_an(Core0) of
- {ok,Core} ->
- {Core,Ws0};
- {ok,Core,W} ->
- {Core,[W|Ws0]}
- end.
+bsm_reorder(#c_case{arg=#c_var{}=V}=Case) ->
+ bsm_reorder_1([V], Case);
+bsm_reorder(#c_case{arg=#c_values{es=Es}}=Case) ->
+ bsm_reorder_1(Es, Case);
+bsm_reorder(Core) ->
+ Core.
-bsm_an(#c_case{arg=#c_var{}=V}=Case) ->
- bsm_an_1([V], Case);
-bsm_an(#c_case{arg=#c_values{es=Es}}=Case) ->
- bsm_an_1(Es, Case);
-bsm_an(Other) ->
- {ok,Other}.
-
-bsm_an_1(Vs0, #c_case{clauses=Cs0}=Case) ->
+bsm_reorder_1(Vs0, #c_case{clauses=Cs0}=Case) ->
case bsm_leftmost(Cs0) of
- none ->
- {ok,Case};
- 1 ->
- bsm_an_2(Vs0, Cs0, Case);
- Pos ->
- Vs = move_from_col(Pos, Vs0),
- Cs = [C#c_clause{pats=move_from_col(Pos, Ps)} ||
- #c_clause{pats=Ps}=C <- Cs0],
- bsm_an_2(Vs, Cs, Case)
- end.
-
-bsm_an_2(Vs, Cs, Case) ->
- try
- bsm_ensure_no_partition(Cs),
- {ok,bsm_do_an(Vs, Cs, Case)}
- catch
- throw:{problem,Where,What} ->
- {ok,Case,{Where,What}}
+ Pos when Pos > 0, Pos =/= none ->
+ Vs = core_lib:make_values(move_from_col(Pos, Vs0)),
+ Cs = [C#c_clause{pats=move_from_col(Pos, Ps)}
+ || #c_clause{pats=Ps}=C <- Cs0],
+ Case#c_case{arg=Vs,clauses=Cs};
+ _ ->
+ Case
end.
move_from_col(Pos, L) ->
{First,[Col|Rest]} = lists:split(Pos - 1, L),
[Col|First] ++ Rest.
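%% For example, move_from_col(3, [a,b,c,d]) yields [c,a,b,d]. Applied to
%% both the case argument and every clause's pattern list, a head such
%% as f(X, Y, <<Sz:8,T/binary>>) (an illustrative example) is reordered
%% so that the binary argument is matched first.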
-bsm_do_an([#c_var{name=Vname}=V0|Vs0], Cs0, Case) ->
- Cs = bsm_do_an_var(Vname, Cs0),
- V = bsm_annotate_for_reuse(V0),
- Vs = core_lib:make_values([V|Vs0]),
- Case#c_case{arg=Vs,clauses=Cs};
-bsm_do_an(_Vs, _Cs, Case) -> Case.
-
-bsm_do_an_var(V, [#c_clause{pats=[P|_],guard=G,body=B0}=C0|Cs]) ->
- case P of
- #c_var{name=VarName} ->
- case core_lib:is_var_used(V, G) of
- true -> bsm_problem(C0, orig_bin_var_used_in_guard);
- false -> ok
- end,
- case core_lib:is_var_used(VarName, G) of
- true -> bsm_problem(C0, bin_var_used_in_guard);
- false -> ok
- end,
- B1 = bsm_maybe_ctx_to_binary(VarName, B0),
- B = bsm_maybe_ctx_to_binary(V, B1),
- C = C0#c_clause{body=B},
- [C|bsm_do_an_var(V, Cs)];
- #c_alias{} ->
- case bsm_could_match_binary(P) of
- false ->
- [C0|bsm_do_an_var(V, Cs)];
- true ->
- bsm_problem(C0, bin_opt_alias)
- end;
- _ ->
- case bsm_could_match_binary(P) andalso bsm_is_var_used(V, G, B0) of
- false ->
- [C0|bsm_do_an_var(V, Cs)];
- true ->
- bsm_problem(C0, bin_var_used)
- end
- end;
-bsm_do_an_var(_, []) -> [].
-
-bsm_annotate_for_reuse(#c_var{anno=Anno}=Var) ->
- Var#c_var{anno=[reuse_for_context|Anno]}.
-
-bsm_is_var_used(V, G, B) ->
- core_lib:is_var_used(V, G) orelse core_lib:is_var_used(V, B).
-
-bsm_maybe_ctx_to_binary(V, B) ->
- case core_lib:is_var_used(V, B) andalso not previous_ctx_to_binary(V, B) of
- false ->
- B;
- true ->
- #c_seq{arg=#c_primop{name=#c_literal{val=bs_context_to_binary},
- args=[#c_var{name=V}]},
- body=B}
- end.
-
-previous_ctx_to_binary(V, Core) ->
- case Core of
- #c_seq{arg=#c_primop{name=#c_literal{val=bs_context_to_binary},
- args=[#c_var{name=V}]}} ->
- true;
- _ ->
- false
- end.
-
%% bsm_leftmost(Cs) -> none | ArgumentNumber
%% Find the leftmost argument that matches a nonempty binary.
%% Return either 'none' or the argument number (1-N).
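%%  For example (illustrative clauses only), given f(X, <<Y:8,T/binary>>)
%%  and f(X, Y), the leftmost binary-matching argument is number 2; if no
%%  clause matches a nonempty binary, 'none' is returned.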
@@ -200,94 +91,3 @@ bsm_leftmost_2([_|Ps], Cs, N, Pos) ->
bsm_leftmost_2(Ps, Cs, N+1, Pos);
bsm_leftmost_2([], Cs, _, Pos) ->
bsm_leftmost_1(Cs, Pos).
-
-%% bsm_ensure_no_partition(Cs) -> ok (exception if problem)
-%% There must only be a single bs_start_match2 instruction if we
-%% are to reuse the binary variable for the match context.
-%%
-%% To make sure that there is only a single bs_start_match2
-%% instruction, we will check for partitions such as:
-%%
-%% foo(<<...>>) -> ...
-%% foo(<Variable>) when ... -> ...
-%% foo(<Non-variable pattern>) ->
-%%
-%% If there is such partition, we reject the optimization.
-
-bsm_ensure_no_partition(Cs) ->
- bsm_ensure_no_partition_1(Cs, before).
-
-%% Loop through each clause.
-bsm_ensure_no_partition_1([#c_clause{pats=Ps,guard=G}|Cs], State0) ->
- State = bsm_ensure_no_partition_2(Ps, G, State0),
- case State of
- 'after' ->
- bsm_ensure_no_partition_after(Cs);
- _ ->
- ok
- end,
- bsm_ensure_no_partition_1(Cs, State);
-bsm_ensure_no_partition_1([], _) -> ok.
-
-bsm_ensure_no_partition_2([#c_binary{}|_], _, _State) ->
- within;
-bsm_ensure_no_partition_2([#c_alias{}=Alias|_], N, State) ->
- %% Retrieve the real pattern that the alias refers to and check that.
- P = bsm_real_pattern(Alias),
- bsm_ensure_no_partition_2([P], N, State);
-bsm_ensure_no_partition_2([_|_], _, before=State) ->
- %% No binary matching yet - therefore no partition.
- State;
-bsm_ensure_no_partition_2([P|_], _, State) ->
- case bsm_could_match_binary(P) of
- false ->
- State;
- true ->
- %% The pattern P *may* match a binary, so we must update the state.
- %% (P must be a variable.)
- 'after'
- end.
-
-bsm_ensure_no_partition_after([#c_clause{pats=Ps}=C|Cs]) ->
- case Ps of
- [#c_var{}|_] ->
- bsm_ensure_no_partition_after(Cs);
- _ ->
- bsm_problem(C, bin_partition)
- end;
-bsm_ensure_no_partition_after([]) -> ok.
-
-bsm_could_match_binary(#c_alias{pat=P}) -> bsm_could_match_binary(P);
-bsm_could_match_binary(#c_cons{}) -> false;
-bsm_could_match_binary(#c_tuple{}) -> false;
-bsm_could_match_binary(#c_literal{val=Lit}) -> is_bitstring(Lit);
-bsm_could_match_binary(_) -> true.
-
-bsm_real_pattern(#c_alias{pat=P}) -> bsm_real_pattern(P);
-bsm_real_pattern(P) -> P.
-
-bsm_problem(Where, What) ->
- throw({problem,Where,What}).
-
-make_warning(Core, Term) ->
- case should_suppress_warning(Core) of
- true ->
- ok;
- false ->
- Anno = cerl:get_ann(Core),
- Line = get_line(Anno),
- File = get_file(Anno),
- {File,[{Line,?MODULE,Term}]}
- end.
-
-should_suppress_warning(Core) ->
- Ann = cerl:get_ann(Core),
- member(compiler_generated, Ann).
-
-get_line([Line|_]) when is_integer(Line) -> Line;
-get_line([_|T]) -> get_line(T);
-get_line([]) -> none.
-
-get_file([{file,File}|_]) -> File;
-get_file([_|T]) -> get_file(T);
-get_file([]) -> "no_file". % should not happen
diff --git a/lib/compiler/src/sys_core_dsetel.erl b/lib/compiler/src/sys_core_dsetel.erl
deleted file mode 100644
index 9ab83c210f..0000000000
--- a/lib/compiler/src/sys_core_dsetel.erl
+++ /dev/null
@@ -1,360 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2002-2018. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-%% Purpose : Using dsetelement to make multiple-field record updates
-%% faster.
-
-%% The expansion of record field updates, when more than one field is
-%% updated, but not a majority of the fields, will create a sequence of
-%% calls to 'erlang:setelement(Index, Value, Tuple)' where Tuple in the
-%% first call is the original record tuple, and in the subsequent calls
-%% Tuple is the result of the previous call. Furthermore, all Index
-%% values are constant positive integers, and the first call to
-%% 'setelement' will have the greatest index. Thus all the following
-%% calls do not actually need to test at run-time whether Tuple has type
-%% tuple, nor that the index is within the tuple bounds.
-%%
-%% Since this introduces destructive updates in the Core Erlang code, it
-%% must be done as a last stage before going to lower-level code.
-%%
-%% NOTE: Because there are currently no write barriers in the system,
-%% this kind of optimization can only be done when we are sure that
-%% garbage collection will not be triggered between the creation of the
-%% tuple and the destructive updates - otherwise we might insert
-%% pointers from an older generation to a newer.
-%%
-%% The rewriting is done as follows:
-%%
-%% let X1 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in call 'erlang':'setelement(3, X1, Value2)
-%% =>
-%% let X1 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in do primop dsetelement(3, X1, Value2)
-%% X1
-%% and
-%% let X1 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in let X2 = call 'erlang':'setelement(3, X1, Value2)
-%% in ...
-%% =>
-%% let X2 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in do primop 'dsetelement(3, X2, Value2)
-%% ...
-%% if X1 is used exactly once.
-%% Thus, we need to track variable usage.
-%%
-
--module(sys_core_dsetel).
-
--export([module/2]).
-
--include("core_parse.hrl").
-
--spec module(cerl:c_module(), [compile:option()]) -> {'ok', cerl:c_module()}.
-
-module(M0, _Options) ->
- M = visit_module(M0),
- {ok,M}.
-
-visit_module(#c_module{defs=Ds0}=R) ->
- Env = #{},
- Ds = visit_module_1(Ds0, Env, []),
- R#c_module{defs=Ds}.
-
-visit_module_1([{Name,F0}|Fs], Env, Acc) ->
- try visit(Env, F0) of
- {F,_} ->
- visit_module_1(Fs, Env, [{Name,F}|Acc])
- catch
- Class:Error:Stack ->
- #c_var{name={Func,Arity}} = Name,
- io:fwrite("Function: ~w/~w\n", [Func,Arity]),
- erlang:raise(Class, Error, Stack)
- end;
-visit_module_1([], _, Acc) ->
- lists:reverse(Acc).
-
-visit(Env, #c_var{name={_,_}}=R) ->
- %% Ignore local function name.
- {R, Env};
-visit(Env0, #c_var{name=X}=R) ->
- %% There should not be any free variables. If there are,
- %% the case will fail with an exception.
- case Env0 of
- #{X:=N} ->
- {R, Env0#{X:=N+1}}
- end;
-visit(Env, #c_literal{}=R) ->
- {R, Env};
-visit(Env0, #c_tuple{es=Es0}=R) ->
- {Es1,Env1} = visit_list(Env0, Es0),
- {R#c_tuple{es=Es1}, Env1};
-visit(Env0, #c_map{es=Es0}=R) ->
- {Es1,Env1} = visit_list(Env0, Es0),
- {R#c_map{es=Es1}, Env1};
-visit(Env0, #c_map_pair{key=K0,val=V0}=R) ->
- {K,Env1} = visit(Env0, K0),
- {V,Env2} = visit(Env1, V0),
- {R#c_map_pair{key=K,val=V}, Env2};
-visit(Env0, #c_cons{hd=H0,tl=T0}=R) ->
- {H1,Env1} = visit(Env0, H0),
- {T1,Env2} = visit(Env1, T0),
- {R#c_cons{hd=H1,tl=T1}, Env2};
-visit(Env0, #c_binary{segments=Segs}=R) ->
- Env = visit_bin_segs(Env0, Segs),
- {R, Env};
-visit(Env0, #c_values{es=Es0}=R) ->
- {Es1,Env1} = visit_list(Env0, Es0),
- {R#c_values{es=Es1}, Env1};
-visit(Env0, #c_fun{vars=Vs, body=B0}=R) ->
- {Xs, Env1} = bind_vars(Vs, Env0),
- {B1,Env2} = visit(Env1, B0),
- {R#c_fun{body=B1}, restore_vars(Xs, Env0, Env2)};
-visit(Env0, #c_let{vars=Vs, arg=A0, body=B0}=R) ->
- {A1,Env1} = visit(Env0, A0),
- {Xs,Env2} = bind_vars(Vs, Env1),
- {B1,Env3} = visit(Env2, B0),
- rewrite(R#c_let{arg=A1,body=B1}, Env3, restore_vars(Xs, Env1, Env3));
-visit(Env0, #c_seq{arg=A0, body=B0}=R) ->
- {A1,Env1} = visit(Env0, A0),
- {B1,Env2} = visit(Env1, B0),
- {R#c_seq{arg=A1,body=B1}, Env2};
-visit(Env0, #c_case{arg=A0,clauses=Cs0}=R) ->
- {A1,Env1} = visit(Env0, A0),
- {Cs1,Env2} = visit_list(Env1, Cs0),
- {R#c_case{arg=A1,clauses=Cs1}, Env2};
-visit(Env0, #c_clause{pats=Ps,guard=G0,body=B0}=R) ->
- {Vs, Env1} = visit_pats(Ps, Env0),
- {G1,Env2} = visit(Env1, G0),
- {B1,Env3} = visit(Env2, B0),
- {R#c_clause{guard=G1,body=B1}, restore_vars(Vs, Env0, Env3)};
-visit(Env0, #c_receive{clauses=Cs0,timeout=T0,action=A0}=R) ->
- {T1,Env1} = visit(Env0, T0),
- {Cs1,Env2} = visit_list(Env1, Cs0),
- {A1,Env3} = visit(Env2, A0),
- {R#c_receive{clauses=Cs1,timeout=T1,action=A1}, Env3};
-visit(Env0, #c_apply{op=Op0, args=As0}=R) ->
- {Op1,Env1} = visit(Env0, Op0),
- {As1,Env2} = visit_list(Env1, As0),
- {R#c_apply{op=Op1,args=As1}, Env2};
-visit(Env0, #c_call{module=M0,name=N0,args=As0}=R) ->
- {M1,Env1} = visit(Env0, M0),
- {N1,Env2} = visit(Env1, N0),
- {As1,Env3} = visit_list(Env2, As0),
- {R#c_call{module=M1,name=N1,args=As1}, Env3};
-visit(Env0, #c_primop{name=N0, args=As0}=R) ->
- {N1,Env1} = visit(Env0, N0),
- {As1,Env2} = visit_list(Env1, As0),
- {R#c_primop{name=N1,args=As1}, Env2};
-visit(Env0, #c_try{arg=E0, vars=Vs, body=B0, evars=Evs, handler=H0}=R) ->
- {E1,Env1} = visit(Env0, E0),
- {Xs, Env2} = bind_vars(Vs, Env1),
- {B1,Env3} = visit(Env2, B0),
- Env4 = restore_vars(Xs, Env1, Env3),
- {Ys, Env5} = bind_vars(Evs, Env4),
- {H1,Env6} = visit(Env5, H0),
- {R#c_try{arg=E1,body=B1,handler=H1}, restore_vars(Ys, Env4, Env6)};
-visit(Env0, #c_catch{body=B0}=R) ->
- {B1,Env1} = visit(Env0, B0),
- {R#c_catch{body=B1}, Env1};
-visit(Env0, #c_letrec{defs=Ds0,body=B0}=R) ->
- {Xs, Env1} = bind_vars([V || {V,_} <- Ds0], Env0),
- {Ds1,Env2} = visit_def_list(Env1, Ds0),
- {B1,Env3} = visit(Env2, B0),
- {R#c_letrec{defs=Ds1,body=B1}, restore_vars(Xs, Env0, Env3)}.
-%% The following general code for handling modules is slow if a module
-%% contains very many functions. There is special code in visit_module/1
-%% which is much faster.
-%% visit(Env0, #c_module{defs=D0}=R) ->
-%% {R1,Env1} = visit(Env0, #c_letrec{defs=D0,body=#c_nil{}}),
-%% {R#c_module{defs=R1#c_letrec.defs}, Env1};
-
-visit_list(Env, L) ->
- lists:mapfoldl(fun (E, A) -> visit(A, E) end, Env, L).
-
-visit_def_list(Env, L) ->
- lists:mapfoldl(fun ({Name,V0}, E0) ->
- {V1,E1} = visit(E0, V0),
- {{Name,V1}, E1}
- end, Env, L).
-
-visit_bin_segs(Env, Segs) ->
- lists:foldl(fun (#c_bitstr{val=Val,size=Sz}, E0) ->
- {_, E1} = visit(E0, Val),
- {_, E2} = visit(E1, Sz),
- E2
- end, Env, Segs).
-
-bind_vars(Vs, Env) ->
- bind_vars(Vs, Env, []).
-
-bind_vars([#c_var{name=X}|Vs], Env0, Xs)->
- bind_vars(Vs, Env0#{X=>0}, [X|Xs]);
-bind_vars([], Env,Xs) ->
- {Xs, Env}.
-
-visit_pats(Ps, Env) ->
- visit_pats(Ps, Env, []).
-
-visit_pats([P|Ps], Env0, Vs0) ->
- {Vs1, Env1} = visit_pat(Env0, P, Vs0),
- visit_pats(Ps, Env1, Vs1);
-visit_pats([], Env, Vs) ->
- {Vs, Env}.
-
-visit_pat(Env0, #c_var{name=V}, Vs) ->
- {[V|Vs], Env0#{V=>0}};
-visit_pat(Env0, #c_tuple{es=Es}, Vs) ->
- visit_pats(Es, Env0, Vs);
-visit_pat(Env0, #c_map{es=Es}, Vs) ->
- visit_pats(Es, Env0, Vs);
-visit_pat(Env0, #c_map_pair{op=#c_literal{val=exact},key=V,val=K}, Vs0) ->
- {Vs1, Env1} = visit_pat(Env0, V, Vs0),
- visit_pat(Env1, K, Vs1);
-visit_pat(Env0, #c_cons{hd=H,tl=T}, Vs0) ->
- {Vs1, Env1} = visit_pat(Env0, H, Vs0),
- visit_pat(Env1, T, Vs1);
-visit_pat(Env0, #c_binary{segments=Segs}, Vs) ->
- visit_pats(Segs, Env0, Vs);
-visit_pat(Env0, #c_bitstr{val=Val,size=Sz}, Vs0) ->
- {Vs1, Env1} =
- case Sz of
- #c_var{name=V} ->
- %% We don't tolerate free variables.
- case Env0 of
- #{V:=N} ->
- {Vs0, Env0#{V:=N+1}}
- end;
- _ ->
- visit_pat(Env0, Sz, Vs0)
- end,
- visit_pat(Env1, Val, Vs1);
-visit_pat(Env0, #c_alias{pat=P,var=#c_var{name=V}}, Vs) ->
- visit_pat(Env0#{V=>0}, P, [V|Vs]);
-visit_pat(Env, #c_literal{}, Vs) ->
- {Vs, Env}.
-
-restore_vars([V|Vs], Env0, Env1) ->
- case Env0 of
- #{V:=N} ->
- restore_vars(Vs, Env0, Env1#{V=>N});
- _ ->
- restore_vars(Vs, Env0, maps:remove(V, Env1))
- end;
-restore_vars([], _, Env1) ->
- Env1.
-
-
-%% let X1 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in call 'erlang':'setelement(3, X1, Value2)
-%% =>
-%% let X1 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in do primop dsetelement(3, X1, Value2)
-%% X1
-
-rewrite(#c_let{vars=[#c_var{name=X}=V]=Vs,
- arg=#c_call{module=#c_literal{val='erlang'},
- name=#c_literal{val='setelement'},
- args=[#c_literal{val=Index1}, _Tuple, _Val1]
- }=A,
- body=#c_call{anno=Banno,module=#c_literal{val='erlang'},
- name=#c_literal{val='setelement'},
- args=[#c_literal{val=Index2},
- #c_var{name=X},
- Val2]
- }
- }=R,
- _BodyEnv, FinalEnv)
- when is_integer(Index1), is_integer(Index2), Index2 > 0, Index1 > Index2 ->
- case is_safe(Val2) of
- true ->
- {R#c_let{vars=Vs,
- arg=A,
- body=#c_seq{arg=#c_primop{
- anno=Banno,
- name=#c_literal{val='dsetelement'},
- args=[#c_literal{val=Index2},
- V,
- Val2]},
- body=V}
- },
- FinalEnv};
- false ->
- {R, FinalEnv}
- end;
-
-%% let X1 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in let X2 = 'erlang':'setelement(3, X1, Value2)
-%% in ...
-%% =>
-%% let X2 = call 'erlang':'setelement(5, Tuple, Value1)
-%% in do primop dsetelement(3, X2, Value2)
-%% ...
-%% if X1 is used exactly once.
-
-rewrite(#c_let{vars=[#c_var{name=X1}],
- arg=#c_call{module=#c_literal{val='erlang'},
- name=#c_literal{val='setelement'},
- args=[#c_literal{val=Index1}, _Tuple, _Val1]
- }=A,
- body=#c_let{vars=[#c_var{}=V]=Vs,
- arg=#c_call{anno=Banno,
- module=#c_literal{val='erlang'},
- name=#c_literal{val='setelement'},
- args=[#c_literal{val=Index2},
- #c_var{name=X1},
- Val2]},
- body=B}
- }=R,
- BodyEnv, FinalEnv)
- when is_integer(Index1), is_integer(Index2), Index2 > 0, Index1 > Index2 ->
- case is_single_use(X1, BodyEnv) andalso is_safe(Val2) of
- true ->
- {R#c_let{vars=Vs,
- arg=A,
- body=#c_seq{arg=#c_primop{
- anno=Banno,
- name=#c_literal{val='dsetelement'},
- args=[#c_literal{val=Index2},
- V,
- Val2]},
- body=B}
- },
- FinalEnv};
- false ->
- {R, FinalEnv}
- end;
-
-rewrite(R, _, FinalEnv) ->
- {R, FinalEnv}.
-
-%% is_safe(CoreExpr) -> true|false
-%% Determines whether the Core expression can cause a GC collection at run-time.
-%% Note: Assumes that the constant pool is turned on.
-
-is_safe(#c_var{}) -> true;
-is_safe(#c_literal{}) -> true;
-is_safe(_) -> false.
-
-is_single_use(V, Env) ->
- case Env of
- #{V:=1} ->
- true;
- _ ->
- false
- end.
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index d848cd8f19..7e219da0af 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -961,18 +961,12 @@ fold_lit_args(Call, Module, Name, Args0) ->
%%
fold_non_lit_args(Call, erlang, is_boolean, [Arg], Sub) ->
eval_is_boolean(Call, Arg, Sub);
-fold_non_lit_args(Call, erlang, element, [Arg1,Arg2], Sub) ->
- eval_element(Call, Arg1, Arg2, Sub);
fold_non_lit_args(Call, erlang, length, [Arg], _) ->
eval_length(Call, Arg);
fold_non_lit_args(Call, erlang, '++', [Arg1,Arg2], _) ->
eval_append(Call, Arg1, Arg2);
fold_non_lit_args(Call, lists, append, [Arg1,Arg2], _) ->
eval_append(Call, Arg1, Arg2);
-fold_non_lit_args(Call, erlang, setelement, [Arg1,Arg2,Arg3], _) ->
- eval_setelement(Call, Arg1, Arg2, Arg3);
-fold_non_lit_args(Call, erlang, is_record, [Arg1,Arg2,Arg3], Sub) ->
- eval_is_record(Call, Arg1, Arg2, Arg3, Sub);
fold_non_lit_args(Call, erlang, is_function, [Arg1], Sub) ->
eval_is_function_1(Call, Arg1, Sub);
fold_non_lit_args(Call, erlang, is_function, [Arg1,Arg2], Sub) ->
@@ -1141,96 +1135,6 @@ eval_append(Call, #c_cons{anno=Anno,hd=H,tl=T}, List) ->
eval_append(Call, X, Y) ->
Call#c_call{args=[X,Y]}. %Rebuild call arguments.
-%% eval_element(Call, Pos, Tuple, Types) -> Val.
-%% Evaluates element/2 if the position Pos is a literal and
-%% the shape of the tuple Tuple is known.
-%%
-eval_element(Call, #c_literal{val=Pos}, Tuple, Types)
- when is_integer(Pos) ->
- case get_type(Tuple, Types) of
- none ->
- Call;
- Type ->
- Es = case cerl:is_c_tuple(Type) of
- false -> [];
- true -> cerl:tuple_es(Type)
- end,
- if
- 1 =< Pos, Pos =< length(Es) ->
- El = lists:nth(Pos, Es),
- try
- cerl:set_ann(pat_to_expr(El), [compiler_generated])
- catch
- throw:impossible ->
- Call
- end;
- true ->
- %% Index outside tuple or not a tuple.
- eval_failure(Call, badarg)
- end
- end;
-eval_element(Call, Pos, Tuple, Sub) ->
- case is_int_type(Pos, Sub) =:= no orelse
- is_tuple_type(Tuple, Sub) =:= no of
- true ->
- eval_failure(Call, badarg);
- false ->
- Call
- end.
-
-%% eval_is_record(Call, Var, Tag, Size, Types) -> Val.
-%% Evaluates is_record/3 using type information.
-%%
-eval_is_record(Call, Term, #c_literal{val=NeededTag},
- #c_literal{val=Size}, Types) ->
- case get_type(Term, Types) of
- none ->
- Call;
- Type ->
- Es = case cerl:is_c_tuple(Type) of
- false -> [];
- true -> cerl:tuple_es(Type)
- end,
- case Es of
- [#c_literal{val=Tag}|_] ->
- Bool = Tag =:= NeededTag andalso
- length(Es) =:= Size,
- #c_literal{val=Bool};
- _ ->
- #c_literal{val=false}
- end
- end;
-eval_is_record(Call, _, _, _, _) -> Call.
-
-%% eval_setelement(Call, Pos, Tuple, NewVal) -> Core.
-%% Evaluates setelement/3 if position Pos is an integer
-%% and the shape of the tuple Tuple is known.
-%%
-eval_setelement(Call, #c_literal{val=Pos}, Tuple, NewVal)
- when is_integer(Pos) ->
- case cerl:is_data(Tuple) of
- false ->
- Call;
- true ->
- Es0 = case cerl:is_c_tuple(Tuple) of
- false -> [];
- true -> cerl:tuple_es(Tuple)
- end,
- if
- 1 =< Pos, Pos =< length(Es0) ->
- Es = eval_setelement_1(Pos, Es0, NewVal),
- cerl:update_c_tuple(Tuple, Es);
- true ->
- eval_failure(Call, badarg)
- end
- end;
-eval_setelement(Call, _, _, _) -> Call.
-
-eval_setelement_1(1, [_|T], NewVal) ->
- [NewVal|T];
-eval_setelement_1(Pos, [H|T], NewVal) when Pos > 1 ->
- [H|eval_setelement_1(Pos-1, T, NewVal)].
-
%% eval_failure(Call, Reason) -> Core.
%% Warn for a call that will fail and replace the call with
%% a call to erlang:error(Reason).
@@ -1290,16 +1194,15 @@ clause(#c_clause{pats=Ps0}=Cl, Cexpr, Ctxt, Sub0) ->
end.
clause_1(#c_clause{guard=G0,body=B0}=Cl, Ps1, Cexpr, Ctxt, Sub1) ->
- Sub2 = update_types(Cexpr, Ps1, Sub1),
GSub = case {Cexpr,Ps1,G0} of
{_,_,#c_literal{}} ->
%% No need for substitution tricks when the guard
%% does not contain any variables.
- Sub2;
+ Sub1;
{#c_var{name='_'},_,_} ->
%% In a 'receive', Cexpr is the variable '_', which represents the
%% message being matched. We must NOT do any extra substitutions.
- Sub2;
+ Sub1;
{#c_var{},[#c_var{}=Var],_} ->
%% The idea here is to optimize expressions such as
%%
@@ -1321,16 +1224,16 @@ clause_1(#c_clause{guard=G0,body=B0}=Cl, Ps1, Cexpr, Ctxt, Sub1) ->
%%
case cerl:is_c_fname(Cexpr) of
false ->
- sub_set_var(Var, Cexpr, Sub2);
+ sub_set_var(Var, Cexpr, Sub1);
true ->
%% We must not copy funs, and especially not into guards.
- Sub2
+ Sub1
end;
_ ->
- Sub2
+ Sub1
end,
G1 = guard(G0, GSub),
- B1 = body(B0, Ctxt, Sub2),
+ B1 = body(B0, Ctxt, Sub1),
Cl#c_clause{pats=Ps1,guard=G1,body=B1}.
%% let_substs(LetVars, LetArg, Sub) -> {[Var],[Val],Sub}.
@@ -1414,8 +1317,7 @@ pattern(#c_binary{segments=V0}=Pat, Isub, Osub0) ->
{Pat#c_binary{segments=V1},Osub1};
pattern(#c_alias{var=V0,pat=P0}=Pat, Isub, Osub0) ->
{V1,Osub1} = pattern(V0, Isub, Osub0),
- {P1,Osub2} = pattern(P0, Isub, Osub1),
- Osub = update_types(V1, [P1], Osub2),
+ {P1,Osub} = pattern(P0, Isub, Osub1),
{Pat#c_alias{var=V1,pat=P1},Osub}.
map_pair_pattern_list(Ps0, Isub, Osub0) ->
@@ -2137,14 +2039,9 @@ case_expand_var(E, #sub{t=Tdb}) ->
%% encountered.
coerce_to_data(C) ->
- case cerl:is_c_alias(C) of
- false ->
- case cerl:is_data(C) orelse cerl:is_c_var(C) of
- true -> C;
- false -> throw(impossible)
- end;
- true ->
- coerce_to_data(cerl:alias_pat(C))
+ case cerl:is_data(C) orelse cerl:is_c_var(C) of
+ true -> C;
+ false -> throw(impossible)
end.
%% case_opt_nomatch(E, Clauses, LitExpr) -> Clauses'
@@ -2667,12 +2564,20 @@ opt_build_stacktrace(#c_let{vars=[#c_var{name=Cooked}],
#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=raise},
args=[Class,Exp,#c_var{name=Cooked}]} ->
- %% The stacktrace is only used in a call to erlang:raise/3.
- %% There is no need to build the stacktrace. Replace the
- %% call to erlang:raise/3 with the the raw_raise/3 instruction,
- %% which will use a raw stacktrace.
- #c_primop{name=#c_literal{val=raw_raise},
- args=[Class,Exp,RawStk]};
+ case core_lib:is_var_used(Cooked, #c_cons{hd=Class,tl=Exp}) of
+ true ->
+ %% Not safe. The stacktrace is used in the class or
+ %% reason.
+ Let;
+ false ->
+ %% The stacktrace is only used in the last
+ %% argument for erlang:raise/3. There is no need
+ %% to build the stacktrace. Replace the call to
+            %% erlang:raise/3 with the raw_raise/3
+ %% instruction, which will use a raw stacktrace.
+ #c_primop{name=#c_literal{val=raw_raise},
+ args=[Class,Exp,RawStk]}
+ end;
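+        %% A hypothetical source-level example that takes the 'true'
+        %% branch above, because the cooked stacktrace is also used in
+        %% the reason term and therefore really has to be built:
+        %%
+        %%     try f() catch C:R:Stk -> erlang:raise(C, {R,Stk}, Stk) end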
#c_let{vars=[#c_var{name=V}],arg=Arg,body=B0} when V =/= Cooked ->
case core_lib:is_var_used(Cooked, Arg) of
false ->
@@ -3132,14 +3037,6 @@ is_int_type(Var, Sub) ->
C -> yes_no(cerl:is_c_int(C))
end.
--spec is_tuple_type(cerl:cerl(), sub()) -> yes_no_maybe().
-
-is_tuple_type(Var, Sub) ->
- case get_type(Var, Sub) of
- none -> maybe;
- C -> yes_no(cerl:is_c_tuple(C))
- end.
-
yes_no(true) -> yes;
yes_no(false) -> no.
@@ -3201,27 +3098,23 @@ returns_integer(_, _) -> false.
%% update_types(Expr, Pattern, Sub) -> Sub'
%% Update the type database.
--spec update_types(cerl:cerl(), [type_info()], sub()) -> sub().
+-spec update_types(cerl:c_var(), [type_info()], sub()) -> sub().
-update_types(Expr, Pat, #sub{t=Tdb0}=Sub) ->
- Tdb = update_types_1(Expr, Pat, Tdb0),
+update_types(#c_var{name=V}, Pat, #sub{t=Tdb0}=Sub) ->
+ Tdb = update_types_1(V, Pat, Tdb0),
Sub#sub{t=Tdb}.
-update_types_1(#c_var{name=V}, Pat, Types) ->
- update_types_2(V, Pat, Types);
-update_types_1(_, _, Types) -> Types.
-
-update_types_2(V, [#c_tuple{}=P], Types) ->
+update_types_1(V, [#c_tuple{}=P], Types) ->
Types#{V=>P};
-update_types_2(V, [#c_literal{val=Bool}], Types) when is_boolean(Bool) ->
+update_types_1(V, [#c_literal{val=Bool}], Types) when is_boolean(Bool) ->
Types#{V=>bool};
-update_types_2(V, [#c_fun{vars=Vars}], Types) ->
+update_types_1(V, [#c_fun{vars=Vars}], Types) ->
Types#{V=>{'fun',length(Vars)}};
-update_types_2(V, [#c_var{name={_,Arity}}], Types) ->
+update_types_1(V, [#c_var{name={_,Arity}}], Types) ->
Types#{V=>{'fun',Arity}};
-update_types_2(V, [Type], Types) when is_atom(Type) ->
+update_types_1(V, [Type], Types) when is_atom(Type) ->
Types#{V=>Type};
-update_types_2(_, _, Types) -> Types.
+update_types_1(_, _, Types) -> Types.
%% kill_types(V, Tdb) -> Tdb'
%%  Kill any entries that reference the variable,
diff --git a/lib/compiler/src/sys_core_fold_lists.erl b/lib/compiler/src/sys_core_fold_lists.erl
index 9867fab46a..e93b435011 100644
--- a/lib/compiler/src/sys_core_fold_lists.erl
+++ b/lib/compiler/src/sys_core_fold_lists.erl
@@ -37,22 +37,27 @@ call(#c_call{anno=Anno}, lists, all, [Arg1,Arg2]) ->
Xs = #c_var{name='Xs'},
X = #c_var{name='X'},
Err1 = #c_tuple{es=[#c_literal{val='case_clause'}, X]},
- CC1 = #c_clause{pats=[#c_literal{val=true}], guard=#c_literal{val=true},
+ CC1 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=true}], guard=#c_literal{val=true},
body=#c_apply{anno=Anno, op=Loop, args=[Xs]}},
- CC2 = #c_clause{pats=[#c_literal{val=false}], guard=#c_literal{val=true},
+ CC2 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=false}], guard=#c_literal{val=true},
body=#c_literal{val=false}},
- CC3 = #c_clause{pats=[X], guard=#c_literal{val=true},
+ CC3 = #c_clause{anno=Anno,
+ pats=[X], guard=#c_literal{val=true},
body=match_fail(Anno, Err1)},
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=#c_case{arg=#c_apply{anno=Anno, op=F, args=[X]},
clauses = [CC1, CC2, CC3]}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=1}]},
body=#c_literal{val=true}},
Err2 = #c_tuple{es=[#c_literal{val='function_clause'}, F, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno, pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^all',1}}|Anno], Err2)},
Fun = #c_fun{vars=[Xs],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
@@ -66,16 +71,21 @@ call(#c_call{anno=Anno}, lists, any, [Arg1,Arg2]) ->
Xs = #c_var{name='Xs'},
X = #c_var{name='X'},
Err1 = #c_tuple{es=[#c_literal{val='case_clause'}, X]},
- CC1 = #c_clause{pats=[#c_literal{val=true}], guard=#c_literal{val=true},
+ CC1 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=true}], guard=#c_literal{val=true},
body=#c_literal{val=true}},
- CC2 = #c_clause{pats=[#c_literal{val=false}], guard=#c_literal{val=true},
+ CC2 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=false}], guard=#c_literal{val=true},
body=#c_apply{anno=Anno, op=Loop, args=[Xs]}},
- CC3 = #c_clause{pats=[X], guard=#c_literal{val=true},
+ CC3 = #c_clause{anno=Anno,
+ pats=[X], guard=#c_literal{val=true},
body=match_fail(Anno, Err1)},
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=#c_case{arg=#c_apply{anno=Anno, op=F, args=[X]},
clauses = [CC1, CC2, CC3]}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=1}]},
@@ -94,16 +104,17 @@ call(#c_call{anno=Anno}, lists, foreach, [Arg1,Arg2]) ->
F = #c_var{name='F'},
Xs = #c_var{name='Xs'},
X = #c_var{name='X'},
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=#c_seq{arg=#c_apply{anno=Anno, op=F, args=[X]},
body=#c_apply{anno=Anno, op=Loop, args=[Xs]}}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno, pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=1}]},
body=#c_literal{val=ok}},
Err = #c_tuple{es=[#c_literal{val='function_clause'}, F, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno, pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^foreach',1}}|Anno], Err)},
Fun = #c_fun{vars=[Xs],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
@@ -117,7 +128,8 @@ call(#c_call{anno=Anno}, lists, map, [Arg1,Arg2]) ->
Xs = #c_var{name='Xs'},
X = #c_var{name='X'},
H = #c_var{name='H'},
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=#c_let{vars=[H], arg=#c_apply{anno=Anno,
op=F,
args=[X]},
@@ -126,7 +138,7 @@ call(#c_call{anno=Anno}, lists, map, [Arg1,Arg2]) ->
tl=#c_apply{anno=Anno,
op=Loop,
args=[Xs]}}}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno, pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=1}]},
@@ -146,7 +158,8 @@ call(#c_call{anno=Anno}, lists, flatmap, [Arg1,Arg2]) ->
Xs = #c_var{name='Xs'},
X = #c_var{name='X'},
H = #c_var{name='H'},
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=#c_let{vars=[H],
arg=#c_apply{anno=Anno, op=F, args=[X]},
body=#c_call{anno=[compiler_generated|Anno],
@@ -156,13 +169,13 @@ call(#c_call{anno=Anno}, lists, flatmap, [Arg1,Arg2]) ->
#c_apply{anno=Anno,
op=Loop,
args=[Xs]}]}}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno, pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=1}]},
body=#c_literal{val=[]}},
Err = #c_tuple{es=[#c_literal{val='function_clause'}, F, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno, pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^flatmap',1}}|Anno], Err)},
Fun = #c_fun{vars=[Xs],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
@@ -177,11 +190,13 @@ call(#c_call{anno=Anno}, lists, filter, [Arg1,Arg2]) ->
X = #c_var{name='X'},
B = #c_var{name='B'},
Err1 = #c_tuple{es=[#c_literal{val='case_clause'}, X]},
- CC1 = #c_clause{pats=[#c_literal{val=true}], guard=#c_literal{val=true},
+ CC1 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=true}], guard=#c_literal{val=true},
body=#c_cons{anno=[compiler_generated], hd=X, tl=Xs}},
- CC2 = #c_clause{pats=[#c_literal{val=false}], guard=#c_literal{val=true},
+ CC2 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=false}], guard=#c_literal{val=true},
body=Xs},
- CC3 = #c_clause{pats=[X], guard=#c_literal{val=true},
+ CC3 = #c_clause{anno=Anno, pats=[X], guard=#c_literal{val=true},
body=match_fail(Anno, Err1)},
Case = #c_case{arg=B, clauses = [CC1, CC2, CC3]},
C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
@@ -192,13 +207,15 @@ call(#c_call{anno=Anno}, lists, filter, [Arg1,Arg2]) ->
op=Loop,
args=[Xs]},
body=Case}}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=1}]},
body=#c_literal{val=[]}},
Err2 = #c_tuple{es=[#c_literal{val='function_clause'}, F, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno,
+ pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^filter',1}}|Anno], Err2)},
Fun = #c_fun{vars=[Xs],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
@@ -212,19 +229,20 @@ call(#c_call{anno=Anno}, lists, foldl, [Arg1,Arg2,Arg3]) ->
Xs = #c_var{name='Xs'},
X = #c_var{name='X'},
A = #c_var{name='A'},
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=#c_apply{anno=Anno,
op=Loop,
args=[Xs, #c_apply{anno=Anno,
op=F,
args=[X, A]}]}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno, pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=2}]},
body=A},
Err = #c_tuple{es=[#c_literal{val='function_clause'}, F, A, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno, pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^foldl',2}}|Anno], Err)},
Fun = #c_fun{vars=[Xs, A],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
@@ -238,19 +256,20 @@ call(#c_call{anno=Anno}, lists, foldr, [Arg1,Arg2,Arg3]) ->
Xs = #c_var{name='Xs'},
X = #c_var{name='X'},
A = #c_var{name='A'},
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=#c_apply{anno=Anno,
op=F,
args=[X, #c_apply{anno=Anno,
op=Loop,
args=[Xs, A]}]}},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno, pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=2}]},
body=A},
Err = #c_tuple{es=[#c_literal{val='function_clause'}, F, A, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno, pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^foldr',2}}|Anno], Err)},
Fun = #c_fun{vars=[Xs, A],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
@@ -266,13 +285,14 @@ call(#c_call{anno=Anno}, lists, mapfoldl, [Arg1,Arg2,Arg3]) ->
Avar = #c_var{name='A'},
Match =
fun (A, P, E) ->
- C1 = #c_clause{pats=[P], guard=#c_literal{val=true}, body=E},
+ C1 = #c_clause{anno=Anno, pats=[P], guard=#c_literal{val=true}, body=E},
Err = #c_tuple{es=[#c_literal{val='badmatch'}, X]},
- C2 = #c_clause{pats=[X], guard=#c_literal{val=true},
+ C2 = #c_clause{anno=Anno, pats=[X], guard=#c_literal{val=true},
body=match_fail(Anno, Err)},
#c_case{arg=A, clauses=[C1, C2]}
end,
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno,
+ pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
body=Match(#c_apply{anno=Anno, op=F, args=[X, Avar]},
#c_tuple{es=[X, Avar]},
%%% Tuple passing version
@@ -292,7 +312,7 @@ call(#c_call{anno=Anno}, lists, mapfoldl, [Arg1,Arg2,Arg3]) ->
%%% body=#c_values{es=[#c_cons{hd=X, tl=Xs},
%%% A]}}
)},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno, pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=2}]},
@@ -302,7 +322,7 @@ call(#c_call{anno=Anno}, lists, mapfoldl, [Arg1,Arg2,Arg3]) ->
%%% Multiple-value version
%%% body=#c_values{es=[#c_literal{val=[]}, A]}},
Err = #c_tuple{es=[#c_literal{val='function_clause'}, F, Avar, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno, pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^mapfoldl',2}}|Anno], Err)},
Fun = #c_fun{vars=[Xs, Avar],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
@@ -326,13 +346,13 @@ call(#c_call{anno=Anno}, lists, mapfoldr, [Arg1,Arg2,Arg3]) ->
Avar = #c_var{name='A'},
Match =
fun (A, P, E) ->
- C1 = #c_clause{pats=[P], guard=#c_literal{val=true}, body=E},
+ C1 = #c_clause{anno=Anno, pats=[P], guard=#c_literal{val=true}, body=E},
Err = #c_tuple{es=[#c_literal{val='badmatch'}, X]},
- C2 = #c_clause{pats=[X], guard=#c_literal{val=true},
+ C2 = #c_clause{anno=Anno, pats=[X], guard=#c_literal{val=true},
body=match_fail(Anno, Err)},
#c_case{arg=A, clauses=[C1, C2]}
end,
- C1 = #c_clause{pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
+ C1 = #c_clause{anno=Anno, pats=[#c_cons{hd=X, tl=Xs}], guard=#c_literal{val=true},
%%% Tuple passing version
body=Match(#c_apply{anno=Anno,
op=Loop,
@@ -352,7 +372,8 @@ call(#c_call{anno=Anno}, lists, mapfoldr, [Arg1,Arg2,Arg3]) ->
%%% #c_values{es=[#c_cons{hd=X, tl=Xs},
%%% A]})}
},
- C2 = #c_clause{pats=[#c_literal{val=[]}],
+ C2 = #c_clause{anno=Anno,
+ pats=[#c_literal{val=[]}],
guard=#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=is_function},
args=[F, #c_literal{val=2}]},
@@ -362,7 +383,7 @@ call(#c_call{anno=Anno}, lists, mapfoldr, [Arg1,Arg2,Arg3]) ->
%%% Multiple-value version
%%% body=#c_values{es=[#c_literal{val=[]}, A]}},
Err = #c_tuple{es=[#c_literal{val='function_clause'}, F, Avar, Xs]},
- C3 = #c_clause{pats=[Xs], guard=#c_literal{val=true},
+ C3 = #c_clause{anno=Anno, pats=[Xs], guard=#c_literal{val=true},
body=match_fail([{function_name,{'lists^mapfoldr',2}}|Anno], Err)},
Fun = #c_fun{vars=[Xs, Avar],
body=#c_case{arg=Xs, clauses=[C1, C2, C3]}},
diff --git a/lib/compiler/src/sys_core_inline.erl b/lib/compiler/src/sys_core_inline.erl
index 5a6cc45e4a..3380e3f1bd 100644
--- a/lib/compiler/src/sys_core_inline.erl
+++ b/lib/compiler/src/sys_core_inline.erl
@@ -195,6 +195,9 @@ kill_id_anns(Body) ->
cerl_trees:map(fun(#c_fun{anno=A0}=CFun) ->
A = kill_id_anns_1(A0),
CFun#c_fun{anno=A};
+ (#c_var{anno=A0}=Var) ->
+ A = kill_id_anns_1(A0),
+ Var#c_var{anno=A};
(Expr) ->
%% Mark everything as compiler generated to
%% suppress bogus warnings.
diff --git a/lib/compiler/src/v3_core.erl b/lib/compiler/src/v3_core.erl
index 5aaf5bfd51..3699c9d22e 100644
--- a/lib/compiler/src/v3_core.erl
+++ b/lib/compiler/src/v3_core.erl
@@ -329,14 +329,16 @@ gexpr({protect,Line,Arg}, Bools0, St0) ->
Anno = lineno_anno(Line, St),
{#iprotect{anno=#a{anno=Anno},body=Eps++[E]},[],Bools0,St}
end;
-gexpr({op,L,'andalso',E1,E2}, Bools, St0) ->
+gexpr({op,_,'andalso',_,_}=E0, Bools, St0) ->
+ {op,L,'andalso',E1,E2} = right_assoc(E0, 'andalso'),
Anno = lineno_anno(L, St0),
{#c_var{name=V0},St} = new_var(Anno, St0),
V = {var,L,V0},
False = {atom,L,false},
E = make_bool_switch_guard(L, E1, V, E2, False),
gexpr(E, Bools, St);
-gexpr({op,L,'orelse',E1,E2}, Bools, St0) ->
+gexpr({op,_,'orelse',_,_}=E0, Bools, St0) ->
+ {op,L,'orelse',E1,E2} = right_assoc(E0, 'orelse'),
Anno = lineno_anno(L, St0),
{#c_var{name=V0},St} = new_var(Anno, St0),
V = {var,L,V0},
@@ -765,14 +767,16 @@ expr({op,_,'++',{lc,Llc,E,Qs0},More}, St0) ->
{Qs,St2} = preprocess_quals(Llc, Qs0, St1),
{Y,Yps,St} = lc_tq(Llc, E, Qs, Mc, St2),
{Y,Mps++Yps,St};
-expr({op,L,'andalso',E1,E2}, St0) ->
+expr({op,_,'andalso',_,_}=E0, St0) ->
+ {op,L,'andalso',E1,E2} = right_assoc(E0, 'andalso'),
Anno = lineno_anno(L, St0),
{#c_var{name=V0},St} = new_var(Anno, St0),
V = {var,L,V0},
False = {atom,L,false},
E = make_bool_switch(L, E1, V, E2, False, St0),
expr(E, St);
-expr({op,L,'orelse',E1,E2}, St0) ->
+expr({op,_,'orelse',_,_}=E0, St0) ->
+ {op,L,'orelse',E1,E2} = right_assoc(E0, 'orelse'),
Anno = lineno_anno(L, St0),
{#c_var{name=V0},St} = new_var(Anno, St0),
V = {var,L,V0},
@@ -2055,6 +2059,11 @@ fail_clause(Pats, Anno, Arg) ->
body=[#iprimop{anno=#a{anno=Anno},name=#c_literal{val=match_fail},
args=[Arg]}]}.
+%% Optimization for Dialyzer.
+right_assoc({op,L1,Op,{op,L2,Op,E1,E2},E3}, Op) ->
+ right_assoc({op,L2,Op,E1,{op,L1,Op,E2,E3}}, Op);
+right_assoc(E, _Op) -> E.
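+%% For example, right_assoc/2 turns the left-nested parse tree of
+%% ((A andalso B) andalso C) into A andalso (B andalso C), so that the
+%% inner operator is expanded in a branch of the generated boolean
+%% switch rather than in its case argument.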
+
annotate_tuple(A, Es, St) ->
case member(dialyzer, St#core.opts) of
true ->
@@ -2612,7 +2621,8 @@ cfun(#ifun{anno=A,id=Id,vars=Args,clauses=Lcs,fc=Lfc}, _As, St0) ->
[],A#a.us,St2}.
c_call_erl(Fun, Args) ->
- cerl:c_call(cerl:c_atom(erlang), cerl:c_atom(Fun), Args).
+ As = [compiler_generated],
+ cerl:ann_c_call(As, cerl:c_atom(erlang), cerl:c_atom(Fun), Args).
%% lit_vars(Literal) -> [Var].
diff --git a/lib/compiler/src/v3_kernel.erl b/lib/compiler/src/v3_kernel.erl
index fe8e252e5a..e2b8787224 100644
--- a/lib/compiler/src/v3_kernel.erl
+++ b/lib/compiler/src/v3_kernel.erl
@@ -82,8 +82,7 @@
-export([module/2,format_error/1]).
-import(lists, [map/2,foldl/3,foldr/3,mapfoldl/3,splitwith/2,member/2,
- keymember/3,keyfind/3,partition/2,droplast/1,last/1,sort/1,
- reverse/1]).
+ keyfind/3,partition/2,droplast/1,last/1,sort/1,reverse/1]).
-import(ordsets, [add_element/2,del_element/2,union/2,union/1,subtract/2]).
-import(cerl, [c_tuple/1]).
@@ -1415,8 +1414,6 @@ is_remote_bif(_, _, _) -> false.
%% return multiple values. Only used in bodies where a BIF may be
%% called for effect only.
-bif_vals(dsetelement, 3) -> 0;
-bif_vals(bs_context_to_binary, 1) -> 0;
bif_vals(_, _) -> 1.
bif_vals(_, _, _) -> 1.
@@ -1593,19 +1590,12 @@ match_var([U|Us], Cs0, Def, St) ->
%% constructor/constant as first argument. Group the constructors
%% according to type, the order is really irrelevant but tries to be
%% smart.
-
-match_con(Us, Cs0, Def, St) ->
- %% Expand literals at the top level.
- Cs = [expand_pat_lit_clause(C) || C <- Cs0],
- match_con_1(Us, Cs, Def, St).
-
-match_con_1([U|_Us] = L, Cs, Def, St0) ->
+match_con([U|_Us] = L, Cs, Def, St0) ->
%% Extract clauses for different constructors (types).
%%ok = io:format("match_con ~p~n", [Cs]),
- Ttcs0 = select_types([k_binary], Cs) ++ select_bin_con(Cs) ++
- select_types([k_cons,k_tuple,k_map,k_atom,k_float,
- k_int,k_nil], Cs),
- Ttcs = opt_single_valued(Ttcs0),
+ Ttcs0 = select_types(Cs, [], [], [], [], [], [], [], [], []),
+ Ttcs1 = [{T, Types} || {T, [_ | _] = Types} <- Ttcs0],
+ Ttcs = opt_single_valued(Ttcs1),
%%ok = io:format("ttcs = ~p~n", [Ttcs]),
{Scs,St1} =
mapfoldl(fun ({T,Tcs}, St) ->
@@ -1616,8 +1606,41 @@ match_con_1([U|_Us] = L, Cs, Def, St0) ->
St0, Ttcs),
{build_alt_1st_no_fail(build_select(U, Scs), Def),St1}.
-select_types(Types, Cs) ->
- [{T,Tcs} || T <- Types, begin Tcs = select(T, Cs), Tcs =/= [] end].
+select_types([NoExpC | Cs], Bin, BinCon, Cons, Tuple, Map, Atom, Float, Int, Nil) ->
+ C = expand_pat_lit_clause(NoExpC),
+ case clause_con(C) of
+ k_binary ->
+ select_types(Cs, [C |Bin], BinCon, Cons, Tuple, Map, Atom, Float, Int, Nil);
+ k_bin_seg ->
+ select_types(Cs, Bin, [C | BinCon], Cons, Tuple, Map, Atom, Float, Int, Nil);
+ k_bin_end ->
+ select_types(Cs, Bin, [C | BinCon], Cons, Tuple, Map, Atom, Float, Int, Nil);
+ k_cons ->
+ select_types(Cs, Bin, BinCon, [C | Cons], Tuple, Map, Atom, Float, Int, Nil);
+ k_tuple ->
+ select_types(Cs, Bin, BinCon, Cons, [C | Tuple], Map, Atom, Float, Int, Nil);
+ k_map ->
+ select_types(Cs, Bin, BinCon, Cons, Tuple, [C | Map], Atom, Float, Int, Nil);
+ k_atom ->
+ select_types(Cs, Bin, BinCon, Cons, Tuple, Map, [C | Atom], Float, Int, Nil);
+ k_float ->
+ select_types(Cs, Bin, BinCon, Cons, Tuple, Map, Atom, [C | Float], Int, Nil);
+ k_int ->
+ select_types(Cs, Bin, BinCon, Cons, Tuple, Map, Atom, Float, [C | Int], Nil);
+ k_nil ->
+ select_types(Cs, Bin, BinCon, Cons, Tuple, Map, Atom, Float, Int, [C | Nil])
+ end;
+select_types([], Bin, BinCon, Cons, Tuple, Map, Atom, Float, Int, Nil) ->
+ [{k_binary, reverse(Bin)}] ++ handle_bin_con(reverse(BinCon)) ++
+ [
+ {k_cons, reverse(Cons)},
+ {k_tuple, reverse(Tuple)},
+ {k_map, reverse(Map)},
+ {k_atom, reverse(Atom)},
+ {k_float, reverse(Float)},
+ {k_int, reverse(Int)},
+ {k_nil, reverse(Nil)}
+ ].
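+
+%% For example, clauses whose first patterns are [], [H|T] and {ok,X}
+%% end up in the k_nil, k_cons and k_tuple groups respectively; groups
+%% that remain empty are dropped by the caller match_con/4, which keeps
+%% only the {Type,[_|_]} entries.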
expand_pat_lit_clause(#iclause{pats=[#ialias{pat=#k_literal{anno=A,val=Val}}=Alias|Ps]}=C) ->
P = expand_pat_lit(Val, A),
@@ -1746,20 +1769,12 @@ combine_bin_segs(#k_bin_end{}) ->
combine_bin_segs(_) ->
throw(not_possible).
-%% select_bin_con([Clause]) -> [{Type,[Clause]}].
-%% Extract clauses for the k_bin_seg constructor. As k_bin_seg
+%% handle_bin_con([Clause]) -> [{Type,[Clause]}].
+%% Handle clauses for the k_bin_seg constructor. As k_bin_seg
%% matching can overlap, the k_bin_seg constructors cannot be
%% reordered, only grouped.
-select_bin_con(Cs0) ->
- Cs1 = lists:filter(fun (C) ->
- Con = clause_con(C),
- (Con =:= k_bin_seg) or (Con =:= k_bin_end)
- end, Cs0),
- select_bin_con_1(Cs1).
-
-
-select_bin_con_1(Cs) ->
+handle_bin_con(Cs) ->
try
%% The usual way to match literals is to first extract the
%% value to a register, and then compare the register to the
@@ -1798,14 +1813,14 @@ select_bin_con_1(Cs) ->
end
catch
throw:not_possible ->
- select_bin_con_2(Cs)
+ handle_bin_con_not_possible(Cs)
end.
-select_bin_con_2([C1|Cs]) ->
+handle_bin_con_not_possible([C1|Cs]) ->
Con = clause_con(C1),
{More,Rest} = splitwith(fun (C) -> clause_con(C) =:= Con end, Cs),
- [{Con,[C1|More]}|select_bin_con_2(Rest)];
-select_bin_con_2([]) -> [].
+ [{Con,[C1|More]}|handle_bin_con_not_possible(Rest)];
+handle_bin_con_not_possible([]) -> [].
%% select_bin_int([Clause]) -> {k_bin_int,[Clause]}
%% If the first pattern in each clause selects the same integer,
@@ -1905,10 +1920,6 @@ select_utf8(Val0) ->
throw(not_possible)
end.
-%% select(Con, [Clause]) -> [Clause].
-
-select(T, Cs) -> [ C || C <- Cs, clause_con(C) =:= T ].
-
%% match_value([Var], Con, [Clause], Default, State) -> {SelectExpr,State}.
%% At this point all the clauses have the same constructor, we must
%% now separate them according to value.
@@ -2043,6 +2054,10 @@ get_match(#k_cons{}, St0) ->
get_match(#k_binary{}, St0) ->
{[V]=Mes,St1} = new_vars(1, St0),
{#k_binary{segs=V},Mes,St1};
+get_match(#k_bin_seg{size=#k_atom{val=all},next={k_bin_end,[]}}=Seg, St0) ->
+ {[S,N0],St1} = new_vars(2, St0),
+ N = set_kanno(N0, [no_usage]),
+ {Seg#k_bin_seg{seg=S,next=N},[S],St1};
get_match(#k_bin_seg{}=Seg, St0) ->
{[S,N0],St1} = new_vars(2, St0),
N = set_kanno(N0, [no_usage]),
@@ -2070,6 +2085,9 @@ new_clauses(Cs0, U, St) ->
#k_cons{hd=H,tl=T} -> [H,T|As];
#k_tuple{es=Es} -> Es ++ As;
#k_binary{segs=E} -> [E|As];
+ #k_bin_seg{size=#k_atom{val=all},
+ seg=S,next={k_bin_end,[]}} ->
+ [S|As];
#k_bin_seg{seg=S,next=N} ->
[S,N|As];
#k_bin_int{next=N} ->
@@ -2337,8 +2355,7 @@ uexpr(#k_bif{anno=A,op=Op,args=As}=Bif, {break,Rs}, St0) ->
{Brs,St1} = bif_returns(Op, Rs, St0),
{Bif#k_bif{anno=#k{us=Used,ns=lit_list_vars(Brs),a=A},ret=Brs},
Used,St1};
-uexpr(#k_match{anno=A,vars=Vs0,body=B0}, Br, St0) ->
- Vs = handle_reuse_annos(Vs0, St0),
+uexpr(#k_match{anno=A,vars=Vs,body=B0}, Br, St0) ->
Rs = break_rets(Br),
{B1,Bu,St1} = umatch(B0, Br, St0),
case is_in_guard(St1) of
@@ -2441,33 +2458,6 @@ make_fdef(Anno, Name, Arity, Vs, Body) ->
vars=Vs,body=Body,ret=[]},
#k_fdef{anno=Anno,func=Name,arity=Arity,vars=Vs,body=Match}.
-
-%% handle_reuse_annos([#k_var{}], State) -> State.
-%% In general, it is only safe to reuse a variable for a match context
-%% if the original value of the variable will no longer be needed.
-%%
-%% If a variable has been bound in an outer letrec and is therefore
-%% free in the current function, the variable may still be used.
-%% We don't bother to check whether the variable is actually used,
-%% but simply clears the 'reuse_for_context' annotation for any variable
-%% that is free.
-handle_reuse_annos(Vs, St) ->
- [handle_reuse_anno(V, St) || V <- Vs].
-
-handle_reuse_anno(#k_var{anno=A}=V, St) ->
- case member(reuse_for_context, A) of
- false -> V;
- true -> handle_reuse_anno_1(V, St)
- end.
-
-handle_reuse_anno_1(#k_var{anno=Anno,name=Vname}=V, #kern{ff={F,A}}=St) ->
- FreeVs = get_free(F, A, St),
- case keymember(Vname, #k_var.name, FreeVs) of
- true -> V#k_var{anno=Anno--[reuse_for_context]};
- false -> V
- end;
-handle_reuse_anno_1(V, _St) -> V.
-
%% get_free(Name, Arity, State) -> [Free].
%% store_free(Name, Arity, [Free], State) -> State.
@@ -2511,8 +2501,7 @@ umatch(#k_alt{anno=A,first=F0,then=T0}, Br, St0) ->
Used = union(Fu, Tu),
{#k_alt{anno=#k{us=Used,ns=[],a=A},first=F1,then=T1},
Used,St2};
-umatch(#k_select{anno=A,var=V0,types=Ts0}, Br, St0) ->
- V = handle_reuse_anno(V0, St0),
+umatch(#k_select{anno=A,var=V,types=Ts0}, Br, St0) ->
{Ts1,Tus,St1} = umatch_list(Ts0, Br, St0),
Used = case member(no_usage, get_kanno(V)) of
true -> Tus;
diff --git a/lib/compiler/test/Makefile b/lib/compiler/test/Makefile
index 2a5004aa4c..db8eb7e2e1 100644
--- a/lib/compiler/test/Makefile
+++ b/lib/compiler/test/Makefile
@@ -97,16 +97,30 @@ INLINE= \
receive \
record
+R21= \
+ bs_construct \
+ bs_match
+
CORE_MODULES = \
lfe_andor_SUITE \
lfe_guard_SUITE
+NO_MOD_OPT = $(NO_OPT)
+
+NO_SSA_OPT = $(NO_OPT)
+
NO_OPT_MODULES= $(NO_OPT:%=%_no_opt_SUITE)
NO_OPT_ERL_FILES= $(NO_OPT_MODULES:%=%.erl)
POST_OPT_MODULES= $(NO_OPT:%=%_post_opt_SUITE)
POST_OPT_ERL_FILES= $(POST_OPT_MODULES:%=%.erl)
INLINE_MODULES= $(INLINE:%=%_inline_SUITE)
INLINE_ERL_FILES= $(INLINE_MODULES:%=%.erl)
+R21_MODULES= $(R21:%=%_r21_SUITE)
+R21_ERL_FILES= $(R21_MODULES:%=%.erl)
+NO_MOD_OPT_MODULES= $(NO_MOD_OPT:%=%_no_module_opt_SUITE)
+NO_MOD_OPT_ERL_FILES= $(NO_MOD_OPT_MODULES:%=%.erl)
+NO_SSA_OPT_MODULES= $(NO_SSA_OPT:%=%_no_ssa_opt_SUITE)
+NO_SSA_OPT_ERL_FILES= $(NO_SSA_OPT_MODULES:%=%.erl)
ERL_FILES= $(MODULES:%=%.erl)
CORE_FILES= $(CORE_MODULES:%=%.core)
@@ -135,16 +149,24 @@ EBIN = .
# Targets
# ----------------------------------------------------
-make_emakefile: $(NO_OPT_ERL_FILES) $(POST_OPT_ERL_FILES) $(INLINE_ERL_FILES)
+make_emakefile: $(NO_OPT_ERL_FILES) $(POST_OPT_ERL_FILES) $(NO_SSA_OPT_ERL_FILES) \
+ $(INLINE_ERL_FILES) $(R21_ERL_FILES) $(NO_MOD_OPT_ERL_FILES)
$(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) $(MODULES) \
> $(EMAKEFILE)
$(ERL_TOP)/make/make_emakefile +no_copt +no_postopt \
+no_ssa_opt +no_recv_opt $(ERL_COMPILE_FLAGS) \
-o$(EBIN) $(NO_OPT_MODULES) >> $(EMAKEFILE)
+ $(ERL_TOP)/make/make_emakefile +no_share_opt +no_bsm_opt +no_fun_opt \
+ +no_ssa_opt +no_recv_opt $(ERL_COMPILE_FLAGS) \
+ -o$(EBIN) $(NO_SSA_OPT_MODULES) >> $(EMAKEFILE)
$(ERL_TOP)/make/make_emakefile +no_copt $(ERL_COMPILE_FLAGS) \
-o$(EBIN) $(POST_OPT_MODULES) >> $(EMAKEFILE)
$(ERL_TOP)/make/make_emakefile +inline $(ERL_COMPILE_FLAGS) \
-o$(EBIN) $(INLINE_MODULES) >> $(EMAKEFILE)
+ $(ERL_TOP)/make/make_emakefile +r21 $(ERL_COMPILE_FLAGS) \
+ -o$(EBIN) $(R21_MODULES) >> $(EMAKEFILE)
+ $(ERL_TOP)/make/make_emakefile +no_module_opt $(ERL_COMPILE_FLAGS) \
+ -o$(EBIN) $(NO_MOD_OPT_MODULES) >> $(EMAKEFILE)
$(ERL_TOP)/make/make_emakefile +from_core $(ERL_COMPILE_FLAGS) \
-o$(EBIN) $(CORE_MODULES) >> $(EMAKEFILE)
@@ -165,12 +187,21 @@ docs:
%_no_opt_SUITE.erl: %_SUITE.erl
sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@
+%_no_ssa_opt_SUITE.erl: %_SUITE.erl
+ sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@
+
%_post_opt_SUITE.erl: %_SUITE.erl
sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@
%_inline_SUITE.erl: %_SUITE.erl
sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@
+%_r21_SUITE.erl: %_SUITE.erl
+ sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@
+
+%_no_module_opt_SUITE.erl: %_SUITE.erl
+ sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@
+
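+# For example, bs_match_r21_SUITE.erl is bs_match_SUITE.erl with only its
+# -module() line rewritten; the generated Emakefile then compiles it with
+# +r21, and the no_module_opt and no_ssa_opt variants are compiled with
+# their respective option sets in the same way.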
# ----------------------------------------------------
# Release Target
# ----------------------------------------------------
@@ -183,7 +214,9 @@ release_tests_spec: make_emakefile
$(INSTALL_DATA) compiler.spec compiler.cover \
$(EMAKEFILE) $(ERL_FILES) "$(RELSYSDIR)"
$(INSTALL_DATA) $(NO_OPT_ERL_FILES) $(POST_OPT_ERL_FILES) \
- $(INLINE_ERL_FILES) "$(RELSYSDIR)"
+ $(INLINE_ERL_FILES) $(R21_ERL_FILES) \
+ $(NO_MOD_OPT_ERL_FILES) \
+ $(NO_SSA_OPT_ERL_FILES) "$(RELSYSDIR)"
$(INSTALL_DATA) $(CORE_FILES) "$(RELSYSDIR)"
for file in $(ERL_DUMMY_FILES); do \
module=`basename $$file .erl`; \
diff --git a/lib/compiler/test/andor_SUITE.erl b/lib/compiler/test/andor_SUITE.erl
index 721f77f0f6..5c463063c1 100644
--- a/lib/compiler/test/andor_SUITE.erl
+++ b/lib/compiler/test/andor_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/apply_SUITE.erl b/lib/compiler/test/apply_SUITE.erl
index be49cff9b9..2ee518b1a0 100644
--- a/lib/compiler/test/apply_SUITE.erl
+++ b/lib/compiler/test/apply_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -73,6 +73,7 @@ mfa(Config) when is_list(Config) ->
{'EXIT',_} = (catch ?APPLY2(Mod, (id(bazzzzzz)), a, b)),
{'EXIT',_} = (catch ?APPLY2({}, baz, a, b)),
{'EXIT',_} = (catch ?APPLY2(?MODULE, [], a, b)),
+ {'EXIT',_} = (catch bad_literal_call(1)),
ok = apply(Mod, foo, id([])),
{[a,b|c]} = apply(Mod, bar, id([[a,b|c]])),
@@ -92,6 +93,13 @@ mfa(Config) when is_list(Config) ->
apply(Mod, foo, []).
+%% The single call to this function with a literal argument caused type
+%% optimization to swap out the 'mod' field of a #b_remote{}, which was
+%% mishandled during code generation as it assumed that the module would always
+%% be an atom.
+bad_literal_call(I) ->
+ I:foo().
+
foo() ->
ok.
diff --git a/lib/compiler/test/beam_except_SUITE.erl b/lib/compiler/test/beam_except_SUITE.erl
index 1eb07c8c85..9380fe06c8 100644
--- a/lib/compiler/test/beam_except_SUITE.erl
+++ b/lib/compiler/test/beam_except_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2011-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2011-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
- multiple_allocs/1,coverage/1]).
+ multiple_allocs/1,bs_get_tail/1,coverage/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -31,6 +31,7 @@ all() ->
groups() ->
[{p,[parallel],
[multiple_allocs,
+ bs_get_tail,
coverage]}].
init_per_suite(Config) ->
@@ -63,6 +64,17 @@ place(lee) ->
conditions() ->
(talking = going) = storage + [large = wanted].
+bs_get_tail(Config) ->
+ {<<"abc">>,0,0,Config} = bs_get_tail_1(id(<<0:32, "abc">>), 0, 0, Config),
+ {'EXIT',
+ {function_clause,
+ [{?MODULE,bs_get_tail_1,[<<>>,0,0,Config],_}|_]}} =
+ (catch bs_get_tail_1(id(<<>>), 0, 0, Config)),
+ ok.
+
+bs_get_tail_1(<<_:32, Rest/binary>>, Z1, Z2, F1) ->
+ {Rest,Z1,Z2,F1}.
+
coverage(_) ->
File = {file,"fake.erl"},
ok = fc(a),
@@ -83,8 +95,24 @@ coverage(_) ->
(catch bar(x)),
{'EXIT',{{case_clause,{1}},[{?MODULE,bar,1,[File,{line,9}]}|_]}} =
(catch bar(0)),
+
+ Self = self(),
+ {'EXIT',{{strange,Self},[{?MODULE,foo,[any],[File,{line,14}]}|_]}} =
+ (catch foo(any)),
+
+ {ok,succeed,1,2} = foobar(succeed, 1, 2),
+ {'EXIT',{function_clause,[{?MODULE,foobar,[[fail],1,2],
+ [{file,"fake.erl"},{line,16}]}|_]}} =
+ (catch foobar([fail], 1, 2)),
+ {'EXIT',{function_clause,[{?MODULE,fake_function_clause,[{a,b},42.0],_}|_]}} =
+ (catch fake_function_clause({a,b})),
+
ok.
+fake_function_clause(A) -> error(function_clause, [A,42.0]).
+
+id(I) -> I.
+
-file("fake.erl", 1).
fc(a) -> %Line 2
ok; %Line 3
@@ -96,3 +124,9 @@ bar(X) -> %Line 8
case {X+1} of %Line 9
1 -> ok %Line 10
end. %Line 11
+%% Cover collection code for function_clause exceptions.
+foo(A) -> %Line 13
+ error({strange,self()}, [A]). %Line 14
+%% Cover beam_except:tag_literal/1.
+foobar(A, B, C) when is_atom(A) -> %Line 16
+ {ok,A,B,C}. %Line 17
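For context on fake_function_clause/1 above: erlang:error/2 raises an error whose stack trace reports the second argument as the argument list of the current call, which is what lets the test manufacture a function_clause exception carrying the extra 42.0 literal. A minimal stand-alone sketch of that behaviour (module and function names are invented for illustration):

    -module(fake_fc_demo).
    -export([run/0]).

    run() ->
        try
            error(function_clause, [{a,b}, 42.0])
        catch
            error:function_clause:Stk ->
                %% The topmost frame carries the argument list given to error/2.
                [{?MODULE,run,[{a,b},42.0],_Loc}|_] = Stk,
                ok
        end.

Calling fake_fc_demo:run() should return ok, mirroring the pattern match that coverage/1 performs on the caught exception.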
diff --git a/lib/compiler/test/beam_jump_SUITE.erl b/lib/compiler/test/beam_jump_SUITE.erl
index 488c30919b..a456f31d79 100644
--- a/lib/compiler/test/beam_jump_SUITE.erl
+++ b/lib/compiler/test/beam_jump_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2016. All Rights Reserved.
+%% Copyright Ericsson AB 2016-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,7 +21,9 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- undefined_label/1,ambiguous_catch_try_state/1]).
+ undefined_label/1,ambiguous_catch_try_state/1,
+ unsafe_move_elimination/1,build_tuple/1,
+ coverage/1]).
suite() ->
[{ct_hooks,[ts_install_cth]}].
@@ -32,7 +34,10 @@ all() ->
groups() ->
[{p,[parallel],
[undefined_label,
- ambiguous_catch_try_state
+ ambiguous_catch_try_state,
+ unsafe_move_elimination,
+ build_tuple,
+ coverage
]}].
init_per_suite(Config) ->
@@ -72,3 +77,134 @@ river() -> song.
checks(Wanted) ->
%% Must be one line to cause the unsafe optimization.
{catch case river() of sheet -> begin +Wanted, if "da" -> Wanted end end end, catch case river() of sheet -> begin + Wanted, if "da" -> Wanted end end end}.
+
+unsafe_move_elimination(_Config) ->
+ {{left,right,false},false} = unsafe_move_elimination_1(left, right, false),
+ {{false,right,false},false} = unsafe_move_elimination_1(false, right, true),
+ {{true,right,right},right} = unsafe_move_elimination_1(true, right, true),
+ [ok = unsafe_move_elimination_2(I) || I <- lists:seq(0,16)],
+ ok.
+
+unsafe_move_elimination_1(Left, Right, Simple0) ->
+ id(1),
+
+ %% The move at label 29 would be removed by beam_jump, which is unsafe because
+ %% the two select_val instructions have different source registers.
+ %%
+ %% {select_val,{y,0},{f,25},{list,[{atom,true},{f,27},{atom,false},{f,29}]}}.
+ %% ^^^^^ ^^^^^^^^^^^^^^^^^^^
+ %% {label,27}.
+ %% {kill,{y,0}}.
+ %% {move,{y,2},{x,0}}.
+ %% {line,...}.
+ %% {call,1,{f,31}}.
+ %% {select_val,{x,0},{f,33},{list,[{atom,true},{f,35},{atom,false},{f,29}]}}.
+ %% ^^^^^ ^^^^^^^^^^^^^^^^^^^
+ %% {label,29}.
+ %% {move,{atom,false},{y,0}}. <=== REMOVED (unsafely).
+ %% {jump,{f,37}}.
+
+ Simple = case case Simple0 of
+ false -> false;
+ true -> id(Left)
+ end
+ of
+ false ->
+ false;
+ true ->
+ id(Right)
+ end,
+ {id({Left,Right,Simple}),Simple}.
+
+unsafe_move_elimination_2(Int) ->
+ %% The type optimization pass would recognize that TagInt can only be
+ %% [0 .. 7], so the first 'case' would select_val over [0 .. 6] and point
+ %% its fail label at the block for 7.
+ %%
+ %% A later optimization would merge this block with 'expects_h' in the
+ %% second case, as the latter is only reachable from the former.
+ %%
+ %% ... but this broke down when the move elimination optimization didn't
+ %% take the fail label of the first select_val into account. This caused it
+ %% to believe that the only way to reach 'expects_h' was through the second
+ %% case when 'Tag' =:= 'h', which made it remove the move instruction
+ %% added in the first case, passing garbage to expects_h/2.
+ TagInt = Int band 2#111,
+ Tag = case TagInt of
+ 0 -> a;
+ 1 -> b;
+ 2 -> c;
+ 3 -> d;
+ 4 -> e;
+ 5 -> f;
+ 6 -> g;
+ 7 -> h
+ end,
+ case Tag of
+ g -> expects_g(TagInt, Tag);
+ h -> expects_h(TagInt, Tag);
+ _ -> Tag = id(Tag), ok
+ end.
+
+expects_g(6, Atom) ->
+ Atom = id(g),
+ ok.
+
+expects_h(7, Atom) ->
+ Atom = id(h),
+ ok.
+
+-record(message2, {id, p1}).
+-record(message3, {id, p1, p2}).
+
+build_tuple(_Config) ->
+ {'EXIT',{{badrecord,message3},_}} = (catch do_build_tuple(#message2{})),
+ ok.
+
+do_build_tuple(Message) ->
+ if is_record(Message, message2) ->
+ Res = {res, rand:uniform(100)},
+ {Message#message3.id, Res}
+ end.
+
+coverage(_Config) ->
+ ok = coverage_1(ok),
+ {error,badarg} = coverage_1({error,badarg}),
+
+ gt = coverage_2(100, 42),
+ le = coverage_2(100, 999),
+ le = coverage_2([], []),
+ gt = coverage_2([], xxx),
+
+ ok.
+
+coverage_1(Var) ->
+ case id(Var) of
+ ok -> ok;
+ Error -> Error
+ end.
+
+%% Cover beam_jump:invert_test(is_ne_exact).
+coverage_2(Pre1, Pre2) ->
+ case
+ case Pre1 == [] of
+ false ->
+ false;
+ true ->
+ Pre2 /= []
+ end
+ of
+ true ->
+ gt;
+ false ->
+ case Pre1 > Pre2 of
+ true ->
+ gt;
+ false ->
+ le
+ end
+ end.
+
+
+id(I) ->
+ I.
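As a rough illustration of what coverage_2/2 above exercises: when beam_jump turns a conditional branch around, the test instruction has to be replaced by its logical inverse. The clauses below sketch that mapping for a handful of common BEAM tests; they are a simplified stand-in for, not a copy of, beam_jump:invert_test/1.

    %% Simplified sketch of test inversion; the real pass handles more opcodes.
    invert_test(is_eq)       -> is_ne;
    invert_test(is_ne)       -> is_eq;
    invert_test(is_eq_exact) -> is_ne_exact;
    invert_test(is_ne_exact) -> is_eq_exact;
    invert_test(is_lt)       -> is_ge;
    invert_test(is_ge)       -> is_lt.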
diff --git a/lib/compiler/test/beam_reorder_SUITE.erl b/lib/compiler/test/beam_reorder_SUITE.erl
index 33b27b9f9f..c8a4f9a75f 100644
--- a/lib/compiler/test/beam_reorder_SUITE.erl
+++ b/lib/compiler/test/beam_reorder_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2015-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2015-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/beam_ssa_SUITE.erl b/lib/compiler/test/beam_ssa_SUITE.erl
index 0356c99c5a..15cf9bcbf3 100644
--- a/lib/compiler/test/beam_ssa_SUITE.erl
+++ b/lib/compiler/test/beam_ssa_SUITE.erl
@@ -21,7 +21,8 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- calls/1,tuple_matching/1,recv/1,maps/1]).
+ calls/1,tuple_matching/1,recv/1,maps/1,
+ cover_ssa_dead/1,combine_sw/1,share_opt/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -33,7 +34,10 @@ groups() ->
[tuple_matching,
calls,
recv,
- maps
+ maps,
+ cover_ssa_dead,
+ combine_sw,
+ share_opt
]}].
init_per_suite(Config) ->
@@ -88,7 +92,13 @@ start_it([_|_]=MFA) ->
end.
tuple_matching(_Config) ->
- do_tuple_matching({tag,42}).
+ do_tuple_matching({tag,42}),
+
+ true = is_two_tuple({a,b}),
+ false = is_two_tuple({a,b,c}),
+ false = is_two_tuple(atom),
+
+ ok.
do_tuple_matching(Arg) ->
Res = do_tuple_matching_1(Arg),
@@ -114,6 +124,12 @@ do_tuple_matching_3(Tuple) when is_tuple(Tuple) ->
{ok,element(2, Tuple)}
end.
+is_two_tuple(Arg) ->
+ case is_tuple(Arg) of
+ false -> false;
+ true -> tuple_size(Arg) == 2
+ end.
+
-record(reporter_state, {res,run_config}).
-record(run_config, {report_interval=0}).
@@ -286,5 +302,196 @@ maps_1(K) ->
#{K:=V} = #{},
V.
+-record(wx_ref, {type=any_type,ref=any_ref}).
+
+cover_ssa_dead(_Config) ->
+ str = format_str(str, escapable, [], true),
+ [iolist,str] = format_str(str, escapable, iolist, true),
+ bad = format_str(str, not_escapable, [], true),
+ bad = format_str(str, not_escapable, iolist, true),
+ bad = format_str(str, escapable, [], false),
+ bad = format_str(str, escapable, [], bad),
+
+ DefWxRef = #wx_ref{},
+ {DefWxRef,77,9999,[]} = contains(#wx_ref{}, 77, 9999),
+ {DefWxRef,77.0,9999,[]} = contains(#wx_ref{}, 77.0, 9999),
+ {DefWxRef,77,9999.0,[]} = contains(#wx_ref{}, 77, 9999.0),
+ {DefWxRef,77.0,9999.0,[]} = contains(#wx_ref{}, 77.0, 9999.0),
+ {any_type,any_ref,42,43,[option]} = contains(#wx_ref{}, {42,43}, [option]),
+ {any_type,any_ref,42,43,[]} = contains(#wx_ref{}, {42,43}, []),
+ {any_type,any_ref,42.0,43,[]} = contains(#wx_ref{}, {42.0,43}, []),
+ {any_type,any_ref,42,43.0,[]} = contains(#wx_ref{}, {42,43.0}, []),
+ {any_type,any_ref,42.0,43.0,[]} = contains(#wx_ref{}, {42.0,43.0}, []),
+
+ nope = conv_alub(false, '=:='),
+ ok = conv_alub(true, '=:='),
+ ok = conv_alub(true, none),
+ error = conv_alub(false, none),
+
+ {false,false} = eval_alu(false, false, false),
+ {true,false} = eval_alu(false, false, true),
+ {false,true} = eval_alu(false, true, false),
+ {false,false} = eval_alu(false, true, true),
+ {false,true} = eval_alu(true, false, false),
+ {false,false} = eval_alu(true, false, true),
+ {true,true} = eval_alu(true, true, false),
+ {false,true} = eval_alu(true, true, true),
+
+ 100.0 = percentage(1.0, 0.0),
+ 100.0 = percentage(1, 0),
+ 0.0 = percentage(0, 0),
+ 0.0 = percentage(0.0, 0.0),
+ 40.0 = percentage(4.0, 10.0),
+ 60.0 = percentage(6, 10),
+
+ %% Cover '=:=', followed by '=/='.
+ false = 'cover__=:=__=/='(41),
+ true = 'cover__=:=__=/='(42),
+ false = 'cover__=:=__=/='(43),
+
+ %% Cover '<', followed by '=/='.
+ true = 'cover__<__=/='(41),
+ false = 'cover__<__=/='(42),
+ false = 'cover__<__=/='(43),
+
+ %% Cover '=<', followed by '=/='.
+ true = 'cover__=<__=/='(41),
+ true = 'cover__=<__=/='(42),
+ false = 'cover__=<__=/='(43),
+
+ %% Cover '>=', followed by '=/='.
+ false = 'cover__>=__=/='(41),
+ true = 'cover__>=__=/='(42),
+ true = 'cover__>=__=/='(43),
+
+ %% Cover '>', followed by '=/='.
+ false = 'cover__>__=/='(41),
+ false = 'cover__>__=/='(42),
+ true = 'cover__>__=/='(43),
+
+ ok.
+
+'cover__=:=__=/='(X) when X =:= 42 -> X =/= 43;
+'cover__=:=__=/='(_) -> false.
+
+'cover__<__=/='(X) when X < 42 -> X =/= 42;
+'cover__<__=/='(_) -> false.
+
+'cover__=<__=/='(X) when X =< 42 -> X =/= 43;
+'cover__=<__=/='(_) -> false.
+
+'cover__>=__=/='(X) when X >= 42 -> X =/= 41;
+'cover__>=__=/='(_) -> false.
+
+'cover__>__=/='(X) when X > 42 -> X =/= 42;
+'cover__>__=/='(_) -> false.
+
+format_str(Str, FormatData, IoList, EscChars) ->
+ Escapable = FormatData =:= escapable,
+ case id(Str) of
+ IoStr when Escapable, EscChars, IoList == [] ->
+ id(IoStr);
+ IoStr when Escapable, EscChars ->
+ [IoList,id(IoStr)];
+ _ ->
+ bad
+ end.
+
+contains(This, X, Y) when is_record(This, wx_ref), is_number(X), is_number(Y) ->
+ {This,X,Y,[]};
+contains(#wx_ref{type=ThisT,ref=ThisRef}, {CX,CY}, Options)
+ when is_number(CX), is_number(CY), is_list(Options) ->
+ {ThisT,ThisRef,CX,CY,Options}.
+
+conv_alub(HasDst, CmpOp) ->
+ case (not HasDst) andalso CmpOp =/= none of
+ true -> nope;
+ false ->
+ case HasDst of
+ false -> error;
+ true -> ok
+ end
+ end.
+
+eval_alu(Sign1, Sign2, N) ->
+ V = (Sign1 andalso Sign2 andalso (not N))
+ or ((not Sign1) andalso (not Sign2) andalso N),
+ C = (Sign1 andalso Sign2)
+ or ((not N) andalso (Sign1 orelse Sign2)),
+ {V,C}.
+
+percentage(Dividend, Divisor) ->
+ if Divisor == 0 andalso Dividend /= 0 ->
+ 100.0;
+ Divisor == 0 ->
+ 0.0;
+ true ->
+ Dividend / Divisor * 100
+ end.
+
+combine_sw(_Config) ->
+ [a] = do_comb_sw_1(a),
+ [b,b] = do_comb_sw_1(b),
+ [c] = do_comb_sw_1(c),
+ [c] = do_comb_sw_1(c),
+ [] = do_comb_sw_1(z),
+
+ [a] = do_comb_sw_2(a),
+ [b2,b1] = do_comb_sw_2(b),
+ [c] = do_comb_sw_2(c),
+ [c] = do_comb_sw_2(c),
+ [] = do_comb_sw_2(z),
+
+ ok.
+
+do_comb_sw_1(X) ->
+ put(?MODULE, []),
+ if
+ X == a; X == b ->
+ put(?MODULE, [X|get(?MODULE)]);
+ true ->
+ ok
+ end,
+ if
+ X == b; X == c ->
+ put(?MODULE, [X|get(?MODULE)]);
+ true ->
+ ok
+ end,
+ erase(?MODULE).
+
+do_comb_sw_2(X) ->
+ put(?MODULE, []),
+ case X of
+ a ->
+ put(?MODULE, [a|get(?MODULE)]);
+ b ->
+ put(?MODULE, [b1|get(?MODULE)]);
+ _ ->
+ ok
+ end,
+ case X of
+ b ->
+ put(?MODULE, [b2|get(?MODULE)]);
+ c ->
+ put(?MODULE, [c|get(?MODULE)]);
+ _ ->
+ ok
+ end,
+ erase(?MODULE).
+
+share_opt(_Config) ->
+ ok = do_share_opt(0).
+
+do_share_opt(A) ->
+ %% The compiler would be stuck in an infinite loop in beam_ssa_share.
+ case A of
+ 0 -> a;
+ 1 -> b;
+ 2 -> c
+ end,
+ receive after 1 -> ok end.
+
+
%% The identity function.
id(I) -> I.
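A small aside on percentage/2 above: the guards use '==' rather than '=:=' so that the integer and float call sites in cover_ssa_dead/1 go through the same clauses, and '/' always yields a float. A quick shell check of the arithmetic (illustrative only):

    1> 0 == 0.0.
    true
    2> 0 =:= 0.0.
    false
    3> 6 / 10 * 100.
    60.0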
diff --git a/lib/compiler/test/beam_type_SUITE.erl b/lib/compiler/test/beam_type_SUITE.erl
index caf43dad40..a7ffc3f60a 100644
--- a/lib/compiler/test/beam_type_SUITE.erl
+++ b/lib/compiler/test/beam_type_SUITE.erl
@@ -21,8 +21,8 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- integers/1,coverage/1,booleans/1,setelement/1,cons/1,
- tuple/1,record_float/1,binary_float/1,float_compare/1,
+ integers/1,numbers/1,coverage/1,booleans/1,setelement/1,
+ cons/1,tuple/1,record_float/1,binary_float/1,float_compare/1,
arity_checks/1,elixir_binaries/1,find_best/1,
test_size/1]).
@@ -34,6 +34,7 @@ all() ->
groups() ->
[{p,[parallel],
[integers,
+ numbers,
coverage,
booleans,
setelement,
@@ -115,8 +116,8 @@ do_integers_4(_, _, Res) ->
Res.
do_integers_5(X0, Y0) ->
- %% X and Y will use the same register.
- X = X0 band 1,
+ %% _X and Y will use the same register.
+ _X = X0 band 1,
Y = Y0 band 3,
case Y of
0 -> zero;
@@ -125,6 +126,59 @@ do_integers_5(X0, Y0) ->
3 -> three
end.
+numbers(_Config) ->
+ Int = id(42),
+ true = is_integer(Int),
+ true = is_number(Int),
+ false = is_float(Int),
+
+ Float = id(42.0),
+ true = is_float(Float),
+ true = is_number(Float),
+ false = is_integer(Float),
+
+ Number = id(1) + id(2),
+ true = is_number(Number),
+ true = is_integer(Number),
+ false = is_float(Number),
+
+ AnotherNumber = id(99.0) + id(1),
+ true = is_float(AnotherNumber),
+ true = is_number(AnotherNumber),
+ false = is_integer(AnotherNumber),
+
+ NotNumber = id(atom),
+ true = is_atom(NotNumber),
+ false = is_number(NotNumber),
+ false = is_integer(NotNumber),
+ false = is_float(NotNumber),
+
+ true = is_number(Int),
+ true = is_number(Float),
+ true = is_number(Number),
+ true = is_number(AnotherNumber),
+
+ %% Cover beam_ssa_type:join/2.
+
+ Join1 = case id(a) of
+ a -> 3 + id(7); %Number.
+ b -> id(5) / id(2) %Float.
+ end,
+ true = is_integer(Join1),
+
+ Join2 = case id(a) of
+ a -> id(5) / 2; %Float.
+ b -> 3 + id(7) %Number.
+ end,
+ true = is_float(Join2),
+
+ %% Cover beam_ssa_type:meet/2.
+
+ Meet1 = id(0) + -10.0, %Float.
+ 10.0 = abs(Meet1), %Number.
+
+ ok.
+
coverage(Config) ->
{'EXIT',{badarith,_}} = (catch id(1) bsl 0.5),
{'EXIT',{badarith,_}} = (catch id(2.0) bsl 2),
@@ -159,18 +213,59 @@ coverage(Config) ->
[_|_] ->
ok
end,
+
+ %% Cover beam_type:verified_type(none).
+ {'EXIT',{badarith,_}} = (catch (id(2) / id(1)) band 16#ff),
+
ok.
booleans(_Config) ->
- {'EXIT',{{case_clause,_},_}} = (catch do_booleans(42)),
+ {'EXIT',{{case_clause,_},_}} = (catch do_booleans_1(42)),
+
+ ok = do_booleans_2(42, 41),
+ error = do_booleans_2(42, 42),
+
+ AnyAtom = id(atom),
+ true = is_atom(AnyAtom),
+ false = is_boolean(AnyAtom),
+
+ MaybeBool = id(maybe),
+ case MaybeBool of
+ true -> ok;
+ maybe -> ok;
+ false -> ok
+ end,
+ false = is_boolean(MaybeBool),
+
+ NotBool = id(a),
+ case NotBool of
+ a -> ok;
+ b -> ok;
+ c -> ok
+ end,
+ false = is_boolean(NotBool),
+
ok.
-do_booleans(B) ->
+do_booleans_1(B) ->
case is_integer(B) of
yes -> yes;
no -> no
end.
+do_booleans_2(A, B) ->
+ Not = not do_booleans_cmp(A, B),
+ case Not of
+ true ->
+ case Not of
+ true -> error;
+ false -> ok
+ end;
+ false -> ok
+ end.
+
+do_booleans_cmp(A, B) -> A > B.
+
setelement(_Config) ->
T0 = id({a,42}),
{a,_} = T0,
@@ -223,8 +318,15 @@ cons_hdtl(B) ->
id(1),
{id(hd(Cons)),id(tl(Cons))}.
+-record(bird, {a=a,b=id(42)}).
+
tuple(_Config) ->
{'EXIT',{{badmatch,{necessary}},_}} = (catch do_tuple()),
+
+ [] = [X || X <- [], #bird{a = a} == {r,X,foo}],
+ [] = [X || X <- [], #bird{b = b} == {bird,X}],
+ [] = [X || X <- [], 3 == X#bird.a],
+
ok.
do_tuple() ->
@@ -361,7 +463,7 @@ find_best([], <<"a">>) ->
find_best([], nil) ->
{error,<<"should not get here">>}.
-test_size(Config) ->
+test_size(_Config) ->
2 = do_test_size({a,b}),
4 = do_test_size(<<42:32>>),
ok.
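The numbers/1 additions above poke at how the SSA type pass combines type information: 'join' is applied where control flow merges (the two case branches) and 'meet' where two pieces of knowledge about the same value are intersected. A heavily simplified sketch of those lattice operations, meant only to show the shape of the idea and not the real beam_ssa_type code:

    %% join/2: least upper bound, used when branches merge.
    join(T, T) -> T;
    join(integer, float) -> number;
    join(float, integer) -> number;
    join(number, T) when T =:= integer; T =:= float -> number;
    join(T, number) when T =:= integer; T =:= float -> number;
    join(_, _) -> any.

    %% meet/2: greatest lower bound, used when a value is narrowed.
    meet(T, T) -> T;
    meet(number, T) when T =:= integer; T =:= float -> T;
    meet(T, number) when T =:= integer; T =:= float -> T;
    meet(_, _) -> none.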
diff --git a/lib/compiler/test/beam_utils_SUITE.erl b/lib/compiler/test/beam_utils_SUITE.erl
index ac19305d69..eb0af59f9d 100644
--- a/lib/compiler/test/beam_utils_SUITE.erl
+++ b/lib/compiler/test/beam_utils_SUITE.erl
@@ -26,7 +26,7 @@
select/1,y_catch/1,otp_8949_b/1,liveopt/1,coverage/1,
y_registers/1,user_predef/1,scan_f/1,cafu/1,
receive_label/1,read_size_file_version/1,not_used/1,
- is_used_fr/1]).
+ is_used_fr/1,unsafe_is_function/1]).
-export([id/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -53,7 +53,8 @@ groups() ->
cafu,
read_size_file_version,
not_used,
- is_used_fr
+ is_used_fr,
+ unsafe_is_function
]}].
init_per_suite(Config) ->
@@ -196,7 +197,7 @@ do_bs_init_4(Arg1, Arg2) ->
id(Rewrite)
end/binary,
"/shared">>);
- Other ->
+ _Other ->
error
end.
@@ -552,7 +553,7 @@ not_used_p(_C, S, K, L) when is_record(K, k) ->
id(K)
end.
-is_used_fr(Config) ->
+is_used_fr(_Config) ->
1 = is_used_fr(self(), self()),
1 = is_used_fr(self(), other),
receive 1 -> ok end,
@@ -570,6 +571,24 @@ is_used_fr(X, Y) ->
end,
X ! 1.
+%% ERL-778.
+unsafe_is_function(_Config) ->
+ {undefined,any} = unsafe_is_function(undefined, any),
+ {ok,any} = unsafe_is_function(fun() -> ok end, any),
+ {'EXIT',{{case_clause,_},_}} = (catch unsafe_is_function(fun(_) -> ok end, any)),
+ ok.
+
+unsafe_is_function(F, M) ->
+ %% There would be an internal consistency failure:
+ %% Instruction: {bif,is_function,{f,0},[{x,0},{integer,0}],{x,2}}
+ %% Error: {uninitialized_reg,{y,0}}:
+
+ NewValue = case is_function(F, 0) of
+ true -> F();
+ false when F =:= undefined -> undefined
+ end,
+ {NewValue,M}.
+
%% The identity function.
id(I) -> I.
diff --git a/lib/compiler/test/beam_validator_SUITE.erl b/lib/compiler/test/beam_validator_SUITE.erl
index d3e544a9cc..8b39fce479 100644
--- a/lib/compiler/test/beam_validator_SUITE.erl
+++ b/lib/compiler/test/beam_validator_SUITE.erl
@@ -34,7 +34,7 @@
undef_label/1,illegal_instruction/1,failing_gc_guard_bif/1,
map_field_lists/1,cover_bin_opt/1,
val_dsetel/1,bad_tuples/1,bad_try_catch_nesting/1,
- receive_stacked/1]).
+ receive_stacked/1,aliased_types/1,type_conflict/1]).
-include_lib("common_test/include/ct.hrl").
@@ -63,7 +63,7 @@ groups() ->
undef_label,illegal_instruction,failing_gc_guard_bif,
map_field_lists,cover_bin_opt,val_dsetel,
bad_tuples,bad_try_catch_nesting,
- receive_stacked]}].
+ receive_stacked,aliased_types,type_conflict]}].
init_per_suite(Config) ->
test_lib:recompile(?MODULE),
@@ -107,13 +107,12 @@ xrange(Config) when is_list(Config) ->
Errors = do_val(xrange, Config),
[{{t,sum_1,2},
{{bif,'+',{f,0},[{x,-1},{x,1}],{x,0}},4,
- {uninitialized_reg,{x,-1}}}},
+ {bad_register,{x,-1}}}},
{{t,sum_2,2},
- {{bif,'+',{f,0},[{x,0},{x,1023}],{x,0}},4,
- {uninitialized_reg,{x,1023}}}},
+ {{bif,'+',{f,0},[{x,0},{x,1023}],{x,0}},4,limit}},
{{t,sum_3,2},
{{bif,'+',{f,0},[{x,0},{x,1}],{x,-1}},4,
- {invalid_store,{x,-1},number}}},
+ {bad_register,{x,-1}}}},
{{t,sum_4,2},
{{bif,'+',{f,0},[{x,0},{x,1}],{x,1023}},4,limit}}] = Errors,
ok.
@@ -122,15 +121,15 @@ yrange(Config) when is_list(Config) ->
Errors = do_val(yrange, Config),
[{{t,sum_1,2},
{{move,{x,1},{y,-1}},5,
- {invalid_store,{y,-1},term}}},
+ {bad_register,{y,-1}}}},
{{t,sum_2,2},
{{bif,'+',{f,0},[{x,0},{y,1024}],{x,0}},7,
- {uninitialized_reg,{y,1024}}}},
+ limit}},
{{t,sum_3,2},
{{move,{x,1},{y,1024}},5,limit}},
{{t,sum_4,2},
{{move,{x,1},{y,-1}},5,
- {invalid_store,{y,-1},term}}}] = Errors,
+ {bad_register,{y,-1}}}}] = Errors,
ok.
stack(Config) when is_list(Config) ->
@@ -157,9 +156,9 @@ call_last(Config) when is_list(Config) ->
merge_undefined(Config) when is_list(Config) ->
Errors = do_val(merge_undefined, Config),
[{{t,handle_call,2},
- {{call_ext,1,{extfunc,erlang,exit,1}},
- 10,
- {uninitialized_reg,{y,0}}}}] = Errors,
+ {{call_ext,2,{extfunc,debug,filter,2}},
+ 22,
+ {uninitialized_reg,{y,_}}}}] = Errors,
ok.
uninit(Config) when is_list(Config) ->
@@ -178,7 +177,7 @@ unsafe_catch(Config) when is_list(Config) ->
Errors = do_val(unsafe_catch, Config),
[{{t,small,2},
{{bs_put_integer,{f,0},{integer,16},1,
- {field_flags,[unsigned,big]},{y,0}},
+ {field_flags,[unsigned,big]},{y,0}},
20,
{unassigned,{y,0}}}}] = Errors,
ok.
@@ -211,19 +210,19 @@ bad_catch_try(Config) when is_list(Config) ->
Errors = do_val(bad_catch_try, Config),
[{{bad_catch_try,bad_1,1},
{{'catch',{x,0},{f,3}},
- 5,{invalid_store,{x,0},{catchtag,[3]}}}},
+ 5,{invalid_tag_register,{x,0}}}},
{{bad_catch_try,bad_2,1},
{{catch_end,{x,9}},
- 8,{source_not_y_reg,{x,9}}}},
+ 8,{invalid_tag_register,{x,9}}}},
{{bad_catch_try,bad_3,1},
- {{catch_end,{y,1}},9,{bad_type,{atom,kalle}}}},
+ {{catch_end,{y,1}},9,{invalid_tag,{y,1},{atom,kalle}}}},
{{bad_catch_try,bad_4,1},
- {{'try',{x,0},{f,15}},5,{invalid_store,{x,0},{trytag,[15]}}}},
+ {{'try',{x,0},{f,15}},5,{invalid_tag_register,{x,0}}}},
{{bad_catch_try,bad_5,1},
- {{try_case,{y,1}},12,{bad_type,term}}},
+ {{try_case,{y,1}},12,{invalid_tag,{y,1},term}}},
{{bad_catch_try,bad_6,1},
{{move,{integer,1},{y,1}},7,
- {invalid_store,{y,1},{integer,1}}}}] = Errors,
+ {invalid_store,{y,1}}}}] = Errors,
ok.
cons_guard(Config) when is_list(Config) ->
@@ -247,7 +246,7 @@ freg_range(Config) when is_list(Config) ->
{{t,sum_3,2},
{{bif,fadd,{f,0},[{fr,0},{fr,1}],{fr,-1}},
7,
- {bad_target,{fr,-1}}}},
+ {bad_register,{fr,-1}}}},
{{t,sum_4,2},
{{bif,fadd,{f,0},[{fr,0},{fr,1}],{fr,1024}},
7,
@@ -539,37 +538,37 @@ receive_stacked(Config) ->
[{{receive_stacked,f1,0},
{{loop_rec_end,{f,3}},
17,
- {fragile_message_reference,{y,0}}}},
+ {fragile_message_reference,{y,_}}}},
{{receive_stacked,f2,0},
- {{test_heap,3,0},10,{fragile_message_reference,{y,1}}}},
+ {{test_heap,3,0},10,{fragile_message_reference,{y,_}}}},
{{receive_stacked,f3,0},
- {{test_heap,3,0},10,{fragile_message_reference,{y,1}}}},
+ {{test_heap,3,0},10,{fragile_message_reference,{y,_}}}},
{{receive_stacked,f4,0},
- {{test_heap,3,0},10,{fragile_message_reference,{y,1}}}},
+ {{test_heap,3,0},10,{fragile_message_reference,{y,_}}}},
{{receive_stacked,f5,0},
{{loop_rec_end,{f,23}},
23,
- {fragile_message_reference,{y,1}}}},
+ {fragile_message_reference,{y,_}}}},
{{receive_stacked,f6,0},
- {{gc_bif,byte_size,{f,29},0,[{y,0}],{x,0}},
+ {{gc_bif,byte_size,{f,29},0,[{y,_}],{x,0}},
12,
- {fragile_message_reference,{y,0}}}},
+ {fragile_message_reference,{y,_}}}},
{{receive_stacked,f7,0},
{{loop_rec_end,{f,33}},
20,
- {fragile_message_reference,{y,0}}}},
+ {fragile_message_reference,{y,_}}}},
{{receive_stacked,f8,0},
{{loop_rec_end,{f,38}},
20,
- {fragile_message_reference,{y,0}}}},
+ {fragile_message_reference,{y,_}}}},
{{receive_stacked,m1,0},
{{loop_rec_end,{f,43}},
19,
- {fragile_message_reference,{y,0}}}},
+ {fragile_message_reference,{y,_}}}},
{{receive_stacked,m2,0},
{{loop_rec_end,{f,48}},
33,
- {fragile_message_reference,{y,0}}}}] = Errors,
+ {fragile_message_reference,{y,_}}}}] = Errors,
%% Compile the original source code as a smoke test.
Data = proplists:get_value(data_dir, Config),
@@ -579,6 +578,79 @@ receive_stacked(Config) ->
ok.
+aliased_types(Config) ->
+ Seq = lists:seq(1, 5),
+ 1 = aliased_types_1(Seq, Config),
+
+ {1,1} = aliased_types_2(Seq),
+ {42,none} = aliased_types_2([]),
+
+ gurka = aliased_types_3([gurka]),
+ gaffel = aliased_types_3([gaffel]),
+
+ ok.
+
+%% ERL-735: validator failed to track types on aliased registers, rejecting
+%% legitimate optimizations.
+%%
+%% move x0 y0
+%% bif hd L1 x0
+%% get_hd y0 %% The validator failed to see that y0 was a list
+%%
+aliased_types_1(Bug, Config) ->
+ if
+ Config =/= [gurka, gaffel] -> %% Pointless branch.
+ _ = hd(Bug),
+ lists:seq(1, 5),
+ hd(Bug)
+ end.
+
+%% ERL-832: validator failed to realize that a Y register was a cons.
+aliased_types_2(Bug) ->
+ Res = case Bug of
+ [] -> id(42);
+ _ -> hd(Bug)
+ end,
+ {Res,case Bug of
+ [] -> none;
+ _ -> hd(Bug)
+ end}.
+
+%% ERL-832 part deux; validator failed to realize that an aliased register was
+%% a cons.
+aliased_types_3(Bug) ->
+ List = [Y || Y <- Bug],
+ case List of
+ [] -> Bug;
+ _ ->
+ if
+ hd(List) -> a:a();
+ true -> ok
+ end,
+ hd(List)
+ end.
+
+
+%% ERL-867; validation proceeded after a type conflict, causing incorrect types
+%% to be joined.
+
+-record(r, { e1 = e1, e2 = e2 }).
+
+type_conflict(Config) when is_list(Config) ->
+ {e1, e2} = type_conflict_1(#r{}),
+ ok.
+
+type_conflict_1(C) ->
+ Src = id(C#r.e2),
+ TRes = try id(Src) of
+ R -> R
+ catch
+ %% C:R can never match, yet the validator assumed that the type of 'C'
+ %% was an atom from here on.
+ C:R -> R
+ end,
+ {C#r.e1, TRes}.
+
%%%-------------------------------------------------------------------------
transform_remove(Remove, Module) ->
@@ -637,3 +709,6 @@ night(Turned) ->
ok.
participating(_, _, _, _) -> ok.
+
+id(I) ->
+ I.
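To make the type_conflict_1/1 comment above concrete: in a try ... catch, the pattern before ':' matches the exception class (error, exit or throw), so a catch clause whose class position is a variable already bound to something else can never match. A minimal stand-alone illustration (hypothetical helper, not part of the suite):

    bound_class_never_matches(C) when not is_atom(C) ->
        try
            error(boom)
        catch
            C:_R -> unreachable;   %% C is bound to a non-atom; this never matches
            error:boom -> ok       %% this is the clause that actually matches
        end.

Calling bound_class_never_matches({some,term}) returns ok; the first clause is dead code, which is precisely why the validator must not let it narrow the inferred type of C.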
diff --git a/lib/compiler/test/beam_validator_SUITE_data/bad_bin_match.S b/lib/compiler/test/beam_validator_SUITE_data/bad_bin_match.S
index a60ca1e89a..c7610971f1 100644
--- a/lib/compiler/test/beam_validator_SUITE_data/bad_bin_match.S
+++ b/lib/compiler/test/beam_validator_SUITE_data/bad_bin_match.S
@@ -11,5 +11,5 @@
{label,1}.
{func_info,{atom,t},{atom,t},1}.
{label,2}.
- {test,bs_start_match2,{f,1},1,[{x,0},0],{x,0}}.
+ {test,bs_start_match3,{f,1},1,[{x,0}],{x,0}}.
return.
diff --git a/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S b/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S
index 481d55045d..aa344807e4 100644
--- a/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S
+++ b/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S
@@ -15,8 +15,9 @@
{select_val,{x,0},{f,1},{list,[{atom,gurka},{f,3},{atom,delete},{f,4}]}}.
{label,3}.
{allocate_heap,2,6,2}.
- %% The Y registers are not initialized here.
{test,is_eq_exact,{f,5},[{x,0},{atom,ok}]}.
+ %% This is unreachable since {x,0} is known not to be 'ok'. We should not
+ %% fail with an "uninitialized y registers" error on the erlang:exit/1 call.
{move,{atom,nisse},{x,0}}.
{call_ext,1,{extfunc,erlang,exit,1}}.
{label,4}.
@@ -29,6 +30,7 @@
{call_ext,2,{extfunc,io,format,2}}.
{test,is_ne_exact,{f,6},[{x,0},{atom,ok}]}.
{label,5}.
+ %% The Y registers are not initialized here.
{move,{atom,logReader},{x,1}}.
{move,{atom,console},{x,0}}.
{call_ext,2,{extfunc,debug,filter,2}}.
diff --git a/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S b/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S
index cca052a9c4..a878204d16 100644
--- a/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S
+++ b/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S
@@ -172,7 +172,7 @@
{allocate_zero,1,0}.
{label,28}.
{loop_rec,{f,30},{x,0}}.
- {test,bs_start_match2,{f,29},1,[{x,0},0],{x,0}}.
+ {test,bs_start_match3,{f,29},1,[{x,0}],{x,0}}.
{test,bs_get_integer2,
{f,29},
1,
@@ -219,7 +219,7 @@
{allocate_zero,1,0}.
{label,33}.
{loop_rec,{f,35},{x,0}}.
- {test,bs_start_match2,{f,34},1,[{x,0},0],{x,0}}.
+ {test,bs_start_match3,{f,34},1,[{x,0}],{x,0}}.
{test,bs_get_integer2,
{f,34},
1,
@@ -240,7 +240,7 @@
{y,0}}.
{'%',{no_bin_opt,{binary_used_in,{test,is_binary,{f,34},[{y,0}]}},
[63,{file,"receive_stacked.erl"}]}}.
- {test,is_binary,{f,34},[{y,0}]}.
+ {test,is_eq_exact,{f,34},[{y,0},{literal,<<0,1,2,3>>}]}.
remove_message.
{move,{integer,42},{x,0}}.
{line,[{location,"receive_stacked.erl",64}]}.
@@ -262,7 +262,7 @@
{allocate_zero,1,0}.
{label,38}.
{loop_rec,{f,40},{x,0}}.
- {test,bs_start_match2,{f,39},1,[{x,0},0],{x,1}}.
+ {test,bs_start_match3,{f,39},1,[{x,0}],{x,1}}.
{test,bs_get_integer2,
{f,39},
2,
@@ -283,7 +283,7 @@
{y,0}}.
{'%',{no_bin_opt,{[{x,1},{y,0}],{loop_rec_end,{f,38}},not_handled},
[70,{file,"receive_stacked.erl"}]}}.
- {test,is_binary,{f,39},[{x,0}]}.
+ {test,is_eq_exact,{f,39},[{x,0},{literal,<<0,1,2,3>>}]}.
remove_message.
{move,{integer,42},{x,0}}.
{line,[{location,"receive_stacked.erl",71}]}.
diff --git a/lib/compiler/test/bif_SUITE.erl b/lib/compiler/test/bif_SUITE.erl
index c4c709eb3a..423a7666af 100644
--- a/lib/compiler/test/bif_SUITE.erl
+++ b/lib/compiler/test/bif_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2016. All Rights Reserved.
+%% Copyright Ericsson AB 2016-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- beam_validator/1,trunc_and_friends/1,cover_safe_bifs/1]).
+ beam_validator/1,trunc_and_friends/1,cover_safe_and_pure_bifs/1]).
suite() ->
[{ct_hooks,[ts_install_cth]}].
@@ -35,7 +35,7 @@ groups() ->
[{p,[parallel],
[beam_validator,
trunc_and_friends,
- cover_safe_bifs
+ cover_safe_and_pure_bifs
]}].
init_per_suite(Config) ->
@@ -106,7 +106,7 @@ trunc_template(Func, Bif) ->
catch error:badarg -> ok end,
ok.").
-cover_safe_bifs(Config) ->
+cover_safe_and_pure_bifs(Config) ->
_ = get(),
_ = get_keys(a),
_ = group_leader(),
@@ -118,5 +118,6 @@ cover_safe_bifs(Config) ->
_ = processes(),
_ = registered(),
_ = term_to_binary(Config),
+ 42 = list_to_integer("2A", 16),
ok.
diff --git a/lib/compiler/test/bs_bincomp_SUITE.erl b/lib/compiler/test/bs_bincomp_SUITE.erl
index a5d49020a9..0419b16eea 100644
--- a/lib/compiler/test/bs_bincomp_SUITE.erl
+++ b/lib/compiler/test/bs_bincomp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/bs_bit_binaries_SUITE.erl b/lib/compiler/test/bs_bit_binaries_SUITE.erl
index 17faa012bc..526769f3a6 100644
--- a/lib/compiler/test/bs_bit_binaries_SUITE.erl
+++ b/lib/compiler/test/bs_bit_binaries_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/bs_construct_SUITE.erl b/lib/compiler/test/bs_construct_SUITE.erl
index ccc49df005..69017d87e7 100644
--- a/lib/compiler/test/bs_construct_SUITE.erl
+++ b/lib/compiler/test/bs_construct_SUITE.erl
@@ -153,6 +153,8 @@ l(I_13, I_big1, I_16, Bin) ->
[0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0,
16#77,16#FF,16#FF,16#FF,16#FF,16#FF,16#FF,16#FF,16#FF,16#FF,16#FF,
16#FF,16#FF,16#FF,16#FF,16#FF,16#FF]),
+ ?T(<< (<<"abc",7:3>>):3/binary >>,
+ [$a,$b,$c]),
%% Mix different units.
?T(<<37558955:(I_16-12)/unit:8,1:1>>,
@@ -311,6 +313,9 @@ fail(Config) when is_list(Config) ->
{'EXIT',{badarg,_}} = (catch <<0:(-(1 bsl 100))>>),
{'EXIT',{badarg,_}} = (catch <<Bin/binary,0:(-(1 bsl 100))>>),
+ %% Unaligned sizes with literal binaries.
+ {'EXIT',{badarg,_}} = (catch <<0,(<<7777:17>>)/binary>>),
+
ok.
float_bin(Config) when is_list(Config) ->
diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl
index b4277f0705..2cfcb841a7 100644
--- a/lib/compiler/test/bs_match_SUITE.erl
+++ b/lib/compiler/test/bs_match_SUITE.erl
@@ -40,7 +40,10 @@
map_and_binary/1,unsafe_branch_caching/1,
bad_literals/1,good_literals/1,constant_propagation/1,
parse_xml/1,get_payload/1,escape/1,num_slots_different/1,
- beam_bsm/1,guard/1,is_ascii/1,non_opt_eq/1,erl_689/1]).
+ beam_bsm/1,guard/1,is_ascii/1,non_opt_eq/1,
+ expression_before_match/1,erl_689/1,restore_on_call/1,
+ restore_after_catch/1,matches_on_parameter/1,big_positions/1,
+ matching_meets_apply/1,bs_start_match2_defs/1]).
-export([coverage_id/1,coverage_external_ignore/2]).
@@ -56,7 +59,7 @@ all() ->
[{group,p}].
groups() ->
- [{p,[parallel],
+ [{p,[],
[size_shadow,int_float,otp_5269,null_fields,wiger,
bin_tail,save_restore,
partitioned_bs_match,function_clause,unit,
@@ -72,7 +75,10 @@ groups() ->
map_and_binary,unsafe_branch_caching,
bad_literals,good_literals,constant_propagation,parse_xml,
get_payload,escape,num_slots_different,
- beam_bsm,guard,is_ascii,non_opt_eq,erl_689]}].
+ beam_bsm,guard,is_ascii,non_opt_eq,
+ expression_before_match,erl_689,restore_on_call,
+ matches_on_parameter,big_positions,
+ matching_meets_apply,bs_start_match2_defs]}].
init_per_suite(Config) ->
@@ -736,6 +742,20 @@ coverage(Config) when is_list(Config) ->
binary = coverage_bitstring(<<7>>),
bitstring = coverage_bitstring(<<7:4>>),
other = coverage_bitstring([a]),
+
+ %% Cover code in beam_trim.
+
+ {done,<<17,53>>,[253,155,200]} =
+ coverage_trim(<<253,155,200,17,53>>, e0, e1, e2, e3, []),
+
+ <<"(right|linux)">> = coverage_trim_1(<<"">>, <<"right">>, <<"linux">>),
+ <<"/(right|linux)">> = coverage_trim_1(<<"/">>, <<"right">>, <<"linux">>),
+ <<"(left|linux)/(right|linux)">> =
+ coverage_trim_1(<<"left">>, <<"right">>, <<"linux">>),
+
+ {10,<<"-">>,""} = coverage_trim_2(<<"-">>, 10, []),
+ {8,<<"-">>,"aa"} = coverage_trim_2(<<"aa-">>, 10, []),
+
ok.
coverage_fold(Fun, Acc, <<H,T/binary>>) ->
@@ -830,6 +850,37 @@ coverage_bitstring(Bin) when is_binary(Bin) -> binary;
coverage_bitstring(<<_/bitstring>>) -> bitstring;
coverage_bitstring(_) -> other.
+coverage_trim(<<C:8,T/binary>> = Bin, E0, E1, E2, E3, Acc) ->
+ case id(C > 128) of
+ true ->
+ coverage_trim(T, E0, E1, E2, E3, [C|Acc]);
+ false ->
+ {done,Bin,lists:reverse(Acc)}
+ end.
+
+coverage_trim_1(<<>>, Right, OsType) ->
+ do_coverage_trim_1(Right, OsType);
+coverage_trim_1(<<"/">>, Right, OsType) ->
+ <<"/",(do_coverage_trim_1(Right, OsType))/binary>>;
+coverage_trim_1(Left, Right, OsType) ->
+ <<(do_coverage_trim_1(Left, OsType))/binary,
+ "/",
+ (do_coverage_trim_1(Right, OsType))/binary>>.
+
+do_coverage_trim_1(A, OsType) ->
+ <<"(",A/binary,"|",OsType/binary,")">>.
+
+coverage_trim_2(<<C/utf8,R/binary>> = Bin, I, L) ->
+ case printable_char(C) of
+ true ->
+ coverage_trim_2(R, I - 1, [C | L]);
+ false ->
+ {I,Bin,lists:reverse(L)}
+ end.
+
+printable_char($a) -> true;
+printable_char(_) -> false.
+
multiple_uses(Config) when is_list(Config) ->
{344,62879,345,<<245,159,1,89>>} = multiple_uses_1(<<1,88,245,159,1,89>>),
true = multiple_uses_2(<<0,0,197,18>>),
@@ -1734,14 +1785,22 @@ non_opt_eq([], <<>>) ->
%% ERL-689
-erl_689(Config) ->
+erl_689(_Config) ->
{{0, 0, 0}, <<>>} = do_erl_689_1(<<0>>, ?MODULE),
{{2018, 8, 7}, <<>>} = do_erl_689_1(<<4,2018:16/little,8,7>>, ?MODULE),
{{0, 0, 0}, <<>>} = do_erl_689_2(?MODULE, <<0>>),
{{2018, 8, 7}, <<>>} = do_erl_689_2(?MODULE, <<4,2018:16/little,8,7>>),
ok.
-do_erl_689_1(<<Length, Data/binary>>, _) ->
+do_erl_689_1(Arg1, Arg2) ->
+ Res = do_erl_689_1a(Arg1, Arg2),
+ Res = do_erl_689_1b(Arg1, Arg2).
+
+do_erl_689_2(Arg1, Arg2) ->
+ Res = do_erl_689_2a(Arg1, Arg2),
+ Res = do_erl_689_2b(Arg1, Arg2).
+
+do_erl_689_1a(<<Length, Data/binary>>, _) ->
case {Data, Length} of
{_, 0} ->
%% bs_context_to_binary would incorrectly set Data to the original
@@ -1751,7 +1810,19 @@ do_erl_689_1(<<Length, Data/binary>>, _) ->
{{Y, M, D}, Rest}
end.
-do_erl_689_2(_, <<Length, Data/binary>>) ->
+do_erl_689_1b(<<Length, Data/binary>>, _) ->
+ case {Data, Length} of
+ {_, 0} ->
+ %% bs_context_to_binary would incorrectly set Data to the original
+ %% binary (before matching in the function head).
+ id(0),
+ {{0, 0, 0}, Data};
+ {<<Y:16/little, M, D, Rest/binary>>, 4} ->
+ id(1),
+ {{Y, M, D}, Rest}
+ end.
+
+do_erl_689_2a(_, <<Length, Data/binary>>) ->
case {Length, Data} of
{0, _} ->
%% bs_context_to_binary would incorrectly set Data to the original
@@ -1761,7 +1832,140 @@ do_erl_689_2(_, <<Length, Data/binary>>) ->
{{Y, M, D}, Rest}
end.
+do_erl_689_2b(_, <<Length, Data/binary>>) ->
+ case {Length, Data} of
+ {0, _} ->
+ %% bs_context_to_binary would incorrectly set Data to the original
+ %% binary (before matching in the function head).
+ id(0),
+ {{0, 0, 0}, Data};
+ {4, <<Y:16/little, M, D, Rest/binary>>} ->
+ id(1),
+ {{Y, M, D}, Rest}
+ end.
+
+%% ERL-753
+
+bs_start_match2_defs(_Config) ->
+ {<<"http://127.0.0.1:1234/vsaas/hello">>} = api_url(<<"hello">>),
+ {"https://127.0.0.1:4321/vsaas/hello"} = api_url({https, "hello"}).
+
+api_url(URL) ->
+ case URL of
+ <<_/binary>> -> {<<"http://127.0.0.1:1234/vsaas/",URL/binary>>};
+ {https, [_|_] = URL1} -> {"https://127.0.0.1:4321/vsaas/"++URL1}
+ end.
+
check(F, R) ->
R = F().
+%% Make sure that an expression that comes between function start and a match
+%% expression passes validation.
+expression_before_match(Config) when is_list(Config) ->
+ <<_,R/binary>> = id(<<0,1,2,3>>),
+ {1, <<2,3>>} = expression_before_match_1(R),
+ ok.
+
+expression_before_match_1(R) ->
+ A = id(1),
+ case R of
+ <<1,Bar/binary>> -> {A, Bar};
+ <<>> -> {A, baz}
+ end.
+
+%% Make sure that context positions are updated on calls.
+restore_on_call(Config) when is_list(Config) ->
+ ok = restore_on_call_1(<<0, 1, 2>>).
+
+restore_on_call_1(<<0, Rest/binary>>) ->
+ <<2>> = restore_on_call_2(Rest),
+ <<2>> = restore_on_call_2(Rest), %% {badmatch, <<>>} on missing restore.
+ ok.
+
+restore_on_call_2(<<1, Rest/binary>>) -> Rest;
+restore_on_call_2(Other) -> Other.
+
+%% 'catch' must invalidate positions.
+restore_after_catch(Config) when is_list(Config) ->
+ <<0, 1>> = restore_after_catch_1(<<0, 1>>),
+ ok.
+
+restore_after_catch_1(<<A/binary>>) ->
+ try throw_after_byte(A) of
+ _ -> impossible
+ catch
+ throw:_Any ->
+ %% Will equal <<1>> if the bug is present.
+ A
+ end.
+
+throw_after_byte(<<_,_/binary>>) ->
+ throw(away).
+
+matches_on_parameter(Config) when is_list(Config) ->
+ %% This improves coverage for matching on "naked" parameters.
+ {<<"urka">>, <<"a">>} = matches_on_parameter_1(<<"gurka">>),
+ ok = (catch matches_on_parameter_2(<<"10001110101">>, 0)).
+
+matches_on_parameter_1(Bin) ->
+ <<"g", A/binary>> = Bin,
+ <<_,_,"rk", B/binary>> = Bin,
+ {A, B}.
+
+matches_on_parameter_2(Bin, Offset) ->
+ <<_:Offset, Bit:1, Rest/bits>> = Bin,
+ case bit_size(Rest) of
+ 0 -> throw(ok);
+ _ -> [Bit | matches_on_parameter_2(Bin, Offset + 1)]
+ end.
+
+big_positions(Config) when is_list(Config) ->
+ %% This provides coverage for when match context positions no longer fit
+ %% into an immediate on 32-bit platforms.
+
+ A = <<0:((1 bsl 27) - 8), $A, 1:1, "gurka", $A>>,
+ B = <<0:((1 bsl 27) - 8), $B, "hello", $B>>,
+
+ {a,$A} = bp_start_match(A),
+ {b,$B} = bp_start_match(B),
+ {a,$A} = bp_getpos(A),
+ {b,$B} = bp_getpos(B),
+
+ ok.
+
+%% After the first iteration the context's position will no longer fit into an
+%% immediate. To improve performance the bs_start_match3 instruction will
+%% return a new context with an updated base position so that we won't have to
+%% resort to using bigints.
+bp_start_match(<<_:(1 bsl 27),T/bits>>) -> bp_start_match(T);
+bp_start_match(<<1:1,"gurka",A>>) -> {a,A};
+bp_start_match(<<"hello",B>>) -> {b,B}.
+
+%% This is a corner case where the above didn't work perfectly; if the position
+%% was _just_ small enough to fit into an immediate when bs_start_match3 was
+%% hit, but too large at bs_get_position, then it must be saved as a bigint.
+bp_getpos(<<_:((1 bsl 27) - 8),T/bits>>) -> bp_getpos(T);
+bp_getpos(<<A,1:1,"gurka",A>>) -> {a,A};
+bp_getpos(<<B,"hello",B>>) -> {b,B}.
+
+matching_meets_apply(_Config) ->
+ <<"abc">> = do_matching_meets_apply(<<"/abc">>, []),
+ 42 = do_matching_meets_apply(<<"">>, {erlang,-42}),
+ 100 = do_matching_meets_apply(no_binary, {erlang,-100}),
+ ok.
+
+do_matching_meets_apply(<<$/, Rest/binary>>, _Handler) ->
+ id(Rest);
+do_matching_meets_apply(<<_/binary>>=Name, never_matches_a) ->
+ %% Used to crash the compiler because variables in a remote
+ %% were not handled properly by beam_ssa_bsm.
+ Name:foo(gurka);
+do_matching_meets_apply(<<_/binary>>=Name, never_matches_b) ->
+ %% Another case of the above.
+ foo:Name(gurka);
+do_matching_meets_apply(_Bin, {Handler, State}) ->
+ %% Another case of the above.
+ Handler:abs(State).
+
+
id(I) -> I.
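Some rough numbers behind big_positions/1 above, stated as an assumption about 32-bit emulators rather than something taken from this patch: small (immediate) integers there top out around 2^27 - 1, while match-context positions are counted in bits, so merely skipping a (1 bsl 27)-bit prefix pushes the position past the immediate range:

    1> MaxSmall32 = (1 bsl 27) - 1.   %% commonly cited 32-bit small-integer limit
    134217727
    2> 1 bsl 27.                      %% position (in bits) after the big prefix
    134217728
    3> (1 bsl 27) > MaxSmall32.       %% hence the base-position/bignum handling
    true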
diff --git a/lib/compiler/test/bs_utf_SUITE.erl b/lib/compiler/test/bs_utf_SUITE.erl
index 4330677260..8ea4a849ec 100644
--- a/lib/compiler/test/bs_utf_SUITE.erl
+++ b/lib/compiler/test/bs_utf_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/compilation_SUITE.erl b/lib/compiler/test/compilation_SUITE.erl
index def7a60f45..74f9dbd9b4 100644
--- a/lib/compiler/test/compilation_SUITE.erl
+++ b/lib/compiler/test/compilation_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/compile_SUITE.erl b/lib/compiler/test/compile_SUITE.erl
index 85f0b7dc46..408af80dd9 100644
--- a/lib/compiler/test/compile_SUITE.erl
+++ b/lib/compiler/test/compile_SUITE.erl
@@ -28,15 +28,15 @@
init_per_group/2,end_per_group/2,
app_test/1,appup_test/1,
debug_info/4, custom_debug_info/1, custom_compile_info/1,
- file_1/1, forms_2/1, module_mismatch/1, big_file/1, outdir/1,
+ file_1/1, forms_2/1, module_mismatch/1, outdir/1,
binary/1, makedep/1, cond_and_ifdef/1, listings/1, listings_big/1,
other_output/1, kernel_listing/1, encrypted_abstr/1,
strict_record/1, utf8_atoms/1, utf8_functions/1, extra_chunks/1,
cover/1, env/1, core_pp/1, tuple_calls/1,
- core_roundtrip/1, asm/1, optimized_guards/1,
+ core_roundtrip/1, asm/1,
sys_pre_attributes/1, dialyzer/1,
warnings/1, pre_load_check/1, env_compiler_options/1,
- bc_options/1, deterministic_include/1
+ bc_options/1, deterministic_include/1, deterministic_paths/1
]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -46,14 +46,14 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
-spec all() -> all_return_type().
all() ->
- [app_test, appup_test, file_1, forms_2, module_mismatch, big_file, outdir,
+ [app_test, appup_test, file_1, forms_2, module_mismatch, outdir,
binary, makedep, cond_and_ifdef, listings, listings_big,
other_output, kernel_listing, encrypted_abstr, tuple_calls,
strict_record, utf8_atoms, utf8_functions, extra_chunks,
- cover, env, core_pp, core_roundtrip, asm, optimized_guards,
+ cover, env, core_pp, core_roundtrip, asm,
sys_pre_attributes, dialyzer, warnings, pre_load_check,
env_compiler_options, custom_debug_info, bc_options,
- custom_compile_info, deterministic_include].
+ custom_compile_info, deterministic_include, deterministic_paths].
groups() ->
[].
@@ -104,6 +104,7 @@ file_1(Config) when is_list(Config) ->
compile_and_verify(Simple, Target, []),
compile_and_verify(Simple, Target, [native]),
compile_and_verify(Simple, Target, [debug_info]),
+ compile_and_verify(Simple, Target, [no_postopt]),
{ok,simple} = compile:file(Simple, [no_line_info]), %Coverage
{ok,simple} = compile:file(Simple, [{eprof,beam_z}]), %Coverage
@@ -231,17 +232,6 @@ module_mismatch(Config) when is_list(Config) ->
ok.
-big_file(Config) when is_list(Config) ->
- {Big,Target} = get_files(Config, big, "big_file"),
- ok = file:set_cwd(filename:dirname(Target)),
- compile_and_verify(Big, Target, []),
- compile_and_verify(Big, Target, [debug_info]),
- compile_and_verify(Big, Target, [no_postopt]),
-
- %% Cleanup.
- ok = file:delete(Target),
- ok.
-
%% Tests that the {outdir, Dir} option works.
outdir(Config) when is_list(Config) ->
@@ -370,43 +360,37 @@ do_file_listings(DataDir, PrivDir, [File|Files]) ->
TargetDir = filename:join(PrivDir, listings),
ok = file:make_dir(TargetDir),
- %% Test all dedicated listing options.
- do_listing(Simple, TargetDir, 'S'),
- do_listing(Simple, TargetDir, 'E'),
- do_listing(Simple, TargetDir, 'P'),
- do_listing(Simple, TargetDir, dpp, ".pp"),
- do_listing(Simple, TargetDir, dabstr, ".abstr"),
- do_listing(Simple, TargetDir, dexp, ".expand"),
- do_listing(Simple, TargetDir, dcore, ".core"),
- do_listing(Simple, TargetDir, doldinline, ".oldinline"),
- do_listing(Simple, TargetDir, dinline, ".inline"),
- do_listing(Simple, TargetDir, dcore, ".core"),
- do_listing(Simple, TargetDir, dcopt, ".copt"),
- do_listing(Simple, TargetDir, dcbsm, ".core_bsm"),
- do_listing(Simple, TargetDir, dsetel, ".dsetel"),
- do_listing(Simple, TargetDir, dkern, ".kernel"),
- do_listing(Simple, TargetDir, dssa, ".ssa"),
- do_listing(Simple, TargetDir, dssaopt, ".ssaopt"),
- do_listing(Simple, TargetDir, dprecg, ".precodegen"),
- do_listing(Simple, TargetDir, dcg, ".codegen"),
- do_listing(Simple, TargetDir, dblk, ".block"),
- do_listing(Simple, TargetDir, dexcept, ".except"),
- do_listing(Simple, TargetDir, dbs, ".bs"),
- do_listing(Simple, TargetDir, ddead, ".dead"),
- do_listing(Simple, TargetDir, djmp, ".jump"),
- do_listing(Simple, TargetDir, dclean, ".clean"),
- do_listing(Simple, TargetDir, dpeep, ".peep"),
- do_listing(Simple, TargetDir, dopt, ".optimize"),
-
- %% First clean up.
- Listings = filename:join(PrivDir, listings),
- lists:foreach(fun(F) -> ok = file:delete(F) end,
- filelib:wildcard(filename:join(Listings, "*"))),
+ List = [{'S',".S"},
+ {'E',".E"},
+ {'P',".P"},
+ {dpp, ".pp"},
+ {dabstr, ".abstr"},
+ {dexp, ".expand"},
+ {dcore, ".core"},
+ {doldinline, ".oldinline"},
+ {dinline, ".inline"},
+ {dcore, ".core"},
+ {dcopt, ".copt"},
+ {dcbsm, ".core_bsm"},
+ {dkern, ".kernel"},
+ {dssa, ".ssa"},
+ {dssaopt, ".ssaopt"},
+ {dprecg, ".precodegen"},
+ {dcg, ".codegen"},
+ {dblk, ".block"},
+ {dexcept, ".except"},
+ {djmp, ".jump"},
+ {dclean, ".clean"},
+ {dpeep, ".peep"},
+ {dopt, ".optimize"},
+ {diffable, ".S"}],
+ p_listings(List, Simple, TargetDir),
%% Test options that produce a listing file if 'binary' is not given.
do_listing(Simple, TargetDir, to_pp, ".P"),
do_listing(Simple, TargetDir, to_exp, ".E"),
do_listing(Simple, TargetDir, to_core0, ".core"),
+ Listings = filename:join(PrivDir, listings),
ok = file:delete(filename:join(Listings, File ++ ".core")),
do_listing(Simple, TargetDir, to_core, ".core"),
do_listing(Simple, TargetDir, to_kernel, ".kernel"),
@@ -422,24 +406,35 @@ do_file_listings(DataDir, PrivDir, [File|Files]) ->
listings_big(Config) when is_list(Config) ->
{Big,Target} = get_files(Config, big, listings_big),
TargetDir = filename:dirname(Target),
- do_listing(Big, TargetDir, 'S'),
- do_listing(Big, TargetDir, 'E'),
- do_listing(Big, TargetDir, 'P'),
- do_listing(Big, TargetDir, dkern, ".kernel"),
- do_listing(Big, TargetDir, dssa, ".ssa"),
- do_listing(Big, TargetDir, dssaopt, ".ssaopt"),
- do_listing(Big, TargetDir, dprecg, ".precodegen"),
- do_listing(Big, TargetDir, to_dis, ".dis"),
-
- TargetNoext = filename:rootname(Target, code:objfile_extension()),
- {ok,big} = compile:file(TargetNoext, [from_asm,{outdir,TargetDir}]),
-
- %% Cleanup.
- ok = file:delete(Target),
- lists:foreach(fun(F) -> ok = file:delete(F) end,
- filelib:wildcard(filename:join(TargetDir, "*"))),
- ok = file:del_dir(TargetDir),
- ok.
+ List = [{'S',".S"},
+ {'E',".E"},
+ {'P',".P"},
+ {dkern, ".kernel"},
+ {dssa, ".ssa"},
+ {dssaopt, ".ssaopt"},
+ {dprecg, ".precodegen"},
+ {to_dis, ".dis"}],
+ p_listings(List, Big, TargetDir).
+
+p_listings(List, File, BaseDir) ->
+ Run = fun({Option,Extension}) ->
+ Uniq = erlang:unique_integer([positive]),
+ Dir = filename:join(BaseDir, integer_to_list(Uniq)),
+ ok = file:make_dir(Dir),
+ try
+ do_listing(File, Dir, Option, Extension),
+ ok
+ catch
+ Class:Error:Stk ->
+ io:format("~p:~p\n~p\n", [Class,Error,Stk]),
+ error
+ after
+ _ = [ok = file:delete(F) ||
+ F <- filelib:wildcard(filename:join(Dir, "*"))],
+ ok = file:del_dir(Dir)
+ end
+ end,
+ test_lib:p_run(Run, List).
other_output(Config) when is_list(Config) ->
{Simple,_Target} = get_files(Config, simple, "other_output"),
@@ -686,9 +681,6 @@ cover(Config) when is_list(Config) ->
io:format("~p\n", [compile:options()]),
ok.
-do_listing(Source, TargetDir, Type) ->
- do_listing(Source, TargetDir, Type, "." ++ atom_to_list(Type)).
-
do_listing(Source, TargetDir, Type, Ext) ->
io:format("Source: ~p TargetDir: ~p\n Type: ~p Ext: ~p\n",
[Source, TargetDir, Type, Ext]),
@@ -1175,88 +1167,6 @@ do_asm(Beam, Outdir) ->
error
end.
-%% Make sure that guards are fully optimized. Guards should
-%% should use 'test' instructions, not 'bif' instructions.
-
-optimized_guards(_Config) ->
- TestBeams = get_unique_beam_files(),
- test_lib:p_run(fun(F) -> do_opt_guards(F) end, TestBeams).
-
-do_opt_guards(Beam) ->
- {ok,{M,[{abstract_code,{raw_abstract_v1,A}}]}} =
- beam_lib:chunks(Beam, [abstract_code]),
- try
- {ok,M,Asm} = compile:forms(A, ['S']),
- do_opt_guards_mod(Asm)
- catch Class:Error:Stk ->
- io:format("~p: ~p ~p\n~p\n", [M,Class,Error,Stk]),
- error
- end.
-
-do_opt_guards_mod({Mod,_Exp,_Attr,Asm,_NumLabels}) ->
- case do_opt_guards_fs(Mod, Asm) of
- [] ->
- ok;
- [_|_]=Bifs ->
- io:format("ERRORS FOR ~p:\n~p\n", [Mod,Bifs]),
- error
- end.
-
-do_opt_guards_fs(Mod, [{function,Name,Arity,_,Is}|Fs]) ->
- Bifs0 = do_opt_guards_fun(Is),
-
- %% The compiler does not attempt to optimize 'xor'.
- %% Therefore, ignore all functions that use 'xor' in
- %% a guard.
- Bifs = case lists:any(fun({bif,'xor',_,_,_}) -> true;
- (_) -> false
- end, Bifs0) of
- true -> [];
- false -> Bifs0
- end,
-
- %% Filter out the allowed exceptions.
- FA = {Name,Arity},
- case {Bifs,is_exception(Mod, FA)} of
- {[_|_],true} ->
- io:format("~p:~p/~p IGNORED:\n~p\n",
- [Mod,Name,Arity,Bifs]),
- do_opt_guards_fs(Mod, Fs);
- {[_|_],false} ->
- [{FA,Bifs}|do_opt_guards_fs(Mod, Fs)];
- {[],false} ->
- do_opt_guards_fs(Mod, Fs);
- {[],true} ->
- io:format("Redundant exception for ~p:~p/~p\n",
- [Mod,Name,Arity]),
- error(redundant)
- end;
-do_opt_guards_fs(_, []) -> [].
-
-do_opt_guards_fun([{bif,Name,{f,F},As,_}=I|Is]) when F =/= 0 ->
- Arity = length(As),
- case erl_internal:comp_op(Name, Arity) orelse
- erl_internal:bool_op(Name, Arity) orelse
- erl_internal:new_type_test(Name, Arity) of
- true ->
- [I|do_opt_guards_fun(Is)];
- false ->
- do_opt_guards_fun(Is)
- end;
-do_opt_guards_fun([_|Is]) ->
- do_opt_guards_fun(Is);
-do_opt_guards_fun([]) -> [].
-
-is_exception(guard_SUITE, {'-complex_not/1-fun-4-',1}) -> true;
-is_exception(guard_SUITE, {'-complex_not/1-fun-5-',1}) -> true;
-is_exception(guard_SUITE, {bad_guards,1}) -> true;
-is_exception(guard_SUITE, {bad_guards_2,2}) -> true;
-is_exception(guard_SUITE, {bad_guards_3,2}) -> true;
-is_exception(guard_SUITE, {csemi7,3}) -> true;
-is_exception(guard_SUITE, {nested_not_2b,4}) -> true;
-is_exception(guard_SUITE, {tricky_1,2}) -> true;
-is_exception(_, _) -> false.
-
sys_pre_attributes(Config) ->
DataDir = proplists:get_value(data_dir, Config),
File = filename:join(DataDir, "attributes.erl"),
@@ -1473,36 +1383,49 @@ env_compiler_options(_Config) ->
bc_options(Config) ->
DataDir = proplists:get_value(data_dir, Config),
- 101 = highest_opcode(DataDir, small_float, [no_get_hd_tl,no_line_info]),
-
- 103 = highest_opcode(DataDir, big,
- [no_get_hd_tl,no_ssa_opt_record,
- no_line_info,no_stack_trimming]),
-
- 125 = highest_opcode(DataDir, small_float,
- [no_get_hd_tl,no_line_info,no_ssa_opt_float]),
-
- 132 = highest_opcode(DataDir, small,
- [no_get_hd_tl,no_ssa_opt_record,no_ssa_opt_float,no_line_info]),
-
- 136 = highest_opcode(DataDir, big, [no_get_hd_tl,no_ssa_opt_record,no_line_info]),
-
- 153 = highest_opcode(DataDir, big, [no_get_hd_tl,no_ssa_opt_record]),
- 153 = highest_opcode(DataDir, big, [r16]),
- 153 = highest_opcode(DataDir, big, [r17]),
- 153 = highest_opcode(DataDir, big, [r18]),
- 153 = highest_opcode(DataDir, big, [r19]),
- 153 = highest_opcode(DataDir, small_float, [r16]),
- 153 = highest_opcode(DataDir, small_float, []),
-
- 158 = highest_opcode(DataDir, small_maps, [r17]),
- 158 = highest_opcode(DataDir, small_maps, [r18]),
- 158 = highest_opcode(DataDir, small_maps, [r19]),
- 158 = highest_opcode(DataDir, small_maps, [r20]),
- 158 = highest_opcode(DataDir, small_maps, []),
-
- 163 = highest_opcode(DataDir, big, []),
-
+ L = [{101, small_float, [no_get_hd_tl,no_line_info]},
+ {103, big, [no_put_tuple2,no_get_hd_tl,no_ssa_opt_record,
+ no_line_info,no_stack_trimming]},
+ {125, small_float, [no_get_hd_tl,no_line_info,no_ssa_opt_float]},
+
+ {132, small, [no_put_tuple2,no_get_hd_tl,no_ssa_opt_record,
+ no_ssa_opt_float,no_line_info,no_bsm3]},
+
+ {153, small, [r20]},
+ {153, small, [r21]},
+
+ {136, big, [no_put_tuple2,no_get_hd_tl,
+ no_ssa_opt_record,no_line_info]},
+
+ {153, big, [no_put_tuple2,no_get_hd_tl, no_ssa_opt_record]},
+ {153, big, [r16]},
+ {153, big, [r17]},
+ {153, big, [r18]},
+ {153, big, [r19]},
+ {153, small_float, [r16]},
+ {153, small_float, []},
+
+ {158, small_maps, [r17]},
+ {158, small_maps, [r18]},
+ {158, small_maps, [r19]},
+ {158, small_maps, [r20]},
+ {158, small_maps, [r21]},
+
+ {164, small_maps, []},
+ {164, big, []}
+ ],
+
+ Test = fun({Expected,Mod,Options}) ->
+ case highest_opcode(DataDir, Mod, Options) of
+ Expected ->
+ ok;
+ Got ->
+ io:format("*** module ~p, options ~p => got ~p; expected ~p\n",
+ [Mod,Options,Got,Expected]),
+ error
+ end
+ end,
+ test_lib:p_run(Test, L),
ok.
highest_opcode(DataDir, Mod, Opt) ->
@@ -1529,6 +1452,30 @@ deterministic_include(Config) when is_list(Config) ->
ok.
+deterministic_paths(Config) when is_list(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+
+ %% Files without +deterministic should differ if they were compiled from a
+ %% different directory.
+ true = deterministic_paths_1(DataDir, "simple", []),
+
+ %% ... but files with +deterministic shouldn't.
+ false = deterministic_paths_1(DataDir, "simple", [deterministic]),
+
+ ok.
+
+deterministic_paths_1(DataDir, Name, Opts) ->
+ Simple = filename:join(DataDir, "simple"),
+ {ok, Cwd} = file:get_cwd(),
+ try
+ {ok,_,A} = compile:file(Simple, [binary | Opts]),
+ ok = file:set_cwd(DataDir),
+ {ok,_,B} = compile:file(Name, [binary | Opts]),
+ A =/= B
+ after
+ file:set_cwd(Cwd)
+ end.
+
%%%
%%% Utilities.
%%%
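A usage sketch of what deterministic_paths_1/3 above checks, using a made-up helper and path: compiling the same module from two different working directories should produce byte-identical code once the deterministic option is given, because directory-dependent metadata is omitted.

    %% Hypothetical helper; Dir and Mod are e.g. "/made/up/path" and "simple.erl".
    same_beam_everywhere(Dir, Mod) ->
        {ok, _, A} = compile:file(filename:join(Dir, Mod), [binary, deterministic]),
        ok = file:set_cwd(Dir),
        {ok, _, B} = compile:file(Mod, [binary, deterministic]),
        true = (A =:= B).   %% without 'deterministic' the two binaries may differ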
diff --git a/lib/compiler/test/compiler.cover b/lib/compiler/test/compiler.cover
index 3fd7fc1937..fac0f9947c 100644
--- a/lib/compiler/test/compiler.cover
+++ b/lib/compiler/test/compiler.cover
@@ -1,5 +1,4 @@
-{incl_app,compiler,details}.
-
%% -*- erlang -*-
+{local_only,compiler,true}.
+{incl_app,compiler,details}.
{excl_mods,compiler,[core_scan,core_parse]}.
-
diff --git a/lib/compiler/test/core_alias_SUITE.erl b/lib/compiler/test/core_alias_SUITE.erl
index 4f96576621..737b1567d4 100644
--- a/lib/compiler/test/core_alias_SUITE.erl
+++ b/lib/compiler/test/core_alias_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/core_fold_SUITE.erl b/lib/compiler/test/core_fold_SUITE.erl
index 68bc5c6e2f..adfebd5158 100644
--- a/lib/compiler/test/core_fold_SUITE.erl
+++ b/lib/compiler/test/core_fold_SUITE.erl
@@ -212,9 +212,14 @@ bifs(Config) when is_list(Config) ->
{ok,#{K:=V}} = id(list_to_tuple([ok,#{K=>V}])),
ok.
--define(CMP_SAME(A0, B), (fun(A) -> true = A == B, false = A /= B end)(id(A0))).
--define(CMP_DIFF(A0, B), (fun(A) -> false = A == B, true = A /= B end)(id(A0))).
-
+-define(CMP_SAME0(A0, B), (fun(A) -> true = A == B, false = A /= B end)(id(A0))).
+-define(CMP_SAME1(A0, B), (fun(A) -> false = A /= B, true = A == B end)(id(A0))).
+-define(CMP_SAME(A0, B), (true = ?CMP_SAME0(A0, B) =:= not ?CMP_SAME1(A0, B))).
+
+-define(CMP_DIFF0(A0, B), (fun(A) -> false = A == B, true = A /= B end)(id(A0))).
+-define(CMP_DIFF1(A0, B), (fun(A) -> true = A /= B, false = A == B end)(id(A0))).
+-define(CMP_DIFF(A0, B), (true = ?CMP_DIFF0(A0, B) =:= not ?CMP_DIFF1(A0, B))).
+
eq(Config) when is_list(Config) ->
?CMP_SAME([a,b,c], [a,b,c]),
?CMP_SAME([42.0], [42.0]),
@@ -497,7 +502,7 @@ source(true, Activities) ->
Activities
end.
-tim(#{reduction := Emergency}) ->
+tim(#{reduction := _Emergency}) ->
try
fun() -> surgery end
catch
diff --git a/lib/compiler/test/error_SUITE.erl b/lib/compiler/test/error_SUITE.erl
index da291bdc8b..8b9dbe4aa0 100644
--- a/lib/compiler/test/error_SUITE.erl
+++ b/lib/compiler/test/error_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/float_SUITE.erl b/lib/compiler/test/float_SUITE.erl
index 39867021cb..831e8279aa 100644
--- a/lib/compiler/test/float_SUITE.erl
+++ b/lib/compiler/test/float_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2002-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2002-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -20,7 +20,8 @@
-module(float_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
- pending/1,bif_calls/1,math_functions/1,mixed_float_and_int/1]).
+ pending/1,bif_calls/1,math_functions/1,mixed_float_and_int/1,
+ subtract_number_type/1]).
-include_lib("common_test/include/ct.hrl").
@@ -28,7 +29,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[pending, bif_calls, math_functions,
- mixed_float_and_int].
+ mixed_float_and_int, subtract_number_type].
groups() ->
[].
@@ -176,5 +177,15 @@ mixed_float_and_int(Config) when is_list(Config) ->
pc(Cov, NotCov, X) ->
round(Cov/(Cov+NotCov)*100) + 42 + 2.0*X.
+subtract_number_type(Config) when is_list(Config) ->
+ 120 = fact(5).
+
+fact(N) ->
+ fact(N, 1).
+
+fact(0, P) -> P;
+fact(1, P) -> P;
+fact(N, P) -> fact(N-1, P*N).
+
id(I) -> I.
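
The new subtract_number_type test above boils down to plain arithmetic; the extra fact(0, P) clause presumably exists so that the type pass has to subtract the already-matched integers from the argument type before reaching the recursive clause. For reference (illustrative shell check):

    %% fact(5,1) -> fact(4,5) -> fact(3,20) -> fact(2,60) -> fact(1,120) -> 120
    120 = lists:foldl(fun erlang:'*'/2, 1, lists:seq(1, 5)).
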
diff --git a/lib/compiler/test/fun_SUITE.erl b/lib/compiler/test/fun_SUITE.erl
index e00885fcd6..1df0a05275 100644
--- a/lib/compiler/test/fun_SUITE.erl
+++ b/lib/compiler/test/fun_SUITE.erl
@@ -249,6 +249,13 @@ badfun(_Config) ->
expect_badfun(X, catch X(put(?FUNCTION_NAME, of_course))),
of_course = erase(?FUNCTION_NAME),
+ %% A literal as a Fun used to crash the code generator. This only happened
+ %% when type optimization had reduced `Fun` to a literal, hence the match.
+ Literal = fun(literal = Fun) ->
+ Fun()
+ end,
+ expect_badfun(literal, catch Literal(literal)),
+
ok.
expect_badfun(Term, Exit) ->
diff --git a/lib/compiler/test/guard_SUITE.erl b/lib/compiler/test/guard_SUITE.erl
index 73a8dc0fda..ed0a56f064 100644
--- a/lib/compiler/test/guard_SUITE.erl
+++ b/lib/compiler/test/guard_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -35,8 +35,7 @@
basic_andalso_orelse/1,traverse_dcd/1,
check_qlc_hrl/1,andalso_semi/1,t_tuple_size/1,binary_part/1,
bad_constants/1,bad_guards/1,
- guard_in_catch/1,beam_bool_SUITE/1,
- cover_beam_dead/1]).
+ guard_in_catch/1,beam_bool_SUITE/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -54,8 +53,7 @@ groups() ->
rel_ops,rel_op_combinations,
literal_type_tests,basic_andalso_orelse,traverse_dcd,
check_qlc_hrl,andalso_semi,t_tuple_size,binary_part,
- bad_constants,bad_guards,guard_in_catch,beam_bool_SUITE,
- cover_beam_dead]}].
+ bad_constants,bad_guards,guard_in_catch,beam_bool_SUITE]}].
init_per_suite(Config) ->
test_lib:recompile(?MODULE),
@@ -1297,6 +1295,32 @@ rel_ops(Config) when is_list(Config) ->
Empty = id([]),
?T(==, [], Empty),
+ %% Cover beam_ssa_dead:turn_op('/=').
+ ok = (fun(A, B) when is_atom(A) ->
+ X = id(A /= B),
+ if
+ X -> ok;
+ true -> error
+ end
+ end)(a, b),
+ ok = (fun(A, B) when is_atom(A) ->
+ X = id(B /= A),
+ if
+ X -> ok;
+ true -> error
+ end
+ end)(a, b),
+
+ %% Cover beam_ssa_dead.
+ Arrow = fun([T1,T2]) when T1 == $>, T2 == $>;
+ T1 == $<, T2 == $| -> true;
+ (_) -> false
+ end,
+ true = Arrow(">>"),
+ true = Arrow("<|"),
+ false = Arrow("><"),
+ false = Arrow(""),
+
ok.
-undef(TestOp).
@@ -1330,6 +1354,9 @@ rel_op_combinations_1(N, Digits) ->
Bool = is_digit_6(N),
Bool = is_digit_7(N),
Bool = is_digit_8(N),
+ Bool = is_digit_9(42, N),
+ Bool = is_digit_10(N, 0),
+ Bool = is_digit_11(N, 0),
rel_op_combinations_1(N-1, Digits).
is_digit_1(X) when 16#0660 =< X, X =< 16#0669 -> true;
@@ -1373,6 +1400,24 @@ is_digit_8(X) when X =< 16#0669, X > (16#0660-1) -> true;
is_digit_8(16#0670) -> false;
is_digit_8(_) -> false.
+is_digit_9(A, 0) when A =:= 42 -> false;
+is_digit_9(_, X) when X > 16#065F, X < 16#066A -> true;
+is_digit_9(_, X) when 16#0030 =< X, X =< 16#0039 -> true;
+is_digit_9(_, X) when 16#06F0 =< X, X =< 16#06F9 -> true;
+is_digit_9(_, _) -> false.
+
+is_digit_10(0, 0) -> false;
+is_digit_10(X, _) when X < 16#066A, 16#0660 =< X -> true;
+is_digit_10(X, _) when 16#0030 =< X, X =< 16#0039 -> true;
+is_digit_10(X, _) when 16#06F0 =< X, X =< 16#06F9 -> true;
+is_digit_10(_, _) -> false.
+
+is_digit_11(0, 0) -> false;
+is_digit_11(X, _) when X =< 16#0669, 16#0660 =< X -> true;
+is_digit_11(X, _) when 16#0030 =< X, X =< 16#0039 -> true;
+is_digit_11(X, _) when 16#06F0 =< X, X =< 16#06F9 -> true;
+is_digit_11(_, _) -> false.
+
rel_op_combinations_2(0, _) ->
ok;
rel_op_combinations_2(N, Range) ->
@@ -1473,6 +1518,7 @@ rel_op_combinations_3(N, Red) ->
Val = redundant_9(N),
Val = redundant_10(N),
Val = redundant_11(N),
+ _ = redundant_12(N),
rel_op_combinations_3(N-1, Red).
redundant_1(X) when X >= 51, X =< 80 -> 5*X;
@@ -1527,6 +1573,10 @@ redundant_11(X) when X =:= 10 -> 2*X;
redundant_11(X) when X >= 51, X =< 80 -> 5*X;
redundant_11(_) -> none.
+redundant_12(X) when X >= 50, X =< 80 -> 2*X;
+redundant_12(X) when X < 51 -> 5*X;
+redundant_12(_) -> none.
+
%% Test type tests on literal values. (From emulator test suites.)
literal_type_tests(Config) when is_list(Config) ->
case ?MODULE of
@@ -1779,16 +1829,6 @@ t_tuple_size(Config) when is_list(Config) ->
error = ludicrous_tuple_size({a,b,c}),
error = ludicrous_tuple_size([a,b,c]),
- %% Test the "unsafe case" - the register assigned the tuple size is
- %% not killed.
- DataDir = test_lib:get_data_dir(Config),
- File = filename:join(DataDir, "guard_SUITE_tuple_size"),
- {ok,Mod,Code} = compile:file(File, [from_asm,binary]),
- code:load_binary(Mod, File, Code),
- 14 = Mod:t({1,2,3,4}),
- _ = code:delete(Mod),
- _ = code:purge(Mod),
-
good_ip({1,2,3,4}),
good_ip({1,2,3,4,5,6,7,8}),
error = validate_ip({42,11}),
@@ -2221,32 +2261,6 @@ maps() ->
evidence(#{0 := Charge}) when 0; #{[] => Charge} == #{[] => 42} ->
ok.
-cover_beam_dead(_Config) ->
- Mod = ?FUNCTION_NAME,
- Attr = [],
- Fs = [{function,test,1,2,
- [{label,1},
- {line,[]},
- {func_info,{atom,Mod},{atom,test},1},
- {label,2},
- %% Cover beam_dead:turn_op/1 using swapped operand order.
- {test,is_ne_exact,{f,3},[{integer,1},{x,0}]},
- {test,is_eq_exact,{f,1},[{atom,a},{x,0}]},
- {label,3},
- {move,{atom,ok},{x,0}},
- return]}],
- Exp = [{test,1}],
- Asm = {Mod,Exp,Attr,Fs,3},
- {ok,Mod,Beam} = compile:forms(Asm, [from_asm,binary,report]),
- {module,Mod} = code:load_binary(Mod, Mod, Beam),
- ok = Mod:test(1),
- ok = Mod:test(a),
- {'EXIT',_} = (catch Mod:test(other)),
- true = code:delete(Mod),
- _ = code:purge(Mod),
-
- ok.
-
%% Call this function to turn off constant propagation.
id(I) -> I.
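
One note on the guards added above: the first real clauses of is_digit_9/2, is_digit_10/2 and is_digit_11/2 all describe the same range 16#0660..16#0669, just spelled with different operators (strict vs. non-strict, swapped operand order), which is presumably what the relational-operator combination in beam_ssa_dead has to recognize. A quick sanity check of that equivalence (illustrative only):

    true = lists:all(fun(X) ->
                         (X > 16#065F andalso X < 16#066A) =:=
                             (16#0660 =< X andalso X =< 16#0669)
                     end, lists:seq(16#0650, 16#0680)).
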
diff --git a/lib/compiler/test/guard_SUITE_data/guard_SUITE_tuple_size.S b/lib/compiler/test/guard_SUITE_data/guard_SUITE_tuple_size.S
deleted file mode 100644
index cffb792920..0000000000
--- a/lib/compiler/test/guard_SUITE_data/guard_SUITE_tuple_size.S
+++ /dev/null
@@ -1,30 +0,0 @@
-{module, guard_SUITE_tuple_size}. %% version = 0
-
-{exports, [{t,1}]}.
-
-{attributes, []}.
-
-{labels, 5}.
-
-
-{function, t, 1, 2}.
- {label,1}.
- {func_info,{atom,guard_SUITE_tuple_size},{atom,t},1}.
- {label,2}.
- {bif,tuple_size,{f,4},[{x,0}],{x,1}}.
- {test,is_eq_exact,{f,4},[{x,1},{integer,4}]}.
- {test,is_tuple,{f,3},[{x,0}]}.
- {test,test_arity,{f,3},[{x,0},4]}.
- {get_tuple_element,{x,0},0,{x,5}}.
- {get_tuple_element,{x,0},1,{x,2}}.
- {get_tuple_element,{x,0},2,{x,3}}.
- {get_tuple_element,{x,0},3,{x,4}}.
- {gc_bif,'+',{f,0},6,[{x,1},{x,2}],{x,0}}.
- {gc_bif,'+',{f,0},6,[{x,0},{x,3}],{x,0}}.
- {gc_bif,'+',{f,0},6,[{x,0},{x,4}],{x,0}}.
- {gc_bif,'+',{f,0},6,[{x,0},{x,5}],{x,0}}.
- return.
- {label,3}.
- {badmatch,{x,0}}.
- {label,4}.
- {jump,{f,1}}.
diff --git a/lib/compiler/test/inline_SUITE.erl b/lib/compiler/test/inline_SUITE.erl
index e03769012b..aff1a56c47 100644
--- a/lib/compiler/test/inline_SUITE.erl
+++ b/lib/compiler/test/inline_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -42,13 +42,9 @@ groups() ->
init_per_suite(Config) ->
test_lib:recompile(?MODULE),
- Pa = "-pa " ++ filename:dirname(code:which(?MODULE)),
- {ok,Node} = start_node(compiler, Pa),
- [{testing_node,Node}|Config].
+ Config.
-end_per_suite(Config) ->
- Node = proplists:get_value(testing_node, Config),
- test_server:stop_node(Node),
+end_per_suite(_Config) ->
ok.
init_per_group(_GroupName, Config) ->
@@ -89,7 +85,6 @@ attribute(Config) when is_list(Config) ->
?comp(maps_inline_test).
try_inline(Mod, Config) ->
- Node = proplists:get_value(testing_node, Config),
Src = filename:join(proplists:get_value(data_dir, Config),
atom_to_list(Mod)),
Out = proplists:get_value(priv_dir,Config),
@@ -100,7 +95,7 @@ try_inline(Mod, Config) ->
bin_opt_info,clint,ssalint]),
ct:timetrap({minutes,10}),
- NormalResult = rpc:call(Node, ?MODULE, load_and_call, [Out,Mod]),
+ NormalResult = load_and_call(Out, Mod),
%% Inlining.
io:format("Compiling with old inliner: ~s\n", [Src]),
@@ -109,7 +104,7 @@ try_inline(Mod, Config) ->
%% Run inlined code.
ct:timetrap({minutes,10}),
- OldInlinedResult = rpc:call(Node, ?MODULE, load_and_call, [Out,Mod]),
+ OldInlinedResult = load_and_call(Out, Mod),
%% Compare results.
compare(NormalResult, OldInlinedResult),
@@ -122,7 +117,7 @@ try_inline(Mod, Config) ->
%% Run inlined code.
ct:timetrap({minutes,10}),
- InlinedResult = rpc:call(Node, ?MODULE, load_and_call, [Out,Mod]),
+ InlinedResult = load_and_call(Out, Mod),
%% Compare results.
compare(NormalResult, InlinedResult),
@@ -131,6 +126,11 @@ try_inline(Mod, Config) ->
%% Delete Beam file.
ok = file:delete(filename:join(Out, atom_to_list(Mod)++code:objfile_extension())),
+ %% Delete loaded module.
+ _ = code:purge(Mod),
+ _ = code:delete(Mod),
+ _ = code:purge(Mod),
+
ok.
compare(Same, Same) -> ok;
@@ -144,12 +144,6 @@ compare([H1|_], [H2|_]) ->
ct:fail(different);
compare([], []) -> ok.
-start_node(Name, Args) ->
- case test_server:start_node(Name, slave, [{args,Args}]) of
- {ok,Node} -> {ok, Node};
- Error -> ct:fail(Error)
- end.
-
load_and_call(Out, Module) ->
io:format("Loading...\n",[]),
code:purge(Module),
@@ -350,10 +344,8 @@ otp_7223_2({a}) ->
1.
coverage(Config) when is_list(Config) ->
- Mod = bsdecode,
+ Mod = attribute,
Src = filename:join(proplists:get_value(data_dir, Config), Mod),
{ok,Mod,_} = compile:file(Src, [binary,report,{inline,0},
clint,ssalint]),
- {ok,Mod,_} = compile:file(Src, [binary,report,{inline,20},
- verbose,clint,ssalint]),
ok.
diff --git a/lib/compiler/test/inline_SUITE_data/barnes2.erl b/lib/compiler/test/inline_SUITE_data/barnes2.erl
index a986331060..49e9bdfb6b 100644
--- a/lib/compiler/test/inline_SUITE_data/barnes2.erl
+++ b/lib/compiler/test/inline_SUITE_data/barnes2.erl
@@ -6,7 +6,7 @@
?MODULE() ->
Stars = create_scenario(1000, 1.0),
R = hd(loop(10,1000.0,Stars,0)),
- Str = lists:flatten(io:lib_format("~s", [R])),
+ Str = lists:flatten(io_lib:format("~p", [R])),
{R,Str =:= {1.00000,-1.92269e+4,-1.92269e+4,2.86459e-2,2.86459e-2}}.
create_scenario(N, M) ->
diff --git a/lib/compiler/test/map_SUITE.erl b/lib/compiler/test/map_SUITE.erl
index 3e0ab78390..440b632381 100644
--- a/lib/compiler/test/map_SUITE.erl
+++ b/lib/compiler/test/map_SUITE.erl
@@ -70,7 +70,10 @@
t_bad_update/1,
%% new in OTP 21
- t_reused_key_variable/1
+ t_reused_key_variable/1,
+
+ %% new in OTP 22
+ t_mixed_clause/1,cover_beam_trim/1
]).
suite() -> [].
@@ -124,7 +127,10 @@ all() ->
t_bad_update,
%% new in OTP 21
- t_reused_key_variable
+ t_reused_key_variable,
+
+ %% new in OTP 22
+ t_mixed_clause,cover_beam_trim
].
groups() -> [].
@@ -1373,22 +1379,22 @@ map_usage(Def, Used) ->
t_guard_sequence(Config) when is_list(Config) ->
- {1, "a"} = map_guard_sequence_1(#{seq=>1,val=>id("a")}),
- {2, "b"} = map_guard_sequence_1(#{seq=>2,val=>id("b")}),
- {3, "c"} = map_guard_sequence_1(#{seq=>3,val=>id("c")}),
- {4, "d"} = map_guard_sequence_1(#{seq=>4,val=>id("d")}),
- {5, "e"} = map_guard_sequence_1(#{seq=>5,val=>id("e")}),
-
- {1,M1} = map_guard_sequence_2(M1 = id(#{a=>3})),
- {2,M2} = map_guard_sequence_2(M2 = id(#{a=>4, b=>4})),
- {3,gg,M3} = map_guard_sequence_2(M3 = id(#{a=>gg, b=>4})),
- {4,sc,sc,M4} = map_guard_sequence_2(M4 = id(#{a=>sc, b=>3, c=>sc2})),
- {5,kk,kk,M5} = map_guard_sequence_2(M5 = id(#{a=>kk, b=>other, c=>sc2})),
-
- %% error case
- {'EXIT',{function_clause,_}} = (catch map_guard_sequence_1(#{seq=>6,val=>id("e")})),
- {'EXIT',{function_clause,_}} = (catch map_guard_sequence_2(#{b=>5})),
- ok.
+ {1, "a"} = map_guard_sequence_1(#{seq=>1,val=>id("a")}),
+ {2, "b"} = map_guard_sequence_1(#{seq=>2,val=>id("b")}),
+ {3, "c"} = map_guard_sequence_1(#{seq=>3,val=>id("c")}),
+ {4, "d"} = map_guard_sequence_1(#{seq=>4,val=>id("d")}),
+ {5, "e"} = map_guard_sequence_1(#{seq=>5,val=>id("e")}),
+
+ {1,M1} = map_guard_sequence_2(M1 = id(#{a=>3})),
+ {2,M2} = map_guard_sequence_2(M2 = id(#{a=>4, b=>4})),
+ {3,gg,M3} = map_guard_sequence_2(M3 = id(#{a=>gg, b=>4})),
+ {4,sc,sc,M4} = map_guard_sequence_2(M4 = id(#{a=>sc, b=>3, c=>sc2})),
+ {5,kk,kk,M5} = map_guard_sequence_2(M5 = id(#{a=>kk, b=>other, c=>sc2})),
+
+ %% error case
+ {'EXIT',{function_clause,_}} = (catch map_guard_sequence_1(#{seq=>6,val=>id("e")})),
+ {'EXIT',{function_clause,_}} = (catch map_guard_sequence_2(#{b=>5})),
+ ok.
t_guard_sequence_large(Config) when is_list(Config) ->
M0 = id(#{ 10=>a0,20=>b0,30=>"c0","40"=>"d0",<<"50">>=>"e0",{["00",03]}=>"10",
@@ -1443,21 +1449,21 @@ t_guard_sequence_large(Config) when is_list(Config) ->
18=>a8,28=>b8,38=>"c8","48"=>"d8",<<"58">>=>"e8",{["08"]}=>"18",
19=>a9,29=>b9,39=>"c9","49"=>"d9",<<"59">>=>"e9",{["09"]}=>"19" } => "large map key 2" }),
- {1, "a"} = map_guard_sequence_1(M0#{seq=>1,val=>id("a")}),
- {2, "b"} = map_guard_sequence_1(M0#{seq=>2,val=>id("b")}),
- {3, "c"} = map_guard_sequence_1(M0#{seq=>3,val=>id("c")}),
- {4, "d"} = map_guard_sequence_1(M0#{seq=>4,val=>id("d")}),
- {5, "e"} = map_guard_sequence_1(M0#{seq=>5,val=>id("e")}),
+ {1, "a"} = map_guard_sequence_1(M0#{seq=>1,val=>id("a")}),
+ {2, "b"} = map_guard_sequence_1(M0#{seq=>2,val=>id("b")}),
+ {3, "c"} = map_guard_sequence_1(M0#{seq=>3,val=>id("c")}),
+ {4, "d"} = map_guard_sequence_1(M0#{seq=>4,val=>id("d")}),
+ {5, "e"} = map_guard_sequence_1(M0#{seq=>5,val=>id("e")}),
- {1,M1} = map_guard_sequence_2(M1 = id(M0#{a=>3})),
- {2,M2} = map_guard_sequence_2(M2 = id(M0#{a=>4, b=>4})),
- {3,gg,M3} = map_guard_sequence_2(M3 = id(M0#{a=>gg, b=>4})),
- {4,sc,sc,M4} = map_guard_sequence_2(M4 = id(M0#{a=>sc, b=>3, c=>sc2})),
- {5,kk,kk,M5} = map_guard_sequence_2(M5 = id(M0#{a=>kk, b=>other, c=>sc2})),
+ {1,M1} = map_guard_sequence_2(M1 = id(M0#{a=>3})),
+ {2,M2} = map_guard_sequence_2(M2 = id(M0#{a=>4, b=>4})),
+ {3,gg,M3} = map_guard_sequence_2(M3 = id(M0#{a=>gg, b=>4})),
+ {4,sc,sc,M4} = map_guard_sequence_2(M4 = id(M0#{a=>sc, b=>3, c=>sc2})),
+ {5,kk,kk,M5} = map_guard_sequence_2(M5 = id(M0#{a=>kk, b=>other, c=>sc2})),
- {'EXIT',{function_clause,_}} = (catch map_guard_sequence_1(M0#{seq=>6,val=>id("e")})),
- {'EXIT',{function_clause,_}} = (catch map_guard_sequence_2(M0#{b=>5})),
- ok.
+ {'EXIT',{function_clause,_}} = (catch map_guard_sequence_1(M0#{seq=>6,val=>id("e")})),
+ {'EXIT',{function_clause,_}} = (catch map_guard_sequence_2(M0#{b=>5})),
+ ok.
map_guard_sequence_1(#{seq:=1=Seq, val:=Val}) -> {Seq,Val};
map_guard_sequence_1(#{seq:=2=Seq, val:=Val}) -> {Seq,Val};
@@ -2079,7 +2085,7 @@ t_register_corruption(Config) when is_list(Config) ->
{3,wanted,<<"value">>} = register_corruption_foo(wanted,M),
ok.
-register_corruption_foo(A,#{a := V1, b := V2}) ->
+register_corruption_foo(_,#{a := V1, b := V2}) ->
register_corruption_dummy_call(1,V1,V2);
register_corruption_foo(A,#{b := V}) ->
register_corruption_dummy_call(2,A,V);
@@ -2161,6 +2167,31 @@ t_reused_key_variable(Config) when is_list(Config) ->
ok
end.
+t_mixed_clause(_Config) ->
+ put(fool_inliner, x),
+ K = get(fool_inliner),
+ {42,100} = case #{K=>42,y=>100} of
+ #{x:=X,y:=Y} ->
+ {X,Y}
+ end,
+ nomatch = case #{K=>42,y=>100} of
+ #{x:=X,y:=0} ->
+ {X,Y};
+ #{} ->
+ nomatch
+ end,
+ ok.
+
+cover_beam_trim(_Config) ->
+ val = do_cover_beam_trim(id, max, max, id, #{id=>val}),
+ ok.
+
+do_cover_beam_trim(Id, OldMax, Max, Id, M) ->
+ OldMax = id(Max),
+ #{Id:=Val} = id(M),
+ Val.
+
+
%% aux
rand_terms(0) -> [];
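
A short note on t_mixed_clause/1 above: the put/get pair keeps the key K unknown until run time, so neither the map nor the case can be folded at compile time; at run time K =:= x, so the first case binds X = 42 and Y = 100, while the second case falls through to the #{} clause because its first pattern also requires y to match 0. A minimal stand-alone version of that second case (illustrative):

    nomatch = case #{x => 42, y => 100} of
                  #{x := _, y := 0} -> unexpected;
                  #{} -> nomatch
              end.
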
diff --git a/lib/compiler/test/match_SUITE.erl b/lib/compiler/test/match_SUITE.erl
index e3f842b668..94bfbb0efe 100644
--- a/lib/compiler/test/match_SUITE.erl
+++ b/lib/compiler/test/match_SUITE.erl
@@ -25,7 +25,7 @@
match_in_call/1,untuplify/1,shortcut_boolean/1,letify_guard/1,
selectify/1,deselectify/1,underscore/1,match_map/1,map_vars_used/1,
coverage/1,grab_bag/1,literal_binary/1,
- unary_op/1]).
+ unary_op/1,eq_types/1,match_after_return/1]).
-include_lib("common_test/include/ct.hrl").
@@ -40,7 +40,8 @@ groups() ->
match_in_call,untuplify,
shortcut_boolean,letify_guard,selectify,deselectify,
underscore,match_map,map_vars_used,coverage,
- grab_bag,literal_binary,unary_op]}].
+ grab_bag,literal_binary,unary_op,eq_types,
+ match_after_return]}].
init_per_suite(Config) ->
@@ -254,6 +255,8 @@ non_matching_aliases(_Config) ->
none = mixed_aliases([d]),
none = mixed_aliases({a,42}),
none = mixed_aliases(42),
+ none = mixed_aliases(<<6789:16>>),
+ none = mixed_aliases(#{key=>value}),
{'EXIT',{{badmatch,42},_}} = (catch nomatch_alias(42)),
{'EXIT',{{badmatch,job},_}} = (catch entirely()),
@@ -279,6 +282,16 @@ mixed_aliases(<<X:8>> = x) -> {a,X};
mixed_aliases([b] = <<X:8>>) -> {b,X};
mixed_aliases(<<X:8>> = {a,X}) -> {c,X};
mixed_aliases([X] = <<X:8>>) -> {d,X};
+mixed_aliases(<<X:16>> = X) -> {e,X};
+mixed_aliases(X = <<X:16>>) -> {f,X};
+mixed_aliases(<<X:16,_/binary>> = X) -> {g,X};
+mixed_aliases(X = <<X:16,_/binary>>) -> {h,X};
+mixed_aliases(X = #{key:=X}) -> {i,X};
+mixed_aliases(#{key:=X} = X) -> {j,X};
+mixed_aliases([X] = #{key:=X}) -> {k,X};
+mixed_aliases(#{key:=X} = [X]) -> {l,X};
+mixed_aliases({a,X} = #{key:=X}) -> {m,X};
+mixed_aliases(#{key:=X} = {a,X}) -> {n,X};
mixed_aliases(_) -> none.
nomatch_alias(I) ->
@@ -378,6 +391,13 @@ untuplify(Config) when is_list(Config) ->
%% We do this to cover sys_core_fold:unalias_pat/1.
{1,2,3,4,alias,{[1,2],{3,4},alias}} = untuplify_1([1,2], {3,4}, alias),
error = untuplify_1([1,2], {3,4}, 42),
+
+ %% Test that a previous bug in v3_codegen is gone. (The sinking of
+ %% stack frames into only the case arms that needed them was not always
+ %% safe.)
+ [33, -1, -33, 1] = untuplify_2(32, 65),
+ {33, 1, -33, -1} = untuplify_2(65, 32),
+
ok.
untuplify_1(A, B, C) ->
@@ -390,6 +410,21 @@ untuplify_1(A, B, C) ->
error
end.
+untuplify_2(V1, V2) ->
+ {D1,D2,D3,D4} =
+ if V1 > V2 ->
+ %% The 1 value was overwritten by the value of V2-V1.
+ {V1-V2, 1, V2-V1, -1};
+ true ->
+ {V2-V1, -1, V1-V2, 1}
+ end,
+ if
+ D2 > D4 ->
+ {D1, D2, D3, D4};
+ true ->
+ [D1, D2, D3, D4]
+ end.
+
%% Coverage of beam_dead:shortcut_boolean_label/4.
shortcut_boolean(Config) when is_list(Config) ->
false = shortcut_boolean_1([0]),
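
Worked values for the new untuplify_2/2 assertions above (plain arithmetic): untuplify_2(32, 65) takes the `true ->` branch and builds {33,-1,-33,1}; since -1 > 1 is false, the list [33,-1,-33,1] is returned. untuplify_2(65, 32) takes the first branch and builds {33,1,-33,-1}; since 1 > -1 is true, the tuple itself is returned.
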
@@ -434,6 +469,7 @@ letify_guard(A, B) ->
selectify(Config) when is_list(Config) ->
integer = sel_different_types({r,42}),
atom = sel_different_types({r,forty_two}),
+ float = sel_different_types({r,100.0}),
none = sel_different_types({r,18}),
{'EXIT',_} = (catch sel_different_types([a,b,c])),
@@ -444,12 +480,15 @@ selectify(Config) when is_list(Config) ->
integer42 = sel_same_value2(42),
integer43 = sel_same_value2(43),
error = sel_same_value2(44),
+
ok.
sel_different_types({r,_}=T) when element(2, T) =:= forty_two ->
atom;
sel_different_types({r,_}=T) when element(2, T) =:= 42 ->
integer;
+sel_different_types({r,_}=T) when element(2, T) =:= 100.0 ->
+ float;
sel_different_types({r,_}) ->
none.
@@ -467,9 +506,8 @@ sel_same_value2(V) when V =:= 42; V =:= 43 ->
sel_same_value2(_) ->
error.
-%% Test deconstruction of select_val instructions in beam_peep into
-%% regular tests with just one possible value left. Hitting proper cases
-%% in beam_peep relies on unification of labels by beam_jump.
+%% Test deconstruction of select_val instructions to regular tests
+%% with zero or one values left.
deselectify(Config) when is_list(Config) ->
one_or_other = desel_tuple_arity({1}),
@@ -490,7 +528,31 @@ deselectify(Config) when is_list(Config) ->
one_or_other = dsel_atom_typecheck(one),
two = dsel_atom_typecheck(two),
- one_or_other = dsel_atom_typecheck(three).
+ one_or_other = dsel_atom_typecheck(three),
+
+ %% Cover deconstruction of select_val instructions in
+ %% beam_peep.
+
+ stop = dsel_peek_0(stop),
+ ignore = dsel_peek_0(ignore),
+ Config = dsel_peek_0(Config),
+
+ stop = dsel_peek_1(stop, any),
+ Config = dsel_peek_1(ignore, Config),
+ other = dsel_peek_1(other, ignored),
+
+ 0 = dsel_peek_2(0, any),
+ Config = dsel_peek_2(1, Config),
+ 2 = dsel_peek_2(2, ignored),
+
+ true = dsel_peek_3(true),
+ false = dsel_peek_3(false),
+ {error,Config} = dsel_peek_3(Config),
+
+ ok.
+
+%% The following will be optimized by the sharing optimizations
+%% in beam_ssa_opt.
desel_tuple_arity(Tuple) when is_tuple(Tuple) ->
case Tuple of
@@ -527,6 +589,39 @@ dsel_atom_typecheck(Val) when is_atom(Val) ->
_ -> one_or_other
end.
+%% The following functions are carefully crafted so that the sharing
+%% optimizations in beam_ssa_opt can't be applied. After the
+%% beam_jump:eliminate_moves/1 optimization has run and beam_clean:clean_labels/1
+%% has unified labels, beam_peep is able to optimize these functions.
+
+dsel_peek_0(A0) ->
+ case id(A0) of
+ stop -> stop;
+ ignore -> ignore;
+ A -> A
+ end.
+
+dsel_peek_1(A0, B) ->
+ case id(A0) of
+ stop -> stop;
+ ignore -> B;
+ A -> A
+ end.
+
+dsel_peek_2(A0, B) ->
+ case id(A0) of
+ 0 -> 0;
+ 1 -> B;
+ A -> A
+ end.
+
+dsel_peek_3(A0) ->
+ case id(A0) of
+ true -> true;
+ false -> false;
+ Other -> {error,Other}
+ end.
+
underscore(Config) when is_list(Config) ->
case Config of
[] ->
@@ -569,13 +664,26 @@ do_map_vars_used(X, Y, Map) ->
Val
end.
+-record(coverage_id, {bool=false,id}).
coverage(Config) when is_list(Config) ->
%% Cover beam_dead.
ok = coverage_1(x, a),
ok = coverage_1(x, b),
%% Cover sys_pre_expand.
- ok = coverage_3("abc").
+ ok = coverage_3("abc"),
+
+ %% Cover beam_ssa_dead.
+ {expr,key} = coverage_4([literal,get], [[expr,key]]),
+ {expr,key} = coverage_4([expr,key], []),
+
+ a = coverage_5([8,8,8], #coverage_id{bool=true}),
+ b = coverage_5([], #coverage_id{bool=true}),
+
+ %% Cover beam_ssa_opt.
+ ok = coverage_6(),
+
+ ok.
coverage_1(B, Tag) ->
case Tag of
@@ -588,6 +696,37 @@ coverage_2(2, b, x) -> ok.
coverage_3([$a]++[]++"bc") -> ok.
+%% Cover beam_ssa_dead:eval_type_test_1(is_nonempty_list, Arg).
+coverage_4([literal,get], [Expr]) ->
+ coverage_4(Expr, []);
+coverage_4([Expr,Key], []) ->
+ {Expr,Key}.
+
+%% Cover beam_ssa_dead:eval_type_test_1(is_tagged_tuple, Arg).
+coverage_5(Config, TermId)
+ when TermId =:= #coverage_id{bool=true},
+ Config =:= [8,8,8] ->
+ a;
+coverage_5(_Config, #coverage_id{bool=true}) ->
+ b.
+
+coverage_6() ->
+ X = 17,
+ case
+ case id(1) > 0 of
+ true ->
+ 17;
+ false ->
+ 42
+ end
+ of
+ X ->
+ ok;
+ V ->
+ %% Cover beam_ssa_opt:make_literal/2.
+ error([error,X,V])
+ end.
+
grab_bag(_Config) ->
[_|T] = id([a,b,c]),
[b,c] = id(T),
@@ -732,5 +871,35 @@ unary_op_1(Vop@1) ->
end
end.
+eq_types(_Config) ->
+ Ref = make_ref(),
+ Ref = eq_types(Ref, any),
+ ok.
+
+eq_types(A, B) ->
+ %% {put_tuple2,{y,0},{list,[{x,0},{x,1}]}}.
+ Term0 = {A, B},
+ Term = id(Term0),
+
+ %% {test,is_eq_exact,{f,3},[{y,0},{x,0}]}.
+ %% Here beam_validator must infer that {x,0} has the
+ %% same type as {y,0}.
+ Term = Term0,
+
+ %% {get_tuple_element,{x,0},0,{x,0}}.
+ {Ref22,_} = Term,
+
+ Ref22.
+
+match_after_return(Config) when is_list(Config) ->
+ %% The return type of the following call will never match the 'wont_happen'
+ %% clauses below, and the beam_ssa_type pass was clever enough to see that but
+ %% didn't remove the blocks, so it crashed when trying to extract A.
+ ok = case mar_test_tuple(erlang:unique_integer()) of
+ {gurka, never_matches, A} -> {wont_happen, A};
+ _ -> ok
+ end.
+
+mar_test_tuple(I) -> {gurka, I}.
id(I) -> I.
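
To make the dsel_peek_* additions above easier to follow: once beam_jump:eliminate_moves/1 and beam_clean:clean_labels/1 have unified labels, only one distinct value may remain in a select_val instruction, and beam_peep can then rewrite it as an ordinary test. A hypothetical before/after in .S-style notation (labels, registers and values made up, not actual compiler output):

    {select_val,{x,0},{f,Fail},{list,[{atom,stop},{f,L1}]}}.
    %% ...with a single value left, this is equivalent to:
    {test,is_eq_exact,{f,Fail},[{x,0},{atom,stop}]}.
    {jump,{f,L1}}.
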
diff --git a/lib/compiler/test/misc_SUITE.erl b/lib/compiler/test/misc_SUITE.erl
index beae5eb618..a0b415ceaa 100644
--- a/lib/compiler/test/misc_SUITE.erl
+++ b/lib/compiler/test/misc_SUITE.erl
@@ -161,14 +161,13 @@ md5_1(Beam) ->
%% Cover some code that handles internal errors.
silly_coverage(Config) when is_list(Config) ->
- %% sys_core_fold, sys_core_alias, sys_core_bsm, sys_core_setel, v3_kernel
+ %% sys_core_fold, sys_core_alias, sys_core_bsm, v3_kernel
BadCoreErlang = {c_module,[],
name,[],[],
[{{c_var,[],{foo,2}},seriously_bad_body}]},
expect_error(fun() -> sys_core_fold:module(BadCoreErlang, []) end),
expect_error(fun() -> sys_core_alias:module(BadCoreErlang, []) end),
expect_error(fun() -> sys_core_bsm:module(BadCoreErlang, []) end),
- expect_error(fun() -> sys_core_dsetel:module(BadCoreErlang, []) end),
expect_error(fun() -> v3_kernel:module(BadCoreErlang, []) end),
%% beam_kernel_to_ssa
@@ -183,17 +182,24 @@ silly_coverage(Config) when is_list(Config) ->
%% beam_ssa_lint
%% beam_ssa_recv
+ %% beam_ssa_share
%% beam_ssa_pre_codegen
- %% beam_ssa_opt
%% beam_ssa_codegen
BadSSA = {b_module,#{},a,b,c,
[{b_function,#{func_info=>{mod,foo,0}},args,bad_blocks,0}]},
expect_error(fun() -> beam_ssa_lint:module(BadSSA, []) end),
expect_error(fun() -> beam_ssa_recv:module(BadSSA, []) end),
+ expect_error(fun() -> beam_ssa_share:module(BadSSA, []) end),
expect_error(fun() -> beam_ssa_pre_codegen:module(BadSSA, []) end),
- expect_error(fun() -> beam_ssa_opt:module(BadSSA, []) end),
expect_error(fun() -> beam_ssa_codegen:module(BadSSA, []) end),
+ %% beam_ssa_opt
+ BadSSABlocks = #{0 => {b_blk,#{},[bad_code],{b_ret,#{},arg}}},
+ BadSSAOpt = {b_module,#{},a,[],c,
+ [{b_function,#{func_info=>{mod,foo,0}},[],
+ BadSSABlocks,0}]},
+ expect_error(fun() -> beam_ssa_opt:module(BadSSAOpt, []) end),
+
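
For orientation (editorial note, assuming the beam_ssa.hrl record layout of this OTP generation): the hand-built terms above are raw record tuples, roughly {b_module,Anno,Name,Exports,Attributes,Body}, {b_function,Anno,Args,Blocks,Counter} and {b_blk,Anno,Instructions,Terminator}. BadSSAOpt is therefore a one-function module whose only block holds the bogus instruction bad_code, which is what makes beam_ssa_opt crash as expected by expect_error/1.
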
%% beam_ssa_lint, beam_ssa_pp
{error,[{_,Errors}]} = beam_ssa_lint:module(bad_ssa_lint_input(), []),
_ = [io:put_chars(Mod:format_error(Reason)) ||
@@ -221,10 +227,6 @@ silly_coverage(Config) when is_list(Config) ->
{label,2}|non_proper_list]}],99},
expect_error(fun() -> beam_block:module(BlockInput, []) end),
- %% beam_bs
- BsInput = BlockInput,
- expect_error(fun() -> beam_bs:module(BsInput, []) end),
-
%% beam_except
ExceptInput = {?MODULE,[{foo,0}],[],
[{function,foo,0,2,
@@ -234,15 +236,9 @@ silly_coverage(Config) when is_list(Config) ->
{label,2}|non_proper_list]}],99},
expect_error(fun() -> beam_except:module(ExceptInput, []) end),
- %% beam_dead. This is tricky. Our function must look OK to
- %% beam_utils:clean_labels/1, but must crash beam_dead.
- DeadInput = {?MODULE,[{foo,0}],[],
- [{function,foo,0,2,
- [{label,1},
- {func_info,{atom,?MODULE},{atom,foo},0},
- {label,2},
- {test,is_eq_exact,{f,1},[bad,operands]}]}],99},
- expect_error(fun() -> beam_dead:module(DeadInput, []) end),
+ %% beam_jump
+ JumpInput = BlockInput,
+ expect_error(fun() -> beam_jump:module(JumpInput, []) end),
%% beam_clean
CleanInput = {?MODULE,[{foo,0}],[],
@@ -253,6 +249,10 @@ silly_coverage(Config) when is_list(Config) ->
{jump,{f,42}}]}],99},
expect_error(fun() -> beam_clean:module(CleanInput, []) end),
+ %% beam_trim
+ TrimInput = BlockInput,
+ expect_error(fun() -> beam_trim:module(TrimInput, []) end),
+
%% beam_peep. This is tricky. Use a select instruction with
%% an odd number of elements in the list to crash
%% prune_redundant_values/2 but not beam_clean:clean_labels/1.
@@ -260,21 +260,10 @@ silly_coverage(Config) when is_list(Config) ->
[{function,foo,0,2,
[{label,1},
{func_info,{atom,?MODULE},{atom,foo},0},
- {label,2},{select,op,r,{f,2},[{f,2}]}]}],
+ {label,2},{select,select_val,r,{f,2},[{f,2}]}]}],
2},
expect_error(fun() -> beam_peep:module(PeepInput, []) end),
- %% beam_bsm. This is tricky. Our function must be sane enough to not crash
- %% btb_index/1, but must crash the main optimization pass.
- BsmInput = {?MODULE,[{foo,0}],[],
- [{function,foo,0,2,
- [{label,1},
- {func_info,{atom,?MODULE},{atom,foo},0},
- {label,2},
- {test,bs_get_binary2,{f,99},0,[{x,0},{atom,all},1,[]],{x,0}},
- {block,[a|b]}]}],0},
- expect_error(fun() -> beam_bsm:module(BsmInput, []) end),
-
BeamZInput = {?MODULE,[{foo,0}],[],
[{function,foo,0,2,
[{label,1},
diff --git a/lib/compiler/test/overridden_bif_SUITE.erl b/lib/compiler/test/overridden_bif_SUITE.erl
index a46abe8dcf..6b8a9591c9 100644
--- a/lib/compiler/test/overridden_bif_SUITE.erl
+++ b/lib/compiler/test/overridden_bif_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/receive_SUITE.erl b/lib/compiler/test/receive_SUITE.erl
index 4219768d6f..0038eb1a4b 100644
--- a/lib/compiler/test/receive_SUITE.erl
+++ b/lib/compiler/test/receive_SUITE.erl
@@ -25,7 +25,8 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
export/1,recv/1,coverage/1,otp_7980/1,ref_opt/1,
- wait/1,recv_in_try/1,double_recv/1]).
+ wait/1,recv_in_try/1,double_recv/1,receive_var_zero/1,
+ match_built_terms/1]).
-include_lib("common_test/include/ct.hrl").
@@ -45,7 +46,8 @@ all() ->
groups() ->
[{p,test_lib:parallel(),
[recv,coverage,otp_7980,ref_opt,export,wait,
- recv_in_try,double_recv]}].
+ recv_in_try,double_recv,receive_var_zero,
+ match_built_terms]}].
init_per_suite(Config) ->
@@ -378,4 +380,51 @@ do_double_recv(_, Msg) ->
error
end.
+%% Test 'after Z', when Z =:= 0 has been propagated as an immediate by the type
+%% optimization pass.
+receive_var_zero(Config) when is_list(Config) ->
+ self() ! x,
+ self() ! y,
+ Z = zero(),
+ timeout = receive
+ z -> ok
+ after Z -> timeout
+ end,
+ timeout = receive
+ after Z -> timeout
+ end,
+ self() ! w,
+ receive
+ x -> ok;
+ Other ->
+ ct:fail({bad_message,Other})
+ end.
+
+zero() -> 0.
+
+%% ERL-862; the validator would explode when a term was constructed in a
+%% receive guard.
+
+-define(MATCH_BUILT_TERM(Ref, Expr),
+ (fun() ->
+ Ref = make_ref(),
+ A = id($a),
+ B = id($b),
+ Built = id(Expr),
+ self() ! {Ref, A, B},
+ receive
+ {Ref, A, B} when Expr =:= Built ->
+ ok
+ after 5000 ->
+ ct:fail("Failed to match message with term built in "
+ "receive guard.")
+ end
+ end)()).
+
+match_built_terms(Config) when is_list(Config) ->
+ ?MATCH_BUILT_TERM(Ref, [A, B]),
+ ?MATCH_BUILT_TERM(Ref, {A, B}),
+ ?MATCH_BUILT_TERM(Ref, <<A, B>>),
+ ?MATCH_BUILT_TERM(Ref, #{ 1 => A, 2 => B}).
+
id(I) -> I.
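
Since the ?MATCH_BUILT_TERM macro above packs quite a bit into one expression, here is a rough hand-expansion of ?MATCH_BUILT_TERM(Ref, {A, B}) as a sketch (the function name is made up; id/1 is the suite's own helper):

    match_built_tuple_example() ->
        Ref = make_ref(),
        A = id($a),
        B = id($b),
        Built = id({A, B}),
        self() ! {Ref, A, B},
        receive
            {Ref, A, B} when {A, B} =:= Built ->
                %% the {A, B} term built inside the receive guard is what ERL-862 hit
                ok
        after 5000 ->
            ct:fail("Failed to match message with term built in receive guard.")
        end.
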
diff --git a/lib/compiler/test/record_SUITE.erl b/lib/compiler/test/record_SUITE.erl
index 118e0a241c..4ed7f39780 100644
--- a/lib/compiler/test/record_SUITE.erl
+++ b/lib/compiler/test/record_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/compiler/test/regressions_SUITE.erl b/lib/compiler/test/regressions_SUITE.erl
index f448d54933..39febf060f 100644
--- a/lib/compiler/test/regressions_SUITE.erl
+++ b/lib/compiler/test/regressions_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2015-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2015-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@
-export([all/0,groups/0,init_per_testcase/2,end_per_testcase/2,
init_per_group/2,end_per_group/2,
- init_per_testcase/2,end_per_testcase/2,
+ init_per_suite/1,end_per_suite/1,
suite/0]).
-export([maps/1]).
diff --git a/lib/compiler/test/test_lib.erl b/lib/compiler/test/test_lib.erl
index c6baa611ec..39c26c6142 100644
--- a/lib/compiler/test/test_lib.erl
+++ b/lib/compiler/test/test_lib.erl
@@ -50,12 +50,8 @@ smoke_disasm(File) when is_list(File) ->
Res = beam_disasm:file(File),
{beam_file,_Mod} = {element(1, Res),element(2, Res)}.
-%% If we are running cover, we don't want to run test cases that
-%% invokes the compiler in parallel, as doing so would probably
-%% be slower than running them sequentially.
-
parallel() ->
- case test_server:is_cover() orelse erlang:system_info(schedulers) =:= 1 of
+ case erlang:system_info(schedulers) =:= 1 of
true -> [];
false -> [parallel]
end.
@@ -70,16 +66,24 @@ uniq() ->
opt_opts(Mod) ->
Comp = Mod:module_info(compile),
{options,Opts} = lists:keyfind(options, 1, Comp),
- lists:filter(fun(no_copt) -> true;
- (no_postopt) -> true;
- (no_ssa_opt) -> true;
- (no_recv_opt) -> true;
- (no_ssa_float) -> true;
- (no_stack_trimming) -> true;
- (debug_info) -> true;
- (inline) -> true;
- (_) -> false
- end, Opts).
+ lists:filter(fun
+ (debug_info) -> true;
+ (inline) -> true;
+ (no_bsm3) -> true;
+ (no_bsm_opt) -> true;
+ (no_copt) -> true;
+ (no_fun_opt) -> true;
+ (no_module_opt) -> true;
+ (no_postopt) -> true;
+ (no_put_tuple2) -> true;
+ (no_recv_opt) -> true;
+ (no_share_opt) -> true;
+ (no_ssa_float) -> true;
+ (no_ssa_opt) -> true;
+ (no_stack_trimming) -> true;
+ (no_type_opt) -> true;
+ (_) -> false
+ end, Opts).
%% Some test suites gets cloned (e.g. to "record_SUITE" to
%% "record_no_opt_SUITE"), but the data directory is not cloned.
@@ -89,17 +93,23 @@ get_data_dir(Config) ->
Data0 = proplists:get_value(data_dir, Config),
Opts = [{return,list}],
Data1 = re:replace(Data0, "_no_opt_SUITE", "_SUITE", Opts),
- Data = re:replace(Data1, "_post_opt_SUITE", "_SUITE", Opts),
- re:replace(Data, "_inline_SUITE", "_SUITE", Opts).
+ Data2 = re:replace(Data1, "_post_opt_SUITE", "_SUITE", Opts),
+ Data3 = re:replace(Data2, "_inline_SUITE", "_SUITE", Opts),
+ Data4 = re:replace(Data3, "_r21_SUITE", "_SUITE", Opts),
+ Data = re:replace(Data4, "_no_module_opt_SUITE", "_SUITE", Opts),
+ re:replace(Data, "_no_ssa_opt_SUITE", "_SUITE", Opts).
is_cloned_mod(Mod) ->
is_cloned_mod_1(atom_to_list(Mod)).
%% Test whether Mod is a cloned module.
-is_cloned_mod_1("no_opt_SUITE") -> true;
-is_cloned_mod_1("post_opt_SUITE") -> true;
-is_cloned_mod_1("inline_SUITE") -> true;
+is_cloned_mod_1("_no_opt_SUITE") -> true;
+is_cloned_mod_1("_no_ssa_opt_SUITE") -> true;
+is_cloned_mod_1("_post_opt_SUITE") -> true;
+is_cloned_mod_1("_inline_SUITE") -> true;
+is_cloned_mod_1("_21_SUITE") -> true;
+is_cloned_mod_1("_no_module_opt_SUITE") -> true;
is_cloned_mod_1([_|T]) -> is_cloned_mod_1(T);
is_cloned_mod_1([]) -> false.
@@ -108,18 +118,7 @@ is_cloned_mod_1([]) -> false.
p_run(Test, List) ->
S = erlang:system_info(schedulers),
- N = case test_server:is_cover() of
- false ->
- S + 1;
- true ->
- %% Cover is running. Using too many processes
- %% could slow us down. Measurements on my computer
- %% showed that using 4 parallel processes was
- %% slightly faster than using 3. Using more than
- %% 4 would not buy us much and could actually be
- %% slower.
- min(S, 4)
- end,
+ N = S + 1,
io:format("p_run: ~p parallel processes\n", [N]),
p_run_loop(Test, List, N, [], 0, 0).
diff --git a/lib/compiler/test/trycatch_SUITE.erl b/lib/compiler/test/trycatch_SUITE.erl
index 1b7ef4ddb0..8f9cd9ab1e 100644
--- a/lib/compiler/test/trycatch_SUITE.erl
+++ b/lib/compiler/test/trycatch_SUITE.erl
@@ -1189,7 +1189,8 @@ bad_raise(Expr) ->
test_raise(Expr) ->
test_raise_1(Expr),
test_raise_2(Expr),
- test_raise_3(Expr).
+ test_raise_3(Expr),
+ test_raise_4(Expr).
test_raise_1(Expr) ->
erase(exception),
@@ -1263,5 +1264,28 @@ do_test_raise_3(Expr) ->
erlang:raise(exit, {exception,C,E}, Stk)
end.
+test_raise_4(Expr) ->
+ try
+ do_test_raise_4(Expr)
+ catch
+ exit:{exception,C,E,Stk}:Stk ->
+ try
+ Expr()
+ catch
+ C:E:S ->
+ [StkTop|_] = S,
+ [StkTop|_] = Stk
+ end
+ end.
+
+do_test_raise_4(Expr) ->
+ try
+ Expr()
+ catch
+ C:E:Stk ->
+ %% Here the stacktrace must be built.
+ erlang:raise(exit, {exception,C,E,Stk}, Stk)
+ end.
+
id(I) -> I.
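
For reference on the new test_raise_4/1 above (editorial note): erlang:raise(Class, Reason, Stacktrace) re-throws with the supplied stacktrace, and because Stk is also embedded inside the Reason tuple (and matched again in the outer catch), the stacktrace can no longer be kept in its internal raw form but has to be built as an ordinary list of stack frames, which is the code path the "Here the stacktrace must be built." comment points at.
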
diff --git a/lib/compiler/test/warnings_SUITE.erl b/lib/compiler/test/warnings_SUITE.erl
index 42ff4f6133..70b7100451 100644
--- a/lib/compiler/test/warnings_SUITE.erl
+++ b/lib/compiler/test/warnings_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@
comprehensions/1,maps/1,maps_bin_opt_info/1,
redundant_boolean_clauses/1,
latin1_fallback/1,underscore/1,no_warnings/1,
- bit_syntax/1,inlining/1]).
+ bit_syntax/1,inlining/1,tuple_calls/1]).
init_per_testcase(_Case, Config) ->
Config.
@@ -64,7 +64,8 @@ groups() ->
bin_opt_info,bin_construction,comprehensions,maps,
maps_bin_opt_info,
redundant_boolean_clauses,latin1_fallback,
- underscore,no_warnings,bit_syntax,inlining]}].
+ underscore,no_warnings,bit_syntax,inlining,
+ tuple_calls]}].
init_per_suite(Config) ->
test_lib:recompile(?MODULE),
@@ -239,19 +240,7 @@ guard(Config) when is_list(Config) ->
{4,sys_core_fold,nomatch_guard},
{6,sys_core_fold,no_clause_match},
{6,sys_core_fold,nomatch_guard},
- {6,sys_core_fold,{eval_failure,badarg}},
- {8,sys_core_fold,no_clause_match},
- {8,sys_core_fold,nomatch_guard},
- {8,sys_core_fold,{eval_failure,badarg}},
- {9,sys_core_fold,no_clause_match},
- {9,sys_core_fold,nomatch_guard},
- {9,sys_core_fold,{eval_failure,badarg}},
- {10,sys_core_fold,no_clause_match},
- {10,sys_core_fold,nomatch_guard},
- {10,sys_core_fold,{eval_failure,badarg}},
- {11,sys_core_fold,no_clause_match},
- {11,sys_core_fold,nomatch_guard},
- {11,sys_core_fold,{eval_failure,badarg}}
+ {6,sys_core_fold,{eval_failure,badarg}}
]}}],
[] = run(Config, Ts),
@@ -522,25 +511,43 @@ bin_opt_info(Config) when is_list(Config) ->
<<>> -> ok
end.
+ %% We use a tail in a BIF instruction, remote call, function
+ %% return, and an optimizable tail call for better coverage.
+ t2(<<A,B,T/bytes>>) ->
+ if
+ A > B -> t2(T);
+ A =< B -> T
+ end;
+ t2(<<_,T/bytes>>) when byte_size(T) < 4 ->
+ foo;
t2(<<_,T/bytes>>) ->
- split_binary(T, 4).
+ split_binary(T, 4).
">>,
- Ts1 = [{bsm1,
- Code,
- [bin_opt_info],
- {warnings,
- [{4,sys_core_bsm,orig_bin_var_used_in_guard},
- {5,beam_bsm,{no_bin_opt,{{t1,1},no_suitable_bs_start_match}}},
- {9,beam_bsm,{no_bin_opt,
- {binary_used_in,{extfunc,erlang,split_binary,2}}}} ]}}],
- [] = run(Config, Ts1),
+
+ Ws = (catch run_test(Config, Code, [bin_opt_info])),
+
+ %% This is an inexact match since the pass reports exact instructions as
+ %% part of the warnings, which may include annotations that vary from run
+ %% to run.
+ {warnings,
+ [{5,beam_ssa_bsm,{unsuitable_call,
+ {{b_local,{b_literal,t1},1},
+ {used_before_match,
+ {b_set,_,_,{bif,byte_size},[_]}}}}},
+ {5,beam_ssa_bsm,{binary_created,_,_}},
+ {11,beam_ssa_bsm,{binary_created,_,_}}, %% A =< B -> T
+ {13,beam_ssa_bsm,context_reused}, %% A > B -> t2(T);
+ {16,beam_ssa_bsm,{binary_created,_,_}}, %% when byte_size(T) < 4 ->
+ {19,beam_ssa_bsm,{remote_call,
+ {b_remote,
+ {b_literal,erlang},
+ {b_literal,split_binary},2}}},
+ {19,beam_ssa_bsm,{binary_created,_,_}} %% split_binary(T, 4)
+ ]} = Ws,
%% For coverage: don't give the bin_opt_info option.
- Ts2 = [{bsm2,
- Code,
- [],
- []}],
- [] = run(Config, Ts2),
+ [] = (catch run_test(Config, Code, [])),
+
ok.
bin_construction(Config) when is_list(Config) ->
@@ -746,7 +753,7 @@ maps_bin_opt_info(Config) when is_list(Config) ->
M.
">>,
[bin_opt_info],
- {warnings,[{2,beam_bsm,bin_opt}]}}],
+ {warnings,[{3,beam_ssa_bsm,context_reused}]}}],
[] = run(Config, Ts),
ok.
@@ -952,6 +959,20 @@ inlining(Config) ->
run(Config, Ts),
ok.
+tuple_calls(Config) ->
+ %% Make sure that no spurious warnings are generated.
+ Ts = [{inlining_1,
+ <<"-compile(tuple_calls).
+ dispatch(X) ->
+ (list_to_atom(\"prefix_\" ++
+ atom_to_list(suffix))):doit(X).
+ ">>,
+ [],
+ []}
+ ],
+ run(Config, Ts),
+ ok.
+
%%%
%%% End of test cases.
%%%
@@ -969,7 +990,6 @@ run(Config, Tests) ->
end,
lists:foldl(F, [], Tests).
-
%% Compiles a test module and returns the list of errors and warnings.
run_test(Conf, Test0, Warnings) ->
diff --git a/lib/compiler/vsn.mk b/lib/compiler/vsn.mk
index 355113a94d..efedb414ad 100644
--- a/lib/compiler/vsn.mk
+++ b/lib/compiler/vsn.mk
@@ -1 +1 @@
-COMPILER_VSN = 7.2.3
+COMPILER_VSN = 7.3.1