-rw-r--r--  INSTALL.md | 31
-rw-r--r--  bootstrap/bin/start.boot | bin 5306 -> 5248 bytes
-rw-r--r--  bootstrap/bin/start_clean.boot | bin 5306 -> 5248 bytes
-rw-r--r--  bootstrap/lib/compiler/ebin/beam_asm.beam | bin 11572 -> 11576 bytes
-rw-r--r--  bootstrap/lib/compiler/ebin/compiler.app | 69
-rw-r--r--  bootstrap/lib/compiler/ebin/compiler.appup | 1
-rw-r--r--  bootstrap/lib/compiler/ebin/sys_pre_expand.beam | bin 16188 -> 16292 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/file.beam | bin 14424 -> 14416 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/hipe_unified_loader.beam | bin 12732 -> 12732 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/inet.beam | bin 20148 -> 20160 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/kernel.app | 120
-rw-r--r--  bootstrap/lib/kernel/ebin/kernel.appup | 27
-rw-r--r--  bootstrap/lib/stdlib/ebin/filelib.beam | bin 7280 -> 7308 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/lib.beam | bin 9060 -> 9068 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/otp_internal.beam | bin 7160 -> 7156 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/stdlib.app | 105
-rw-r--r--  bootstrap/lib/stdlib/ebin/stdlib.appup | 27
-rw-r--r--  erts/configure.in | 2
-rw-r--r--  erts/emulator/Makefile.in | 5
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 14
-rw-r--r--  erts/emulator/beam/beam_bp.c | 1997
-rw-r--r--  erts/emulator/beam/beam_bp.h | 233
-rw-r--r--  erts/emulator/beam/beam_debug.c | 13
-rw-r--r--  erts/emulator/beam/beam_emu.c | 242
-rw-r--r--  erts/emulator/beam/beam_load.h | 1
-rw-r--r--  erts/emulator/beam/code_ix.c | 16
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 559
-rw-r--r--  erts/emulator/beam/erl_nif.c | 7
-rw-r--r--  erts/emulator/beam/erl_process.c | 109
-rw-r--r--  erts/emulator/beam/erl_process.h | 8
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 8
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 36
-rw-r--r--  erts/emulator/beam/erl_thr_queue.c | 2
-rw-r--r--  erts/emulator/beam/erl_trace.c | 181
-rw-r--r--  erts/emulator/beam/export.c | 6
-rw-r--r--  erts/emulator/beam/export.h | 11
-rwxr-xr-x  erts/emulator/beam/global.h | 13
-rw-r--r--  erts/emulator/beam/ops.tab | 6
-rw-r--r--  erts/emulator/hipe/hipe_arm.c | 89
-rw-r--r--  erts/emulator/hipe/hipe_arm_bifs.m4 | 18
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 5
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 32
-rw-r--r--  erts/emulator/sys/common/erl_poll.h | 2
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl | 13
-rw-r--r--  erts/emulator/test/trace_local_SUITE.erl | 53
-rwxr-xr-x  erts/emulator/utils/make_tables | 2
-rw-r--r--  lib/compiler/src/sys_pre_expand.erl | 20
-rw-r--r--  lib/et/doc/src/et_intro.xml | 4
-rw-r--r--  lib/kernel/src/disk_log.erl | 7
-rw-r--r--  lib/kernel/src/disk_log.hrl | 4
-rw-r--r--  lib/kernel/test/disk_log_SUITE.erl | 24
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 53
-rw-r--r--  lib/ssl/src/ssl_manager.erl | 4
-rw-r--r--  lib/ssl/test/ssl_basic_SUITE.erl | 57
-rw-r--r--  lib/tools/doc/src/eprof.xml | 10
-rw-r--r--  lib/tools/emacs/erlang.el | 58
-rw-r--r--  lib/tools/emacs/test.erl.indented | 38
-rw-r--r--  lib/tools/emacs/test.erl.orig | 38
60 files changed, 2488 insertions(+), 1896 deletions(-)
diff --git a/INSTALL.md b/INSTALL.md
index a7bc0a53e4..70d465831d 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -693,20 +693,27 @@ Universal binaries and 64bit binaries are mutually exclusive options.
Building a fast Erlang VM on Mac OS Lion
----------------------------------------
-Starting with XCode 4.2, Apple no longer includes a "real" `gcc`
+Starting with Xcode 4.2, Apple no longer includes a "real" `gcc`
compiler (not based on the LLVM). Building with `llvm-gcc` or `clang`
will work, but the performance of the Erlang run-time system will not
be the best possible.
Note that if you have `gcc-4.2` installed and included in `PATH`
-(from a previous version of XCode), `configure` will automatically
+(from a previous version of Xcode), `configure` will automatically
make sure that `gcc-4.2` will be used to compile `beam_emu.c`
(the source file most in need of `gcc`).
If you don't have `gcc-4.2.` and want to build a run-time system with
the best possible performance, do like this:
-Install XCode from the AppStore if it is not already installed.
+Install Xcode from the AppStore if it is not already installed.
+
+For Xcode 4.3 you will also need to download "Command Line Tools"
+via the Downloads preference pane in Xcode.
+
+Some tools may still be lacking or out of date; we recommend using
+[Homebrew](https://github.com/mxcl/homebrew/wiki/installation) or
+MacPorts to update those tools.
Install MacPorts (<http://www.macports.org/>). Then:
@@ -717,7 +724,23 @@ If you want to build the `wx` application, get wxMac-2.8.12
(`wxMac-2.8.12.tar.gz` from
<http://sourceforge.net/projects/wxwindows/files/2.8.12/>) and build:
- $ arch_flags="-arch i386" ./configure CFLAGS="$arch_flags" CXXFLAGS="$arch_flags" CPPFLAGS="$arch_flags" LDFLAGS="$arch_flags" OBJCFLAGS="$arch_flags" OBJCXXFLAGS="$arch_flags" --prefix=/usr/local --with-macosx-sdk=/Developer/SDKs/MacOSX10.6.sdk --with-macosx-version-min=10.6 --enable-unicode --with-opengl --disable-shared
+Export the path to MacOSX10.6.sdk:
+
+ $ export SDK=/Developer/SDKs/MacOSX10.6.sdk
+
+In Xcode 4.3 the path has changed, so use the following instead:
+
+ $ export SDK=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.6.sdk
+
+Then configure and build wx:
+
+ $ arch_flags="-arch i386" ./configure CFLAGS="$arch_flags" CXXFLAGS="$arch_flags" CPPFLAGS="$arch_flags" LDFLAGS="$arch_flags" OBJCFLAGS="$arch_flags" OBJCXXFLAGS="$arch_flags" --prefix=/usr/local --with-macosx-sdk="$SDK" --with-macosx-version-min=10.6 --enable-unicode --with-opengl --disable-shared
+ $ make
+ $ sudo make install
+
+To link wx properly, we also need to build and install `wxStyledTextCtrl`:
+
+ $ cd contrib/src/stc
$ make
$ sudo make install
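
Once wxMac and OTP itself have been built and installed as above, a quick
smoke test from an Erlang shell confirms that the `wx` application loads and
links against wxWidgets. The calls below (`wx:new/0`, `wxFrame:new/3`,
`wxFrame:show/1`, `wx:destroy/0`) come from the `wx` application and are only
a suggested sanity check, not part of the build instructions themselves:

    $ erl
    1> Wx = wx:new().
    2> F = wxFrame:new(Wx, -1, "wx smoke test").
    3> wxFrame:show(F).
    4> wx:destroy().
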
diff --git a/bootstrap/bin/start.boot b/bootstrap/bin/start.boot
index 6d62af557e..c6ebc852a2 100644
--- a/bootstrap/bin/start.boot
+++ b/bootstrap/bin/start.boot
Binary files differ
diff --git a/bootstrap/bin/start_clean.boot b/bootstrap/bin/start_clean.boot
index 6d62af557e..c6ebc852a2 100644
--- a/bootstrap/bin/start_clean.boot
+++ b/bootstrap/bin/start_clean.boot
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/beam_asm.beam b/bootstrap/lib/compiler/ebin/beam_asm.beam
index 4da4c26d92..7b1ee38ef8 100644
--- a/bootstrap/lib/compiler/ebin/beam_asm.beam
+++ b/bootstrap/lib/compiler/ebin/beam_asm.beam
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/compiler.app b/bootstrap/lib/compiler/ebin/compiler.app
new file mode 100644
index 0000000000..5170231c59
--- /dev/null
+++ b/bootstrap/lib/compiler/ebin/compiler.app
@@ -0,0 +1,69 @@
+% This is an -*- erlang -*- file.
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+
+{application, compiler,
+ [{description, "ERTS CXC 138 10"},
+ {vsn, "4.8.1"},
+ {modules, [
+ beam_asm,
+ beam_block,
+ beam_bool,
+ beam_bsm,
+ beam_clean,
+ beam_dead,
+ beam_dict,
+ beam_disasm,
+ beam_except,
+ beam_flatten,
+ beam_jump,
+ beam_listing,
+ beam_opcodes,
+ beam_peep,
+ beam_receive,
+ beam_split,
+ beam_trim,
+ beam_type,
+ beam_utils,
+ beam_validator,
+ cerl,
+ cerl_clauses,
+ cerl_inline,
+ cerl_trees,
+ compile,
+ core_scan,
+ core_lint,
+ core_parse,
+ core_pp,
+ core_lib,
+ erl_bifs,
+ rec_env,
+ sys_core_dsetel,
+ sys_core_fold,
+ sys_core_inline,
+ sys_expand_pmod,
+ sys_pre_attributes,
+ sys_pre_expand,
+ v3_codegen,
+ v3_core,
+ v3_kernel,
+ v3_kernel_pp,
+ v3_life
+ ]},
+ {registered, []},
+ {applications, [kernel, stdlib]},
+ {env, []}]}.
diff --git a/bootstrap/lib/compiler/ebin/compiler.appup b/bootstrap/lib/compiler/ebin/compiler.appup
new file mode 100644
index 0000000000..887d074b16
--- /dev/null
+++ b/bootstrap/lib/compiler/ebin/compiler.appup
@@ -0,0 +1 @@
+{"4.8.1",[],[]}.
diff --git a/bootstrap/lib/compiler/ebin/sys_pre_expand.beam b/bootstrap/lib/compiler/ebin/sys_pre_expand.beam
index d716583b97..f7e9541e4c 100644
--- a/bootstrap/lib/compiler/ebin/sys_pre_expand.beam
+++ b/bootstrap/lib/compiler/ebin/sys_pre_expand.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/file.beam b/bootstrap/lib/kernel/ebin/file.beam
index 00725935d8..243be75c43 100644
--- a/bootstrap/lib/kernel/ebin/file.beam
+++ b/bootstrap/lib/kernel/ebin/file.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam b/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam
index fb71997f32..1574686ba6 100644
--- a/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam
+++ b/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/inet.beam b/bootstrap/lib/kernel/ebin/inet.beam
index e47766329b..ddb1f2a24b 100644
--- a/bootstrap/lib/kernel/ebin/inet.beam
+++ b/bootstrap/lib/kernel/ebin/inet.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/kernel.app b/bootstrap/lib/kernel/ebin/kernel.app
new file mode 100644
index 0000000000..48f6250828
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/kernel.app
@@ -0,0 +1,120 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% This is an -*- erlang -*- file.
+%%
+{application, kernel,
+ [
+ {description, "ERTS CXC 138 10"},
+ {vsn, "2.16"},
+ {modules, [application,
+ application_controller,
+ application_master,
+ application_starter,
+ auth,
+ code,
+ packages,
+ code_server,
+ dist_util,
+ erl_boot_server,
+ erl_distribution,
+ erl_reply,
+ error_handler,
+ error_logger,
+ file,
+ file_server,
+ file_io_server,
+ global,
+ global_group,
+ global_search,
+ group,
+ heart,
+ hipe_unified_loader,
+ inet6_tcp,
+ inet6_tcp_dist,
+ inet6_udp,
+ inet6_sctp,
+ inet_config,
+ inet_hosts,
+ inet_gethost_native,
+ inet_tcp_dist,
+ kernel,
+ kernel_config,
+ net,
+ net_adm,
+ net_kernel,
+ os,
+ ram_file,
+ rpc,
+ user,
+ user_drv,
+ user_sup,
+ disk_log,
+ disk_log_1,
+ disk_log_server,
+ disk_log_sup,
+ dist_ac,
+ erl_ddll,
+ erl_epmd,
+ erts_debug,
+ gen_tcp,
+ gen_udp,
+ gen_sctp,
+ inet,
+ inet_db,
+ inet_dns,
+ inet_parse,
+ inet_res,
+ inet_tcp,
+ inet_udp,
+ inet_sctp,
+ pg2,
+ seq_trace,
+ standard_error,
+ wrap_log_reader]},
+ {registered, [application_controller,
+ erl_reply,
+ auth,
+ boot_server,
+ code_server,
+ disk_log_server,
+ disk_log_sup,
+ erl_prim_loader,
+ error_logger,
+ file_server_2,
+ fixtable_server,
+ global_group,
+ global_name_server,
+ heart,
+ init,
+ kernel_config,
+ kernel_sup,
+ net_kernel,
+ net_sup,
+ rex,
+ user,
+ os_server,
+ ddll_server,
+ erl_epmd,
+ inet_db,
+ pg2]},
+ {applications, []},
+ {env, [{error_logger, tty}]},
+ {mod, {kernel, []}}
+ ]
+}.
diff --git a/bootstrap/lib/kernel/ebin/kernel.appup b/bootstrap/lib/kernel/ebin/kernel.appup
new file mode 100644
index 0000000000..b534b736be
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/kernel.appup
@@ -0,0 +1,27 @@
+%% -*- erlang -*-
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+{"2.16",
+ %% Up from - max two major revisions back
+ [{<<"2\\.16(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R16
+ {<<"2\\.15(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R15
+ {<<"2\\.14(\\.[0-9]+)*">>,[restart_new_emulator]}],%% R14
+ %% Down to - max two major revisions back
+ [{<<"2\\.16(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R16
+ {<<"2\\.15(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R15
+ {<<"2\\.14(\\.[0-9]+)*">>,[restart_new_emulator]}] %% R14
+}.
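
The version patterns in the appup entries above are binaries that the release
handling tools interpret as regular expressions when deciding which old or new
versions an upgrade or downgrade applies to. As a rough illustration (not part
of the bootstrap files themselves), the R15 pattern accepts any 2.15.x version
string while rejecting unrelated versions, which can be checked directly with
`re:run/3`:

    1> re:run("2.15.3", "2\\.15(\\.[0-9]+)*", [{capture, none}]).
    match
    2> re:run("2.13.4", "2\\.15(\\.[0-9]+)*", [{capture, none}]).
    nomatch
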
diff --git a/bootstrap/lib/stdlib/ebin/filelib.beam b/bootstrap/lib/stdlib/ebin/filelib.beam
index 5d8a6f7b07..20f48fe18e 100644
--- a/bootstrap/lib/stdlib/ebin/filelib.beam
+++ b/bootstrap/lib/stdlib/ebin/filelib.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/lib.beam b/bootstrap/lib/stdlib/ebin/lib.beam
index 4b67bbb774..3865cfb227 100644
--- a/bootstrap/lib/stdlib/ebin/lib.beam
+++ b/bootstrap/lib/stdlib/ebin/lib.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/otp_internal.beam b/bootstrap/lib/stdlib/ebin/otp_internal.beam
index 1a9a55dc24..270d35ef33 100644
--- a/bootstrap/lib/stdlib/ebin/otp_internal.beam
+++ b/bootstrap/lib/stdlib/ebin/otp_internal.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/stdlib.app b/bootstrap/lib/stdlib/ebin/stdlib.app
new file mode 100644
index 0000000000..461531e1df
--- /dev/null
+++ b/bootstrap/lib/stdlib/ebin/stdlib.app
@@ -0,0 +1,105 @@
+%% This is an -*- erlang -*- file.
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+{application, stdlib,
+ [{description, "ERTS CXC 138 10"},
+ {vsn, "1.19"},
+ {modules, [array,
+ base64,
+ beam_lib,
+ binary,
+ c,
+ calendar,
+ dets,
+ dets_server,
+ dets_sup,
+ dets_utils,
+ dets_v8,
+ dets_v9,
+ dict,
+ digraph,
+ digraph_utils,
+ edlin,
+ edlin_expand,
+ epp,
+ eval_bits,
+ erl_bits,
+ erl_compile,
+ erl_eval,
+ erl_expand_records,
+ erl_internal,
+ erl_lint,
+ erl_parse,
+ erl_posix_msg,
+ erl_pp,
+ erl_scan,
+ erl_tar,
+ error_logger_file_h,
+ error_logger_tty_h,
+ escript,
+ ets,
+ file_sorter,
+ filelib,
+ filename,
+ gb_trees,
+ gb_sets,
+ gen,
+ gen_event,
+ gen_fsm,
+ gen_server,
+ io,
+ io_lib,
+ io_lib_format,
+ io_lib_fread,
+ io_lib_pretty,
+ lib,
+ lists,
+ log_mf_h,
+ math,
+ ms_transform,
+ orddict,
+ ordsets,
+ otp_internal,
+ pg,
+ pool,
+ proc_lib,
+ proplists,
+ qlc,
+ qlc_pt,
+ queue,
+ random,
+ re,
+ sets,
+ shell,
+ shell_default,
+ slave,
+ sofs,
+ string,
+ supervisor,
+ supervisor_bridge,
+ sys,
+ timer,
+ unicode,
+ win32reg,
+ zip]},
+ {registered,[timer_server,rsh_starter,take_over_monitor,pool_master,
+ dets]},
+ {applications, [kernel]},
+ {env, []}]}.
+
diff --git a/bootstrap/lib/stdlib/ebin/stdlib.appup b/bootstrap/lib/stdlib/ebin/stdlib.appup
new file mode 100644
index 0000000000..ff9b5387c9
--- /dev/null
+++ b/bootstrap/lib/stdlib/ebin/stdlib.appup
@@ -0,0 +1,27 @@
+%% -*- erlang -*-
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+{"1.19",
+ %% Up from - max two major revisions back
+ [{<<"1\\.19(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R16
+ {<<"1\\.18(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R15
+ {<<"1\\.17(\\.[0-9]+)*">>,[restart_new_emulator]}],%% R14
+ %% Down to - max two major revisions back
+ [{<<"1\\.19(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R16
+ {<<"1\\.18(\\.[0-9]+)*">>,[restart_new_emulator]}, %% R15
+ {<<"1\\.17(\\.[0-9]+)*">>,[restart_new_emulator]}] %% R14
+}.
diff --git a/erts/configure.in b/erts/configure.in
index 500d1490e9..b3289bf84c 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -618,6 +618,8 @@ case $chk_arch_ in
armv5b) ARCH=arm;;
armv5teb) ARCH=arm;;
armv5tel) ARCH=arm;;
+ armv5tejl) ARCH=arm;;
+ armv7l) ARCH=arm;;
tile) ARCH=tile;;
*) ARCH=noarch;;
esac
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index de4be0551f..e20e38d61b 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -926,6 +926,11 @@ $(TTF_DIR)/hipe_arm_bifs.S: hipe/hipe_arm_bifs.m4 hipe/hipe_arm_asm.m4 \
$(OBJDIR)/hipe_arm_bifs.o: $(TTF_DIR)/hipe_arm_bifs.S \
$(TTF_DIR)/hipe_literals.h
+# Use -fomit-frame-pointer to work around a gcc (v4.5.2) bug causing
+# "error: r7 cannot be used in asm here" for DEBUG build.
+$(OBJDIR)/hipe_arm.o: hipe/hipe_arm.c
+ $(CC) $(subst O2,O3, $(CFLAGS)) -fomit-frame-pointer $(INCLUDES) -c $< -o $@
+
# end of HiPE section
########################################
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 4bf1c13421..94f8edf165 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -361,7 +361,7 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
* without any memory barriers at all.
*/
- later = erts_thr_progress_later();
+ later = erts_thr_progress_later(c_p->scheduler_data);
erts_thr_progress_wakeup(c_p->scheduler_data, later);
erts_notify_code_ix_activation(c_p, later);
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
@@ -672,11 +672,11 @@ set_default_trace_pattern(Eterm module)
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
- (void) erts_set_trace_pattern(mfa, 1,
+ (void) erts_set_trace_pattern(0, mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid);
+ meta_tracer_pid, 1);
}
}
@@ -1006,12 +1006,11 @@ delete_code(Module* modp)
if (ep->code[3] == (BeamInstr) em_apply_bif) {
continue;
}
- else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
+ else if (ep->code[3] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
- --modp->curr.num_traced_exports;
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+ erts_clear_export_break(modp, ep->code+3);
}
else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
|| !erts_initialized);
@@ -1019,7 +1018,6 @@ delete_code(Module* modp)
ep->addressv[code_ix] = ep->code+3;
ep->code[3] = (BeamInstr) em_call_error_handler;
ep->code[4] = 0;
- ASSERT(ep->match_prog_set == NULL);
}
}
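
The bulk of the remaining changes (beam_bp.c, beam_bp.h, erl_bif_trace.c,
beam_emu.c) replace the per-opcode breakpoint ring with a single generic
breakpoint carrying staged flag/data sets, which is what the
`erts_set_trace_pattern` and `erts_clear_export_break` calls above now plug
into. For orientation, the Erlang-level API that ultimately drives this
machinery is `erlang:trace/3`, `erlang:trace_pattern/3` and
`erlang:trace_info/2`; a minimal session exercising local call tracing and
`call_time` breakpoints might look like the sketch below (illustrative only,
not part of the patch):

    %% Enable call tracing for all processes, with the shell as tracer.
    1> erlang:trace(all, true, [call]).
    %% Install a local-call breakpoint on lists:seq/2.
    2> erlang:trace_pattern({lists, seq, 2}, true, [local]).
    %% Also accumulate call_time statistics for the same function.
    3> erlang:trace_pattern({lists, seq, 2}, true, [call_time]).
    4> spawn(fun() -> lists:seq(1, 1000) end).
    5> erlang:trace_info({lists, seq, 2}, call_time).
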
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 26dadfbbc0..50d18b0347 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -44,67 +44,34 @@
#define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SZ))
#define Free(P) erts_free(ERTS_ALC_T_BPD, (P))
-/*
-** Doubly linked ring macros
-*/
-
-#define BpInit(a,i) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (a); \
- (a)->prev = (a); \
-} while (0)
-
-#define BpSpliceNext(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->next->prev; \
- c->next->prev = d->next->prev; \
- d->next->prev = e; \
- e = c->next; \
- c->next = d->next; \
- d->next = e; \
-} while (0)
-
-#define BpSplicePrev(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->prev->next; \
- c->prev->next = d->prev->next; \
- d->prev->next = e; \
- e = c->prev; \
- c->prev = d->prev; \
- d->prev = e; \
-} while (0)
-
-#ifdef DEBUG
-# define BpSingleton(a) ((a)->next == (a) && (a)->prev == (a))
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
-# define BpSingleton(a) ((a)->next == (a))
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
#endif
-#define BpInitAndSpliceNext(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->prev = (b); \
- (b)->next->prev = (a); \
- (a)->next = (b)->next; \
- (b)->next = (a); \
-} while (0)
+#define ERTS_BPF_LOCAL_TRACE 0x01
+#define ERTS_BPF_META_TRACE 0x02
+#define ERTS_BPF_COUNT 0x04
+#define ERTS_BPF_COUNT_ACTIVE 0x08
+#define ERTS_BPF_DEBUG 0x10
+#define ERTS_BPF_TIME_TRACE 0x20
+#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40
+#define ERTS_BPF_GLOBAL_TRACE 0x80
-#define BpInitAndSplicePrev(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (b); \
- (b)->prev->next = (a); \
- (a)->prev = (b)->prev; \
- (b)->prev = (a); \
-} while (0)
+#define ERTS_BPF_ALL 0xFF
+extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_exception_trace[1]; /* OpCode(i_exception_trace) */
+extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-#define BREAK_IS_BIF (1)
-#define BREAK_IS_ERL (0)
-
+erts_smp_atomic32_t erts_active_bp_index;
+erts_smp_atomic32_t erts_staging_bp_index;
/* *************************************************************************
** Local prototypes
@@ -113,26 +80,30 @@ do { \
/*
** Helpers
*/
-
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-
-static int clear_break(Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_function_break(Module *modp, BeamInstr *pc, int bif,
- BeamInstr break_op);
-
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op);
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op);
+static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid);
+static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid);
+static void set_function_break(BeamInstr *pc,
+ Binary *match_spec,
+ Uint break_flags,
+ enum erts_break_op count_op,
+ Eterm tracer_pid);
+
+static void clear_break(BpFunctions* f, Uint break_flags);
+static int clear_function_break(BeamInstr *pc, Uint break_flags);
+
+static BpDataTime* get_time_break(BeamInstr *pc);
+static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
+static void bp_time_diff(bp_data_time_item_t *item,
+ process_breakpoint_time_t *pbt,
+ Uint ms, Uint s, Uint us);
+
+static void bp_meta_unref(BpMetaPid* bmp);
+static void bp_count_unref(BpCount* bcp);
+static void bp_time_unref(BpDataTime* bdt);
+static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
+static void uninstall_breakpoint(BeamInstr* pc);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
@@ -152,240 +123,996 @@ static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_da
static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
static void bp_hash_delete(bp_time_hash_t *hash);
-
/* *************************************************************************
** External interfaces
*/
-erts_smp_spinlock_t erts_bp_lock;
-
void
erts_bp_init(void) {
- erts_smp_spinlock_init(&erts_bp_lock, "breakpoints");
+ erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
+ erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
}
-int
-erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
+
+void
+erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ Uint max_funcs = 0;
+ int current;
+ int max_modules = module_code_size(code_ix);
+ int num_modules = 0;
+ Module* modp;
+ Module** module;
+ Uint i;
+
+ module = (Module **) Alloc(max_modules*sizeof(Module *));
+ num_modules = 0;
+ for (current = 0; current < max_modules; current++) {
+ modp = module_code(current, code_ix);
+ if (modp->curr.code) {
+ max_funcs += modp->curr.code[MI_NUM_FUNCTIONS];
+ module[num_modules++] = modp;
+ }
+ }
+
+ f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction));
+ i = 0;
+ for (current = 0; current < num_modules; current++) {
+ BeamInstr** code_base = (BeamInstr **) module[current]->curr.code;
+ BeamInstr* code;
+ Uint num_functions = (Uint) code_base[MI_NUM_FUNCTIONS];
+ Uint fi;
+
+ if (specified > 0) {
+ if (mfa[0] != make_atom(module[current]->module)) {
+ /* Wrong module name */
+ continue;
+ }
+ }
+
+ for (fi = 0; fi < num_functions; fi++) {
+ Eterm* pc;
+ int wi;
+
+ code = code_base[MI_FUNCTIONS+fi];
+ ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ pc = code+5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ if (is_nil(code[3])) { /* Ignore BIF stub */
+ continue;
+ }
+ for (wi = 0;
+ wi < specified && (Eterm) code[2+wi] == mfa[wi];
+ wi++) {
+ /* Empty loop body */
+ }
+ if (wi == specified) {
+ /* Store match */
+ f->matching[i].pc = pc;
+ f->matching[i].mod = module[current];
+ i++;
+ }
+ }
+ }
+ f->matched = i;
+ Free(module);
}
-int
-erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+void
+erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ int i;
+ int num_exps = export_list_size(code_ix);
+ int ne;
+
+ f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
+ ne = 0;
+ for (i = 0; i < num_exps; i++) {
+ Export* ep = export_list(i, code_ix);
+ BeamInstr* pc;
+ int j;
+
+ for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
+ /* Empty loop body */
+ }
+ if (j < specified) {
+ continue;
+ }
+ pc = ep->code+3;
+ if (ep->addressv[code_ix] == pc) {
+ if ((*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) em_call_error_handler)) {
+ continue;
+ }
+ ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ continue;
+ }
+
+ f->matching[ne].pc = pc;
+ f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ ne++;
+
+ }
+ f->matched = ne;
}
-/* set breakpoint data for on exported bif entry */
+void
+erts_bp_free_matched_functions(BpFunctions* f)
+{
+ Free(f->matching);
+}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+erts_consolidate_bp_data(BpFunctions* f, int local)
+{
+ BpFunction* fs = f->matching;
+ Uint i;
+ Uint n = f->matched;
+
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+
+ for (i = 0; i < n; i++) {
+ consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ }
}
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) {
- set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_consolidate_bif_bp_data(void)
+{
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+ for (i = 0; i < BIF_SIZE; i++) {
+ Export *ep = bif_export[i];
+ consolidate_bp_data(0, ep->code+3, 0);
+ }
}
-void erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static void
+consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+ GenericBpData* src;
+ GenericBpData* dst;
+ Uint flags;
+
+ if (g == 0) {
+ return;
+ }
+
+ src = &g->data[erts_active_bp_ix()];
+ dst = &g->data[erts_staging_bp_ix()];
+
+ /*
+ * The contents of the staging area may be out of date.
+ * Decrement all reference pointers.
+ */
+
+ flags = dst->flags;
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ bp_meta_unref(dst->meta_pid);
+ MatchSetUnref(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ bp_count_unref(dst->count);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ bp_time_unref(dst->time);
+ }
+
+ /*
+ * If all flags are zero, deallocate all breakpoint data.
+ */
+
+ flags = dst->flags = src->flags;
+ if (flags == 0) {
+ if (modp) {
+ if (local) {
+ modp->curr.num_breakpoints--;
+ } else {
+ modp->curr.num_traced_exports--;
+ }
+ ASSERT(modp->curr.num_breakpoints >= 0);
+ ASSERT(modp->curr.num_traced_exports >= 0);
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ }
+ pc[-4] = 0;
+ Free(g);
+ return;
+ }
+
+ /*
+ * Copy the active data to the staging area (making it ready
+ * for the next time it will be used).
+ */
+
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ dst->local_ms = src->local_ms;
+ MatchSetRef(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ dst->meta_pid = src->meta_pid;
+ erts_refc_inc(&dst->meta_pid->refc, 1);
+ dst->meta_ms = src->meta_ms;
+ MatchSetRef(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ dst->count = src->count;
+ erts_refc_inc(&dst->count->refc, 1);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ dst->time = src->time;
+ erts_refc_inc(&dst->time->refc, 1);
+ ASSERT(dst->time->hash);
+ }
}
-int
-erts_set_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL);
+void
+erts_commit_staged_bp(void)
+{
+ ErtsBpIndex staging = erts_staging_bp_ix();
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ erts_smp_atomic32_set_nob(&erts_active_bp_index, staging);
+ erts_smp_atomic32_set_nob(&erts_staging_bp_index, active);
}
-int
-erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
+void
+erts_install_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+ BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (*pc != br && g) {
+ Module* modp = f->matching[i].mod;
+
+ /*
+ * The breakpoint must be disabled in the active data
+	     * (it will be enabled later by switching bp indices),
+ * and enabled in the staging data.
+ */
+ ASSERT(g->data[erts_active_bp_ix()].flags == 0);
+ ASSERT(g->data[erts_staging_bp_ix()].flags != 0);
+
+ /*
+ * The following write is not protected by any lock. We
+ * assume that the hardware guarantees that a write of an
+	     * aligned word-size (or half-word) is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = br;
+ modp->curr.num_breakpoints++;
+ }
+ }
}
-int
-erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_uninstall_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ uninstall_breakpoint(pc);
+ }
}
-int
-erts_clear_trace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_trace_breakpoint));
+static void
+uninstall_breakpoint(BeamInstr* pc)
+{
+ if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g->data[erts_active_bp_ix()].flags == 0) {
+ /*
+ * The following write is not protected by any lock. We
+ * assume that the hardware guarantees that a write of an
+	     * aligned word-size (or half-word) is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = g->orig_instr;
+ }
+ }
}
-int
-erts_clear_mtrace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+void
+erts_set_trace_break(BpFunctions* f, Binary *match_spec)
+{
+ set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
}
void
-erts_clear_mtrace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
+{
+ set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_debug_breakpoint));
+void
+erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+
+ set_function_break(pc, match_spec, flags, 0, NIL);
}
-int
-erts_clear_count_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_count_breakpoint));
+void
+erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
+{
+ set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_time_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_time_breakpoint));
+void
+erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+{
+ set_function_break(pc, NULL,
+ ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
}
-int
-erts_clear_break(Eterm mfa[3], int specified) {
+void
+erts_clear_time_trace_bif(BeamInstr *pc) {
+ clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_set_debug_break(BpFunctions* f) {
+ set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
+}
+
+void
+erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_clear_trace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_LOCAL_TRACE);
+}
+
+void
+erts_clear_call_trace_bif(BeamInstr *pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+
+ if (g) {
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ if (g->data[erts_staging_bp_ix()].flags & flags) {
+ clear_function_break(pc, flags);
+ }
+ }
+}
+
+void
+erts_clear_mtrace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_mtrace_bif(BeamInstr *pc)
+{
+ clear_function_break(pc, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_debug_break(BpFunctions* f)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified, 0);
+ clear_break(f, ERTS_BPF_DEBUG);
+}
+
+void
+erts_clear_count_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE);
+}
+
+void
+erts_clear_time_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_clear_all_breaks(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_ALL);
}
int
erts_clear_module_break(Module *modp) {
+ BeamInstr** code_base;
+ Uint n;
+ Uint i;
+
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
- return clear_module_break(modp, NULL, 0, 0);
+ code_base = (BeamInstr **) modp->curr.code;
+ if (code_base == NULL) {
+ return 0;
+ }
+ n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ clear_function_break(pc, ERTS_BPF_ALL);
+ }
+
+ erts_commit_staged_bp();
+
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ uninstall_breakpoint(pc);
+ consolidate_bp_data(modp, pc, 1);
+ ASSERT(pc[-4] == 0);
+ }
+ return n;
}
-int
-erts_clear_function_break(Module *modp, BeamInstr *pc) {
+void
+erts_clear_export_break(Module* modp, BeamInstr* pc)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(modp);
- return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
+
+ clear_function_break(pc, ERTS_BPF_ALL);
+ erts_commit_staged_bp();
+ *pc = (BeamInstr) 0;
+ consolidate_bp_data(modp, pc, 0);
+ ASSERT(pc[-4] == 0);
}
+BeamInstr
+erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint bp_flags;
+ ErtsBpIndex ix = erts_active_bp_ix();
+
+ g = (GenericBp *) I[-4];
+ bp = &g->data[ix];
+ bp_flags = bp->flags;
+ ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE) &&
+ !IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
+ bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE);
+ if (bp_flags == 0) { /* Quick exit */
+ return g->orig_instr;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
+ ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
+ (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
+ (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
+ }
+
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm old_pid;
+ Eterm new_pid;
+
+ old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
+ if (new_pid != old_pid) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
+ erts_smp_atomic_inc_nob(&bp->count->acount);
+ }
+
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
+ Eterm w;
+ erts_trace_time_call(c_p, I, bp->time);
+ w = (BeamInstr) *c_p->cp;
+ if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
+ w == (BeamInstr) BeamOp(op_return_trace) ||
+ w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
+ Eterm* E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 2 < c_p->htop) {
+ (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ }
+ E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+
+ E -= 2;
+ E[0] = make_cp(I);
+ E[1] = make_cp(c_p->cp); /* original return address */
+ c_p->cp = beam_return_time_trace;
+ c_p->stop = E;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_DEBUG) {
+ return (BeamInstr) BeamOp(op_i_debug_breakpoint);
+ } else {
+ return g->orig_instr;
+ }
+}
/*
- * SMP NOTE: Process p may have become exiting on return!
+ * Entry point called by the trace wrap functions in erl_bif_wrap.c
+ *
+ * The trace wrap functions are themselves called through the export
+ * entries instead of the original BIF functions.
*/
-BeamInstr
-erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
- Uint32 *ret_flags, Eterm *tracer_pid) {
- Eterm tpid1, tpid2;
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+Eterm
+erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
+{
+ Eterm result;
+ Eterm (*func)(Process*, Eterm*, BeamInstr*);
+ Export* ep = bif_export[bif_index];
+ Uint32 flags = 0, flags_meta = 0;
+ Eterm meta_tracer_pid = NIL;
+ int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
+ BeamInstr *cp = p->cp;
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint bp_flags = 0;
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ bp = &g->data[erts_active_bp_ix()];
+ bp_flags = bp->flags;
+ }
- ASSERT(bds);
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)];
- ASSERT(bdt);
- bdt = (BpDataTrace *) bdt->next;
- ASSERT(bdt);
- ASSERT(ret_flags);
- ASSERT(tracer_pid);
-
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
-
- *ret_flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- 1, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
- }
- bds[bp_sched2ix_proc(p)] = (BpData *) bdt;
- return bdt->orig_instr;
+ /*
+ * Make continuation pointer OK, it is not during direct BIF calls,
+ * but it is correct during apply of bif.
+ */
+ if (!applying) {
+ p->cp = I;
+ }
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
+ flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ local, &p->tracer_proc);
+ }
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm tpid1, tpid2;
+
+ tpid1 = tpid2 =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ 0, &tpid2);
+ meta_tracer_pid = tpid2;
+ if (tpid1 != tpid2) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ }
+ }
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+ erts_trace_time_call(p, pc, bp->time);
+ }
+
+ /* Restore original continuation pointer (if changed). */
+ p->cp = cp;
+
+ func = bif_table[bif_index].f;
+
+ result = func(p, args, I);
+
+ if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+ Eterm *cpp;
+ /* Maybe advance cp to skip trace stack frames */
+ for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
+ if (*cp == i_return_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
+ } else if (*cp == i_return_to_trace) {
+ /* A return_to trace message is going to be generated
+ * by normal means, so we do not have to.
+ */
+ cp = NULL;
+ break;
+ } else break;
+ }
+ }
+
+ /* Try to get these in the order
+ * they usually appear in normal code... */
+ if (is_non_value(result)) {
+ Uint reason = p->freason;
+ if (reason != TRAP) {
+ Eterm class;
+ Eterm value = p->fvalue;
+ DeclareTmpHeapNoproc(nocatch,3);
+ UseTmpHeapNoproc(3);
+ /* Expand error value like in handle_error() */
+ if (reason & EXF_ARGLIST) {
+ Eterm *tp;
+ ASSERT(is_tuple(value));
+ tp = tuple_val(value);
+ value = tp[1];
+ }
+ if ((reason & EXF_THROWN) && (p->catches <= 0)) {
+ value = TUPLE2(nocatch, am_nocatch, value);
+ reason = EXC_ERROR;
+ }
+ /* Note: expand_error_value() could theoretically
+ * allocate on the heap, but not for any error
+ * returned by a BIF, and it would do no harm,
+ * just be annoying.
+ */
+ value = expand_error_value(p, reason, value);
+ class = exception_tag[GET_EXC_CLASS(reason)];
+
+ if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &meta_tracer_pid);
+ }
+ if (flags & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &p->tracer_proc);
+ }
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
+ /* can only happen if(local)*/
+ Eterm *ptr = p->stop;
+ ASSERT(is_CP(*ptr));
+ ASSERT(ptr <= STACK_START(p));
+ /* Search the nearest stack frame for a catch */
+ while (++ptr < STACK_START(p)) {
+ if (is_CP(*ptr)) break;
+ if (is_catch(*ptr)) {
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into
+ * calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ }
+ UnUseTmpHeapNoproc(3);
+ if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ } else {
+ if (flags_meta & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ }
+ /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
+ if (flags & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &p->tracer_proc);
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE) {
+ /* can only happen if(local)*/
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ return result;
+}
+
+static Eterm
+do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid)
+{
+ Eterm* cpp;
+ int return_to_trace = 0;
+ BeamInstr w;
+ BeamInstr *cp_save;
+ Uint32 flags;
+ Uint need = 0;
+ Eterm* E = c_p->stop;
+
+ w = *c_p->cp;
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp = &E[2];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp = &E[0];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp = &E[0];
+ } else {
+ cpp = NULL;
+ }
+ if (cpp) {
+ for (;;) {
+ BeamInstr w = *cp_val(*cpp);
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp += 3;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp += 1;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp += 2;
+ } else {
+ break;
+ }
+ }
+ cp_save = c_p->cp;
+ c_p->cp = (BeamInstr *) cp_val(*cpp);
+ ASSERT(is_CP(*cpp));
+ }
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ if (cpp) {
+ c_p->cp = cp_save;
+ }
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
+ need += 1;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ need += 3;
+ }
+ if (need) {
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - need < c_p->htop) {
+ (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ E = c_p->stop;
+ }
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE && !return_to_trace) {
+ E -= 1;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ E[0] = make_cp(c_p->cp);
+ c_p->cp = (BeamInstr *) beam_return_to_trace;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ E -= 3;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ ASSERT(is_CP((Eterm) (UWord) (I - 3)));
+ ASSERT(am_true == tracer_pid ||
+ is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ E[2] = make_cp(c_p->cp);
+ E[1] = tracer_pid;
+ E[0] = make_cp(I - 3); /* We ARE at the beginning of an
+ instruction,
+ the funcinfo is above i. */
+ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
+ beam_exception_trace : beam_return_trace;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ c_p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ c_p->stop = E;
+ return tracer_pid;
}
+void
+erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+ ASSERT(c_p);
+ ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING);
-/*
- * SMP NOTE: Process p may have become exiting on return!
- */
-Uint32
-erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
- Eterm *tracer_pid) {
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+ pbt = ERTS_PROC_GET_CALL_TIME(c_p);
+ get_sys_now(&ms, &s, &us);
- ASSERT(tracer_pid);
- if (bds) {
- Eterm tpid1, tpid2;
- Uint32 flags;
- bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)];
+ /* get pbt
+ * timestamp = t0
+ * lookup bdt from code
+ * set ts0 to pbt
+ * add call count here?
+ */
+ if (pbt == 0) {
+ /* First call of process to instrumented function */
+ pbt = Alloc(sizeof(process_breakpoint_time_t));
+ (void *) ERTS_PROC_SET_CALL_TIME(c_p, ERTS_PROC_LOCK_MAIN, pbt);
+ } else {
+ ASSERT(pbt->pc);
+ /* add time to previous code */
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = c_p->id;
+ sitem.count = 0;
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
- flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- local, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
+ /* if null then the breakpoint was removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
}
- return flags;
}
- *tracer_pid = NIL;
- return 0;
+
+ /* Add count to this code */
+ sitem.pid = c_p->id;
+ sitem.count = 1;
+ sitem.s_time = 0;
+ sitem.us_time = 0;
+
+ /* this breakpoint */
+ ASSERT(bdt);
+ h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+
+ pbt->pc = I;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
}
+void
+erts_trace_time_return(Process *p, BeamInstr *pc)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+ ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING);
+
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+ get_sys_now(&ms,&s,&us);
+
+ /* get pbt
+ * lookup bdt from code
+ * timestamp = t1
+ * get ts0 from pbt
+ * get item from bdt->hash[bp_hash(p->id)]
+ * ack diff (t1, t0) to item
+ */
+
+ if (pbt) {
+ /* might have been removed due to
+ * trace_pattern(false)
+ */
+ ASSERT(pbt->pc);
+
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
+
+ /* beware, the trace_pattern might have been removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ }
+}
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_trace_breakpoint));
-
- if (bdt) {
+erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ GenericBpData* bp = check_break(pc, flags);
+
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
- }
- if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ *match_spec_ret = bp->local_ms;
}
- return !0;
+ return 1;
}
return 0;
}
int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+ Eterm *tracer_pid_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
- if (bdt) {
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
+ *match_spec_ret = bp->meta_ms;
}
if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ *tracer_pid_ret =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -402,15 +1129,15 @@ erts_is_native_break(BeamInstr *pc) {
}
int
-erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
- BpDataCount *bdc =
- (BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint));
+erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
- if (bdc) {
+ if (bp) {
if (count_ret) {
- *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount);
+ *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -421,7 +1148,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ BpDataTime *bdt = get_time_break(pc);
if (bdt) {
if (retval) {
@@ -464,7 +1191,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
bp_hash_delete(&hash);
}
- return !0;
+ return 1;
}
return 0;
@@ -655,7 +1382,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ pbdt = get_time_break(pbt->pc);
if (pbdt) {
get_sys_now(&ms,&s,&us);
bp_time_diff(&sitem, pbt, ms, s, us);
@@ -693,671 +1420,259 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
} /* pbt */
}
-/* call_time breakpoint
- * Accumulated times are added to the previous bp,
- * not the current one. The current one is saved
- * for future reference.
- * The previous breakpoint is stored in the process it self, the psd.
- * We do not need to store in a stack frame.
- * There is no need for locking, each thread has its own
- * area in each bp to save data.
- * Since we need to diffrentiate between processes for each bp,
- * every bp has a hash (per thread) to process-bp statistics.
- * - egil
- */
-
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) {
- Uint ms,s,us;
- process_breakpoint_time_t *pbt = NULL;
- bp_data_time_item_t sitem, *item = NULL;
- bp_time_hash_t *h = NULL;
- BpDataTime *pbdt = NULL;
-
- ASSERT(p);
- ASSERT(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&p->state));
-
- /* get previous timestamp and breakpoint
- * from the process psd */
-
- pbt = ERTS_PROC_GET_CALL_TIME(p);
- get_sys_now(&ms,&s,&us);
-
- switch(type) {
- /* get pbt
- * timestamp = t0
- * lookup bdt from code
- * set ts0 to pbt
- * add call count here?
- */
- case ERTS_BP_CALL_TIME_CALL:
- case ERTS_BP_CALL_TIME_TAIL_CALL:
-
- if (pbt) {
- ASSERT(pbt->pc);
- /* add time to previous code */
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* if null then the breakpoint was removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- } else {
- /* first call of process to instrumented function */
- pbt = Alloc(sizeof(process_breakpoint_time_t));
- (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
- }
- /* add count to this code */
- sitem.pid = p->id;
- sitem.count = 1;
- sitem.s_time = 0;
- sitem.us_time = 0;
-
- /* this breakpoint */
- ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- break;
-
- case ERTS_BP_CALL_TIME_RETURN:
- /* get pbt
- * lookup bdt from code
- * timestamp = t1
- * get ts0 from pbt
- * get item from bdt->hash[bp_hash(p->id)]
- * ack diff (t1, t0) to item
- */
-
- if(pbt) {
- /* might have been removed due to
- * trace_pattern(false)
- */
- ASSERT(pbt->pc);
-
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* beware, the trace_pattern might have been removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- }
- break;
- default :
- ASSERT(0);
- /* will never happen */
- break;
- }
-}
-
-
/* *************************************************************************
** Local helpers
*/
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid)
+static void
+set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
{
- Module *modp;
- int num_processed = 0;
- ErtsCodeIndex code_ix = erts_active_code_ix();
- if (!specified) {
- /* Find and process all modules in the system... */
- int current;
- int last = module_code_size(code_ix);
- for (current = 0; current < last; current++) {
- modp = module_code(current, code_ix);
- ASSERT(modp != NULL);
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0], code_ix)) != NULL) {
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- }
- return num_processed;
-}
-
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i,n;
+ Uint i;
+ Uint n;
- ASSERT(break_op);
- ASSERT(modp);
- code_base = (BeamInstr **) modp->curr.code;
- if (code_base == NULL) {
- return 0;
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ set_function_break(pc, match_spec, break_flags,
+ count_op, tracer_pid);
}
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr+5;
-
- num_processed +=
- set_function_break(modp, pc, BREAK_IS_ERL, match_spec,
- break_op, count_op, tracer_pid);
- }
- }
- return num_processed;
}
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
-
- BeamInstr **code_base = NULL;
- BpData *bd, **r, ***rs;
- size_t size;
- Uint ix = 0;
-
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)modp->curr.code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (modp->curr.code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(modp == NULL);
+static void
+set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
+
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+ g = (GenericBp *) pc[-4];
+ if (g == 0) {
+ int i;
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return;
+ }
+ g = Alloc(sizeof(GenericBp));
+ g->orig_instr = *pc;
+ for (i = 0; i < ERTS_NUM_BP_IX; i++) {
+ g->data[i].flags = 0;
+ }
+ pc[-4] = (BeamInstr) g;
}
+ bp = &g->data[ix];
/*
- * Currently no trace support for native code.
+ * If we are changing an existing breakpoint, clean up old data.
*/
- if (erts_is_native_break(pc)) {
- return 0;
- }
- /* Do not allow two breakpoints of the same kind */
- if ( (bd = is_break(pc, break_op))) {
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint)
- || break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- Binary *old_match_spec;
-
- /* Update match spec and tracer */
- MatchSetRef(match_spec);
- ErtsSmpBPLock(bdt);
- old_match_spec = bdt->match_spec;
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- ErtsSmpBPUnlock(bdt);
- MatchSetUnref(old_match_spec);
- } else {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_aint_t count = 0;
- erts_aint_t res = 0;
-
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
-
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_stop) {
- count = erts_smp_atomic_read_nob(&bdc->acount);
- if (count >= 0) {
- while(1) {
- res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count);
- if ((res == count) || count < 0) break;
- count = res;
- }
- }
- } else {
- /* Reset call counter */
- erts_smp_atomic_set_nob(&bdc->acount, 0);
- }
-
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-
- if (count_op == erts_break_stop) {
- bdt->pause = 1;
- } else {
- bdt->pause = 0;
- for (i = 0; i < bdt->n; i++) {
- bp_hash_delete(&(bdt->hash[i]));
- bp_hash_init(&(bdt->hash[i]), 32);
- }
- }
- } else {
- ASSERT (! count_op);
- }
- }
- return 1;
- }
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
- size = sizeof(BpDataTrace);
- } else {
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataCount);
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataTime);
+ common = break_flags & bp->flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
+ } else if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ bp_meta_unref(bp->meta_pid);
+ } else if (common & ERTS_BPF_COUNT) {
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
} else {
- ASSERT(! count_op);
- ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint));
- size = sizeof(BpDataDebug);
+ bp->flags |= ERTS_BPF_COUNT_ACTIVE;
+ erts_smp_atomic_set_nob(&bp->count->acount, 0);
}
- }
- rs = (BpData ***) (pc-4);
- if (! *rs) {
- size_t ssize = sizeof(BeamInstr) * erts_no_schedulers;
- *rs = (BpData **) Alloc(ssize);
- sys_memzero(*rs, ssize);
- }
-
- r = &((*rs)[0]);
-
- if (! *r) {
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint));
- /* First breakpoint; create singleton ring */
- bd = Alloc(size);
- BpInit(bd, *pc);
- *r = bd;
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else {
- ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) ||
- *pc == (BeamInstr) em_apply_bif);
- if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp must be last, so if it is also first;
- * it must be singleton. */
- ASSERT(BpSingleton(*r));
- /* Insert new bp first in the ring, i.e second to last. */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, *pc, *r);
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else if ((*r)->prev->orig_instr
- == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp last in the ring; insert new second to last. */
- bd = Alloc(size);
- BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r);
- (*r)->prev->orig_instr = break_op;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
+ } else if (common & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt = bp->time;
+ Uint i = 0;
+
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
} else {
- /* Just insert last in the ring */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, (*r)->orig_instr, *r);
- (*r)->orig_instr = break_op;
- *r = bd;
+ bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_delete(&(bdt->hash[i]));
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
}
- }
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
}
- bd->this_instr = break_op;
- /* Init the bp type specific data */
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
-
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- bdt->pause = 0;
- bdt->n = erts_no_schedulers;
- bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
+ /*
+ * Initialize the new breakpoint data.
+ */
+ if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetRef(match_spec);
+ bp->local_ms = match_spec;
+ } else if (break_flags & ERTS_BPF_META_TRACE) {
+ BpMetaPid* bmp;
+ MatchSetRef(match_spec);
+ bp->meta_ms = match_spec;
+ bmp = Alloc(sizeof(BpMetaPid));
+ erts_refc_init(&bmp->refc, 1);
+ erts_smp_atomic_init_nob(&bmp->pid, tracer_pid);
+ bp->meta_pid = bmp;
+ } else if (break_flags & ERTS_BPF_COUNT) {
+ BpCount* bcp;
+
+ ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
+ bcp = Alloc(sizeof(BpCount));
+ erts_refc_init(&bcp->refc, 1);
+ erts_smp_atomic_init_nob(&bcp->acount, 0);
+ bp->count = bcp;
+ } else if (break_flags & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt;
+ int i;
+
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
+ bdt = Alloc(sizeof(BpDataTime));
+ erts_refc_init(&bdt->refc, 1);
+ bdt->n = erts_no_schedulers;
+ bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
}
- } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_smp_atomic_init_nob(&bdc->acount, 0);
+ bp->time = bdt;
}
- if (bif == BREAK_IS_ERL) {
- ++modp->curr.num_breakpoints;
+ bp->flags |= break_flags;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+}
+
+static void
+clear_break(BpFunctions* f, Uint break_flags)
+{
+ Uint i;
+ Uint n;
+
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ clear_function_break(pc, break_flags);
}
- return 1;
}
-static int clear_break(Eterm mfa[3], int specified, BeamInstr break_op)
+static int
+clear_function_break(BeamInstr *pc, Uint break_flags)
{
- ErtsCodeIndex code_ix = erts_active_code_ix();
- int num_processed = 0;
- Module *modp;
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
- if (!specified) {
- /* Iterate over all modules */
- int current;
- int last = module_code_size(code_ix);
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
- for (current = 0; current < last; current++) {
- modp = module_code(current, code_ix);
- ASSERT(modp != NULL);
- num_processed += clear_module_break(modp, mfa, specified, break_op);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0], code_ix)) != NULL) {
- num_processed +=
- clear_module_break(modp, mfa, specified, break_op);
- }
+ if ((g = (GenericBp *) pc[-4]) == 0) {
+ return 1;
}
- return num_processed;
-}
-static int clear_module_break(Module *m, Eterm mfa[3], int specified,
- BeamInstr break_op) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i;
- BeamInstr n;
-
- ASSERT(m);
- code_base = (BeamInstr **) m->curr.code;
- if (code_base == NULL) {
- return 0;
+ bp = &g->data[ix];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ common = bp->flags & break_flags;
+ bp->flags &= ~break_flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
}
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr + 5;
-
- num_processed +=
- clear_function_break(m, pc, BREAK_IS_ERL, break_op);
- }
+ if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ }
+ if (common & ERTS_BPF_COUNT) {
+ ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
+ bp_count_unref(bp->count);
+ }
+ if (common & ERTS_BPF_TIME_TRACE) {
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0);
+ bp_time_unref(bp->time);
}
- return num_processed;
-}
-static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) {
- BpData *bd;
- Uint ix = 0;
- BeamInstr **code_base = NULL;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return 1;
+}
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)m->curr.code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (m->curr.code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(m == NULL);
+static void
+bp_meta_unref(BpMetaPid* bmp)
+{
+ if (erts_refc_dectest(&bmp->refc, 0) <= 0) {
+ Free(bmp);
}
+}
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(pc)) {
- return 0;
+static void
+bp_count_unref(BpCount* bcp)
+{
+ if (erts_refc_dectest(&bcp->refc, 0) <= 0) {
+ Free(bcp);
}
+}
- while ( (bd = is_break(pc, break_op))) {
- /* Remove all breakpoints of this type.
- * There should be only one of each type,
- * but break_op may be 0 which matches any type.
+static void
+bp_time_unref(BpDataTime* bdt)
+{
+ if (erts_refc_dectest(&bdt->refc, 0) <= 0) {
+ Uint i = 0;
+ Uint j = 0;
+ Process *h_p = NULL;
+ bp_data_time_item_t* item = NULL;
+ process_breakpoint_time_t* pbt = NULL;
+
+ /* remove all psd associated with the hash
+ * and then delete the hash.
+ * ... sigh ...
*/
- BeamInstr op;
- BpData ***rs = (BpData ***) (pc - 4);
- BpData **r = NULL;
-#ifdef DEBUG
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- ASSERT((*rs)[ix] == (*rs)[0]);
- }
-#endif
-
- r = &((*rs)[0]);
-
- ASSERT(*r);
- /* Find opcode for this breakpoint */
- if (break_op) {
- op = break_op;
- } else {
- if (bd == (*r)->next) {
- /* First breakpoint in ring */
- op = *pc;
- } else {
- op = bd->prev->orig_instr;
- }
- }
- if (BpSingleton(bd)) {
- ASSERT(*r == bd);
- /* Only one breakpoint to remove */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- Free(*rs);
- *rs = NULL;
- } else {
- BpData *bd_prev = bd->prev;
-
- BpSpliceNext(bd, bd_prev);
- ASSERT(BpSingleton(bd));
- if (bd == *r) {
- /* We removed the last breakpoint in the ring */
- *r = bd_prev;
- bd_prev->orig_instr = bd->orig_instr;
- } else if (bd_prev == *r) {
- /* We removed the first breakpoint in the ring */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- } else {
- bd_prev->orig_instr = bd->orig_instr;
- }
- }
- if (op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- MatchSetUnref(bdt->match_spec);
- }
- if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
- Uint j = 0;
- Process *h_p = NULL;
- bp_data_time_item_t *item = NULL;
- process_breakpoint_time_t *pbt = NULL;
-
- /* remove all psd associated with the hash
- * and then delete the hash.
- * ... sigh ...
- */
-
- for( i = 0; i < bdt->n; ++i) {
- if (bdt->hash[i].used) {
- for (j = 0; j < bdt->hash[i].n; ++j) {
- item = &(bdt->hash[i].item[j]);
- if (item->pid != NIL) {
- h_p = erts_proc_lookup(item->pid);
- if (h_p) {
- pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL);
- if (pbt) {
- Free(pbt);
- }
+ for (i = 0; i < bdt->n; ++i) {
+ if (bdt->hash[i].used) {
+ for (j = 0; j < bdt->hash[i].n; ++j) {
+ item = &(bdt->hash[i].item[j]);
+ if (item->pid != NIL) {
+ h_p = erts_pid2proc(NULL, 0, item->pid,
+ ERTS_PROC_LOCK_MAIN);
+ if (h_p) {
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p,
+ ERTS_PROC_LOCK_MAIN,
+ NULL);
+ if (pbt) {
+ Free(pbt);
}
+ erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
}
}
}
- bp_hash_delete(&(bdt->hash[i]));
}
- Free(bdt->hash);
- bdt->hash = NULL;
- bdt->n = 0;
+ bp_hash_delete(&(bdt->hash[i]));
}
- Free(bd);
- if (bif == BREAK_IS_ERL) {
- ASSERT(m->curr.num_breakpoints > 0);
- --m->curr.num_breakpoints;
- }
- if (*rs) {
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
- }
- }
- } /* while bd != NULL */
- return 1;
+ Free(bdt->hash);
+ Free(bdt);
+ }
}
-
-
-/*
-** Searches (linear forward) the breakpoint ring for a specified opcode
-** and returns a pointer to the breakpoint data structure or NULL if
-** not found. If the specified opcode is 0, the last breakpoint is
-** returned. The program counter must point to the first executable
-** (breakpoint) instruction of the function.
-*/
-
-BpData *erts_get_time_break(Process *p, BeamInstr *pc) {
- return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static BpDataTime*
+get_time_break(BeamInstr *pc)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ return bp ? bp->time : 0;
}
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (! erts_is_native_break(pc)) {
- BpData **rs = (BpData **) pc[-4];
- BpData *bd = NULL, *ebd = NULL;
-
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[bp_sched2ix_proc(p)];
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- return bd;
- }
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
- }
- bd = bd->next;
- }
- }
- return NULL;
-}
+static GenericBpData*
+check_break(BeamInstr *pc, Uint break_flags)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
- BpData **rs;
- BpData *bd = NULL, *ebd = NULL;
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
-
if (erts_is_native_break(pc)) {
- return NULL;
- }
- rs = (BpData **) pc[-4];
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[erts_bp_sched2ix()];
- ASSERT(bd);
- if ( (break_op == 0) || (bd->this_instr == break_op)) {
- return bd;
+ return 0;
}
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
+ if (g) {
+ GenericBpData* bp = &g->data[erts_active_bp_ix()];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ if (bp->flags & break_flags) {
+ return bp;
}
- bd = bd->next;
}
- return NULL;
+ return 0;
}
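
The rewritten beam_bp.c above keeps all breakpoint state for a function in a single GenericBp holding one GenericBpData per generation, and check_break() just tests a flags word in the currently active generation. Below is a minimal standalone sketch of that lookup idea, with illustrative names and types rather than the emulator's own:

    /* Sketch only: each breakpointed function carries one struct with
     * per-generation data; a reader sees data only if the requested
     * flag bit is set in the active generation. */
    #include <stdio.h>

    #define BPF_COUNT (1u << 0)
    #define NUM_GENERATIONS 2

    struct bp_data {
        unsigned flags;
        long     count;
    };

    struct generic_bp {
        struct bp_data data[NUM_GENERATIONS];
    };

    static int active_ix = 0;   /* which generation readers consult */

    /* Analogous to check_break(): return the data only if this kind of
     * breakpoint is enabled in the active generation. */
    static struct bp_data *check_break(struct generic_bp *g, unsigned flags)
    {
        struct bp_data *bp = &g->data[active_ix];
        return (bp->flags & flags) ? bp : NULL;
    }

    int main(void)
    {
        struct generic_bp g = {{{0, 0}, {0, 0}}};

        g.data[1].flags = BPF_COUNT;   /* stage a call-count breakpoint */
        printf("before commit: %s\n",
               check_break(&g, BPF_COUNT) ? "count bp active" : "no bp");
        active_ix = 1;                 /* "commit": switch generations */
        printf("after commit:  %s\n",
               check_break(&g, BPF_COUNT) ? "count bp active" : "no bp");
        return 0;
    }
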
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 167069552f..28aaaa462a 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -25,77 +25,6 @@
#include "erl_vm.h"
#include "global.h"
-
-
-/* A couple of gotchas:
- *
- * The breakpoint structure from BeamInstr,
- * In beam_emu where the instruction counter pointer, I (or pc),
- * points to the *current* instruction. At that time, if the instruction
- * is a breakpoint instruction the pc looks like the following,
- *
- * I[-5] | op_i_func_info_IaaI | scheduler specific entries
- * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN |
- * I[-3] | Tagged Module | | |
- * I[-2] | Tagged Function | V V
- * I[-1] | Arity | BpData -> BpData -> BpData -> BpData
- * I[0] | The bp instruction | ^ * the bp wheel * |
- * |------------------------------
- *
- * Common struct to all bp_data_*
- *
- * 1) The type of bp_data structure in the ring is deduced from the
- * orig_instr field of the structure _before_ in the ring, except for
- * the first structure in the ring that has its instruction in
- * pc[0] of the code to execute.
- * This is valid as long as you don't search for the function while it is
- * being executed by something else. Or is in the middle of its rotation for
- * any other reason.
- * A key, the bp beam instruction, is included for this reason.
- *
- * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the
- * breakpoints are being executed.
- *
- * So, as an example, when a breakpointed function starts to execute,
- * the first instruction that is a breakpoint instruction at pc[0] finds
- * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer
- * to the correct bp_data type.
-*/
-
-typedef struct bp_data {
- struct bp_data *next; /* Doubly linked ring pointers */
- struct bp_data *prev; /* -"- */
- BeamInstr orig_instr; /* The original instruction to execute */
- BeamInstr this_instr; /* key */
-} BpData;
-/*
-** All the following bp_data_.. structs must begin the same way
-*/
-
-typedef struct bp_data_trace {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Binary *match_spec;
- Eterm tracer_pid;
-} BpDataTrace;
-
-typedef struct bp_data_debug {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
-} BpDataDebug;
-
-typedef struct bp_data_count { /* Call count */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- erts_smp_atomic_t acount;
-} BpDataCount;
-
typedef struct {
Eterm pid;
Sint count;
@@ -110,13 +39,9 @@ typedef struct {
} bp_time_hash_t;
typedef struct bp_data_time { /* Call time */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Uint pause;
- Uint n;
- bp_time_hash_t *hash;
+ Uint n;
+ bp_time_hash_t *hash;
+ erts_refc_t refc;
} BpDataTime;
typedef struct {
@@ -126,64 +51,42 @@ typedef struct {
BeamInstr *pc;
} process_breakpoint_time_t; /* used within psd */
-extern erts_smp_spinlock_t erts_bp_lock;
+typedef struct {
+ erts_smp_atomic_t acount;
+ erts_refc_t refc;
+} BpCount;
+
+typedef struct {
+ erts_smp_atomic_t pid;
+ erts_refc_t refc;
+} BpMetaPid;
+
+typedef struct generic_bp_data {
+ Uint flags;
+ Binary* local_ms; /* Match spec for local call trace */
+ Binary* meta_ms; /* Match spec for meta trace */
+ BpMetaPid* meta_pid; /* Meta trace pid */
+ BpCount* count; /* For call count */
+ BpDataTime* time; /* For time trace */
+} GenericBpData;
+
+#define ERTS_NUM_BP_IX 2
+
+typedef struct generic_bp {
+ BeamInstr orig_instr;
+ GenericBpData data[ERTS_NUM_BP_IX];
+} GenericBp;
#define ERTS_BP_CALL_TIME_SCHEDULE_IN (0)
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#define ERTS_BP_CALL_TIME_CALL (0)
-#define ERTS_BP_CALL_TIME_RETURN (1)
-#define ERTS_BP_CALL_TIME_TAIL_CALL (2)
-
-#ifdef ERTS_SMP
-#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
-#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
-#else
-#define ErtsSmpBPLock(BDC)
-#define ErtsSmpBPUnlock(BDC)
-#endif
-
#ifdef ERTS_SMP
#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
#else
#define bp_sched2ix_proc(p) (0)
#endif
-#define ErtsCountBreak(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpDataCount *bdc = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- erts_aint_t count = 0; \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bdc = (BpDataCount *) bds[ix]; \
- bdc = (BpDataCount *) bdc->next; \
- ASSERT(bdc); \
- bds[ix] = (BpData *) bdc; \
- count = erts_smp_atomic_read_nob(&bdc->acount); \
- if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \
- *(instr_result) = bdc->orig_instr; \
-} while (0)
-
-#define ErtsBreakSkip(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpData *bd = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bd = bds[ix]; \
- ASSERT(bd); \
- bd = bd->next; \
- ASSERT(bd); \
- bds[ix] = bd; \
- *(instr_result) = bd->orig_instr; \
-} while (0)
-
enum erts_break_op{
erts_break_nop = 0, /* Must be false */
erts_break_set = !0, /* Must be true */
@@ -191,7 +94,17 @@ enum erts_break_op{
erts_break_stop
};
+typedef Uint32 ErtsBpIndex;
+typedef struct {
+ BeamInstr* pc;
+ Module* mod;
+} BpFunction;
+
+typedef struct {
+ Uint matched; /* Number matched */
+ BpFunction* matching; /* Matching functions */
+} BpFunctions;
/*
** Function interface exported from beam_bp.c
@@ -199,49 +112,66 @@ enum erts_break_op{
void erts_bp_init(void);
-int erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid);
-int erts_clear_trace_break(Eterm mfa[3], int specified);
-int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
+void erts_prepare_bp_staging(void);
+void erts_commit_staged_bp(void);
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
+
+void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_free_matched_functions(BpFunctions* f);
+
+void erts_install_breakpoints(BpFunctions* f);
+void erts_uninstall_breakpoints(BpFunctions* f);
+void erts_consolidate_bp_data(BpFunctions* f, int local);
+void erts_consolidate_bif_bp_data(void);
+
+void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
+void erts_clear_trace_break(BpFunctions *f);
+
+void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+
+void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
Eterm tracer_pid);
-int erts_clear_mtrace_break(Eterm mfa[3], int specified);
+void erts_clear_mtrace_break(BpFunctions *f);
void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
Eterm tracer_pid);
void erts_clear_mtrace_bif(BeamInstr *pc);
-int erts_set_debug_break(Eterm mfa[3], int specified);
-int erts_clear_debug_break(Eterm mfa[3], int specified);
-int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_count_break(Eterm mfa[3], int specified);
+void erts_set_debug_break(BpFunctions *f);
+void erts_clear_debug_break(BpFunctions *f);
+void erts_set_count_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_count_break(BpFunctions *f);
-int erts_clear_break(Eterm mfa[3], int specified);
+
+void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-int erts_clear_function_break(Module *modp, BeamInstr *pc);
+void erts_clear_export_break(Module *modp, BeamInstr* pc);
+BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid);
-Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args,
- int local, Eterm *tracer_pid);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret);
+int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_rte);
 			 Eterm *tracer_pid_ret);
int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Sint *count_ret);
+int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type);
+void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, BeamInstr* pc);
void erts_schedule_time_break(Process *p, Uint out);
-int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_time_break(Eterm mfa[3], int specified);
+void erts_set_time_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_time_break(BpFunctions *f);
int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
void erts_clear_time_trace_bif(BeamInstr *pc);
-BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
@@ -258,6 +188,19 @@ ERTS_GLB_INLINE Uint erts_bp_sched2ix(void)
return 0;
#endif
}
+
+extern erts_smp_atomic32_t erts_active_bp_index;
+extern erts_smp_atomic32_t erts_staging_bp_index;
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_active_bp_index);
+}
+
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_staging_bp_index);
+}
#endif
#endif /* _BEAM_BP_H */
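
ERTS_NUM_BP_IX together with erts_active_bp_ix() and erts_staging_bp_ix() declared above implements a double-buffered scheme: writers mutate only the staging generation, and a commit publishes it by swapping the index. A self-contained sketch of that pattern, using C11 atomics in place of the erts_smp_atomic32 wrappers (the real code also waits for thread progress before reusing the retired generation, which is omitted here):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned active_ix  = 0;
    static _Atomic unsigned staging_ix = 1;

    static int settings[2];          /* two generations of breakpoint data */

    static void commit_staged(void)  /* cf. erts_commit_staged_bp() */
    {
        unsigned ix = atomic_load(&staging_ix);
        atomic_store(&active_ix, ix);
        atomic_store(&staging_ix, ix ^ 1u);
    }

    int main(void)
    {
        /* mutate only the staged copy, then publish it atomically */
        settings[atomic_load(&staging_ix)] = 42;
        printf("active before: %d\n", settings[atomic_load(&active_ix)]);
        commit_staged();
        printf("active after:  %d\n", settings[atomic_load(&active_ix)]);
        return 0;
    }
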
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index e69cbc3048..a609ed8c71 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -84,6 +84,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
int i;
int specified = 0;
Eterm res;
+ BpFunctions f;
if (bool != am_true && bool != am_false)
goto error;
@@ -121,11 +122,19 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
+ erts_bp_match_functions(&f, mfa, specified);
if (bool == am_true) {
- res = make_small(erts_set_debug_break(mfa, specified));
+ erts_set_debug_break(&f);
+ erts_install_breakpoints(&f);
+ erts_commit_staged_bp();
} else {
- res = make_small(erts_clear_debug_break(mfa, specified));
+ erts_clear_debug_break(&f);
+ erts_commit_staged_bp();
+ erts_uninstall_breakpoints(&f);
}
+ erts_consolidate_bp_data(&f, 1);
+ res = make_small(f.matched);
+ erts_bp_free_matched_functions(&f);
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
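
Note the asymmetry in erts_debug_breakpoint_2 above: when enabling, the breakpoint instructions are installed first and only become live at erts_commit_staged_bp(); when disabling, the commit happens before the instructions are removed. A sketch of the two orderings, with stub functions standing in for the real API:

    #include <stdio.h>

    static void install_instructions(void)   { puts("write breakpoint instructions"); }
    static void uninstall_instructions(void) { puts("restore original instructions"); }
    static void commit_staged(void)          { puts("swap active/staging index"); }

    static void set_breakpoints(void)
    {
        install_instructions();   /* present in the code but still disabled */
        commit_staged();          /* now they take effect */
    }

    static void clear_breakpoints(void)
    {
        commit_staged();          /* breakpoints no longer fire */
        uninstall_instructions(); /* safe to remove the instructions */
    }

    int main(void)
    {
        set_breakpoints();
        clear_breakpoints();
        return 0;
    }
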
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 15a937289d..efce070061 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -217,7 +217,6 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
-BeamInstr* em_call_traced_function;
/* NOTE These should be the only variables containing trace instructions.
@@ -4571,64 +4570,6 @@ void process_main(void)
* Trace and debugging support.
*/
- /*
- * At this point, I points to the code[3] in the export entry for
- * a trace-enabled function.
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&call_traced_function
- * code[4]: Address of function.
- */
- OpCase(call_traced_function): {
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- Export* ep = (Export *) (((char *)I)-offset);
- Uint32 flags;
-
- SWAPOUT;
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
- 0, &c_p->tracer_proc);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- SWAPIN;
-
- if (flags & MATCH_SET_RX_TRACE) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 3 < HTOP) {
- /* SWAPOUT, SWAPIN was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((BeamInstr)(ep->code)));
- ASSERT(is_internal_pid(c_p->tracer_proc) ||
- is_internal_port(c_p->tracer_proc));
- E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
- E[1] = am_true; /* Process tracer */
- E[0] = make_cp(ep->code);
- c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- SET_I((BeamInstr *)Arg(0));
- Dispatch();
- }
-
OpCase(return_trace): {
BeamInstr* code = (BeamInstr *) (UWord) E[0];
@@ -4643,80 +4584,22 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_count_breakpoint): {
+ OpCase(i_generic_breakpoint): {
BeamInstr real_I;
-
- ErtsCountBreak(c_p, (BeamInstr *) I, &real_I);
- ASSERT(VALID_INSTR(real_I));
- Goto(real_I);
- }
-
- /* need to send mfa instead of bdt pointer
- * the pointer might be deallocated.
- */
-
- OpCase(i_time_breakpoint): {
- BeamInstr real_I;
- BpData **bds = (BpData **) (I)[-4];
- BpDataTime *bdt = NULL;
- Uint ix = 0;
-#ifdef ERTS_SMP
- ix = c_p->scheduler_data->no - 1;
-#else
- ix = 0;
-#endif
- bdt = (BpDataTime *)bds[ix];
-
- ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- ASSERT(bdt);
- bdt = (BpDataTime *) bdt->next;
- ASSERT(bdt);
- bds[ix] = (BpData *) bdt;
- real_I = bdt->orig_instr;
+ ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ SWAPOUT;
+ reg[0] = r(0);
+ real_I = erts_generic_breakpoint(c_p, I, reg);
+ r(0) = reg[0];
+ SWAPIN;
ASSERT(VALID_INSTR(real_I));
-
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) {
- if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) {
- /* This _IS_ a tail recursive call */
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL);
- SWAPIN;
- } else {
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL);
-
- /* r register needs to be copied to the array
- * for the garbage collector
- */
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 2 < HTOP) {
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- }
- SWAPIN;
-
- ASSERT(c_p->htop <= E && E <= c_p->hend);
-
- E -= 2;
- E[0] = make_cp(I);
- E[1] = make_cp(c_p->cp); /* original return address */
- c_p->cp = beam_return_time_trace;
- }
- }
-
Goto(real_I);
}
OpCase(i_return_time_trace): {
BeamInstr *pc = (BeamInstr *) (UWord) E[0];
SWAPOUT;
- erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN);
+ erts_trace_time_return(c_p, pc);
SWAPIN;
c_p->cp = NULL;
SET_I((BeamInstr *) cp_val(E[1]));
@@ -4724,114 +4607,6 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_trace_breakpoint):
- if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- BeamInstr real_I;
-
- ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I);
- Goto(real_I);
- }
- /* Fall through to next case */
- OpCase(i_mtrace_breakpoint): {
- BeamInstr real_I;
- Uint32 flags;
- Eterm tracer_pid;
- Uint* cpp;
- int return_to_trace = 0, need = 0;
- flags = 0;
- SWAPOUT;
- reg[0] = r(0);
-
- if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = &E[2];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else {
- cpp = NULL;
- }
- if (cpp) {
- /* This _IS_ a tail recursive call, if there are
- * return_trace and/or i_return_to_trace stackframes
- * on the stack, they are not intermixed with y registers
- */
- BeamInstr *cp_save = c_p->cp;
- for (;;) {
- ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
- cpp += 3;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp += 1;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) {
- cpp += 2;
- } else
- break;
- }
- c_p->cp = (BeamInstr *) cp_val(*cpp);
- ASSERT(is_CP(*cpp));
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- c_p->cp = cp_save;
- } else {
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- }
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
-
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- need += 1;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- need += 3;
- }
- if (need) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - need < HTOP) {
- /* SWAPOUT was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- E -= 1;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- E[0] = make_cp(c_p->cp);
- c_p->cp = (BeamInstr *) beam_return_to_trace;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
- ASSERT(am_true == tracer_pid ||
- is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
- E[2] = make_cp(c_p->cp);
- E[1] = tracer_pid;
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
- the funcinfo is above i. */
- c_p->cp =
- (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- Goto(real_I);
- }
-
OpCase(i_return_to_trace): {
if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
Uint *cpp = (Uint*) E;
@@ -5181,7 +4956,6 @@ void process_main(void)
#endif /* NO_JUMP_TABLE */
em_call_error_handler = OpCode(call_error_handler);
- em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
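
The interpreter changes above collapse the per-kind breakpoint opcodes into a single i_generic_breakpoint, which calls out to erts_generic_breakpoint() and then dispatches on the instruction it returns. A simplified sketch of that dispatch idea, with stand-in types and only two breakpoint kinds (the real helper also receives the process and the x registers):

    #include <stdio.h>

    #define BPF_COUNT (1u << 0)
    #define BPF_TRACE (1u << 1)

    typedef unsigned long Instr;

    struct generic_bp {
        Instr    orig_instr;   /* instruction the breakpoint replaced */
        unsigned flags;        /* which breakpoint kinds are active */
        long     count;
    };

    /* Analogous to erts_generic_breakpoint(): do whatever work is
     * enabled, then hand back the real instruction to execute. */
    static Instr generic_breakpoint(struct generic_bp *g)
    {
        if (g->flags & BPF_COUNT)
            g->count++;
        if (g->flags & BPF_TRACE)
            printf("call traced\n");
        return g->orig_instr;
    }

    int main(void)
    {
        struct generic_bp g = { 0xabcd, BPF_COUNT | BPF_TRACE, 0 };
        Instr real = generic_breakpoint(&g);
        printf("continue with %#lx, count=%ld\n", real, g.count);
        return 0;
    }
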
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index f1506a8684..1048f258a5 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -49,7 +49,6 @@ extern void** beam_ops;
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_apply_bif;
-extern BeamInstr* em_call_traced_function;
/*
* The following variables keep a sorted list of address ranges for
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index ae4cca1e58..8025058ee0 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -44,6 +44,10 @@ struct code_ix_queue_item {
static struct code_ix_queue_item* the_code_ix_queue = NULL;
static erts_smp_mtx_t the_code_ix_queue_lock;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+static erts_tsd_key_t has_code_write_permission;
+#endif
+
void erts_code_ix_init(void)
{
/* We start emulator by initializing preloaded modules
@@ -53,6 +57,9 @@ void erts_code_ix_init(void)
erts_smp_atomic32_init_nob(&the_active_code_index, 0);
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
erts_smp_mtx_init(&the_code_ix_queue_lock, "code_ix_queue");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_key_create(&has_code_write_permission);
+#endif
CIX_TRACE("init");
}
@@ -112,6 +119,9 @@ int erts_try_seize_code_write_permission(Process* c_p)
success = !the_code_ix_lock;
if (success) {
the_code_ix_lock = 1;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 1);
+#endif
}
else { /* Already locked */
struct code_ix_queue_item* qitem;
@@ -128,6 +138,7 @@ int erts_try_seize_code_write_permission(Process* c_p)
void erts_release_code_write_permission(void)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
erts_smp_mtx_lock(&the_code_ix_queue_lock);
while (the_code_ix_queue != NULL) { /* unleash the entire herd */
struct code_ix_queue_item* qitem = the_code_ix_queue;
@@ -141,12 +152,15 @@ void erts_release_code_write_permission(void)
erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
}
the_code_ix_lock = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 0);
+#endif
erts_smp_mtx_unlock(&the_code_ix_queue_lock);
}
#ifdef ERTS_ENABLE_LOCK_CHECK
int erts_is_code_ix_locked(void)
{
- return the_code_ix_lock;
+ return the_code_ix_lock && erts_tsd_get(has_code_write_permission);
}
#endif
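
The code_ix.c change above makes erts_is_code_ix_locked() check not only that write permission is taken but that the calling thread is the one that took it, using thread-specific data. A sketch of the same ownership check using C11 _Thread_local instead of the erts_tsd_* API:

    #include <stdio.h>

    static int code_write_locked;                 /* global state */
    static _Thread_local int i_hold_permission;   /* per-thread state */

    static void seize(void)   { code_write_locked = 1; i_hold_permission = 1; }
    static void release(void) { i_hold_permission = 0; code_write_locked = 0; }

    /* cf. erts_is_code_ix_locked(): true only on the owning thread */
    static int is_code_ix_locked(void)
    {
        return code_write_locked && i_hold_permission;
    }

    int main(void)
    {
        printf("%d\n", is_code_ix_locked());  /* 0: nothing seized yet */
        seize();
        printf("%d\n", is_code_ix_locked());  /* 1, but only on this thread */
        release();
        return 0;
    }
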
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 62225d3572..9a011e2adc 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -952,7 +952,7 @@ ddq_check_incoming(ErtsAllctrDDQueue_t *ddq)
ERTS_THR_MEMORY_BARRIER;
else {
ddq->head.next.unref_end = (ErtsAllctrDDBlock_t *) ilast;
- ddq->head.next.thr_progress = erts_thr_progress_later();
+ ddq->head.next.thr_progress = erts_thr_progress_later(NULL);
erts_atomic32_set_relb(&ddq->tail.data.um_refc_ix,
um_refc_ix);
ddq->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 7f1b02b9b4..e88fb8c9f4 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -42,12 +42,24 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
+
+/*
+ * The following variables are protected by code write permission.
+ */
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static Eterm erts_default_meta_tracer_pid;
+static struct { /* Protected by code write permission */
+ int current;
+ int install;
+ int local;
+ BpFunctions f; /* Local functions */
+ BpFunctions e; /* Export entries */
+} finish_bp;
+
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
static BIF_RETTYPE
@@ -60,12 +72,11 @@ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
-static int setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex);
-static int reset_func_trace(Export* ep, ErtsCodeIndex);
-static void reset_bif_trace(int bif_index);
-static void setup_bif_trace(int bif_index);
-static void set_trace_bif(int bif_index, void* match_prog);
-static void clear_trace_bif(int bif_index);
+static void reset_bif_trace(void);
+static void setup_bif_trace(void);
+static void install_exp_breakpoints(BpFunctions* f);
+static void uninstall_exp_breakpoints(BpFunctions* f);
+static void clean_export_entries(BpFunctions* f);
void
erts_bif_trace_init(void)
@@ -107,12 +118,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
int is_global;
Process *meta_tracer_proc = p;
Eterm meta_tracer_pid = p->id;
+ int is_blocking = 0;
if (!erts_try_seize_code_write_permission(p)) {
ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ finish_bp.current = -1;
UseTmpHeap(3,p);
/*
@@ -328,16 +339,24 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
meta_tracer_proc->trace_flags |= F_TRACER;
}
- matches = erts_set_trace_pattern(mfa, specified,
+ matches = erts_set_trace_pattern(p, mfa, specified,
match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid);
+ on, flags, meta_tracer_pid, 0);
}
error:
MatchSetUnref(match_prog_set);
UnUseTmpHeap(3,p);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+#ifdef ERTS_SMP
+ if (finish_bp.current >= 0) {
+ ASSERT(matches >= 0);
+ erts_notify_finish_breakpointing(p);
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(p, make_small(matches));
+ }
+#endif
+
erts_release_code_write_permission();
if (matches >= 0) {
@@ -355,6 +374,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() ||
+ erts_smp_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -369,6 +390,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
int erts_is_default_trace_enabled(void)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() ||
+ erts_smp_thr_progress_is_blocking());
return erts_default_trace_pattern_is_on;
}
@@ -842,6 +865,11 @@ Eterm trace_info_2(BIF_ALIST_2)
Eterm What = BIF_ARG_1;
Eterm Key = BIF_ARG_2;
Eterm res;
+
+ if (!erts_try_seize_code_write_permission(p)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key);
+ }
+
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
} else if (is_atom(What) || is_pid(What)) {
@@ -849,8 +877,10 @@ Eterm trace_info_2(BIF_ALIST_2)
} else if (is_tuple(What)) {
res = trace_info_func(p, What, Key);
} else {
+ erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
+ erts_release_code_write_permission();
BIF_RET(res);
}
@@ -978,64 +1008,54 @@ static int function_is_traced(Process *p,
Binary **ms, /* out */
Binary **ms_meta, /* out */
Eterm *tracer_pid_meta, /* out */
- Sint *count, /* out */
+ Uint *count, /* out */
Eterm *call_time) /* out */
{
Export e;
Export* ep;
- int i;
- BeamInstr *code;
+ BeamInstr* pc;
/* First look for an export entry */
e.code[0] = mfa[0];
e.code[1] = mfa[1];
e.code[2] = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- if (ep->addressv[erts_active_code_ix()] == ep->code+3 &&
- ep->code[3] != (BeamInstr) em_call_error_handler) {
- if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- *ms = ep->match_prog_set;
+ pc = ep->code+3;
+ if (ep->addressv[erts_active_code_ix()] == pc &&
+ *pc != (BeamInstr) em_call_error_handler) {
+
+ int r = 0;
+
+ ASSERT(*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+
+ if (erts_is_trace_break(pc, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
- for (i = 0; i < BIF_SIZE; ++i) {
- if (bif_export[i] == ep) {
- int r = 0;
-
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- *ms = ep->match_prog_set;
- return FUNC_TRACE_GLOBAL_TRACE;
- } else {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- r |= FUNC_TRACE_LOCAL_TRACE;
- *ms = ep->match_prog_set;
- }
- if (erts_is_mtrace_break(ep->code+3, ms_meta,
- tracer_pid_meta)) {
- r |= FUNC_TRACE_META_TRACE;
- }
- if (erts_is_time_break(p, ep->code+3, call_time)) {
- r |= FUNC_TRACE_TIME_TRACE;
- }
- }
- return r ? r : FUNC_TRACE_UNTRACED;
- }
- }
- erl_exit(1,"Impossible ghost bif encountered in trace_info.");
+
+ if (erts_is_trace_break(pc, ms, 1)) {
+ r |= FUNC_TRACE_LOCAL_TRACE;
+ }
+ if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ r |= FUNC_TRACE_META_TRACE;
}
+ if (erts_is_time_break(p, pc, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
+ }
+ return r ? r : FUNC_TRACE_UNTRACED;
}
}
/* OK, now look for breakpoint tracing */
- if ((code = erts_find_local_func(mfa)) != NULL) {
+ if ((pc = erts_find_local_func(mfa)) != NULL) {
int r =
- (erts_is_trace_break(code, ms, NULL)
+ (erts_is_trace_break(pc, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(code, count)
+ | (erts_is_count_break(pc, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, code, call_time)
+ | (erts_is_time_break(p, pc, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1050,7 +1070,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm* hp;
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
Binary *ms = NULL, *ms_meta = NULL;
- Sint count = 0;
+ Uint count = 0;
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
@@ -1138,9 +1158,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
break;
case am_call_count:
if (r & FUNC_TRACE_COUNT_TRACE) {
- retval = count < 0 ?
- erts_make_integer(-count-1, p) :
- erts_make_integer(count, p);
+ retval = erts_make_integer(count, p);
}
break;
case am_call_time:
@@ -1329,39 +1347,46 @@ trace_info_on_load(Process* p, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid)
+ Eterm meta_tracer_pid, int is_blocking)
{
const ErtsCodeIndex code_ix = erts_active_code_ix();
int matches = 0;
int i;
+ int n;
+ BpFunction* fp;
/*
* First work on normal functions (not real BIFs).
*/
-
- for (i = 0; i < export_list_size(code_ix); i++) {
- Export* ep = export_list(i, code_ix);
- int j;
-
- if (ExportIsBuiltIn(ep)) {
- continue;
- }
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- if (on) {
- if (! flags.breakpoint)
- matches += setup_func_trace(ep, match_prog_set, code_ix);
- else
- reset_func_trace(ep, code_ix);
- } else if (! flags.breakpoint) {
- matches += reset_func_trace(ep, code_ix);
+ erts_bp_match_export(&finish_bp.e, mfa, specified);
+ fp = finish_bp.e.matching;
+ n = finish_bp.e.matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code));
+
+ if (!on || flags.breakpoint) {
+ erts_clear_call_trace_bif(pc, 0);
+ if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ }
+ } else {
+ if (ep->addressv[code_ix] != pc) {
+ fp[i].mod->curr.num_traced_exports++;
+#ifdef DEBUG
+ pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+#endif
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ pc[1] = (BeamInstr) ep->addressv[code_ix];
+ }
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
+ if (ep->addressv[code_ix] != pc) {
+ pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
}
}
}
@@ -1386,26 +1411,15 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/* Empty loop body */
}
if (j == specified) {
+ BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
+
if (! flags.breakpoint) { /* Export entry call trace */
if (on) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- ASSERT(ExportIsBuiltIn(bif_export[i]));
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL;
- setup_bif_trace(i);
+ erts_clear_call_trace_bif(pc, 1);
+ erts_clear_mtrace_bif(pc);
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
} else { /* off */
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
- }
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
+ erts_clear_call_trace_bif(pc, 0);
}
matches++;
} else { /* Breakpoint call trace */
@@ -1413,52 +1427,33 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (on) {
if (flags.local) {
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_clear_call_trace_bif(pc, 0);
+ erts_set_call_trace_bif(pc, match_prog_set, 1);
m = 1;
}
if (flags.meta) {
- erts_set_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3,
- meta_match_prog_set, meta_tracer_pid);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_META;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_set_mtrace_bif(pc, meta_match_prog_set,
+ meta_tracer_pid);
m = 1;
}
if (flags.call_time) {
- erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ erts_set_time_trace_bif(pc, on);
/* I don't want to remove any other tracers */
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
m = 1;
}
- if (erts_bif_trace_flags[i]) {
- setup_bif_trace(i);
- }
} else { /* off */
if (flags.local) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- }
+ erts_clear_call_trace_bif(pc, 1);
m = 1;
}
if (flags.meta) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
+ erts_clear_mtrace_bif(pc);
m = 1;
}
if (flags.call_time) {
- erts_clear_time_trace_bif(bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ erts_clear_time_trace_bif(pc);
m = 1;
}
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
}
matches += m;
}
@@ -1468,186 +1463,242 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/*
** So, now for breakpoint tracing
*/
+ erts_bp_match_functions(&finish_bp.f, mfa, specified);
if (on) {
if (! flags.breakpoint) {
- erts_clear_trace_break(mfa, specified);
- erts_clear_mtrace_break(mfa, specified);
- erts_clear_count_break(mfa, specified);
- erts_clear_time_break(mfa, specified);
+ erts_clear_all_breaks(&finish_bp.f);
} else {
- int m = 0;
if (flags.local) {
- m = erts_set_trace_break(mfa, specified, match_prog_set,
- am_true);
+ erts_set_trace_break(&finish_bp.f, match_prog_set);
}
if (flags.meta) {
- m = erts_set_mtrace_break(mfa, specified, meta_match_prog_set,
- meta_tracer_pid);
+ erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
+ meta_tracer_pid);
}
if (flags.call_count) {
- m = erts_set_count_break(mfa, specified, on);
+ erts_set_count_break(&finish_bp.f, on);
}
if (flags.call_time) {
- m = erts_set_time_break(mfa, specified, on);
+ erts_set_time_break(&finish_bp.f, on);
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
}
} else {
- int m = 0;
if (flags.local) {
- m = erts_clear_trace_break(mfa, specified);
+ erts_clear_trace_break(&finish_bp.f);
}
if (flags.meta) {
- m = erts_clear_mtrace_break(mfa, specified);
+ erts_clear_mtrace_break(&finish_bp.f);
}
if (flags.call_count) {
- m = erts_clear_count_break(mfa, specified);
+ erts_clear_count_break(&finish_bp.f);
}
if (flags.call_time) {
- m = erts_clear_time_break(mfa, specified);
+ erts_clear_time_break(&finish_bp.f);
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
}
+ finish_bp.current = 0;
+ finish_bp.install = on;
+ finish_bp.local = flags.breakpoint;
+
+#ifdef ERTS_SMP
+ if (is_blocking) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+#endif
+ while (erts_finish_breakpointing()) {
+ /* Empty loop body */
+ }
+#ifdef ERTS_SMP
+ finish_bp.current = -1;
+ }
+#endif
+
+ if (flags.breakpoint) {
+ matches += finish_bp.f.matched;
+ } else {
+ matches += finish_bp.e.matched;
+ }
return matches;
}
-/*
- * Setup function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
-
-static int
-setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex code_ix)
+int
+erts_finish_breakpointing(void)
{
- Module* modp;
-
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
- }
- }
-
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+
/*
- * Currently no trace support for native code.
+ * Memory barriers will be issued for all processes *before*
+ * each of the stages below. (Unless the other schedulers
+ * are blocked, in which case memory barriers will be issued
+ * when they are awakened.)
*/
- if (erts_is_native_break(ep->addressv[code_ix])) {
+
+ switch (finish_bp.current++) {
+ case 0:
+ /*
+ * At this point, in all functions that are to be breakpointed,
+ * a pointer to a GenericBp struct has already been added.
+ *
+ * Insert the new breakpoints (if any) into the
+ * code. Different schedulers may see the breakpoint instructions
+ * at different times, but it does not matter since the newly
+ * added breakpoints are disabled.
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ erts_install_breakpoints(&finish_bp.f);
+ } else {
+ install_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ setup_bif_trace();
+ return 1;
+ case 1:
+ /*
+ * Switch index for the breakpoint data, activating the staged
+ * data. (Depending on the changes in the breakpoint data,
+ * that could either activate breakpoints or disable
+ * breakpoints.)
+ */
+ erts_commit_staged_bp();
+ return 1;
+ case 2:
+ /*
+ * Remove breakpoint instructions for disabled breakpoints
+ * (if any).
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ } else {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ }
+ } else {
+ if (finish_bp.local) {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ } else {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ reset_bif_trace();
+ return 1;
+ case 3:
+ /*
+ * Now all breakpoints have either been inserted or removed.
+ * For all updated breakpoints, copy the active breakpoint
+ * data to the staged breakpoint data to make them equal
+ * (which simplifies the next breakpoint update). If any
+ * breakpoints have been totally disabled,
+ * deallocate the GenericBp structs for them.
+ */
+ erts_consolidate_bif_bp_data();
+ clean_export_entries(&finish_bp.e);
+ erts_consolidate_bp_data(&finish_bp.e, 0);
+ erts_consolidate_bp_data(&finish_bp.f, 1);
+ erts_bp_free_matched_functions(&finish_bp.e);
+ erts_bp_free_matched_functions(&finish_bp.f);
return 0;
+ default:
+ ASSERT(0);
}
+ return 0;
+}
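
The staged approach above can be hard to see through the diff markers. Below is a minimal, stand-alone sketch of the calling pattern only: a four-stage step function driven either by the blocking while-loop shown in erts_set_trace_pattern() or by repeated aux-work invocations. All names (finish_step, stage) are hypothetical and none of the real ERTS types or memory barriers are involved:

    /* Illustrative sketch only, not ERTS code. */
    #include <assert.h>
    #include <stdio.h>

    static int stage = 0;               /* corresponds to finish_bp.current */

    /* Returns 1 while more stages remain, 0 when done. */
    static int finish_step(void)
    {
        switch (stage++) {
        case 0: puts("stage 0: insert (still disabled) breakpoints");   return 1;
        case 1: puts("stage 1: commit staged data, activating it");     return 1;
        case 2: puts("stage 2: remove instructions of disabled breaks");return 1;
        case 3: puts("stage 3: consolidate and free bookkeeping");      return 0;
        default: assert(0); return 0;
        }
    }

    int main(void)
    {
        /* Blocking caller: drive all stages in a loop, as
         * erts_set_trace_pattern() does when is_blocking is set. */
        while (finish_step()) {
            /* The real code waits for every scheduler to pass a memory
             * barrier between stages; omitted in this sketch. */
        }
        return 0;
    }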
- ep->code[3] = (BeamInstr) em_call_traced_function;
- ep->code[4] = (BeamInstr) ep->addressv[code_ix];
- ep->addressv[code_ix] = ep->code+3;
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
+static void
+install_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- modp = erts_get_module(ep->code[0], code_ix);
- ASSERT(modp);
- modp->curr.num_traced_exports++;
- return 1;
-}
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-static void setup_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[bif_index].traced;
+ ep->addressv[code_ix] = pc;
+ }
}
-static void set_trace_bif(int bif_index, void* match_prog) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
-}
+static void
+uninstall_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
-/*
- * Reset function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-static int
-reset_func_trace(Export* ep, ErtsCodeIndex code_ix)
-{
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- Module* modp = erts_get_module(ep->code[0], code_ix);
- ASSERT(modp);
- modp->curr.num_traced_exports--;
-
- ep->addressv[code_ix] = (Uint *) ep->code[4];
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
+ if (ep->addressv[code_ix] != pc) {
+ continue;
}
+ ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
+ ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
}
-
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(ep->addressv[code_ix])) {
- return 0;
- }
-
- /*
- * Nothing to do, but the export entry matches.
- */
+}
+
+static void
+clean_export_entries(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
+
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
- return 1;
+ if (ep->addressv[code_ix] == pc) {
+ continue;
+ }
+ if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
+ ep->code[3] = (BeamInstr) 0;
+ ep->code[4] = (BeamInstr) 0;
+ }
+ }
}
-static void reset_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ASSERT(! ep->match_prog_set);
- ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL));
- ep->code[4] = (BeamInstr) bif_table[bif_index].f;
+static void
+setup_bif_trace(void)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].traced;
+ }
+ }
+ }
}
-static void clear_trace_bif(int bif_index) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+static void
+reset_bif_trace(void)
+{
+ int i;
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ BeamInstr* pc = ep->code+3;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g && g->data[active].flags == 0) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].f;
+ }
+ }
+ }
}
/*
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index da4376fd0a..594c51a5db 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1711,9 +1711,10 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- BpData** bps = (BpData**) code_ptr[1];
- BpData* bp = (BpData*) bps[erts_bp_sched2ix()];
- bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ GenericBp* g = (GenericBp *) code_ptr[1];
+ ASSERT(code_ptr[5+0] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
code_ptr[5+2] = (BeamInstr) lib;
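
The else-branch above covers a function that already carries a generic breakpoint: the new op_call_nif word is written into the breakpoint's saved orig_instr rather than into the live code word, so the breakpoint stays in place and still dispatches to the right instruction. A hedged, self-contained illustration of that indirection (the opcodes and GenericBreakpoint struct here are made up, not the ERTS definitions):

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t Instr;
    typedef struct { Instr orig_instr; } GenericBreakpoint;

    enum { OP_BREAKPOINT = 0xB5, OP_CALL_NIF = 0xC4 };  /* invented opcodes */

    static void patch_call_nif(Instr *code_word, GenericBreakpoint *bp)
    {
        if (*code_word == OP_BREAKPOINT) {
            /* The live word is the breakpoint itself; patch the saved
             * original instruction that the breakpoint dispatches to. */
            bp->orig_instr = OP_CALL_NIF;
        } else {
            *code_word = OP_CALL_NIF;
        }
    }

    int main(void)
    {
        GenericBreakpoint bp = { 0 };
        Instr untraced = 0, traced = OP_BREAKPOINT;

        patch_call_nif(&untraced, NULL);  /* writes the word directly */
        patch_call_nif(&traced, &bp);     /* leaves the breakpoint intact */
        assert(untraced == OP_CALL_NIF);
        assert(traced == OP_BREAKPOINT && bp.orig_instr == OP_CALL_NIF);
        return 0;
    }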
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index bca8fac259..e07c9ae2b0 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -530,6 +530,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#endif
#ifdef ERTS_SMP
valid |= ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION;
+ valid |= ERTS_SSI_AUX_WORK_FINISH_BP;
#endif
#ifdef ERTS_SSI_AUX_WORK_REAP_PORTS
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
@@ -1145,13 +1146,13 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
#ifdef ERTS_SMP
static ERTS_INLINE void
-thr_prgr_current_reset(ErtsAuxWorkData *awdp)
+haw_thr_prgr_current_reset(ErtsAuxWorkData *awdp)
{
awdp->current_thr_prgr = ERTS_THR_PRGR_INVALID;
}
static ERTS_INLINE ErtsThrPrgrVal
-thr_prgr_current(ErtsAuxWorkData *awdp)
+haw_thr_prgr_current(ErtsAuxWorkData *awdp)
{
ErtsThrPrgrVal current = awdp->current_thr_prgr;
if (current == ERTS_THR_PRGR_INVALID) {
@@ -1161,6 +1162,21 @@ thr_prgr_current(ErtsAuxWorkData *awdp)
return current;
}
+static ERTS_INLINE void
+haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
+{
+ ErtsThrPrgrVal current = awdp->current_thr_prgr;
+ if (current != ERTS_THR_PRGR_INVALID
+ && !erts_thr_progress_equal(current, erts_thr_progress_current())) {
+ /*
+ * We have used a previously read current value that isn't the
+ * latest; need to poke ourselves in order to guarantee no loss
+ * of wakeups.
+ */
+ erts_sched_poke(awdp->ssi);
+ }
+}
+
#endif
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
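
The new haw_thr_prgr_current_check_progress() above guards against a lost wakeup: if the thread-progress value cached while handling aux work is no longer the current one, the scheduler pokes itself before going to sleep. A rough stand-alone model of that check, using plain C11 atomics and invented names (global_progress; erts_sched_poke is modelled by a flag) instead of the ERTS thread-progress API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong global_progress;     /* advanced by other threads */

    struct worker {
        unsigned long cached_progress;       /* snapshot taken earlier */
        int poked;                           /* stand-in for a self-poke */
    };

    static void check_progress_before_sleep(struct worker *w)
    {
        unsigned long now = atomic_load(&global_progress);
        if (w->cached_progress != now) {
            /* The snapshot used while handling work is stale; poke
             * ourselves so no wakeup against the newer value is lost. */
            w->poked = 1;
        }
    }

    int main(void)
    {
        struct worker w = { .cached_progress = 1, .poked = 0 };
        atomic_store(&global_progress, 2);   /* progress moved on meanwhile */
        check_progress_before_sleep(&w);
        printf("poked: %d\n", w.poked);      /* prints 1 */
        return 0;
    }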
@@ -1259,7 +1275,7 @@ static ERTS_INLINE erts_aint32_t
handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
erts_aint32_t aux_work)
{
- if (!erts_thr_progress_has_reached_this(thr_prgr_current(awdp),
+ if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
@@ -1364,7 +1380,7 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
#ifdef ERTS_SMP
if (awdp->async_ready.need_thr_prgr
- && !erts_thr_progress_has_reached_this(thr_prgr_current(awdp),
+ && !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->async_ready.thr_prgr)) {
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
}
@@ -1431,6 +1447,55 @@ handle_code_ix_activation(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
}
#endif /* ERTS_SMP */
+#ifdef ERTS_SMP
+void
+erts_notify_finish_breakpointing(Process* p)
+{
+ ErtsAuxWorkData* awdp = &p->scheduler_data->aux_work_data;
+
+ ASSERT(awdp->bp_ix_activation.stager == NULL);
+ awdp->bp_ix_activation.stager = p;
+ awdp->bp_ix_activation.thr_prgr = erts_thr_progress_later(awdp->esdp);
+ erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr);
+ erts_smp_proc_inc_refc(p);
+ set_aux_work_flags_wakeup_relb(p->scheduler_data->ssi,
+ ERTS_SSI_AUX_WORK_FINISH_BP);
+}
+
+static erts_aint32_t
+handle_finish_bp(ErtsAuxWorkData* awdp, erts_aint32_t aux_work)
+{
+ ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+
+ if (!erts_thr_progress_has_reached_this(current,
+ awdp->bp_ix_activation.thr_prgr)) {
+ return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP;
+ }
+ if (erts_finish_breakpointing()) { /* Not done */
+ /* Arrange for being called again */
+ awdp->bp_ix_activation.thr_prgr =
+ erts_thr_progress_later(awdp->esdp);
+ erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr);
+ } else { /* Done */
+ Process* p;
+
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_FINISH_BP);
+ p = awdp->bp_ix_activation.stager;
+#ifdef DEBUG
+ awdp->bp_ix_activation.stager = NULL;
+#endif
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+ erts_release_code_write_permission();
+ }
+ return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP;
+}
+#endif /* ERTS_SMP */
+
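handle_finish_bp() above is the non-blocking counterpart of the while-loop in erts_set_trace_pattern(): the aux-work handler keeps re-arming itself at a later thread-progress value until erts_finish_breakpointing() reports completion, and only then resumes the suspended process that requested the change. Stripped of scheduling, locking and reference counting, the control flow is roughly the following sketch (all names invented):

    #include <stdio.h>

    static int stages_left = 4;

    /* Stand-in for erts_finish_breakpointing(): 1 = more stages remain. */
    static int finish_breakpointing(void) { return --stages_left > 0; }

    typedef struct { int suspended; } Requester;

    static void handle_finish_bp(Requester *req)
    {
        if (finish_breakpointing()) {
            /* Not done: the real code requests another call after the
             * next thread-progress tick and returns. */
            return;
        }
        /* Done: wake the process that requested the trace change. */
        req->suspended = 0;
    }

    int main(void)
    {
        Requester req = { 1 };
        int calls = 0;
        while (req.suspended) {
            handle_finish_bp(&req);
            calls++;
        }
        printf("requester resumed after %d aux-work passes\n", calls); /* 4 */
        return 0;
    }
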
static ERTS_INLINE erts_aint32_t
handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
@@ -1483,7 +1548,7 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
if (need_thr_progress) {
if (wakeup == ERTS_THR_PRGR_INVALID)
- wakeup = erts_thr_progress_later_than(thr_prgr_current(awdp));
+ wakeup = erts_thr_progress_later(awdp->esdp);
awdp->dd.thr_prgr = wakeup;
set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
awdp->dd.thr_prgr = wakeup;
@@ -1504,7 +1569,7 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
int need_thr_progress;
int more_work;
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
- ErtsThrPrgrVal current = thr_prgr_current(awdp);
+ ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -1526,7 +1591,7 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
if (need_thr_progress) {
if (wakeup == ERTS_THR_PRGR_INVALID)
- wakeup = erts_thr_progress_later_than(current);
+ wakeup = erts_thr_progress_later(awdp->esdp);
awdp->dd.thr_prgr = wakeup;
erts_thr_progress_wakeup(awdp->esdp, wakeup);
}
@@ -1715,7 +1780,7 @@ handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
}
static erts_aint32_t
-handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work)
+handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
{
#undef HANDLE_AUX_WORK
#define HANDLE_AUX_WORK(FLG, HNDLR) \
@@ -1733,7 +1798,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work)
erts_aint32_t ignore = 0;
#ifdef ERTS_SMP
- thr_prgr_current_reset(awdp);
+ haw_thr_prgr_current_reset(awdp);
#endif
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
@@ -1802,8 +1867,18 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_FINISH_BP,
+ handle_finish_bp);
+#endif
+
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+#ifdef ERTS_SMP
+ if (waiting && !aux_work)
+ haw_thr_prgr_current_check_progress(awdp);
+#endif
+
return aux_work;
#undef HANDLE_AUX_WORK
@@ -2262,7 +2337,7 @@ aux_thread(void *unused)
if (aux_work) {
if (!thr_prgr_active)
erts_thr_progress_active(NULL, thr_prgr_active = 1);
- aux_work = handle_aux_work(awdp, aux_work);
+ aux_work = handle_aux_work(awdp, aux_work, 1);
if (aux_work && erts_thr_progress_update(NULL))
erts_thr_progress_leader_update(NULL);
}
@@ -2341,7 +2416,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
}
@@ -2445,7 +2520,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (!thr_prgr_active)
erts_thr_progress_active(esdp, thr_prgr_active = 1);
#endif
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
#ifdef ERTS_SMP
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
@@ -3570,7 +3645,7 @@ retire_mpaths(ErtsMigrationPaths *mps)
if (!mpaths.retired.first)
mpaths.retired.last = NULL;
- mps->thr_prgr = erts_thr_progress_later_than(current);
+ mps->thr_prgr = erts_thr_progress_later(NULL);
mps->next = NULL;
if (mpaths.retired.last)
@@ -5167,7 +5242,9 @@ suspend_scheduler(ErtsSchedulerData *esdp)
sched_wall_time_change(esdp, 1);
}
if (aux_work)
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
if (qmask) {
@@ -6839,7 +6916,7 @@ Process *schedule(Process *p, int calls)
if (leader_update)
erts_thr_progress_leader_update(esdp);
if (aux_work)
- handle_aux_work(&esdp->aux_work_data, aux_work);
+ handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_smp_runq_lock(rq);
}
}
@@ -6852,7 +6929,7 @@ Process *schedule(Process *p, int calls)
erts_aint32_t aux_work;
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work)
- handle_aux_work(&esdp->aux_work_data, aux_work);
+ handle_aux_work(&esdp->aux_work_data, aux_work, 0);
}
#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e9e5a3365f..93e71681da 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -283,6 +283,7 @@ typedef enum {
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 10)
#define ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION (((erts_aint32_t) 1) << 11)
#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_FINISH_BP (((erts_aint32_t) 1) << 13)
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -472,6 +473,12 @@ typedef struct {
ErtsThrPrgrVal thr_prgr;
} code_ix_activation;
#endif
+#ifdef ERTS_SMP
+ struct {
+ Process* stager;
+ ErtsThrPrgrVal thr_prgr;
+ } bp_ix_activation;
+#endif
} ErtsAuxWorkData;
struct ErtsSchedulerData_ {
@@ -1188,6 +1195,7 @@ void erts_notify_check_async_ready_queue(void *);
#endif
#ifdef ERTS_SMP
void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
+void erts_notify_finish_breakpointing(Process* p);
#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index bff9d246a3..37b186abd9 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -227,7 +227,7 @@ fetch_remote(erts_sspa_chunk_header_t *chdr, int max)
ERTS_THR_MEMORY_BARRIER;
else {
chdr->head.next.unref_end = (erts_sspa_blk_t *) ilast;
- chdr->head.next.thr_progress = erts_thr_progress_later();
+ chdr->head.next.thr_progress = erts_thr_progress_later(NULL);
erts_atomic32_set_relb(&chdr->tail.data.um_refc_ix,
um_refc_ix);
chdr->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 9ef83746c5..88524bdd4c 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -891,16 +891,16 @@ has_reached_wakeup(ErtsThrPrgrVal wakeup)
ErtsThrPrgrVal limit;
/*
* erts_thr_progress_later() returns values which are
- * equal to 'current + 2'. That is, users should never
- * get a hold of values larger than that.
+ * equal to 'current + 2' or 'current + 3'. That is, users
+ * should never get a hold of values larger than that.
*
- * That is, valid values are values less than 'current + 3'.
+ * That is, valid values are values less than 'current + 4'.
*
* Values larger than this won't work with the wakeup
* algorithm.
*/
- limit = current + 3;
+ limit = current + 4;
if (limit == ERTS_THR_PRGR_VAL_WAITING)
limit = 0;
else if (limit < current) /* Wrapped */
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index a71724b813..89486b065b 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -139,11 +139,12 @@ ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atm
ERTS_GLB_INLINE int erts_thr_progress_is_managed_thread(void);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current_to_later__(ErtsThrPrgrVal val);
-ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later_than(ErtsThrPrgrVal val);
-ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(void);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(ErtsSchedulerData *);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current(void);
ERTS_GLB_INLINE int erts_thr_progress_has_passed__(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2);
ERTS_GLB_INLINE int erts_thr_progress_has_reached_this(ErtsThrPrgrVal this, ErtsThrPrgrVal val);
+ERTS_GLB_INLINE int erts_thr_progress_equal(ErtsThrPrgrVal val1,
+ ErtsThrPrgrVal val2);
ERTS_GLB_INLINE int erts_thr_progress_cmp(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2);
ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
@@ -230,16 +231,23 @@ erts_thr_progress_current_to_later__(ErtsThrPrgrVal val)
}
ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_progress_later_than(ErtsThrPrgrVal val)
+erts_thr_progress_later(ErtsSchedulerData *esdp)
{
- ERTS_THR_MEMORY_BARRIER;
- return erts_thr_progress_current_to_later__(val);
-}
-
-ERTS_GLB_INLINE ErtsThrPrgrVal
-erts_thr_progress_later(void)
-{
- ErtsThrPrgrVal val = erts_thr_prgr_read_mb__(&erts_thr_prgr__.current);
+ ErtsThrPrgrData *tpd;
+ ErtsThrPrgrVal val;
+ if (esdp) {
+ tpd = &esdp->thr_progress_data;
+ managed_thread:
+ val = tpd->previous.local;
+ ERTS_THR_MEMORY_BARRIER;
+ }
+ else {
+ tpd = erts_tsd_get(erts_thr_prgr_data_key__);
+ if (tpd && tpd->is_managed)
+ goto managed_thread;
+ val = erts_thr_prgr_read_mb__(&erts_thr_prgr__.current);
+ }
+ ASSERT(val != ERTS_THR_PRGR_VAL_WAITING);
return erts_thr_progress_current_to_later__(val);
}
@@ -279,6 +287,12 @@ erts_thr_progress_has_reached_this(ErtsThrPrgrVal this, ErtsThrPrgrVal val)
}
ERTS_GLB_INLINE int
+erts_thr_progress_equal(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2)
+{
+ return val1 == val2 && val1 != ERTS_THR_PRGR_INVALID;
+}
+
+ERTS_GLB_INLINE int
erts_thr_progress_cmp(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2)
{
if (val1 == val2)
diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c
index 70949ece76..f07964a265 100644
--- a/erts/emulator/beam/erl_thr_queue.c
+++ b/erts/emulator/beam/erl_thr_queue.c
@@ -422,7 +422,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
else {
q->head.next.unref_end = (ErtsThrQElement_t *) ilast;
#ifdef ERTS_SMP
- q->head.next.thr_progress = erts_thr_progress_later();
+ q->head.next.thr_progress = erts_thr_progress_later(NULL);
#endif
erts_atomic32_set_relb(&q->tail.data.um_refc_ix,
um_refc_ix);
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index bc988cd61b..d04a91f18c 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2134,187 +2134,6 @@ void save_calls(Process *p, Export *e)
}
}
-/*
- * Entry point called by the trace wrap functions in erl_bif_wrap.c
- *
- * The trace wrap functions are themselves called through the export
- * entries instead of the original BIF functions.
- */
-Eterm
-erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
-{
- Eterm result;
- int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
-
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-
- if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) {
- /* Warning! This is an Optimization.
- *
- * If neither meta trace is active nor process trace flags then
- * no tracing will occur. Doing the whole else branch will
- * also do nothing, only slower.
- */
- Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f;
- result = func(p, args, I);
- } else {
- Eterm (*func)(Process*, Eterm*, BeamInstr*);
- Export* ep = bif_export[bif_index];
- Uint32 flags = 0, flags_meta = 0;
- int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
- int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
- int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
- Eterm meta_tracer_pid = NIL;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
- BeamInstr *cp = p->cp;
-
- /*
- * Make continuation pointer OK, it is not during direct BIF calls,
- * but it is correct during apply of bif.
- */
- if (!applying) {
- p->cp = I;
- }
- if (global || local) {
- flags = erts_call_trace(p, ep->code, ep->match_prog_set, args,
- local, &p->tracer_proc);
- }
- if (meta) {
- flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
- &meta_tracer_pid);
- }
- if (time) {
- BpDataTime *bdt = NULL;
- BeamInstr *pc = (BeamInstr *)ep->code+3;
-
- bdt = (BpDataTime *) erts_get_time_break(p, pc);
- ASSERT(bdt);
-
- if (!bdt->pause) {
- erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
- }
- }
- /* Restore original continuation pointer (if changed). */
- p->cp = cp;
-
- func = bif_table[bif_index].f;
-
- result = func(p, args, I);
-
- if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
- BeamInstr i_return_time_trace = beam_return_time_trace[0];
- Eterm *cpp;
- /* Maybe advance cp to skip trace stack frames */
- for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
- if (*cp == i_return_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 2; /* Skip return_trace parameters */
- } else if (*cp == i_return_time_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 1; /* Skip return_time_trace parameters */
- } else if (*cp == i_return_to_trace) {
- /* A return_to trace message is going to be generated
- * by normal means, so we do not have to.
- */
- cp = NULL;
- break;
- } else break;
- }
- }
-
- /* Try to get these in the order
- * they usually appear in normal code... */
- if (is_non_value(result)) {
- Uint reason = p->freason;
- if (reason != TRAP) {
- Eterm class;
- Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
- /* Expand error value like in handle_error() */
- if (reason & EXF_ARGLIST) {
- Eterm *tp;
- ASSERT(is_tuple(value));
- tp = tuple_val(value);
- value = tp[1];
- }
- if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
- reason = EXC_ERROR;
- }
- /* Note: expand_error_value() could theoretically
- * allocate on the heap, but not for any error
- * returned by a BIF, and it would do no harm,
- * just be annoying.
- */
- value = expand_error_value(p, reason, value);
- class = exception_tag[GET_EXC_CLASS(reason)];
-
- if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
- }
- if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &p->tracer_proc);
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
- /* can only happen if(local)*/
- Eterm *ptr = p->stop;
- ASSERT(is_CP(*ptr));
- ASSERT(ptr <= STACK_START(p));
- /* Search the nearest stack frame for a catch */
- while (++ptr < STACK_START(p)) {
- if (is_CP(*ptr)) break;
- if (is_catch(*ptr)) {
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into
- * calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- UnUseTmpHeapNoproc(3);
- if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- } else {
- if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
- }
- /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
- if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &p->tracer_proc);
- }
- if (flags & MATCH_SET_RETURN_TO_TRACE) {
- /* can only happen if(local)*/
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- return result;
-}
-
/* Sends trace message:
* {trace_ts, Pid, What, Msg, Timestamp}
* or {trace, Pid, What, Msg}
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 229641cb32..6b5121f917 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -137,7 +137,6 @@ export_alloc(struct export_entry* tmpl_e)
obj->code[2] = tmpl->code[2];
obj->code[3] = (BeamInstr) em_call_error_handler;
obj->code[4] = 0;
- obj->match_prog_set = NULL;
for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
obj->addressv[ix] = obj->code+3;
@@ -260,8 +259,9 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
struct export_entry* ee;
ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
- if (ee == NULL || (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
- ee->ep->code[3] != (BeamInstr) em_call_traced_function)) {
+ if (ee == NULL ||
+ (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
+ ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
return NULL;
}
return ee->ep;
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index ec9fcb26f2..ee06e69aff 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -37,7 +37,6 @@
typedef struct export
{
void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
- struct binary* match_prog_set; /* Match program for tracing. */
BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
@@ -46,12 +45,12 @@ typedef struct export
* code[2]: Arity (untagged integer).
* code[3]: This entry is 0 unless the 'address' field points to it.
* Threaded code instruction to load function
- * (em_call_error_handler), execute BIF (em_apply_bif,
- * em_apply_apply), or call a traced function
- * (em_call_traced_function).
- * code[4]: Function pointer to BIF function (for BIFs only)
+ * (em_call_error_handler), execute BIF (em_apply_bif),
+ * or a breakpoint instruction (op_i_generic_breakpoint).
+ * code[4]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
- * on_load function that has not been run yet.
+ * on_load function that has not been run yet, or pointer
+ * to the function's code if code[3] is a breakpoint instruction.
* Otherwise: 0.
*/
BeamInstr code[5];
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 9e387c550f..c9be20322d 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1682,10 +1682,10 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid);
+ Eterm meta_tracer_pid, int is_blocking);
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
Binary **match_spec,
@@ -1694,6 +1694,7 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
Eterm *meta_tracer_pid);
int erts_is_default_trace_enabled(void);
void erts_bif_trace_init(void);
+int erts_finish_breakpointing(void);
/*
** Call_trace uses this API for the parameter matching functions
@@ -1739,14 +1740,6 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
breakpoint functions */
#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
-/*
- * Flag values when tracing bif
- * Future note: flag field is 8 bits
- */
-#define BIF_TRACE_AS_LOCAL (0x1)
-#define BIF_TRACE_AS_GLOBAL (0x2)
-#define BIF_TRACE_AS_META (0x4)
-#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index c58b36231c..6764e88c81 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -62,11 +62,8 @@ label L
i_func_info I a a I
int_code_end
-i_trace_breakpoint
-i_mtrace_breakpoint
+i_generic_breakpoint
i_debug_breakpoint
-i_count_breakpoint
-i_time_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
@@ -522,7 +519,6 @@ apply_bif
call_nif
call_error_handler
error_action_code
-call_traced_function
return_trace
#
diff --git a/erts/emulator/hipe/hipe_arm.c b/erts/emulator/hipe/hipe_arm.c
index e20a8a7969..651d0e3a75 100644
--- a/erts/emulator/hipe/hipe_arm.c
+++ b/erts/emulator/hipe/hipe_arm.c
@@ -181,11 +181,9 @@ void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *
curseg.base = base;
curseg.code_pos = base;
curseg.tramp_pos = (unsigned int*)((char*)base + SEGMENT_NRBYTES);
-#if defined(__arm__)
curseg.tramp_pos -= 2;
curseg.tramp_pos[0] = 0xE51FF004; /* ldr pc, [pc,#-4] */
curseg.tramp_pos[1] = (unsigned int)&nbif_callemu;
-#endif
address = try_alloc(nrwords, nrcallees, callees, trampvec);
if (!address) {
@@ -214,11 +212,9 @@ static unsigned int *alloc_stub(Uint nrwords, unsigned int **tramp_callemu)
curseg.base = base;
curseg.code_pos = base;
curseg.tramp_pos = (unsigned int*)((char*)base + SEGMENT_NRBYTES);
-#if defined(__arm__)
curseg.tramp_pos -= 2;
curseg.tramp_pos[0] = 0xE51FF004; /* ldr pc, [pc,#-4] */
curseg.tramp_pos[1] = (unsigned int)&nbif_callemu;
-#endif
address = try_alloc(nrwords, 0, NIL, NULL);
if (!address) {
@@ -269,10 +265,8 @@ int hipe_patch_insn(void *address, Uint32 value, Eterm type)
void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
{
unsigned int *code;
-#if defined(__arm__)
unsigned int *tramp_callemu;
int callemu_offset;
-#endif
/*
* Native code calls BEAM via a stub looking as follows:
@@ -288,13 +282,6 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
* (Trampolines are allowed to modify r12, but they don't.)
*/
-#if !defined(__arm__)
- /* verify that 'ba' can reach nbif_callemu */
- if ((unsigned long)&nbif_callemu & ~0x01FFFFFCUL)
- abort();
-#endif
-
-#if defined(__arm__)
code = alloc_stub(4, &tramp_callemu);
callemu_offset = ((int)&nbif_callemu - ((int)&code[2] + 8)) >> 2;
if (!(callemu_offset >= -0x00800000 && callemu_offset <= 0x007FFFFF)) {
@@ -302,11 +289,7 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
if (!(callemu_offset >= -0x00800000 && callemu_offset <= 0x007FFFFF))
abort();
}
-#else
- code = alloc_stub(4, &trampoline);
-#endif
-#if defined(__arm__)
/* mov r0, #beamArity */
code[0] = 0xE3A00000 | (beamArity & 0xFF);
/* ldr r8, [pc,#0] // beamAddress */
@@ -315,16 +298,6 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
code[2] = 0xEA000000 | (callemu_offset & 0x00FFFFFF);
/* .long beamAddress */
code[3] = (unsigned int)beamAddress;
-#else
- /* addi r12,0,beamAddress@l */
- code[0] = 0x39800000 | ((unsigned long)beamAddress & 0xFFFF);
- /* addi r0,0,beamArity */
- code[1] = 0x38000000 | (beamArity & 0x7FFF);
- /* addis r12,r12,beamAddress@ha */
- code[2] = 0x3D8C0000 | at_ha((unsigned long)beamAddress);
- /* ba nbif_callemu */
- code[3] = 0x48000002 | (unsigned long)&nbif_callemu;
-#endif
hipe_flush_icache_range(code, 4*sizeof(int));
@@ -334,60 +307,32 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
static void patch_b(Uint32 *address, Sint32 offset, Uint32 AA)
{
Uint32 oldI = *address;
-#if defined(__arm__)
Uint32 newI = (oldI & 0xFF000000) | (offset & 0x00FFFFFF);
-#else
- Uint32 newI = (oldI & 0xFC000001) | ((offset & 0x00FFFFFF) << 2) | (AA & 2);
-#endif
*address = newI;
hipe_flush_icache_word(address);
}
int hipe_patch_call(void *callAddress, void *destAddress, void *trampoline)
{
-#if !defined(__arm__)
- if ((Uint32)destAddress == ((Uint32)destAddress & 0x01FFFFFC)) {
- /* The destination is in the [0,32MB[ range.
- We can reach it with a ba/bla instruction.
- This is the typical case for BIFs and primops.
- It's also common for trap-to-BEAM stubs (on ppc32). */
- patch_b((Uint32*)callAddress, (Uint32)destAddress >> 2, 2);
+ Sint32 destOffset = ((Sint32)destAddress - ((Sint32)callAddress+8)) >> 2;
+ if (destOffset >= -0x800000 && destOffset <= 0x7FFFFF) {
+ /* The destination is within a [-32MB,+32MB[ range from us.
+ We can reach it with a b/bl instruction.
+ This is typical for nearby Erlang code. */
+ patch_b((Uint32*)callAddress, destOffset, 0);
} else {
-#endif
-#if defined(__arm__)
- Sint32 destOffset = ((Sint32)destAddress - ((Sint32)callAddress+8)) >> 2;
-#else
- Sint32 destOffset = ((Sint32)destAddress - (Sint32)callAddress) >> 2;
-#endif
- if (destOffset >= -0x800000 && destOffset <= 0x7FFFFF) {
- /* The destination is within a [-32MB,+32MB[ range from us.
- We can reach it with a b/bl instruction.
- This is typical for nearby Erlang code. */
- patch_b((Uint32*)callAddress, destOffset, 0);
- } else {
- /* The destination is too distant for b/bl/ba/bla.
- Must do a b/bl to the trampoline. */
-#if defined(__arm__)
- Sint32 trampOffset = ((Sint32)trampoline - ((Sint32)callAddress+8)) >> 2;
-#else
- Sint32 trampOffset = ((Sint32)trampoline - (Sint32)callAddress) >> 2;
-#endif
- if (trampOffset >= -0x800000 && trampOffset <= 0x7FFFFF) {
- /* Update the trampoline's address computation.
- (May be redundant, but we can't tell.) */
-#if defined(__arm__)
- patch_imm32((Uint32*)trampoline+1, (Uint32)destAddress);
-#else
- patch_li((Uint32*)trampoline, (Uint32)destAddress);
-#endif
- /* Update this call site. */
- patch_b((Uint32*)callAddress, trampOffset, 0);
- } else
- return -1;
- }
-#if !defined(__arm__)
+ /* The destination is too distant for b/bl.
+ Must do a b/bl to the trampoline. */
+ Sint32 trampOffset = ((Sint32)trampoline - ((Sint32)callAddress+8)) >> 2;
+ if (trampOffset >= -0x800000 && trampOffset <= 0x7FFFFF) {
+ /* Update the trampoline's address computation.
+ (May be redundant, but we can't tell.) */
+ patch_imm32((Uint32*)trampoline+1, (Uint32)destAddress);
+ /* Update this call site. */
+ patch_b((Uint32*)callAddress, trampOffset, 0);
+ } else
+ return -1;
}
-#endif
return 0;
}
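
The simplified hipe_patch_call() above reduces to a single range check: an ARM b/bl instruction encodes a signed 24-bit word offset relative to PC+8, so a destination within roughly +/-32MB of the call site can be patched directly and anything farther away must go through the trampoline. A small stand-alone illustration of that arithmetic, with made-up addresses and helper names:

    #include <stdint.h>
    #include <stdio.h>

    /* An ARM b/bl holds a signed 24-bit word offset relative to PC+8. */
    static int branch_offset(uintptr_t call_site, uintptr_t dest, int32_t *out)
    {
        intptr_t off = ((intptr_t)dest - (intptr_t)(call_site + 8)) >> 2;
        if (off < -0x800000 || off > 0x7FFFFF)
            return 0;                /* outside the +/-32MB b/bl range */
        *out = (int32_t)off;
        return 1;
    }

    int main(void)
    {
        int32_t off;
        /* A nearby destination fits in the offset field directly... */
        printf("near: %d\n", branch_offset(0x10000, 0x20000, &off));    /* 1 */
        /* ...a distant one does not and must go via a trampoline. */
        printf("far:  %d\n", branch_offset(0x10000, 0x10000000, &off)); /* 0 */
        return 0;
    }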
diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4
index e0c6f09796..17c013f1fb 100644
--- a/erts/emulator/hipe/hipe_arm_bifs.m4
+++ b/erts/emulator/hipe/hipe_arm_bifs.m4
@@ -27,7 +27,7 @@ include(`hipe/hipe_arm_asm.m4')
.p2align 2
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define CALL_BIF(F) mov r14, #F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
+# define CALL_BIF(F) ldr r14, =F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
#else
# define CALL_BIF(F) bl F
#endif'
@@ -67,6 +67,7 @@ $1:
RESTORE_CONTEXT_BIF
beq nbif_1_simple_exception
NBIF_RET(1)
+ .ltorg /* needed by LDR in debug version of `CALL_BIF' */
.size $1, .-$1
.type $1, %function
#endif')
@@ -95,6 +96,7 @@ $1:
RESTORE_CONTEXT_BIF
beq nbif_2_simple_exception
NBIF_RET(2)
+ .ltorg
.size $1, .-$1
.type $1, %function
#endif')
@@ -125,6 +127,7 @@ $1:
RESTORE_CONTEXT_BIF
beq nbif_3_simple_exception
NBIF_RET(3)
+ .ltorg
.size $1, .-$1
.type $1, %function
#endif')
@@ -149,6 +152,7 @@ $1:
RESTORE_CONTEXT_BIF
beq nbif_0_simple_exception
NBIF_RET(0)
+ .ltorg
.size $1, .-$1
.type $1, %function
#endif')
@@ -173,7 +177,8 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl $2
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF(0)
/* Restore registers. */
@@ -195,7 +200,9 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(1)
/* Restore registers. Check for exception. */
@@ -220,7 +227,10 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ str r2, [r0, #P_ARG1]
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(2)
/* Restore registers. Check for exception. */
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index c1336c60d9..ce014c19c2 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -39,7 +39,6 @@
#include "dtrace-wrapper.h"
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
-# define ERTS_DRV_EV_STATE_EXTRA_SIZE 128
#else
# include "safe_hash.h"
# define DRV_EV_STATE_HTAB_SIZE 1024
@@ -334,7 +333,9 @@ static void
grow_drv_ev_state(int min_ix)
{
int i;
- int new_len = min_ix + 1 + ERTS_DRV_EV_STATE_EXTRA_SIZE;
+ int new_len;
+
+ new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1);
if (new_len > max_fds)
new_len = max_fds;
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 26858052c4..5a8588351d 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -105,8 +105,8 @@
#define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL)
-#define FDS_STATUS_EXTRA_FREE_SIZE 128
-#define POLL_FDS_EXTRA_FREE_SIZE 128
+#define ERTS_EV_TABLE_MIN_LENGTH 1024
+#define ERTS_EV_TABLE_EXP_THRESHOLD (2048*1024)
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 1
@@ -563,6 +563,28 @@ free_update_requests_block(ErtsPollSet ps,
* --- Growing poll set structures -------------------------------------------
*/
+int
+ERTS_POLL_EXPORT(erts_poll_get_table_len) (int new_len)
+{
+ if (new_len < ERTS_EV_TABLE_MIN_LENGTH) {
+ new_len = ERTS_EV_TABLE_MIN_LENGTH;
+ } else if (new_len < ERTS_EV_TABLE_EXP_THRESHOLD) {
+ /* find next power of 2 */
+ --new_len;
+ new_len |= new_len >> 1;
+ new_len |= new_len >> 2;
+ new_len |= new_len >> 4;
+ new_len |= new_len >> 8;
+ new_len |= new_len >> 16;
+ ++new_len;
+ } else {
+ /* grow incrementally */
+ new_len += ERTS_EV_TABLE_EXP_THRESHOLD;
+ }
+ return new_len;
+}
+
+
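The new erts_poll_get_table_len() centralises the growth policy for the various fd tables: enforce a minimum size, round small requests up to the next power of two with the usual bit-smearing trick, and switch to fixed-size increments once the table is already in the megabyte range. The sketch below restates that policy as a stand-alone function (same constants as the patch, hypothetical name) and shows a few sample inputs:

    #include <stdio.h>

    #define MIN_LENGTH     1024
    #define EXP_THRESHOLD  (2048*1024)

    static int get_table_len(int new_len)
    {
        if (new_len < MIN_LENGTH) {
            new_len = MIN_LENGTH;
        } else if (new_len < EXP_THRESHOLD) {
            /* round up to the next power of 2 */
            --new_len;
            new_len |= new_len >> 1;
            new_len |= new_len >> 2;
            new_len |= new_len >> 4;
            new_len |= new_len >> 8;
            new_len |= new_len >> 16;
            ++new_len;
        } else {
            /* grow in fixed increments once the table is already large */
            new_len += EXP_THRESHOLD;
        }
        return new_len;
    }

    int main(void)
    {
        printf("%d %d %d\n", get_table_len(100),       /* 1024    */
                             get_table_len(3000),      /* 4096    */
                             get_table_len(3000000));  /* 5097152 */
        return 0;
    }
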
#if ERTS_POLL_USE_KERNEL_POLL
static void
grow_res_events(ErtsPollSet ps, int new_len)
@@ -575,7 +597,7 @@ grow_res_events(ErtsPollSet ps, int new_len)
#elif ERTS_POLL_USE_KQUEUE
struct kevent
#endif
- )*new_len;
+ ) * ERTS_POLL_EXPORT(erts_poll_get_table_len)(new_len);
/* We do not need to save previously stored data */
if (ps->res_events)
erts_free(ERTS_ALC_T_POLL_RES_EVS, ps->res_events);
@@ -589,7 +611,7 @@ static void
grow_poll_fds(ErtsPollSet ps, int min_ix)
{
int i;
- int new_len = min_ix + 1 + POLL_FDS_EXTRA_FREE_SIZE;
+ int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1);
if (new_len > max_fds)
new_len = max_fds;
ps->poll_fds = (ps->poll_fds_len
@@ -611,7 +633,7 @@ static void
grow_fds_status(ErtsPollSet ps, int min_fd)
{
int i;
- int new_len = min_fd + 1 + FDS_STATUS_EXTRA_FREE_SIZE;
+ int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_fd + 1);
ASSERT(min_fd < max_fds);
if (new_len > max_fds)
new_len = max_fds;
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index 8dde619105..502290e4bb 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -246,4 +246,6 @@ void ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet,
ErtsPollEvents [],
int);
+int ERTS_POLL_EXPORT(erts_poll_get_table_len)(int);
+
#endif /* #ifndef ERL_POLL_H__ */
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index a642c3a63a..eaecd32f95 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -78,7 +78,13 @@ init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
end_per_testcase(_Func, Config) ->
Dog = ?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+ ?t:timetrap_cancel(Dog),
+
+ %% Reloading the module will clear all trace patterns, and in a
+ %% debug-compiled emulator it will also run assertions on the
+ %% counters for the number of traced exported functions in this module.
+
+ c:l(?MODULE).
hipe(Config) when is_list(Config) ->
?line 0 = erlang:trace_pattern({?MODULE,worker_foo,1}, true),
@@ -187,7 +193,12 @@ basic() ->
%% Trace some functions...
?line trace_func({lists,'_','_'}, []),
+
+ %% Make sure that tracing the same functions more than once
+ %% does not cause any problems.
?line 3 = trace_func({?MODULE,foo,'_'}, true),
+ ?line 3 = trace_func({?MODULE,foo,'_'}, true),
+ ?line 1 = trace_func({?MODULE,bar,0}, true),
?line 1 = trace_func({?MODULE,bar,0}, true),
?line {traced,global} = trace_info({?MODULE,bar,0}, traced),
?line 1 = trace_func({erlang,list_to_integer,1}, true),
diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl
index 32e2a98e3c..b238d5c630 100644
--- a/erts/emulator/test/trace_local_SUITE.erl
+++ b/erts/emulator/test/trace_local_SUITE.erl
@@ -80,6 +80,7 @@ config(priv_dir,_) ->
exception_meta_nocatch/1, exception_meta_nocatch_apply/1,
exception_meta_nocatch_function/1,
exception_meta_nocatch_apply_function/1,
+ concurrency/1,
init_per_testcase/2, end_per_testcase/2]).
init_per_testcase(_Case, Config) ->
?line Dog=test_server:timetrap(test_server:minutes(2)),
@@ -89,7 +90,15 @@ end_per_testcase(_Case, Config) ->
shutdown(),
Dog=?config(watchdog, Config),
test_server:timetrap_cancel(Dog),
- ok.
+
+ %% Reloading the module will clear all trace patterns, and in a
+ %% debug-compiled emulator it will also run assertions on the
+ %% counters for the number of functions with breakpoints.
+
+ c:l(?MODULE).
+
+
+
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
@@ -106,7 +115,8 @@ all() ->
exception_meta_apply_function, exception_meta_nocatch,
exception_meta_nocatch_apply,
exception_meta_nocatch_function,
- exception_meta_nocatch_apply_function]
+ exception_meta_nocatch_apply_function,
+ concurrency]
end.
groups() ->
@@ -350,7 +360,8 @@ same(A, B) ->
basic_test() ->
?line setup([call]),
- ?line erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
+ NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
+ NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
?line erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
?line [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
?line ?CT(?MODULE,exported_wrap,[1]),
@@ -813,6 +824,42 @@ clean_location({crash,{Reason,Stk0}}) ->
{crash,{Reason,Stk}};
clean_location(Term) -> Term.
+concurrency(_Config) ->
+ N = erlang:system_info(schedulers),
+
+ %% Spawn 2*N processes that spin in a tight infinite loop,
+ %% and one process that will turn on and off local call
+ %% trace on the infinite_loop/0 function. We expect the
+ %% emulator to crash if there is a memory barrier bug or
+ %% if an aligned word-sized write is not atomic.
+
+ Ps0 = [spawn_monitor(fun() -> infinite_loop() end) ||
+ _ <- lists:seq(1, 2*N)],
+ OnAndOff = fun() -> concurrency_on_and_off() end,
+ Ps1 = [spawn_monitor(OnAndOff)|Ps0],
+ ?t:sleep(1000),
+
+ %% Now spawn off N more processes that turn on and off
+ %% a local trace pattern.
+ Ps = [spawn_monitor(OnAndOff) || _ <- lists:seq(1, N)] ++ Ps1,
+ ?t:sleep(1000),
+
+ %% Clean up.
+ [exit(Pid, kill) || {Pid,_} <- Ps],
+ [receive
+ {'DOWN',Ref,process,Pid,killed} -> ok
+ end || {Pid,Ref} <- Ps],
+ erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]),
+ ok.
+
+concurrency_on_and_off() ->
+ 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, true, [local]),
+ 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]),
+ concurrency_on_and_off().
+
+infinite_loop() ->
+ infinite_loop().
+
%%% Tracee target functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index 91efb4c023..a841f26d6a 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -167,7 +167,6 @@ typedef struct bif_entry {
extern BifEntry bif_table[];
extern Export* bif_export[];
-extern unsigned char erts_bif_trace_flags[];
#define BIF_SIZE $bif_size
@@ -197,7 +196,6 @@ includes("export.h", "sys.h", "erl_vm.h", "erl_process.h", "bif.h",
"erl_bif_table.h", "erl_atom_table.h");
print "\nExport* bif_export[BIF_SIZE];\n";
-print "unsigned char erts_bif_trace_flags[BIF_SIZE];\n\n";
print "BifEntry bif_table[] = {\n";
for ($i = 0; $i < @bif; $i++) {
diff --git a/lib/compiler/src/sys_pre_expand.erl b/lib/compiler/src/sys_pre_expand.erl
index ba9cde1de0..68bc83433e 100644
--- a/lib/compiler/src/sys_pre_expand.erl
+++ b/lib/compiler/src/sys_pre_expand.erl
@@ -42,7 +42,7 @@
compile=[], %Compile flags
attributes=[], %Attributes
callbacks=[], %Callbacks
- defined=[], %Defined functions
+ defined, %Defined functions (gb_set)
vcount=0, %Variable counter
func=[], %Current function
arity=[], %Arity for current function
@@ -83,7 +83,7 @@ module(Fs0, Opts0) ->
{Efs,St2} = expand_pmod(Tfs, St1),
%% Get the correct list of exported functions.
Exports = case member(export_all, St2#expand.compile) of
- true -> St2#expand.defined;
+ true -> gb_sets:to_list(St2#expand.defined);
false -> St2#expand.exports
end,
%% Generate all functions from stored info.
@@ -106,10 +106,11 @@ expand_pmod(Fs0, St0) ->
true ->
Ps0
end,
+ Def = gb_sets:to_list(St0#expand.defined),
{Fs1,Xs,Ds} = sys_expand_pmod:forms(Fs0, Ps,
St0#expand.exports,
- St0#expand.defined),
- St1 = St0#expand{exports=Xs, defined=Ds},
+ Def),
+ St1 = St0#expand{exports=Xs,defined=gb_sets:from_list(Ds)},
{Fs2,St2} = add_instance(Ps, Fs1, St1),
{Fs3,St3} = ensure_new(Base, Ps0, Fs2, St2),
{Fs3,St3#expand{attributes = [{abstract, 0, [true]}
@@ -159,7 +160,7 @@ add_func(Name, Args, Body, Fs, St) ->
F = {function,0,Name,A,[{clause,0,Args,[],Body}]},
NA = {Name,A},
{[F|Fs],St#expand{exports=add_element(NA, St#expand.exports),
- defined=add_element(NA, St#expand.defined)}}.
+ defined=gb_sets:add_element(NA, St#expand.defined)}}.
%% define_function(Form, State) -> State.
%% Add function to defined if form is a function.
@@ -168,7 +169,7 @@ define_functions(Forms, #expand{defined=Predef}=St) ->
Fs = foldl(fun({function,_,N,A,_Cs}, Acc) -> [{N,A}|Acc];
(_, Acc) -> Acc
end, Predef, Forms),
- St#expand{defined=ordsets:from_list(Fs)}.
+ St#expand{defined=gb_sets:from_list(Fs)}.
module_attrs(#expand{attributes=Attributes}=St) ->
Attrs = [{attribute,Line,Name,Val} || {Name,Line,Val} <- Attributes],
@@ -187,7 +188,7 @@ module_predef_func_beh_info(#expand{callbacks=Callbacks,defined=Defined,
PreDef=[{behaviour_info,1}],
PreExp=PreDef,
{[gen_beh_info(Callbacks)],
- St#expand{defined=union(from_list(PreDef), Defined),
+ St#expand{defined=gb_sets:union(gb_sets:from_list(PreDef), Defined),
exports=union(from_list(PreExp), Exports)}}.
gen_beh_info(Callbacks) ->
@@ -215,7 +216,8 @@ module_predef_funcs_mod_info(St) ->
[{clause,0,[{var,0,'X'}],[],
[{call,0,{remote,0,{atom,0,erlang},{atom,0,get_module_info}},
[{atom,0,St#expand.module},{var,0,'X'}]}]}]}],
- St#expand{defined=union(from_list(PreDef), St#expand.defined),
+ St#expand{defined=gb_sets:union(gb_sets:from_list(PreDef),
+ St#expand.defined),
exports=union(from_list(PreExp), St#expand.exports)}}.
%% forms(Forms, State) ->
@@ -721,4 +723,4 @@ imported(F, A, St) ->
end.
defined(F, A, St) ->
- ordsets:is_element({F,A}, St#expand.defined).
+ gb_sets:is_element({F,A}, St#expand.defined).
diff --git a/lib/et/doc/src/et_intro.xml b/lib/et/doc/src/et_intro.xml
index 0c5fb14d55..60da289721 100644
--- a/lib/et/doc/src/et_intro.xml
+++ b/lib/et/doc/src/et_intro.xml
@@ -40,8 +40,8 @@
ports or files.</p>
<section>
- <title>Scope and Purpose</title>'
-
+ <title>Scope and Purpose</title>
+
<p>This manual describes the <c>Event Tracer (ET)</c> application,
as a component of the Erlang/Open Telecom Platform development
environment. It is assumed that the reader is familiar with the
diff --git a/lib/kernel/src/disk_log.erl b/lib/kernel/src/disk_log.erl
index f5f972c112..5b1efcd395 100644
--- a/lib/kernel/src/disk_log.erl
+++ b/lib/kernel/src/disk_log.erl
@@ -282,7 +282,8 @@ change_notify(Log, Pid, NewNotify) ->
-spec change_header(Log, Header) -> 'ok' | {'error', Reason} when
Log :: log(),
- Header :: {head, dlog_head_opt()} | {head_func, mfa()},
+ Header :: {head, dlog_head_opt()}
+ | {head_func, MFA :: {atom(), atom(), list()}},
Reason :: no_such_log | nonode | {read_only_mode, Log}
| {blocked_log, Log} | {badarg, head}.
change_header(Log, NewHead) ->
@@ -336,7 +337,9 @@ format_error(Error) ->
ok | {blocked, QueueLogRecords :: boolean()}}
| {node, Node :: node()}
| {distributed, Dist :: local | [node()]}
- | {head, Head :: none | {head, term()} | mfa()}
+ | {head, Head :: none
+ | {head, term()}
+ | (MFA :: {atom(), atom(), list()})}
| {no_written_items, NoWrittenItems ::non_neg_integer()}
| {full, Full :: boolean}
| {no_current_bytes, non_neg_integer()}
diff --git a/lib/kernel/src/disk_log.hrl b/lib/kernel/src/disk_log.hrl
index 259967650f..242a25a7a6 100644
--- a/lib/kernel/src/disk_log.hrl
+++ b/lib/kernel/src/disk_log.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -74,7 +74,7 @@
| {distributed, Nodes :: [node()]}
| {notify, boolean()}
| {head, Head :: dlog_head_opt()}
- | {head_func, mfa()}
+ | {head_func, MFA :: {atom(), atom(), list()}}
| {mode, Mode :: dlog_mode()}.
-type dlog_options() :: [dlog_option()].
-type dlog_repair() :: 'truncate' | boolean().
diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl
index ad987fe7a7..0c3f5c3514 100644
--- a/lib/kernel/test/disk_log_SUITE.erl
+++ b/lib/kernel/test/disk_log_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -90,7 +90,7 @@
evil/1,
- otp_6278/1]).
+ otp_6278/1, otp_10131/1]).
-export([head_fun/1, hf/0, lserv/1,
measure/0, init_m/1, xx/0, head_exit/0, slow_header/1]).
@@ -124,7 +124,7 @@
[halt_int, wrap_int, halt_ext, wrap_ext, read_mode, head,
notif, new_idx_vsn, reopen, block, unblock, open, close,
error, chunk, truncate, many_users, info, change_size,
- change_attribute, distribution, evil, otp_6278]).
+ change_attribute, distribution, evil, otp_6278, otp_10131]).
%% The following two lists should be mutually exclusive. To skip a case
%% on VxWorks altogether, use the kernel.spec.vxworks file instead.
@@ -153,7 +153,7 @@ all() ->
{group, open}, {group, close}, {group, error}, chunk,
truncate, many_users, {group, info},
{group, change_size}, change_attribute,
- {group, distribution}, evil, otp_6278].
+ {group, distribution}, evil, otp_6278, otp_10131].
groups() ->
[{halt_int, [], [halt_int_inf, {group, halt_int_sz}]},
@@ -4915,6 +4915,22 @@ otp_6278(Conf) when is_list(Conf) ->
end,
?line error_logger:delete_report_handler(?MODULE).
+otp_10131(suite) -> [];
+otp_10131(doc) -> ["OTP-10131. head_func type."];
+otp_10131(Conf) when is_list(Conf) ->
+ Dir = ?privdir(Conf),
+ Log = otp_10131,
+ File = filename:join(Dir, lists:concat([Log, ".LOG"])),
+ HeadFunc = {?MODULE, head_fun, [{ok,"head"}]},
+ {ok, Log} = disk_log:open([{name,Log},{file,File},
+ {head_func, HeadFunc}]),
+ HeadFunc = info(Log, head, undef),
+ HeadFunc2 = {?MODULE, head_fun, [{ok,"head2"}]},
+ ok = disk_log:change_header(Log, {head_func, HeadFunc2}),
+ HeadFunc2 = info(Log, head, undef),
+ ok = disk_log:close(Log),
+ ok.
+
mark(FileName, What) ->
{ok,Fd} = file:open(FileName, [raw, binary, read, write]),
{ok,_} = file:position(Fd, 4),
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 0f436a6caf..c57930e821 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -85,12 +85,11 @@
premaster_secret, %
file_ref_db, % ets()
cert_db_ref, % ref()
- from, % term(), where to reply
bytes_to_read, % integer(), # bytes to read in passive mode
user_data_buffer, % binary()
log_alert, % boolean()
renegotiation, % {boolean(), From | internal | peer}
- recv_from, %
+ start_or_recv_from, % "gen_fsm From"
send_queue, % queue()
terminated = false, %
allow_renegotiate = true
@@ -758,8 +757,8 @@ handle_sync_event({application_data, Data}, From, StateName,
State#state{send_queue = queue:in({From, Data}, Queue)},
get_timeout(State)};
-handle_sync_event(start, From, hello, State) ->
- hello(start, State#state{from = From});
+handle_sync_event(start, StartFrom, hello, State) ->
+ hello(start, State#state{start_or_recv_from = StartFrom});
%% The two clauses below could happen if a server upgrades a socket in
%% active mode. Note that in this case we are lucky that
@@ -773,8 +772,8 @@ handle_sync_event(start, _, connection, State) ->
{reply, connected, connection, State, get_timeout(State)};
handle_sync_event(start, _From, error, {Error, State = #state{}}) ->
{stop, {shutdown, Error}, {error, Error}, State};
-handle_sync_event(start, From, StateName, State) ->
- {next_state, StateName, State#state{from = From}, get_timeout(State)};
+handle_sync_event(start, StartFrom, StateName, State) ->
+ {next_state, StateName, State#state{start_or_recv_from = StartFrom}, get_timeout(State)};
handle_sync_event(close, _, StateName, State) ->
%% Run terminate before returning
@@ -805,13 +804,13 @@ handle_sync_event({shutdown, How0}, _, StateName,
{stop, normal, Error, State}
end;
-handle_sync_event({recv, N}, From, connection = StateName, State0) ->
- passive_receive(State0#state{bytes_to_read = N, recv_from = From}, StateName);
+handle_sync_event({recv, N}, RecvFrom, connection = StateName, State0) ->
+ passive_receive(State0#state{bytes_to_read = N, start_or_recv_from = RecvFrom}, StateName);
%% Doing renegotiate wait with handling request until renegotiate is
%% finished. Will be handled by next_state_is_connection/2.
-handle_sync_event({recv, N}, From, StateName, State) ->
- {next_state, StateName, State#state{bytes_to_read = N, recv_from = From},
+handle_sync_event({recv, N}, RecvFrom, StateName, State) ->
+ {next_state, StateName, State#state{bytes_to_read = N, start_or_recv_from = RecvFrom},
get_timeout(State)};
handle_sync_event({new_user, User}, _From, StateName,
@@ -962,9 +961,9 @@ handle_info({CloseTag, Socket}, StateName,
{stop, normal, State};
handle_info({ErrorTag, Socket, econnaborted}, StateName,
- #state{socket = Socket, from = User, role = Role,
+ #state{socket = Socket, start_or_recv_from = StartFrom, role = Role,
error_tag = ErrorTag} = State) when StateName =/= connection ->
- alert_user(User, ?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE), Role),
+ alert_user(StartFrom, ?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE), Role),
{stop, normal, State};
handle_info({ErrorTag, Socket, Reason}, StateName, #state{socket = Socket,
@@ -1704,7 +1703,7 @@ passive_receive(State0 = #state{user_data_buffer = Buffer}, StateName) ->
read_application_data(Data, #state{user_application = {_Mon, Pid},
socket_options = SOpts,
bytes_to_read = BytesToRead,
- recv_from = From,
+ start_or_recv_from = RecvFrom,
user_data_buffer = Buffer0} = State0) ->
Buffer1 = if
Buffer0 =:= <<>> -> Data;
@@ -1713,9 +1712,9 @@ read_application_data(Data, #state{user_application = {_Mon, Pid},
end,
case get_data(SOpts, BytesToRead, Buffer1) of
{ok, ClientData, Buffer} -> % Send data
- SocketOpt = deliver_app_data(SOpts, ClientData, Pid, From),
+ SocketOpt = deliver_app_data(SOpts, ClientData, Pid, RecvFrom),
State = State0#state{user_data_buffer = Buffer,
- recv_from = undefined,
+ start_or_recv_from = undefined,
bytes_to_read = 0,
socket_options = SocketOpt
},
@@ -1730,7 +1729,7 @@ read_application_data(Data, #state{user_application = {_Mon, Pid},
{more, Buffer} -> % no reply, we need more data
next_record(State0#state{user_data_buffer = Buffer});
{error,_Reason} -> %% Invalid packet in packet mode
- deliver_packet_error(SOpts, Buffer1, Pid, From),
+ deliver_packet_error(SOpts, Buffer1, Pid, RecvFrom),
{stop, normal, State0}
end.
@@ -2016,9 +2015,9 @@ next_state_connection(StateName, #state{send_queue = Queue0,
%% premaster_secret and public_key_info (only needed during handshake)
%% to reduce memory footprint of a connection.
next_state_is_connection(_, State =
- #state{recv_from = From,
+ #state{start_or_recv_from = RecvFrom,
socket_options =
- #socket_options{active = false}}) when From =/= undefined ->
+ #socket_options{active = false}}) when RecvFrom =/= undefined ->
passive_receive(State#state{premaster_secret = undefined,
public_key_info = undefined,
tls_handshake_hashes = {<<>>, <<>>}}, connection);
@@ -2081,7 +2080,7 @@ initial_state(Role, Host, Port, Socket, {SSLOptions, SocketOptions}, User,
log_alert = true,
session_cache_cb = SessionCacheCb,
renegotiation = {false, first},
- recv_from = undefined,
+ start_or_recv_from = undefined,
send_queue = queue:new()
}.
@@ -2185,7 +2184,7 @@ handle_alerts([Alert | Alerts], {next_state, StateName, State, _Timeout}) ->
handle_alerts(Alerts, handle_alert(Alert, StateName, State)).
handle_alert(#alert{level = ?FATAL} = Alert, StateName,
- #state{from = From, host = Host, port = Port, session = Session,
+ #state{start_or_recv_from = From, host = Host, port = Port, session = Session,
user_application = {_Mon, Pid},
log_alert = Log, role = Role, socket_options = Opts} = State) ->
invalidate_session(Role, Host, Port, Session),
@@ -2267,13 +2266,13 @@ handle_own_alert(Alert, Version, StateName,
ok
end.
-handle_normal_shutdown(Alert, _, #state{from = User, role = Role, renegotiation = {false, first}}) ->
- alert_user(User, Alert, Role);
+handle_normal_shutdown(Alert, _, #state{start_or_recv_from = StartFrom, role = Role, renegotiation = {false, first}}) ->
+ alert_user(StartFrom, Alert, Role);
handle_normal_shutdown(Alert, StateName, #state{socket_options = Opts,
user_application = {_Mon, Pid},
- from = User, role = Role}) ->
- alert_user(StateName, Opts, Pid, User, Alert, Role).
+ start_or_recv_from = RecvFrom, role = Role}) ->
+ alert_user(StateName, Opts, Pid, RecvFrom, Alert, Role).
handle_unexpected_message(Msg, Info, #state{negotiated_version = Version} = State) ->
Alert = ?ALERT_REC(?FATAL,?UNEXPECTED_MESSAGE),
@@ -2299,9 +2298,9 @@ ack_connection(#state{renegotiation = {true, From}} = State) ->
gen_fsm:reply(From, ok),
State#state{renegotiation = undefined};
ack_connection(#state{renegotiation = {false, first},
- from = From} = State) when From =/= undefined ->
- gen_fsm:reply(From, connected),
- State#state{renegotiation = undefined};
+ start_or_recv_from = StartFrom} = State) when StartFrom =/= undefined ->
+ gen_fsm:reply(StartFrom, connected),
+ State#state{renegotiation = undefined, start_or_recv_from = undefined};
ack_connection(State) ->
State.
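The field renaming above consolidates from and recv_from into a single start_or_recv_from: both the caller of start and the caller of recv are gen_fsm clients whose reply is deferred until the handshake completes or data is available. A minimal sketch of that deferred-reply pattern, with illustrative module, record and state names (not the actual ssl_connection code):

    -module(deferred_reply_sketch).
    -export([handle_sync_event/4, ack_connection/1]).

    -record(st, {start_or_recv_from}).

    %% Do not reply yet; remember the caller in the state.
    handle_sync_event(start, From, hello, St) ->
        {next_state, hello, St#st{start_or_recv_from = From}};
    handle_sync_event(_Event, _From, StateName, St) ->
        {reply, ok, StateName, St}.

    %% Later, reply to the remembered caller and clear the field, as the
    %% updated ack_connection/1 clause above now does.
    ack_connection(#st{start_or_recv_from = From} = St) when From =/= undefined ->
        gen_fsm:reply(From, connected),
        St#st{start_or_recv_from = undefined};
    ack_connection(St) ->
        St.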
diff --git a/lib/ssl/src/ssl_manager.erl b/lib/ssl/src/ssl_manager.erl
index a18cb70e2d..3e947af2c9 100644
--- a/lib/ssl/src/ssl_manager.erl
+++ b/lib/ssl/src/ssl_manager.erl
@@ -107,10 +107,10 @@ connection_init(Trustedcerts, Role) ->
cache_pem_file(File, DbHandle) ->
MD5 = crypto:md5(File),
case ssl_certificate_db:lookup_cached_pem(DbHandle, MD5) of
- [Content] ->
- {ok, Content};
[{Content,_}] ->
{ok, Content};
+ [Content] ->
+ {ok, Content};
undefined ->
call({cache_pem, {MD5, File}})
end.
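The clause swap in cache_pem_file/2 matters because the two patterns overlap: [Content] matches any one-element list, including one whose element is a {Content, Ref} tuple, so the more specific pattern has to come first. A small self-contained illustration (module and function names are made up; the compiler may even warn that the second clause of general_first/1 can never match):

    -module(clause_order_sketch).
    -export([specific_first/1, general_first/1]).

    %% Specific pattern first: a cached {Pem, Ref} entry is unpacked.
    specific_first([{Content, _Ref}]) -> {ok, Content};
    specific_first([Content])         -> {ok, Content};
    specific_first(undefined)         -> miss.

    %% General pattern first: [{Pem, Ref}] is swallowed by [Content], so
    %% the whole tuple is returned instead of just the PEM content.
    general_first([Content])          -> {ok, Content};
    general_first([{Content, _Ref}])  -> {ok, Content};
    general_first(undefined)          -> miss.

For example, specific_first([{Pem, Ref}]) gives {ok, Pem}, while general_first([{Pem, Ref}]) gives {ok, {Pem, Ref}}.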
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index 2eaab02665..5a52917d6c 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -262,7 +262,8 @@ all() ->
no_reuses_session_server_restart_new_cert_file, reuseaddr,
hibernate, connect_twice, renegotiate_dos_mitigate_active,
renegotiate_dos_mitigate_passive,
- tcp_error_propagation_in_active_mode, rizzo, no_rizzo_rc4
+ tcp_error_propagation_in_active_mode, rizzo, no_rizzo_rc4,
+ recv_error_handling
].
groups() ->
@@ -3875,16 +3876,16 @@ tcp_error_propagation_in_active_mode(Config) when is_list(Config) ->
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
- {from, self()},
- {mfa, {ssl_test_lib, no_result, []}},
- {options, ServerOpts}]),
+ {from, self()},
+ {mfa, {ssl_test_lib, no_result, []}},
+ {options, ServerOpts}]),
Port = ssl_test_lib:inet_port(Server),
{Client, #sslsocket{pid=Pid} = SslSocket} = ssl_test_lib:start_client([return_socket,
- {node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {mfa, {?MODULE, receive_msg, []}},
- {options, ClientOpts}]),
+ {node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, receive_msg, []}},
+ {options, ClientOpts}]),
{status, _, _, StatusInfo} = sys:get_status(Pid),
[_, _,_, _, Prop] = StatusInfo,
@@ -3895,6 +3896,32 @@ tcp_error_propagation_in_active_mode(Config) when is_list(Config) ->
Pid ! {tcp_error, Socket, etimedout},
ssl_test_lib:check_result(Client, {ssl_closed, SslSocket}).
+
+
+%%--------------------------------------------------------------------
+
+recv_error_handling(doc) ->
+ ["Special case of call error handling"];
+recv_error_handling(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, recv_close, []}},
+ {options, [{active, false} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ {Client, #sslsocket{pid=Pid} = SslSocket} = ssl_test_lib:start_client([return_socket,
+ {node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib, no_result, []}},
+ {options, ClientOpts}]),
+ ssl:close(SslSocket),
+ ssl_test_lib:check_result(Server, ok).
+
+
%%--------------------------------------------------------------------
rizzo(doc) -> ["Test that there is a 1/n-1-split for non RC4 in 'TLS < 1.1' as it is
@@ -3906,7 +3933,7 @@ rizzo(Config) when is_list(Config) ->
{?MODULE, send_recv_result_active_rizzo, []}),
run_send_recv_rizzo(Ciphers, Config, tlsv1,
{?MODULE, send_recv_result_active_rizzo, []}).
-
+%%--------------------------------------------------------------------
no_rizzo_rc4(doc) ->
["Test that there is no 1/n-1-split for RC4 as it is not vunrable to Rizzo/Dungon attack"];
@@ -3917,6 +3944,7 @@ no_rizzo_rc4(Config) when is_list(Config) ->
run_send_recv_rizzo(Ciphers, Config, tlsv1,
{?MODULE, send_recv_result_active_no_rizzo, []}).
+%%--------------------------------------------------------------------
run_send_recv_rizzo(Ciphers, Config, Version, Mfa) ->
Result = lists:map(fun(Cipher) ->
rizzo_test(Cipher, Config, Version, Mfa) end,
@@ -3970,6 +3998,15 @@ send_recv_result(Socket) ->
{ok,"Hello world"} = ssl:recv(Socket, 11),
ok.
+recv_close(Socket) ->
+ {error, closed} = ssl:recv(Socket, 11),
+ receive
+ {_,{error,closed}} ->
+ error_extra_close_sent_to_user_process
+ after 500 ->
+ ok
+ end.
+
send_recv_result_active(Socket) ->
ssl:send(Socket, "Hello world"),
receive
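recv_close/1 above checks that once ssl:recv/2 has returned {error, closed}, no second close notification is delivered to the process that owns the socket. The receive-with-timeout it relies on is a general assertion pattern; a standalone sketch (the 500 ms timeout and the {_, {error, closed}} message shape come from the hunk, the module and function names are illustrative):

    -module(no_extra_close_sketch).
    -export([assert_no_extra_close/0]).

    %% Returns an error tuple if a duplicate close notification arrives
    %% within half a second, ok otherwise.
    assert_no_extra_close() ->
        receive
            {_, {error, closed}} = Msg ->
                {error, {unexpected_message, Msg}}
        after 500 ->
            ok
        end.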
diff --git a/lib/tools/doc/src/eprof.xml b/lib/tools/doc/src/eprof.xml
index 8b614d8860..1c5e38109b 100644
--- a/lib/tools/doc/src/eprof.xml
+++ b/lib/tools/doc/src/eprof.xml
@@ -67,9 +67,9 @@
<p><c>Rootset</c> is a list of pids and registered names.</p>
<p>The function returns <c>profiling</c> if tracing could be enabled
for all processes in <c>Rootset</c>, or <c>error</c> otherwise.</p>
- <p>A pattern can be selected to narrow the profiling. For instance ca a specific
- module be selected and only the code processes executes in that module will be
- profiled.</p>
+ <p>A pattern can be selected to narrow the profiling. For instance a
+ specific module can be selected, and only the code executed in that
+ module will be profiled.</p>
</desc>
</func>
<func>
@@ -147,8 +147,8 @@
</type>
<desc>
<p>This function ensures that the results displayed by
- <c>analyze/0,1,2</c> are printed both to
- the file <c>File</c> and the screen.</p>
+ <c>analyze/0,1,2</c> are printed both to the file
+ <c>File</c> and the screen.</p>
</desc>
</func>
<func>
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index bc7a190fb4..2f6c7f554e 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -2986,18 +2986,52 @@ This assumes that the preceding expression is either simple
(forward-sexp (- arg))
(let ((col (current-column)))
(skip-chars-backward " \t")
- ;; Needed to match the colon in "'foo':'bar'".
- (if (not (memq (preceding-char) '(?# ?:)))
- col
- ;; Special hack to handle: (note line break)
- ;; [#myrecord{
- ;; foo = foo}]
- (or
- (ignore-errors
- (backward-char 1)
- (forward-sexp -1)
- (current-column))
- col)))))
+ ;; Special hack to handle: (note line break)
+ ;; [#myrecord{
+ ;; foo = foo}]
+ ;; where the call (forward-sexp -1) will fail when point is at the `#'.
+ (or
+ (ignore-errors
+ ;; Needed to match the colon in "'foo':'bar'".
+ (cond ((eq (preceding-char) ?:)
+ (backward-char 1)
+ (forward-sexp -1)
+ (current-column))
+ ((eq (preceding-char) ?#)
+ ;; We may now be at:
+ ;; - either a construction of a new record
+ ;; - or update of a record, in which case we want
+ ;; the column of the expression to be updated.
+ ;;
+ ;; To see which of the two cases we are at, we first
+ ;; move an expression backwards, check for keywords,
+ ;; then immediately an expression forwards. Moving
+ ;; backwards skips past tokens like `,' or `->', but
+ ;; when moving forwards again, we won't skip past such
+ ;; tokens. We use this: if, after having moved
+ ;; forwards, we're back where we started, then it was
+ ;; a record update.
+ ;; The check for keywords is to detect cases like:
+ ;; case Something of #record_construction{...}
+ (backward-char 1)
+ (let ((record-start (point))
+ (record-start-col (current-column)))
+ (forward-sexp -1)
+ (let ((preceding-expr-col (current-column))
+ ;; white space definition according to erl_scan
+ (white-space "\000-\040\200-\240"))
+ (if (erlang-at-keyword)
+ ;; The (forward-sexp -1) call moved past a keyword
+ (1+ record-start-col)
+ (forward-sexp 1)
+ (skip-chars-forward white-space record-start)
+ ;; Are we back where we started? If so, it was an update.
+ (if (= (point) record-start)
+ preceding-expr-col
+ (goto-char record-start)
+ (1+ (current-column)))))))
+ (t col)))
+ col))))
(defun erlang-indent-parenthesis (stack-position)
(let ((previous (erlang-indent-find-preceding-expr)))
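The rewritten branch decides, when the line breaks right after #...{, whether it is looking at construction of a new record or an update of a preceding expression, and indents relative to the record opening or to that expression accordingly. Roughly, the two Erlang shapes it tells apart are the following (record and variable names are illustrative, and the exact columns are whatever the heuristic computes, as exercised by test.erl.indented below):

    %% Construction: indent relative to the record opening.
    new_rec() ->
        #rec{
           f1 = 1,
           f2 = 2}.

    %% Update: indent relative to the expression being updated (Var),
    %% i.e. the preceding expression found by the code above.
    update_rec(Var) ->
        Var#rec{
          f1 = 1,
          f2 = 2}.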
diff --git a/lib/tools/emacs/test.erl.indented b/lib/tools/emacs/test.erl.indented
index 2948ccf1b5..e0593c6522 100644
--- a/lib/tools/emacs/test.erl.indented
+++ b/lib/tools/emacs/test.erl.indented
@@ -657,3 +657,41 @@ indent_comprehensions() ->
foo() ->
[#foo{
foo = foo}].
+
+%% Record indentation
+some_function_with_a_very_long_name() ->
+ #'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b},
+ case dummy_function_with_a_very_very_long_name(x) of
+ #'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b} ->
+ ok;
+ Var = #'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b} ->
+ Var#'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b};
+ #xyz{
+ a=1,
+ b=2} ->
+ ok
+ end.
+
+another_function_with_a_very_very_long_name() ->
+ #rec{
+ field1=1,
+ field2=1}.
+
+some_function_name_xyz(xyzzy, #some_record{
+ field1=Field1,
+ field2=Field2}) ->
+ SomeVariable = f(#'Some-long-record-name'{
+ field_a = 1,
+ 'inter-xyz-parameters' =
+ #'Some-other-very-long-record-name'{
+ field2 = Field1,
+ field2 = Field2}}),
+ {ok, SomeVariable}.
diff --git a/lib/tools/emacs/test.erl.orig b/lib/tools/emacs/test.erl.orig
index 1221c5655e..69356aca9e 100644
--- a/lib/tools/emacs/test.erl.orig
+++ b/lib/tools/emacs/test.erl.orig
@@ -657,3 +657,41 @@ ok.
foo() ->
[#foo{
foo = foo}].
+
+%% Record indentation
+some_function_with_a_very_long_name() ->
+ #'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b},
+ case dummy_function_with_a_very_very_long_name(x) of
+ #'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b} ->
+ ok;
+ Var = #'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b} ->
+ Var#'a-long-record-name-like-it-sometimes-is-with-asn.1-records'{
+ field1=a,
+ field2=b};
+ #xyz{
+ a=1,
+ b=2} ->
+ ok
+ end.
+
+another_function_with_a_very_very_long_name() ->
+ #rec{
+ field1=1,
+ field2=1}.
+
+some_function_name_xyz(xyzzy, #some_record{
+ field1=Field1,
+ field2=Field2}) ->
+ SomeVariable = f(#'Some-long-record-name'{
+ field_a = 1,
+ 'inter-xyz-parameters' =
+ #'Some-other-very-long-record-name'{
+ field2 = Field1,
+ field2 = Field2}}),
+ {ok, SomeVariable}.