path: root/lib/tools
Diffstat (limited to 'lib/tools')
-rw-r--r--  lib/tools/doc/src/cover.xml             20
-rw-r--r--  lib/tools/emacs/erlang-pkg.el            3
-rw-r--r--  lib/tools/emacs/erlang.el               44
-rw-r--r--  lib/tools/emacs/vsn.mk                   3
-rw-r--r--  lib/tools/src/cover.erl                188
-rw-r--r--  lib/tools/test/cover_SUITE.erl         133
-rw-r--r--  lib/tools/test/cover_SUITE_data/f.erl   11
7 files changed, 301 insertions, 101 deletions
diff --git a/lib/tools/doc/src/cover.xml b/lib/tools/doc/src/cover.xml
index 683acc025d..a2444ec947 100644
--- a/lib/tools/doc/src/cover.xml
+++ b/lib/tools/doc/src/cover.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2001</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -104,6 +104,13 @@
remove nodes. The same Cover compiled code will be loaded on each
node, and analysis will collect and sum up coverage data results
from all nodes.</p>
+ <p>To only collect data from remote nodes without stopping
+ <c>cover</c> on those nodes, use <c>cover:flush/1</c>.</p>
+ <p>If the connection to a remote node goes down, the main node
+ will mark it as lost. If the node comes back, it will be added
+ again. If the remote node was alive during the disconnected
+ period, cover data from before and during this period will be
+ included in the analysis.</p>
</description>
<funcs>
<func>
@@ -477,6 +484,17 @@
remote nodes is fetched and stored on the main node.</p>
</desc>
</func>
+ <func>
+ <name>flush(Nodes) -> ok | {error,not_main_node}</name>
+ <fsummary>Collect cover data from remote nodes.</fsummary>
+ <type>
+ <v>Nodes = [atom()]</v>
+ </type>
+ <desc>
+ <p>Fetch data from the Cover database on the remote nodes and
+ store it on the main node.</p>
+ </desc>
+ </func>
</funcs>
<section>
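As a usage illustration only (the module name mymod and its run/0 entry point are placeholders, not part of this patch), the new flush/1 slots into a distributed coverage session roughly like this:

    run_distributed_cover() ->
        {ok, Nodes} = cover:start(nodes()),      % start cover on all connected nodes
        {ok, mymod} = cover:compile(mymod),      % mymod is a placeholder module
        _Result = rpc:call(hd(Nodes), mymod, run, []),  % exercise the code remotely
        ok = cover:flush(Nodes),                 % pull counters without stopping cover
        cover:analyse(mymod, calls, function).   % analysis now includes remote calls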
diff --git a/lib/tools/emacs/erlang-pkg.el b/lib/tools/emacs/erlang-pkg.el
new file mode 100644
index 0000000000..decc696e21
--- /dev/null
+++ b/lib/tools/emacs/erlang-pkg.el
@@ -0,0 +1,3 @@
+(define-package "erlang" "2.7.0"
+ "Erlang major mode"
+ '((flymake-mode "0.4.6")))
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index b5da9e79d8..e2bcd37def 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -1,4 +1,10 @@
-;; erlang.el --- Major modes for editing and running Erlang
+;;; erlang.el --- Major modes for editing and running Erlang
+
+;; Copyright (C) 2004 Free Software Foundation, Inc.
+;; Author: Anders Lindgren
+;; Keywords: erlang, languages, processes
+;; Date: 2011-12-11
+
;; %CopyrightBegin%
;;
;; Copyright Ericsson AB 1996-2012. All Rights Reserved.
@@ -15,10 +21,7 @@
;; under the License.
;;
;; %CopyrightEnd%
-;;
-;; Copyright (C) 2004 Free Software Foundation, Inc.
-;; Author: Anders Lindgren
-;; Keywords: erlang, languages, processes
+;;
;; Lars Thorsén's modifications of 2000-06-07 included.
;; The original version of this package was written by Robert Virding.
@@ -483,10 +486,6 @@ function.")
"*Non-nil means TAB in Erlang mode should always re-indent the current line,
regardless of where in the line point is when the TAB command is used.")
-(defvar erlang-error-regexp-alist
- '(("^\\([^:( \t\n]+\\)[:(][ \t]*\\([0-9]+\\)[:) \t]" . (1 2)))
- "*Patterns for matching Erlang errors.")
-
(defvar erlang-man-inhibit (eq system-type 'windows-nt)
"Inhibit the creation of the Erlang Manual Pages menu.
@@ -1327,7 +1326,6 @@ Other commands:
(erlang-menu-init)
(erlang-mode-variables)
(erlang-check-module-name-init)
- (erlang-add-compilation-alist erlang-error-regexp-alist)
(erlang-man-init)
(erlang-tags-init)
(erlang-font-lock-init)
@@ -1443,31 +1441,6 @@ Other commands:
(set (make-local-variable 'outline-level) (lambda () 1))
(set (make-local-variable 'add-log-current-defun-function)
'erlang-current-defun))
-
-
-;; Compilation.
-;;
-;; The following code is compatible with the standard package `compilation',
-;; making it possible to go to errors using `erlang-next-error' (or just
-;; `next-error' in Emacs 21).
-;;
-;; The normal `compile' command works of course. For best result, please
-;; execute `make' with the `-w' flag.
-;;
-;; Please see the variables named `compiling-..' above.
-
-(defun erlang-add-compilation-alist (alist)
- (require 'compile)
- (cond ((boundp 'compilation-error-regexp-alist) ; Emacs 19
- (while alist
- (or (assoc (car (car alist)) compilation-error-regexp-alist)
- (setq compilation-error-regexp-alist
- (cons (car alist) compilation-error-regexp-alist)))
- (setq alist (cdr alist))))
- ((boundp 'compilation-error-regexp)
- ;; Emacs 18, Only one regexp is allowed.
- (funcall (symbol-function 'set)
- 'compilation-error-regexp (car (car alist))))))
(defun erlang-font-lock-init ()
"Initialize Font Lock for Erlang mode."
@@ -4896,7 +4869,6 @@ The following special commands are available:
(set (make-local-variable 'compilation-old-error-list) nil))
;; Needed when compiling directly from the Erlang shell.
(setq compilation-last-buffer (current-buffer))
- (erlang-add-compilation-alist erlang-error-regexp-alist)
(setq comint-prompt-regexp "^[^>=]*> *")
(setq comint-eol-on-send t)
(setq comint-input-ignoredups t)
diff --git a/lib/tools/emacs/vsn.mk b/lib/tools/emacs/vsn.mk
index f33ea8b519..a495da3453 100644
--- a/lib/tools/emacs/vsn.mk
+++ b/lib/tools/emacs/vsn.mk
@@ -1,3 +1,2 @@
-EMACS_VSN = 2.4.13
-
+EMACS_VSN = 2.7.0
diff --git a/lib/tools/src/cover.erl b/lib/tools/src/cover.erl
index e21bd1b88c..10f14b0a49 100644
--- a/lib/tools/src/cover.erl
+++ b/lib/tools/src/cover.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -26,12 +26,17 @@
%% ARCHITECTURE
%% The coverage tool consists of one process on each node involved in
%% coverage analysis. The process is registered as 'cover_server'
-%% (?SERVER). All cover_servers in the distributed system are linked
-%% together. The cover_server on the 'main' node is in charge, and it
-%% traps exits so it can detect nodedown or process crashes on the
-%% remote nodes. This process is implemented by the functions
-%% init_main/1 and main_process_loop/1. The cover_server on the remote
-%% nodes are implemented by the functions init_remote/2 and
+%% (?SERVER). The cover_server on the 'main' node is in charge, and
+%% it monitors the cover_servers on all remote nodes. When it gets a
+%% 'DOWN' message for another cover_server, it marks the node as
+%% 'lost'. If a nodeup is received for a lost node, the main node
+%% ensures that the cover compiled modules are loaded again. If the
+%% remote node was alive during the disconnected period, cover data
+%% for this period will also be included in the analysis.
+%%
+%% The cover_server process on the main node is implemented by the
+%% functions init_main/1 and main_process_loop/1. The cover_servers on
+%% the remote nodes are implemented by the functions init_remote/2 and
%% remote_process_loop/1.
%%
%% TABLES
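For readers unfamiliar with the primitives named in the comment above, here is a stripped-down sketch of the pattern (the watch/1 function and its return values are invented for illustration; cover.erl itself folds this into main_process_loop/1):

    watch(RemoteNode) ->
        ok = net_kernel:monitor_nodes(true),                     % get nodeup/nodedown messages
        _Ref = erlang:monitor(process, {cover_server, RemoteNode}),
        receive
            {'DOWN', _MRef, process, {cover_server, Node}, _Info} ->
                {lost, Node};        % the main node marks the node as lost
            {nodeup, Node} ->
                {back, Node}         % the main node re-syncs cover-compiled modules
        end.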
@@ -81,15 +86,17 @@
export/1, export/2, import/1,
modules/0, imported/0, imported_modules/0, which_nodes/0, is_compiled/1,
reset/1, reset/0,
+ flush/1,
stop/0, stop/1]).
--export([remote_start/1]).
+-export([remote_start/1,get_main_node/0]).
%-export([bump/5]).
-export([transform/4]). % for test purposes
-record(main_state, {compiled=[], % [{Module,File}]
imported=[], % [{Module,File,ImportFile}]
stopper, % undefined | pid()
- nodes=[]}). % [Node]
+ nodes=[], % [Node]
+ lost_nodes=[]}). % [Node]
-record(remote_state, {compiled=[], % [{Module,File}]
main_node}). % atom()
@@ -497,6 +504,19 @@ stop(Node) when is_atom(Node) ->
stop(Nodes) ->
call({stop,remove_myself(Nodes,[])}).
+%% flush(Nodes) -> ok | {error,not_main_node}
+%% Nodes = [Node] | Node
+%% Node = atom()
+flush(Node) when is_atom(Node) ->
+ flush([Node]);
+flush(Nodes) ->
+ call({flush,remove_myself(Nodes,[])}).
+
+%% Used by test_server only. Not documented.
+get_main_node() ->
+ call(get_main_node).
+
%% bump(Module, Function, Arity, Clause, Line)
%% Module = Function = atom()
%% Arity = Clause = Line = integer()
@@ -541,7 +561,10 @@ remote_call(Node,Request) ->
Return =
receive
{'DOWN', Ref, _Type, _Object, _Info} ->
- {error,node_dead};
+ case Request of
+ {remote,stop} -> ok;
+ _ -> {error,node_dead}
+ end;
{?SERVER,Reply} ->
Reply
end,
@@ -569,40 +592,14 @@ init_main(Starter) ->
ets:new(?BINARY_TABLE, [set, named_table]),
ets:new(?COLLECTION_TABLE, [set, public, named_table]),
ets:new(?COLLECTION_CLAUSE_TABLE, [set, public, named_table]),
- process_flag(trap_exit,true),
+ net_kernel:monitor_nodes(true),
Starter ! {?SERVER,started},
main_process_loop(#main_state{}).
main_process_loop(State) ->
receive
{From, {start_nodes,Nodes}} ->
- ThisNode = node(),
- StartedNodes =
- lists:foldl(
- fun(Node,Acc) ->
- case rpc:call(Node,cover,remote_start,[ThisNode]) of
- {ok,RPid} ->
- link(RPid),
- [Node|Acc];
- Error ->
- io:format("Could not start cover on ~w: ~p\n",
- [Node,Error]),
- Acc
- end
- end,
- [],
- Nodes),
-
- %% In case some of the compiled modules have been unloaded they
- %% should not be loaded on the new node.
- {_LoadedModules,Compiled} =
- get_compiled_still_loaded(State#main_state.nodes,
- State#main_state.compiled),
- remote_load_compiled(StartedNodes,Compiled),
-
- State1 =
- State#main_state{nodes = State#main_state.nodes ++ StartedNodes,
- compiled = Compiled},
+ {StartedNodes,State1} = do_start_nodes(Nodes, State),
reply(From, {ok,StartedNodes}),
main_process_loop(State1);
@@ -707,8 +704,13 @@ main_process_loop(State) ->
{From, {stop,Nodes}} ->
remote_collect('_',Nodes,true),
reply(From, ok),
- State1 = State#main_state{nodes=State#main_state.nodes--Nodes},
- main_process_loop(State1);
+ Nodes1 = State#main_state.nodes--Nodes,
+ main_process_loop(State#main_state{nodes=Nodes1});
+
+ {From, {flush,Nodes}} ->
+ remote_collect('_',Nodes,false),
+ reply(From, ok),
+ main_process_loop(State);
{From, stop} ->
lists:foreach(
@@ -788,14 +790,30 @@ main_process_loop(State) ->
end,
main_process_loop(S);
- {'EXIT',Pid,_Reason} ->
- %% Exit is trapped on the main node only, so this will only happen
- %% there. I assume that I'm only linked to cover_servers on remote
- %% nodes, so this must be one of them crashing.
- %% Remove node from list!
- State1 = State#main_state{nodes=State#main_state.nodes--[node(Pid)]},
+ {'DOWN', _MRef, process, {?SERVER,Node}, _Info} ->
+ %% A remote cover_server is down, mark as lost
+ Nodes = State#main_state.nodes--[Node],
+ Lost = [Node|State#main_state.lost_nodes],
+ main_process_loop(State#main_state{nodes=Nodes,lost_nodes=Lost});
+
+ {nodeup,Node} ->
+ State1 =
+ case lists:member(Node,State#main_state.lost_nodes) of
+ true ->
+ sync_compiled(Node,State);
+ false ->
+ State
+ end,
main_process_loop(State1);
+
+ {nodedown,_} ->
+ %% Will be taken care of when 'DOWN' message arrives
+ main_process_loop(State);
+ {From, get_main_node} ->
+ reply(From, node()),
+ main_process_loop(State);
+
get_status ->
io:format("~p~n",[State]),
main_process_loop(State)
@@ -850,7 +868,16 @@ remote_process_loop(State) ->
{remote,stop} ->
reload_originals(State#remote_state.compiled),
unregister(?SERVER),
- remote_reply(State#remote_state.main_node, ok);
+ ok; % not replying since 'DOWN' message will be received anyway
+
+ {remote,get_compiled} ->
+ remote_reply(State#remote_state.main_node,
+ State#remote_state.compiled),
+ remote_process_loop(State);
+
+ {From, get_main_node} ->
+ remote_reply(From, State#remote_state.main_node),
+ remote_process_loop(State);
get_status ->
io:format("~p~n",[State]),
@@ -961,6 +988,36 @@ unload([]) ->
%%%--Handling of remote nodes--------------------------------------------
+do_start_nodes(Nodes, State) ->
+ ThisNode = node(),
+ StartedNodes =
+ lists:foldl(
+ fun(Node,Acc) ->
+ case rpc:call(Node,cover,remote_start,[ThisNode]) of
+ {ok,_RPid} ->
+ erlang:monitor(process,{?SERVER,Node}),
+ [Node|Acc];
+ Error ->
+ io:format("Could not start cover on ~w: ~p\n",
+ [Node,Error]),
+ Acc
+ end
+ end,
+ [],
+ Nodes),
+
+ %% In case some of the compiled modules have been unloaded they
+ %% should not be loaded on the new node.
+ {_LoadedModules,Compiled} =
+ get_compiled_still_loaded(State#main_state.nodes,
+ State#main_state.compiled),
+ remote_load_compiled(StartedNodes,Compiled),
+
+ State1 =
+ State#main_state{nodes = State#main_state.nodes ++ StartedNodes,
+ compiled = Compiled},
+ {StartedNodes, State1}.
+
%% start the cover_server on a remote node
remote_start(MainNode) ->
case whereis(?SERVER) of
@@ -984,6 +1041,30 @@ remote_start(MainNode) ->
{error,{already_started,Pid}}
end.
+%% If a lost node comes back, ensure that the main and remote nodes have
+%% the same cover compiled modules. Note that no action is taken if the
+%% same {Mod,File} exists on both, i.e. code change is not handled!
+sync_compiled(Node,State) ->
+ #main_state{compiled=Compiled0,nodes=Nodes,lost_nodes=Lost}=State,
+ State1 =
+ case remote_call(Node,{remote,get_compiled}) of
+ {error,node_dead} ->
+ {_,S} = do_start_nodes([Node],State),
+ S;
+ {error,_} ->
+ State;
+ RemoteCompiled ->
+ {_,Compiled} = get_compiled_still_loaded(Nodes,Compiled0),
+ Unload = [UM || {UM,_}=U <- RemoteCompiled,
+ false == lists:member(U,Compiled)],
+ remote_unload([Node],Unload),
+ Load = [L || L <- Compiled,
+ false == lists:member(L,RemoteCompiled)],
+ remote_load_compiled([Node],Load),
+ State#main_state{compiled=Compiled, nodes=[Node|Nodes]}
+ end,
+ State1#main_state{lost_nodes=Lost--[Node]}.
+
%% Load a set of cover compiled modules on remote nodes,
%% We do it ?MAX_MODS modules at a time so that we don't
%% run out of memory on the cover_server node.
@@ -1094,7 +1175,6 @@ remove_myself([Node|Nodes],Acc) ->
remove_myself(Nodes,[Node|Acc]);
remove_myself([],Acc) ->
Acc.
-
%%%--Handling of modules state data--------------------------------------
@@ -2254,7 +2334,13 @@ do_reset2([]) ->
do_clear(Module) ->
ets:match_delete(?COVER_CLAUSE_TABLE, {Module,'_'}),
ets:match_delete(?COVER_TABLE, {#bump{module=Module},'_'}),
- ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'}).
+ case lists:member(?COLLECTION_TABLE, ets:all()) of
+ true ->
+ %% We're on the main node
+ ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'});
+ false ->
+ ok
+ end.
not_loaded(Module, unloaded, State) ->
do_clear(Module),
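The guard added above avoids touching the collection table on remote nodes, where it is never created. An equivalent check, shown here with ets:info/1 instead of scanning ets:all/0 (function and argument names are placeholders):

    delete_if_present(Tab, Pattern) ->
        case ets:info(Tab) of
            undefined -> ok;                          % table not created on this node
            _Info     -> ets:match_delete(Tab, Pattern)
        end.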
@@ -2307,7 +2393,7 @@ pmap(Fun, [E | Rest], Pids, Limit, Cnt, Acc) when Cnt < Limit ->
pmap(Fun, Rest, Pids ++ [Pid], Limit, Cnt + 1, Acc);
pmap(Fun, List, [Pid | Pids], Limit, Cnt, Acc) ->
receive
- {'DOWN', _Ref, process, _, _} ->
+ {'DOWN', _Ref, process, X, _} when is_pid(X) ->
pmap(Fun, List, [Pid | Pids], Limit, Cnt - 1, Acc);
{res, Pid, Res} ->
pmap(Fun, List, Pids, Limit, Cnt, [Res | Acc])
@@ -2316,6 +2402,6 @@ pmap(_Fun, [], [], _Limit, 0, Acc) ->
lists:reverse(Acc);
pmap(Fun, [], [], Limit, Cnt, Acc) ->
receive
- {'DOWN', _Ref, process, _, _} ->
+ {'DOWN', _Ref, process, X, _} when is_pid(X) ->
pmap(Fun, [], [], Limit, Cnt - 1, Acc)
end.
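The is_pid/1 guards added to pmap are needed because the main cover_server now also holds name-based monitors on remote nodes: in a 'DOWN' message the fourth element is a pid for an ordinary process monitor, but {RegName,Node} for a monitor taken on a registered name. A sketch of the distinction (classify_down/1 is invented for illustration):

    classify_down({'DOWN', _Ref, process, Pid, _Info}) when is_pid(Pid) ->
        worker_exit;                                  % a pmap worker finished
    classify_down({'DOWN', _Ref, process, {_RegName, Node}, _Info}) ->
        {remote_cover_server_down, Node}.             % a remote cover_server was lost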
diff --git a/lib/tools/test/cover_SUITE.erl b/lib/tools/test/cover_SUITE.erl
index c2c708d806..3bf1b44af8 100644
--- a/lib/tools/test/cover_SUITE.erl
+++ b/lib/tools/test/cover_SUITE.erl
@@ -23,7 +23,7 @@
init_per_group/2,end_per_group/2]).
-export([start/1, compile/1, analyse/1, misc/1, stop/1,
- distribution/1, export_import/1,
+ distribution/1, reconnect/1, die_and_reconnect/1, export_import/1,
otp_5031/1, eif/1, otp_5305/1, otp_5418/1, otp_6115/1, otp_7095/1,
otp_8188/1, otp_8270/1, otp_8273/1, otp_8340/1]).
@@ -45,7 +45,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
case whereis(cover_server) of
undefined ->
- [start, compile, analyse, misc, stop, distribution,
+ [start, compile, analyse, misc, stop,
+ distribution, reconnect, die_and_reconnect,
export_import, otp_5031, eif, otp_5305, otp_5418,
otp_6115, otp_7095, otp_8188, otp_8270, otp_8273,
otp_8340];
@@ -326,14 +327,16 @@ distribution(Config) when is_list(Config) ->
?line {ok,N1} = ?t:start_node(cover_SUITE_distribution1,slave,[]),
?line {ok,N2} = ?t:start_node(cover_SUITE_distribution2,slave,[]),
?line {ok,N3} = ?t:start_node(cover_SUITE_distribution3,slave,[]),
+ ?line {ok,N4} = ?t:start_node(cover_SUITE_distribution4,slave,[]),
%% Check that an already compiled module is loaded on new nodes
?line {ok,f} = cover:compile(f),
- ?line {ok,[_,_,_]} = cover:start(nodes()),
+ ?line {ok,[_,_,_,_]} = cover:start(nodes()),
?line cover_compiled = code:which(f),
?line cover_compiled = rpc:call(N1,code,which,[f]),
?line cover_compiled = rpc:call(N2,code,which,[f]),
?line cover_compiled = rpc:call(N3,code,which,[f]),
+ ?line cover_compiled = rpc:call(N4,code,which,[f]),
%% Check that a node cannot be started twice
?line {ok,[]} = cover:start(N2),
@@ -351,6 +354,7 @@ distribution(Config) when is_list(Config) ->
?line cover_compiled = rpc:call(N1,code,which,[v]),
?line cover_compiled = rpc:call(N2,code,which,[v]),
?line cover_compiled = rpc:call(N3,code,which,[v]),
+ ?line cover_compiled = rpc:call(N4,code,which,[v]),
%% this is lost when the node is killed
?line rpc:call(N3,f,f2,[]),
@@ -385,6 +389,18 @@ distribution(Config) when is_list(Config) ->
%% reset on the remote node(s))
?line check_f_calls(1,1),
+ %% Another check that data is not fetched twice, i.e. when flushed,
+ %% analyse should not add the same data again.
+ ?line rpc:call(N4,f,f2,[]),
+ ?line ok = cover:flush(N4),
+ ?line check_f_calls(1,2),
+
+ %% Check that flush collects data so calls are not lost if node is killed
+ ?line rpc:call(N4,f,f2,[]),
+ ?line ok = cover:flush(N4),
+ ?line rpc:call(N4,erlang,halt,[]),
+ ?line check_f_calls(1,3),
+
%% Check that stop() unloads on all nodes
?line ok = cover:stop(),
?line timer:sleep(100), %% Give nodes time to unload on slow machines.
@@ -393,20 +409,117 @@ distribution(Config) when is_list(Config) ->
?line true = is_unloaded(LocalBeam),
?line true = is_unloaded(N2Beam),
- %% Check that cover_server on remote node dies if main node dies
+ %% Check that cover_server on remote node does not die if main node dies
?line {ok,[N1]} = cover:start(N1),
- ?line true = is_pid(rpc:call(N1,erlang,whereis,[cover_server])),
+ ?line true = is_pid(N1Server = rpc:call(N1,erlang,whereis,[cover_server])),
?line exit(whereis(cover_server),kill),
- ?line timer:sleep(10),
- ?line undefined = rpc:call(N1,erlang,whereis,[cover_server]),
-
+ ?line timer:sleep(100),
+ ?line N1Server = rpc:call(N1,erlang,whereis,[cover_server]),
+
%% Cleanup
?line Files = lsfiles(),
?line remove(files(Files, ".beam")),
?line ?t:stop_node(N1),
?line ?t:stop_node(N2).
-
+%% Test that a lost node is reconnected
+reconnect(Config) ->
+ DataDir = ?config(data_dir, Config),
+ ok = file:set_cwd(DataDir),
+
+ {ok,a} = compile:file(a),
+ {ok,b} = compile:file(b),
+ {ok,f} = compile:file(f),
+
+ {ok,N1} = ?t:start_node(cover_SUITE_reconnect,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ {ok,a} = cover:compile(a),
+ {ok,f} = cover:compile(f),
+ {ok,[N1]} = cover:start(nodes()),
+
+ %% Some calls to check later
+ rpc:call(N1,f,f1,[]),
+ cover:flush(N1),
+ rpc:call(N1,f,f1,[]),
+
+ %% This will cause a call to f:f2() when nodes()==[] on N1
+ rpc:cast(N1,f,call_f2_when_isolated,[]),
+
+ %% Disconnect and check that node is removed from main cover node
+ net_kernel:disconnect(N1),
+ [] = cover:which_nodes(),
+ timer:sleep(500), % allow some time for the f:f2() call
+
+ %% Add one module (b) and remove one module (a)
+ code:purge(a),
+ {module,a} = code:load_file(a),
+ {ok,b} = cover:compile(b),
+ cover_compiled = code:which(b),
+
+ [] = cover:which_nodes(),
+ check_f_calls(1,0), % only the first call - before the flush
+
+ %% Reconnect the node and check that b and f are cover compiled but not a
+ net_kernel:connect_node(N1),
+ timer:sleep(100),
+ [N1] = cover:which_nodes(), % we are reconnected
+ cover_compiled = rpc:call(N1,code,which,[b]),
+ cover_compiled = rpc:call(N1,code,which,[f]),
+ ABeam = rpc:call(N1,code,which,[a]),
+ false = (cover_compiled==ABeam),
+
+ %% Ensure that we have:
+ %% * one f1 call from before the flush,
+ %% * one f1 call from after the flush but before disconnect
+ %% * one f2 call when disconnected
+ check_f_calls(2,1),
+
+ cover:stop(),
+ ?t:stop_node(N1),
+ ok.
+
+%% Test that a lost node is reconnected - also if it has been dead
+die_and_reconnect(Config) ->
+ DataDir = ?config(data_dir, Config),
+ ok = file:set_cwd(DataDir),
+
+ {ok,f} = compile:file(f),
+
+ NodeName = cover_SUITE_die_and_reconnect,
+ {ok,N1} = ?t:start_node(NodeName,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ %% {ok,a} = cover:compile(a),
+ {ok,f} = cover:compile(f),
+ {ok,[N1]} = cover:start(nodes()),
+
+ %% Some calls to check later
+ rpc:call(N1,f,f1,[]),
+ cover:flush(N1),
+ rpc:call(N1,f,f1,[]),
+
+ %% Kill the node
+ rpc:call(N1,erlang,halt,[]),
+ [] = cover:which_nodes(),
+
+ check_f_calls(1,0), % only the first call - before the flush
+
+ %% Restart the node and check that cover reconnects
+ {ok,N1} = ?t:start_node(NodeName,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ timer:sleep(100),
+ [N1] = cover:which_nodes(), % we are reconnected
+ cover_compiled = rpc:call(N1,code,which,[f]),
+
+ %% One more call...
+ rpc:call(N1,f,f1,[]),
+
+ %% Ensure that no more calls are counted
+ check_f_calls(2,0),
+
+ cover:stop(),
+ ?t:stop_node(N1),
+ ok.
+
export_import(suite) -> [];
export_import(Config) when is_list(Config) ->
?line DataDir = ?config(data_dir, Config),
@@ -1238,4 +1351,4 @@ is_unloaded(What) ->
end.
check_f_calls(F1,F2) ->
- {ok,[{{f,f1,0},F1},{{f,f2,0},F2}]} = cover:analyse(f,calls,function).
+ {ok,[{{f,f1,0},F1},{{f,f2,0},F2}|_]} = cover:analyse(f,calls,function).
diff --git a/lib/tools/test/cover_SUITE_data/f.erl b/lib/tools/test/cover_SUITE_data/f.erl
index 1ef8bbdb49..ce2963014a 100644
--- a/lib/tools/test/cover_SUITE_data/f.erl
+++ b/lib/tools/test/cover_SUITE_data/f.erl
@@ -1,5 +1,5 @@
-module(f).
--export([f1/0,f2/0]).
+-export([f1/0,f2/0,call_f2_when_isolated/0]).
f1() ->
f1_line1,
@@ -8,3 +8,12 @@ f1() ->
f2() ->
f2_line1,
f2_line2.
+
+call_f2_when_isolated() ->
+ case nodes() of
+ [] ->
+ f2();
+ _ ->
+ timer:sleep(100),
+ call_f2_when_isolated()
+ end.