Diffstat (limited to 'lib/mnesia/src')
-rw-r--r--  lib/mnesia/src/mnesia.erl             438
-rw-r--r--  lib/mnesia/src/mnesia_backup.erl       20
-rw-r--r--  lib/mnesia/src/mnesia_bup.erl          76
-rw-r--r--  lib/mnesia/src/mnesia_controller.erl  262
-rw-r--r--  lib/mnesia/src/mnesia_dumper.erl      206
-rw-r--r--  lib/mnesia/src/mnesia_locker.erl      182
-rw-r--r--  lib/mnesia/src/mnesia_log.erl         108
-rw-r--r--  lib/mnesia/src/mnesia_tm.erl          372
8 files changed, 832 insertions, 832 deletions
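The patch below is a pure whitespace cleanup: every hunk only strips trailing spaces and tabs, which is why the insertion and deletion counts match exactly. As an illustrative sketch that is not part of the commit, a small Erlang helper along the following lines could be used to list any lines in a file that still carry trailing whitespace, for example to verify the cleanup; the module and function names (ws_check, trailing/1) are hypothetical.

    %% Hypothetical verification helper (not part of this patch).
    -module(ws_check).
    -export([trailing/1]).

    %% Return the 1-based numbers of lines in File that end in spaces or tabs.
    trailing(File) ->
        {ok, Bin} = file:read_file(File),
        Lines = binary:split(Bin, <<"\n">>, [global]),
        Numbered = lists:zip(lists:seq(1, length(Lines)), Lines),
        [N || {N, Line} <- Numbered,
              re:run(Line, <<"[ \t]+$">>) =/= nomatch].

For instance, ws_check:trailing("lib/mnesia/src/mnesia.erl") should return [] when run against a tree with this patch applied.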
diff --git a/lib/mnesia/src/mnesia.erl b/lib/mnesia/src/mnesia.erl
index 980a9c6213..3d30debc53 100644
--- a/lib/mnesia/src/mnesia.erl
+++ b/lib/mnesia/src/mnesia.erl
@@ -27,7 +27,7 @@
%% Start, stop and debugging
start/0, start/1, stop/0, % Not for public use
set_debug_level/1, lkill/0, kill/0, % Not for public use
- ms/0,
+ ms/0,
change_config/2,
%% Activity mgt
@@ -40,14 +40,14 @@
%% Access within an activity - Lock acquisition
lock/2, lock/4,
lock_table/2,
- read_lock_table/1,
+ read_lock_table/1,
write_lock_table/1,
%% Access within an activity - Updates
- write/1, s_write/1, write/3, write/5,
- delete/1, s_delete/1, delete/3, delete/5,
- delete_object/1, s_delete_object/1, delete_object/3, delete_object/5,
-
+ write/1, s_write/1, write/3, write/5,
+ delete/1, s_delete/1, delete/3, delete/5,
+ delete_object/1, s_delete_object/1, delete_object/3, delete_object/5,
+
%% Access within an activity - Reads
read/1, read/2, wread/1, read/3, read/5,
match_object/1, match_object/3, match_object/5,
@@ -58,9 +58,9 @@
first/1, next/2, last/1, prev/2,
first/3, next/4, last/3, prev/4,
- %% Iterators within an activity
+ %% Iterators within an activity
foldl/3, foldl/4, foldr/3, foldr/4,
-
+
%% Dirty access regardless of activities - Updates
dirty_write/1, dirty_write/2,
dirty_delete/1, dirty_delete/2,
@@ -72,8 +72,8 @@
dirty_select/2,
dirty_match_object/1, dirty_match_object/2, dirty_all_keys/1,
dirty_index_match_object/2, dirty_index_match_object/3,
- dirty_index_read/3, dirty_slot/2,
- dirty_first/1, dirty_next/2, dirty_last/1, dirty_prev/2,
+ dirty_index_read/3, dirty_slot/2,
+ dirty_first/1, dirty_next/2, dirty_last/1, dirty_prev/2,
%% Info
table_info/2, table_info/4, schema/0, schema/1,
@@ -102,7 +102,7 @@
dump_tables/1, wait_for_tables/2, force_load_table/1,
change_table_access_mode/2, change_table_load_order/2,
set_master_nodes/1, set_master_nodes/2,
-
+
%% Misc admin
dump_log/0, subscribe/1, unsubscribe/1, report_event/1,
@@ -112,7 +112,7 @@
%% Textfile access
load_textfile/1, dump_to_textfile/1,
-
+
%% QLC functions
table/1, table/2,
@@ -137,20 +137,20 @@
-define(DEFAULT_ACCESS, ?MODULE).
-%% Select
+%% Select
-define(PATTERN_TO_OBJECT_MATCH_SPEC(Pat), [{Pat,[],['$_']}]).
-define(PATTERN_TO_BINDINGS_MATCH_SPEC(Pat), [{Pat,[],['$$']}]).
-
+
%% Local function in order to avoid external function call
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
Value -> Value
end.
is_dollar_digits(Var) ->
case atom_to_list(Var) of
- [$$ | Digs] ->
+ [$$ | Digs] ->
is_digits(Digs);
_ ->
false
@@ -166,13 +166,13 @@ is_digits([Dig | Tail]) ->
is_digits([]) ->
true.
-has_var(X) when is_atom(X) ->
- if
- X == '_' ->
+has_var(X) when is_atom(X) ->
+ if
+ X == '_' ->
true;
- is_atom(X) ->
+ is_atom(X) ->
is_dollar_digits(X);
- true ->
+ true ->
false
end;
has_var(X) when is_tuple(X) ->
@@ -196,9 +196,9 @@ e_has_var(X, Pos) ->
start() ->
{Time , Res} = timer:tc(application, start, [?APPLICATION, temporary]),
-
+
Secs = Time div 1000000,
- case Res of
+ case Res of
ok ->
verbose("Mnesia started, ~p seconds~n",[ Secs]),
ok;
@@ -243,10 +243,10 @@ change_config(extra_db_nodes, Ns) when is_list(Ns) ->
mnesia_controller:connect_nodes(Ns);
change_config(dc_dump_limit, N) when is_number(N), N > 0 ->
case mnesia_lib:is_running() of
- yes ->
+ yes ->
mnesia_lib:set(dc_dump_limit, N),
{ok, N};
- _ ->
+ _ ->
{error, {not_started, ?APPLICATION}}
end;
change_config(BadKey, _BadVal) ->
@@ -255,7 +255,7 @@ change_config(BadKey, _BadVal) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Debugging
-set_debug_level(Level) ->
+set_debug_level(Level) ->
mnesia_subscr:set_debug_level(Level).
lkill() ->
@@ -274,9 +274,9 @@ ms() ->
mnesia_controller,
mnesia_dumper,
mnesia_loader,
- mnesia_frag,
- mnesia_frag_hash,
- mnesia_frag_old_hash,
+ mnesia_frag,
+ mnesia_frag_hash,
+ mnesia_frag_old_hash,
mnesia_index,
mnesia_kernel_sup,
mnesia_late_loader,
@@ -295,9 +295,9 @@ ms() ->
%% Keep these last in the list, so
%% mnesia_sup kills these last
- mnesia_monitor,
+ mnesia_monitor,
mnesia_event
- ].
+ ].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -305,7 +305,7 @@ ms() ->
-spec abort(_) -> no_return().
-abort(Reason) ->
+abort(Reason) ->
exit({aborted, Reason}).
is_transaction() ->
@@ -339,7 +339,7 @@ sync_transaction(Fun, Args, Retries) ->
transaction(get(mnesia_activity_state), Fun, Args, Retries, ?DEFAULT_ACCESS, sync).
-transaction(State, Fun, Args, Retries, Mod, Kind)
+transaction(State, Fun, Args, Retries, Mod, Kind)
when is_function(Fun), is_list(Args), Retries == infinity, is_atom(Mod) ->
mnesia_tm:transaction(State, Fun, Args, Retries, Mod, Kind);
transaction(State, Fun, Args, Retries, Mod, Kind)
@@ -348,7 +348,7 @@ transaction(State, Fun, Args, Retries, Mod, Kind)
transaction(_State, Fun, Args, Retries, Mod, _Kind) ->
{aborted, {badarg, Fun, Args, Retries, Mod}}.
-non_transaction(State, Fun, Args, ActivityKind, Mod)
+non_transaction(State, Fun, Args, ActivityKind, Mod)
when is_function(Fun), is_list(Args), is_atom(Mod) ->
mnesia_tm:non_transaction(State, Fun, Args, ActivityKind, Mod);
non_transaction(_State, Fun, Args, _ActivityKind, _Mod) ->
@@ -394,7 +394,7 @@ wrap_trans(State, Fun, Args, Retries, Mod, Kind) ->
{atomic, GoodRes} -> GoodRes;
BadRes -> exit(BadRes)
end.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Access within an activity - lock acquisition
@@ -507,13 +507,13 @@ good_global_nodes(Nodes) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Access within an activity - updates
-write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
+write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
Tab = element(1, Val),
write(Tab, Val, write);
write(Val) ->
abort({bad_type, Val}).
-s_write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
+s_write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
Tab = element(1, Val),
write(Tab, Val, sticky_write).
@@ -561,7 +561,7 @@ write_to_store(Tab, Store, Oid, Val) ->
_ ->
?ets_delete(Store, Oid),
?ets_insert(Store, {Oid, Val, write})
- end,
+ end,
ok;
{'EXIT', _} ->
abort({no_exists, Tab});
@@ -611,7 +611,7 @@ delete(Tid, Ts, Tab, Key, LockKind)
ok;
Protocol ->
do_dirty_delete(Protocol, Tab, Key)
- end;
+ end;
delete(_Tid, _Ts, Tab, _Key, _LockKind) ->
abort({bad_type, Tab}).
@@ -640,7 +640,7 @@ delete_object(Tab, Val, LockKind) ->
delete_object(Tid, Ts, Tab, Val, LockKind)
when is_atom(Tab), Tab /= schema, is_tuple(Val), tuple_size(Val) > 2 ->
case has_var(Val) of
- false ->
+ false ->
do_delete_object(Tid, Ts, Tab, Val, LockKind);
true ->
abort({bad_type, Tab, Val})
@@ -665,7 +665,7 @@ do_delete_object(Tid, Ts, Tab, Val, LockKind) ->
abort({bad_type, Tab, LockKind})
end,
case val({Tab, setorbag}) of
- bag ->
+ bag ->
?ets_match_delete(Store, {Oid, Val, '_'}),
?ets_insert(Store, {Oid, Val, delete_object});
_ ->
@@ -731,7 +731,7 @@ read(Tid, Ts, Tab, Key, LockKind)
add_written(?ets_lookup(Store, Oid), Tab, Objs);
_Protocol ->
dirty_read(Tab, Key)
- end;
+ end;
read(_Tid, _Ts, Tab, _Key, _LockKind) ->
abort({bad_type, Tab}).
@@ -744,7 +744,7 @@ first(Tab) ->
_ ->
abort(no_transaction)
end.
-
+
first(Tid, Ts, Tab)
when is_atom(Tab), Tab /= schema ->
case element(1, Tid) of
@@ -845,9 +845,9 @@ prev(_Tid, _Ts,Tab,_) ->
stored_keys(Tab,'$end_of_table',Prev,Ts,Op,Type) ->
case ts_keys(Ts#tidstore.store,Tab,Op,Type,[]) of
[] -> '$end_of_table';
- Keys when Type == ordered_set->
+ Keys when Type == ordered_set->
get_ordered_tskey(Prev,Keys,Op);
- Keys ->
+ Keys ->
get_next_tskey(Prev,Keys,Tab)
end;
stored_keys(Tab,{'EXIT',{aborted,R={badarg,[Tab,Key]}}},
@@ -858,7 +858,7 @@ stored_keys(Tab,{'EXIT',{aborted,R={badarg,[Tab,Key]}}},
Ops ->
case lists:last(Ops) of
[delete] -> abort(R);
- _ ->
+ _ ->
case ts_keys(Store,Tab,Op,Type,[]) of
[] -> '$end_of_table';
Keys -> get_next_tskey(Key,Keys,Tab)
@@ -869,14 +869,14 @@ stored_keys(_,{'EXIT',{aborted,R}},_,_,_,_) ->
abort(R);
stored_keys(Tab,Key,Prev,#tidstore{store=Store},Op,ordered_set) ->
case ?ets_match(Store, {{Tab, Key}, '_', '$1'}) of
- [] ->
+ [] ->
Keys = ts_keys(Store,Tab,Op,ordered_set,[Key]),
get_ordered_tskey(Prev,Keys,Op);
Ops ->
case lists:last(Ops) of
[delete] ->
mnesia:Op(Tab,Key);
- _ ->
+ _ ->
Keys = ts_keys(Store,Tab,Op,ordered_set,[Key]),
get_ordered_tskey(Prev,Keys,Op)
end
@@ -898,7 +898,7 @@ get_ordered_tskey(Prev, [_|R],Op) -> get_ordered_tskey(Prev,R,Op);
get_ordered_tskey(_, [],_) -> '$end_of_table'.
get_next_tskey(Key,Keys,Tab) ->
- Next =
+ Next =
if Key == '$end_of_table' -> hd(Keys);
true ->
case lists:dropwhile(fun(A) -> A /= Key end, Keys) of
@@ -912,7 +912,7 @@ get_next_tskey(Key,Keys,Tab) ->
_ -> %% Really slow anybody got another solution??
case dirty_read(Tab, Next) of
[] -> Next;
- _ ->
+ _ ->
%% Updated value we already returned this key
get_next_tskey(Next,Keys,Tab)
end
@@ -921,7 +921,7 @@ get_next_tskey(Key,Keys,Tab) ->
ts_keys(Store, Tab, Op, Type, Def) ->
All = ?ets_match(Store, {{Tab,'$1'},'_','$2'}),
Keys = ts_keys_1(All, Def),
- if
+ if
Type == ordered_set, Op == prev ->
lists:reverse(lists:sort(Keys));
Type == ordered_set ->
@@ -947,7 +947,7 @@ ts_keys_1([], Acc) ->
%%%%%%%%%%%%%%%%%%%%%
-%% Iterators
+%% Iterators
foldl(Fun, Acc, Tab) ->
foldl(Fun, Acc, Tab, read).
@@ -968,7 +968,7 @@ foldl(ActivityId, Opaque, Fun, Acc, Tab, LockKind) ->
close_iteration(Res, Tab).
do_foldl(A, O, Tab, '$end_of_table', Fun, RAcc, _Type, Stored) ->
- lists:foldl(fun(Key, Acc) ->
+ lists:foldl(fun(Key, Acc) ->
lists:foldl(Fun, Acc, read(A, O, Tab, Key, read))
end, RAcc, Stored);
do_foldl(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H == Key ->
@@ -983,7 +983,7 @@ do_foldl(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H > Key ->
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
{_, Tid, Ts} = get(mnesia_activity_state),
do_foldl(Tid, Ts, Tab, dirty_next(Tab, Key), Fun, NewAcc, ordered_set, [H |Stored]);
-do_foldl(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
+do_foldl(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
NewStored = ordsets:del_element(Key, Stored),
{_, Tid, Ts} = get(mnesia_activity_state),
@@ -1003,8 +1003,8 @@ foldr(Fun, Acc, Tab, LockKind) when is_function(Fun) ->
foldr(ActivityId, Opaque, Fun, Acc, Tab, LockKind) ->
{Type, TempPrev} = init_iteration(ActivityId, Opaque, Tab, LockKind),
- Prev =
- if
+ Prev =
+ if
Type == ordered_set ->
lists:reverse(TempPrev);
true -> %% Order doesn't matter for set and bag
@@ -1014,7 +1014,7 @@ foldr(ActivityId, Opaque, Fun, Acc, Tab, LockKind) ->
close_iteration(Res, Tab).
do_foldr(A, O, Tab, '$end_of_table', Fun, RAcc, _Type, Stored) ->
- lists:foldl(fun(Key, Acc) ->
+ lists:foldl(fun(Key, Acc) ->
lists:foldl(Fun, Acc, read(A, O, Tab, Key, read))
end, RAcc, Stored);
do_foldr(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H == Key ->
@@ -1029,7 +1029,7 @@ do_foldr(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H < Key ->
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
{_, Tid, Ts} = get(mnesia_activity_state),
do_foldr(Tid, Ts, Tab, dirty_prev(Tab, Key), Fun, NewAcc, ordered_set, [H |Stored]);
-do_foldr(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
+do_foldr(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
NewStored = ordsets:del_element(Key, Stored),
{_, Tid, Ts} = get(mnesia_activity_state),
@@ -1037,25 +1037,25 @@ do_foldr(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
init_iteration(ActivityId, Opaque, Tab, LockKind) ->
lock(ActivityId, Opaque, {table, Tab}, LockKind),
- Type = val({Tab, setorbag}),
+ Type = val({Tab, setorbag}),
Previous = add_previous(ActivityId, Opaque, Type, Tab),
St = val({Tab, storage_type}),
- if
- St == unknown ->
+ if
+ St == unknown ->
ignore;
true ->
mnesia_lib:db_fixtable(St, Tab, true)
- end,
+ end,
{Type, Previous}.
close_iteration(Res, Tab) ->
case val({Tab, storage_type}) of
- unknown ->
+ unknown ->
ignore;
- St ->
+ St ->
mnesia_lib:db_fixtable(St, Tab, false)
end,
- case Res of
+ case Res of
{'EXIT', {aborted, What}} ->
abort(What);
{'EXIT', What} ->
@@ -1074,7 +1074,7 @@ add_previous(_Tid, Ts, _Type, Tab) ->
%% it is correct with respect to what this particular transaction
%% has already written, deleted .... etc
-add_written([], _Tab, Objs) ->
+add_written([], _Tab, Objs) ->
Objs; % standard normal fast case
add_written(Written, Tab, Objs) ->
case val({Tab, setorbag}) of
@@ -1093,7 +1093,7 @@ add_written_to_set(Ws) ->
add_written_to_bag([{_, Val, write} | Tail], Objs, Ack) ->
add_written_to_bag(Tail, lists:delete(Val, Objs), [Val | Ack]);
-add_written_to_bag([], Objs, Ack) ->
+add_written_to_bag([], Objs, Ack) ->
Objs ++ lists:reverse(Ack); %% Oldest write first as in ets
add_written_to_bag([{_, _ , delete} | Tail], _Objs, _Ack) ->
%% This transaction just deleted all objects
@@ -1118,7 +1118,7 @@ match_object(Tab, Pat, LockKind) ->
abort(no_transaction)
end.
-match_object(Tid, Ts, Tab, Pat, LockKind)
+match_object(Tid, Ts, Tab, Pat, LockKind)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
case element(1, Tid) of
ets ->
@@ -1142,11 +1142,11 @@ add_written_match(S, Pat, Tab, Objs) ->
add_match(Ops, Objs, val({Tab, setorbag})).
find_ops(S, Tab, Pat) ->
- GetWritten = [{{{Tab, '_'}, Pat, write}, [], ['$_']},
+ GetWritten = [{{{Tab, '_'}, Pat, write}, [], ['$_']},
{{{Tab, '_'}, '_', delete}, [], ['$_']},
{{{Tab, '_'}, Pat, delete_object}, [], ['$_']}],
ets:select(S, GetWritten).
-
+
add_match([], Objs, _Type) ->
Objs;
add_match(Written, Objs, ordered_set) ->
@@ -1162,13 +1162,13 @@ add_match([{Oid, Val, write}|R], Objs, set) ->
add_match(R, [Val | deloid(Oid,Objs)],set).
%% For ordered_set only !!
-add_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs], Acc)
+add_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs], Acc)
when Key > element(2, Obj) ->
add_ordered_match(Written, Objs, [Obj|Acc]);
-add_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_], Acc)
+add_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_], Acc)
when Key < element(2, Obj) ->
add_ordered_match(Rest, [Val|Objs],Acc);
-add_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
+add_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
when Key < element(2, Obj) ->
add_ordered_match(Rest,Objs,Acc);
%% Greater than last object
@@ -1176,7 +1176,7 @@ add_ordered_match([{_, Val, write}|Rest], [], Acc) ->
add_ordered_match(Rest, [Val], Acc);
add_ordered_match([_|Rest], [], Acc) ->
add_ordered_match(Rest, [], Acc);
-%% Keys are equal from here
+%% Keys are equal from here
add_ordered_match([{_, Val, write}|Rest], [_Obj|Objs], Acc) ->
add_ordered_match(Rest, [Val|Objs], Acc);
add_ordered_match([{_, _Val, delete}|Rest], [_Obj|Objs], Acc) ->
@@ -1207,7 +1207,7 @@ add_sel_match([Op={Oid, _, delete}|R], Objs, Type, Acc) ->
end;
add_sel_match([Op = {_Oid, Val, delete_object}|R], Objs, Type, Acc) ->
case lists:delete(Val, Objs) of
- Objs ->
+ Objs ->
add_sel_match(R, Objs, Type, [Op|Acc]);
NewObjs when Type == set ->
add_sel_match(R, NewObjs, Type, Acc);
@@ -1224,26 +1224,26 @@ add_sel_match([Op={Oid={_,Key}, Val, write}|R], Objs, bag, Acc) ->
end;
add_sel_match([Op={Oid, Val, write}|R], Objs, set, Acc) ->
case deloid(Oid,Objs) of
- Objs ->
+ Objs ->
add_sel_match(R, Objs,set, [Op|Acc]);
NewObjs ->
add_sel_match(R, [Val | NewObjs],set, Acc)
end.
%% For ordered_set only !!
-add_sel_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs],Acc)
+add_sel_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs],Acc)
when Key > element(2, Obj) ->
add_sel_ordered_match(Written, Objs, [Obj|Acc]);
-add_sel_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_],Acc)
+add_sel_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_],Acc)
when Key < element(2, Obj) ->
add_sel_ordered_match(Rest,[Val|Objs],Acc);
-add_sel_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
+add_sel_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
when Key < element(2, Obj) ->
add_sel_ordered_match(Rest,Objs,Acc);
%% Greater than last object
add_sel_ordered_match(Ops1, [], Acc) ->
{lists:reverse(Acc), Ops1};
-%% Keys are equal from here
+%% Keys are equal from here
add_sel_ordered_match([{_, Val, write}|Rest], [_Obj|Objs], Acc) ->
add_sel_ordered_match(Rest, [Val|Objs], Acc);
add_sel_ordered_match([{_, _Val, delete}|Rest], [_Obj|Objs], Acc) ->
@@ -1264,11 +1264,11 @@ deloid(Oid, [H | T]) ->
[H | deloid(Oid, T)].
%%%%%%%%%%%%%%%%%%
-% select
+% select
select(Tab, Pat) ->
select(Tab, Pat, read).
-select(Tab, Pat, LockKind)
+select(Tab, Pat, LockKind)
when is_atom(Tab), Tab /= schema, is_list(Pat) ->
case get(mnesia_activity_state) of
{?DEFAULT_ACCESS, Tid, Ts} ->
@@ -1293,13 +1293,13 @@ fun_select(Tid, Ts, Tab, Spec, LockKind, TabPat, SelectFun) ->
select_lock(Tid,Ts,LockKind,Spec,Tab),
Store = Ts#tidstore.store,
Written = ?ets_match_object(Store, {{TabPat, '_'}, '_', '_'}),
- case Written of
- [] ->
+ case Written of
+ [] ->
%% Nothing changed in the table during this transaction,
%% Simple case get results from [d]ets
SelectFun(Spec);
- _ ->
- %% Hard (slow case) records added or deleted earlier
+ _ ->
+ %% Hard (slow case) records added or deleted earlier
%% in the transaction, have to cope with that.
Type = val({Tab, setorbag}),
FixedSpec = get_record_pattern(Spec),
@@ -1326,7 +1326,7 @@ select_lock(Tid,Ts,LockKind,Spec,Tab) ->
end.
%% Breakable Select
-select(Tab, Pat, NObjects, LockKind)
+select(Tab, Pat, NObjects, LockKind)
when is_atom(Tab), Tab /= schema, is_list(Pat), is_integer(NObjects) ->
case get(mnesia_activity_state) of
{?DEFAULT_ACCESS, Tid, Ts} ->
@@ -1356,26 +1356,26 @@ fun_select(Tid, Ts, Tab, Spec, LockKind, TabPat, Init, NObjects, Node, Storage)
select_lock(Tid,Ts,LockKind,Spec,Tab),
Store = Ts#tidstore.store,
do_fixtable(Tab, Store),
-
- Written0 = ?ets_match_object(Store, {{TabPat, '_'}, '_', '_'}),
- case Written0 of
- [] ->
+
+ Written0 = ?ets_match_object(Store, {{TabPat, '_'}, '_', '_'}),
+ case Written0 of
+ [] ->
%% Nothing changed in the table during this transaction,
%% Simple case get results from [d]ets
select_state(Init(Spec),Def);
- _ ->
- %% Hard (slow case) records added or deleted earlier
+ _ ->
+ %% Hard (slow case) records added or deleted earlier
%% in the transaction, have to cope with that.
Type = val({Tab, setorbag}),
- Written =
+ Written =
if Type == ordered_set -> %% Sort stable
lists:keysort(1,Written0);
- true ->
+ true ->
Written0
end,
FixedSpec = get_record_pattern(Spec),
CMS = ets:match_spec_compile(Spec),
- trans_select(Init(FixedSpec),
+ trans_select(Init(FixedSpec),
Def#mnesia_select{written=Written,spec=CMS,type=Type, orig=FixedSpec})
end;
_Protocol ->
@@ -1394,7 +1394,7 @@ select(Cont) ->
select_cont(_Tid,_Ts,'$end_of_table') ->
'$end_of_table';
-select_cont(Tid,_Ts,State=#mnesia_select{tid=Tid,cont=Cont, orig=Ms})
+select_cont(Tid,_Ts,State=#mnesia_select{tid=Tid,cont=Cont, orig=Ms})
when element(1,Tid) == ets ->
case Cont of
'$end_of_table' -> '$end_of_table';
@@ -1415,7 +1415,7 @@ trans_select('$end_of_table', #mnesia_select{written=Written0,spec=CMS,type=Type
trans_select({TabRecs,Cont}, State = #mnesia_select{written=Written0,spec=CMS,type=Type}) ->
{FixedRes,Written} = add_sel_match(Written0, TabRecs, Type),
select_state({ets:match_spec_run(FixedRes, CMS),Cont},
- State#mnesia_select{written=Written}).
+ State#mnesia_select{written=Written}).
select_state({Matches, Cont}, MS) ->
{Matches, MS#mnesia_select{cont=Cont}};
@@ -1433,9 +1433,9 @@ all_keys(Tab) ->
Mod:all_keys(Tid, Ts, Tab, read);
_ ->
abort(no_transaction)
- end.
+ end.
-all_keys(Tid, Ts, Tab, LockKind)
+all_keys(Tid, Ts, Tab, LockKind)
when is_atom(Tab), Tab /= schema ->
Pat0 = val({Tab, wild_pattern}),
Pat = setelement(2, Pat0, '$1'),
@@ -1446,7 +1446,7 @@ all_keys(Tid, Ts, Tab, LockKind)
_ ->
Keys
end;
-all_keys(_Tid, _Ts, Tab, _LockKind) ->
+all_keys(_Tid, _Ts, Tab, _LockKind) ->
abort({bad_type, Tab}).
index_match_object(Pat, Attr) when is_tuple(Pat), tuple_size(Pat) > 2 ->
@@ -1465,7 +1465,7 @@ index_match_object(Tab, Pat, Attr, LockKind) ->
abort(no_transaction)
end.
-index_match_object(Tid, Ts, Tab, Pat, Attr, LockKind)
+index_match_object(Tid, Ts, Tab, Pat, Attr, LockKind)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
case element(1, Tid) of
ets ->
@@ -1501,7 +1501,7 @@ index_read(Tab, Key, Attr) ->
abort(no_transaction)
end.
-index_read(Tid, Ts, Tab, Key, Attr, LockKind)
+index_read(Tid, Ts, Tab, Key, Attr, LockKind)
when is_atom(Tab), Tab /= schema ->
case element(1, Tid) of
ets ->
@@ -1536,7 +1536,7 @@ dirty_write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
dirty_write(Tab, Val);
dirty_write(Val) ->
abort({bad_type, Val}).
-
+
dirty_write(Tab, Val) ->
do_dirty_write(async_dirty, Tab, Val).
@@ -1562,7 +1562,7 @@ dirty_delete(Oid) ->
dirty_delete(Tab, Key) ->
do_dirty_delete(async_dirty, Tab, Key).
-
+
do_dirty_delete(SyncMode, Tab, Key) when is_atom(Tab), Tab /= schema ->
Oid = {Tab, Key},
mnesia_tm:dirty(SyncMode, {Oid, Oid, delete});
@@ -1582,7 +1582,7 @@ do_dirty_delete_object(SyncMode, Tab, Val)
when is_atom(Tab), Tab /= schema, is_tuple(Val), tuple_size(Val) > 2 ->
Oid = {Tab, element(2, Val)},
case has_var(Val) of
- false ->
+ false ->
mnesia_tm:dirty(SyncMode, {Oid, Val, delete_object});
true ->
abort({bad_type, Tab, Val})
@@ -1600,7 +1600,7 @@ dirty_update_counter(Counter, _Incr) ->
dirty_update_counter(Tab, Key, Incr) ->
do_dirty_update_counter(async_dirty, Tab, Key, Incr).
-
+
do_dirty_update_counter(SyncMode, Tab, Key, Incr)
when is_atom(Tab), Tab /= schema, is_integer(Incr) ->
case ?catch_val({Tab, record_validation}) of
@@ -1638,7 +1638,7 @@ dirty_match_object(Pat) when is_tuple(Pat), tuple_size(Pat) > 2 ->
dirty_match_object(Tab, Pat);
dirty_match_object(Pat) ->
abort({bad_type, Pat}).
-
+
dirty_match_object(Tab, Pat)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
dirty_rpc(Tab, ?MODULE, remote_dirty_match_object, [Tab, Pat]);
@@ -1697,8 +1697,8 @@ remote_dirty_select(Tab, [{HeadPat,_, _}] = Spec, [Pos | Tail])
%% Returns the records without applying the match spec
%% The actual filtering is handled by the caller
CMS = ets:match_spec_compile(Spec),
- case val({Tab, setorbag}) of
- ordered_set ->
+ case val({Tab, setorbag}) of
+ ordered_set ->
ets:match_spec_run(lists:sort(Recs), CMS);
_ ->
ets:match_spec_run(Recs, CMS)
@@ -1730,14 +1730,14 @@ dirty_all_keys(Tab) when is_atom(Tab), Tab /= schema ->
end;
dirty_all_keys(Tab) ->
abort({bad_type, Tab}).
-
+
dirty_index_match_object(Pat, Attr) when is_tuple(Pat), tuple_size(Pat) > 2 ->
Tab = element(1, Pat),
dirty_index_match_object(Tab, Pat, Attr);
dirty_index_match_object(Pat, _Attr) ->
abort({bad_type, Pat}).
-dirty_index_match_object(Tab, Pat, Attr)
+dirty_index_match_object(Tab, Pat, Attr)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
case mnesia_schema:attr_tab_to_pos(Tab, Attr) of
Pos when Pos =< tuple_size(Pat) ->
@@ -1752,7 +1752,7 @@ dirty_index_match_object(Tab, Pat, Attr)
[Tab, Pat, Pos]);
true ->
abort({bad_type, Tab, Attr, Elem})
- end
+ end
end;
BadPos ->
abort({bad_type, Tab, BadPos})
@@ -1810,7 +1810,7 @@ do_dirty_rpc(Tab, Node, M, F, Args) ->
%% Sync with mnesia_monitor
try sys:get_status(mnesia_monitor) catch _:_ -> ok end,
case mnesia_controller:call({check_w2r, Node, Tab}) of % Sync
- NewNode when NewNode =:= Node ->
+ NewNode when NewNode =:= Node ->
ErrorTag = mnesia_lib:dirty_rpc_error_tag(Reason),
mnesia:abort({ErrorTag, Args});
NewNode ->
@@ -1821,9 +1821,9 @@ do_dirty_rpc(Tab, Node, M, F, Args) ->
%% to acquire the lock on the NewNode.
%% In this context we do neither know
%% the kind or granularity of the lock.
- %% --> Abort the transaction
+ %% --> Abort the transaction
mnesia:abort({node_not_running, Node});
- {error, {node_not_running, _}} ->
+ {error, {node_not_running, _}} ->
%% Mnesia is stopping
mnesia:abort({no_exists, Args});
_ ->
@@ -1858,21 +1858,21 @@ table_info(_Tid, _Ts, Tab, Item) ->
any_table_info(Tab, Item).
-any_table_info(Tab, Item) when is_atom(Tab) ->
+any_table_info(Tab, Item) when is_atom(Tab) ->
case Item of
master_nodes ->
mnesia_recover:get_master_nodes(Tab);
-% checkpoints ->
+% checkpoints ->
% case ?catch_val({Tab, commit_work}) of
% [{checkpoints, List} | _] -> List;
% No_chk when is_list(No_chk) -> [];
% Else -> info_reply(Else, Tab, Item)
% end;
- size ->
+ size ->
raw_table_info(Tab, Item);
memory ->
raw_table_info(Tab, Item);
- type ->
+ type ->
case ?catch_val({Tab, setorbag}) of
{'EXIT', _} ->
abort({no_exists, Tab, Item});
@@ -1885,8 +1885,8 @@ any_table_info(Tab, Item) when is_atom(Tab) ->
abort({no_exists, Tab, Item});
Props ->
lists:map(fun({setorbag, Type}) -> {type, Type};
- (Prop) -> Prop end,
- Props)
+ (Prop) -> Prop end,
+ Props)
end;
name ->
Tab;
@@ -1927,14 +1927,14 @@ bad_info_reply(_Tab, memory) -> 0;
bad_info_reply(Tab, Item) -> abort({no_exists, Tab, Item}).
%% Raw info about all tables
-schema() ->
+schema() ->
mnesia_schema:info().
%% Raw info about one tables
-schema(Tab) ->
+schema(Tab) ->
mnesia_schema:info(Tab).
-error_description(Err) ->
+error_description(Err) ->
mnesia_lib:error_desc(Err).
info() ->
@@ -1951,18 +1951,18 @@ info() ->
io:format( "---> Processes waiting for locks <--- ~n", []),
lists:foreach(fun({Oid, Op, _Pid, Tid, OwnerTid}) ->
io:format("Tid ~p waits for ~p lock "
- "on oid ~p owned by ~p ~n",
+ "on oid ~p owned by ~p ~n",
[Tid, Op, Oid, OwnerTid])
end, Queued),
mnesia_tm:display_info(group_leader(), TmInfo),
-
+
Pat = {'_', unclear, '_'},
Uncertain = ets:match_object(mnesia_decision, Pat),
io:format( "---> Uncertain transactions <--- ~n", []),
lists:foreach(fun({Tid, _, Nodes}) ->
io:format("Tid ~w waits for decision "
- "from ~w~n",
+ "from ~w~n",
[Tid, Nodes])
end, Uncertain),
@@ -2023,15 +2023,15 @@ display_tab_info() ->
io:format("master node tables = ~p~n", [lists:sort(MasterTabs)]),
Tabs = system_info(tables),
-
+
{Unknown, Ram, Disc, DiscOnly} =
lists:foldl(fun storage_count/2, {[], [], [], []}, Tabs),
-
+
io:format("remote = ~p~n", [lists:sort(Unknown)]),
io:format("ram_copies = ~p~n", [lists:sort(Ram)]),
io:format("disc_copies = ~p~n", [lists:sort(Disc)]),
io:format("disc_only_copies = ~p~n", [lists:sort(DiscOnly)]),
-
+
Rfoldl = fun(T, Acc) ->
Rpat =
case val({T, access_mode}) of
@@ -2041,7 +2041,7 @@ display_tab_info() ->
table_info(T, where_to_commit)
end,
case lists:keysearch(Rpat, 1, Acc) of
- {value, {_Rpat, Rtabs}} ->
+ {value, {_Rpat, Rtabs}} ->
lists:keyreplace(Rpat, 1, Acc, {Rpat, [T | Rtabs]});
false ->
[{Rpat, [T]} | Acc]
@@ -2161,20 +2161,20 @@ system_info2(fallback_activated) ->
system_info2(version) ->
case ?catch_val(version) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
Apps = application:loaded_applications(),
case lists:keysearch(?APPLICATION, 1, Apps) of
{value, {_Name, _Desc, Version}} ->
Version;
false ->
%% Ensure that it does not match
- {mnesia_not_loaded, node(), now()}
+ {mnesia_not_loaded, node(), now()}
end;
Version ->
Version
end;
-system_info2(access_module) -> mnesia_monitor:get_env(access_module);
+system_info2(access_module) -> mnesia_monitor:get_env(access_module);
system_info2(auto_repair) -> mnesia_monitor:get_env(auto_repair);
system_info2(is_running) -> mnesia_lib:is_running();
system_info2(backup_module) -> mnesia_monitor:get_env(backup_module);
@@ -2183,7 +2183,7 @@ system_info2(debug) -> mnesia_monitor:get_env(debug);
system_info2(dump_log_load_regulation) -> mnesia_monitor:get_env(dump_log_load_regulation);
system_info2(dump_log_write_threshold) -> mnesia_monitor:get_env(dump_log_write_threshold);
system_info2(dump_log_time_threshold) -> mnesia_monitor:get_env(dump_log_time_threshold);
-system_info2(dump_log_update_in_place) ->
+system_info2(dump_log_update_in_place) ->
mnesia_monitor:get_env(dump_log_update_in_place);
system_info2(max_wait_for_decision) -> mnesia_monitor:get_env(max_wait_for_decision);
system_info2(embedded_mnemosyne) -> mnesia_monitor:get_env(embedded_mnemosyne);
@@ -2204,9 +2204,9 @@ system_info2(transaction_failures) -> mnesia_lib:read_counter(trans_failures);
system_info2(transaction_commits) -> mnesia_lib:read_counter(trans_commits);
system_info2(transaction_restarts) -> mnesia_lib:read_counter(trans_restarts);
system_info2(transaction_log_writes) -> mnesia_dumper:get_log_writes();
-system_info2(core_dir) -> mnesia_monitor:get_env(core_dir);
-system_info2(no_table_loaders) -> mnesia_monitor:get_env(no_table_loaders);
-system_info2(dc_dump_limit) -> mnesia_monitor:get_env(dc_dump_limit);
+system_info2(core_dir) -> mnesia_monitor:get_env(core_dir);
+system_info2(no_table_loaders) -> mnesia_monitor:get_env(no_table_loaders);
+system_info2(dc_dump_limit) -> mnesia_monitor:get_env(dc_dump_limit);
system_info2(send_compressed) -> mnesia_monitor:get_env(send_compressed);
system_info2(Item) -> exit({badarg, Item}).
@@ -2281,7 +2281,7 @@ system_info_items(no) ->
core_dir,
version
].
-
+
system_info() ->
IsRunning = mnesia_lib:is_running(),
case IsRunning of
@@ -2308,62 +2308,62 @@ load_mnesia_or_abort() ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Database mgt
-create_schema(Ns) ->
+create_schema(Ns) ->
mnesia_bup:create_schema(Ns).
-delete_schema(Ns) ->
+delete_schema(Ns) ->
mnesia_schema:delete_schema(Ns).
-backup(Opaque) ->
+backup(Opaque) ->
mnesia_log:backup(Opaque).
-backup(Opaque, Mod) ->
+backup(Opaque, Mod) ->
mnesia_log:backup(Opaque, Mod).
-traverse_backup(S, T, Fun, Acc) ->
+traverse_backup(S, T, Fun, Acc) ->
mnesia_bup:traverse_backup(S, T, Fun, Acc).
-traverse_backup(S, SM, T, TM, F, A) ->
+traverse_backup(S, SM, T, TM, F, A) ->
mnesia_bup:traverse_backup(S, SM, T, TM, F, A).
-install_fallback(Opaque) ->
+install_fallback(Opaque) ->
mnesia_bup:install_fallback(Opaque).
-install_fallback(Opaque, Mod) ->
+install_fallback(Opaque, Mod) ->
mnesia_bup:install_fallback(Opaque, Mod).
-uninstall_fallback() ->
+uninstall_fallback() ->
mnesia_bup:uninstall_fallback().
-uninstall_fallback(Args) ->
+uninstall_fallback(Args) ->
mnesia_bup:uninstall_fallback(Args).
-activate_checkpoint(Args) ->
+activate_checkpoint(Args) ->
mnesia_checkpoint:activate(Args).
-deactivate_checkpoint(Name) ->
+deactivate_checkpoint(Name) ->
mnesia_checkpoint:deactivate(Name).
-backup_checkpoint(Name, Opaque) ->
+backup_checkpoint(Name, Opaque) ->
mnesia_log:backup_checkpoint(Name, Opaque).
-backup_checkpoint(Name, Opaque, Mod) ->
+backup_checkpoint(Name, Opaque, Mod) ->
mnesia_log:backup_checkpoint(Name, Opaque, Mod).
-restore(Opaque, Args) ->
+restore(Opaque, Args) ->
mnesia_schema:restore(Opaque, Args).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt
-create_table(Arg) ->
+create_table(Arg) ->
mnesia_schema:create_table(Arg).
-create_table(Name, Arg) when is_list(Arg) ->
+create_table(Name, Arg) when is_list(Arg) ->
mnesia_schema:create_table([{name, Name}| Arg]);
create_table(Name, Arg) ->
{aborted, badarg, Name, Arg}.
-delete_table(Tab) ->
+delete_table(Tab) ->
mnesia_schema:delete_table(Tab).
add_table_copy(Tab, N, S) ->
@@ -2371,38 +2371,38 @@ add_table_copy(Tab, N, S) ->
del_table_copy(Tab, N) ->
mnesia_schema:del_table_copy(Tab, N).
-move_table_copy(Tab, From, To) ->
+move_table_copy(Tab, From, To) ->
mnesia_schema:move_table(Tab, From, To).
-add_table_index(Tab, Ix) ->
+add_table_index(Tab, Ix) ->
mnesia_schema:add_table_index(Tab, Ix).
-del_table_index(Tab, Ix) ->
+del_table_index(Tab, Ix) ->
mnesia_schema:del_table_index(Tab, Ix).
-transform_table(Tab, Fun, NewA) ->
+transform_table(Tab, Fun, NewA) ->
case catch val({Tab, record_name}) of
- {'EXIT', Reason} ->
+ {'EXIT', Reason} ->
mnesia:abort(Reason);
- OldRN ->
+ OldRN ->
mnesia_schema:transform_table(Tab, Fun, NewA, OldRN)
end.
-transform_table(Tab, Fun, NewA, NewRN) ->
+transform_table(Tab, Fun, NewA, NewRN) ->
mnesia_schema:transform_table(Tab, Fun, NewA, NewRN).
change_table_copy_type(T, N, S) ->
mnesia_schema:change_table_copy_type(T, N, S).
clear_table(Tab) ->
- case get(mnesia_activity_state) of
+ case get(mnesia_activity_state) of
State = {Mod, Tid, _Ts} when element(1, Tid) =/= tid ->
transaction(State, fun() -> do_clear_table(Tab) end, [], infinity, Mod, sync);
- undefined ->
+ undefined ->
transaction(undefined, fun() -> do_clear_table(Tab) end, [], infinity, ?DEFAULT_ACCESS, sync);
_ -> %% Not allowed for clear_table
mnesia:abort({aborted, nested_transaction})
end.
-
+
do_clear_table(Tab) ->
case get(mnesia_activity_state) of
{?DEFAULT_ACCESS, Tid, Ts} ->
@@ -2415,7 +2415,7 @@ do_clear_table(Tab) ->
clear_table(Tid, Ts, Tab, Obj) when element(1, Tid) =:= tid ->
Store = Ts#tidstore.store,
- mnesia_locker:wlock_table(Tid, Store, Tab),
+ mnesia_locker:wlock_table(Tid, Store, Tab),
Oid = {Tab, '_'},
?ets_insert(Store, {Oid, Obj, clear_table}),
ok.
@@ -2423,26 +2423,26 @@ clear_table(Tid, Ts, Tab, Obj) when element(1, Tid) =:= tid ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt - user properties
-read_table_property(Tab, PropKey) ->
+read_table_property(Tab, PropKey) ->
val({Tab, user_property, PropKey}).
-write_table_property(Tab, Prop) ->
+write_table_property(Tab, Prop) ->
mnesia_schema:write_table_property(Tab, Prop).
-delete_table_property(Tab, PropKey) ->
+delete_table_property(Tab, PropKey) ->
mnesia_schema:delete_table_property(Tab, PropKey).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt - user properties
-change_table_frag(Tab, FragProp) ->
+change_table_frag(Tab, FragProp) ->
mnesia_schema:change_table_frag(Tab, FragProp).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt - table load
%% Dump a ram table to disc
-dump_tables(Tabs) ->
+dump_tables(Tabs) ->
mnesia_schema:dump_tables(Tabs).
%% allow the user to wait for some tables to be loaded
@@ -2455,10 +2455,10 @@ force_load_table(Tab) ->
Other -> Other
end.
-change_table_access_mode(T, Access) ->
+change_table_access_mode(T, Access) ->
mnesia_schema:change_table_access_mode(T, Access).
-change_table_load_order(T, O) ->
+change_table_load_order(T, O) ->
mnesia_schema:change_table_load_order(T, O).
change_table_majority(T, M) ->
@@ -2471,13 +2471,13 @@ set_master_nodes(Nodes) when is_list(Nodes) ->
yes ->
CsPat = {{'_', cstruct}, '_'},
Cstructs0 = ?ets_match_object(mnesia_gvar, CsPat),
- Cstructs = [Cs || {_, Cs} <- Cstructs0],
+ Cstructs = [Cs || {_, Cs} <- Cstructs0],
log_valid_master_nodes(Cstructs, Nodes, UseDir, IsRunning);
_NotRunning ->
case UseDir of
true ->
mnesia_lib:lock_table(schema),
- Res =
+ Res =
case mnesia_schema:read_cstructs_from_disc() of
{ok, Cstructs} ->
log_valid_master_nodes(Cstructs, Nodes, UseDir, IsRunning);
@@ -2497,7 +2497,7 @@ log_valid_master_nodes(Cstructs, Nodes, UseDir, IsRunning) ->
Fun = fun(Cs) ->
Copies = mnesia_lib:copy_holders(Cs),
Valid = mnesia_lib:intersect(Nodes, Copies),
- {Cs#cstruct.name, Valid}
+ {Cs#cstruct.name, Valid}
end,
Args = lists:map(Fun, Cstructs),
mnesia_recover:log_master_nodes(Args, UseDir, IsRunning).
@@ -2523,7 +2523,7 @@ set_master_nodes(Tab, Nodes) when is_list(Nodes) ->
case UseDir of
true ->
mnesia_lib:lock_table(schema),
- Res =
+ Res =
case mnesia_schema:read_cstructs_from_disc() of
{ok, Cstructs} ->
case lists:keysearch(Tab, 2, Cstructs) of
@@ -2553,7 +2553,7 @@ set_master_nodes(Tab, Nodes) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Misc admin
-dump_log() ->
+dump_log() ->
mnesia_controller:sync_dump_log(user).
subscribe(What) ->
@@ -2568,10 +2568,10 @@ report_event(Event) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Snmp
-snmp_open_table(Tab, Us) ->
+snmp_open_table(Tab, Us) ->
mnesia_schema:add_snmp(Tab, Us).
-snmp_close_table(Tab) ->
+snmp_close_table(Tab) ->
mnesia_schema:del_snmp(Tab).
snmp_get_row(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(RowIndex) ->
@@ -2583,26 +2583,26 @@ snmp_get_row(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(RowIndex)
SnmpType = val({Tab,snmp}),
Fix = fun({{_,Key},Row,Op}, Res) ->
case mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) of
- RowIndex ->
+ RowIndex ->
case Op of
write -> {ok, Row};
_ ->
undefined
end;
- _ ->
+ _ ->
Res
end
end,
lists:foldl(Fix, undefined, Ops);
Key ->
case Mod:read(Tid, Ts, Tab, Key, read) of
- [Row] ->
+ [Row] ->
{ok, Row};
- _ ->
+ _ ->
undefined
end
end;
- _ ->
+ _ ->
dirty_rpc(Tab, mnesia_snmp_hook, get_row, [Tab, RowIndex])
end;
snmp_get_row(Tab, _RowIndex) ->
@@ -2613,7 +2613,7 @@ snmp_get_row(Tab, _RowIndex) ->
snmp_get_next_index(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(RowIndex) ->
{Next,OrigKey} = dirty_rpc(Tab, mnesia_snmp_hook, get_next_index, [Tab, RowIndex]),
case get(mnesia_activity_state) of
- {_Mod, Tid, #tidstore{store=Store}} when element(1, Tid) =:= tid ->
+ {_Mod, Tid, #tidstore{store=Store}} when element(1, Tid) =:= tid ->
case OrigKey of
undefined ->
snmp_order_keys(Store, Tab, RowIndex, []);
@@ -2639,7 +2639,7 @@ snmp_get_next_index(Tab, _RowIndex) ->
snmp_order_keys(Store,Tab,RowIndex,Def) ->
All = ?ets_match(Store, {{Tab,'$1'},'_','$2'}),
SnmpType = val({Tab,snmp}),
- Keys0 = [mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) ||
+ Keys0 = [mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) ||
Key <- ts_keys_1(All, Def)],
Keys = lists:sort(Keys0),
get_ordered_snmp_key(RowIndex,Keys).
@@ -2648,7 +2648,7 @@ get_ordered_snmp_key(Prev, [First|_]) when Prev < First -> {ok, First};
get_ordered_snmp_key(Prev, [_|R]) ->
get_ordered_snmp_key(Prev, R);
get_ordered_snmp_key(_, []) ->
- endOfTable.
+ endOfTable.
%%%%%%%%%%
@@ -2657,7 +2657,7 @@ snmp_get_mnesia_key(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(Row
{_Mod, Tid, Ts} when element(1, Tid) =:= tid ->
Res = dirty_rpc(Tab,mnesia_snmp_hook,get_mnesia_key,[Tab,RowIndex]),
snmp_filter_key(Res, RowIndex, Tab, Ts#tidstore.store);
- _ ->
+ _ ->
dirty_rpc(Tab, mnesia_snmp_hook, get_mnesia_key, [Tab, RowIndex])
end;
snmp_get_mnesia_key(Tab, _RowIndex) ->
@@ -2670,7 +2670,7 @@ snmp_oid_to_mnesia_key(RowIndex, Tab) ->
{ok, MnesiaKey} -> MnesiaKey;
undefined -> unknown
end;
- MnesiaKey ->
+ MnesiaKey ->
MnesiaKey
end.
@@ -2690,20 +2690,20 @@ snmp_filter_key(undefined, RowIndex, Tab, Store) ->
SnmpType = val({Tab,snmp}),
Fix = fun({{_,Key},_,Op}, Res) ->
case mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) of
- RowIndex ->
+ RowIndex ->
case Op of
write -> {ok, Key};
_ ->
undefined
end;
- _ ->
+ _ ->
Res
end
end,
lists:foldl(Fix, undefined, Ops);
Key ->
case ?ets_lookup(Store, {Tab,Key}) of
- [] ->
+ [] ->
undefined;
Ops ->
case lists:last(Ops) of
@@ -2716,9 +2716,9 @@ snmp_filter_key(undefined, RowIndex, Tab, Store) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Textfile access
-load_textfile(F) ->
+load_textfile(F) ->
mnesia_text:load_textfile(F).
-dump_to_textfile(F) ->
+dump_to_textfile(F) ->
mnesia_text:dump_to_textfile(F).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2727,7 +2727,7 @@ dump_to_textfile(F) ->
table(Tab) ->
table(Tab, []).
table(Tab,Opts) ->
- {[Trav,Lock,NObjects],QlcOptions0} =
+ {[Trav,Lock,NObjects],QlcOptions0} =
qlc_opts(Opts,[{traverse,select},{lock,read},{n_objects,100}]),
TF = case Trav of
{select,Ms} ->
@@ -2740,10 +2740,10 @@ table(Tab,Opts) ->
Pre = fun(Arg) -> pre_qlc(Arg, Tab) end,
Post = fun() -> post_qlc(Tab) end,
Info = fun(Tag) -> qlc_info(Tab, Tag) end,
- ParentFun = fun() ->
- {mnesia_activity, mnesia:get_activity_id()}
+ ParentFun = fun() ->
+ {mnesia_activity, mnesia:get_activity_id()}
end,
- Lookup =
+ Lookup =
case Trav of
{select, _} -> [];
_ ->
@@ -2757,27 +2757,27 @@ table(Tab,Opts) ->
[{lookup_fun, LFun}]
end,
MFA = fun(Type) -> qlc_format(Type, Tab, NObjects, Lock, Opts) end,
- QlcOptions = [{pre_fun, Pre}, {post_fun, Post},
- {info_fun, Info}, {parent_fun, ParentFun},
+ QlcOptions = [{pre_fun, Pre}, {post_fun, Post},
+ {info_fun, Info}, {parent_fun, ParentFun},
{format_fun, MFA}|Lookup] ++ QlcOptions0,
qlc:table(TF, QlcOptions).
pre_qlc(Opts, Tab) ->
- {_,Tid,_} =
+ {_,Tid,_} =
case get(mnesia_activity_state) of
undefined ->
case lists:keysearch(parent_value, 1, Opts) of
{value, {parent_value,{mnesia_activity,undefined}}} ->
abort(no_transaction);
{value, {parent_value,{mnesia_activity,Aid}}} ->
- {value,{stop_fun,Stop}} =
+ {value,{stop_fun,Stop}} =
lists:keysearch(stop_fun,1,Opts),
put_activity_id(Aid,Stop),
Aid;
_ ->
abort(no_transaction)
end;
- Else ->
+ Else ->
Else
end,
case element(1,Tid) of
@@ -2785,9 +2785,9 @@ pre_qlc(Opts, Tab) ->
_ ->
case ?catch_val({Tab, setorbag}) of
ordered_set -> ok;
- _ ->
+ _ ->
dirty_rpc(Tab, mnesia_tm, fixtable, [Tab,true,self()]),
- ok
+ ok
end
end.
@@ -2806,7 +2806,7 @@ post_qlc(Tab) ->
qlc_select('$end_of_table') -> [];
qlc_select({[], Cont}) -> qlc_select(select(Cont));
-qlc_select({Objects, Cont}) ->
+qlc_select({Objects, Cont}) ->
Objects ++ fun() -> qlc_select(select(Cont)) end.
qlc_opts(Opts, Keys) when is_list(Opts) ->
@@ -2826,7 +2826,7 @@ qlc_opts(Opts,[],Acc) -> {lists:reverse(Acc),Opts}.
qlc_info(Tab, num_of_objects) ->
dirty_rpc(Tab, ?MODULE, raw_table_info, [Tab, size]);
-qlc_info(_, keypos) -> 2;
+qlc_info(_, keypos) -> 2;
qlc_info(_, is_unique_objects) -> true;
qlc_info(Tab, is_unique_keys) ->
case val({Tab, type}) of
@@ -2836,9 +2836,9 @@ qlc_info(Tab, is_unique_keys) ->
end;
qlc_info(Tab, is_sorted_objects) ->
case val({Tab, type}) of
- ordered_set ->
+ ordered_set ->
case ?catch_val({Tab, frag_hash}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
ascending;
_ -> %% Fragmented tables are not ordered
no
@@ -2856,11 +2856,11 @@ qlc_format({match_spec, Ms}, Tab, NObjects, Lock, Opts) ->
{?MODULE, table, [Tab,[{traverse,{select,Ms}},{n_objects, NObjects}, {lock,Lock}|Opts]]};
qlc_format({lookup, 2, Keys}, Tab, _, Lock, _) ->
io_lib:format("lists:flatmap(fun(V) -> "
- "~w:read(~w, V, ~w) end, ~w)",
+ "~w:read(~w, V, ~w) end, ~w)",
[?MODULE, Tab, Lock, Keys]);
qlc_format({lookup, Index,Keys}, Tab, _, _, _) ->
io_lib:format("lists:flatmap(fun(V) -> "
- "~w:index_read(~w, V, ~w) end, ~w)",
+ "~w:index_read(~w, V, ~w) end, ~w)",
[?MODULE, Tab, Index, Keys]).
@@ -2874,7 +2874,7 @@ do_fixtable(Tab, Store) ->
ok;
_ ->
case ?ets_match_object(Store, {fixtable, {Tab, '_'}}) of
- [] ->
+ [] ->
Node = dirty_rpc(Tab, mnesia_tm, fixtable, [Tab,true,self()]),
?ets_insert(Store, {fixtable, {Tab, Node}});
_ ->
@@ -2886,10 +2886,10 @@ do_fixtable(Tab, Store) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Mnemosyne exclusive
-get_activity_id() ->
+get_activity_id() ->
get(mnesia_activity_state).
-put_activity_id(Activity) ->
+put_activity_id(Activity) ->
mnesia_tm:put_activity_id(Activity).
-put_activity_id(Activity,Fun) ->
+put_activity_id(Activity,Fun) ->
mnesia_tm:put_activity_id(Activity,Fun).
diff --git a/lib/mnesia/src/mnesia_backup.erl b/lib/mnesia/src/mnesia_backup.erl
index f372ca0be5..736f2ed9bf 100644
--- a/lib/mnesia/src/mnesia_backup.erl
+++ b/lib/mnesia/src/mnesia_backup.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -56,14 +56,14 @@
-export([
%% Write access
- open_write/1,
- write/2,
- commit_write/1,
+ open_write/1,
+ write/2,
+ commit_write/1,
abort_write/1,
%% Read access
- open_read/1,
- read/1,
+ open_read/1,
+ read/1,
close_read/1
]).
diff --git a/lib/mnesia/src/mnesia_bup.erl b/lib/mnesia/src/mnesia_bup.erl
index 14414537b9..fd87be1759 100644
--- a/lib/mnesia/src/mnesia_bup.erl
+++ b/lib/mnesia/src/mnesia_bup.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -62,7 +62,7 @@
fallback_tmp,
skip_tables = [],
keep_tables = [],
- default_op = keep_tables
+ default_op = keep_tables
}).
-type fallback_args() :: #fallback_args{}.
@@ -134,7 +134,7 @@ abort_restore(R, What, Args, Reason) ->
[Mod, What, Args, Reason]),
catch apply(Mod, close_read, [Opaque]),
throw({error, Reason}).
-
+
fallback_to_schema() ->
Fname = fallback_bup(),
fallback_to_schema(Fname).
@@ -146,14 +146,14 @@ fallback_to_schema(Fname) ->
{error, Reason};
Schema ->
case catch lookup_schema(schema, Schema) of
- {error, _} ->
+ {error, _} ->
{error, "No schema in fallback"};
List ->
{ok, fallback, List}
end
end.
-%% Opens Opaque reads schema and then close
+%% Opens Opaque reads schema and then close
read_schema(Mod, Opaque) ->
R = #restore{bup_module = Mod, bup_data = Opaque},
case catch read_schema_section(R) of
@@ -163,7 +163,7 @@ read_schema(Mod, Opaque) ->
catch safe_apply(R2, close_read, [R2#restore.bup_data]),
Schema
end.
-
+
%% Open backup media and extract schema
%% rewind backup media and leave it open
%% Returns {R, {Header, Schema}}
@@ -227,7 +227,7 @@ refresh_cookie(Schema, NewCookie) ->
Cs2 = Cs#cstruct{cookie = NewCookie},
Item = {schema, schema, mnesia_schema:cs2list(Cs2)},
lists:keyreplace(schema, 2, Schema, Item);
-
+
false ->
Reason = "No schema found. Cannot be used as backup.",
throw({error, {Reason, Schema}})
@@ -273,7 +273,7 @@ convert_0_1([{schema, db_nodes, DbNodes} | Schema], Acc, Cs) ->
convert_0_1([{schema, version, Version} | Schema], Acc, Cs) ->
convert_0_1(Schema, Acc, Cs#cstruct{version = Version});
convert_0_1([{schema, Tab, Def} | Schema], Acc, Cs) ->
- Head =
+ Head =
case lists:keysearch(index, 1, Def) of
{value, {index, PosList}} ->
%% Remove the snmp "index"
@@ -334,7 +334,7 @@ create_schema(Ns, ok) ->
case mnesia_lib:ensure_loaded(?APPLICATION) of
ok ->
case mnesia_monitor:get_env(schema_location) of
- ram ->
+ ram ->
{error, {has_no_disc, node()}};
_ ->
case mnesia_schema:opt_create_dir(true, mnesia_lib:dir()) of
@@ -358,7 +358,7 @@ create_schema(Ns, ok) ->
{error, Reason}
end
end
- end;
+ end;
{error, Reason} ->
{error, Reason}
end;
@@ -434,7 +434,7 @@ check_fallback_args([Arg | Tail], FA) ->
check_fallback_args([], FA) ->
{ok, FA}.
-check_fallback_arg_type(Arg, FA) ->
+check_fallback_arg_type(Arg, FA) ->
case Arg of
{scope, global} ->
FA#fallback_args{scope = global};
@@ -462,10 +462,10 @@ atom_list([H | T]) when is_atom(H) ->
atom_list(T);
atom_list([]) ->
ok.
-
+
do_install_fallback(FA) ->
Pid = spawn_link(?MODULE, install_fallback_master, [self(), FA]),
- Res =
+ Res =
receive
{'EXIT', Pid, Reason} -> % if appl has trapped exit
{error, {'EXIT', Reason}};
@@ -506,7 +506,7 @@ restore_recs(Recs, Header, Schema, {start, FA}) ->
Pids = [spawn_link(N, ?MODULE, fallback_receiver, Args) || N <- Ns],
send_fallback(Pids, {start, Header, Schema2}),
Res = restore_recs(Recs, Header, Schema2, Pids),
- global:del_lock({{mnesia_table_lock, schema}, self()}, Ns),
+ global:del_lock({{mnesia_table_lock, schema}, self()}, Ns),
Res
end;
@@ -578,7 +578,7 @@ fallback_tmp_name() -> "FALLBACK.TMP".
-spec fallback_receiver(pid(), fallback_args()) -> no_return().
fallback_receiver(Master, FA) ->
process_flag(trap_exit, true),
-
+
case catch register(mnesia_fallback, self()) of
{'EXIT', _} ->
Reason = {already_exists, node()},
@@ -610,7 +610,7 @@ local_fallback_error(Master, Reason) ->
Master ! {self(), {error, Reason}},
unlink(Master),
exit(Reason).
-
+
check_fallback_dir(Master, FA) ->
case mnesia:system_info(schema_location) of
ram ->
@@ -659,7 +659,7 @@ fallback_receiver_loop(Master, R, FA, State) ->
R2 = safe_apply(R, write, [R#restore.bup_data, Recs]),
Master ! {self(), ok},
fallback_receiver_loop(Master, R2, FA, records);
-
+
{Master, swap} when State =/= schema ->
?eval_debug_fun({?MODULE, fallback_receiver_loop, pre_swap}, []),
safe_apply(R, commit_write, [R#restore.bup_data]),
@@ -834,7 +834,7 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
ok = dets:delete(schema, {schema, Tab}),
create_dat_files(Tail, LocalTabs);
Storage =:= disc_only_copies ->
- Args = [{file, TmpFile}, {keypos, 2},
+ Args = [{file, TmpFile}, {keypos, 2},
{type, mnesia_lib:disk_type(Tab, Cs#cstruct.type)}],
Open = fun(T, LT) when T =:= LT#local_tab.name ->
case mnesia_lib:dets_sync_open(T, Args) of
@@ -861,9 +861,9 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
Swap = fun(T, LT) when T =:= LT#local_tab.name ->
Expunge(),
case LT#local_tab.opened of
- true ->
+ true ->
Close(T,LT);
- false ->
+ false ->
Open(T,LT),
Close(T,LT)
end,
@@ -887,8 +887,8 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
create_dat_files(Tail, LocalTabs);
Storage =:= ram_copies; Storage =:= disc_copies ->
Open = fun(T, LT) when T =:= LT#local_tab.name ->
- mnesia_log:open_log({?MODULE, T},
- mnesia_log:dcl_log_header(),
+ mnesia_log:open_log({?MODULE, T},
+ mnesia_log:dcl_log_header(),
TmpFile,
false,
false,
@@ -917,7 +917,7 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
true ->
Log = mnesia_log:open_log(fallback_tab,
mnesia_log:dcd_log_header(),
- DcdFile,
+ DcdFile,
false),
mnesia_log:close_log(Log),
case LT#local_tab.opened of
@@ -926,7 +926,7 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
false ->
Open(T,LT),
Close(T,LT)
- end,
+ end,
case file:rename(TmpFile, DclFile) of
ok ->
ok;
@@ -959,7 +959,7 @@ create_dat_files([], _LocalTabs) ->
ok.
uninstall_fallback() ->
- uninstall_fallback([{scope, global}]).
+ uninstall_fallback([{scope, global}]).
uninstall_fallback(Args) ->
case check_fallback_args(Args, #fallback_args{}) of
@@ -969,7 +969,7 @@ uninstall_fallback(Args) ->
{error, Reason}
end.
-do_uninstall_fallback(FA) ->
+do_uninstall_fallback(FA) ->
%% Ensure that we access the intended Mnesia
%% directory. This function may not be called
%% during startup since it will cause the
@@ -1040,11 +1040,11 @@ do_uninstall(_ClientPid, [], GoodPids, BadNodes, BadRes) ->
local_uninstall_fallback(Master, FA) ->
%% Don't trap exit
-
+
register(mnesia_fallback, self()), % May exit
FA2 = check_fallback_dir(Master, FA), % May exit
Master ! {self(), started},
-
+
receive
{Master, do_uninstall} ->
?eval_debug_fun({?MODULE, uninstall_fallback2, pre_delete}, []),
@@ -1052,7 +1052,7 @@ local_uninstall_fallback(Master, FA) ->
Tmp = FA2#fallback_args.fallback_tmp,
Bup = FA2#fallback_args.fallback_bup,
file:delete(Tmp),
- Res =
+ Res =
case fallback_exists(Bup) of
true -> file:delete(Bup);
false -> ok
@@ -1079,7 +1079,7 @@ rec_uninstall(ClientPid, [], Res) ->
ClientPid ! {self(), Res},
unlink(ClientPid),
exit(normal).
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Backup traversal
@@ -1130,7 +1130,7 @@ do_traverse_backup(ClientPid, Source, SourceMod, Target, TargetMod, Fun, Acc) ->
if
TargetMod =/= read_only ->
case catch do_apply(TargetMod, open_write, [Target], Target) of
- {error, Error} ->
+ {error, Error} ->
unlink(ClientPid),
ClientPid ! {iter_done, self(), {error, Error}},
exit(Error);
@@ -1140,15 +1140,15 @@ do_traverse_backup(ClientPid, Source, SourceMod, Target, TargetMod, Fun, Acc) ->
ignore
end,
A = {start, Fun, Acc, TargetMod, Iter},
- Res =
+ Res =
case iterate(SourceMod, fun trav_apply/4, Source, A) of
{ok, {iter, _, Acc2, _, Iter2}} when TargetMod =/= read_only ->
case catch do_apply(TargetMod, commit_write, [Iter2], Iter2) of
- {error, Reason} ->
+ {error, Reason} ->
{error, Reason};
- _ ->
+ _ ->
{ok, Acc2}
- end;
+ end;
{ok, {iter, _, Acc2, _, _}} ->
{ok, Acc2};
{error, Reason} when TargetMod =/= read_only->
diff --git a/lib/mnesia/src/mnesia_controller.erl b/lib/mnesia/src/mnesia_controller.erl
index 6a561394d5..e8232278a2 100644
--- a/lib/mnesia/src/mnesia_controller.erl
+++ b/lib/mnesia/src/mnesia_controller.erl
@@ -107,14 +107,14 @@
-include("mnesia.hrl").
--define(SERVER_NAME, ?MODULE).
+-define(SERVER_NAME, ?MODULE).
-record(state, {supervisor,
schema_is_merged = false,
early_msgs = [],
- loader_pid = [], %% Was Pid is now [{Pid,Work}|..]
+ loader_pid = [], %% Was Pid is now [{Pid,Work}|..]
loader_queue, %% Was list is now gb_tree
- sender_pid = [], %% Was a pid or undef is now [{Pid,Work}|..]
+ sender_pid = [], %% Was a pid or undef is now [{Pid,Work}|..]
sender_queue = [],
late_loader_queue, %% Was list is now gb_tree
dumper_pid, %% Dumper or schema commit pid
@@ -124,12 +124,12 @@
is_stopping = false
}).
%% Backwards Comp. Sender_pid is now a list of senders..
-get_senders(#state{sender_pid = Pids}) when is_list(Pids) -> Pids.
+get_senders(#state{sender_pid = Pids}) when is_list(Pids) -> Pids.
%% Backwards Comp. loader_pid is now a list of loaders..
-get_loaders(#state{loader_pid = Pids}) when is_list(Pids) -> Pids.
+get_loaders(#state{loader_pid = Pids}) when is_list(Pids) -> Pids.
max_loaders() ->
case ?catch_val(no_table_loaders) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
mnesia_lib:set(no_table_loaders,1),
1;
Val -> Val
@@ -153,7 +153,7 @@ max_loaders() ->
remote_storage
}).
--record(disc_load, {table,
+-record(disc_load, {table,
reason,
opt_reply_to
}).
@@ -184,7 +184,7 @@ max_loaders() ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
Value -> Value
end.
@@ -199,7 +199,7 @@ sync_dump_log(InitBy) ->
async_dump_log(InitBy) ->
?SERVER_NAME ! {async_dump_log, InitBy}.
-
+
%% Wait for tables to be active
%% If needed, we will wait for Mnesia to start
%% If Mnesia stops, we will wait for Mnesia to restart
@@ -227,7 +227,7 @@ do_wait_for_tables(Tabs, Timeout) ->
exit(Pid, timeout),
reply_wait(Tabs)
end.
-
+
reply_wait(Tabs) ->
case catch mnesia_lib:active_tables() of
{'EXIT', _} ->
@@ -270,7 +270,7 @@ rec_tabs([Tab | Tabs], AllTabs, From, Init) ->
%% This will trigger an exit signal
%% to mnesia_init
exit(wait_for_tables_timeout);
-
+
{'EXIT', Init, _} ->
%% Oops, mnesia_init stopped,
exit(mnesia_stopped)
@@ -319,7 +319,7 @@ get_network_copy(Tab, Cs) ->
% We can't let the controller queue this one
% because that may cause a deadlock between schema_operations
% and initial tableloadings which both takes schema locks.
-% But we have to get copier_done msgs when the other side
+% But we have to get copier_done msgs when the other side
% goes down.
call({add_other, self()}),
Reason = {dumper,add_table_copy},
@@ -341,14 +341,14 @@ get_network_copy(Tab, Cs) ->
ignore
end,
Res#loader_done.reply;
- #loader_done{} ->
+ #loader_done{} ->
Res#loader_done.reply;
Else ->
{not_loaded, Else}
end.
%% This functions is invoked from the dumper
-%%
+%%
%% There are two cases here:
%% startup ->
%% no need for sync, since mnesia_controller not started yet
@@ -380,11 +380,11 @@ force_load_table(Tab) when is_atom(Tab), Tab /= schema ->
end;
force_load_table(Tab) ->
{error, {bad_type, Tab}}.
-
+
do_force_load_table(Tab) ->
Loaded = ?catch_val({Tab, load_reason}),
case Loaded of
- unknown ->
+ unknown ->
set({Tab, load_by_force}, true),
mnesia_late_loader:async_late_disc_load(node(), [Tab], forced_by_user),
wait_for_tables([Tab], infinity);
@@ -394,7 +394,7 @@ do_force_load_table(Tab) ->
wait_for_tables([Tab], infinity);
_ ->
ok
- end.
+ end.
master_nodes_updated(schema, _Masters) ->
ignore;
master_nodes_updated(Tab, Masters) ->
@@ -438,15 +438,15 @@ connect_nodes(Ns) ->
connect_nodes(Ns, fun default_merge/1).
connect_nodes(Ns, UserFun) ->
- case mnesia:system_info(is_running) of
+ case mnesia:system_info(is_running) of
no ->
{error, {node_not_running, node()}};
- yes ->
+ yes ->
Pid = spawn_link(?MODULE,connect_nodes2,[self(),Ns, UserFun]),
- receive
- {?MODULE, Pid, Res, New} ->
+ receive
+ {?MODULE, Pid, Res, New} ->
case Res of
- ok ->
+ ok ->
mnesia_lib:add_list(extra_db_nodes, New),
{ok, New};
{aborted, {throw, Str}} when is_list(Str) ->
@@ -454,8 +454,8 @@ connect_nodes(Ns, UserFun) ->
{error, {merge_schema_failed, lists:flatten(Str)}};
Else ->
{error, Else}
- end;
- {'EXIT', Pid, Reason} ->
+ end;
+ {'EXIT', Pid, Reason} ->
{error, Reason}
end
end.
@@ -466,16 +466,16 @@ connect_nodes2(Father, Ns, UserFun) ->
{NewC, OldC} = mnesia_recover:connect_nodes(Ns),
Connected = NewC ++OldC,
New1 = mnesia_lib:intersect(Ns, Connected),
- New = New1 -- Current,
+ New = New1 -- Current,
process_flag(trap_exit, true),
Res = try_merge_schema(New, [], UserFun),
Msg = {schema_is_merged, [], late_merge, []},
multicall([node()|Ns], Msg),
- After = val({current, db_nodes}),
+ After = val({current, db_nodes}),
Father ! {?MODULE, self(), Res, mnesia_lib:intersect(Ns,After)},
unlink(Father),
ok.
-
+
%% Merge the local schema with the schema on other nodes.
%% But first we must let all processes that want to force
%% load tables wait until the schema merge is done.
@@ -483,7 +483,7 @@ connect_nodes2(Father, Ns, UserFun) ->
merge_schema() ->
AllNodes = mnesia_lib:all_nodes(),
case try_merge_schema(AllNodes, [node()], fun default_merge/1) of
- ok ->
+ ok ->
schema_is_merged();
{aborted, {throw, Str}} when is_list(Str) ->
fatal("Failed to merge schema: ~s~n", [Str]);
@@ -535,7 +535,7 @@ im_running(OldFriends, NewFriends) ->
schema_is_merged() ->
MsgTag = schema_is_merged,
SafeLoads = initial_safe_loads(),
-
+
%% At this point we do not know anything about
%% which tables the other nodes have already
%% loaded, and therefore we let the normal
@@ -545,7 +545,7 @@ schema_is_merged() ->
%% that all nodes tell each other directly
%% when they have loaded a table and are
%% willing to share it.
-
+
try_schedule_late_disc_load(SafeLoads, initial, MsgTag).
@@ -589,7 +589,7 @@ remote_call(Node, Func, Args) ->
Else ->
Else
end.
-
+
multicall(Nodes, Msg) ->
{Good, Bad} = gen_server:multi_call(Nodes, ?MODULE, Msg, infinity),
PatchedGood = [Reply || {_Node, Reply} <- Good],
@@ -621,9 +621,9 @@ init([Parent]) ->
Msg = {async_dump_log, time_threshold},
{ok, Ref} = timer:send_interval(Interval, Msg),
mnesia_dumper:start_regulator(),
-
+
Empty = gb_trees:empty(),
- {ok, #state{supervisor = Parent, dump_log_timer_ref = Ref,
+ {ok, #state{supervisor = Parent, dump_log_timer_ref = Ref,
loader_queue = Empty,
late_loader_queue = Empty}}.
@@ -656,17 +656,17 @@ handle_call(block_controller, From, State) ->
handle_call({update,Fun}, From, State) ->
Res = (catch Fun()),
- reply(From, Res),
+ reply(From, Res),
noreply(State);
handle_call(get_cstructs, From, State) ->
Tabs = val({schema, tables}),
Cstructs = [val({T, cstruct}) || T <- Tabs],
Running = val({current, db_nodes}),
- reply(From, {cstructs, Cstructs, Running}),
+ reply(From, {cstructs, Cstructs, Running}),
noreply(State);
-handle_call({schema_is_merged, [], late_merge, []}, From,
+handle_call({schema_is_merged, [], late_merge, []}, From,
State = #state{schema_is_merged = Merged}) ->
case Merged of
{false, Node} when Node == node(From) ->
@@ -697,8 +697,8 @@ handle_call(disc_load_intents,From,State = #state{loader_queue=LQ,late_loader_qu
handle_call({update_where_to_write, [add, Tab, AddNode], _From}, _Dummy, State) ->
Current = val({current, db_nodes}),
- Res =
- case lists:member(AddNode, Current) and
+ Res =
+ case lists:member(AddNode, Current) and
(State#state.schema_is_merged == true) of
true ->
mnesia_lib:add_lsort({Tab, where_to_write}, AddNode),
@@ -732,7 +732,7 @@ handle_call({add_active_replica, [Tab, ToNode, RemoteS, AccessMode], From},
noreply(State#state{early_msgs = [{call, Msg, undefined} | Msgs]})
end;
-handle_call({unannounce_add_table_copy, [Tab, Node], From}, ReplyTo, State) ->
+handle_call({unannounce_add_table_copy, [Tab, Node], From}, ReplyTo, State) ->
KnownNode = lists:member(node(From), val({current, db_nodes})),
Merged = State#state.schema_is_merged,
if
@@ -752,16 +752,16 @@ handle_call({unannounce_add_table_copy, [Tab, Node], From}, ReplyTo, State) ->
end;
handle_call({net_load, Tab, Cs}, From, State) ->
- State2 =
+ State2 =
case State#state.schema_is_merged of
- true ->
+ true ->
Worker = #net_load{table = Tab,
opt_reply_to = From,
reason = {dumper,add_table_copy},
cstruct = Cs
},
add_worker(Worker, State);
- false ->
+ false ->
reply(From, {not_loaded, schema_not_merged}),
State
end,
@@ -804,16 +804,16 @@ handle_call({add_other, Who}, _From, State = #state{others=Others0}) ->
handle_call({del_other, Who}, _From, State = #state{others=Others0}) ->
Others = lists:delete(Who, Others0),
{reply, ok, State#state{others=Others}};
-
+
handle_call(Msg, _From, State) ->
error("~p got unexpected call: ~p~n", [?SERVER_NAME, Msg]),
noreply(State).
-late_disc_load(TabsR, Reason, RemoteLoaders, From,
+late_disc_load(TabsR, Reason, RemoteLoaders, From,
State = #state{loader_queue = LQ, late_loader_queue = LLQ}) ->
verbose("Intend to load tables: ~p~n", [TabsR]),
?eval_debug_fun({?MODULE, late_disc_load},
- [{tabs, TabsR},
+ [{tabs, TabsR},
{reason, Reason},
{loaders, RemoteLoaders}]),
@@ -822,14 +822,14 @@ late_disc_load(TabsR, Reason, RemoteLoaders, From,
%% Remove deleted tabs and queued/loaded
LocalTabs = gb_sets:from_ordset(lists:sort(mnesia_lib:val({schema,local_tables}))),
- Filter = fun(TabInfo0, Acc) ->
- TabInfo = {Tab,_} =
- case TabInfo0 of
+ Filter = fun(TabInfo0, Acc) ->
+ TabInfo = {Tab,_} =
+ case TabInfo0 of
{_,_} -> TabInfo0;
TabN -> {TabN,Reason}
end,
case gb_sets:is_member(Tab, LocalTabs) of
- true ->
+ true ->
case ?catch_val({Tab, where_to_read}) == node() of
true -> Acc;
false ->
@@ -841,12 +841,12 @@ late_disc_load(TabsR, Reason, RemoteLoaders, From,
false -> Acc
end
end,
-
+
Tabs = lists:foldl(Filter, [], TabsR),
-
+
Nodes = val({current, db_nodes}),
LateQueue = late_loaders(Tabs, RemoteLoaders, Nodes, LLQ),
- State#state{late_loader_queue = LateQueue}.
+ State#state{late_loader_queue = LateQueue}.
late_loaders([{Tab, Reason} | Tabs], RemoteLoaders, Nodes, LLQ) ->
case gb_trees:is_defined(Tab, LLQ) of
@@ -859,7 +859,7 @@ late_loaders([{Tab, Reason} | Tabs], RemoteLoaders, Nodes, LLQ) ->
LateLoad = #late_load{table=Tab,loaders=LoadNodes,reason=Reason},
late_loaders(Tabs, RemoteLoaders, Nodes, gb_trees:insert(Tab,LateLoad,LLQ));
true ->
- late_loaders(Tabs, RemoteLoaders, Nodes, LLQ)
+ late_loaders(Tabs, RemoteLoaders, Nodes, LLQ)
end;
late_loaders([], _RemoteLoaders, _Nodes, LLQ) ->
LLQ.
@@ -899,7 +899,7 @@ late_load_filter([RL | RemoteLoaders], Tab, Nodes, Acc) ->
end;
late_load_filter([], _Tab, _Nodes, Acc) ->
Acc.
-
+
%%----------------------------------------------------------------------
%% Func: handle_cast/2
%% Returns: {noreply, State} |
@@ -911,7 +911,7 @@ handle_cast({release_schema_commit_lock, _Owner}, State) ->
if
State#state.is_stopping == true ->
{stop, shutdown, State};
- true ->
+ true ->
case State#state.dumper_queue of
[#schema_commit_lock{}|Rest] ->
[_Worker | Rest] = State#state.dumper_queue,
@@ -932,7 +932,7 @@ handle_cast(unblock_controller, State) ->
[_Worker | Rest] = State#state.dumper_queue,
State2 = State#state{dumper_pid = undefined,
dumper_queue = Rest},
- State3 = opt_start_worker(State2),
+ State3 = opt_start_worker(State2),
noreply(State3)
end;
@@ -948,31 +948,31 @@ handle_cast({mnesia_down, Node}, State) ->
%% Fix if we are late_merging against the node that went down
case State#state.schema_is_merged of
- {false, Node} ->
+ {false, Node} ->
spawn(?MODULE, call, [{schema_is_merged, [], late_merge, []}]);
_ ->
ignore
end,
-
+
%% Fix internal stuff
LateQ = remove_loaders(Alltabs, Node, State#state.late_loader_queue),
-
+
case get_senders(State) ++ get_loaders(State) of
[] -> ignore;
- Senders ->
+ Senders ->
lists:foreach(fun({Pid,_}) -> Pid ! {copier_done, Node} end,
Senders)
end,
- lists:foreach(fun(Pid) -> Pid ! {copier_done,Node} end,
+ lists:foreach(fun(Pid) -> Pid ! {copier_done,Node} end,
State#state.others),
-
+
Remove = fun(ST) ->
node(ST#send_table.receiver_pid) /= Node
end,
NewSenders = lists:filter(Remove, State#state.sender_queue),
Early = remove_early_messages(State#state.early_msgs, Node),
- noreply(State#state{sender_queue = NewSenders,
- early_msgs = Early,
+ noreply(State#state{sender_queue = NewSenders,
+ early_msgs = Early,
late_loader_queue = LateQ
});
@@ -981,8 +981,8 @@ handle_cast({merging_schema, Node}, State) ->
false ->
%% This comes from dynamic connect_nodes which are made
%% after mnesia:start() and the schema_merge.
- ImANewKidInTheBlock =
- (val({schema, storage_type}) == ram_copies)
+ ImANewKidInTheBlock =
+ (val({schema, storage_type}) == ram_copies)
andalso (mnesia_lib:val({schema, local_tables}) == [schema]),
case ImANewKidInTheBlock of
true -> %% I'm newly started ram_node..
@@ -1000,7 +1000,7 @@ handle_cast(Msg, State) when State#state.schema_is_merged /= true ->
noreply(State#state{early_msgs = [{cast, Msg} | Msgs]});
%% This must be done after schema_is_merged otherwise adopt_orphan
-%% might trigger a table load from wrong nodes as a result of that we don't
+%% might trigger a table load from the wrong nodes, because we do not yet
%% know which tables we can safely load first.
handle_cast({im_running, Node, NewFriends}, State) ->
LocalTabs = mnesia_lib:local_active_tables() -- [schema],
@@ -1027,7 +1027,7 @@ handle_cast({sync_tabs, Tabs, From}, State) ->
handle_cast({i_have_tab, Tab, Node}, State) ->
case lists:member(Node, val({current, db_nodes})) of
- true ->
+ true ->
State2 = node_has_tabs([Tab], Node, State),
noreply(State2);
false ->
@@ -1043,10 +1043,10 @@ handle_cast({force_load_updated, Tab}, State) ->
State2 = node_has_tabs([Tab], SomeNode, State),
noreply(State2)
end;
-
+
handle_cast({master_nodes_updated, Tab, Masters}, State) ->
Active = val({Tab, active_replicas}),
- Valid =
+ Valid =
case val({Tab, load_by_force}) of
true ->
Active;
@@ -1066,10 +1066,10 @@ handle_cast({master_nodes_updated, Tab, Masters}, State) ->
State2 = node_has_tabs([Tab], SomeNode, State),
noreply(State2)
end;
-
+
handle_cast({adopt_orphans, Node, Tabs}, State) ->
State2 = node_has_tabs(Tabs, Node, State),
-
+
case ?catch_val({node_up,Node}) of
true -> ignore;
_ ->
@@ -1101,7 +1101,7 @@ handle_cast(Msg, State) ->
error("~p got unexpected cast: ~p~n", [?SERVER_NAME, Msg]),
noreply(State).
-handle_sync_tabs([Tab | Tabs], From) ->
+handle_sync_tabs([Tab | Tabs], From) ->
case val({Tab, where_to_read}) of
nowhere ->
case get({sync_tab, Tab}) of
@@ -1145,7 +1145,7 @@ handle_info(#dumper_done{worker_pid=Pid, worker_res=Res}, State) ->
{stop, fatal, State}
end;
-handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
+handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
LateQueue0 = State0#state.late_loader_queue,
State1 = State0#state{loader_pid = lists:keydelete(WPid,1,get_loaders(State0))},
@@ -1153,7 +1153,7 @@ handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
case Done#loader_done.is_loaded of
true ->
%% Optional table announcement
- if
+ if
Done#loader_done.needs_announce == true,
Done#loader_done.needs_reply == true ->
i_have_tab(Tab),
@@ -1187,7 +1187,7 @@ handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
State1#state{late_loader_queue=gb_trees:delete_any(Tab, LateQueue0)};
false ->
%% Either the node went down or table was not
- %% loaded remotly yet
+			  %% loaded remotely yet
case Done#loader_done.needs_reply of
true ->
reply(Done#loader_done.reply_to,
@@ -1210,7 +1210,7 @@ handle_info(#sender_done{worker_pid=Pid, worker_res=Res}, State) ->
Senders = get_senders(State),
{value, {Pid,_Worker}} = lists:keysearch(Pid, 1, Senders),
if
- Res == ok ->
+ Res == ok ->
State2 = State#state{sender_pid = lists:keydelete(Pid, 1, Senders)},
State3 = opt_start_worker(State2),
noreply(State3);
@@ -1252,7 +1252,7 @@ handle_info(Msg = {'EXIT', Pid, R}, State) when R /= wait_for_tables_timeout ->
{stop, fatal, State};
false ->
case lists:keymember(Pid, 1, get_loaders(State)) of
- true ->
+ true ->
fatal("Loader crashed: ~p~n state: ~p~n", [R, State]),
{stop, fatal, State};
false ->
@@ -1338,7 +1338,7 @@ code_change(_OldVsn, State0, _Extra) ->
State1 = case State0#state.loader_pid of
Pids when is_list(Pids) -> State0;
undefined -> State0#state{loader_pid = [],loader_queue=gb_trees:empty()};
- Pid when is_pid(Pid) ->
+ Pid when is_pid(Pid) ->
[Loader|Rest] = State0#state.loader_queue,
LQ0 = [{element(2,Rec),Rec} || Rec <- Rest],
LQ1 = lists:sort(LQ0),
@@ -1346,7 +1346,7 @@ code_change(_OldVsn, State0, _Extra) ->
State0#state{loader_pid=[{Pid,Loader}], loader_queue=LQ}
end,
%% LateLoaderQueue
- State = if is_list(State1#state.late_loader_queue) ->
+ State = if is_list(State1#state.late_loader_queue) ->
LLQ0 = State1#state.late_loader_queue,
LLQ1 = lists:sort([{element(2,Rec),Rec} || Rec <- LLQ0]),
LLQ = gb_trees:from_orddict(LLQ1),
@@ -1355,7 +1355,7 @@ code_change(_OldVsn, State0, _Extra) ->
State1
end,
{ok, State}.
-
+
%%%----------------------------------------------------------------------
%%% Internal functions
%%%----------------------------------------------------------------------
@@ -1365,20 +1365,20 @@ maybe_log_mnesia_down(N) ->
%% so if we are not running (i.e haven't decided which tables
%% to load locally), don't log mnesia_down yet.
case mnesia_lib:is_running() of
- yes ->
+ yes ->
verbose("Logging mnesia_down ~w~n", [N]),
mnesia_recover:log_mnesia_down(N),
ok;
- _ ->
+ _ ->
Filter = fun(Tab) ->
inactive_copy_holders(Tab, N)
end,
HalfLoadedTabs = lists:any(Filter, val({schema, local_tables}) -- [schema]),
- if
+ if
HalfLoadedTabs == true ->
verbose("Logging mnesia_down ~w~n", [N]),
mnesia_recover:log_mnesia_down(N),
- ok;
+ ok;
true ->
%% Unfortunately we have not loaded some common
%% tables yet, so we cannot rely on the nodedown
@@ -1407,7 +1407,7 @@ orphan_tables([Tab | Tabs], Node, Ns, Local, Remote) ->
BeingCreated = (?catch_val({Tab, create_table}) == true),
Read = val({Tab, where_to_read}),
case lists:member(Node, DiscCopyHolders) of
- _ when BeingCreated == true ->
+ _ when BeingCreated == true ->
orphan_tables(Tabs, Node, Ns, Local, Remote);
	_ when Read == node() -> %% Already loaded
orphan_tables(Tabs, Node, Ns, Local, Remote);
@@ -1445,13 +1445,13 @@ orphan_tables([], _, _, LocalOrphans, RemoteMasters) ->
{LocalOrphans, RemoteMasters}.
node_has_tabs([Tab | Tabs], Node, State) when Node /= node() ->
- State2 =
+ State2 =
case catch update_whereabouts(Tab, Node, State) of
State1 = #state{} -> State1;
{'EXIT', R} -> %% Tab was just deleted?
case ?catch_val({Tab, cstruct}) of
{'EXIT', _} -> State; % yes
- _ -> erlang:error(R)
+ _ -> erlang:error(R)
end
end,
node_has_tabs(Tabs, Node, State2);
@@ -1477,14 +1477,14 @@ update_whereabouts(Tab, Node, State) ->
true ->
lists:member(Node, Masters)
end,
-
+
dbg_out("Table ~w is loaded on ~w. s=~w, r=~w, lc=~w, f=~w, m=~w~n",
[Tab, Node, Storage, Read, LocalC, ByForce, GoGetIt]),
if
LocalC == true ->
%% Local contents, don't care about other node
State;
- BeingCreated == true ->
+ BeingCreated == true ->
%% The table is currently being created
%% It will be handled elsewhere
State;
@@ -1501,8 +1501,8 @@ update_whereabouts(Tab, Node, State) ->
State
end;
Storage == unknown ->
- %% No own copy, continue to read remotely
- add_active_replica(Tab, Node),
+ %% No own copy, continue to read remotely
+ add_active_replica(Tab, Node),
NodeST = mnesia_lib:storage_type_at_node(Node, Tab),
ReadST = mnesia_lib:storage_type_at_node(Read, Tab),
if %% Avoid reading from disc_only_copies
@@ -1542,16 +1542,16 @@ initial_safe_loads() ->
Tabs = val({schema, local_tables}) -- [schema],
LastC = fun(T) -> last_consistent_replica(T, Downs) end,
lists:zf(LastC, Tabs);
-
+
disc_copies ->
Downs = mnesia_recover:get_mnesia_downs(),
dbg_out("mnesia_downs = ~p~n", [Downs]),
-
+
Tabs = val({schema, local_tables}) -- [schema],
LastC = fun(T) -> last_consistent_replica(T, Downs) end,
lists:zf(LastC, Tabs)
end.
-
+
last_consistent_replica(Tab, Downs) ->
Cs = val({Tab, cstruct}),
Storage = mnesia_lib:cs_to_storage_type(node(), Cs),
@@ -1628,7 +1628,7 @@ remove_early_messages([], _Node) ->
[];
remove_early_messages([{call, {add_active_replica, [_, Node, _, _], _}, _}|R], Node) ->
remove_early_messages(R, Node); %% Does a reply before queuing
-remove_early_messages([{call, {block_table, _, From}, ReplyTo}|R], Node)
+remove_early_messages([{call, {block_table, _, From}, ReplyTo}|R], Node)
when node(From) == Node ->
reply(ReplyTo, ok), %% Remove gen:server waits..
remove_early_messages(R, Node);
@@ -1682,9 +1682,9 @@ is_tab_blocked(W2C) when is_list(W2C) ->
is_tab_blocked({blocked, W2C}) when is_list(W2C) ->
{true, W2C}.
-mark_blocked_tab(true, Value) ->
+mark_blocked_tab(true, Value) ->
{blocked, Value};
-mark_blocked_tab(false, Value) ->
+mark_blocked_tab(false, Value) ->
Value.
%%
@@ -1717,7 +1717,7 @@ del_active_replica(Tab, Node) ->
update_where_to_wlock(Tab).
change_table_access_mode(Cs) ->
- W = fun() ->
+ W = fun() ->
Tab = Cs#cstruct.name,
lists:foreach(fun(N) -> add_active_replica(Tab, N, Cs) end,
val({Tab, active_replicas}))
@@ -1746,7 +1746,7 @@ update_where_to_wlock(Tab) ->
unannounce_add_table_copy(Tab, To) ->
catch del_active_replica(Tab, To),
case catch val({Tab , where_to_read}) of
- To ->
+ To ->
mnesia_lib:set_remote_where_to_read(Tab);
_ ->
ignore
@@ -1759,7 +1759,7 @@ user_sync_tab(Tab) ->
_ ->
ignore
end,
-
+
case erase({sync_tab, Tab}) of
undefined ->
ok;
@@ -1778,11 +1778,11 @@ i_have_tab(Tab) ->
sync_and_block_table_whereabouts(Tab, ToNode, RemoteS, AccessMode) when Tab /= schema ->
Current = val({current, db_nodes}),
- Ns =
+ Ns =
case lists:member(ToNode, Current) of
true -> Current -- [ToNode];
false -> Current
- end,
+ end,
remote_call(ToNode, block_table, [Tab]),
[remote_call(Node, add_active_replica, [Tab, ToNode, RemoteS, AccessMode]) ||
Node <- [ToNode | Ns]],
@@ -1827,7 +1827,7 @@ get_workers(Timeout) ->
{timeout, Timeout}
end
end.
-
+
info() ->
Tabs = mnesia_lib:local_active_tables(),
io:format( "---> Active tables <--- ~n", []),
@@ -1836,12 +1836,12 @@ info() ->
info([Tab | Tail]) ->
case val({Tab, storage_type}) of
disc_only_copies ->
- info_format(Tab,
- dets:info(Tab, size),
+ info_format(Tab,
+ dets:info(Tab, size),
dets:info(Tab, file_size),
"bytes on disc");
_ ->
- info_format(Tab,
+ info_format(Tab,
?ets_info(Tab, size),
?ets_info(Tab, memory),
"words of mem")
@@ -1881,7 +1881,7 @@ handle_early_msg({cast, Msg}, State) ->
handle_cast(Msg, State);
handle_early_msg({info, Msg}, State) ->
handle_info(Msg, State).
-
+
noreply(State) ->
{noreply, State}.
@@ -1929,7 +1929,7 @@ add_worker(Worker = #send_table{}, State) ->
add_worker(Worker = #disc_load{}, State) ->
opt_start_worker(add_loader(Worker#disc_load.table,Worker,State));
% Block controller should be used for upgrading mnesia.
-add_worker(Worker = #block_controller{}, State) ->
+add_worker(Worker = #block_controller{}, State) ->
Queue = State#state.dumper_queue,
Queue2 = [Worker | Queue],
State2 = State#state{dumper_queue = Queue2},
@@ -1938,13 +1938,13 @@ add_worker(Worker = #block_controller{}, State) ->
add_loader(Tab,Worker,State = #state{loader_queue=LQ0}) ->
case gb_trees:is_defined(Tab, LQ0) of
true -> State;
- false ->
+ false ->
LQ=gb_trees:insert(Tab, Worker, LQ0),
State#state{loader_queue=LQ}
end.
%% Optionally start a worker
-%%
+%%
%% Dumpers and loaders may run simultaneously
%% but neither of them may run during schema commit.
%% Loaders may not start if a schema commit is enqueued.
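
The scheduling rule spelled out in the comment above (dumpers and loaders may overlap, nothing runs beside a schema commit, and no loader starts while a schema commit is queued) boils down to a single decision. A minimal sketch with hypothetical names, not the actual opt_start_worker/1 logic:

    -module(worker_gate).                        %% illustrative sketch only
    -export([may_start/3]).

    %% DumperQueue: queued dumper work, tagged tuples such as {schema_commit_lock, From}.
    %% Senders/Loaders: currently running sender and loader workers.
    may_start(DumperQueue, Senders, Loaders) ->
        SchemaCommit = lists:keymember(schema_commit_lock, 1, DumperQueue),
        case {SchemaCommit, Senders, Loaders} of
            {true, [], []} -> schema_commit;     %% exclusive, nothing else may run
            {true, _, _}   -> none;              %% drain running workers first
            {false, _, _}  -> loader_or_sender   %% dumpers and loaders may overlap
        end.
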
@@ -1958,7 +1958,7 @@ opt_start_worker(State) ->
%% Great, a worker in queue and neither
%% a schema transaction is being
%% committed and nor a dumper is running
-
+
%% Start worker but keep him in the queue
if
is_record(Worker, schema_commit_lock) ->
@@ -1966,7 +1966,7 @@ opt_start_worker(State) ->
reply(ReplyTo, granted),
{Owner, _Tag} = ReplyTo,
opt_start_loader(State#state{dumper_pid = Owner});
-
+
is_record(Worker, dump_log) ->
Pid = spawn_link(?MODULE, dump_and_reply, [self(), Worker]),
State2 = State#state{dumper_pid = Pid},
@@ -1976,7 +1976,7 @@ opt_start_worker(State) ->
%% or sender
State3 = opt_start_sender(State2),
opt_start_loader(State3);
-
+
is_record(Worker, block_controller) ->
case {get_senders(State), get_loaders(State)} of
{[], []} ->
@@ -1989,7 +1989,7 @@ opt_start_worker(State) ->
end
end;
_ ->
- %% Bad luck, try with a loader or sender instead
+ %% Bad luck, try with a loader or sender instead
State2 = opt_start_sender(State),
opt_start_loader(State2)
end.
@@ -1997,8 +1997,8 @@ opt_start_worker(State) ->
opt_start_sender(State) ->
case State#state.sender_queue of
[]-> State; %% No need
- SenderQ ->
- {NewS,Kept} = opt_start_sender2(SenderQ, get_senders(State),
+ SenderQ ->
+ {NewS,Kept} = opt_start_sender2(SenderQ, get_senders(State),
[], get_loaders(State)),
State#state{sender_pid = NewS, sender_queue = Kept}
end.
@@ -2007,11 +2007,11 @@ opt_start_sender2([], Pids,Kept, _) -> {Pids,Kept};
opt_start_sender2([Sender|R], Pids, Kept, LoaderQ) ->
Tab = Sender#send_table.table,
Active = val({Tab, active_replicas}),
- IgotIt = lists:member(node(), Active),
- IsLoading = lists:any(fun({_Pid,Loader}) ->
+ IgotIt = lists:member(node(), Active),
+ IsLoading = lists:any(fun({_Pid,Loader}) ->
Tab == element(#net_load.table, Loader)
end, LoaderQ),
- if
+ if
IgotIt, IsLoading ->
%% I'm currently finishing loading the table let him wait
opt_start_sender2(R,Pids, [Sender|Kept], LoaderQ);
@@ -2029,11 +2029,11 @@ opt_start_loader(State = #state{loader_queue = LoaderQ}) ->
Current = get_loaders(State),
Max = max_loaders(),
case gb_trees:is_empty(LoaderQ) of
- true ->
+ true ->
State;
- _ when length(Current) >= Max ->
+ _ when length(Current) >= Max ->
State;
- false ->
+ false ->
SchemaQueue = State#state.dumper_queue,
case lists:keymember(schema_commit_lock, 1, SchemaQueue) of
false ->
@@ -2064,7 +2064,7 @@ already_loading(#disc_load{table=Tab},Loaders) ->
already_loading2(Tab, [{_,#net_load{table=Tab}}|_]) -> true;
already_loading2(Tab, [{_,#disc_load{table=Tab}}|_]) -> true;
-already_loading2(Tab, [_|Rest]) -> already_loading2(Tab,Rest);
+already_loading2(Tab, [_|Rest]) -> already_loading2(Tab,Rest);
already_loading2(_,[]) -> false.
start_remote_sender(Node, Tab, Receiver, Storage) ->
@@ -2093,8 +2093,8 @@ send_and_reply(ReplyTo, Worker) ->
load_and_reply(ReplyTo, Worker) ->
Load = load_table_fun(Worker),
- SendAndReply =
- fun() ->
+ SendAndReply =
+ fun() ->
process_flag(trap_exit, true),
Done = Load(),
ReplyTo ! Done#loader_done{worker_pid = self()},
@@ -2161,7 +2161,7 @@ load_table_fun(#disc_load{table=Tab, reason=Reason, opt_reply_to=ReplyTo}) ->
ReadNode == nowhere ->
%% Already loaded on other node, lets get it
Cs = val({Tab, cstruct}),
- fun() ->
+ fun() ->
case mnesia_loader:net_load_table(Tab, Reason, Active, Cs) of
{loaded, ok} ->
Done#loader_done{needs_sync = true};
@@ -2204,10 +2204,10 @@ filter_active(Tab) ->
Active = val({Tab, active_replicas}),
Masters = mnesia_recover:get_master_nodes(Tab),
Ns = do_filter_active(ByForce, Active, Masters),
- %% Reorder the so that we load from fastest first
+    %% Reorder them so that we load from the fastest replicas first
LS = ?catch_val({Tab, storage_type}),
DOC = val({Tab, disc_only_copies}),
- {Good,Worse} =
+ {Good,Worse} =
case LS of
disc_only_copies ->
G = mnesia_lib:intersect(Ns, DOC),
@@ -2218,7 +2218,7 @@ filter_active(Tab) ->
end,
%% Pick a random node of the fastest
Len = length(Good),
- if
+ if
Len > 0 ->
R = erlang:phash(node(), Len+1),
random(R-1,Good,Worse);
@@ -2237,5 +2237,5 @@ do_filter_active(false, Active, []) ->
Active;
do_filter_active(false, Active, Masters) ->
mnesia_lib:intersect(Active, Masters).
-
+
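
The filter_active/1 hunk just above reorders the candidate replicas so that loading starts from the fastest copies and then picks one of them with erlang:phash/2. A simplified sketch of that selection for the common case where the local copy is not disc_only_copies; module and function names are illustrative:

    -module(replica_pick).                       %% illustrative sketch only
    -export([pick/2]).

    %% Candidates: nodes holding an active replica of the table.
    %% DiscOnly: nodes whose replica is disc_only_copies (slower to copy from).
    pick([], _DiscOnly) ->
        none;
    pick(Candidates, DiscOnly) ->
        Fast = [N || N <- Candidates, not lists:member(N, DiscOnly)],
        Good = case Fast of
                   [] -> Candidates;
                   _  -> Fast
               end,
        R = erlang:phash(node(), length(Good)),  %% deterministic per node, in 1..length(Good)
        lists:nth(R, Good).
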
diff --git a/lib/mnesia/src/mnesia_dumper.erl b/lib/mnesia/src/mnesia_dumper.erl
index f8d7664156..d05629977f 100644
--- a/lib/mnesia/src/mnesia_dumper.erl
+++ b/lib/mnesia/src/mnesia_dumper.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -43,7 +43,7 @@
%% Internal stuff
-export([regulator_init/1]).
-
+
-include("mnesia.hrl").
-include_lib("kernel/include/file.hrl").
@@ -70,14 +70,14 @@ incr_log_writes() ->
adjust_log_writes(DoCast) ->
Token = {mnesia_adjust_log_writes, self()},
- case global:set_lock(Token, [node()], 1) of
+ case global:set_lock(Token, [node()], 1) of
false ->
ignore; %% Somebody else is sending a dump request
- true ->
- case DoCast of
+ true ->
+ case DoCast of
false ->
ignore;
- true ->
+ true ->
mnesia_controller:async_dump_log(write_threshold)
end,
Max = mnesia_monitor:get_env(dump_log_write_threshold),
@@ -93,16 +93,16 @@ adjust_log_writes(DoCast) ->
opt_dump_log(InitBy) ->
Reg = case whereis(?REGULATOR_NAME) of
undefined ->
- nopid;
+ nopid;
Pid when is_pid(Pid) ->
- Pid
+ Pid
end,
perform_dump(InitBy, Reg).
%% Scan for decisions
perform_dump(InitBy, Regulator) when InitBy == scan_decisions ->
?eval_debug_fun({?MODULE, perform_dump}, [InitBy]),
-
+
dbg_out("Transaction log dump initiated by ~w~n", [InitBy]),
scan_decisions(mnesia_log:previous_log_file(), InitBy, Regulator),
scan_decisions(mnesia_log:latest_log_file(), InitBy, Regulator);
@@ -112,8 +112,8 @@ perform_dump(InitBy, Regulator) ->
?eval_debug_fun({?MODULE, perform_dump}, [InitBy]),
LogState = mnesia_log:prepare_log_dump(InitBy),
dbg_out("Transaction log dump initiated by ~w: ~w~n",
- [InitBy, LogState]),
- adjust_log_writes(false),
+ [InitBy, LogState]),
+ adjust_log_writes(false),
case LogState of
already_dumped ->
mnesia_recover:allow_garb(),
@@ -142,7 +142,7 @@ perform_dump(InitBy, Regulator) ->
mnesia_lib:important(Desc, Reason),
%% Ignore rest of the log
mnesia_log:confirm_log_dump(Diff);
- false ->
+ false ->
fatal(Desc, Reason)
end
end;
@@ -189,9 +189,9 @@ do_perform_dump(Cont, InPlace, InitBy, Regulator, OldVersion) ->
insert_recs([Rec | Recs], InPlace, InitBy, Regulator, LogV) ->
regulate(Regulator),
case insert_rec(Rec, InPlace, InitBy, LogV) of
- LogH when is_record(LogH, log_header) ->
+ LogH when is_record(LogH, log_header) ->
insert_recs(Recs, InPlace, InitBy, Regulator, LogH#log_header.log_version);
- _ ->
+ _ ->
insert_recs(Recs, InPlace, InitBy, Regulator, LogV)
end;
@@ -199,7 +199,7 @@ insert_recs([], _InPlace, _InitBy, _Regulator, Version) ->
Version.
insert_rec(Rec, _InPlace, scan_decisions, _LogV) ->
- if
+ if
is_record(Rec, commit) ->
ignore;
is_record(Rec, log_header) ->
@@ -227,7 +227,7 @@ insert_rec(H, _InPlace, _InitBy, _LogV) when is_record(H, log_header) ->
H#log_header.log_kind /= trans_log ->
exit({"Bad kind of transaction log", H});
H#log_header.log_version == CurrentVersion ->
- ok;
+ ok;
H#log_header.log_version == "4.2" ->
ok;
H#log_header.log_version == "4.1" ->
@@ -247,8 +247,8 @@ do_insert_rec(Tid, Rec, InPlace, InitBy, LogV) ->
[] ->
ignore;
SchemaOps ->
- case val({schema, storage_type}) of
- ram_copies ->
+ case val({schema, storage_type}) of
+ ram_copies ->
insert_ops(Tid, schema_ops, SchemaOps, InPlace, InitBy, LogV);
Storage ->
true = open_files(schema, Storage, InPlace, InitBy),
@@ -264,13 +264,13 @@ do_insert_rec(Tid, Rec, InPlace, InitBy, LogV) ->
_ ->
ignore
end.
-
+
update(_Tid, [], _DumperMode) ->
dumped;
update(Tid, SchemaOps, DumperMode) ->
UseDir = mnesia_monitor:use_dir(),
- Res = perform_update(Tid, SchemaOps, DumperMode, UseDir),
+ Res = perform_update(Tid, SchemaOps, DumperMode, UseDir),
mnesia_controller:release_schema_commit_lock(),
Res.
@@ -279,23 +279,23 @@ perform_update(_Tid, _SchemaOps, mandatory, true) ->
%% dumper perform needed updates
InitBy = schema_update,
- ?eval_debug_fun({?MODULE, dump_schema_op}, [InitBy]),
+ ?eval_debug_fun({?MODULE, dump_schema_op}, [InitBy]),
opt_dump_log(InitBy);
perform_update(Tid, SchemaOps, _DumperMode, _UseDir) ->
%% No need for a full transaction log dump.
%% Ignore the log file and only perform
%% the corresponding updates.
- InitBy = fast_schema_update,
+ InitBy = fast_schema_update,
InPlace = mnesia_monitor:get_env(dump_log_update_in_place),
?eval_debug_fun({?MODULE, dump_schema_op}, [InitBy]),
- case catch insert_ops(Tid, schema_ops, SchemaOps, InPlace, InitBy,
+ case catch insert_ops(Tid, schema_ops, SchemaOps, InPlace, InitBy,
mnesia_log:version()) of
{'EXIT', Reason} ->
Error = {error, {"Schema update error", Reason}},
close_files(InPlace, Error, InitBy),
fatal("Schema update error ~p ~p", [Reason, SchemaOps]);
- _ ->
+ _ ->
?eval_debug_fun({?MODULE, post_dump}, [InitBy]),
close_files(InPlace, ok, InitBy),
ok
@@ -318,7 +318,7 @@ insert_ops(Tid, Storage, [Op | Ops], InPlace, InitBy, Ver) when Ver < "4.3" ->
disc_insert(_Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
case open_files(Tab, Storage, InPlace, InitBy) of
true ->
- case Storage of
+ case Storage of
disc_copies when Tab /= schema ->
mnesia_log:append({?MODULE,Tab}, {{Tab, Key}, Val, Op}),
ok;
@@ -331,7 +331,7 @@ disc_insert(_Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
%% To fix update_counter so that it behaves better.
%% i.e. if nothing has changed in the tab except update_counter
-%% trust that the value in the dets file is correct.
+%% trust that the value in the dets file is correct.
%% Otherwise we will get a double increment.
%% This is perfect but update_counter is a dirty op.
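
The double increment described above comes down to one question during log replay: has anything else rewritten this key during the current dump? If it has, the increment must be applied again; if not, the value already in the dets file is trusted. A rough sketch of that decision, using an explicit set of touched keys instead of the process dictionary bookkeeping the dumper actually uses:

    -module(counter_replay).                     %% illustrative sketch only
    -export([on_update_counter/3]).

    %% Touched: gb_sets set of keys already rewritten during this dump.
    on_update_counter(Key, Incr, Touched) ->
        case gb_sets:is_member(Key, Touched) of
            true  -> {apply, Incr};              %% key was overwritten, re-apply the increment
            false -> skip                        %% the dets file already holds the counter value
        end.
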
@@ -353,12 +353,12 @@ dets_insert(Op,Tab,Key,Val) ->
_ when Incr < 0 ->
Zero = {RecName, Key, 0},
ok = dets:insert(Tab, Zero);
- _ ->
+ _ ->
Init = {RecName, Key, Incr},
ok = dets:insert(Tab, Init)
end;
false -> ok
- end;
+ end;
delete_object ->
dets_updated(Tab,Key),
ok = dets:delete_object(Tab, Val);
@@ -366,17 +366,17 @@ dets_insert(Op,Tab,Key,Val) ->
dets_cleared(Tab),
ok = dets:delete_all_objects(Tab)
end.
-
-dets_updated(Tab,Key) ->
+
+dets_updated(Tab,Key) ->
case get(mnesia_dumper_dets) of
- undefined ->
+ undefined ->
Empty = gb_trees:empty(),
Tree = gb_trees:insert(Tab, gb_sets:singleton(Key), Empty),
put(mnesia_dumper_dets, Tree);
Tree ->
case gb_trees:lookup(Tab,Tree) of
{value, cleared} -> ignore;
- {value, Set} ->
+ {value, Set} ->
T = gb_trees:update(Tab, gb_sets:add(Key, Set), Tree),
put(mnesia_dumper_dets, T);
none ->
@@ -398,14 +398,14 @@ dets_incr_counter(Tab,Key) ->
dets_cleared(Tab) ->
case get(mnesia_dumper_dets) of
- undefined ->
+ undefined ->
Empty = gb_trees:empty(),
Tree = gb_trees:insert(Tab, cleared, Empty),
put(mnesia_dumper_dets, Tree);
Tree ->
case gb_trees:lookup(Tab,Tree) of
{value, cleared} -> ignore;
- _ ->
+ _ ->
T = gb_trees:enter(Tab, cleared, Tree),
put(mnesia_dumper_dets, T)
end
@@ -417,7 +417,7 @@ insert(Tid, Storage, Tab, Key, [Val | Tail], Op, InPlace, InitBy) ->
insert(_Tid, _Storage, _Tab, _Key, [], _Op, _InPlace, _InitBy) ->
ok;
-
+
insert(Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
Item = {{Tab, Key}, Val, Op},
case InitBy of
@@ -447,18 +447,18 @@ insert(Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
disc_delete_table(Tab, Storage) ->
case mnesia_monitor:use_dir() of
true ->
- if
- Storage == disc_only_copies; Tab == schema ->
+ if
+ Storage == disc_only_copies; Tab == schema ->
mnesia_monitor:unsafe_close_dets(Tab),
Dat = mnesia_lib:tab2dat(Tab),
- file:delete(Dat);
- true ->
+ file:delete(Dat);
+ true ->
DclFile = mnesia_lib:tab2dcl(Tab),
case get({?MODULE,Tab}) of
{opened_dumper, dcl} ->
del_opened_tab(Tab),
mnesia_log:unsafe_close_log(Tab);
- _ ->
+ _ ->
ok
end,
file:delete(DclFile),
@@ -490,7 +490,7 @@ insert_op(Tid, Storage, {{Tab, Key}, Val, Op}, InPlace, InitBy) ->
insert_op(_Tid, schema_ops, _OP, _InPlace, Initby)
when Initby /= startup,
Initby /= fast_schema_update,
- Initby /= schema_update ->
+ Initby /= schema_update ->
ignore;
insert_op(Tid, _, {op, rec, Storage, Item}, InPlace, InitBy) ->
@@ -507,7 +507,7 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
_ ->
ignore
end,
- if
+ if
N == node() ->
Dmp = mnesia_lib:tab2dmp(Tab),
Dat = mnesia_lib:tab2dat(Tab),
@@ -531,8 +531,8 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
true = open_files(Tab, disc_only_copies, InPlace, InitBy),
%% ram_delete_table must be done before init_indecies,
%% it uses info which is reset in init_indecies,
- %% it doesn't matter, because init_indecies don't use
- %% the ram replica of the table when creating the disc
+	 %% it doesn't matter, because init_indecies does not use
+ %% the ram replica of the table when creating the disc
%% index; Could be improved :)
mnesia_schema:ram_delete_table(Tab, FromS),
PosList = Cs#cstruct.index,
@@ -540,17 +540,17 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
{disc_only_copies, ram_copies} ->
mnesia_monitor:unsafe_close_dets(Tab),
disc_delete_indecies(Tab, Cs, disc_only_copies),
- case InitBy of
- startup ->
+ case InitBy of
+ startup ->
ignore;
- _ ->
+ _ ->
mnesia_controller:get_disc_copy(Tab)
end,
disc_delete_table(Tab, disc_only_copies);
{disc_copies, disc_only_copies} ->
ok = ensure_rename(Dmp, Dat),
true = open_files(Tab, disc_only_copies, InPlace, InitBy),
- mnesia_schema:ram_delete_table(Tab, FromS),
+ mnesia_schema:ram_delete_table(Tab, FromS),
PosList = Cs#cstruct.index,
mnesia_index:init_indecies(Tab, disc_only_copies, PosList),
file:delete(Dcl),
@@ -558,8 +558,8 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
{disc_only_copies, disc_copies} ->
mnesia_monitor:unsafe_close_dets(Tab),
disc_delete_indecies(Tab, Cs, disc_only_copies),
- case InitBy of
- startup ->
+ case InitBy of
+ startup ->
ignore;
_ ->
mnesia_log:ets2dcd(Tab),
@@ -576,7 +576,7 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
insert_op(Tid, _, {op, transform, _Fun, TabDef}, InPlace, InitBy) ->
Cs = mnesia_schema:list2cs(TabDef),
case mnesia_lib:cs_to_storage_type(node(), Cs) of
- disc_copies ->
+ disc_copies ->
open_dcl(Cs#cstruct.name);
_ ->
ignore
@@ -614,8 +614,8 @@ insert_op(Tid, _, {op, restore_recreate, TabDef}, InPlace, InitBy) ->
Storage == disc_copies ->
Args = [{keypos, 2}, public, named_table, Type],
mnesia_monitor:mktab(Tab, Args),
- File = mnesia_lib:tab2dcd(Tab),
- FArg = [{file, File}, {name, {mnesia,create}},
+ File = mnesia_lib:tab2dcd(Tab),
+ FArg = [{file, File}, {name, {mnesia,create}},
{repair, false}, {mode, read_write}],
{ok, Log} = mnesia_monitor:open_log(FArg),
mnesia_monitor:unsafe_close_log(Log);
@@ -644,13 +644,13 @@ insert_op(Tid, _, {op, create_table, TabDef}, InPlace, InitBy) ->
ignore;
disc_copies ->
Dcd = mnesia_lib:tab2dcd(Tab),
- case mnesia_lib:exists(Dcd) of
+ case mnesia_lib:exists(Dcd) of
true -> ignore;
false ->
- mnesia_log:open_log(temp,
+ mnesia_log:open_log(temp,
mnesia_log:dcd_log_header(),
- Dcd,
- false,
+ Dcd,
+ false,
false,
read_write),
mnesia_log:unsafe_close_log(temp)
@@ -695,8 +695,8 @@ insert_op(Tid, _, {op, create_table, TabDef}, InPlace, InitBy) ->
%% Indecies are still created by loader
disc_delete_indecies(Tab, Cs, Storage)
%% disc_delete_table(Tab, Storage)
- end,
-
+ end,
+
%% Update whereabouts and create table
mnesia_controller:create_table(Tab),
mnesia_lib:unset({Tab, create_table})
@@ -754,7 +754,7 @@ insert_op(Tid, _, {op, clear_table, TabDef}, InPlace, InitBy) ->
true ->
ignore
end,
- %% Need to catch this, it crashes on ram_copies if
+ %% Need to catch this, it crashes on ram_copies if
%% the op comes before table is loaded at startup.
catch insert(Tid, Storage, Tab, '_', Oid, clear_table, InPlace, InitBy)
end;
@@ -766,16 +766,16 @@ insert_op(Tid, _, {op, merge_schema, TabDef}, InPlace, InitBy) ->
%% If we bootstrap an empty (diskless) mnesia from another node
%% we might have changed the storage_type of schema.
%% I think this is a good place to do it.
- Update = fun(NS = {Node,Storage}) ->
+ Update = fun(NS = {Node,Storage}) ->
case mnesia_lib:cs_to_storage_type(Node, Cs) of
Storage -> NS;
- disc_copies when Node == node() ->
- Dir = mnesia_lib:dir(),
+ disc_copies when Node == node() ->
+ Dir = mnesia_lib:dir(),
ok = mnesia_schema:opt_create_dir(true, Dir),
mnesia_schema:purge_dir(Dir, []),
mnesia_log:purge_all_logs(),
- mnesia_lib:set(use_dir, true),
+ mnesia_lib:set(use_dir, true),
mnesia_log:init(),
Ns = val({current, db_nodes}),
F = fun(U) -> mnesia_recover:log_mnesia_up(U) end,
@@ -783,11 +783,11 @@ insert_op(Tid, _, {op, merge_schema, TabDef}, InPlace, InitBy) ->
raw_named_dump_table(schema, dat),
temp_set_master_nodes(),
{Node,disc_copies};
- CSstorage ->
+ CSstorage ->
{Node,CSstorage}
end
end,
-
+
W2C0 = val({schema, where_to_commit}),
W2C = case W2C0 of
{blocked, List} ->
@@ -854,9 +854,9 @@ insert_op(Tid, _, {op, del_snmp, TabDef}, InPlace, InitBy) ->
InitBy /= startup,
Storage /= unknown ->
case ?catch_val({Tab, {index, snmp}}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
ignore;
- Stab ->
+ Stab ->
mnesia_snmp_hook:delete_table(Tab, Stab),
mnesia_lib:unset({Tab, {index, snmp}})
end;
@@ -874,7 +874,7 @@ insert_op(Tid, _, {op, add_index, Pos, TabDef}, InPlace, InitBy) ->
true = open_files(Tab, Storage, InPlace, InitBy),
mnesia_index:init_indecies(Tab, Storage, [Pos]);
startup ->
- ignore;
+ ignore;
_ ->
case val({Tab,where_to_read}) of
nowhere -> ignore;
@@ -890,7 +890,7 @@ insert_op(Tid, _, {op, del_index, Pos, TabDef}, InPlace, InitBy) ->
case InitBy of
startup when Storage == disc_only_copies ->
mnesia_index:del_index_table(Tab, Storage, Pos);
- startup ->
+ startup ->
ignore;
_ ->
mnesia_index:del_index_table(Tab, Storage, Pos)
@@ -939,7 +939,7 @@ open_files(Tab, Storage, UpdateInPlace, InitBy)
{'EXIT', _} ->
false;
Type ->
- case Storage of
+ case Storage of
disc_copies when Tab /= schema ->
Bool = open_disc_copies(Tab, InitBy),
Bool;
@@ -964,7 +964,7 @@ open_files(_Tab, _Storage, _UpdateInPlace, _InitBy) ->
open_disc_copies(Tab, InitBy) ->
DclF = mnesia_lib:tab2dcl(Tab),
- DumpEts =
+ DumpEts =
case file:read_file_info(DclF) of
{error, enoent} ->
false;
@@ -975,7 +975,7 @@ open_disc_copies(Tab, InitBy) ->
mnesia_lib:dbg_out("File ~p info_error ~p ~n",
[DcdF, Reason]),
true;
- {ok, DcdInfo} ->
+ {ok, DcdInfo} ->
Mul = case ?catch_val(dc_dump_limit) of
{'EXIT', _} -> ?DumpToEtsMultiplier;
Val -> Val
@@ -983,12 +983,12 @@ open_disc_copies(Tab, InitBy) ->
DcdInfo#file_info.size =< (DclInfo#file_info.size * Mul)
end
end,
- if
- DumpEts == false; InitBy == startup ->
- mnesia_log:open_log({?MODULE,Tab},
- mnesia_log:dcl_log_header(),
- DclF,
- mnesia_lib:exists(DclF),
+ if
+ DumpEts == false; InitBy == startup ->
+ mnesia_log:open_log({?MODULE,Tab},
+ mnesia_log:dcl_log_header(),
+ DclF,
+ mnesia_lib:exists(DclF),
mnesia_monitor:get_env(auto_repair),
read_write),
put({?MODULE, Tab}, {opened_dumper, dcl}),
@@ -997,9 +997,9 @@ open_disc_copies(Tab, InitBy) ->
mnesia_log:ets2dcd(Tab),
put({?MODULE, Tab}, already_dumped),
false
- end.
+ end.
-%% Always opens the dcl file for writing overriding already_dumped
+%% Always opens the dcl file for writing, overriding the already_dumped
%% mechanism; used for schema transactions.
open_dcl(Tab) ->
case get({?MODULE, Tab}) of
@@ -1007,10 +1007,10 @@ open_dcl(Tab) ->
true;
_ -> %% undefined or already_dumped
DclF = mnesia_lib:tab2dcl(Tab),
- mnesia_log:open_log({?MODULE,Tab},
- mnesia_log:dcl_log_header(),
- DclF,
- mnesia_lib:exists(DclF),
+ mnesia_log:open_log({?MODULE,Tab},
+ mnesia_log:dcl_log_header(),
+ DclF,
+ mnesia_lib:exists(DclF),
mnesia_monitor:get_env(auto_repair),
read_write),
put({?MODULE, Tab}, {opened_dumper, dcl}),
@@ -1047,7 +1047,7 @@ close_files(InPlace, Outcome, InitBy, [{{?MODULE, Tab}, {opened_dumper, Type}} |
case val({Tab, storage_type}) of
disc_only_copies when InitBy /= startup ->
ignore;
- disc_copies when Tab /= schema ->
+ disc_copies when Tab /= schema ->
mnesia_log:close_log({?MODULE,Tab});
Storage ->
do_close(InPlace, Outcome, Tab, Type, Storage)
@@ -1082,7 +1082,7 @@ do_close(InPlace, Outcome, Tab, dat, Storage) ->
true ->
file:delete(mnesia_lib:tab2tmp(Tab))
end.
-
+
ensure_rename(From, To) ->
case mnesia_lib:exists(From) of
@@ -1096,7 +1096,7 @@ ensure_rename(From, To) ->
{error, {rename_failed, From, To}}
end
end.
-
+
insert_cstruct(Tid, Cs, KeepWhereabouts, InPlace, InitBy) ->
Val = mnesia_schema:insert_cstruct(Tid, Cs, KeepWhereabouts),
{schema, Tab, _} = Val,
@@ -1114,15 +1114,15 @@ delete_cstruct(Tid, Cs, InPlace, InitBy) ->
temp_set_master_nodes() ->
Tabs = val({schema, local_tables}),
- Masters = [{Tab, (val({Tab, disc_copies}) ++
- val({Tab, ram_copies}) ++
- val({Tab, disc_only_copies})) -- [node()]}
+ Masters = [{Tab, (val({Tab, disc_copies}) ++
+ val({Tab, ram_copies}) ++
+ val({Tab, disc_only_copies})) -- [node()]}
|| Tab <- Tabs],
%% UseDir = false since we don't want to remember these
%% master nodes, and we are running (or really soon will be) since we want this
%% to be known during table loading.
mnesia_recover:log_master_nodes(Masters, false, yes),
- ok.
+ ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Raw dump of table. Dumper must have unique access to the ets table.
@@ -1152,7 +1152,7 @@ raw_named_dump_table(Tab, Ftype) ->
{ok, TabRef} ->
Storage = ram_copies,
mnesia_lib:db_fixtable(Storage, Tab, true),
-
+
case catch raw_dump_table(TabRef, Tab) of
{'EXIT', Reason} ->
mnesia_lib:db_fixtable(Storage, Tab, false),
@@ -1179,11 +1179,11 @@ raw_dump_table(DetsRef, EtsRef) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Load regulator
-%%
-%% This is a poor mans substitute for a fair scheduler algorithm
-%% in the Erlang emulator. The mnesia_dumper process performs many
-%% costly BIF invokations and must pay for this. But since the
-%% Emulator does not handle this properly we must compensate for
+%%
+%% This is a poor man's substitute for a fair scheduler algorithm
+%% in the Erlang emulator. The mnesia_dumper process performs many
+%% costly BIF invocations and must pay for this. But since the
+%% Emulator does not handle this properly we must compensate for
%% this with some form of load regulation of ourselves in order to
%% not steal all computation power in the Erlang Emulator and make
%% other processes starve. Hopefully this is a temporary solution.
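
A toy version of such a regulator, assuming nothing about the real regulator_init/1 and regulate/1 below: the heavy worker checks in with a separate process around its hot loop, which briefly hands control back to the scheduler so other processes get a chance to run.

    -module(simple_regulator).                   %% illustrative sketch only
    -export([start/0, regulate/1]).

    start() ->
        spawn_link(fun loop/0).

    loop() ->
        receive
            {regulate, From} ->
                erlang:yield(),                  %% give other processes a chance to be scheduled
                From ! {regulated, self()},
                loop()
        end.

    regulate(undefined) ->
        ok;                                      %% no regulator running
    regulate(Pid) when is_pid(Pid) ->
        Pid ! {regulate, self()},
        receive {regulated, Pid} -> ok end.
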
@@ -1230,6 +1230,6 @@ regulate(RegulatorPid) ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
- Value -> Value
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ Value -> Value
end.
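
Earlier in this file, open_disc_copies/2 chooses between appending to the .dcl log and rewriting the table as a fresh .dcd dump by comparing the two file sizes against a multiplier (dc_dump_limit, defaulting to ?DumpToEtsMultiplier). Isolated from the surrounding file handling, the decision is roughly:

    -module(dump_decision).                      %% illustrative sketch only
    -export([dump_to_dcd/3]).

    %% Rewrite the whole table as a new .dcd file once the existing .dcd is no
    %% more than Multiplier times the size of the .dcl log stacked on top of it.
    dump_to_dcd(DcdBytes, DclBytes, Multiplier)
      when is_integer(DcdBytes), is_integer(DclBytes) ->
        DcdBytes =< DclBytes * Multiplier.
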
diff --git a/lib/mnesia/src/mnesia_locker.erl b/lib/mnesia/src/mnesia_locker.erl
index 0492d794f3..861f2df78d 100644
--- a/lib/mnesia/src/mnesia_locker.erl
+++ b/lib/mnesia/src/mnesia_locker.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -68,12 +68,12 @@
%% mnesia_held_locks: contain {Oid, Op, Tid} entries (bag)
-define(match_oid_held_locks(Oid), {Oid, '_', '_'}).
-%% mnesia_tid_locks: contain {Tid, Oid, Op} entries (bag)
+%% mnesia_tid_locks: contain {Tid, Oid, Op} entries (bag)
-define(match_oid_tid_locks(Tid), {Tid, '_', '_'}).
%% mnesia_sticky_locks: contain {Oid, Node} entries and {Tab, Node} entries (set)
-define(match_oid_sticky_locks(Oid),{Oid, '_'}).
%% mnesia_lock_queue: contain {queue, Oid, Tid, Op, ReplyTo, WaitForTid} entries (bag)
--define(match_oid_lock_queue(Oid), #queue{oid=Oid, tid='_', op = '_', pid = '_', lucky = '_'}).
+-define(match_oid_lock_queue(Oid), #queue{oid=Oid, tid='_', op = '_', pid = '_', lucky = '_'}).
%% mnesia_lock_counter: {{write, Tab}, Number} &&
%% {{read, Tab}, Number} entries (set)
@@ -83,11 +83,11 @@ start() ->
init(Parent) ->
register(?MODULE, self()),
process_flag(trap_exit, true),
- ?ets_new_table(mnesia_held_locks, [bag, private, named_table]),
+ ?ets_new_table(mnesia_held_locks, [bag, private, named_table]),
?ets_new_table(mnesia_tid_locks, [bag, private, named_table]),
?ets_new_table(mnesia_sticky_locks, [set, private, named_table]),
?ets_new_table(mnesia_lock_queue, [bag, private, named_table, {keypos, 2}]),
-
+
proc_lib:init_ack(Parent, {ok, self()}),
case ?catch_val(pid_sort_order) of
r9b_plain -> put(pid_sort_order, r9b_plain);
@@ -98,8 +98,8 @@ init(Parent) ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
- _VaLuE_ -> _VaLuE_
+ {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
+ _VaLuE_ -> _VaLuE_
end.
reply(From, R) ->
@@ -111,10 +111,10 @@ l_request(Node, X, Store) ->
l_req_rec(Node, Store) ->
?ets_insert(Store, {nodes, Node}),
- receive
- {?MODULE, Node, Reply} ->
+ receive
+ {?MODULE, Node, Reply} ->
Reply;
- {mnesia_down, Node} ->
+ {mnesia_down, Node} ->
{not_granted, {node_not_running, Node}}
end.
@@ -128,10 +128,10 @@ send_release_tid(Nodes, Tid) ->
rpc:abcast(Nodes, ?MODULE, {self(), {sync_release_tid, Tid}}).
receive_release_tid_acc([Node | Nodes], Tid) ->
- receive
- {?MODULE, Node, {tid_released, Tid}} ->
+ receive
+ {?MODULE, Node, {tid_released, Tid}} ->
receive_release_tid_acc(Nodes, Tid);
- {mnesia_down, Node} ->
+ {mnesia_down, Node} ->
receive_release_tid_acc(Nodes, Tid)
end;
receive_release_tid_acc([], _Tid) ->
@@ -152,27 +152,27 @@ loop(State) ->
%% Really do a read, but get hold of a write lock
%% used by mnesia:wread(Oid).
-
+
{From, {read_write, Tid, Oid}} ->
try_sticky_lock(Tid, read_write, From, Oid),
loop(State);
-
+
%% Tid has somehow terminated, clear up everything
%% and pass locks on to queued processes.
%% This is the purpose of the mnesia_tid_locks table
-
+
{release_tid, Tid} ->
do_release_tid(Tid),
loop(State);
-
+
%% stick lock, first tries this to the where_to_read Node
{From, {test_set_sticky, Tid, {Tab, _} = Oid, Lock}} ->
case ?ets_lookup(mnesia_sticky_locks, Tab) of
- [] ->
+ [] ->
reply(From, not_stuck),
loop(State);
[{_,Node}] when Node == node() ->
- %% Lock is stuck here, see now if we can just set
+ %% Lock is stuck here, see now if we can just set
%% a regular write lock
try_lock(Tid, Lock, From, Oid),
loop(State);
@@ -188,7 +188,7 @@ loop(State) ->
?ets_insert(mnesia_sticky_locks, {Tab, N}),
loop(State);
- %% The caller which sends this message, must have first
+ %% The caller which sends this message, must have first
%% acquired a write lock on the entire table
{unstick, Tab} ->
?ets_delete(mnesia_sticky_locks, Tab),
@@ -205,14 +205,14 @@ loop(State) ->
[{_,N}] ->
Req = {From, {ix_read, Tid, Tab, IxKey, Pos}},
From ! {?MODULE, node(), {switch, N, Req}},
- loop(State)
+ loop(State)
end;
{From, {sync_release_tid, Tid}} ->
do_release_tid(Tid),
reply(From, {tid_released, Tid}),
loop(State);
-
+
{release_remote_non_pending, Node, Pending} ->
release_remote_non_pending(Node, Pending),
mnesia_monitor:mnesia_down(?MODULE, Node),
@@ -229,7 +229,7 @@ loop(State) ->
{get_table, From, LockTable} ->
From ! {LockTable, ?ets_match_object(LockTable, '_')},
loop(State);
-
+
Msg ->
error("~p got unexpected message: ~p~n", [?MODULE, Msg]),
loop(State)
@@ -271,8 +271,8 @@ try_lock(Tid, Op, SimpleOp, Lock, Pid, Oid) ->
{queue, Lucky} ->
?dbg("Queued ~p ~p ~p ~p ~n", [Tid, Oid, Lock, Lucky]),
%% Append to queue: Nice place for trace output
- ?ets_insert(mnesia_lock_queue,
- #queue{oid = Oid, tid = Tid, op = Op,
+ ?ets_insert(mnesia_lock_queue,
+ #queue{oid = Oid, tid = Tid, op = Op,
pid = Pid, lucky = Lucky}),
?ets_insert(mnesia_tid_locks, {Tid, Oid, {queued, Op}})
end.
@@ -315,12 +315,12 @@ grant_lock(Tid, write, Lock, Oid) ->
%% newer (higher tid) transactions may never wait on older ones,
%% 2) When releasing the tids from the queue always begin with youngest (high tid)
%% because of 1) it will avoid the deadlocks.
-%% 3) TabLocks is the problem :-) They should not starve and not deadlock
+%% 3) TabLocks is the problem :-) They should not starve and not deadlock
%% handle table locks in the queue as if they had locks on unlocked records.
can_lock(Tid, read, {Tab, Key}, AlreadyQ) when Key /= ?ALL ->
%% The key is bound, no need for the other BIF
- Oid = {Tab, Key},
+ Oid = {Tab, Key},
ObjLocks = ?ets_match_object(mnesia_held_locks, {Oid, write, '_'}),
TabLocks = ?ets_match_object(mnesia_held_locks, {{Tab, ?ALL}, write, '_'}),
check_lock(Tid, Oid, ObjLocks, TabLocks, yes, AlreadyQ, read);
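
The ordering rule stated before can_lock (a newer transaction, i.e. a higher tid, must never wait on an older one, and queued locks are released youngest first) is essentially a wait-die deadlock-prevention scheme. A compact sketch of the conflict decision, assuming transaction ids are totally ordered with older ids comparing smaller:

    -module(wait_die).                           %% illustrative sketch only
    -export([on_conflict/2]).

    on_conflict(Requester, Holder) when Requester < Holder ->
        queue;                                   %% the older transaction may wait behind the newer holder
    on_conflict(_Requester, _Holder) ->
        restart.                                 %% a newer transaction never waits on an older one
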
@@ -330,7 +330,7 @@ can_lock(Tid, read, Oid, AlreadyQ) -> % Whole tab
ObjLocks = ?ets_match_object(mnesia_held_locks, {{Tab, '_'}, write, '_'}),
check_lock(Tid, Oid, ObjLocks, [], yes, AlreadyQ, read);
-can_lock(Tid, write, {Tab, Key}, AlreadyQ) when Key /= ?ALL ->
+can_lock(Tid, write, {Tab, Key}, AlreadyQ) when Key /= ?ALL ->
Oid = {Tab, Key},
ObjLocks = ?ets_lookup(mnesia_held_locks, Oid),
TabLocks = ?ets_lookup(mnesia_held_locks, {Tab, ?ALL}),
@@ -348,22 +348,22 @@ check_lock(Tid, Oid, [Lock | Locks], TabLocks, X, AlreadyQ, Type) ->
check_lock(Tid, Oid, Locks, TabLocks, X, AlreadyQ, Type);
WaitForTid ->
Queue = allowed_to_be_queued(WaitForTid,Tid),
- if Queue == true ->
+ if Queue == true ->
check_lock(Tid, Oid, Locks, TabLocks, {queue, WaitForTid}, AlreadyQ, Type);
Tid#tid.pid == WaitForTid#tid.pid ->
dbg_out("Spurious lock conflict ~w ~w: ~w -> ~w~n",
- [Oid, Lock, Tid, WaitForTid]),
+ [Oid, Lock, Tid, WaitForTid]),
%% Test..
{Tab, _Key} = Oid,
- HaveQ = (ets:lookup(mnesia_lock_queue, Oid) /= [])
+ HaveQ = (ets:lookup(mnesia_lock_queue, Oid) /= [])
orelse (ets:lookup(mnesia_lock_queue,{Tab,?ALL}) /= []),
- if
- HaveQ ->
+ if
+ HaveQ ->
{no, WaitForTid};
- true ->
+ true ->
check_lock(Tid,Oid,Locks,TabLocks,{queue,WaitForTid},AlreadyQ,Type)
end;
- %%{no, WaitForTid}; Safe solution
+ %%{no, WaitForTid}; Safe solution
true ->
{no, WaitForTid}
end
@@ -373,7 +373,7 @@ check_lock(_, _, [], [], X, {queue, bad_luck}, _) ->
X; %% The queue should be correct already no need to check it again
check_lock(_, _, [], [], X = {queue, _Tid}, _AlreadyQ, _) ->
- X;
+ X;
check_lock(Tid, Oid, [], [], X, AlreadyQ, Type) ->
{Tab, Key} = Oid,
@@ -387,7 +387,7 @@ check_lock(Tid, Oid, [], [], X, AlreadyQ, Type) ->
%% If there is a queue on that object, read_lock shouldn't be granted
ObjLocks = ets:lookup(mnesia_lock_queue, Oid),
case max(ObjLocks) of
- empty ->
+ empty ->
check_queue(Tid, Tab, X, AlreadyQ);
ObjL ->
case allowed_to_be_queued(ObjL,Tid) of
@@ -407,12 +407,12 @@ check_lock(Tid, Oid, [], TabLocks, X, AlreadyQ, Type) ->
allowed_to_be_queued(WaitForTid, Tid) ->
case get(pid_sort_order) of
undefined -> WaitForTid > Tid;
- r9b_plain ->
+ r9b_plain ->
cmp_tid(true, WaitForTid, Tid) =:= 1;
- standard ->
+ standard ->
cmp_tid(false, WaitForTid, Tid) =:= 1
- end.
-
+ end.
+
%% Check queue for conflicting locks
%% Assume that all queued locks belongs to other tid's
@@ -421,25 +421,25 @@ check_queue(Tid, Tab, X, AlreadyQ) ->
Greatest = max(TabLocks),
case Greatest of
empty -> X;
- Tid -> X;
- WaitForTid ->
+ Tid -> X;
+ WaitForTid ->
case allowed_to_be_queued(WaitForTid,Tid) of
true ->
{queue, WaitForTid};
- false when AlreadyQ =:= {no, bad_luck} ->
+ false when AlreadyQ =:= {no, bad_luck} ->
{no, WaitForTid}
end
end.
sort_queue(QL) ->
case get(pid_sort_order) of
- undefined ->
+ undefined ->
lists:reverse(lists:keysort(#queue.tid, QL));
- r9b_plain ->
- lists:sort(fun(#queue{tid=X},#queue{tid=Y}) ->
+ r9b_plain ->
+ lists:sort(fun(#queue{tid=X},#queue{tid=Y}) ->
cmp_tid(true, X, Y) == 1
end, QL);
- standard ->
+ standard ->
lists:sort(fun(#queue{tid=X},#queue{tid=Y}) ->
cmp_tid(false, X, Y) == 1
end, QL)
@@ -466,12 +466,12 @@ set_read_lock_on_all_keys(Tid, From, Tab, IxKey, Pos) ->
{queue, Lucky} ->
?dbg("Queued ~p ~p ~p ~p ~n", [Tid, Oid, Lock, Lucky]),
%% Append to queue: Nice place for trace output
- ?ets_insert(mnesia_lock_queue,
- #queue{oid = Oid, tid = Tid, op = Op,
+ ?ets_insert(mnesia_lock_queue,
+ #queue{oid = Oid, tid = Tid, op = Op,
pid = From, lucky = Lucky}),
?ets_insert(mnesia_tid_locks, {Tid, Oid, {queued, Op}})
end.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Release of locks
@@ -530,20 +530,20 @@ release_lock({Tid, Oid, Op}) ->
rearrange_queue([{_Tid, {Tab, Key}, _} | Locks]) ->
if
- Key /= ?ALL->
- Queue =
- ets:lookup(mnesia_lock_queue, {Tab, ?ALL}) ++
+ Key /= ?ALL->
+ Queue =
+ ets:lookup(mnesia_lock_queue, {Tab, ?ALL}) ++
ets:lookup(mnesia_lock_queue, {Tab, Key}),
- case Queue of
- [] ->
+ case Queue of
+ [] ->
ok;
_ ->
Sorted = sort_queue(Queue),
try_waiters_obj(Sorted)
- end;
- true ->
+ end;
+ true ->
Pat = ?match_oid_lock_queue({Tab, '_'}),
- Queue = ?ets_match_object(mnesia_lock_queue, Pat),
+ Queue = ?ets_match_object(mnesia_lock_queue, Pat),
Sorted = sort_queue(Queue),
try_waiters_tab(Sorted)
end,
@@ -556,7 +556,7 @@ try_waiters_obj([W | Waiters]) ->
case try_waiter(W) of
queued ->
no;
- _ ->
+ _ ->
try_waiters_obj(Waiters)
end;
try_waiters_obj([]) ->
@@ -573,10 +573,10 @@ try_waiters_tab([W | Waiters]) ->
end;
Oid ->
case try_waiter(W) of
- queued ->
+ queued ->
Rest = key_delete_all(Oid, #queue.oid, Waiters),
try_waiters_tab(Rest);
- _ ->
+ _ ->
try_waiters_tab(Waiters)
end
end;
@@ -594,20 +594,20 @@ try_waiter(Oid, Op, SimpleOp, Lock, ReplyTo, Tid) ->
case can_lock(Tid, Lock, Oid, {queue, bad_luck}) of
yes ->
%% Delete from queue: Nice place for trace output
- ?ets_match_delete(mnesia_lock_queue,
+ ?ets_match_delete(mnesia_lock_queue,
#queue{oid=Oid, tid = Tid, op = Op,
pid = ReplyTo, lucky = '_'}),
- Reply = grant_lock(Tid, SimpleOp, Lock, Oid),
+ Reply = grant_lock(Tid, SimpleOp, Lock, Oid),
reply(ReplyTo,Reply),
locked;
{queue, _Why} ->
?dbg("Keep ~p ~p ~p ~p~n", [Tid, Oid, Lock, _Why]),
- queued; % Keep waiter in queue
+ queued; % Keep waiter in queue
{no, Lucky} ->
C = #cyclic{op = SimpleOp, lock = Lock, oid = Oid, lucky = Lucky},
verbose("** WARNING ** Restarted transaction, possible deadlock in lock queue ~w: cyclic = ~w~n",
[Tid, C]),
- ?ets_match_delete(mnesia_lock_queue,
+ ?ets_match_delete(mnesia_lock_queue,
#queue{oid=Oid, tid = Tid, op = Op,
pid = ReplyTo, lucky = '_'}),
Reply = {not_granted, C},
@@ -645,7 +645,7 @@ mnesia_down(N, Pending) ->
Pid ! {release_remote_non_pending, N, Pending}
end.
-%% Aquire a write lock, but do a read, used by
+%% Acquire a write lock, but do a read, used by
%% mnesia:wread/1
rwlock(Tid, Store, Oid) ->
@@ -718,7 +718,7 @@ sticky_rwlock(Tid, Store, Oid) ->
sticky_lock(Tid, Store, Oid, read_write).
sticky_lock(Tid, Store, {Tab, Key} = Oid, Lock) ->
- N = val({Tab, where_to_read}),
+ N = val({Tab, where_to_read}),
if
node() == N ->
case need_lock(Store, Tab, Key, write) of
@@ -805,9 +805,9 @@ sticky_wlock_table(Tid, Store, Tab) ->
%% acquire a wlock on Oid
%% We store a {Tabname, write, Tid} in all locktables
%% on all nodes containing a copy of Tabname
-%% We also store an item {{locks, Tab, Key}, write} in the
+%% We also store an item {{locks, Tab, Key}, write} in the
%% local store when we have acquired the lock.
-%%
+%%
wlock(Tid, Store, Oid) ->
wlock(Tid, Store, Oid, _CheckMajority = true).
@@ -845,10 +845,10 @@ wlock_no_exist(Tid, Store, Tab, Ns) ->
need_lock(Store, Tab, Key, LockPattern) ->
TabL = ?ets_match_object(Store, {{locks, Tab, ?ALL}, LockPattern}),
- if
+ if
TabL == [] ->
KeyL = ?ets_match_object(Store, {{locks, Tab, Key}, LockPattern}),
- if
+ if
KeyL == [] ->
yes;
true ->
@@ -865,7 +865,7 @@ del_debug() ->
erase(mnesia_wlock_nodes).
%% We first send lock request to the local node if it is part of the lockers
-%% then the first sorted node then to the rest of the lockmanagers on all
+%% then to the first sorted node, and then to the rest of the lock managers on all
%% nodes holding a copy of the table
get_wlocks_on_nodes([Node | Tail], Orig, Store, Request, Oid) ->
@@ -875,18 +875,18 @@ get_wlocks_on_nodes([Node | Tail], Orig, Store, Request, Oid) ->
case node() of
Node -> %% Local done try one more
get_wlocks_on_nodes(Tail, Orig, Store, Request, Oid);
- _ -> %% The first succeded cont with the rest
+	_ -> %% The first succeeded, continue with the rest
get_wlocks_on_nodes(Tail, Store, Request),
receive_wlocks(Tail, Orig, Store, Oid)
end;
-get_wlocks_on_nodes([], Orig, _Store, _Request, _Oid) ->
+get_wlocks_on_nodes([], Orig, _Store, _Request, _Oid) ->
Orig.
get_wlocks_on_nodes([Node | Tail], Store, Request) ->
{?MODULE, Node} ! Request,
?ets_insert(Store,{nodes, Node}),
get_wlocks_on_nodes(Tail, Store, Request);
-get_wlocks_on_nodes([], _, _) ->
+get_wlocks_on_nodes([], _, _) ->
ok.
get_rwlocks_on_nodes([ReadNode|Tail], _Res, ReadNode, Store, Tid, Oid) ->
@@ -895,7 +895,7 @@ get_rwlocks_on_nodes([ReadNode|Tail], _Res, ReadNode, Store, Tid, Oid) ->
?ets_insert(Store, {nodes, ReadNode}),
Res = receive_wlocks([ReadNode], undefined, Store, Oid),
case node() of
- ReadNode ->
+ ReadNode ->
get_rwlocks_on_nodes(Tail, Res, ReadNode, Store, Tid, Oid);
_ ->
get_wlocks_on_nodes(Tail, Store, {self(), {write, Tid, Oid}}),
@@ -908,7 +908,7 @@ get_rwlocks_on_nodes([Node | Tail], Res, ReadNode, Store, Tid, Oid) ->
receive_wlocks([Node], undefined, Store, Oid),
if node() == Node ->
get_rwlocks_on_nodes(Tail, Res, ReadNode, Store, Tid, Oid);
- Res == rwlock -> %% Hmm
+ Res == rwlock -> %% Hmm
Rest = lists:delete(ReadNode, Tail),
Op2 = {self(), {read_write, Tid, Oid}},
{?MODULE, ReadNode} ! Op2,
@@ -944,8 +944,8 @@ receive_wlocks(Nodes = [This|Ns], Res, Store, Oid) ->
Tail = lists:delete(Node,Nodes),
Nonstuck = lists:delete(Sticky,Tail),
[?ets_insert(Store, {nodes, NSNode}) || NSNode <- Nonstuck],
- case lists:member(Sticky,Tail) of
- true ->
+ case lists:member(Sticky,Tail) of
+ true ->
sticky_flush(Nonstuck,Store),
receive_wlocks([Sticky], Res, Store, Oid);
false ->
@@ -957,7 +957,7 @@ receive_wlocks(Nodes = [This|Ns], Res, Store, Oid) ->
flush_remaining(Ns, This, Reason1)
end.
-sticky_flush([], _) ->
+sticky_flush([], _) ->
del_debug(),
ok;
sticky_flush(Ns=[Node | Tail], Store) ->
@@ -991,7 +991,7 @@ opt_lookup_in_client(lookup_in_client, Oid, Lock) ->
%% Table has been deleted from this node,
%% restart the transaction.
#cyclic{op = read, lock = Lock, oid = Oid, lucky = nowhere};
- Val ->
+ Val ->
Val
end;
opt_lookup_in_client(Val, _Oid, _Lock) ->
@@ -1000,8 +1000,8 @@ opt_lookup_in_client(Val, _Oid, _Lock) ->
return_granted_or_nodes({_, ?ALL} , Nodes) -> Nodes;
return_granted_or_nodes({?GLOBAL, _}, Nodes) -> Nodes;
return_granted_or_nodes(_ , _Nodes) -> granted.
-
-%% We store a {Tab, read, From} item in the
+
+%% We store a {Tab, read, From} item in the
%% locks table on the node where we actually do pick up the object
%% and we also store an item {lock, Oid, read} in our local store
%% so that we can release any locks we hold when we commit.
@@ -1059,9 +1059,9 @@ rlock_get_reply(Node, Store, Oid, {granted, V}) ->
?ets_insert(Store, {{locks, Tab, Key}, read}),
?ets_insert(Store, {nodes, Node}),
case opt_lookup_in_client(V, Oid, read) of
- C = #cyclic{} ->
+ C = #cyclic{} ->
mnesia:abort(C);
- Val ->
+ Val ->
Val
end;
rlock_get_reply(Node, Store, Oid, granted) ->
@@ -1079,7 +1079,7 @@ rlock_get_reply(Node, Store, Tab, {granted, V, RealKeys}) ->
rlock_get_reply(_Node, _Store, _Oid, {not_granted, Reason}) ->
exit({aborted, Reason});
-rlock_get_reply(_Node, Store, Oid, {switch, N2, Req}) ->
+rlock_get_reply(_Node, Store, Oid, {switch, N2, Req}) ->
?ets_insert(Store, {nodes, N2}),
{?MODULE, N2} ! Req,
rlock_get_reply(N2, Store, Oid, l_req_rec(N2, Store)).
@@ -1095,7 +1095,7 @@ ixrlock(Tid, Store, Tab, IxKey, Pos) ->
%%% Old code
%% R = l_request(Node, {ix_read, Tid, Tab, IxKey, Pos}, Store),
%% rlock_get_reply(Node, Store, Tab, R)
-
+
case need_lock(Store, Tab, ?ALL, read) of
no when Node =:= node() ->
ix_read_res(Tab,IxKey,Pos);
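
Taken together, the mnesia_locker.erl hunks above revolve around one three-way outcome for a lock request: granted now, kept in the queue, or not granted with a cyclic record naming the transaction it lost against so the caller can restart. A toy function with the same shape (the record and module below are demo stand-ins, not mnesia's internal #cyclic{}):

%% Demo of the granted / queued / not_granted outcomes seen in try_waiter/6.
-module(lock_decision_demo).
-export([decide/2]).

-record(cycle, {oid, lucky}).

decide(yes, Oid) ->
    {granted, Oid};
decide({queue, _Why}, _Oid) ->
    queued;                                  % leave the waiter in the queue
decide({no, Lucky}, Oid) ->
    %% Deadlock suspected: tell the caller to restart, naming the winner.
    {not_granted, #cycle{oid = Oid, lucky = Lucky}}.
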
diff --git a/lib/mnesia/src/mnesia_log.erl b/lib/mnesia/src/mnesia_log.erl
index 94153473cb..378b82ab89 100644
--- a/lib/mnesia/src/mnesia_log.erl
+++ b/lib/mnesia/src/mnesia_log.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -180,8 +180,8 @@
view/1,
write_trans_log_header/0
]).
-
-
+
+
-compile({no_auto_import,[error/2]}).
-include("mnesia.hrl").
@@ -210,7 +210,7 @@ decision_tab_version() -> "1.0".
dcl_version() -> "1.0".
dcd_version() -> "1.0".
-
+
append(Log, Bin) when is_binary(Bin) ->
disk_log:balog(Log, Bin);
append(Log, Term) ->
@@ -218,9 +218,9 @@ append(Log, Term) ->
%% Synced append
sappend(Log, Bin) when is_binary(Bin) ->
- ok = disk_log:blog(Log, Bin);
+ ok = disk_log:blog(Log, Bin);
sappend(Log, Term) ->
- ok = disk_log:log(Log, Term).
+ ok = disk_log:log(Log, Term).
%% Write commit records to the latest_log
log(C) when C#commit.disc_copies == [],
@@ -283,7 +283,7 @@ previous_log_file() -> dir("PREVIOUS.LOG").
decision_log_file() -> dir(decision_log_name()).
decision_tab_file() -> dir(decision_tab_name()).
-
+
previous_decision_log_file() -> dir("PDECISION.LOG").
latest_log_name() -> "LATEST.LOG".
@@ -297,10 +297,10 @@ init() ->
true ->
Prev = previous_log_file(),
verify_no_exists(Prev),
-
+
Latest = latest_log_file(),
verify_no_exists(Latest),
-
+
Header = trans_log_header(),
open_log(latest_log, Header, Latest);
false ->
@@ -346,20 +346,20 @@ open_log(Name, Header, Fname, Exists, Repair, Mode) ->
write_header(Log, Header),
Log;
{repaired, Log, _Recover, BadBytes} ->
- mnesia_lib:important("Data may be missing, log ~p repaired: Lost ~p bytes~n",
+ mnesia_lib:important("Data may be missing, log ~p repaired: Lost ~p bytes~n",
[Fname, BadBytes]),
Log;
{error, Reason} when Repair == true ->
file:delete(Fname),
- mnesia_lib:important("Data may be missing, Corrupt logfile deleted: ~p, ~p ~n",
+ mnesia_lib:important("Data may be missing, Corrupt logfile deleted: ~p, ~p ~n",
[Fname, Reason]),
- %% Create a new
+ %% Create a new log file
open_log(Name, Header, Fname, false, false, read_write);
{error, Reason} ->
fatal("Cannot open log file ~p: ~p~n", [Fname, Reason])
end.
-write_header(Log, Header) ->
+write_header(Log, Header) ->
append(Log, Header).
write_trans_log_header() ->
@@ -376,12 +376,12 @@ stop() ->
close_log(Log) ->
%% io:format("mnesia_log:close_log ~p~n", [Log]),
%% io:format("mnesia_log:close_log ~p~n", [Log]),
- case disk_log:sync(Log) of
+ case disk_log:sync(Log) of
ok -> ok;
- {error, {read_only_mode, Log}} ->
+ {error, {read_only_mode, Log}} ->
ok;
- {error, Reason} ->
- mnesia_lib:important("Failed syncing ~p to_disk reason ~p ~n",
+ {error, Reason} ->
+ mnesia_lib:important("Failed syncing ~p to_disk reason ~p ~n",
[Log, Reason])
end,
mnesia_monitor:close_log(Log).
@@ -392,7 +392,7 @@ unsafe_close_log(Log) ->
purge_some_logs() ->
- mnesia_monitor:unsafe_close_log(latest_log),
+ mnesia_monitor:unsafe_close_log(latest_log),
file:delete(latest_log_file()),
file:delete(decision_tab_file()).
@@ -466,10 +466,10 @@ chunk_log(Log, Cont) ->
[Log, Reason]);
{C2, Chunk, _BadBytes} ->
%% Read_only case, should we warn about the bad log file?
- %% BUGBUG Should we crash if Repair == false ??
+ %% BUGBUG Should we crash if Repair == false ??
%% We got to check this !!
mnesia_lib:important("~p repaired, lost ~p bad bytes~n", [Log, _BadBytes]),
- {C2, Chunk};
+ {C2, Chunk};
Other ->
Other
end.
@@ -492,7 +492,7 @@ open_decision_log() ->
Latest = decision_log_file(),
open_log(decision_log, decision_log_header(), Latest),
start.
-
+
prepare_decision_log_dump() ->
Prev = previous_decision_log_file(),
prepare_decision_log_dump(exists(Prev), Prev).
@@ -586,11 +586,11 @@ view_file(C, Log) ->
eof;
{C2, Terms, _BadBytes} ->
dbg_out("Lost ~p bytes in ~p ~n", [_BadBytes, Log]),
- lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
+ lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
Terms),
view_file(C2, Log);
{C2, Terms} ->
- lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
+ lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
Terms),
view_file(C2, Log)
end.
@@ -655,7 +655,7 @@ check_backup_args([Arg | Tail], B) ->
check_backup_args([], B) ->
{ok, B}.
-check_backup_arg_type(Arg, B) ->
+check_backup_arg_type(Arg, B) ->
case Arg of
{scope, global} ->
B#backup_args{scope = global};
@@ -714,7 +714,7 @@ select_tables(AllTabs, B) ->
safe_write(B, []) ->
B;
-safe_write(B, Recs) ->
+safe_write(B, Recs) ->
safe_apply(B, write, [B#backup_args.opaque, Recs]).
backup_schema(B, Tabs) ->
@@ -754,7 +754,7 @@ abort_write(B, What, Args, Reason) ->
[Mod, abort_write, [Opaque], Other]),
throw({error, Reason})
end.
-
+
backup_tab(Tab, B) ->
Name = B#backup_args.name,
case mnesia_checkpoint:most_local_node(Name, Tab) of
@@ -768,7 +768,7 @@ backup_tab(Tab, B) ->
{error, Reason} ->
abort_write(B, {?MODULE, backup_tab}, [Tab, B], {error, Reason})
end.
-
+
tab_copier(Pid, B, Tab) when is_record(B, backup_args) ->
%% Intentional crash at exit
Name = B#backup_args.name,
@@ -829,7 +829,7 @@ handle_last(Pid, _Acc) ->
exit(normal).
iterate(B, Name, Tab, Pid, Source, Age, Pass, Acc) ->
- Fun =
+ Fun =
if
Pid == self() ->
RecName = val({Tab, record_name}),
@@ -874,7 +874,7 @@ tab_receiver(Pid, B, Tab, RecName, Slot) ->
Recs2 = rec_filter(B, Tab, RecName, Recs),
B2 = safe_write(B, Recs2),
tab_receiver(Pid, B2, Tab, RecName, Next);
-
+
{Pid, {last, {ok,_}}} ->
B;
@@ -885,7 +885,7 @@ tab_receiver(Pid, B, Tab, RecName, Slot) ->
Reason = {error, {"Tab copier crashed", {'EXIT', R}}},
abort_write(B, {?MODULE, remote_tab_sender}, [self(), B, Tab], Reason);
Msg ->
- R = {error, {"Tab receiver got unexpected msg", Msg}},
+ R = {error, {"Tab receiver got unexpected msg", Msg}},
abort_write(B, {?MODULE, remote_tab_sender}, [self(), B, Tab], R)
end.
@@ -910,9 +910,9 @@ ets2dcd(Tab, Ftype) ->
case Ftype of
dcd -> mnesia_lib:tab2dcd(Tab);
dmp -> mnesia_lib:tab2dmp(Tab)
- end,
+ end,
TmpF = mnesia_lib:tab2tmp(Tab),
- file:delete(TmpF),
+ file:delete(TmpF),
Log = open_log({Tab, ets2dcd}, dcd_log_header(), TmpF, false),
mnesia_lib:db_fixtable(ram_copies, Tab, true),
ok = ets2dcd(mnesia_lib:db_init_chunk(ram_copies, Tab, 1000), Tab, Log),
@@ -926,8 +926,8 @@ ets2dcd(Tab, Ftype) ->
ets2dcd('$end_of_table', _Tab, _Log) ->
ok;
-ets2dcd({Recs, Cont}, Tab, Log) ->
- ok = disk_log:alog_terms(Log, Recs),
+ets2dcd({Recs, Cont}, Tab, Log) ->
+ ok = disk_log:alog_terms(Log, Recs),
ets2dcd(mnesia_lib:db_chunk(ram_copies, Cont), Tab, Log).
dcd2ets(Tab) ->
@@ -937,12 +937,12 @@ dcd2ets(Tab, Rep) ->
Dcd = mnesia_lib:tab2dcd(Tab),
case mnesia_lib:exists(Dcd) of
true ->
- Log = open_log({Tab, dcd2ets}, dcd_log_header(), Dcd,
+ Log = open_log({Tab, dcd2ets}, dcd_log_header(), Dcd,
true, Rep, read_only),
Data = chunk_log(Log, start),
ok = insert_dcdchunk(Data, Log, Tab),
close_log(Log),
- load_dcl(Tab, Rep);
+ load_dcl(Tab, Rep);
false -> %% Handle old dets files, and conversion from disc_only to disc.
Fname = mnesia_lib:tab2dat(Tab),
Type = val({Tab, setorbag}),
@@ -956,13 +956,13 @@ dcd2ets(Tab, Rep) ->
end
end.
-insert_dcdchunk({Cont, [LogH | Rest]}, Log, Tab)
- when is_record(LogH, log_header),
- LogH#log_header.log_kind == dcd_log,
- LogH#log_header.log_version >= "1.0" ->
- insert_dcdchunk({Cont, Rest}, Log, Tab);
+insert_dcdchunk({Cont, [LogH | Rest]}, Log, Tab)
+ when is_record(LogH, log_header),
+ LogH#log_header.log_kind == dcd_log,
+ LogH#log_header.log_version >= "1.0" ->
+ insert_dcdchunk({Cont, Rest}, Log, Tab);
-insert_dcdchunk({Cont, Recs}, Log, Tab) ->
+insert_dcdchunk({Cont, Recs}, Log, Tab) ->
true = ets:insert(Tab, Recs),
insert_dcdchunk(chunk_log(Log, Cont), Log, Tab);
insert_dcdchunk(eof, _Log, _Tab) ->
@@ -971,13 +971,13 @@ insert_dcdchunk(eof, _Log, _Tab) ->
load_dcl(Tab, Rep) ->
FName = mnesia_lib:tab2dcl(Tab),
case mnesia_lib:exists(FName) of
- true ->
+ true ->
Name = {load_dcl,Tab},
- open_log(Name,
- dcl_log_header(),
- FName,
+ open_log(Name,
+ dcl_log_header(),
+ FName,
true,
- Rep,
+ Rep,
read_only),
FirstChunk = chunk_log(Name, start),
N = insert_logchunk(FirstChunk, Name, 0),
@@ -1015,10 +1015,10 @@ add_recs([{{Tab, Key}, Val, update_counter} | Rest], N) ->
true = ets:insert(Tab, Zero)
end,
add_recs(Rest, N+1);
-add_recs([LogH|Rest], N)
- when is_record(LogH, log_header),
- LogH#log_header.log_kind == dcl_log,
- LogH#log_header.log_version >= "1.0" ->
+add_recs([LogH|Rest], N)
+ when is_record(LogH, log_header),
+ LogH#log_header.log_kind == dcl_log,
+ LogH#log_header.log_version >= "1.0" ->
add_recs(Rest, N);
add_recs([{{Tab, _Key}, _Val, clear_table} | Rest], N) ->
Size = ets:info(Tab, size),
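
The ets2dcd/dcd2ets hunks above revolve around the disk_log chunk API: the table is dumped with alog_terms/2 and read back with chunk/2 until eof. A small self-contained round trip under a temporary file (demo names only, and without mnesia's log header records or repair handling):

%% Round-trip an ets table through a disk_log, in the spirit of
%% ets2dcd/2 and dcd2ets/2 above. Demo only: no headers, no repair.
-module(dcd_roundtrip_demo).
-export([demo/0]).

demo() ->
    File = "demo_table.DCD",
    Tab = ets:new(demo_tab, [set, public]),
    true = ets:insert(Tab, [{a, 1}, {b, 2}, {c, 3}]),

    %% Dump: append all objects to the log (async, like ets2dcd/3).
    {ok, Log} = disk_log:open([{name, demo_log}, {file, File}]),
    ok = disk_log:alog_terms(Log, ets:tab2list(Tab)),
    ok = disk_log:close(Log),

    %% Load: read the log back chunk by chunk (like insert_dcdchunk/3).
    Copy = ets:new(demo_copy, [set, public]),
    {ok, Log2} = disk_log:open([{name, demo_log2}, {file, File}, {mode, read_only}]),
    ok = load_chunks(disk_log:chunk(Log2, start), Log2, Copy),
    ok = disk_log:close(Log2),
    ok = file:delete(File),
    lists:sort(ets:tab2list(Copy)).

load_chunks(eof, _Log, _Tab) ->
    ok;
load_chunks({Cont, Terms}, Log, Tab) ->
    true = ets:insert(Tab, Terms),
    load_chunks(disk_log:chunk(Log, Cont), Log, Tab).
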
diff --git a/lib/mnesia/src/mnesia_tm.erl b/lib/mnesia/src/mnesia_tm.erl
index f62f7cb7c8..0af7f55c06 100644
--- a/lib/mnesia/src/mnesia_tm.erl
+++ b/lib/mnesia/src/mnesia_tm.erl
@@ -36,7 +36,7 @@
prepare_checkpoint/2,
prepare_checkpoint/1, % Internal
prepare_snmp/3,
- do_snmp/2,
+ do_snmp/2,
put_activity_id/1,
put_activity_id/2,
block_tab/1,
@@ -68,7 +68,7 @@
majority = []
}).
--record(participant, {tid, pid, commit, disc_nodes = [],
+-record(participant, {tid, pid, commit, disc_nodes = [],
ram_nodes = [], protocol = sym_trans}).
start() ->
@@ -77,12 +77,12 @@ start() ->
init(Parent) ->
register(?MODULE, self()),
process_flag(trap_exit, true),
-
+
%% Initialize the schema
IgnoreFallback = mnesia_monitor:get_env(ignore_fallback_at_startup),
mnesia_bup:tm_fallback_start(IgnoreFallback),
mnesia_schema:init(IgnoreFallback),
-
+
%% Handshake and initialize transaction recovery
mnesia_recover:init(),
Early = mnesia_monitor:init(),
@@ -101,11 +101,11 @@ init(Parent) ->
false ->
ignore
end,
-
+
mnesia_schema:purge_tmp_files(),
mnesia_recover:start_garb(),
-
- ?eval_debug_fun({?MODULE, init}, [{nodes, AllOthers}]),
+
+ ?eval_debug_fun({?MODULE, init}, [{nodes, AllOthers}]),
case val(debug) of
Debug when Debug /= debug, Debug /= trace ->
@@ -118,8 +118,8 @@ init(Parent) ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
- _VaLuE_ -> _VaLuE_
+ {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
+ _VaLuE_ -> _VaLuE_
end.
reply({From,Ref}, R) ->
@@ -136,7 +136,7 @@ req(R) ->
undefined ->
{error, {node_not_running, node()}};
Pid ->
- Ref = make_ref(),
+ Ref = make_ref(),
Pid ! {{self(), Ref}, R},
rec(Pid, Ref)
end.
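
The req/rec pair above is the usual registered-process request/reply idiom: tag the request with a fresh reference and wait for a reply carrying the same reference (or an exit from the server). A self-contained sketch of the same idiom outside mnesia, with the process and message shapes simplified for the demo:

%% Minimal request/reply with a unique reference, in the style of
%% mnesia_tm's req/1 and rec/2. Demo only.
-module(req_reply_demo).
-export([demo/0]).

demo() ->
    Server = spawn(fun loop/0),
    Ref = make_ref(),
    Server ! {{self(), Ref}, ping},
    receive
        {Ref, Reply} -> Reply
    after 1000 ->
        {error, timeout}
    end.

loop() ->
    receive
        {{From, Ref}, ping} ->
            From ! {Ref, pong},
            loop()
    end.
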
@@ -161,7 +161,7 @@ rec(Pid, Ref) ->
Reply;
{'EXIT', Pid, _} ->
{error, {node_not_running, node()}}
- end.
+ end.
tmlink({From, Ref}) when is_reference(Ref) ->
link(From);
@@ -209,7 +209,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
State2 = State#state{dirty_queue = [Item | State#state.dirty_queue]},
doit_loop(State2)
end;
-
+
{From, {sync_dirty, Tid, Commit, Tab}} ->
case lists:member(Tab, State#state.blocked_tabs) of
false ->
@@ -220,7 +220,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
State2 = State#state{dirty_queue = [Item | State#state.dirty_queue]},
doit_loop(State2)
end;
-
+
{From, start_outer} -> %% Create and associate ets_tab with Tid
case catch ?ets_new_table(mnesia_trans_store, [bag, public]) of
{'EXIT', Reason} -> %% system limit
@@ -236,16 +236,16 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
S2 = State#state{coordinators = A2},
reply(From, {new_tid, Tid, Etab}, S2)
end;
-
+
{From, {ask_commit, Protocol, Tid, Commit, DiscNs, RamNs}} ->
- ?eval_debug_fun({?MODULE, doit_ask_commit},
+ ?eval_debug_fun({?MODULE, doit_ask_commit},
[{tid, Tid}, {prot, Protocol}]),
mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
- Pid =
+ Pid =
case Protocol of
asym_trans when node(Tid#tid.pid) /= node() ->
Args = [tmpid(From), Tid, Commit, DiscNs, RamNs],
- spawn_link(?MODULE, commit_participant, Args);
+ spawn_link(?MODULE, commit_participant, Args);
_ when node(Tid#tid.pid) /= node() -> %% *_sym_trans
reply(From, {vote_yes, Tid}),
nopid
@@ -258,7 +258,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
protocol = Protocol},
State2 = State#state{participants = gb_trees:insert(Tid,P,Participants)},
doit_loop(State2);
-
+
{Tid, do_commit} ->
case gb_trees:lookup(Tid, Participants) of
none ->
@@ -272,14 +272,14 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
Member = lists:member(node(), P#participant.disc_nodes),
if Member == false ->
ignore;
- P#participant.protocol == sym_trans ->
+ P#participant.protocol == sym_trans ->
mnesia_log:log(Commit);
- P#participant.protocol == sync_sym_trans ->
+ P#participant.protocol == sync_sym_trans ->
mnesia_log:slog(Commit)
end,
mnesia_recover:note_decision(Tid, committed),
do_commit(Tid, Commit),
- if
+ if
P#participant.protocol == sync_sym_trans ->
Tid#tid.pid ! {?MODULE, node(), {committed, Tid}};
true ->
@@ -296,13 +296,13 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
doit_loop(State)
end
end;
-
+
{Tid, simple_commit} ->
mnesia_recover:note_decision(Tid, committed),
mnesia_locker:release_tid(Tid),
transaction_terminated(Tid),
doit_loop(State);
-
+
{Tid, {do_abort, Reason}} ->
?eval_debug_fun({?MODULE, do_abort, pre}, [{tid, Tid}]),
case gb_trees:lookup(Tid, Participants) of
@@ -317,7 +317,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
Commit = P#participant.commit,
mnesia_recover:note_decision(Tid, aborted),
do_abort(Tid, Commit),
- if
+ if
P#participant.protocol == sync_sym_trans ->
Tid#tid.pid ! {?MODULE, node(), {aborted, Tid}};
true ->
@@ -335,7 +335,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
doit_loop(State)
end
end;
-
+
{From, {add_store, Tid}} -> %% new store for nested transaction
case catch ?ets_new_table(mnesia_trans_store, [bag, public]) of
{'EXIT', Reason} -> %% system limit
@@ -355,14 +355,14 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
{'EXIT', Pid, Reason} ->
handle_exit(Pid, Reason, State);
-
+
{From, {restart, Tid, Store}} ->
A2 = restore_stores(Coordinators, Tid, Store),
clear_fixtable([Store]),
?ets_match_delete(Store, '_'),
?ets_insert(Store, {nodes, node()}),
reply(From, {restarted, Tid}, State#state{coordinators = A2});
-
+
{delete_transaction, Tid} ->
%% used to clear transactions which are committed
%% in coordinator or participant processes
@@ -377,7 +377,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
clear_fixtable(Etabs),
erase_ets_tabs(Etabs),
transaction_terminated(Tid),
- doit_loop(State#state{coordinators =
+ doit_loop(State#state{coordinators =
gb_trees:delete(Tid,Coordinators)})
end;
true ->
@@ -385,20 +385,20 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
State2 = State#state{participants=gb_trees:delete(Tid,Participants)},
doit_loop(State2)
end;
-
+
{sync_trans_serial, Tid} ->
%% Do the Lamport thing here
mnesia_recover:sync_trans_tid_serial(Tid),
doit_loop(State);
-
+
{From, info} ->
- reply(From, {info, gb_trees:values(Participants),
+ reply(From, {info, gb_trees:values(Participants),
gb_trees:to_list(Coordinators)}, State);
-
+
{mnesia_down, N} ->
verbose("Got mnesia_down from ~p, reconfiguring...~n", [N]),
reconfigure_coordinators(N, gb_trees:to_list(Coordinators)),
-
+
Tids = gb_trees:keys(Participants),
reconfigure_participants(N, gb_trees:values(Participants)),
NewState = clear_fixtable(N, State),
@@ -408,34 +408,34 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
{From, {unblock_me, Tab}} ->
case lists:member(Tab, State#state.blocked_tabs) of
false ->
- verbose("Wrong dirty Op blocked on ~p ~p ~p",
+ verbose("Wrong dirty Op blocked on ~p ~p ~p",
[node(), Tab, From]),
reply(From, unblocked),
doit_loop(State);
true ->
- Item = {Tab, unblock_me, From},
+ Item = {Tab, unblock_me, From},
State2 = State#state{dirty_queue = [Item | State#state.dirty_queue]},
doit_loop(State2)
- end;
-
+ end;
+
{From, {block_tab, Tab}} ->
State2 = State#state{blocked_tabs = [Tab | State#state.blocked_tabs]},
reply(From, ok, State2);
-
+
{From, {unblock_tab, Tab}} ->
BlockedTabs2 = State#state.blocked_tabs -- [Tab],
case lists:member(Tab, BlockedTabs2) of
false ->
mnesia_controller:unblock_table(Tab),
Queue = process_dirty_queue(Tab, State#state.dirty_queue),
- State2 = State#state{blocked_tabs = BlockedTabs2,
+ State2 = State#state{blocked_tabs = BlockedTabs2,
dirty_queue = Queue},
reply(From, ok, State2);
true ->
State2 = State#state{blocked_tabs = BlockedTabs2},
reply(From, ok, State2)
end;
-
+
{From, {prepare_checkpoint, Cp}} ->
Res = mnesia_checkpoint:tm_prepare(Cp),
case Res of
@@ -448,18 +448,18 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
reply(From, Res, State);
{From, {fixtable, [Tab,Lock,Requester]}} ->
case ?catch_val({Tab, storage_type}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
reply(From, error, State);
Storage ->
mnesia_lib:db_fixtable(Storage,Tab,Lock),
NewState = manage_fixtable(Tab,Lock,Requester,State),
reply(From, node(), NewState)
end;
-
+
{system, From, Msg} ->
dbg_out("~p got {system, ~p, ~p}~n", [?MODULE, From, Msg]),
sys:handle_system_msg(Msg, From, Sup, ?MODULE, [], State);
-
+
Msg ->
verbose("** ERROR ** ~p got unexpected message: ~p~n", [?MODULE, Msg]),
doit_loop(State)
@@ -508,7 +508,7 @@ prepare_pending_coordinators([{Tid, [Store | _Etabs]} | Coords], IgnoreNew) ->
ignore
end,
prepare_pending_coordinators(Coords, IgnoreNew);
- {'EXIT', _} ->
+ {'EXIT', _} ->
prepare_pending_coordinators(Coords, IgnoreNew)
end;
prepare_pending_coordinators([], _IgnoreNew) ->
@@ -538,7 +538,7 @@ handle_exit(Pid, _Reason, State) when Pid == State#state.supervisor ->
handle_exit(Pid, Reason, State) ->
%% Check if it is a coordinator
- case pid_search_delete(Pid, gb_trees:to_list(State#state.coordinators)) of
+ case pid_search_delete(Pid, gb_trees:to_list(State#state.coordinators)) of
{none, _} ->
%% Check if it is a participant
Ps = gb_trees:values(State#state.participants),
@@ -552,9 +552,9 @@ handle_exit(Pid, Reason, State) ->
NewPs = gb_trees:delete(P#participant.tid,State#state.participants),
doit_loop(State#state{participants = NewPs})
end;
-
+
{{Tid, Etabs}, RestC} ->
- %% A local coordinator has died and
+ %% A local coordinator has died and
%% we must determine the outcome of the
%% transaction and tell mnesia_tm on the
%% other nodes about it and then recover
@@ -578,7 +578,7 @@ recover_coordinator(Tid, Etabs) ->
%% Tell the participants about the outcome
Protocol = Prep#prep.protocol,
Outcome = tell_outcome(Tid, Protocol, node(), CheckNodes, TellNodes),
-
+
%% Recover locally
CR = Prep#prep.records,
{DiscNs, RamNs} = commit_nodes(CR, [], []),
@@ -589,7 +589,7 @@ recover_coordinator(Tid, Etabs) ->
recover_coordinator(Tid, Protocol, Outcome, Local, DiscNs, RamNs),
?eval_debug_fun({?MODULE, recover_coordinator, post},
[{tid, Tid}, {outcome, Outcome}, {prot, Protocol}]);
- false -> %% When killed before store havn't been copied to
+ false -> %% When killed before the store has been copied
ok %% to the new nested trans store.
end
end,
@@ -610,12 +610,12 @@ recover_coordinator(Tid, sync_sym_trans, aborted, _Local, _, _) ->
recover_coordinator(Tid, asym_trans, committed, Local, DiscNs, RamNs) ->
D = #decision{tid = Tid, outcome = committed,
- disc_nodes = DiscNs, ram_nodes = RamNs},
+ disc_nodes = DiscNs, ram_nodes = RamNs},
mnesia_recover:log_decision(D),
do_commit(Tid, Local);
recover_coordinator(Tid, asym_trans, aborted, Local, DiscNs, RamNs) ->
D = #decision{tid = Tid, outcome = aborted,
- disc_nodes = DiscNs, ram_nodes = RamNs},
+ disc_nodes = DiscNs, ram_nodes = RamNs},
mnesia_recover:log_decision(D),
do_abort(Tid, Local).
@@ -631,7 +631,7 @@ add_coord_store(Coords, Tid, Etab) ->
del_coord_store(Coords, Tid, Current, Obsolete) ->
Stores = gb_trees:get(Tid, Coords),
- Rest =
+ Rest =
case Stores of
[Obsolete, Current | Tail] -> Tail;
[Current, Obsolete | Tail] -> Tail
@@ -642,14 +642,14 @@ del_coord_store(Coords, Tid, Current, Obsolete) ->
erase_ets_tabs([H | T]) ->
?ets_delete_table(H),
erase_ets_tabs(T);
-erase_ets_tabs([]) ->
+erase_ets_tabs([]) ->
ok.
%% Clear one transactions all fixtables
clear_fixtable([Store|_]) ->
Fixed = get_elements(fixtable, Store),
lists:foreach(fun({Tab,Node}) ->
- rpc:cast(Node, ?MODULE, fixtable, [Tab,false,self()])
+ rpc:cast(Node, ?MODULE, fixtable, [Tab,false,self()])
end, Fixed).
%% Clear all fixtable Node have done
@@ -661,7 +661,7 @@ clear_fixtable(Node, State=#state{fixed_tabs = FT0}) ->
lists:foreach(
fun(Tab) ->
case ?catch_val({Tab, storage_type}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
ignore;
Storage ->
mnesia_lib:db_fixtable(Storage,Tab,false)
@@ -680,9 +680,9 @@ manage_fixtable(Tab,true,Requester,State=#state{fixed_tabs = FT0}) ->
end;
manage_fixtable(Tab,false,Requester,State = #state{fixed_tabs = FT0}) ->
Node = node(Requester),
- case mnesia_lib:key_search_delete(Node, 1, FT0) of
+ case mnesia_lib:key_search_delete(Node, 1, FT0) of
{none,_FT} -> State; % Hmm? Safeguard
- {{Node, Tabs0},FT} ->
+ {{Node, Tabs0},FT} ->
case lists:delete(Tab, Tabs0) of
[] -> State#state{fixed_tabs=FT};
Tabs -> State#state{fixed_tabs=[{Node,Tabs}|FT]}
@@ -691,7 +691,7 @@ manage_fixtable(Tab,false,Requester,State = #state{fixed_tabs = FT0}) ->
%% Deletes a pid from a list of participants
%% or from a gb_trees of coordinators
-%% {none, All} or {Tr, Rest}
+%% {none, All} or {Tr, Rest}
pid_search_delete(Pid, Trs) ->
pid_search_delete(Pid, Trs, none, []).
pid_search_delete(Pid, [Tr = {Tid, _Ts} | Trs], _Val, Ack) when Tid#tid.pid == Pid ->
@@ -701,7 +701,7 @@ pid_search_delete(Pid, [Tr | Trs], Val, Ack) ->
pid_search_delete(_Pid, [], Val, Ack) ->
{Val, gb_trees:from_orddict(lists:reverse(Ack))}.
-
+
transaction_terminated(Tid) ->
mnesia_checkpoint:tm_exit_pending(Tid),
Pid = Tid#tid.pid,
@@ -713,14 +713,14 @@ transaction_terminated(Tid) ->
end.
%% If there are an surrounding transaction, we inherit it's context
-non_transaction(OldState={_,_,Trans}, Fun, Args, ActivityKind, Mod)
+non_transaction(OldState={_,_,Trans}, Fun, Args, ActivityKind, Mod)
when Trans /= non_transaction ->
- Kind = case ActivityKind of
+ Kind = case ActivityKind of
sync_dirty -> sync;
_ -> async
end,
case transaction(OldState, Fun, Args, infinity, Mod, Kind) of
- {atomic, Res} ->
+ {atomic, Res} ->
Res;
{aborted,Res} ->
exit(Res)
@@ -766,7 +766,7 @@ transaction(OldTidTs, Fun, Args, Retries, Mod, Type) ->
execute_outer(Mod, Fun, Args, Factor, Retries, Type) ->
case req(start_outer) of
- {error, Reason} ->
+ {error, Reason} ->
{aborted, Reason};
{new_tid, Tid, Store} ->
Ts = #tidstore{store = Store},
@@ -792,7 +792,7 @@ execute_inner(Mod, Tid, OldMod, Ts, Fun, Args, Factor, Retries, Type) ->
copy_ets(From, To) ->
do_copy_ets(?ets_first(From), From, To).
-do_copy_ets('$end_of_table', _,_) ->
+do_copy_ets('$end_of_table', _,_) ->
ok;
do_copy_ets(K, From, To) ->
Objs = ?ets_lookup(From, K),
@@ -813,7 +813,7 @@ execute_transaction(Fun, Args, Factor, Retries, Type) ->
mnesia_lib:incr_counter(trans_commits),
erase(mnesia_activity_state),
%% no need to clear locks, already done by commit ...
- %% Flush any un processed mnesia_down messages we might have
+ %% Flush any unprocessed mnesia_down messages we might have
flush_downs(),
catch unlink(whereis(?MODULE)),
{atomic, Value};
@@ -846,7 +846,7 @@ check_exit(Fun, Args, Factor, Retries, Reason, Type) ->
maybe_restart(Fun, Args, Factor, Retries, Type, {node_not_running, N});
{aborted, {bad_commit, N}} ->
maybe_restart(Fun, Args, Factor, Retries, Type, {bad_commit, N});
- _ ->
+ _ ->
return_abort(Fun, Args, Reason)
end.
@@ -888,11 +888,11 @@ restart(Mod, Tid, Ts, Fun, Args, Factor0, Retries0, Type, Why) ->
SleepTime = mnesia_lib:random_time(Factor, Tid#tid.counter),
dbg_out("Restarting transaction ~w: in ~wms ~w~n", [Tid, SleepTime, Why]),
timer:sleep(SleepTime),
- execute_outer(Mod, Fun, Args, Factor, Retries, Type);
+ execute_outer(Mod, Fun, Args, Factor, Retries, Type);
_ ->
SleepTime = mnesia_lib:random_time(Factor0, Tid#tid.counter),
dbg_out("Restarting transaction ~w: in ~wms ~w~n", [Tid, SleepTime, Why]),
-
+
if
Factor0 /= 10 ->
ignore;
@@ -911,7 +911,7 @@ restart(Mod, Tid, Ts, Fun, Args, Factor0, Retries0, Type, Why) ->
mnesia_locker:receive_release_tid_acc(Nodes, Tid),
case get_restarted(Tid) of
{restarted, Tid} ->
- execute_transaction(Fun, Args, Factor0 + 1,
+ execute_transaction(Fun, Args, Factor0 + 1,
Retries, Type);
{error, Reason} ->
mnesia:abort(Reason)
@@ -934,7 +934,7 @@ decr(_X) -> 0.
return_abort(Fun, Args, Reason) ->
{_Mod, Tid, Ts} = get(mnesia_activity_state),
- dbg_out("Transaction ~p calling ~p with ~p failed: ~n ~p~n",
+ dbg_out("Transaction ~p calling ~p with ~p failed: ~n ~p~n",
[Tid, Fun, Args, Reason]),
OldStore = Ts#tidstore.store,
Nodes = get_elements(nodes, OldStore),
@@ -945,7 +945,7 @@ return_abort(Fun, Args, Reason) ->
Level == 1 ->
mnesia_locker:async_release_tid(Nodes, Tid),
?MODULE ! {delete_transaction, Tid},
- erase(mnesia_activity_state),
+ erase(mnesia_activity_state),
flush_downs(),
catch unlink(whereis(?MODULE)),
{aborted, mnesia_lib:fix_error(Reason)};
@@ -958,14 +958,14 @@ return_abort(Fun, Args, Reason) ->
level = Level - 1},
NewTidTs = {OldMod, Tid, Ts2},
put(mnesia_activity_state, NewTidTs),
- case Reason of
+ case Reason of
#cyclic{} ->
exit({aborted, Reason});
- {node_not_running, _N} ->
+ {node_not_running, _N} ->
exit({aborted, Reason});
- {bad_commit, _N}->
+ {bad_commit, _N}->
exit({aborted, Reason});
- _ ->
+ _ ->
{aborted, mnesia_lib:fix_error(Reason)}
end
end.
@@ -982,10 +982,10 @@ put_activity_id(MTT) ->
put_activity_id(MTT, undefined).
put_activity_id(undefined,_) ->
erase_activity_id();
-put_activity_id({Mod, Tid = #tid{}, Ts = #tidstore{}},Fun) ->
+put_activity_id({Mod, Tid = #tid{}, Ts = #tidstore{}},Fun) ->
flush_downs(),
Store = Ts#tidstore.store,
- if
+ if
is_function(Fun) ->
?ets_insert(Store, {friends, {stop,Fun}});
true ->
@@ -1000,14 +1000,14 @@ erase_activity_id() ->
flush_downs(),
erase(mnesia_activity_state).
-get_elements(Type,Store) ->
+get_elements(Type,Store) ->
case catch ?ets_lookup(Store, Type) of
[] -> [];
[{_,Val}] -> [Val];
{'EXIT', _} -> [];
Vals -> [Val|| {_,Val} <- Vals]
end.
-
+
opt_propagate_store(_Current, _Obsolete, false) ->
ok;
opt_propagate_store(Current, Obsolete, true) ->
@@ -1030,8 +1030,8 @@ intercept_best_friend([],_) -> ok;
intercept_best_friend([{stop,Fun} | R],Ignore) ->
catch Fun(),
intercept_best_friend(R,Ignore);
-intercept_best_friend([Pid | R],false) ->
- Pid ! {activity_ended, undefined, self()},
+intercept_best_friend([Pid | R],false) ->
+ Pid ! {activity_ended, undefined, self()},
wait_for_best_friend(Pid, 0),
intercept_best_friend(R,true);
intercept_best_friend([_|R],true) ->
@@ -1047,18 +1047,18 @@ wait_for_best_friend(Pid, Timeout) ->
false -> ok
end
end.
-
+
my_process_is_alive(Pid) ->
case catch erlang:is_process_alive(Pid) of % New BIF in R5
- true ->
+ true ->
true;
- false ->
+ false ->
false;
- {'EXIT', _} -> % Pre R5 backward compatibility
+ {'EXIT', _} -> % Pre R5 backward compatibility
case process_info(Pid, message_queue_len) of
undefined -> false;
_ -> true
- end
+ end
end.
dirty(Protocol, Item) ->
@@ -1070,12 +1070,12 @@ dirty(Protocol, Item) ->
async_dirty ->
%% Send commit records to the other involved nodes,
%% but do only wait for one node to complete.
- %% Preferrably, the local node if possible.
-
+ %% Preferably, the local node if possible.
+
ReadNode = val({Tab, where_to_read}),
{WaitFor, FirstRes} = async_send_dirty(Tid, CR, Tab, ReadNode),
rec_dirty(WaitFor, FirstRes);
-
+
sync_dirty ->
%% Send commit records to the other involved nodes,
%% and wait for all nodes to complete
@@ -1097,7 +1097,7 @@ t_commit(Type) ->
if
Ts#tidstore.level == 1 ->
intercept_friends(Tid, Ts),
- %% N is number of updates
+ %% N is the number of updates
case arrange(Tid, Store, Type) of
{N, Prep} when N > 0 ->
multi_commit(Prep#prep.protocol,
@@ -1135,8 +1135,8 @@ arrange(Tid, Store, Type) ->
Recs = prep_recs(Nodes, []),
Key = ?ets_first(Store),
N = 0,
- Prep =
- case Type of
+ Prep =
+ case Type of
async -> #prep{protocol = sym_trans, records = Recs};
sync -> #prep{protocol = sync_sym_trans, records = Recs}
end,
@@ -1146,7 +1146,7 @@ arrange(Tid, Store, Type) ->
case Reason of
{aborted, R} ->
mnesia:abort(R);
- _ ->
+ _ ->
mnesia:abort(Reason)
end;
{New, Prepared} ->
@@ -1155,7 +1155,7 @@ arrange(Tid, Store, Type) ->
reverse([]) ->
[];
-reverse([H=#commit{ram_copies=Ram, disc_copies=DC,
+reverse([H=#commit{ram_copies=Ram, disc_copies=DC,
disc_only_copies=DOC,snmp = Snmp}
|R]) ->
[
@@ -1164,7 +1164,7 @@ reverse([H=#commit{ram_copies=Ram, disc_copies=DC,
disc_copies = lists:reverse(DC),
disc_only_copies = lists:reverse(DOC),
snmp = lists:reverse(Snmp)
- }
+ }
| reverse(R)].
prep_recs([N | Nodes], Recs) ->
@@ -1191,7 +1191,7 @@ do_arrange(Tid, Store, RestoreKey, Prep, N) when RestoreKey == restore_op ->
(BupRec, CommitRecs, RecName, Where, Snmp) ->
Tab = element(1, BupRec),
Key = element(2, BupRec),
- Item =
+ Item =
if
Tab == RecName ->
[{{Tab, Key}, BupRec, write}];
@@ -1200,7 +1200,7 @@ do_arrange(Tid, Store, RestoreKey, Prep, N) when RestoreKey == restore_op ->
[{{Tab, Key}, BupRec2, write}]
end,
do_prepare_items(Tid, Tab, Key, Where, Snmp, Item, CommitRecs)
- end,
+ end,
Recs2 = mnesia_schema:arrange_restore(R, Fun, Prep#prep.records),
P2 = Prep#prep{protocol = asym_trans, records = Recs2},
do_arrange(Tid, Store, ?ets_next(Store, RestoreKey), P2, N + 1);
@@ -1222,20 +1222,20 @@ prepare_items(Tid, Tab, Key, Items, Prep) when Prep#prep.prev_tab == Tab ->
Recs = Prep#prep.records,
Recs2 = do_prepare_items(Tid, Tab, Key, Types, Snmp, Items, Recs),
Prep#prep{records = Recs2};
-
+
prepare_items(Tid, Tab, Key, Items, Prep) ->
Types = val({Tab, where_to_commit}),
case Types of
[] -> mnesia:abort({no_exists, Tab});
- {blocked, _} ->
+ {blocked, _} ->
unblocked = req({unblock_me, Tab}),
prepare_items(Tid, Tab, Key, Items, Prep);
_ ->
Majority = needs_majority(Tab, Prep),
Snmp = val({Tab, snmp}),
- Recs2 = do_prepare_items(Tid, Tab, Key, Types,
+ Recs2 = do_prepare_items(Tid, Tab, Key, Types,
Snmp, Items, Prep#prep.records),
- Prep2 = Prep#prep{records = Recs2, prev_tab = Tab,
+ Prep2 = Prep#prep{records = Recs2, prev_tab = Tab,
majority = Majority,
prev_types = Types, prev_snmp = Snmp},
check_prep(Prep2, Types)
@@ -1273,7 +1273,7 @@ have_majority([{Tab, AllNodes} | Rest], Nodes) ->
end.
prepare_snmp(Tab, Key, Items) ->
- case val({Tab, snmp}) of
+ case val({Tab, snmp}) of
[] ->
[];
Ustruct when Key /= '_' ->
@@ -1286,10 +1286,10 @@ prepare_snmp(Tab, Key, Items) ->
[{clear_table, Tab}]
end.
-prepare_snmp(_Tid, _Tab, _Key, _Types, [], _Items, Recs) ->
+prepare_snmp(_Tid, _Tab, _Key, _Types, [], _Items, Recs) ->
Recs;
-prepare_snmp(Tid, Tab, Key, Types, Us, Items, Recs) ->
+prepare_snmp(Tid, Tab, Key, Types, Us, Items, Recs) ->
if Key /= '_' ->
{_Oid, _Val, Op} = hd(Items),
SnmpOid = mnesia_snmp_hook:key_to_oid(Tab, Key, Us), % May exit
@@ -1334,7 +1334,7 @@ prepare_node(Node, Storage, [Item | Items], Rec, Kind) when Kind == snmp ->
Rec2 = Rec#commit{snmp = [Item | Rec#commit.snmp]},
prepare_node(Node, Storage, Items, Rec2, Kind);
prepare_node(Node, Storage, [Item | Items], Rec, Kind) when Kind /= schema ->
- Rec2 =
+ Rec2 =
case Storage of
ram_copies ->
Rec#commit{ram_copies = [Item | Rec#commit.ram_copies]};
@@ -1345,7 +1345,7 @@ prepare_node(Node, Storage, [Item | Items], Rec, Kind) when Kind /= schema ->
[Item | Rec#commit.disc_only_copies]}
end,
prepare_node(Node, Storage, Items, Rec2, Kind);
-prepare_node(_Node, _Storage, Items, Rec, Kind)
+prepare_node(_Node, _Storage, Items, Rec, Kind)
when Kind == schema, Rec#commit.schema_ops == [] ->
Rec#commit{schema_ops = Items};
prepare_node(_Node, _Storage, [], Rec, _Kind) ->
@@ -1354,7 +1354,7 @@ prepare_node(_Node, _Storage, [], Rec, _Kind) ->
%% multi_commit((Protocol, Tid, CommitRecords, Store)
%% Local work is always performed in users process
multi_commit(read_only, _Maj = [], Tid, CR, _Store) ->
- %% This featherweight commit protocol is used when no
+ %% This featherweight commit protocol is used when no
%% updates has been performed in the transaction.
{DiscNs, RamNs} = commit_nodes(CR, [], []),
@@ -1381,11 +1381,11 @@ multi_commit(sym_trans, _Maj = [], Tid, CR, Store) ->
%% perform the updates.
%%
%% The outcome is kept 3 minutes in the transient decision table.
- %%
+ %%
%% Recovery:
%% If somebody dies before the coordinator has
%% broadcasted do_commit, the transaction is aborted.
- %%
+ %%
%% If a participant dies, the table load algorithm
%% ensures that the contents of the involved tables
%% are picked from another node.
@@ -1394,15 +1394,15 @@ multi_commit(sym_trans, _Maj = [], Tid, CR, Store) ->
%% the outcome with all the others. If all are uncertain
%% about the outcome, the transaction is aborted. If
%% somebody knows the outcome the others will follow.
-
+
{DiscNs, RamNs} = commit_nodes(CR, [], []),
Pending = mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
?ets_insert(Store, Pending),
{WaitFor, Local} = ask_commit(sym_trans, Tid, CR, DiscNs, RamNs),
- {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
- ?eval_debug_fun({?MODULE, multi_commit_sym},
- [{tid, Tid}, {outcome, Outcome}]),
+ {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
+ ?eval_debug_fun({?MODULE, multi_commit_sym},
+ [{tid, Tid}, {outcome, Outcome}]),
rpc:abcast(DiscNs -- [node()], ?MODULE, {Tid, Outcome}),
rpc:abcast(RamNs -- [node()], ?MODULE, {Tid, Outcome}),
case Outcome of
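
The sym_trans coordinator above is essentially: record the transaction as pending, ask every involved node to commit, collect one vote per node, then broadcast the outcome. A compressed sketch of that vote-collection loop with local demo processes standing in for remote mnesia_tm servers (all names here are demo inventions; the real rec_all/4 also handles aborted replies and mnesia_down):

%% Coordinator-style vote collection, shaped like ask_commit/5 + rec_all/4.
-module(vote_demo).
-export([demo/0]).

demo() ->
    Tid = make_ref(),
    Coordinator = self(),
    Participants =
        [spawn(fun() -> Coordinator ! {vote_yes, Tid, self()} end) || _ <- [1, 2, 3]],
    case collect(Participants, Tid, do_commit) of
        do_commit ->
            [P ! {Tid, do_commit} || P <- Participants],
            committed;
        {do_abort, Why} ->
            [P ! {Tid, {do_abort, Why}} || P <- Participants],
            aborted
    end.

collect([_ | Rest], Tid, Res) ->
    receive
        {vote_yes, Tid, _Pid}  -> collect(Rest, Tid, Res);
        {vote_no, Tid, Reason} -> collect(Rest, Tid, {do_abort, Reason})
    after 5000 ->
        {do_abort, timeout}
    end;
collect([], _Tid, Res) ->
    Res.
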
@@ -1422,15 +1422,15 @@ multi_commit(sync_sym_trans, _Maj = [], Tid, CR, Store) ->
%% This protocol is the same as sym_trans except that it
%% uses syncronized calls to disk_log and syncronized commits
%% when several nodes are involved.
-
+
{DiscNs, RamNs} = commit_nodes(CR, [], []),
Pending = mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
?ets_insert(Store, Pending),
{WaitFor, Local} = ask_commit(sync_sym_trans, Tid, CR, DiscNs, RamNs),
- {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
- ?eval_debug_fun({?MODULE, multi_commit_sym_sync},
- [{tid, Tid}, {outcome, Outcome}]),
+ {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
+ ?eval_debug_fun({?MODULE, multi_commit_sym_sync},
+ [{tid, Tid}, {outcome, Outcome}]),
[?ets_insert(Store, {waiting_for_commit_ack, Node}) || Node <- WaitFor],
rpc:abcast(DiscNs -- [node()], ?MODULE, {Tid, Outcome}),
rpc:abcast(RamNs -- [node()], ?MODULE, {Tid, Outcome}),
@@ -1451,7 +1451,7 @@ multi_commit(sync_sym_trans, _Maj = [], Tid, CR, Store) ->
Outcome;
multi_commit(asym_trans, Majority, Tid, CR, Store) ->
- %% This more expensive commit protocol is used when
+ %% This more expensive commit protocol is used when
%% table definitions are changed (schema transactions).
%% It is also used when the involved tables are
%% replicated asymetrically. If the storage type differs
@@ -1462,14 +1462,14 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
%% commit record and votes yes or no depending of the
%% outcome of the prepare. The preparation is also performed
%% by the coordinator.
- %%
+ %%
%% 2a Somebody has died or voted no
%% Tell all yes voters to do_abort
%% 2b Everybody has voted yes
%% Put a unclear marker in the log.
%% Tell the others to pre_commit. I.e. that they should
%% put a unclear marker in the log and reply
- %% acc_pre_commit when they are done.
+ %% acc_pre_commit when they are done.
%%
%% 3a Somebody died
%% Tell the remaining participants to do_abort
@@ -1492,7 +1492,7 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
%% If we have no unclear marker in the log we may
%% safely abort, since we know that nobody may have
%% decided to commit yet.
- %%
+ %%
%% If we have a committed marker in the log we may
%% safely commit since we know that everybody else
%% also will come to this conclusion.
@@ -1506,7 +1506,7 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
%% up. When all involved nodes are up and uncertain,
%% we decide to commit (first put a committed marker
%% in the log, then do the updates).
-
+
D = #decision{tid = Tid, outcome = presume_abort},
{D2, CR2} = commit_decision(D, CR, [], []),
DiscNs = D2#decision.disc_nodes,
@@ -1518,10 +1518,10 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
Pending = mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
?ets_insert(Store, Pending),
{WaitFor, Local} = ask_commit(asym_trans, Tid, CR2, DiscNs, RamNs),
- SchemaPrep = (catch mnesia_schema:prepare_commit(Tid, Local, {coord, WaitFor})),
- {Votes, Pids} = rec_all(WaitFor, Tid, do_commit, []),
-
- ?eval_debug_fun({?MODULE, multi_commit_asym_got_votes},
+ SchemaPrep = (catch mnesia_schema:prepare_commit(Tid, Local, {coord, WaitFor})),
+ {Votes, Pids} = rec_all(WaitFor, Tid, do_commit, []),
+
+ ?eval_debug_fun({?MODULE, multi_commit_asym_got_votes},
[{tid, Tid}, {votes, Votes}]),
case Votes of
do_commit ->
@@ -1530,20 +1530,20 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
mnesia_log:log(C), % C is not a binary
?eval_debug_fun({?MODULE, multi_commit_asym_log_commit_rec},
[{tid, Tid}]),
-
+
D3 = C#commit.decision,
- D4 = D3#decision{outcome = unclear},
- mnesia_recover:log_decision(D4),
+ D4 = D3#decision{outcome = unclear},
+ mnesia_recover:log_decision(D4),
?eval_debug_fun({?MODULE, multi_commit_asym_log_commit_dec},
[{tid, Tid}]),
tell_participants(Pids, {Tid, pre_commit}),
%% Now we are uncertain and we do not know
%% if all participants have logged that
%% they are uncertain or not
- rec_acc_pre_commit(Pids, Tid, Store, {C,Local},
+ rec_acc_pre_commit(Pids, Tid, Store, {C,Local},
do_commit, DumperMode, [], []);
{'EXIT', Reason} ->
- %% The others have logged the commit
+ %% The others have logged the commit
%% record but they are not uncertain
mnesia_recover:note_decision(Tid, aborted),
?eval_debug_fun({?MODULE, multi_commit_asym_prepare_exit},
@@ -1564,7 +1564,7 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
end.
%% Returns do_commit or {do_abort, Reason}
-rec_acc_pre_commit([Pid | Tail], Tid, Store, Commit, Res, DumperMode,
+rec_acc_pre_commit([Pid | Tail], Tid, Store, Commit, Res, DumperMode,
GoodPids, SchemaAckPids) ->
receive
{?MODULE, _, {acc_pre_commit, Tid, Pid, true}} ->
@@ -1598,7 +1598,7 @@ rec_acc_pre_commit([], Tid, Store, {Commit,OrigC}, Res, DumperMode, GoodPids, Sc
%% everybody are uncertain.
prepare_sync_schema_commit(Store, SchemaAckPids),
tell_participants(GoodPids, {Tid, committed}),
- D2 = D#decision{outcome = committed},
+ D2 = D#decision{outcome = committed},
mnesia_recover:log_decision(D2),
?eval_debug_fun({?MODULE, rec_acc_pre_commit_log_commit},
[{tid, Tid}]),
@@ -1611,10 +1611,10 @@ rec_acc_pre_commit([], Tid, Store, {Commit,OrigC}, Res, DumperMode, GoodPids, Sc
sync_schema_commit(Tid, Store, SchemaAckPids),
mnesia_locker:release_tid(Tid),
?MODULE ! {delete_transaction, Tid};
-
+
{do_abort, Reason} ->
tell_participants(GoodPids, {Tid, {do_abort, Reason}}),
- D2 = D#decision{outcome = aborted},
+ D2 = D#decision{outcome = aborted},
mnesia_recover:log_decision(D2),
?eval_debug_fun({?MODULE, rec_acc_pre_commit_log_abort},
[{tid, Tid}]),
@@ -1702,7 +1702,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
end,
?eval_debug_fun({?MODULE, commit_participant, do_commit},
[{tid, Tid}]);
-
+
{Tid, {do_abort, _Reason}} ->
mnesia_recover:log_decision(D#decision{outcome = aborted}),
?eval_debug_fun({?MODULE, commit_participant, log_abort},
@@ -1710,7 +1710,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
mnesia_schema:undo_prepare_commit(Tid, C0),
?eval_debug_fun({?MODULE, commit_participant, undo_prepare},
[{tid, Tid}]);
-
+
{'EXIT', _, _} ->
mnesia_recover:log_decision(D#decision{outcome = aborted}),
?eval_debug_fun({?MODULE, commit_participant, exit_log_abort},
@@ -1718,7 +1718,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
mnesia_schema:undo_prepare_commit(Tid, C0),
?eval_debug_fun({?MODULE, commit_participant, exit_undo_prepare},
[{tid, Tid}]);
-
+
Msg ->
verbose("** ERROR ** commit_participant ~p, got unexpected msg: ~p~n",
[Tid, Msg])
@@ -1739,7 +1739,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
verbose("** ERROR ** commit_participant ~p, got unexpected msg: ~p~n",
[Tid, Msg])
end;
-
+
{'EXIT', Reason} ->
?eval_debug_fun({?MODULE, commit_participant, vote_no},
[{tid, Tid}]),
@@ -1750,7 +1750,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
?MODULE ! {delete_transaction, Tid},
unlink(whereis(?MODULE)),
exit(normal).
-
+
do_abort(Tid, Bin) when is_binary(Bin) ->
%% Possible optimization:
%% If we want we could pass arround a flag
@@ -1761,7 +1761,7 @@ do_abort(Tid, Bin) when is_binary(Bin) ->
%% mnesia_schema:undo_prepare_commit/1.
do_abort(Tid, binary_to_term(Bin));
do_abort(Tid, Commit) ->
- mnesia_schema:undo_prepare_commit(Tid, Commit),
+ mnesia_schema:undo_prepare_commit(Tid, Commit),
Commit.
do_dirty(Tid, Commit) when Commit#commit.schema_ops == [] ->
@@ -1799,7 +1799,7 @@ do_update(Tid, Storage, [Op | Ops], OldRes) ->
verbose("do_update in ~w failed: ~p -> {'EXIT', ~p}~n",
[Tid, Op, Reason]),
- do_update(Tid, Storage, Ops, OldRes);
+ do_update(Tid, Storage, Ops, OldRes);
NewRes ->
do_update(Tid, Storage, Ops, NewRes)
end;
@@ -1807,7 +1807,7 @@ do_update(_Tid, _Storage, [], Res) ->
Res.
do_update_op(Tid, Storage, {{Tab, K}, Obj, write}) ->
- commit_write(?catch_val({Tab, commit_work}), Tid,
+ commit_write(?catch_val({Tab, commit_work}), Tid,
Tab, K, Obj, undefined),
mnesia_lib:db_put(Storage, Tab, Obj);
@@ -1816,7 +1816,7 @@ do_update_op(Tid, Storage, {{Tab, K}, Val, delete}) ->
mnesia_lib:db_erase(Storage, Tab, K);
do_update_op(Tid, Storage, {{Tab, K}, {RecName, Incr}, update_counter}) ->
- {NewObj, OldObjs} =
+ {NewObj, OldObjs} =
case catch mnesia_lib:db_update_counter(Storage, Tab, K, Incr) of
NewVal when is_integer(NewVal), NewVal >= 0 ->
{{RecName, K, NewVal}, [{RecName, K, NewVal - Incr}]};
@@ -1824,17 +1824,17 @@ do_update_op(Tid, Storage, {{Tab, K}, {RecName, Incr}, update_counter}) ->
New = {RecName, K, Incr},
mnesia_lib:db_put(Storage, Tab, New),
{New, []};
- _ ->
+ _ ->
Zero = {RecName, K, 0},
mnesia_lib:db_put(Storage, Tab, Zero),
{Zero, []}
end,
- commit_update(?catch_val({Tab, commit_work}), Tid, Tab,
+ commit_update(?catch_val({Tab, commit_work}), Tid, Tab,
K, NewObj, OldObjs),
element(3, NewObj);
do_update_op(Tid, Storage, {{Tab, Key}, Obj, delete_object}) ->
- commit_del_object(?catch_val({Tab, commit_work}),
+ commit_del_object(?catch_val({Tab, commit_work}),
Tid, Tab, Key, Obj, undefined),
mnesia_lib:db_match_erase(Storage, Tab, Obj);
@@ -1846,11 +1846,11 @@ commit_write([], _, _, _, _, _) -> ok;
commit_write([{checkpoints, CpList}|R], Tid, Tab, K, Obj, Old) ->
mnesia_checkpoint:tm_retain(Tid, Tab, K, write, CpList),
commit_write(R, Tid, Tab, K, Obj, Old);
-commit_write([H|R], Tid, Tab, K, Obj, Old)
+commit_write([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, write, Old),
commit_write(R, Tid, Tab, K, Obj, Old);
-commit_write([H|R], Tid, Tab, K, Obj, Old)
+commit_write([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == index ->
mnesia_index:add_index(H, Tab, K, Obj, Old),
commit_write(R, Tid, Tab, K, Obj, Old).
@@ -1859,11 +1859,11 @@ commit_update([], _, _, _, _, _) -> ok;
commit_update([{checkpoints, CpList}|R], Tid, Tab, K, Obj, _) ->
Old = mnesia_checkpoint:tm_retain(Tid, Tab, K, write, CpList),
commit_update(R, Tid, Tab, K, Obj, Old);
-commit_update([H|R], Tid, Tab, K, Obj, Old)
+commit_update([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, write, Old),
commit_update(R, Tid, Tab, K, Obj, Old);
-commit_update([H|R], Tid, Tab, K, Obj, Old)
+commit_update([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == index ->
mnesia_index:add_index(H, Tab, K, Obj, Old),
commit_update(R, Tid, Tab, K, Obj, Old).
@@ -1872,11 +1872,11 @@ commit_delete([], _, _, _, _, _) -> ok;
commit_delete([{checkpoints, CpList}|R], Tid, Tab, K, Obj, _) ->
Old = mnesia_checkpoint:tm_retain(Tid, Tab, K, delete, CpList),
commit_delete(R, Tid, Tab, K, Obj, Old);
-commit_delete([H|R], Tid, Tab, K, Obj, Old)
+commit_delete([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, delete, Old),
commit_delete(R, Tid, Tab, K, Obj, Old);
-commit_delete([H|R], Tid, Tab, K, Obj, Old)
+commit_delete([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == index ->
mnesia_index:delete_index(H, Tab, K),
commit_delete(R, Tid, Tab, K, Obj, Old).
@@ -1885,12 +1885,12 @@ commit_del_object([], _, _, _, _, _) -> ok;
commit_del_object([{checkpoints, CpList}|R], Tid, Tab, K, Obj, _) ->
Old = mnesia_checkpoint:tm_retain(Tid, Tab, K, delete_object, CpList),
commit_del_object(R, Tid, Tab, K, Obj, Old);
-commit_del_object([H|R], Tid, Tab, K, Obj, Old)
- when element(1, H) == subscribers ->
+commit_del_object([H|R], Tid, Tab, K, Obj, Old)
+ when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, delete_object, Old),
commit_del_object(R, Tid, Tab, K, Obj, Old);
-commit_del_object([H|R], Tid, Tab, K, Obj, Old)
- when element(1, H) == index ->
+commit_del_object([H|R], Tid, Tab, K, Obj, Old)
+ when element(1, H) == index ->
mnesia_index:del_object_index(H, Tab, K, Obj, Old),
commit_del_object(R, Tid, Tab, K, Obj, Old).
@@ -1898,11 +1898,11 @@ commit_clear([], _, _, _, _) -> ok;
commit_clear([{checkpoints, CpList}|R], Tid, Tab, K, Obj) ->
mnesia_checkpoint:tm_retain(Tid, Tab, K, clear_table, CpList),
commit_clear(R, Tid, Tab, K, Obj);
-commit_clear([H|R], Tid, Tab, K, Obj)
+commit_clear([H|R], Tid, Tab, K, Obj)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, clear_table, undefined),
commit_clear(R, Tid, Tab, K, Obj);
-commit_clear([H|R], Tid, Tab, K, Obj)
+commit_clear([H|R], Tid, Tab, K, Obj)
when element(1, H) == index ->
mnesia_index:clear_index(H, Tab, K, Obj),
commit_clear(R, Tid, Tab, K, Obj).
@@ -1913,7 +1913,7 @@ do_snmp(Tid, [Head | Tail]) ->
{'EXIT', Reason} ->
%% This should only happen when we recently have
%% deleted our local replica or recently deattached
- %% the snmp table
+ %% the snmp table
verbose("do_snmp in ~w failed: ~p -> {'EXIT', ~p}~n",
[Tid, Head, Reason]);
@@ -1922,7 +1922,7 @@ do_snmp(Tid, [Head | Tail]) ->
end,
do_snmp(Tid, Tail).
-commit_nodes([C | Tail], AccD, AccR)
+commit_nodes([C | Tail], AccD, AccR)
when C#commit.disc_copies == [],
C#commit.disc_only_copies == [],
C#commit.schema_ops == [] ->
@@ -1934,7 +1934,7 @@ commit_nodes([], AccD, AccR) ->
commit_decision(D, [C | Tail], AccD, AccR) ->
N = C#commit.node,
- {D2, Tail2} =
+ {D2, Tail2} =
case C#commit.schema_ops of
[] when C#commit.disc_copies == [],
C#commit.disc_only_copies == [] ->
@@ -1954,8 +1954,8 @@ commit_decision(D, [], AccD, AccR) ->
{D#decision{disc_nodes = AccD, ram_nodes = AccR}, []}.
ram_only_ops(N, [{op, change_table_copy_type, N, _FromS, _ToS, Cs} | _Ops ]) ->
- case lists:member({name, schema}, Cs) of
- true ->
+ case lists:member({name, schema}, Cs) of
+ true ->
%% We always use disk if change type of the schema
false;
false ->
@@ -2025,12 +2025,12 @@ get_dirty_reply(Node, Res) ->
Reply;
{mnesia_down, Node} ->
case get(mnesia_activity_state) of
- {_, Tid, _Ts} when element(1,Tid) == tid ->
+ {_, Tid, _Ts} when element(1,Tid) == tid ->
%% Hmm dirty called inside a transaction, to avoid
%% hanging transaction we need to restart the transaction
mnesia:abort({node_not_running, Node});
_ ->
- %% It's ok to ignore mnesia_down's since we will make
+ %% It's ok to ignore mnesia_down's since we will make
%% the replicas consistent again when Node is started
Res
end
@@ -2068,10 +2068,10 @@ ask_commit(_Protocol, _Tid, [], _DiscNs, _RamNs, WaitFor, Local) ->
%% to be safe we let erts do the translation (many times maybe and thus
%% slower but it works.
% opt_term_to_binary(asym_trans, Head, Nodes) ->
-% opt_term_to_binary(Nodes, Head);
+% opt_term_to_binary(Nodes, Head);
opt_term_to_binary(_Protocol, Head, _Nodes) ->
Head.
-
+
rec_all([Node | Tail], Tid, Res, Pids) ->
receive
{?MODULE, Node, {vote_yes, Tid}} ->
@@ -2085,7 +2085,7 @@ rec_all([Node | Tail], Tid, Res, Pids) ->
{?MODULE, Node, {aborted, Tid}} ->
rec_all(Tail, Tid, Res, Pids);
- {mnesia_down, Node} ->
+ {mnesia_down, Node} ->
%% Make sure that mnesia_tm knows it has died
%% it may have been restarted
Abort = {do_abort, {bad_commit, Node}},
@@ -2095,7 +2095,7 @@ rec_all([Node | Tail], Tid, Res, Pids) ->
rec_all([], _Tid, Res, Pids) ->
{Res, Pids}.
-get_transactions() ->
+get_transactions() ->
{info, Participant, Coordinator} = req(info),
lists:map(fun({Tid, _Tabs}) ->
Status = tr_status(Tid,Participant),
@@ -2125,7 +2125,7 @@ get_info(Timeout) ->
display_info(Stream, {timeout, T}) ->
io:format(Stream, "---> No info about coordinator and participant transactions, "
"timeout ~p <--- ~n", [T]);
-
+
display_info(Stream, {info, Part, Coord}) ->
io:format(Stream, "---> Participant transactions <--- ~n", []),
lists:foreach(fun(P) -> pr_participant(Stream, P) end, Part),
@@ -2134,7 +2134,7 @@ display_info(Stream, {info, Part, Coord}) ->
pr_participant(Stream, P) ->
Commit0 = P#participant.commit,
- Commit =
+ Commit =
if
is_binary(Commit0) -> binary_to_term(Commit0);
true -> Commit0
@@ -2161,11 +2161,11 @@ search_pr_coordinator(S, [{Tid, _Ts}|Tail]) ->
io:format( "Tid is coordinator, owner == \n", []),
display_pid_info(Tid#tid.pid),
search_pr_coordinator(S, Tail);
- _ ->
+ _ ->
search_pr_coordinator(S, Tail)
end.
-search_pr_participant(_S, []) ->
+search_pr_participant(_S, []) ->
false;
search_pr_participant(S, [ P | Tail]) ->
Tid = P#participant.tid,
@@ -2176,15 +2176,15 @@ search_pr_participant(S, [ P | Tail]) ->
Pid = Tid#tid.pid,
display_pid_info(Pid),
io:format( "Tid wants to write objects \n",[]),
- Commit =
+ Commit =
if
is_binary(Commit0) -> binary_to_term(Commit0);
true -> Commit0
end,
-
+
io:format("~p~n", [Commit]),
search_pr_participant(S,Tail); %% !!!!!
- true ->
+ true ->
search_pr_participant(S, Tail)
end.
@@ -2200,7 +2200,7 @@ display_pid_info(Pid) ->
Other ->
Other
end,
- Reds = fetch(reductions, Info),
+ Reds = fetch(reductions, Info),
LM = length(fetch(messages, Info)),
pformat(io_lib:format("~p", [Pid]),
io_lib:format("~p", [Call]),
@@ -2254,7 +2254,7 @@ send_to_pids([_ | Pids], Msg) ->
send_to_pids(Pids, Msg);
send_to_pids([], _Msg) ->
ok.
-
+
reconfigure_participants(N, [P | Tail]) ->
case lists:member(N, P#participant.disc_nodes) or
lists:member(N, P#participant.ram_nodes) of
@@ -2262,25 +2262,25 @@ reconfigure_participants(N, [P | Tail]) ->
%% Ignore, since we are not a participant
%% in the transaction.
reconfigure_participants(N, Tail);
-
+
true ->
%% We are on a participant node, lets
%% check if the dead one was a
%% participant or a coordinator.
Tid = P#participant.tid,
- if
+ if
node(Tid#tid.pid) /= N ->
%% Another participant node died. Ignore.
reconfigure_participants(N, Tail);
true ->
- %% The coordinator node has died and
+ %% The coordinator node has died and
%% we must determine the outcome of the
%% transaction and tell mnesia_tm on all
%% nodes (including the local node) about it
verbose("Coordinator ~p in transaction ~p died~n",
[Tid#tid.pid, Tid]),
-
+
Nodes = P#participant.disc_nodes ++
P#participant.ram_nodes,
AliveNodes = Nodes -- [N],
@@ -2332,8 +2332,8 @@ system_terminate(_Reason, _Parent, _Debug, State) ->
system_code_change(State=#state{coordinators=Cs0,participants=Ps0},_Module,_OldVsn,downgrade) ->
case is_tuple(Cs0) of
- true ->
- Cs = gb_trees:to_list(Cs0),
+ true ->
+ Cs = gb_trees:to_list(Cs0),
Ps = gb_trees:values(Ps0),
{ok, State#state{coordinators=Cs,participants=Ps}};
false ->
@@ -2342,7 +2342,7 @@ system_code_change(State=#state{coordinators=Cs0,participants=Ps0},_Module,_OldV
system_code_change(State=#state{coordinators=Cs0,participants=Ps0},_Module,_OldVsn,_Extra) ->
case is_list(Cs0) of
- true ->
+ true ->
Cs = gb_trees:from_orddict(lists:sort(Cs0)),
Ps1 = [{P#participant.tid,P}|| P <- Ps0],
Ps = gb_trees:from_orddict(lists:sort(Ps1)),
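
The code_change clauses above convert the coordinator and participant bookkeeping between a plain list (older releases) and gb_trees (newer releases). The round trip is just gb_trees:from_orddict(lists:sort(List)) one way and gb_trees:to_list/1 back, e.g. (stand-alone demo, not part of the patch):

%% Upgrade/downgrade shape of the state conversion above.
-module(codechange_demo).
-export([demo/0]).

demo() ->
    OldList = [{c, 3}, {a, 1}, {b, 2}],                    % list of pairs, any order
    Tree    = gb_trees:from_orddict(lists:sort(OldList)),  % upgrade path
    Back    = gb_trees:to_list(Tree),                      % downgrade path
    {gb_trees:get(b, Tree), Back}.
    %% -> {2, [{a,1},{b,2},{c,3}]}
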