Diffstat (limited to 'lib/mnesia/src')
-rw-r--r--  lib/mnesia/src/Makefile               |   4
-rw-r--r--  lib/mnesia/src/mnesia.appup.src       |   8
-rw-r--r--  lib/mnesia/src/mnesia.erl             | 438
-rw-r--r--  lib/mnesia/src/mnesia.hrl             |   1
-rw-r--r--  lib/mnesia/src/mnesia_backup.erl      |  20
-rw-r--r--  lib/mnesia/src/mnesia_bup.erl         |  80
-rw-r--r--  lib/mnesia/src/mnesia_controller.erl  | 273
-rw-r--r--  lib/mnesia/src/mnesia_dumper.erl      | 240
-rw-r--r--  lib/mnesia/src/mnesia_event.erl       |   8
-rw-r--r--  lib/mnesia/src/mnesia_frag.erl        |  12
-rw-r--r--  lib/mnesia/src/mnesia_frag_hash.erl   |  20
-rw-r--r--  lib/mnesia/src/mnesia_lib.erl         |   8
-rw-r--r--  lib/mnesia/src/mnesia_loader.erl      | 278
-rw-r--r--  lib/mnesia/src/mnesia_locker.erl      | 376
-rw-r--r--  lib/mnesia/src/mnesia_log.erl         | 113
-rw-r--r--  lib/mnesia/src/mnesia_monitor.erl     |  94
-rw-r--r--  lib/mnesia/src/mnesia_recover.erl     |   6
-rw-r--r--  lib/mnesia/src/mnesia_schema.erl      | 677
-rw-r--r--  lib/mnesia/src/mnesia_tm.erl          | 372
19 files changed, 1595 insertions, 1433 deletions
diff --git a/lib/mnesia/src/Makefile b/lib/mnesia/src/Makefile
index e032f563fa..1c8ec54605 100644
--- a/lib/mnesia/src/Makefile
+++ b/lib/mnesia/src/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2009. All Rights Reserved.
+# Copyright Ericsson AB 1996-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -113,6 +113,8 @@ clean:
docs:
+$(TARGET_FILES): $(HRL_FILES)
+
# ----------------------------------------------------
# Special Build Targets
# ----------------------------------------------------
diff --git a/lib/mnesia/src/mnesia.appup.src b/lib/mnesia/src/mnesia.appup.src
index 3691aa249a..304a15242f 100644
--- a/lib/mnesia/src/mnesia.appup.src
+++ b/lib/mnesia/src/mnesia.appup.src
@@ -1,11 +1,17 @@
%% -*- erlang -*-
-{"%VSN%",
+{"%VSN%",
[
+ {"4.5.1", [{restart_application, mnesia}]},
+ {"4.5", [{restart_application, mnesia}]},
+ {"4.4.19", [{restart_application, mnesia}]},
{"4.4.18", [{restart_application, mnesia}]},
{"4.4.17", [{restart_application, mnesia}]},
{"4.4.16", [{restart_application, mnesia}]}
],
[
+ {"4.5.1", [{restart_application, mnesia}]},
+ {"4.5", [{restart_application, mnesia}]},
+ {"4.4.19", [{restart_application, mnesia}]},
{"4.4.18", [{restart_application, mnesia}]},
{"4.4.17", [{restart_application, mnesia}]},
{"4.4.16", [{restart_application, mnesia}]}
diff --git a/lib/mnesia/src/mnesia.erl b/lib/mnesia/src/mnesia.erl
index 980a9c6213..3d30debc53 100644
--- a/lib/mnesia/src/mnesia.erl
+++ b/lib/mnesia/src/mnesia.erl
@@ -27,7 +27,7 @@
%% Start, stop and debugging
start/0, start/1, stop/0, % Not for public use
set_debug_level/1, lkill/0, kill/0, % Not for public use
- ms/0,
+ ms/0,
change_config/2,
%% Activity mgt
@@ -40,14 +40,14 @@
%% Access within an activity - Lock acquisition
lock/2, lock/4,
lock_table/2,
- read_lock_table/1,
+ read_lock_table/1,
write_lock_table/1,
%% Access within an activity - Updates
- write/1, s_write/1, write/3, write/5,
- delete/1, s_delete/1, delete/3, delete/5,
- delete_object/1, s_delete_object/1, delete_object/3, delete_object/5,
-
+ write/1, s_write/1, write/3, write/5,
+ delete/1, s_delete/1, delete/3, delete/5,
+ delete_object/1, s_delete_object/1, delete_object/3, delete_object/5,
+
%% Access within an activity - Reads
read/1, read/2, wread/1, read/3, read/5,
match_object/1, match_object/3, match_object/5,
@@ -58,9 +58,9 @@
first/1, next/2, last/1, prev/2,
first/3, next/4, last/3, prev/4,
- %% Iterators within an activity
+ %% Iterators within an activity
foldl/3, foldl/4, foldr/3, foldr/4,
-
+
%% Dirty access regardless of activities - Updates
dirty_write/1, dirty_write/2,
dirty_delete/1, dirty_delete/2,
@@ -72,8 +72,8 @@
dirty_select/2,
dirty_match_object/1, dirty_match_object/2, dirty_all_keys/1,
dirty_index_match_object/2, dirty_index_match_object/3,
- dirty_index_read/3, dirty_slot/2,
- dirty_first/1, dirty_next/2, dirty_last/1, dirty_prev/2,
+ dirty_index_read/3, dirty_slot/2,
+ dirty_first/1, dirty_next/2, dirty_last/1, dirty_prev/2,
%% Info
table_info/2, table_info/4, schema/0, schema/1,
@@ -102,7 +102,7 @@
dump_tables/1, wait_for_tables/2, force_load_table/1,
change_table_access_mode/2, change_table_load_order/2,
set_master_nodes/1, set_master_nodes/2,
-
+
%% Misc admin
dump_log/0, subscribe/1, unsubscribe/1, report_event/1,
@@ -112,7 +112,7 @@
%% Textfile access
load_textfile/1, dump_to_textfile/1,
-
+
%% QLC functions
table/1, table/2,
@@ -137,20 +137,20 @@
-define(DEFAULT_ACCESS, ?MODULE).
-%% Select
+%% Select
-define(PATTERN_TO_OBJECT_MATCH_SPEC(Pat), [{Pat,[],['$_']}]).
-define(PATTERN_TO_BINDINGS_MATCH_SPEC(Pat), [{Pat,[],['$$']}]).
-
+
%% Local function in order to avoid external function call
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
Value -> Value
end.
is_dollar_digits(Var) ->
case atom_to_list(Var) of
- [$$ | Digs] ->
+ [$$ | Digs] ->
is_digits(Digs);
_ ->
false
@@ -166,13 +166,13 @@ is_digits([Dig | Tail]) ->
is_digits([]) ->
true.
-has_var(X) when is_atom(X) ->
- if
- X == '_' ->
+has_var(X) when is_atom(X) ->
+ if
+ X == '_' ->
true;
- is_atom(X) ->
+ is_atom(X) ->
is_dollar_digits(X);
- true ->
+ true ->
false
end;
has_var(X) when is_tuple(X) ->
@@ -196,9 +196,9 @@ e_has_var(X, Pos) ->
start() ->
{Time , Res} = timer:tc(application, start, [?APPLICATION, temporary]),
-
+
Secs = Time div 1000000,
- case Res of
+ case Res of
ok ->
verbose("Mnesia started, ~p seconds~n",[ Secs]),
ok;
@@ -243,10 +243,10 @@ change_config(extra_db_nodes, Ns) when is_list(Ns) ->
mnesia_controller:connect_nodes(Ns);
change_config(dc_dump_limit, N) when is_number(N), N > 0 ->
case mnesia_lib:is_running() of
- yes ->
+ yes ->
mnesia_lib:set(dc_dump_limit, N),
{ok, N};
- _ ->
+ _ ->
{error, {not_started, ?APPLICATION}}
end;
change_config(BadKey, _BadVal) ->
@@ -255,7 +255,7 @@ change_config(BadKey, _BadVal) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Debugging
-set_debug_level(Level) ->
+set_debug_level(Level) ->
mnesia_subscr:set_debug_level(Level).
lkill() ->
@@ -274,9 +274,9 @@ ms() ->
mnesia_controller,
mnesia_dumper,
mnesia_loader,
- mnesia_frag,
- mnesia_frag_hash,
- mnesia_frag_old_hash,
+ mnesia_frag,
+ mnesia_frag_hash,
+ mnesia_frag_old_hash,
mnesia_index,
mnesia_kernel_sup,
mnesia_late_loader,
@@ -295,9 +295,9 @@ ms() ->
%% Keep these last in the list, so
%% mnesia_sup kills these last
- mnesia_monitor,
+ mnesia_monitor,
mnesia_event
- ].
+ ].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -305,7 +305,7 @@ ms() ->
-spec abort(_) -> no_return().
-abort(Reason) ->
+abort(Reason) ->
exit({aborted, Reason}).
is_transaction() ->
@@ -339,7 +339,7 @@ sync_transaction(Fun, Args, Retries) ->
transaction(get(mnesia_activity_state), Fun, Args, Retries, ?DEFAULT_ACCESS, sync).
-transaction(State, Fun, Args, Retries, Mod, Kind)
+transaction(State, Fun, Args, Retries, Mod, Kind)
when is_function(Fun), is_list(Args), Retries == infinity, is_atom(Mod) ->
mnesia_tm:transaction(State, Fun, Args, Retries, Mod, Kind);
transaction(State, Fun, Args, Retries, Mod, Kind)
@@ -348,7 +348,7 @@ transaction(State, Fun, Args, Retries, Mod, Kind)
transaction(_State, Fun, Args, Retries, Mod, _Kind) ->
{aborted, {badarg, Fun, Args, Retries, Mod}}.
-non_transaction(State, Fun, Args, ActivityKind, Mod)
+non_transaction(State, Fun, Args, ActivityKind, Mod)
when is_function(Fun), is_list(Args), is_atom(Mod) ->
mnesia_tm:non_transaction(State, Fun, Args, ActivityKind, Mod);
non_transaction(_State, Fun, Args, _ActivityKind, _Mod) ->
@@ -394,7 +394,7 @@ wrap_trans(State, Fun, Args, Retries, Mod, Kind) ->
{atomic, GoodRes} -> GoodRes;
BadRes -> exit(BadRes)
end.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Access within an activity - lock acquisition
@@ -507,13 +507,13 @@ good_global_nodes(Nodes) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Access within an activity - updates
-write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
+write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
Tab = element(1, Val),
write(Tab, Val, write);
write(Val) ->
abort({bad_type, Val}).
-s_write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
+s_write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
Tab = element(1, Val),
write(Tab, Val, sticky_write).
@@ -561,7 +561,7 @@ write_to_store(Tab, Store, Oid, Val) ->
_ ->
?ets_delete(Store, Oid),
?ets_insert(Store, {Oid, Val, write})
- end,
+ end,
ok;
{'EXIT', _} ->
abort({no_exists, Tab});
@@ -611,7 +611,7 @@ delete(Tid, Ts, Tab, Key, LockKind)
ok;
Protocol ->
do_dirty_delete(Protocol, Tab, Key)
- end;
+ end;
delete(_Tid, _Ts, Tab, _Key, _LockKind) ->
abort({bad_type, Tab}).
@@ -640,7 +640,7 @@ delete_object(Tab, Val, LockKind) ->
delete_object(Tid, Ts, Tab, Val, LockKind)
when is_atom(Tab), Tab /= schema, is_tuple(Val), tuple_size(Val) > 2 ->
case has_var(Val) of
- false ->
+ false ->
do_delete_object(Tid, Ts, Tab, Val, LockKind);
true ->
abort({bad_type, Tab, Val})
@@ -665,7 +665,7 @@ do_delete_object(Tid, Ts, Tab, Val, LockKind) ->
abort({bad_type, Tab, LockKind})
end,
case val({Tab, setorbag}) of
- bag ->
+ bag ->
?ets_match_delete(Store, {Oid, Val, '_'}),
?ets_insert(Store, {Oid, Val, delete_object});
_ ->
@@ -731,7 +731,7 @@ read(Tid, Ts, Tab, Key, LockKind)
add_written(?ets_lookup(Store, Oid), Tab, Objs);
_Protocol ->
dirty_read(Tab, Key)
- end;
+ end;
read(_Tid, _Ts, Tab, _Key, _LockKind) ->
abort({bad_type, Tab}).
@@ -744,7 +744,7 @@ first(Tab) ->
_ ->
abort(no_transaction)
end.
-
+
first(Tid, Ts, Tab)
when is_atom(Tab), Tab /= schema ->
case element(1, Tid) of
@@ -845,9 +845,9 @@ prev(_Tid, _Ts,Tab,_) ->
stored_keys(Tab,'$end_of_table',Prev,Ts,Op,Type) ->
case ts_keys(Ts#tidstore.store,Tab,Op,Type,[]) of
[] -> '$end_of_table';
- Keys when Type == ordered_set->
+ Keys when Type == ordered_set->
get_ordered_tskey(Prev,Keys,Op);
- Keys ->
+ Keys ->
get_next_tskey(Prev,Keys,Tab)
end;
stored_keys(Tab,{'EXIT',{aborted,R={badarg,[Tab,Key]}}},
@@ -858,7 +858,7 @@ stored_keys(Tab,{'EXIT',{aborted,R={badarg,[Tab,Key]}}},
Ops ->
case lists:last(Ops) of
[delete] -> abort(R);
- _ ->
+ _ ->
case ts_keys(Store,Tab,Op,Type,[]) of
[] -> '$end_of_table';
Keys -> get_next_tskey(Key,Keys,Tab)
@@ -869,14 +869,14 @@ stored_keys(_,{'EXIT',{aborted,R}},_,_,_,_) ->
abort(R);
stored_keys(Tab,Key,Prev,#tidstore{store=Store},Op,ordered_set) ->
case ?ets_match(Store, {{Tab, Key}, '_', '$1'}) of
- [] ->
+ [] ->
Keys = ts_keys(Store,Tab,Op,ordered_set,[Key]),
get_ordered_tskey(Prev,Keys,Op);
Ops ->
case lists:last(Ops) of
[delete] ->
mnesia:Op(Tab,Key);
- _ ->
+ _ ->
Keys = ts_keys(Store,Tab,Op,ordered_set,[Key]),
get_ordered_tskey(Prev,Keys,Op)
end
@@ -898,7 +898,7 @@ get_ordered_tskey(Prev, [_|R],Op) -> get_ordered_tskey(Prev,R,Op);
get_ordered_tskey(_, [],_) -> '$end_of_table'.
get_next_tskey(Key,Keys,Tab) ->
- Next =
+ Next =
if Key == '$end_of_table' -> hd(Keys);
true ->
case lists:dropwhile(fun(A) -> A /= Key end, Keys) of
@@ -912,7 +912,7 @@ get_next_tskey(Key,Keys,Tab) ->
_ -> %% Really slow anybody got another solution??
case dirty_read(Tab, Next) of
[] -> Next;
- _ ->
+ _ ->
%% Updated value we already returned this key
get_next_tskey(Next,Keys,Tab)
end
@@ -921,7 +921,7 @@ get_next_tskey(Key,Keys,Tab) ->
ts_keys(Store, Tab, Op, Type, Def) ->
All = ?ets_match(Store, {{Tab,'$1'},'_','$2'}),
Keys = ts_keys_1(All, Def),
- if
+ if
Type == ordered_set, Op == prev ->
lists:reverse(lists:sort(Keys));
Type == ordered_set ->
@@ -947,7 +947,7 @@ ts_keys_1([], Acc) ->
%%%%%%%%%%%%%%%%%%%%%
-%% Iterators
+%% Iterators
foldl(Fun, Acc, Tab) ->
foldl(Fun, Acc, Tab, read).
@@ -968,7 +968,7 @@ foldl(ActivityId, Opaque, Fun, Acc, Tab, LockKind) ->
close_iteration(Res, Tab).
do_foldl(A, O, Tab, '$end_of_table', Fun, RAcc, _Type, Stored) ->
- lists:foldl(fun(Key, Acc) ->
+ lists:foldl(fun(Key, Acc) ->
lists:foldl(Fun, Acc, read(A, O, Tab, Key, read))
end, RAcc, Stored);
do_foldl(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H == Key ->
@@ -983,7 +983,7 @@ do_foldl(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H > Key ->
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
{_, Tid, Ts} = get(mnesia_activity_state),
do_foldl(Tid, Ts, Tab, dirty_next(Tab, Key), Fun, NewAcc, ordered_set, [H |Stored]);
-do_foldl(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
+do_foldl(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
NewStored = ordsets:del_element(Key, Stored),
{_, Tid, Ts} = get(mnesia_activity_state),
@@ -1003,8 +1003,8 @@ foldr(Fun, Acc, Tab, LockKind) when is_function(Fun) ->
foldr(ActivityId, Opaque, Fun, Acc, Tab, LockKind) ->
{Type, TempPrev} = init_iteration(ActivityId, Opaque, Tab, LockKind),
- Prev =
- if
+ Prev =
+ if
Type == ordered_set ->
lists:reverse(TempPrev);
true -> %% Order doesn't matter for set and bag
@@ -1014,7 +1014,7 @@ foldr(ActivityId, Opaque, Fun, Acc, Tab, LockKind) ->
close_iteration(Res, Tab).
do_foldr(A, O, Tab, '$end_of_table', Fun, RAcc, _Type, Stored) ->
- lists:foldl(fun(Key, Acc) ->
+ lists:foldl(fun(Key, Acc) ->
lists:foldl(Fun, Acc, read(A, O, Tab, Key, read))
end, RAcc, Stored);
do_foldr(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H == Key ->
@@ -1029,7 +1029,7 @@ do_foldr(A, O, Tab, Key, Fun, Acc, ordered_set, [H | Stored]) when H < Key ->
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
{_, Tid, Ts} = get(mnesia_activity_state),
do_foldr(Tid, Ts, Tab, dirty_prev(Tab, Key), Fun, NewAcc, ordered_set, [H |Stored]);
-do_foldr(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
+do_foldr(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
NewAcc = lists:foldl(Fun, Acc, read(A, O, Tab, Key, read)),
NewStored = ordsets:del_element(Key, Stored),
{_, Tid, Ts} = get(mnesia_activity_state),
@@ -1037,25 +1037,25 @@ do_foldr(A, O, Tab, Key, Fun, Acc, Type, Stored) -> %% Type is set or bag
init_iteration(ActivityId, Opaque, Tab, LockKind) ->
lock(ActivityId, Opaque, {table, Tab}, LockKind),
- Type = val({Tab, setorbag}),
+ Type = val({Tab, setorbag}),
Previous = add_previous(ActivityId, Opaque, Type, Tab),
St = val({Tab, storage_type}),
- if
- St == unknown ->
+ if
+ St == unknown ->
ignore;
true ->
mnesia_lib:db_fixtable(St, Tab, true)
- end,
+ end,
{Type, Previous}.
close_iteration(Res, Tab) ->
case val({Tab, storage_type}) of
- unknown ->
+ unknown ->
ignore;
- St ->
+ St ->
mnesia_lib:db_fixtable(St, Tab, false)
end,
- case Res of
+ case Res of
{'EXIT', {aborted, What}} ->
abort(What);
{'EXIT', What} ->
@@ -1074,7 +1074,7 @@ add_previous(_Tid, Ts, _Type, Tab) ->
%% it is correct with respect to what this particular transaction
%% has already written, deleted .... etc
-add_written([], _Tab, Objs) ->
+add_written([], _Tab, Objs) ->
Objs; % standard normal fast case
add_written(Written, Tab, Objs) ->
case val({Tab, setorbag}) of
@@ -1093,7 +1093,7 @@ add_written_to_set(Ws) ->
add_written_to_bag([{_, Val, write} | Tail], Objs, Ack) ->
add_written_to_bag(Tail, lists:delete(Val, Objs), [Val | Ack]);
-add_written_to_bag([], Objs, Ack) ->
+add_written_to_bag([], Objs, Ack) ->
Objs ++ lists:reverse(Ack); %% Oldest write first as in ets
add_written_to_bag([{_, _ , delete} | Tail], _Objs, _Ack) ->
%% This transaction just deleted all objects
@@ -1118,7 +1118,7 @@ match_object(Tab, Pat, LockKind) ->
abort(no_transaction)
end.
-match_object(Tid, Ts, Tab, Pat, LockKind)
+match_object(Tid, Ts, Tab, Pat, LockKind)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
case element(1, Tid) of
ets ->
@@ -1142,11 +1142,11 @@ add_written_match(S, Pat, Tab, Objs) ->
add_match(Ops, Objs, val({Tab, setorbag})).
find_ops(S, Tab, Pat) ->
- GetWritten = [{{{Tab, '_'}, Pat, write}, [], ['$_']},
+ GetWritten = [{{{Tab, '_'}, Pat, write}, [], ['$_']},
{{{Tab, '_'}, '_', delete}, [], ['$_']},
{{{Tab, '_'}, Pat, delete_object}, [], ['$_']}],
ets:select(S, GetWritten).
-
+
add_match([], Objs, _Type) ->
Objs;
add_match(Written, Objs, ordered_set) ->
@@ -1162,13 +1162,13 @@ add_match([{Oid, Val, write}|R], Objs, set) ->
add_match(R, [Val | deloid(Oid,Objs)],set).
%% For ordered_set only !!
-add_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs], Acc)
+add_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs], Acc)
when Key > element(2, Obj) ->
add_ordered_match(Written, Objs, [Obj|Acc]);
-add_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_], Acc)
+add_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_], Acc)
when Key < element(2, Obj) ->
add_ordered_match(Rest, [Val|Objs],Acc);
-add_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
+add_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
when Key < element(2, Obj) ->
add_ordered_match(Rest,Objs,Acc);
%% Greater than last object
@@ -1176,7 +1176,7 @@ add_ordered_match([{_, Val, write}|Rest], [], Acc) ->
add_ordered_match(Rest, [Val], Acc);
add_ordered_match([_|Rest], [], Acc) ->
add_ordered_match(Rest, [], Acc);
-%% Keys are equal from here
+%% Keys are equal from here
add_ordered_match([{_, Val, write}|Rest], [_Obj|Objs], Acc) ->
add_ordered_match(Rest, [Val|Objs], Acc);
add_ordered_match([{_, _Val, delete}|Rest], [_Obj|Objs], Acc) ->
@@ -1207,7 +1207,7 @@ add_sel_match([Op={Oid, _, delete}|R], Objs, Type, Acc) ->
end;
add_sel_match([Op = {_Oid, Val, delete_object}|R], Objs, Type, Acc) ->
case lists:delete(Val, Objs) of
- Objs ->
+ Objs ->
add_sel_match(R, Objs, Type, [Op|Acc]);
NewObjs when Type == set ->
add_sel_match(R, NewObjs, Type, Acc);
@@ -1224,26 +1224,26 @@ add_sel_match([Op={Oid={_,Key}, Val, write}|R], Objs, bag, Acc) ->
end;
add_sel_match([Op={Oid, Val, write}|R], Objs, set, Acc) ->
case deloid(Oid,Objs) of
- Objs ->
+ Objs ->
add_sel_match(R, Objs,set, [Op|Acc]);
NewObjs ->
add_sel_match(R, [Val | NewObjs],set, Acc)
end.
%% For ordered_set only !!
-add_sel_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs],Acc)
+add_sel_ordered_match(Written = [{{_, Key}, _, _}|_], [Obj|Objs],Acc)
when Key > element(2, Obj) ->
add_sel_ordered_match(Written, Objs, [Obj|Acc]);
-add_sel_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_],Acc)
+add_sel_ordered_match([{{_, Key}, Val, write}|Rest], Objs =[Obj|_],Acc)
when Key < element(2, Obj) ->
add_sel_ordered_match(Rest,[Val|Objs],Acc);
-add_sel_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
+add_sel_ordered_match([{{_, Key}, _, _DelOP}|Rest], Objs =[Obj|_], Acc)
when Key < element(2, Obj) ->
add_sel_ordered_match(Rest,Objs,Acc);
%% Greater than last object
add_sel_ordered_match(Ops1, [], Acc) ->
{lists:reverse(Acc), Ops1};
-%% Keys are equal from here
+%% Keys are equal from here
add_sel_ordered_match([{_, Val, write}|Rest], [_Obj|Objs], Acc) ->
add_sel_ordered_match(Rest, [Val|Objs], Acc);
add_sel_ordered_match([{_, _Val, delete}|Rest], [_Obj|Objs], Acc) ->
@@ -1264,11 +1264,11 @@ deloid(Oid, [H | T]) ->
[H | deloid(Oid, T)].
%%%%%%%%%%%%%%%%%%
-% select
+% select
select(Tab, Pat) ->
select(Tab, Pat, read).
-select(Tab, Pat, LockKind)
+select(Tab, Pat, LockKind)
when is_atom(Tab), Tab /= schema, is_list(Pat) ->
case get(mnesia_activity_state) of
{?DEFAULT_ACCESS, Tid, Ts} ->
@@ -1293,13 +1293,13 @@ fun_select(Tid, Ts, Tab, Spec, LockKind, TabPat, SelectFun) ->
select_lock(Tid,Ts,LockKind,Spec,Tab),
Store = Ts#tidstore.store,
Written = ?ets_match_object(Store, {{TabPat, '_'}, '_', '_'}),
- case Written of
- [] ->
+ case Written of
+ [] ->
%% Nothing changed in the table during this transaction,
%% Simple case get results from [d]ets
SelectFun(Spec);
- _ ->
- %% Hard (slow case) records added or deleted earlier
+ _ ->
+ %% Hard (slow case) records added or deleted earlier
%% in the transaction, have to cope with that.
Type = val({Tab, setorbag}),
FixedSpec = get_record_pattern(Spec),
@@ -1326,7 +1326,7 @@ select_lock(Tid,Ts,LockKind,Spec,Tab) ->
end.
%% Breakable Select
-select(Tab, Pat, NObjects, LockKind)
+select(Tab, Pat, NObjects, LockKind)
when is_atom(Tab), Tab /= schema, is_list(Pat), is_integer(NObjects) ->
case get(mnesia_activity_state) of
{?DEFAULT_ACCESS, Tid, Ts} ->
@@ -1356,26 +1356,26 @@ fun_select(Tid, Ts, Tab, Spec, LockKind, TabPat, Init, NObjects, Node, Storage)
select_lock(Tid,Ts,LockKind,Spec,Tab),
Store = Ts#tidstore.store,
do_fixtable(Tab, Store),
-
- Written0 = ?ets_match_object(Store, {{TabPat, '_'}, '_', '_'}),
- case Written0 of
- [] ->
+
+ Written0 = ?ets_match_object(Store, {{TabPat, '_'}, '_', '_'}),
+ case Written0 of
+ [] ->
%% Nothing changed in the table during this transaction,
%% Simple case get results from [d]ets
select_state(Init(Spec),Def);
- _ ->
- %% Hard (slow case) records added or deleted earlier
+ _ ->
+ %% Hard (slow case) records added or deleted earlier
%% in the transaction, have to cope with that.
Type = val({Tab, setorbag}),
- Written =
+ Written =
if Type == ordered_set -> %% Sort stable
lists:keysort(1,Written0);
- true ->
+ true ->
Written0
end,
FixedSpec = get_record_pattern(Spec),
CMS = ets:match_spec_compile(Spec),
- trans_select(Init(FixedSpec),
+ trans_select(Init(FixedSpec),
Def#mnesia_select{written=Written,spec=CMS,type=Type, orig=FixedSpec})
end;
_Protocol ->
@@ -1394,7 +1394,7 @@ select(Cont) ->
select_cont(_Tid,_Ts,'$end_of_table') ->
'$end_of_table';
-select_cont(Tid,_Ts,State=#mnesia_select{tid=Tid,cont=Cont, orig=Ms})
+select_cont(Tid,_Ts,State=#mnesia_select{tid=Tid,cont=Cont, orig=Ms})
when element(1,Tid) == ets ->
case Cont of
'$end_of_table' -> '$end_of_table';
@@ -1415,7 +1415,7 @@ trans_select('$end_of_table', #mnesia_select{written=Written0,spec=CMS,type=Type
trans_select({TabRecs,Cont}, State = #mnesia_select{written=Written0,spec=CMS,type=Type}) ->
{FixedRes,Written} = add_sel_match(Written0, TabRecs, Type),
select_state({ets:match_spec_run(FixedRes, CMS),Cont},
- State#mnesia_select{written=Written}).
+ State#mnesia_select{written=Written}).
select_state({Matches, Cont}, MS) ->
{Matches, MS#mnesia_select{cont=Cont}};
@@ -1433,9 +1433,9 @@ all_keys(Tab) ->
Mod:all_keys(Tid, Ts, Tab, read);
_ ->
abort(no_transaction)
- end.
+ end.
-all_keys(Tid, Ts, Tab, LockKind)
+all_keys(Tid, Ts, Tab, LockKind)
when is_atom(Tab), Tab /= schema ->
Pat0 = val({Tab, wild_pattern}),
Pat = setelement(2, Pat0, '$1'),
@@ -1446,7 +1446,7 @@ all_keys(Tid, Ts, Tab, LockKind)
_ ->
Keys
end;
-all_keys(_Tid, _Ts, Tab, _LockKind) ->
+all_keys(_Tid, _Ts, Tab, _LockKind) ->
abort({bad_type, Tab}).
index_match_object(Pat, Attr) when is_tuple(Pat), tuple_size(Pat) > 2 ->
@@ -1465,7 +1465,7 @@ index_match_object(Tab, Pat, Attr, LockKind) ->
abort(no_transaction)
end.
-index_match_object(Tid, Ts, Tab, Pat, Attr, LockKind)
+index_match_object(Tid, Ts, Tab, Pat, Attr, LockKind)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
case element(1, Tid) of
ets ->
@@ -1501,7 +1501,7 @@ index_read(Tab, Key, Attr) ->
abort(no_transaction)
end.
-index_read(Tid, Ts, Tab, Key, Attr, LockKind)
+index_read(Tid, Ts, Tab, Key, Attr, LockKind)
when is_atom(Tab), Tab /= schema ->
case element(1, Tid) of
ets ->
@@ -1536,7 +1536,7 @@ dirty_write(Val) when is_tuple(Val), tuple_size(Val) > 2 ->
dirty_write(Tab, Val);
dirty_write(Val) ->
abort({bad_type, Val}).
-
+
dirty_write(Tab, Val) ->
do_dirty_write(async_dirty, Tab, Val).
@@ -1562,7 +1562,7 @@ dirty_delete(Oid) ->
dirty_delete(Tab, Key) ->
do_dirty_delete(async_dirty, Tab, Key).
-
+
do_dirty_delete(SyncMode, Tab, Key) when is_atom(Tab), Tab /= schema ->
Oid = {Tab, Key},
mnesia_tm:dirty(SyncMode, {Oid, Oid, delete});
@@ -1582,7 +1582,7 @@ do_dirty_delete_object(SyncMode, Tab, Val)
when is_atom(Tab), Tab /= schema, is_tuple(Val), tuple_size(Val) > 2 ->
Oid = {Tab, element(2, Val)},
case has_var(Val) of
- false ->
+ false ->
mnesia_tm:dirty(SyncMode, {Oid, Val, delete_object});
true ->
abort({bad_type, Tab, Val})
@@ -1600,7 +1600,7 @@ dirty_update_counter(Counter, _Incr) ->
dirty_update_counter(Tab, Key, Incr) ->
do_dirty_update_counter(async_dirty, Tab, Key, Incr).
-
+
do_dirty_update_counter(SyncMode, Tab, Key, Incr)
when is_atom(Tab), Tab /= schema, is_integer(Incr) ->
case ?catch_val({Tab, record_validation}) of
@@ -1638,7 +1638,7 @@ dirty_match_object(Pat) when is_tuple(Pat), tuple_size(Pat) > 2 ->
dirty_match_object(Tab, Pat);
dirty_match_object(Pat) ->
abort({bad_type, Pat}).
-
+
dirty_match_object(Tab, Pat)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
dirty_rpc(Tab, ?MODULE, remote_dirty_match_object, [Tab, Pat]);
@@ -1697,8 +1697,8 @@ remote_dirty_select(Tab, [{HeadPat,_, _}] = Spec, [Pos | Tail])
%% Returns the records without applying the match spec
%% The actual filtering is handled by the caller
CMS = ets:match_spec_compile(Spec),
- case val({Tab, setorbag}) of
- ordered_set ->
+ case val({Tab, setorbag}) of
+ ordered_set ->
ets:match_spec_run(lists:sort(Recs), CMS);
_ ->
ets:match_spec_run(Recs, CMS)
@@ -1730,14 +1730,14 @@ dirty_all_keys(Tab) when is_atom(Tab), Tab /= schema ->
end;
dirty_all_keys(Tab) ->
abort({bad_type, Tab}).
-
+
dirty_index_match_object(Pat, Attr) when is_tuple(Pat), tuple_size(Pat) > 2 ->
Tab = element(1, Pat),
dirty_index_match_object(Tab, Pat, Attr);
dirty_index_match_object(Pat, _Attr) ->
abort({bad_type, Pat}).
-dirty_index_match_object(Tab, Pat, Attr)
+dirty_index_match_object(Tab, Pat, Attr)
when is_atom(Tab), Tab /= schema, is_tuple(Pat), tuple_size(Pat) > 2 ->
case mnesia_schema:attr_tab_to_pos(Tab, Attr) of
Pos when Pos =< tuple_size(Pat) ->
@@ -1752,7 +1752,7 @@ dirty_index_match_object(Tab, Pat, Attr)
[Tab, Pat, Pos]);
true ->
abort({bad_type, Tab, Attr, Elem})
- end
+ end
end;
BadPos ->
abort({bad_type, Tab, BadPos})
@@ -1810,7 +1810,7 @@ do_dirty_rpc(Tab, Node, M, F, Args) ->
%% Sync with mnesia_monitor
try sys:get_status(mnesia_monitor) catch _:_ -> ok end,
case mnesia_controller:call({check_w2r, Node, Tab}) of % Sync
- NewNode when NewNode =:= Node ->
+ NewNode when NewNode =:= Node ->
ErrorTag = mnesia_lib:dirty_rpc_error_tag(Reason),
mnesia:abort({ErrorTag, Args});
NewNode ->
@@ -1821,9 +1821,9 @@ do_dirty_rpc(Tab, Node, M, F, Args) ->
%% to acquire the lock on the NewNode.
%% In this context we do neither know
%% the kind or granularity of the lock.
- %% --> Abort the transaction
+ %% --> Abort the transaction
mnesia:abort({node_not_running, Node});
- {error, {node_not_running, _}} ->
+ {error, {node_not_running, _}} ->
%% Mnesia is stopping
mnesia:abort({no_exists, Args});
_ ->
@@ -1858,21 +1858,21 @@ table_info(_Tid, _Ts, Tab, Item) ->
any_table_info(Tab, Item).
-any_table_info(Tab, Item) when is_atom(Tab) ->
+any_table_info(Tab, Item) when is_atom(Tab) ->
case Item of
master_nodes ->
mnesia_recover:get_master_nodes(Tab);
-% checkpoints ->
+% checkpoints ->
% case ?catch_val({Tab, commit_work}) of
% [{checkpoints, List} | _] -> List;
% No_chk when is_list(No_chk) -> [];
% Else -> info_reply(Else, Tab, Item)
% end;
- size ->
+ size ->
raw_table_info(Tab, Item);
memory ->
raw_table_info(Tab, Item);
- type ->
+ type ->
case ?catch_val({Tab, setorbag}) of
{'EXIT', _} ->
abort({no_exists, Tab, Item});
@@ -1885,8 +1885,8 @@ any_table_info(Tab, Item) when is_atom(Tab) ->
abort({no_exists, Tab, Item});
Props ->
lists:map(fun({setorbag, Type}) -> {type, Type};
- (Prop) -> Prop end,
- Props)
+ (Prop) -> Prop end,
+ Props)
end;
name ->
Tab;
@@ -1927,14 +1927,14 @@ bad_info_reply(_Tab, memory) -> 0;
bad_info_reply(Tab, Item) -> abort({no_exists, Tab, Item}).
%% Raw info about all tables
-schema() ->
+schema() ->
mnesia_schema:info().
%% Raw info about one tables
-schema(Tab) ->
+schema(Tab) ->
mnesia_schema:info(Tab).
-error_description(Err) ->
+error_description(Err) ->
mnesia_lib:error_desc(Err).
info() ->
@@ -1951,18 +1951,18 @@ info() ->
io:format( "---> Processes waiting for locks <--- ~n", []),
lists:foreach(fun({Oid, Op, _Pid, Tid, OwnerTid}) ->
io:format("Tid ~p waits for ~p lock "
- "on oid ~p owned by ~p ~n",
+ "on oid ~p owned by ~p ~n",
[Tid, Op, Oid, OwnerTid])
end, Queued),
mnesia_tm:display_info(group_leader(), TmInfo),
-
+
Pat = {'_', unclear, '_'},
Uncertain = ets:match_object(mnesia_decision, Pat),
io:format( "---> Uncertain transactions <--- ~n", []),
lists:foreach(fun({Tid, _, Nodes}) ->
io:format("Tid ~w waits for decision "
- "from ~w~n",
+ "from ~w~n",
[Tid, Nodes])
end, Uncertain),
@@ -2023,15 +2023,15 @@ display_tab_info() ->
io:format("master node tables = ~p~n", [lists:sort(MasterTabs)]),
Tabs = system_info(tables),
-
+
{Unknown, Ram, Disc, DiscOnly} =
lists:foldl(fun storage_count/2, {[], [], [], []}, Tabs),
-
+
io:format("remote = ~p~n", [lists:sort(Unknown)]),
io:format("ram_copies = ~p~n", [lists:sort(Ram)]),
io:format("disc_copies = ~p~n", [lists:sort(Disc)]),
io:format("disc_only_copies = ~p~n", [lists:sort(DiscOnly)]),
-
+
Rfoldl = fun(T, Acc) ->
Rpat =
case val({T, access_mode}) of
@@ -2041,7 +2041,7 @@ display_tab_info() ->
table_info(T, where_to_commit)
end,
case lists:keysearch(Rpat, 1, Acc) of
- {value, {_Rpat, Rtabs}} ->
+ {value, {_Rpat, Rtabs}} ->
lists:keyreplace(Rpat, 1, Acc, {Rpat, [T | Rtabs]});
false ->
[{Rpat, [T]} | Acc]
@@ -2161,20 +2161,20 @@ system_info2(fallback_activated) ->
system_info2(version) ->
case ?catch_val(version) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
Apps = application:loaded_applications(),
case lists:keysearch(?APPLICATION, 1, Apps) of
{value, {_Name, _Desc, Version}} ->
Version;
false ->
%% Ensure that it does not match
- {mnesia_not_loaded, node(), now()}
+ {mnesia_not_loaded, node(), now()}
end;
Version ->
Version
end;
-system_info2(access_module) -> mnesia_monitor:get_env(access_module);
+system_info2(access_module) -> mnesia_monitor:get_env(access_module);
system_info2(auto_repair) -> mnesia_monitor:get_env(auto_repair);
system_info2(is_running) -> mnesia_lib:is_running();
system_info2(backup_module) -> mnesia_monitor:get_env(backup_module);
@@ -2183,7 +2183,7 @@ system_info2(debug) -> mnesia_monitor:get_env(debug);
system_info2(dump_log_load_regulation) -> mnesia_monitor:get_env(dump_log_load_regulation);
system_info2(dump_log_write_threshold) -> mnesia_monitor:get_env(dump_log_write_threshold);
system_info2(dump_log_time_threshold) -> mnesia_monitor:get_env(dump_log_time_threshold);
-system_info2(dump_log_update_in_place) ->
+system_info2(dump_log_update_in_place) ->
mnesia_monitor:get_env(dump_log_update_in_place);
system_info2(max_wait_for_decision) -> mnesia_monitor:get_env(max_wait_for_decision);
system_info2(embedded_mnemosyne) -> mnesia_monitor:get_env(embedded_mnemosyne);
@@ -2204,9 +2204,9 @@ system_info2(transaction_failures) -> mnesia_lib:read_counter(trans_failures);
system_info2(transaction_commits) -> mnesia_lib:read_counter(trans_commits);
system_info2(transaction_restarts) -> mnesia_lib:read_counter(trans_restarts);
system_info2(transaction_log_writes) -> mnesia_dumper:get_log_writes();
-system_info2(core_dir) -> mnesia_monitor:get_env(core_dir);
-system_info2(no_table_loaders) -> mnesia_monitor:get_env(no_table_loaders);
-system_info2(dc_dump_limit) -> mnesia_monitor:get_env(dc_dump_limit);
+system_info2(core_dir) -> mnesia_monitor:get_env(core_dir);
+system_info2(no_table_loaders) -> mnesia_monitor:get_env(no_table_loaders);
+system_info2(dc_dump_limit) -> mnesia_monitor:get_env(dc_dump_limit);
system_info2(send_compressed) -> mnesia_monitor:get_env(send_compressed);
system_info2(Item) -> exit({badarg, Item}).
@@ -2281,7 +2281,7 @@ system_info_items(no) ->
core_dir,
version
].
-
+
system_info() ->
IsRunning = mnesia_lib:is_running(),
case IsRunning of
@@ -2308,62 +2308,62 @@ load_mnesia_or_abort() ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Database mgt
-create_schema(Ns) ->
+create_schema(Ns) ->
mnesia_bup:create_schema(Ns).
-delete_schema(Ns) ->
+delete_schema(Ns) ->
mnesia_schema:delete_schema(Ns).
-backup(Opaque) ->
+backup(Opaque) ->
mnesia_log:backup(Opaque).
-backup(Opaque, Mod) ->
+backup(Opaque, Mod) ->
mnesia_log:backup(Opaque, Mod).
-traverse_backup(S, T, Fun, Acc) ->
+traverse_backup(S, T, Fun, Acc) ->
mnesia_bup:traverse_backup(S, T, Fun, Acc).
-traverse_backup(S, SM, T, TM, F, A) ->
+traverse_backup(S, SM, T, TM, F, A) ->
mnesia_bup:traverse_backup(S, SM, T, TM, F, A).
-install_fallback(Opaque) ->
+install_fallback(Opaque) ->
mnesia_bup:install_fallback(Opaque).
-install_fallback(Opaque, Mod) ->
+install_fallback(Opaque, Mod) ->
mnesia_bup:install_fallback(Opaque, Mod).
-uninstall_fallback() ->
+uninstall_fallback() ->
mnesia_bup:uninstall_fallback().
-uninstall_fallback(Args) ->
+uninstall_fallback(Args) ->
mnesia_bup:uninstall_fallback(Args).
-activate_checkpoint(Args) ->
+activate_checkpoint(Args) ->
mnesia_checkpoint:activate(Args).
-deactivate_checkpoint(Name) ->
+deactivate_checkpoint(Name) ->
mnesia_checkpoint:deactivate(Name).
-backup_checkpoint(Name, Opaque) ->
+backup_checkpoint(Name, Opaque) ->
mnesia_log:backup_checkpoint(Name, Opaque).
-backup_checkpoint(Name, Opaque, Mod) ->
+backup_checkpoint(Name, Opaque, Mod) ->
mnesia_log:backup_checkpoint(Name, Opaque, Mod).
-restore(Opaque, Args) ->
+restore(Opaque, Args) ->
mnesia_schema:restore(Opaque, Args).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt
-create_table(Arg) ->
+create_table(Arg) ->
mnesia_schema:create_table(Arg).
-create_table(Name, Arg) when is_list(Arg) ->
+create_table(Name, Arg) when is_list(Arg) ->
mnesia_schema:create_table([{name, Name}| Arg]);
create_table(Name, Arg) ->
{aborted, badarg, Name, Arg}.
-delete_table(Tab) ->
+delete_table(Tab) ->
mnesia_schema:delete_table(Tab).
add_table_copy(Tab, N, S) ->
@@ -2371,38 +2371,38 @@ add_table_copy(Tab, N, S) ->
del_table_copy(Tab, N) ->
mnesia_schema:del_table_copy(Tab, N).
-move_table_copy(Tab, From, To) ->
+move_table_copy(Tab, From, To) ->
mnesia_schema:move_table(Tab, From, To).
-add_table_index(Tab, Ix) ->
+add_table_index(Tab, Ix) ->
mnesia_schema:add_table_index(Tab, Ix).
-del_table_index(Tab, Ix) ->
+del_table_index(Tab, Ix) ->
mnesia_schema:del_table_index(Tab, Ix).
-transform_table(Tab, Fun, NewA) ->
+transform_table(Tab, Fun, NewA) ->
case catch val({Tab, record_name}) of
- {'EXIT', Reason} ->
+ {'EXIT', Reason} ->
mnesia:abort(Reason);
- OldRN ->
+ OldRN ->
mnesia_schema:transform_table(Tab, Fun, NewA, OldRN)
end.
-transform_table(Tab, Fun, NewA, NewRN) ->
+transform_table(Tab, Fun, NewA, NewRN) ->
mnesia_schema:transform_table(Tab, Fun, NewA, NewRN).
change_table_copy_type(T, N, S) ->
mnesia_schema:change_table_copy_type(T, N, S).
clear_table(Tab) ->
- case get(mnesia_activity_state) of
+ case get(mnesia_activity_state) of
State = {Mod, Tid, _Ts} when element(1, Tid) =/= tid ->
transaction(State, fun() -> do_clear_table(Tab) end, [], infinity, Mod, sync);
- undefined ->
+ undefined ->
transaction(undefined, fun() -> do_clear_table(Tab) end, [], infinity, ?DEFAULT_ACCESS, sync);
_ -> %% Not allowed for clear_table
mnesia:abort({aborted, nested_transaction})
end.
-
+
do_clear_table(Tab) ->
case get(mnesia_activity_state) of
{?DEFAULT_ACCESS, Tid, Ts} ->
@@ -2415,7 +2415,7 @@ do_clear_table(Tab) ->
clear_table(Tid, Ts, Tab, Obj) when element(1, Tid) =:= tid ->
Store = Ts#tidstore.store,
- mnesia_locker:wlock_table(Tid, Store, Tab),
+ mnesia_locker:wlock_table(Tid, Store, Tab),
Oid = {Tab, '_'},
?ets_insert(Store, {Oid, Obj, clear_table}),
ok.
@@ -2423,26 +2423,26 @@ clear_table(Tid, Ts, Tab, Obj) when element(1, Tid) =:= tid ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt - user properties
-read_table_property(Tab, PropKey) ->
+read_table_property(Tab, PropKey) ->
val({Tab, user_property, PropKey}).
-write_table_property(Tab, Prop) ->
+write_table_property(Tab, Prop) ->
mnesia_schema:write_table_property(Tab, Prop).
-delete_table_property(Tab, PropKey) ->
+delete_table_property(Tab, PropKey) ->
mnesia_schema:delete_table_property(Tab, PropKey).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt - user properties
-change_table_frag(Tab, FragProp) ->
+change_table_frag(Tab, FragProp) ->
mnesia_schema:change_table_frag(Tab, FragProp).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Table mgt - table load
%% Dump a ram table to disc
-dump_tables(Tabs) ->
+dump_tables(Tabs) ->
mnesia_schema:dump_tables(Tabs).
%% allow the user to wait for some tables to be loaded
@@ -2455,10 +2455,10 @@ force_load_table(Tab) ->
Other -> Other
end.
-change_table_access_mode(T, Access) ->
+change_table_access_mode(T, Access) ->
mnesia_schema:change_table_access_mode(T, Access).
-change_table_load_order(T, O) ->
+change_table_load_order(T, O) ->
mnesia_schema:change_table_load_order(T, O).
change_table_majority(T, M) ->
@@ -2471,13 +2471,13 @@ set_master_nodes(Nodes) when is_list(Nodes) ->
yes ->
CsPat = {{'_', cstruct}, '_'},
Cstructs0 = ?ets_match_object(mnesia_gvar, CsPat),
- Cstructs = [Cs || {_, Cs} <- Cstructs0],
+ Cstructs = [Cs || {_, Cs} <- Cstructs0],
log_valid_master_nodes(Cstructs, Nodes, UseDir, IsRunning);
_NotRunning ->
case UseDir of
true ->
mnesia_lib:lock_table(schema),
- Res =
+ Res =
case mnesia_schema:read_cstructs_from_disc() of
{ok, Cstructs} ->
log_valid_master_nodes(Cstructs, Nodes, UseDir, IsRunning);
@@ -2497,7 +2497,7 @@ log_valid_master_nodes(Cstructs, Nodes, UseDir, IsRunning) ->
Fun = fun(Cs) ->
Copies = mnesia_lib:copy_holders(Cs),
Valid = mnesia_lib:intersect(Nodes, Copies),
- {Cs#cstruct.name, Valid}
+ {Cs#cstruct.name, Valid}
end,
Args = lists:map(Fun, Cstructs),
mnesia_recover:log_master_nodes(Args, UseDir, IsRunning).
@@ -2523,7 +2523,7 @@ set_master_nodes(Tab, Nodes) when is_list(Nodes) ->
case UseDir of
true ->
mnesia_lib:lock_table(schema),
- Res =
+ Res =
case mnesia_schema:read_cstructs_from_disc() of
{ok, Cstructs} ->
case lists:keysearch(Tab, 2, Cstructs) of
@@ -2553,7 +2553,7 @@ set_master_nodes(Tab, Nodes) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Misc admin
-dump_log() ->
+dump_log() ->
mnesia_controller:sync_dump_log(user).
subscribe(What) ->
@@ -2568,10 +2568,10 @@ report_event(Event) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Snmp
-snmp_open_table(Tab, Us) ->
+snmp_open_table(Tab, Us) ->
mnesia_schema:add_snmp(Tab, Us).
-snmp_close_table(Tab) ->
+snmp_close_table(Tab) ->
mnesia_schema:del_snmp(Tab).
snmp_get_row(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(RowIndex) ->
@@ -2583,26 +2583,26 @@ snmp_get_row(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(RowIndex)
SnmpType = val({Tab,snmp}),
Fix = fun({{_,Key},Row,Op}, Res) ->
case mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) of
- RowIndex ->
+ RowIndex ->
case Op of
write -> {ok, Row};
_ ->
undefined
end;
- _ ->
+ _ ->
Res
end
end,
lists:foldl(Fix, undefined, Ops);
Key ->
case Mod:read(Tid, Ts, Tab, Key, read) of
- [Row] ->
+ [Row] ->
{ok, Row};
- _ ->
+ _ ->
undefined
end
end;
- _ ->
+ _ ->
dirty_rpc(Tab, mnesia_snmp_hook, get_row, [Tab, RowIndex])
end;
snmp_get_row(Tab, _RowIndex) ->
@@ -2613,7 +2613,7 @@ snmp_get_row(Tab, _RowIndex) ->
snmp_get_next_index(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(RowIndex) ->
{Next,OrigKey} = dirty_rpc(Tab, mnesia_snmp_hook, get_next_index, [Tab, RowIndex]),
case get(mnesia_activity_state) of
- {_Mod, Tid, #tidstore{store=Store}} when element(1, Tid) =:= tid ->
+ {_Mod, Tid, #tidstore{store=Store}} when element(1, Tid) =:= tid ->
case OrigKey of
undefined ->
snmp_order_keys(Store, Tab, RowIndex, []);
@@ -2639,7 +2639,7 @@ snmp_get_next_index(Tab, _RowIndex) ->
snmp_order_keys(Store,Tab,RowIndex,Def) ->
All = ?ets_match(Store, {{Tab,'$1'},'_','$2'}),
SnmpType = val({Tab,snmp}),
- Keys0 = [mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) ||
+ Keys0 = [mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) ||
Key <- ts_keys_1(All, Def)],
Keys = lists:sort(Keys0),
get_ordered_snmp_key(RowIndex,Keys).
@@ -2648,7 +2648,7 @@ get_ordered_snmp_key(Prev, [First|_]) when Prev < First -> {ok, First};
get_ordered_snmp_key(Prev, [_|R]) ->
get_ordered_snmp_key(Prev, R);
get_ordered_snmp_key(_, []) ->
- endOfTable.
+ endOfTable.
%%%%%%%%%%
@@ -2657,7 +2657,7 @@ snmp_get_mnesia_key(Tab, RowIndex) when is_atom(Tab), Tab /= schema, is_list(Row
{_Mod, Tid, Ts} when element(1, Tid) =:= tid ->
Res = dirty_rpc(Tab,mnesia_snmp_hook,get_mnesia_key,[Tab,RowIndex]),
snmp_filter_key(Res, RowIndex, Tab, Ts#tidstore.store);
- _ ->
+ _ ->
dirty_rpc(Tab, mnesia_snmp_hook, get_mnesia_key, [Tab, RowIndex])
end;
snmp_get_mnesia_key(Tab, _RowIndex) ->
@@ -2670,7 +2670,7 @@ snmp_oid_to_mnesia_key(RowIndex, Tab) ->
{ok, MnesiaKey} -> MnesiaKey;
undefined -> unknown
end;
- MnesiaKey ->
+ MnesiaKey ->
MnesiaKey
end.
@@ -2690,20 +2690,20 @@ snmp_filter_key(undefined, RowIndex, Tab, Store) ->
SnmpType = val({Tab,snmp}),
Fix = fun({{_,Key},_,Op}, Res) ->
case mnesia_snmp_hook:key_to_oid(Tab,Key,SnmpType) of
- RowIndex ->
+ RowIndex ->
case Op of
write -> {ok, Key};
_ ->
undefined
end;
- _ ->
+ _ ->
Res
end
end,
lists:foldl(Fix, undefined, Ops);
Key ->
case ?ets_lookup(Store, {Tab,Key}) of
- [] ->
+ [] ->
undefined;
Ops ->
case lists:last(Ops) of
@@ -2716,9 +2716,9 @@ snmp_filter_key(undefined, RowIndex, Tab, Store) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Textfile access
-load_textfile(F) ->
+load_textfile(F) ->
mnesia_text:load_textfile(F).
-dump_to_textfile(F) ->
+dump_to_textfile(F) ->
mnesia_text:dump_to_textfile(F).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2727,7 +2727,7 @@ dump_to_textfile(F) ->
table(Tab) ->
table(Tab, []).
table(Tab,Opts) ->
- {[Trav,Lock,NObjects],QlcOptions0} =
+ {[Trav,Lock,NObjects],QlcOptions0} =
qlc_opts(Opts,[{traverse,select},{lock,read},{n_objects,100}]),
TF = case Trav of
{select,Ms} ->
@@ -2740,10 +2740,10 @@ table(Tab,Opts) ->
Pre = fun(Arg) -> pre_qlc(Arg, Tab) end,
Post = fun() -> post_qlc(Tab) end,
Info = fun(Tag) -> qlc_info(Tab, Tag) end,
- ParentFun = fun() ->
- {mnesia_activity, mnesia:get_activity_id()}
+ ParentFun = fun() ->
+ {mnesia_activity, mnesia:get_activity_id()}
end,
- Lookup =
+ Lookup =
case Trav of
{select, _} -> [];
_ ->
@@ -2757,27 +2757,27 @@ table(Tab,Opts) ->
[{lookup_fun, LFun}]
end,
MFA = fun(Type) -> qlc_format(Type, Tab, NObjects, Lock, Opts) end,
- QlcOptions = [{pre_fun, Pre}, {post_fun, Post},
- {info_fun, Info}, {parent_fun, ParentFun},
+ QlcOptions = [{pre_fun, Pre}, {post_fun, Post},
+ {info_fun, Info}, {parent_fun, ParentFun},
{format_fun, MFA}|Lookup] ++ QlcOptions0,
qlc:table(TF, QlcOptions).
pre_qlc(Opts, Tab) ->
- {_,Tid,_} =
+ {_,Tid,_} =
case get(mnesia_activity_state) of
undefined ->
case lists:keysearch(parent_value, 1, Opts) of
{value, {parent_value,{mnesia_activity,undefined}}} ->
abort(no_transaction);
{value, {parent_value,{mnesia_activity,Aid}}} ->
- {value,{stop_fun,Stop}} =
+ {value,{stop_fun,Stop}} =
lists:keysearch(stop_fun,1,Opts),
put_activity_id(Aid,Stop),
Aid;
_ ->
abort(no_transaction)
end;
- Else ->
+ Else ->
Else
end,
case element(1,Tid) of
@@ -2785,9 +2785,9 @@ pre_qlc(Opts, Tab) ->
_ ->
case ?catch_val({Tab, setorbag}) of
ordered_set -> ok;
- _ ->
+ _ ->
dirty_rpc(Tab, mnesia_tm, fixtable, [Tab,true,self()]),
- ok
+ ok
end
end.
@@ -2806,7 +2806,7 @@ post_qlc(Tab) ->
qlc_select('$end_of_table') -> [];
qlc_select({[], Cont}) -> qlc_select(select(Cont));
-qlc_select({Objects, Cont}) ->
+qlc_select({Objects, Cont}) ->
Objects ++ fun() -> qlc_select(select(Cont)) end.
qlc_opts(Opts, Keys) when is_list(Opts) ->
@@ -2826,7 +2826,7 @@ qlc_opts(Opts,[],Acc) -> {lists:reverse(Acc),Opts}.
qlc_info(Tab, num_of_objects) ->
dirty_rpc(Tab, ?MODULE, raw_table_info, [Tab, size]);
-qlc_info(_, keypos) -> 2;
+qlc_info(_, keypos) -> 2;
qlc_info(_, is_unique_objects) -> true;
qlc_info(Tab, is_unique_keys) ->
case val({Tab, type}) of
@@ -2836,9 +2836,9 @@ qlc_info(Tab, is_unique_keys) ->
end;
qlc_info(Tab, is_sorted_objects) ->
case val({Tab, type}) of
- ordered_set ->
+ ordered_set ->
case ?catch_val({Tab, frag_hash}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
ascending;
_ -> %% Fragmented tables are not ordered
no
@@ -2856,11 +2856,11 @@ qlc_format({match_spec, Ms}, Tab, NObjects, Lock, Opts) ->
{?MODULE, table, [Tab,[{traverse,{select,Ms}},{n_objects, NObjects}, {lock,Lock}|Opts]]};
qlc_format({lookup, 2, Keys}, Tab, _, Lock, _) ->
io_lib:format("lists:flatmap(fun(V) -> "
- "~w:read(~w, V, ~w) end, ~w)",
+ "~w:read(~w, V, ~w) end, ~w)",
[?MODULE, Tab, Lock, Keys]);
qlc_format({lookup, Index,Keys}, Tab, _, _, _) ->
io_lib:format("lists:flatmap(fun(V) -> "
- "~w:index_read(~w, V, ~w) end, ~w)",
+ "~w:index_read(~w, V, ~w) end, ~w)",
[?MODULE, Tab, Index, Keys]).
@@ -2874,7 +2874,7 @@ do_fixtable(Tab, Store) ->
ok;
_ ->
case ?ets_match_object(Store, {fixtable, {Tab, '_'}}) of
- [] ->
+ [] ->
Node = dirty_rpc(Tab, mnesia_tm, fixtable, [Tab,true,self()]),
?ets_insert(Store, {fixtable, {Tab, Node}});
_ ->
@@ -2886,10 +2886,10 @@ do_fixtable(Tab, Store) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Mnemosyne exclusive
-get_activity_id() ->
+get_activity_id() ->
get(mnesia_activity_state).
-put_activity_id(Activity) ->
+put_activity_id(Activity) ->
mnesia_tm:put_activity_id(Activity).
-put_activity_id(Activity,Fun) ->
+put_activity_id(Activity,Fun) ->
mnesia_tm:put_activity_id(Activity,Fun).
diff --git a/lib/mnesia/src/mnesia.hrl b/lib/mnesia/src/mnesia.hrl
index 2375b72d59..2855792646 100644
--- a/lib/mnesia/src/mnesia.hrl
+++ b/lib/mnesia/src/mnesia.hrl
@@ -70,6 +70,7 @@
attributes = [key, val], % [Atom]
user_properties = [], % [Record]
frag_properties = [], % [{Key, Val]
+ storage_properties = [], % [{Key, Val]
cookie = ?unique_cookie, % Term
version = {{2, 0}, []}}). % {{Integer, Integer}, [Node]}
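
Aside (assumption, not shown in this diff): the new storage_properties field in #cstruct presumably carries the create_table option of the same name, whose documented form is a list of {Backend, Options} pairs. A minimal sketch with a made-up table name and options:

%% Illustrative only: backend options that would end up in
%% #cstruct.storage_properties (table name and options are hypothetical).
mnesia:create_table(example_tab,
                    [{disc_copies, [node()]},
                     {attributes, [key, val]},
                     {storage_properties,
                      [{ets,  [compressed]},
                       {dets, [{auto_save, 5000}]}]}]).
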
diff --git a/lib/mnesia/src/mnesia_backup.erl b/lib/mnesia/src/mnesia_backup.erl
index f372ca0be5..736f2ed9bf 100644
--- a/lib/mnesia/src/mnesia_backup.erl
+++ b/lib/mnesia/src/mnesia_backup.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -56,14 +56,14 @@
-export([
%% Write access
- open_write/1,
- write/2,
- commit_write/1,
+ open_write/1,
+ write/2,
+ commit_write/1,
abort_write/1,
%% Read access
- open_read/1,
- read/1,
+ open_read/1,
+ read/1,
close_read/1
]).
diff --git a/lib/mnesia/src/mnesia_bup.erl b/lib/mnesia/src/mnesia_bup.erl
index 47dcdad7ac..fd87be1759 100644
--- a/lib/mnesia/src/mnesia_bup.erl
+++ b/lib/mnesia/src/mnesia_bup.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -62,7 +62,7 @@
fallback_tmp,
skip_tables = [],
keep_tables = [],
- default_op = keep_tables
+ default_op = keep_tables
}).
-type fallback_args() :: #fallback_args{}.
@@ -134,7 +134,7 @@ abort_restore(R, What, Args, Reason) ->
[Mod, What, Args, Reason]),
catch apply(Mod, close_read, [Opaque]),
throw({error, Reason}).
-
+
fallback_to_schema() ->
Fname = fallback_bup(),
fallback_to_schema(Fname).
@@ -146,14 +146,14 @@ fallback_to_schema(Fname) ->
{error, Reason};
Schema ->
case catch lookup_schema(schema, Schema) of
- {error, _} ->
+ {error, _} ->
{error, "No schema in fallback"};
List ->
{ok, fallback, List}
end
end.
-%% Opens Opaque reads schema and then close
+%% Opens Opaque reads schema and then close
read_schema(Mod, Opaque) ->
R = #restore{bup_module = Mod, bup_data = Opaque},
case catch read_schema_section(R) of
@@ -163,7 +163,7 @@ read_schema(Mod, Opaque) ->
catch safe_apply(R2, close_read, [R2#restore.bup_data]),
Schema
end.
-
+
%% Open backup media and extract schema
%% rewind backup media and leave it open
%% Returns {R, {Header, Schema}}
@@ -227,7 +227,7 @@ refresh_cookie(Schema, NewCookie) ->
Cs2 = Cs#cstruct{cookie = NewCookie},
Item = {schema, schema, mnesia_schema:cs2list(Cs2)},
lists:keyreplace(schema, 2, Schema, Item);
-
+
false ->
Reason = "No schema found. Cannot be used as backup.",
throw({error, {Reason, Schema}})
@@ -273,7 +273,7 @@ convert_0_1([{schema, db_nodes, DbNodes} | Schema], Acc, Cs) ->
convert_0_1([{schema, version, Version} | Schema], Acc, Cs) ->
convert_0_1(Schema, Acc, Cs#cstruct{version = Version});
convert_0_1([{schema, Tab, Def} | Schema], Acc, Cs) ->
- Head =
+ Head =
case lists:keysearch(index, 1, Def) of
{value, {index, PosList}} ->
%% Remove the snmp "index"
@@ -334,7 +334,7 @@ create_schema(Ns, ok) ->
case mnesia_lib:ensure_loaded(?APPLICATION) of
ok ->
case mnesia_monitor:get_env(schema_location) of
- ram ->
+ ram ->
{error, {has_no_disc, node()}};
_ ->
case mnesia_schema:opt_create_dir(true, mnesia_lib:dir()) of
@@ -358,7 +358,7 @@ create_schema(Ns, ok) ->
{error, Reason}
end
end
- end;
+ end;
{error, Reason} ->
{error, Reason}
end;
@@ -372,7 +372,9 @@ mk_str() ->
lists:concat([node()] ++ Now ++ ".TMP").
make_initial_backup(Ns, Opaque, Mod) ->
- Schema = [{schema, schema, mnesia_schema:get_initial_schema(disc_copies, Ns)}],
+ Orig = mnesia_schema:get_initial_schema(disc_copies, Ns),
+ Modded = proplists:delete(storage_properties, proplists:delete(majority, Orig)),
+ Schema = [{schema, schema, Modded}],
O2 = do_apply(Mod, open_write, [Opaque], Opaque),
O3 = do_apply(Mod, write, [O2, [mnesia_log:backup_log_header()]], O2),
O4 = do_apply(Mod, write, [O3, Schema], O3),
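
Aside (not from the commit): the two nested proplists:delete/2 calls above filter the freshly generated schema proplist, presumably so the initial backup can still be read by nodes whose cstruct lacks the newer fields. A tiny sketch of the effect on a made-up proplist:

%% Hypothetical helper showing what the nested proplists:delete/2 calls do.
strip_new_keys(SchemaProps) ->
    proplists:delete(storage_properties,
                     proplists:delete(majority, SchemaProps)).
%% strip_new_keys([{name, schema}, {majority, false}, {storage_properties, []}])
%% returns [{name, schema}]
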
@@ -432,7 +434,7 @@ check_fallback_args([Arg | Tail], FA) ->
check_fallback_args([], FA) ->
{ok, FA}.
-check_fallback_arg_type(Arg, FA) ->
+check_fallback_arg_type(Arg, FA) ->
case Arg of
{scope, global} ->
FA#fallback_args{scope = global};
@@ -460,10 +462,10 @@ atom_list([H | T]) when is_atom(H) ->
atom_list(T);
atom_list([]) ->
ok.
-
+
do_install_fallback(FA) ->
Pid = spawn_link(?MODULE, install_fallback_master, [self(), FA]),
- Res =
+ Res =
receive
{'EXIT', Pid, Reason} -> % if appl has trapped exit
{error, {'EXIT', Reason}};
@@ -504,7 +506,7 @@ restore_recs(Recs, Header, Schema, {start, FA}) ->
Pids = [spawn_link(N, ?MODULE, fallback_receiver, Args) || N <- Ns],
send_fallback(Pids, {start, Header, Schema2}),
Res = restore_recs(Recs, Header, Schema2, Pids),
- global:del_lock({{mnesia_table_lock, schema}, self()}, Ns),
+ global:del_lock({{mnesia_table_lock, schema}, self()}, Ns),
Res
end;
@@ -576,7 +578,7 @@ fallback_tmp_name() -> "FALLBACK.TMP".
-spec fallback_receiver(pid(), fallback_args()) -> no_return().
fallback_receiver(Master, FA) ->
process_flag(trap_exit, true),
-
+
case catch register(mnesia_fallback, self()) of
{'EXIT', _} ->
Reason = {already_exists, node()},
@@ -608,7 +610,7 @@ local_fallback_error(Master, Reason) ->
Master ! {self(), {error, Reason}},
unlink(Master),
exit(Reason).
-
+
check_fallback_dir(Master, FA) ->
case mnesia:system_info(schema_location) of
ram ->
@@ -657,7 +659,7 @@ fallback_receiver_loop(Master, R, FA, State) ->
R2 = safe_apply(R, write, [R#restore.bup_data, Recs]),
Master ! {self(), ok},
fallback_receiver_loop(Master, R2, FA, records);
-
+
{Master, swap} when State =/= schema ->
?eval_debug_fun({?MODULE, fallback_receiver_loop, pre_swap}, []),
safe_apply(R, commit_write, [R#restore.bup_data]),
@@ -832,7 +834,7 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
ok = dets:delete(schema, {schema, Tab}),
create_dat_files(Tail, LocalTabs);
Storage =:= disc_only_copies ->
- Args = [{file, TmpFile}, {keypos, 2},
+ Args = [{file, TmpFile}, {keypos, 2},
{type, mnesia_lib:disk_type(Tab, Cs#cstruct.type)}],
Open = fun(T, LT) when T =:= LT#local_tab.name ->
case mnesia_lib:dets_sync_open(T, Args) of
@@ -859,9 +861,9 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
Swap = fun(T, LT) when T =:= LT#local_tab.name ->
Expunge(),
case LT#local_tab.opened of
- true ->
+ true ->
Close(T,LT);
- false ->
+ false ->
Open(T,LT),
Close(T,LT)
end,
@@ -885,8 +887,8 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
create_dat_files(Tail, LocalTabs);
Storage =:= ram_copies; Storage =:= disc_copies ->
Open = fun(T, LT) when T =:= LT#local_tab.name ->
- mnesia_log:open_log({?MODULE, T},
- mnesia_log:dcl_log_header(),
+ mnesia_log:open_log({?MODULE, T},
+ mnesia_log:dcl_log_header(),
TmpFile,
false,
false,
@@ -915,7 +917,7 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
true ->
Log = mnesia_log:open_log(fallback_tab,
mnesia_log:dcd_log_header(),
- DcdFile,
+ DcdFile,
false),
mnesia_log:close_log(Log),
case LT#local_tab.opened of
@@ -924,7 +926,7 @@ create_dat_files([{schema, Tab, TabDef} | Tail], LocalTabs) ->
false ->
Open(T,LT),
Close(T,LT)
- end,
+ end,
case file:rename(TmpFile, DclFile) of
ok ->
ok;
@@ -957,7 +959,7 @@ create_dat_files([], _LocalTabs) ->
ok.
uninstall_fallback() ->
- uninstall_fallback([{scope, global}]).
+ uninstall_fallback([{scope, global}]).
uninstall_fallback(Args) ->
case check_fallback_args(Args, #fallback_args{}) of
@@ -967,7 +969,7 @@ uninstall_fallback(Args) ->
{error, Reason}
end.
-do_uninstall_fallback(FA) ->
+do_uninstall_fallback(FA) ->
%% Ensure that we access the intended Mnesia
%% directory. This function may not be called
%% during startup since it will cause the
@@ -1038,11 +1040,11 @@ do_uninstall(_ClientPid, [], GoodPids, BadNodes, BadRes) ->
local_uninstall_fallback(Master, FA) ->
%% Don't trap exit
-
+
register(mnesia_fallback, self()), % May exit
FA2 = check_fallback_dir(Master, FA), % May exit
Master ! {self(), started},
-
+
receive
{Master, do_uninstall} ->
?eval_debug_fun({?MODULE, uninstall_fallback2, pre_delete}, []),
@@ -1050,7 +1052,7 @@ local_uninstall_fallback(Master, FA) ->
Tmp = FA2#fallback_args.fallback_tmp,
Bup = FA2#fallback_args.fallback_bup,
file:delete(Tmp),
- Res =
+ Res =
case fallback_exists(Bup) of
true -> file:delete(Bup);
false -> ok
@@ -1077,7 +1079,7 @@ rec_uninstall(ClientPid, [], Res) ->
ClientPid ! {self(), Res},
unlink(ClientPid),
exit(normal).
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Backup traversal
@@ -1128,7 +1130,7 @@ do_traverse_backup(ClientPid, Source, SourceMod, Target, TargetMod, Fun, Acc) ->
if
TargetMod =/= read_only ->
case catch do_apply(TargetMod, open_write, [Target], Target) of
- {error, Error} ->
+ {error, Error} ->
unlink(ClientPid),
ClientPid ! {iter_done, self(), {error, Error}},
exit(Error);
@@ -1138,15 +1140,15 @@ do_traverse_backup(ClientPid, Source, SourceMod, Target, TargetMod, Fun, Acc) ->
ignore
end,
A = {start, Fun, Acc, TargetMod, Iter},
- Res =
+ Res =
case iterate(SourceMod, fun trav_apply/4, Source, A) of
{ok, {iter, _, Acc2, _, Iter2}} when TargetMod =/= read_only ->
case catch do_apply(TargetMod, commit_write, [Iter2], Iter2) of
- {error, Reason} ->
+ {error, Reason} ->
{error, Reason};
- _ ->
+ _ ->
{ok, Acc2}
- end;
+ end;
{ok, {iter, _, Acc2, _, _}} ->
{ok, Acc2};
{error, Reason} when TargetMod =/= read_only->
diff --git a/lib/mnesia/src/mnesia_controller.erl b/lib/mnesia/src/mnesia_controller.erl
index d4b2c7b5cc..d488a33d67 100644
--- a/lib/mnesia/src/mnesia_controller.erl
+++ b/lib/mnesia/src/mnesia_controller.erl
@@ -57,7 +57,8 @@
release_schema_commit_lock/0,
create_table/1,
get_disc_copy/1,
- get_cstructs/0,
+ get_remote_cstructs/0, % new function
+ get_cstructs/0, % old function
sync_and_block_table_whereabouts/4,
sync_del_table_copy_whereabouts/2,
block_table/1,
@@ -106,14 +107,14 @@
-include("mnesia.hrl").
--define(SERVER_NAME, ?MODULE).
+-define(SERVER_NAME, ?MODULE).
-record(state, {supervisor,
schema_is_merged = false,
early_msgs = [],
- loader_pid = [], %% Was Pid is now [{Pid,Work}|..]
+ loader_pid = [], %% Was Pid is now [{Pid,Work}|..]
loader_queue, %% Was list is now gb_tree
- sender_pid = [], %% Was a pid or undef is now [{Pid,Work}|..]
+ sender_pid = [], %% Was a pid or undef is now [{Pid,Work}|..]
sender_queue = [],
late_loader_queue, %% Was list is now gb_tree
dumper_pid, %% Dumper or schema commit pid
@@ -123,12 +124,12 @@
is_stopping = false
}).
%% Backwards Comp. Sender_pid is now a list of senders..
-get_senders(#state{sender_pid = Pids}) when is_list(Pids) -> Pids.
+get_senders(#state{sender_pid = Pids}) when is_list(Pids) -> Pids.
%% Backwards Comp. loader_pid is now a list of loaders..
-get_loaders(#state{loader_pid = Pids}) when is_list(Pids) -> Pids.
+get_loaders(#state{loader_pid = Pids}) when is_list(Pids) -> Pids.
max_loaders() ->
case ?catch_val(no_table_loaders) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
mnesia_lib:set(no_table_loaders,1),
1;
Val -> Val
@@ -152,7 +153,7 @@ max_loaders() ->
remote_storage
}).
--record(disc_load, {table,
+-record(disc_load, {table,
reason,
opt_reply_to
}).
@@ -183,7 +184,7 @@ max_loaders() ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
Value -> Value
end.
@@ -198,7 +199,7 @@ sync_dump_log(InitBy) ->
async_dump_log(InitBy) ->
?SERVER_NAME ! {async_dump_log, InitBy}.
-
+
%% Wait for tables to be active
%% If needed, we will wait for Mnesia to start
%% If Mnesia stops, we will wait for Mnesia to restart
@@ -226,7 +227,7 @@ do_wait_for_tables(Tabs, Timeout) ->
exit(Pid, timeout),
reply_wait(Tabs)
end.
-
+
reply_wait(Tabs) ->
case catch mnesia_lib:active_tables() of
{'EXIT', _} ->
@@ -269,7 +270,7 @@ rec_tabs([Tab | Tabs], AllTabs, From, Init) ->
%% This will trigger an exit signal
%% to mnesia_init
exit(wait_for_tables_timeout);
-
+
{'EXIT', Init, _} ->
%% Oops, mnesia_init stopped,
exit(mnesia_stopped)
@@ -278,8 +279,14 @@ rec_tabs([], _, _, Init) ->
unlink(Init),
ok.
+get_remote_cstructs() ->
+ get_cstructs(). %% Sigh, not forward compatible; always check the version
+
+%% Old function kept for backwards compatibility; converts cstructs before sending.
get_cstructs() ->
- call(get_cstructs).
+ {cstructs, Cstructs, Running} = call(get_cstructs),
+ Node = node(group_leader()),
+ {cstructs, mnesia_schema:normalize_cs(Cstructs, Node), Running}.
update(Fun) ->
call({update,Fun}).
@@ -309,7 +316,7 @@ get_network_copy(Tab, Cs) ->
% We can't let the controller queue this one
% because that may cause a deadlock between schema_operations
% and initial tableloadings which both takes schema locks.
-% But we have to get copier_done msgs when the other side
+% But we have to get copier_done msgs when the other side
% goes down.
call({add_other, self()}),
Reason = {dumper,add_table_copy},
@@ -331,14 +338,14 @@ get_network_copy(Tab, Cs) ->
ignore
end,
Res#loader_done.reply;
- #loader_done{} ->
+ #loader_done{} ->
Res#loader_done.reply;
Else ->
{not_loaded, Else}
end.
%% This functions is invoked from the dumper
-%%
+%%
%% There are two cases here:
%% startup ->
%% no need for sync, since mnesia_controller not started yet
@@ -370,11 +377,11 @@ force_load_table(Tab) when is_atom(Tab), Tab /= schema ->
end;
force_load_table(Tab) ->
{error, {bad_type, Tab}}.
-
+
do_force_load_table(Tab) ->
Loaded = ?catch_val({Tab, load_reason}),
case Loaded of
- unknown ->
+ unknown ->
set({Tab, load_by_force}, true),
mnesia_late_loader:async_late_disc_load(node(), [Tab], forced_by_user),
wait_for_tables([Tab], infinity);
@@ -384,7 +391,7 @@ do_force_load_table(Tab) ->
wait_for_tables([Tab], infinity);
_ ->
ok
- end.
+ end.
master_nodes_updated(schema, _Masters) ->
ignore;
master_nodes_updated(Tab, Masters) ->
@@ -428,15 +435,15 @@ connect_nodes(Ns) ->
connect_nodes(Ns, fun default_merge/1).
connect_nodes(Ns, UserFun) ->
- case mnesia:system_info(is_running) of
+ case mnesia:system_info(is_running) of
no ->
{error, {node_not_running, node()}};
- yes ->
+ yes ->
Pid = spawn_link(?MODULE,connect_nodes2,[self(),Ns, UserFun]),
- receive
- {?MODULE, Pid, Res, New} ->
+ receive
+ {?MODULE, Pid, Res, New} ->
case Res of
- ok ->
+ ok ->
mnesia_lib:add_list(extra_db_nodes, New),
{ok, New};
{aborted, {throw, Str}} when is_list(Str) ->
@@ -444,8 +451,8 @@ connect_nodes(Ns, UserFun) ->
{error, {merge_schema_failed, lists:flatten(Str)}};
Else ->
{error, Else}
- end;
- {'EXIT', Pid, Reason} ->
+ end;
+ {'EXIT', Pid, Reason} ->
{error, Reason}
end
end.
@@ -456,16 +463,16 @@ connect_nodes2(Father, Ns, UserFun) ->
{NewC, OldC} = mnesia_recover:connect_nodes(Ns),
Connected = NewC ++OldC,
New1 = mnesia_lib:intersect(Ns, Connected),
- New = New1 -- Current,
+ New = New1 -- Current,
process_flag(trap_exit, true),
Res = try_merge_schema(New, [], UserFun),
Msg = {schema_is_merged, [], late_merge, []},
multicall([node()|Ns], Msg),
- After = val({current, db_nodes}),
+ After = val({current, db_nodes}),
Father ! {?MODULE, self(), Res, mnesia_lib:intersect(Ns,After)},
unlink(Father),
ok.
-
+
%% Merge the local schema with the schema on other nodes.
%% But first we must let all processes that want to force
%% load tables wait until the schema merge is done.
@@ -473,7 +480,7 @@ connect_nodes2(Father, Ns, UserFun) ->
merge_schema() ->
AllNodes = mnesia_lib:all_nodes(),
case try_merge_schema(AllNodes, [node()], fun default_merge/1) of
- ok ->
+ ok ->
schema_is_merged();
{aborted, {throw, Str}} when is_list(Str) ->
fatal("Failed to merge schema: ~s~n", [Str]);
@@ -525,7 +532,7 @@ im_running(OldFriends, NewFriends) ->
schema_is_merged() ->
MsgTag = schema_is_merged,
SafeLoads = initial_safe_loads(),
-
+
%% At this point we do not know anything about
%% which tables that the other nodes already
%% has loaded and therefore we let the normal
@@ -535,7 +542,7 @@ schema_is_merged() ->
%% that all nodes tells each other directly
%% when they have loaded a table and are
%% willing to share it.
-
+
try_schedule_late_disc_load(SafeLoads, initial, MsgTag).
@@ -579,7 +586,7 @@ remote_call(Node, Func, Args) ->
Else ->
Else
end.
-
+
multicall(Nodes, Msg) ->
{Good, Bad} = gen_server:multi_call(Nodes, ?MODULE, Msg, infinity),
PatchedGood = [Reply || {_Node, Reply} <- Good],
@@ -611,9 +618,9 @@ init([Parent]) ->
Msg = {async_dump_log, time_threshold},
{ok, Ref} = timer:send_interval(Interval, Msg),
mnesia_dumper:start_regulator(),
-
+
Empty = gb_trees:empty(),
- {ok, #state{supervisor = Parent, dump_log_timer_ref = Ref,
+ {ok, #state{supervisor = Parent, dump_log_timer_ref = Ref,
loader_queue = Empty,
late_loader_queue = Empty}}.
@@ -646,17 +653,17 @@ handle_call(block_controller, From, State) ->
handle_call({update,Fun}, From, State) ->
Res = (catch Fun()),
- reply(From, Res),
+ reply(From, Res),
noreply(State);
handle_call(get_cstructs, From, State) ->
Tabs = val({schema, tables}),
Cstructs = [val({T, cstruct}) || T <- Tabs],
Running = val({current, db_nodes}),
- reply(From, {cstructs, Cstructs, Running}),
+ reply(From, {cstructs, Cstructs, Running}),
noreply(State);
-handle_call({schema_is_merged, [], late_merge, []}, From,
+handle_call({schema_is_merged, [], late_merge, []}, From,
State = #state{schema_is_merged = Merged}) ->
case Merged of
{false, Node} when Node == node(From) ->
@@ -687,8 +694,8 @@ handle_call(disc_load_intents,From,State = #state{loader_queue=LQ,late_loader_qu
handle_call({update_where_to_write, [add, Tab, AddNode], _From}, _Dummy, State) ->
Current = val({current, db_nodes}),
- Res =
- case lists:member(AddNode, Current) and
+ Res =
+ case lists:member(AddNode, Current) and
(State#state.schema_is_merged == true) of
true ->
mnesia_lib:add_lsort({Tab, where_to_write}, AddNode),
@@ -722,7 +729,7 @@ handle_call({add_active_replica, [Tab, ToNode, RemoteS, AccessMode], From},
noreply(State#state{early_msgs = [{call, Msg, undefined} | Msgs]})
end;
-handle_call({unannounce_add_table_copy, [Tab, Node], From}, ReplyTo, State) ->
+handle_call({unannounce_add_table_copy, [Tab, Node], From}, ReplyTo, State) ->
KnownNode = lists:member(node(From), val({current, db_nodes})),
Merged = State#state.schema_is_merged,
if
@@ -742,16 +749,16 @@ handle_call({unannounce_add_table_copy, [Tab, Node], From}, ReplyTo, State) ->
end;
handle_call({net_load, Tab, Cs}, From, State) ->
- State2 =
+ State2 =
case State#state.schema_is_merged of
- true ->
+ true ->
Worker = #net_load{table = Tab,
opt_reply_to = From,
reason = {dumper,add_table_copy},
cstruct = Cs
},
add_worker(Worker, State);
- false ->
+ false ->
reply(From, {not_loaded, schema_not_merged}),
State
end,
@@ -794,16 +801,16 @@ handle_call({add_other, Who}, _From, State = #state{others=Others0}) ->
handle_call({del_other, Who}, _From, State = #state{others=Others0}) ->
Others = lists:delete(Who, Others0),
{reply, ok, State#state{others=Others}};
-
+
handle_call(Msg, _From, State) ->
error("~p got unexpected call: ~p~n", [?SERVER_NAME, Msg]),
noreply(State).
-late_disc_load(TabsR, Reason, RemoteLoaders, From,
+late_disc_load(TabsR, Reason, RemoteLoaders, From,
State = #state{loader_queue = LQ, late_loader_queue = LLQ}) ->
verbose("Intend to load tables: ~p~n", [TabsR]),
?eval_debug_fun({?MODULE, late_disc_load},
- [{tabs, TabsR},
+ [{tabs, TabsR},
{reason, Reason},
{loaders, RemoteLoaders}]),
@@ -812,14 +819,14 @@ late_disc_load(TabsR, Reason, RemoteLoaders, From,
%% Remove deleted tabs and queued/loaded
LocalTabs = gb_sets:from_ordset(lists:sort(mnesia_lib:val({schema,local_tables}))),
- Filter = fun(TabInfo0, Acc) ->
- TabInfo = {Tab,_} =
- case TabInfo0 of
+ Filter = fun(TabInfo0, Acc) ->
+ TabInfo = {Tab,_} =
+ case TabInfo0 of
{_,_} -> TabInfo0;
TabN -> {TabN,Reason}
end,
case gb_sets:is_member(Tab, LocalTabs) of
- true ->
+ true ->
case ?catch_val({Tab, where_to_read}) == node() of
true -> Acc;
false ->
@@ -831,12 +838,12 @@ late_disc_load(TabsR, Reason, RemoteLoaders, From,
false -> Acc
end
end,
-
+
Tabs = lists:foldl(Filter, [], TabsR),
-
+
Nodes = val({current, db_nodes}),
LateQueue = late_loaders(Tabs, RemoteLoaders, Nodes, LLQ),
- State#state{late_loader_queue = LateQueue}.
+ State#state{late_loader_queue = LateQueue}.
late_loaders([{Tab, Reason} | Tabs], RemoteLoaders, Nodes, LLQ) ->
case gb_trees:is_defined(Tab, LLQ) of
@@ -849,7 +856,7 @@ late_loaders([{Tab, Reason} | Tabs], RemoteLoaders, Nodes, LLQ) ->
LateLoad = #late_load{table=Tab,loaders=LoadNodes,reason=Reason},
late_loaders(Tabs, RemoteLoaders, Nodes, gb_trees:insert(Tab,LateLoad,LLQ));
true ->
- late_loaders(Tabs, RemoteLoaders, Nodes, LLQ)
+ late_loaders(Tabs, RemoteLoaders, Nodes, LLQ)
end;
late_loaders([], _RemoteLoaders, _Nodes, LLQ) ->
LLQ.
@@ -889,7 +896,7 @@ late_load_filter([RL | RemoteLoaders], Tab, Nodes, Acc) ->
end;
late_load_filter([], _Tab, _Nodes, Acc) ->
Acc.
-
+
%%----------------------------------------------------------------------
%% Func: handle_cast/2
%% Returns: {noreply, State} |
@@ -901,7 +908,7 @@ handle_cast({release_schema_commit_lock, _Owner}, State) ->
if
State#state.is_stopping == true ->
{stop, shutdown, State};
- true ->
+ true ->
case State#state.dumper_queue of
[#schema_commit_lock{}|Rest] ->
[_Worker | Rest] = State#state.dumper_queue,
@@ -922,7 +929,7 @@ handle_cast(unblock_controller, State) ->
[_Worker | Rest] = State#state.dumper_queue,
State2 = State#state{dumper_pid = undefined,
dumper_queue = Rest},
- State3 = opt_start_worker(State2),
+ State3 = opt_start_worker(State2),
noreply(State3)
end;
@@ -938,31 +945,31 @@ handle_cast({mnesia_down, Node}, State) ->
%% Fix if we are late_merging against the node that went down
case State#state.schema_is_merged of
- {false, Node} ->
+ {false, Node} ->
spawn(?MODULE, call, [{schema_is_merged, [], late_merge, []}]);
_ ->
ignore
end,
-
+
%% Fix internal stuff
LateQ = remove_loaders(Alltabs, Node, State#state.late_loader_queue),
-
+
case get_senders(State) ++ get_loaders(State) of
[] -> ignore;
- Senders ->
+ Senders ->
lists:foreach(fun({Pid,_}) -> Pid ! {copier_done, Node} end,
Senders)
end,
- lists:foreach(fun(Pid) -> Pid ! {copier_done,Node} end,
+ lists:foreach(fun(Pid) -> Pid ! {copier_done,Node} end,
State#state.others),
-
+
Remove = fun(ST) ->
node(ST#send_table.receiver_pid) /= Node
end,
NewSenders = lists:filter(Remove, State#state.sender_queue),
Early = remove_early_messages(State#state.early_msgs, Node),
- noreply(State#state{sender_queue = NewSenders,
- early_msgs = Early,
+ noreply(State#state{sender_queue = NewSenders,
+ early_msgs = Early,
late_loader_queue = LateQ
});
@@ -971,8 +978,8 @@ handle_cast({merging_schema, Node}, State) ->
false ->
%% This comes from dynamic connect_nodes which are made
%% after mnesia:start() and the schema_merge.
- ImANewKidInTheBlock =
- (val({schema, storage_type}) == ram_copies)
+ ImANewKidInTheBlock =
+ (val({schema, storage_type}) == ram_copies)
andalso (mnesia_lib:val({schema, local_tables}) == [schema]),
case ImANewKidInTheBlock of
true -> %% I'm newly started ram_node..
@@ -990,7 +997,7 @@ handle_cast(Msg, State) when State#state.schema_is_merged /= true ->
noreply(State#state{early_msgs = [{cast, Msg} | Msgs]});
%% This must be done after schema_is_merged otherwise adopt_orphan
-%% might trigger a table load from wrong nodes as a result of that we don't
+%% might trigger a table load from wrong nodes as a result of that we don't
%% know which tables we can load safly first.
handle_cast({im_running, Node, NewFriends}, State) ->
LocalTabs = mnesia_lib:local_active_tables() -- [schema],
@@ -1017,7 +1024,7 @@ handle_cast({sync_tabs, Tabs, From}, State) ->
handle_cast({i_have_tab, Tab, Node}, State) ->
case lists:member(Node, val({current, db_nodes})) of
- true ->
+ true ->
State2 = node_has_tabs([Tab], Node, State),
noreply(State2);
false ->
@@ -1033,10 +1040,10 @@ handle_cast({force_load_updated, Tab}, State) ->
State2 = node_has_tabs([Tab], SomeNode, State),
noreply(State2)
end;
-
+
handle_cast({master_nodes_updated, Tab, Masters}, State) ->
Active = val({Tab, active_replicas}),
- Valid =
+ Valid =
case val({Tab, load_by_force}) of
true ->
Active;
@@ -1056,10 +1063,10 @@ handle_cast({master_nodes_updated, Tab, Masters}, State) ->
State2 = node_has_tabs([Tab], SomeNode, State),
noreply(State2)
end;
-
+
handle_cast({adopt_orphans, Node, Tabs}, State) ->
State2 = node_has_tabs(Tabs, Node, State),
-
+
case ?catch_val({node_up,Node}) of
true -> ignore;
_ ->
@@ -1091,7 +1098,7 @@ handle_cast(Msg, State) ->
error("~p got unexpected cast: ~p~n", [?SERVER_NAME, Msg]),
noreply(State).
-handle_sync_tabs([Tab | Tabs], From) ->
+handle_sync_tabs([Tab | Tabs], From) ->
case val({Tab, where_to_read}) of
nowhere ->
case get({sync_tab, Tab}) of
@@ -1135,7 +1142,7 @@ handle_info(#dumper_done{worker_pid=Pid, worker_res=Res}, State) ->
{stop, fatal, State}
end;
-handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
+handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
LateQueue0 = State0#state.late_loader_queue,
State1 = State0#state{loader_pid = lists:keydelete(WPid,1,get_loaders(State0))},
@@ -1143,7 +1150,7 @@ handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
case Done#loader_done.is_loaded of
true ->
%% Optional table announcement
- if
+ if
Done#loader_done.needs_announce == true,
Done#loader_done.needs_reply == true ->
i_have_tab(Tab),
@@ -1177,7 +1184,7 @@ handle_info(Done = #loader_done{worker_pid=WPid, table_name=Tab}, State0) ->
State1#state{late_loader_queue=gb_trees:delete_any(Tab, LateQueue0)};
false ->
%% Either the node went down or table was not
- %% loaded remotly yet
+ %% loaded remotly yet
case Done#loader_done.needs_reply of
true ->
reply(Done#loader_done.reply_to,
@@ -1200,7 +1207,7 @@ handle_info(#sender_done{worker_pid=Pid, worker_res=Res}, State) ->
Senders = get_senders(State),
{value, {Pid,_Worker}} = lists:keysearch(Pid, 1, Senders),
if
- Res == ok ->
+ Res == ok ->
State2 = State#state{sender_pid = lists:keydelete(Pid, 1, Senders)},
State3 = opt_start_worker(State2),
noreply(State3);
@@ -1242,7 +1249,7 @@ handle_info(Msg = {'EXIT', Pid, R}, State) when R /= wait_for_tables_timeout ->
{stop, fatal, State};
false ->
case lists:keymember(Pid, 1, get_loaders(State)) of
- true ->
+ true ->
fatal("Loader crashed: ~p~n state: ~p~n", [R, State]),
{stop, fatal, State};
false ->
@@ -1328,7 +1335,7 @@ code_change(_OldVsn, State0, _Extra) ->
State1 = case State0#state.loader_pid of
Pids when is_list(Pids) -> State0;
undefined -> State0#state{loader_pid = [],loader_queue=gb_trees:empty()};
- Pid when is_pid(Pid) ->
+ Pid when is_pid(Pid) ->
[Loader|Rest] = State0#state.loader_queue,
LQ0 = [{element(2,Rec),Rec} || Rec <- Rest],
LQ1 = lists:sort(LQ0),
@@ -1336,7 +1343,7 @@ code_change(_OldVsn, State0, _Extra) ->
State0#state{loader_pid=[{Pid,Loader}], loader_queue=LQ}
end,
%% LateLoaderQueue
- State = if is_list(State1#state.late_loader_queue) ->
+ State = if is_list(State1#state.late_loader_queue) ->
LLQ0 = State1#state.late_loader_queue,
LLQ1 = lists:sort([{element(2,Rec),Rec} || Rec <- LLQ0]),
LLQ = gb_trees:from_orddict(LLQ1),
@@ -1345,7 +1352,7 @@ code_change(_OldVsn, State0, _Extra) ->
State1
end,
{ok, State}.
-
+
%%%----------------------------------------------------------------------
%%% Internal functions
%%%----------------------------------------------------------------------
@@ -1355,20 +1362,20 @@ maybe_log_mnesia_down(N) ->
%% so if we are not running (i.e haven't decided which tables
%% to load locally), don't log mnesia_down yet.
case mnesia_lib:is_running() of
- yes ->
+ yes ->
verbose("Logging mnesia_down ~w~n", [N]),
mnesia_recover:log_mnesia_down(N),
ok;
- _ ->
+ _ ->
Filter = fun(Tab) ->
inactive_copy_holders(Tab, N)
end,
HalfLoadedTabs = lists:any(Filter, val({schema, local_tables}) -- [schema]),
- if
+ if
HalfLoadedTabs == true ->
verbose("Logging mnesia_down ~w~n", [N]),
mnesia_recover:log_mnesia_down(N),
- ok;
+ ok;
true ->
%% Unfortunately we have not loaded some common
%% tables yet, so we cannot rely on the nodedown
@@ -1397,7 +1404,7 @@ orphan_tables([Tab | Tabs], Node, Ns, Local, Remote) ->
BeingCreated = (?catch_val({Tab, create_table}) == true),
Read = val({Tab, where_to_read}),
case lists:member(Node, DiscCopyHolders) of
- _ when BeingCreated == true ->
+ _ when BeingCreated == true ->
orphan_tables(Tabs, Node, Ns, Local, Remote);
_ when Read == node() -> %% Allready loaded
orphan_tables(Tabs, Node, Ns, Local, Remote);
@@ -1435,13 +1442,13 @@ orphan_tables([], _, _, LocalOrphans, RemoteMasters) ->
{LocalOrphans, RemoteMasters}.
node_has_tabs([Tab | Tabs], Node, State) when Node /= node() ->
- State2 =
+ State2 =
case catch update_whereabouts(Tab, Node, State) of
State1 = #state{} -> State1;
{'EXIT', R} -> %% Tab was just deleted?
case ?catch_val({Tab, cstruct}) of
{'EXIT', _} -> State; % yes
- _ -> erlang:error(R)
+ _ -> erlang:error(R)
end
end,
node_has_tabs(Tabs, Node, State2);
@@ -1467,14 +1474,14 @@ update_whereabouts(Tab, Node, State) ->
true ->
lists:member(Node, Masters)
end,
-
+
dbg_out("Table ~w is loaded on ~w. s=~w, r=~w, lc=~w, f=~w, m=~w~n",
[Tab, Node, Storage, Read, LocalC, ByForce, GoGetIt]),
if
LocalC == true ->
%% Local contents, don't care about other node
State;
- BeingCreated == true ->
+ BeingCreated == true ->
%% The table is currently being created
%% It will be handled elsewhere
State;
@@ -1491,8 +1498,8 @@ update_whereabouts(Tab, Node, State) ->
State
end;
Storage == unknown ->
- %% No own copy, continue to read remotely
- add_active_replica(Tab, Node),
+ %% No own copy, continue to read remotely
+ add_active_replica(Tab, Node),
NodeST = mnesia_lib:storage_type_at_node(Node, Tab),
ReadST = mnesia_lib:storage_type_at_node(Read, Tab),
if %% Avoid reading from disc_only_copies
@@ -1532,16 +1539,16 @@ initial_safe_loads() ->
Tabs = val({schema, local_tables}) -- [schema],
LastC = fun(T) -> last_consistent_replica(T, Downs) end,
lists:zf(LastC, Tabs);
-
+
disc_copies ->
Downs = mnesia_recover:get_mnesia_downs(),
dbg_out("mnesia_downs = ~p~n", [Downs]),
-
+
Tabs = val({schema, local_tables}) -- [schema],
LastC = fun(T) -> last_consistent_replica(T, Downs) end,
lists:zf(LastC, Tabs)
end.
-
+
last_consistent_replica(Tab, Downs) ->
Cs = val({Tab, cstruct}),
Storage = mnesia_lib:cs_to_storage_type(node(), Cs),
@@ -1618,7 +1625,7 @@ remove_early_messages([], _Node) ->
[];
remove_early_messages([{call, {add_active_replica, [_, Node, _, _], _}, _}|R], Node) ->
remove_early_messages(R, Node); %% Does a reply before queuing
-remove_early_messages([{call, {block_table, _, From}, ReplyTo}|R], Node)
+remove_early_messages([{call, {block_table, _, From}, ReplyTo}|R], Node)
when node(From) == Node ->
reply(ReplyTo, ok), %% Remove gen:server waits..
remove_early_messages(R, Node);
@@ -1672,9 +1679,9 @@ is_tab_blocked(W2C) when is_list(W2C) ->
is_tab_blocked({blocked, W2C}) when is_list(W2C) ->
{true, W2C}.
-mark_blocked_tab(true, Value) ->
+mark_blocked_tab(true, Value) ->
{blocked, Value};
-mark_blocked_tab(false, Value) ->
+mark_blocked_tab(false, Value) ->
Value.
%%
@@ -1707,7 +1714,7 @@ del_active_replica(Tab, Node) ->
update_where_to_wlock(Tab).
change_table_access_mode(Cs) ->
- W = fun() ->
+ W = fun() ->
Tab = Cs#cstruct.name,
lists:foreach(fun(N) -> add_active_replica(Tab, N, Cs) end,
val({Tab, active_replicas}))
@@ -1736,7 +1743,7 @@ update_where_to_wlock(Tab) ->
unannounce_add_table_copy(Tab, To) ->
catch del_active_replica(Tab, To),
case catch val({Tab , where_to_read}) of
- To ->
+ To ->
mnesia_lib:set_remote_where_to_read(Tab);
_ ->
ignore
@@ -1749,7 +1756,7 @@ user_sync_tab(Tab) ->
_ ->
ignore
end,
-
+
case erase({sync_tab, Tab}) of
undefined ->
ok;
@@ -1768,11 +1775,11 @@ i_have_tab(Tab) ->
sync_and_block_table_whereabouts(Tab, ToNode, RemoteS, AccessMode) when Tab /= schema ->
Current = val({current, db_nodes}),
- Ns =
+ Ns =
case lists:member(ToNode, Current) of
true -> Current -- [ToNode];
false -> Current
- end,
+ end,
remote_call(ToNode, block_table, [Tab]),
[remote_call(Node, add_active_replica, [Tab, ToNode, RemoteS, AccessMode]) ||
Node <- [ToNode | Ns]],
@@ -1817,7 +1824,7 @@ get_workers(Timeout) ->
{timeout, Timeout}
end
end.
-
+
info() ->
Tabs = mnesia_lib:local_active_tables(),
io:format( "---> Active tables <--- ~n", []),
@@ -1826,12 +1833,12 @@ info() ->
info([Tab | Tail]) ->
case val({Tab, storage_type}) of
disc_only_copies ->
- info_format(Tab,
- dets:info(Tab, size),
+ info_format(Tab,
+ dets:info(Tab, size),
dets:info(Tab, file_size),
"bytes on disc");
_ ->
- info_format(Tab,
+ info_format(Tab,
?ets_info(Tab, size),
?ets_info(Tab, memory),
"words of mem")
@@ -1871,7 +1878,7 @@ handle_early_msg({cast, Msg}, State) ->
handle_cast(Msg, State);
handle_early_msg({info, Msg}, State) ->
handle_info(Msg, State).
-
+
noreply(State) ->
{noreply, State}.
@@ -1919,7 +1926,7 @@ add_worker(Worker = #send_table{}, State) ->
add_worker(Worker = #disc_load{}, State) ->
opt_start_worker(add_loader(Worker#disc_load.table,Worker,State));
% Block controller should be used for upgrading mnesia.
-add_worker(Worker = #block_controller{}, State) ->
+add_worker(Worker = #block_controller{}, State) ->
Queue = State#state.dumper_queue,
Queue2 = [Worker | Queue],
State2 = State#state{dumper_queue = Queue2},
@@ -1928,13 +1935,13 @@ add_worker(Worker = #block_controller{}, State) ->
add_loader(Tab,Worker,State = #state{loader_queue=LQ0}) ->
case gb_trees:is_defined(Tab, LQ0) of
true -> State;
- false ->
+ false ->
LQ=gb_trees:insert(Tab, Worker, LQ0),
State#state{loader_queue=LQ}
end.
%% Optionally start a worker
-%%
+%%
%% Dumpers and loaders may run simultaneously
%% but neither of them may run during schema commit.
%% Loaders may not start if a schema commit is enqueued.
@@ -1948,7 +1955,7 @@ opt_start_worker(State) ->
%% Great, a worker in queue and neither
%% a schema transaction is being
%% committed and nor a dumper is running
-
+
%% Start worker but keep him in the queue
if
is_record(Worker, schema_commit_lock) ->
@@ -1956,7 +1963,7 @@ opt_start_worker(State) ->
reply(ReplyTo, granted),
{Owner, _Tag} = ReplyTo,
opt_start_loader(State#state{dumper_pid = Owner});
-
+
is_record(Worker, dump_log) ->
Pid = spawn_link(?MODULE, dump_and_reply, [self(), Worker]),
State2 = State#state{dumper_pid = Pid},
@@ -1966,7 +1973,7 @@ opt_start_worker(State) ->
%% or sender
State3 = opt_start_sender(State2),
opt_start_loader(State3);
-
+
is_record(Worker, block_controller) ->
case {get_senders(State), get_loaders(State)} of
{[], []} ->
@@ -1979,7 +1986,7 @@ opt_start_worker(State) ->
end
end;
_ ->
- %% Bad luck, try with a loader or sender instead
+ %% Bad luck, try with a loader or sender instead
State2 = opt_start_sender(State),
opt_start_loader(State2)
end.
@@ -1987,8 +1994,8 @@ opt_start_worker(State) ->
opt_start_sender(State) ->
case State#state.sender_queue of
[]-> State; %% No need
- SenderQ ->
- {NewS,Kept} = opt_start_sender2(SenderQ, get_senders(State),
+ SenderQ ->
+ {NewS,Kept} = opt_start_sender2(SenderQ, get_senders(State),
[], get_loaders(State)),
State#state{sender_pid = NewS, sender_queue = Kept}
end.
@@ -1997,11 +2004,11 @@ opt_start_sender2([], Pids,Kept, _) -> {Pids,Kept};
opt_start_sender2([Sender|R], Pids, Kept, LoaderQ) ->
Tab = Sender#send_table.table,
Active = val({Tab, active_replicas}),
- IgotIt = lists:member(node(), Active),
- IsLoading = lists:any(fun({_Pid,Loader}) ->
+ IgotIt = lists:member(node(), Active),
+ IsLoading = lists:any(fun({_Pid,Loader}) ->
Tab == element(#net_load.table, Loader)
end, LoaderQ),
- if
+ if
IgotIt, IsLoading ->
%% I'm currently finishing loading the table let him wait
opt_start_sender2(R,Pids, [Sender|Kept], LoaderQ);
@@ -2019,11 +2026,11 @@ opt_start_loader(State = #state{loader_queue = LoaderQ}) ->
Current = get_loaders(State),
Max = max_loaders(),
case gb_trees:is_empty(LoaderQ) of
- true ->
+ true ->
State;
- _ when length(Current) >= Max ->
+ _ when length(Current) >= Max ->
State;
- false ->
+ false ->
SchemaQueue = State#state.dumper_queue,
case lists:keymember(schema_commit_lock, 1, SchemaQueue) of
false ->
@@ -2054,7 +2061,7 @@ already_loading(#disc_load{table=Tab},Loaders) ->
already_loading2(Tab, [{_,#net_load{table=Tab}}|_]) -> true;
already_loading2(Tab, [{_,#disc_load{table=Tab}}|_]) -> true;
-already_loading2(Tab, [_|Rest]) -> already_loading2(Tab,Rest);
+already_loading2(Tab, [_|Rest]) -> already_loading2(Tab,Rest);
already_loading2(_,[]) -> false.
start_remote_sender(Node, Tab, Receiver, Storage) ->
@@ -2083,8 +2090,8 @@ send_and_reply(ReplyTo, Worker) ->
load_and_reply(ReplyTo, Worker) ->
Load = load_table_fun(Worker),
- SendAndReply =
- fun() ->
+ SendAndReply =
+ fun() ->
process_flag(trap_exit, true),
Done = Load(),
ReplyTo ! Done#loader_done{worker_pid = self()},
@@ -2151,7 +2158,7 @@ load_table_fun(#disc_load{table=Tab, reason=Reason, opt_reply_to=ReplyTo}) ->
ReadNode == nowhere ->
%% Already loaded on other node, lets get it
Cs = val({Tab, cstruct}),
- fun() ->
+ fun() ->
case mnesia_loader:net_load_table(Tab, Reason, Active, Cs) of
{loaded, ok} ->
Done#loader_done{needs_sync = true};
@@ -2194,10 +2201,10 @@ filter_active(Tab) ->
Active = val({Tab, active_replicas}),
Masters = mnesia_recover:get_master_nodes(Tab),
Ns = do_filter_active(ByForce, Active, Masters),
- %% Reorder the so that we load from fastest first
+ %% Reorder the so that we load from fastest first
LS = ?catch_val({Tab, storage_type}),
DOC = val({Tab, disc_only_copies}),
- {Good,Worse} =
+ {Good,Worse} =
case LS of
disc_only_copies ->
G = mnesia_lib:intersect(Ns, DOC),
@@ -2208,7 +2215,7 @@ filter_active(Tab) ->
end,
%% Pick a random node of the fastest
Len = length(Good),
- if
+ if
Len > 0 ->
R = erlang:phash(node(), Len+1),
random(R-1,Good,Worse);
@@ -2227,5 +2234,5 @@ do_filter_active(false, Active, []) ->
Active;
do_filter_active(false, Active, Masters) ->
mnesia_lib:intersect(Active, Masters).
-
+
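Apart from whitespace cleanup, the mnesia_controller diff above splits the cstruct fetch used during schema merge: get_remote_cstructs/0 is the name newer nodes call on a peer, while the old get_cstructs/0 is kept for backwards compatibility and now converts the cstructs with mnesia_schema:normalize_cs/2, using node(group_leader()) to identify the calling node. A minimal sketch of the resulting call pattern, not part of the patch; fetch_cstructs/1 is a hypothetical helper and the rpc transport is an assumption about how peers reach each other:

    %% Hypothetical helper (not in the patch): ask a peer for its table
    %% definitions. The serving side normalizes the cstructs for the caller's
    %% node before replying, so mixed cstruct versions can coexist.
    %% A {badrpc, Reason} result would make the match below fail.
    fetch_cstructs(RemoteNode) ->
        {cstructs, Cstructs, RunningNodes} =
            rpc:call(RemoteNode, mnesia_controller, get_remote_cstructs, []),
        {Cstructs, RunningNodes}.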
diff --git a/lib/mnesia/src/mnesia_dumper.erl b/lib/mnesia/src/mnesia_dumper.erl
index 92fd9dfade..e2a0aa3bda 100644
--- a/lib/mnesia/src/mnesia_dumper.erl
+++ b/lib/mnesia/src/mnesia_dumper.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -43,7 +43,7 @@
%% Internal stuff
-export([regulator_init/1]).
-
+
-include("mnesia.hrl").
-include_lib("kernel/include/file.hrl").
@@ -70,14 +70,14 @@ incr_log_writes() ->
adjust_log_writes(DoCast) ->
Token = {mnesia_adjust_log_writes, self()},
- case global:set_lock(Token, [node()], 1) of
+ case global:set_lock(Token, [node()], 1) of
false ->
ignore; %% Somebody else is sending a dump request
- true ->
- case DoCast of
+ true ->
+ case DoCast of
false ->
ignore;
- true ->
+ true ->
mnesia_controller:async_dump_log(write_threshold)
end,
Max = mnesia_monitor:get_env(dump_log_write_threshold),
@@ -93,16 +93,16 @@ adjust_log_writes(DoCast) ->
opt_dump_log(InitBy) ->
Reg = case whereis(?REGULATOR_NAME) of
undefined ->
- nopid;
+ nopid;
Pid when is_pid(Pid) ->
- Pid
+ Pid
end,
perform_dump(InitBy, Reg).
%% Scan for decisions
perform_dump(InitBy, Regulator) when InitBy == scan_decisions ->
?eval_debug_fun({?MODULE, perform_dump}, [InitBy]),
-
+
dbg_out("Transaction log dump initiated by ~w~n", [InitBy]),
scan_decisions(mnesia_log:previous_log_file(), InitBy, Regulator),
scan_decisions(mnesia_log:latest_log_file(), InitBy, Regulator);
@@ -112,8 +112,8 @@ perform_dump(InitBy, Regulator) ->
?eval_debug_fun({?MODULE, perform_dump}, [InitBy]),
LogState = mnesia_log:prepare_log_dump(InitBy),
dbg_out("Transaction log dump initiated by ~w: ~w~n",
- [InitBy, LogState]),
- adjust_log_writes(false),
+ [InitBy, LogState]),
+ adjust_log_writes(false),
case LogState of
already_dumped ->
mnesia_recover:allow_garb(),
@@ -142,7 +142,7 @@ perform_dump(InitBy, Regulator) ->
mnesia_lib:important(Desc, Reason),
%% Ignore rest of the log
mnesia_log:confirm_log_dump(Diff);
- false ->
+ false ->
fatal(Desc, Reason)
end
end;
@@ -189,9 +189,9 @@ do_perform_dump(Cont, InPlace, InitBy, Regulator, OldVersion) ->
insert_recs([Rec | Recs], InPlace, InitBy, Regulator, LogV) ->
regulate(Regulator),
case insert_rec(Rec, InPlace, InitBy, LogV) of
- LogH when is_record(LogH, log_header) ->
+ LogH when is_record(LogH, log_header) ->
insert_recs(Recs, InPlace, InitBy, Regulator, LogH#log_header.log_version);
- _ ->
+ _ ->
insert_recs(Recs, InPlace, InitBy, Regulator, LogV)
end;
@@ -199,7 +199,7 @@ insert_recs([], _InPlace, _InitBy, _Regulator, Version) ->
Version.
insert_rec(Rec, _InPlace, scan_decisions, _LogV) ->
- if
+ if
is_record(Rec, commit) ->
ignore;
is_record(Rec, log_header) ->
@@ -214,7 +214,12 @@ insert_rec(Rec, InPlace, InitBy, LogV) when is_record(Rec, commit) ->
{Tid, committed} ->
do_insert_rec(Tid, Rec, InPlace, InitBy, LogV);
{Tid, aborted} ->
- mnesia_schema:undo_prepare_commit(Tid, Rec)
+ case InitBy of
+ startup ->
+ mnesia_schema:undo_prepare_commit(Tid, Rec);
+ _ ->
+ ok
+ end
end;
insert_rec(H, _InPlace, _InitBy, _LogV) when is_record(H, log_header) ->
CurrentVersion = mnesia_log:version(),
@@ -222,7 +227,7 @@ insert_rec(H, _InPlace, _InitBy, _LogV) when is_record(H, log_header) ->
H#log_header.log_kind /= trans_log ->
exit({"Bad kind of transaction log", H});
H#log_header.log_version == CurrentVersion ->
- ok;
+ ok;
H#log_header.log_version == "4.2" ->
ok;
H#log_header.log_version == "4.1" ->
@@ -242,8 +247,8 @@ do_insert_rec(Tid, Rec, InPlace, InitBy, LogV) ->
[] ->
ignore;
SchemaOps ->
- case val({schema, storage_type}) of
- ram_copies ->
+ case val({schema, storage_type}) of
+ ram_copies ->
insert_ops(Tid, schema_ops, SchemaOps, InPlace, InitBy, LogV);
Storage ->
true = open_files(schema, Storage, InPlace, InitBy),
@@ -259,13 +264,13 @@ do_insert_rec(Tid, Rec, InPlace, InitBy, LogV) ->
_ ->
ignore
end.
-
+
update(_Tid, [], _DumperMode) ->
dumped;
update(Tid, SchemaOps, DumperMode) ->
UseDir = mnesia_monitor:use_dir(),
- Res = perform_update(Tid, SchemaOps, DumperMode, UseDir),
+ Res = perform_update(Tid, SchemaOps, DumperMode, UseDir),
mnesia_controller:release_schema_commit_lock(),
Res.
@@ -274,23 +279,23 @@ perform_update(_Tid, _SchemaOps, mandatory, true) ->
%% dumper perform needed updates
InitBy = schema_update,
- ?eval_debug_fun({?MODULE, dump_schema_op}, [InitBy]),
+ ?eval_debug_fun({?MODULE, dump_schema_op}, [InitBy]),
opt_dump_log(InitBy);
perform_update(Tid, SchemaOps, _DumperMode, _UseDir) ->
%% No need for a full transaction log dump.
%% Ignore the log file and perform only perform
%% the corresponding updates.
- InitBy = fast_schema_update,
+ InitBy = fast_schema_update,
InPlace = mnesia_monitor:get_env(dump_log_update_in_place),
?eval_debug_fun({?MODULE, dump_schema_op}, [InitBy]),
- case catch insert_ops(Tid, schema_ops, SchemaOps, InPlace, InitBy,
+ case catch insert_ops(Tid, schema_ops, SchemaOps, InPlace, InitBy,
mnesia_log:version()) of
{'EXIT', Reason} ->
Error = {error, {"Schema update error", Reason}},
close_files(InPlace, Error, InitBy),
fatal("Schema update error ~p ~p", [Reason, SchemaOps]);
- _ ->
+ _ ->
?eval_debug_fun({?MODULE, post_dump}, [InitBy]),
close_files(InPlace, ok, InitBy),
ok
@@ -313,7 +318,7 @@ insert_ops(Tid, Storage, [Op | Ops], InPlace, InitBy, Ver) when Ver < "4.3" ->
disc_insert(_Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
case open_files(Tab, Storage, InPlace, InitBy) of
true ->
- case Storage of
+ case Storage of
disc_copies when Tab /= schema ->
mnesia_log:append({?MODULE,Tab}, {{Tab, Key}, Val, Op}),
ok;
@@ -326,7 +331,7 @@ disc_insert(_Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
%% To fix update_counter so that it behaves better.
%% i.e. if nothing have changed in tab except update_counter
-%% trust that the value in the dets file is correct.
+%% trust that the value in the dets file is correct.
%% Otherwise we will get a double increment.
%% This is perfect but update_counter is a dirty op.
@@ -348,30 +353,30 @@ dets_insert(Op,Tab,Key,Val) ->
_ when Incr < 0 ->
Zero = {RecName, Key, 0},
ok = dets:insert(Tab, Zero);
- _ ->
+ _ ->
Init = {RecName, Key, Incr},
ok = dets:insert(Tab, Init)
end;
false -> ok
- end;
+ end;
delete_object ->
dets_updated(Tab,Key),
ok = dets:delete_object(Tab, Val);
clear_table ->
dets_cleared(Tab),
- ok = dets:match_delete(Tab, '_')
+ ok = dets:delete_all_objects(Tab)
end.
-
-dets_updated(Tab,Key) ->
+
+dets_updated(Tab,Key) ->
case get(mnesia_dumper_dets) of
- undefined ->
+ undefined ->
Empty = gb_trees:empty(),
Tree = gb_trees:insert(Tab, gb_sets:singleton(Key), Empty),
put(mnesia_dumper_dets, Tree);
Tree ->
case gb_trees:lookup(Tab,Tree) of
{value, cleared} -> ignore;
- {value, Set} ->
+ {value, Set} ->
T = gb_trees:update(Tab, gb_sets:add(Key, Set), Tree),
put(mnesia_dumper_dets, T);
none ->
@@ -393,14 +398,14 @@ dets_incr_counter(Tab,Key) ->
dets_cleared(Tab) ->
case get(mnesia_dumper_dets) of
- undefined ->
+ undefined ->
Empty = gb_trees:empty(),
Tree = gb_trees:insert(Tab, cleared, Empty),
put(mnesia_dumper_dets, Tree);
Tree ->
case gb_trees:lookup(Tab,Tree) of
{value, cleared} -> ignore;
- _ ->
+ _ ->
T = gb_trees:enter(Tab, cleared, Tree),
put(mnesia_dumper_dets, T)
end
@@ -412,7 +417,7 @@ insert(Tid, Storage, Tab, Key, [Val | Tail], Op, InPlace, InitBy) ->
insert(_Tid, _Storage, _Tab, _Key, [], _Op, _InPlace, _InitBy) ->
ok;
-
+
insert(Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
Item = {{Tab, Key}, Val, Op},
case InitBy of
@@ -442,18 +447,18 @@ insert(Tid, Storage, Tab, Key, Val, Op, InPlace, InitBy) ->
disc_delete_table(Tab, Storage) ->
case mnesia_monitor:use_dir() of
true ->
- if
- Storage == disc_only_copies; Tab == schema ->
+ if
+ Storage == disc_only_copies; Tab == schema ->
mnesia_monitor:unsafe_close_dets(Tab),
Dat = mnesia_lib:tab2dat(Tab),
- file:delete(Dat);
- true ->
+ file:delete(Dat);
+ true ->
DclFile = mnesia_lib:tab2dcl(Tab),
case get({?MODULE,Tab}) of
{opened_dumper, dcl} ->
del_opened_tab(Tab),
mnesia_log:unsafe_close_log(Tab);
- _ ->
+ _ ->
ok
end,
file:delete(DclFile),
@@ -485,7 +490,7 @@ insert_op(Tid, Storage, {{Tab, Key}, Val, Op}, InPlace, InitBy) ->
insert_op(_Tid, schema_ops, _OP, _InPlace, Initby)
when Initby /= startup,
Initby /= fast_schema_update,
- Initby /= schema_update ->
+ Initby /= schema_update ->
ignore;
insert_op(Tid, _, {op, rec, Storage, Item}, InPlace, InitBy) ->
@@ -502,7 +507,7 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
_ ->
ignore
end,
- if
+ if
N == node() ->
Dmp = mnesia_lib:tab2dmp(Tab),
Dat = mnesia_lib:tab2dat(Tab),
@@ -526,8 +531,8 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
true = open_files(Tab, disc_only_copies, InPlace, InitBy),
%% ram_delete_table must be done before init_indecies,
%% it uses info which is reset in init_indecies,
- %% it doesn't matter, because init_indecies don't use
- %% the ram replica of the table when creating the disc
+ %% it doesn't matter, because init_indecies don't use
+ %% the ram replica of the table when creating the disc
%% index; Could be improved :)
mnesia_schema:ram_delete_table(Tab, FromS),
PosList = Cs#cstruct.index,
@@ -535,17 +540,17 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
{disc_only_copies, ram_copies} ->
mnesia_monitor:unsafe_close_dets(Tab),
disc_delete_indecies(Tab, Cs, disc_only_copies),
- case InitBy of
- startup ->
+ case InitBy of
+ startup ->
ignore;
- _ ->
+ _ ->
mnesia_controller:get_disc_copy(Tab)
end,
disc_delete_table(Tab, disc_only_copies);
{disc_copies, disc_only_copies} ->
ok = ensure_rename(Dmp, Dat),
true = open_files(Tab, disc_only_copies, InPlace, InitBy),
- mnesia_schema:ram_delete_table(Tab, FromS),
+ mnesia_schema:ram_delete_table(Tab, FromS),
PosList = Cs#cstruct.index,
mnesia_index:init_indecies(Tab, disc_only_copies, PosList),
file:delete(Dcl),
@@ -553,8 +558,8 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
{disc_only_copies, disc_copies} ->
mnesia_monitor:unsafe_close_dets(Tab),
disc_delete_indecies(Tab, Cs, disc_only_copies),
- case InitBy of
- startup ->
+ case InitBy of
+ startup ->
ignore;
_ ->
mnesia_log:ets2dcd(Tab),
@@ -571,7 +576,7 @@ insert_op(Tid, _, {op, change_table_copy_type, N, FromS, ToS, TabDef}, InPlace,
insert_op(Tid, _, {op, transform, _Fun, TabDef}, InPlace, InitBy) ->
Cs = mnesia_schema:list2cs(TabDef),
case mnesia_lib:cs_to_storage_type(node(), Cs) of
- disc_copies ->
+ disc_copies ->
open_dcl(Cs#cstruct.name);
_ ->
ignore
@@ -599,28 +604,34 @@ insert_op(Tid, _, {op, restore_recreate, TabDef}, InPlace, InitBy) ->
mnesia_checkpoint:tm_del_copy(Tab, node())
end
end,
+ StorageProps = Cs#cstruct.storage_properties,
+
%% And create new ones..
if
(InitBy == startup) or (Storage == unknown) ->
ignore;
Storage == ram_copies ->
- Args = [{keypos, 2}, public, named_table, Type],
+ EtsProps = proplists:get_value(ets, StorageProps, []),
+ Args = [{keypos, 2}, public, named_table, Type | EtsProps],
mnesia_monitor:mktab(Tab, Args);
Storage == disc_copies ->
- Args = [{keypos, 2}, public, named_table, Type],
+ EtsProps = proplists:get_value(ets, StorageProps, []),
+ Args = [{keypos, 2}, public, named_table, Type | EtsProps],
mnesia_monitor:mktab(Tab, Args),
- File = mnesia_lib:tab2dcd(Tab),
- FArg = [{file, File}, {name, {mnesia,create}},
+ File = mnesia_lib:tab2dcd(Tab),
+ FArg = [{file, File}, {name, {mnesia,create}},
{repair, false}, {mode, read_write}],
{ok, Log} = mnesia_monitor:open_log(FArg),
mnesia_monitor:unsafe_close_log(Log);
Storage == disc_only_copies ->
File = mnesia_lib:tab2dat(Tab),
file:delete(File),
+ DetsProps = proplists:get_value(dets, StorageProps, []),
Args = [{file, mnesia_lib:tab2dat(Tab)},
{type, mnesia_lib:disk_type(Tab, Type)},
{keypos, 2},
- {repair, mnesia_monitor:get_env(auto_repair)}],
+ {repair, mnesia_monitor:get_env(auto_repair)}
+ | DetsProps ],
mnesia_monitor:open_dets(Tab, Args)
end,
insert_op(Tid, ignore, {op, create_table, TabDef}, InPlace, InitBy);
@@ -630,6 +641,7 @@ insert_op(Tid, _, {op, create_table, TabDef}, InPlace, InitBy) ->
insert_cstruct(Tid, Cs, false, InPlace, InitBy),
Tab = Cs#cstruct.name,
Storage = mnesia_lib:cs_to_storage_type(node(), Cs),
+ StorageProps = Cs#cstruct.storage_properties,
case InitBy of
startup ->
case Storage of
@@ -639,22 +651,25 @@ insert_op(Tid, _, {op, create_table, TabDef}, InPlace, InitBy) ->
ignore;
disc_copies ->
Dcd = mnesia_lib:tab2dcd(Tab),
- case mnesia_lib:exists(Dcd) of
+ case mnesia_lib:exists(Dcd) of
true -> ignore;
false ->
- mnesia_log:open_log(temp,
+ mnesia_log:open_log(temp,
mnesia_log:dcd_log_header(),
- Dcd,
- false,
+ Dcd,
+ false,
false,
read_write),
mnesia_log:unsafe_close_log(temp)
end;
_ ->
+ DetsProps = proplists:get_value(dets, StorageProps, []),
+
Args = [{file, mnesia_lib:tab2dat(Tab)},
{type, mnesia_lib:disk_type(Tab, Cs#cstruct.type)},
{keypos, 2},
- {repair, mnesia_monitor:get_env(auto_repair)}],
+ {repair, mnesia_monitor:get_env(auto_repair)}
+ | DetsProps ],
case mnesia_monitor:open_dets(Tab, Args) of
{ok, _} ->
mnesia_monitor:unsafe_close_dets(Tab);
@@ -666,7 +681,7 @@ insert_op(Tid, _, {op, create_table, TabDef}, InPlace, InitBy) ->
Copies = mnesia_lib:copy_holders(Cs),
Active = mnesia_lib:intersect(Copies, val({current, db_nodes})),
[mnesia_controller:add_active_replica(Tab, N, Cs) || N <- Active],
-
+
case Storage of
unknown ->
mnesia_lib:unset({Tab, create_table}),
@@ -690,8 +705,8 @@ insert_op(Tid, _, {op, create_table, TabDef}, InPlace, InitBy) ->
%% Indecies are still created by loader
disc_delete_indecies(Tab, Cs, Storage)
%% disc_delete_table(Tab, Storage)
- end,
-
+ end,
+
%% Update whereabouts and create table
mnesia_controller:create_table(Tab),
mnesia_lib:unset({Tab, create_table})
@@ -749,7 +764,7 @@ insert_op(Tid, _, {op, clear_table, TabDef}, InPlace, InitBy) ->
true ->
ignore
end,
- %% Need to catch this, it crashes on ram_copies if
+ %% Need to catch this, it crashes on ram_copies if
%% the op comes before table is loaded at startup.
catch insert(Tid, Storage, Tab, '_', Oid, clear_table, InPlace, InitBy)
end;
@@ -761,16 +776,16 @@ insert_op(Tid, _, {op, merge_schema, TabDef}, InPlace, InitBy) ->
%% If we bootstrap an empty (diskless) mnesia from another node
%% we might have changed the storage_type of schema.
%% I think this is a good place to do it.
- Update = fun(NS = {Node,Storage}) ->
+ Update = fun(NS = {Node,Storage}) ->
case mnesia_lib:cs_to_storage_type(Node, Cs) of
Storage -> NS;
- disc_copies when Node == node() ->
- Dir = mnesia_lib:dir(),
+ disc_copies when Node == node() ->
+ Dir = mnesia_lib:dir(),
ok = mnesia_schema:opt_create_dir(true, Dir),
mnesia_schema:purge_dir(Dir, []),
mnesia_log:purge_all_logs(),
- mnesia_lib:set(use_dir, true),
+ mnesia_lib:set(use_dir, true),
mnesia_log:init(),
Ns = val({current, db_nodes}),
F = fun(U) -> mnesia_recover:log_mnesia_up(U) end,
@@ -778,11 +793,11 @@ insert_op(Tid, _, {op, merge_schema, TabDef}, InPlace, InitBy) ->
raw_named_dump_table(schema, dat),
temp_set_master_nodes(),
{Node,disc_copies};
- CSstorage ->
+ CSstorage ->
{Node,CSstorage}
end
end,
-
+
W2C0 = val({schema, where_to_commit}),
W2C = case W2C0 of
{blocked, List} ->
@@ -849,9 +864,9 @@ insert_op(Tid, _, {op, del_snmp, TabDef}, InPlace, InitBy) ->
InitBy /= startup,
Storage /= unknown ->
case ?catch_val({Tab, {index, snmp}}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
ignore;
- Stab ->
+ Stab ->
mnesia_snmp_hook:delete_table(Tab, Stab),
mnesia_lib:unset({Tab, {index, snmp}})
end;
@@ -869,7 +884,7 @@ insert_op(Tid, _, {op, add_index, Pos, TabDef}, InPlace, InitBy) ->
true = open_files(Tab, Storage, InPlace, InitBy),
mnesia_index:init_indecies(Tab, Storage, [Pos]);
startup ->
- ignore;
+ ignore;
_ ->
case val({Tab,where_to_read}) of
nowhere -> ignore;
@@ -885,7 +900,7 @@ insert_op(Tid, _, {op, del_index, Pos, TabDef}, InPlace, InitBy) ->
case InitBy of
startup when Storage == disc_only_copies ->
mnesia_index:del_index_table(Tab, Storage, Pos);
- startup ->
+ startup ->
ignore;
_ ->
mnesia_index:del_index_table(Tab, Storage, Pos)
@@ -934,16 +949,19 @@ open_files(Tab, Storage, UpdateInPlace, InitBy)
{'EXIT', _} ->
false;
Type ->
- case Storage of
+ case Storage of
disc_copies when Tab /= schema ->
Bool = open_disc_copies(Tab, InitBy),
Bool;
_ ->
+ Props = val({Tab, storage_properties}),
+ DetsProps = proplists:get_value(dets, Props, []),
Fname = prepare_open(Tab, UpdateInPlace),
Args = [{file, Fname},
{keypos, 2},
{repair, mnesia_monitor:get_env(auto_repair)},
- {type, mnesia_lib:disk_type(Tab, Type)}],
+ {type, mnesia_lib:disk_type(Tab, Type)}
+ | DetsProps],
{ok, _} = mnesia_monitor:open_dets(Tab, Args),
put({?MODULE, Tab}, {opened_dumper, dat}),
true
@@ -959,7 +977,7 @@ open_files(_Tab, _Storage, _UpdateInPlace, _InitBy) ->
open_disc_copies(Tab, InitBy) ->
DclF = mnesia_lib:tab2dcl(Tab),
- DumpEts =
+ DumpEts =
case file:read_file_info(DclF) of
{error, enoent} ->
false;
@@ -970,7 +988,7 @@ open_disc_copies(Tab, InitBy) ->
mnesia_lib:dbg_out("File ~p info_error ~p ~n",
[DcdF, Reason]),
true;
- {ok, DcdInfo} ->
+ {ok, DcdInfo} ->
Mul = case ?catch_val(dc_dump_limit) of
{'EXIT', _} -> ?DumpToEtsMultiplier;
Val -> Val
@@ -978,12 +996,12 @@ open_disc_copies(Tab, InitBy) ->
DcdInfo#file_info.size =< (DclInfo#file_info.size * Mul)
end
end,
- if
- DumpEts == false; InitBy == startup ->
- mnesia_log:open_log({?MODULE,Tab},
- mnesia_log:dcl_log_header(),
- DclF,
- mnesia_lib:exists(DclF),
+ if
+ DumpEts == false; InitBy == startup ->
+ mnesia_log:open_log({?MODULE,Tab},
+ mnesia_log:dcl_log_header(),
+ DclF,
+ mnesia_lib:exists(DclF),
mnesia_monitor:get_env(auto_repair),
read_write),
put({?MODULE, Tab}, {opened_dumper, dcl}),
@@ -992,9 +1010,9 @@ open_disc_copies(Tab, InitBy) ->
mnesia_log:ets2dcd(Tab),
put({?MODULE, Tab}, already_dumped),
false
- end.
+ end.
-%% Always opens the dcl file for writing overriding already_dumped
+%% Always opens the dcl file for writing overriding already_dumped
%% mechanismen, used for schema transactions.
open_dcl(Tab) ->
case get({?MODULE, Tab}) of
@@ -1002,10 +1020,10 @@ open_dcl(Tab) ->
true;
_ -> %% undefined or already_dumped
DclF = mnesia_lib:tab2dcl(Tab),
- mnesia_log:open_log({?MODULE,Tab},
- mnesia_log:dcl_log_header(),
- DclF,
- mnesia_lib:exists(DclF),
+ mnesia_log:open_log({?MODULE,Tab},
+ mnesia_log:dcl_log_header(),
+ DclF,
+ mnesia_lib:exists(DclF),
mnesia_monitor:get_env(auto_repair),
read_write),
put({?MODULE, Tab}, {opened_dumper, dcl}),
@@ -1042,7 +1060,7 @@ close_files(InPlace, Outcome, InitBy, [{{?MODULE, Tab}, {opened_dumper, Type}} |
case val({Tab, storage_type}) of
disc_only_copies when InitBy /= startup ->
ignore;
- disc_copies when Tab /= schema ->
+ disc_copies when Tab /= schema ->
mnesia_log:close_log({?MODULE,Tab});
Storage ->
do_close(InPlace, Outcome, Tab, Type, Storage)
@@ -1077,7 +1095,7 @@ do_close(InPlace, Outcome, Tab, dat, Storage) ->
true ->
file:delete(mnesia_lib:tab2tmp(Tab))
end.
-
+
ensure_rename(From, To) ->
case mnesia_lib:exists(From) of
@@ -1091,7 +1109,7 @@ ensure_rename(From, To) ->
{error, {rename_failed, From, To}}
end
end.
-
+
insert_cstruct(Tid, Cs, KeepWhereabouts, InPlace, InitBy) ->
Val = mnesia_schema:insert_cstruct(Tid, Cs, KeepWhereabouts),
{schema, Tab, _} = Val,
@@ -1109,15 +1127,15 @@ delete_cstruct(Tid, Cs, InPlace, InitBy) ->
temp_set_master_nodes() ->
Tabs = val({schema, local_tables}),
- Masters = [{Tab, (val({Tab, disc_copies}) ++
- val({Tab, ram_copies}) ++
- val({Tab, disc_only_copies})) -- [node()]}
+ Masters = [{Tab, (val({Tab, disc_copies}) ++
+ val({Tab, ram_copies}) ++
+ val({Tab, disc_only_copies})) -- [node()]}
|| Tab <- Tabs],
%% UseDir = false since we don't want to remember these
%% masternodes and we are running (really soon anyway) since we want this
%% to be known during table loading.
mnesia_recover:log_master_nodes(Masters, false, yes),
- ok.
+ ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Raw dump of table. Dumper must have unique access to the ets table.
@@ -1147,7 +1165,7 @@ raw_named_dump_table(Tab, Ftype) ->
{ok, TabRef} ->
Storage = ram_copies,
mnesia_lib:db_fixtable(Storage, Tab, true),
-
+
case catch raw_dump_table(TabRef, Tab) of
{'EXIT', Reason} ->
mnesia_lib:db_fixtable(Storage, Tab, false),
@@ -1174,11 +1192,11 @@ raw_dump_table(DetsRef, EtsRef) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Load regulator
-%%
-%% This is a poor mans substitute for a fair scheduler algorithm
-%% in the Erlang emulator. The mnesia_dumper process performs many
-%% costly BIF invokations and must pay for this. But since the
-%% Emulator does not handle this properly we must compensate for
+%%
+%% This is a poor mans substitute for a fair scheduler algorithm
+%% in the Erlang emulator. The mnesia_dumper process performs many
+%% costly BIF invokations and must pay for this. But since the
+%% Emulator does not handle this properly we must compensate for
%% this with some form of load regulation of ourselves in order to
%% not steal all computation power in the Erlang Emulator ans make
%% other processes starve. Hopefully this is a temporary solution.
@@ -1225,6 +1243,6 @@ regulate(RegulatorPid) ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
- Value -> Value
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ Value -> Value
end.
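Beyond whitespace, the mnesia_dumper changes above do three things: aborted transactions found in the log are only undone when dumping at startup, clear_table on dets tables now uses dets:delete_all_objects/1, and the new cstruct field storage_properties is folded into the ets and dets argument lists wherever tables are created or reopened. A minimal sketch of that option handling, mirroring the proplists lookups in the hunks; backend_args/3 is a hypothetical helper and the {ets, Opts} / {dets, Opts} list shape is taken from the diff:

    %% Hypothetical helper (not in the patch): build backend-specific argument
    %% lists from a table's storage_properties, as the dumper and loader hunks
    %% do with proplists:get_value/3.
    backend_args(Tab, Type, StorageProps) ->
        EtsOpts  = proplists:get_value(ets,  StorageProps, []),
        DetsOpts = proplists:get_value(dets, StorageProps, []),
        EtsArgs  = [{keypos, 2}, public, named_table, Type | EtsOpts],
        DetsArgs = [{file, mnesia_lib:tab2dat(Tab)},
                    {type, mnesia_lib:disk_type(Tab, Type)},
                    {keypos, 2} | DetsOpts],
        {EtsArgs, DetsArgs}.

At the user level this presumably corresponds to a {storage_properties, [{ets, [...]}, {dets, [...]}]} option when the table is defined, though that part of the API is not shown in this diff.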
diff --git a/lib/mnesia/src/mnesia_event.erl b/lib/mnesia/src/mnesia_event.erl
index ec6b99ecaa..8085155fd5 100644
--- a/lib/mnesia/src/mnesia_event.erl
+++ b/lib/mnesia/src/mnesia_event.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -121,7 +121,7 @@ handle_system_event({mnesia_up, Node}, State) ->
{ok, State#state{nodes = Nodes}};
handle_system_event({mnesia_down, Node}, State) ->
- case mnesia:system_info(fallback_activated) of
+ case mnesia:system_info(fallback_activated) andalso Node =/= node() of
true ->
case mnesia_monitor:get_env(fallback_error_function) of
{mnesia, lkill} ->
@@ -129,8 +129,8 @@ handle_system_event({mnesia_down, Node}, State) ->
"must be restarted. Forcing shutdown "
"after mnesia_down from ~p...~n",
report_fatal(Msg, [Node], nocore, State#state.dumped_core),
- mnesia:lkill(),
- exit(fatal);
+ catch exit(whereis(mnesia_monitor), fatal),
+ {ok, State};
{UserMod, UserFunc} ->
Msg = "Warning: A fallback is installed and Mnesia got mnesia_down "
"from ~p. ~n",
diff --git a/lib/mnesia/src/mnesia_frag.erl b/lib/mnesia/src/mnesia_frag.erl
index 9e77fe0b9f..4a1616e054 100644
--- a/lib/mnesia/src/mnesia_frag.erl
+++ b/lib/mnesia/src/mnesia_frag.erl
@@ -758,7 +758,7 @@ make_activate(Tab, Props) ->
[] ->
Cs2 = Cs#cstruct{frag_properties = Props},
[Cs3] = expand_cstruct(Cs2, activate),
- TabDef = mnesia_schema:cs2list(Cs3),
+ TabDef = mnesia_schema:vsn_cs2list(Cs3),
Op = {op, change_table_frag, activate, TabDef},
[[Op]];
BadProps ->
@@ -783,7 +783,7 @@ make_deactivate(Tab) ->
mnesia:abort({combine_error, Tab, "Too many fragments"});
true ->
Cs2 = Cs#cstruct{frag_properties = []},
- TabDef = mnesia_schema:cs2list(Cs2),
+ TabDef = mnesia_schema:vsn_cs2list(Cs2),
Op = {op, change_table_frag, deactivate, TabDef},
[[Op]]
end.
@@ -850,7 +850,7 @@ make_add_frag(Tab, SortedNs) ->
SplitOps = split(Tab, FH2, FromIndecies, FragNames, []),
Cs2 = replace_frag_hash(Cs, FH2),
- TabDef = mnesia_schema:cs2list(Cs2),
+ TabDef = mnesia_schema:vsn_cs2list(Cs2),
BaseOp = {op, change_table_frag, {add_frag, SortedNs}, TabDef},
[BaseOp, NewOp | SplitOps].
@@ -962,7 +962,7 @@ make_del_frag(Tab) ->
LastFrag = element(N, FragNames),
[LastOp] = mnesia_schema:make_delete_table(LastFrag, single_frag),
Cs2 = replace_frag_hash(Cs, FH2),
- TabDef = mnesia_schema:cs2list(Cs2),
+ TabDef = mnesia_schema:vsn_cs2list(Cs2),
BaseOp = {op, change_table_frag, del_frag, TabDef},
[BaseOp, LastOp | MergeOps];
_ ->
@@ -1075,7 +1075,7 @@ make_add_node(Tab, Node) when is_atom(Node) ->
Props = Cs#cstruct.frag_properties,
Props2 = lists:keyreplace(node_pool, 1, Props, {node_pool, Pool2}),
Cs2 = Cs#cstruct{frag_properties = Props2},
- TabDef = mnesia_schema:cs2list(Cs2),
+ TabDef = mnesia_schema:vsn_cs2list(Cs2),
Op = {op, change_table_frag, {add_node, Node}, TabDef},
[Op];
true ->
@@ -1104,7 +1104,7 @@ make_del_node(Tab, Node) when is_atom(Node) ->
Pool2 = Pool -- [Node],
Props = lists:keyreplace(node_pool, 1, Cs#cstruct.frag_properties, {node_pool, Pool2}),
Cs2 = Cs#cstruct{frag_properties = Props},
- TabDef = mnesia_schema:cs2list(Cs2),
+ TabDef = mnesia_schema:vsn_cs2list(Cs2),
Op = {op, change_table_frag, {del_node, Node}, TabDef},
[Op];
false ->
diff --git a/lib/mnesia/src/mnesia_frag_hash.erl b/lib/mnesia/src/mnesia_frag_hash.erl
index 610ba2535c..3dfdb87f30 100644
--- a/lib/mnesia/src/mnesia_frag_hash.erl
+++ b/lib/mnesia/src/mnesia_frag_hash.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2002-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2002-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -101,21 +101,19 @@ del_frag(OldState) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-key_to_frag_number(#hash_state{function = phash, next_n_to_split = SplitN, n_doubles = L}, Key) ->
- P = SplitN,
- A = erlang:phash(Key, power2(L)),
+key_to_frag_number(#hash_state{function = phash, n_fragments = N, n_doubles = L}, Key) ->
+ A = erlang:phash(Key, power2(L + 1)),
if
- A < P ->
- erlang:phash(Key, power2(L + 1));
+ A > N ->
+ A - power2(L);
true ->
A
end;
-key_to_frag_number(#hash_state{function = phash2, next_n_to_split = SplitN, n_doubles = L}, Key) ->
- P = SplitN,
- A = erlang:phash2(Key, power2(L)) + 1,
+key_to_frag_number(#hash_state{function = phash2, n_fragments = N, n_doubles = L}, Key) ->
+ A = erlang:phash2(Key, power2(L + 1)) + 1,
if
- A < P ->
- erlang:phash2(Key, power2(L + 1)) + 1;
+ A > N ->
+ A - power2(L);
true ->
A
end;
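The rewritten key_to_frag_number clauses above hash directly into the doubled range and fold overflowing values back onto their unsplit parent fragment, instead of re-hashing based on next_n_to_split. As a worked example with n_fragments = 5 and n_doubles = 2, the hash range is 2^3 = 8: a key hashing to 7 lands in fragment 7 - 4 = 3, while a key hashing to 4 stays in fragment 4. A sketch of the same computation for the phash variant; frag_number/3 is a hypothetical helper, not part of the patch:

    %% Hypothetical illustration of the new fragment lookup: N fragments exist
    %% and the table has been doubled L times, so hashing is done modulo
    %% 2^(L+1) and results beyond N are folded back onto the parent fragment.
    frag_number(Key, N, L) ->
        A = erlang:phash(Key, 1 bsl (L + 1)),   % 1..2^(L+1), same as power2(L + 1)
        if
            A > N -> A - (1 bsl L);              % parent fragment, not split yet
            true  -> A
        end.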
diff --git a/lib/mnesia/src/mnesia_lib.erl b/lib/mnesia/src/mnesia_lib.erl
index 7e926a6258..ae6631646c 100644
--- a/lib/mnesia/src/mnesia_lib.erl
+++ b/lib/mnesia/src/mnesia_lib.erl
@@ -413,7 +413,7 @@ pr_other(Var, Other) ->
[self(), process_info(self(), registered_name),
Var, Other, Why]),
case Other of
- {badarg, [{ets, lookup_element, _}|_]} ->
+ {badarg, [{ets, lookup_element, _, _}|_]} ->
exit(Why);
_ ->
erlang:error(Why)
@@ -1141,12 +1141,18 @@ db_erase(ram_copies, Tab, Key) -> ?ets_delete(Tab, Key), ok;
db_erase(disc_copies, Tab, Key) -> ?ets_delete(Tab, Key), ok;
db_erase(disc_only_copies, Tab, Key) -> dets:delete(Tab, Key).
+db_match_erase(Tab, '_') ->
+ db_delete_all(val({Tab, storage_type}),Tab);
db_match_erase(Tab, Pat) ->
db_match_erase(val({Tab, storage_type}), Tab, Pat).
db_match_erase(ram_copies, Tab, Pat) -> ?ets_match_delete(Tab, Pat), ok;
db_match_erase(disc_copies, Tab, Pat) -> ?ets_match_delete(Tab, Pat), ok;
db_match_erase(disc_only_copies, Tab, Pat) -> dets:match_delete(Tab, Pat).
+db_delete_all(ram_copies, Tab) -> ets:delete_all_objects(Tab);
+db_delete_all(disc_copies, Tab) -> ets:delete_all_objects(Tab);
+db_delete_all(disc_only_copies, Tab) -> dets:delete_all_objects(Tab).
+
db_first(Tab) ->
db_first(val({Tab, storage_type}), Tab).
db_first(ram_copies, Tab) -> ?ets_first(Tab);
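The new db_match_erase/2 clause above short-circuits whole-table erasure (the '_' pattern) to db_delete_all/2, which calls ets:delete_all_objects/1 or dets:delete_all_objects/1 instead of doing a pattern-matched delete; the clear_table change in mnesia_dumper above makes the same switch for dets files. A minimal sketch of the dispatch, not part of the patch; clear_whole_table/1 is a hypothetical helper:

    %% Hypothetical illustration of the new fast path; mirrors db_delete_all/2.
    %% ets:delete_all_objects/1 returns true, dets:delete_all_objects/1 returns ok.
    clear_whole_table(Tab) ->
        case mnesia_lib:val({Tab, storage_type}) of
            disc_only_copies -> ok = dets:delete_all_objects(Tab);
            _RamOrDisc       -> true = ets:delete_all_objects(Tab)
        end,
        ok.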
diff --git a/lib/mnesia/src/mnesia_loader.erl b/lib/mnesia/src/mnesia_loader.erl
index e785b795d1..4ba400fbbf 100644
--- a/lib/mnesia/src/mnesia_loader.erl
+++ b/lib/mnesia/src/mnesia_loader.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -27,7 +27,6 @@
net_load_table/4,
send_table/3]).
--export([old_node_init_table/6]). %% Spawned old node protocol conversion hack
-export([spawned_receiver/8]). %% Spawned lock taking process
-import(mnesia_lib, [set/2, fatal/2, verbose/2, dbg_out/2]).
@@ -36,7 +35,7 @@
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
Value -> Value
end.
@@ -51,31 +50,33 @@ disc_load_table(Tab, Reason) ->
?eval_debug_fun({?MODULE, do_get_disc_copy},
[{tab, Tab},
{reason, Reason},
- {storage, Storage},
+ {storage, Storage},
{type, Type}]),
do_get_disc_copy2(Tab, Reason, Storage, Type).
do_get_disc_copy2(Tab, _Reason, Storage, _Type) when Storage == unknown ->
verbose("Local table copy of ~p has recently been deleted, ignored.~n",
[Tab]),
- {loaded, ok}; %% ?
+ {not_loaded, storage_unknown};
do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == disc_copies ->
%% NOW we create the actual table
Repair = mnesia_monitor:get_env(auto_repair),
- Args = [{keypos, 2}, public, named_table, Type],
- case Reason of
- {dumper, _} -> %% Resources allready allocated
+ StorageProps = val({Tab, storage_properties}),
+ EtsOpts = proplists:get_value(ets, StorageProps, []),
+ Args = [{keypos, 2}, public, named_table, Type | EtsOpts],
+ case Reason of
+ {dumper, _} -> %% Resources already allocated
ignore;
_ ->
mnesia_monitor:mktab(Tab, Args),
- Count = mnesia_log:dcd2ets(Tab, Repair),
+ Count = mnesia_log:dcd2ets(Tab, Repair),
case ets:info(Tab, size) of
X when X < Count * 4 ->
- ok = mnesia_log:ets2dcd(Tab);
+ ok = mnesia_log:ets2dcd(Tab);
_ ->
ignore
end
- end,
+ end,
mnesia_index:init_index(Tab, Storage),
snmpify(Tab, Storage),
set({Tab, load_node}, node()),
@@ -83,8 +84,10 @@ do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == disc_copies ->
{loaded, ok};
do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == ram_copies ->
- Args = [{keypos, 2}, public, named_table, Type],
- case Reason of
+ StorageProps = val({Tab, storage_properties}),
+ EtsOpts = proplists:get_value(ets, StorageProps, []),
+ Args = [{keypos, 2}, public, named_table, Type | EtsOpts],
+ case Reason of
	{dumper, _} -> %% Resources already allocated
ignore;
_ ->
@@ -94,12 +97,12 @@ do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == ram_copies ->
Repair = mnesia_monitor:get_env(auto_repair),
case mnesia_monitor:use_dir() of
true ->
- case mnesia_lib:exists(Fname) of
+ case mnesia_lib:exists(Fname) of
true -> mnesia_log:dcd2ets(Tab, Repair);
false ->
case mnesia_lib:exists(Datname) of
true ->
- mnesia_lib:dets_to_ets(Tab, Tab, Datname,
+ mnesia_lib:dets_to_ets(Tab, Tab, Datname,
Type, Repair, no);
false ->
false
@@ -116,10 +119,14 @@ do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == ram_copies ->
{loaded, ok};
do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == disc_only_copies ->
+ StorageProps = val({Tab, storage_properties}),
+ DetsOpts = proplists:get_value(dets, StorageProps, []),
+
Args = [{file, mnesia_lib:tab2dat(Tab)},
{type, mnesia_lib:disk_type(Tab, Type)},
{keypos, 2},
- {repair, mnesia_monitor:get_env(auto_repair)}],
+ {repair, mnesia_monitor:get_env(auto_repair)}
+ | DetsOpts],
case Reason of
{dumper, _} ->
mnesia_index:init_index(Tab, Storage),
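
The three hunks above make the loader pick up per-table backend options from {Tab, storage_properties} and pass them straight to ets/dets when the local copy is created or loaded. A hedged usage sketch, assuming the option is exposed through mnesia:create_table/2 as {storage_properties, [{Backend, Opts}]} (table name and option values are illustrative):

-module(storage_props_sketch).
-export([create/0]).

-record(blob, {key, data}).

create() ->
    mnesia:create_table(blob,
        [{disc_copies, [node()]},
         {attributes, record_info(fields, blob)},
         %% Forwarded to ets when the local copy is created or loaded;
         %% {dets, Opts} works the same way for disc_only_copies tables.
         {storage_properties, [{ets, [compressed]}]}]).
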
@@ -154,11 +161,11 @@ do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == disc_only_copies -
%% Disable rehashing of table
%% Release read lock on table
%% Send table to receiver in chunks
-%%
+%%
%% Grab read lock on table
%% Block dirty updates
%% Update whereabouts
-%%
+%%
%% Cancel the update subscription
%% Process the subscription events
%% Optionally dump to disc
@@ -166,7 +173,7 @@ do_get_disc_copy2(Tab, Reason, Storage, Type) when Storage == disc_only_copies -
%% Release read lock on table
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--define(MAX_TRANSFER_SIZE, 7500).
+-define(MAX_TRANSFER_SIZE, 7500).
-define(MAX_RAM_FILE_SIZE, 1000000).
-define(MAX_RAM_TRANSFERS, (?MAX_RAM_FILE_SIZE div ?MAX_TRANSFER_SIZE) + 1).
-define(MAX_NOPACKETS, 20).
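
The comment block above outlines the copy protocol: the sender holds a read lock and pushes the table in chunks, while the receiver pulls with 'more' messages until 'no_more' arrives and the buffered subscription events are replayed. A minimal, self-contained sketch of that pull-based chunk flow, stripped of locking, dets chunking and compression (all names are illustrative):

-module(chunk_copy_sketch).
-export([copy/2]).

%% Copy Items from a spawned sender to the calling process, pulled in chunks.
copy(Items, ChunkSize) ->
    Receiver = self(),
    Sender = spawn_link(fun() -> sender(Receiver, Items, ChunkSize) end),
    receive_chunks(Sender, []).

sender(Receiver, [], _ChunkSize) ->
    receive {Receiver, more} -> Receiver ! {self(), no_more} end;
sender(Receiver, Items, ChunkSize) ->
    receive
        {Receiver, more} ->
            {Chunk, Rest} = take(ChunkSize, Items),
            Receiver ! {self(), {more, Chunk}},
            sender(Receiver, Rest, ChunkSize)
    end.

receive_chunks(Sender, Acc) ->
    Sender ! {self(), more},                 %% ask for the next chunk
    receive
        {Sender, {more, Recs}} -> receive_chunks(Sender, Acc ++ Recs);
        {Sender, no_more}      -> Acc
    end.

take(N, L) when length(L) =< N -> {L, []};
take(N, L) -> lists:split(N, L).

For example, chunk_copy_sketch:copy(lists:seq(1, 10), 3) moves the list across in four chunks and returns it intact.
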
@@ -187,14 +194,14 @@ try_net_load_table(Tab, Reason, Ns, Cs) ->
do_get_network_copy(Tab, _Reason, _Ns, unknown, _Cs) ->
verbose("Local table copy of ~p has recently been deleted, ignored.~n", [Tab]),
{not_loaded, storage_unknown};
-do_get_network_copy(Tab, Reason, Ns, Storage, Cs) ->
+do_get_network_copy(Tab, Reason, Ns, Storage, Cs) ->
[Node | Tail] = Ns,
case lists:member(Node,val({current, db_nodes})) of
true ->
dbg_out("Getting table ~p (~p) from node ~p: ~p~n",
[Tab, Storage, Node, Reason]),
?eval_debug_fun({?MODULE, do_get_network_copy},
- [{tab, Tab}, {reason, Reason},
+ [{tab, Tab}, {reason, Reason},
{nodes, Ns}, {storage, Storage}]),
case init_receiver(Node, Tab, Storage, Cs, Reason) of
ok ->
@@ -208,7 +215,7 @@ do_get_network_copy(Tab, Reason, Ns, Storage, Cs) ->
restart ->
try_net_load_table(Tab, Reason, Tail ++ [Node], Cs);
down ->
- try_net_load_table(Tab, Reason, Tail, Cs)
+ try_net_load_table(Tab, Reason, Tail, Cs)
end;
false ->
try_net_load_table(Tab, Reason, Tail, Cs)
@@ -223,10 +230,10 @@ do_snmpify(Tab, Us, Storage) ->
Snmp = mnesia_snmp_hook:create_table(Us, Tab, Storage),
set({Tab, {index, snmp}}, Snmp).
-%% Start the recieiver
+%% Start the receiver
init_receiver(Node, Tab, Storage, Cs, Reas={dumper,add_table_copy}) ->
case start_remote_sender(Node, Tab, Storage) of
- {SenderPid, TabSize, DetsData} ->
+ {SenderPid, TabSize, DetsData} ->
start_receiver(Tab,Storage,Cs,SenderPid,TabSize,DetsData,Reas);
Else ->
Else
@@ -234,21 +241,21 @@ init_receiver(Node, Tab, Storage, Cs, Reas={dumper,add_table_copy}) ->
init_receiver(Node, Tab,Storage,Cs,Reason) ->
%% Grab a schema lock to avoid deadlock between table_loader and schema_commit dumping.
%% Both may grab tables-locks in different order.
- Load =
- fun() ->
- {_,Tid,Ts} = get(mnesia_activity_state),
+ Load =
+ fun() ->
+ {_,Tid,Ts} = get(mnesia_activity_state),
mnesia_locker:rlock(Tid, Ts#tidstore.store, {schema, Tab}),
- %% Check that table still exists
+ %% Check that table still exists
Active = val({Tab, active_replicas}),
	    %% Check that we haven't loaded it already
case val({Tab,where_to_read}) == node() of
true -> ok;
_ ->
- %% And that sender still got a copy
- %% (something might have happend while
+ %% And that sender still got a copy
+	    %% (something might have happened while
	    %% we were waiting for the lock)
true = lists:member(Node, Active),
- {SenderPid, TabSize, DetsData} =
+ {SenderPid, TabSize, DetsData} =
start_remote_sender(Node,Tab,Storage),
Init = table_init_fun(SenderPid),
Args = [self(),Tab,Storage,Cs,SenderPid,
@@ -258,18 +265,18 @@ init_receiver(Node, Tab,Storage,Cs,Reason) ->
wait_on_load_complete(Pid)
end
end,
- Res =
+ Res =
case mnesia:transaction(Load, 20) of
- {atomic, {error,Result}} when
- element(1,Reason) == dumper ->
+ {atomic, {error,Result}} when
+ element(1,Reason) == dumper ->
{error,Result};
- {atomic, {error,Result}} ->
+ {atomic, {error,Result}} ->
fatal("Cannot create table ~p: ~p~n",
[[Tab, Storage], Result]);
{atomic, Result} -> Result;
{aborted, nomore} -> restart;
- {aborted, _Reas} ->
- verbose("Receiver failed on ~p from ~p:~nReason: ~p~n",
+ {aborted, _Reas} ->
+ verbose("Receiver failed on ~p from ~p:~nReason: ~p~n",
[Tab,Node,_Reas]),
down %% either this node or sender is dying
end,
@@ -279,7 +286,7 @@ init_receiver(Node, Tab,Storage,Cs,Reason) ->
start_remote_sender(Node,Tab,Storage) ->
mnesia_controller:start_remote_sender(Node, Tab, self(), Storage),
put(mnesia_table_sender_node, {Tab, Node}),
- receive
+ receive
{SenderPid, {first, TabSize}} ->
{SenderPid, TabSize, false};
{SenderPid, {first, TabSize, DetsData}} ->
@@ -291,22 +298,14 @@ start_remote_sender(Node,Tab,Storage) ->
end.
table_init_fun(SenderPid) ->
- PConv = mnesia_monitor:needs_protocol_conversion(node(SenderPid)),
- MeMyselfAndI = self(),
fun(read) ->
- Receiver =
- if
- PConv == true ->
- MeMyselfAndI ! {actual_tabrec, self()},
- MeMyselfAndI; %% Old mnesia
- PConv == false -> self()
- end,
+ Receiver = self(),
SenderPid ! {Receiver, more},
get_data(SenderPid, Receiver)
end.
%% Add_table_copy gets its own locks.
-start_receiver(Tab,Storage,Cs,SenderPid,TabSize,DetsData,{dumper,add_table_copy}) ->
+start_receiver(Tab,Storage,Cs,SenderPid,TabSize,DetsData,{dumper,add_table_copy}) ->
Init = table_init_fun(SenderPid),
case do_init_table(Tab,Storage,Cs,SenderPid,TabSize,DetsData,self(), Init) of
Err = {error, _} ->
@@ -317,8 +316,8 @@ start_receiver(Tab,Storage,Cs,SenderPid,TabSize,DetsData,{dumper,add_table_copy}
end.
spawned_receiver(ReplyTo,Tab,Storage,Cs, SenderPid,TabSize,DetsData, Init) ->
- process_flag(trap_exit, true),
- Done = do_init_table(Tab,Storage,Cs,
+ process_flag(trap_exit, true),
+ Done = do_init_table(Tab,Storage,Cs,
SenderPid,TabSize,DetsData,
ReplyTo, Init),
ReplyTo ! {self(),Done},
@@ -327,17 +326,17 @@ spawned_receiver(ReplyTo,Tab,Storage,Cs, SenderPid,TabSize,DetsData, Init) ->
exit(normal).
wait_on_load_complete(Pid) ->
- receive
- {Pid, Res} ->
+ receive
+ {Pid, Res} ->
Res;
- {'EXIT', Pid, Reason} ->
+ {'EXIT', Pid, Reason} ->
exit(Reason);
- Else ->
+ Else ->
Pid ! Else,
wait_on_load_complete(Pid)
end.
-do_init_table(Tab,Storage,Cs,SenderPid,
+do_init_table(Tab,Storage,Cs,SenderPid,
TabSize,DetsInfo,OrigTabRec,Init) ->
case create_table(Tab, TabSize, Storage, Cs) of
{Storage,Tab} ->
@@ -345,11 +344,9 @@ do_init_table(Tab,Storage,Cs,SenderPid,
Node = node(SenderPid),
put(mnesia_table_receiver, {Tab, Node, SenderPid}),
mnesia_tm:block_tab(Tab),
- PConv = mnesia_monitor:needs_protocol_conversion(Node),
-
- case init_table(Tab,Storage,Init,PConv,DetsInfo,SenderPid) of
- ok ->
- tab_receiver(Node,Tab,Storage,Cs,PConv,OrigTabRec);
+ case init_table(Tab,Storage,Init,DetsInfo,SenderPid) of
+ ok ->
+ tab_receiver(Node,Tab,Storage,Cs,OrigTabRec);
Reason ->
Msg = "[d]ets:init table failed",
verbose("~s: ~p: ~p~n", [Msg, Tab, Reason]),
@@ -360,17 +357,21 @@ do_init_table(Tab,Storage,Cs,SenderPid,
end.
create_table(Tab, TabSize, Storage, Cs) ->
- if
+ StorageProps = val({Tab, storage_properties}),
+ if
Storage == disc_only_copies ->
mnesia_lib:lock_table(Tab),
Tmp = mnesia_lib:tab2tmp(Tab),
Size = lists:max([TabSize, 256]),
+ DetsOpts = lists:keydelete(estimated_no_objects, 1,
+ proplists:get_value(dets, StorageProps, [])),
Args = [{file, Tmp},
{keypos, 2},
%% {ram_file, true},
{estimated_no_objects, Size},
{repair, mnesia_monitor:get_env(auto_repair)},
- {type, mnesia_lib:disk_type(Tab, Cs#cstruct.type)}],
+ {type, mnesia_lib:disk_type(Tab, Cs#cstruct.type)}
+ | DetsOpts],
file:delete(Tmp),
case mnesia_lib:dets_sync_open(Tab, Args) of
{ok, _} ->
@@ -381,7 +382,8 @@ create_table(Tab, TabSize, Storage, Cs) ->
Else
end;
(Storage == ram_copies) or (Storage == disc_copies) ->
- Args = [{keypos, 2}, public, named_table, Cs#cstruct.type],
+ EtsOpts = proplists:get_value(ets, StorageProps, []),
+ Args = [{keypos, 2}, public, named_table, Cs#cstruct.type | EtsOpts],
case mnesia_monitor:unsafe_mktab(Tab, Args) of
Tab ->
{Storage, Tab};
@@ -390,54 +392,30 @@ create_table(Tab, TabSize, Storage, Cs) ->
end
end.
-tab_receiver(Node, Tab, Storage, Cs, PConv, OrigTabRec) ->
+tab_receiver(Node, Tab, Storage, Cs, OrigTabRec) ->
receive
- {SenderPid, {no_more, DatBin}} when PConv == false ->
+ {SenderPid, {no_more, DatBin}} ->
finish_copy(Storage,Tab,Cs,SenderPid,DatBin,OrigTabRec);
-
- %% Protocol conversion hack
- {SenderPid, {no_more, DatBin}} when is_pid(PConv) ->
- PConv ! {SenderPid, no_more},
- receive
- {old_init_table_complete, ok} ->
- finish_copy(Storage, Tab, Cs, SenderPid, DatBin,OrigTabRec);
- {old_init_table_complete, Reason} ->
- Msg = "OLD: [d]ets:init table failed",
- verbose("~s: ~p: ~p~n", [Msg, Tab, Reason]),
- down(Tab, Storage)
- end;
-
- {actual_tabrec, Pid} ->
- tab_receiver(Node, Tab, Storage, Cs, Pid,OrigTabRec);
-
- {SenderPid, {more, [Recs]}} when is_pid(PConv) ->
- PConv ! {SenderPid, {more, Recs}}, %% Forward Msg to OldNodes
- tab_receiver(Node, Tab, Storage, Cs, PConv,OrigTabRec);
- {'EXIT', PConv, Reason} -> %% [d]ets:init process crashed
- Msg = "Receiver crashed",
- verbose("~s: ~p: ~p~n", [Msg, Tab, Reason]),
- down(Tab, Storage);
-
%% Protocol conversion hack
{copier_done, Node} ->
verbose("Sender of table ~p crashed on node ~p ~n", [Tab, Node]),
down(Tab, Storage);
-
+
{'EXIT', Pid, Reason} ->
handle_exit(Pid, Reason),
- tab_receiver(Node, Tab, Storage, Cs, PConv,OrigTabRec)
+ tab_receiver(Node, Tab, Storage, Cs, OrigTabRec)
end.
make_table_fun(Pid, TabRec) ->
fun(close) ->
ok;
(read) ->
- get_data(Pid, TabRec)
+ get_data(Pid, TabRec)
end.
get_data(Pid, TabRec) ->
- receive
+ receive
{Pid, {more_z, CompressedRecs}} when is_binary(CompressedRecs) ->
Pid ! {TabRec, more},
{zlib_uncompress(CompressedRecs), make_table_fun(Pid,TabRec)};
@@ -448,7 +426,7 @@ get_data(Pid, TabRec) ->
end_of_input;
{copier_done, Node} ->
case node(Pid) of
- Node ->
+ Node ->
{copier_done, Node};
_ ->
get_data(Pid, TabRec)
@@ -458,13 +436,13 @@ get_data(Pid, TabRec) ->
get_data(Pid, TabRec)
end.
-init_table(Tab, disc_only_copies, Fun, false, DetsInfo,Sender) ->
+init_table(Tab, disc_only_copies, Fun, DetsInfo,Sender) ->
ErtsVer = erlang:system_info(version),
case DetsInfo of
- {ErtsVer, DetsData} ->
+ {ErtsVer, DetsData} ->
Res = (catch dets:is_compatible_bchunk_format(Tab, DetsData)),
case Res of
- {'EXIT',{undef,[{dets,_,_}|_]}} ->
+ {'EXIT',{undef,[{dets,_,_,_}|_]}} ->
Sender ! {self(), {old_protocol, Tab}},
dets:init_table(Tab, Fun); %% Old dets version
{'EXIT', What} ->
@@ -481,28 +459,19 @@ init_table(Tab, disc_only_copies, Fun, false, DetsInfo,Sender) ->
_ ->
dets:init_table(Tab, Fun)
end;
-init_table(Tab, _, Fun, false, _DetsInfo,_) ->
+init_table(Tab, _, Fun, _DetsInfo,_) ->
case catch ets:init_table(Tab, Fun) of
true ->
ok;
{'EXIT', Else} -> Else
- end;
-init_table(Tab, Storage, Fun, true, _DetsInfo, Sender) -> %% Old Nodes
- spawn_link(?MODULE, old_node_init_table,
- [Tab, Storage, Fun, self(), false, Sender]),
- ok.
+ end.
-old_node_init_table(Tab, Storage, Fun, TabReceiver, DetsInfo,Sender) ->
- Res = init_table(Tab, Storage, Fun, false, DetsInfo,Sender),
- TabReceiver ! {old_init_table_complete, Res},
- unlink(TabReceiver),
- ok.
finish_copy(Storage,Tab,Cs,SenderPid,DatBin,OrigTabRec) ->
TabRef = {Storage, Tab},
subscr_receiver(TabRef, Cs#cstruct.record_name),
case handle_last(TabRef, Cs#cstruct.type, DatBin) of
- ok ->
+ ok ->
mnesia_index:init_index(Tab, Storage),
snmpify(Tab, Storage),
%% OrigTabRec must not be the spawned tab-receiver
@@ -534,7 +503,7 @@ subscr_receiver(TabRef = {_, Tab}, RecName) ->
ok
end.
-handle_event(TabRef, write, Rec) ->
+handle_event(TabRef, write, Rec) ->
db_put(TabRef, Rec);
handle_event(TabRef, delete, {_Tab, Key}) ->
db_erase(TabRef, Key);
@@ -545,8 +514,8 @@ handle_event(TabRef, clear_table, {_Tab, _Key}) ->
handle_last({disc_copies, Tab}, _Type, nobin) ->
Ret = mnesia_log:ets2dcd(Tab),
- Fname = mnesia_lib:tab2dat(Tab),
- case mnesia_lib:exists(Fname) of
+ Fname = mnesia_lib:tab2dat(Tab),
+ case mnesia_lib:exists(Fname) of
true -> %% Remove old .DAT files.
file:delete(Fname);
false ->
@@ -560,10 +529,13 @@ handle_last({disc_only_copies, Tab}, Type, nobin) ->
Dat = mnesia_lib:tab2dat(Tab),
case file:rename(Tmp, Dat) of
ok ->
+ StorageProps = val({Tab, storage_properties}),
+ DetsOpts = proplists:get_value(dets, StorageProps, []),
+
Args = [{file, mnesia_lib:tab2dat(Tab)},
{type, mnesia_lib:disk_type(Tab, Type)},
{keypos, 2},
- {repair, mnesia_monitor:get_env(auto_repair)}],
+ {repair, mnesia_monitor:get_env(auto_repair)} | DetsOpts],
mnesia_monitor:open_dets(Tab, Args),
ok;
{error, Reason} ->
@@ -653,31 +625,29 @@ send_table(Pid, Tab, RemoteS) ->
{error, {no_exists, Tab}};
Storage ->
%% Send first
- TabSize = mnesia:table_info(Tab, size),
- Pconvert = mnesia_monitor:needs_protocol_conversion(node(Pid)),
+ TabSize = mnesia:table_info(Tab, size),
KeysPerTransfer = calc_nokeys(Storage, Tab),
ChunkData = dets:info(Tab, bchunk_format),
- UseDetsChunk =
- Storage == RemoteS andalso
- Storage == disc_only_copies andalso
- ChunkData /= undefined andalso
- Pconvert == false,
- if
+ UseDetsChunk =
+ Storage == RemoteS andalso
+ Storage == disc_only_copies andalso
+ ChunkData /= undefined,
+ if
UseDetsChunk == true ->
DetsInfo = erlang:system_info(version),
Pid ! {self(), {first, TabSize, {DetsInfo, ChunkData}}};
true ->
Pid ! {self(), {first, TabSize}}
end,
-
+
%% Debug info
put(mnesia_table_sender, {Tab, node(Pid), Pid}),
{Init, Chunk} = reader_funcs(UseDetsChunk, Tab, Storage, KeysPerTransfer),
-
+
SendIt = fun() ->
prepare_copy(Pid, Tab, Storage),
- send_more(Pid, 1, Chunk, Init(), Tab, Pconvert),
+ send_more(Pid, 1, Chunk, Init(), Tab),
finish_copy(Pid, Tab, Storage, RemoteS)
end,
@@ -698,7 +668,7 @@ send_table(Pid, Tab, RemoteS) ->
{error, Reason}
end
end.
-
+
prepare_copy(Pid, Tab, Storage) ->
Trans =
fun() ->
@@ -717,11 +687,11 @@ prepare_copy(Pid, Tab, Storage) ->
update_where_to_write(Tab, Node) ->
case val({Tab, access_mode}) of
- read_only ->
+ read_only ->
ignore;
- read_write ->
+ read_write ->
Current = val({current, db_nodes}),
- Ns =
+ Ns =
case lists:member(Node, Current) of
true -> Current;
false -> [Node | Current]
@@ -729,27 +699,27 @@ update_where_to_write(Tab, Node) ->
update_where_to_write(Ns, Tab, Node)
end.
-update_where_to_write([], _, _) ->
+update_where_to_write([], _, _) ->
ok;
update_where_to_write([H|T], Tab, AddNode) ->
- rpc:call(H, mnesia_controller, call,
+ rpc:call(H, mnesia_controller, call,
[{update_where_to_write, [add, Tab, AddNode], self()}]),
update_where_to_write(T, Tab, AddNode).
-send_more(Pid, N, Chunk, DataState, Tab, OldNode) ->
+send_more(Pid, N, Chunk, DataState, Tab) ->
receive
{NewPid, more} ->
- case send_packet(N - 1, NewPid, Chunk, DataState, OldNode) of
- New when is_integer(New) ->
+ case send_packet(N - 1, NewPid, Chunk, DataState) of
+ New when is_integer(New) ->
New - 1;
NewData ->
- send_more(NewPid, ?MAX_NOPACKETS, Chunk, NewData, Tab, OldNode)
+ send_more(NewPid, ?MAX_NOPACKETS, Chunk, NewData, Tab)
end;
{_NewPid, {old_protocol, Tab}} ->
Storage = val({Tab, storage_type}),
- {Init, NewChunk} =
+ {Init, NewChunk} =
reader_funcs(false, Tab, Storage, calc_nokeys(Storage, Tab)),
- send_more(Pid, 1, NewChunk, Init(), Tab, OldNode);
+ send_more(Pid, 1, NewChunk, Init(), Tab);
{copier_done, Node} when Node == node(Pid)->
verbose("Receiver of table ~p crashed on ~p (more)~n", [Tab, Node]),
@@ -770,7 +740,7 @@ dets_bchunk(Tab, Chunk) -> %% Arrg
case dets:bchunk(Tab, Chunk) of
{Cont, Data} -> {Data, Cont};
Else -> Else
- end.
+ end.
zlib_compress(Data, Level) ->
BinData = term_to_binary(Data),
@@ -793,28 +763,20 @@ compression_level() ->
Val -> Val
end.
-send_packet(N, Pid, _Chunk, '$end_of_table', OldNode) ->
- case OldNode of
- true -> ignore; %% Old nodes can't handle the new no_more
- false -> Pid ! {self(), no_more}
- end,
+send_packet(N, Pid, _Chunk, '$end_of_table') ->
+ Pid ! {self(), no_more},
N;
-send_packet(N, Pid, Chunk, {[], Cont}, OldNode) ->
- send_packet(N, Pid, Chunk, Chunk(Cont), OldNode);
-send_packet(N, Pid, Chunk, {Recs, Cont}, OldNode) when N < ?MAX_NOPACKETS ->
- case OldNode of
- true ->
- Pid ! {self(), {more, [Recs]}}; %% Old need's wrapping list
- false ->
- case compression_level() of
- 0 ->
- Pid ! {self(), {more, Recs}};
- Level ->
- Pid ! {self(), {more_z, zlib_compress(Recs, Level)}}
- end
+send_packet(N, Pid, Chunk, {[], Cont}) ->
+ send_packet(N, Pid, Chunk, Chunk(Cont));
+send_packet(N, Pid, Chunk, {Recs, Cont}) when N < ?MAX_NOPACKETS ->
+ case compression_level() of
+ 0 ->
+ Pid ! {self(), {more, Recs}};
+ Level ->
+ Pid ! {self(), {more_z, zlib_compress(Recs, Level)}}
end,
- send_packet(N+1, Pid, Chunk, Chunk(Cont), OldNode);
-send_packet(_N, _Pid, _Chunk, DataState, _OldNode) ->
+ send_packet(N+1, Pid, Chunk, Chunk(Cont));
+send_packet(_N, _Pid, _Chunk, DataState) ->
DataState.
finish_copy(Pid, Tab, Storage, RemoteS) ->
@@ -855,5 +817,5 @@ dat2bin(_Tab, _LocalS, _RemoteS) ->
handle_exit(Pid, Reason) when node(Pid) == node() ->
exit(Reason);
-handle_exit(_Pid, _Reason) -> %% Not from our node, this will be handled by
+handle_exit(_Pid, _Reason) -> %% Not from our node, this will be handled by
ignore. %% mnesia_down soon.
diff --git a/lib/mnesia/src/mnesia_locker.erl b/lib/mnesia/src/mnesia_locker.erl
index 0492d794f3..a22c95d454 100644
--- a/lib/mnesia/src/mnesia_locker.erl
+++ b/lib/mnesia/src/mnesia_locker.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -66,14 +66,14 @@
-record(queue, {oid, tid, op, pid, lucky}).
-%% mnesia_held_locks: contain {Oid, Op, Tid} entries (bag)
+%% mnesia_held_locks: contain {Oid, MaxLock, [{Op, Tid}]} entries
-define(match_oid_held_locks(Oid), {Oid, '_', '_'}).
-%% mnesia_tid_locks: contain {Tid, Oid, Op} entries (bag)
+%% mnesia_tid_locks: contain {Tid, Oid, Op} entries (bag)
-define(match_oid_tid_locks(Tid), {Tid, '_', '_'}).
%% mnesia_sticky_locks: contain {Oid, Node} entries and {Tab, Node} entries (set)
-define(match_oid_sticky_locks(Oid),{Oid, '_'}).
%% mnesia_lock_queue: contain {queue, Oid, Tid, Op, ReplyTo, WaitForTid} entries (bag)
--define(match_oid_lock_queue(Oid), #queue{oid=Oid, tid='_', op = '_', pid = '_', lucky = '_'}).
+-define(match_oid_lock_queue(Oid), #queue{oid=Oid, tid='_', op = '_', pid = '_', lucky = '_'}).
%% mnesia_lock_counter: {{write, Tab}, Number} &&
%% {{read, Tab}, Number} entries (set)
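
The mnesia_held_locks format noted above changes from one bag entry per holder to a single ordered_set entry per Oid, carrying the strongest lock taken so far plus the list of {Op, Tid} holders (see set_lock/4 and release_lock/1 below). A minimal sketch of how such an entry grows, with illustrative names:

-module(held_locks_sketch).
-export([demo/0, add/3]).

%% Merge a new {Op, Tid} holder into the single entry kept per Oid.
add(Locks, Oid, {Op, Tid}) ->
    case ets:lookup(Locks, Oid) of
        [] ->
            ets:insert(Locks, {Oid, Op, [{Op, Tid}]});
        [{Oid, Max, Holders}] ->
            NewMax = if Op =:= write -> write; true -> Max end,
            ets:insert(Locks, {Oid, NewMax, [{Op, Tid} | Holders]})
    end.

demo() ->
    Locks = ets:new(held, [ordered_set]),
    Oid = {person, 1},
    add(Locks, Oid, {read, tid1}),
    add(Locks, Oid, {read, tid2}),
    add(Locks, Oid, {write, tid3}),
    %% One entry per Oid: strongest lock recorded, all holders kept.
    [{Oid, write, [{write, tid3}, {read, tid2}, {read, tid1}]}] =
        ets:lookup(Locks, Oid),
    ets:delete(Locks),
    ok.
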
@@ -83,11 +83,11 @@ start() ->
init(Parent) ->
register(?MODULE, self()),
process_flag(trap_exit, true),
- ?ets_new_table(mnesia_held_locks, [bag, private, named_table]),
+ ?ets_new_table(mnesia_held_locks, [ordered_set, private, named_table]),
?ets_new_table(mnesia_tid_locks, [bag, private, named_table]),
?ets_new_table(mnesia_sticky_locks, [set, private, named_table]),
?ets_new_table(mnesia_lock_queue, [bag, private, named_table, {keypos, 2}]),
-
+
proc_lib:init_ack(Parent, {ok, self()}),
case ?catch_val(pid_sort_order) of
r9b_plain -> put(pid_sort_order, r9b_plain);
@@ -98,8 +98,8 @@ init(Parent) ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
- _VaLuE_ -> _VaLuE_
+ {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
+ _VaLuE_ -> _VaLuE_
end.
reply(From, R) ->
@@ -111,10 +111,10 @@ l_request(Node, X, Store) ->
l_req_rec(Node, Store) ->
?ets_insert(Store, {nodes, Node}),
- receive
- {?MODULE, Node, Reply} ->
+ receive
+ {?MODULE, Node, Reply} ->
Reply;
- {mnesia_down, Node} ->
+ {mnesia_down, Node} ->
{not_granted, {node_not_running, Node}}
end.
@@ -128,10 +128,10 @@ send_release_tid(Nodes, Tid) ->
rpc:abcast(Nodes, ?MODULE, {self(), {sync_release_tid, Tid}}).
receive_release_tid_acc([Node | Nodes], Tid) ->
- receive
- {?MODULE, Node, {tid_released, Tid}} ->
+ receive
+ {?MODULE, Node, {tid_released, Tid}} ->
receive_release_tid_acc(Nodes, Tid);
- {mnesia_down, Node} ->
+ {mnesia_down, Node} ->
receive_release_tid_acc(Nodes, Tid)
end;
receive_release_tid_acc([], _Tid) ->
@@ -152,27 +152,27 @@ loop(State) ->
%% Really do a read, but get hold of a write lock
%% used by mnesia:wread(Oid).
-
+
{From, {read_write, Tid, Oid}} ->
try_sticky_lock(Tid, read_write, From, Oid),
loop(State);
-
+
%% Tid has somehow terminated, clear up everything
%% and pass locks on to queued processes.
%% This is the purpose of the mnesia_tid_locks table
-
+
{release_tid, Tid} ->
do_release_tid(Tid),
loop(State);
-
+
%% stick lock, first tries this to the where_to_read Node
{From, {test_set_sticky, Tid, {Tab, _} = Oid, Lock}} ->
case ?ets_lookup(mnesia_sticky_locks, Tab) of
- [] ->
+ [] ->
reply(From, not_stuck),
loop(State);
[{_,Node}] when Node == node() ->
- %% Lock is stuck here, see now if we can just set
+ %% Lock is stuck here, see now if we can just set
%% a regular write lock
try_lock(Tid, Lock, From, Oid),
loop(State);
@@ -188,7 +188,7 @@ loop(State) ->
?ets_insert(mnesia_sticky_locks, {Tab, N}),
loop(State);
- %% The caller which sends this message, must have first
+ %% The caller which sends this message, must have first
	%% acquired a write lock on the entire table
{unstick, Tab} ->
?ets_delete(mnesia_sticky_locks, Tab),
@@ -205,14 +205,14 @@ loop(State) ->
[{_,N}] ->
Req = {From, {ix_read, Tid, Tab, IxKey, Pos}},
From ! {?MODULE, node(), {switch, N, Req}},
- loop(State)
+ loop(State)
end;
{From, {sync_release_tid, Tid}} ->
do_release_tid(Tid),
reply(From, {tid_released, Tid}),
loop(State);
-
+
{release_remote_non_pending, Node, Pending} ->
release_remote_non_pending(Node, Pending),
mnesia_monitor:mnesia_down(?MODULE, Node),
@@ -229,16 +229,23 @@ loop(State) ->
{get_table, From, LockTable} ->
From ! {LockTable, ?ets_match_object(LockTable, '_')},
loop(State);
-
+
Msg ->
error("~p got unexpected message: ~p~n", [?MODULE, Msg]),
loop(State)
end.
-set_lock(Tid, Oid, Op) ->
- ?dbg("Granted ~p ~p ~p~n", [Tid,Oid,Op]),
- ?ets_insert(mnesia_held_locks, {Oid, Op, Tid}),
- ?ets_insert(mnesia_tid_locks, {Tid, Oid, Op}).
+set_lock(Tid, Oid, Op, []) ->
+ ?ets_insert(mnesia_tid_locks, {Tid, Oid, Op}),
+ ?ets_insert(mnesia_held_locks, {Oid, Op, [{Op, Tid}]});
+set_lock(Tid, Oid, read, [{Oid, Prev, Items}]) ->
+ ?ets_insert(mnesia_tid_locks, {Tid, Oid, read}),
+ ?ets_insert(mnesia_held_locks, {Oid, Prev, [{read, Tid}|Items]});
+set_lock(Tid, Oid, write, [{Oid, _Prev, Items}]) ->
+ ?ets_insert(mnesia_tid_locks, {Tid, Oid, write}),
+ ?ets_insert(mnesia_held_locks, {Oid, write, [{write, Tid}|Items]});
+set_lock(Tid, Oid, Op, undefined) ->
+ set_lock(Tid, Oid, Op, ?ets_lookup(mnesia_held_locks, Oid)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Acquire locks
@@ -261,32 +268,32 @@ try_lock(Tid, Op, Pid, Oid) ->
try_lock(Tid, Op, SimpleOp, Lock, Pid, Oid) ->
case can_lock(Tid, Lock, Oid, {no, bad_luck}) of
- yes ->
- Reply = grant_lock(Tid, SimpleOp, Lock, Oid),
+ {yes, Default} ->
+ Reply = grant_lock(Tid, SimpleOp, Lock, Oid, Default),
reply(Pid, Reply);
- {no, Lucky} ->
+ {{no, Lucky},_} ->
C = #cyclic{op = SimpleOp, lock = Lock, oid = Oid, lucky = Lucky},
?dbg("Rejected ~p ~p ~p ~p ~n", [Tid, Oid, Lock, Lucky]),
reply(Pid, {not_granted, C});
- {queue, Lucky} ->
+ {{queue, Lucky},_} ->
?dbg("Queued ~p ~p ~p ~p ~n", [Tid, Oid, Lock, Lucky]),
%% Append to queue: Nice place for trace output
- ?ets_insert(mnesia_lock_queue,
- #queue{oid = Oid, tid = Tid, op = Op,
+ ?ets_insert(mnesia_lock_queue,
+ #queue{oid = Oid, tid = Tid, op = Op,
pid = Pid, lucky = Lucky}),
?ets_insert(mnesia_tid_locks, {Tid, Oid, {queued, Op}})
end.
-grant_lock(Tid, read, Lock, Oid = {Tab, Key})
+grant_lock(Tid, read, Lock, Oid = {Tab, Key}, Default)
when Key /= ?ALL, Tab /= ?GLOBAL ->
case node(Tid#tid.pid) == node() of
true ->
- set_lock(Tid, Oid, Lock),
+ set_lock(Tid, Oid, Lock, Default),
{granted, lookup_in_client};
false ->
try
Val = mnesia_lib:db_get(Tab, Key), %% lookup as well
- set_lock(Tid, Oid, Lock),
+ set_lock(Tid, Oid, Lock, Default),
{granted, Val}
catch _:_Reason ->
%% Table has been deleted from this node,
@@ -296,87 +303,71 @@ grant_lock(Tid, read, Lock, Oid = {Tab, Key})
{not_granted, C}
end
end;
-grant_lock(Tid, {ix_read,IxKey,Pos}, Lock, Oid = {Tab, _}) ->
+grant_lock(Tid, {ix_read,IxKey,Pos}, Lock, Oid = {Tab, _}, Default) ->
try
Res = ix_read_res(Tab, IxKey,Pos),
- set_lock(Tid, Oid, Lock),
+ set_lock(Tid, Oid, Lock, Default),
{granted, Res, [?ALL]}
catch _:_ ->
{not_granted, {no_exists, Tab, {index, [Pos]}}}
end;
-grant_lock(Tid, read, Lock, Oid) ->
- set_lock(Tid, Oid, Lock),
+grant_lock(Tid, read, Lock, Oid, Default) ->
+ set_lock(Tid, Oid, Lock, Default),
{granted, ok};
-grant_lock(Tid, write, Lock, Oid) ->
- set_lock(Tid, Oid, Lock),
+grant_lock(Tid, write, Lock, Oid, Default) ->
+ set_lock(Tid, Oid, Lock, Default),
granted.
%% 1) Impose an ordering on all transactions: favour old (low tid) transactions
%% newer (higher tid) transactions may never wait on older ones,
%% 2) When releasing the tids from the queue always begin with youngest (high tid)
%% because of 1) it will avoid the deadlocks.
-%% 3) TabLocks is the problem :-) They should not starve and not deadlock
+%% 3) TabLocks is the problem :-) They should not starve and not deadlock
%% handle tablocks in queue as they had locks on unlocked records.
-can_lock(Tid, read, {Tab, Key}, AlreadyQ) when Key /= ?ALL ->
- %% The key is bound, no need for the other BIF
- Oid = {Tab, Key},
- ObjLocks = ?ets_match_object(mnesia_held_locks, {Oid, write, '_'}),
- TabLocks = ?ets_match_object(mnesia_held_locks, {{Tab, ?ALL}, write, '_'}),
- check_lock(Tid, Oid, ObjLocks, TabLocks, yes, AlreadyQ, read);
+can_lock(Tid, read, Oid = {Tab, Key}, AlreadyQ) when Key /= ?ALL ->
+ ObjLocks = ?ets_lookup(mnesia_held_locks, Oid),
+ TabLocks = ?ets_lookup(mnesia_held_locks, {Tab, ?ALL}),
+ {check_lock(Tid, Oid,
+ filter_write(ObjLocks),
+ filter_write(TabLocks),
+ yes, AlreadyQ, read),
+ ObjLocks};
can_lock(Tid, read, Oid, AlreadyQ) -> % Whole tab
Tab = element(1, Oid),
ObjLocks = ?ets_match_object(mnesia_held_locks, {{Tab, '_'}, write, '_'}),
- check_lock(Tid, Oid, ObjLocks, [], yes, AlreadyQ, read);
+ {check_lock(Tid, Oid, ObjLocks, [], yes, AlreadyQ, read), undefined};
-can_lock(Tid, write, {Tab, Key}, AlreadyQ) when Key /= ?ALL ->
- Oid = {Tab, Key},
+can_lock(Tid, write, Oid = {Tab, Key}, AlreadyQ) when Key /= ?ALL ->
ObjLocks = ?ets_lookup(mnesia_held_locks, Oid),
TabLocks = ?ets_lookup(mnesia_held_locks, {Tab, ?ALL}),
- check_lock(Tid, Oid, ObjLocks, TabLocks, yes, AlreadyQ, write);
+ {check_lock(Tid, Oid, ObjLocks, TabLocks, yes, AlreadyQ, write), ObjLocks};
can_lock(Tid, write, Oid, AlreadyQ) -> % Whole tab
Tab = element(1, Oid),
ObjLocks = ?ets_match_object(mnesia_held_locks, ?match_oid_held_locks({Tab, '_'})),
- check_lock(Tid, Oid, ObjLocks, [], yes, AlreadyQ, write).
+ {check_lock(Tid, Oid, ObjLocks, [], yes, AlreadyQ, write), undefined}.
+
+filter_write([{_, read, _}]) -> [];
+filter_write(Res) -> Res.
%% Check held locks for conflicting locks
-check_lock(Tid, Oid, [Lock | Locks], TabLocks, X, AlreadyQ, Type) ->
- case element(3, Lock) of
- Tid ->
- check_lock(Tid, Oid, Locks, TabLocks, X, AlreadyQ, Type);
- WaitForTid ->
- Queue = allowed_to_be_queued(WaitForTid,Tid),
- if Queue == true ->
- check_lock(Tid, Oid, Locks, TabLocks, {queue, WaitForTid}, AlreadyQ, Type);
- Tid#tid.pid == WaitForTid#tid.pid ->
- dbg_out("Spurious lock conflict ~w ~w: ~w -> ~w~n",
- [Oid, Lock, Tid, WaitForTid]),
- %% Test..
- {Tab, _Key} = Oid,
- HaveQ = (ets:lookup(mnesia_lock_queue, Oid) /= [])
- orelse (ets:lookup(mnesia_lock_queue,{Tab,?ALL}) /= []),
- if
- HaveQ ->
- {no, WaitForTid};
- true ->
- check_lock(Tid,Oid,Locks,TabLocks,{queue,WaitForTid},AlreadyQ,Type)
- end;
- %%{no, WaitForTid}; Safe solution
- true ->
- {no, WaitForTid}
- end
+check_lock(Tid, Oid, [{_, _, Lock} | Locks], TabLocks, _X, AlreadyQ, Type) ->
+ case can_queue(Lock, Tid, Oid, _X) of
+ {no, _} = Res ->
+ Res;
+ Res ->
+ check_lock(Tid, Oid, Locks, TabLocks, Res, AlreadyQ, Type)
end;
check_lock(_, _, [], [], X, {queue, bad_luck}, _) ->
X; %% The queue should be correct already no need to check it again
check_lock(_, _, [], [], X = {queue, _Tid}, _AlreadyQ, _) ->
- X;
+ X;
-check_lock(Tid, Oid, [], [], X, AlreadyQ, Type) ->
- {Tab, Key} = Oid,
+check_lock(Tid, Oid = {Tab, Key}, [], [], X, AlreadyQ, Type) ->
if
Type == write ->
check_queue(Tid, Tab, X, AlreadyQ);
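
The ordering rules in the comments above boil down to a wait-die style check (allowed_to_be_queued/2): a requester may queue behind a holder only if the holder has a higher (younger) tid; a younger requester never waits on an older holder and gets {not_granted, ...} instead, which restarts its transaction. A minimal sketch of that decision, using plain integers as tids for illustration (the real locker filters out the requester's own locks before this check):

-module(lock_order_sketch).
-export([decide/2]).

%% decide(HolderTid, RequesterTid) -> queue | restart_requester
decide(HolderTid, RequesterTid) when HolderTid > RequesterTid ->
    queue;              %% older requester may wait on a younger holder
decide(_HolderTid, _RequesterTid) ->
    restart_requester.  %% younger requester never waits on an older holder
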
@@ -387,7 +378,7 @@ check_lock(Tid, Oid, [], [], X, AlreadyQ, Type) ->
%% If there is a queue on that object, read_lock shouldn't be granted
ObjLocks = ets:lookup(mnesia_lock_queue, Oid),
case max(ObjLocks) of
- empty ->
+ empty ->
check_queue(Tid, Tab, X, AlreadyQ);
ObjL ->
case allowed_to_be_queued(ObjL,Tid) of
@@ -403,16 +394,36 @@ check_lock(Tid, Oid, [], [], X, AlreadyQ, Type) ->
check_lock(Tid, Oid, [], TabLocks, X, AlreadyQ, Type) ->
check_lock(Tid, Oid, TabLocks, [], X, AlreadyQ, Type).
+can_queue([{_Op, Tid}|Locks], Tid, Oid, Res) ->
+ can_queue(Locks, Tid, Oid, Res);
+can_queue([{Op, WaitForTid}|Locks], Tid, Oid = {Tab, _}, _) ->
+ case allowed_to_be_queued(WaitForTid,Tid) of
+ true when Tid#tid.pid == WaitForTid#tid.pid ->
+ dbg_out("Spurious lock conflict ~w ~w: ~w -> ~w~n",
+ [Oid, Op, Tid, WaitForTid]),
+ HaveQ = (ets:lookup(mnesia_lock_queue, Oid) /= [])
+ orelse (ets:lookup(mnesia_lock_queue,{Tab,?ALL}) /= []),
+ case HaveQ of
+ true -> {no, WaitForTid};
+ false -> can_queue(Locks, Tid, Oid, {queue, WaitForTid})
+ end;
+ true ->
+ can_queue(Locks, Tid, Oid, {queue, WaitForTid});
+ false ->
+ {no, WaitForTid}
+ end;
+can_queue([], _, _, Res) -> Res.
+
%% True if WaitForTid > Tid -> % Important order
allowed_to_be_queued(WaitForTid, Tid) ->
case get(pid_sort_order) of
undefined -> WaitForTid > Tid;
- r9b_plain ->
+ r9b_plain ->
cmp_tid(true, WaitForTid, Tid) =:= 1;
- standard ->
+ standard ->
cmp_tid(false, WaitForTid, Tid) =:= 1
- end.
-
+ end.
+
%% Check queue for conflicting locks
%% Assume that all queued locks belongs to other tid's
@@ -421,25 +432,25 @@ check_queue(Tid, Tab, X, AlreadyQ) ->
Greatest = max(TabLocks),
case Greatest of
empty -> X;
- Tid -> X;
- WaitForTid ->
+ Tid -> X;
+ WaitForTid ->
case allowed_to_be_queued(WaitForTid,Tid) of
true ->
{queue, WaitForTid};
- false when AlreadyQ =:= {no, bad_luck} ->
+ false when AlreadyQ =:= {no, bad_luck} ->
{no, WaitForTid}
end
end.
sort_queue(QL) ->
case get(pid_sort_order) of
- undefined ->
+ undefined ->
lists:reverse(lists:keysort(#queue.tid, QL));
- r9b_plain ->
- lists:sort(fun(#queue{tid=X},#queue{tid=Y}) ->
+ r9b_plain ->
+ lists:sort(fun(#queue{tid=X},#queue{tid=Y}) ->
cmp_tid(true, X, Y) == 1
end, QL);
- standard ->
+ standard ->
lists:sort(fun(#queue{tid=X},#queue{tid=Y}) ->
cmp_tid(false, X, Y) == 1
end, QL)
@@ -456,22 +467,22 @@ set_read_lock_on_all_keys(Tid, From, Tab, IxKey, Pos) ->
Op = {ix_read,IxKey, Pos},
Lock = read,
case can_lock(Tid, Lock, Oid, {no, bad_luck}) of
- yes ->
- Reply = grant_lock(Tid, Op, Lock, Oid),
+ {yes, Default} ->
+ Reply = grant_lock(Tid, Op, Lock, Oid, Default),
reply(From, Reply);
- {no, Lucky} ->
+ {{no, Lucky},_} ->
C = #cyclic{op = Op, lock = Lock, oid = Oid, lucky = Lucky},
?dbg("Rejected ~p ~p ~p ~p ~n", [Tid, Oid, Lock, Lucky]),
reply(From, {not_granted, C});
- {queue, Lucky} ->
+ {{queue, Lucky},_} ->
?dbg("Queued ~p ~p ~p ~p ~n", [Tid, Oid, Lock, Lucky]),
%% Append to queue: Nice place for trace output
- ?ets_insert(mnesia_lock_queue,
- #queue{oid = Oid, tid = Tid, op = Op,
+ ?ets_insert(mnesia_lock_queue,
+ #queue{oid = Oid, tid = Tid, op = Op,
pid = From, lucky = Lucky}),
?ets_insert(mnesia_tid_locks, {Tid, Oid, {queued, Op}})
end.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Release of locks
@@ -520,30 +531,40 @@ release_locks([]) ->
release_lock({Tid, Oid, {queued, _}}) ->
?ets_match_delete(mnesia_lock_queue, #queue{oid=Oid, tid = Tid, op = '_',
pid = '_', lucky = '_'});
-release_lock({Tid, Oid, Op}) ->
- if
- Op == write ->
- ?ets_delete(mnesia_held_locks, Oid);
- Op == read ->
- ets:delete_object(mnesia_held_locks, {Oid, Op, Tid})
+release_lock({_Tid, Oid, write}) ->
+ ?ets_delete(mnesia_held_locks, Oid);
+release_lock({Tid, Oid, read}) ->
+ case ?ets_lookup(mnesia_held_locks, Oid) of
+ [{Oid, Prev, Locks0}] ->
+ case remove_tid(Locks0, Tid, []) of
+ [] -> ?ets_delete(mnesia_held_locks, Oid);
+ Locks -> ?ets_insert(mnesia_held_locks, {Oid, Prev, Locks})
+ end;
+ [] -> ok
end.
+remove_tid([{_Op, Tid}|Ls], Tid, Acc) ->
+ remove_tid(Ls,Tid, Acc);
+remove_tid([Keep|Ls], Tid, Acc) ->
+ remove_tid(Ls,Tid, [Keep|Acc]);
+remove_tid([], _, Acc) -> Acc.
+
rearrange_queue([{_Tid, {Tab, Key}, _} | Locks]) ->
if
- Key /= ?ALL->
- Queue =
- ets:lookup(mnesia_lock_queue, {Tab, ?ALL}) ++
+ Key /= ?ALL->
+ Queue =
+ ets:lookup(mnesia_lock_queue, {Tab, ?ALL}) ++
ets:lookup(mnesia_lock_queue, {Tab, Key}),
- case Queue of
- [] ->
+ case Queue of
+ [] ->
ok;
_ ->
Sorted = sort_queue(Queue),
try_waiters_obj(Sorted)
- end;
- true ->
+ end;
+ true ->
Pat = ?match_oid_lock_queue({Tab, '_'}),
- Queue = ?ets_match_object(mnesia_lock_queue, Pat),
+ Queue = ?ets_match_object(mnesia_lock_queue, Pat),
Sorted = sort_queue(Queue),
try_waiters_tab(Sorted)
end,
@@ -556,7 +577,7 @@ try_waiters_obj([W | Waiters]) ->
case try_waiter(W) of
queued ->
no;
- _ ->
+ _ ->
try_waiters_obj(Waiters)
end;
try_waiters_obj([]) ->
@@ -573,10 +594,10 @@ try_waiters_tab([W | Waiters]) ->
end;
Oid ->
case try_waiter(W) of
- queued ->
+ queued ->
Rest = key_delete_all(Oid, #queue.oid, Waiters),
try_waiters_tab(Rest);
- _ ->
+ _ ->
try_waiters_tab(Waiters)
end
end;
@@ -592,22 +613,22 @@ try_waiter({queue, Oid, Tid, Op, ReplyTo, _}) ->
try_waiter(Oid, Op, SimpleOp, Lock, ReplyTo, Tid) ->
case can_lock(Tid, Lock, Oid, {queue, bad_luck}) of
- yes ->
+ {yes, Default} ->
%% Delete from queue: Nice place for trace output
- ?ets_match_delete(mnesia_lock_queue,
+ ?ets_match_delete(mnesia_lock_queue,
#queue{oid=Oid, tid = Tid, op = Op,
pid = ReplyTo, lucky = '_'}),
- Reply = grant_lock(Tid, SimpleOp, Lock, Oid),
+ Reply = grant_lock(Tid, SimpleOp, Lock, Oid, Default),
reply(ReplyTo,Reply),
locked;
- {queue, _Why} ->
+ {{queue, _Why}, _} ->
?dbg("Keep ~p ~p ~p ~p~n", [Tid, Oid, Lock, _Why]),
- queued; % Keep waiter in queue
- {no, Lucky} ->
+ queued; % Keep waiter in queue
+ {{no, Lucky}, _} ->
C = #cyclic{op = SimpleOp, lock = Lock, oid = Oid, lucky = Lucky},
verbose("** WARNING ** Restarted transaction, possible deadlock in lock queue ~w: cyclic = ~w~n",
[Tid, C]),
- ?ets_match_delete(mnesia_lock_queue,
+ ?ets_match_delete(mnesia_lock_queue,
#queue{oid=Oid, tid = Tid, op = Op,
pid = ReplyTo, lucky = '_'}),
Reply = {not_granted, C},
@@ -645,7 +666,7 @@ mnesia_down(N, Pending) ->
Pid ! {release_remote_non_pending, N, Pending}
end.
-%% Aquire a write lock, but do a read, used by
+%% Acquire a write lock, but do a read, used by
%% mnesia:wread/1
rwlock(Tid, Store, Oid) ->
@@ -657,9 +678,10 @@ rwlock(Tid, Store, Oid) ->
Lock = write,
case need_lock(Store, Tab, Key, Lock) of
yes ->
- {Ns, Majority} = w_nodes(Tab),
+ {Ns0, Majority} = w_nodes(Tab),
+ Ns = [Node|lists:delete(Node,Ns0)],
check_majority(Majority, Tab, Ns),
- Res = get_rwlocks_on_nodes(Ns, rwlock, Node, Store, Tid, Oid),
+ Res = get_rwlocks_on_nodes(Ns, make_ref(), Store, Tid, Oid),
?ets_insert(Store, {{locks, Tab, Key}, Lock}),
Res;
no ->
@@ -718,7 +740,7 @@ sticky_rwlock(Tid, Store, Oid) ->
sticky_lock(Tid, Store, Oid, read_write).
sticky_lock(Tid, Store, {Tab, Key} = Oid, Lock) ->
- N = val({Tab, where_to_read}),
+ N = val({Tab, where_to_read}),
if
node() == N ->
case need_lock(Store, Tab, Key, write) of
@@ -805,9 +827,9 @@ sticky_wlock_table(Tid, Store, Tab) ->
%% acquire a wlock on Oid
%% We store a {Tabname, write, Tid} in all locktables
%% on all nodes containing a copy of Tabname
-%% We also store an item {{locks, Tab, Key}, write} in the
+%% We also store an item {{locks, Tab, Key}, write} in the
%% local store when we have acquired the lock.
-%%
+%%
wlock(Tid, Store, Oid) ->
wlock(Tid, Store, Oid, _CheckMajority = true).
@@ -845,10 +867,10 @@ wlock_no_exist(Tid, Store, Tab, Ns) ->
need_lock(Store, Tab, Key, LockPattern) ->
TabL = ?ets_match_object(Store, {{locks, Tab, ?ALL}, LockPattern}),
- if
+ if
TabL == [] ->
KeyL = ?ets_match_object(Store, {{locks, Tab, Key}, LockPattern}),
- if
+ if
KeyL == [] ->
yes;
true ->
@@ -865,7 +887,7 @@ del_debug() ->
erase(mnesia_wlock_nodes).
%% We first send lock request to the local node if it is part of the lockers
-%% then the first sorted node then to the rest of the lockmanagers on all
+%% then the first sorted node then to the rest of the lockmanagers on all
%% nodes holding a copy of the table
get_wlocks_on_nodes([Node | Tail], Orig, Store, Request, Oid) ->
@@ -875,51 +897,31 @@ get_wlocks_on_nodes([Node | Tail], Orig, Store, Request, Oid) ->
case node() of
Node -> %% Local done try one more
get_wlocks_on_nodes(Tail, Orig, Store, Request, Oid);
- _ -> %% The first succeded cont with the rest
+	_ ->  %% The first succeeded, continue with the rest
get_wlocks_on_nodes(Tail, Store, Request),
receive_wlocks(Tail, Orig, Store, Oid)
end;
-get_wlocks_on_nodes([], Orig, _Store, _Request, _Oid) ->
+get_wlocks_on_nodes([], Orig, _Store, _Request, _Oid) ->
Orig.
get_wlocks_on_nodes([Node | Tail], Store, Request) ->
{?MODULE, Node} ! Request,
?ets_insert(Store,{nodes, Node}),
get_wlocks_on_nodes(Tail, Store, Request);
-get_wlocks_on_nodes([], _, _) ->
+get_wlocks_on_nodes([], _, _) ->
ok.
-get_rwlocks_on_nodes([ReadNode|Tail], _Res, ReadNode, Store, Tid, Oid) ->
+get_rwlocks_on_nodes([ReadNode|Tail], Ref, Store, Tid, Oid) ->
Op = {self(), {read_write, Tid, Oid}},
{?MODULE, ReadNode} ! Op,
?ets_insert(Store, {nodes, ReadNode}),
- Res = receive_wlocks([ReadNode], undefined, Store, Oid),
- case node() of
- ReadNode ->
- get_rwlocks_on_nodes(Tail, Res, ReadNode, Store, Tid, Oid);
- _ ->
- get_wlocks_on_nodes(Tail, Store, {self(), {write, Tid, Oid}}),
- receive_wlocks(Tail, Res, Store, Oid)
+ case receive_wlocks([ReadNode], Ref, Store, Oid) of
+ Ref ->
+ get_rwlocks_on_nodes(Tail, Ref, Store, Tid, Oid);
+ Res ->
+ get_wlocks_on_nodes(Tail, Res, Store, {self(), {write, Tid, Oid}}, Oid)
end;
-get_rwlocks_on_nodes([Node | Tail], Res, ReadNode, Store, Tid, Oid) ->
- Op = {self(), {write, Tid, Oid}},
- {?MODULE, Node} ! Op,
- ?ets_insert(Store, {nodes, Node}),
- receive_wlocks([Node], undefined, Store, Oid),
- if node() == Node ->
- get_rwlocks_on_nodes(Tail, Res, ReadNode, Store, Tid, Oid);
- Res == rwlock -> %% Hmm
- Rest = lists:delete(ReadNode, Tail),
- Op2 = {self(), {read_write, Tid, Oid}},
- {?MODULE, ReadNode} ! Op2,
- ?ets_insert(Store, {nodes, ReadNode}),
- get_wlocks_on_nodes(Rest, Store, {self(), {write, Tid, Oid}}),
- receive_wlocks([ReadNode|Rest], undefined, Store, Oid);
- true ->
- get_wlocks_on_nodes(Tail, Store, {self(), {write, Tid, Oid}}),
- receive_wlocks(Tail, Res, Store, Oid)
- end;
-get_rwlocks_on_nodes([],Res,_,_,_,_) ->
+get_rwlocks_on_nodes([],Res,_,_,_) ->
Res.
receive_wlocks([], Res, _Store, _Oid) ->
@@ -944,8 +946,8 @@ receive_wlocks(Nodes = [This|Ns], Res, Store, Oid) ->
Tail = lists:delete(Node,Nodes),
Nonstuck = lists:delete(Sticky,Tail),
[?ets_insert(Store, {nodes, NSNode}) || NSNode <- Nonstuck],
- case lists:member(Sticky,Tail) of
- true ->
+ case lists:member(Sticky,Tail) of
+ true ->
sticky_flush(Nonstuck,Store),
receive_wlocks([Sticky], Res, Store, Oid);
false ->
@@ -957,7 +959,7 @@ receive_wlocks(Nodes = [This|Ns], Res, Store, Oid) ->
flush_remaining(Ns, This, Reason1)
end.
-sticky_flush([], _) ->
+sticky_flush([], _) ->
del_debug(),
ok;
sticky_flush(Ns=[Node | Tail], Store) ->
@@ -991,7 +993,7 @@ opt_lookup_in_client(lookup_in_client, Oid, Lock) ->
%% Table has been deleted from this node,
%% restart the transaction.
#cyclic{op = read, lock = Lock, oid = Oid, lucky = nowhere};
- Val ->
+ Val ->
Val
end;
opt_lookup_in_client(Val, _Oid, _Lock) ->
@@ -1000,8 +1002,8 @@ opt_lookup_in_client(Val, _Oid, _Lock) ->
return_granted_or_nodes({_, ?ALL} , Nodes) -> Nodes;
return_granted_or_nodes({?GLOBAL, _}, Nodes) -> Nodes;
return_granted_or_nodes(_ , _Nodes) -> granted.
-
-%% We store a {Tab, read, From} item in the
+
+%% We store a {Tab, read, From} item in the
%% locks table on the node where we actually do pick up the object
%% and we also store an item {lock, Oid, read} in our local store
%% so that we can release any locks we hold when we commit.
@@ -1059,9 +1061,9 @@ rlock_get_reply(Node, Store, Oid, {granted, V}) ->
?ets_insert(Store, {{locks, Tab, Key}, read}),
?ets_insert(Store, {nodes, Node}),
case opt_lookup_in_client(V, Oid, read) of
- C = #cyclic{} ->
+ C = #cyclic{} ->
mnesia:abort(C);
- Val ->
+ Val ->
Val
end;
rlock_get_reply(Node, Store, Oid, granted) ->
@@ -1079,7 +1081,7 @@ rlock_get_reply(Node, Store, Tab, {granted, V, RealKeys}) ->
rlock_get_reply(_Node, _Store, _Oid, {not_granted, Reason}) ->
exit({aborted, Reason});
-rlock_get_reply(_Node, Store, Oid, {switch, N2, Req}) ->
+rlock_get_reply(_Node, Store, Oid, {switch, N2, Req}) ->
?ets_insert(Store, {nodes, N2}),
{?MODULE, N2} ! Req,
rlock_get_reply(N2, Store, Oid, l_req_rec(N2, Store)).
@@ -1095,7 +1097,7 @@ ixrlock(Tid, Store, Tab, IxKey, Pos) ->
%%% Old code
%% R = l_request(Node, {ix_read, Tid, Tab, IxKey, Pos}, Store),
%% rlock_get_reply(Node, Store, Tab, R)
-
+
case need_lock(Store, Tab, ?ALL, read) of
no when Node =:= node() ->
ix_read_res(Tab,IxKey,Pos);
@@ -1135,11 +1137,23 @@ rec_requests([], _Oid, _Store) ->
get_held_locks() ->
?MODULE ! {get_table, self(), mnesia_held_locks},
- receive {mnesia_held_locks, Locks} -> Locks end.
+ Locks = receive {mnesia_held_locks, Ls} -> Ls after 5000 -> [] end,
+ rewrite_locks(Locks, []).
+
+rewrite_locks([{Oid, _, Ls}|Locks], Acc0) ->
+ Acc = rewrite_locks(Ls, Oid, Acc0),
+ rewrite_locks(Locks, Acc);
+rewrite_locks([], Acc) ->
+ lists:reverse(Acc).
+
+rewrite_locks([{Op, Tid}|Ls], Oid, Acc) ->
+ rewrite_locks(Ls, Oid, [{Oid, Op, Tid}|Acc]);
+rewrite_locks([], _, Acc) ->
+ Acc.
get_lock_queue() ->
?MODULE ! {get_table, self(), mnesia_lock_queue},
- Q = receive {mnesia_lock_queue, Locks} -> Locks end,
+ Q = receive {mnesia_lock_queue, Locks} -> Locks after 5000 -> [] end,
[{Oid, Op, Pid, Tid, WFT} || {queue, Oid, Tid, Op, Pid, WFT} <- Q].
do_stop() ->
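
For external callers, get_held_locks/0 above flattens the new compact {Oid, MaxLock, Holders} entries back into the old {Oid, Op, Tid} triples via rewrite_locks/2. The same transformation can be written as one comprehension; a shell-friendly sketch, assuming Locks is bound to a list of the new entries (ordering aside, it yields the same triples):

%% Flatten {Oid, _MaxLock, [{Op, Tid}]} entries into {Oid, Op, Tid} triples.
Triples = [{Oid, Op, Tid} || {Oid, _Max, Holders} <- Locks, {Op, Tid} <- Holders].
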
diff --git a/lib/mnesia/src/mnesia_log.erl b/lib/mnesia/src/mnesia_log.erl
index 9e804cc4c2..18303869ed 100644
--- a/lib/mnesia/src/mnesia_log.erl
+++ b/lib/mnesia/src/mnesia_log.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
+%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
-%%
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -180,8 +180,8 @@
view/1,
write_trans_log_header/0
]).
-
-
+
+
-compile({no_auto_import,[error/2]}).
-include("mnesia.hrl").
@@ -210,7 +210,7 @@ decision_tab_version() -> "1.0".
dcl_version() -> "1.0".
dcd_version() -> "1.0".
-
+
append(Log, Bin) when is_binary(Bin) ->
disk_log:balog(Log, Bin);
append(Log, Term) ->
@@ -218,9 +218,9 @@ append(Log, Term) ->
%% Synced append
sappend(Log, Bin) when is_binary(Bin) ->
- ok = disk_log:blog(Log, Bin);
+ ok = disk_log:blog(Log, Bin);
sappend(Log, Term) ->
- ok = disk_log:log(Log, Term).
+ ok = disk_log:log(Log, Term).
%% Write commit records to the latest_log
log(C) when C#commit.disc_copies == [],
@@ -283,7 +283,7 @@ previous_log_file() -> dir("PREVIOUS.LOG").
decision_log_file() -> dir(decision_log_name()).
decision_tab_file() -> dir(decision_tab_name()).
-
+
previous_decision_log_file() -> dir("PDECISION.LOG").
latest_log_name() -> "LATEST.LOG".
@@ -297,10 +297,10 @@ init() ->
true ->
Prev = previous_log_file(),
verify_no_exists(Prev),
-
+
Latest = latest_log_file(),
verify_no_exists(Latest),
-
+
Header = trans_log_header(),
open_log(latest_log, Header, Latest);
false ->
@@ -346,20 +346,20 @@ open_log(Name, Header, Fname, Exists, Repair, Mode) ->
write_header(Log, Header),
Log;
{repaired, Log, _Recover, BadBytes} ->
- mnesia_lib:important("Data may be missing, log ~p repaired: Lost ~p bytes~n",
+ mnesia_lib:important("Data may be missing, log ~p repaired: Lost ~p bytes~n",
[Fname, BadBytes]),
Log;
{error, Reason} when Repair == true ->
file:delete(Fname),
- mnesia_lib:important("Data may be missing, Corrupt logfile deleted: ~p, ~p ~n",
+ mnesia_lib:important("Data may be missing, Corrupt logfile deleted: ~p, ~p ~n",
[Fname, Reason]),
- %% Create a new
+ %% Create a new
open_log(Name, Header, Fname, false, false, read_write);
{error, Reason} ->
fatal("Cannot open log file ~p: ~p~n", [Fname, Reason])
end.
-write_header(Log, Header) ->
+write_header(Log, Header) ->
append(Log, Header).
write_trans_log_header() ->
@@ -376,12 +376,12 @@ stop() ->
close_log(Log) ->
%% io:format("mnesia_log:close_log ~p~n", [Log]),
%% io:format("mnesia_log:close_log ~p~n", [Log]),
- case disk_log:sync(Log) of
+ case disk_log:sync(Log) of
ok -> ok;
- {error, {read_only_mode, Log}} ->
+ {error, {read_only_mode, Log}} ->
ok;
- {error, Reason} ->
- mnesia_lib:important("Failed syncing ~p to_disk reason ~p ~n",
+ {error, Reason} ->
+ mnesia_lib:important("Failed syncing ~p to_disk reason ~p ~n",
[Log, Reason])
end,
mnesia_monitor:close_log(Log).
@@ -392,7 +392,7 @@ unsafe_close_log(Log) ->
purge_some_logs() ->
- mnesia_monitor:unsafe_close_log(latest_log),
+ mnesia_monitor:unsafe_close_log(latest_log),
file:delete(latest_log_file()),
file:delete(decision_tab_file()).
@@ -466,10 +466,10 @@ chunk_log(Log, Cont) ->
[Log, Reason]);
{C2, Chunk, _BadBytes} ->
%% Read_only case, should we warn about the bad log file?
- %% BUGBUG Should we crash if Repair == false ??
+ %% BUGBUG Should we crash if Repair == false ??
%% We got to check this !!
mnesia_lib:important("~p repaired, lost ~p bad bytes~n", [Log, _BadBytes]),
- {C2, Chunk};
+ {C2, Chunk};
Other ->
Other
end.
@@ -492,7 +492,7 @@ open_decision_log() ->
Latest = decision_log_file(),
open_log(decision_log, decision_log_header(), Latest),
start.
-
+
prepare_decision_log_dump() ->
Prev = previous_decision_log_file(),
prepare_decision_log_dump(exists(Prev), Prev).
@@ -586,11 +586,11 @@ view_file(C, Log) ->
eof;
{C2, Terms, _BadBytes} ->
dbg_out("Lost ~p bytes in ~p ~n", [_BadBytes, Log]),
- lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
+ lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
Terms),
view_file(C2, Log);
{C2, Terms} ->
- lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
+ lists:foreach(fun(X) -> mnesia_lib:show("~p~n", [X]) end,
Terms),
view_file(C2, Log)
end.
@@ -655,7 +655,7 @@ check_backup_args([Arg | Tail], B) ->
check_backup_args([], B) ->
{ok, B}.
-check_backup_arg_type(Arg, B) ->
+check_backup_arg_type(Arg, B) ->
case Arg of
{scope, global} ->
B#backup_args{scope = global};
@@ -714,7 +714,7 @@ select_tables(AllTabs, B) ->
safe_write(B, []) ->
B;
-safe_write(B, Recs) ->
+safe_write(B, Recs) ->
safe_apply(B, write, [B#backup_args.opaque, Recs]).
backup_schema(B, Tabs) ->
@@ -754,7 +754,7 @@ abort_write(B, What, Args, Reason) ->
[Mod, abort_write, [Opaque], Other]),
throw({error, Reason})
end.
-
+
backup_tab(Tab, B) ->
Name = B#backup_args.name,
case mnesia_checkpoint:most_local_node(Name, Tab) of
@@ -768,7 +768,7 @@ backup_tab(Tab, B) ->
{error, Reason} ->
abort_write(B, {?MODULE, backup_tab}, [Tab, B], {error, Reason})
end.
-
+
tab_copier(Pid, B, Tab) when is_record(B, backup_args) ->
%% Intentional crash at exit
Name = B#backup_args.name,
@@ -829,7 +829,7 @@ handle_last(Pid, _Acc) ->
exit(normal).
iterate(B, Name, Tab, Pid, Source, Age, Pass, Acc) ->
- Fun =
+ Fun =
if
Pid == self() ->
RecName = val({Tab, record_name}),
@@ -874,7 +874,7 @@ tab_receiver(Pid, B, Tab, RecName, Slot) ->
Recs2 = rec_filter(B, Tab, RecName, Recs),
B2 = safe_write(B, Recs2),
tab_receiver(Pid, B2, Tab, RecName, Next);
-
+
{Pid, {last, {ok,_}}} ->
B;
@@ -885,7 +885,7 @@ tab_receiver(Pid, B, Tab, RecName, Slot) ->
Reason = {error, {"Tab copier crashed", {'EXIT', R}}},
abort_write(B, {?MODULE, remote_tab_sender}, [self(), B, Tab], Reason);
Msg ->
- R = {error, {"Tab receiver got unexpected msg", Msg}},
+ R = {error, {"Tab receiver got unexpected msg", Msg}},
abort_write(B, {?MODULE, remote_tab_sender}, [self(), B, Tab], R)
end.
@@ -910,9 +910,9 @@ ets2dcd(Tab, Ftype) ->
case Ftype of
dcd -> mnesia_lib:tab2dcd(Tab);
dmp -> mnesia_lib:tab2dmp(Tab)
- end,
+ end,
TmpF = mnesia_lib:tab2tmp(Tab),
- file:delete(TmpF),
+ file:delete(TmpF),
Log = open_log({Tab, ets2dcd}, dcd_log_header(), TmpF, false),
mnesia_lib:db_fixtable(ram_copies, Tab, true),
ok = ets2dcd(mnesia_lib:db_init_chunk(ram_copies, Tab, 1000), Tab, Log),
@@ -926,8 +926,8 @@ ets2dcd(Tab, Ftype) ->
ets2dcd('$end_of_table', _Tab, _Log) ->
ok;
-ets2dcd({Recs, Cont}, Tab, Log) ->
- ok = disk_log:alog_terms(Log, Recs),
+ets2dcd({Recs, Cont}, Tab, Log) ->
+ ok = disk_log:log_terms(Log, Recs),
ets2dcd(mnesia_lib:db_chunk(ram_copies, Cont), Tab, Log).
dcd2ets(Tab) ->
@@ -937,12 +937,12 @@ dcd2ets(Tab, Rep) ->
Dcd = mnesia_lib:tab2dcd(Tab),
case mnesia_lib:exists(Dcd) of
true ->
- Log = open_log({Tab, dcd2ets}, dcd_log_header(), Dcd,
+ Log = open_log({Tab, dcd2ets}, dcd_log_header(), Dcd,
true, Rep, read_only),
Data = chunk_log(Log, start),
ok = insert_dcdchunk(Data, Log, Tab),
close_log(Log),
- load_dcl(Tab, Rep);
+ load_dcl(Tab, Rep);
false -> %% Handle old dets files, and conversion from disc_only to disc.
Fname = mnesia_lib:tab2dat(Tab),
Type = val({Tab, setorbag}),
@@ -956,13 +956,13 @@ dcd2ets(Tab, Rep) ->
end
end.
-insert_dcdchunk({Cont, [LogH | Rest]}, Log, Tab)
- when is_record(LogH, log_header),
- LogH#log_header.log_kind == dcd_log,
- LogH#log_header.log_version >= "1.0" ->
- insert_dcdchunk({Cont, Rest}, Log, Tab);
+insert_dcdchunk({Cont, [LogH | Rest]}, Log, Tab)
+ when is_record(LogH, log_header),
+ LogH#log_header.log_kind == dcd_log,
+ LogH#log_header.log_version >= "1.0" ->
+ insert_dcdchunk({Cont, Rest}, Log, Tab);
-insert_dcdchunk({Cont, Recs}, Log, Tab) ->
+insert_dcdchunk({Cont, Recs}, Log, Tab) ->
true = ets:insert(Tab, Recs),
insert_dcdchunk(chunk_log(Log, Cont), Log, Tab);
insert_dcdchunk(eof, _Log, _Tab) ->
@@ -971,13 +971,13 @@ insert_dcdchunk(eof, _Log, _Tab) ->
load_dcl(Tab, Rep) ->
FName = mnesia_lib:tab2dcl(Tab),
case mnesia_lib:exists(FName) of
- true ->
+ true ->
Name = {load_dcl,Tab},
- open_log(Name,
- dcl_log_header(),
- FName,
+ open_log(Name,
+ dcl_log_header(),
+ FName,
true,
- Rep,
+ Rep,
read_only),
FirstChunk = chunk_log(Name, start),
N = insert_logchunk(FirstChunk, Name, 0),
@@ -1015,13 +1015,14 @@ add_recs([{{Tab, Key}, Val, update_counter} | Rest], N) ->
true = ets:insert(Tab, Zero)
end,
add_recs(Rest, N+1);
-add_recs([LogH|Rest], N)
- when is_record(LogH, log_header),
- LogH#log_header.log_kind == dcl_log,
- LogH#log_header.log_version >= "1.0" ->
+add_recs([LogH|Rest], N)
+ when is_record(LogH, log_header),
+ LogH#log_header.log_kind == dcl_log,
+ LogH#log_header.log_version >= "1.0" ->
add_recs(Rest, N);
add_recs([{{Tab, _Key}, _Val, clear_table} | Rest], N) ->
- true = ets:match_delete(Tab, '_'),
- add_recs(Rest, N+ets:info(Tab, size));
+ Size = ets:info(Tab, size),
+ true = ets:delete_all_objects(Tab),
+ add_recs(Rest, N+Size);
add_recs([], N) ->
N.
diff --git a/lib/mnesia/src/mnesia_monitor.erl b/lib/mnesia/src/mnesia_monitor.erl
index b6eda9ad3a..c08bbc879f 100644
--- a/lib/mnesia/src/mnesia_monitor.erl
+++ b/lib/mnesia/src/mnesia_monitor.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -76,13 +76,13 @@
-include("mnesia.hrl").
--record(state, {supervisor, pending_negotiators = [],
+-record(state, {supervisor, pending_negotiators = [],
going_down = [], tm_started = false, early_connects = [],
connecting, mq = []}).
--define(current_protocol_version, {7,6}).
+-define(current_protocol_version, {8,1}).
--define(previous_protocol_version, {7,5}).
+-define(previous_protocol_version, {8,0}).
start() ->
gen_server:start_link({local, ?MODULE}, ?MODULE,
@@ -151,12 +151,12 @@ check_protocol([{Node, {accept, Mon, Version, Protocol}} | Tail], Protocols) ->
case lists:member(Protocol, Protocols) of
true ->
case Protocol == protocol_version() of
- true ->
+ true ->
set({protocol, Node}, {Protocol, false});
false ->
set({protocol, Node}, {Protocol, true})
end,
- [node(Mon) | check_protocol(Tail, Protocols)];
+ [node(Mon) | check_protocol(Tail, Protocols)];
false ->
verbose("Failed to connect with ~p. ~p protocols rejected. "
"expected version = ~p, expected protocol = ~p~n",
@@ -179,7 +179,7 @@ check_protocol([], [Protocol | _Protocols]) ->
set(protocol_version, Protocol),
[].
-protocol_version() ->
+protocol_version() ->
case ?catch_val(protocol_version) of
{'EXIT', _} -> ?current_protocol_version;
Version -> Version
@@ -188,15 +188,15 @@ protocol_version() ->
%% A sorted list of acceptable protocols the
%% preferred protocols are first in the list
acceptable_protocol_versions() ->
- [protocol_version(), ?previous_protocol_version].
-
+ [protocol_version(), ?previous_protocol_version, {7,6}].
+
needs_protocol_conversion(Node) ->
case {?catch_val({protocol, Node}), protocol_version()} of
{{'EXIT', _}, _} ->
false;
- {{_, Bool}, ?current_protocol_version} ->
+ {{_, Bool}, ?current_protocol_version} ->
Bool;
- {{_, Bool}, _} ->
+ {{_, Bool}, _} ->
not Bool
end.
@@ -255,15 +255,15 @@ terminate_proc(Who, Reason, _State) ->
%%----------------------------------------------------------------------
init([Parent]) ->
process_flag(trap_exit, true),
- ?ets_new_table(mnesia_gvar, [set, public, named_table]),
- ?ets_new_table(mnesia_stats, [set, public, named_table]),
+ ?ets_new_table(mnesia_gvar, [set, public, named_table]),
+ ?ets_new_table(mnesia_stats, [set, public, named_table]),
set(subscribers, []),
set(activity_subscribers, []),
mnesia_lib:verbose("~p starting: ~p~n", [?MODULE, self()]),
Version = mnesia:system_info(version),
set(version, Version),
dbg_out("Version: ~p~n", [Version]),
-
+
case catch process_config_args(env()) of
ok ->
mnesia_lib:set({'$$$_report', current_pos}, 0),
@@ -283,7 +283,7 @@ init([Parent]) ->
set(checkpoints, []),
set(pending_checkpoints, []),
set(pending_checkpoint_pids, []),
-
+
{ok, #state{supervisor = Parent}};
{'EXIT', Reason} ->
mnesia_lib:report_fatal("Bad configuration: ~p~n", [Reason]),
@@ -398,9 +398,9 @@ handle_call({unsafe_close_log, Name}, _From, State) ->
disk_log:close(Name),
{reply, ok, State};
-handle_call({negotiate_protocol, Mon, _Version, _Protocols}, _From, State)
+handle_call({negotiate_protocol, Mon, _Version, _Protocols}, _From, State)
when State#state.tm_started == false ->
- State2 = State#state{early_connects = [node(Mon) | State#state.early_connects]},
+ State2 = State#state{early_connects = [node(Mon) | State#state.early_connects]},
{reply, {node(), {reject, self(), uninitialized, uninitialized}}, State2};
%% From remote monitor..
@@ -412,11 +412,13 @@ handle_call({negotiate_protocol, Mon, Version, Protocols}, From, State)
true ->
accept_protocol(Mon, MyVersion, Protocol, From, State);
false ->
- %% in this release we should be able to handle the previous
+ %% in this release we should be able to handle the previous
%% protocol
case hd(Protocols) of
?previous_protocol_version ->
accept_protocol(Mon, MyVersion, ?previous_protocol_version, From, State);
+ {7,6} ->
+ accept_protocol(Mon, MyVersion, {7,6}, From, State);
_ ->
verbose("Connection with ~p rejected. "
"version = ~p, protocols = ~p, "
@@ -427,7 +429,7 @@ handle_call({negotiate_protocol, Mon, Version, Protocols}, From, State)
end;
%% Local request to negotiate with other monitors (nodes).
-handle_call({negotiate_protocol, Nodes}, From, State) ->
+handle_call({negotiate_protocol, Nodes}, From, State) ->
case mnesia_lib:intersect(State#state.going_down, Nodes) of
[] ->
spawn_link(?MODULE, negotiate_protocol_impl, [Nodes, From]),
@@ -461,7 +463,7 @@ accept_protocol(Mon, Version, Protocol, From, State) ->
%% No need for wait
link(Mon), %% link to remote Monitor
case Protocol == protocol_version() of
- true ->
+ true ->
set({protocol, Node}, {Protocol, false});
false ->
set({protocol, Node}, {Protocol, true})
@@ -509,7 +511,7 @@ handle_cast({disconnect, Node}, State) ->
ignore;
undefined ->
ignore;
- RemoteMon when is_pid(RemoteMon) ->
+ RemoteMon when is_pid(RemoteMon) ->
unlink(RemoteMon)
end,
{noreply, State};
@@ -534,9 +536,13 @@ handle_info({'EXIT', Pid, R}, State) when Pid == State#state.supervisor ->
dbg_out("~p was ~p by supervisor~n",[?MODULE, R]),
{stop, R, State};
-handle_info({'EXIT', Pid, fatal}, State) when node(Pid) == node() ->
+handle_info({'EXIT', Pid, fatal}, State) when node(Pid) == node() ->
dbg_out("~p got FATAL ERROR from: ~p~n",[?MODULE, Pid]),
- exit(State#state.supervisor, shutdown),
+ %% This may hang the supervisor if a shutdown happens at the same time as a fatal
+ %% error is in progress
+ %% exit(State#state.supervisor, shutdown),
+ %% It is better to kill an innocent process
+ catch exit(whereis(mnesia_locker), kill),
{noreply, State};
handle_info(Msg = {'EXIT',Pid,_}, State) ->
@@ -550,7 +556,7 @@ handle_info(Msg = {'EXIT',Pid,_}, State) ->
Node /= node() ->
{noreply, State#state{mq = State#state.mq ++ [{info, Msg}]}};
true ->
- %% We have probably got an exit signal from
+ %% We have probably got an exit signal from
%% disk_log or dets
Hint = "Hint: check that the disk still is writable",
fatal("~p got unexpected info: ~p; ~p~n",
@@ -567,10 +573,10 @@ handle_info({nodeup, Node}, State) ->
%% Let's check if Mnesia is running there in order
%% to detect if the network has been partitioned
%% due to communication failure.
-
+
HasDown = mnesia_recover:has_mnesia_down(Node),
ImRunning = mnesia_lib:is_running(),
-
+
if
%% If I'm not running the test will be made later.
HasDown == true, ImRunning == yes ->
@@ -589,7 +595,7 @@ handle_info({disk_log, _Node, Log, Info}, State) ->
{truncated, _No} ->
ok;
_ ->
- mnesia_lib:important("Warning Log file ~p error reason ~s~n",
+ mnesia_lib:important("Warning Log file ~p error reason ~s~n",
[Log, disk_log:format_error(Info)])
end,
{noreply, State};
@@ -681,38 +687,38 @@ env() ->
send_compressed
].
-default_env(access_module) ->
+default_env(access_module) ->
mnesia;
-default_env(auto_repair) ->
+default_env(auto_repair) ->
true;
-default_env(backup_module) ->
+default_env(backup_module) ->
mnesia_backup;
-default_env(debug) ->
+default_env(debug) ->
none;
default_env(dir) ->
Name = lists:concat(["Mnesia.", node()]),
filename:absname(Name);
-default_env(dump_log_load_regulation) ->
+default_env(dump_log_load_regulation) ->
false;
-default_env(dump_log_time_threshold) ->
+default_env(dump_log_time_threshold) ->
timer:minutes(3);
-default_env(dump_log_update_in_place) ->
+default_env(dump_log_update_in_place) ->
true;
default_env(dump_log_write_threshold) ->
1000;
-default_env(embedded_mnemosyne) ->
+default_env(embedded_mnemosyne) ->
false;
-default_env(event_module) ->
+default_env(event_module) ->
mnesia_event;
-default_env(extra_db_nodes) ->
+default_env(extra_db_nodes) ->
[];
-default_env(ignore_fallback_at_startup) ->
+default_env(ignore_fallback_at_startup) ->
false;
default_env(fallback_error_function) ->
{mnesia, lkill};
-default_env(max_wait_for_decision) ->
+default_env(max_wait_for_decision) ->
infinity;
-default_env(schema_location) ->
+default_env(schema_location) ->
opt_disc;
default_env(core_dir) ->
false;
@@ -732,7 +738,7 @@ check_type(Env, Val) ->
NewVal ->
NewVal
end.
-
+
do_check_type(access_module, A) when is_atom(A) -> A;
do_check_type(auto_repair, B) -> bool(B);
do_check_type(backup_module, B) when is_atom(B) -> B;
@@ -749,7 +755,7 @@ do_check_type(dump_log_update_in_place, B) -> bool(B);
do_check_type(dump_log_write_threshold, I) when is_integer(I), I > 0 -> I;
do_check_type(event_module, A) when is_atom(A) -> A;
do_check_type(ignore_fallback_at_startup, B) -> bool(B);
-do_check_type(fallback_error_function, {Mod, Func})
+do_check_type(fallback_error_function, {Mod, Func})
when is_atom(Mod), is_atom(Func) -> {Mod, Func};
do_check_type(embedded_mnemosyne, B) -> bool(B);
do_check_type(extra_db_nodes, L) when is_list(L) ->
@@ -804,8 +810,8 @@ detect_inconcistency(Nodes, Context) ->
has_remote_mnesia_down(Node) ->
HasDown = mnesia_recover:has_mnesia_down(Node),
Master = mnesia_recover:get_master_nodes(schema),
- if
- HasDown == true, Master == [] ->
+ if
+ HasDown == true, Master == [] ->
{true, node()};
true ->
{false, node()}
diff --git a/lib/mnesia/src/mnesia_recover.erl b/lib/mnesia/src/mnesia_recover.erl
index b3eed1de6e..4750291a10 100644
--- a/lib/mnesia/src/mnesia_recover.erl
+++ b/lib/mnesia/src/mnesia_recover.erl
@@ -227,11 +227,13 @@ do_log_decision(D, DoTell, NodeD) ->
note_outcome(D2),
case mnesia_monitor:use_dir() of
true ->
- mnesia_log:append(latest_log, D2),
if
DoTell == true, Outcome /= unclear ->
tell_im_certain(NodeD#decision.disc_nodes--[node()],D2),
- tell_im_certain(NodeD#decision.ram_nodes--[node()], D2);
+ tell_im_certain(NodeD#decision.ram_nodes--[node()], D2),
+ mnesia_log:log(D2);
+ Outcome /= unclear ->
+ mnesia_log:log(D2);
true ->
ignore
end;
diff --git a/lib/mnesia/src/mnesia_schema.erl b/lib/mnesia/src/mnesia_schema.erl
index fef72ad39c..6e43052fb0 100644
--- a/lib/mnesia/src/mnesia_schema.erl
+++ b/lib/mnesia/src/mnesia_schema.erl
@@ -39,9 +39,10 @@
change_table_load_order/2,
change_table_majority/2,
change_table_frag/2,
- clear_table/1,
+%% clear_table/1, %% removed since it is not a schema op anymore
create_table/1,
cs2list/1,
+ vsn_cs2list/1,
del_snmp/1,
del_table_copy/2,
del_table_index/2,
@@ -65,6 +66,7 @@
merge_schema/0,
merge_schema/1,
move_table/3,
+ normalize_cs/2,
opt_create_dir/2,
prepare_commit/3,
purge_dir/2,
@@ -100,7 +102,7 @@
]).
%% Needed outside to be able to use/set table_properties
-%% from user (not supported)
+%% from user (not supported)
-export([schema_transaction/1,
insert_schema_ops/2,
do_create_table/1,
@@ -118,9 +120,9 @@
%% Here comes the init function which also resides in
%% this module, it is called upon by the trans server
%% at startup of the system
-%%
+%%
%% We have a meta table which looks like
-%% {table, schema,
+%% {table, schema,
%% {type, set},
%% {disc_copies, all},
%% {arity, 2}
@@ -149,14 +151,14 @@ exit_on_error(GoodRes) ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
- Value -> Value
+ {'EXIT', Reason} -> mnesia_lib:other_val(Var, Reason);
+ Value -> Value
end.
%% This function traverses all cstructs in the schema and
%% sets all values in mnesia_gvar accordingly for each table/cstruct
-set_schema('$end_of_table') ->
+set_schema('$end_of_table') ->
[];
set_schema(Tab) ->
do_set_schema(Tab),
@@ -186,6 +188,7 @@ do_set_schema(Tab, Cs) ->
[set({Tab, user_property, element(1, P)}, P) || P <- Cs#cstruct.user_properties],
set({Tab, frag_properties}, Cs#cstruct.frag_properties),
mnesia_frag:set_frag_hash(Tab, Cs#cstruct.frag_properties),
+ set({Tab, storage_properties}, Cs#cstruct.storage_properties),
set({Tab, attributes}, Cs#cstruct.attributes),
Arity = length(Cs#cstruct.attributes) + 1,
set({Tab, arity}, Arity),
@@ -253,8 +256,8 @@ version() ->
incr_version(Cs) ->
{{Major, Minor}, _} = Cs#cstruct.version,
Nodes = mnesia_lib:intersect(val({schema, disc_copies}),
- mnesia_lib:cs_to_nodes(Cs)),
- V =
+ mnesia_lib:cs_to_nodes(Cs)),
+ V =
case Nodes -- val({Cs#cstruct.name, active_replicas}) of
[] -> {Major + 1, 0}; % All replicas are active
_ -> {Major, Minor + 1} % Some replicas are inactive
@@ -359,7 +362,7 @@ delete_schema2() ->
{error, Reason} ->
{error, Reason}
end.
-
+
ensure_no_schema([H|T]) when is_atom(H) ->
case rpc:call(H, ?MODULE, remote_read_schema, []) of
{badrpc, Reason} ->
@@ -407,7 +410,7 @@ opt_create_dir(UseDir, Dir) when UseDir == true->
check_can_write(Dir);
false ->
case file:make_dir(Dir) of
- ok ->
+ ok ->
verbose("Create Directory ~p~n", [Dir]),
ok;
{error, Reason} ->
@@ -417,7 +420,7 @@ opt_create_dir(UseDir, Dir) when UseDir == true->
end;
opt_create_dir(false, _) ->
{error, {has_no_disc, node()}}.
-
+
check_can_write(Dir) ->
case file:read_file_info(Dir) of
{ok, FI} when FI#file_info.type == directory,
@@ -450,7 +453,7 @@ read_schema(Keep) ->
read_schema(Keep, IgnoreFallback) ->
lock_schema(),
- Res =
+ Res =
case mnesia:system_info(is_running) of
yes ->
{ok, ram, get_create_list(schema)};
@@ -477,7 +480,7 @@ read_disc_schema(Keep, IgnoreFallback) ->
case mnesia_bup:fallback_exists() of
true when IgnoreFallback == false, Running /= yes ->
mnesia_bup:fallback_to_schema();
- _ ->
+ _ ->
%% If we're running, we read the schema file even
%% if fallback exists
Dat = mnesia_lib:tab2dat(schema),
@@ -499,7 +502,7 @@ read_disc_schema(Keep, IgnoreFallback) ->
end.
do_read_disc_schema(Fname, Keep) ->
- T =
+ T =
case Keep of
false ->
Args = [{keypos, 2}, public, set],
@@ -523,7 +526,7 @@ do_read_disc_schema(Fname, Keep) ->
get_initial_schema(SchemaStorage, Nodes) ->
Cs = #cstruct{name = schema,
record_name = schema,
- attributes = [table, cstruct]},
+ attributes = [table, cstruct]},
Cs2 =
case SchemaStorage of
ram_copies -> Cs#cstruct{ram_copies = Nodes};
@@ -532,7 +535,7 @@ get_initial_schema(SchemaStorage, Nodes) ->
cs2list(Cs2).
read_cstructs_from_disc() ->
- %% Assumptions:
+ %% Assumptions:
%% - local schema lock in global
%% - use_dir is true
%% - Mnesia is not running
@@ -552,14 +555,14 @@ read_cstructs_from_disc() ->
end,
Cstructs = dets:traverse(Tab, Fun),
dets:close(Tab),
- {ok, Cstructs};
+ {ok, Cstructs};
{error, Reason} ->
{error, Reason}
end;
false ->
{error, "No schema file exists"}
end.
-
+
%% We run a very special type of transactions when we
%% we want to manipulate the schema.
@@ -593,20 +596,20 @@ schema_transaction(Fun) ->
%% This process may dump the transaction log, and should
%% therefore not be run in an application process
-%%
+%%
schema_coordinator(Client, _Fun, undefined) ->
Res = {aborted, {node_not_running, node()}},
Client ! {transaction_done, Res, self()},
unlink(Client);
-
+
schema_coordinator(Client, Fun, Controller) when is_pid(Controller) ->
%% Do not trap exit in order to automatically die
%% when the controller dies
link(Controller),
unlink(Client),
-
- %% Fulfull the transaction even if the client dies
+
+ %% Fulfil the transaction even if the client dies
Res = mnesia:transaction(Fun),
Client ! {transaction_done, Res, self()},
unlink(Controller), % Avoids spurious exit message
@@ -619,24 +622,99 @@ schema_coordinator(Client, Fun, Controller) when is_pid(Controller) ->
insert_schema_ops({_Mod, _Tid, Ts}, SchemaIOps) ->
do_insert_schema_ops(Ts#tidstore.store, SchemaIOps).
-
+
do_insert_schema_ops(Store, [Head | Tail]) ->
?ets_insert(Store, Head),
do_insert_schema_ops(Store, Tail);
do_insert_schema_ops(_Store, []) ->
ok.
+api_list2cs(List) when is_list(List) ->
+ Name = pick(unknown, name, List, must),
+ Keys = check_keys(Name, List, record_info(fields, cstruct)),
+ check_duplicates(Name, Keys),
+ list2cs(List);
+api_list2cs(Other) ->
+ mnesia:abort({badarg, Other}).
+
+vsn_cs2list(Cs) ->
+ cs2list(need_old_cstructs(), Cs).
+
cs2list(Cs) when is_record(Cs, cstruct) ->
Tags = record_info(fields, cstruct),
- rec2list(Tags, 2, Cs);
+ rec2list(Tags, Tags, 2, Cs);
cs2list(CreateList) when is_list(CreateList) ->
- CreateList.
-
-rec2list([Tag | Tags], Pos, Rec) ->
+ CreateList;
+%% 4.6
+cs2list(Cs) when element(1, Cs) == cstruct, tuple_size(Cs) == 19 ->
+ Tags = [name,type,ram_copies,disc_copies,disc_only_copies,
+ load_order,access_mode,majority,index,snmp,local_content,
+ record_name,attributes,
+ user_properties,frag_properties,storage_properties,
+ cookie,version],
+ rec2list(Tags, Tags, 2, Cs);
+%% 4.4.19
+cs2list(Cs) when element(1, Cs) == cstruct, tuple_size(Cs) == 18 ->
+ Tags = [name,type,ram_copies,disc_copies,disc_only_copies,
+ load_order,access_mode,majority,index,snmp,local_content,
+ record_name,attributes,user_properties,frag_properties,
+ cookie,version],
+ rec2list(Tags, Tags, 2, Cs);
+%% 4.4.18 and earlier
+cs2list(Cs) when element(1, Cs) == cstruct, tuple_size(Cs) == 17 ->
+ Tags = [name,type,ram_copies,disc_copies,disc_only_copies,
+ load_order,access_mode,index,snmp,local_content,
+ record_name,attributes,user_properties,frag_properties,
+ cookie,version],
+ rec2list(Tags, Tags, 2, Cs).
+
+cs2list(false, Cs) ->
+ cs2list(Cs);
+cs2list(ver4_4_18, Cs) -> %% Or earlier
+ Orig = record_info(fields, cstruct),
+ Tags = [name,type,ram_copies,disc_copies,disc_only_copies,
+ load_order,access_mode,index,snmp,local_content,
+ record_name,attributes,user_properties,frag_properties,
+ cookie,version],
+ rec2list(Tags, Orig, 2, Cs);
+cs2list(ver4_4_19, Cs) ->
+ Orig = record_info(fields, cstruct),
+ Tags = [name,type,ram_copies,disc_copies,disc_only_copies,
+ load_order,access_mode,majority,index,snmp,local_content,
+ record_name,attributes,user_properties,frag_properties,
+ cookie,version],
+ rec2list(Tags, Orig, 2, Cs);
+cs2list(ver4_6, Cs) ->
+ Orig = record_info(fields, cstruct),
+ Tags = [name,type,ram_copies,disc_copies,disc_only_copies,
+ load_order,access_mode,majority,index,snmp,local_content,
+ record_name,attributes,
+ user_properties,frag_properties,storage_properties,
+ cookie,version],
+ rec2list(Tags, Orig, 2, Cs).
+
+
+rec2list([Tag | Tags], [Tag | Orig], Pos, Rec) ->
Val = element(Pos, Rec),
- [{Tag, Val} | rec2list(Tags, Pos + 1, Rec)];
-rec2list([], _Pos, _Rec) ->
- [].
+ [{Tag, Val} | rec2list(Tags, Orig, Pos + 1, Rec)];
+rec2list([], _, _Pos, _Rec) ->
+ [];
+rec2list(Tags, [_|Orig], Pos, Rec) ->
+ rec2list(Tags, Orig, Pos+1, Rec).
+
+normalize_cs(Cstructs, Node) ->
+ %% backward-compatibility hack; normalize before returning
+ case need_old_cstructs([Node]) of
+ false ->
+ Cstructs;
+ Version ->
+ %% some other format
+ [convert_cs(Version, Cs) || Cs <- Cstructs]
+ end.
+
+convert_cs(Version, Cs) ->
+ Fields = [Value || {_, Value} <- cs2list(Version, Cs)],
+ list_to_tuple([cstruct|Fields]).
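
normalize_cs/2 and convert_cs/2 let a node on this release hand cstructs to nodes that still run an older record layout: the record is flattened with the version-aware cs2list/2 above and rebuilt as a plain tuple of the arity the remote node expects. A minimal sketch of the same idea pinned to one version (a 4.4.19 peer, i.e. with the majority field but without storage_properties); it simply restates convert_cs/2:

    downgrade_for_4_4_19(Cs) ->
        Fields = [Value || {_Tag, Value} <- cs2list(ver4_4_19, Cs)],
        list_to_tuple([cstruct | Fields]).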
list2cs(List) when is_list(List) ->
Name = pick(unknown, name, List, must),
@@ -667,10 +745,30 @@ list2cs(List) when is_list(List) ->
Frag = pick(Name, frag_properties, List, []),
verify({alt, [nil, list]}, mnesia_lib:etype(Frag),
- {badarg, Name, {frag_properties, Frag}}),
-
- Keys = check_keys(Name, List, record_info(fields, cstruct)),
- check_duplicates(Name, Keys),
+ {badarg, Name, {frag_properties, Frag}}),
+
+ BEProps = pick(Name, storage_properties, List, []),
+ verify({alt, [nil, list]}, mnesia_lib:etype(BEProps),
+ {badarg, Name, {storage_properties, BEProps}}),
+ CheckProp = fun(Opt, Opts) when is_atom(Opt) ->
+ lists:member(Opt, Opts)
+ andalso mnesia:abort({badarg, Name, Opt});
+ (Tuple, Opts) when is_tuple(Tuple) ->
+ lists:member(element(1,Tuple), Opts)
+ andalso mnesia:abort({badarg, Name, Tuple});
+ (What,_) ->
+ mnesia:abort({badarg, Name, What})
+ end,
+ BadEtsOpts = [set, ordered_set, bag, duplicate_bag,
+ public, private, protected,
+ keypos, named_table],
+ EtsOpts = proplists:get_value(ets, BEProps, []),
+ is_list(EtsOpts) orelse mnesia:abort({badarg, Name, {ets, EtsOpts}}),
+ [CheckProp(Prop, BadEtsOpts) || Prop <- EtsOpts],
+ BadDetsOpts = [type, keypos, repair, access, file],
+ DetsOpts = proplists:get_value(dets, BEProps, []),
+ is_list(DetsOpts) orelse mnesia:abort({badarg, Name, {dets, DetsOpts}}),
+ [CheckProp(Prop, BadDetsOpts) || Prop <- DetsOpts],
#cstruct{name = Name,
ram_copies = Rc,
disc_copies = Dc,
@@ -686,10 +784,9 @@ list2cs(List) when is_list(List) ->
attributes = Attrs,
user_properties = lists:sort(UserProps),
frag_properties = lists:sort(Frag),
+ storage_properties = lists:sort(BEProps),
cookie = Cookie,
- version = Version};
-list2cs(Other) ->
- mnesia:abort({badarg, Other}).
+ version = Version}.
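
list2cs/1 now understands a storage_properties option: a property list keyed on the backing store (ets or dets) whose options are passed through when the underlying tables are created; options that Mnesia must control itself (type, keypos, named_table, file and so on) are rejected with badarg. A hedged usage sketch, with a hypothetical table name and option values; the dets list is only consulted for disc_only_copies replicas:

    create_with_storage_props() ->
        mnesia:create_table(example_tab,
            [{disc_copies, [node()]},
             {attributes, [key, val]},
             {storage_properties,
              [{ets,  [compressed]},
               {dets, [{auto_save, 5000}]}]}]).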
pick(Tab, Key, List, Default) ->
case lists:keysearch(Key, 1, List) of
@@ -708,7 +805,7 @@ attr_tab_to_pos(_Tab, Pos) when is_integer(Pos) ->
Pos;
attr_tab_to_pos(Tab, Attr) ->
attr_to_pos(Attr, val({Tab, attributes})).
-
+
%% Convert attribute name to integer if neccessary
attr_to_pos(Pos, _Attrs) when is_integer(Pos) ->
Pos;
@@ -723,7 +820,7 @@ attr_to_pos(Attr, [_ | Attrs], Pos) ->
attr_to_pos(Attr, Attrs, Pos + 1);
attr_to_pos(Attr, _, _) ->
mnesia:abort({bad_type, Attr}).
-
+
check_keys(Tab, [{Key, _Val} | Tail], Items) ->
case lists:member(Key, Items) of
true -> [Key | check_keys(Tab, Tail, Items)];
@@ -759,7 +856,7 @@ verify_cstruct(Cs) when is_record(Cs, cstruct) ->
{bad_type, Tab, {type, Type}}),
%% Currently ordered_set is not supported for disk_only_copies.
- if
+ if
Type == ordered_set, Cs#cstruct.disc_only_copies /= [] ->
mnesia:abort({bad_type, Tab, {not_supported, Type, disc_only_copies}});
true ->
@@ -776,10 +873,10 @@ verify_cstruct(Cs) when is_record(Cs, cstruct) ->
Arity = length(Attrs) + 1,
verify(true, Arity > 2, {bad_type, Tab, {attributes, Attrs}}),
-
+
lists:foldl(fun(Attr,_Other) when Attr == snmp ->
mnesia:abort({bad_type, Tab, {attributes, [Attr]}});
- (Attr,Other) ->
+ (Attr,Other) ->
verify(atom, mnesia_lib:etype(Attr),
{bad_type, Tab, {attributes, [Attr]}}),
verify(false, lists:member(Attr, Other),
@@ -792,7 +889,7 @@ verify_cstruct(Cs) when is_record(Cs, cstruct) ->
Index = Cs#cstruct.index,
verify({alt, [nil, list]}, mnesia_lib:etype(Index),
{bad_type, Tab, {index, Index}}),
-
+
IxFun =
fun(Pos) ->
verify(true, fun() ->
@@ -807,7 +904,7 @@ verify_cstruct(Cs) when is_record(Cs, cstruct) ->
{bad_type, Tab, {index, [Pos]}})
end,
lists:foreach(IxFun, Index),
-
+
LC = Cs#cstruct.local_content,
verify({alt, [true, false]}, LC,
{bad_type, Tab, {local_content, LC}}),
@@ -834,7 +931,7 @@ verify_cstruct(Cs) when is_record(Cs, cstruct) ->
lists:foreach(CheckProp, Cs#cstruct.user_properties),
case Cs#cstruct.cookie of
- {{MegaSecs, Secs, MicroSecs}, _Node}
+ {{MegaSecs, Secs, MicroSecs}, _Node}
when is_integer(MegaSecs), is_integer(Secs),
is_integer(MicroSecs), is_atom(node) ->
ok;
@@ -870,15 +967,15 @@ verify_nodes(Cs) ->
end,
verify(integer, mnesia_lib:etype(LoadOrder),
{bad_type, Tab, {load_order, LoadOrder}}),
-
+
Nodes = Ram ++ Disc ++ DiscOnly,
verify(list, mnesia_lib:etype(Nodes),
{combine_error, Tab,
[{ram_copies, []}, {disc_copies, []}, {disc_only_copies, []}]}),
verify(false, has_duplicates(Nodes), {combine_error, Tab, Nodes}),
- AtomCheck = fun(N) -> verify(atom, mnesia_lib:etype(N), {bad_type, Tab, N}) end,
+ AtomCheck = fun(N) -> verify(atom, mnesia_lib:etype(N), {bad_type, Tab, N}) end,
lists:foreach(AtomCheck, Nodes).
-
+
verify(Expected, Fun, Error) when is_function(Fun) ->
do_verify(Expected, catch Fun(), Error);
verify(Expected, Actual, Error) ->
@@ -909,7 +1006,7 @@ ensure_active(Cs, What) ->
W = {Tab, What},
ensure_non_empty(W),
Nodes = mnesia_lib:intersect(val({schema, disc_copies}),
- mnesia_lib:cs_to_nodes(Cs)),
+ mnesia_lib:cs_to_nodes(Cs)),
case Nodes -- val(W) of
[] ->
ok;
@@ -936,7 +1033,7 @@ ensure_non_empty({Tab, Vhat}) ->
ensure_not_active(Tab = schema, Node) ->
Active = val({Tab, active_replicas}),
- case lists:member(Node, Active) of
+ case lists:member(Node, Active) of
false when Active =/= [] ->
ok;
false ->
@@ -970,7 +1067,7 @@ create_table(TabDef) ->
do_multi_create_table(TabDef) ->
get_tid_ts_and_lock(schema, write),
ensure_writable(schema),
- Cs = list2cs(TabDef),
+ Cs = api_list2cs(TabDef),
case Cs#cstruct.frag_properties of
[] ->
do_create_table(Cs);
@@ -999,7 +1096,7 @@ unsafe_make_create_table(Cs) ->
{_Mod, Tid, Ts} = get_tid_ts_and_lock(schema, none),
verify_cstruct(Cs),
Tab = Cs#cstruct.name,
-
+
%% Check that we have all disc replica nodes running
DiscNodes = Cs#cstruct.disc_copies ++ Cs#cstruct.disc_only_copies,
RunningNodes = val({current, db_nodes}),
@@ -1012,12 +1109,12 @@ unsafe_make_create_table(Cs) ->
Nodes = mnesia_lib:intersect(mnesia_lib:cs_to_nodes(Cs), RunningNodes),
Store = Ts#tidstore.store,
mnesia_locker:wlock_no_exist(Tid, Store, Tab, Nodes),
- [{op, create_table, cs2list(Cs)}].
+ [{op, create_table, vsn_cs2list(Cs)}].
check_if_exists(Tab) ->
TidTs = get_tid_ts_and_lock(schema, write),
{_, _, Ts} = TidTs,
- Store = Ts#tidstore.store,
+ Store = Ts#tidstore.store,
ets:foldl(
fun({op, create_table, [{name, T}|_]}, _Acc) when T==Tab ->
true;
@@ -1054,7 +1151,7 @@ make_delete_table(Tab, Mode) ->
%% nodes etc.
TidTs = get_tid_ts_and_lock(schema, write),
{_, _, Ts} = TidTs,
- Store = Ts#tidstore.store,
+ Store = Ts#tidstore.store,
Deleted = ets:select_delete(
Store, [{{op,'$1',[{name,Tab}|'_']},
[{'or',
@@ -1077,9 +1174,9 @@ make_delete_table(Tab, Mode) ->
[] ->
[make_delete_table2(Tab)];
_Props ->
- %% Check if it is a base table
- mnesia_frag:lookup_frag_hash(Tab),
-
+ %% Check if it is a base table
+ mnesia_frag:lookup_frag_hash(Tab),
+
%% Check for foreigners
F = mnesia_frag:lookup_foreigners(Tab),
verify([], F, {combine_error,
@@ -1097,11 +1194,11 @@ make_delete_table2(Tab) ->
Cs = val({Tab, cstruct}),
ensure_active(Cs),
ensure_writable(Tab),
- {op, delete_table, cs2list(Cs)}.
+ {op, delete_table, vsn_cs2list(Cs)}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Change fragmentation of a table
-
+
change_table_frag(Tab, Change) ->
schema_transaction(fun() -> do_change_table_frag(Tab, Change) end).
@@ -1112,14 +1209,10 @@ do_change_table_frag(Tab, Change) when is_atom(Tab), Tab /= schema ->
ok;
do_change_table_frag(Tab, _Change) ->
mnesia:abort({bad_type, Tab}).
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Clear a table
-%% No need for a schema transaction
-clear_table(Tab) ->
- schema_transaction(fun() -> do_clear_table(Tab) end).
-
do_clear_table(schema) ->
mnesia:abort({bad_type, schema});
do_clear_table(Tab) ->
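
clear_table/1 is dropped from this module because clearing a table is no longer implemented as a schema operation; only do_clear_table/1 and make_clear_table/1 remain for the restore path. From the caller's point of view the API is unchanged, e.g. (hypothetical table):

    {atomic, ok} = mnesia:clear_table(example_tab).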
@@ -1130,7 +1223,7 @@ do_clear_table(Tab) ->
make_clear_table(Tab) ->
Cs = val({Tab, cstruct}),
ensure_writable(Tab),
- [{op, clear_table, cs2list(Cs)}].
+ [{op, clear_table, vsn_cs2list(Cs)}].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1150,7 +1243,7 @@ make_add_table_copy(Tab, Node, Storage) ->
verify(false, lists:member(Node, Ns), {already_exists, Tab, Node}),
Cs2 = new_cs(Cs, Node, Storage, add),
verify_cstruct(Cs2),
-
+
%% Check storage and if node is running
IsRunning = lists:member(Node, val({current, db_nodes})),
if
@@ -1170,28 +1263,28 @@ make_add_table_copy(Tab, Node, Storage) ->
IsRunning == false ->
mnesia:abort({not_active, schema, Node})
end,
- [{op, add_table_copy, Storage, Node, cs2list(Cs2)}].
+ [{op, add_table_copy, Storage, Node, vsn_cs2list(Cs2)}].
del_table_copy(Tab, Node) ->
schema_transaction(fun() -> do_del_table_copy(Tab, Node) end).
do_del_table_copy(Tab, Node) when is_atom(Node) ->
TidTs = get_tid_ts_and_lock(schema, write),
-%% get_tid_ts_and_lock(Tab, write),
+%% get_tid_ts_and_lock(Tab, write),
insert_schema_ops(TidTs, make_del_table_copy(Tab, Node));
do_del_table_copy(Tab, Node) ->
mnesia:abort({badarg, Tab, Node}).
-
+
make_del_table_copy(Tab, Node) ->
ensure_writable(schema),
Cs = incr_version(val({Tab, cstruct})),
Storage = mnesia_lib:schema_cs_to_storage_type(Node, Cs),
- Cs2 = new_cs(Cs, Node, Storage, del),
+ Cs2 = new_cs(Cs, Node, Storage, del),
case mnesia_lib:cs_to_nodes(Cs2) of
[] when Tab == schema ->
mnesia:abort({combine_error, Tab, "Last replica"});
[] ->
- ensure_active(Cs),
+ ensure_active(Cs),
dbg_out("Last replica deleted in table ~p~n", [Tab]),
make_delete_table(Tab, whole_table);
_ when Tab == schema ->
@@ -1199,25 +1292,25 @@ make_del_table_copy(Tab, Node) ->
ensure_not_active(Tab, Node),
verify_cstruct(Cs2),
Ops = remove_node_from_tabs(val({schema, tables}), Node),
- [{op, del_table_copy, ram_copies, Node, cs2list(Cs2)} | Ops];
+ [{op, del_table_copy, ram_copies, Node, vsn_cs2list(Cs2)} | Ops];
_ ->
ensure_active(Cs),
verify_cstruct(Cs2),
- [{op, del_table_copy, Storage, Node, cs2list(Cs2)}]
+ [{op, del_table_copy, Storage, Node, vsn_cs2list(Cs2)}]
end.
remove_node_from_tabs([], _Node) ->
[];
remove_node_from_tabs([schema|Rest], Node) ->
remove_node_from_tabs(Rest, Node);
-remove_node_from_tabs([Tab|Rest], Node) ->
- {Cs, IsFragModified} =
+remove_node_from_tabs([Tab|Rest], Node) ->
+ {Cs, IsFragModified} =
mnesia_frag:remove_node(Node, incr_version(val({Tab, cstruct}))),
case mnesia_lib:schema_cs_to_storage_type(Node, Cs) of
unknown ->
case IsFragModified of
true ->
- [{op, change_table_frag, {del_node, Node}, cs2list(Cs)} |
+ [{op, change_table_frag, {del_node, Node}, vsn_cs2list(Cs)} |
remove_node_from_tabs(Rest, Node)];
false ->
remove_node_from_tabs(Rest, Node)
@@ -1226,11 +1319,11 @@ remove_node_from_tabs([Tab|Rest], Node) ->
Cs2 = new_cs(Cs, Node, Storage, del),
case mnesia_lib:cs_to_nodes(Cs2) of
[] ->
- [{op, delete_table, cs2list(Cs)} |
+ [{op, delete_table, vsn_cs2list(Cs)} |
remove_node_from_tabs(Rest, Node)];
_Ns ->
verify_cstruct(Cs2),
- [{op, del_table_copy, ram_copies, Node, cs2list(Cs2)}|
+ [{op, del_table_copy, ram_copies, Node, vsn_cs2list(Cs2)}|
remove_node_from_tabs(Rest, Node)]
end
end.
@@ -1246,7 +1339,7 @@ new_cs(Cs, Node, ram_copies, del) ->
new_cs(Cs, Node, disc_copies, del) ->
Cs#cstruct{disc_copies = lists:delete(Node , Cs#cstruct.disc_copies)};
new_cs(Cs, Node, disc_only_copies, del) ->
- Cs#cstruct{disc_only_copies =
+ Cs#cstruct{disc_only_copies =
lists:delete(Node , Cs#cstruct.disc_only_copies)};
new_cs(Cs, _Node, Storage, _Op) ->
mnesia:abort({badarg, Cs#cstruct.name, Storage}).
@@ -1278,13 +1371,13 @@ make_move_table(Tab, FromNode, ToNode) ->
Running = val({current, db_nodes}),
Storage = mnesia_lib:schema_cs_to_storage_type(FromNode, Cs),
verify(true, lists:member(ToNode, Running), {not_active, schema, ToNode}),
-
+
Cs2 = new_cs(Cs, ToNode, Storage, add),
Cs3 = new_cs(Cs2, FromNode, Storage, del),
verify_cstruct(Cs3),
- [{op, add_table_copy, Storage, ToNode, cs2list(Cs2)},
+ [{op, add_table_copy, Storage, ToNode, vsn_cs2list(Cs2)},
{op, sync_trans},
- {op, del_table_copy, Storage, FromNode, cs2list(Cs3)}].
+ {op, del_table_copy, Storage, FromNode, vsn_cs2list(Cs3)}].
%% end of functions to add and delete nodes to tables
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1306,7 +1399,7 @@ make_change_table_copy_type(Tab, Node, unknown) ->
make_change_table_copy_type(Tab, Node, ToS) ->
ensure_writable(schema),
Cs = incr_version(val({Tab, cstruct})),
- FromS = mnesia_lib:storage_type_at_node(Node, Tab),
+ FromS = mnesia_lib:storage_type_at_node(Node, Tab),
case compare_storage_type(false, FromS, ToS) of
{same, _} ->
@@ -1320,12 +1413,12 @@ make_change_table_copy_type(Tab, Node, ToS) ->
Cs2 = new_cs(Cs, Node, FromS, del),
Cs3 = new_cs(Cs2, Node, ToS, add),
verify_cstruct(Cs3),
-
- [{op, change_table_copy_type, Node, FromS, ToS, cs2list(Cs3)}].
+
+ [{op, change_table_copy_type, Node, FromS, ToS, vsn_cs2list(Cs3)}].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% change index functions ....
-%% Pos is allready added by 1 in both of these functions
+%% Pos is already incremented by 1 in both of these functions
add_table_index(Tab, Pos) ->
schema_transaction(fun() -> do_add_table_index(Tab, Pos) end).
@@ -1347,7 +1440,7 @@ make_add_table_index(Tab, Pos) ->
Ix2 = lists:sort([Pos | Ix]),
Cs2 = Cs#cstruct{index = Ix2},
verify_cstruct(Cs2),
- [{op, add_index, Pos, cs2list(Cs2)}].
+ [{op, add_index, Pos, vsn_cs2list(Cs2)}].
del_table_index(Tab, Pos) ->
schema_transaction(fun() -> do_del_table_index(Tab, Pos) end).
@@ -1368,7 +1461,7 @@ make_del_table_index(Tab, Pos) ->
verify(true, lists:member(Pos, Ix), {no_exists, Tab, Pos}),
Cs2 = Cs#cstruct{index = lists:delete(Pos, Ix)},
verify_cstruct(Cs2),
- [{op, del_index, Pos, cs2list(Cs2)}].
+ [{op, del_index, Pos, vsn_cs2list(Cs2)}].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1391,7 +1484,7 @@ make_add_snmp(Tab, Ustruct) ->
verify(true, mnesia_snmp_hook:check_ustruct(Ustruct), Error),
Cs2 = Cs#cstruct{snmp = Ustruct},
verify_cstruct(Cs2),
- [{op, add_snmp, Ustruct, cs2list(Cs2)}].
+ [{op, add_snmp, Ustruct, vsn_cs2list(Cs2)}].
del_snmp(Tab) ->
schema_transaction(fun() -> do_del_snmp(Tab) end).
@@ -1409,17 +1502,17 @@ make_del_snmp(Tab) ->
ensure_active(Cs),
Cs2 = Cs#cstruct{snmp = []},
verify_cstruct(Cs2),
- [{op, del_snmp, cs2list(Cs2)}].
+ [{op, del_snmp, vsn_cs2list(Cs2)}].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
+%%
-transform_table(Tab, Fun, NewAttrs, NewRecName)
- when is_function(Fun), is_list(NewAttrs), is_atom(NewRecName) ->
+transform_table(Tab, Fun, NewAttrs, NewRecName)
+ when is_function(Fun), is_list(NewAttrs), is_atom(NewRecName) ->
schema_transaction(fun() -> do_transform_table(Tab, Fun, NewAttrs, NewRecName) end);
-transform_table(Tab, ignore, NewAttrs, NewRecName)
- when is_list(NewAttrs), is_atom(NewRecName) ->
+transform_table(Tab, ignore, NewAttrs, NewRecName)
+ when is_list(NewAttrs), is_atom(NewRecName) ->
schema_transaction(fun() -> do_transform_table(Tab, ignore, NewAttrs, NewRecName) end);
transform_table(Tab, Fun, NewAttrs, NewRecName) ->
@@ -1438,33 +1531,33 @@ make_transform(Tab, Fun, NewAttrs, NewRecName) ->
ensure_active(Cs),
ensure_writable(Tab),
case mnesia_lib:val({Tab, index}) of
- [] ->
+ [] ->
Cs2 = Cs#cstruct{attributes = NewAttrs, record_name = NewRecName},
verify_cstruct(Cs2),
- [{op, transform, Fun, cs2list(Cs2)}];
+ [{op, transform, Fun, vsn_cs2list(Cs2)}];
PosList ->
DelIdx = fun(Pos, Ncs) ->
Ix = Ncs#cstruct.index,
Ncs1 = Ncs#cstruct{index = lists:delete(Pos, Ix)},
- Op = {op, del_index, Pos, cs2list(Ncs1)},
+ Op = {op, del_index, Pos, vsn_cs2list(Ncs1)},
{Op, Ncs1}
end,
AddIdx = fun(Pos, Ncs) ->
Ix = Ncs#cstruct.index,
Ix2 = lists:sort([Pos | Ix]),
Ncs1 = Ncs#cstruct{index = Ix2},
- Op = {op, add_index, Pos, cs2list(Ncs1)},
+ Op = {op, add_index, Pos, vsn_cs2list(Ncs1)},
{Op, Ncs1}
end,
{DelOps, Cs1} = lists:mapfoldl(DelIdx, Cs, PosList),
Cs2 = Cs1#cstruct{attributes = NewAttrs, record_name = NewRecName},
{AddOps, Cs3} = lists:mapfoldl(AddIdx, Cs2, PosList),
verify_cstruct(Cs3),
- lists:flatten([DelOps, {op, transform, Fun, cs2list(Cs2)}, AddOps])
+ lists:flatten([DelOps, {op, transform, Fun, vsn_cs2list(Cs2)}, AddOps])
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
+%%
change_table_access_mode(Tab, Mode) ->
schema_transaction(fun() -> do_change_table_access_mode(Tab, Mode) end).
@@ -1484,7 +1577,7 @@ make_change_table_access_mode(Tab, Mode) ->
verify(false, OldMode == Mode, {already_exists, Tab, Mode}),
Cs2 = Cs#cstruct{access_mode = Mode},
verify_cstruct(Cs2),
- [{op, change_table_access_mode, cs2list(Cs2), OldMode, Mode}].
+ [{op, change_table_access_mode, vsn_cs2list(Cs2), OldMode, Mode}].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1505,7 +1598,7 @@ make_change_table_load_order(Tab, LoadOrder) ->
OldLoadOrder = Cs#cstruct.load_order,
Cs2 = Cs#cstruct{load_order = LoadOrder},
verify_cstruct(Cs2),
- [{op, change_table_load_order, cs2list(Cs2), OldLoadOrder, LoadOrder}].
+ [{op, change_table_load_order, vsn_cs2list(Cs2), OldLoadOrder, LoadOrder}].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1535,14 +1628,14 @@ make_change_table_majority(Tab, Majority) ->
ensure_active(CsT),
CsT2 = CsT#cstruct{majority = Majority},
verify_cstruct(CsT2),
- {op, change_table_majority, cs2list(CsT2),
+ {op, change_table_majority, vsn_cs2list(CsT2),
OldMajority, Majority}
end, FragNames);
false -> [];
{_, _} -> mnesia:abort({bad_type, Tab})
end,
verify_cstruct(Cs2),
- [{op, change_table_majority, cs2list(Cs2), OldMajority, Majority} | FragOps].
+ [{op, change_table_majority, vsn_cs2list(Cs2), OldMajority, Majority} | FragOps].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1583,7 +1676,7 @@ make_write_table_properties(Tab, [Prop | Props], Cs) ->
MergedProps = lists:merge(DelProps, [Prop]),
Cs2 = Cs#cstruct{user_properties = MergedProps},
verify_cstruct(Cs2),
- [{op, write_property, cs2list(Cs2), Prop} |
+ [{op, write_property, vsn_cs2list(Cs2), Prop} |
make_write_table_properties(Tab, Props, Cs2)];
make_write_table_properties(_Tab, [], _Cs) ->
[].
@@ -1598,9 +1691,9 @@ change_prop_in_existing_op(Tab, Prop, How, Store) ->
false ->
false
end.
-
-update_existing_op([{op, Op, L = [{name,Tab}|_], _OldProp}|Ops],
- Tab, Prop, How, Acc) when Op == write_property;
+
+update_existing_op([{op, Op, L = [{name,Tab}|_], _OldProp}|Ops],
+ Tab, Prop, How, Acc) when Op == write_property;
Op == delete_property ->
%% Apparently, mnesia_dumper doesn't care about OldProp here -- just L,
%% so we will throw away OldProp (not that it matters...) and insert Prop.
@@ -1625,7 +1718,7 @@ update_existing_op([], _, _, _, _) ->
do_read_table_property(Tab, Key) ->
TidTs = get_tid_ts_and_lock(schema, read),
{_, _, Ts} = TidTs,
- Store = Ts#tidstore.store,
+ Store = Ts#tidstore.store,
Props = ets:foldl(
fun({op, create_table, [{name, T}|Opts]}, _Acc)
when T==Tab ->
@@ -1689,7 +1782,7 @@ do_delete_table_property(Tab, PropKey) ->
[Tab,PropKey]),
%% this must be an existing table
get_tid_ts_and_lock(Tab, none),
- insert_schema_ops(TidTs,
+ insert_schema_ops(TidTs,
make_delete_table_properties(Tab, [PropKey]))
end.
@@ -1704,24 +1797,24 @@ make_delete_table_properties(Tab, [PropKey | PropKeys], Cs) ->
Props = lists:keydelete(PropKey, 1, OldProps),
Cs2 = Cs#cstruct{user_properties = Props},
verify_cstruct(Cs2),
- [{op, delete_property, cs2list(Cs2), PropKey} |
+ [{op, delete_property, vsn_cs2list(Cs2), PropKey} |
make_delete_table_properties(Tab, PropKeys, Cs2)];
make_delete_table_properties(_Tab, [], _Cs) ->
[].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Ensure that the transaction can be committed even
+%% Ensure that the transaction can be committed even
%% if the node crashes and Mnesia is restarted
prepare_commit(Tid, Commit, WaitFor) ->
case Commit#commit.schema_ops of
[] ->
{false, Commit, optional};
OrigOps ->
- {Modified, Ops, DumperMode} =
+ {Modified, Ops, DumperMode} =
prepare_ops(Tid, OrigOps, WaitFor, false, [], optional),
InitBy = schema_prepare,
- GoodRes = {Modified,
+ GoodRes = {Modified,
Commit#commit{schema_ops = lists:reverse(Ops)},
DumperMode},
case DumperMode of
@@ -1737,7 +1830,7 @@ prepare_commit(Tid, Commit, WaitFor) ->
end
end,
case Ops of
- [] ->
+ [] ->
ignore;
_ ->
%% We need to grab a dumper lock here, the log may not
@@ -1749,20 +1842,20 @@ prepare_commit(Tid, Commit, WaitFor) ->
prepare_ops(Tid, [Op | Ops], WaitFor, Changed, Acc, DumperMode) ->
case prepare_op(Tid, Op, WaitFor) of
- {true, mandatory} ->
+ {true, mandatory} ->
prepare_ops(Tid, Ops, WaitFor, Changed, [Op | Acc], mandatory);
- {true, optional} ->
+ {true, optional} ->
prepare_ops(Tid, Ops, WaitFor, Changed, [Op | Acc], DumperMode);
- {true, Ops2, mandatory} ->
+ {true, Ops2, mandatory} ->
prepare_ops(Tid, Ops, WaitFor, true, Ops2 ++ Acc, mandatory);
- {true, Ops2, optional} ->
+ {true, Ops2, optional} ->
prepare_ops(Tid, Ops, WaitFor, true, Ops2 ++ Acc, DumperMode);
- {false, optional} ->
+ {false, optional} ->
prepare_ops(Tid, Ops, WaitFor, true, Acc, DumperMode)
end;
prepare_ops(_Tid, [], _WaitFor, Changed, Acc, DumperMode) ->
{Changed, Acc, DumperMode}.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Prepare for commit
%% returns true if Op should be included, i.e. unmodified
@@ -1781,8 +1874,8 @@ prepare_op(_Tid, {op, rec, unknown, Rec}, _WaitFor) ->
prepare_op(_Tid, {op, announce_im_running, Node, SchemaDef, Running, RemoteRunning}, _WaitFor) ->
SchemaCs = list2cs(SchemaDef),
- if
- Node == node() -> %% Announce has already run on local node
+ if
+ Node == node() -> %% Announce has already run on local node
ignore; %% from do_merge_schema
true ->
%% If a node has restarted it may still linger in db_nodes,
@@ -1794,9 +1887,9 @@ prepare_op(_Tid, {op, announce_im_running, Node, SchemaDef, Running, RemoteRunni
end,
{false, optional};
-prepare_op(_Tid, {op, sync_trans}, {part, CoordPid}) ->
+prepare_op(_Tid, {op, sync_trans}, {part, CoordPid}) ->
CoordPid ! {sync_trans, self()},
- receive
+ receive
{sync_trans, CoordPid} ->
{false, optional};
{mnesia_down, _Node} = Else ->
@@ -1807,7 +1900,7 @@ prepare_op(_Tid, {op, sync_trans}, {part, CoordPid}) ->
mnesia:abort(Else)
end;
-prepare_op(_Tid, {op, sync_trans}, {coord, Nodes}) ->
+prepare_op(_Tid, {op, sync_trans}, {coord, Nodes}) ->
case receive_sync(Nodes, []) of
{abort, Reason} ->
mnesia_lib:verbose("sync_op terminated due to ~p~n", [Reason]),
@@ -1830,18 +1923,18 @@ prepare_op(Tid, {op, create_table, TabDef}, _WaitFor) ->
mnesia:abort(UseDirReason);
ram_copies ->
mnesia_lib:set({Tab, create_table},true),
- create_ram_table(Tab, Cs#cstruct.type),
+ create_ram_table(Tab, Cs),
insert_cstruct(Tid, Cs, false),
{true, optional};
disc_copies ->
mnesia_lib:set({Tab, create_table},true),
- create_ram_table(Tab, Cs#cstruct.type),
+ create_ram_table(Tab, Cs),
create_disc_table(Tab),
insert_cstruct(Tid, Cs, false),
- {true, optional};
+ {true, optional};
disc_only_copies ->
mnesia_lib:set({Tab, create_table},true),
- create_disc_only_table(Tab,Cs#cstruct.type),
+ create_disc_only_table(Tab,Cs),
insert_cstruct(Tid, Cs, false),
{true, optional};
unknown -> %% No replica on this node
@@ -1857,15 +1950,15 @@ prepare_op(Tid, {op, add_table_copy, Storage, Node, TabDef}, _WaitFor) ->
if
Tab == schema ->
{true, optional};
-
+
Node == node() ->
- case mnesia_lib:val({schema, storage_type}) of
- ram_copies when Storage /= ram_copies ->
+ case mnesia_lib:val({schema, storage_type}) of
+ ram_copies when Storage /= ram_copies ->
Error = {combine_error, Tab, "has no disc", Node},
mnesia:abort(Error);
_ ->
ok
- end,
+ end,
%% Tables are created by mnesia_loader get_network code
insert_cstruct(Tid, Cs, true),
case mnesia_controller:get_network_copy(Tab, Cs) of
@@ -1902,22 +1995,22 @@ prepare_op(Tid, {op, add_table_copy, Storage, Node, TabDef}, _WaitFor) ->
prepare_op(Tid, {op, del_table_copy, _Storage, Node, TabDef}, _WaitFor) ->
Cs = list2cs(TabDef),
Tab = Cs#cstruct.name,
-
+
if
%% Schema table lock is always required to run a schema op.
%% No need to look it.
- node(Tid#tid.pid) == node(), Tab /= schema ->
+ node(Tid#tid.pid) == node(), Tab /= schema ->
Self = self(),
Pid = spawn_link(fun() -> lock_del_table(Tab, Node, Cs, Self) end),
put(mnesia_lock, Pid),
- receive
- {Pid, updated} ->
+ receive
+ {Pid, updated} ->
{true, optional};
{Pid, FailReason} ->
mnesia:abort(FailReason);
{'EXIT', Pid, Reason} ->
mnesia:abort(Reason)
- end;
+ end;
true ->
{true, optional}
end;
@@ -1928,12 +2021,12 @@ prepare_op(_Tid, {op, change_table_copy_type, N, FromS, ToS, TabDef}, _WaitFor)
Tab = Cs#cstruct.name,
NotActive = mnesia_lib:not_active_here(Tab),
-
- if
+
+ if
NotActive == true ->
mnesia:abort({not_active, Tab, node()});
-
- Tab == schema ->
+
+ Tab == schema ->
case {FromS, ToS} of
{ram_copies, disc_copies} ->
case mnesia:system_info(schema_location) of
@@ -1943,7 +2036,7 @@ prepare_op(_Tid, {op, change_table_copy_type, N, FromS, ToS, TabDef}, _WaitFor)
mnesia:abort({combine_error, Tab, node(),
"schema_location must be opt_disc"})
end,
- Dir = mnesia_lib:dir(),
+ Dir = mnesia_lib:dir(),
case opt_create_dir(true, Dir) of
ok ->
purge_dir(Dir, []),
@@ -1967,18 +2060,18 @@ prepare_op(_Tid, {op, change_table_copy_type, N, FromS, ToS, TabDef}, _WaitFor)
_ ->
mnesia:abort({combine_error, Tab, ToS})
end;
-
- FromS == ram_copies ->
+
+ FromS == ram_copies ->
case mnesia_monitor:use_dir() of
- true ->
+ true ->
Dat = mnesia_lib:tab2dcd(Tab),
case mnesia_lib:exists(Dat) of
true ->
mnesia:abort({combine_error, Tab, node(),
"Table dump exists"});
false ->
- case ToS of
- disc_copies ->
+ case ToS of
+ disc_copies ->
mnesia_log:ets2dcd(Tab, dmp);
disc_only_copies ->
mnesia_dumper:raw_named_dump_table(Tab, dmp)
@@ -1988,12 +2081,12 @@ prepare_op(_Tid, {op, change_table_copy_type, N, FromS, ToS, TabDef}, _WaitFor)
false ->
mnesia:abort({has_no_disc, node()})
end;
-
+
FromS == disc_copies, ToS == disc_only_copies ->
mnesia_dumper:raw_named_dump_table(Tab, dmp);
FromS == disc_only_copies ->
Type = Cs#cstruct.type,
- create_ram_table(Tab, Type),
+ create_ram_table(Tab, Cs),
Datname = mnesia_lib:tab2dat(Tab),
Repair = mnesia_monitor:get_env(auto_repair),
case mnesia_lib:dets_to_ets(Tab, Tab, Datname, Type, Repair, no) of
@@ -2020,7 +2113,7 @@ prepare_op(_Tid, {op, dump_table, unknown, TabDef}, _WaitFor) ->
case lists:member(node(), Cs#cstruct.ram_copies) of
true ->
case mnesia_monitor:use_dir() of
- true ->
+ true ->
mnesia_log:ets2dcd(Tab, dmp),
Size = mnesia:table_info(Tab, size),
{true, [{op, dump_table, Size, TabDef}], optional};
@@ -2058,7 +2151,7 @@ prepare_op(_Tid, {op, transform, Fun, TabDef}, _WaitFor) ->
mnesia_lib:db_fixtable(Storage, Tab, true),
Key = mnesia_lib:db_first(Tab),
Op = {op, transform, Fun, TabDef},
- case catch transform_objs(Fun, Tab, RecName,
+ case catch transform_objs(Fun, Tab, RecName,
Key, NewArity, Storage, Type, [Op]) of
{'EXIT', Reason} ->
mnesia_lib:db_fixtable(Storage, Tab, false),
@@ -2072,7 +2165,7 @@ prepare_op(_Tid, {op, transform, Fun, TabDef}, _WaitFor) ->
prepare_op(_Tid, {op, merge_schema, TabDef}, _WaitFor) ->
Cs = list2cs(TabDef),
case verify_merge(Cs) of
- ok ->
+ ok ->
{true, optional};
Error ->
verbose("Merge_Schema ~p failed on ~p: ~p~n", [_Tid,node(),Error]),
@@ -2081,8 +2174,9 @@ prepare_op(_Tid, {op, merge_schema, TabDef}, _WaitFor) ->
prepare_op(_Tid, _Op, _WaitFor) ->
{true, optional}.
-create_ram_table(Tab, Type) ->
- Args = [{keypos, 2}, public, named_table, Type],
+create_ram_table(Tab, #cstruct{type=Type, storage_properties=Props}) ->
+ EtsOpts = proplists:get_value(ets, Props, []),
+ Args = [{keypos, 2}, public, named_table, Type | EtsOpts],
case mnesia_monitor:unsafe_mktab(Tab, Args) of
Tab ->
ok;
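
create_ram_table/2 now takes the whole cstruct so any user-supplied ets options from storage_properties can be appended to the argument list passed to mnesia_monitor:unsafe_mktab/2. A hedged sketch of the argument list this yields for a set table declared with {storage_properties, [{ets, [compressed]}]}:

    ram_args_example() ->
        Props = [{ets, [compressed]}],
        EtsOpts = proplists:get_value(ets, Props, []),
        %% yields [{keypos,2},public,named_table,set,compressed]
        [{keypos, 2}, public, named_table, set | EtsOpts].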
@@ -2090,10 +2184,11 @@ create_ram_table(Tab, Type) ->
Err = "Failed to create ets table",
mnesia:abort({system_limit, Tab, {Err,Reason}})
end.
+
create_disc_table(Tab) ->
File = mnesia_lib:tab2dcd(Tab),
file:delete(File),
- FArg = [{file, File}, {name, {mnesia,create}},
+ FArg = [{file, File}, {name, {mnesia,create}},
{repair, false}, {mode, read_write}],
case mnesia_monitor:open_log(FArg) of
{ok,Log} ->
@@ -2103,13 +2198,15 @@ create_disc_table(Tab) ->
Err = "Failed to create disc table",
mnesia:abort({system_limit, Tab, {Err,Reason}})
end.
-create_disc_only_table(Tab,Type) ->
+create_disc_only_table(Tab, #cstruct{type=Type, storage_properties=Props}) ->
File = mnesia_lib:tab2dat(Tab),
file:delete(File),
+ DetsOpts = proplists:get_value(dets, Props, []),
Args = [{file, mnesia_lib:tab2dat(Tab)},
{type, mnesia_lib:disk_type(Tab, Type)},
{keypos, 2},
- {repair, mnesia_monitor:get_env(auto_repair)}],
+ {repair, mnesia_monitor:get_env(auto_repair)}
+ | DetsOpts],
case mnesia_monitor:unsafe_open_dets(Tab, Args) of
{ok, _} ->
ok;
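
create_disc_only_table/2 gets the same treatment for dets: the dets options from storage_properties are appended after the options Mnesia always sets (file, type, keypos, repair); keys such as file are already rejected in list2cs/1, so they cannot clash here. A hedged sketch of the resulting argument list for {storage_properties, [{dets, [{auto_save, 5000}]}]} on a set table:

    dets_args_example() ->
        DetsOpts = [{auto_save, 5000}],
        [{file, "example_tab.DAT"},   %% hypothetical path; the real code uses mnesia_lib:tab2dat/1
         {type, set},
         {keypos, 2},
         {repair, true}               %% the real code asks mnesia_monitor:get_env(auto_repair)
         | DetsOpts].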
@@ -2124,32 +2221,37 @@ receive_sync([], Pids) ->
receive_sync(Nodes, Pids) ->
receive
{sync_trans, Pid} ->
- Node = node(Pid),
+ Node = node(Pid),
receive_sync(lists:delete(Node, Nodes), [Pid | Pids]);
Else ->
{abort, Else}
end.
-lock_del_table(Tab, Node, Cs, Father) ->
+lock_del_table(Tab, NewNode, Cs0, Father) ->
Ns = val({schema, active_replicas}),
process_flag(trap_exit,true),
Lock = fun() ->
mnesia:write_lock_table(Tab),
- {Res, []} = rpc:multicall(Ns, ?MODULE, set_where_to_read, [Tab, Node, Cs]),
+ %% Sigh using cs record
+ Set = fun(Node) ->
+ [Cs] = normalize_cs([Cs0], Node),
+ rpc:call(Node, ?MODULE, set_where_to_read, [Tab, NewNode, Cs])
+ end,
+ Res = [Set(Node) || Node <- Ns],
Filter = fun(ok) ->
false;
({badrpc, {'EXIT', {undef, _}}}) ->
%% This will be the case when we talk with nodes older
- %% than 3.8.2, they will set where_to_read without
- %% getting a lock.
+ %% than 3.8.2, they will set where_to_read without
+ %% getting a lock.
false;
(_) ->
true
end,
case lists:filter(Filter, Res) of
- [] ->
+ [] ->
Father ! {self(), updated},
- %% When transaction is commited the process dies
+ %% When the transaction is committed the process dies
%% and the lock is released.
receive _ -> ok end;
Err ->
@@ -2166,7 +2268,7 @@ lock_del_table(Tab, Node, Cs, Father) ->
exit(normal).
set_where_to_read(Tab, Node, Cs) ->
- case mnesia_lib:val({Tab, where_to_read}) of
+ case mnesia_lib:val({Tab, where_to_read}) of
Node ->
case Cs#cstruct.local_content of
true ->
@@ -2185,16 +2287,16 @@ transform_objs(_Fun, _Tab, _RT, '$end_of_table', _NewArity, _Storage, _Type, Acc
transform_objs(Fun, Tab, RecName, Key, A, Storage, Type, Acc) ->
Objs = mnesia_lib:db_get(Tab, Key),
NextKey = mnesia_lib:db_next_key(Tab, Key),
- Oid = {Tab, Key},
+ Oid = {Tab, Key},
NewObjs = {Ws, Ds} = transform_obj(Tab, RecName, Key, Fun, Objs, A, Type, [], []),
- if
- NewObjs == {[], []} ->
+ if
+ NewObjs == {[], []} ->
transform_objs(Fun, Tab, RecName, NextKey, A, Storage, Type, Acc);
- Type == bag ->
+ Type == bag ->
transform_objs(Fun, Tab, RecName, NextKey, A, Storage, Type,
[{op, rec, Storage, {Oid, Ws, write}},
{op, rec, Storage, {Oid, [Oid], delete}} | Acc]);
- Ds == [] ->
+ Ds == [] ->
%% Type is set or ordered_set, no need to delete the record first
transform_objs(Fun, Tab, RecName, NextKey, A, Storage, Type,
[{op, rec, Storage, {Oid, Ws, write}} | Acc]);
@@ -2215,15 +2317,15 @@ transform_obj(Tab, RecName, Key, Fun, [Obj|Rest], NewArity, Type, Ws, Ds) ->
NewObj == Obj ->
transform_obj(Tab, RecName, Key, Fun, Rest, NewArity, Type, Ws, Ds);
RecName == element(1, NewObj), Key == element(2, NewObj) ->
- transform_obj(Tab, RecName, Key, Fun, Rest, NewArity,
+ transform_obj(Tab, RecName, Key, Fun, Rest, NewArity,
Type, [NewObj | Ws], Ds);
- NewObj == delete ->
- case Type of
+ NewObj == delete ->
+ case Type of
bag -> %% Just don't write that object
- transform_obj(Tab, RecName, Key, Fun, Rest,
- NewArity, Type, Ws, Ds);
+ transform_obj(Tab, RecName, Key, Fun, Rest,
+ NewArity, Type, Ws, Ds);
_ ->
- transform_obj(Tab, RecName, Key, Fun, Rest, NewArity,
+ transform_obj(Tab, RecName, Key, Fun, Rest, NewArity,
Type, Ws, [NewObj | Ds])
end;
true ->
@@ -2247,7 +2349,7 @@ undo_prepare_commit(Tid, Commit) ->
%% Undo in reverse order
undo_prepare_ops(Tid, [Op | Ops]) ->
- case element(1, Op) of
+ case element(1, Op) of
TheOp when TheOp /= op, TheOp /= restore_op ->
undo_prepare_ops(Tid, Ops);
_ ->
@@ -2274,7 +2376,7 @@ undo_prepare_op(Tid, {op, create_table, TabDef}) ->
mnesia_lib:unset({Tab, create_table}),
delete_cstruct(Tid, Cs),
case mnesia_lib:cs_to_storage_type(node(), Cs) of
- unknown ->
+ unknown ->
ok;
ram_copies ->
ram_delete_table(Tab, ram_copies);
@@ -2289,7 +2391,7 @@ undo_prepare_op(Tid, {op, create_table, TabDef}) ->
%% disc_delete_table(Tab, Storage),
file:delete(Dat)
end;
-
+
undo_prepare_op(Tid, {op, add_table_copy, Storage, Node, TabDef}) ->
Cs = list2cs(TabDef),
Tab = Cs#cstruct.name,
@@ -2314,21 +2416,22 @@ undo_prepare_op(Tid, {op, add_table_copy, Storage, Node, TabDef}) ->
Cs2 = new_cs(Cs, Node, Storage, del),
insert_cstruct(Tid, Cs2, true) % Don't care about the version
end;
-
-undo_prepare_op(_Tid, {op, del_table_copy, _, Node, TabDef})
+
+undo_prepare_op(_Tid, {op, del_table_copy, _, Node, TabDef})
when Node == node() ->
+ WriteLocker = get(mnesia_lock),
+ WriteLocker =/= undefined andalso (WriteLocker ! die),
Cs = list2cs(TabDef),
Tab = Cs#cstruct.name,
mnesia_lib:set({Tab, where_to_read}, Node);
-
-undo_prepare_op(_Tid, {op, change_table_copy_type, N, FromS, ToS, TabDef})
+undo_prepare_op(_Tid, {op, change_table_copy_type, N, FromS, ToS, TabDef})
when N == node() ->
Cs = list2cs(TabDef),
Tab = Cs#cstruct.name,
mnesia_checkpoint:tm_change_table_copy_type(Tab, ToS, FromS),
Dmp = mnesia_lib:tab2dmp(Tab),
-
+
case {FromS, ToS} of
{ram_copies, disc_copies} when Tab == schema ->
file:delete(Dmp),
@@ -2382,9 +2485,9 @@ ram_delete_table(Tab, Storage) ->
ignore;
disc_only_copies ->
ignore;
- _Else ->
+ _Else ->
%% delete possible index files and data .....
- %% Got to catch this since if no info has been set in the
+ %% Got to catch this since if no info has been set in the
%% mnesia_gvar it will crash
catch mnesia_index:del_transient(Tab, Storage),
case ?catch_val({Tab, {index, snmp}}) of
@@ -2454,7 +2557,7 @@ has_known_suffix(File, [Suffix | Tail], false) ->
has_known_suffix(File, Tail, lists:suffix(Suffix, File));
has_known_suffix(_File, [], Bool) ->
Bool.
-
+
known_suffixes() -> real_suffixes() ++ tmp_suffixes().
real_suffixes() -> [".DAT", ".LOG", ".BUP", ".DCL", ".DCD"].
@@ -2477,11 +2580,11 @@ info2(Tab, [{frag_hash, _V} | Tail]) -> % Ignore frag_hash
info2(Tab, [{P, V} | Tail]) ->
io:format("~-20w -> ~p~n",[P,V]),
info2(Tab, Tail);
-info2(_, []) ->
+info2(_, []) ->
io:format("~n", []).
get_table_properties(Tab) ->
- case catch mnesia_lib:db_match_object(ram_copies,
+ case catch mnesia_lib:db_match_object(ram_copies,
mnesia_gvar, {{Tab, '_'}, '_'}) of
{'EXIT', _} ->
mnesia:abort({no_exists, Tab, all});
@@ -2509,9 +2612,9 @@ get_table_properties(Tab) ->
recs = error_recs
}).
-restore(Opaque) ->
+restore(Opaque) ->
restore(Opaque, [], mnesia_monitor:get_env(backup_module)).
-restore(Opaque, Args) when is_list(Args) ->
+restore(Opaque, Args) when is_list(Args) ->
restore(Opaque, Args, mnesia_monitor:get_env(backup_module));
restore(_Opaque, BadArg) ->
{aborted, {badarg, BadArg}}.
@@ -2522,7 +2625,7 @@ restore(Opaque, Args, Module) when is_list(Args), is_atom(Module) ->
case mnesia_bup:read_schema(R#r.module, Opaque) of
{error, Reason} ->
{aborted, Reason};
- BupSchema ->
+ BupSchema ->
schema_transaction(fun() -> do_restore(R, BupSchema) end)
end;
{'EXIT', Reason} ->
@@ -2556,8 +2659,8 @@ check_restore_arg({keep_tables, List}, R) when is_list(List) ->
check_restore_arg({skip_tables, List}, R) when is_list(List) ->
TableList = [{Tab, skip_tables} || Tab <- List],
R#r{table_options = R#r.table_options ++ TableList};
-check_restore_arg({default_op, Op}, R) ->
- case Op of
+check_restore_arg({default_op, Op}, R) ->
+ case Op of
clear_tables -> ok;
recreate_tables -> ok;
keep_tables -> ok;
@@ -2588,12 +2691,12 @@ restore_items([Rec | Recs], Header, Schema, R) ->
case lists:keysearch(Tab, 1, R#r.tables) of
{value, {Tab, Where0, Snmp, RecName}} ->
Where = case Where0 of
- undefined ->
+ undefined ->
val({Tab, where_to_commit});
_ ->
Where0
end,
- {Rest, NRecs} = restore_tab_items([Rec | Recs], Tab,
+ {Rest, NRecs} = restore_tab_items([Rec | Recs], Tab,
RecName, Where, Snmp,
R#r.recs, R#r.insert_op),
restore_items(Rest, Header, Schema, R#r{recs = NRecs});
@@ -2601,12 +2704,12 @@ restore_items([Rec | Recs], Header, Schema, R) ->
Rest = skip_tab_items(Recs, Tab),
restore_items(Rest, Header, Schema, R)
end;
-
+
restore_items([], _Header, _Schema, R) ->
R.
restore_func(Tab, R) ->
- case lists:keysearch(Tab, 1, R#r.table_options) of
+ case lists:keysearch(Tab, 1, R#r.table_options) of
{value, {Tab, OP}} ->
OP;
false ->
@@ -2618,45 +2721,45 @@ where_to_commit(Tab, CsList) ->
Disc = [{N, disc_copies} || N <- pick(Tab, disc_copies, CsList, [])],
DiscO = [{N, disc_only_copies} || N <- pick(Tab, disc_only_copies, CsList, [])],
Ram ++ Disc ++ DiscO.
-
+
%% Changes of the Meta info of schema itself is not allowed
restore_schema([{schema, schema, _List} | Schema], R) ->
restore_schema(Schema, R);
restore_schema([{schema, Tab, List} | Schema], R) ->
case restore_func(Tab, R) of
- clear_tables ->
+ clear_tables ->
do_clear_table(Tab),
- Snmp = val({Tab, snmp}),
- RecName = val({Tab, record_name}),
+ Snmp = val({Tab, snmp}),
+ RecName = val({Tab, record_name}),
R2 = R#r{tables = [{Tab, undefined, Snmp, RecName} | R#r.tables]},
restore_schema(Schema, R2);
- recreate_tables ->
- case ?catch_val({Tab, cstruct}) of
- {'EXIT', _} ->
- TidTs = {_Mod, Tid, Ts} = get(mnesia_activity_state),
- RunningNodes = val({current, db_nodes}),
- Nodes = mnesia_lib:intersect(mnesia_lib:cs_to_nodes(list2cs(List)),
- RunningNodes),
- mnesia_locker:wlock_no_exist(Tid, Ts#tidstore.store, Tab, Nodes),
- TidTs;
- _ ->
- TidTs = get_tid_ts_and_lock(Tab, write)
- end,
+ recreate_tables ->
+ TidTs = case ?catch_val({Tab, cstruct}) of
+ {'EXIT', _} ->
+ TTs = {_Mod, Tid, Ts} = get(mnesia_activity_state),
+ RunningNodes = val({current, db_nodes}),
+ Nodes = mnesia_lib:intersect(mnesia_lib:cs_to_nodes(list2cs(List)),
+ RunningNodes),
+ mnesia_locker:wlock_no_exist(Tid, Ts#tidstore.store, Tab, Nodes),
+ TTs;
+ _ ->
+ get_tid_ts_and_lock(Tab, write)
+ end,
NC = {cookie, ?unique_cookie},
- List2 = lists:keyreplace(cookie, 1, List, NC),
+ List2 = lists:keyreplace(cookie, 1, List, NC),
Where = where_to_commit(Tab, List2),
Snmp = pick(Tab, snmp, List2, []),
RecName = pick(Tab, record_name, List2, Tab),
insert_schema_ops(TidTs, [{op, restore_recreate, List2}]),
R2 = R#r{tables = [{Tab, Where, Snmp, RecName} | R#r.tables]},
restore_schema(Schema, R2);
- keep_tables ->
+ keep_tables ->
get_tid_ts_and_lock(Tab, write),
Snmp = val({Tab, snmp}),
- RecName = val({Tab, record_name}),
+ RecName = val({Tab, record_name}),
R2 = R#r{tables = [{Tab, undefined, Snmp, RecName} | R#r.tables]},
restore_schema(Schema, R2);
- skip_tables ->
+ skip_tables ->
restore_schema(Schema, R)
end;
@@ -2667,7 +2770,7 @@ restore_schema([{schema, Tab} | Schema], R) ->
restore_schema([], R) ->
R.
-restore_tab_items([Rec | Rest], Tab, RecName, Where, Snmp, Recs, Op)
+restore_tab_items([Rec | Rest], Tab, RecName, Where, Snmp, Recs, Op)
when element(1, Rec) == Tab ->
NewRecs = Op(Rec, Recs, RecName, Where, Snmp),
restore_tab_items(Rest, Tab, RecName, Where, Snmp, NewRecs, Op);
@@ -2675,7 +2778,7 @@ restore_tab_items([Rec | Rest], Tab, RecName, Where, Snmp, Recs, Op)
restore_tab_items(Rest, _Tab, _RecName, _Where, _Snmp, Recs, _Op) ->
{Rest, Recs}.
-skip_tab_items([Rec| Rest], Tab)
+skip_tab_items([Rec| Rest], Tab)
when element(1, Rec) == Tab ->
skip_tab_items(Rest, Tab);
skip_tab_items(Recs, _) ->
@@ -2710,7 +2813,6 @@ merge_schema() ->
merge_schema(UserFun) ->
schema_transaction(fun() -> UserFun(fun(Arg) -> do_merge_schema(Arg) end) end).
-
do_merge_schema(LockTabs0) ->
{_Mod, Tid, Ts} = get_tid_ts_and_lock(schema, write),
LockTabs = [{T, tab_to_nodes(T)} || T <- LockTabs0],
@@ -2732,14 +2834,14 @@ do_merge_schema(LockTabs0) ->
[mnesia_locker:wlock_no_exist(
Tid, Store, T, mnesia_lib:intersect(Ns, OtherNodes))
|| {T,Ns} <- LockTabs],
- case rpc:call(Node, mnesia_controller, get_cstructs, []) of
+ case fetch_cstructs(Node) of
{cstructs, Cstructs, RemoteRunning1} ->
LockedAlready = Running ++ [Node],
{New, Old} = mnesia_recover:connect_nodes(RemoteRunning1),
RemoteRunning = mnesia_lib:intersect(New ++ Old, RemoteRunning1),
- if
+ if
RemoteRunning /= RemoteRunning1 ->
- mnesia_lib:error("Mnesia on ~p could not connect to node(s) ~p~n",
+ mnesia_lib:error("Mnesia on ~p could not connect to node(s) ~p~n",
[node(), RemoteRunning1 -- RemoteRunning]),
mnesia:abort({node_not_running, RemoteRunning1 -- RemoteRunning});
true -> ok
@@ -2749,24 +2851,24 @@ do_merge_schema(LockTabs0) ->
[mnesia_locker:wlock_no_exist(Tid, Store, T,
mnesia_lib:intersect(Ns,NeedsLock))
|| {T,Ns} <- LockTabs],
- {value, SchemaCs} =
- lists:keysearch(schema, #cstruct.name, Cstructs),
+ NeedsConversion = need_old_cstructs(NeedsLock ++ LockedAlready),
+ {value, SchemaCs} = lists:keysearch(schema, #cstruct.name, Cstructs),
+ SchemaDef = cs2list(NeedsConversion, SchemaCs),
%% Announce that Node is running
- A = [{op, announce_im_running, node(),
- cs2list(SchemaCs), Running, RemoteRunning}],
+ A = [{op, announce_im_running, node(), SchemaDef, Running, RemoteRunning}],
do_insert_schema_ops(Store, A),
-
+
%% Introduce remote tables to local node
- do_insert_schema_ops(Store, make_merge_schema(Node, Cstructs)),
-
+ do_insert_schema_ops(Store, make_merge_schema(Node, NeedsConversion, Cstructs)),
+
%% Introduce local tables to remote nodes
Tabs = val({schema, tables}),
Ops = [{op, merge_schema, get_create_list(T)}
|| T <- Tabs,
not lists:keymember(T, #cstruct.name, Cstructs)],
do_insert_schema_ops(Store, Ops),
-
+
%% Ensure that the txn will be committed on all nodes
NewNodes = RemoteRunning -- Running,
mnesia_lib:set(prepare_op, {announce_im_running,NewNodes}),
@@ -2782,19 +2884,54 @@ do_merge_schema(LockTabs0) ->
not_merged
end.
+fetch_cstructs(Node) ->
+ case need_old_cstructs([Node]) of
+ false ->
+ rpc:call(Node, mnesia_controller, get_remote_cstructs, []);
+ _Ver ->
+ case rpc:call(Node, mnesia_controller, get_cstructs, []) of
+ {cstructs, Cs0, RR} ->
+ {cstructs, [list2cs(cs2list(Cs)) || Cs <- Cs0], RR};
+ Err -> Err
+ end
+ end.
+
+need_old_cstructs() ->
+ need_old_cstructs(val({schema, where_to_write})).
+
+need_old_cstructs(Nodes) ->
+ Filter = fun(Node) -> not mnesia_monitor:needs_protocol_conversion(Node) end,
+ case lists:dropwhile(Filter, Nodes) of
+ [] -> false;
+ [Node|_] ->
+ case rpc:call(Node, mnesia_lib, val, [{schema,cstruct}]) of
+ #cstruct{} ->
+ %% mnesia_lib:warning("Mnesia on ~p do not need to convert cstruct (~p)~n",
+ %% [node(), Node]),
+ false;
+ {badrpc, _} ->
+ need_old_cstructs(lists:delete(Node,Nodes));
+ Cs when element(1, Cs) == cstruct, tuple_size(Cs) == 17 ->
+ ver4_4_18; % Without majority
+ Cs when element(1, Cs) == cstruct, tuple_size(Cs) == 18 ->
+ ver4_4_19; % With majority
+ Cs when element(1, Cs) == cstruct, tuple_size(Cs) == 19 ->
+ ver4_6 % With storage_properties
+ end
+ end.
+
tab_to_nodes(Tab) when is_atom(Tab) ->
Cs = val({Tab, cstruct}),
mnesia_lib:cs_to_nodes(Cs).
-make_merge_schema(Node, [Cs | Cstructs]) ->
- Ops = do_make_merge_schema(Node, Cs),
- Ops ++ make_merge_schema(Node, Cstructs);
-make_merge_schema(_Node, []) ->
+make_merge_schema(Node, NeedsConv, [Cs | Cstructs]) ->
+ Ops = do_make_merge_schema(Node, NeedsConv, Cs),
+ Ops ++ make_merge_schema(Node, NeedsConv, Cstructs);
+make_merge_schema(_Node, _, []) ->
[].
%% Merge definitions of schema table
-do_make_merge_schema(Node, RemoteCs)
- when RemoteCs#cstruct.name == schema ->
+do_make_merge_schema(Node, NeedsConv, RemoteCs = #cstruct{name = schema}) ->
Cs = val({schema, cstruct}),
Masters = mnesia_recover:get_master_nodes(schema),
HasRemoteMaster = lists:member(Node, Masters),
@@ -2804,15 +2941,15 @@ do_make_merge_schema(Node, RemoteCs)
StCsLocal = mnesia_lib:cs_to_storage_type(node(), Cs),
StRcsLocal = mnesia_lib:cs_to_storage_type(node(), RemoteCs),
StCsRemote = mnesia_lib:cs_to_storage_type(Node, Cs),
- StRcsRemote = mnesia_lib:cs_to_storage_type(Node, RemoteCs),
-
+ StRcsRemote = mnesia_lib:cs_to_storage_type(Node, RemoteCs),
+
if
Cs#cstruct.cookie == RemoteCs#cstruct.cookie,
Cs#cstruct.version == RemoteCs#cstruct.version ->
%% Great, we have the same cookie and version
%% and do not need to merge cstructs
[];
-
+
Cs#cstruct.cookie /= RemoteCs#cstruct.cookie,
Cs#cstruct.disc_copies /= [],
RemoteCs#cstruct.disc_copies /= [] ->
@@ -2823,14 +2960,14 @@ do_make_merge_schema(Node, RemoteCs)
HasRemoteMaster == false ->
%% Choose local cstruct,
%% since it's the master
- [{op, merge_schema, cs2list(Cs)}];
+ [{op, merge_schema, cs2list(NeedsConv, Cs)}];
HasRemoteMaster == true,
HasLocalMaster == false ->
%% Choose remote cstruct,
%% since it's the master
- [{op, merge_schema, cs2list(RemoteCs)}];
-
+ [{op, merge_schema, cs2list(NeedsConv, RemoteCs)}];
+
true ->
Str = io_lib:format("Incompatible schema cookies. "
"Please, restart from old backup."
@@ -2838,12 +2975,12 @@ do_make_merge_schema(Node, RemoteCs)
[Node, cs2list(RemoteCs), node(), cs2list(Cs)]),
throw(Str)
end;
-
+
StCsLocal /= StRcsLocal, StRcsLocal /= unknown, StCsLocal /= ram_copies ->
Str = io_lib:format("Incompatible schema storage types (local). "
"on ~w storage ~w, on ~w storage ~w~n",
[node(), StCsLocal, Node, StRcsLocal]),
- throw(Str);
+ throw(Str);
StCsRemote /= StRcsRemote, StCsRemote /= unknown, StRcsRemote /= ram_copies ->
Str = io_lib:format("Incompatible schema storage types (remote). "
"on ~w cs ~w, on ~w rcs ~w~n",
@@ -2854,27 +2991,27 @@ do_make_merge_schema(Node, RemoteCs)
%% Choose local cstruct,
%% since it involves disc nodes
MergedCs = merge_cstructs(Cs, RemoteCs, Force),
- [{op, merge_schema, cs2list(MergedCs)}];
-
+ [{op, merge_schema, cs2list(NeedsConv, MergedCs)}];
+
RemoteCs#cstruct.disc_copies /= [] ->
%% Choose remote cstruct,
%% since it involves disc nodes
MergedCs = merge_cstructs(RemoteCs, Cs, Force),
- [{op, merge_schema, cs2list(MergedCs)}];
+ [{op, merge_schema, cs2list(NeedsConv, MergedCs)}];
Cs > RemoteCs ->
%% Choose remote cstruct
MergedCs = merge_cstructs(RemoteCs, Cs, Force),
- [{op, merge_schema, cs2list(MergedCs)}];
-
+ [{op, merge_schema, cs2list(NeedsConv, MergedCs)}];
+
true ->
%% Choose local cstruct
MergedCs = merge_cstructs(Cs, RemoteCs, Force),
- [{op, merge_schema, cs2list(MergedCs)}]
+ [{op, merge_schema, cs2list(NeedsConv, MergedCs)}]
end;
%% Merge definitions of normal table
-do_make_merge_schema(Node, RemoteCs) ->
+do_make_merge_schema(Node, NeedsConv, RemoteCs = #cstruct{}) ->
Tab = RemoteCs#cstruct.name,
Masters = mnesia_recover:get_master_nodes(schema),
HasRemoteMaster = lists:member(Node, Masters),
@@ -2883,27 +3020,27 @@ do_make_merge_schema(Node, RemoteCs) ->
case ?catch_val({Tab, cstruct}) of
{'EXIT', _} ->
%% A completely new table, created while Node was down
- [{op, merge_schema, cs2list(RemoteCs)}];
+ [{op, merge_schema, cs2list(NeedsConv, RemoteCs)}];
Cs when Cs#cstruct.cookie == RemoteCs#cstruct.cookie ->
if
Cs#cstruct.version == RemoteCs#cstruct.version ->
%% We have exactly the same version of the
%% table def
[];
-
+
Cs#cstruct.version > RemoteCs#cstruct.version ->
%% Oops, we have different versions
%% of the table def, lets merge them.
%% The only changes that may have occurred
%% is that new replicas may have been added.
MergedCs = merge_cstructs(Cs, RemoteCs, Force),
- [{op, merge_schema, cs2list(MergedCs)}];
-
+ [{op, merge_schema, cs2list(NeedsConv, MergedCs)}];
+
Cs#cstruct.version < RemoteCs#cstruct.version ->
%% Oops, we have different versions
%% of the table def, lets merge them
MergedCs = merge_cstructs(RemoteCs, Cs, Force),
- [{op, merge_schema, cs2list(MergedCs)}]
+ [{op, merge_schema, cs2list(NeedsConv, MergedCs)}]
end;
Cs ->
%% Different cookies, not possible to merge
@@ -2912,14 +3049,14 @@ do_make_merge_schema(Node, RemoteCs) ->
HasRemoteMaster == false ->
%% Choose local cstruct,
%% since it's the master
- [{op, merge_schema, cs2list(Cs)}];
+ [{op, merge_schema, cs2list(NeedsConv, Cs)}];
HasRemoteMaster == true,
HasLocalMaster == false ->
%% Choose remote cstruct,
%% since it's the master
- [{op, merge_schema, cs2list(RemoteCs)}];
-
+ [{op, merge_schema, cs2list(NeedsConv, RemoteCs)}];
+
true ->
Str = io_lib:format("Bad cookie in table definition"
" ~w: ~w = ~w, ~w = ~w~n",
@@ -2989,7 +3126,7 @@ compare_storage_type(true, One, Another) ->
compare_storage_type(false, Another, One);
compare_storage_type(false, _One, _Another) ->
incompatible.
-
+
change_storage_type(N, ram_copies, Cs) ->
Nodes = [N | Cs#cstruct.ram_copies],
Cs#cstruct{ram_copies = mnesia_lib:uniq(Nodes)};
@@ -3071,14 +3208,14 @@ verify_merge(RemoteCs) ->
if
StCsLocal == StRcsLocal -> ok;
StCsLocal == unknown -> ok;
- (StRcsLocal == unknown), (HasRemoteMaster == false) ->
+ (StRcsLocal == unknown), (HasRemoteMaster == false) ->
{merge_error, Cs, RemoteCs};
%% Trust the merger
true -> ok
end
end.
-announce_im_running([N | Ns], SchemaCs) ->
+announce_im_running([N | Ns], SchemaCs) ->
{L1, L2} = mnesia_recover:connect_nodes([N]),
case lists:member(N, L1) or lists:member(N, L2) of
true ->
@@ -3095,7 +3232,7 @@ announce_im_running([], _) ->
unannounce_im_running([N | Ns]) ->
mnesia_lib:del({current, db_nodes}, N),
- mnesia_controller:del_active_replica(schema, N),
+ mnesia_controller:del_active_replica(schema, N),
unannounce_im_running(Ns);
unannounce_im_running([]) ->
ok.
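
%% --- Note (not part of the patch): the need_old_cstructs/fetch_cstructs code
%% added above decides, per remote node, whether #cstruct{} records must be
%% down-converted to an older layout before schemas are merged, by probing the
%% size of the remote node's schema cstruct. A minimal sketch of that probe,
%% assuming only the mnesia_lib:val/1 and rpc:call/4 calls already shown in the
%% hunk (module and function names here are illustrative, not from the patch):
%%
%%   -module(cstruct_probe).   % hypothetical helper, for illustration only
%%   -export([remote_cstruct_version/1]).
%%
%%   %% Returns an old-version tag when the remote node still uses a smaller
%%   %% cstruct record, or 'current' when no conversion is needed.
%%   remote_cstruct_version(Node) ->
%%       case rpc:call(Node, mnesia_lib, val, [{schema, cstruct}]) of
%%           {badrpc, _} = Err ->
%%               Err;                        % node unreachable; caller decides
%%           Cs when element(1, Cs) == cstruct ->
%%               case tuple_size(Cs) of
%%                   17 -> ver4_4_18;        % without the majority field
%%                   18 -> ver4_4_19;        % with the majority field
%%                   19 -> ver4_6;           % with storage_properties
%%                   _  -> current           % same record layout as this node
%%               end
%%       end.
%%
%% This mirrors (in simplified form) why do_make_merge_schema/3 and
%% fetch_cstructs/1 above thread a NeedsConversion value into cs2list/2.
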
diff --git a/lib/mnesia/src/mnesia_tm.erl b/lib/mnesia/src/mnesia_tm.erl
index f62f7cb7c8..0af7f55c06 100644
--- a/lib/mnesia/src/mnesia_tm.erl
+++ b/lib/mnesia/src/mnesia_tm.erl
@@ -36,7 +36,7 @@
prepare_checkpoint/2,
prepare_checkpoint/1, % Internal
prepare_snmp/3,
- do_snmp/2,
+ do_snmp/2,
put_activity_id/1,
put_activity_id/2,
block_tab/1,
@@ -68,7 +68,7 @@
majority = []
}).
--record(participant, {tid, pid, commit, disc_nodes = [],
+-record(participant, {tid, pid, commit, disc_nodes = [],
ram_nodes = [], protocol = sym_trans}).
start() ->
@@ -77,12 +77,12 @@ start() ->
init(Parent) ->
register(?MODULE, self()),
process_flag(trap_exit, true),
-
+
%% Initialize the schema
IgnoreFallback = mnesia_monitor:get_env(ignore_fallback_at_startup),
mnesia_bup:tm_fallback_start(IgnoreFallback),
mnesia_schema:init(IgnoreFallback),
-
+
%% Handshake and initialize transaction recovery
mnesia_recover:init(),
Early = mnesia_monitor:init(),
@@ -101,11 +101,11 @@ init(Parent) ->
false ->
ignore
end,
-
+
mnesia_schema:purge_tmp_files(),
mnesia_recover:start_garb(),
-
- ?eval_debug_fun({?MODULE, init}, [{nodes, AllOthers}]),
+
+ ?eval_debug_fun({?MODULE, init}, [{nodes, AllOthers}]),
case val(debug) of
Debug when Debug /= debug, Debug /= trace ->
@@ -118,8 +118,8 @@ init(Parent) ->
val(Var) ->
case ?catch_val(Var) of
- {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
- _VaLuE_ -> _VaLuE_
+ {'EXIT', _ReASoN_} -> mnesia_lib:other_val(Var, _ReASoN_);
+ _VaLuE_ -> _VaLuE_
end.
reply({From,Ref}, R) ->
@@ -136,7 +136,7 @@ req(R) ->
undefined ->
{error, {node_not_running, node()}};
Pid ->
- Ref = make_ref(),
+ Ref = make_ref(),
Pid ! {{self(), Ref}, R},
rec(Pid, Ref)
end.
@@ -161,7 +161,7 @@ rec(Pid, Ref) ->
Reply;
{'EXIT', Pid, _} ->
{error, {node_not_running, node()}}
- end.
+ end.
tmlink({From, Ref}) when is_reference(Ref) ->
link(From);
@@ -209,7 +209,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
State2 = State#state{dirty_queue = [Item | State#state.dirty_queue]},
doit_loop(State2)
end;
-
+
{From, {sync_dirty, Tid, Commit, Tab}} ->
case lists:member(Tab, State#state.blocked_tabs) of
false ->
@@ -220,7 +220,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
State2 = State#state{dirty_queue = [Item | State#state.dirty_queue]},
doit_loop(State2)
end;
-
+
{From, start_outer} -> %% Create and associate ets_tab with Tid
case catch ?ets_new_table(mnesia_trans_store, [bag, public]) of
{'EXIT', Reason} -> %% system limit
@@ -236,16 +236,16 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
S2 = State#state{coordinators = A2},
reply(From, {new_tid, Tid, Etab}, S2)
end;
-
+
{From, {ask_commit, Protocol, Tid, Commit, DiscNs, RamNs}} ->
- ?eval_debug_fun({?MODULE, doit_ask_commit},
+ ?eval_debug_fun({?MODULE, doit_ask_commit},
[{tid, Tid}, {prot, Protocol}]),
mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
- Pid =
+ Pid =
case Protocol of
asym_trans when node(Tid#tid.pid) /= node() ->
Args = [tmpid(From), Tid, Commit, DiscNs, RamNs],
- spawn_link(?MODULE, commit_participant, Args);
+ spawn_link(?MODULE, commit_participant, Args);
_ when node(Tid#tid.pid) /= node() -> %% *_sym_trans
reply(From, {vote_yes, Tid}),
nopid
@@ -258,7 +258,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
protocol = Protocol},
State2 = State#state{participants = gb_trees:insert(Tid,P,Participants)},
doit_loop(State2);
-
+
{Tid, do_commit} ->
case gb_trees:lookup(Tid, Participants) of
none ->
@@ -272,14 +272,14 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
Member = lists:member(node(), P#participant.disc_nodes),
if Member == false ->
ignore;
- P#participant.protocol == sym_trans ->
+ P#participant.protocol == sym_trans ->
mnesia_log:log(Commit);
- P#participant.protocol == sync_sym_trans ->
+ P#participant.protocol == sync_sym_trans ->
mnesia_log:slog(Commit)
end,
mnesia_recover:note_decision(Tid, committed),
do_commit(Tid, Commit),
- if
+ if
P#participant.protocol == sync_sym_trans ->
Tid#tid.pid ! {?MODULE, node(), {committed, Tid}};
true ->
@@ -296,13 +296,13 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
doit_loop(State)
end
end;
-
+
{Tid, simple_commit} ->
mnesia_recover:note_decision(Tid, committed),
mnesia_locker:release_tid(Tid),
transaction_terminated(Tid),
doit_loop(State);
-
+
{Tid, {do_abort, Reason}} ->
?eval_debug_fun({?MODULE, do_abort, pre}, [{tid, Tid}]),
case gb_trees:lookup(Tid, Participants) of
@@ -317,7 +317,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
Commit = P#participant.commit,
mnesia_recover:note_decision(Tid, aborted),
do_abort(Tid, Commit),
- if
+ if
P#participant.protocol == sync_sym_trans ->
Tid#tid.pid ! {?MODULE, node(), {aborted, Tid}};
true ->
@@ -335,7 +335,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
doit_loop(State)
end
end;
-
+
{From, {add_store, Tid}} -> %% new store for nested transaction
case catch ?ets_new_table(mnesia_trans_store, [bag, public]) of
{'EXIT', Reason} -> %% system limit
@@ -355,14 +355,14 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
{'EXIT', Pid, Reason} ->
handle_exit(Pid, Reason, State);
-
+
{From, {restart, Tid, Store}} ->
A2 = restore_stores(Coordinators, Tid, Store),
clear_fixtable([Store]),
?ets_match_delete(Store, '_'),
?ets_insert(Store, {nodes, node()}),
reply(From, {restarted, Tid}, State#state{coordinators = A2});
-
+
{delete_transaction, Tid} ->
%% used to clear transactions which are committed
%% in coordinator or participant processes
@@ -377,7 +377,7 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
clear_fixtable(Etabs),
erase_ets_tabs(Etabs),
transaction_terminated(Tid),
- doit_loop(State#state{coordinators =
+ doit_loop(State#state{coordinators =
gb_trees:delete(Tid,Coordinators)})
end;
true ->
@@ -385,20 +385,20 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
State2 = State#state{participants=gb_trees:delete(Tid,Participants)},
doit_loop(State2)
end;
-
+
{sync_trans_serial, Tid} ->
%% Do the Lamport thing here
mnesia_recover:sync_trans_tid_serial(Tid),
doit_loop(State);
-
+
{From, info} ->
- reply(From, {info, gb_trees:values(Participants),
+ reply(From, {info, gb_trees:values(Participants),
gb_trees:to_list(Coordinators)}, State);
-
+
{mnesia_down, N} ->
verbose("Got mnesia_down from ~p, reconfiguring...~n", [N]),
reconfigure_coordinators(N, gb_trees:to_list(Coordinators)),
-
+
Tids = gb_trees:keys(Participants),
reconfigure_participants(N, gb_trees:values(Participants)),
NewState = clear_fixtable(N, State),
@@ -408,34 +408,34 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
{From, {unblock_me, Tab}} ->
case lists:member(Tab, State#state.blocked_tabs) of
false ->
- verbose("Wrong dirty Op blocked on ~p ~p ~p",
+ verbose("Wrong dirty Op blocked on ~p ~p ~p",
[node(), Tab, From]),
reply(From, unblocked),
doit_loop(State);
true ->
- Item = {Tab, unblock_me, From},
+ Item = {Tab, unblock_me, From},
State2 = State#state{dirty_queue = [Item | State#state.dirty_queue]},
doit_loop(State2)
- end;
-
+ end;
+
{From, {block_tab, Tab}} ->
State2 = State#state{blocked_tabs = [Tab | State#state.blocked_tabs]},
reply(From, ok, State2);
-
+
{From, {unblock_tab, Tab}} ->
BlockedTabs2 = State#state.blocked_tabs -- [Tab],
case lists:member(Tab, BlockedTabs2) of
false ->
mnesia_controller:unblock_table(Tab),
Queue = process_dirty_queue(Tab, State#state.dirty_queue),
- State2 = State#state{blocked_tabs = BlockedTabs2,
+ State2 = State#state{blocked_tabs = BlockedTabs2,
dirty_queue = Queue},
reply(From, ok, State2);
true ->
State2 = State#state{blocked_tabs = BlockedTabs2},
reply(From, ok, State2)
end;
-
+
{From, {prepare_checkpoint, Cp}} ->
Res = mnesia_checkpoint:tm_prepare(Cp),
case Res of
@@ -448,18 +448,18 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
reply(From, Res, State);
{From, {fixtable, [Tab,Lock,Requester]}} ->
case ?catch_val({Tab, storage_type}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
reply(From, error, State);
Storage ->
mnesia_lib:db_fixtable(Storage,Tab,Lock),
NewState = manage_fixtable(Tab,Lock,Requester,State),
reply(From, node(), NewState)
end;
-
+
{system, From, Msg} ->
dbg_out("~p got {system, ~p, ~p}~n", [?MODULE, From, Msg]),
sys:handle_system_msg(Msg, From, Sup, ?MODULE, [], State);
-
+
Msg ->
verbose("** ERROR ** ~p got unexpected message: ~p~n", [?MODULE, Msg]),
doit_loop(State)
@@ -508,7 +508,7 @@ prepare_pending_coordinators([{Tid, [Store | _Etabs]} | Coords], IgnoreNew) ->
ignore
end,
prepare_pending_coordinators(Coords, IgnoreNew);
- {'EXIT', _} ->
+ {'EXIT', _} ->
prepare_pending_coordinators(Coords, IgnoreNew)
end;
prepare_pending_coordinators([], _IgnoreNew) ->
@@ -538,7 +538,7 @@ handle_exit(Pid, _Reason, State) when Pid == State#state.supervisor ->
handle_exit(Pid, Reason, State) ->
%% Check if it is a coordinator
- case pid_search_delete(Pid, gb_trees:to_list(State#state.coordinators)) of
+ case pid_search_delete(Pid, gb_trees:to_list(State#state.coordinators)) of
{none, _} ->
%% Check if it is a participant
Ps = gb_trees:values(State#state.participants),
@@ -552,9 +552,9 @@ handle_exit(Pid, Reason, State) ->
NewPs = gb_trees:delete(P#participant.tid,State#state.participants),
doit_loop(State#state{participants = NewPs})
end;
-
+
{{Tid, Etabs}, RestC} ->
- %% A local coordinator has died and
+ %% A local coordinator has died and
%% we must determine the outcome of the
%% transaction and tell mnesia_tm on the
%% other nodes about it and then recover
@@ -578,7 +578,7 @@ recover_coordinator(Tid, Etabs) ->
%% Tell the participants about the outcome
Protocol = Prep#prep.protocol,
Outcome = tell_outcome(Tid, Protocol, node(), CheckNodes, TellNodes),
-
+
%% Recover locally
CR = Prep#prep.records,
{DiscNs, RamNs} = commit_nodes(CR, [], []),
@@ -589,7 +589,7 @@ recover_coordinator(Tid, Etabs) ->
recover_coordinator(Tid, Protocol, Outcome, Local, DiscNs, RamNs),
?eval_debug_fun({?MODULE, recover_coordinator, post},
[{tid, Tid}, {outcome, Outcome}, {prot, Protocol}]);
- false -> %% When killed before store havn't been copied to
+ false -> %% When killed before store havn't been copied to
ok %% to the new nested trans store.
end
end,
@@ -610,12 +610,12 @@ recover_coordinator(Tid, sync_sym_trans, aborted, _Local, _, _) ->
recover_coordinator(Tid, asym_trans, committed, Local, DiscNs, RamNs) ->
D = #decision{tid = Tid, outcome = committed,
- disc_nodes = DiscNs, ram_nodes = RamNs},
+ disc_nodes = DiscNs, ram_nodes = RamNs},
mnesia_recover:log_decision(D),
do_commit(Tid, Local);
recover_coordinator(Tid, asym_trans, aborted, Local, DiscNs, RamNs) ->
D = #decision{tid = Tid, outcome = aborted,
- disc_nodes = DiscNs, ram_nodes = RamNs},
+ disc_nodes = DiscNs, ram_nodes = RamNs},
mnesia_recover:log_decision(D),
do_abort(Tid, Local).
@@ -631,7 +631,7 @@ add_coord_store(Coords, Tid, Etab) ->
del_coord_store(Coords, Tid, Current, Obsolete) ->
Stores = gb_trees:get(Tid, Coords),
- Rest =
+ Rest =
case Stores of
[Obsolete, Current | Tail] -> Tail;
[Current, Obsolete | Tail] -> Tail
@@ -642,14 +642,14 @@ del_coord_store(Coords, Tid, Current, Obsolete) ->
erase_ets_tabs([H | T]) ->
?ets_delete_table(H),
erase_ets_tabs(T);
-erase_ets_tabs([]) ->
+erase_ets_tabs([]) ->
ok.
%% Clear one transactions all fixtables
clear_fixtable([Store|_]) ->
Fixed = get_elements(fixtable, Store),
lists:foreach(fun({Tab,Node}) ->
- rpc:cast(Node, ?MODULE, fixtable, [Tab,false,self()])
+ rpc:cast(Node, ?MODULE, fixtable, [Tab,false,self()])
end, Fixed).
%% Clear all fixtable Node have done
@@ -661,7 +661,7 @@ clear_fixtable(Node, State=#state{fixed_tabs = FT0}) ->
lists:foreach(
fun(Tab) ->
case ?catch_val({Tab, storage_type}) of
- {'EXIT', _} ->
+ {'EXIT', _} ->
ignore;
Storage ->
mnesia_lib:db_fixtable(Storage,Tab,false)
@@ -680,9 +680,9 @@ manage_fixtable(Tab,true,Requester,State=#state{fixed_tabs = FT0}) ->
end;
manage_fixtable(Tab,false,Requester,State = #state{fixed_tabs = FT0}) ->
Node = node(Requester),
- case mnesia_lib:key_search_delete(Node, 1, FT0) of
+ case mnesia_lib:key_search_delete(Node, 1, FT0) of
{none,_FT} -> State; % Hmm? Safeguard
- {{Node, Tabs0},FT} ->
+ {{Node, Tabs0},FT} ->
case lists:delete(Tab, Tabs0) of
[] -> State#state{fixed_tabs=FT};
Tabs -> State#state{fixed_tabs=[{Node,Tabs}|FT]}
@@ -691,7 +691,7 @@ manage_fixtable(Tab,false,Requester,State = #state{fixed_tabs = FT0}) ->
%% Deletes a pid from a list of participants
%% or from a gb_trees of coordinators
-%% {none, All} or {Tr, Rest}
+%% {none, All} or {Tr, Rest}
pid_search_delete(Pid, Trs) ->
pid_search_delete(Pid, Trs, none, []).
pid_search_delete(Pid, [Tr = {Tid, _Ts} | Trs], _Val, Ack) when Tid#tid.pid == Pid ->
@@ -701,7 +701,7 @@ pid_search_delete(Pid, [Tr | Trs], Val, Ack) ->
pid_search_delete(_Pid, [], Val, Ack) ->
{Val, gb_trees:from_orddict(lists:reverse(Ack))}.
-
+
transaction_terminated(Tid) ->
mnesia_checkpoint:tm_exit_pending(Tid),
Pid = Tid#tid.pid,
@@ -713,14 +713,14 @@ transaction_terminated(Tid) ->
end.
%% If there are an surrounding transaction, we inherit it's context
-non_transaction(OldState={_,_,Trans}, Fun, Args, ActivityKind, Mod)
+non_transaction(OldState={_,_,Trans}, Fun, Args, ActivityKind, Mod)
when Trans /= non_transaction ->
- Kind = case ActivityKind of
+ Kind = case ActivityKind of
sync_dirty -> sync;
_ -> async
end,
case transaction(OldState, Fun, Args, infinity, Mod, Kind) of
- {atomic, Res} ->
+ {atomic, Res} ->
Res;
{aborted,Res} ->
exit(Res)
@@ -766,7 +766,7 @@ transaction(OldTidTs, Fun, Args, Retries, Mod, Type) ->
execute_outer(Mod, Fun, Args, Factor, Retries, Type) ->
case req(start_outer) of
- {error, Reason} ->
+ {error, Reason} ->
{aborted, Reason};
{new_tid, Tid, Store} ->
Ts = #tidstore{store = Store},
@@ -792,7 +792,7 @@ execute_inner(Mod, Tid, OldMod, Ts, Fun, Args, Factor, Retries, Type) ->
copy_ets(From, To) ->
do_copy_ets(?ets_first(From), From, To).
-do_copy_ets('$end_of_table', _,_) ->
+do_copy_ets('$end_of_table', _,_) ->
ok;
do_copy_ets(K, From, To) ->
Objs = ?ets_lookup(From, K),
@@ -813,7 +813,7 @@ execute_transaction(Fun, Args, Factor, Retries, Type) ->
mnesia_lib:incr_counter(trans_commits),
erase(mnesia_activity_state),
%% no need to clear locks, already done by commit ...
- %% Flush any un processed mnesia_down messages we might have
+ %% Flush any un processed mnesia_down messages we might have
flush_downs(),
catch unlink(whereis(?MODULE)),
{atomic, Value};
@@ -846,7 +846,7 @@ check_exit(Fun, Args, Factor, Retries, Reason, Type) ->
maybe_restart(Fun, Args, Factor, Retries, Type, {node_not_running, N});
{aborted, {bad_commit, N}} ->
maybe_restart(Fun, Args, Factor, Retries, Type, {bad_commit, N});
- _ ->
+ _ ->
return_abort(Fun, Args, Reason)
end.
@@ -888,11 +888,11 @@ restart(Mod, Tid, Ts, Fun, Args, Factor0, Retries0, Type, Why) ->
SleepTime = mnesia_lib:random_time(Factor, Tid#tid.counter),
dbg_out("Restarting transaction ~w: in ~wms ~w~n", [Tid, SleepTime, Why]),
timer:sleep(SleepTime),
- execute_outer(Mod, Fun, Args, Factor, Retries, Type);
+ execute_outer(Mod, Fun, Args, Factor, Retries, Type);
_ ->
SleepTime = mnesia_lib:random_time(Factor0, Tid#tid.counter),
dbg_out("Restarting transaction ~w: in ~wms ~w~n", [Tid, SleepTime, Why]),
-
+
if
Factor0 /= 10 ->
ignore;
@@ -911,7 +911,7 @@ restart(Mod, Tid, Ts, Fun, Args, Factor0, Retries0, Type, Why) ->
mnesia_locker:receive_release_tid_acc(Nodes, Tid),
case get_restarted(Tid) of
{restarted, Tid} ->
- execute_transaction(Fun, Args, Factor0 + 1,
+ execute_transaction(Fun, Args, Factor0 + 1,
Retries, Type);
{error, Reason} ->
mnesia:abort(Reason)
@@ -934,7 +934,7 @@ decr(_X) -> 0.
return_abort(Fun, Args, Reason) ->
{_Mod, Tid, Ts} = get(mnesia_activity_state),
- dbg_out("Transaction ~p calling ~p with ~p failed: ~n ~p~n",
+ dbg_out("Transaction ~p calling ~p with ~p failed: ~n ~p~n",
[Tid, Fun, Args, Reason]),
OldStore = Ts#tidstore.store,
Nodes = get_elements(nodes, OldStore),
@@ -945,7 +945,7 @@ return_abort(Fun, Args, Reason) ->
Level == 1 ->
mnesia_locker:async_release_tid(Nodes, Tid),
?MODULE ! {delete_transaction, Tid},
- erase(mnesia_activity_state),
+ erase(mnesia_activity_state),
flush_downs(),
catch unlink(whereis(?MODULE)),
{aborted, mnesia_lib:fix_error(Reason)};
@@ -958,14 +958,14 @@ return_abort(Fun, Args, Reason) ->
level = Level - 1},
NewTidTs = {OldMod, Tid, Ts2},
put(mnesia_activity_state, NewTidTs),
- case Reason of
+ case Reason of
#cyclic{} ->
exit({aborted, Reason});
- {node_not_running, _N} ->
+ {node_not_running, _N} ->
exit({aborted, Reason});
- {bad_commit, _N}->
+ {bad_commit, _N}->
exit({aborted, Reason});
- _ ->
+ _ ->
{aborted, mnesia_lib:fix_error(Reason)}
end
end.
@@ -982,10 +982,10 @@ put_activity_id(MTT) ->
put_activity_id(MTT, undefined).
put_activity_id(undefined,_) ->
erase_activity_id();
-put_activity_id({Mod, Tid = #tid{}, Ts = #tidstore{}},Fun) ->
+put_activity_id({Mod, Tid = #tid{}, Ts = #tidstore{}},Fun) ->
flush_downs(),
Store = Ts#tidstore.store,
- if
+ if
is_function(Fun) ->
?ets_insert(Store, {friends, {stop,Fun}});
true ->
@@ -1000,14 +1000,14 @@ erase_activity_id() ->
flush_downs(),
erase(mnesia_activity_state).
-get_elements(Type,Store) ->
+get_elements(Type,Store) ->
case catch ?ets_lookup(Store, Type) of
[] -> [];
[{_,Val}] -> [Val];
{'EXIT', _} -> [];
Vals -> [Val|| {_,Val} <- Vals]
end.
-
+
opt_propagate_store(_Current, _Obsolete, false) ->
ok;
opt_propagate_store(Current, Obsolete, true) ->
@@ -1030,8 +1030,8 @@ intercept_best_friend([],_) -> ok;
intercept_best_friend([{stop,Fun} | R],Ignore) ->
catch Fun(),
intercept_best_friend(R,Ignore);
-intercept_best_friend([Pid | R],false) ->
- Pid ! {activity_ended, undefined, self()},
+intercept_best_friend([Pid | R],false) ->
+ Pid ! {activity_ended, undefined, self()},
wait_for_best_friend(Pid, 0),
intercept_best_friend(R,true);
intercept_best_friend([_|R],true) ->
@@ -1047,18 +1047,18 @@ wait_for_best_friend(Pid, Timeout) ->
false -> ok
end
end.
-
+
my_process_is_alive(Pid) ->
case catch erlang:is_process_alive(Pid) of % New BIF in R5
- true ->
+ true ->
true;
- false ->
+ false ->
false;
- {'EXIT', _} -> % Pre R5 backward compatibility
+ {'EXIT', _} -> % Pre R5 backward compatibility
case process_info(Pid, message_queue_len) of
undefined -> false;
_ -> true
- end
+ end
end.
dirty(Protocol, Item) ->
@@ -1070,12 +1070,12 @@ dirty(Protocol, Item) ->
async_dirty ->
%% Send commit records to the other involved nodes,
%% but do only wait for one node to complete.
- %% Preferrably, the local node if possible.
-
+ %% Preferrably, the local node if possible.
+
ReadNode = val({Tab, where_to_read}),
{WaitFor, FirstRes} = async_send_dirty(Tid, CR, Tab, ReadNode),
rec_dirty(WaitFor, FirstRes);
-
+
sync_dirty ->
%% Send commit records to the other involved nodes,
%% and wait for all nodes to complete
@@ -1097,7 +1097,7 @@ t_commit(Type) ->
if
Ts#tidstore.level == 1 ->
intercept_friends(Tid, Ts),
- %% N is number of updates
+ %% N is number of updates
case arrange(Tid, Store, Type) of
{N, Prep} when N > 0 ->
multi_commit(Prep#prep.protocol,
@@ -1135,8 +1135,8 @@ arrange(Tid, Store, Type) ->
Recs = prep_recs(Nodes, []),
Key = ?ets_first(Store),
N = 0,
- Prep =
- case Type of
+ Prep =
+ case Type of
async -> #prep{protocol = sym_trans, records = Recs};
sync -> #prep{protocol = sync_sym_trans, records = Recs}
end,
@@ -1146,7 +1146,7 @@ arrange(Tid, Store, Type) ->
case Reason of
{aborted, R} ->
mnesia:abort(R);
- _ ->
+ _ ->
mnesia:abort(Reason)
end;
{New, Prepared} ->
@@ -1155,7 +1155,7 @@ arrange(Tid, Store, Type) ->
reverse([]) ->
[];
-reverse([H=#commit{ram_copies=Ram, disc_copies=DC,
+reverse([H=#commit{ram_copies=Ram, disc_copies=DC,
disc_only_copies=DOC,snmp = Snmp}
|R]) ->
[
@@ -1164,7 +1164,7 @@ reverse([H=#commit{ram_copies=Ram, disc_copies=DC,
disc_copies = lists:reverse(DC),
disc_only_copies = lists:reverse(DOC),
snmp = lists:reverse(Snmp)
- }
+ }
| reverse(R)].
prep_recs([N | Nodes], Recs) ->
@@ -1191,7 +1191,7 @@ do_arrange(Tid, Store, RestoreKey, Prep, N) when RestoreKey == restore_op ->
(BupRec, CommitRecs, RecName, Where, Snmp) ->
Tab = element(1, BupRec),
Key = element(2, BupRec),
- Item =
+ Item =
if
Tab == RecName ->
[{{Tab, Key}, BupRec, write}];
@@ -1200,7 +1200,7 @@ do_arrange(Tid, Store, RestoreKey, Prep, N) when RestoreKey == restore_op ->
[{{Tab, Key}, BupRec2, write}]
end,
do_prepare_items(Tid, Tab, Key, Where, Snmp, Item, CommitRecs)
- end,
+ end,
Recs2 = mnesia_schema:arrange_restore(R, Fun, Prep#prep.records),
P2 = Prep#prep{protocol = asym_trans, records = Recs2},
do_arrange(Tid, Store, ?ets_next(Store, RestoreKey), P2, N + 1);
@@ -1222,20 +1222,20 @@ prepare_items(Tid, Tab, Key, Items, Prep) when Prep#prep.prev_tab == Tab ->
Recs = Prep#prep.records,
Recs2 = do_prepare_items(Tid, Tab, Key, Types, Snmp, Items, Recs),
Prep#prep{records = Recs2};
-
+
prepare_items(Tid, Tab, Key, Items, Prep) ->
Types = val({Tab, where_to_commit}),
case Types of
[] -> mnesia:abort({no_exists, Tab});
- {blocked, _} ->
+ {blocked, _} ->
unblocked = req({unblock_me, Tab}),
prepare_items(Tid, Tab, Key, Items, Prep);
_ ->
Majority = needs_majority(Tab, Prep),
Snmp = val({Tab, snmp}),
- Recs2 = do_prepare_items(Tid, Tab, Key, Types,
+ Recs2 = do_prepare_items(Tid, Tab, Key, Types,
Snmp, Items, Prep#prep.records),
- Prep2 = Prep#prep{records = Recs2, prev_tab = Tab,
+ Prep2 = Prep#prep{records = Recs2, prev_tab = Tab,
majority = Majority,
prev_types = Types, prev_snmp = Snmp},
check_prep(Prep2, Types)
@@ -1273,7 +1273,7 @@ have_majority([{Tab, AllNodes} | Rest], Nodes) ->
end.
prepare_snmp(Tab, Key, Items) ->
- case val({Tab, snmp}) of
+ case val({Tab, snmp}) of
[] ->
[];
Ustruct when Key /= '_' ->
@@ -1286,10 +1286,10 @@ prepare_snmp(Tab, Key, Items) ->
[{clear_table, Tab}]
end.
-prepare_snmp(_Tid, _Tab, _Key, _Types, [], _Items, Recs) ->
+prepare_snmp(_Tid, _Tab, _Key, _Types, [], _Items, Recs) ->
Recs;
-prepare_snmp(Tid, Tab, Key, Types, Us, Items, Recs) ->
+prepare_snmp(Tid, Tab, Key, Types, Us, Items, Recs) ->
if Key /= '_' ->
{_Oid, _Val, Op} = hd(Items),
SnmpOid = mnesia_snmp_hook:key_to_oid(Tab, Key, Us), % May exit
@@ -1334,7 +1334,7 @@ prepare_node(Node, Storage, [Item | Items], Rec, Kind) when Kind == snmp ->
Rec2 = Rec#commit{snmp = [Item | Rec#commit.snmp]},
prepare_node(Node, Storage, Items, Rec2, Kind);
prepare_node(Node, Storage, [Item | Items], Rec, Kind) when Kind /= schema ->
- Rec2 =
+ Rec2 =
case Storage of
ram_copies ->
Rec#commit{ram_copies = [Item | Rec#commit.ram_copies]};
@@ -1345,7 +1345,7 @@ prepare_node(Node, Storage, [Item | Items], Rec, Kind) when Kind /= schema ->
[Item | Rec#commit.disc_only_copies]}
end,
prepare_node(Node, Storage, Items, Rec2, Kind);
-prepare_node(_Node, _Storage, Items, Rec, Kind)
+prepare_node(_Node, _Storage, Items, Rec, Kind)
when Kind == schema, Rec#commit.schema_ops == [] ->
Rec#commit{schema_ops = Items};
prepare_node(_Node, _Storage, [], Rec, _Kind) ->
@@ -1354,7 +1354,7 @@ prepare_node(_Node, _Storage, [], Rec, _Kind) ->
%% multi_commit((Protocol, Tid, CommitRecords, Store)
%% Local work is always performed in users process
multi_commit(read_only, _Maj = [], Tid, CR, _Store) ->
- %% This featherweight commit protocol is used when no
+ %% This featherweight commit protocol is used when no
%% updates has been performed in the transaction.
{DiscNs, RamNs} = commit_nodes(CR, [], []),
@@ -1381,11 +1381,11 @@ multi_commit(sym_trans, _Maj = [], Tid, CR, Store) ->
%% perform the updates.
%%
%% The outcome is kept 3 minutes in the transient decision table.
- %%
+ %%
%% Recovery:
%% If somebody dies before the coordinator has
%% broadcasted do_commit, the transaction is aborted.
- %%
+ %%
%% If a participant dies, the table load algorithm
%% ensures that the contents of the involved tables
%% are picked from another node.
@@ -1394,15 +1394,15 @@ multi_commit(sym_trans, _Maj = [], Tid, CR, Store) ->
%% the outcome with all the others. If all are uncertain
%% about the outcome, the transaction is aborted. If
%% somebody knows the outcome the others will follow.
-
+
{DiscNs, RamNs} = commit_nodes(CR, [], []),
Pending = mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
?ets_insert(Store, Pending),
{WaitFor, Local} = ask_commit(sym_trans, Tid, CR, DiscNs, RamNs),
- {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
- ?eval_debug_fun({?MODULE, multi_commit_sym},
- [{tid, Tid}, {outcome, Outcome}]),
+ {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
+ ?eval_debug_fun({?MODULE, multi_commit_sym},
+ [{tid, Tid}, {outcome, Outcome}]),
rpc:abcast(DiscNs -- [node()], ?MODULE, {Tid, Outcome}),
rpc:abcast(RamNs -- [node()], ?MODULE, {Tid, Outcome}),
case Outcome of
@@ -1422,15 +1422,15 @@ multi_commit(sync_sym_trans, _Maj = [], Tid, CR, Store) ->
%% This protocol is the same as sym_trans except that it
%% uses syncronized calls to disk_log and syncronized commits
%% when several nodes are involved.
-
+
{DiscNs, RamNs} = commit_nodes(CR, [], []),
Pending = mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
?ets_insert(Store, Pending),
{WaitFor, Local} = ask_commit(sync_sym_trans, Tid, CR, DiscNs, RamNs),
- {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
- ?eval_debug_fun({?MODULE, multi_commit_sym_sync},
- [{tid, Tid}, {outcome, Outcome}]),
+ {Outcome, []} = rec_all(WaitFor, Tid, do_commit, []),
+ ?eval_debug_fun({?MODULE, multi_commit_sym_sync},
+ [{tid, Tid}, {outcome, Outcome}]),
[?ets_insert(Store, {waiting_for_commit_ack, Node}) || Node <- WaitFor],
rpc:abcast(DiscNs -- [node()], ?MODULE, {Tid, Outcome}),
rpc:abcast(RamNs -- [node()], ?MODULE, {Tid, Outcome}),
@@ -1451,7 +1451,7 @@ multi_commit(sync_sym_trans, _Maj = [], Tid, CR, Store) ->
Outcome;
multi_commit(asym_trans, Majority, Tid, CR, Store) ->
- %% This more expensive commit protocol is used when
+ %% This more expensive commit protocol is used when
%% table definitions are changed (schema transactions).
%% It is also used when the involved tables are
%% replicated asymetrically. If the storage type differs
@@ -1462,14 +1462,14 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
%% commit record and votes yes or no depending of the
%% outcome of the prepare. The preparation is also performed
%% by the coordinator.
- %%
+ %%
%% 2a Somebody has died or voted no
%% Tell all yes voters to do_abort
%% 2b Everybody has voted yes
%% Put a unclear marker in the log.
%% Tell the others to pre_commit. I.e. that they should
%% put a unclear marker in the log and reply
- %% acc_pre_commit when they are done.
+ %% acc_pre_commit when they are done.
%%
%% 3a Somebody died
%% Tell the remaining participants to do_abort
@@ -1492,7 +1492,7 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
%% If we have no unclear marker in the log we may
%% safely abort, since we know that nobody may have
%% decided to commit yet.
- %%
+ %%
%% If we have a committed marker in the log we may
%% safely commit since we know that everybody else
%% also will come to this conclusion.
@@ -1506,7 +1506,7 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
%% up. When all involved nodes are up and uncertain,
%% we decide to commit (first put a committed marker
%% in the log, then do the updates).
-
+
D = #decision{tid = Tid, outcome = presume_abort},
{D2, CR2} = commit_decision(D, CR, [], []),
DiscNs = D2#decision.disc_nodes,
@@ -1518,10 +1518,10 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
Pending = mnesia_checkpoint:tm_enter_pending(Tid, DiscNs, RamNs),
?ets_insert(Store, Pending),
{WaitFor, Local} = ask_commit(asym_trans, Tid, CR2, DiscNs, RamNs),
- SchemaPrep = (catch mnesia_schema:prepare_commit(Tid, Local, {coord, WaitFor})),
- {Votes, Pids} = rec_all(WaitFor, Tid, do_commit, []),
-
- ?eval_debug_fun({?MODULE, multi_commit_asym_got_votes},
+ SchemaPrep = (catch mnesia_schema:prepare_commit(Tid, Local, {coord, WaitFor})),
+ {Votes, Pids} = rec_all(WaitFor, Tid, do_commit, []),
+
+ ?eval_debug_fun({?MODULE, multi_commit_asym_got_votes},
[{tid, Tid}, {votes, Votes}]),
case Votes of
do_commit ->
@@ -1530,20 +1530,20 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
mnesia_log:log(C), % C is not a binary
?eval_debug_fun({?MODULE, multi_commit_asym_log_commit_rec},
[{tid, Tid}]),
-
+
D3 = C#commit.decision,
- D4 = D3#decision{outcome = unclear},
- mnesia_recover:log_decision(D4),
+ D4 = D3#decision{outcome = unclear},
+ mnesia_recover:log_decision(D4),
?eval_debug_fun({?MODULE, multi_commit_asym_log_commit_dec},
[{tid, Tid}]),
tell_participants(Pids, {Tid, pre_commit}),
%% Now we are uncertain and we do not know
%% if all participants have logged that
%% they are uncertain or not
- rec_acc_pre_commit(Pids, Tid, Store, {C,Local},
+ rec_acc_pre_commit(Pids, Tid, Store, {C,Local},
do_commit, DumperMode, [], []);
{'EXIT', Reason} ->
- %% The others have logged the commit
+ %% The others have logged the commit
%% record but they are not uncertain
mnesia_recover:note_decision(Tid, aborted),
?eval_debug_fun({?MODULE, multi_commit_asym_prepare_exit},
@@ -1564,7 +1564,7 @@ multi_commit(asym_trans, Majority, Tid, CR, Store) ->
end.
%% Returns do_commit or {do_abort, Reason}
-rec_acc_pre_commit([Pid | Tail], Tid, Store, Commit, Res, DumperMode,
+rec_acc_pre_commit([Pid | Tail], Tid, Store, Commit, Res, DumperMode,
GoodPids, SchemaAckPids) ->
receive
{?MODULE, _, {acc_pre_commit, Tid, Pid, true}} ->
@@ -1598,7 +1598,7 @@ rec_acc_pre_commit([], Tid, Store, {Commit,OrigC}, Res, DumperMode, GoodPids, Sc
%% everybody are uncertain.
prepare_sync_schema_commit(Store, SchemaAckPids),
tell_participants(GoodPids, {Tid, committed}),
- D2 = D#decision{outcome = committed},
+ D2 = D#decision{outcome = committed},
mnesia_recover:log_decision(D2),
?eval_debug_fun({?MODULE, rec_acc_pre_commit_log_commit},
[{tid, Tid}]),
@@ -1611,10 +1611,10 @@ rec_acc_pre_commit([], Tid, Store, {Commit,OrigC}, Res, DumperMode, GoodPids, Sc
sync_schema_commit(Tid, Store, SchemaAckPids),
mnesia_locker:release_tid(Tid),
?MODULE ! {delete_transaction, Tid};
-
+
{do_abort, Reason} ->
tell_participants(GoodPids, {Tid, {do_abort, Reason}}),
- D2 = D#decision{outcome = aborted},
+ D2 = D#decision{outcome = aborted},
mnesia_recover:log_decision(D2),
?eval_debug_fun({?MODULE, rec_acc_pre_commit_log_abort},
[{tid, Tid}]),
@@ -1702,7 +1702,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
end,
?eval_debug_fun({?MODULE, commit_participant, do_commit},
[{tid, Tid}]);
-
+
{Tid, {do_abort, _Reason}} ->
mnesia_recover:log_decision(D#decision{outcome = aborted}),
?eval_debug_fun({?MODULE, commit_participant, log_abort},
@@ -1710,7 +1710,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
mnesia_schema:undo_prepare_commit(Tid, C0),
?eval_debug_fun({?MODULE, commit_participant, undo_prepare},
[{tid, Tid}]);
-
+
{'EXIT', _, _} ->
mnesia_recover:log_decision(D#decision{outcome = aborted}),
?eval_debug_fun({?MODULE, commit_participant, exit_log_abort},
@@ -1718,7 +1718,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
mnesia_schema:undo_prepare_commit(Tid, C0),
?eval_debug_fun({?MODULE, commit_participant, exit_undo_prepare},
[{tid, Tid}]);
-
+
Msg ->
verbose("** ERROR ** commit_participant ~p, got unexpected msg: ~p~n",
[Tid, Msg])
@@ -1739,7 +1739,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
verbose("** ERROR ** commit_participant ~p, got unexpected msg: ~p~n",
[Tid, Msg])
end;
-
+
{'EXIT', Reason} ->
?eval_debug_fun({?MODULE, commit_participant, vote_no},
[{tid, Tid}]),
@@ -1750,7 +1750,7 @@ commit_participant(Coord, Tid, Bin, C0, DiscNs, _RamNs) ->
?MODULE ! {delete_transaction, Tid},
unlink(whereis(?MODULE)),
exit(normal).
-
+
do_abort(Tid, Bin) when is_binary(Bin) ->
%% Possible optimization:
%% If we want we could pass arround a flag
@@ -1761,7 +1761,7 @@ do_abort(Tid, Bin) when is_binary(Bin) ->
%% mnesia_schema:undo_prepare_commit/1.
do_abort(Tid, binary_to_term(Bin));
do_abort(Tid, Commit) ->
- mnesia_schema:undo_prepare_commit(Tid, Commit),
+ mnesia_schema:undo_prepare_commit(Tid, Commit),
Commit.
do_dirty(Tid, Commit) when Commit#commit.schema_ops == [] ->
@@ -1799,7 +1799,7 @@ do_update(Tid, Storage, [Op | Ops], OldRes) ->
verbose("do_update in ~w failed: ~p -> {'EXIT', ~p}~n",
[Tid, Op, Reason]),
- do_update(Tid, Storage, Ops, OldRes);
+ do_update(Tid, Storage, Ops, OldRes);
NewRes ->
do_update(Tid, Storage, Ops, NewRes)
end;
@@ -1807,7 +1807,7 @@ do_update(_Tid, _Storage, [], Res) ->
Res.
do_update_op(Tid, Storage, {{Tab, K}, Obj, write}) ->
- commit_write(?catch_val({Tab, commit_work}), Tid,
+ commit_write(?catch_val({Tab, commit_work}), Tid,
Tab, K, Obj, undefined),
mnesia_lib:db_put(Storage, Tab, Obj);
@@ -1816,7 +1816,7 @@ do_update_op(Tid, Storage, {{Tab, K}, Val, delete}) ->
mnesia_lib:db_erase(Storage, Tab, K);
do_update_op(Tid, Storage, {{Tab, K}, {RecName, Incr}, update_counter}) ->
- {NewObj, OldObjs} =
+ {NewObj, OldObjs} =
case catch mnesia_lib:db_update_counter(Storage, Tab, K, Incr) of
NewVal when is_integer(NewVal), NewVal >= 0 ->
{{RecName, K, NewVal}, [{RecName, K, NewVal - Incr}]};
@@ -1824,17 +1824,17 @@ do_update_op(Tid, Storage, {{Tab, K}, {RecName, Incr}, update_counter}) ->
New = {RecName, K, Incr},
mnesia_lib:db_put(Storage, Tab, New),
{New, []};
- _ ->
+ _ ->
Zero = {RecName, K, 0},
mnesia_lib:db_put(Storage, Tab, Zero),
{Zero, []}
end,
- commit_update(?catch_val({Tab, commit_work}), Tid, Tab,
+ commit_update(?catch_val({Tab, commit_work}), Tid, Tab,
K, NewObj, OldObjs),
element(3, NewObj);
do_update_op(Tid, Storage, {{Tab, Key}, Obj, delete_object}) ->
- commit_del_object(?catch_val({Tab, commit_work}),
+ commit_del_object(?catch_val({Tab, commit_work}),
Tid, Tab, Key, Obj, undefined),
mnesia_lib:db_match_erase(Storage, Tab, Obj);
@@ -1846,11 +1846,11 @@ commit_write([], _, _, _, _, _) -> ok;
commit_write([{checkpoints, CpList}|R], Tid, Tab, K, Obj, Old) ->
mnesia_checkpoint:tm_retain(Tid, Tab, K, write, CpList),
commit_write(R, Tid, Tab, K, Obj, Old);
-commit_write([H|R], Tid, Tab, K, Obj, Old)
+commit_write([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, write, Old),
commit_write(R, Tid, Tab, K, Obj, Old);
-commit_write([H|R], Tid, Tab, K, Obj, Old)
+commit_write([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == index ->
mnesia_index:add_index(H, Tab, K, Obj, Old),
commit_write(R, Tid, Tab, K, Obj, Old).
@@ -1859,11 +1859,11 @@ commit_update([], _, _, _, _, _) -> ok;
commit_update([{checkpoints, CpList}|R], Tid, Tab, K, Obj, _) ->
Old = mnesia_checkpoint:tm_retain(Tid, Tab, K, write, CpList),
commit_update(R, Tid, Tab, K, Obj, Old);
-commit_update([H|R], Tid, Tab, K, Obj, Old)
+commit_update([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, write, Old),
commit_update(R, Tid, Tab, K, Obj, Old);
-commit_update([H|R], Tid, Tab, K, Obj, Old)
+commit_update([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == index ->
mnesia_index:add_index(H, Tab, K, Obj, Old),
commit_update(R, Tid, Tab, K, Obj, Old).
@@ -1872,11 +1872,11 @@ commit_delete([], _, _, _, _, _) -> ok;
commit_delete([{checkpoints, CpList}|R], Tid, Tab, K, Obj, _) ->
Old = mnesia_checkpoint:tm_retain(Tid, Tab, K, delete, CpList),
commit_delete(R, Tid, Tab, K, Obj, Old);
-commit_delete([H|R], Tid, Tab, K, Obj, Old)
+commit_delete([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, delete, Old),
commit_delete(R, Tid, Tab, K, Obj, Old);
-commit_delete([H|R], Tid, Tab, K, Obj, Old)
+commit_delete([H|R], Tid, Tab, K, Obj, Old)
when element(1, H) == index ->
mnesia_index:delete_index(H, Tab, K),
commit_delete(R, Tid, Tab, K, Obj, Old).
@@ -1885,12 +1885,12 @@ commit_del_object([], _, _, _, _, _) -> ok;
commit_del_object([{checkpoints, CpList}|R], Tid, Tab, K, Obj, _) ->
Old = mnesia_checkpoint:tm_retain(Tid, Tab, K, delete_object, CpList),
commit_del_object(R, Tid, Tab, K, Obj, Old);
-commit_del_object([H|R], Tid, Tab, K, Obj, Old)
- when element(1, H) == subscribers ->
+commit_del_object([H|R], Tid, Tab, K, Obj, Old)
+ when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, delete_object, Old),
commit_del_object(R, Tid, Tab, K, Obj, Old);
-commit_del_object([H|R], Tid, Tab, K, Obj, Old)
- when element(1, H) == index ->
+commit_del_object([H|R], Tid, Tab, K, Obj, Old)
+ when element(1, H) == index ->
mnesia_index:del_object_index(H, Tab, K, Obj, Old),
commit_del_object(R, Tid, Tab, K, Obj, Old).
@@ -1898,11 +1898,11 @@ commit_clear([], _, _, _, _) -> ok;
commit_clear([{checkpoints, CpList}|R], Tid, Tab, K, Obj) ->
mnesia_checkpoint:tm_retain(Tid, Tab, K, clear_table, CpList),
commit_clear(R, Tid, Tab, K, Obj);
-commit_clear([H|R], Tid, Tab, K, Obj)
+commit_clear([H|R], Tid, Tab, K, Obj)
when element(1, H) == subscribers ->
mnesia_subscr:report_table_event(H, Tab, Tid, Obj, clear_table, undefined),
commit_clear(R, Tid, Tab, K, Obj);
-commit_clear([H|R], Tid, Tab, K, Obj)
+commit_clear([H|R], Tid, Tab, K, Obj)
when element(1, H) == index ->
mnesia_index:clear_index(H, Tab, K, Obj),
commit_clear(R, Tid, Tab, K, Obj).
@@ -1913,7 +1913,7 @@ do_snmp(Tid, [Head | Tail]) ->
{'EXIT', Reason} ->
%% This should only happen when we recently have
%% deleted our local replica or recently deattached
- %% the snmp table
+ %% the snmp table
verbose("do_snmp in ~w failed: ~p -> {'EXIT', ~p}~n",
[Tid, Head, Reason]);
@@ -1922,7 +1922,7 @@ do_snmp(Tid, [Head | Tail]) ->
end,
do_snmp(Tid, Tail).
-commit_nodes([C | Tail], AccD, AccR)
+commit_nodes([C | Tail], AccD, AccR)
when C#commit.disc_copies == [],
C#commit.disc_only_copies == [],
C#commit.schema_ops == [] ->
@@ -1934,7 +1934,7 @@ commit_nodes([], AccD, AccR) ->
commit_decision(D, [C | Tail], AccD, AccR) ->
N = C#commit.node,
- {D2, Tail2} =
+ {D2, Tail2} =
case C#commit.schema_ops of
[] when C#commit.disc_copies == [],
C#commit.disc_only_copies == [] ->
@@ -1954,8 +1954,8 @@ commit_decision(D, [], AccD, AccR) ->
{D#decision{disc_nodes = AccD, ram_nodes = AccR}, []}.
ram_only_ops(N, [{op, change_table_copy_type, N, _FromS, _ToS, Cs} | _Ops ]) ->
- case lists:member({name, schema}, Cs) of
- true ->
+ case lists:member({name, schema}, Cs) of
+ true ->
%% We always use disk if change type of the schema
false;
false ->
@@ -2025,12 +2025,12 @@ get_dirty_reply(Node, Res) ->
Reply;
{mnesia_down, Node} ->
case get(mnesia_activity_state) of
- {_, Tid, _Ts} when element(1,Tid) == tid ->
+ {_, Tid, _Ts} when element(1,Tid) == tid ->
%% Hmm dirty called inside a transaction, to avoid
%% hanging transaction we need to restart the transaction
mnesia:abort({node_not_running, Node});
_ ->
- %% It's ok to ignore mnesia_down's since we will make
+ %% It's ok to ignore mnesia_down's since we will make
%% the replicas consistent again when Node is started
Res
end
@@ -2068,10 +2068,10 @@ ask_commit(_Protocol, _Tid, [], _DiscNs, _RamNs, WaitFor, Local) ->
%% to be safe we let erts do the translation (many times maybe and thus
%% slower but it works.
% opt_term_to_binary(asym_trans, Head, Nodes) ->
-% opt_term_to_binary(Nodes, Head);
+% opt_term_to_binary(Nodes, Head);
opt_term_to_binary(_Protocol, Head, _Nodes) ->
Head.
-
+
rec_all([Node | Tail], Tid, Res, Pids) ->
receive
{?MODULE, Node, {vote_yes, Tid}} ->
@@ -2085,7 +2085,7 @@ rec_all([Node | Tail], Tid, Res, Pids) ->
{?MODULE, Node, {aborted, Tid}} ->
rec_all(Tail, Tid, Res, Pids);
- {mnesia_down, Node} ->
+ {mnesia_down, Node} ->
%% Make sure that mnesia_tm knows it has died
%% it may have been restarted
Abort = {do_abort, {bad_commit, Node}},
@@ -2095,7 +2095,7 @@ rec_all([Node | Tail], Tid, Res, Pids) ->
rec_all([], _Tid, Res, Pids) ->
{Res, Pids}.
-get_transactions() ->
+get_transactions() ->
{info, Participant, Coordinator} = req(info),
lists:map(fun({Tid, _Tabs}) ->
Status = tr_status(Tid,Participant),
@@ -2125,7 +2125,7 @@ get_info(Timeout) ->
display_info(Stream, {timeout, T}) ->
io:format(Stream, "---> No info about coordinator and participant transactions, "
"timeout ~p <--- ~n", [T]);
-
+
display_info(Stream, {info, Part, Coord}) ->
io:format(Stream, "---> Participant transactions <--- ~n", []),
lists:foreach(fun(P) -> pr_participant(Stream, P) end, Part),
@@ -2134,7 +2134,7 @@ display_info(Stream, {info, Part, Coord}) ->
pr_participant(Stream, P) ->
Commit0 = P#participant.commit,
- Commit =
+ Commit =
if
is_binary(Commit0) -> binary_to_term(Commit0);
true -> Commit0
@@ -2161,11 +2161,11 @@ search_pr_coordinator(S, [{Tid, _Ts}|Tail]) ->
io:format( "Tid is coordinator, owner == \n", []),
display_pid_info(Tid#tid.pid),
search_pr_coordinator(S, Tail);
- _ ->
+ _ ->
search_pr_coordinator(S, Tail)
end.
-search_pr_participant(_S, []) ->
+search_pr_participant(_S, []) ->
false;
search_pr_participant(S, [ P | Tail]) ->
Tid = P#participant.tid,
@@ -2176,15 +2176,15 @@ search_pr_participant(S, [ P | Tail]) ->
Pid = Tid#tid.pid,
display_pid_info(Pid),
io:format( "Tid wants to write objects \n",[]),
- Commit =
+ Commit =
if
is_binary(Commit0) -> binary_to_term(Commit0);
true -> Commit0
end,
-
+
io:format("~p~n", [Commit]),
search_pr_participant(S,Tail); %% !!!!!
- true ->
+ true ->
search_pr_participant(S, Tail)
end.
@@ -2200,7 +2200,7 @@ display_pid_info(Pid) ->
Other ->
Other
end,
- Reds = fetch(reductions, Info),
+ Reds = fetch(reductions, Info),
LM = length(fetch(messages, Info)),
pformat(io_lib:format("~p", [Pid]),
io_lib:format("~p", [Call]),
@@ -2254,7 +2254,7 @@ send_to_pids([_ | Pids], Msg) ->
send_to_pids(Pids, Msg);
send_to_pids([], _Msg) ->
ok.
-
+
reconfigure_participants(N, [P | Tail]) ->
case lists:member(N, P#participant.disc_nodes) or
lists:member(N, P#participant.ram_nodes) of
@@ -2262,25 +2262,25 @@ reconfigure_participants(N, [P | Tail]) ->
%% Ignore, since we are not a participant
%% in the transaction.
reconfigure_participants(N, Tail);
-
+
true ->
%% We are on a participant node, lets
%% check if the dead one was a
%% participant or a coordinator.
Tid = P#participant.tid,
- if
+ if
node(Tid#tid.pid) /= N ->
%% Another participant node died. Ignore.
reconfigure_participants(N, Tail);
true ->
- %% The coordinator node has died and
+ %% The coordinator node has died and
%% we must determine the outcome of the
%% transaction and tell mnesia_tm on all
%% nodes (including the local node) about it
verbose("Coordinator ~p in transaction ~p died~n",
[Tid#tid.pid, Tid]),
-
+
Nodes = P#participant.disc_nodes ++
P#participant.ram_nodes,
AliveNodes = Nodes -- [N],
@@ -2332,8 +2332,8 @@ system_terminate(_Reason, _Parent, _Debug, State) ->
system_code_change(State=#state{coordinators=Cs0,participants=Ps0},_Module,_OldVsn,downgrade) ->
case is_tuple(Cs0) of
- true ->
- Cs = gb_trees:to_list(Cs0),
+ true ->
+ Cs = gb_trees:to_list(Cs0),
Ps = gb_trees:values(Ps0),
{ok, State#state{coordinators=Cs,participants=Ps}};
false ->
@@ -2342,7 +2342,7 @@ system_code_change(State=#state{coordinators=Cs0,participants=Ps0},_Module,_OldV
system_code_change(State=#state{coordinators=Cs0,participants=Ps0},_Module,_OldVsn,_Extra) ->
case is_list(Cs0) of
- true ->
+ true ->
Cs = gb_trees:from_orddict(lists:sort(Cs0)),
Ps1 = [{P#participant.tid,P}|| P <- Ps0],
Ps = gb_trees:from_orddict(lists:sort(Ps1)),