Diffstat (limited to 'lib/stdlib')
46 files changed, 1921 insertions, 3446 deletions
diff --git a/lib/stdlib/doc/src/assert_hrl.xml b/lib/stdlib/doc/src/assert_hrl.xml index e2dfc2ab9b..a29f6d6ad7 100644 --- a/lib/stdlib/doc/src/assert_hrl.xml +++ b/lib/stdlib/doc/src/assert_hrl.xml @@ -28,7 +28,7 @@ <date></date> <rev></rev> </header> - <file>assert.hrl.xml</file> + <file>assert.hrl</file> <filesummary>Assert macros.</filesummary> <description> <p>The include file <c>assert.hrl</c> provides macros for inserting @@ -49,25 +49,33 @@ entries in the <c>Info</c> list are optional; do not rely programatically on any of them being present.</p> + <p>Each assert macro has a corresponding version with an extra argument, + for adding comments to assertions. These can for example be printed as + part of error reports, to clarify the meaning of the check that + failed. For example, <c>?assertEqual(0, fib(0), "Fibonacci is defined + for zero")</c>. The comment text can be any character data (string, + UTF8-binary, or deep list of such data), and will be included in the + error term as <c>{comment, Text}</c>.</p> + <p>If the macro <c>NOASSERT</c> is defined when <c>assert.hrl</c> is read by the compiler, the macros are defined as equivalent to the atom - <c>ok</c>. The test is not performed and there is no cost at runtime.</p> + <c>ok</c>. The test will not be performed and there is no cost at runtime.</p> <p>For example, using <c>erlc</c> to compile your modules, the following - disable all assertions:</p> + disables all assertions:</p> <code type="none"> erlc -DNOASSERT=true *.erl</code> - <p>The value of <c>NOASSERT</c> does not matter, only the fact that it is - defined.</p> + <p>(The value of <c>NOASSERT</c> does not matter, only the fact that it is + defined.)</p> <p>A few other macros also have effect on the enabling or disabling of assertions:</p> <list type="bulleted"> - <item><p>If <c>NODEBUG</c> is defined, it implies <c>NOASSERT</c>, unless - <c>DEBUG</c> is also defined, which is assumed to take precedence.</p> + <item><p>If <c>NODEBUG</c> is defined, it implies <c>NOASSERT</c> (unless + <c>DEBUG</c> is also defined, which overrides <c>NODEBUG</c>).</p> </item> <item><p>If <c>ASSERT</c> is defined, it overrides <c>NOASSERT</c>, that is, the assertions remain enabled.</p></item> @@ -84,16 +92,19 @@ erlc -DNOASSERT=true *.erl</code> <title>Macros</title> <taglist> <tag><c>assert(BoolExpr)</c></tag> + <tag><c>assert(BoolExpr, Comment)</c></tag> <item> <p>Tests that <c>BoolExpr</c> completes normally returning <c>true</c>.</p> </item> <tag><c>assertNot(BoolExpr)</c></tag> + <tag><c>assertNot(BoolExpr, Comment)</c></tag> <item> <p>Tests that <c>BoolExpr</c> completes normally returning <c>false</c>.</p> </item> <tag><c>assertMatch(GuardedPattern, Expr)</c></tag> + <tag><c>assertMatch(GuardedPattern, Expr, Comment)</c></tag> <item> <p>Tests that <c>Expr</c> completes normally yielding a value that matches <c>GuardedPattern</c>, for example:</p> @@ -104,6 +115,7 @@ erlc -DNOASSERT=true *.erl</code> ?assertMatch({bork, X} when X > 0, f())</code> </item> <tag><c>assertNotMatch(GuardedPattern, Expr)</c></tag> + <tag><c>assertNotMatch(GuardedPattern, Expr, Comment)</c></tag> <item> <p>Tests that <c>Expr</c> completes normally yielding a value that does not match <c>GuardedPattern</c>.</p> @@ -111,16 +123,19 @@ erlc -DNOASSERT=true *.erl</code> <c>when</c> part.</p> </item> <tag><c>assertEqual(ExpectedValue, Expr)</c></tag> + <tag><c>assertEqual(ExpectedValue, Expr, Comment)</c></tag> <item> <p>Tests that <c>Expr</c> completes normally yielding a value that is exactly equal to 
<c>ExpectedValue</c>.</p> </item> <tag><c>assertNotEqual(ExpectedValue, Expr)</c></tag> + <tag><c>assertNotEqual(ExpectedValue, Expr, Comment)</c></tag> <item> <p>Tests that <c>Expr</c> completes normally yielding a value that is not exactly equal to <c>ExpectedValue</c>.</p> </item> <tag><c>assertException(Class, Term, Expr)</c></tag> + <tag><c>assertException(Class, Term, Expr, Comment)</c></tag> <item> <p>Tests that <c>Expr</c> completes abnormally with an exception of type <c>Class</c> and with the associated <c>Term</c>. The assertion fails @@ -130,6 +145,7 @@ erlc -DNOASSERT=true *.erl</code> patterns, as in <c>assertMatch</c>.</p> </item> <tag><c>assertNotException(Class, Term, Expr)</c></tag> + <tag><c>assertNotException(Class, Term, Expr, Comment)</c></tag> <item> <p>Tests that <c>Expr</c> does not evaluate abnormally with an exception of type <c>Class</c> and with the associated <c>Term</c>. @@ -139,14 +155,17 @@ erlc -DNOASSERT=true *.erl</code> be guarded patterns.</p> </item> <tag><c>assertError(Term, Expr)</c></tag> + <tag><c>assertError(Term, Expr, Comment)</c></tag> <item> <p>Equivalent to <c>assertException(error, Term, Expr)</c></p> </item> <tag><c>assertExit(Term, Expr)</c></tag> + <tag><c>assertExit(Term, Expr, Comment)</c></tag> <item> <p>Equivalent to <c>assertException(exit, Term, Expr)</c></p> </item> <tag><c>assertThrow(Term, Expr)</c></tag> + <tag><c>assertThrow(Term, Expr, Comment)</c></tag> <item> <p>Equivalent to <c>assertException(throw, Term, Expr)</c></p> </item> diff --git a/lib/stdlib/doc/src/c.xml b/lib/stdlib/doc/src/c.xml index 92ab59c6b0..55a77d1bc5 100644 --- a/lib/stdlib/doc/src/c.xml +++ b/lib/stdlib/doc/src/c.xml @@ -148,6 +148,15 @@ compile:file(<anno>File</anno>, <anno>Options</anno> ++ [report_errors, report_w </func> <func> + <name name="lm" arity="0"/> + <fsummary>Loads all modified modules.</fsummary> + <desc> + <p>Reloads all currently loaded modules that have changed on disk (see <c>mm()</c>). + Returns the list of results from calling <c>l(M)</c> for each such <c>M</c>.</p> + </desc> + </func> + + <func> <name name="ls" arity="0"/> <fsummary>List files in the current directory.</fsummary> <desc> @@ -182,6 +191,15 @@ compile:file(<anno>File</anno>, <anno>Options</anno> ++ [report_errors, report_w </func> <func> + <name name="mm" arity="0"/> + <fsummary>Lists all modified modules.</fsummary> + <desc> + <p>Lists all modified modules. Shorthand for + <seealso marker="kernel:code#modified_modules/0"><c>code:modified_modules/0</c></seealso>.</p> + </desc> + </func> + + <func> <name name="memory" arity="0"/> <fsummary>Memory allocation information.</fsummary> <desc> diff --git a/lib/stdlib/doc/src/dets.xml b/lib/stdlib/doc/src/dets.xml index 2e4261d72e..eb6e32aecf 100644 --- a/lib/stdlib/doc/src/dets.xml +++ b/lib/stdlib/doc/src/dets.xml @@ -100,18 +100,12 @@ provided by Dets, neither is the limited support for concurrent updates that makes a sequence of <c>first</c> and <c>next</c> calls safe to use on fixed ETS tables. Both these - features will be provided by Dets in a future release of + features may be provided by Dets in a future release of Erlang/OTP. Until then, the Mnesia application (or some user-implemented method for locking) must be used to implement safe concurrency. Currently, no Erlang/OTP library has support for ordered disk-based term storage.</p> - <p>Two versions of the format used for storing objects on file are - supported by Dets. 
The first version, 8, is the format always used - for tables created by Erlang/OTP R7 and earlier. The second version, 9, - is the default version of tables created by Erlang/OTP R8 (and later - releases). Erlang/OTP R8 can create version 8 tables, and convert version - 8 tables to version 9, and conversely, upon request.</p> <p>All Dets functions return <c>{error, Reason}</c> if an error occurs (<seealso marker="#first/1"><c>first/1</c></seealso> and <seealso marker="#next/2"><c>next/2</c></seealso> are exceptions, they @@ -190,9 +184,6 @@ <datatype> <name name="type"/> </datatype> - <datatype> - <name name="version"/> - </datatype> </datatypes> <funcs> @@ -385,8 +376,7 @@ <p><c>{bchunk_format, binary()}</c> - An opaque binary describing the format of the objects returned by <c>bchunk/2</c>. The binary can be used as argument to - <c>is_compatible_chunk_format/2</c>. Only available for - version 9 tables.</p> + <c>is_compatible_chunk_format/2</c>.</p> </item> <item> <p><c>{hash, Hash}</c> - Describes which BIF is @@ -394,10 +384,6 @@ Dets table. Possible values of <c>Hash</c>:</p> <list> <item> - <p><c>hash</c> - Implies that the <c>erlang:hash/2</c> BIF - is used.</p> - </item> - <item> <p><c>phash</c> - Implies that the <c>erlang:phash/2</c> BIF is used.</p> </item> @@ -413,8 +399,7 @@ </item> <item> <p><c>{no_keys, integer >= 0()}</c> - The number of different - keys stored in the table. Only available for version 9 - tables.</p> + keys stored in the table.</p> </item> <item> <p><c>{no_objects, integer >= 0()}</c> - The number of objects @@ -424,8 +409,7 @@ <p><c>{no_slots, {Min, Used, Max}}</c> - The number of slots of the table. <c>Min</c> is the minimum number of slots, <c>Used</c> is the number of currently used slots, - and <c>Max</c> is the maximum number of slots. Only - available for version 9 tables.</p> + and <c>Max</c> is the maximum number of slots.</p> </item> <item> <p><c>{owner, pid()}</c> - The pid of the process that @@ -466,10 +450,6 @@ time warp safe</seealso>. Time warp safe code must use <c>safe_fixed_monotonic_time</c> instead.</p> </item> - <item> - <p><c>{version, integer()}</c> - The version of the format of - the table.</p> - </item> </list> </desc> </func> @@ -662,8 +642,8 @@ ok objects at a time, until at least one object matches or the end of the table is reached. The default, indicated by giving <c><anno>N</anno></c> the value <c>default</c>, is to let - the number of objects vary depending on the sizes of the objects. If - <c><anno>Name</anno></c> is a version 9 table, all objects with the + the number of objects vary depending on the sizes of the objects. + All objects with the same key are always matched at the same time, which implies that more than <anno>N</anno> objects can sometimes be matched.</p> <p>The table is always to be protected using @@ -743,9 +723,9 @@ ok end of the table is reached. The default, indicated by giving <c><anno>N</anno></c> the value <c>default</c>, is to let the number - of objects vary depending on the sizes of the objects. If - <c><anno>Name</anno></c> is a version 9 table, all matching objects - with the same key are always returned in the same reply, which implies + of objects vary depending on the sizes of the objects. 
All + matching objects with the same key are always returned + in the same reply, which implies that more than <anno>N</anno> objects can sometimes be returned.</p> <p>The table is always to be protected using <seealso marker="#safe_fixtable/2"><c>safe_fixtable/2</c></seealso> @@ -842,8 +822,7 @@ ok maximal value. Notice that a higher value can increase the table fragmentation, and a smaller value can decrease the fragmentation, at - the expense of execution time. Only available for version - 9 tables.</p> + the expense of execution time.</p> </item> <item> <p><c>{min_no_slots, </c><seealso marker="#type-no_slots"> @@ -880,12 +859,7 @@ ok FileName}}</c> is returned if the table must be repaired.</p> <p>Value <c>force</c> means that a reparation is made even if the table is properly closed. - This is how to convert tables created by older versions of - STDLIB. An example is tables hashed with the deprecated - <c>erlang:hash/2</c> BIF. Tables created with Dets from - STDLIB version 1.8.2 or later use function - <c>erlang:phash/2</c> or function <c>erlang:phash2/1</c>, - which is preferred.</p> + This is a seldom needed option.</p> <p>Option <c>repair</c> is ignored if the table is already open.</p> </item> <item> @@ -893,15 +867,6 @@ ok <c>type()</c></seealso><c>}</c> - The table type. Defaults to <c>set</c>.</p> </item> - <item> - <p><c>{version, </c><seealso marker="#type-version"> - <c>version()</c></seealso><c>}</c> - The version of the format - used for the table. Defaults to <c>9</c>. Tables on the format - used before Erlang/OTP R8 can be created by specifying value - <c>8</c>. A version 8 table can be converted to a version 9 - table by specifying options <c>{version,9}</c> - and <c>{repair,force}</c>.</p> - </item> </list> </desc> </func> @@ -1041,8 +1006,8 @@ ok a time, until at least one object matches or the end of the table is reached. The default, indicated by giving <c><anno>N</anno></c> the value <c>default</c>, is to let the number - of objects vary depending on the sizes of the objects. If - <c><anno>Name</anno></c> is a version 9 table, all objects with the + of objects vary depending on the sizes of the objects. All + objects with the same key are always handled at the same time, which implies that the match specification can be applied to more than <anno>N</anno> objects.</p> diff --git a/lib/stdlib/doc/src/gen_statem.xml b/lib/stdlib/doc/src/gen_statem.xml index 64267c2af5..fd498ee82e 100644 --- a/lib/stdlib/doc/src/gen_statem.xml +++ b/lib/stdlib/doc/src/gen_statem.xml @@ -533,7 +533,7 @@ handle_event(_, _, State, Data) -> Type <c>info</c> originates from regular process messages sent to the <c>gen_statem</c>. Also, the state machine implementation can generate events of types - <c>timeout</c>, <c>state_timeout</c>, <c>enter</c>, + <c>timeout</c>, <c>state_timeout</c>, and <c>internal</c> to itself. </p> </desc> @@ -639,6 +639,20 @@ handle_event(_, _, State, Data) -> </p> <list type="ordered"> <item> + <p> + If the state changes or is the initial state, and + <seealso marker="#type-state_enter"><em>state enter calls</em></seealso> + are used, the <c>gen_statem</c> calls + the new state callback with arguments + <seealso marker="#type-state_enter">(enter, OldState, Data)</seealso>. + Any + <seealso marker="#type-enter_action"><c>actions</c></seealso> + returned from this call are handled as if they were + appended to the actions + returned by the state callback that changed states. 
+ </p> + </item> + <item> <p> All <seealso marker="#type-action">actions</seealso> @@ -668,36 +682,36 @@ handle_event(_, _, State, Data) -> </p> </item> <item> - <p> - If the state changes or is the initial state, and - <seealso marker="#type-state_enter"><em>state enter calls</em></seealso> - are used, the <c>gen_statem</c> calls - the new state callback with arguments - <seealso marker="#type-state_enter">(enter, OldState, Data)</seealso>. - Any - <seealso marker="#type-enter_action"><c>actions</c></seealso> - returned from this call are handled as if they were - appended to the actions - returned by the state callback that changed states. - </p> - </item> - <item> - <p> - If there are enqueued events the (possibly new) - <seealso marker="#state callback">state callback</seealso> - is called with the oldest enqueued event, - and we start again from the top of this list. - </p> - </item> - <item> <p> Timeout timers <seealso marker="#type-state_timeout"><c>state_timeout()</c></seealso> and <seealso marker="#type-event_timeout"><c>event_timeout()</c></seealso> - are handled. This may lead to a time-out zero event - being generated to the + are handled. Time-outs with zero time are guaranteed to be + delivered to the state machine before any external + not yet received event so if there is such a timeout requested, + the corresponding time-out zero event is enqueued as + the newest event. + </p> + <p> + Any event cancels an + <seealso marker="#type-event_timeout"><c>event_timeout()</c></seealso> + so a zero time event time-out is only generated + if the event queue is empty. + </p> + <p> + A state change cancels a + <seealso marker="#type-state_timeout"><c>state_timeout()</c></seealso> + and any new transition option of this type + belongs to the new state. + </p> + </item> + <item> + <p> + If there are enqueued events the <seealso marker="#state callback">state callback</seealso> + for the possibly new state + is called with the oldest enqueued event, and we start again from the top of this list. </p> </item> @@ -759,8 +773,9 @@ handle_event(_, _, State, Data) -> after this time (in milliseconds) unless another event arrives or has arrived in which case this time-out is cancelled. - Note that a retried, inserted or state time-out zero - events counts as arrived. + Note that a retried or inserted event counts as arrived. + So does a state time-out zero event, if it was generated + before this timer is requested. </p> <p> If the value is <c>infinity</c>, no timer is started, as @@ -802,7 +817,7 @@ handle_event(_, _, State, Data) -> <p> Setting this timer while it is running will restart it with the new time-out value. Therefore it is possible to cancel - this timeout by setting it to <c>infinity</c>. + this time-out by setting it to <c>infinity</c>. </p> </desc> </datatype> @@ -1130,7 +1145,7 @@ handle_event(_, _, State, Data) -> <c><anno>Timeout</anno></c> can also be a tuple <c>{clean_timeout,<anno>T</anno>}</c> or <c>{dirty_timeout,<anno>T</anno>}</c>, where - <c><anno>T</anno></c> is the timeout time. + <c><anno>T</anno></c> is the time-out time. 
<c>{clean_timeout,<anno>T</anno>}</c> works like just <c>T</c> described in the note above and uses a proxy process for <c>T < infinity</c>, @@ -1773,7 +1788,7 @@ handle_event(_, _, State, Data) -> StateFunctionResult </name> <name>Module:handle_event(enter, OldState, State, Data) -> - StateEnterResult + StateEnterResult(State) </name> <name>Module:handle_event(EventType, EventContent, State, Data) -> HandleEventResult @@ -1802,8 +1817,8 @@ handle_event(_, _, State, Data) -> <seealso marker="#type-event_handler_result">event_handler_result</seealso>(<seealso marker="#type-state_name">state_name()</seealso>) </v> <v> - StateEnterResult = - <seealso marker="#type-state_enter_result">state_enter_result</seealso>(<seealso marker="#type-state">state()</seealso>) + StateEnterResult(State) = + <seealso marker="#type-state_enter_result">state_enter_result(State)</seealso> </v> <v> HandleEventResult = diff --git a/lib/stdlib/doc/src/maps.xml b/lib/stdlib/doc/src/maps.xml index e1edbadcd3..8c7270816b 100644 --- a/lib/stdlib/doc/src/maps.xml +++ b/lib/stdlib/doc/src/maps.xml @@ -160,7 +160,7 @@ val1 <p><em>Example:</em></p> <code type="none"> > Map = #{"42" => value}. -#{"42"> => value} +#{"42" => value} > maps:is_key("42",Map). true > maps:is_key(value,Map). diff --git a/lib/stdlib/doc/src/math.xml b/lib/stdlib/doc/src/math.xml index 70ca6ae78e..b4f096217a 100644 --- a/lib/stdlib/doc/src/math.xml +++ b/lib/stdlib/doc/src/math.xml @@ -62,6 +62,7 @@ <name name="cosh" arity="1"/> <name name="exp" arity="1"/> <name name="floor" arity="1"/> + <name name="fmod" arity="2"/> <name name="log" arity="1"/> <name name="log10" arity="1"/> <name name="log2" arity="1"/> diff --git a/lib/stdlib/doc/src/orddict.xml b/lib/stdlib/doc/src/orddict.xml index 076b06fc38..39b43809b6 100644 --- a/lib/stdlib/doc/src/orddict.xml +++ b/lib/stdlib/doc/src/orddict.xml @@ -38,7 +38,7 @@ <p>This module provides a <c>Key</c>-<c>Value</c> dictionary. An <c>orddict</c> is a representation of a dictionary, where a list of pairs is used to store the keys and values. The list is - ordered after the keys.</p> + ordered after the keys in the <em>Erlang term order</em>.</p> <p>This module provides the same interface as the <seealso marker="dict"><c>dict(3)</c></seealso> module diff --git a/lib/stdlib/doc/src/ordsets.xml b/lib/stdlib/doc/src/ordsets.xml index 148281fcf7..7b590932e4 100644 --- a/lib/stdlib/doc/src/ordsets.xml +++ b/lib/stdlib/doc/src/ordsets.xml @@ -39,7 +39,8 @@ <p>Sets are collections of elements with no duplicate elements. An <c>ordset</c> is a representation of a set, where an ordered list is used to store the elements of the set. An ordered list - is more efficient than an unordered list.</p> + is more efficient than an unordered list. Elements are ordered + according to the <em>Erlang term order</em>.</p> <p>This module provides the same interface as the <seealso marker="sets"><c>sets(3)</c></seealso> module diff --git a/lib/stdlib/doc/src/proc_lib.xml b/lib/stdlib/doc/src/proc_lib.xml index da03c39a26..e64b2ce18a 100644 --- a/lib/stdlib/doc/src/proc_lib.xml +++ b/lib/stdlib/doc/src/proc_lib.xml @@ -66,6 +66,12 @@ <seealso marker="sasl:error_logging">SASL Error Logging</seealso> in the SASL User's Guide.</p> + <p>Unlike in "plain Erlang", <c>proc_lib</c> processes will not generate + <em>error reports</em>, which are written to the terminal by the + emulator and do not require SASL to be started. 
All exceptions are + converted to <em>exits</em> which are ignored by the default + <c>error_logger</c> handler.</p> + <p>The crash report contains the previously stored information, such as ancestors and initial function, the termination reason, and information about other processes that terminate as a result diff --git a/lib/stdlib/doc/src/rand.xml b/lib/stdlib/doc/src/rand.xml index 1dcc3de000..1364a3277b 100644 --- a/lib/stdlib/doc/src/rand.xml +++ b/lib/stdlib/doc/src/rand.xml @@ -41,6 +41,11 @@ Sebastiano Vigna</url>. The normal distribution algorithm uses the <url href="http://www.jstatsoft.org/v05/i08">Ziggurat Method by Marsaglia and Tsang</url>.</p> + <p>For some algorithms, jump functions are provided for generating + non-overlapping sequences for parallel computations. + The jump functions perform calculations + equivalent to perform a large number of repeated calls + for calculating new states. </p> <p>The following algorithms are provided:</p> @@ -48,14 +53,17 @@ <tag><c>exsplus</c></tag> <item> <p>Xorshift116+, 58 bits precision and period of 2^116-1</p> + <p>Jump function: equivalent to 2^64 calls</p> </item> <tag><c>exs64</c></tag> <item> <p>Xorshift64*, 64 bits precision and a period of 2^64-1</p> + <p>Jump function: not available</p> </item> <tag><c>exs1024</c></tag> <item> <p>Xorshift1024*, 64 bits precision and a period of 2^1024-1</p> + <p>Jump function: equivalent to 2^512 calls</p> </item> </taglist> @@ -156,6 +164,33 @@ S0 = rand:seed_s(exsplus), </func> <func> + <name name="jump" arity="0"/> + <fsummary>Return the seed after performing jump calculation + to the state in the process dictionary.</fsummary> + <desc><marker id="jump-0" /> + <p>Returns the state + after performing jump calculation + to the state in the process dictionary.</p> + <p>This function generates a <c>not_implemented</c> error exception + when the jump function is not implemented for + the algorithm specified in the state + in the process dictionary.</p> + </desc> + </func> + + <func> + <name name="jump" arity="1"/> + <fsummary>Return the seed after performing jump calculation.</fsummary> + <desc><marker id="jump-1" /> + <p>Returns the state after performing jump calculation + to the given state. </p> + <p>This function generates a <c>not_implemented</c> error exception + when the jump function is not implemented for + the algorithm specified in the state.</p> + </desc> + </func> + + <func> <name name="normal" arity="0"/> <fsummary>Return a standard normal distributed random float.</fsummary> <desc> diff --git a/lib/stdlib/doc/src/supervisor.xml b/lib/stdlib/doc/src/supervisor.xml index 294196f746..bb06d3645e 100644 --- a/lib/stdlib/doc/src/supervisor.xml +++ b/lib/stdlib/doc/src/supervisor.xml @@ -133,8 +133,10 @@ sup_flags() = #{strategy => strategy(), % optional map. Assuming the values <c>MaxR</c> for <c>intensity</c> and <c>MaxT</c> for <c>period</c>, then, if more than <c>MaxR</c> restarts occur within <c>MaxT</c> seconds, the supervisor - terminates all child processes and then itself. <c>intensity</c> - defaults to <c>1</c> and <c>period</c> defaults to <c>5</c>.</p> + terminates all child processes and then itself. The termination + reason for the supervisor itself in that case will be <c>shutdown</c>. 
+ <c>intensity</c> defaults to <c>1</c> and <c>period</c> defaults to + <c>5</c>.</p> <marker id="child_spec"/> <p>The type definition of a child specification is as follows:</p> diff --git a/lib/stdlib/include/assert.hrl b/lib/stdlib/include/assert.hrl index 9e5d4eb598..2fbaeba0b2 100644 --- a/lib/stdlib/include/assert.hrl +++ b/lib/stdlib/include/assert.hrl @@ -1,8 +1,3 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright (C) 2004-2016 Richard Carlsson, Mickaël Rémond -%% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. %% You may obtain a copy of the License at @@ -15,8 +10,7 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %% -%% %CopyrightEnd% -%% +%% Copyright (C) 2004-2016 Richard Carlsson, Mickaël Rémond -ifndef(ASSERT_HRL). -define(ASSERT_HRL, true). @@ -56,7 +50,8 @@ %% It is not possible to nest assert macros. -ifdef(NOASSERT). --define(assert(BoolExpr),ok). +-define(assert(BoolExpr), ok). +-define(assert(BoolExpr, Comment), ok). -else. %% The assert macro is written the way it is so as not to cause warnings %% for clauses that cannot match, even if the expression is a constant or @@ -79,11 +74,31 @@ end end)()) end). +-define(assert(BoolExpr, Comment), + begin + ((fun () -> + __T = is_process_alive(self()), % cheap source of truth + case (BoolExpr) of + __T -> ok; + __V -> erlang:error({assert, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??BoolExpr)}, + {expected, true}, + case not __T of + __V -> {value, false}; + _ -> {not_boolean, __V} + end]}) + end + end)()) + end). -endif. %% This is the inverse case of assert, for convenience. -ifdef(NOASSERT). -define(assertNot(BoolExpr),ok). +-define(assertNot(BoolExpr, Comment), ok). -else. -define(assertNot(BoolExpr), begin @@ -103,12 +118,32 @@ end end)()) end). +-define(assertNot(BoolExpr, Comment), + begin + ((fun () -> + __F = not is_process_alive(self()), + case (BoolExpr) of + __F -> ok; + __V -> erlang:error({assert, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??BoolExpr)}, + {expected, false}, + case not __F of + __V -> {value, true}; + _ -> {not_boolean, __V} + end]}) + end + end)()) + end). -endif. %% This is mostly a convenience which gives more detailed reports. %% Note: Guard is a guarded pattern, and can not be used for value. -ifdef(NOASSERT). -define(assertMatch(Guard, Expr), ok). +-define(assertMatch(Guard, Expr, Comment), ok). -else. -define(assertMatch(Guard, Expr), begin @@ -124,11 +159,27 @@ end end)()) end). +-define(assertMatch(Guard, Expr, Comment), + begin + ((fun () -> + case (Expr) of + Guard -> ok; + __V -> erlang:error({assertMatch, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??Expr)}, + {pattern, (??Guard)}, + {value, __V}]}) + end + end)()) + end). -endif. %% This is the inverse case of assertMatch, for convenience. -ifdef(NOASSERT). -define(assertNotMatch(Guard, Expr), ok). +-define(assertNotMatch(Guard, Expr, Comment), ok). -else. -define(assertNotMatch(Guard, Expr), begin @@ -145,12 +196,29 @@ end end)()) end). +-define(assertNotMatch(Guard, Expr, Comment), + begin + ((fun () -> + __V = (Expr), + case __V of + Guard -> erlang:error({assertNotMatch, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??Expr)}, + {pattern, (??Guard)}, + {value, __V}]}); + _ -> ok + end + end)()) + end). -endif. 
%% This is a convenience macro which gives more detailed reports when %% the expected LHS value is not a pattern, but a computed value -ifdef(NOASSERT). -define(assertEqual(Expect, Expr), ok). +-define(assertEqual(Expect, Expr, Comment), ok). -else. -define(assertEqual(Expect, Expr), begin @@ -167,11 +235,28 @@ end end)()) end). +-define(assertEqual(Expect, Expr, Comment), + begin + ((fun () -> + __X = (Expect), + case (Expr) of + __X -> ok; + __V -> erlang:error({assertEqual, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??Expr)}, + {expected, __X}, + {value, __V}]}) + end + end)()) + end). -endif. %% This is the inverse case of assertEqual, for convenience. -ifdef(NOASSERT). -define(assertNotEqual(Unexpected, Expr), ok). +-define(assertNotEqual(Unexpected, Expr, Comment), ok). -else. -define(assertNotEqual(Unexpected, Expr), begin @@ -187,12 +272,28 @@ end end)()) end). +-define(assertNotEqual(Unexpected, Expr, Comment), + begin + ((fun () -> + __X = (Unexpected), + case (Expr) of + __X -> erlang:error({assertNotEqual, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??Expr)}, + {value, __X}]}); + _ -> ok + end + end)()) + end). -endif. %% Note: Class and Term are patterns, and can not be used for value. %% Term can be a guarded pattern, but Class cannot. -ifdef(NOASSERT). -define(assertException(Class, Term, Expr), ok). +-define(assertException(Class, Term, Expr, Comment), ok). -else. -define(assertException(Class, Term, Expr), begin @@ -222,17 +323,54 @@ end end)()) end). +-define(assertException(Class, Term, Expr, Comment), + begin + ((fun () -> + try (Expr) of + __V -> erlang:error({assertException, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??Expr)}, + {pattern, + "{ "++(??Class)++" , "++(??Term) + ++" , [...] }"}, + {unexpected_success, __V}]}) + catch + Class:Term -> ok; + __C:__T -> + erlang:error({assertException, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??Expr)}, + {pattern, + "{ "++(??Class)++" , "++(??Term) + ++" , [...] }"}, + {unexpected_exception, + {__C, __T, + erlang:get_stacktrace()}}]}) + end + end)()) + end). -endif. -define(assertError(Term, Expr), ?assertException(error, Term, Expr)). +-define(assertError(Term, Expr, Comment), + ?assertException(error, Term, Expr, Comment)). -define(assertExit(Term, Expr), ?assertException(exit, Term, Expr)). +-define(assertExit(Term, Expr, Comment), + ?assertException(exit, Term, Expr, Comment)). -define(assertThrow(Term, Expr), ?assertException(throw, Term, Expr)). +-define(assertThrow(Term, Expr, Comment), + ?assertException(throw, Term, Expr, Comment)). %% This is the inverse case of assertException, for convenience. %% Note: Class and Term are patterns, and can not be used for value. %% Both Class and Term can be guarded patterns. -ifdef(NOASSERT). -define(assertNotException(Class, Term, Expr), ok). +-define(assertNotException(Class, Term, Expr, Comment), ok). -else. -define(assertNotException(Class, Term, Expr), begin @@ -263,6 +401,36 @@ end end)()) end). +-define(assertNotException(Class, Term, Expr, Comment), + begin + ((fun () -> + try (Expr) of + _ -> ok + catch + __C:__T -> + case __C of + Class -> + case __T of + Term -> + erlang:error({assertNotException, + [{module, ?MODULE}, + {line, ?LINE}, + {comment, (Comment)}, + {expression, (??Expr)}, + {pattern, + "{ "++(??Class)++" , " + ++(??Term)++" , [...] 
}"}, + {unexpected_exception, + {__C, __T, + erlang:get_stacktrace() + }}]}); + _ -> ok + end; + _ -> ok + end + end + end)()) + end). -endif. -endif. % ASSERT_HRL diff --git a/lib/stdlib/src/Makefile b/lib/stdlib/src/Makefile index 302834f9d0..d6c0ff8d8d 100644 --- a/lib/stdlib/src/Makefile +++ b/lib/stdlib/src/Makefile @@ -51,7 +51,6 @@ MODULES= \ dets_server \ dets_sup \ dets_utils \ - dets_v8 \ dets_v9 \ dict \ digraph \ @@ -225,7 +224,6 @@ $(EBIN)/beam_lib.beam: ../include/erl_compile.hrl ../../kernel/include/file.hrl $(EBIN)/dets.beam: dets.hrl ../../kernel/include/file.hrl $(EBIN)/dets_server.beam: dets.hrl $(EBIN)/dets_utils.beam: dets.hrl -$(EBIN)/dets_v8.beam: dets.hrl $(EBIN)/dets_v9.beam: dets.hrl $(EBIN)/erl_bits.beam: ../include/erl_bits.hrl $(EBIN)/erl_compile.beam: ../include/erl_compile.hrl ../../kernel/include/file.hrl diff --git a/lib/stdlib/src/array.erl b/lib/stdlib/src/array.erl index d5757dda5b..079b761463 100644 --- a/lib/stdlib/src/array.erl +++ b/lib/stdlib/src/array.erl @@ -1,8 +1,3 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2007-2016. All Rights Reserved. -%% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. %% You may obtain a copy of the License at @@ -14,13 +9,12 @@ %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %% See the License for the specific language governing permissions and %% limitations under the License. -%% -%% %CopyrightEnd% %% -%% @author Richard Carlsson <[email protected]> +%% Copyright (C) 2006-2016 Richard Carlsson and Ericsson AB +%% +%% @author Richard Carlsson <[email protected]> %% @author Dan Gudmundsson <[email protected]> -%% @version 1.0 - +%% %% @doc Functional, extendible arrays. Arrays can have fixed size, or %% can grow automatically as needed. A default value is used for entries %% that have not been explicitly set. diff --git a/lib/stdlib/src/c.erl b/lib/stdlib/src/c.erl index ad4915eabe..d36630214c 100644 --- a/lib/stdlib/src/c.erl +++ b/lib/stdlib/src/c.erl @@ -26,7 +26,7 @@ -export([help/0,lc/1,c/1,c/2,nc/1,nc/2, nl/1,l/1,i/0,i/1,ni/0, y/1, y/2, lc_batch/0, lc_batch/1, - i/3,pid/3,m/0,m/1, + i/3,pid/3,m/0,m/1,mm/0,lm/0, bt/1, q/0, erlangrc/0,erlangrc/1,bi/1, flush/0, regs/0, uptime/0, nregs/0,pwd/0,ls/0,ls/1,cd/1,memory/1,memory/0, xm/1]). @@ -52,11 +52,13 @@ help() -> "ni() -- information about the networked system\n" "i(X,Y,Z) -- information about pid <X,Y,Z>\n" "l(Module) -- load or reload module\n" + "lm() -- load all modified modules\n" "lc([File]) -- compile a list of Erlang modules\n" "ls() -- list files in the current directory\n" "ls(Dir) -- list files in directory <Dir>\n" "m() -- which modules are loaded\n" "m(Mod) -- information about module <Mod>\n" + "mm() -- list all modified modules\n" "memory() -- memory allocation information\n" "memory(T) -- memory allocation information of type <T>\n" "nc(File) -- compile and load code in <File> on all nodes\n" @@ -459,6 +461,16 @@ m() -> mformat(A1, A2) -> format("~-20s ~ts\n", [A1,A2]). +-spec mm() -> [module()]. + +mm() -> + code:modified_modules(). + +-spec lm() -> [code:load_ret()]. + +lm() -> + [l(M) || M <- mm()]. + %% erlangrc(Home) %% Try to run a ".erlang" file, first in the current directory %% else in home directory. 
diff --git a/lib/stdlib/src/dets.erl b/lib/stdlib/src/dets.erl index 8ce29f23d3..5bc9475fc8 100644 --- a/lib/stdlib/src/dets.erl +++ b/lib/stdlib/src/dets.erl @@ -105,9 +105,6 @@ %%% the file with the split indicator, size etc is held in ram by the %%% server at all times. %%% -%%% The parts specific for formats up to and including 8(c) are -%%% implemented in dets_v8.erl, parts specific for format 9 are -%%% implemented in dets_v9.erl. %% The method of hashing is the so called linear hashing algorithm %% with segments. @@ -140,28 +137,33 @@ %%% written, and a repair is forced next time the file is opened. -record(dets_cont, { - what, % object | bindings | select | bchunk - no_objs, % requested number of objects: default | integer() > 0 - bin, % small chunk not consumed, or 'eof' at end-of-file - alloc, % the part of the file not yet scanned, mostly a binary - tab, - proc, % the pid of the Dets process - match_program % true | compiled_match_spec() | undefined + what :: 'undefined' | 'bchunk' | 'bindings' | 'object' | 'select', + no_objs :: 'default' | pos_integer(), % requested number of objects + bin :: 'eof' | binary(), % small chunk not consumed, + % or 'eof' at end-of-file + alloc :: binary() % the part of the file not yet scanned + | {From :: non_neg_integer(), + To :: non_neg_integer, + binary()}, + tab :: tab_name(), + proc :: 'undefined' | pid(), % the pid of the Dets process + match_program :: 'true' + | 'undefined' + | {'match_spec', ets:comp_match_spec()} }). -record(open_args, { - file, - type, - keypos, - repair, - min_no_slots, - max_no_slots, - ram_file, - delayed_write, - auto_save, - access, - version, - debug + file :: list(), + type :: type(), + keypos :: keypos(), + repair :: 'force' | boolean(), + min_no_slots :: no_slots(), + max_no_slots :: no_slots(), + ram_file :: boolean(), + delayed_write :: cache_parms(), + auto_save :: auto_save(), + access :: access(), + debug :: boolean() }). -define(PATTERN_TO_OBJECT_MATCH_SPEC(Pat), [{Pat,[],['$_']}]). @@ -177,20 +179,13 @@ %%-define(PROFILE(C), C). -define(PROFILE(C), void). --type access() :: 'read' | 'read_write'. --type auto_save() :: 'infinity' | non_neg_integer(). -opaque bindings_cont() :: #dets_cont{}. -opaque cont() :: #dets_cont{}. --type keypos() :: pos_integer(). -type match_spec() :: ets:match_spec(). -type object() :: tuple(). --type no_slots() :: non_neg_integer() | 'default'. -opaque object_cont() :: #dets_cont{}. -type pattern() :: atom() | tuple(). -opaque select_cont() :: #dets_cont{}. --type tab_name() :: term(). --type type() :: 'bag' | 'duplicate_bag' | 'set'. --type version() :: 8 | 9 | 'default'. %%% Some further debug code was added in R12B-1 (stdlib-1.15.1): %%% - there is a new open_file() option 'debug'; @@ -273,19 +268,20 @@ delete_all_objects(Tab) -> delete_object(Tab, O) -> badarg(treq(Tab, {delete_object, [O]}), [Tab, O]). +%% Backwards compatibility. +fsck(Fname, _Version) -> + fsck(Fname). + %% Given a filename, fsck it. Debug. fsck(Fname) -> - fsck(Fname, default). 
- -fsck(Fname, Version) -> catch begin {ok, Fd, FH} = read_file_header(Fname, read, false), ?DEBUGF("FileHeader: ~p~n", [FH]), - case (FH#fileheader.mod):check_file_header(FH, Fd) of + case dets_v9:check_file_header(FH, Fd) of {error, not_closed} -> - fsck(Fd, make_ref(), Fname, FH, default, default, Version); - {ok, _Head, _Extra} -> - fsck(Fd, make_ref(), Fname, FH, default, default, Version); + fsck(Fd, make_ref(), Fname, FH, default, default); + {ok, _Head} -> + fsck(Fd, make_ref(), Fname, FH, default, default); Error -> Error end @@ -372,7 +368,7 @@ info(Tab) -> Item :: 'access' | 'auto_save' | 'bchunk_format' | 'hash' | 'file_size' | 'filename' | 'keypos' | 'memory' | 'no_keys' | 'no_objects' | 'no_slots' | 'owner' | 'ram_file' - | 'safe_fixed' | 'safe_fixed_monotonic_time' | 'size' | 'type' | 'version', + | 'safe_fixed' | 'safe_fixed_monotonic_time' | 'size' | 'type', Value :: term(). info(Tab, owner) -> @@ -640,8 +636,7 @@ open_file(File) -> | {'keypos', keypos()} | {'ram_file', boolean()} | {'repair', boolean() | 'force'} - | {'type', type()} - | {'version', version()}, + | {'type', type()}, Reason :: term(). open_file(Tab, Args) when is_list(Args) -> @@ -674,13 +669,13 @@ remove_user(Pid, From) -> Continuation2 :: select_cont(), MatchSpec :: match_spec(). -repair_continuation(#dets_cont{match_program = B}=Cont, MS) - when is_binary(B) -> +repair_continuation(#dets_cont{match_program = {match_spec, B}}=Cont, MS) -> case ets:is_compiled_ms(B) of true -> Cont; false -> - Cont#dets_cont{match_program = ets:match_spec_compile(MS)} + Cont#dets_cont{match_program = {match_spec, + ets:match_spec_compile(MS)}} end; repair_continuation(#dets_cont{}=Cont, _MS) -> Cont; @@ -999,7 +994,9 @@ init_chunk_match(Tab, Pat, What, N, Safe) when is_integer(N), N >= 0; case req(Proc, {match, MP, Spec, N, Safe}) of {done, L} -> {L, #dets_cont{tab = Tab, proc = Proc, - what = What, bin = eof}}; + what = What, bin = eof, + no_objs = default, + alloc = <<>>}}; {cont, State} -> chunk_match(State#dets_cont{what = What, tab = Tab, @@ -1041,17 +1038,17 @@ chunk_match(#dets_cont{proc = Proc}=State, Safe) -> do_foldl_bins(Bins, true) -> foldl_bins(Bins, []); -do_foldl_bins(Bins, MP) -> +do_foldl_bins(Bins, {match_spec, MP}) -> foldl_bins(Bins, MP, []). foldl_bins([], Terms) -> - %% Preserve time order (version 9). + %% Preserve time order. Terms; foldl_bins([Bin | Bins], Terms) -> foldl_bins(Bins, [binary_to_term(Bin) | Terms]). foldl_bins([], _MP, Terms) -> - %% Preserve time order (version 9). + %% Preserve time order. Terms; foldl_bins([Bin | Bins], MP, Terms) -> Term = binary_to_term(Bin), @@ -1068,7 +1065,7 @@ compile_match_spec(select, ?PATTERN_TO_OBJECT_MATCH_SPEC('_') = Spec) -> compile_match_spec(select, Spec) -> case catch ets:match_spec_compile(Spec) of X when is_binary(X) -> - {Spec, X}; + {Spec, {match_spec, X}}; _ -> badarg end; @@ -1091,16 +1088,10 @@ defaults(Tab, Args) -> delayed_write = ?DEFAULT_CACHE, auto_save = timer:minutes(?DEFAULT_AUTOSAVE), access = read_write, - version = default, debug = false}, Fun = fun repl/2, Defaults = lists:foldl(Fun, Defaults0, Args), - case Defaults#open_args.version of - 8 -> - Defaults#open_args{max_no_slots = default}; - _ -> - is_comp_min_max(Defaults) - end. + is_comp_min_max(Defaults). to_list(T) when is_atom(T) -> atom_to_list(T); to_list(T) -> T. @@ -1131,7 +1122,6 @@ repl({file, File}, Defs) when is_atom(File) -> repl({keypos, P}, Defs) when is_integer(P), P > 0 -> Defs#open_args{keypos =P}; repl({max_no_slots, I}, Defs) -> - %% Version 9 only. 
MaxSlots = is_max_no_slots(I), Defs#open_args{max_no_slots = MaxSlots}; repl({min_no_slots, I}, Defs) -> @@ -1147,8 +1137,9 @@ repl({type, T}, Defs) -> mem(T, [set, bag, duplicate_bag]), Defs#open_args{type =T}; repl({version, Version}, Defs) -> - V = is_version(Version), - Defs#open_args{version = V}; + %% Backwards compatibility. + is_version(Version), + Defs; repl({debug, Bool}, Defs) -> %% Not documented. mem(Bool, [true, false]), @@ -1164,16 +1155,15 @@ is_max_no_slots(default) -> default; is_max_no_slots(I) when is_integer(I), I > 0, I < 1 bsl 31 -> I. is_comp_min_max(Defs) -> - #open_args{max_no_slots = Max, min_no_slots = Min, version = V} = Defs, - case V of - _ when Min =:= default -> Defs; - _ when Max =:= default -> Defs; - _ -> true = Min =< Max, Defs + #open_args{max_no_slots = Max, min_no_slots = Min} = Defs, + if + Min =:= default -> Defs; + Max =:= default -> Defs; + true -> true = Min =< Max, Defs end. -is_version(default) -> default; -is_version(8) -> 8; -is_version(9) -> 9. +is_version(default) -> true; +is_version(9) -> true. mem(X, L) -> case lists:member(X, L) of @@ -1288,17 +1278,23 @@ badarg_exit(Reply, _A) -> init(Parent, Server) -> process_flag(trap_exit, true), - open_file_loop(#head{parent = Parent, server = Server}). - -open_file_loop(Head) -> %% The Dets server pretends the file is open before %% internal_open() has been called, which means that unless the %% internal_open message is applied first, other processes can %% find the pid by calling dets_server:get_pid() and do things %% before Head has been initialized properly. receive - ?DETS_CALL(From, {internal_open, _Ref, _Args}=Op) -> - do_apply_op(Op, From, Head, 0) + ?DETS_CALL(From, {internal_open, Ref, Args}=Op) -> + try do_internal_open(Parent, Server, From, Ref, Args) of + Head -> + open_file_loop(Head, 0) + catch + exit:normal -> + exit(normal); + _:Bad -> + bug_found(no_name, Op, Bad, From), + exit(Bad) % give up + end end. open_file_loop(Head, N) when element(1, Head#head.update_mode) =:= error -> @@ -1379,28 +1375,7 @@ do_apply_op(Op, From, Head, N) -> exit:normal -> exit(normal); _:Bad -> - Name = Head#head.name, - case dets_utils:debug_mode() of - true -> - %% If stream_op/5 found more requests, this is not - %% the last operation. - error_logger:format - ("** dets: Bug was found when accessing table ~w,~n" - "** dets: operation was ~p and reply was ~w.~n" - "** dets: Stacktrace: ~w~n", - [Name, Op, Bad, erlang:get_stacktrace()]); - false -> - error_logger:format - ("** dets: Bug was found when accessing table ~w~n", - [Name]) - end, - if - From =/= self() -> - From ! {self(), {error, {dets_bug, Name, Op, Bad}}}, - ok; - true -> % auto_save | may_grow | {delayed_write, _} - ok - end, + bug_found(Head#head.name, Op, Bad, From), open_file_loop(Head, N) end. @@ -1408,10 +1383,7 @@ apply_op(Op, From, Head, N) -> case Op of {add_user, Tab, OpenArgs}-> #open_args{file = Fname, type = Type, keypos = Keypos, - ram_file = Ram, access = Access, - version = Version} = OpenArgs, - VersionOK = (Version =:= default) or - (Head#head.version =:= Version), + ram_file = Ram, access = Access} = OpenArgs, %% min_no_slots and max_no_slots are not tested Res = if Tab =:= Head#head.name, @@ -1419,7 +1391,6 @@ apply_op(Op, From, Head, N) -> Head#head.type =:= Type, Head#head.ram_file =:= Ram, Head#head.access =:= Access, - VersionOK, Fname =:= Head#head.filename -> ok; true -> @@ -1475,21 +1446,14 @@ apply_op(Op, From, Head, N) -> From ! 
{self(), Res}, ok; {internal_open, Ref, Args} -> - ?PROFILE(ep:do()), - case do_open_file(Args, Head#head.parent, Head#head.server,Ref) of - {ok, H2} -> - From ! {self(), ok}, - H2; - Error -> - From ! {self(), Error}, - exit(normal) - end; + do_internal_open(Head#head.parent, Head#head.server, From, + Ref, Args); may_grow when Head#head.update_mode =/= saved -> if Head#head.update_mode =:= dirty -> %% Won't grow more if the table is full. {H2, _Res} = - (Head#head.mod):may_grow(Head, 0, many_times), + dets_v9:may_grow(Head, 0, many_times), {N + 1, H2}; true -> ok @@ -1519,21 +1483,10 @@ apply_op(Op, From, Head, N) -> From ! {self(), Res}, erlang:garbage_collect(), {0, H2}; - {delete_key, Keys} when Head#head.update_mode =:= dirty -> - if - Head#head.version =:= 8 -> - {H2, Res} = fdelete_key(Head, Keys), - From ! {self(), Res}, - {N + 1, H2}; - true -> - stream_op(Op, From, [], Head, N) - end; + {delete_key, _Keys} when Head#head.update_mode =:= dirty -> + stream_op(Op, From, [], Head, N); {delete_object, Objs} when Head#head.update_mode =:= dirty -> case check_objects(Objs, Head#head.keypos) of - true when Head#head.version =:= 8 -> - {H2, Res} = fdelete_object(Head, Objs), - From ! {self(), Res}, - {N + 1, H2}; true -> stream_op(Op, From, [], Head, N); false -> @@ -1551,10 +1504,6 @@ apply_op(Op, From, Head, N) -> H2; {insert, Objs} when Head#head.update_mode =:= dirty -> case check_objects(Objs, Head#head.keypos) of - true when Head#head.version =:= 8 -> - {H2, Res} = finsert(Head, Objs), - From ! {self(), Res}, - {N + 1, H2}; true -> stream_op(Op, From, [], Head, N); false -> @@ -1565,10 +1514,6 @@ apply_op(Op, From, Head, N) -> {H2, Res} = finsert_new(Head, Objs), From ! {self(), Res}, {N + 1, H2}; - {lookup_keys, Keys} when Head#head.version =:= 8 -> - {H2, Res} = flookup_keys(Head, Keys), - From ! {self(), Res}, - H2; {lookup_keys, _Keys} -> stream_op(Op, From, [], Head, N); {match_init, State, Safe} -> @@ -1584,10 +1529,6 @@ apply_op(Op, From, Head, N) -> {H2, Res} = fmatch(Head, MP, Spec, NObjs, Safe, From), From ! {self(), Res}, H2; - {member, Key} when Head#head.version =:= 8 -> - {H2, Res} = fmember(Head, Key), - From ! {self(), Res}, - H2; {member, _Key} = Op -> stream_op(Op, From, [], Head, N); {next, Key} -> @@ -1628,7 +1569,7 @@ apply_op(Op, From, Head, N) -> apply_op(WriteOp, From, H2, 0); WriteOp when Head#head.access =:= read_write, Head#head.update_mode =:= saved -> - case catch (Head#head.mod):mark_dirty(Head) of + case catch dets_v9:mark_dirty(Head) of ok -> start_auto_save_timer(Head), H2 = Head#head{update_mode = dirty}, @@ -1643,6 +1584,40 @@ apply_op(Op, From, Head, N) -> ok end. +bug_found(Name, Op, Bad, From) -> + case dets_utils:debug_mode() of + true -> + %% If stream_op/5 found more requests, this is not + %% the last operation. + error_logger:format + ("** dets: Bug was found when accessing table ~w,~n" + "** dets: operation was ~p and reply was ~w.~n" + "** dets: Stacktrace: ~w~n", + [Name, Op, Bad, erlang:get_stacktrace()]); + false -> + error_logger:format + ("** dets: Bug was found when accessing table ~w~n", + [Name]) + end, + if + From =/= self() -> + From ! {self(), {error, {dets_bug, Name, Op, Bad}}}, + ok; + true -> % auto_save | may_grow | {delayed_write, _} + ok + end. + +do_internal_open(Parent, Server, From, Ref, Args) -> + ?PROFILE(ep:do()), + case do_open_file(Args, Parent, Server, Ref) of + {ok, Head} -> + From ! {self(), ok}, + Head; + Error -> + From ! {self(), Error}, + exit(normal) + end. 
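With the version option now ignored (it is still accepted for backwards compatibility but has no effect), a table is opened just as before, only without specifying a format; a minimal sketch with a made-up file name and key:

{ok, my_tab} = dets:open_file(my_tab, [{file, "/tmp/my_tab.dets"}, {type, set}]),
ok = dets:insert(my_tab, {some_key, 42}),
[{some_key, 42}] = dets:lookup(my_tab, some_key),
ok = dets:close(my_tab).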
+ start_auto_save_timer(Head) when Head#head.auto_save =:= infinity -> ok; start_auto_save_timer(Head) -> @@ -1650,7 +1625,7 @@ start_auto_save_timer(Head) -> _Ref = erlang:send_after(Millis, self(), ?DETS_CALL(self(), auto_save)), ok. -%% Version 9: Peek the message queue and try to evaluate several +%% Peek the message queue and try to evaluate several %% lookup requests in parallel. Evalute delete_object, delete and %% insert as well. stream_op(Op, Pid, Pids, Head, N) -> @@ -1760,7 +1735,7 @@ lookup_reply(P, O) -> %% Callback functions for system messages handling. %%----------------------------------------------------------------- system_continue(_Parent, _, Head) -> - open_file_loop(Head). + open_file_loop(Head, 0). system_terminate(Reason, _Parent, _, Head) -> _NewHead = do_stop(Head), @@ -1793,7 +1768,8 @@ read_file_header(FileName, Access, RamFile) -> dets_utils:pread_close(Fd, FileName, ?FILE_FORMAT_VERSION_POS, 4), if Version =< 8 -> - dets_v8:read_file_header(Fd, FileName); + _ = file:close(Fd), + throw({error, {format_8_no_longer_supported, FileName}}); Version =:= 9 -> dets_v9:read_file_header(Fd, FileName); true -> @@ -1820,7 +1796,7 @@ perform_save(Head, DoSync) when Head#head.update_mode =:= dirty; Head#head.update_mode =:= new_dirty -> case catch begin {Head1, []} = write_cache(Head), - {Head2, ok} = (Head1#head.mod):do_perform_save(Head1), + {Head2, ok} = dets_v9:do_perform_save(Head1), ok = ensure_written(Head2, DoSync), {Head2#head{update_mode = saved}, ok} end of @@ -1853,7 +1829,7 @@ ensure_written(Head, false) when not Head#head.ram_file -> do_bchunk_init(Head, Tab) -> case catch write_cache(Head) of {H2, []} -> - case (H2#head.mod):table_parameters(H2) of + case dets_v9:table_parameters(H2) of undefined -> {H2, {error, old_version}}; Parms -> @@ -1862,9 +1838,9 @@ do_bchunk_init(Head, Tab) -> L =:= <<>> -> eof; true -> <<>> end, - C0 = #dets_cont{no_objs = default, bin = Bin, alloc = L}, BinParms = term_to_binary(Parms), - {H2, {C0#dets_cont{tab = Tab, proc = self(),what = bchunk}, + {H2, {#dets_cont{no_objs = default, bin = Bin, alloc = L, + tab = Tab, proc = self(),what = bchunk}, [BinParms]}} end; {NewHead, _} = HeadError when is_record(NewHead, head) -> @@ -1904,16 +1880,8 @@ do_delete_all_objects(Head) -> max_no_slots = MaxSlots, cache = Cache} = Head, CacheSz = dets_utils:cache_size(Cache), ok = dets_utils:truncate(Fd, Fname, bof), - (Head#head.mod):initiate_file(Fd, Tab, Fname, Type, Kp, MinSlots, MaxSlots, - Ram, CacheSz, Auto, true). - -%% -> {NewHead, Reply}, Reply = ok | Error. -fdelete_key(Head, Keys) -> - do_delete(Head, Keys, delete_key). - -%% -> {NewHead, Reply}, Reply = ok | badarg | Error. -fdelete_object(Head, Objects) -> - do_delete(Head, Objects, delete_object). + dets_v9:initiate_file(Fd, Tab, Fname, Type, Kp, MinSlots, MaxSlots, + Ram, CacheSz, Auto, true). ffirst(H) -> Ref = make_ref(), @@ -1930,7 +1898,7 @@ ffirst1(H) -> ffirst(NH, 0). 
ffirst(H, Slot) -> - case (H#head.mod):slot_objs(H, Slot) of + case dets_v9:slot_objs(H, Slot) of '$end_of_table' -> {H, '$end_of_table'}; [] -> ffirst(H, Slot+1); [X|_] -> {H, element(H#head.keypos, X)} @@ -2067,7 +2035,7 @@ finfo(H, auto_save) -> {H, H#head.auto_save}; finfo(H, bchunk_format) -> case catch write_cache(H) of {H2, []} -> - case (H2#head.mod):table_parameters(H2) of + case dets_v9:table_parameters(H2) of undefined = Undef -> {H2, Undef}; Parms -> @@ -2100,7 +2068,7 @@ finfo(H, no_keys) -> {H2, _} = HeadError when is_record(H2, head) -> HeadError end; -finfo(H, no_slots) -> {H, (H#head.mod):no_slots(H)}; +finfo(H, no_slots) -> {H, dets_v9:no_slots(H)}; finfo(H, pid) -> {H, self()}; finfo(H, ram_file) -> {H, H#head.ram_file}; finfo(H, safe_fixed) -> @@ -2127,7 +2095,7 @@ finfo(H, size) -> HeadError end; finfo(H, type) -> {H, H#head.type}; -finfo(H, version) -> {H, H#head.version}; +finfo(H, version) -> {H, 9}; finfo(H, _) -> {H, undefined}. file_size(Fd, FileName) -> @@ -2136,8 +2104,6 @@ file_size(Fd, FileName) -> test_bchunk_format(_Head, undefined) -> false; -test_bchunk_format(Head, _Term) when Head#head.version =:= 8 -> - false; test_bchunk_format(Head, Term) -> dets_v9:try_bchunk_header(Term, Head) =/= not_ok. @@ -2206,7 +2172,7 @@ do_finit(Head, Init, Format, NoSlots) -> #head{fptr = Fd, type = Type, keypos = Kp, auto_save = Auto, cache = Cache, filename = Fname, ram_file = Ram, min_no_slots = MinSlots0, max_no_slots = MaxSlots, - name = Tab, update_mode = UpdateMode, mod = HMod} = Head, + name = Tab, update_mode = UpdateMode} = Head, CacheSz = dets_utils:cache_size(Cache), {How, Head1} = case Format of @@ -2219,9 +2185,10 @@ do_finit(Head, Init, Format, NoSlots) -> {general_init, Head}; true -> ok = dets_utils:truncate(Fd, Fname, bof), - {ok, H} = HMod:initiate_file(Fd, Tab, Fname, Type, Kp, - MinSlots, MaxSlots, Ram, - CacheSz, Auto, false), + {ok, H} = + dets_v9:initiate_file(Fd, Tab, Fname, Type, Kp, + MinSlots, MaxSlots, Ram, + CacheSz, Auto, false), {general_init, H} end; bchunk -> @@ -2230,7 +2197,7 @@ do_finit(Head, Init, Format, NoSlots) -> end, case How of bchunk_init -> - case HMod:bchunk_init(Head1, Init) of + case dets_v9:bchunk_init(Head1, Init) of {ok, NewHead} -> {ok, NewHead#head{update_mode = dirty}}; Error -> @@ -2238,10 +2205,10 @@ do_finit(Head, Init, Format, NoSlots) -> end; general_init -> Cntrs = ets:new(dets_init, []), - Input = HMod:bulk_input(Head1, Init, Cntrs), + Input = dets_v9:bulk_input(Head1, Init, Cntrs), SlotNumbers = {Head1#head.min_no_slots, bulk_init, MaxSlots}, {Reply, SizeData} = - do_sort(Head1, SlotNumbers, Input, Cntrs, Fname, not_used), + do_sort(Head1, SlotNumbers, Input, Cntrs, Fname), Bulk = true, case Reply of {ok, NoDups, H1} -> @@ -2297,7 +2264,8 @@ fmatch(Head, MP, Spec, N, Safe, From) -> {NewHead, Reply} = flookup_keys(Head, Keys), case Reply of Objs when is_list(Objs) -> - MatchingObjs = ets:match_spec_run(Objs, MP), + {match_spec, MS} = MP, + MatchingObjs = ets:match_spec_run(Objs, MS), {NewHead, {done, MatchingObjs}}; Error -> {NewHead, Error} @@ -2377,7 +2345,7 @@ fmatch_delete(Head, C) -> {[], _} -> {Head, {done, 0}}; {RTs, NC} -> - MP = C#dets_cont.match_program, + {match_spec, MP} = C#dets_cont.match_program, case catch filter_binary_terms(RTs, MP, []) of {'EXIT', _} -> Bad = dets_utils:bad_object(fmatch_delete, RTs), @@ -2405,7 +2373,7 @@ do_fmatch_delete_var_keys(Head, MP, _Spec, From) -> C0 = init_scan(NewHead, default), {NewHead, {cont, C0#dets_cont{match_program = MP}, 0}}. 
-do_fmatch_constant_keys(Head, Keys, MP) -> +do_fmatch_constant_keys(Head, Keys, {match_spec, MP}) -> case flookup_keys(Head, Keys) of {NewHead, ReadTerms} when is_list(ReadTerms) -> Terms = filter_terms(ReadTerms, MP, []), @@ -2454,18 +2422,8 @@ do_delete(Head, Things, What) -> HeadError end. -fmember(Head, Key) -> - case catch begin - {Head2, [{_NoPid,Objs}]} = - update_cache(Head, [Key], {lookup, nopid}), - {Head2, Objs =/= []} - end of - {NewHead, _} = Reply when is_record(NewHead, head) -> - Reply - end. - fnext(Head, Key) -> - Slot = (Head#head.mod):db_hash(Key, Head), + Slot = dets_v9:db_hash(Key, Head), Ref = make_ref(), case catch {Ref, fnext(Head, Key, Slot)} of {Ref, {H, R}} -> @@ -2476,7 +2434,7 @@ fnext(Head, Key) -> fnext(H, Key, Slot) -> {NH, []} = write_cache(H), - case (H#head.mod):slot_objs(NH, Slot) of + case dets_v9:slot_objs(NH, Slot) of '$end_of_table' -> {NH, '$end_of_table'}; L -> fnext_search(NH, Key, Slot, L) end. @@ -2490,7 +2448,7 @@ fnext_search(H, K, Slot, L) -> %% We've got to continue to search for the next key in the next slot fnext_slot(H, K, Slot) -> - case (H#head.mod):slot_objs(H, Slot) of + case dets_v9:slot_objs(H, Slot) of '$end_of_table' -> {H, '$end_of_table'}; [] -> fnext_slot(H, K, Slot+1); L -> {H, element(H#head.keypos, hd(L))} @@ -2518,11 +2476,10 @@ fopen2(Fname, Tab) -> Acc = read_write, Ram = false, {ok, Fd, FH} = read_file_header(Fname, Acc, Ram), - Mod = FH#fileheader.mod, - Do = case Mod:check_file_header(FH, Fd) of - {ok, Head1, ExtraInfo} -> + Do = case dets_v9:check_file_header(FH, Fd) of + {ok, Head1} -> Head2 = Head1#head{filename = Fname}, - try {ok, Mod:init_freelist(Head2, ExtraInfo)} + try {ok, dets_v9:init_freelist(Head2)} catch throw:_ -> {repair, " has bad free lists, repairing ..."} @@ -2536,8 +2493,7 @@ fopen2(Fname, Tab) -> case Do of {repair, Mess} -> io:format(user, "dets: file ~tp~s~n", [Fname, Mess]), - Version = default, - case fsck(Fd, Tab, Fname, FH, default, default, Version) of + case fsck(Fd, Tab, Fname, FH, default, default) of ok -> fopen2(Fname, Tab); Error -> @@ -2570,33 +2526,23 @@ fopen_existing_file(Tab, OpenArgs) -> #open_args{file = Fname, type = Type, keypos = Kp, repair = Rep, min_no_slots = MinSlots, max_no_slots = MaxSlots, ram_file = Ram, delayed_write = CacheSz, auto_save = - Auto, access = Acc, version = Version, debug = Debug} = + Auto, access = Acc, debug = Debug} = OpenArgs, {ok, Fd, FH} = read_file_header(Fname, Acc, Ram), - V9 = (Version =:= 9) or (Version =:= default), MinF = (MinSlots =:= default) or (MinSlots =:= FH#fileheader.min_no_slots), MaxF = (MaxSlots =:= default) or (MaxSlots =:= FH#fileheader.max_no_slots), - Mod = (FH#fileheader.mod), - Wh = case Mod:check_file_header(FH, Fd) of - {ok, Head, true} when Rep =:= force, Acc =:= read_write, - FH#fileheader.version =:= 9, - FH#fileheader.no_colls =/= undefined, - MinF, MaxF, V9 -> - {compact, Head, true}; - {ok, _Head, _Extra} when Rep =:= force, Acc =:= read -> + Wh = case dets_v9:check_file_header(FH, Fd) of + {ok, Head} when Rep =:= force, Acc =:= read_write, + FH#fileheader.no_colls =/= undefined, + MinF, MaxF -> + {compact, Head}; + {ok, _Head} when Rep =:= force, Acc =:= read -> throw({error, {access_mode, Fname}}); - {ok, Head, need_compacting} when Acc =:= read -> - {final, Head, true}; % Version 8 only. - {ok, _Head, need_compacting} when Rep =:= true -> - %% The file needs to be compacted due to a very big - %% and fragmented free_list. Version 8 only. 
- M = " is now compacted ...", - {repair, M}; - {ok, _Head, _Extra} when Rep =:= force -> + {ok, _Head} when Rep =:= force -> M = ", repair forced.", {repair, M}; - {ok, Head, ExtraInfo} -> - {final, Head, ExtraInfo}; + {ok, Head} -> + {final, Head}; {error, not_closed} when Rep =:= force, Acc =:= read_write -> M = ", repair forced.", {repair, M}; @@ -2605,17 +2551,13 @@ fopen_existing_file(Tab, OpenArgs) -> {repair, M}; {error, not_closed} when Rep =:= false -> throw({error, {needs_repair, Fname}}); - {error, version_bump} when Rep =:= true, Acc =:= read_write -> - %% Version 8 only - M = " old version, upgrading ...", - {repair, M}; {error, Reason} -> throw({error, {Reason, Fname}}) end, Do = case Wh of - {Tag, Hd, Extra} when Tag =:= final; Tag =:= compact -> + {Tag, Hd} when Tag =:= final; Tag =:= compact -> Hd1 = Hd#head{filename = Fname}, - try {Tag, Mod:init_freelist(Hd1, Extra)} + try {Tag, dets_v9:init_freelist(Hd1)} catch throw:_ -> {repair, " has bad free lists, repairing ..."} @@ -2643,23 +2585,20 @@ fopen_existing_file(Tab, OpenArgs) -> "now repairing ...~n", [Fname]), {ok, Fd2, _FH} = read_file_header(Fname, Acc, Ram), do_repair(Fd2, Tab, Fname, FH, MinSlots, MaxSlots, - Version, OpenArgs) + OpenArgs) end; {repair, Mess} -> io:format(user, "dets: file ~tp~s~n", [Fname, Mess]), do_repair(Fd, Tab, Fname, FH, MinSlots, MaxSlots, - Version, OpenArgs); - _ when FH#fileheader.version =/= Version, Version =/= default -> - throw({error, {version_mismatch, Fname}}); + OpenArgs); {final, H} -> H1 = H#head{auto_save = Auto}, open_final(H1, Fname, Acc, Ram, CacheSz, Tab, Debug) end. -do_repair(Fd, Tab, Fname, FH, MinSlots, MaxSlots, Version, OpenArgs) -> - case fsck(Fd, Tab, Fname, FH, MinSlots, MaxSlots, Version) of +do_repair(Fd, Tab, Fname, FH, MinSlots, MaxSlots, OpenArgs) -> + case fsck(Fd, Tab, Fname, FH, MinSlots, MaxSlots) of ok -> - %% No need to update 'version'. erlang:garbage_collect(), fopen3(Tab, OpenArgs#open_args{repair = false}); Error -> @@ -2673,8 +2612,8 @@ open_final(Head, Fname, Acc, Ram, CacheSz, Tab, Debug) -> filename = Fname, name = Tab, cache = dets_utils:new_cache(CacheSz)}, - init_disk_map(Head1#head.version, Tab, Debug), - (Head1#head.mod):cache_segps(Head1#head.fptr, Fname, Head1#head.next), + init_disk_map(Tab, Debug), + dets_v9:cache_segps(Head1#head.fptr, Fname, Head1#head.next), check_growth(Head1), {ok, Head1}. @@ -2683,7 +2622,7 @@ fopen_init_file(Tab, OpenArgs) -> #open_args{file = Fname, type = Type, keypos = Kp, min_no_slots = MinSlotsArg, max_no_slots = MaxSlotsArg, ram_file = Ram, delayed_write = CacheSz, auto_save = Auto, - version = UseVersion, debug = Debug} = OpenArgs, + debug = Debug} = OpenArgs, MinSlots = choose_no_slots(MinSlotsArg, ?DEFAULT_MIN_NO_SLOTS), MaxSlots = choose_no_slots(MaxSlotsArg, ?DEFAULT_MAX_NO_SLOTS), FileSpec = if @@ -2691,20 +2630,11 @@ fopen_init_file(Tab, OpenArgs) -> true -> Fname end, {ok, Fd} = dets_utils:open(FileSpec, open_args(read_write, Ram)), - Version = if - UseVersion =:= default -> - case os:getenv("DETS_USE_FILE_FORMAT") of - "8" -> 8; - _ -> 9 - end; - true -> - UseVersion - end, - Mod = version2module(Version), %% No need to truncate an empty file. 
- init_disk_map(Version, Tab, Debug), - case catch Mod:initiate_file(Fd, Tab, Fname, Type, Kp, MinSlots, MaxSlots, - Ram, CacheSz, Auto, true) of + init_disk_map(Tab, Debug), + case catch dets_v9:initiate_file(Fd, Tab, Fname, Type, Kp, + MinSlots, MaxSlots, + Ram, CacheSz, Auto, true) of {error, Reason} when Ram -> _ = file:close(Fd), throw({error, Reason}); @@ -2719,15 +2649,13 @@ fopen_init_file(Tab, OpenArgs) -> end. %% Debug. -init_disk_map(9, Name, Debug) -> +init_disk_map(Name, Debug) -> case Debug orelse dets_utils:debug_mode() of true -> dets_utils:init_disk_map(Name); false -> ok - end; -init_disk_map(_Version, _Name, _Debug) -> - ok. + end. open_args(Access, RamFile) -> A1 = case Access of @@ -2740,15 +2668,7 @@ open_args(Access, RamFile) -> end, A1 ++ A2 ++ [binary, read]. -version2module(V) when V =< 8 -> dets_v8; -version2module(9) -> dets_v9. - -module2version(dets_v8) -> 8; -module2version(dets_v9) -> 9; -module2version(not_used) -> 9. - %% -> ok | throw(Error) -%% For version 9 tables only. compact(SourceHead) -> #head{name = Tab, filename = Fname, fptr = SFd, type = Type, keypos = Kp, ram_file = Ram, auto_save = Auto} = SourceHead, @@ -2759,7 +2679,7 @@ compact(SourceHead) -> %% It is normally not possible to have two open tables in the same %% process since the process dictionary is used for caching %% segment pointers, but here is works anyway--when reading a file - %% serially the pointers to not need to be used. + %% serially the pointers do not need to be used. Head = case catch dets_v9:prep_table_copy(Fd, Tab, Tmp, Type, Kp, Ram, CacheSz, Auto, TblParms) of {ok, H} -> @@ -2794,7 +2714,7 @@ compact(SourceHead) -> %% -> ok | Error %% Closes Fd. -fsck(Fd, Tab, Fname, FH, MinSlotsArg, MaxSlotsArg, Version) -> +fsck(Fd, Tab, Fname, FH, MinSlotsArg, MaxSlotsArg) -> %% MinSlots and MaxSlots are the option values. #fileheader{min_no_slots = MinSlotsFile, max_no_slots = MaxSlotsFile} = FH, @@ -2807,10 +2727,10 @@ fsck(Fd, Tab, Fname, FH, MinSlotsArg, MaxSlotsArg, Version) -> %% If the number of objects (keys) turns out to be significantly %% different from NoSlots, we try again with the correct number of %% objects (keys). - case fsck_try(Fd, Tab, FH, Fname, SlotNumbers, Version) of + case fsck_try(Fd, Tab, FH, Fname, SlotNumbers) of {try_again, BetterNoSlots} -> BetterSlotNumbers = {MinSlots, BetterNoSlots, MaxSlots}, - case fsck_try(Fd, Tab, FH, Fname, BetterSlotNumbers, Version) of + case fsck_try(Fd, Tab, FH, Fname, BetterSlotNumbers) of {try_again, _} -> _ = file:close(Fd), {error, {cannot_repair, Fname}}; @@ -2829,7 +2749,7 @@ choose_no_slots(NoSlots, _) -> NoSlots. %% Initiating a table using a fun and repairing (or converting) a %% file are completely different things, but nevertheless the same %% method is used in both cases... 
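For reference, a short usage sketch (standard dets options, not introduced by this patch) showing where the slot estimates consumed by choose_no_slots/2 come from:

    {ok, T} = dets:open_file(big_tab, [{file, "big.dets"},
                                       {min_no_slots, 4096},
                                       {max_no_slots, 1024 * 1024}]),
    ok = dets:close(T).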
-fsck_try(Fd, Tab, FH, Fname, SlotNumbers, Version) -> +fsck_try(Fd, Tab, FH, Fname, SlotNumbers) -> Tmp = tempfile(Fname), #fileheader{type = Type, keypos = KeyPos} = FH, {_MinSlots, EstNoSlots, MaxSlots} = SlotNumbers, @@ -2838,7 +2758,7 @@ fsck_try(Fd, Tab, FH, Fname, SlotNumbers, Version) -> max_no_slots = MaxSlots, ram_file = false, delayed_write = ?DEFAULT_CACHE, auto_save = infinity, access = read_write, - version = Version, debug = false}, + debug = false}, case catch fopen3(Tab, OpenArgs) of {ok, Head} -> case fsck_try_est(Head, Fd, Fname, SlotNumbers, FH) of @@ -2888,10 +2808,9 @@ assure_no_file(File) -> %% -> {ok, NewHead} | {try_again, integer()} | Error fsck_try_est(Head, Fd, Fname, SlotNumbers, FH) -> %% Mod is the module to use for reading input when repairing. - Mod = FH#fileheader.mod, Cntrs = ets:new(dets_repair, []), - Input = Mod:fsck_input(Head, Fd, Cntrs, FH), - {Reply, SizeData} = do_sort(Head, SlotNumbers, Input, Cntrs, Fname, Mod), + Input = dets_v9:fsck_input(Head, Fd, Cntrs, FH), + {Reply, SizeData} = do_sort(Head, SlotNumbers, Input, Cntrs, Fname), Bulk = false, case Reply of {ok, NoDups, H1} -> @@ -2906,14 +2825,13 @@ fsck_try_est(Head, Fd, Fname, SlotNumbers, FH) -> Else end. -do_sort(Head, SlotNumbers, Input, Cntrs, Fname, Mod) -> - OldV = module2version(Mod), +do_sort(Head, SlotNumbers, Input, Cntrs, Fname) -> %% output_objs/4 replaces {LogSize,NoObjects} in Cntrs by %% {LogSize,Position,Data,NoObjects | NoCollections}. %% Data = {FileName,FileDescriptor} | [object()] - %% For small tables Data may be a list of objects which is more + %% For small tables Data can be a list of objects which is more %% efficient since no temporary files are created. - Output = (Head#head.mod):output_objs(OldV, Head, SlotNumbers, Cntrs), + Output = dets_v9:output_objs(Head, SlotNumbers, Cntrs), TmpDir = filename:dirname(Fname), Reply = (catch file_sorter:sort(Input, Output, [{format, binary},{tmpdir, TmpDir}])), @@ -2954,13 +2872,6 @@ fsck_copy1([SzData | L], Head, Bulk, NoDups) -> {ok, Copied} when Copied =:= ExpectedSize; NoObjects =:= 0 -> % the segments fsck_copy1(L, Head, Bulk, NoDups); - {ok, Copied} when Bulk, Head#head.version =:= 8 -> - NoZeros = ExpectedSize - Copied, - Dups = NoZeros div Size, - Addr = Pos+Copied, - NewHead = free_n_objects(Head, Addr, Size-1, NoDups), - NewNoDups = NoDups - Dups, - fsck_copy1(L, NewHead, Bulk, NewNoDups); {ok, _Copied} -> % should never happen close_files(Bulk, L, Head), Reason = if Bulk -> initialization_failed; @@ -2975,13 +2886,6 @@ fsck_copy1([], Head, _Bulk, NoDups) when NoDups =/= 0 -> fsck_copy1([], Head, _Bulk, _NoDups) -> {ok, Head#head{update_mode = dirty}}. -free_n_objects(Head, _Addr, _Size, 0) -> - Head; -free_n_objects(Head, Addr, Size, N) -> - {NewHead, _} = dets_utils:free(Head, Addr, Size), - NewAddr = Addr + Size + 1, - free_n_objects(NewHead, NewAddr, Size, N-1). 
- close_files(false, SizeData, Head) -> _ = file:close(Head#head.fptr), close_files(true, SizeData, Head); @@ -3000,7 +2904,7 @@ close_tmp(Fd) -> fslot(H, Slot) -> case catch begin {NH, []} = write_cache(H), - Objs = (NH#head.mod):slot_objs(NH, Slot), + Objs = dets_v9:slot_objs(NH, Slot), {NH, Objs} end of {NewHead, _Objects} = Reply when is_record(NewHead, head) -> @@ -3050,7 +2954,7 @@ where_is_object(Head, Object) -> true -> case catch write_cache(Head) of {NewHead, []} -> - {NewHead, (Head#head.mod):find_object(NewHead, Object)}; + {NewHead, dets_v9:find_object(NewHead, Object)}; {NewHead, _} = HeadError when is_record(NewHead, head) -> HeadError end; @@ -3063,13 +2967,9 @@ check_objects([T | Ts], Kp) when tuple_size(T) >= Kp -> check_objects(L, _Kp) -> L =:= []. -no_things(Head) when Head#head.no_keys =:= undefined -> - Head#head.no_objects; no_things(Head) -> Head#head.no_keys. -file_no_things(FH) when FH#fileheader.no_keys =:= undefined -> - FH#fileheader.no_objects; file_no_things(FH) -> FH#fileheader.no_keys. @@ -3110,7 +3010,7 @@ update_cache(Head, ToAdd) -> if Lookup; NewSize >= Cache#cache.tsize -> %% The cache is considered full, or some lookup. - {NewHead, LU, PwriteList} = (Head#head.mod):write_cache(Head1), + {NewHead, LU, PwriteList} = dets_v9:write_cache(Head1), {NewHead, Found ++ LU, PwriteList}; NewC =:= [] -> {Head1, Found, []}; @@ -3195,7 +3095,7 @@ delayed_write(Head, WrTime) -> %% -> {NewHead, [LookedUpObject]} | throw({NewHead, Error}) write_cache(Head) -> - {Head1, LU, PwriteList} = (Head#head.mod):write_cache(Head), + {Head1, LU, PwriteList} = dets_v9:write_cache(Head), {NewHead, ok} = dets_utils:pwrite(Head1, PwriteList), {NewHead, LU}. @@ -3248,7 +3148,7 @@ scan(Head, C) -> % when is_record(C, dets_cont) scan(Bin, Head, From, To, L, [], R, {C, Head#head.type}). scan(Bin, H, From, To, L, Ts, R, {C0, Type} = C) -> - case (H#head.mod):scan_objs(H, Bin, From, To, L, Ts, R, Type) of + case dets_v9:scan_objs(H, Bin, From, To, L, Ts, R, Type) of {more, NFrom, NTo, NL, NTs, NR, Sz} -> scan_read(H, NFrom, NTo, Sz, NL, NTs, NR, C); {stop, <<>>=B, NFrom, NTo, <<>>=NL, NTs} -> @@ -3317,7 +3217,7 @@ file_info(FileName) -> case catch read_file_header(FileName, read, false) of {ok, Fd, FH} -> _ = file:close(Fd), - (FH#fileheader.mod):file_info(FH); + dets_v9:file_info(FH); Other -> Other end. @@ -3332,15 +3232,13 @@ get_head_field(Fd, Field) -> view(FileName) -> case catch read_file_header(FileName, read, false) of {ok, Fd, FH} -> - Mod = FH#fileheader.mod, - try Mod:check_file_header(FH, Fd) of - {ok, H0, ExtraInfo} -> - Mod = FH#fileheader.mod, - case Mod:check_file_header(FH, Fd) of - {ok, H0, ExtraInfo} -> - H = Mod:init_freelist(H0, ExtraInfo), + try dets_v9:check_file_header(FH, Fd) of + {ok, H0} -> + case dets_v9:check_file_header(FH, Fd) of + {ok, H0} -> + H = dets_v9:init_freelist(H0), v_free_list(H), - Mod:v_segments(H), + dets_v9:v_segments(H), ok; X -> X diff --git a/lib/stdlib/src/dets.hrl b/lib/stdlib/src/dets.hrl index 6ebeb96156..b5e732b08f 100644 --- a/lib/stdlib/src/dets.hrl +++ b/lib/stdlib/src/dets.hrl @@ -21,7 +21,7 @@ -define(DEFAULT_MIN_NO_SLOTS, 256). -define(DEFAULT_MAX_NO_SLOTS, 32*1024*1024). -define(DEFAULT_AUTOSAVE, 3). % minutes --define(DEFAULT_CACHE, {3000, 14000}). % {delay,size} in {milliseconds,bytes} +-define(DEFAULT_CACHE, {3000, 14000}). % cache_parms() %% Type. -define(SET, 1). @@ -46,83 +46,111 @@ -define(DETS_CALL(Pid, Req), {'$dets_call', Pid, Req}). +-type access() :: 'read' | 'read_write'. 
+-type auto_save() :: 'infinity' | non_neg_integer(). +-type hash_bif() :: 'phash' | 'phash2'. +-type keypos() :: pos_integer(). +-type no_colls() :: [{LogSize :: non_neg_integer(), + NoCollections :: non_neg_integer()}]. +-type no_slots() :: 'default' | non_neg_integer(). +-type tab_name() :: term(). +-type type() :: 'bag' | 'duplicate_bag' | 'set'. +-type update_mode() :: 'dirty' + | 'new_dirty' + | 'saved' + | {'error', Reason :: term()}. + %% Record holding the file header and more. -record(head, { - m, % size - m2, % m * 2 - next, % next position for growth (segm mgmt only) - fptr, % the file descriptor - no_objects, % number of objects in table, - no_keys, % number of keys (version 9 only) - maxobjsize, % 2-log of the size of the biggest object - % collection (version 9 only) + m :: non_neg_integer(), % size + m2 :: non_neg_integer(), % m * 2 + next :: non_neg_integer(), % next position for growth + % (segm mgmt only) + fptr :: file:fd(), % the file descriptor + no_objects :: non_neg_integer() , % number of objects in table, + no_keys :: non_neg_integer(), % number of keys + maxobjsize :: 'undefined' | non_neg_integer(), % 2-log of + % the size of the biggest object collection n, % split indicator - type, % set | bag | duplicate_bag - keypos, % default is 1 as for ets - freelists, % tuple of free lists of buddies - % if fixed =/= false, then a pair of freelists - freelists_p, % cached FreelistsPointer - no_collections, % [{LogSize,NoCollections}] | undefined; number of - % object collections per size (version 9(b)) - auto_save, % Integer | infinity - update_mode, % saved | dirty | new_dirty | {error, Reason} - fixed = false, % false | {now_time(), [{pid(),Counter}]} - % time of first fix, and number of fixes per process - hash_bif, % hash bif used for this file (phash2, phash, hash) - has_md5, % whether the header has an MD5 sum (version 9(c)) - min_no_slots, % minimum number of slots (default or integer) - max_no_slots, % maximum number of slots (default or integer) - cache, % cache(). Write cache. - - filename, % name of the file being used - access = read_write, % read | read_write - ram_file = false, % true | false - name, % the name of the table - - parent, % The supervisor of Dets processes. - server, % The creator of Dets processes. - - %% Depending on the file format: - version, - mod, - bump, - base + type :: type(), + keypos :: keypos(), % default is 1 as for ets + freelists :: 'undefined' + | tuple(), % tuple of free lists of buddies + % if fixed =/= false, then a pair of freelists + freelists_p :: 'undefined' + | non_neg_integer(), % cached FreelistsPointer + no_collections :: 'undefined' + | no_colls(), % number of object collections + % per size (version 9(b)) + auto_save :: auto_save(), + update_mode :: update_mode(), + fixed = false :: 'false' + | {{integer(), integer()}, % time of first fix, + [{pid(), % and number of fixes per process + non_neg_integer()}]}, + hash_bif :: hash_bif(), % hash bif used for this file + has_md5 :: boolean(), % whether the header has + % an MD5 sum (version 9(c)) + min_no_slots :: no_slots(), % minimum number of slots + max_no_slots :: no_slots(), % maximum number of slots + cache :: 'undefined' | cache(), % Write cache. + + filename :: file:name(), % name of the file being used + access = read_write :: access(), + ram_file = false :: boolean(), + name :: tab_name(), % the name of the table + + parent :: 'undefined' | pid(), % The supervisor of Dets processes. + server :: 'undefined' | pid(), % The creator of Dets processes. 
+ + bump :: non_neg_integer(), + base :: non_neg_integer() }). %% Info extracted from the file header. -record(fileheader, { - freelist, - fl_base, - cookie, - closed_properly, - type, - version, - m, - next, - keypos, - no_objects, - no_keys, - min_no_slots, - max_no_slots, - no_colls, - hash_method, - read_md5, - has_md5, - md5, - trailer, - eof, - n, - mod + freelist :: non_neg_integer(), + fl_base :: non_neg_integer(), + cookie :: non_neg_integer(), + closed_properly :: non_neg_integer(), + type :: 'badtype' | type(), + version :: non_neg_integer(), + m :: non_neg_integer(), + next :: non_neg_integer(), + keypos :: keypos(), + no_objects :: non_neg_integer(), + no_keys :: non_neg_integer(), + min_no_slots :: non_neg_integer(), + max_no_slots :: non_neg_integer(), + no_colls :: 'undefined' | no_colls(), + hash_method :: non_neg_integer(), + read_md5 :: binary(), + has_md5 :: boolean(), + md5 :: binary(), + trailer :: non_neg_integer(), + eof :: non_neg_integer(), + n }). +-type delay() :: non_neg_integer(). +-type threshold() :: non_neg_integer(). +-type cache_parms() :: + {Delay :: delay(), % max time items are kept in RAM only, + % in milliseconds + Size :: threshold()}. % threshold size of cache, in bytes + %% Write Cache. -record(cache, { - cache, % [{Key,{Seq,Item}}], write cache, last item first - csize, % current size of the cached items - inserts, % upper limit on number of inserted keys - wrtime, % last write or update time - tsize, % threshold size of cache, in bytes - delay % max time items are kept in RAM only, in milliseconds + cache :: % write cache, last item first + [{Key :: term(), + {Seq :: non_neg_integer(), Item :: term()}}], + csize :: non_neg_integer(), % current size of the cached items + inserts :: % upper limit on number of inserted keys + non_neg_integer(), + wrtime :: 'undefined' | integer(), % last write or update time + tsize :: threshold(), % threshold size of cache + delay :: delay() % max time items are kept in RAM only }). +-type cache() :: #cache{}. diff --git a/lib/stdlib/src/dets_utils.erl b/lib/stdlib/src/dets_utils.erl index 34a8ddddaa..da6ebd18f2 100644 --- a/lib/stdlib/src/dets_utils.erl +++ b/lib/stdlib/src/dets_utils.erl @@ -20,13 +20,13 @@ -module(dets_utils). %% Utility functions common to several dets file formats. -%% To be used from dets, dets_v8 and dets_v9 only. +%% To be used from modules dets and dets_v9 only. -export([cmp/2, msort/1, mkeysort/2, mkeysearch/3, family/1]). -export([rename/2, pread/2, pread/4, ipread/3, pwrite/2, write/2, truncate/2, position/2, sync/1, open/2, truncate/3, fwrite/3, - write_file/2, position/3, position_close/3, pwrite/4, + write_file/2, position/3, position_close/3, pwrite/3, pread_close/4, read_n/2, pread_n/3, read_4/2]). -export([code_to_type/1, type_to_code/1]). @@ -44,8 +44,6 @@ all_allocated_as_list/1, find_allocated/4, find_next_allocated/3, log2/1, make_zeros/1]). --export([init_slots_from_old_file/2]). - -export([list_to_tree/1, tree_to_bin/5]). -compile({inline, [{sz2pos,1}, {adjust_addr,3}]}). @@ -308,12 +306,6 @@ position_close(Fd, FileName, Pos) -> OK -> OK end. -pwrite(Fd, FileName, Position, B) -> - case file:pwrite(Fd, Position, B) of - ok -> ok; - Error -> file_error(FileName, {error, Error}) - end. - pwrite(Fd, FileName, Bins) -> case file:pwrite(Fd, Bins) of ok -> @@ -478,20 +470,6 @@ new_cache({Delay, Size}) -> %%% Ullman. I think buddy systems were invented by Knuth, a long %%% time ago. 
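A minimal sketch (editorial assumption, not dets code) of the size rounding a buddy system implies: a request is served from the free list of the smallest power of two that covers it.

    buddy_size(Sz) when Sz > 0 -> buddy_size(Sz, 1).

    buddy_size(Sz, P) when P >= Sz -> P;
    buddy_size(Sz, P) -> buddy_size(Sz, P bsl 1).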
-init_slots_from_old_file([{Slot,Addr} | T], Ftab) -> - init_slot(Slot+1,[{Slot,Addr} | T], Ftab); -init_slots_from_old_file([], Ftab) -> - Ftab. - -init_slot(_Slot,[], Ftab) -> - Ftab; % should never happen -init_slot(_Slot,[{_Addr,0}|T], Ftab) -> - init_slots_from_old_file(T, Ftab); -init_slot(Slot,[{_Slot1,Addr}|T], Ftab) -> - Stree = element(Slot, Ftab), - %% io:format("init_slot ~p:~p~n",[Slot, Addr]), - init_slot(Slot,T,setelement(Slot, Ftab, bplus_insert(Stree, Addr))). - %%% The free lists are kept in RAM, and written to the end of the file %%% from time to time. It is possible that a considerable amount of %%% memory is used for a fragmented file. diff --git a/lib/stdlib/src/dets_v8.erl b/lib/stdlib/src/dets_v8.erl deleted file mode 100644 index 1bf53d91b1..0000000000 --- a/lib/stdlib/src/dets_v8.erl +++ /dev/null @@ -1,1594 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2001-2016. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(dets_v8). - -%% Dets files, implementation part. This module handles versions up to -%% and including 8(c). To be called from dets.erl only. - --export([mark_dirty/1, read_file_header/2, - check_file_header/2, do_perform_save/1, initiate_file/11, - init_freelist/2, fsck_input/4, - bulk_input/3, output_objs/4, write_cache/1, may_grow/3, - find_object/2, re_hash/2, slot_objs/2, scan_objs/8, - db_hash/2, no_slots/1, table_parameters/1]). - --export([file_info/1, v_segments/1]). - --export([cache_segps/3]). - -%% For backward compatibility. --export([sz2pos/1]). - --dialyzer(no_improper_lists). - --compile({inline, [{sz2pos,1},{scan_skip,7}]}). --compile({inline, [{skip_bytes,5}, {get_segp,1}]}). --compile({inline, [{wl_lookup,5}]}). --compile({inline, [{actual_seg_size,0}]}). - --include("dets.hrl"). - -%% The layout of the file is : -%% -%% bytes decsription -%% ---------------------- File header -%% 4 FreelistsPointer -%% 4 Cookie -%% 4 ClosedProperly (pos=8) -%% 4 Type (pos=12) -%% 4 Version (pos=16) -%% 4 M -%% 4 Next -%% 4 KeyPos -%% 4 NoObjects -%% 4 N -%% ------------------ end of file header -%% 4*8192 SegmentArray -%% ------------------ -%% 4*256 First segment -%% ----------------------------- This is BASE. -%% ??? Objects (free and alive) -%% 4*256 Second segment (2 kB now, due to a bug) -%% ??? Objects (free and alive) -%% ... more objects and segments ... -%% ----------------------------- -%% ??? Free lists -%% ----------------------------- -%% 4 File size, in bytes. - -%% The first slot (0) in the segment array always points to the -%% pre-allocated first segment. -%% Before we can find an object we must find the slot where the -%% object resides. Each slot is a (possibly empty) list (or chain) of -%% objects that hash to the same slot. If the value stored in the -%% slot is zero, the slot chain is empty. If the slot value is -%% non-zero, the value points to a position in the file where the -%% chain starts. 
Each object in a chain has the following layout: -%% -%% bytes decsription -%% -------------------- -%% 4 Pointer to the next object of the chain. -%% 4 Size of the object in bytes (Sz). -%% 4 Status (FREE or ACTIVE) -%% Sz Binary representing the object -%% -%% The status field is used while repairing a file (but not next or size). -%% -%%|---------------| -%%| head | -%%| | -%%| | -%%|_______________| -%%| |------| -%%|___seg ptr1____| | -%%| | | -%%|__ seg ptr 2___| | -%%| | | segment 1 -%%| .... | V _____________ -%% | | -%% | | -%% |___slot 0 ____| -%% | | -%% |___slot 1 ____|-----| -%% | | | -%% | ..... | | 1:st obj in slot 1 -%% V segment 1 -%% |-----------| -%% | next | -%% |___________| -%% | size | -%% |___________| -%% | status | -%% |___________| -%% | | -%% | | -%% | obj | -%% | | - -%%% -%%% File header -%%% - --define(HEADSZ, 40). % The size of the file header, in bytes. --define(SEGSZ, 256). % Size of a segment, in words. --define(SEGSZ_LOG2, 8). --define(SEGARRSZ, 8192). % Maximal number of segments. --define(SEGADDR(SegN), (?HEADSZ + (4 * (SegN)))). --define(BASE, ?SEGADDR((?SEGSZ + ?SEGARRSZ))). --define(MAXOBJS, (?SEGSZ * ?SEGARRSZ)). % 2 M objects - --define(SLOT2SEG(S), ((S) bsr ?SEGSZ_LOG2)). - -%% BIG is used for hashing. BIG must be greater than the maximum -%% number of slots, currently MAXOBJS. --define(BIG, 16#ffffff). - -%% Hard coded positions into the file header: --define(FREELIST_POS, 0). --define(CLOSED_PROPERLY_POS, 8). --define(D_POS, 20). --define(NO_OBJECTS_POS, (?D_POS + 12)). - -%% The version of a dets file is indicated by the ClosedProperly -%% field. Version 6 was used in the R1A release, and version 7 in the -%% R1B release up to and including the R3B01 release. Both version 6 -%% and version 7 indicate properly closed files by the value -%% CLOSED_PROPERLY. -%% -%% The current version, 8, has three sub-versions: -%% -%% - 8(a), indicated by the value CLOSED_PROPERLY (same as in versions 6 -%% and 7), introduced in R3B02; -%% - 8(b), indicated by the value CLOSED_PROPERLY2(_NEED_COMPACTING), -%% introduced in R5A and used up to and including R6A; -%% - 8(c), indicated by the value CLOSED_PROPERLY_NEW_HASH(_NEED_COMPACTING), -%% in use since R6B. -%% -%% The difference between the 8(a) and the 8(b) versions is the format -%% used for free lists saved on dets files. -%% The 8(c) version uses a different hashing algorithm, erlang:phash -%% (former versions use erlang:hash). -%% Version 8(b) files are only converted to version 8(c) if repair is -%% done, so we need compatibility with 8(b) for a _long_ time. -%% -%% There are known bugs due to the fact that keys and objects are -%% sometimes compared (==) and sometimes matched (=:=). The version -%% used by default (9, see dets_v9.erl) does not have this problem. - --define(NOT_PROPERLY_CLOSED,0). --define(CLOSED_PROPERLY,1). --define(CLOSED_PROPERLY2,2). --define(CLOSED_PROPERLY2_NEED_COMPACTING,3). --define(CLOSED_PROPERLY_NEW_HASH,4). --define(CLOSED_PROPERLY_NEW_HASH_NEED_COMPACTING,5). - --define(FILE_FORMAT_VERSION, 8). --define(CAN_BUMP_BY_REPAIR, [6, 7]). --define(CAN_CONVERT_FREELIST, [8]). - -%%% -%%% Object header (next, size, status). -%%% - --define(OHDSZ, 12). % The size of the object header, in bytes. --define(STATUS_POS, 8). % Position of the status field. - -%% The size of each object is a multiple of 16. -%% BUMP is used when repairing files. --define(BUMP, 16). - --define(ReadAhead, 512). - -%%-define(DEBUGF(X,Y), io:format(X, Y)). --define(DEBUGF(X,Y), void). 
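A small sketch (editorial; field order taken from the file-header layout described above) of how the ten 32-bit big-endian words of the version 8 header line up:

    parse_v8_header(<<Freelist:32, Cookie:32, ClosedProperly:32, Type:32,
                      Version:32, M:32, Next:32, KeyPos:32, NoObjects:32,
                      N:32, _Rest/binary>>) ->
        {Freelist, Cookie, ClosedProperly, Type, Version,
         M, Next, KeyPos, NoObjects, N}.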
- -%% -> ok | throw({NewHead,Error}) -mark_dirty(Head) -> - Dirty = [{?CLOSED_PROPERLY_POS, <<?NOT_PROPERLY_CLOSED:32>>}], - {_NewHead, ok} = dets_utils:pwrite(Head, Dirty), - ok = dets_utils:sync(Head), - {ok, _Pos} = dets_utils:position(Head, Head#head.freelists_p), - ok = dets_utils:truncate(Head, cur). - -%% -> {ok, head()} | throw(Error) -initiate_file(Fd, Tab, Fname, Type, Kp, MinSlots, MaxSlots, - Ram, CacheSz, Auto, _DoInitSegments) -> - Freelist = 0, - Cookie = ?MAGIC, - ClosedProperly = ?NOT_PROPERLY_CLOSED, % immediately overwritten - Version = ?FILE_FORMAT_VERSION, - Factor = est_no_segments(MinSlots), - N = 0, - M = Next = ?SEGSZ * Factor, - NoObjects = 0, - dets_utils:pwrite(Fd, Fname, 0, - <<Freelist:32, - Cookie:32, - ClosedProperly:32, - (dets_utils:type_to_code(Type)):32, - Version:32, - M:32, - Next:32, - Kp:32, - NoObjects:32, - N:32, - 0:(?SEGARRSZ*4)/unit:8, % Initialize SegmentArray - 0:(?SEGSZ*4)/unit:8>>), % Initialize first segment - %% We must set the first slot of the segment pointer array to - %% point to the first segment - Pos = ?SEGADDR(0), - SegP = (?HEADSZ + (4 * ?SEGARRSZ)), - dets_utils:pwrite(Fd, Fname, Pos, <<SegP:32>>), - segp_cache(Pos, SegP), - - Ftab = dets_utils:init_alloc(?BASE), - H0 = #head{freelists=Ftab, fptr = Fd, base = ?BASE}, - {H1, Ws} = init_more_segments(H0, 1, Factor, undefined, []), - - %% This is not optimal but simple: always initiate the segments. - dets_utils:pwrite(Fd, Fname, Ws), - - %% Return a new nice head structure - Head = #head{ - m = M, - m2 = M * 2, - next = Next, - fptr = Fd, - no_objects = NoObjects, - n = N, - type = Type, - update_mode = dirty, - freelists = H1#head.freelists, - auto_save = Auto, - hash_bif = phash, - keypos = Kp, - min_no_slots = Factor * ?SEGSZ, - max_no_slots = no_segs(MaxSlots) * ?SEGSZ, - - ram_file = Ram, - filename = Fname, - name = Tab, - cache = dets_utils:new_cache(CacheSz), - version = Version, - bump = ?BUMP, - base = ?BASE, - mod = ?MODULE - }, - {ok, Head}. - -est_no_segments(MinSlots) when 1 + ?SLOT2SEG(MinSlots) > ?SEGARRSZ -> - ?SEGARRSZ; -est_no_segments(MinSlots) -> - 1 + ?SLOT2SEG(MinSlots). - -init_more_segments(Head, SegNo, Factor, undefined, Ws) when SegNo < Factor -> - init_more_segments(Head, SegNo, Factor, seg_zero(), Ws); -init_more_segments(Head, SegNo, Factor, SegZero, Ws) when SegNo < Factor -> - {NewHead, W} = allocate_segment(Head, SegZero, SegNo), - init_more_segments(NewHead, SegNo+1, Factor, SegZero, W++Ws); -init_more_segments(Head, _SegNo, _Factor, _SegZero, Ws) -> - {Head, Ws}. - -allocate_segment(Head, SegZero, SegNo) -> - %% may throw error: - {NewHead, Segment, _} = dets_utils:alloc(Head, 4 * ?SEGSZ), - InitSegment = {Segment, SegZero}, - Pos = ?SEGADDR(SegNo), - segp_cache(Pos, Segment), - SegPointer = {Pos, <<Segment:32>>}, - {NewHead, [InitSegment, SegPointer]}. - -%% Read free lists (using a Buddy System) from file. -init_freelist(Head, {convert_freelist,_Version}) -> - %% This function converts the saved freelist of the form - %% [{Slot1,Addr1},{Addr1,Addr2},...,{AddrN,0},{Slot2,Addr},...] - %% i.e each slot is a linked list which ends with a 0. - %% This is stored in a bplus_tree per Slot. - %% Each Slot is a position in a tuple. 
- - Ftab = dets_utils:empty_free_lists(), - Pos = Head#head.freelists_p, - case catch prterm(Head, Pos, ?OHDSZ) of - {0, _Sz, Term} -> - FreeList1 = lists:reverse(Term), - FreeList = dets_utils:init_slots_from_old_file(FreeList1, Ftab), - Head#head{freelists = FreeList, base = ?BASE}; - _ -> - throw({error, {bad_freelists, Head#head.filename}}) - end; -init_freelist(Head, _) -> - %% bplus_tree stored as is - Pos = Head#head.freelists_p, - case catch prterm(Head, Pos, ?OHDSZ) of - {0, _Sz, Term} -> - Head#head{freelists = Term, base = ?BASE}; - _ -> - throw({error, {bad_freelists, Head#head.filename}}) - end. - -%% -> {ok, Fd, fileheader()} | throw(Error) -read_file_header(Fd, FileName) -> - {ok, Bin} = dets_utils:pread_close(Fd, FileName, 0, ?HEADSZ), - [Freelist, Cookie, CP, Type2, Version, M, Next, Kp, NoObjects, N] = - bin2ints(Bin), - {ok, EOF} = dets_utils:position_close(Fd, FileName, eof), - {ok, <<FileSize:32>>} = dets_utils:pread_close(Fd, FileName, EOF-4, 4), - FH = #fileheader{freelist = Freelist, - fl_base = ?BASE, - cookie = Cookie, - closed_properly = CP, - type = dets_utils:code_to_type(Type2), - version = Version, - m = M, - next = Next, - keypos = Kp, - no_objects = NoObjects, - min_no_slots = ?DEFAULT_MIN_NO_SLOTS, - max_no_slots = ?DEFAULT_MAX_NO_SLOTS, - trailer = FileSize, - eof = EOF, - n = N, - mod = ?MODULE}, - {ok, Fd, FH}. - -%% -> {ok, head(), ExtraInfo} | {error, Reason} (Reason lacking file name) -%% ExtraInfo = {convert_freelist, Version} | true | need_compacting -check_file_header(FH, Fd) -> - Test = - if - FH#fileheader.cookie =/= ?MAGIC -> - {error, not_a_dets_file}; - FH#fileheader.type =:= badtype -> - {error, invalid_type_code}; - FH#fileheader.version =/= ?FILE_FORMAT_VERSION -> - case lists:member(FH#fileheader.version, - ?CAN_BUMP_BY_REPAIR) of - true -> - {error, version_bump}; - false -> - {error, bad_version} - end; - FH#fileheader.trailer =/= FH#fileheader.eof -> - {error, not_closed}; - FH#fileheader.closed_properly =:= ?CLOSED_PROPERLY -> - case lists:member(FH#fileheader.version, - ?CAN_CONVERT_FREELIST) of - true -> - {ok, {convert_freelist, FH#fileheader.version}, hash}; - false -> - {error, not_closed} % should not happen - end; - FH#fileheader.closed_properly =:= ?CLOSED_PROPERLY2 -> - {ok, true, hash}; - FH#fileheader.closed_properly =:= - ?CLOSED_PROPERLY2_NEED_COMPACTING -> - {ok, need_compacting, hash}; - FH#fileheader.closed_properly =:= ?CLOSED_PROPERLY_NEW_HASH -> - {ok, true, phash}; - FH#fileheader.closed_properly =:= - ?CLOSED_PROPERLY_NEW_HASH_NEED_COMPACTING -> - {ok, need_compacting, phash}; - FH#fileheader.closed_properly =:= ?NOT_PROPERLY_CLOSED -> - {error, not_closed}; - FH#fileheader.closed_properly > - ?CLOSED_PROPERLY_NEW_HASH_NEED_COMPACTING -> - {error, not_closed}; - true -> - {error, not_a_dets_file} - end, - case Test of - {ok, ExtraInfo, HashAlg} -> - H = #head{ - m = FH#fileheader.m, - m2 = FH#fileheader.m * 2, - next = FH#fileheader.next, - fptr = Fd, - no_objects= FH#fileheader.no_objects, - n = FH#fileheader.n, - type = FH#fileheader.type, - update_mode = saved, - auto_save = infinity, % not saved on file - fixed = false, % not saved on file - freelists_p = FH#fileheader.freelist, - hash_bif = HashAlg, - keypos = FH#fileheader.keypos, - min_no_slots = FH#fileheader.min_no_slots, - max_no_slots = FH#fileheader.max_no_slots, - version = ?FILE_FORMAT_VERSION, - mod = ?MODULE, - bump = ?BUMP, - base = FH#fileheader.fl_base}, - {ok, H, ExtraInfo}; - Error -> - Error - end. 
- -cache_segps(Fd, FileName, M) -> - NSegs = no_segs(M), - {ok, Bin} = dets_utils:pread_close(Fd, FileName, ?HEADSZ, 4 * NSegs), - Fun = fun(S, P) -> segp_cache(P, S), P+4 end, - lists:foldl(Fun, ?HEADSZ, bin2ints(Bin)). - -no_segs(NoSlots) -> - ?SLOT2SEG(NoSlots - 1) + 1. - -bin2ints(<<Int:32, B/binary>>) -> - [Int | bin2ints(B)]; -bin2ints(<<>>) -> - []. - -%%% -%%% Repair, conversion and initialization of a dets file. -%%% - -bulk_input(Head, InitFun, Cntrs) -> - bulk_input(Head, InitFun, Cntrs, make_ref()). - -bulk_input(Head, InitFun, Cntrs, Ref) -> - fun(close) -> - ok; - (read) -> - case catch {Ref, InitFun(read)} of - {Ref, end_of_input} -> - end_of_input; - {Ref, {L0, NewInitFun}} when is_list(L0), - is_function(NewInitFun) -> - Kp = Head#head.keypos, - case catch bulk_objects(L0, Head, Cntrs, Kp, []) of - {'EXIT', _Error} -> - _ = (catch NewInitFun(close)), - {error, invalid_objects_list}; - L -> - {L, bulk_input(Head, NewInitFun, Cntrs, Ref)} - end; - {Ref, Value} -> - {error, {init_fun, Value}}; - Error -> - throw({thrown, Error}) - end - end. - -bulk_objects([T | Ts], Head, Cntrs, Kp, L) -> - BT = term_to_binary(T), - Sz = byte_size(BT), - LogSz = sz2pos(Sz+?OHDSZ), - count_object(Cntrs, LogSz), - Key = element(Kp, T), - bulk_objects(Ts, Head, Cntrs, Kp, [make_object(Head, Key, LogSz, BT) | L]); -bulk_objects([], _Head, _Cntrs, _Kp, L) -> - L. - --define(FSCK_SEGMENT, 10000). - --define(DCT(D, CT), [D | CT]). - --define(VNEW(N, E), erlang:make_tuple(N, E)). --define(VSET(I, V, E), setelement(I, V, E)). --define(VGET(I, V), element(I, V)). - -%% OldVersion not used, assuming later versions have been converted already. -output_objs(OldVersion, Head, SlotNumbers, Cntrs) -> - fun(close) -> - {ok, 0, Head}; - ([]) -> - output_objs(OldVersion, Head, SlotNumbers, Cntrs); - (L) -> - %% Descending sizes. - Count = lists:sort(ets:tab2list(Cntrs)), - RCount = lists:reverse(Count), - NoObjects = lists:foldl(fun({_Sz,No}, A) -> A + No end, 0, Count), - {_, MinSlots, _} = SlotNumbers, - if - %% Using number of objects for bags and duplicate bags - %% is not ideal; number of (unique) keys should be - %% used instead. The effect is that there will be more - %% segments than "necessary". - MinSlots =/= bulk_init, - abs(?SLOT2SEG(NoObjects) - ?SLOT2SEG(MinSlots)) > 5, - (NoObjects < ?MAXOBJS) -> - {try_again, NoObjects}; - true -> - Head1 = Head#head{no_objects = NoObjects}, - SegSz = actual_seg_size(), - {_, End, _} = dets_utils:alloc(Head, SegSz-1), - %% Now {LogSize,NoObjects} in Cntrs is replaced by - %% {LogSize,Position,{FileName,FileDescriptor},NoObjects}. - {Head2, CT} = allocate_all_objects(Head1, RCount, Cntrs), - [E | Es] = bin2term(L, []), - {NE, Acc, DCT1} = - output_slots(E, Es, [E], Head2, ?DCT(0, CT)), - NDCT = write_all_sizes(DCT1, Cntrs), - Max = ets:info(Cntrs, size), - output_objs2(NE, Acc, Head2, Cntrs, NDCT, End, Max,Max) - end - end. 
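For orientation (editorial, standard dets API): bulk_input/3 wraps the init fun handed to dets:init_table/3, whose read protocol looks roughly like this, assuming T is an already opened table:

    Input = fun(read)  -> {[{1, a}, {2, b}],
                           fun(read)  -> end_of_input;
                              (close) -> ok
                           end};
               (close) -> ok
            end,
    ok = dets:init_table(T, Input, [{format, term}]).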
- -output_objs2(E, Acc, Head, Cntrs, DCT, End, 0, MaxNoChunks) -> - NDCT = write_all_sizes(DCT, Cntrs), - output_objs2(E, Acc, Head, Cntrs, NDCT, End, MaxNoChunks, MaxNoChunks); -output_objs2(E, Acc, Head, Cntrs, DCT, End, ChunkI, MaxNoChunks) -> - fun(close) -> - DCT1 = output_slot(Acc, Head, DCT), - NDCT = write_all_sizes(DCT1, Cntrs), - ?DCT(NoDups, CT) = NDCT, - [SegAddr | []] = ?VGET(tuple_size(CT), CT), - FinalZ = End - SegAddr, - [{?FSCK_SEGMENT, _, {FileName, Fd}, _}] = - ets:lookup(Cntrs, ?FSCK_SEGMENT), - ok = dets_utils:fwrite(Fd, FileName, - dets_utils:make_zeros(FinalZ)), - NewHead = Head#head{no_objects = Head#head.no_objects - NoDups}, - {ok, NoDups, NewHead}; - (L) -> - Es = bin2term(L, []), - {NE, NAcc, NDCT} = output_slots(E, Es, Acc, Head, DCT), - output_objs2(NE, NAcc, Head, Cntrs, NDCT, End, - ChunkI-1, MaxNoChunks) - end. - -%% By allocating bigger objects before smaller ones, holes in the -%% buddy system memory map are avoided. Unfortunately, the segments -%% are always allocated first, so if there are objects bigger than a -%% segment, there is a hole to handle. (Haven't considered placing the -%% segments among other objects of the same size.) -allocate_all_objects(Head, Count, Cntrs) -> - SegSize = actual_seg_size(), - {Head1, HSz, HN, HA} = alloc_hole(Count, Head, SegSize), - {Max, _} = hd(Count), - CT = ?VNEW(Max+1, not_used), - {Head2, NCT} = allocate_all(Head1, Count, Cntrs, CT), - Head3 = free_hole(Head2, HSz, HN, HA), - {Head3, NCT}. - -alloc_hole([{LSize,_} | _], Head, SegSz) when ?POW(LSize-1) > SegSz -> - {_, SegAddr, _} = dets_utils:alloc(Head, SegSz-1), - Size = ?POW(LSize-1)-1, - {_, Addr, _} = dets_utils:alloc(Head, Size), - N = (Addr - SegAddr) div SegSz, - Head1 = dets_utils:alloc_many(Head, SegSz, N, SegAddr), - {Head1, SegSz-1, N, SegAddr}; -alloc_hole(_Count, Head, _SegSz) -> - {Head, 0, 0, 0}. - -free_hole(Head, _Size, 0, _Addr) -> - Head; -free_hole(Head, Size, N, Addr) -> - {Head1, _} = dets_utils:free(Head, Addr, Size), - free_hole(Head1, Size, N-1, Addr+Size+1). - -%% One (temporary) file for each buddy size, write all objects of that -%% size to the file. -allocate_all(Head, [{LSize,NoObjects} | Count], Cntrs, CT) -> - Size = ?POW(LSize-1)-1, - {_Head, Addr, _} = dets_utils:alloc(Head, Size), - NewHead = dets_utils:alloc_many(Head, Size+1, NoObjects, Addr), - {FileName, Fd} = temp_file(Head, LSize), - true = ets:insert(Cntrs, {LSize, Addr, {FileName, Fd}, NoObjects}), - NCT = ?VSET(LSize, CT, [Addr | []]), - allocate_all(NewHead, Count, Cntrs, NCT); -allocate_all(Head, [], Cntrs, CT) -> - %% Note that space for the segments has been allocated already. - %% And one file for the segments... - {FileName, Fd} = temp_file(Head, ?FSCK_SEGMENT), - Addr = ?SEGADDR(?SEGARRSZ), - true = ets:insert(Cntrs, {?FSCK_SEGMENT, Addr, {FileName, Fd}, 0}), - NCT = ?VSET(tuple_size(CT), CT, [Addr | []]), - {Head, NCT}. - -temp_file(Head, N) -> - TmpName = lists:concat([Head#head.filename, '.', N]), - {ok, Fd} = dets_utils:open(TmpName, [raw, binary, write]), - {TmpName, Fd}. - -bin2term([<<Slot:32, LogSize:8, BinTerm/binary>> | BTs], L) -> - bin2term(BTs, [{Slot, LogSize, BinTerm} | L]); -bin2term([], L) -> - lists:reverse(L). - -write_all_sizes(?DCT(D, CT), Cntrs) -> - ?DCT(D, write_sizes(1, tuple_size(CT), CT, Cntrs)). - -write_sizes(Sz, Sz, CT, Cntrs) -> - write_size(Sz, ?FSCK_SEGMENT, CT, Cntrs); -write_sizes(Sz, MaxSz, CT, Cntrs) -> - NCT = write_size(Sz, Sz, CT, Cntrs), - write_sizes(Sz+1, MaxSz, NCT, Cntrs). 
- -write_size(Sz, I, CT, Cntrs) -> - case ?VGET(Sz, CT) of - not_used -> - CT; - [Addr | L] -> - {FileName, Fd} = ets:lookup_element(Cntrs, I, 3), - case file:write(Fd, lists:reverse(L)) of - ok -> - ?VSET(Sz, CT, [Addr | []]); - Error -> - dets_utils:file_error(FileName, Error) - end - end. - -output_slots(E, [E1 | Es], Acc, Head, DCT) - when element(1, E) =:= element(1, E1) -> - output_slots(E1, Es, [E1 | Acc], Head, DCT); -output_slots(_E, [E | L], Acc, Head, DCT) -> - NDCT = output_slot(Acc, Head, DCT), - output_slots(E, L, [E], Head, NDCT); -output_slots(E, [], Acc, _Head, DCT) -> - {E, Acc, DCT}. - -output_slot([E], _Head, ?DCT(D, CT)) -> - ?DCT(D, output_slot([{foo, E}], 0, foo, CT)); -output_slot(Es0, Head, ?DCT(D, CT)) -> - Kp = Head#head.keypos, - Fun = fun({_Slot, _LSize, BinTerm} = E) -> - Key = element(Kp, binary_to_term(BinTerm)), - {Key, E} - end, - Es = lists:map(Fun, Es0), - NEs = case Head#head.type of - set -> - [{Key0,_} = E | L0] = lists:sort(Es), - choose_one(lists:sort(L0), Key0, [E]); - bag -> - lists:usort(Es); - duplicate_bag -> - lists:sort(Es) - end, - Dups = D + length(Es) - length(NEs), - ?DCT(Dups, output_slot(NEs, 0, foo, CT)). - -choose_one([{Key,_} | Es], Key, L) -> - choose_one(Es, Key, L); -choose_one([{Key,_} = E | Es], _Key, L) -> - choose_one(Es, Key, [E | L]); -choose_one([], _Key, L) -> - L. - -output_slot([E | Es], Next, _Slot, CT) -> - {_Key, {Slot, LSize, BinTerm}} = E, - Size = byte_size(BinTerm), - Size2 = ?POW(LSize-1), - Pad = <<0:(Size2-Size-?OHDSZ)/unit:8>>, - BinObject = [<<Next:32, Size:32, ?ACTIVE:32>>, BinTerm | Pad], - [Addr | L] = ?VGET(LSize, CT), - NCT = ?VSET(LSize, CT, [Addr+Size2 | [BinObject | L]]), - output_slot(Es, Addr, Slot, NCT); -output_slot([], Next, Slot, CT) -> - I = tuple_size(CT), - [Addr | L] = ?VGET(I, CT), - {Pos, _} = slot_position(Slot), - NoZeros = Pos - Addr, - BinObject = if - NoZeros > 100 -> - [dets_utils:make_zeros(NoZeros) | <<Next:32>>]; - true -> - <<0:NoZeros/unit:8,Next:32>> - end, - Size = NoZeros+4, - ?VSET(I, CT, [Addr+Size | [BinObject | L]]). - -%% Does not close Fd. -fsck_input(Head, Fd, Cntrs, _FileHeader) -> - %% The file is not compressed, so the object size cannot exceed - %% the filesize, for all objects. - MaxSz = case file:position(Fd, eof) of - {ok, Pos} -> - Pos; - _ -> - (1 bsl 32) - 1 - end, - State0 = fsck_read(?BASE, Fd, []), - fsck_input1(Head, State0, Fd, MaxSz, Cntrs). - -fsck_input1(Head, State, Fd, MaxSz, Cntrs) -> - fun(close) -> - ok; - (read) -> - case State of - done -> - end_of_input; - {done, L} -> - R = count_input(Cntrs, L, []), - {R, fsck_input1(Head, done, Fd, MaxSz, Cntrs)}; - {cont, L, Bin, Pos} -> - R = count_input(Cntrs, L, []), - FR = fsck_objs(Bin, Head#head.keypos, Head, []), - NewState = fsck_read(FR, Pos, Fd, MaxSz, Head), - {R, fsck_input1(Head, NewState, Fd, MaxSz, Cntrs)} - end - end. - -%% The ets table Cntrs is used for counting objects per size. -count_input(Cntrs, [[LogSz | B] | Ts], L) -> - count_object(Cntrs, LogSz), - count_input(Cntrs, Ts, [B | L]); -count_input(_Cntrs, [], L) -> - L. - -count_object(Cntrs, LogSz) -> - case catch ets:update_counter(Cntrs, LogSz, 1) of - N when is_integer(N) -> ok; - _Badarg -> true = ets:insert(Cntrs, {LogSz, 1}) - end. - -fsck_read(Pos, F, L) -> - case file:position(F, Pos) of - {ok, _} -> - read_more_bytes(<<>>, 0, Pos, F, L); - _Error -> - {done, L} - end. 
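As an aside (editorial, not part of the patch): the catch-based insert-or-increment in count_object/2 above could nowadays be written with ets:update_counter/4 and a default object:

    count_object(Cntrs, LogSz) ->
        _ = ets:update_counter(Cntrs, LogSz, 1, {LogSz, 0}),
        ok.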
- -fsck_read({more, Bin, Sz, L}, Pos, F, MaxSz, Head) when Sz > MaxSz -> - FR = skip_bytes(Bin, ?BUMP, Head#head.keypos, Head, L), - fsck_read(FR, Pos, F, MaxSz, Head); -fsck_read({more, Bin, Sz, L}, Pos, F, _MaxSz, _Head) -> - read_more_bytes(Bin, Sz, Pos, F, L); -fsck_read({new, Skip, L}, Pos, F, _MaxSz, _Head) -> - NewPos = Pos + Skip, - fsck_read(NewPos, F, L). - -read_more_bytes(B, Min, Pos, F, L) -> - Max = if - Min < ?CHUNK_SIZE -> ?CHUNK_SIZE; - true -> Min - end, - case dets_utils:read_n(F, Max) of - eof -> - {done, L}; - Bin -> - NewPos = Pos + byte_size(Bin), - {cont, L, list_to_binary([B, Bin]), NewPos} - end. - -fsck_objs(Bin = <<_N:32, Sz:32, Status:32, Tail/binary>>, Kp, Head, L) -> - if - Status =:= ?ACTIVE -> - case Tail of - <<BinTerm:Sz/binary, Tail2/binary>> -> - case catch element(Kp, binary_to_term(BinTerm)) of - {'EXIT', _} -> - skip_bytes(Bin, ?BUMP, Kp, Head, L); - Key -> - LogSz = sz2pos(Sz+?OHDSZ), - Obj = make_object(Head, Key, LogSz, BinTerm), - NL = [[LogSz | Obj] | L], - Skip = ?POW(LogSz-1) - Sz - ?OHDSZ, - skip_bytes(Tail2, Skip, Kp, Head, NL) - end; - _ -> - {more, Bin, Sz, L} - end; - true -> - skip_bytes(Bin, ?BUMP, Kp, Head, L) - end; -fsck_objs(Bin, _Kp, _Head, L) -> - {more, Bin, 0, L}. - -%% Version 8 has to know about version 9. -make_object(Head, Key, _LogSz, BT) when Head#head.version =:= 9 -> - Slot = dets_v9:db_hash(Key, Head), - <<Slot:32, BT/binary>>; -make_object(Head, Key, LogSz, BT) -> - Slot = db_hash(Key, Head), - <<Slot:32, LogSz:8, BT/binary>>. - -%% Inlined. -skip_bytes(Bin, Skip, Kp, Head, L) -> - case Bin of - <<_:Skip/binary, Tail/binary>> -> - fsck_objs(Tail, Kp, Head, L); - _ -> - {new, Skip - byte_size(Bin), L} - end. - -%% -> {NewHead, ok} | throw({Head, Error}) -do_perform_save(H) -> - FL = dets_utils:get_freelists(H), - B = term_to_binary(FL), - Size = byte_size(B), - ?DEBUGF("size of freelist = ~p~n", [Size]), - ?DEBUGF("head.m = ~p~n", [H#head.m]), - ?DEBUGF("head.no_objects = ~p~n", [H#head.no_objects]), - - {ok, Pos} = dets_utils:position(H, eof), - H1 = H#head{freelists_p = Pos}, - W1 = {?FREELIST_POS, <<Pos:32>>}, - W2 = {Pos, [<<0:32, Size:32, ?FREE:32>>, B]}, - - W3 = {?D_POS, <<(H1#head.m):32, - (H1#head.next):32, - (H1#head.keypos):32, - (H1#head.no_objects):32, - (H1#head.n):32>>}, - {ClosedProperly, ClosedProperlyNeedCompacitng} = - case H1#head.hash_bif of - hash -> - {?CLOSED_PROPERLY2, ?CLOSED_PROPERLY2_NEED_COMPACTING}; - phash -> - {?CLOSED_PROPERLY_NEW_HASH, - ?CLOSED_PROPERLY_NEW_HASH_NEED_COMPACTING} - end, - W4 = - if - Size > 1000, Size > H1#head.no_objects -> - {?CLOSED_PROPERLY_POS, - <<ClosedProperlyNeedCompacitng:32>>}; - true -> - {?CLOSED_PROPERLY_POS, <<ClosedProperly:32>>} - end, - W5 = {?FILE_FORMAT_VERSION_POS, <<?FILE_FORMAT_VERSION:32>>}, - {H2, ok} = dets_utils:pwrite(H1, [W1,W2,W3,W4,W5]), - {ok, Pos2} = dets_utils:position(H2, eof), - ?DEBUGF("Writing file size ~p, eof at ~p~n", [Pos2+4, Pos2]), - dets_utils:pwrite(H2, [{Pos2, <<(Pos2 + 4):32>>}]). - -%% -> [term()] | throw({Head, Error}) -slot_objs(H, Slot) when Slot >= H#head.next -> - '$end_of_table'; -slot_objs(H, Slot) -> - {_Pos, Chain} = chain(H, Slot), - collect_chain(H, Chain). - -collect_chain(_H, 0) -> []; -collect_chain(H, Pos) -> - {Next, _Sz, Term} = prterm(H, Pos, ?ReadAhead), - [Term | collect_chain(H, Next)]. - -db_hash(Key, Head) -> - H = h(Key, Head#head.hash_bif), - Hash = H rem Head#head.m, - if - Hash < Head#head.n -> - H rem (Head#head.m2); % H rem (2 * m) - true -> - Hash - end. 
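A worked example of the linear hashing in db_hash/2 above (editorial): with m = 256 and split pointer n = 10, a key hashing to 265 first maps to slot 265 rem 256 = 9; since 9 < n that slot has already been split, so the key is rehashed modulo m2 = 512 and ends up in slot 265. A key hashing to 300 maps to slot 44, which is >= n and not yet split, so it stays there.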
- -h(I, phash) -> erlang:phash(I, ?BIG) - 1; -h(I, HF) -> erlang:HF(I, ?BIG) - 1. %% stupid BIF has 1 counts. - -no_slots(_Head) -> - undefined. - -table_parameters(_Head) -> - undefined. - -%% Re-hashing a segment, starting with SlotStart. -%% -%% On the average, half of the objects of the chain are put into a new -%% chain. If the slot of the old chain is i, then the slot of the new -%% chain is i+m. -%% Note that the insertion of objects into the new chain is simplified -%% by the fact that the chains are not sorted on key, which means that -%% each moved object can be inserted first in the new chain. -%% (It is also a fact that the objects with the same key are not sorted.) -%% -%% -> {ok, Writes} | throw({Head, Error}) -re_hash(Head, SlotStart) -> - {SlotPos, _4} = slot_position(SlotStart), - {ok, Bin} = dets_utils:pread(Head, SlotPos, 4*?SEGSZ, 0), - {Read, Cs} = split_bin(SlotPos, Bin, [], []), - re_hash_read(Head, [], Read, Cs). - -split_bin(Pos, <<P:32, B/binary>>, R, Cs) -> - if - P =:= 0 -> - split_bin(Pos+4, B, R, Cs); - true -> - split_bin(Pos+4, B, [{P,?ReadAhead} | R], [[Pos] | Cs]) - end; -split_bin(_Pos, <<>>, R, Cs) -> - {R, Cs}. - -re_hash_read(Head, Cs, R, RCs) -> - {ok, Bins} = dets_utils:pread(R, Head), - re_hash_read(Head, R, RCs, Bins, Cs, [], []). - -re_hash_read(Head, [{Pos, Size} | Ps], [C | Cs], - [<<Next:32, Sz:32, _Status:32, Bin0/binary>> | Bins], - DoneCs, R, RCs) -> - case byte_size(Bin0) of - BinSz when BinSz >= Sz -> - case catch binary_to_term(Bin0) of - {'EXIT', _Error} -> - throw(dets_utils:corrupt_reason(Head, bad_object)); - Term -> - Key = element(Head#head.keypos, Term), - New = h(Key, Head#head.hash_bif) rem Head#head.m2, - NC = case New >= Head#head.m of - true -> [{Pos,New} | C]; - false -> [Pos | C] - end, - if - Next =:= 0 -> - NDoneCs = [NC | DoneCs], - re_hash_read(Head, Ps, Cs, Bins, NDoneCs, R, RCs); - true -> - NR = [{Next,?ReadAhead} | R], - NRCs = [NC | RCs], - re_hash_read(Head, Ps, Cs, Bins, DoneCs, NR, NRCs) - end - end; - BinSz when Size =:= BinSz+?OHDSZ -> - NR = [{Pos, Sz+?OHDSZ} | R], - re_hash_read(Head, Ps, Cs, Bins, DoneCs, NR, [C | RCs]); - _BinSz -> - throw({Head, {error, {premature_eof, Head#head.filename}}}) - end; -re_hash_read(Head, [], [], [], Cs, [], []) -> - re_hash_traverse_chains(Cs, Head, [], [], []); -re_hash_read(Head, [], [], [], Cs, R, RCs) -> - re_hash_read(Head, Cs, R, RCs). - -re_hash_traverse_chains([C | Cs], Head, Rs, Ns, Ws) -> - case re_hash_find_new(C, Rs, start, start) of - false -> - re_hash_traverse_chains(Cs, Head, Rs, Ns, Ws); - {NRs, FirstNew, LastNew} -> - LastInNew = case C of - [{_,_} | _] -> true; - _ -> false - end, - N = {FirstNew, LastNew, LastInNew}, - NWs = re_hash_link(C, start, start, start, Ws), - re_hash_traverse_chains(Cs, Head, NRs, [N | Ns], NWs) - end; -re_hash_traverse_chains([], Head, Rs, Ns, Ws) -> - {ok, Bins} = dets_utils:pread(Rs, Head), - {ok, insert_new(Rs, Bins, Ns, Ws)}. - -re_hash_find_new([{Pos,NewSlot} | C], R, start, start) -> - {SPos, _4} = slot_position(NewSlot), - re_hash_find_new(C, [{SPos,4} | R], Pos, Pos); -re_hash_find_new([{Pos,_SPos} | C], R, _FirstNew, LastNew) -> - re_hash_find_new(C, R, Pos, LastNew); -re_hash_find_new([_Pos | C], R, FirstNew, LastNew) -> - re_hash_find_new(C, R, FirstNew, LastNew); -re_hash_find_new([], _R, start, start) -> - false; -re_hash_find_new([], R, FirstNew, LastNew) -> - {R, FirstNew, LastNew}. 
- -re_hash_link([{Pos,_SPos} | C], LastOld, start, _LastInNew, Ws) -> - re_hash_link(C, LastOld, Pos, true, Ws); -re_hash_link([{Pos,_SPos} | C], LastOld, LastNew, false, Ws) -> - re_hash_link(C, LastOld, Pos, true, [{Pos,<<LastNew:32>>} | Ws]); -re_hash_link([{Pos,_SPos} | C], LastOld, _LastNew, LastInNew, Ws) -> - re_hash_link(C, LastOld, Pos, LastInNew, Ws); -re_hash_link([Pos | C], start, LastNew, true, Ws) -> - re_hash_link(C, Pos, LastNew, false, [{Pos,<<0:32>>} | Ws]); -re_hash_link([Pos | C], LastOld, LastNew, true, Ws) -> - re_hash_link(C, Pos, LastNew, false, [{Pos,<<LastOld:32>>} | Ws]); -re_hash_link([Pos | C], _LastOld, LastNew, LastInNew, Ws) -> - re_hash_link(C, Pos, LastNew, LastInNew, Ws); -re_hash_link([], _LastOld, _LastNew, _LastInNew, Ws) -> - Ws. - -insert_new([{NewSlotPos,_4} | Rs], [<<P:32>> = PB | Bins], [N | Ns], Ws) -> - {FirstNew, LastNew, LastInNew} = N, - Ws1 = case P of - 0 when LastInNew -> - Ws; - 0 -> - [{LastNew, <<0:32>>} | Ws]; - _ -> - [{LastNew, PB} | Ws] - end, - NWs = [{NewSlotPos, <<FirstNew:32>>} | Ws1], - insert_new(Rs, Bins, Ns, NWs); -insert_new([], [], [], Ws) -> - Ws. - -%% When writing the cache, a 'work list' is first created: -%% WorkList = [{Key, {Delete,Lookup,[Inserted]}}] -%% Delete = keep | delete -%% Lookup = skip | lookup -%% Inserted = {object(), No} -%% No = integer() -%% If No =< 0 then there will be -No instances of object() on the file -%% when the cache has been written. If No > 0 then No instances of -%% object() will be added to the file. -%% If Delete has the value 'delete', then all objects with the key Key -%% have been deleted. (This could be viewed as a shorthand for {Object,0} -%% for each object Object on the file not mentioned in some Inserted.) -%% If Lookup has the value 'lookup', all objects with the key Key will -%% be returned. -%% - -%% -> {NewHead, [LookedUpObject], pwrite_list()} | throw({NewHead, Error}) -write_cache(Head) -> - #head{cache = C, type = Type} = Head, - case dets_utils:is_empty_cache(C) of - true -> {Head, [], []}; - false -> - {NewC, _MaxInserts, PerKey} = dets_utils:reset_cache(C), - %% NoInsertedKeys is an upper limit on the number of new keys. - {WL, NoInsertedKeys} = make_wl(PerKey, Type), - Head1 = Head#head{cache = NewC}, - case may_grow(Head1, NoInsertedKeys, once) of - {Head2, ok} -> - eval_work_list(Head2, WL); - HeadError -> - throw(HeadError) - end - end. - -make_wl(PerKey, Type) -> - make_wl(PerKey, Type, [], 0). - -make_wl([{Key,L} | PerKey], Type, WL, Ins) -> - [Cs | I] = wl(L, Type), - make_wl(PerKey, Type, [{Key,Cs} | WL], Ins+I); -make_wl([], _Type, WL, Ins) -> - {WL, Ins}. - -wl(L, Type) -> - wl(L, Type, keep, skip, 0, []). 
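A concrete instance of the work-list format described above (editorial): inserting {k, 1} twice into a duplicate_bag and then looking the key up produces the per-key entry {k, {keep, {lookup, Pid}, [{{k, 1}, 2}]}}: nothing deleted, one pending lookup, and two copies of {k, 1} to be added to the file.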
- -wl([{_Seq, delete_key} | Cs], Type, _Del, Lookup, _I, _Objs) -> - wl(Cs, Type, delete, Lookup, 0, []); -wl([{_Seq, {delete_object, Object}} | Cs], Type, Del, Lookup, I, Objs) -> - NObjs = lists:keydelete(Object, 1, Objs), - wl(Cs, Type, Del, Lookup, I, [{Object,0} | NObjs]); -wl([{_Seq, {insert, Object}} | Cs], Type, _Del, Lookup, _I, _Objs) - when Type =:= set -> - wl(Cs, Type, delete, Lookup, 1, [{Object,-1}]); -wl([{_Seq, {insert, Object}} | Cs], Type, Del, Lookup, _I, Objs) -> - NObjs = - case lists:keyfind(Object, 1, Objs) of - {_, 0} -> - lists:keyreplace(Object, 1, Objs, {Object,-1}); - {_, _C} when Type =:= bag -> % C =:= 1; C =:= -1 - Objs; - {_, C} when C < 0 -> % when Type =:= duplicate_bag - lists:keyreplace(Object, 1, Objs, {Object,C-1}); - {_, C} -> % when C > 0, Type =:= duplicate_bag - lists:keyreplace(Object, 1, Objs, {Object,C+1}); - false when Del =:= delete -> - [{Object, -1} | Objs]; - false -> - [{Object, 1} | Objs] - end, - wl(Cs, Type, Del, Lookup, 1, NObjs); -wl([{_Seq, {lookup,_Pid}=Lookup} | Cs], Type, Del, _Lookup, I, Objs) -> - wl(Cs, Type, Del, Lookup, I, Objs); -wl([], _Type, Del, Lookup, I, Objs) -> - [{Del, Lookup, Objs} | I]. - -%% -> {NewHead, ok} | {NewHead, Error} -may_grow(Head, 0, once) -> - {Head, ok}; -may_grow(Head, _N, _How) when Head#head.fixed =/= false -> - {Head, ok}; -may_grow(#head{access = read}=Head, _N, _How) -> - {Head, ok}; -may_grow(Head, _N, _How) when Head#head.next >= ?MAXOBJS -> - {Head, ok}; -may_grow(Head, N, How) -> - Extra = erlang:min(2*?SEGSZ, Head#head.no_objects + N - Head#head.next), - case catch may_grow1(Head, Extra, How) of - {error, Reason} -> % alloc may throw error - {Head, {error, Reason}}; - Reply -> - Reply - end. - -may_grow1(Head, Extra, many_times) when Extra > ?SEGSZ -> - Reply = grow(Head, 1, undefined), - self() ! ?DETS_CALL(self(), may_grow), - Reply; -may_grow1(Head, Extra, _How) -> - grow(Head, Extra, undefined). - -%% -> {Head, ok} | throw({Head, Error}) -grow(Head, Extra, _SegZero) when Extra =< 0 -> - {Head, ok}; -grow(Head, Extra, undefined) -> - grow(Head, Extra, seg_zero()); -grow(Head, Extra, SegZero) -> - #head{n = N, next = Next, m = M} = Head, - SegNum = ?SLOT2SEG(Next), - {Head0, Ws1} = allocate_segment(Head, SegZero, SegNum), - {Head1, ok} = dets_utils:pwrite(Head0, Ws1), - %% If re_hash fails, segp_cache has been called, but it does not matter. - {ok, Ws2} = re_hash(Head1, N), - {Head2, ok} = dets_utils:pwrite(Head1, Ws2), - NewHead = - if - N + ?SEGSZ =:= M -> - Head2#head{n = 0, next = Next + ?SEGSZ, m = 2 * M, m2 = 4 * M}; - true -> - Head2#head{n = N + ?SEGSZ, next = Next + ?SEGSZ} - end, - grow(NewHead, Extra - ?SEGSZ, SegZero). - -seg_zero() -> - <<0:(4*?SEGSZ)/unit:8>>. - -find_object(Head, Object) -> - Key = element(Head#head.keypos, Object), - Slot = db_hash(Key, Head), - find_object(Head, Object, Slot). - -find_object(H, _Obj, Slot) when Slot >= H#head.next -> - false; -find_object(H, Obj, Slot) -> - {_Pos, Chain} = chain(H, Slot), - case catch find_obj(H, Obj, Chain) of - {ok, Pos} -> - {ok, Pos}; - _Else -> - false - end. - -find_obj(H, Obj, Pos) when Pos > 0 -> - {Next, _Sz, Term} = prterm(H, Pos, ?ReadAhead), - if - Term == Obj -> - {ok, Pos}; - true -> - find_obj(H, Obj, Next) - end. - -%% Given, a slot, return the {Pos, Chain} in the file where the -%% objects hashed to this slot reside. Pos is the position in the -%% file where the chain pointer is written and Chain is the position -%% in the file where the first object resides. 
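A worked example of the growth step in grow/3 above (editorial): starting from m = 256, n = 0, next = 256, adding one segment makes n + ?SEGSZ equal to m, so the table doubles to m = 512 (m2 = 1024) with n reset to 0 and next = 512; the following grow only advances n to 256 and next to 768, and the grow after that (n + 256 = 512 = m) doubles again to m = 1024.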
-chain(Head, Slot) -> - Pos = ?SEGADDR(?SLOT2SEG(Slot)), - Segment = get_segp(Pos), - FinalPos = Segment + (4 * ?REM2(Slot, ?SEGSZ)), - {ok, <<Chain:32>>} = dets_utils:pread(Head, FinalPos, 4, 0), - {FinalPos, Chain}. - -%%% -%%% Cache routines depending on the dets file format. -%%% - -%% -> {Head, [LookedUpObject], pwrite_list()} | throw({Head, Error}) -eval_work_list(Head, WorkLists) -> - SWLs = tag_with_slot(WorkLists, Head, []), - P1 = dets_utils:family(SWLs), - {PerSlot, SlotPositions} = remove_slot_tag(P1, [], []), - {ok, Bins} = dets_utils:pread(SlotPositions, Head), - first_object(PerSlot, SlotPositions, Bins, Head, [], [], [], []). - -tag_with_slot([{K,_} = WL | WLs], Head, L) -> - tag_with_slot(WLs, Head, [{db_hash(K, Head), WL} | L]); -tag_with_slot([], _Head, L) -> - L. - -remove_slot_tag([{S,SWLs} | SSWLs], Ls, SPs) -> - remove_slot_tag(SSWLs, [SWLs | Ls], [slot_position(S) | SPs]); -remove_slot_tag([], Ls, SPs) -> - {Ls, SPs}. - -%% The initial chain pointers and the first object in each chain are -%% read "in parallel", that is, with one call to file:pread/2 (two -%% calls altogether). The following chain objects are read one by -%% one. This is a compromise: if the chains are long and threads are -%% active, it would be faster to keep a state for each chain and read -%% the objects of the chains in parallel, but the overhead would be -%% quite substantial. - -first_object([WorkLists | SPs], [{P1,_4} | Ss], [<<P2:32>> | Bs], Head, - ObjsToRead, ToRead, Ls, LU) when P2 =:= 0 -> - L0 = [{old,P1}], - {L, NLU} = eval_slot(Head, ?ReadAhead, P2, WorkLists, L0, LU), - first_object(SPs, Ss, Bs, Head, ObjsToRead, ToRead, [L | Ls], NLU); -first_object([WorkLists | SPs], [{P1,_4} | Ss], [<<P2:32>> | Bs], Head, - ObjsToRead, ToRead, Ls, LU) -> - E = {P1,P2,WorkLists}, - first_object(SPs, Ss, Bs, Head, - [E | ObjsToRead], [{P2, ?ReadAhead} | ToRead], Ls, LU); -first_object([], [], [], Head, ObjsToRead, ToRead, Ls, LU) -> - {ok, Bins} = dets_utils:pread(ToRead, Head), - case catch eval_first(Bins, ObjsToRead, Head, Ls, LU) of - {ok, NLs, NLU} -> - case create_writes(NLs, Head, [], 0) of - {Head1, [], 0} -> - {Head1, NLU, []}; - {Head1, Ws, No} -> - {NewHead, Ws2} = update_no_objects(Head1, Ws, No), - {NewHead, NLU, Ws2} - end; - _Error -> - throw(dets_utils:corrupt_reason(Head, bad_object)) - end. - -%% Update no_objects on the file too, if the number of segments that -%% dets:fsck/6 use for estimate has changed. -update_no_objects(Head, Ws, 0) -> {Head, Ws}; -update_no_objects(Head, Ws, Delta) -> - No = Head#head.no_objects, - NewNo = No + Delta, - NWs = - if - NewNo > ?MAXOBJS -> - Ws; - ?SLOT2SEG(No) =:= ?SLOT2SEG(NewNo) -> - Ws; - true -> - [{?NO_OBJECTS_POS, <<NewNo:32>>} | Ws] - end, - {Head#head{no_objects = NewNo}, NWs}. - -eval_first([<<Next:32, Sz:32, _Status:32, Bin/binary>> | Bins], - [SP | SPs], Head, Ls, LU) -> - {P1, P2, WLs} = SP, - L0 = [{old,P1}], - case byte_size(Bin) of - BinSz when BinSz >= Sz -> - Term = binary_to_term(Bin), - Key = element(Head#head.keypos, Term), - {L, NLU} = find_key(Head, P2, Next, Sz, Term, Key, WLs, L0, LU), - eval_first(Bins, SPs, Head, [L | Ls], NLU); - _BinSz -> - {L, NLU} = eval_slot(Head, Sz+?OHDSZ, P2, WLs, L0, LU), - eval_first(Bins, SPs, Head, [L | Ls], NLU) - end; -eval_first([], [], _Head, Ls, LU) -> - {ok, Ls, LU}. 
- -eval_slot(_Head, _TrySize, _Pos=0, [], L, LU) -> - {L, LU}; -eval_slot(Head, _TrySize, Pos=0, [WL | WLs], L, LU) -> - {_Key, {_Delete, LookUp, Objects}} = WL, - {NL, NLU} = end_of_key(Objects, LookUp, L, []), - eval_slot(Head, ?ReadAhead, Pos, WLs, NL, NLU++LU); -eval_slot(Head, TrySize, Pos, WLs, L, LU) -> - {NextPos, Size, Term} = prterm(Head, Pos, TrySize), - Key = element(Head#head.keypos, Term), - find_key(Head, Pos, NextPos, Size, Term, Key, WLs, L, LU). - -find_key(Head, Pos, NextPos, Size, Term, Key, WLs, L, LU) -> - case lists:keyfind(Key, 1, WLs) of - {_, {Delete, LookUp, Objects}} = WL -> - NWLs = lists:delete(WL, WLs), - {NewObjects, NL, LUK} = eval_object(Size, Term, Delete, LookUp, - Objects, Head, Pos, L, []), - eval_key(Key, Delete, LookUp, NewObjects, Head, NextPos, - NWLs, NL, LU, LUK); - false -> - L0 = [{old,Pos} | L], - eval_slot(Head, ?ReadAhead, NextPos, WLs, L0, LU) - end. - -eval_key(_Key, _Delete, Lookup, _Objects, Head, Pos, WLs, L, LU, LUK) - when Head#head.type =:= set -> - NLU = case Lookup of - {lookup, Pid} -> [{Pid,LUK} | LU]; - skip -> LU - end, - eval_slot(Head, ?ReadAhead, Pos, WLs, L, NLU); -eval_key(_Key, _Delete, LookUp, Objects, Head, Pos, WLs, L, LU, LUK) - when Pos =:= 0 -> - {NL, NLU} = end_of_key(Objects, LookUp, L, LUK), - eval_slot(Head, ?ReadAhead, Pos, WLs, NL, NLU++LU); -eval_key(Key, Delete, LookUp, Objects, Head, Pos, WLs, L, LU, LUK) -> - {NextPos, Size, Term} = prterm(Head, Pos, ?ReadAhead), - case element(Head#head.keypos, Term) of - Key -> - {NewObjects, NL, LUK1} = - eval_object(Size, Term, Delete, LookUp,Objects,Head,Pos,L,LUK), - eval_key(Key, Delete, LookUp, NewObjects, Head, NextPos, WLs, - NL, LU, LUK1); - Key2 -> - {L1, NLU} = end_of_key(Objects, LookUp, L, LUK), - find_key(Head, Pos, NextPos, Size, Term, Key2, WLs, L1, NLU++LU) - end. - -%% All objects in Objects have the key Key. -eval_object(Size, Term, Delete, LookUp, Objects, Head, Pos, L, LU) -> - Type = Head#head.type, - case lists:keyfind(Term, 1, Objects) of - {_Object, N} when N =:= 0 -> - L1 = [{delete,Pos,Size} | L], - {Objects, L1, LU}; - {_Object, N} when N < 0, Type =:= set -> - L1 = [{old,Pos} | L], - wl_lookup(LookUp, Objects, Term, L1, LU); - {Object, _N} when Type =:= bag -> % when N =:= 1; N =:= -1 - L1 = [{old,Pos} | L], - Objects1 = lists:keydelete(Object, 1, Objects), - wl_lookup(LookUp, Objects1, Term, L1, LU); - {Object, N} when N < 0, Type =:= duplicate_bag -> - L1 = [{old,Pos} | L], - Objects1 = lists:keyreplace(Object, 1, Objects, {Object,N+1}), - wl_lookup(LookUp, Objects1, Term, L1, LU); - {_Object, N} when N > 0, Type =:= duplicate_bag -> - L1 = [{old,Pos} | L], - wl_lookup(LookUp, Objects, Term, L1, LU); - false when Type =:= set, Delete =:= delete -> - case lists:keyfind(-1, 2, Objects) of - false -> % no inserted object, perhaps deleted objects - L1 = [{delete,Pos,Size} | L], - {[], L1, LU}; - {Term2, -1} -> - Bin2 = term_to_binary(Term2), - NSize = byte_size(Bin2), - Overwrite = - if - NSize =:= Size -> - true; - true -> - SizePos = sz2pos(Size+?OHDSZ), - NSizePos = sz2pos(NSize+?OHDSZ), - SizePos =:= NSizePos - end, - E = if - Overwrite -> - {overwrite,Bin2,Pos}; - true -> - {replace,Bin2,Pos,Size} - end, - wl_lookup(LookUp, [], Term2, [E | L], LU) - end; - false when Delete =:= delete -> - L1 = [{delete,Pos,Size} | L], - {Objects, L1, LU}; - false -> - L1 = [{old,Pos} | L], - wl_lookup(LookUp, Objects, Term, L1, LU) - end. - -%% Inlined. 
-wl_lookup({lookup,_}, Objects, Term, L, LU) -> - {Objects, L, [Term | LU]}; -wl_lookup(skip, Objects, _Term, L, LU) -> - {Objects, L, LU}. - -end_of_key([{Object,N0} | Objs], LookUp, L, LU) when N0 =/= 0 -> - N = abs(N0), - NL = [{insert,N,term_to_binary(Object)} | L], - NLU = case LookUp of - {lookup, _} -> - lists:duplicate(N, Object) ++ LU; - skip -> - LU - end, - end_of_key(Objs, LookUp, NL, NLU); -end_of_key([_ | Objects], LookUp, L, LU) -> - end_of_key(Objects, LookUp, L, LU); -end_of_key([], {lookup,Pid}, L, LU) -> - {L, [{Pid,LU}]}; -end_of_key([], skip, L, LU) -> - {L, LU}. - -create_writes([L | Ls], H, Ws, No) -> - {NH, NWs, NNo} = create_writes(L, H, Ws, No, 0, true), - create_writes(Ls, NH, NWs, NNo); -create_writes([], H, Ws, No) -> - {H, lists:reverse(Ws), No}. - -create_writes([{old,Pos} | L], H, Ws, No, _Next, true) -> - create_writes(L, H, Ws, No, Pos, true); -create_writes([{old,Pos} | L], H, Ws, No, Next, false) -> - W = {Pos, <<Next:32>>}, - create_writes(L, H, [W | Ws], No, Pos, true); -create_writes([{insert,N,Bin} | L], H, Ws, No, Next, _NextIsOld) -> - {NH, NWs, Pos} = create_inserts(N, H, Ws, Next, byte_size(Bin), Bin), - create_writes(L, NH, NWs, No+N, Pos, false); -create_writes([{overwrite,Bin,Pos} | L], H, Ws, No, Next, _) -> - Size = byte_size(Bin), - W = {Pos, [<<Next:32, Size:32, ?ACTIVE:32>>, Bin]}, - create_writes(L, H, [W | Ws], No, Pos, true); -create_writes([{replace,Bin,Pos,OSize} | L], H, Ws, No, Next, _) -> - Size = byte_size(Bin), - {H1, _} = dets_utils:free(H, Pos, OSize+?OHDSZ), - {NH, NewPos, _} = dets_utils:alloc(H1, ?OHDSZ + Size), - W1 = {NewPos, [<<Next:32, Size:32, ?ACTIVE:32>>, Bin]}, - NWs = if - Pos =:= NewPos -> - [W1 | Ws]; - true -> - W2 = {Pos+?STATUS_POS, <<?FREE:32>>}, - [W1,W2 | Ws] - end, - create_writes(L, NH, NWs, No, NewPos, false); -create_writes([{delete,Pos,Size} | L], H, Ws, No, Next, _) -> - {NH, _} = dets_utils:free(H, Pos, Size+?OHDSZ), - NWs = [{Pos+?STATUS_POS,<<?FREE:32>>} | Ws], - create_writes(L, NH, NWs, No-1, Next, false); -create_writes([], H, Ws, No, _Next, _NextIsOld) -> - {H, Ws, No}. - -create_inserts(0, H, Ws, Next, _Size, _Bin) -> - {H, Ws, Next}; -create_inserts(N, H, Ws, Next, Size, Bin) -> - {NH, Pos, _} = dets_utils:alloc(H, ?OHDSZ + Size), - W = {Pos, [<<Next:32, Size:32, ?ACTIVE:32>>, Bin]}, - create_inserts(N-1, NH, [W | Ws], Pos, Size, Bin). - -slot_position(S) -> - Pos = ?SEGADDR(?SLOT2SEG(S)), - Segment = get_segp(Pos), - FinalPos = Segment + (4 * ?REM2(S, ?SEGSZ)), - {FinalPos, 4}. - -%% Twice the size of a segment due to the bug in sz2pos/1. Inlined. -actual_seg_size() -> - ?POW(sz2pos(?SEGSZ*4)-1). - -segp_cache(Pos, Segment) -> - put(Pos, Segment). - -%% Inlined. -get_segp(Pos) -> - get(Pos). - -%% Bug: If Sz0 is equal to 2**k for some k, then 2**(k+1) bytes are -%% allocated (wasting 2**k bytes). -sz2pos(N) -> - 1 + dets_utils:log2(N+1). - -scan_objs(_Head, Bin, From, To, L, Ts, R, _Type) -> - scan_objs(Bin, From, To, L, Ts, R). 
- -scan_objs(Bin, From, To, L, Ts, -1) -> - {stop, Bin, From, To, L, Ts}; -scan_objs(B = <<_N:32, Sz:32, St:32, T/binary>>, From, To, L, Ts, R) -> - if - St =:= ?ACTIVE; - St =:= ?FREE -> % deleted after scanning started - case T of - <<BinTerm:Sz/binary, T2/binary>> -> - NTs = [BinTerm | Ts], - OSz = Sz + ?OHDSZ, - Skip = ?POW(sz2pos(OSz)-1) - OSz, - F2 = From + OSz, - NR = if - R < 0 -> - R + 1; - true -> - R + OSz + Skip - end, - scan_skip(T2, F2, To, Skip, L, NTs, NR); - _ -> - {more, From, To, L, Ts, R, Sz+?OHDSZ} - end; - true -> % a segment - scan_skip(B, From, To, actual_seg_size(), L, Ts, R) - end; -scan_objs(_B, From, To, L, Ts, R) -> - {more, From, To, L, Ts, R, 0}. - -scan_skip(Bin, From, To, Skip, L, Ts, R) when From + Skip < To -> - SkipPos = From + Skip, - case Bin of - <<_:Skip/binary, Tail/binary>> -> - scan_objs(Tail, SkipPos, To, L, Ts, R); - _ -> - {more, SkipPos, To, L, Ts, R, 0} - end; -scan_skip(Bin, From, To, Skip, L, Ts, R) when From + Skip =:= To -> - scan_next_allocated(Bin, From, To, L, Ts, R); -scan_skip(_Bin, From, _To, Skip, L, Ts, R) -> % when From + Skip > _To - From1 = From + Skip, - {more, From1, From1, L, Ts, R, 0}. - -scan_next_allocated(_Bin, _From, To, <<>>=L, Ts, R) -> - {more, To, To, L, Ts, R, 0}; -scan_next_allocated(Bin, From0, _To, <<From:32, To:32, L/binary>>, Ts, R) -> - Skip = From - From0, - scan_skip(Bin, From0, To, Skip, L, Ts, R). - -%% Read term from file at position Pos -prterm(Head, Pos, ReadAhead) -> - Res = dets_utils:pread(Head, Pos, ?OHDSZ, ReadAhead), - ?DEBUGF("file:pread(~tp, ~p, ?) -> ~p~n", [Head#head.filename, Pos, Res]), - {ok, <<Next:32, Sz:32, _Status:32, Bin0/binary>>} = Res, - ?DEBUGF("{Next, Sz} = ~p~n", [{Next, Sz}]), - Bin = case byte_size(Bin0) of - Actual when Actual >= Sz -> - Bin0; - _ -> - {ok, Bin1} = dets_utils:pread(Head, Pos + ?OHDSZ, Sz, 0), - Bin1 - end, - Term = binary_to_term(Bin), - {Next, Sz, Term}. - -%%%%%%%%%%%%%%%%% DEBUG functions %%%%%%%%%%%%%%%% - -file_info(FH) -> - #fileheader{closed_properly = CP, keypos = Kp, - m = M, next = Next, n = N, version = Version, - type = Type, no_objects = NoObjects} - = FH, - if - CP =:= 0 -> - {error, not_closed}; - FH#fileheader.cookie =/= ?MAGIC -> - {error, not_a_dets_file}; - FH#fileheader.version =/= ?FILE_FORMAT_VERSION -> - {error, bad_version}; - true -> - {ok, [{closed_properly,CP},{keypos,Kp},{m, M}, - {n,N},{next,Next},{no_objects,NoObjects}, - {type,Type},{version,Version}]} - end. - -v_segments(H) -> - v_segments(H, 0). - -v_segments(_H, ?SEGARRSZ) -> - done; -v_segments(H, SegNo) -> - Seg = dets_utils:read_4(H#head.fptr, ?SEGADDR(SegNo)), - if - Seg =:= 0 -> - done; - true -> - io:format("SEGMENT ~w ", [SegNo]), - io:format("At position ~w~n", [Seg]), - v_segment(H, SegNo, Seg, 0), - v_segments(H, SegNo+1) - end. - -v_segment(_H, _, _SegPos, ?SEGSZ) -> - done; -v_segment(H, SegNo, SegPos, SegSlot) -> - Slot = SegSlot + (SegNo * ?SEGSZ), - Chain = dets_utils:read_4(H#head.fptr, SegPos + (4 * SegSlot)), - if - Chain =:= 0 -> %% don't print empty chains - true; - true -> - io:format(" <~p>~p: [",[SegPos + (4 * SegSlot), Slot]), - print_chain(H, Chain) - end, - v_segment(H, SegNo, SegPos, SegSlot+1). - -print_chain(_H, 0) -> - io:format("] \n", []); -print_chain(H, Pos) -> - {ok, _} = file:position(H#head.fptr, Pos), - case rterm(H#head.fptr) of - {ok, 0, _Sz, Term} -> - io:format("<~p>~p] \n",[Pos, Term]); - {ok, Next, _Sz, Term} -> - io:format("<~p>~p, ", [Pos, Term]), - print_chain(H, Next); - Other -> - io:format("~nERROR ~p~n", [Other]) - end. 
- -%% Can't be used at the bucket level!!!! -%% Only when we go down a chain -rterm(F) -> - case catch rterm2(F) of - {'EXIT', Reason} -> %% truncated DAT file - dets_utils:vformat("** dets: Corrupt or truncated dets file~n", - []), - {error, Reason}; - Other -> - Other - end. - -rterm2(F) -> - {ok, <<Next:32, Sz:32, _:32>>} = file:read(F, ?OHDSZ), - {ok, Bin} = file:read(F, Sz), - Term = binary_to_term(Bin), - {ok, Next, Sz, Term}. - - diff --git a/lib/stdlib/src/dets_v9.erl b/lib/stdlib/src/dets_v9.erl index 6c406fc03a..3ab8f87ebf 100644 --- a/lib/stdlib/src/dets_v9.erl +++ b/lib/stdlib/src/dets_v9.erl @@ -24,8 +24,8 @@ -export([mark_dirty/1, read_file_header/2, check_file_header/2, do_perform_save/1, initiate_file/11, - prep_table_copy/9, init_freelist/2, fsck_input/4, - bulk_input/3, output_objs/4, bchunk_init/2, + prep_table_copy/9, init_freelist/1, fsck_input/4, + bulk_input/3, output_objs/3, bchunk_init/2, try_bchunk_header/2, compact_init/3, read_bchunks/2, write_cache/1, may_grow/3, find_object/2, slot_objs/2, scan_objs/8, db_hash/2, no_slots/1, table_parameters/1]). @@ -228,8 +228,8 @@ -define(CLOSED_PROPERLY_POS, 8). -define(D_POS, 20). -%%% Dets file versions up to 8 are handled in dets_v8. This module -%%% handles version 9, introduced in R8. +%%% This module handles Dets file format version 9, introduced in +%%% Erlang/OTP R8. %%% %%% Version 9(a) tables have 256 reserved bytes in the file header, %%% all initialized to zero. @@ -249,32 +249,32 @@ -define(OHDSZ, 8). % The size of the object header, in bytes. -define(STATUS_POS, 4). % Position of the status field. --define(OHDSZ_v8, 12). % The size of the version 8 object header. - %% The size of each object is a multiple of 16. %% BUMP is used when repairing files. -define(BUMP, 16). -%%% '$hash' is the value of HASH_PARMS in R8, '$hash2' is the value in R9. +%%% '$hash' is the value of HASH_PARMS in Erlang/OTP R8, '$hash2' is +%%% the value in Erlang/OTP R9. %%% %%% The fields of the ?HASH_PARMS records are the same, but having -%%% different tags makes bchunk_init on R8 nodes reject data from R9 -%%% nodes, and vice versa. This is overkill, and due to an oversight. -%%% What should have been done in R8 was to check the hash method, not -%%% only the type of the table and the key position. R8 nodes cannot -%%% handle the phash2 method. +%%% different tags makes bchunk_init on Erlang/OTP R8 nodes reject +%%% data from Erlang/OTP R9 nodes, and vice versa. This is overkill, +%%% and due to an oversight. What should have been done in Erlang/OTP +%%% R8 was to check the hash method, not only the type of the table +%%% and the key position. Erlang/OTP R8 nodes cannot handle the phash2 +%%% method. -define(HASH_PARMS, '$hash2'). -define(BCHUNK_FORMAT_VERSION, 1). -record(?HASH_PARMS, { - file_format_version, + file_format_version, bchunk_format_version, file, type, keypos, hash_method, n,m,next, min,max, no_objects,no_keys, - no_colls % [{LogSz,NoColls}], NoColls >= 0 + no_colls :: no_colls() }). -define(ACTUAL_SEG_SIZE, (?SEGSZ*4)). @@ -364,10 +364,8 @@ init_file(Fd, Tab, Fname, Type, Kp, MinSlots, MaxSlots, Ram, CacheSz, filename = Fname, name = Tab, cache = dets_utils:new_cache(CacheSz), - version = ?FILE_FORMAT_VERSION, bump = ?BUMP, - base = ?BASE, % to be overwritten - mod = ?MODULE + base = ?BASE % to be overwritten }, FreeListsPointer = 0, @@ -457,7 +455,7 @@ alloc_seg(Head, SegZero, SegNo, Part) -> {NewHead, InitSegment, [SegPointer]}. %% Read free lists (using a Buddy System) from file. 
-init_freelist(Head, true) -> +init_freelist(Head) -> Pos = Head#head.freelists_p, free_lists_from_file(Head, Pos). @@ -510,12 +508,10 @@ read_file_header(Fd, FileName) -> md5 = erlang:md5(MD5DigestedPart), trailer = FileSize + FlBase, eof = EOF, - n = N, - mod = ?MODULE}, + n = N}, {ok, Fd, FH}. -%% -> {ok, head(), ExtraInfo} | {error, Reason} (Reason lacking file name) -%% ExtraInfo = true +%% -> {ok, head()} | {error, Reason} (Reason lacking file name) check_file_header(FH, Fd) -> HashBif = code_to_hash_method(FH#fileheader.hash_method), Test = @@ -534,14 +530,14 @@ check_file_header(FH, Fd) -> HashBif =:= undefined -> {error, bad_hash_bif}; FH#fileheader.closed_properly =:= ?CLOSED_PROPERLY -> - {ok, true}; + ok; FH#fileheader.closed_properly =:= ?NOT_PROPERLY_CLOSED -> {error, not_closed}; true -> {error, not_a_dets_file} end, case Test of - {ok, ExtraInfo} -> + ok -> MaxObjSize = max_objsize(FH#fileheader.no_colls), H = #head{ m = FH#fileheader.m, @@ -563,11 +559,9 @@ check_file_header(FH, Fd) -> min_no_slots = FH#fileheader.min_no_slots, max_no_slots = FH#fileheader.max_no_slots, no_collections = FH#fileheader.no_colls, - version = ?FILE_FORMAT_VERSION, - mod = ?MODULE, bump = ?BUMP, base = FH#fileheader.fl_base}, - {ok, H, ExtraInfo}; + {ok, H}; Error -> Error end. @@ -621,7 +615,7 @@ no_segs(NoSlots) -> %%% %%% bulk_input/3. Initialization, the general case (any stream of objects). -%%% output_objs/4. Initialization (general case) and repair. +%%% output_objs/3. Initialization (general case) and repair. %%% bchunk_init/2. Initialization using bchunk. bulk_input(Head, InitFun, _Cntrs) -> @@ -678,7 +672,7 @@ bulk_objects([], _Head, Kp, Seq, L) when is_integer(Kp), is_integer(Seq) -> -define(OBJ_COUNTER, 2). -define(KEY_COUNTER, 3). -output_objs(OldV, Head, SlotNums, Cntrs) when OldV =< 9 -> +output_objs(Head, SlotNums, Cntrs) -> fun(close) -> %% Make sure that the segments are initialized in case %% init_table has been called. @@ -686,31 +680,31 @@ output_objs(OldV, Head, SlotNums, Cntrs) when OldV =< 9 -> Acc = [], % This is the only way Acc can be empty. true = ets:insert(Cntrs, {?FSCK_SEGMENT,0,[],0}), true = ets:insert(Cntrs, {?COUNTERS, 0, 0}), - Fun = output_objs2(foo, Acc, OldV, Head, Cache, Cntrs, + Fun = output_objs2(foo, Acc, Head, Cache, Cntrs, SlotNums, bar), Fun(close); ([]) -> - output_objs(OldV, Head, SlotNums, Cntrs); + output_objs(Head, SlotNums, Cntrs); (L) -> %% Information about number of objects per size is not %% relevant for version 9. It is the number of collections %% that matters. true = ets:delete_all_objects(Cntrs), true = ets:insert(Cntrs, {?COUNTERS, 0, 0}), - Es = bin2term(L, OldV, Head#head.keypos), + Es = bin2term(L, Head#head.keypos), %% The cache is a tuple indexed by the (log) size. An element %% is [BinaryObject]. Cache = ?VEMPTY, {NE, NAcc, NCache} = output_slots(Es, Head, Cache, Cntrs, 0, 0), - output_objs2(NE, NAcc, OldV, Head, NCache, Cntrs, SlotNums, 1) + output_objs2(NE, NAcc, Head, NCache, Cntrs, SlotNums, 1) end. 
-output_objs2(E, Acc, OldV, Head, Cache, SizeT, SlotNums, 0) -> +output_objs2(E, Acc, Head, Cache, SizeT, SlotNums, 0) -> NCache = write_all_sizes(Cache, SizeT, Head, more), %% Number of handled file_sorter chunks before writing: Max = erlang:max(1, erlang:min(tuple_size(NCache), 10)), - output_objs2(E, Acc, OldV, Head, NCache, SizeT, SlotNums, Max); -output_objs2(E, Acc, OldV, Head, Cache, SizeT, SlotNums, ChunkI) -> + output_objs2(E, Acc, Head, NCache, SizeT, SlotNums, Max); +output_objs2(E, Acc, Head, Cache, SizeT, SlotNums, ChunkI) -> fun(close) -> {_, [], Cache1} = if @@ -747,11 +741,10 @@ output_objs2(E, Acc, OldV, Head, Cache, SizeT, SlotNums, ChunkI) -> end end; (L) -> - Es = bin2term(L, OldV, Head#head.keypos), + Es = bin2term(L, Head#head.keypos), {NE, NAcc, NCache} = output_slots(E, Es, Acc, Head, Cache, SizeT, 0, 0), - output_objs2(NE, NAcc, OldV, Head, NCache, SizeT, SlotNums, - ChunkI-1) + output_objs2(NE, NAcc, Head, NCache, SizeT, SlotNums, ChunkI-1) end. %%% Compaction. @@ -1245,10 +1238,8 @@ allocate_all(Head, [{LSize,_,Data,NoCollections} | DTL], L) -> E = {LSize,Addr,Data,NoCollections}, allocate_all(NewHead, DTL, [E | L]). -bin2term(Bin, 9, Kp) -> - bin2term1(Bin, Kp, []); -bin2term(Bin, 8, Kp) -> - bin2term_v8(Bin, Kp, []). +bin2term(Bin, Kp) -> + bin2term1(Bin, Kp, []). bin2term1([<<Slot:32, Seq:32, BinTerm/binary>> | BTs], Kp, L) -> Term = binary_to_term(BinTerm), @@ -1257,13 +1248,6 @@ bin2term1([<<Slot:32, Seq:32, BinTerm/binary>> | BTs], Kp, L) -> bin2term1([], _Kp, L) -> lists:reverse(L). -bin2term_v8([<<Slot:32, BinTerm/binary>> | BTs], Kp, L) -> - Term = binary_to_term(BinTerm), - Key = element(Kp, Term), - bin2term_v8(BTs, Kp, [{Slot, Key, foo, Term, BinTerm} | L]); -bin2term_v8([], _Kp, L) -> - lists:reverse(L). - write_all_sizes({}=Cache, _SizeT, _Head, _More) -> Cache; write_all_sizes(Cache, SizeT, Head, More) -> @@ -1461,7 +1445,7 @@ temp_file(Head, SizeT, N) -> %% Does not close Fd. fsck_input(Head, Fd, Cntrs, FileHeader) -> MaxSz0 = case FileHeader#fileheader.has_md5 of - true when is_integer(FileHeader#fileheader.no_colls) -> + true when is_list(FileHeader#fileheader.no_colls) -> ?POW(max_objsize(FileHeader#fileheader.no_colls)); _ -> %% The file is not compressed, so the bucket size @@ -1485,10 +1469,10 @@ fsck_input(Head, State, Fd, MaxSz, Cntrs) -> done -> end_of_input; {done, L, _Seq} -> - R = count_input(Head, Cntrs, L), + R = count_input(L), {R, fsck_input(Head, done, Fd, MaxSz, Cntrs)}; {cont, L, Bin, Pos, Seq} -> - R = count_input(Head, Cntrs, L), + R = count_input(L), FR = fsck_objs(Bin, Head#head.keypos, Head, [], Seq), NewState = fsck_read(FR, Pos, Fd, MaxSz, Head), {R, fsck_input(Head, NewState, Fd, MaxSz, Cntrs)} @@ -1496,20 +1480,9 @@ fsck_input(Head, State, Fd, MaxSz, Cntrs) -> end. %% The ets table Cntrs is used for counting objects per size. -count_input(Head, Cntrs, L) when Head#head.version =:= 8 -> - count_input1(Cntrs, L, []); -count_input(_Head, _Cntrs, L) -> +count_input(L) -> lists:reverse(L). -count_input1(Cntrs, [[LogSz | B] | Ts], L) -> - case catch ets:update_counter(Cntrs, LogSz, 1) of - N when is_integer(N) -> ok; - _Badarg -> true = ets:insert(Cntrs, {LogSz, 1}) - end, - count_input1(Cntrs, Ts, [B | L]); -count_input1(_Cntrs, [], L) -> - L. - fsck_read(Pos, F, L, Seq) -> case file:position(F, Pos) of {ok, _} -> @@ -1564,11 +1537,6 @@ fsck_objs(Bin = <<Sz:32, Status:32, Tail/binary>>, Kp, Head, L, Seq) -> fsck_objs(Bin, _Kp, _Head, L, Seq) -> {more, Bin, 0, L, Seq}. 
-make_objects([{K,BT}|Os], Seq, Kp, Head, L) when Head#head.version =:= 8 -> - LogSz = dets_v8:sz2pos(byte_size(BT)+?OHDSZ_v8), - Slot = dets_v8:db_hash(K, Head), - Obj = [LogSz | <<Slot:32, LogSz:8, BT/binary>>], - make_objects(Os, Seq, Kp, Head, [Obj | L]); make_objects([{K,BT} | Os], Seq, Kp, Head, L) -> Obj = make_object(Head, K, Seq, BT), make_objects(Os, Seq+1, Kp, Head, [Obj | L]); @@ -1607,7 +1575,7 @@ do_perform_save(H) -> FileHeader = file_header(H1, FreeListsPointer, ?CLOSED_PROPERLY), case dets_utils:debug_mode() of true -> - TmpHead0 = init_freelist(H1#head{fixed = false}, true), + TmpHead0 = init_freelist(H1#head{fixed = false}), TmpHead = TmpHead0#head{base = H1#head.base}, case catch dets_utils:all_allocated_as_list(TmpHead) @@ -1794,7 +1762,7 @@ table_parameters(Head) -> (E, A) -> [E | A] end, [], CL), NoColls = lists:reverse(NoColls0), - #?HASH_PARMS{file_format_version = Head#head.version, + #?HASH_PARMS{file_format_version = ?FILE_FORMAT_VERSION, bchunk_format_version = ?BCHUNK_FORMAT_VERSION, file = filename:basename(Head#head.filename), type = Head#head.type, diff --git a/lib/stdlib/src/erl_parse.yrl b/lib/stdlib/src/erl_parse.yrl index 549179da68..9cd95705af 100644 --- a/lib/stdlib/src/erl_parse.yrl +++ b/lib/stdlib/src/erl_parse.yrl @@ -516,6 +516,22 @@ comp_op -> '>' : '$1'. comp_op -> '=:=' : '$1'. comp_op -> '=/=' : '$1'. +Header +"%% This file was automatically generated from the file \"erl_parse.yrl\"." +"%%" +"%% Copyright Ericsson AB 1996-2015. All Rights Reserved." +"%%" +"%% Licensed under the Apache License, Version 2.0 (the \"License\"); you may" +"%% not use this file except in compliance with the License. You may obtain" +"%% a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>" +"%%" +"%% Unless required by applicable law or agreed to in writing, software" +"%% distributed under the License is distributed on an \"AS IS\" BASIS," +"%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied." +"%% See the License for the specific language governing permissions and" +"%% limitations under the License." +"". + Erlang code. -export([parse_form/1,parse_exprs/1,parse_term/1]). @@ -612,11 +628,11 @@ Erlang code. | af_bin(abstract_expr()) | af_binary_op(abstract_expr()) | af_unary_op(abstract_expr()) - | af_record_access(abstract_expr()) + | af_record_creation(abstract_expr()) | af_record_update(abstract_expr()) | af_record_index() | af_record_field_access(abstract_expr()) - | af_map_access(abstract_expr()) + | af_map_creation(abstract_expr()) | af_map_update(abstract_expr()) | af_catch() | af_local_call() @@ -720,26 +736,25 @@ Erlang code. | af_bin(af_guard_test()) | af_binary_op(af_guard_test()) | af_unary_op(af_guard_test()) - | af_record_access(af_guard_test()) + | af_record_creation(af_guard_test()) | af_record_index() | af_record_field_access(af_guard_test()) - | af_map_access(abstract_expr()) % FIXME - | af_map_update(abstract_expr()) % FIXME + | af_map_creation(abstract_expr()) + | af_map_update(abstract_expr()) | af_guard_call() | af_remote_guard_call(). -type af_record_field_access(T) :: {'record_field', anno(), T, record_name(), af_field_name()}. --type af_map_access(T) :: {'map', anno(), [af_map_field(T)]}. - --type af_map_update(T) :: {'map', anno(), T, [af_map_field(T)]}. +-type af_map_creation(T) :: {'map', anno(), [af_assoc(T)]}. --type af_map_field(T) :: af_map_field_assoc(T) | af_map_field_exact(T). +-type af_map_update(T) :: {'map', anno(), T, [af_assoc(T)]}. 
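For readers of the renamed abstract-format types, a minimal sketch of what an af_map_creation() term looks like in practice (the af_assoc() constructors are defined just below; annotations here are plain line numbers):

    %% Illustrative only: parse a map-creation expression and match it
    %% against the af_map_creation()/af_assoc() shapes.
    {ok, Tokens, _} = erl_scan:string("#{a => 1}."),
    {ok, [MapExpr]} = erl_parse:parse_exprs(Tokens),
    {map, 1, [{map_field_assoc, 1, {atom, 1, a}, {integer, 1, 1}}]} = MapExpr.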
--type af_map_field_assoc(T) :: {'map_field_assoc', anno(), T, T}. +-type af_assoc(T) :: {'map_field_assoc', anno(), T, T} + | af_assoc_exact(T). --type af_map_field_exact(T) :: {'map_field_exact', anno(), T, T}. +-type af_assoc_exact(T) :: {'map_field_exact', anno(), T, T}. -type af_guard_call() :: {'call', anno(), function_name(), [af_guard_test()]}. @@ -757,20 +772,20 @@ Erlang code. | af_bin(af_pattern()) | af_binary_op(af_pattern()) | af_unary_op(af_pattern()) - | af_record_access(af_pattern()) + | af_record_creation(af_pattern()) | af_record_index() | af_map_pattern(). -type af_record_index() :: {'record_index', anno(), record_name(), af_field_name()}. --type af_record_access(T) :: +-type af_record_creation(T) :: {'record', anno(), record_name(), [af_record_field(T)]}. -type af_record_field(T) :: {'record_field', anno(), af_field_name(), T}. -type af_map_pattern() :: - {'map', anno(), [af_map_field_exact(abstract_expr)]}. % FIXME? + {'map', anno(), [af_assoc_exact(abstract_expr)]}. -type abstract_type() :: af_annotated_type() | af_atom() @@ -807,9 +822,9 @@ Erlang code. {'type', anno(), 'range', [af_singleton_integer_type()]}. -type af_map_type() :: {'type', anno(), 'map', 'any'} - | {'type', anno(), 'map', [af_map_pair_type()]}. + | {'type', anno(), 'map', [af_assoc_type()]}. --type af_map_pair_type() :: +-type af_assoc_type() :: {'type', anno(), 'map_field_assoc', [abstract_type()]} | {'type', anno(), 'map_field_exact', [abstract_type()]}. diff --git a/lib/stdlib/src/error_logger_file_h.erl b/lib/stdlib/src/error_logger_file_h.erl index 665685d3ee..0b262de3ab 100644 --- a/lib/stdlib/src/error_logger_file_h.erl +++ b/lib/stdlib/src/error_logger_file_h.erl @@ -116,8 +116,8 @@ write_event(#st{fd=Fd}=State, Event) -> ignore -> ok; {Head,Pid,FormatList} -> - Time = maybe_utc(erlang:universaltime()), - Header = write_time(Time, Head), + Time = erlang:universaltime(), + Header = header(Time, Head), Body = format_body(State, FormatList), AtNode = if node(Pid) =/= node() -> @@ -125,7 +125,7 @@ write_event(#st{fd=Fd}=State, Event) -> true -> [] end, - io:put_chars(Fd, [Header,Body,AtNode]) + io:put_chars(Fd, [Header,AtNode,Body]) end. format_body(State, [{Format,Args}|T]) -> @@ -172,21 +172,6 @@ parse_event({warning_report, _GL, {Pid, std_warning, Args}}) -> {"WARNING REPORT",Pid,format_term(Args)}; parse_event(_) -> ignore. -maybe_utc(Time) -> - UTC = case application:get_env(sasl, utc_log) of - {ok, Val} -> Val; - undefined -> - %% Backwards compatible: - case application:get_env(stdlib, utc_log) of - {ok, Val} -> Val; - undefined -> false - end - end, - maybe_utc(Time, UTC). - -maybe_utc(Time, true) -> {utc, Time}; -maybe_utc(Time, _) -> {local, calendar:universal_time_to_local_time(Time)}. - format_term(Term) when is_list(Term) -> case string_p(Term) of true -> @@ -227,17 +212,33 @@ string_p1([H|T]) when is_list(H) -> string_p1([]) -> true; string_p1(_) -> false. -write_time({utc,{{Y,Mo,D},{H,Mi,S}}}, Type) -> - io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s UTC ===~n", - [Type,D,month(Mo),Y,t(H),t(Mi),t(S)]); -write_time({local, {{Y,Mo,D},{H,Mi,S}}}, Type) -> - io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s ===~n", - [Type,D,month(Mo),Y,t(H),t(Mi),t(S)]). +get_utc_config() -> + %% SASL utc_log configuration overrides stdlib config + %% in order to have uniform timestamps in log messages + case application:get_env(sasl, utc_log) of + {ok, Val} -> Val; + undefined -> + case application:get_env(stdlib, utc_log) of + {ok, Val} -> Val; + undefined -> false + end + end. 
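A minimal usage sketch for the configuration that get_utc_config/0 above consults (nothing here beyond the two application environment keys the code reads):

    %% Print report headers in UTC by setting either key; the SASL key
    %% takes precedence, the stdlib key is the backwards-compatible fallback.
    ok = application:set_env(sasl, utc_log, true),
    %% or, when no SASL configuration is used:
    ok = application:set_env(stdlib, utc_log, true).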
+ +header(Time, Title) -> + case get_utc_config() of + true -> + header(Time, Title, "UTC "); + _ -> + header(calendar:universal_time_to_local_time(Time), Title, "") + end. + +header({{Y,Mo,D},{H,Mi,S}}, Title, UTC) -> + io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s ~s===~n", + [Title,D,month(Mo),Y,t(H),t(Mi),t(S),UTC]). t(X) when is_integer(X) -> - t1(integer_to_list(X)); -t(_) -> - "". + t1(integer_to_list(X)). + t1([X]) -> [$0,X]; t1(X) -> X. @@ -253,5 +254,3 @@ month(9) -> "Sep"; month(10) -> "Oct"; month(11) -> "Nov"; month(12) -> "Dec". - - diff --git a/lib/stdlib/src/error_logger_tty_h.erl b/lib/stdlib/src/error_logger_tty_h.erl index cb22a8c0b6..2f2fd65252 100644 --- a/lib/stdlib/src/error_logger_tty_h.erl +++ b/lib/stdlib/src/error_logger_tty_h.erl @@ -128,13 +128,12 @@ write_events(State, [Ev|Es]) -> write_events(_State, []) -> ok. -do_write_event(State, {Time0, Event}) -> +do_write_event(State, {Time, Event}) -> case parse_event(Event) of ignore -> ok; - {Head,Pid,FormatList} -> - Time = maybe_utc(Time0), - Header = write_time(Time, Head), + {Title,Pid,FormatList} -> + Header = header(Time, Title), Body = format_body(State, FormatList), AtNode = if node(Pid) =/= node() -> @@ -142,7 +141,7 @@ do_write_event(State, {Time0, Event}) -> true -> [] end, - Str = [Header,Body,AtNode], + Str = [Header,AtNode,Body], case State#st.io_mod of io_lib -> Str; @@ -197,21 +196,6 @@ parse_event({warning_report, _GL, {Pid, std_warning, Args}}) -> {"WARNING REPORT",Pid,format_term(Args)}; parse_event(_) -> ignore. -maybe_utc(Time) -> - UTC = case application:get_env(sasl, utc_log) of - {ok, Val} -> Val; - undefined -> - %% Backwards compatible: - case application:get_env(stdlib, utc_log) of - {ok, Val} -> Val; - undefined -> false - end - end, - maybe_utc(Time, UTC). - -maybe_utc(Time, true) -> {utc, Time}; -maybe_utc(Time, _) -> {local, calendar:universal_time_to_local_time(Time)}. - format_term(Term) when is_list(Term) -> case string_p(Term) of true -> @@ -255,12 +239,29 @@ string_p1([H|T]) when is_list(H) -> string_p1([]) -> true; string_p1(_) -> false. -write_time({utc,{{Y,Mo,D},{H,Mi,S}}},Type) -> - io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s UTC ===~n", - [Type,D,month(Mo),Y,t(H),t(Mi),t(S)]); -write_time({local, {{Y,Mo,D},{H,Mi,S}}},Type) -> - io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s ===~n", - [Type,D,month(Mo),Y,t(H),t(Mi),t(S)]). +get_utc_config() -> + %% SASL utc_log configuration overrides stdlib config + %% in order to have uniform timestamps in log messages + case application:get_env(sasl, utc_log) of + {ok, Val} -> Val; + undefined -> + case application:get_env(stdlib, utc_log) of + {ok, Val} -> Val; + undefined -> false + end + end. + +header(Time, Title) -> + case get_utc_config() of + true -> + header(Time, Title, "UTC "); + _ -> + header(calendar:universal_time_to_local_time(Time), Title, "") + end. + +header({{Y,Mo,D},{H,Mi,S}}, Title, UTC) -> + io_lib:format("~n=~s==== ~p-~s-~p::~s:~s:~s ~s===~n", + [Title,D,month(Mo),Y,t(H),t(Mi),t(S),UTC]). t(X) when is_integer(X) -> t1(integer_to_list(X)); @@ -281,8 +282,3 @@ month(9) -> "Sep"; month(10) -> "Oct"; month(11) -> "Nov"; month(12) -> "Dec". - - - - - diff --git a/lib/stdlib/src/gb_sets.erl b/lib/stdlib/src/gb_sets.erl index 47a8fa6db0..6d6f7d40ac 100644 --- a/lib/stdlib/src/gb_sets.erl +++ b/lib/stdlib/src/gb_sets.erl @@ -1,8 +1,3 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2001-2015. All Rights Reserved. 
-%% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. %% You may obtain a copy of the License at @@ -14,8 +9,6 @@ %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %% See the License for the specific language governing permissions and %% limitations under the License. -%% -%% %CopyrightEnd% %% %% ===================================================================== %% Ordered Sets implemented as General Balanced Trees diff --git a/lib/stdlib/src/gb_trees.erl b/lib/stdlib/src/gb_trees.erl index c4a20d92a7..457287fa52 100644 --- a/lib/stdlib/src/gb_trees.erl +++ b/lib/stdlib/src/gb_trees.erl @@ -1,8 +1,3 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2001-2015. All Rights Reserved. -%% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. %% You may obtain a copy of the License at @@ -14,8 +9,6 @@ %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %% See the License for the specific language governing permissions and %% limitations under the License. -%% -%% %CopyrightEnd% %% %% ===================================================================== %% General Balanced Trees - highly efficient dictionaries. diff --git a/lib/stdlib/src/gen_statem.erl b/lib/stdlib/src/gen_statem.erl index 17d1ebecec..018aca90e6 100644 --- a/lib/stdlib/src/gen_statem.erl +++ b/lib/stdlib/src/gen_statem.erl @@ -85,7 +85,8 @@ -type state_enter() :: 'state_enter'. -type transition_option() :: - postpone() | hibernate() | event_timeout(). + postpone() | hibernate() | + event_timeout() | state_timeout(). -type postpone() :: %% If 'true' postpone the current event %% and retry it when the state changes (=/=) @@ -108,7 +109,7 @@ %% * All action()s are executed in order of apperance. %% * Postponing the current event is performed %% iff 'postpone' is 'true'. - %% * A state timer is started iff 'timeout' is set. + %% * A state timeout is started iff 'timeout' is set. %% * Pending events are handled or if there are %% no pending events the server goes into receive %% or hibernate (iff 'hibernate' is 'true') @@ -154,12 +155,12 @@ -type handle_event_result() :: event_handler_result(state()). %% --type state_enter_result(StateType) :: +-type state_enter_result(State) :: {'next_state', % {next_state,NextState,NewData,[]} - State :: StateType, + State, NewData :: data()} | {'next_state', % State transition, maybe to the same state - State :: StateType, + State, NewData :: data(), Actions :: [enter_action()] | enter_action()} | state_callback_result(enter_action()). 
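As an illustration of the parameterized state_enter_result(State) and the new state_timeout transition option, a handle_event/4 callback could look roughly like the sketch below (module, states, and messages are invented):

    %% Illustrative only: a state enter clause that arms a state timeout,
    %% and the clause that handles the resulting state_timeout event.
    handle_event(enter, _OldState, open, Data) ->
        %% the timeout is cancelled automatically if the state changes
        %% before it fires
        {keep_state, Data, [{state_timeout, 5000, close_door}]};
    handle_event(state_timeout, close_door, open, Data) ->
        {next_state, closed, Data};
    handle_event(_EventType, _EventContent, _State, Data) ->
        {keep_state, Data}.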
@@ -231,9 +232,9 @@ -callback handle_event( 'enter', OldState :: state(), - State :: state(), % Current state + State, % Current state Data :: data()) -> - state_enter_result(state()); + state_enter_result(State); (event_type(), EventContent :: term(), State :: state(), % Current state @@ -596,8 +597,8 @@ enter(Module, Opts, State, Data, Server, Actions, Parent) -> data => Data, postponed => P, %% The rest of the fields are set from to the arguments to - %% loop_event_actions/9 when it finally loops back to loop/3 - %% in loop_events_done/9 + %% loop_event_actions/10 when it finally loops back to loop/3 + %% in loop_events/10 %% %% Marker for initial state, cleared immediately when used init_state => true @@ -605,9 +606,10 @@ enter(Module, Opts, State, Data, Server, Actions, Parent) -> NewDebug = sys_debug(Debug, S, State, {enter,Event,State}), case call_callback_mode(S) of {ok,NewS} -> - StateTimer = undefined, + TimerRefs = #{}, + TimerTypes = #{}, loop_event_actions( - Parent, NewDebug, NewS, StateTimer, + Parent, NewDebug, NewS, TimerRefs, TimerTypes, Events, Event, State, Data, NewActions); {Class,Reason,Stacktrace} -> terminate( @@ -747,6 +749,10 @@ print_event(Dev, {out,Reply,{To,_Tag}}, {Name,State}) -> io:format( Dev, "*DBG* ~p send ~p to ~p from state ~p~n", [Name,Reply,To,State]); +print_event(Dev, {terminate,Reason}, {Name,State}) -> + io:format( + Dev, "*DBG* ~p terminate ~p in state ~p~n", + [Name,Reason,State]); print_event(Dev, {Tag,Event,NextState}, {Name,State}) -> StateString = case NextState of @@ -806,7 +812,7 @@ loop(Parent, Debug, #{hibernate := Hibernate} = S) -> %% Entry point for wakeup_from_hibernate/3 loop_receive( - Parent, Debug, #{timer := Timer, state_timer := StateTimer} = S) -> + Parent, Debug, #{timer_refs := TimerRefs, timer_types := TimerTypes} = S) -> receive Msg -> case Msg of @@ -822,18 +828,23 @@ loop_receive( %% but this will stand out in the crash report... terminate( exit, Reason, ?STACKTRACE(), Debug, S, [EXIT]); - {timeout,Timer,Content} - when Timer =/= undefined -> - loop_receive_result( - Parent, Debug, S, StateTimer, - {timeout,Content}); - {timeout,StateTimer,Content} - when StateTimer =/= undefined -> - loop_receive_result( - Parent, Debug, S, undefined, - {state_timeout,Content}); + {timeout,TimerRef,TimerMsg} -> + case TimerRefs of + #{TimerRef := TimerType} -> + Event = {TimerType,TimerMsg}, + %% Unregister the triggered timeout + loop_receive_result( + Parent, Debug, S, + maps:remove(TimerRef, TimerRefs), + maps:remove(TimerType, TimerTypes), + Event); + _ -> + Event = {info,Msg}, + loop_receive_result( + Parent, Debug, S, + TimerRefs, TimerTypes, Event) + end; _ -> - cancel_timer(Timer), Event = case Msg of {'$gen_call',From,Request} -> @@ -844,12 +855,15 @@ loop_receive( {info,Msg} end, loop_receive_result( - Parent, Debug, S, StateTimer, Event) + Parent, Debug, S, + TimerRefs, TimerTypes, Event) end end. 
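The receive clause above classifies a firing timer by looking its reference up in TimerRefs; a rough, stand-alone sketch of the two-map bookkeeping (values invented):

    %% Illustrative only: TimerRefs is keyed by timer reference, TimerTypes
    %% by timeout type, so a {timeout,Ref,Msg} message can be classified and
    %% a running timer of a given type can be found and cancelled.
    Ref = erlang:start_timer(5000, self(), close_door),
    TimerRefs = #{Ref => state_timeout},
    _TimerTypes = #{state_timeout => Ref},
    receive
        {timeout, Ref, Msg} ->
            {maps:get(Ref, TimerRefs), Msg}    %% -> {state_timeout, close_door}
    end.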
-loop_receive_result(Parent, Debug, #{state := State} = S, StateTimer, Event) -> - %% The fields 'timer', 'state_timer' and 'hibernate' +loop_receive_result( + Parent, Debug, #{state := State} = S, + TimerRefs, TimerTypes, Event) -> + %% The fields 'timer_refs', 'timer_types' and 'hibernate' %% are now invalid in state map S - they will be recalculated %% and restored when we return to loop/3 %% @@ -857,82 +871,196 @@ loop_receive_result(Parent, Debug, #{state := State} = S, StateTimer, Event) -> %% Here the queue of not yet handled events is created Events = [], Hibernate = false, - loop_event(Parent, NewDebug, S, StateTimer, Events, Event, Hibernate). + loop_event( + Parent, NewDebug, S, TimerRefs, TimerTypes, Events, Event, Hibernate). -%% Process the event queue, or if it is empty -%% loop back to loop/3 to receive a new event -loop_events( - Parent, Debug, S, StateTimeout, - [Event|Events], _Timeout, State, Data, P, Hibernate) -> +%% Entry point for handling an event, received or enqueued +loop_event( + Parent, Debug, #{state := State, data := Data} = S, TimerRefs, TimerTypes, + Events, {Type,Content} = Event, Hibernate) -> %% - %% If there was an event timer requested we just ignore that - %% since we have events to handle which cancels the timer - loop_event( - Parent, Debug, S, StateTimeout, - Events, Event, State, Data, P, Hibernate); -loop_events( - Parent, Debug, S, {state_timeout,Time,EventContent}, - [] = Events, Timeout, State, Data, P, Hibernate) -> - if - Time =:= 0 -> - %% Simulate an immediate timeout - %% so we do not get the timeout message - %% after any received event - %% - %% This faked event will cancel - %& any not yet started event timer - Event = {state_timeout,EventContent}, - StateTimer = undefined, - loop_event( - Parent, Debug, S, StateTimer, - Events, Event, State, Data, P, Hibernate); - true -> - StateTimer = erlang:start_timer(Time, self(), EventContent), - loop_events( - Parent, Debug, S, StateTimer, - Events, Timeout, State, Data, P, Hibernate) - end; -loop_events( - Parent, Debug, S, StateTimer, - [] = Events, Timeout, State, Data, P, Hibernate) -> - case Timeout of - {timeout,0,EventContent} -> - %% Simulate an immediate timeout - %% so we do not get the timeout message - %% after any received event - %% - Event = {timeout,EventContent}, - loop_event( - Parent, Debug, S, StateTimer, - Events, Event, State, Data, P, Hibernate); - {timeout,Time,EventContent} -> - Timer = erlang:start_timer(Time, self(), EventContent), - loop_events_done( - Parent, Debug, S, StateTimer, - State, Data, P, Hibernate, Timer); - undefined -> - %% No event timeout has been requested - Timer = undefined, - loop_events_done( - Parent, Debug, S, StateTimer, - State, Data, P, Hibernate, Timer) + %% If Hibernate is true here it can only be + %% because it was set from an event action + %% and we did not go into hibernation since there + %% were events in queue, so we do what the user + %% might rely on i.e collect garbage which + %% would have happened if we actually hibernated + %% and immediately was awakened + Hibernate andalso garbage_collect(), + case call_state_function(S, Type, Content, State, Data) of + {ok,Result,NewS} -> + %% Cancel event timeout + {NewTimerRefs,NewTimerTypes} = + cancel_timer_by_type( + timeout, TimerRefs, TimerTypes), + {NewData,NextState,Actions} = + parse_event_result( + true, Debug, NewS, Result, + Events, Event, State, Data), + loop_event_actions( + Parent, Debug, S, NewTimerRefs, NewTimerTypes, + Events, Event, NextState, NewData, Actions); + 
{Class,Reason,Stacktrace} -> + terminate( + Class, Reason, Stacktrace, Debug, S, [Event|Events]) end. -%% Back to the top -loop_events_done( - Parent, Debug, S, StateTimer, - State, Data, P, Hibernate, Timer) -> +loop_event_actions( + Parent, Debug, + #{state := State, state_enter := StateEnter} = S, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, Actions) -> + case parse_actions(Debug, S, State, Actions) of + {ok,NewDebug,Hibernate,TimeoutsR,Postpone,NextEventsR} -> + if + StateEnter, NextState =/= State -> + loop_event_enter( + Parent, NewDebug, S, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, + Hibernate, TimeoutsR, Postpone, NextEventsR); + StateEnter -> + case maps:is_key(init_state, S) of + true -> + %% Avoid infinite loop in initial state + %% with state entry events + NewS = maps:remove(init_state, S), + loop_event_enter( + Parent, NewDebug, NewS, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, + Hibernate, TimeoutsR, Postpone, NextEventsR); + false -> + loop_event_result( + Parent, NewDebug, S, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, + Hibernate, TimeoutsR, Postpone, NextEventsR) + end; + true -> + loop_event_result( + Parent, NewDebug, S, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, + Hibernate, TimeoutsR, Postpone, NextEventsR) + end; + {Class,Reason,Stacktrace} -> + terminate( + Class, Reason, Stacktrace, + Debug, S#{data := NewData}, [Event|Events]) + end. + +loop_event_enter( + Parent, Debug, #{state := State} = S, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, + Hibernate, TimeoutsR, Postpone, NextEventsR) -> + case call_state_function(S, enter, State, NextState, NewData) of + {ok,Result,NewS} -> + {NewerData,_,Actions} = + parse_event_result( + false, Debug, NewS, Result, + Events, Event, NextState, NewData), + loop_event_enter_actions( + Parent, Debug, NewS, TimerRefs, TimerTypes, + Events, Event, NextState, NewerData, + Hibernate, TimeoutsR, Postpone, NextEventsR, Actions); + {Class,Reason,Stacktrace} -> + terminate( + Class, Reason, Stacktrace, + Debug, S#{state := NextState, data := NewData}, + [Event|Events]) + end. + +loop_event_enter_actions( + Parent, Debug, S, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, + Hibernate, TimeoutsR, Postpone, NextEventsR, Actions) -> + case + parse_enter_actions( + Debug, S, NextState, Actions, + Hibernate, TimeoutsR) + of + {ok,NewDebug,NewHibernate,NewTimeoutsR,_,_} -> + loop_event_result( + Parent, NewDebug, S, TimerRefs, TimerTypes, + Events, Event, NextState, NewData, + NewHibernate, NewTimeoutsR, Postpone, NextEventsR); + {Class,Reason,Stacktrace} -> + terminate( + Class, Reason, Stacktrace, + Debug, S#{state := NextState, data := NewData}, + [Event|Events]) + end. + +loop_event_result( + Parent, Debug, + #{state := State, postponed := P_0} = S, TimerRefs_0, TimerTypes_0, + Events, Event, NextState, NewData, + Hibernate, TimeoutsR, Postpone, NextEventsR) -> + %% + %% All options have been collected and next_events are buffered. + %% Do the actual state transition. 
+ %% + {NewDebug,P_1} = % Move current event to postponed if Postpone + case Postpone of + true -> + {sys_debug(Debug, S, State, {postpone,Event,State}), + [Event|P_0]}; + false -> + {sys_debug(Debug, S, State, {consume,Event,State}), + P_0} + end, + {Events_1,NewP,{TimerRefs_1,TimerTypes_1}} = + %% Move all postponed events to queue and cancel the + %% state timeout if the state changes + if + NextState =:= State -> + {Events,P_1,{TimerRefs_0,TimerTypes_0}}; + true -> + {lists:reverse(P_1, Events),[], + cancel_timer_by_type( + state_timeout, TimerRefs_0, TimerTypes_0)} + end, + {TimerRefs_2,TimerTypes_2,TimeoutEvents} = + %% Stop and start timers non-event timers + parse_timers(TimerRefs_1, TimerTypes_1, TimeoutsR), + %% Place next events last in reversed queue + Events_2R = lists:reverse(Events_1, NextEventsR), + %% Enqueue immediate timeout events and start event timer + {NewTimerRefs,NewTimerTypes,Events_3R} = + process_timeout_events( + TimerRefs_2, TimerTypes_2, TimeoutEvents, Events_2R), + NewEvents = lists:reverse(Events_3R), + loop_events( + Parent, NewDebug, S, NewTimerRefs, NewTimerTypes, + NewEvents, Hibernate, NextState, NewData, NewP). + +%% Loop until out of enqueued events +%% +loop_events( + Parent, Debug, S, TimerRefs, TimerTypes, + [] = _Events, Hibernate, State, Data, P) -> + %% Update S and loop back to loop/3 to receive a new event NewS = S#{ state := State, data := Data, postponed := P, hibernate => Hibernate, - timer => Timer, - state_timer => StateTimer}, - loop(Parent, Debug, NewS). + timer_refs => TimerRefs, + timer_types => TimerTypes}, + loop(Parent, Debug, NewS); +loop_events( + Parent, Debug, S, TimerRefs, TimerTypes, + [Event|Events], Hibernate, State, Data, P) -> + %% Update S and continue with enqueued events + NewS = + S#{ + state := State, + data := Data, + postponed := P}, + loop_event( + Parent, Debug, NewS, TimerRefs, TimerTypes, Events, Event, Hibernate). + +%%--------------------------------------------------------------------------- +%% Server loop helpers call_callback_mode(#{module := Module} = S) -> try Module:callback_mode() of @@ -996,6 +1124,7 @@ parse_callback_mode([H|T], CBMode, StateEnter) -> parse_callback_mode(_, _CBMode, StateEnter) -> {undefined,StateEnter}. + call_state_function( #{callback_mode := undefined} = S, Type, Content, State, Data) -> @@ -1061,42 +1190,6 @@ call_state_function( {Class,Reason,erlang:get_stacktrace()} end. -%% Update S and continue -loop_event( - Parent, Debug, S, StateTimer, - Events, Event, State, Data, P, Hibernate) -> - NewS = - S#{ - state := State, - data := Data, - postponed := P}, - loop_event(Parent, Debug, NewS, StateTimer, Events, Event, Hibernate). 
- -loop_event( - Parent, Debug, #{state := State, data := Data} = S, StateTimer, - Events, {Type,Content} = Event, Hibernate) -> - %% - %% If Hibernate is true here it can only be - %% because it was set from an event action - %% and we did not go into hibernation since there - %% were events in queue, so we do what the user - %% might rely on i.e collect garbage which - %% would have happened if we actually hibernated - %% and immediately was awakened - Hibernate andalso garbage_collect(), - case call_state_function(S, Type, Content, State, Data) of - {ok,Result,NewS} -> - {NewData,NextState,Actions} = - parse_event_result( - true, Debug, NewS, Result, - Events, Event, State, Data), - loop_event_actions( - Parent, Debug, S, StateTimer, - Events, Event, NextState, NewData, Actions); - {Class,Reason,Stacktrace} -> - terminate( - Class, Reason, Stacktrace, Debug, S, [Event|Events]) - end. %% Interpret all callback return variants parse_event_result( @@ -1146,32 +1239,32 @@ parse_event_result( Debug, S, [Event|Events]) end. + parse_enter_actions( Debug, S, State, Actions, - Hibernate, Timeout, StateTimeout) -> + Hibernate, TimeoutsR) -> Postpone = forbidden, - NextEvents = forbidden, + NextEventsR = forbidden, parse_actions( Debug, S, State, listify(Actions), - Hibernate, Timeout, StateTimeout, Postpone, NextEvents). + Hibernate, TimeoutsR, Postpone, NextEventsR). parse_actions(Debug, S, State, Actions) -> Hibernate = false, - Timeout = undefined, - StateTimeout = undefined, + TimeoutsR = [], Postpone = false, - NextEvents = [], + NextEventsR = [], parse_actions( Debug, S, State, listify(Actions), - Hibernate, Timeout, StateTimeout, Postpone, NextEvents). + Hibernate, TimeoutsR, Postpone, NextEventsR). %% parse_actions( Debug, _S, _State, [], - Hibernate, Timeout, StateTimeout, Postpone, NextEvents) -> - {ok,Debug,Hibernate,Timeout,StateTimeout,Postpone,NextEvents}; + Hibernate, TimeoutsR, Postpone, NextEventsR) -> + {ok,Debug,Hibernate,TimeoutsR,Postpone,NextEventsR}; parse_actions( Debug, S, State, [Action|Actions], - Hibernate, Timeout, StateTimeout, Postpone, NextEvents) -> + Hibernate, TimeoutsR, Postpone, NextEventsR) -> case Action of %% Actual actions {reply,From,Reply} -> @@ -1180,8 +1273,7 @@ parse_actions( NewDebug = do_reply(Debug, S, State, From, Reply), parse_actions( NewDebug, S, State, Actions, - Hibernate, Timeout, StateTimeout, - Postpone, NextEvents); + Hibernate, TimeoutsR, Postpone, NextEventsR); false -> {error, {bad_action_from_state_function,Action}, @@ -1191,7 +1283,7 @@ parse_actions( {hibernate,NewHibernate} when is_boolean(NewHibernate) -> parse_actions( Debug, S, State, Actions, - NewHibernate, Timeout, StateTimeout, Postpone, NextEvents); + NewHibernate, TimeoutsR, Postpone, NextEventsR); {hibernate,_} -> {error, {bad_action_from_state_function,Action}, @@ -1199,43 +1291,44 @@ parse_actions( hibernate -> parse_actions( Debug, S, State, Actions, - true, Timeout, StateTimeout, Postpone, NextEvents); - {state_timeout,Time,_} = NewStateTimeout + true, TimeoutsR, Postpone, NextEventsR); + {state_timeout,Time,_} = StateTimeout when is_integer(Time), Time >= 0; Time =:= infinity -> parse_actions( Debug, S, State, Actions, - Hibernate, Timeout, NewStateTimeout, Postpone, NextEvents); + Hibernate, [StateTimeout|TimeoutsR], Postpone, NextEventsR); {state_timeout,_,_} -> {error, {bad_action_from_state_function,Action}, ?STACKTRACE()}; - {timeout,infinity,_} -> % Clear timer - it will never trigger + {timeout,infinity,_} -> + %% Ignore - timeout will never happen and already 
cancelled parse_actions( Debug, S, State, Actions, - Hibernate, undefined, StateTimeout, Postpone, NextEvents); - {timeout,Time,_} = NewTimeout when is_integer(Time), Time >= 0 -> + Hibernate, TimeoutsR, Postpone, NextEventsR); + {timeout,Time,_} = Timeout when is_integer(Time), Time >= 0 -> parse_actions( Debug, S, State, Actions, - Hibernate, NewTimeout, StateTimeout, Postpone, NextEvents); + Hibernate, [Timeout|TimeoutsR], Postpone, NextEventsR); {timeout,_,_} -> {error, {bad_action_from_state_function,Action}, ?STACKTRACE()}; - infinity -> % Clear timer - it will never trigger + infinity -> % Ignore - timeout will never happen parse_actions( Debug, S, State, Actions, - Hibernate, undefined, StateTimeout, Postpone, NextEvents); + Hibernate, TimeoutsR, Postpone, NextEventsR); Time when is_integer(Time), Time >= 0 -> - NewTimeout = {timeout,Time,Time}, + Timeout = {timeout,Time,Time}, parse_actions( Debug, S, State, Actions, - Hibernate, NewTimeout, StateTimeout, Postpone, NextEvents); + Hibernate, [Timeout|TimeoutsR], Postpone, NextEventsR); {postpone,NewPostpone} when is_boolean(NewPostpone), Postpone =/= forbidden -> parse_actions( Debug, S, State, Actions, - Hibernate, Timeout, StateTimeout, NewPostpone, NextEvents); + Hibernate, TimeoutsR, NewPostpone, NextEventsR); {postpone,_} -> {error, {bad_action_from_state_function,Action}, @@ -1243,16 +1336,16 @@ parse_actions( postpone when Postpone =/= forbidden -> parse_actions( Debug, S, State, Actions, - Hibernate, Timeout, StateTimeout, true, NextEvents); + Hibernate, TimeoutsR, true, NextEventsR); {next_event,Type,Content} -> case event_type(Type) of - true when NextEvents =/= forbidden -> + true when NextEventsR =/= forbidden -> NewDebug = sys_debug(Debug, S, State, {in,{Type,Content}}), parse_actions( NewDebug, S, State, Actions, - Hibernate, Timeout, StateTimeout, - Postpone, [{Type,Content}|NextEvents]); + Hibernate, TimeoutsR, Postpone, + [{Type,Content}|NextEventsR]); _ -> {error, {bad_action_from_state_function,Action}, @@ -1264,158 +1357,92 @@ parse_actions( ?STACKTRACE()} end. -loop_event_actions( - Parent, Debug, - #{state := State, state_enter := StateEnter} = S, StateTimer, - Events, Event, NextState, NewData, Actions) -> - case parse_actions(Debug, S, State, Actions) of - {ok,NewDebug,Hibernate,Timeout,StateTimeout,Postpone,NextEvents} -> + +%% Stop and start timers as well as create timeout zero events +%% and pending event timer +%% +%% Stop and start timers non-event timers +parse_timers(TimerRefs, TimerTypes, TimeoutsR) -> + parse_timers(TimerRefs, TimerTypes, TimeoutsR, #{}, []). 
+%% +parse_timers(TimerRefs, TimerTypes, [], _Seen, TimeoutEvents) -> + {TimerRefs,TimerTypes,TimeoutEvents}; +parse_timers( + TimerRefs, TimerTypes, [Timeout|TimeoutsR], Seen, TimeoutEvents) -> + {TimerType,Time,TimerMsg} = Timeout, + case Seen of + #{TimerType := _} -> + %% Type seen before - ignore + parse_timers( + TimerRefs, TimerTypes, TimeoutsR, Seen, TimeoutEvents); + #{} -> + %% Unseen type - handle + NewSeen = Seen#{TimerType => true}, + %% Cancel any running timer + {NewTimerRefs,NewTimerTypes} = + cancel_timer_by_type(TimerType, TimerRefs, TimerTypes), if - StateEnter, NextState =/= State -> - loop_event_enter( - Parent, NewDebug, S, StateTimer, - Events, Event, NextState, NewData, - Hibernate, Timeout, StateTimeout, Postpone, NextEvents); - StateEnter -> - case maps:is_key(init_state, S) of - true -> - %% Avoid infinite loop in initial state - %% with state entry events - NewS = maps:remove(init_state, S), - loop_event_enter( - Parent, NewDebug, NewS, StateTimer, - Events, Event, NextState, NewData, - Hibernate, Timeout, StateTimeout, - Postpone, NextEvents); - false -> - loop_event_result( - Parent, NewDebug, S, StateTimer, - Events, Event, NextState, NewData, - Hibernate, Timeout, StateTimeout, - Postpone, NextEvents) - end; + Time =:= infinity -> + %% Ignore - timer will never fire + parse_timers( + NewTimerRefs, NewTimerTypes, TimeoutsR, + NewSeen, TimeoutEvents); + TimerType =:= timeout -> + %% Handle event timer later + parse_timers( + NewTimerRefs, NewTimerTypes, TimeoutsR, + NewSeen, [Timeout|TimeoutEvents]); + Time =:= 0 -> + %% Handle zero time timeouts later + TimeoutEvent = {TimerType,TimerMsg}, + parse_timers( + NewTimerRefs, NewTimerTypes, TimeoutsR, + NewSeen, [TimeoutEvent|TimeoutEvents]); true -> - loop_event_result( - Parent, NewDebug, S, StateTimer, - Events, Event, NextState, NewData, - Hibernate, Timeout, StateTimeout, Postpone, NextEvents) - end; - {Class,Reason,Stacktrace} -> - terminate( - Class, Reason, Stacktrace, - Debug, S#{data := NewData}, [Event|Events]) + %% Start a new timer + TimerRef = erlang:start_timer(Time, self(), TimerMsg), + parse_timers( + NewTimerRefs#{TimerRef => TimerType}, + NewTimerTypes#{TimerType => TimerRef}, + TimeoutsR, NewSeen, TimeoutEvents) + end end. -loop_event_enter( - Parent, Debug, #{state := State} = S, StateTimer, - Events, Event, NextState, NewData, - Hibernate, Timeout, StateTimeout, Postpone, NextEvents) -> - case call_state_function(S, enter, State, NextState, NewData) of - {ok,Result,NewS} -> - {NewerData,_,Actions} = - parse_event_result( - false, Debug, NewS, Result, - Events, Event, NextState, NewData), - loop_event_enter_actions( - Parent, Debug, NewS, StateTimer, - Events, Event, NextState, NewerData, - Hibernate, Timeout, StateTimeout, Postpone, NextEvents, Actions); - {Class,Reason,Stacktrace} -> - terminate( - Class, Reason, Stacktrace, - Debug, S#{state := NextState, data := NewData}, - [Event|Events]) - end. 
+%% Enqueue immediate timeout events and start event timer +process_timeout_events(TimerRefs, TimerTypes, [], EventsR) -> + {TimerRefs, TimerTypes, EventsR}; +process_timeout_events( + TimerRefs, TimerTypes, + [{timeout,0,TimerMsg}|TimeoutEvents], []) -> + %% No enqueued events - insert a timeout zero event + TimeoutEvent = {timeout,TimerMsg}, + process_timeout_events( + TimerRefs, TimerTypes, + TimeoutEvents, [TimeoutEvent]); +process_timeout_events( + TimerRefs, TimerTypes, + [{timeout,Time,TimerMsg}], []) -> + %% No enqueued events - start event timer + TimerRef = erlang:start_timer(Time, self(), TimerMsg), + process_timeout_events( + TimerRefs#{TimerRef => timeout}, TimerTypes#{timeout => TimerRef}, + [], []); +process_timeout_events( + TimerRefs, TimerTypes, + [{timeout,_Time,_TimerMsg}|TimeoutEvents], EventsR) -> + %% There will be some other event so optimize by not starting + %% an event timer to just have to cancel it again + process_timeout_events( + TimerRefs, TimerTypes, + TimeoutEvents, EventsR); +process_timeout_events( + TimerRefs, TimerTypes, + [{_TimeoutType,_TimeoutMsg} = TimeoutEvent|TimeoutEvents], EventsR) -> + process_timeout_events( + TimerRefs, TimerTypes, + TimeoutEvents, [TimeoutEvent|EventsR]). -loop_event_enter_actions( - Parent, Debug, S, StateTimer, - Events, Event, NextState, NewData, - Hibernate, Timeout, StateTimeout, Postpone, NextEvents, Actions) -> - case - parse_enter_actions( - Debug, S, NextState, Actions, - Hibernate, Timeout, StateTimeout) - of - {ok,NewDebug,NewHibernate,NewTimeout,NewStateTimeout,_,_} -> - loop_event_result( - Parent, NewDebug, S, StateTimer, - Events, Event, NextState, NewData, - NewHibernate, NewTimeout, NewStateTimeout, Postpone, NextEvents); - {Class,Reason,Stacktrace} -> - terminate( - Class, Reason, Stacktrace, - Debug, S#{state := NextState, data := NewData}, - [Event|Events]) - end. -loop_event_result( - Parent, Debug, - #{state := State, postponed := P_0} = S, StateTimer, - Events, Event, NextState, NewData, - Hibernate, Timeout, StateTimeout, Postpone, NextEvents) -> - %% - %% All options have been collected and next_events are buffered. - %% Do the actual state transition. - %% - NewStateTimeout = - case StateTimeout of - {state_timeout,Time,_} -> - %% New timeout -> cancel timer - case StateTimer of - {state_timeout,_,_} -> - ok; - _ -> - cancel_timer(StateTimer) - end, - case Time of - infinity -> - undefined; - _ -> - StateTimeout - end; - undefined when NextState =/= State -> - %% State change -> cancel timer - case StateTimer of - {state_timeout,_,_} -> - ok; - _ -> - cancel_timer(StateTimer) - end, - undefined; - undefined -> - StateTimer - end, - %% - P_1 = % Move current event to postponed if Postpone - case Postpone of - true -> - [Event|P_0]; - false -> - P_0 - end, - {Events_1,NewP} = % Move all postponed events to queue if state change - if - NextState =:= State -> - {Events,P_1}; - true -> - {lists:reverse(P_1, Events),[]} - end, - %% Place next events first in queue - NewEvents = lists:reverse(NextEvents, Events_1), - %% - NewDebug = - sys_debug( - Debug, S, State, - case Postpone of - true -> - {postpone,Event,State}; - false -> - {consume,Event,State} - end), - %% - loop_events( - Parent, NewDebug, S, NewStateTimeout, - NewEvents, Timeout, NextState, NewData, NewP, Hibernate). 
%%--------------------------------------------------------------------------- %% Server helpers @@ -1474,16 +1501,20 @@ terminate( sys:print_log(Debug), erlang:raise(C, R, ST) end, - case Reason of - normal -> ok; - shutdown -> ok; - {shutdown,_} -> ok; - _ -> - error_info( - Class, Reason, Stacktrace, S, Q, P, - format_status(terminate, get(), S)), - sys:print_log(Debug) - end, + _ = + case Reason of + normal -> + sys_debug(Debug, S, State, {terminate,Reason}); + shutdown -> + sys_debug(Debug, S, State, {terminate,Reason}); + {shutdown,_} -> + sys_debug(Debug, S, State, {terminate,Reason}); + _ -> + error_info( + Class, Reason, Stacktrace, S, Q, P, + format_status(terminate, get(), S)), + sys:print_log(Debug) + end, case Stacktrace of [] -> erlang:Class(Reason); @@ -1605,8 +1636,19 @@ listify(Item) when is_list(Item) -> listify(Item) -> [Item]. -cancel_timer(undefined) -> - ok; +%% Cancel timer if running, otherwise no op +cancel_timer_by_type(TimerType, TimerRefs, TimerTypes) -> + case TimerTypes of + #{TimerType := TimerRef} -> + cancel_timer(TimerRef), + {maps:remove(TimerRef, TimerRefs), + maps:remove(TimerType, TimerTypes)}; + #{} -> + {TimerRefs,TimerTypes} + end. + +%%cancel_timer(undefined) -> +%% ok; cancel_timer(TRef) -> case erlang:cancel_timer(TRef) of false -> diff --git a/lib/stdlib/src/math.erl b/lib/stdlib/src/math.erl index 1db48cd0a2..3a3b384d8f 100644 --- a/lib/stdlib/src/math.erl +++ b/lib/stdlib/src/math.erl @@ -26,7 +26,8 @@ -export([sin/1, cos/1, tan/1, asin/1, acos/1, atan/1, atan2/2, sinh/1, cosh/1, tanh/1, asinh/1, acosh/1, atanh/1, exp/1, log/1, log2/1, log10/1, pow/2, sqrt/1, erf/1, erfc/1, - ceil/1, floor/1]). + ceil/1, floor/1, + fmod/2]). -spec acos(X) -> float() when X :: number(). @@ -99,6 +100,11 @@ exp(_) -> floor(_) -> erlang:nif_error(undef). +-spec fmod(X, Y) -> float() when + X :: number(), Y :: number(). +fmod(_, _) -> + erlang:nif_error(undef). + -spec log(X) -> float() when X :: number(). log(_) -> diff --git a/lib/stdlib/src/proc_lib.erl b/lib/stdlib/src/proc_lib.erl index 3dc1848550..363705b0f4 100644 --- a/lib/stdlib/src/proc_lib.erl +++ b/lib/stdlib/src/proc_lib.erl @@ -232,7 +232,7 @@ init_p(Parent, Ancestors, Fun) when is_function(Fun) -> Fun() catch Class:Reason -> - exit_p(Class, Reason) + exit_p(Class, Reason, erlang:get_stacktrace()) end. -spec init_p(pid(), [pid()], atom(), atom(), [term()]) -> term(). @@ -247,7 +247,7 @@ init_p_do_apply(M, F, A) -> apply(M, F, A) catch Class:Reason -> - exit_p(Class, Reason) + exit_p(Class, Reason, erlang:get_stacktrace()) end. -spec wake_up(atom(), atom(), [term()]) -> term(). @@ -257,22 +257,29 @@ wake_up(M, F, A) when is_atom(M), is_atom(F), is_list(A) -> apply(M, F, A) catch Class:Reason -> - exit_p(Class, Reason) + exit_p(Class, Reason, erlang:get_stacktrace()) end. -exit_p(Class, Reason) -> +exit_p(Class, Reason, Stacktrace) -> case get('$initial_call') of {M,F,A} when is_atom(M), is_atom(F), is_integer(A) -> MFA = {M,F,make_dummy_args(A, [])}, crash_report(Class, Reason, MFA), - exit(Reason); + erlang:raise(exit, exit_reason(Class, Reason, Stacktrace), Stacktrace); _ -> %% The process dictionary has been cleared or %% possibly modified. crash_report(Class, Reason, []), - exit(Reason) + erlang:raise(exit, exit_reason(Class, Reason, Stacktrace), Stacktrace) end. +exit_reason(error, Reason, Stacktrace) -> + {Reason, Stacktrace}; +exit_reason(exit, Reason, _Stacktrace) -> + Reason; +exit_reason(throw, Reason, Stacktrace) -> + {{nocatch, Reason}, Stacktrace}. 
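Since the stacktrace is now captured and re-raised, the exit reason seen by links and monitors carries the stacktrace for error and throw crashes ({Reason, Stacktrace} and {{nocatch, Reason}, Stacktrace}), while plain exits keep their reason unchanged. A small shell-style sketch (not part of this patch) of what a linked, trapping process would observe after this change:

process_flag(trap_exit, true),
Pid = proc_lib:spawn_link(fun() -> error(boom) end),
receive
    {'EXIT', Pid, {boom, Stacktrace}} when is_list(Stacktrace) ->
        %% error class: exit_reason/3 wraps the reason as {Reason, Stacktrace}
        ok
after 1000 ->
    timeout
end.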
+ -spec start(Module, Function, Args) -> Ret when Module :: module(), Function :: atom(), diff --git a/lib/stdlib/src/proplists.erl b/lib/stdlib/src/proplists.erl index 5356467b19..21de8c45c1 100644 --- a/lib/stdlib/src/proplists.erl +++ b/lib/stdlib/src/proplists.erl @@ -1,8 +1,3 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2001-2016. All Rights Reserved. -%% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. %% You may obtain a copy of the License at @@ -15,14 +10,8 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %% -%% %CopyrightEnd% -%% -%% ===================================================================== -%% Support functions for property lists -%% -%% Copyright (C) 2000-2003 Richard Carlsson -%% --------------------------------------------------------------------- -%% +%% @copyright 2000-2003 Richard Carlsson +%% @author Richard Carlsson <[email protected]> %% @doc Support functions for property lists. %% %% <p>Property lists are ordinary lists containing entries in the form diff --git a/lib/stdlib/src/rand.erl b/lib/stdlib/src/rand.erl index 93409d95df..1f457b9e0e 100644 --- a/lib/stdlib/src/rand.erl +++ b/lib/stdlib/src/rand.erl @@ -19,7 +19,7 @@ %% %% ===================================================================== %% Multiple PRNG module for Erlang/OTP -%% Copyright (c) 2015 Kenji Rikitake +%% Copyright (c) 2015-2016 Kenji Rikitake %% ===================================================================== -module(rand). @@ -27,11 +27,14 @@ -export([seed_s/1, seed_s/2, seed/1, seed/2, export_seed/0, export_seed_s/1, uniform/0, uniform/1, uniform_s/1, uniform_s/2, + jump/0, jump/1, normal/0, normal_s/1 ]). -compile({inline, [exs64_next/1, exsplus_next/1, + exsplus_jump/1, exs1024_next/1, exs1024_calc/2, + exs1024_jump/1, get_52/1, normal_kiwi/1]}). -define(DEFAULT_ALG_HANDLER, exsplus). @@ -48,7 +51,8 @@ max := integer(), next := fun(), uniform := fun(), - uniform_n := fun()}. + uniform_n := fun(), + jump := fun()}. %% Internal state -opaque state() :: {alg_handler(), alg_seed()}. @@ -79,9 +83,7 @@ export_seed_s({#{type:=Alg}, Seed}) -> {Alg, Seed}. -spec seed(AlgOrExpState::alg() | export_state()) -> state(). seed(Alg) -> - R = seed_s(Alg), - _ = seed_put(R), - R. + seed_put(seed_s(Alg)). -spec seed_s(AlgOrExpState::alg() | export_state()) -> state(). seed_s(Alg) when is_atom(Alg) -> @@ -97,9 +99,7 @@ seed_s({Alg0, Seed}) -> -spec seed(Alg :: alg(), {integer(), integer(), integer()}) -> state(). seed(Alg0, S0) -> - State = seed_s(Alg0, S0), - _ = seed_put(State), - State. + seed_put(seed_s(Alg0, S0)). -spec seed_s(Alg :: alg(), {integer(), integer(), integer()}) -> state(). seed_s(Alg0, S0 = {_, _, _}) -> @@ -150,6 +150,25 @@ uniform_s(N, State0 = {#{uniform:=Uniform}, _}) {F, State} = Uniform(State0), {trunc(F * N) + 1, State}. +%% jump/1: given a state, jump/1 +%% returns a new state which is equivalent to that +%% after a large number of call defined for each algorithm. +%% The large number is algorithm dependent. + +-spec jump(state()) -> NewS :: state(). +jump(State = {#{jump:=Jump}, _}) -> + Jump(State). + +%% jump/0: read the internal state and +%% apply the jump function for the state as in jump/1 +%% and write back the new value to the internal state, +%% then returns the new value. + +-spec jump() -> NewS :: state(). + +jump() -> + seed_put(jump(seed_get())). 
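A typical use of the new jump functions is to derive non-overlapping streams for parallel workers: each jump advances the generator far enough (2^64 steps for exsplus, 2^512 for exs1024, per the comments below) that the resulting subsequences do not overlap in practice. A minimal sketch (not part of this patch), using the default exsplus algorithm; make_states/1 and worker_loop/1 are made-up names:

%% Produce N generator states, each one jump apart from the previous one.
make_states(N) when N >= 1 ->
    S0 = rand:seed_s(exsplus),
    lists:foldl(
      fun(_, [S | _] = Acc) -> [rand:jump(S) | Acc] end,
      [S0], lists:seq(2, N)).

%% For example, one independent stream per worker process:
%% [spawn(fun() -> worker_loop(S) end) || S <- make_states(8)].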
+ %% normal/0: returns a random float with standard normal distribution %% updating the state in the process dictionary. @@ -192,9 +211,10 @@ normal_s(State0) -> -type uint64() :: 0..16#ffffffffffffffff. -type uint58() :: 0..16#03ffffffffffffff. --spec seed_put(state()) -> undefined | state(). +-spec seed_put(state()) -> state(). seed_put(Seed) -> - put(?SEED_DICT, Seed). + put(?SEED_DICT, Seed), + Seed. seed_get() -> case get(?SEED_DICT) of @@ -205,15 +225,18 @@ seed_get() -> %% Setup alg record mk_alg(exs64) -> {#{type=>exs64, max=>?UINT64MASK, next=>fun exs64_next/1, - uniform=>fun exs64_uniform/1, uniform_n=>fun exs64_uniform/2}, + uniform=>fun exs64_uniform/1, uniform_n=>fun exs64_uniform/2, + jump=>fun exs64_jump/1}, fun exs64_seed/1}; mk_alg(exsplus) -> {#{type=>exsplus, max=>?UINT58MASK, next=>fun exsplus_next/1, - uniform=>fun exsplus_uniform/1, uniform_n=>fun exsplus_uniform/2}, + uniform=>fun exsplus_uniform/1, uniform_n=>fun exsplus_uniform/2, + jump=>fun exsplus_jump/1}, fun exsplus_seed/1}; mk_alg(exs1024) -> {#{type=>exs1024, max=>?UINT64MASK, next=>fun exs1024_next/1, - uniform=>fun exs1024_uniform/1, uniform_n=>fun exs1024_uniform/2}, + uniform=>fun exs1024_uniform/1, uniform_n=>fun exs1024_uniform/2, + jump=>fun exs1024_jump/1}, fun exs1024_seed/1}. %% ===================================================================== @@ -246,6 +269,9 @@ exs64_uniform(Max, {Alg, R}) -> {V, R1} = exs64_next(R), {(V rem Max) + 1, {Alg, R1}}. +exs64_jump(_) -> + erlang:error(not_implemented). + %% ===================================================================== %% exsplus PRNG: Xorshift116+ %% Algorithm by Sebastiano Vigna @@ -283,6 +309,40 @@ exsplus_uniform(Max, {Alg, R}) -> {V, R1} = exsplus_next(R), {(V rem Max) + 1, {Alg, R1}}. +%% This is the jump function for the exsplus generator, equivalent +%% to 2^64 calls to next/1; it can be used to generate 2^52 +%% non-overlapping subsequences for parallel computations. +%% Note: the jump function takes 116 times of the execution time of +%% next/1. + +%% -define(JUMPCONST, 16#000d174a83e17de2302f8ea6bc32c797). +%% split into 58-bit chunks +%% and two iterative executions + +-define(JUMPCONST1, 16#02f8ea6bc32c797). +-define(JUMPCONST2, 16#345d2a0f85f788c). +-define(JUMPELEMLEN, 58). + +-dialyzer({no_improper_lists, exsplus_jump/1}). +-spec exsplus_jump(state()) -> state(). +exsplus_jump({Alg, S}) -> + {S1, AS1} = exsplus_jump(S, [0|0], ?JUMPCONST1, ?JUMPELEMLEN), + {_, AS2} = exsplus_jump(S1, AS1, ?JUMPCONST2, ?JUMPELEMLEN), + {Alg, AS2}. + +-dialyzer({no_improper_lists, exsplus_jump/4}). +exsplus_jump(S, AS, _, 0) -> + {S, AS}; +exsplus_jump(S, [AS0|AS1], J, N) -> + {_, NS} = exsplus_next(S), + case (J band 1) of + 1 -> + [S0|S1] = S, + exsplus_jump(NS, [(AS0 bxor S0)|(AS1 bxor S1)], J bsr 1, N-1); + 0 -> + exsplus_jump(NS, [AS0|AS1], J bsr 1, N-1) + end. + %% ===================================================================== %% exs1024 PRNG: Xorshift1024* %% Algorithm by Sebastiano Vigna @@ -340,6 +400,60 @@ exs1024_uniform(Max, {Alg, R}) -> {V, R1} = exs1024_next(R), {(V rem Max) + 1, {Alg, R1}}. +%% This is the jump function for the exs1024 generator, equivalent +%% to 2^512 calls to next(); it can be used to generate 2^512 +%% non-overlapping subsequences for parallel computations. +%% Note: the jump function takes ~2000 times of the execution time of +%% next/1. + +%% Jump constant here split into 58 bits for speed +-define(JUMPCONSTHEAD, 16#00242f96eca9c41d). 
+-define(JUMPCONSTTAIL, + [16#0196e1ddbe5a1561, + 16#0239f070b5837a3c, + 16#03f393cc68796cd2, + 16#0248316f404489af, + 16#039a30088bffbac2, + 16#02fea70dc2d9891f, + 16#032ae0d9644caec4, + 16#0313aac17d8efa43, + 16#02f132e055642626, + 16#01ee975283d71c93, + 16#00552321b06f5501, + 16#00c41d10a1e6a569, + 16#019158ecf8aa1e44, + 16#004e9fc949d0b5fc, + 16#0363da172811fdda, + 16#030e38c3b99181f2, + 16#0000000a118038fc]). +-define(JUMPTOTALLEN, 1024). +-define(RINGLEN, 16). + +-spec exs1024_jump(state()) -> state(). + +exs1024_jump({Alg, {L, RL}}) -> + P = length(RL), + AS = exs1024_jump({L, RL}, + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + ?JUMPCONSTTAIL, ?JUMPCONSTHEAD, ?JUMPELEMLEN, ?JUMPTOTALLEN), + {ASL, ASR} = lists:split(?RINGLEN - P, AS), + {Alg, {ASL, lists:reverse(ASR)}}. + +exs1024_jump(_, AS, _, _, _, 0) -> + AS; +exs1024_jump(S, AS, [H|T], _, 0, TN) -> + exs1024_jump(S, AS, T, H, ?JUMPELEMLEN, TN); +exs1024_jump({L, RL}, AS, JL, J, N, TN) -> + {_, NS} = exs1024_next({L, RL}), + case (J band 1) of + 1 -> + AS2 = lists:zipwith(fun(X, Y) -> X bxor Y end, + AS, L ++ lists:reverse(RL)), + exs1024_jump(NS, AS2, JL, J bsr 1, N-1, TN-1); + 0 -> + exs1024_jump(NS, AS, JL, J bsr 1, N-1, TN-1) + end. + %% ===================================================================== %% Ziggurat cont %% ===================================================================== diff --git a/lib/stdlib/src/sets.erl b/lib/stdlib/src/sets.erl index 3e70450320..c65a13b22e 100644 --- a/lib/stdlib/src/sets.erl +++ b/lib/stdlib/src/sets.erl @@ -128,14 +128,14 @@ is_element(E, S) -> Set2 :: set(Element). add_element(E, S0) -> Slot = get_slot(S0, E), - {S1,Ic} = on_bucket(fun (B0) -> add_bkt_el(E, B0, B0) end, S0, Slot), - maybe_expand(S1, Ic). - --spec add_bkt_el(T, [T], [T]) -> {[T], 0 | 1}. -add_bkt_el(E, [E|_], Bkt) -> {Bkt,0}; -add_bkt_el(E, [_|B], Bkt) -> - add_bkt_el(E, B, Bkt); -add_bkt_el(E, [], Bkt) -> {[E|Bkt],1}. + Bkt = get_bucket(S0, Slot), + case lists:member(E, Bkt) of + true -> + S0; + false -> + S1 = update_bucket(S0, Slot, [E | Bkt]), + maybe_expand(S1) + end. %% del_element(Element, Set) -> Set. %% Return Set but with Element removed. @@ -144,15 +144,28 @@ add_bkt_el(E, [], Bkt) -> {[E|Bkt],1}. Set2 :: set(Element). del_element(E, S0) -> Slot = get_slot(S0, E), - {S1,Dc} = on_bucket(fun (B0) -> del_bkt_el(E, B0) end, S0, Slot), - maybe_contract(S1, Dc). + Bkt = get_bucket(S0, Slot), + case lists:member(E, Bkt) of + false -> + S0; + true -> + S1 = update_bucket(S0, Slot, lists:delete(E, Bkt)), + maybe_contract(S1, 1) + end. --spec del_bkt_el(T, [T]) -> {[T], 0 | 1}. -del_bkt_el(E, [E|Bkt]) -> {Bkt,1}; -del_bkt_el(E, [Other|Bkt0]) -> - {Bkt1,Dc} = del_bkt_el(E, Bkt0), - {[Other|Bkt1],Dc}; -del_bkt_el(_, []) -> {[],0}. +%% update_bucket(Set, Slot, NewBucket) -> UpdatedSet. +%% Replace bucket in Slot by NewBucket +-spec update_bucket(Set1, Slot, Bkt) -> Set2 when + Set1 :: set(Element), + Set2 :: set(Element), + Slot :: non_neg_integer(), + Bkt :: [Element]. +update_bucket(Set, Slot, NewBucket) -> + SegI = ((Slot-1) div ?seg_size) + 1, + BktI = ((Slot-1) rem ?seg_size) + 1, + Segs = Set#set.segs, + Seg = element(SegI, Segs), + Set#set{segs = setelement(SegI, Segs, setelement(BktI, Seg, NewBucket))}. %% union(Set1, Set2) -> Set %% Return the union of Set1 and Set2. @@ -272,19 +285,6 @@ get_slot(T, Key) -> -spec get_bucket(set(), non_neg_integer()) -> term(). get_bucket(T, Slot) -> get_bucket_s(T#set.segs, Slot). -%% on_bucket(Fun, Hashdb, Slot) -> {NewHashDb,Result}. 
-%% Apply Fun to the bucket in Slot and replace the returned bucket. --spec on_bucket(fun((_) -> {[_], 0 | 1}), set(E), non_neg_integer()) -> - {set(E), 0 | 1}. -on_bucket(F, T, Slot) -> - SegI = ((Slot-1) div ?seg_size) + 1, - BktI = ((Slot-1) rem ?seg_size) + 1, - Segs = T#set.segs, - Seg = element(SegI, Segs), - B0 = element(BktI, Seg), - {B1, Res} = F(B0), %Op on the bucket. - {T#set{segs = setelement(SegI, Segs, setelement(BktI, Seg, B1))},Res}. - %% fold_set(Fun, Acc, Dictionary) -> Dictionary. %% filter_set(Fun, Dictionary) -> Dictionary. @@ -349,8 +349,8 @@ put_bucket_s(Segs, Slot, Bkt) -> Seg = setelement(BktI, element(SegI, Segs), Bkt), setelement(SegI, Segs, Seg). --spec maybe_expand(set(E), 0 | 1) -> set(E). -maybe_expand(T0, Ic) when T0#set.size + Ic > T0#set.exp_size -> +-spec maybe_expand(set(E)) -> set(E). +maybe_expand(T0) when T0#set.size + 1 > T0#set.exp_size -> T = maybe_expand_segs(T0), %Do we need more segments. N = T#set.n + 1, %Next slot to expand into Segs0 = T#set.segs, @@ -360,12 +360,12 @@ maybe_expand(T0, Ic) when T0#set.size + Ic > T0#set.exp_size -> {B1,B2} = rehash(B, Slot1, Slot2, T#set.maxn), Segs1 = put_bucket_s(Segs0, Slot1, B1), Segs2 = put_bucket_s(Segs1, Slot2, B2), - T#set{size = T#set.size + Ic, + T#set{size = T#set.size + 1, n = N, exp_size = N * ?expand_load, con_size = N * ?contract_load, segs = Segs2}; -maybe_expand(T, Ic) -> T#set{size = T#set.size + Ic}. +maybe_expand(T) -> T#set{size = T#set.size + 1}. -spec maybe_expand_segs(set(E)) -> set(E). maybe_expand_segs(T) when T#set.n =:= T#set.maxn -> diff --git a/lib/stdlib/src/shell_default.erl b/lib/stdlib/src/shell_default.erl index 6947cf181b..cd63ab28b5 100644 --- a/lib/stdlib/src/shell_default.erl +++ b/lib/stdlib/src/shell_default.erl @@ -23,7 +23,7 @@ -module(shell_default). --export([help/0,lc/1,c/1,c/2,nc/1,nl/1,l/1,i/0,pid/3,i/3,m/0,m/1, +-export([help/0,lc/1,c/1,c/2,nc/1,nl/1,l/1,i/0,pid/3,i/3,m/0,m/1,lm/0,mm/0, memory/0,memory/1,uptime/0, erlangrc/1,bi/1, regs/0, flush/0,pwd/0,ls/0,ls/1,cd/1, y/1, y/2, @@ -83,6 +83,8 @@ ls() -> c:ls(). ls(S) -> c:ls(S). m() -> c:m(). m(Mod) -> c:m(Mod). +lm() -> c:lm(). +mm() -> c:mm(). memory() -> c:memory(). memory(Type) -> c:memory(Type). nc(X) -> c:nc(X). diff --git a/lib/stdlib/src/stdlib.app.src b/lib/stdlib/src/stdlib.app.src index 8cf46482dd..82ab484ea6 100644 --- a/lib/stdlib/src/stdlib.app.src +++ b/lib/stdlib/src/stdlib.app.src @@ -31,7 +31,6 @@ dets_server, dets_sup, dets_utils, - dets_v8, dets_v9, dict, digraph, diff --git a/lib/stdlib/src/stdlib.appup.src b/lib/stdlib/src/stdlib.appup.src index e917b7ea1f..979161fef7 100644 --- a/lib/stdlib/src/stdlib.appup.src +++ b/lib/stdlib/src/stdlib.appup.src @@ -18,9 +18,7 @@ %% %CopyrightEnd% {"%VSN%", %% Up from - max one major revision back - [{<<"3\\.[0-1](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-19.* - {<<"2\\.[5-8](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-18.* + [{<<"3\\.[0-1](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-19.* %% Down to - max one major revision back - [{<<"3\\.[0-1](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-19.* - {<<"2\\.[5-8](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-18.* + [{<<"3\\.[0-1](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-19.* }. diff --git a/lib/stdlib/test/base64_SUITE.erl b/lib/stdlib/test/base64_SUITE.erl index 9176a3664a..d0abe5c961 100644 --- a/lib/stdlib/test/base64_SUITE.erl +++ b/lib/stdlib/test/base64_SUITE.erl @@ -23,9 +23,7 @@ -include_lib("common_test/include/ct.hrl"). 
%% Test server specific exports --export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, - init_per_group/2,end_per_group/2, - init_per_testcase/2, end_per_testcase/2]). +-export([all/0, suite/0, groups/0, group/1]). %% Test cases must be exported. -export([base64_encode/1, base64_decode/1, base64_otp_5635/1, @@ -33,41 +31,26 @@ mime_decode_to_string/1, roundtrip_1/1, roundtrip_2/1, roundtrip_3/1, roundtrip_4/1]). -init_per_testcase(_, Config) -> - Config. - -end_per_testcase(_, _Config) -> - ok. - %%------------------------------------------------------------------------- %% Test cases starts here. %%------------------------------------------------------------------------- + suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap,{minutes,4}}]. -all() -> +all() -> [base64_encode, base64_decode, base64_otp_5635, base64_otp_6279, big, illegal, mime_decode, mime_decode_to_string, {group, roundtrip}]. -groups() -> +groups() -> [{roundtrip, [parallel], [roundtrip_1, roundtrip_2, roundtrip_3, roundtrip_4]}]. -init_per_suite(Config) -> - Config. - -end_per_suite(_Config) -> - ok. - -init_per_group(_GroupName, Config) -> - Config. - -end_per_group(_GroupName, Config) -> - Config. - - +group(roundtrip) -> + %% valgrind needs a lot of time + [{timetrap,{minutes,10}}]. %%------------------------------------------------------------------------- %% Test base64:encode/1. @@ -78,9 +61,9 @@ base64_encode(Config) when is_list(Config) -> %% One pad <<"SGVsbG8gV29ybGQ=">> = base64:encode(<<"Hello World">>), %% No pad - "QWxhZGRpbjpvcGVuIHNlc2Ft" = + "QWxhZGRpbjpvcGVuIHNlc2Ft" = base64:encode_to_string("Aladdin:open sesam"), - + "MDEyMzQ1Njc4OSFAIzBeJiooKTs6PD4sLiBbXXt9" = base64:encode_to_string(<<"0123456789!@#0^&*();:<>,. []{}">>), ok. @@ -93,7 +76,7 @@ base64_decode(Config) when is_list(Config) -> %% One pad <<"Hello World">> = base64:decode(<<"SGVsbG8gV29ybGQ=">>), %% No pad - <<"Aladdin:open sesam">> = + <<"Aladdin:open sesam">> = base64:decode("QWxhZGRpbjpvcGVuIHNlc2Ft"), Alphabet = list_to_binary(lists:seq(0, 255)), @@ -208,7 +191,7 @@ mime_decode_to_string(Config) when is_list(Config) -> %% One pad to ignore, followed by more text "Hello World!!" = base64:mime_decode_to_string(<<"SGVsb)(G8gV29ybGQ=h IQ= =">>), %% No pad - "Aladdin:open sesam" = + "Aladdin:open sesam" = base64:mime_decode_to_string("QWxhZGRpbjpvcG¤\")(VuIHNlc2Ft"), %% Encoded base 64 strings may be divided by non base 64 chars. %% In this cases whitespaces. @@ -314,7 +297,7 @@ interleaved_ws_roundtrip_1([], Base64List, Bin, List) -> random_byte_list(0, Acc) -> Acc; -random_byte_list(N, Acc) -> +random_byte_list(N, Acc) -> random_byte_list(N-1, [rand:uniform(255)|Acc]). make_big_binary(N) -> diff --git a/lib/stdlib/test/dets_SUITE.erl b/lib/stdlib/test/dets_SUITE.erl index 8948f496c4..aa31fdde5a 100644 --- a/lib/stdlib/test/dets_SUITE.erl +++ b/lib/stdlib/test/dets_SUITE.erl @@ -35,26 +35,18 @@ -endif. 
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, - init_per_group/2,end_per_group/2, - newly_started/1, basic_v8/1, basic_v9/1, - open_v8/1, open_v9/1, sets_v8/1, sets_v9/1, bags_v8/1, - bags_v9/1, duplicate_bags_v8/1, duplicate_bags_v9/1, - access_v8/1, access_v9/1, dirty_mark/1, dirty_mark2/1, - bag_next_v8/1, bag_next_v9/1, oldbugs_v8/1, oldbugs_v9/1, - unsafe_assumptions/1, truncated_segment_array_v8/1, - truncated_segment_array_v9/1, open_file_v8/1, open_file_v9/1, - init_table_v8/1, init_table_v9/1, repair_v8/1, repair_v9/1, - hash_v8b_v8c/1, phash/1, fold_v8/1, fold_v9/1, fixtable_v8/1, - fixtable_v9/1, match_v8/1, match_v9/1, select_v8/1, - select_v9/1, update_counter/1, badarg/1, cache_sets_v8/1, - cache_sets_v9/1, cache_bags_v8/1, cache_bags_v9/1, - cache_duplicate_bags_v8/1, cache_duplicate_bags_v9/1, + init_per_group/2,end_per_group/2, newly_started/1, basic/1, + open/1, sets/1, bags/1, duplicate_bags/1, access/1, dirty_mark/1, + dirty_mark2/1, bag_next/1, oldbugs/1, + truncated_segment_array/1, open_file/1, init_table/1, repair/1, + phash/1, fold/1, fixtable/1, match/1, select/1, update_counter/1, + badarg/1, cache_sets/1, cache_bags/1, cache_duplicate_bags/1, otp_4208/1, otp_4989/1, many_clients/1, otp_4906/1, otp_5402/1, simultaneous_open/1, insert_new/1, repair_continuation/1, otp_5487/1, otp_6206/1, otp_6359/1, otp_4738/1, otp_7146/1, otp_8070/1, otp_8856/1, otp_8898/1, otp_8899/1, otp_8903/1, otp_8923/1, otp_9282/1, otp_11245/1, otp_11709/1, otp_13229/1, - otp_13260/1]). + otp_13260/1, otp_13830/1]). -export([dets_dirty_loop/0]). @@ -73,8 +65,7 @@ -define(DETS_SERVER, dets). -%% HEADSZ taken from dets_v8.erl and dets_v9.erl. --define(HEADSZ_v8, 40). +%% HEADSZ taken from dets_v9.erl. -define(HEADSZ_v9, (56+28*4+16)). -define(NO_KEYS_POS_v9, 36). -define(CLOSED_PROPERLY_POS, 8). @@ -94,24 +85,16 @@ suite() -> all() -> [ - basic_v8, basic_v9, open_v8, open_v9, sets_v8, sets_v9, - bags_v8, bags_v9, duplicate_bags_v8, duplicate_bags_v9, - newly_started, open_file_v8, open_file_v9, - init_table_v8, init_table_v9, repair_v8, repair_v9, - access_v8, access_v9, oldbugs_v8, oldbugs_v9, - unsafe_assumptions, truncated_segment_array_v8, - truncated_segment_array_v9, dirty_mark, dirty_mark2, - bag_next_v8, bag_next_v9, hash_v8b_v8c, phash, fold_v8, - fold_v9, fixtable_v8, fixtable_v9, match_v8, match_v9, - select_v8, select_v9, update_counter, badarg, - cache_sets_v8, cache_sets_v9, cache_bags_v8, - cache_bags_v9, cache_duplicate_bags_v8, - cache_duplicate_bags_v9, otp_4208, otp_4989, + basic, open, sets, bags, duplicate_bags, newly_started, open_file, + init_table, repair, access, oldbugs, + truncated_segment_array, dirty_mark, dirty_mark2, bag_next, + phash, fold, fixtable, match, select, update_counter, badarg, + cache_sets, cache_bags, cache_duplicate_bags, otp_4208, otp_4989, many_clients, otp_4906, otp_5402, simultaneous_open, insert_new, repair_continuation, otp_5487, otp_6206, otp_6359, otp_4738, otp_7146, otp_8070, otp_8856, otp_8898, otp_8899, otp_8903, otp_8923, otp_9282, otp_11245, otp_11709, - otp_13229, otp_13260 + otp_13229, otp_13260, otp_13830 ]. groups() -> @@ -137,20 +120,12 @@ newly_started(Config) when is_list(Config) -> test_server:stop_node(Node), ok. -%% Basic test case. -basic_v8(Config) when is_list(Config) -> - basic(Config, 8). - -%% Basic test case. -basic_v9(Config) when is_list(Config) -> - basic(Config, 9). 
- -basic(Config, Version) -> +basic(Config) when is_list(Config) -> Tab = dets_basic_test, FName = filename(Tab, Config), P0 = pps(), - {ok, _} = dets:open_file(Tab,[{file, FName},{version,Version}]), + {ok, _} = dets:open_file(Tab,[{file, FName}]), ok = dets:insert(Tab,{mazda,japan}), ok = dets:insert(Tab,{toyota,japan}), ok = dets:insert(Tab,{suzuki,japan}), @@ -174,13 +149,7 @@ basic(Config, Version) -> ok. -open_v8(Config) when is_list(Config) -> - open(Config, 8). - -open_v9(Config) when is_list(Config) -> - open(Config, 9). - -open(Config, Version) -> +open(Config) when is_list(Config) -> %% Running this test twice means that the Dets server is restarted %% twice. dets_sup specifies a maximum of 4 restarts in an hour. %% If this becomes a problem, one should consider running this @@ -194,14 +163,14 @@ open(Config, Version) -> Data = make_data(1), P0 = pps(), - Tabs = open_files(1, All, Version), + Tabs = open_files(1, All), initialize(Tabs, Data), check(Tabs, Data), foreach(fun(Tab) -> ok = dets:close(Tab) end, Tabs), %% Now reopen the files ?format("Reopening closed files \n", []), - Tabs = open_files(1, All, Version), + Tabs = open_files(1, All), ?format("Checking contents of reopened files \n", []), check(Tabs, Data), %% crash the dets server @@ -216,7 +185,7 @@ open(Config, Version) -> %% Now reopen the files again ?format("Reopening crashed files \n", []), - open_files(1, All, Version), + open_files(1, All), ?format("Checking contents of repaired files \n", []), check(Tabs, Data), @@ -266,20 +235,13 @@ bad(_Tab, _Item) -> exit(badtab). %% Perform traversal and match testing on set type dets tables. -sets_v8(Config) when is_list(Config) -> - sets(Config, 8). - -%% Perform traversal and match testing on set type dets tables. -sets_v9(Config) when is_list(Config) -> - sets(Config, 9). - -sets(Config, Version) -> +sets(Config) when is_list(Config) -> {Sets, _, _} = args(Config), Data = make_data(1), delete_files(Sets), P0 = pps(), - Tabs = open_files(1, Sets, Version), + Tabs = open_files(1, Sets), Bigger = [{17,q,w,w}, {48,q,w,w,w,w,w,w}], % 48 requires a bigger buddy initialize(Tabs, Data++Bigger++Data), % overwrite Len = length(Data), @@ -302,19 +264,12 @@ sets(Config, Version) -> ok. %% Perform traversal and match testing on bag type dets tables. -bags_v8(Config) when is_list(Config) -> - bags(Config, 8). - -%% Perform traversal and match testing on bag type dets tables. -bags_v9(Config) when is_list(Config) -> - bags(Config, 9). - -bags(Config, Version) -> +bags(Config) when is_list(Config) -> {_, Bags, _} = args(Config), Data = make_data(1, bag), %% gives twice as many objects delete_files(Bags), P0 = pps(), - Tabs = open_files(1, Bags, Version), + Tabs = open_files(1, Bags), initialize(Tabs, Data++Data), Len = length(Data), foreach(fun(Tab) -> trav_test(Data, Len, Tab) end, Tabs), @@ -336,19 +291,12 @@ bags(Config, Version) -> %% Perform traversal and match testing on duplicate_bag type dets tables. -duplicate_bags_v8(Config) when is_list(Config) -> - duplicate_bags(Config, 8). - -%% Perform traversal and match testing on duplicate_bag type dets tables. -duplicate_bags_v9(Config) when is_list(Config) -> - duplicate_bags(Config, 9). 
- -duplicate_bags(Config, Version) when is_list(Config) -> +duplicate_bags(Config) when is_list(Config) -> {_, _, Dups} = args(Config), Data = make_data(1, duplicate_bag), %% gives twice as many objects delete_files(Dups), P0 = pps(), - Tabs = open_files(1, Dups, Version), + Tabs = open_files(1, Dups), initialize(Tabs, Data), Len = length(Data), foreach(fun(Tab) -> trav_test(Data, Len, Tab) end, Tabs), @@ -369,13 +317,7 @@ duplicate_bags(Config, Version) when is_list(Config) -> ok. -access_v8(Config) when is_list(Config) -> - access(Config, 8). - -access_v9(Config) when is_list(Config) -> - access(Config, 9). - -access(Config, Version) -> +access(Config) when is_list(Config) -> Args_acc = [[{ram_file, true}, {access, read}], [{access, read}]], Args = [[{ram_file, true}], @@ -388,9 +330,9 @@ access(Config, Version) -> P0 = pps(), {error, {file_error,_,enoent}} = dets:open_file('1', hd(Args_acc_1)), - Tabs = open_files(1, Args_1, Version), + Tabs = open_files(1, Args_1), close_all(Tabs), - Tabs = open_files(1, Args_acc_1, Version), + Tabs = open_files(1, Args_acc_1), foreach(fun(Tab) -> {error, {access_mode,_}} = dets:insert(Tab, {1,2}), @@ -522,16 +464,12 @@ dets_dirty_loop() -> %% Check that bags and next work as expected. -bag_next_v8(Config) when is_list(Config) -> - bag_next(Config, 8). - -%% Check that bags and next work as expected. -bag_next_v9(Config) when is_list(Config) -> +bag_next(Config) when is_list(Config) -> Tab = dets_bag_next_test, FName = filename(Tab, Config), %% first and next crash upon error - dets:open_file(Tab,[{file, FName}, {type, bag},{version,9}]), + dets:open_file(Tab,[{file, FName}, {type, bag}]), ok = dets:insert(Tab, [{1,1},{2,2},{3,3},{4,4}]), FirstKey = dets:first(Tab), NextKey = dets:next(Tab, FirstKey), @@ -548,13 +486,8 @@ bag_next_v9(Config) when is_list(Config) -> dets:close(Tab), file:delete(FName), - bag_next(Config, 9). - -bag_next(Config, Version) -> - Tab = dets_bag_next_test, - FName = filename(Tab, Config), P0 = pps(), - dets:open_file(Tab,[{file, FName}, {type, bag},{version,Version}]), + dets:open_file(Tab,[{file, FName}, {type, bag}]), dets:insert(Tab,{698,hopp}), dets:insert(Tab,{186,hopp}), dets:insert(Tab,{hej,hopp}), @@ -578,17 +511,10 @@ bag_next(Config, Version) -> check_pps(P0), ok. -oldbugs_v8(Config) when is_list(Config) -> - oldbugs(Config, 8). - -oldbugs_v9(Config) when is_list(Config) -> - oldbugs(Config, 9). - -oldbugs(Config, Version) -> +oldbugs(Config) when is_list(Config) -> FName = filename(dets_suite_oldbugs_test, Config), P0 = pps(), - {ok, ob} = dets:open_file(ob, [{version, Version}, - {type, bag}, {file, FName}]), + {ok, ob} = dets:open_file(ob, [{type, bag}, {file, FName}]), ok = dets:insert(ob, {1, 2}), ok = dets:insert(ob, {1,3}), ok = dets:insert(ob, {1, 2}), @@ -598,56 +524,19 @@ oldbugs(Config, Version) -> check_pps(P0), ok. -%% Test that shrinking an object and then expanding it works. 
-unsafe_assumptions(Config) when is_list(Config) -> - FName = filename(dets_suite_unsafe_assumptions_test, Config), - file:delete(FName), - P0 = pps(), - {ok, a} = dets:open_file(a, [{version,8},{file, FName}]), - O0 = {2,false}, - O1 = {1, false}, - O2 = {1, true}, - O3 = {1, duplicate(20,false)}, - O4 = {1, duplicate(25,false)}, % same 2-log as O3 - ok = dets:insert(a, O1), - ok = dets:insert(a, O0), - true = [O1,O0] =:= sort(get_all_objects(a)), - true = [O1,O0] =:= sort(get_all_objects_fast(a)), - ok = dets:insert(a, O2), - true = [O2,O0] =:= sort(get_all_objects(a)), - true = [O2,O0] =:= sort(get_all_objects_fast(a)), - ok = dets:insert(a, O3), - true = [O3,O0] =:= sort(get_all_objects(a)), - true = [O3,O0] =:= sort(get_all_objects_fast(a)), - ok = dets:insert(a, O4), - true = [O4,O0] =:= sort(get_all_objects(a)), - true = [O4,O0] =:= sort(get_all_objects_fast(a)), - ok = dets:close(a), - file:delete(FName), - check_pps(P0), - ok. - -%% Test that a file where the segment array has been truncated -%% is possible to repair. -truncated_segment_array_v8(Config) when is_list(Config) -> - trunc_seg_array(Config, 8). - %% Test that a file where the segment array has been truncated %% is possible to repair. -truncated_segment_array_v9(Config) when is_list(Config) -> - trunc_seg_array(Config, 9). - -trunc_seg_array(Config, V) -> +truncated_segment_array(Config) when is_list(Config) -> TabRef = dets_suite_truncated_segment_array_test, Fname = filename(TabRef, Config), %% Create file that needs to be repaired file:delete(Fname), P0 = pps(), - {ok, TabRef} = dets:open_file(TabRef, [{file, Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file, Fname}]), ok = dets:close(TabRef), %% Truncate the file - HeadSize = headsz(V), + HeadSize = headsz(), truncate(Fname, HeadSize + 10), %% Open the truncated file @@ -660,19 +549,13 @@ trunc_seg_array(Config, V) -> ok. %% Test open_file/1. -open_file_v8(Config) when is_list(Config) -> - open_1(Config, 8). - -%% Test open_file/1. -open_file_v9(Config) when is_list(Config) -> +open_file(Config) when is_list(Config) -> T = open_v9, Fname = filename(T, Config), - {ok, _} = dets:open_file(T, [{file,Fname},{version,9}]), - 9 = dets:info(T, version), + {ok, _} = dets:open_file(T, [{file,Fname}]), + 9 = dets:info(T, version), % Backwards compatibility. true = [self()] =:= dets:info(T, users), - {ok, _} = dets:open_file(T, [{file,Fname},{version,9}]), - {error,incompatible_arguments} = - dets:open_file(T, [{file,Fname},{version,8}]), + {ok, _} = dets:open_file(T, [{file,Fname}]), true = [self(),self()] =:= dets:info(T, users), ok = dets:close(T), true = [self()] =:= dets:info(T, users), @@ -680,9 +563,9 @@ open_file_v9(Config) when is_list(Config) -> undefined = ets:info(T, users), file:delete(Fname), - open_1(Config, 9). + open_1(Config). 
-open_1(Config, V) -> +open_1(Config) -> TabRef = open_file_1_test, Fname = filename(TabRef, Config), file:delete(Fname), @@ -694,8 +577,8 @@ open_1(Config, V) -> {error,{not_a_dets_file,Fname}} = dets:open_file(Fname), file:delete(Fname), - HeadSize = headsz(V), - {ok, TabRef} = dets:open_file(TabRef, [{file, Fname},{version,V}]), + HeadSize = headsz(), + {ok, TabRef} = dets:open_file(TabRef, [{file, Fname}]), ok = dets:close(TabRef), truncate(Fname, HeadSize + 10), true = dets:is_dets_file(Fname), @@ -705,7 +588,7 @@ open_1(Config, V) -> file:delete(Fname), %% truncated file header, invalid type - {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), ok = ins(TabRef, 3000), ok = dets:close(TabRef), TypePos = 12, @@ -714,7 +597,7 @@ open_1(Config, V) -> truncate(Fname, HeadSize - 10), {error,{not_a_dets_file,Fname}} = dets:open_file(Fname), {error,{not_a_dets_file,Fname}} = - dets:open_file(TabRef, [{file,Fname},{version,V}]), + dets:open_file(TabRef, [{file,Fname}]), file:delete(Fname), {error,{file_error,{foo,bar},_}} = dets:is_dets_file({foo,bar}), @@ -722,35 +605,30 @@ open_1(Config, V) -> ok. %% Test initialize_table/2 and from_ets/2. -init_table_v8(Config) when is_list(Config) -> - init_table(Config, 8). - -%% Test initialize_table/2 and from_ets/2. -init_table_v9(Config) when is_list(Config) -> +init_table(Config) when is_list(Config) -> %% Objects are returned in "time order". T = init_table_v9, Fname = filename(T, Config), file:delete(Fname), L = [{1,a},{2,b},{1,c},{2,c},{1,c},{2,a},{1,b}], Input = init([L]), - {ok, _} = dets:open_file(T, [{file,Fname},{version,9}, - {type,duplicate_bag}]), + {ok, _} = dets:open_file(T, [{file,Fname},{type,duplicate_bag}]), ok = dets:init_table(T, Input), [{1,a},{1,c},{1,c},{1,b}] = dets:lookup(T, 1), [{2,b},{2,c},{2,a}] = dets:lookup(T, 2), ok = dets:close(T), file:delete(Fname), - init_table(Config, 9), + init_table_1(Config), fast_init_table(Config). -init_table(Config, V) -> +init_table_1(Config) -> TabRef = init_table_test, Fname = filename(TabRef, Config), file:delete(Fname), P0 = pps(), - Args = [{file,Fname},{version,V},{auto_save,120000}], + Args = [{file,Fname},{auto_save,120000}], {ok, _} = dets:open_file(TabRef, Args), {'EXIT', _} = (catch dets:init_table(TabRef, fun(foo) -> bar end)), @@ -800,13 +678,13 @@ init_table(Config, V) -> file:delete(Fname), L1 = [[{1,a},{2,b}],[],[{3,c}],[{4,d}],[]], - bulk_init(L1, set, 4, Config, V), + bulk_init(L1, set, 4, Config), L2 = [[{1,a},{2,b}],[],[{2,q},{3,c}],[{4,d}],[{4,e},{2,q}]], - bulk_init(L2, set, 4, Config, V), - bulk_init(L2, bag, 6, Config, V), - bulk_init(L2, duplicate_bag, 7, Config, V), - bulk_init(L1, set, 4, 512, Config, V), - bulk_init([], set, 0, 10000, Config, V), + bulk_init(L2, set, 4, Config), + bulk_init(L2, bag, 6, Config), + bulk_init(L2, duplicate_bag, 7, Config), + bulk_init(L1, set, 4, 512, Config), + bulk_init([], set, 0, 10000, Config), file:delete(Fname), %% Initiate a file that contains a lot of objects. @@ -834,16 +712,16 @@ init_table(Config, V) -> check_pps(P0), ok. -bulk_init(Ls, Type, N, Config, V) -> - bulk_init(Ls, Type, N, 256, Config, V). +bulk_init(Ls, Type, N, Config) -> + bulk_init(Ls, Type, N, 256, Config). 
-bulk_init(Ls, Type, N, Est, Config, V) -> +bulk_init(Ls, Type, N, Est, Config) -> T = init_table_test, Fname = filename(T, Config), file:delete(Fname), Input = init(Ls), Args = [{ram_file,false}, {type,Type},{keypos,1},{file,Fname}, - {estimated_no_objects, Est},{version,V}], + {estimated_no_objects, Est}], {ok, T} = dets:open_file(T, Args), ok = dets:init_table(T, Input), All = sort(get_all_objects(T)), @@ -882,18 +760,17 @@ init_fun(I, N) -> end. fast_init_table(Config) -> - V = 9, TabRef = init_table_test, Fname = filename(TabRef, Config), file:delete(Fname), P0 = pps(), - Args = [{file,Fname},{version,V},{auto_save,120000}], + Args = [{file,Fname},{auto_save,120000}], Source = init_table_test_source, SourceFname = filename(Source, Config), file:delete(SourceFname), - SourceArgs = [{file,SourceFname},{version,V},{auto_save,120000}], + SourceArgs = [{file,SourceFname},{auto_save,120000}], {ok, Source} = dets:open_file(Source, SourceArgs), @@ -1015,13 +892,13 @@ fast_init_table(Config) -> file:delete(SourceFname), L1 = [{1,a},{2,b},{3,c},{4,d}], - fast_bulk_init(L1, set, 4, 4, Config, V), + fast_bulk_init(L1, set, 4, 4, Config), L2 = [{1,a},{2,b},{2,q},{3,c},{4,d},{4,e},{2,q}], - fast_bulk_init(L2, set, 4, 4, Config, V), - fast_bulk_init(L2, bag, 6, 4, Config, V), - fast_bulk_init(L2, duplicate_bag, 7, 4, Config, V), - fast_bulk_init(L1, set, 4, 4, 512, Config, V), - fast_bulk_init([], set, 0, 0, 10000, Config, V), + fast_bulk_init(L2, set, 4, 4, Config), + fast_bulk_init(L2, bag, 6, 4, Config), + fast_bulk_init(L2, duplicate_bag, 7, 4, Config), + fast_bulk_init(L1, set, 4, 4, 512, Config), + fast_bulk_init([], set, 0, 0, 10000, Config), file:delete(Fname), %% Initiate a file that contains a lot of objects. @@ -1112,16 +989,16 @@ fast_init_table(Config) -> check_pps(P0), ok. -fast_bulk_init(L, Type, N, NoKeys, Config, V) -> - fast_bulk_init(L, Type, N, NoKeys, 256, Config, V). +fast_bulk_init(L, Type, N, NoKeys, Config) -> + fast_bulk_init(L, Type, N, NoKeys, 256, Config). -fast_bulk_init(L, Type, N, NoKeys, Est, Config, V) -> +fast_bulk_init(L, Type, N, NoKeys, Est, Config) -> T = init_table_test, Fname = filename(T, Config), file:delete(Fname), Args0 = [{ram_file,false}, {type,Type},{keypos,1}, - {estimated_no_objects, Est},{version,V}], + {estimated_no_objects, Est}], Args = [{file,Fname} | Args0], S = init_table_test_source, SFname = filename(S, Config), @@ -1189,35 +1066,7 @@ items(I, N, C, L) -> items(I+1, N, C-1, [{I, item(I)} | L]). %% Test open_file and repair. -repair_v8(Config) when is_list(Config) -> - repair(Config, 8). - -%% Test open_file and repair. -repair_v9(Config) when is_list(Config) -> - %% Convert from format 9 to format 8. 
- T = convert_98, - Fname = filename(T, Config), - file:delete(Fname), - {ok, _} = dets:open_file(T, [{file,Fname},{version,9}, - {type,duplicate_bag}]), - 9 = dets:info(T, version), - true = is_binary(dets:info(T, bchunk_format)), - ok = dets:insert(T, [{1,a},{2,b},{1,c},{2,c},{1,c},{2,a},{1,b}]), - dets:close(T), - {error, {version_mismatch, _}} = - dets:open_file(T, [{file,Fname},{version,8},{type,duplicate_bag}]), - {ok, _} = dets:open_file(T, [{file,Fname},{version,8}, - {type,duplicate_bag},{repair,force}]), - 8 = dets:info(T, version), - true = undefined =:= dets:info(T, bchunk_format), - [{1,a},{1,b},{1,c},{1,c}] = sort(dets:lookup(T, 1)), - [{2,a},{2,b},{2,c}] = sort(dets:lookup(T, 2)), - 7 = dets:info(T, no_objects), - no_keys_test(T), - _ = histogram(T, silent), - ok = dets:close(T), - file:delete(Fname), - +repair(Config) when is_list(Config) -> %% The short lived format 9(a). %% Not very throughly tested here. A9 = a9, @@ -1238,13 +1087,13 @@ repair_v9(Config) when is_list(Config) -> ok = dets:close(A9), file:delete(Version9aT), - repair(Config, 9). + repair_1(Config). -repair(Config, V) -> +repair_1(Config) -> TabRef = repair_test, Fname = filename(TabRef, Config), file:delete(Fname), - HeadSize = headsz(V), + HeadSize = headsz(), P0 = pps(), {'EXIT', {badarg, _}} = @@ -1255,7 +1104,7 @@ repair(Config, V) -> dets:open_file(TabRef, [{file, Fname}, {access, read}]), %% compacting, and some kind of test that free lists are saved OK on file - {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), 0 = dets:info(TabRef, size), ok = ins(TabRef, 30000), ok = del(TabRef, 30000, 3), @@ -1268,38 +1117,20 @@ repair(Config, V) -> 20000 = count_objects_quite_fast(Ref3), % actually a test of match no_keys_test(Ref3), ok = dets:close(Ref3), - if - V =:= 8 -> - {ok, TabRef} = dets:open_file(TabRef, - [{file, Fname},{version,V},{access,read}]), - ok = dets:close(TabRef), - io:format("Expect compacting repair:~n"), - {ok, TabRef} = dets:open_file(TabRef, - [{file, Fname},{version,V}]), - 20000 = dets:info(TabRef, size), - _ = histogram(TabRef, silent), - ok = dets:close(TabRef); - true -> - ok - end, {error,{keypos_mismatch,Fname}} = dets:open_file(TabRef, [{file, Fname},{keypos,17}]), {error,{type_mismatch,Fname}} = dets:open_file(TabRef, [{file, Fname},{type,duplicate_bag}]), %% make one of the temporary files unwritable - TmpFile = if - V =:= 8 -> - Fname ++ ".TMP.10000"; - true -> Fname ++ ".TMP.1" - end, + TmpFile = Fname ++ ".TMP.1", file:delete(TmpFile), {ok, TmpFd} = file:open(TmpFile, [read,write]), ok = file:close(TmpFd), unwritable(TmpFile), - {error,{file_error,TmpFile,eacces}} = dets:fsck(Fname, V), + {error,{file_error,TmpFile,eacces}} = dets:fsck(Fname), {ok, _} = dets:open_file(TabRef, - [{repair,false},{file, Fname},{version,V}]), + [{repair,false},{file, Fname}]), 20000 = length(get_all_objects(TabRef)), _ = histogram(TabRef, silent), 20000 = length(get_all_objects_fast(TabRef)), @@ -1318,68 +1149,15 @@ repair(Config, V) -> file:delete(Fname), %% truncated file header - {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), ok = ins(TabRef, 100), ok = dets:close(TabRef), file:delete(Fname), - %% version bump (v8) - Version7S = filename:join(?datadir(Config), "version_r2d.dets"), - Version7T = filename('v2.dets', Config), - {ok, _} = file:copy(Version7S, Version7T), - {error,{version_bump, Version7T}} = dets:open_file(Version7T), - 
{error,{version_bump, Version7T}} = - dets:open_file(Version7T, [{file,Version7T},{repair,false}]), - {error,{version_bump, Version7T}} = - dets:open_file(Version7T, [{file, Version7T}, {access, read}]), - io:format("Expect upgrade:~n"), - {ok, _} = dets:open_file(Version7T, - [{file, Version7T},{version, V}]), - [{1,a},{2,b}] = sort(get_all_objects(Version7T)), - [{1,a},{2,b}] = sort(get_all_objects_fast(Version7T)), - Phash = if - V =:= 8 -> phash; - true -> phash2 - end, - Phash = dets:info(Version7T, hash), - _ = histogram(Version7T, silent), - ok = dets:close(Version7T), - {ok, _} = dets:open_file(Version7T, [{file, Version7T}]), - Phash = dets:info(Version7T, hash), - ok = dets:close(Version7T), - file:delete(Version7T), - - %% converting free lists - Version8aS = filename:join(?datadir(Config), "version_r3b02.dets"), - Version8aT = filename('v3.dets', Config), - {ok, _} = file:copy(Version8aS, Version8aT), - %% min_no_slots and max_no_slots are ignored - no repair is taking place - {ok, _} = dets:open_file(version_8a, - [{file, Version8aT},{min_no_slots,1000}, - {max_no_slots,100000}]), - [{1,b},{2,a},{a,1},{b,2}] = sort(get_all_objects(version_8a)), - [{1,b},{2,a},{a,1},{b,2}] = sort(get_all_objects_fast(version_8a)), - ok = ins(version_8a, 1000), - 1002 = dets:info(version_8a, size), - no_keys_test(version_8a), - All8a = sort(get_all_objects(version_8a)), - 1002 = length(All8a), - FAll8a = sort(get_all_objects_fast(version_8a)), - true = sort(All8a) =:= sort(FAll8a), - ok = del(version_8a, 300, 3), - 902 = dets:info(version_8a, size), - no_keys_test(version_8a), - All8a2 = sort(get_all_objects(version_8a)), - 902 = length(All8a2), - FAll8a2 = sort(get_all_objects_fast(version_8a)), - true = sort(All8a2) =:= sort(FAll8a2), - _ = histogram(version_8a, silent), - ok = dets:close(version_8a), - file:delete(Version8aT), - + %% FIXME. 
%% will fail unless the slots are properly sorted when repairing (v8) BArgs = [{file, Fname},{type,duplicate_bag}, - {delayed_write,{3000,10000}},{version,V}], + {delayed_write,{3000,10000}}], {ok, TabRef} = dets:open_file(TabRef, BArgs), Seq = seq(1, 500), Small = map(fun(X) -> {X,X} end, Seq), @@ -1393,18 +1171,14 @@ repair(Config, V) -> io:format("Expect forced repair:~n"), {ok, _} = dets:open_file(TabRef, [{repair,force},{min_no_slots,2000} | BArgs]), - if - V =:= 9 -> - {MinNoSlots,_,MaxNoSlots} = dets:info(TabRef, no_slots), - ok = dets:close(TabRef), - io:format("Expect compaction:~n"), - {ok, _} = - dets:open_file(TabRef, [{repair,force}, - {min_no_slots,MinNoSlots}, - {max_no_slots,MaxNoSlots} | BArgs]); - true -> - ok - end, + + {MinNoSlots,_,MaxNoSlots} = dets:info(TabRef, no_slots), + ok = dets:close(TabRef), + io:format("Expect compaction:~n"), + {ok, _} = + dets:open_file(TabRef, [{repair,force}, + {min_no_slots,MinNoSlots}, + {max_no_slots,MaxNoSlots} | BArgs]), All2 = get_all_objects(TabRef), true = All =:= sort(All2), FAll2 = get_all_objects_fast(TabRef), @@ -1418,35 +1192,15 @@ repair(Config, V) -> file:delete(Fname), %% object bigger than segments, the "hole" is taken care of - {ok, TabRef} = dets:open_file(TabRef, [{file, Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file, Fname}]), Tuple = erlang:make_tuple(1000, foobar), % > 2 kB ok = dets:insert(TabRef, Tuple), %% at least one full segment (objects smaller than 2 kB): ins(TabRef, 2000), ok = dets:close(TabRef), - if - V =:= 8 -> - %% first estimated number of objects is wrong, repair once more - {ok, Fd} = file:open(Fname, [read,write]), - NoPos = HeadSize - 8, % no_objects - file:pwrite(Fd, NoPos, <<0:32>>), % NoItems - ok = file:close(Fd), - dets:fsck(Fname, V), - {ok, _} = - dets:open_file(TabRef, - [{repair,false},{file, Fname},{version,V}]), - 2001 = length(get_all_objects(TabRef)), - _ = histogram(TabRef, silent), - 2001 = length(get_all_objects_fast(TabRef)), - ok = dets:close(TabRef); - true -> - ok - end, - {ok, _} = - dets:open_file(TabRef, - [{repair,false},{file, Fname},{version,V}]), + dets:open_file(TabRef, [{repair,false},{file, Fname}]), {ok, ObjPos} = dets:where(TabRef, {66,{item,number,66}}), ok = dets:close(TabRef), %% Damaged object. @@ -1454,25 +1208,24 @@ repair(Config, V) -> crash(Fname, ObjPos+Pos), io:format( "Expect forced repair (possibly after attempted compaction):~n"), - {ok, _} = - dets:open_file(TabRef, [{repair,force},{file, Fname},{version,V}]), + {ok, _} = dets:open_file(TabRef, [{repair,force},{file, Fname}]), true = dets:info(TabRef, size) < 2001, ok = dets:close(TabRef), file:delete(Fname), %% The file is smaller than the padded object. - {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), ok = dets:insert(TabRef, Tuple), ok = dets:close(TabRef), io:format("Expect forced repair or compaction:~n"), {ok, _} = - dets:open_file(TabRef, [{repair,force},{file, Fname},{version,V}]), + dets:open_file(TabRef, [{repair,force},{file, Fname}]), true = 1 =:= dets:info(TabRef, size), ok = dets:close(TabRef), file:delete(Fname), %% Damaged free lists. 
- {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), ok = ins(TabRef, 300), ok = dets:sync(TabRef), ok = del(TabRef, 300, 3), @@ -1481,48 +1234,42 @@ repair(Config, V) -> ok = dets:close(TabRef), crash(Fname, FileSize+20), %% Used to return bad_freelists, but that changed in OTP-9622 - {ok, TabRef} = - dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), ok = dets:close(TabRef), file:delete(Fname), %% File not closed, opening with read and read_write access tried. - {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), ok = ins(TabRef, 300), ok = dets:close(TabRef), crash(Fname, ?CLOSED_PROPERLY_POS+3, ?NOT_PROPERLY_CLOSED), {error, {not_closed, Fname}} = - dets:open_file(foo, [{file,Fname},{version,V},{repair,force}, + dets:open_file(foo, [{file,Fname},{repair,force}, {access,read}]), {error, {not_closed, Fname}} = - dets:open_file(foo, [{file,Fname},{version,V},{repair,true}, + dets:open_file(foo, [{file,Fname},{repair,true}, {access,read}]), io:format("Expect repair:~n"), {ok, TabRef} = - dets:open_file(TabRef, [{file,Fname},{version,V},{repair,true}, + dets:open_file(TabRef, [{file,Fname},{repair,true}, {access,read_write}]), ok = dets:close(TabRef), crash(Fname, ?CLOSED_PROPERLY_POS+3, ?NOT_PROPERLY_CLOSED), io:format("Expect forced repair:~n"), {ok, TabRef} = - dets:open_file(TabRef, [{file,Fname},{version,V},{repair,force}, + dets:open_file(TabRef, [{file,Fname},{repair,force}, {access,read_write}]), ok = dets:close(TabRef), file:delete(Fname), %% The size of an object is huge. - {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{version,V}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname}]), ok = dets:insert(TabRef, [{1,2,3},{2,3,4}]), {ok, ObjPos2} = dets:where(TabRef, {1,2,3}), ok = dets:close(TabRef), - ObjPos3 = if - V =:= 8 -> ObjPos2 + 4; - V =:= 9 -> ObjPos2 - end, - crash(Fname, ObjPos3, 255), + crash(Fname, ObjPos2, 255), io:format("Expect forced repair:~n"), - {ok, TabRef} = - dets:open_file(TabRef, [{file,Fname},{version,V},{repair,force}]), + {ok, TabRef} = dets:open_file(TabRef, [{file,Fname},{repair,force}]), ok = dets:close(TabRef), file:delete(Fname), @@ -1530,82 +1277,6 @@ repair(Config, V) -> ok. -%% Test the use of different hashing algorithms in v8b and v8c of the -%% Dets file format. -hash_v8b_v8c(Config) when is_list(Config) -> - Source = - filename:join(?datadir(Config), "dets_test_v8b.dets"), - %% Little endian version of old file (there is an endianess bug in - %% the old hash). This is all about version 8 of the dets file format. 
- - P0 = pps(), - SourceLE = - filename:join(?datadir(Config), - "dets_test_v8b_little_endian.dets"), - Target1 = filename('oldhash1.dets', Config), - Target1LE = filename('oldhash1le.dets', Config), - Target2 = filename('oldhash2.dets', Config), - {ok, Bin} = file:read_file(Source), - {ok, BinLE} = file:read_file(SourceLE), - ok = file:write_file(Target1,Bin), - ok = file:write_file(Target1LE,BinLE), - ok = file:write_file(Target2,Bin), - {ok, d1} = dets:open_file(d1,[{file,Target1}]), - {ok, d1le} = dets:open_file(d1le,[{file,Target1LE}]), - {ok, d2} = dets:open_file(d2,[{file,Target2},{repair,force}, - {version,8}]), - FF = fun(N,_F,_T) when N > 16#FFFFFFFFFFFFFFFF -> - ok; - (N,F,T) -> - V = integer_to_list(N), - case dets:lookup(T,N) of - [{N,V}] -> - F(N*2,F,T); - _Error -> - exit({failed,{lookup,T,N}}) - end - end, - Mess = case (catch FF(1,FF,d1)) of - {'EXIT', {failed, {lookup,_,_}}} -> - ok = dets:close(d1), - FF(1,FF,d1le), - hash = dets:info(d1le,hash), - dets:insert(d1le,{33333333333,hejsan}), - [{33333333333,hejsan}] = - dets:lookup(d1le,33333333333), - ok = dets:close(d1le), - {ok, d1le} = dets:open_file(d1le, - [{file,Target1LE}]), - [{33333333333,hejsan}] = - dets:lookup(d1le,33333333333), - FF(1,FF,d1le), - ok = dets:close(d1le), - "Seems to be a little endian machine"; - {'EXIT', Fault} -> - exit(Fault); - _ -> - ok = dets:close(d1le), - hash = dets:info(d1,hash), - dets:insert(d1,{33333333333,hejsan}), - [{33333333333,hejsan}] = - dets:lookup(d1,33333333333), - ok = dets:close(d1), - {ok, d1} = dets:open_file(d1,[{file,Target1}]), - [{33333333333,hejsan}] = - dets:lookup(d1,33333333333), - FF(1,FF,d1), - ok = dets:close(d1), - "Seems to be a big endian machine" - end, - FF(1,FF,d2), - phash = dets:info(d2,hash), - ok = dets:close(d2), - file:delete(Target1), - file:delete(Target1LE), - file:delete(Target2), - check_pps(P0), - {comment, Mess}. - %% Test version 9(b) with erlang:phash/2 as hash function. phash(Config) when is_list(Config) -> T = phash, @@ -1643,9 +1314,10 @@ phash(Config) when is_list(Config) -> ok = dets:close(T), %% One cannot use the bchunk format when copying between a phash - %% table and a phash2 table. (There is no test for the case an R9 - %% (or later) node (using phash2) copies a table to an R8 node - %% (using phash).) See also the comment on HASH_PARMS in dets_v9.erl. + %% table and a phash2 table. (There is no test for the case an + %% Erlang/OTP R9 (or later) node (using phash2) copies a table to + %% an Erlang/OTP R8 node (using phash).) See also the comment on + %% HASH_PARMS in dets_v9.erl. {ok, _} = file:copy(Phash_v9bS, Fname), {ok, T} = dets:open_file(T, [{file, Fname}]), Type = dets:info(T, type), @@ -1653,7 +1325,7 @@ phash(Config) when is_list(Config) -> Input = init_bchunk(T), T2 = phash_table, Fname2 = filename(T2, Config), - Args = [{type,Type},{keypos,KeyPos},{version,9},{file,Fname2}], + Args = [{type,Type},{keypos,KeyPos},{file,Fname2}], {ok, T2} = dets:open_file(T2, Args), {error, {init_fun, _}} = dets:init_table(T2, Input, {format,bchunk}), @@ -1665,21 +1337,14 @@ phash(Config) when is_list(Config) -> ok. %% Test foldl, foldr, to_ets. -fold_v8(Config) when is_list(Config) -> - fold(Config, 8). - -%% Test foldl, foldr, to_ets. -fold_v9(Config) when is_list(Config) -> - fold(Config, 9). 
- -fold(Config, Version) -> +fold(Config) when is_list(Config) -> T = test_table, N = 100, Fname = filename(T, Config), file:delete(Fname), P0 = pps(), - Args = [{version, Version}, {file,Fname}, {estimated_no_objects, N}], + Args = [{file,Fname}, {estimated_no_objects, N}], {ok, _} = dets:open_file(T, Args), ok = ins(T, N), @@ -1721,10 +1386,7 @@ fold(Config, Version) -> ok = dets:close(T), %% Damaged object. - Pos = if - Version =:= 8 -> 12; - Version =:= 9 -> 8 - end, + Pos = 8, crash(Fname, ObjPos+Pos), {ok, _} = dets:open_file(T, Args), io:format("Expect corrupt table:~n"), @@ -1738,18 +1400,11 @@ fold(Config, Version) -> ok. %% Add objects to a fixed table. -fixtable_v8(Config) when is_list(Config) -> - fixtable(Config, 8). - -%% Add objects to a fixed table. -fixtable_v9(Config) when is_list(Config) -> - fixtable(Config, 9). - -fixtable(Config, Version) when is_list(Config) -> +fixtable(Config) when is_list(Config) -> T = fixtable, Fname = filename(fixtable, Config), file:delete(Fname), - Args = [{version,Version},{file,Fname}], + Args = [{file,Fname}], P0 = pps(), {ok, _} = dets:open_file(T, Args), @@ -1832,21 +1487,13 @@ fixtable(Config, Version) when is_list(Config) -> ok. %% Matching objects of a fixed table. -match_v8(Config) when is_list(Config) -> - match(Config, 8). - -%% Matching objects of a fixed table. -match_v9(Config) when is_list(Config) -> - match(Config, 9). - -match(Config, Version) -> +match(Config) when is_list(Config) -> T = match, Fname = filename(match, Config), file:delete(Fname), P0 = pps(), - Args = [{version, Version}, {file,Fname}, {type, duplicate_bag}, - {estimated_no_objects,550}], + Args = [{file,Fname}, {type, duplicate_bag}, {estimated_no_objects,550}], {ok, _} = dets:open_file(T, Args), ok = dets:insert(T, {1, a, b}), ok = dets:insert(T, {1, b, a}), @@ -1901,7 +1548,7 @@ match(Config, Version) -> {_, TmpCont} = dets:match_object(T, '_', 200), {_, TmpCont1} = dets:match_object(TmpCont), {TTL, _} = dets:match_object(TmpCont1), - DI = if Version =:= 8 -> last(TTL); Version =:= 9 -> hd(TTL) end, + DI = hd(TTL), dets:safe_fixtable(T, true), {L1, C20} = dets:match_object(T, '_', 200), true = 200 =< length(L1), @@ -1957,8 +1604,7 @@ match(Config, Version) -> ok = dets:close(T), %% Damaged size of object. - %% In v8, there is a next pointer before the size. - CrashPos = if Version =:= 8 -> 5; Version =:= 9 -> 1 end, + CrashPos = 1, crash(Fname, ObjPos2+CrashPos), {ok, _} = dets:open_file(T, Args), case dets:insert_new(T, Obj) of % OTP-12024 @@ -1986,7 +1632,7 @@ match(Config, Version) -> ok = dets:close(T), %% match_delete finds an error - CrashPos3 = if Version =:= 8 -> 12; Version =:= 9 -> 16 end, + CrashPos3 = 16, crash(Fname, ObjPos3+CrashPos3), {ok, _} = dets:open_file(T, Args), bad_object(dets:match_delete(T, Spec), Fname), @@ -2008,21 +1654,13 @@ match(Config, Version) -> ok. %% Selecting objects of a fixed table. -select_v8(Config) when is_list(Config) -> - select(Config, 8). - -%% Selecting objects of a fixed table. -select_v9(Config) when is_list(Config) -> - select(Config, 9). 
- -select(Config, Version) -> +select(Config) when is_list(Config) -> T = select, Fname = filename(select, Config), file:delete(Fname), P0 = pps(), - Args = [{version,Version}, {file,Fname}, {type, duplicate_bag}, - {estimated_no_objects,550}], + Args = [{file,Fname}, {type, duplicate_bag},{estimated_no_objects,550}], {ok, _} = dets:open_file(T, Args), ok = dets:insert(T, {1, a, b}), ok = dets:insert(T, {1, b, a}), @@ -2074,7 +1712,7 @@ select(Config, Version) -> {_, TmpCont} = dets:match_object(T, '_', 200), {_, TmpCont1} = dets:match_object(TmpCont), {TTL, _} = dets:match_object(TmpCont1), - DI = if Version =:= 8 -> last(TTL); Version =:= 9 -> hd(TTL) end, + DI = hd(TTL), dets:safe_fixtable(T, true), {L1, C20} = dets:select(T, AllSpec, 200), true = 200 =< length(L1), @@ -2281,28 +1919,21 @@ badarg(Config) when is_list(Config) -> ok. %% Test the write cache for sets. -cache_sets_v8(Config) when is_list(Config) -> - cache_sets(Config, 8). - -%% Test the write cache for sets. -cache_sets_v9(Config) when is_list(Config) -> - cache_sets(Config, 9). - -cache_sets(Config, Version) -> +cache_sets(Config) when is_list(Config) -> Small = 2, - cache_sets(Config, {0,0}, false, Small, Version), - cache_sets(Config, {0,0}, true, Small, Version), - cache_sets(Config, {5000,5000}, false, Small, Version), - cache_sets(Config, {5000,5000}, true, Small, Version), + cache_sets(Config, {0,0}, false, Small), + cache_sets(Config, {0,0}, true, Small), + cache_sets(Config, {5000,5000}, false, Small), + cache_sets(Config, {5000,5000}, true, Small), %% Objects of size greater than 2 kB. Big = 1200, - cache_sets(Config, {0,0}, false, Big, Version), - cache_sets(Config, {0,0}, true, Big, Version), - cache_sets(Config, {5000,5000}, false, Big, Version), - cache_sets(Config, {5000,5000}, true, Big, Version), + cache_sets(Config, {0,0}, false, Big), + cache_sets(Config, {0,0}, true, Big), + cache_sets(Config, {5000,5000}, false, Big), + cache_sets(Config, {5000,5000}, true, Big), ok. -cache_sets(Config, DelayedWrite, Extra, Sz, Version) -> +cache_sets(Config, DelayedWrite, Extra, Sz) -> %% Extra = bool(). Insert tuples until the tested key is not alone. %% Sz = integer(). Size of the inserted tuples. @@ -2311,9 +1942,8 @@ cache_sets(Config, DelayedWrite, Extra, Sz, Version) -> file:delete(Fname), P0 = pps(), - {ok, _} = - dets:open_file(T,[{version, Version}, {file,Fname}, {type,set}, - {delayed_write, DelayedWrite}]), + {ok, _} = dets:open_file(T,[{file,Fname}, {type,set}, + {delayed_write, DelayedWrite}]), Dups = 1, {Key, OtherKeys} = @@ -2430,28 +2060,21 @@ cache_sets(Config, DelayedWrite, Extra, Sz, Version) -> ok. %% Test the write cache for bags. -cache_bags_v8(Config) when is_list(Config) -> - cache_bags(Config, 8). - -%% Test the write cache for bags. -cache_bags_v9(Config) when is_list(Config) -> - cache_bags(Config, 9). - -cache_bags(Config, Version) -> +cache_bags(Config) when is_list(Config) -> Small = 2, - cache_bags(Config, {0,0}, false, Small, Version), - cache_bags(Config, {0,0}, true, Small, Version), - cache_bags(Config, {5000,5000}, false, Small, Version), - cache_bags(Config, {5000,5000}, true, Small, Version), + cache_bags(Config, {0,0}, false, Small), + cache_bags(Config, {0,0}, true, Small), + cache_bags(Config, {5000,5000}, false, Small), + cache_bags(Config, {5000,5000}, true, Small), %% Objects of size greater than 2 kB. 
Big = 1200, - cache_bags(Config, {0,0}, false, Big, Version), - cache_bags(Config, {0,0}, true, Big, Version), - cache_bags(Config, {5000,5000}, false, Big, Version), - cache_bags(Config, {5000,5000}, true, Big, Version), + cache_bags(Config, {0,0}, false, Big), + cache_bags(Config, {0,0}, true, Big), + cache_bags(Config, {5000,5000}, false, Big), + cache_bags(Config, {5000,5000}, true, Big), ok. -cache_bags(Config, DelayedWrite, Extra, Sz, Version) -> +cache_bags(Config, DelayedWrite, Extra, Sz) -> %% Extra = bool(). Insert tuples until the tested key is not alone. %% Sz = integer(). Size of the inserted tuples. @@ -2460,9 +2083,8 @@ cache_bags(Config, DelayedWrite, Extra, Sz, Version) -> file:delete(Fname), P0 = pps(), - {ok, _} = - dets:open_file(T,[{version, Version}, {file,Fname}, {type,bag}, - {delayed_write, DelayedWrite}]), + {ok, _} = dets:open_file(T,[{file,Fname}, {type,bag}, + {delayed_write, DelayedWrite}]), Dups = 1, {Key, OtherKeys} = @@ -2588,8 +2210,7 @@ cache_bags(Config, DelayedWrite, Extra, Sz, Version) -> R1 = {index_test,1,2,3,4}, R2 = {index_test,2,2,13,14}, R3 = {index_test,1,12,13,14}, - {ok, _} = dets:open_file(T,[{version,Version},{type,bag}, - {keypos,2},{file,Fname}]), + {ok, _} = dets:open_file(T,[{type,bag}, {keypos,2},{file,Fname}]), ok = dets:insert(T,R1), ok = dets:sync(T), ok = dets:insert(T,R2), @@ -2606,27 +2227,20 @@ cache_bags(Config, DelayedWrite, Extra, Sz, Version) -> ok. %% Test the write cache for duplicate bags. -cache_duplicate_bags_v8(Config) when is_list(Config) -> - cache_duplicate_bags(Config, 8). - -%% Test the write cache for duplicate bags. -cache_duplicate_bags_v9(Config) when is_list(Config) -> - cache_duplicate_bags(Config, 9). - -cache_duplicate_bags(Config, Version) -> +cache_duplicate_bags(Config) when is_list(Config) -> Small = 2, - cache_dup_bags(Config, {0,0}, false, Small, Version), - cache_dup_bags(Config, {0,0}, true, Small, Version), - cache_dup_bags(Config, {5000,5000}, false, Small, Version), - cache_dup_bags(Config, {5000,5000}, true, Small, Version), + cache_dup_bags(Config, {0,0}, false, Small), + cache_dup_bags(Config, {0,0}, true, Small), + cache_dup_bags(Config, {5000,5000}, false, Small), + cache_dup_bags(Config, {5000,5000}, true, Small), %% Objects of size greater than 2 kB. Big = 1200, - cache_dup_bags(Config, {0,0}, false, Big, Version), - cache_dup_bags(Config, {0,0}, true, Big, Version), - cache_dup_bags(Config, {5000,5000}, false, Big, Version), - cache_dup_bags(Config, {5000,5000}, true, Big, Version). + cache_dup_bags(Config, {0,0}, false, Big), + cache_dup_bags(Config, {0,0}, true, Big), + cache_dup_bags(Config, {5000,5000}, false, Big), + cache_dup_bags(Config, {5000,5000}, true, Big). -cache_dup_bags(Config, DelayedWrite, Extra, Sz, Version) -> +cache_dup_bags(Config, DelayedWrite, Extra, Sz) -> %% Extra = bool(). Insert tuples until the tested key is not alone. %% Sz = integer(). Size of the inserted tuples. 
@@ -2635,10 +2249,8 @@ cache_dup_bags(Config, DelayedWrite, Extra, Sz, Version) -> file:delete(Fname), P0 = pps(), - {ok, _} = - dets:open_file(T,[{version, Version}, {file,Fname}, - {type,duplicate_bag}, - {delayed_write, DelayedWrite}]), + {ok, _} = dets:open_file(T,[{file,Fname}, {type,duplicate_bag}, + {delayed_write, DelayedWrite}]), Dups = 2, {Key, OtherKeys} = @@ -2869,7 +2481,7 @@ otp_8899(Config) when is_list(Config) -> Server = self(), file:delete(FName), - {ok, _} = dets:open_file(Tab,[{file, FName},{version,9}]), + {ok, _} = dets:open_file(Tab,[{file, FName}]), [P1,P2,P3,P4] = new_clients(4, Tab), MC = [Tab], @@ -2895,7 +2507,7 @@ many_clients(Config) when is_list(Config) -> file:delete(FName), P0 = pps(), - {ok, _} = dets:open_file(Tab,[{file, FName},{version,9}]), + {ok, _} = dets:open_file(Tab,[{file, FName}]), [P1,P2,P3,P4] = new_clients(4, Tab), %% dets:init_table/2 is used for making sure that all processes @@ -2954,14 +2566,14 @@ many_clients(Config) when is_list(Config) -> file:delete(FName), %% Check that errors are handled correctly by the streaming operators. - {ok, _} = dets:open_file(Tab,[{file, FName},{version,9}]), + {ok, _} = dets:open_file(Tab,[{file, FName}]), ok = ins(Tab, 100), Obj = {66,{item,number,66}}, {ok, ObjPos} = dets:where(Tab, Obj), ok = dets:close(Tab), %% Damaged object. crash(FName, ObjPos+12), - {ok, _} = dets:open_file(Tab,[{file, FName},{version,9}]), + {ok, _} = dets:open_file(Tab,[{file, FName}]), BadObject1 = dets:lookup_keys(Tab, [65,66,67,68,69]), bad_object(BadObject1, FName), _Error = dets:close(Tab), @@ -3415,18 +3027,13 @@ repair_continuation(Config) -> %% OTP-5487. Growth of read-only table (again). otp_5487(Config) -> - otp_5487(Config, 9), - otp_5487(Config, 8), - ok. - -otp_5487(Config, Version) -> Tab = otp_5487, Fname = filename(otp_5487, Config), file:delete(Fname), Ets = ets:new(otp_5487, [public, set]), lists:foreach(fun(I) -> ets:insert(Ets, {I,I+1}) end, lists:seq(0,1000)), - {ok, _} = dets:open_file(Tab, [{file,Fname},{version,Version}]), + {ok, _} = dets:open_file(Tab, [{file,Fname}]), ok = dets:from_ets(Tab, Ets), ok = dets:sync(Tab), ok = dets:close(Tab), @@ -3470,14 +3077,12 @@ otp_6359(Config) -> %% OTP-4738. ==/2 and =:=/2. otp_4738(Config) -> - %% Version 8 has not been corrected. - %% (The constant -12857447 is for version 9 only.) - otp_4738_set(9, Config), - otp_4738_bag(9, Config), - otp_4738_dupbag(9, Config), + otp_4738_set(Config), + otp_4738_bag(Config), + otp_4738_dupbag(Config), ok. -otp_4738_dupbag(Version, Config) -> +otp_4738_dupbag(Config) -> Tab = otp_4738, File = filename(Tab, Config), file:delete(File), @@ -3485,7 +3090,7 @@ otp_4738_dupbag(Version, Config) -> F = float(I), One = 1, FOne = float(One), - Args = [{file,File},{type,duplicate_bag},{version,Version}], + Args = [{file,File},{type,duplicate_bag}], {ok, Tab} = dets:open_file(Tab, Args), ok = dets:insert(Tab, [{I,One},{F,One},{I,FOne},{F,FOne}]), ok = dets:sync(Tab), @@ -3530,7 +3135,7 @@ otp_4738_dupbag(Version, Config) -> file:delete(File), ok. 
-otp_4738_bag(Version, Config) -> +otp_4738_bag(Config) -> Tab = otp_4738, File = filename(Tab, Config), file:delete(File), @@ -3538,7 +3143,7 @@ otp_4738_bag(Version, Config) -> F = float(I), One = 1, FOne = float(One), - Args = [{file,File},{type,bag},{version,Version}], + Args = [{file,File},{type,bag}], {ok, Tab} = dets:open_file(Tab, Args), ok = dets:insert(Tab, [{I,One},{F,One},{I,FOne},{F,FOne}]), ok = dets:sync(Tab), @@ -3561,11 +3166,11 @@ otp_4738_bag(Version, Config) -> ok = dets:close(Tab), file:delete(File). -otp_4738_set(Version, Config) -> +otp_4738_set(Config) -> Tab = otp_4738, File = filename(Tab, Config), file:delete(File), - Args = [{file,File},{type,set},{version,Version}], + Args = [{file,File},{type,set}], %% I and F share the same slot. I = -12857447, @@ -3864,6 +3469,19 @@ wait_for_close(Tab) -> wait_for_close(Tab) end. +%% OTP-13830. Format 8 is no longer supported. +otp_13830(Config) -> + Tab = otp_13830, + File8 = filename:join(?datadir(Config), "version_8.dets"), + {error,{format_8_no_longer_supported,_}} = + dets:open_file(Tab, [{file, File8}]), + File = filename(Tab, Config), + %% Check the 'version' option, for backwards compatibility: + {ok, Tab} = dets:open_file(Tab, [{file, File}, {version, 9}]), + ok = dets:close(Tab), + {ok, Tab} = dets:open_file(Tab, [{file, File}, {version, default}]), + ok = dets:close(Tab). + %% %% Parts common to several test cases %% @@ -4000,9 +3618,7 @@ match_test(Data, Tab) -> %% Utilities %% -headsz(8) -> - ?HEADSZ_v8; -headsz(_) -> +headsz() -> ?HEADSZ_v9. unwritable(Fname) -> @@ -4030,13 +3646,13 @@ filename(Name, Config) when is_atom(Name) -> filename(Name, _Config) -> filename:join(?privdir(_Config), Name). -open_files(_Name, [], _Version) -> +open_files(_Name, []) -> []; -open_files(Name0, [Args | Tail], Version) -> +open_files(Name0, [Args | Tail]) -> ?format("init ~p~n", [Args]), Name = list_to_atom(integer_to_list(Name0)), - {ok, Name} = dets:open_file(Name, [{version,Version} | Args]), - [Name | open_files(Name0+1, Tail, Version)]. + {ok, Name} = dets:open_file(Name, Args), + [Name | open_files(Name0+1, Tail)]. close_all(Tabs) -> foreach(fun(Tab) -> ok = dets:close(Tab) end, Tabs). @@ -4137,20 +3753,15 @@ no_keys_test([T | Ts]) -> no_keys_test([]) -> ok; no_keys_test(T) -> - case dets:info(T, version) of - 8 -> - ok; - 9 -> - Kp = dets:info(T, keypos), - All = dets:match_object(T, '_'), - L = lists:map(fun(X) -> element(Kp, X) end, All), - NoKeys = length(lists:usort(L)), - case {dets:info(T, no_keys), NoKeys} of - {N, N} -> - ok; - {N1, N2} -> - exit({no_keys_test, N1, N2}) - end + Kp = dets:info(T, keypos), + All = dets:match_object(T, '_'), + L = lists:map(fun(X) -> element(Kp, X) end, All), + NoKeys = length(lists:usort(L)), + case {dets:info(T, no_keys), NoKeys} of + {N, N} -> + ok; + {N1, N2} -> + exit({no_keys_test, N1, N2}) end. safe_get_all_objects(Tab) -> @@ -4182,7 +3793,6 @@ count_objs_1({Ts,C}, N) when is_list(Ts) -> get_all_objects_fast(Tab) -> dets:match_object(Tab, '_'). -%% Relevant for version 8. histogram(Tab) -> OnePercent = case dets:info(Tab, no_slots) of undefined -> undefined; @@ -4244,10 +3854,6 @@ ave_histogram([{S,N1} | H], N) -> ave_histogram([], N) -> N. -bad_object({error,{bad_object,FileName}}, FileName) -> - ok; % Version 8, no debug. -bad_object({error,{{bad_object,_,_},FileName}}, FileName) -> - ok; % Version 8, debug... bad_object({error,{{bad_object,_}, FileName}}, FileName) -> ok; % No debug. 
bad_object({error,{{{bad_object,_,_},_,_,_}, FileName}}, FileName) -> diff --git a/lib/stdlib/test/dets_SUITE_data/dets_test_v8b.dets b/lib/stdlib/test/dets_SUITE_data/dets_test_v8b.dets Binary files differdeleted file mode 100644 index d0aa20fe06..0000000000 --- a/lib/stdlib/test/dets_SUITE_data/dets_test_v8b.dets +++ /dev/null diff --git a/lib/stdlib/test/dets_SUITE_data/dets_test_v8b_little_endian.dets b/lib/stdlib/test/dets_SUITE_data/dets_test_v8b_little_endian.dets Binary files differdeleted file mode 100644 index bf490afa1a..0000000000 --- a/lib/stdlib/test/dets_SUITE_data/dets_test_v8b_little_endian.dets +++ /dev/null diff --git a/lib/stdlib/test/dets_SUITE_data/version_r2d.dets b/lib/stdlib/test/dets_SUITE_data/version_8.dets Binary files differindex 327072f99e..278187e85c 100644 --- a/lib/stdlib/test/dets_SUITE_data/version_r2d.dets +++ b/lib/stdlib/test/dets_SUITE_data/version_8.dets diff --git a/lib/stdlib/test/dets_SUITE_data/version_r3b02.dets b/lib/stdlib/test/dets_SUITE_data/version_r3b02.dets Binary files differdeleted file mode 100644 index 058cd15b31..0000000000 --- a/lib/stdlib/test/dets_SUITE_data/version_r3b02.dets +++ /dev/null diff --git a/lib/stdlib/test/error_logger_h_SUITE.erl b/lib/stdlib/test/error_logger_h_SUITE.erl index 2a34c7764f..30f96e0522 100644 --- a/lib/stdlib/test/error_logger_h_SUITE.erl +++ b/lib/stdlib/test/error_logger_h_SUITE.erl @@ -297,13 +297,13 @@ match_format(Tag, [Format,Args], [Head|Lines], AtNode, Depth) -> iolist_to_binary(S) end, Expected0 = binary:split(Bin, <<"\n">>, [global,trim]), - Expected = Expected0 ++ AtNode, + Expected = AtNode ++ Expected0, match_term_lines(Expected, Lines). match_term(Tag, [Arg], [Head|Lines], AtNode, Depth) -> match_head(Tag, Head), Expected0 = match_term_get_expected(Arg, Depth), - Expected = Expected0 ++ AtNode, + Expected = AtNode ++ Expected0, match_term_lines(Expected, Lines). match_term_get_expected(List, Depth) when is_list(List) -> diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index 9a14c7014c..f68d5eca3f 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -19,7 +19,7 @@ %% -module(ets_SUITE). --export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, +-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2]). -export([default/1,setbag/1,badnew/1,verybadnew/1,named/1,keypos2/1, privacy/1,privacy_owner/2]). @@ -31,15 +31,14 @@ -export([match_delete3/1]). -export([firstnext/1,firstnext_concurrent/1]). -export([slot/1]). --export([ match1/1, match2/1, match_object/1, match_object2/1]). --export([ dups/1, misc1/1, safe_fixtable/1, info/1, tab2list/1]). --export([ tab2file/1, tab2file2/1, tabfile_ext1/1, - tabfile_ext2/1, tabfile_ext3/1, tabfile_ext4/1, badfile/1]). --export([ heavy_lookup/1, heavy_lookup_element/1, heavy_concurrent/1]). --export([ lookup_element_mult/1]). --export([]). +-export([match1/1, match2/1, match_object/1, match_object2/1]). +-export([dups/1, misc1/1, safe_fixtable/1, info/1, tab2list/1]). +-export([tab2file/1, tab2file2/1, tabfile_ext1/1, + tabfile_ext2/1, tabfile_ext3/1, tabfile_ext4/1, badfile/1]). +-export([heavy_lookup/1, heavy_lookup_element/1, heavy_concurrent/1]). +-export([lookup_element_mult/1]). -export([foldl_ordered/1, foldr_ordered/1, foldl/1, foldr/1, fold_empty/1]). 
--export([t_delete_object/1, t_init_table/1, t_whitebox/1, +-export([t_delete_object/1, t_init_table/1, t_whitebox/1, t_delete_all_objects/1, t_insert_list/1, t_test_ms/1, t_select_delete/1,t_ets_dets/1]). @@ -61,8 +60,7 @@ -export([otp_7665/1]). -export([meta_wb/1]). -export([grow_shrink/1, grow_pseudo_deleted/1, shrink_pseudo_deleted/1]). --export([ - meta_lookup_unnamed_read/1, meta_lookup_unnamed_write/1, +-export([meta_lookup_unnamed_read/1, meta_lookup_unnamed_write/1, meta_lookup_named_read/1, meta_lookup_named_write/1, meta_newdel_unnamed/1, meta_newdel_named/1]). -export([smp_insert/1, smp_fixed_delete/1, smp_unfix_fix/1, smp_select_delete/1, @@ -95,7 +93,7 @@ rename_do/1, rename_unnamed_do/1, interface_equality_do/1, ordered_match_do/1, ordered_do/1, privacy_do/1, empty_do/1, badinsert_do/1, time_lookup_do/1, lookup_order_do/1, lookup_element_mult_do/1, delete_tab_do/1, delete_elem_do/1, - match_delete_do/1, match_delete3_do/1, firstnext_do/1, + match_delete_do/1, match_delete3_do/1, firstnext_do/1, slot_do/1, match1_do/1, match2_do/1, match_object_do/1, match_object2_do/1, misc1_do/1, safe_fixtable_do/1, info_do/1, dups_do/1, heavy_lookup_do/1, heavy_lookup_element_do/1, member_do/1, otp_5340_do/1, otp_7665_do/1, meta_wb_do/1, @@ -129,7 +127,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap,{minutes,5}}]. -all() -> +all() -> [{group, new}, {group, insert}, {group, lookup}, {group, delete}, firstnext, firstnext_concurrent, slot, {group, match}, t_match_spec_run, @@ -161,7 +159,7 @@ all() -> memory_check_summary]. % MUST BE LAST -groups() -> +groups() -> [{new, [], [default, setbag, badnew, verybadnew, named, keypos2, privacy]}, @@ -249,6 +247,7 @@ t_bucket_disappears_do(Opts) -> %% Check ets:match_spec_run/2. t_match_spec_run(Config) when is_list(Config) -> + ct:timetrap({minutes,30}), %% valgrind needs a lot init_externals(), EtsMem = etsmem(), @@ -703,7 +702,7 @@ adjust_xmem([_T1,_T2,_T3,_T4], {A0,B0,C0,D0} = _Mem0, EstCnt) -> {A0+TabSz, B0+HTabSz, C0+HTabSz, D0+HTabSz}. %% Misc. whitebox tests -t_whitebox(Config) when is_list(Config) -> +t_whitebox(Config) when is_list(Config) -> EtsMem = etsmem(), repeat_for_opts(whitebox_1), repeat_for_opts(whitebox_1), @@ -1044,6 +1043,7 @@ do_reverse_chunked({L,C},Acc) -> %% Test the ets:select_delete/2 and ets:select_count/2 BIFs. t_select_delete(Config) when is_list(Config) -> + ct:timetrap({minutes,30}), %% valgrind needs a lot EtsMem = etsmem(), Tables = fill_sets_int(10000) ++ fill_sets_int(10000,[{write_concurrency,true}]), lists:foreach @@ -1489,15 +1489,15 @@ update_element(Config) when is_list(Config) -> verify_etsmem(EtsMem). update_element_opts(Opts) -> - TupleCases = [{{key,val}, 1 ,2}, - {{val,key}, 2, 1}, - {{key,val}, 1 ,[2]}, + TupleCases = [{{key,val}, 1 ,2}, + {{val,key}, 2, 1}, + {{key,val}, 1 ,[2]}, {{key,val,val}, 1, [2,3]}, {{val,key,val,val}, 2, [3,4,1]}, {{val,val,key,val}, 3, [1,4,1,2]}, % update pos1 twice {{val,val,val,key}, 4, [2,1,2,3]}],% update pos2 twice - lists:foreach(fun({Tuple,KeyPos,UpdPos}) -> update_element_opts(Tuple,KeyPos,UpdPos,Opts) end, + lists:foreach(fun({Tuple,KeyPos,UpdPos}) -> update_element_opts(Tuple,KeyPos,UpdPos,Opts) end, TupleCases), update_element_neg(Opts). @@ -1513,9 +1513,9 @@ update_element_opts(Tuple,KeyPos,UpdPos,Opts) -> true = ets:delete(OrdSet), ok. 
-update_element(T,Tuple,KeyPos,UpdPos) -> +update_element(T,Tuple,KeyPos,UpdPos) -> KeyList = [17,"seventeen",<<"seventeen">>,{17},list_to_binary(lists:seq(1,100)),make_ref(), self()], - lists:foreach(fun(Key) -> + lists:foreach(fun(Key) -> TupleWithKey = setelement(KeyPos,Tuple,Key), update_element_do(T,TupleWithKey,Key,UpdPos) end, @@ -1550,29 +1550,29 @@ update_element_do(Tab,Tuple,Key,UpdPos) -> {Pos, element(ToIx+1,Values)} % single {pos,value} arg end, - UpdateF = fun(ToIx,Rand) -> - PosValArg = PosValArgF(ToIx,[],UpdPos,Rand,PosValArgF), - %%io:format("update_element(~p)~n",[PosValArg]), - ArgHash = erlang:phash2({Tab,Key,PosValArg}), - true = ets:update_element(Tab, Key, PosValArg), - ArgHash = erlang:phash2({Tab,Key,PosValArg}), - NewTuple = update_tuple(PosValArg,Tuple), - [NewTuple] = ets:lookup(Tab,Key) + UpdateF = fun(ToIx,Rand) -> + PosValArg = PosValArgF(ToIx,[],UpdPos,Rand,PosValArgF), + %%io:format("update_element(~p)~n",[PosValArg]), + ArgHash = erlang:phash2({Tab,Key,PosValArg}), + true = ets:update_element(Tab, Key, PosValArg), + ArgHash = erlang:phash2({Tab,Key,PosValArg}), + NewTuple = update_tuple(PosValArg,Tuple), + [NewTuple] = ets:lookup(Tab,Key) end, - LoopF = fun(_FromIx, Incr, _Times, Checksum, _MeF) when Incr >= Length -> + LoopF = fun(_FromIx, Incr, _Times, Checksum, _MeF) when Incr >= Length -> Checksum; % done - (FromIx, Incr, 0, Checksum, MeF) -> + (FromIx, Incr, 0, Checksum, MeF) -> MeF(FromIx, Incr+1, Length, Checksum, MeF); - (FromIx, Incr, Times, Checksum, MeF) -> + (FromIx, Incr, Times, Checksum, MeF) -> ToIx = (FromIx + Incr) rem Length, UpdateF(ToIx,Checksum), - if + if Incr =:= 0 -> UpdateF(ToIx,Checksum); % extra update to same value true -> true - end, + end, MeF(ToIx, Incr, Times-1, Checksum+ToIx+1, MeF) end, @@ -1616,7 +1616,7 @@ update_element_neg_do(T) -> Object = {key, 0, "Hej"}, true = ets:insert(T,Object), - UpdateF = fun(Arg3) -> + UpdateF = fun(Arg3) -> ArgHash = erlang:phash2({T,key,Arg3}), {'EXIT',{badarg,_}} = (catch ets:update_element(T,key,Arg3)), ArgHash = erlang:phash2({T,key,Arg3}), @@ -1691,7 +1691,7 @@ update_counter_for(T) -> true = ets:lookup(T, b) =:= [setelement(1, NewObj, b)], ets:delete(T, b), Myself(NewObj,Times-1,Arg3,Myself) - end, + end, LoopF = fun(Obj, Times, Arg3) -> %%io:format("Loop start:\nObj = ~p\nArg3=~p\n",[Obj,Arg3]), @@ -1800,7 +1800,7 @@ uc_mimic(Obj, [Pits|Tail], Acc) -> uc_adder(Init, {_Pos, Add}) -> Init + Add; -uc_adder(Init, {_Pos, Add, Thres, Warp}) -> +uc_adder(Init, {_Pos, Add, Thres, Warp}) -> case Init + Add of X when X > Thres, Add > 0 -> Warp; @@ -1832,7 +1832,7 @@ update_counter_neg_for(T) -> Object = {key,0,false,1}, true = ets:insert(T,Object), - UpdateF = fun(Arg3) -> + UpdateF = fun(Arg3) -> ArgHash = erlang:phash2({T,key,Arg3}), {'EXIT',{badarg,_}} = (catch ets:update_counter(T,key,Arg3)), ArgHash = erlang:phash2({T,key,Arg3}), @@ -1972,15 +1972,16 @@ fixtable_next_do(Opts) -> verify_etsmem(EtsMem). do_fixtable_next(Tab) -> - F = fun(X,T,FF) -> case X of - 0 -> true; - _ -> - ets:insert(T, {X, - integer_to_list(X), - X rem 10}), - FF(X-1,T,FF) - end - end, + F = fun(X,T,FF) -> + case X of + 0 -> true; + _ -> + ets:insert(T, {X, + integer_to_list(X), + X rem 10}), + FF(X-1,T,FF) + end + end, F(100,Tab,F), ets:safe_fixtable(Tab,true), First = ets:first(Tab), @@ -1995,7 +1996,7 @@ do_fixtable_next(Tab) -> %% Check inserts of deleted keys in fixed bags. 
fixtable_insert(Config) when is_list(Config) -> - Combos = [[Type,{write_concurrency,WC}] || Type<- [bag,duplicate_bag], + Combos = [[Type,{write_concurrency,WC}] || Type<- [bag,duplicate_bag], WC <- [false,true]], lists:foreach(fun(Opts) -> fixtable_insert_do(Opts) end, Combos), @@ -2111,7 +2112,7 @@ heir_do(Opts) -> %% Different types of heir data and link/monitor relations TestFun = fun(Arg) -> {EtsMem,Arg} end, - Combos = [{Data,Mode} || Data<-[foo_data, <<"binary">>, + Combos = [{Data,Mode} || Data<-[foo_data, <<"binary">>, lists:seq(1,10), {17,TestFun,self()}, "The busy heir"], Mode<-[none,link,monitor]], @@ -2151,7 +2152,7 @@ heir_do(Opts) -> Founder4 ! {go, Heir4}, {'DOWN', MrefH4, process, Heir4, normal} = receive_any(), erts_debug:set_internal_state(next_pid, NextPidIx), - DoppelGanger = spawn_monitor_with_pid(Heir4, + DoppelGanger = spawn_monitor_with_pid(Heir4, fun()-> die_please = receive_any() end), Founder4 ! die_please, {'DOWN', MrefF4, process, Founder4, normal} = receive_any(), @@ -2164,12 +2165,12 @@ heir_do(Opts) -> failed -> io:format("Failed to spawn process with pid ~p\n", [Heir4]), true % try again - end + end end), verify_etsmem(EtsMem). -heir_founder(Master, HeirData, Opts) -> +heir_founder(Master, HeirData, Opts) -> {go,Heir} = receive_any(), HeirTpl = case Heir of none -> {heir,none}; @@ -2242,7 +2243,7 @@ heir_1(HeirData,Mode,Opts) -> {'DOWN', Mref, process, Heir, normal} = receive_any(). %% Test ets:give_way/3. -give_away(Config) when is_list(Config) -> +give_away(Config) when is_list(Config) -> repeat_for_opts(give_away_do). give_away_do(Opts) -> @@ -2381,7 +2382,7 @@ bad_table(Config) when is_list(Config) -> ok. bad_table_do(Opts, DummyFile) -> - Parent = self(), + Parent = self(), {Pid,Mref} = my_spawn_opt(fun()-> ets_new(priv,[private,named_table | Opts]), Priv = ets_new(priv,[private | Opts]), ets_new(prot,[protected,named_table | Opts]), @@ -2436,7 +2437,7 @@ bad_table_do(Opts, DummyFile) -> ], Info = {Opts, Priv, Prot}, lists:foreach(fun(Op) -> bad_table_op(Info, Op) end, - OpList), + OpList), Pid ! die_please, {'DOWN', Mref, process, Pid, normal} = receive_any(), ok. @@ -2571,14 +2572,14 @@ interface_equality_do(Opts) -> Set = ets_new(set,[set | Opts]), OrderedSet = ets_new(ordered_set,[ordered_set | Opts]), F = fun(X,T,FF) -> case X of - 0 -> true; - _ -> - ets:insert(T, {X, - integer_to_list(X), - X rem 10}), - FF(X-1,T,FF) - end - end, + 0 -> true; + _ -> + ets:insert(T, {X, + integer_to_list(X), + X rem 10}), + FF(X-1,T,FF) + end + end, F(100,Set,F), F(100,OrderedSet,F), equal_results(ets, insert, Set, OrderedSet, [{a,"a"}]), @@ -2647,20 +2648,20 @@ ordered_match_do(Opts) -> F(3000,T1,F), [[3,3],[3,3],[3,3]] = ets:match(T1, {'_','_','$1','$2',3}), F2 = fun(X,Rem,Res,FF) -> case X of - 0 -> []; - _ -> + 0 -> []; + _ -> case X rem Rem of Res -> FF(X-1,Rem,Res,FF) ++ [{X, - integer_to_list(X), + integer_to_list(X), X rem 10, X rem 100, X rem 1000}]; _ -> FF(X-1,Rem,Res,FF) end - end + end end, OL1 = F2(3000,100,2,F2), OL1 = ets:match_object(T1, {'_','_','_',2,'_'}), @@ -2738,7 +2739,7 @@ pick_all_backwards(T) -> %% Small test case for both set and bag type ets tables. -setbag(Config) when is_list(Config) -> +setbag(Config) when is_list(Config) -> EtsMem = etsmem(), Set = ets_new(set,[set]), Bag = ets_new(bag,[bag]), @@ -2815,7 +2816,7 @@ privacy_do(Opts) -> privacy_check(pub,prot,priv), - Owner ! {shift,1,{pub,prot,priv}}, + Owner ! 
{shift,1,{pub,prot,priv}}, receive {Pub1,Prot1,Priv1} -> ok = privacy_check(Pub1,Prot1,Priv1), @@ -2954,7 +2955,7 @@ badlookup(Config) when is_list(Config) -> verify_etsmem(EtsMem). %% Test that lookup returns objects in order of insertion for bag and dbag. -lookup_order(Config) when is_list(Config) -> +lookup_order(Config) when is_list(Config) -> EtsMem = etsmem(), repeat_for_opts(lookup_order_do, [write_concurrency,[bag,duplicate_bag]]), verify_etsmem(EtsMem), @@ -2976,7 +2977,7 @@ lookup_order_2(Opts, Fixed) -> case Fixed of true -> ets:safe_fixtable(T,true); false -> ok - end, + end, S10 = {T,[],key}, S20 = check_insert(S10,A), S30 = check_insert(S20,B), @@ -2988,7 +2989,7 @@ lookup_order_2(Opts, Fixed) -> S80 = check_delete(S70,D2b), S90 = check_insert(S80,D2a), SA0 = check_delete(S90,D3a), - SB0 = check_delete(SA0,D3b), + SB0 = check_delete(SA0,D3b), check_insert_new(SB0,D3b), true = ets:delete(T) @@ -3001,7 +3002,7 @@ check_insert({T,List0,Key},Val) -> ets:insert(T,{Key,Val}), List1 = case (ets:info(T,type) =:= bag andalso lists:member({Key,Val},List0)) of - true -> List0; + true -> List0; false -> [{Key,Val} | List0] end, check_check({T,List1,Key}). @@ -3034,8 +3035,6 @@ check_check(S={T,List,Key}) -> Items = length(List), S. - - fill_tab(Tab,Val) -> ets:insert(Tab,{key,Val}), ets:insert(Tab,{{a,144},Val}), @@ -3063,13 +3062,11 @@ lookup_element_mult_do(Opts) -> verify_etsmem(EtsMem). lem_data() -> - [ - {service,'eddie2@boromir',{150,236,14,103},httpd88,self()}, + [{service,'eddie2@boromir',{150,236,14,103},httpd88,self()}, {service,'eddie2@boromir',{150,236,14,103},httpd80,self()}, {service,'eddie3@boromir',{150,236,14,107},httpd88,self()}, {service,'eddie3@boromir',{150,236,14,107},httpd80,self()}, - {service,'eddie4@boromir',{150,236,14,108},httpd88,self()} - ]. + {service,'eddie4@boromir',{150,236,14,108},httpd88,self()}]. lem_crash(T) -> L = ets:lookup_element(T, 'eddie2@boromir', 3), @@ -3120,6 +3117,7 @@ delete_tab_do(Opts) -> %% Check that ets:delete/1 works and that other processes can run. delete_large_tab(Config) when is_list(Config) -> + ct:timetrap({minutes,30}), %% valgrind needs a lot Data = [{erlang:phash2(I, 16#ffffff),I} || I <- lists:seq(1, 200000)], EtsMem = etsmem(), repeat_for_opts(fun(Opts) -> delete_large_tab_do(Opts,Data) end), @@ -3142,7 +3140,7 @@ delete_large_tab_1(Name, Flags, Data, Fix) -> lists:foreach(fun({K,_}) -> ets:delete(Tab, K) end, Data) end, - {priority, Prio} = process_info(self(), priority), + {priority, Prio} = process_info(self(), priority), Deleter = self(), [SchedTracer] = start_loopers(1, @@ -3189,7 +3187,7 @@ delete_large_tab_1(Name, Flags, Data, Fix) -> %% Delete a large name table and try to create a new table with %% the same name in another process. -delete_large_named_table(Config) when is_list(Config) -> +delete_large_named_table(Config) when is_list(Config) -> Data = [{erlang:phash2(I, 16#ffffff),I} || I <- lists:seq(1, 200000)], EtsMem = etsmem(), repeat_for_opts(fun(Opts) -> delete_large_named_table_do(Opts,Data) end), @@ -3562,7 +3560,7 @@ dyn_lookup(T) -> dyn_lookup(T, ets:first(T)). 
dyn_lookup(_T, '$end_of_table') -> []; dyn_lookup(T, K) -> - NextKey=ets:next(T,K), + NextKey = ets:next(T,K), case ets:next(T,K) of NextKey -> dyn_lookup(T, NextKey); @@ -4081,9 +4079,9 @@ tabfile_ext2_do(Opts,Config) -> Name = make_ref(), [ets:insert(T,{X,integer_to_list(X)}) || X <- L], ok = ets:tab2file(T,FName,[{extended_info,[md5sum]}]), - true = lists:sort(ets:tab2list(T)) =:= + true = lists:sort(ets:tab2list(T)) =:= lists:sort(ets:tab2list(element(2,ets:file2tab(FName)))), - true = lists:sort(ets:tab2list(T)) =:= + true = lists:sort(ets:tab2list(T)) =:= lists:sort(ets:tab2list( element(2,ets:file2tab(FName,[{verify,true}])))), {ok, Name} = disk_log:open([{name,Name},{file,FName}]), @@ -4098,9 +4096,9 @@ tabfile_ext2_do(Opts,Config) -> ets:tab2list( element(2,ets:file2tab(FName2)))), {error,checksum_error} = ets:file2tab(FName2,[{verify,true}]), - {value,{extended_info,[md5sum]}} = + {value,{extended_info,[md5sum]}} = lists:keysearch(extended_info,1,element(2,ets:tabfile_info(FName2))), - {value,{extended_info,[md5sum]}} = + {value,{extended_info,[md5sum]}} = lists:keysearch(extended_info,1,element(2,ets:tabfile_info(FName))), file:delete(FName), file:delete(FName2), @@ -4145,15 +4143,14 @@ tabfile_ext4(Config) when is_list(Config) -> Name2 = make_ref(), [ets:insert(TL,{X,integer_to_list(X)}) || X <- LL], ok = ets:tab2file(TL,FName,[{extended_info,[md5sum]}]), - {ok, Name2} = disk_log:open([{name, Name2}, {file, FName}, + {ok, Name2} = disk_log:open([{name, Name2}, {file, FName}, {mode, read_only}]), {C,[_|_]} = disk_log:chunk(Name2,start), {_,[_|_]} = disk_log:chunk(Name2,C), disk_log:close(Name2), - true = lists:sort(ets:tab2list(TL)) =:= + true = lists:sort(ets:tab2list(TL)) =:= lists:sort(ets:tab2list(element(2,ets:file2tab(FName)))), - Res = [ - begin + Res = [begin {ok,FD} = file:open(FName,[binary,read,write]), {ok, Bin} = file:pread(FD,0,1000), <<B1:N/binary,Ch:8,B2/binary>> = Bin, @@ -4163,7 +4160,7 @@ tabfile_ext4(Config) when is_list(Config) -> ok = file:close(FD), X = case ets:file2tab(FName) of {ok,TL2} -> - true = lists:sort(ets:tab2list(TL)) =/= + true = lists:sort(ets:tab2list(TL)) =/= lists:sort(ets:tab2list(TL2)); _ -> totally_broken @@ -4171,7 +4168,7 @@ tabfile_ext4(Config) when is_list(Config) -> {error,Y} = ets:file2tab(FName,[{verify,true}]), ets:tab2file(TL,FName,[{extended_info,[md5sum]}]), {X,Y} - end || N <- lists:seq(500,600) ], + end || N <- lists:seq(500,600)], io:format("~p~n",[Res]), file:delete(FName), ok. @@ -4402,16 +4399,14 @@ member_do(Opts) -> build_table(L1,L2,Num) -> - T = ets_new(xxx, [ordered_set] - ), + T = ets_new(xxx, [ordered_set]), lists:foreach( fun(X1) -> lists:foreach( fun(X2) -> F = fun(FF,N) -> - ets:insert(T,{{X1,X2,N}, - X1, X2, N}), - case N of + ets:insert(T,{{X1,X2,N}, X1, X2, N}), + case N of 0 -> ok; _ -> @@ -4424,16 +4419,14 @@ build_table(L1,L2,Num) -> T. build_table2(L1,L2,Num) -> - T = ets_new(xxx, [ordered_set] - ), + T = ets_new(xxx, [ordered_set]), lists:foreach( fun(X1) -> lists:foreach( fun(X2) -> F = fun(FF,N) -> - ets:insert(T,{{N,X1,X2}, - N, X1, X2}), - case N of + ets:insert(T,{{N,X1,X2}, N, X1, X2}), + case N of 0 -> ok; _ -> @@ -4724,7 +4717,7 @@ del_one_by_one_dbag_3(T,From,To) -> N = (ets:info(T,size) + 1), Obj2 = {From, integer_to_list(From)}, ets:delete_object(T,Obj2), - N = (ets:info(T,size) + 2) + N = (ets:info(T,size) + 2) end, Next = if From < To -> @@ -4771,14 +4764,14 @@ gen_dets_filename(Config,N) -> filename:join(proplists:get_value(priv_dir,Config), "testdets_" ++ integer_to_list(N) ++ ".dets"). 
-otp_6842_select_1000(Config) when is_list(Config) -> +otp_6842_select_1000(Config) when is_list(Config) -> Tab = ets_new(xxx,[ordered_set]), [ets:insert(Tab,{X,X}) || X <- lists:seq(1,10000)], AllTrue = lists:duplicate(10,true), AllTrue = [ length( element(1, - ets:select(Tab,[{'_',[],['$_']}],X*1000))) =:= + ets:select(Tab,[{'_',[],['$_']}],X*1000))) =:= X*1000 || X <- lists:seq(1,10) ], Sequences = [[1000,1000,1000,1000,1000,1000,1000,1000,1000,1000], [2000,2000,2000,2000,2000], @@ -4804,7 +4797,13 @@ check_seq(A,B,C) -> false. otp_6338(Config) when is_list(Config) -> - L = binary_to_term(<<131,108,0,0,0,2,104,2,108,0,0,0,2,103,100,0,19,112,112,98,49,95,98,115,49,50,64,98,108,97,100,101,95,48,95,53,0,0,33,50,0,0,0,4,1,98,0,0,23,226,106,100,0,4,101,120,105,116,104,2,108,0,0,0,2,104,2,100,0,3,115,98,109,100,0,19,112,112,98,50,95,98,115,49,50,64,98,108,97,100,101,95,48,95,56,98,0,0,18,231,106,100,0,4,114,101,99,118,106>>), + L = binary_to_term(<<131,108,0,0,0,2,104,2,108,0,0,0,2,103,100,0,19,112,112, + 98,49,95,98,115,49,50,64,98,108,97,100,101,95,48,95,53, + 0,0,33,50,0,0,0,4,1,98,0,0,23,226,106,100,0,4,101,120, + 105,116,104,2,108,0,0,0,2,104,2,100,0,3,115,98,109,100, + 0,19,112,112,98,50,95,98,115,49,50,64,98,108,97,100, + 101,95,48,95,56,98,0,0,18,231,106,100,0,4,114,101,99, + 118,106>>), T = ets_new(xxx,[ordered_set]), lists:foreach(fun(X) -> ets:insert(T,X) end,L), [[4839,recv]] = ets:match(T,{[{sbm,ppb2_bs12@blade_0_8},'$1'],'$2'}), @@ -4823,7 +4822,7 @@ otp_5340_do(Opts) -> ets:delete(T). w(_,0, _) -> ok; -w(T,N, Id) -> +w(T,N, Id) -> ets:insert(T, {N, Id}), w(T,N-1,Id). @@ -4913,7 +4912,7 @@ meta_wb_new(Name, _, Tabs, Opts) -> case (catch ets_new(Name,[named_table|Opts])) of Name -> false = lists:member(Name, Tabs), - [Name | Tabs]; + [Name | Tabs]; {'EXIT',{badarg,_}} -> true = lists:member(Name, Tabs), Tabs @@ -5088,7 +5087,7 @@ meta_lookup_unnamed_read(Config) when is_list(Config) -> Tab end, ExecF = fun(Tab) -> [{key,data}] = ets:lookup(Tab,key), - Tab + Tab end, FiniF = fun(Tab) -> true = ets:delete(Tab) end, @@ -5112,7 +5111,7 @@ meta_lookup_named_read(Config) when is_list(Config) -> Tab end, ExecF = fun(Tab) -> [{key,data}] = ets:lookup(Tab,key), - Tab + Tab end, FiniF = fun(Tab) -> true = ets:delete(Tab) end, @@ -5171,9 +5170,9 @@ smp_fixed_delete_do() -> ets:safe_fixtable(T,true), Buckets = num_of_buckets(T), InitF = fun([ProcN,NumOfProcs|_]) -> {ProcN,NumOfProcs} end, - ExecF = fun({Key,_}) when Key > NumOfObjs -> + ExecF = fun({Key,_}) when Key > NumOfObjs -> [end_of_work]; - ({Key,Increment}) -> + ({Key,Increment}) -> true = ets:delete(T,Key), {Key+Increment,Increment} end, @@ -5202,7 +5201,7 @@ smp_unfix_fix_do() -> T = ets_new(foo,[public,{write_concurrency,true}]), %%Mem = ets:info(T,memory), NumOfObjs = 100000, - Deleted = 50000, + Deleted = 50000, filltabint(T,NumOfObjs), ets:safe_fixtable(T,true), Buckets = num_of_buckets(T), @@ -5215,7 +5214,7 @@ smp_unfix_fix_do() -> true = ets:info(T,fixed), Deleted = get_kept_objects(T), - {Child, Mref} = + {Child, Mref} = my_spawn_opt( fun()-> true = ets:info(T,fixed), @@ -5274,22 +5273,19 @@ otp_8166_do(WC) -> NumOfObjs = 3000, %% Need more than 1000 live objects for match_object to trap one time Deleted = NumOfObjs div 2, filltabint(T,NumOfObjs), - {ReaderPid, ReaderMref} = - my_spawn_opt(fun()-> otp_8166_reader(T,NumOfObjs) end, - [link, monitor, {scheduler,2}]), - {ZombieCrPid, ZombieCrMref} = - my_spawn_opt(fun()-> otp_8166_zombie_creator(T,Deleted) end, - [link, monitor, {scheduler,3}]), + {ReaderPid, ReaderMref} = 
my_spawn_opt(fun()-> otp_8166_reader(T,NumOfObjs) end, + [link, monitor, {scheduler,2}]), + {ZombieCrPid, ZombieCrMref} = my_spawn_opt(fun()-> otp_8166_zombie_creator(T,Deleted) end, + [link, monitor, {scheduler,3}]), repeat(fun() -> ZombieCrPid ! {loop, self()}, zombies_created = receive_any(), otp_8166_trapper(T, 10, ZombieCrPid) - end, - 100), + end, 100), ReaderPid ! quit, {'DOWN', ReaderMref, process, ReaderPid, normal} = receive_any(), - ZombieCrPid ! quit, + ZombieCrPid ! quit, {'DOWN', ZombieCrMref, process, ZombieCrPid, normal} = receive_any(), false = ets:info(T,fixed), 0 = get_kept_objects(T), @@ -5299,7 +5295,7 @@ otp_8166_do(WC) -> %% Keep reading the table otp_8166_reader(T, NumOfObjs) -> - repeat_while(fun(0) -> + repeat_while(fun(0) -> receive quit -> {false,done} after 0 -> {true,NumOfObjs} end; @@ -5313,14 +5309,14 @@ otp_8166_reader(T, NumOfObjs) -> otp_8166_trapper(T, Try, ZombieCrPid) -> [] = ets:match_object(T,{'_',"Pink Unicorn"}), case {ets:info(T,fixed),Try} of - {true,1} -> + {true,1} -> io:format("failed to provoke unsafe unfix, give up...\n",[]), ZombieCrPid ! unfix; - {true,_} -> + {true,_} -> io:format("trapper too fast, trying again...\n",[]), otp_8166_trapper(T, Try-1, ZombieCrPid); {false,_} -> done - end. + end. %% Fixate table and create some pseudo-deleted objects (zombies) @@ -5340,7 +5336,7 @@ otp_8166_zombie_creator(T,Deleted) -> repeat_while(fun() -> case ets:info(T,safe_fixed_monotonic_time) of {_,[_P1,_P2]} -> false; - _ -> + _ -> receive unfix -> false after 0 -> true end @@ -5397,7 +5393,7 @@ smp_select_delete(Config) when is_list(Config) -> Mod = 17, Zeros = erlang:make_tuple(Mod,0), InitF = fun(_) -> Zeros end, - ExecF = fun(Diffs0) -> + ExecF = fun(Diffs0) -> case rand:uniform(20) of 1 -> Mod = 17, @@ -5419,7 +5415,7 @@ smp_select_delete(Config) when is_list(Config) -> Diffs1; false -> Diffs0 end - end + end end, FiniF = fun(Result) -> Result end, Results = run_workers_do(InitF,ExecF,FiniF,20000), @@ -5430,7 +5426,7 @@ smp_select_delete(Config) when is_list(Config) -> 0, TotCnts), io:format("LeftInTab = ~p\n",[LeftInTab]), LeftInTab = ets:info(T,size), - lists:foldl(fun(Cnt,Eq) -> + lists:foldl(fun(Cnt,Eq) -> WasCnt = ets:select_count(T, [{{'_', '$1'}, [{'=:=', {'rem', '$1', Mod}, Eq}], @@ -5438,7 +5434,7 @@ smp_select_delete(Config) when is_list(Config) -> io:format("~p: ~p =?= ~p\n",[Eq,Cnt,WasCnt]), Cnt = WasCnt, Eq+1 - end, + end, 0, TotCnts), %% May fail as select_delete does not shrink table (enough) %%verify_table_load(T), @@ -5477,8 +5473,8 @@ types_do(Opts) -> %% OTP-9932: Memory overwrite when inserting large integers in compressed bag. %% Will crash with segv on 64-bit opt if not fixed. otp_9932(Config) when is_list(Config) -> - T = ets:new(xxx, [bag, compressed]), - Fun = fun(N) -> + T = ets:new(xxx, [bag, compressed]), + Fun = fun(N) -> Key = {1316110174588445 bsl N,1316110174588583 bsl N}, S = {Key, Key}, true = ets:insert(T, S), @@ -5494,9 +5490,9 @@ otp_9932(Config) when is_list(Config) -> %% write_concurrency table. otp_9423(Config) when is_list(Config) -> InitF = fun(_) -> {0,0} end, - ExecF = fun({S,F}) -> - receive - stop -> + ExecF = fun({S,F}) -> + receive + stop -> io:format("~p got stop\n", [self()]), [end_of_work | {"Succeded=",S,"Failed=",F}] after 0 -> @@ -5592,12 +5588,12 @@ take(Config) when is_list(Config) -> %% Utility functions: %% -add_lists(L1,L2) -> +add_lists(L1,L2) -> add_lists(L1,L2,[]). add_lists([],[],Acc) -> lists:reverse(Acc); add_lists([E1|T1], [E2|T2], Acc) -> - add_lists(T1, T2, [E1+E2 | Acc]). 
+ add_lists(T1, T2, [E1+E2 | Acc]). run_workers(InitF,ExecF,FiniF,Laps) -> run_workers(InitF,ExecF,FiniF,Laps, 0). @@ -5643,9 +5639,9 @@ worker_loop(infinite, ExecF, State) -> worker_loop(N, ExecF, State) -> worker_loop(N-1,ExecF,ExecF(State)). -wait_pids(Pids) -> +wait_pids(Pids) -> wait_pids(Pids,[]). -wait_pids([],Acc) -> +wait_pids([],Acc) -> Acc; wait_pids(Pids, Acc) -> receive @@ -5682,7 +5678,7 @@ etsmem() -> wait_for_memory_deallocations(), AllTabs = lists:map(fun(T) -> {T,ets:info(T,name),ets:info(T,size), - ets:info(T,memory),ets:info(T,type)} + ets:info(T,memory),ets:info(T,type)} end, ets:all()), EtsAllocInfo = erlang:system_info({allocator,ets_alloc}), @@ -5912,7 +5908,7 @@ receive_any() -> receive_any_spinning() -> receive_any_spinning(1000000). receive_any_spinning(Loops) -> - receive_any_spinning(Loops,Loops,1). + receive_any_spinning(Loops,Loops,1). receive_any_spinning(Loops,0,Tries) -> receive M -> io:format("Spinning process ~p got msg ~p after ~p tries\n", [self(),M,Tries]), diff --git a/lib/stdlib/test/gen_statem_SUITE.erl b/lib/stdlib/test/gen_statem_SUITE.erl index 28f9ab81fe..8f2ba0cab2 100644 --- a/lib/stdlib/test/gen_statem_SUITE.erl +++ b/lib/stdlib/test/gen_statem_SUITE.erl @@ -505,10 +505,10 @@ abnormal2(Config) -> {ok,Pid} = gen_statem:start_link(?MODULE, start_arg(Config, []), []), %% bad return value in the gen_statem loop - {{bad_return_from_state_function,badreturn},_} = + {{{bad_return_from_state_function,badreturn},_},_} = ?EXPECT_FAILURE(gen_statem:call(Pid, badreturn), Reason), receive - {'EXIT',Pid,{bad_return_from_state_function,badreturn}} -> ok + {'EXIT',Pid,{{bad_return_from_state_function,badreturn},_}} -> ok after 5000 -> ct:fail(gen_statem_did_not_die) end, @@ -742,26 +742,40 @@ state_timeout(_Config) -> %% Verify that {state_timeout,0,_} %% comes after next_event and that %% {timeout,0,_} is cancelled by - %% {state_timeout,0,_} + %% pending {state_timeout,0,_} {keep_state, {ok,2,Data}, [{timeout,0,3}]}; - (state_timeout, 2, {ok,2,{Time,From}}) -> - {next_state, state3, 3, + (state_timeout, 2, {ok,2,Data}) -> + %% Verify that timeout 0's are processed + %% in order + {keep_state, {ok,3,Data}, + [{timeout,0,4},{state_timeout,0,5}]}; + (timeout, 4, {ok,3,Data}) -> + %% Verify that timeout 0 is cancelled by + %% enqueued state_timeout 0 and that + %% multiple state_timeout 0 can be enqueued + {keep_state, {ok,4,Data}, + [{state_timeout,0,6},{timeout,0,7}]}; + (state_timeout, 5, {ok,4,Data}) -> + {keep_state, {ok,5,Data}}; + (state_timeout, 6, {ok,5,{Time,From}}) -> + {next_state, state3, 6, [{reply,From,ok}, - {state_timeout,Time,3}]} + {state_timeout,Time,8}]} end, state3 => fun - (info, message_to_self, 3) -> - {keep_state, '3'}; - ({call,From}, check, '3') -> + (info, message_to_self, 6) -> + {keep_state, 7}; + ({call,From}, check, 7) -> {keep_state, From}; - (state_timeout, 3, From) -> + (state_timeout, 8, From) -> {stop_and_reply, normal, {reply,From,ok}} end}, {ok,STM} = gen_statem:start_link(?MODULE, {map_statem,Machine,[]}, []), + sys:trace(STM, true), TRef = erlang:start_timer(1000, self(), kull), ok = gen_statem:call(STM, {go,500}), ok = gen_statem:call(STM, check), @@ -887,7 +901,7 @@ error_format_status(Config) -> gen_statem:start( ?MODULE, start_arg(Config, {data,Data}), []), %% bad return value in the gen_statem loop - {{bad_return_from_state_function,badreturn},_} = + {{{bad_return_from_state_function,badreturn},_},_} = ?EXPECT_FAILURE(gen_statem:call(Pid, badreturn), Reason), receive {error,_, diff --git 
a/lib/stdlib/test/proc_lib_SUITE.erl b/lib/stdlib/test/proc_lib_SUITE.erl index 416650e27e..a53e99afc9 100644 --- a/lib/stdlib/test/proc_lib_SUITE.erl +++ b/lib/stdlib/test/proc_lib_SUITE.erl @@ -26,7 +26,7 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, - crash/1, sync_start_nolink/1, sync_start_link/1, + crash/1, stacktrace/1, sync_start_nolink/1, sync_start_link/1, spawn_opt/1, sp1/0, sp2/0, sp3/1, sp4/2, sp5/1, hibernate/1, stop/1, t_format/1]). -export([ otp_6345/1, init_dont_hang/1]). @@ -50,7 +50,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> - [crash, {group, sync_start}, spawn_opt, hibernate, + [crash, stacktrace, {group, sync_start}, spawn_opt, hibernate, {group, tickets}, stop, t_format]. groups() -> @@ -198,6 +198,31 @@ match_info(Tuple1, Tuple2) when tuple_size(Tuple1) =:= tuple_size(Tuple2) -> match_info(_, _) -> throw(no_match). +stacktrace(Config) when is_list(Config) -> + process_flag(trap_exit, true), + %% Errors. + Pid1 = proc_lib:spawn_link(fun() -> 1 = 2 end), + receive + {'EXIT',Pid1,{{badmatch,2},_Stack1}} -> ok + after 500 -> + ct:fail(error) + end, + %% Exits. + Pid2 = proc_lib:spawn_link(fun() -> exit(bye) end), + receive + {'EXIT',Pid2,bye} -> ok + after 500 -> + ct:fail(exit) + end, + %% Throws. + Pid3 = proc_lib:spawn_link(fun() -> throw(ball) end), + receive + {'EXIT',Pid3,{{nocatch,ball},_Stack3}} -> ok + after 500 -> + ct:fail(throw) + end, + ok. + sync_start_nolink(Config) when is_list(Config) -> _Pid = spawn_link(?MODULE, sp5, [self()]), receive @@ -457,7 +482,7 @@ stop(_Config) -> %% System message is handled, but process dies with other reason %% than the given (in system_terminate/4 below) Pid5 = proc_lib:spawn(SysMsgProc), - {'EXIT',{badmatch,2}} = (catch proc_lib:stop(Pid5,crash,infinity)), + {'EXIT',{{badmatch,2},_Stacktrace}} = (catch proc_lib:stop(Pid5,crash,infinity)), false = erlang:is_process_alive(Pid5), %% Local registered name diff --git a/lib/stdlib/test/rand_SUITE.erl b/lib/stdlib/test/rand_SUITE.erl index cb778c96d4..8e7ac223a7 100644 --- a/lib/stdlib/test/rand_SUITE.erl +++ b/lib/stdlib/test/rand_SUITE.erl @@ -18,13 +18,18 @@ %% %CopyrightEnd% -module(rand_SUITE). --export([all/0, suite/0,groups/0]). +-compile({nowarn_deprecated_function,[{random,seed,1}, + {random,uniform_s,1}, + {random,uniform_s,2}]}). + +-export([all/0, suite/0, groups/0, group/1]). -export([interval_int/1, interval_float/1, seed/1, api_eq/1, reference/1, basic_stats_uniform_1/1, basic_stats_uniform_2/1, basic_stats_normal/1, - plugin/1, measure/1]). + plugin/1, measure/1, + reference_jump_state/1, reference_jump_procdict/1]). -export([test/0, gen/1]). @@ -41,24 +46,35 @@ all() -> api_eq, reference, {group, basic_stats}, - plugin, measure]. + plugin, measure, + {group, reference_jump} + ]. groups() -> [{basic_stats, [parallel], - [basic_stats_uniform_1, basic_stats_uniform_2, basic_stats_normal]}]. + [basic_stats_uniform_1, basic_stats_uniform_2, basic_stats_normal]}, + {reference_jump, [parallel], + [reference_jump_state, reference_jump_procdict]}]. + +group(basic_stats) -> + %% valgrind needs a lot of time + [{timetrap,{minutes,10}}]; +group(reference_jump) -> + %% valgrind needs a lot of time + [{timetrap,{minutes,10}}]. 
%% A simple helper to test without test_server during dev test() -> Tests = all(), lists:foreach(fun(Test) -> - try - ok = ?MODULE:Test([]), - io:format("~p: ok~n", [Test]) - catch _:Reason -> - io:format("Failed: ~p: ~p ~p~n", - [Test, Reason, erlang:get_stacktrace()]) - end - end, Tests). + try + ok = ?MODULE:Test([]), + io:format("~p: ok~n", [Test]) + catch _:Reason -> + io:format("Failed: ~p: ~p ~p~n", + [Test, Reason, erlang:get_stacktrace()]) + end + end, Tests). algs() -> [exs64, exsplus, exs1024]. @@ -220,7 +236,7 @@ interval_float_1(N) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% Check if exs64 algorithm generates the proper sequence. +%% Check if each algorithm generates the proper sequence. reference(Config) when is_list(Config) -> [reference_1(Alg) || Alg <- algs()], ok. @@ -234,7 +250,7 @@ reference_1(Alg) -> io:format("Failed: ~p~n",[Alg]), io:format("Length ~p ~p~n",[length(Refval), length(Testval)]), io:format("Head ~p ~p~n",[hd(Refval), hd(Testval)]), - ok + exit(wrong_value) end. gen(Algo) -> @@ -426,6 +442,112 @@ measure_2(N, State0, Fun) when N > 0 -> measure_2(0, _, _) -> ok. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% The jump sequence tests has two parts +%% for those with the functional API (jump/1) +%% and for those with the internal state +%% in process dictionary (jump/0). + +-define(LOOP_JUMP, (?LOOP div 1000)). + +%% Check if each algorithm generates the proper jump sequence +%% with the functional API. +reference_jump_state(Config) when is_list(Config) -> + [reference_jump_1(Alg) || Alg <- algs()], + ok. + +reference_jump_1(Alg) -> + Refval = reference_jump_val(Alg), + Testval = gen_jump_1(Alg), + case Refval =:= Testval of + true -> ok; + false -> + io:format("Failed: ~p~n",[Alg]), + io:format("Length ~p ~p~n",[length(Refval), length(Testval)]), + io:format("Head ~p ~p~n",[hd(Refval), hd(Testval)]), + exit(wrong_value) + end. + +gen_jump_1(Algo) -> + Seed = case Algo of + exsplus -> %% Printed with orig 'C' code and this seed + rand:seed_s({exsplus, [12345678|12345678]}); + exs1024 -> %% Printed with orig 'C' code and this seed + rand:seed_s({exs1024, {lists:duplicate(16, 12345678), []}}); + exs64 -> %% Test exception of not_implemented notice + try rand:jump(rand:seed_s(exs64)) + catch + error:not_implemented -> not_implemented + end; + _ -> % unimplemented + not_implemented + end, + case Seed of + not_implemented -> [not_implemented]; + S -> gen_jump_1(?LOOP_JUMP, S, []) + end. + +gen_jump_1(N, State0 = {#{max:=Max}, _}, Acc) when N > 0 -> + {_, State1} = rand:uniform_s(Max, State0), + {Random, State2} = rand:uniform_s(Max, rand:jump(State1)), + case N rem (?LOOP_JUMP div 100) of + 0 -> gen_jump_1(N-1, State2, [Random|Acc]); + _ -> gen_jump_1(N-1, State2, Acc) + end; +gen_jump_1(_, _, Acc) -> lists:reverse(Acc). + +%% Check if each algorithm generates the proper jump sequence +%% with the internal state in the process dictionary. +reference_jump_procdict(Config) when is_list(Config) -> + [reference_jump_0(Alg) || Alg <- algs()], + ok. + +reference_jump_0(Alg) -> + Refval = reference_jump_val(Alg), + Testval = gen_jump_0(Alg), + case Refval =:= Testval of + true -> ok; + false -> + io:format("Failed: ~p~n",[Alg]), + io:format("Length ~p ~p~n",[length(Refval), length(Testval)]), + io:format("Head ~p ~p~n",[hd(Refval), hd(Testval)]), + exit(wrong_value) + end. 
+ +gen_jump_0(Algo) -> + Seed = case Algo of + exsplus -> %% Printed with orig 'C' code and this seed + rand:seed({exsplus, [12345678|12345678]}); + exs1024 -> %% Printed with orig 'C' code and this seed + rand:seed({exs1024, {lists:duplicate(16, 12345678), []}}); + exs64 -> %% Test exception of not_implemented notice + try + _ = rand:seed(exs64), + rand:jump() + catch + error:not_implemented -> not_implemented + end; + _ -> % unimplemented + not_implemented + end, + case Seed of + not_implemented -> [not_implemented]; + S -> + {Seedmap=#{}, _} = S, + Max = maps:get(max, Seedmap), + gen_jump_0(?LOOP_JUMP, Max, []) + end. + +gen_jump_0(N, Max, Acc) when N > 0 -> + _ = rand:uniform(Max), + _ = rand:jump(), + Random = rand:uniform(Max), + case N rem (?LOOP_JUMP div 100) of + 0 -> gen_jump_0(N-1, Max, [Random|Acc]); + _ -> gen_jump_0(N-1, Max, Acc) + end; +gen_jump_0(_, _, Acc) -> lists:reverse(Acc). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% Data reference_val(exs64) -> [16#3737ad0c703ff6c3,16#3868a78fe71adbbd,16#1f01b62b4338b605,16#50876a917437965f, @@ -507,3 +629,61 @@ reference_val(exsplus) -> 16#36f715a249f4ec2,16#1c27629826c50d3,16#914d9a6648726a,16#27f5bf5ce2301e8, 16#3dd493b8012970f,16#be13bed1e00e5c,16#ceef033b74ae10,16#3da38c6a50abe03, 16#15cbd1a421c7a8c,16#22794e3ec6ef3b1,16#26154d26e7ea99f,16#3a66681359a6ab6]. + +%%% + +reference_jump_val(exsplus) -> + [82445318862816932, 145810727464480743, 16514517716894509, 247642377064868650, + 162385642339156908, 251810707075252101, 82288275771998924, 234412731596926322, + 49960883129071044, 200690077681656596, 213743196668671647, 131182800982967108, + 144200072021941728, 263557425008503277, 194858522616874272, 185869394820993172, + 80384502675241453, 262654144824057588, 90033295011291362, 4494510449302659, + 226005372746479588, 116780561309220553, 47048528594475843, 39168929349768743, + 139615163424415552, 55330632656603925, 237575574720486569, 102381140288455025, + 18452933910354323, 150248612130579752, 269358096791922740, 61313433522002187, + 160327361842676597, 185187983548528938, 57378981505594193, 167510799293984067, + 105117045862954303, 176126685946302943, 123590876906828803, 69185336947273487, + 9098689247665808, 49906154674145057, 131575138412788650, 161843880211677185, + 30743946051071186, 187578920583823612, 45008401528636978, 122454158686456658, + 111195992644229524, 17962783958752862, 13579507636941108, 130137843317798663, + 144202635170576832, 132539563255093922, 159785575703967124, 187241848364816640, + 183044737781926478, 12921559769912263, 83553932242922001, 96698298841984688, + 281664320227537824, 224233030818578263, 77812932110318774, 169729351013291728, + 164475402723178734, 242780633011249051, 51095111179609125, 19249189591963554, + 221412426221439180, 265700202856282653, 265342254311932308, 241218503498385511, + 255400887248486575, 212083616929812076, 227947034485840579, 268261881651571692, + 104846262373404908, 49690734329496661, 213259196633566308, 186966479726202436, + 282157378232384574, 11272948584603747, 166540426999573480, 50628164001018755, + 65235580992800860, 230664399047956956, 64575592354687978, 40519393736078511, + 108341851194332747, 115426411532008961, 120656817002338193, 234537867870809797, + 12504080415362731, 45083100453836317, 270968267812126657, 93505647407734103, + 252852934678537969, 258758309277167202, 74250882143432077, 141629095984552833]; + +reference_jump_val(exs1024) -> + [2655961906500790629, 17003395417078685063, 10466831598958356428, 7603399148503548021, + 
1650550950190587188, 12294992315080723704, 15743995773860389219, 5492181000145247327, + 14118165228742583601, 1024386975263610703, 10124872895886669513, 6445624517813169301, + 6238575554686562601, 14108646153524288915, 11804141635807832816, 8421575378006186238, + 6354993374304550369, 838493020029548163, 14759355804308819469, 12212491527912522022, + 16943204735100571602, 198964074252287588, 7325922870779721649, 15853102065526570574, + 16294058349151823341, 6153379962047409781, 15874031679495957261, 17299265255608442340, + 984658421210027171, 17408042033939375278, 3326465916992232353, 5222817718770538733, + 13262385796795170510, 15648751121811336061, 6718721549566546451, 7353765235619801875, + 16110995049882478788, 14559143407227563441, 4189805181268804683, 10938587948346538224, + 1635025506014383478, 12619562911869525411, 17469465615861488695, 125252234176411528, + 2004192558503448853, 13175467866790974840, 17712272336167363518, 1710549840100880318, + 17486892343528340916, 5337910082227550967, 8333082060923612691, 6284787745504163856, + 8072221024586708290, 6077032673910717705, 11495200863352251610, 11722792537523099594, + 14642059504258647996, 8595733246938141113, 17223366528010341891, 17447739753327015776, + 6149800490736735996, 11155866914574313276, 7123864553063709909, 15982886296520662323, + 5775920250955521517, 8624640108274906072, 8652974210855988961, 8715770416136907275, + 11841689528820039868, 10991309078149220415, 11758038663970841716, 7308750055935299261, + 15939068400245256963, 6920341533033919644, 8017706063646646166, 15814376391419160498, + 13529376573221932937, 16749061963269842448, 14639730709921425830, 3265850480169354066, + 4569394597532719321, 16594515239012200038, 13372824240764466517, 16892840440503406128, + 11260004846380394643, 2441660009097834955, 10566922722880085440, 11463315545387550692, + 5252492021914937692, 10404636333478845345, 11109538423683960387, 5525267334484537655, + 17936751184378118743, 4224632875737239207, 15888641556987476199, 9586888813112229805, + 9476861567287505094, 14909536929239540332, 17996844556292992842, 2699310519182298856]; + +reference_jump_val(exs64) -> [not_implemented]. diff --git a/lib/stdlib/test/tar_SUITE.erl b/lib/stdlib/test/tar_SUITE.erl index 64dd41e75a..6f3979bb77 100644 --- a/lib/stdlib/test/tar_SUITE.erl +++ b/lib/stdlib/test/tar_SUITE.erl @@ -720,20 +720,25 @@ memory(Config) when is_list(Config) -> %% Test filenames with characters outside the US ASCII range. unicode(Config) when is_list(Config) -> - PrivDir = proplists:get_value(priv_dir, Config), - do_unicode(PrivDir), + run_unicode_node(Config, "+fnu"), case has_transparent_naming() of true -> - Pa = filename:dirname(code:which(?MODULE)), - Node = start_node(unicode, "+fnl -pa "++Pa), - ok = rpc:call(Node, erlang, apply, - [fun() -> do_unicode(PrivDir) end,[]]), - true = test_server:stop_node(Node), - ok; + run_unicode_node(Config, "+fnl"); false -> ok end. +run_unicode_node(Config, Option) -> + PrivDir = proplists:get_value(priv_dir, Config), + Pa = filename:dirname(code:which(?MODULE)), + Args = Option ++ " -pa "++Pa, + io:format("~s\n", [Args]), + Node = start_node(unicode, Args), + ok = rpc:call(Node, erlang, apply, + [fun() -> do_unicode(PrivDir) end,[]]), + true = test_server:stop_node(Node), + ok. 
+ has_transparent_naming() -> case os:type() of {unix,darwin} -> false; @@ -745,10 +750,11 @@ do_unicode(PrivDir) -> ok = file:set_cwd(PrivDir), ok = file:make_dir("unicöde"), - Names = unicode_create_files(), + Names = lists:sort(unicode_create_files()), Tar = "unicöde.tar", ok = erl_tar:create(Tar, ["unicöde"], []), - {ok,Names} = erl_tar:table(Tar, []), + {ok,Names0} = erl_tar:table(Tar, []), + Names = lists:sort(Names0), _ = [ok = file:delete(Name) || Name <- Names], ok = erl_tar:extract(Tar), _ = [{ok,_} = file:read_file(Name) || Name <- Names], |