Diffstat (limited to 'erts')
60 files changed, 3792 insertions, 2553 deletions
diff --git a/erts/Makefile.in b/erts/Makefile.in index 3052dc3065..cddabbecee 100644 --- a/erts/Makefile.in +++ b/erts/Makefile.in @@ -75,12 +75,10 @@ local_setup: $(ERL_TOP)/bin/erl.exe $(ERL_TOP)/bin/erlc.exe \ $(ERL_TOP)/bin/escript $(ERL_TOP)/bin/escript.exe \ $(ERL_TOP)/bin/dialyzer $(ERL_TOP)/bin/dialyzer.exe \ - $(ERL_TOP)/bin/typer $(ERL_TOP)/bin/typer.exe \ $(ERL_TOP)/bin/ct_run $(ERL_TOP)/bin/ct_run.exe \ $(ERL_TOP)/bin/start*.boot $(ERL_TOP)/bin/start*.script @if [ "X$(TARGET)" = "Xwin32" ]; then \ cp $(ERL_TOP)/bin/$(TARGET)/dialyzer.exe $(ERL_TOP)/bin/dialyzer.exe; \ - cp $(ERL_TOP)/bin/$(TARGET)/typer.exe $(ERL_TOP)/bin/typer.exe; \ cp $(ERL_TOP)/bin/$(TARGET)/ct_run.exe $(ERL_TOP)/bin/ct_run.exe; \ cp $(ERL_TOP)/bin/$(TARGET)/erlc.exe $(ERL_TOP)/bin/erlc.exe; \ cp $(ERL_TOP)/bin/$(TARGET)/erl.exe $(ERL_TOP)/bin/erl.exe; \ @@ -100,7 +98,6 @@ local_setup: -e "s;%VSN%;$(VSN);" \ $(ERL_TOP)/erts/etc/unix/cerl.src > $(ERL_TOP)/bin/cerl; \ cp $(ERL_TOP)/bin/$(TARGET)/dialyzer $(ERL_TOP)/bin/dialyzer; \ - cp $(ERL_TOP)/bin/$(TARGET)/typer $(ERL_TOP)/bin/typer; \ cp $(ERL_TOP)/bin/$(TARGET)/ct_run $(ERL_TOP)/bin/ct_run; \ cp $(ERL_TOP)/bin/$(TARGET)/erlc $(ERL_TOP)/bin/erlc; \ cp $(ERL_TOP)/bin/$(TARGET)/escript $(ERL_TOP)/bin/escript; \ diff --git a/erts/configure.in b/erts/configure.in index 1c4a3e90ef..d7d56d45b6 100644 --- a/erts/configure.in +++ b/erts/configure.in @@ -622,7 +622,7 @@ case $chk_arch_ in powerpc) ARCH=ppc;; ppc) ARCH=ppc;; ppc64) ARCH=ppc64;; - ppc64le) ARCH=ppc64;; + ppc64le) ARCH=ppc64le;; "Power Macintosh") ARCH=ppc;; armv5b) ARCH=arm;; armv5teb) ARCH=arm;; @@ -2836,9 +2836,10 @@ if test X${enable_hipe} = Xyes && test X$ARCH = Xamd64; then saved_LDFLAGS=$LDFLAGS LDFLAGS="-no-pie $LDFLAGS" AC_TRY_LINK(,, [], - [AC_MSG_WARN([Linked does not accept option -no-pie]) - LDFLAGS=$saved_LDFLAGS])]) - + [LDFLAGS="-fno-PIE $saved_LDFLAGS" + AC_TRY_LINK(,, [], + [AC_MSG_WARN([Linked does not accept option -no-pie nor -fno-PIE]) + LDFLAGS=$saved_LDFLAGS])])]) fi diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml index fe8e3b30e7..ec00955ccd 100644 --- a/erts/doc/src/absform.xml +++ b/erts/doc/src/absform.xml @@ -4,7 +4,7 @@ <chapter> <header> <copyright> - <year>2001</year><year>2016</year> + <year>2001</year><year>2017</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -182,10 +182,18 @@ can contain the following:</p> <list type="bulleted"> - <item>Tuples <c>{error,E}</c> and <c>{warning,W}</c>, denoting - syntactically incorrect forms and warnings</item> - <item><c>{eof,LINE}</c>, denoting an end-of-stream - encountered before a complete form had been parsed</item> + <item> + <p>Tuples <c>{error,E}</c> and <c>{warning,W}</c>, denoting + syntactically incorrect forms and warnings. + </p> + </item> + <item> + <p><c>{eof,LOCATION}</c>, denoting an end-of-stream + encountered before a complete form had been parsed. + The word <c>LOCATION</c> represents an integer, and denotes the + number of the last line in the source file. + </p> + </item> </list> </section> </section> diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml index b0a632d2d6..6bb1109415 100644 --- a/erts/doc/src/erl_nif.xml +++ b/erts/doc/src/erl_nif.xml @@ -2634,7 +2634,7 @@ enif_map_iterator_destroy(env, &iter);</code> which is described further below. 
When a read or write event is triggerred, a notification message like this is sent to the process identified by <c>pid</c>:</p> - <code type="none"><c>{select, Obj, Ref, ready_input | ready_output}</c></code> + <code type="none">{select, Obj, Ref, ready_input | ready_output}</code> <p><c>ready_input</c> or <c>ready_output</c> indicates if the event object is ready for reading or writing.</p> <p>Argument <c>pid</c> may be <c>NULL</c> to indicate the calling process.</p> diff --git a/erts/doc/src/erts_alloc.xml b/erts/doc/src/erts_alloc.xml index 49dadfb42c..b11328c3a8 100644 --- a/erts/doc/src/erts_alloc.xml +++ b/erts/doc/src/erts_alloc.xml @@ -595,7 +595,7 @@ </item> <tag><marker id="M_sbct"/><c><![CDATA[+M<S>sbct <size>]]></c></tag> <item> - <p>Singleblock carrier threshold. Blocks larger than this + <p>Singleblock carrier threshold (in kilobytes). Blocks larger than this threshold are placed in singleblock carriers. Blocks smaller than this threshold are placed in multiblock carriers. On 32-bit Unix style OS this threshold cannot be set diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml index 05142c9338..f93b386392 100644 --- a/erts/doc/src/notes.xml +++ b/erts/doc/src/notes.xml @@ -32,6 +32,250 @@ <p>This document describes the changes made to the ERTS application.</p> +<section><title>Erts 8.3.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Trying to open a directory with file:read_file/1 on Unix + leaked a file descriptor. This bug has now been fixed.</p> + <p> + Own Id: OTP-14308 Aux Id: ERL-383 </p> + </item> + </list> + </section> + + + <section><title>Known Bugs and Problems</title> + <list> + <item> + <p> + Invoking <c>init:stop/0</c> via the SIGTERM signal, in a + non-SMP BEAM, could cause BEAM to terminate with fatal + error. This has now been fixed and the BEAM will + terminate normally when SIGTERM is received.</p> + <p> + Own Id: OTP-14290</p> + </item> + </list> + </section> + +</section> + +<section><title>Erts 8.3</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p>Fixed a number of bugs that caused faulty stack-traces + to be generated. The faulty stack traces were generated + either when applying the following functions or tracing + the following functions:</p> <list> + <item><c>erlang:error/1</c></item> + <item><c>erlang:error/2</c></item> + <item><c>erlang:exit/1</c></item> + <item><c>erlang:throw/1</c></item> </list> + <p> + Own Id: OTP-14055</p> + </item> + <item> + <p> + Corrected documentation about memory footprint for maps.</p> + <p> + Own Id: OTP-14118</p> + </item> + <item> + <p> + Fix <c>process_info(Pid, current_stacktrace)</c> to use + stack depth limit set by + <c>system_flag(backtrace_depth)</c>. 
The old behavior was + a hard coded depth limit of 8.</p> + <p> + Own Id: OTP-14119 Aux Id: PR-1263 </p> + </item> + <item> + <p> + A process calling <seealso + marker="erlang#system_flag_multi_scheduling"><c>erlang:system_flag(multi_scheduling, + block)</c></seealso> could end up hanging forever in the + call.</p> + <p> + Own Id: OTP-14121</p> + </item> + <item> + <p>Dirty scheduler bug fixes:</p> <list> <item><p>Fixed + call time tracing of process being scheduled on dirty + scheduler.</p></item> <item><p>GC info from dirty + schedulers.</p></item> <item><p>Multi scheduling block + with dirty schedulers could crash the runtime + system.</p></item> <item><p>Process structures could be + removed prematurely.</p></item> <item><p>GC on dirty + scheduler could crash the runtime system.</p></item> + <item><p>Termination of a process executing on a dirty + scheduler could cause a runtime system crash.</p></item> + </list> + <p> + Own Id: OTP-14122</p> + </item> + <item> + <p> + Fixed crash that occurred when writing timer data to a + crash dump.</p> + <p> + Own Id: OTP-14133</p> + </item> + <item> + <p> + A literal area could be removed while still referred from + processes.</p> + <p> + Own Id: OTP-14134</p> + </item> + <item> + <p> + Fixed a bug in the garbage collector that could crash the + runtime system.</p> + <p> + Own Id: OTP-14135</p> + </item> + <item> + <p> + Fixed a bug in call-time trace for NIFs which caused + tracing to erroneously be started multiple times for one + call.</p> + <p> + Own Id: OTP-14136</p> + </item> + <item> + <p> + Remove a debug printout and an unnecessary garbage + collection when handling exceptions in hipe compiled + code.</p> + <p> + Own Id: OTP-14153</p> + </item> + <item> + <p> + Fix bug in tracing of garbage collection that could cause + VM crash. Bug exists since OTP 19.0.</p> + <p> + Own Id: OTP-14154</p> + </item> + <item> + <p> + Fix bug in <c>binary_to_term</c> for binaries created by + <c>term_to_binary </c> with option <c>compressed</c>. The + bug can cause <c>badarg</c> exception for a valid binary + when Erlang VM is linked against a <c>zlib</c> library of + version 1.2.9 or newer. Bug exists since OTP 17.0.</p> + <p> + Own Id: OTP-14159 Aux Id: ERL-340 </p> + </item> + <item> + <p> + Fix suspension of schedulers when generating a crashdump.</p> + <p> + Own Id: OTP-14164</p> + </item> + <item> + <p>NIF resources was not handled in a thread-safe manner + in the runtime system without SMP support.</p> + <p>As a consequence of this fix, the following driver + functions are now thread-safe also in the runtime system + without SMP support:</p> <list> + <item><p><c>driver_free_binary()</c></p></item> + <item><p><c>driver_realloc_binary()</c></p></item> + <item><p><c>driver_binary_get_refc()</c></p></item> + <item><p><c>driver_binary_inc_refc()</c></p></item> + <item><p><c>driver_binary_dec_refc()</c></p></item> + </list> + <p> + Own Id: OTP-14202</p> + </item> + <item> + <p> + Fix <c>erlang:round/1</c> for large floating point + numbers with an odd absolute value between <c>(1 bsl + 52)</c> and <c>(1 bsl 53)</c>. 
The result was falsely + calculated as the next higher even number even though all + integer values up to <c>(1 bsl 53)</c> can be represented + as floats with full precision.</p> + <p> + Own Id: OTP-14227</p> + </item> + <item> + <p> + Add size of literals to module code size in crash dump + and <c>(l)oaded</c> command in break menu like it used to + be before OTP-19.0.</p> + <p> + Own Id: OTP-14228</p> + </item> + <item> + <p> + Fix potential bug in <c>enif_send</c> when called without + a process context and with argument <c>msg_env</c> as + <c>NULL</c>.</p> + <p> + Own Id: OTP-14229</p> + </item> + <item> + <p> + Fix bug where passing an appendable binary to + <c>erlang:port_control()</c> could crash the emulator.</p> + <p> + Own Id: OTP-14231</p> + </item> + <item> + <p> + Receive expressions with timeout in the Erlang shell + could cause a VM crash.</p> + <p> + Own Id: OTP-14241 Aux Id: ERL-365 </p> + </item> + </list> + </section> + + + <section><title>Improvements and New Features</title> + <list> + <item> + <p> + A received SIGTERM signal to beam will generate a + <c>'stop'</c> message to the <c>init</c> process and + terminate the Erlang VM nicely. This is equivalent to + calling <c>init:stop/0</c>.</p> + <p> + Own Id: OTP-14085</p> + </item> + <item> + <p> + Workaround for buggy Android implementation of + <c>PTHREAD_STACK_MIN</c> causing build of runtime system + to crash on undeclared <c>PAGE_SIZE</c>.</p> + <p> + Own Id: OTP-14165 Aux Id: ERL-319 </p> + </item> + <item> + <p> + Add configure option --without-thread-names that removes + the naming of individual emulator threads.</p> + <p> + Own Id: OTP-14234</p> + </item> + <item> + <p> + Add warning in documentation of <c>zlib:deflateInit/6</c> + about option <c>WindowsBits</c> values 8 and -8.</p> + <p> + Own Id: OTP-14254 Aux Id: ERL-362 </p> + </item> + </list> + </section> + +</section> + <section><title>Erts 8.2.2</title> <section><title>Fixed Bugs and Malfunctions</title> diff --git a/erts/doc/src/zlib.xml b/erts/doc/src/zlib.xml index e1924fffee..04fcc4285d 100644 --- a/erts/doc/src/zlib.xml +++ b/erts/doc/src/zlib.xml @@ -315,6 +315,15 @@ list_to_binary([B1,B2])</pre> <c><anno>WindowBits</anno></c> value suppresses the zlib header (and checksum) from the stream. Notice that the zlib source mentions this only as a undocumented feature.</p> + <warning> + <p>Due to a known bug in the underlying zlib library, <c>WindowBits</c> values 8 and -8 + do not work as expected. In zlib versions before 1.2.9 values + 8 and -8 are automatically changed to 9 and -9. <em>From zlib version 1.2.9 + value -8 is rejected</em> causing <c>zlib:deflateInit/6</c> to fail + (8 is still changed to 9). It also seem possible that future versions + of zlib may fix this bug and start accepting 8 and -8 as is.</p> + <p>Conclusion: Avoid values 8 and -8 unless you know your zlib version supports them.</p> + </warning> </item> <tag><c><anno>MemLevel</anno></c></tag> <item> diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index 18fd7f320b..7ea0111e59 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -656,6 +656,10 @@ generate: $(TTF_DIR)/GENERATED $(PRELOAD_SRC) $(TTF_DIR)/GENERATED: $(GENERATE) $(gen_verbose)echo $? 
>$(TTF_DIR)/GENERATED + +# Regenerate if Makefile has changed +$(GENERATE): $(TARGET)/Makefile + endif $(TARGET)/erlang_dtrace.h: beam/erlang_dtrace.d diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 8be0f58227..6010c17c17 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -850,6 +850,15 @@ do { \ } while (0) #endif +#define IsTaggedTuple(Src,Arityval,Tag,Fail) \ + do { \ + if (!(is_tuple(Src) && \ + (tuple_val(Src))[0] == Arityval && \ + (tuple_val(Src))[1] == Tag)) { \ + Fail; \ + } \ + } while (0) + #define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; } #define IsBinary(Src, Fail) \ @@ -3035,10 +3044,12 @@ do { \ GetArg2(2, Op1, Op2); if (is_both_small(Op1, Op2)) { /* - * We could extract the tag from one argument, but a tag extraction - * could mean a shift. Therefore, play it safe here. + * TAG ^ TAG == 0. + * + * Therefore, we perform the XOR operation on the tagged values, + * and OR in the tag bits. */ - Eterm result = make_small(signed_val(Op1) ^ signed_val(Op2)); + Eterm result = (Op1 ^ Op2) | make_small(0); StoreBifResult(4, result); } DO_OUTLINED_ARITH_2(bxor, Op1, Op2); diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 48206a75a8..6eea963016 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -6321,7 +6321,7 @@ erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info) */ magic = erts_alloc_loader_state(); stp = ERTS_MAGIC_BIN_DATA(magic); - hipe_code = erts_alloc(ERTS_ALC_T_HIPE, sizeof(*hipe_code)); + hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code)); if (!is_internal_magic_ref(hipe_magic_bin) || !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin), @@ -6556,7 +6556,7 @@ erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info) } error: - erts_free(ERTS_ALC_T_HIPE, hipe_code); + erts_free(ERTS_ALC_T_HIPE_LL, hipe_code); erts_free_aligned_binary_bytes(temp_alloc); free_loader_state(magic); BIF_ERROR(p, BADARG); @@ -6583,7 +6583,7 @@ int erts_commit_hipe_patch_load(Eterm hipe_magic_bin) /* * Initialise HiPE module */ - hipe_code = erts_alloc(ERTS_ALC_T_HIPE, sizeof(*hipe_code)); + hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code)); hipe_code->text_segment = hipe_stp->text_segment; hipe_code->text_segment_size = hipe_stp->text_segment_size; hipe_code->data_segment = hipe_stp->data_segment; diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 614bedab12..2d37f977c0 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -4254,6 +4254,132 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } +BIF_RETTYPE list_to_ref_1(BIF_ALIST_1) +{ + /* + * A valid reference is on the format + * "#Ref<N.X.Y.Z>" where N, X, Y, and Z are + * 32-bit integers (i.e., max 10 characters). 
+ */ + Eterm *hp; + Eterm res; + Uint32 refn[ERTS_MAX_REF_NUMBERS]; + int n = 0; + Uint ints[1 + ERTS_MAX_REF_NUMBERS] = {0}; + char* cp; + Sint i; + DistEntry *dep = NULL; + char buf[5 /* #Ref< */ + + (1 + ERTS_MAX_REF_NUMBERS)*(10 + 1) /* N.X.Y.Z> */ + + 1 /* \0 */]; + + /* walk down the list and create a C string */ + if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0) + goto bad; + + buf[i] = '\0'; /* null terminal */ + + cp = &buf[0]; + if (*cp++ != '#') goto bad; + if (*cp++ != 'R') goto bad; + if (*cp++ != 'e') goto bad; + if (*cp++ != 'f') goto bad; + if (*cp++ != '<') goto bad; + + for (i = 0; i < sizeof(ints)/sizeof(Uint); i++) { + if (*cp < '0' || *cp > '9') goto bad; + + while (*cp >= '0' && *cp <= '9') { + ints[i] = 10*ints[i] + (*cp - '0'); + cp++; + } + + n++; + if (ints[i] > ~((Uint32) 0)) goto bad; + if (*cp == '>') break; + if (*cp++ != '.') goto bad; + } + + if (*cp++ != '>') goto bad; + if (*cp != '\0') goto bad; + + if (n < 2) goto bad; + + for (n = 0; i > 0; i--) + refn[n++] = (Uint32) ints[i]; + + ASSERT(n <= ERTS_MAX_REF_NUMBERS); + + dep = erts_channel_no_to_dist_entry(ints[0]); + + if (!dep) + goto bad; + + if(dep == erts_this_dist_entry) { + ErtsMagicBinary *mb; + Uint32 sid; + if (refn[0] > MAX_REFERENCE) goto bad; + if (n != ERTS_REF_NUMBERS) goto bad; + sid = erts_get_ref_numbers_thr_id(refn); + if (sid > erts_no_schedulers) goto bad; + mb = erts_magic_ref_lookup_bin(refn); + if (mb) { + hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE); + res = erts_mk_magic_ref(&hp, &BIF_P->off_heap, + (Binary *) mb); + } + else { + hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE); + write_ref_thing(hp, refn[0], refn[1], refn[2]); + res = make_internal_ref(hp); + } + } + else { + ExternalThing *etp; + ErlNode *enp; + Uint hsz; + int j; + + if (is_nil(dep->cid)) + goto bad; + + enp = erts_find_or_insert_node(dep->sysname, dep->creation); + ASSERT(enp != erts_this_node); + + hsz = EXTERNAL_THING_HEAD_SIZE; +#if defined(ARCH_64) + hsz += n/2 + 1; +#else + hsz += n; +#endif + + etp = (ExternalThing *) HAlloc(BIF_P, hsz); + etp->header = make_external_ref_header(n/2); + etp->next = BIF_P->off_heap.first; + etp->node = enp; + i = 0; +#if defined(ARCH_64) + etp->data.ui32[i] = n; +#endif + for (j = 0; j < n; j++) { + etp->data.ui32[i] = refn[j]; + i++; + } + + BIF_P->off_heap.first = (struct erl_off_heap_header*) etp; + res = make_external_ref(etp); + } + + erts_deref_dist_entry(dep); + BIF_RET(res); + + bad: + if (dep) + erts_deref_dist_entry(dep); + BIF_ERROR(BIF_P, BADARG); +} + + /**********************************************************************/ BIF_RETTYPE group_leader_0(BIF_ALIST_0) diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index ce2cffa498..4140938210 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -88,6 +88,7 @@ bif erlang:list_to_binary/1 bif erlang:list_to_float/1 bif erlang:list_to_integer/1 bif erlang:list_to_pid/1 +bif erlang:list_to_ref/1 bif erlang:list_to_tuple/1 bif erlang:loaded/0 bif erlang:localtime/0 @@ -324,7 +325,7 @@ bif erlang:match_spec_test/3 # Bifs in ets module. 
# -bif ets:all/0 +bif ets:internal_request_all/0 bif ets:new/2 bif ets:delete/1 bif ets:delete/2 @@ -360,6 +361,7 @@ bif ets:select_reverse/1 bif ets:select_reverse/2 bif ets:select_reverse/3 bif ets:select_delete/2 +bif ets:select_replace/2 bif ets:match_spec_compile/1 bif ets:match_spec_run_r/3 diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 4dad5736c5..0b40d70cb7 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -512,6 +512,8 @@ do_break(void) erts_free_read_env(mode); #endif /* __WIN32__ */ + ASSERT(erts_smp_thr_progress_is_blocking()); + erts_printf("\n" "BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n" " (v)ersion (k)ill (D)b-tables (d)istribution\n"); diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index e567eabc82..264ba89e8b 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -286,11 +286,6 @@ do { \ (dst) = result; \ } while(0) -#define BOXED_VISITED_MASK ((Eterm) 3) -#define BOXED_VISITED ((Eterm) 1) -#define BOXED_SHARED_UNPROCESSED ((Eterm) 2) -#define BOXED_SHARED_PROCESSED ((Eterm) 3) - #define COUNT_OFF_HEAP (0) /* diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 3d5de72ee7..43f43f9034 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -227,6 +227,7 @@ type DB_DMC_ERROR ETS ETS db_dmc_error type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info type DB_TERM ETS ETS db_term type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state +type ETS_ALL_REQ SHORT_LIVED ETS ets_all_request type INSTR_INFO LONG_LIVED SYSTEM instr_info type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf @@ -350,8 +351,9 @@ type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths +if hipe -# Currently most hipe code use this type. 
-type HIPE SYSTEM SYSTEM hipe_data +type HIPE_LL LONG_LIVED SYSTEM hipe_long_lived +type HIPE_SL SHORT_LIVED SYSTEM hipe_short_lived +type HIPE_STK STANDARD SYSTEM hipe_nstack +if exec_alloc type HIPE_EXEC EXEC CODE hipe_code diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h index 473c7686e5..c884a5040d 100644 --- a/erts/emulator/beam/erl_async.h +++ b/erts/emulator/beam/erl_async.h @@ -27,7 +27,6 @@ extern int erts_async_max_threads; #define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */ extern int erts_async_thread_suggested_stack_size; -#ifdef USE_THREADS #ifdef ERTS_SMP /* @@ -47,6 +46,10 @@ extern int erts_async_thread_suggested_stack_size; # define ERTS_USE_ASYNC_READY_Q 0 #endif +#ifndef USE_THREADS +# undef ERTS_USE_ASYNC_READY_Q +# define ERTS_USE_ASYNC_READY_Q 0 +#endif /* !USE_THREADS */ #if ERTS_USE_ASYNC_READY_Q int erts_check_async_ready(void *); int erts_async_ready_clean(void *, void *); @@ -58,10 +61,7 @@ void *erts_get_async_ready_queue(Uint sched_id); #endif #endif /* ERTS_USE_ASYNC_READY_Q */ -#endif /* USE_THREADS */ - void erts_init_async(void); void erts_exit_flush_async(void); - #endif /* ERL_ASYNC_H__ */ diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 06a73ffea5..025f99330a 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -3634,10 +3634,6 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) BIF_RET(TUPLE2(hp, make_small((Uint) words), erts_ets_hash_sizeof_ext_segtab())); } - else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { - /* Used by ets_SUITE (stdlib) */ - BIF_RET(erts_ets_get_meta_state(BIF_P)); - } else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) { /* Used by driver_SUITE (emulator) */ Uint sz, *szp; @@ -4398,10 +4394,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } BIF_RET(am_ok); } - else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { - /* Used by ets_SUITE (stdlib) */ - BIF_RET(erts_ets_restore_meta_state(BIF_P, BIF_ARG_2)); - } else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) { if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) { Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 9f077dd407..378328856d 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -19,9 +19,7 @@ */ /* - * This file contains the bif interface functions and - * the handling of the "meta tables" ie the tables of - * db tables. + * This file contains the 'ets' bif interface functions. */ /* @@ -43,6 +41,7 @@ #include "erl_db.h" #include "bif.h" #include "big.h" +#include "erl_binary.h" erts_smp_atomic_t erts_ets_misc_mem_size; @@ -74,62 +73,226 @@ enum DbIterSafety { #define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP)) +/* + * "fixed_tabs": list of all fixed tables for a process + */ +#ifdef DEBUG +static int fixed_tabs_find(DbFixation* first, DbFixation* fix); +#endif -/* -** The main meta table, containing all ets tables. 
-*/ -#ifdef ERTS_SMP +static void fixed_tabs_insert(Process* p, DbFixation* fix) +{ + DbFixation* first = erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES); + + if (!first) { + fix->tabs.next = fix->tabs.prev = fix; + erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix); + } + else { + ASSERT(!fixed_tabs_find(first, fix)); + fix->tabs.prev = first->tabs.prev; + fix->tabs.next = first; + fix->tabs.prev->tabs.next = fix; + first->tabs.prev = fix; + } +} + +static void fixed_tabs_delete(Process *p, DbFixation* fix) +{ + if (fix->tabs.next == fix) { + DbFixation* old; + ASSERT(fix->tabs.prev == fix); + old = erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, NULL); + ASSERT(old == fix); (void)old; + } + else { + DbFixation *first = (DbFixation*) erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES); -#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8 -#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS) -#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1) + ASSERT(fixed_tabs_find(first, fix)); + fix->tabs.prev->tabs.next = fix->tabs.next; + fix->tabs.next->tabs.prev = fix->tabs.prev; -typedef union { - erts_smp_rwmtx_t rwmtx; - byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE( - sizeof(erts_smp_rwmtx_t))]; -} erts_meta_main_tab_lock_t; + if (fix == first) + erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix->tabs.next); + } +} -static erts_meta_main_tab_lock_t *meta_main_tab_locks; +#ifdef DEBUG +static int fixed_tabs_find(DbFixation* first, DbFixation* fix) +{ + DbFixation* p; + if (!first) { + first = (DbFixation*) erts_psd_get(fix->procs.p, ERTS_PSD_ETS_FIXED_TABLES); + } + p = first; + do { + if (p == fix) + return 1; + ASSERT(p->procs.p == fix->procs.p); + ASSERT(p->tabs.next->tabs.prev == p); + p = p->tabs.next; + } while (p != first); + return 0; +} #endif -static struct { - union { - DbTable *tb; /* Only directly readable if slot is ALIVE */ - UWord next_free; /* (index<<2)|1 if slot is FREE */ - }u; -} *meta_main_tab; -/* A slot in meta_main_tab can have three states: - * FREE : Free to use for new table. Part of linked free-list. - * ALIVE: Contains a table - * DEAD : Contains a table that is being removed. 
+ +/* + * fixing_procs: tree of all processes fixating a table */ -#define IS_SLOT_FREE(i) (meta_main_tab[(i)].u.next_free & 1) -#define IS_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free & 2) -#define IS_SLOT_ALIVE(i) (!(meta_main_tab[(i)].u.next_free & (1|2))) -#define GET_NEXT_FREE_SLOT(i) (meta_main_tab[(i)].u.next_free >> 2) -#define SET_NEXT_FREE_SLOT(i,next) (meta_main_tab[(i)].u.next_free = ((next)<<2)|1) -#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2) -#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */ +#define ERTS_RBT_PREFIX fixing_procs +#define ERTS_RBT_T DbFixation +#define ERTS_RBT_KEY_T Process* +#define ERTS_RBT_FLAGS_T int +#define ERTS_RBT_INIT_EMPTY_TNODE(T) \ + do { \ + (T)->procs.parent = NULL; \ + (T)->procs.right = NULL; \ + (T)->procs.left = NULL; \ + } while (0) +#define ERTS_RBT_IS_RED(T) ((T)->procs.is_red) +#define ERTS_RBT_SET_RED(T) ((T)->procs.is_red = 1) +#define ERTS_RBT_IS_BLACK(T) (!(T)->procs.is_red) +#define ERTS_RBT_SET_BLACK(T) ((T)->procs.is_red = 0) +#define ERTS_RBT_GET_FLAGS(T) ((T)->procs.is_red) +#define ERTS_RBT_SET_FLAGS(T, F) ((T)->procs.is_red = (F)) +#define ERTS_RBT_GET_PARENT(T) ((T)->procs.parent) +#define ERTS_RBT_SET_PARENT(T, P) ((T)->procs.parent = (P)) +#define ERTS_RBT_GET_RIGHT(T) ((T)->procs.right) +#define ERTS_RBT_SET_RIGHT(T, R) ((T)->procs.right = (R)) +#define ERTS_RBT_GET_LEFT(T) ((T)->procs.left) +#define ERTS_RBT_SET_LEFT(T, L) ((T)->procs.left = (L)) +#define ERTS_RBT_GET_KEY(T) ((T)->procs.p) +#define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY)) +#define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY)) + +#define ERTS_RBT_WANT_INSERT +#define ERTS_RBT_WANT_LOOKUP +#define ERTS_RBT_WANT_DELETE +#define ERTS_RBT_WANT_FOREACH +#define ERTS_RBT_WANT_FOREACH_DESTROY +#ifdef DEBUG +# define ERTS_RBT_WANT_LOOKUP +#endif +#define ERTS_RBT_UNDEF + +#include "erl_rbtree.h" + +#ifdef HARDDEBUG +# error Do something useful with CHECK_TABLES maybe +#else +# define CHECK_TABLES() +#endif + -static ERTS_INLINE erts_smp_rwmtx_t * -get_meta_main_tab_lock(unsigned slot) +static void +send_ets_transfer_message(Process *c_p, Process *proc, + ErtsProcLocks *locks, + DbTable *tb, Eterm heir_data); +static void schedule_free_dbtable(DbTable* tb); +static void delete_sched_table(Process *c_p, DbTable *tb); + +static void table_dec_refc(DbTable *tb, erts_aint_t min_val) +{ + if (erts_smp_refc_dectest(&tb->common.refc, min_val) == 0) + schedule_free_dbtable(tb); +} + +static int +db_table_tid_destructor(Binary *unused) +{ + return 1; +} + +static ERTS_INLINE void +make_btid(DbTable *tb) +{ + Binary *btid = erts_create_magic_indirection(db_table_tid_destructor); + erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); + erts_smp_atomic_init_nob(tbref, (erts_aint_t) tb); + tb->common.btid = btid; + /* + * Table and magic indirection refer eachother, + * and table is refered once by being alive... 
+ */ + erts_smp_refc_init(&tb->common.refc, 2); + erts_refc_inc(&btid->refc, 1); +} + +static ERTS_INLINE DbTable* btid2tab(Binary* btid) +{ + erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); + return (DbTable *) erts_smp_atomic_read_nob(tbref); +} + +static DbTable * +tid2tab(Eterm tid) +{ + DbTable *tb; + Binary *btid; + erts_smp_atomic_t *tbref; + if (!is_internal_magic_ref(tid)) + return NULL; + + btid = erts_magic_ref2bin(tid); + if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor) + return NULL; + + tbref = erts_smp_binary_to_magic_indirection(btid); + tb = (DbTable *) erts_smp_atomic_read_nob(tbref); + + ASSERT(!tb || tb->common.btid == btid); + + return tb; +} + +static ERTS_INLINE int +is_table_alive(DbTable *tb) +{ + erts_smp_atomic_t *tbref; + DbTable *rtb; + + tbref = erts_smp_binary_to_magic_indirection(tb->common.btid); + rtb = (DbTable *) erts_smp_atomic_read_nob(tbref); + + ASSERT(!rtb || rtb == tb); + + return !!rtb; +} + +static ERTS_INLINE int +is_table_named(DbTable *tb) { #ifdef ERTS_SMP - return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx; + return tb->common.type & DB_NAMED_TABLE; #else - return NULL; + return tb->common.status & DB_NAMED_TABLE; #endif } -static erts_smp_spinlock_t meta_main_tab_main_lock; -static Uint meta_main_tab_first_free; /* Index of first free slot */ -static int meta_main_tab_cnt; /* Number of active tables */ -static int meta_main_tab_top; /* Highest ever used slot + 1 */ -static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */ -static Uint meta_main_tab_seq_incr; -static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */ + +static ERTS_INLINE void +tid_clear(Process *c_p, DbTable *tb) +{ + DbTable *rtb; + Binary *btid = tb->common.btid; + erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); + rtb = (DbTable *) erts_smp_atomic_xchg_nob(tbref, (erts_aint_t) NULL); + ASSERT(!rtb || tb == rtb); + if (rtb) { + table_dec_refc(tb, 1); + delete_sched_table(c_p, tb); + } +} + +static ERTS_INLINE Eterm +make_tid(Process *c_p, DbTable *tb) +{ + Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE); + return erts_mk_magic_ref(&hp, &c_p->off_heap, tb->common.btid); +} + /* ** The meta hash table of all NAMED ets tables @@ -181,8 +344,6 @@ int user_requested_db_max_tabs; int erts_ets_realloc_always_moves; int erts_ets_always_compress; static int db_max_tabs; -static DbTable *meta_pid_to_tab; /* Pid mapped to owned tables */ -static DbTable *meta_pid_to_fixed_tab; /* Pid mapped to fixed tables */ static Eterm ms_delete_all; static Eterm ms_delete_all_buff[8]; /* To compare with for deletion of all objects */ @@ -195,15 +356,13 @@ static void fix_table_locked(Process* p, DbTable* tb); static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind); static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data); static void free_heir_data(DbTable*); -static void free_fixations_locked(DbTable *tb); +static SWord free_fixations_locked(Process* p, DbTable *tb); -static int free_table_cont(Process *p, - DbTable *tb, - int first, - int clean_meta_tab); +static SWord free_table_continue(Process *p, DbTable *tb, SWord reds); static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb); static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1); static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1); +static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1); static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1); static 
BIF_RETTYPE ets_delete_trap(BIF_ALIST_1); static Eterm table_info(Process* p, DbTable* tb, Eterm What); @@ -218,6 +377,7 @@ static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3); */ Export ets_select_delete_continue_exp; Export ets_select_count_continue_exp; +Export ets_select_replace_continue_exp; Export ets_select_continue_exp; /* @@ -235,23 +395,16 @@ free_dbtable(void *vtb) erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), tb->common.fixations); } - erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n", - tb->common.id); - - erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n", - erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size)); - print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab); - - - erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n", - erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size)); - print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab); #endif #ifdef ERTS_SMP erts_smp_rwmtx_destroy(&tb->common.rwlock); erts_smp_mtx_destroy(&tb->common.fixlock); #endif ASSERT(is_immed(tb->common.heir_data)); + + if (tb->common.btid && erts_refc_dectest(&tb->common.btid->refc, 0) == 0) + erts_bin_free(tb->common.btid); + erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable)); } @@ -266,13 +419,163 @@ static void schedule_free_dbtable(DbTable* tb) * Caller is *not* allowed to access the specialized part * (hash or tree) of *tb after this function has returned. */ - ASSERT(erts_smp_refc_read(&tb->common.ref, 0) == 0); + ASSERT(erts_smp_refc_read(&tb->common.refc, 0) == 0); + ASSERT(erts_smp_refc_read(&tb->common.fix_count, 0) == 0); erts_schedule_thr_prgr_later_cleanup_op(free_dbtable, (void *) tb, &tb->release.data, sizeof(DbTable)); } +static ERTS_INLINE void +save_sched_table(Process *c_p, DbTable *tb) +{ + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); + DbTable *first; + + ASSERT(esdp); + esdp->ets_tables.count++; + erts_smp_refc_inc(&tb->common.refc, 1); + + first = esdp->ets_tables.clist; + if (!first) { + tb->common.all.next = tb->common.all.prev = tb; + esdp->ets_tables.clist = tb; + } + else { + tb->common.all.prev = first->common.all.prev; + tb->common.all.next = first; + tb->common.all.prev->common.all.next = tb; + first->common.all.prev = tb; + } +} + +static ERTS_INLINE void +remove_sched_table(ErtsSchedulerData *esdp, DbTable *tb) +{ + ErtsEtsAllYieldData *eaydp; + ASSERT(esdp); + ASSERT(erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid)) + == (Uint32) esdp->no); + + ASSERT(esdp->ets_tables.count > 0); + esdp->ets_tables.count--; + + eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all); + if (eaydp->ongoing) { + /* ets:all() op process list from last to first... 
*/ + if (eaydp->tab == tb) { + if (eaydp->tab == esdp->ets_tables.clist) + eaydp->tab = NULL; + else + eaydp->tab = tb->common.all.prev; + } + } + + if (tb->common.all.next == tb) { + ASSERT(tb->common.all.prev == tb); + ASSERT(esdp->ets_tables.clist == tb); + esdp->ets_tables.clist = NULL; + } + else { +#ifdef DEBUG + DbTable *tmp = esdp->ets_tables.clist; + do { + if (tmp == tb) break; + tmp = tmp->common.all.next; + } while (tmp != esdp->ets_tables.clist); + ASSERT(tmp == tb); +#endif + tb->common.all.prev->common.all.next = tb->common.all.next; + tb->common.all.next->common.all.prev = tb->common.all.prev; + + if (esdp->ets_tables.clist == tb) + esdp->ets_tables.clist = tb->common.all.next; + + } + + table_dec_refc(tb, 0); +} + +static void +scheduled_remove_sched_table(void *vtb) +{ + remove_sched_table(erts_get_scheduler_data(), (DbTable *) vtb); +} + +static void +delete_sched_table(Process *c_p, DbTable *tb) +{ + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); + Uint32 sid; + + ASSERT(esdp); + + ASSERT(tb->common.btid); + sid = erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid)); + ASSERT(1 <= sid && sid <= erts_no_schedulers); + if (sid == (Uint32) esdp->no) + remove_sched_table(esdp, tb); + else + erts_schedule_misc_aux_work((int) sid, scheduled_remove_sched_table, tb); +} + +static ERTS_INLINE void +save_owned_table(Process *c_p, DbTable *tb) +{ + DbTable *first; + + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + + first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); + + erts_smp_refc_inc(&tb->common.refc, 1); + + if (!first) { + tb->common.owned.next = tb->common.owned.prev = tb; + erts_psd_set(c_p, ERTS_PSD_ETS_OWNED_TABLES, tb); + } + else { + tb->common.owned.prev = first->common.owned.prev; + tb->common.owned.next = first; + tb->common.owned.prev->common.owned.next = tb; + first->common.owned.prev = tb; + } + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); +} + +static ERTS_INLINE void +delete_owned_table(Process *p, DbTable *tb) +{ + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + if (tb->common.owned.next == tb) { + DbTable* old; + ASSERT(tb->common.owned.prev == tb); + old = erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, NULL); + ASSERT(old == tb); (void)old; + } + else { + DbTable *first = (DbTable*) erts_psd_get(p, ERTS_PSD_ETS_OWNED_TABLES); +#ifdef DEBUG + DbTable *tmp = first; + do { + if (tmp == tb) break; + tmp = tmp->common.owned.next; + } while (tmp != first); + ASSERT(tmp == tb); +#endif + tb->common.owned.prev->common.owned.next = tb->common.owned.next; + tb->common.owned.next->common.owned.prev = tb->common.owned.prev; + + if (tb == first) + erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next); + } + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + + table_dec_refc(tb, 1); +} + + static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock, char *rwname, char* fixname) { @@ -294,7 +597,6 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock, static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind) { #ifdef ERTS_SMP - ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab); if (tb->common.type & DB_FINE_LOCKED) { if (kind == LCK_WRITE) { erts_smp_rwmtx_rwlock(&tb->common.rwlock); @@ -327,8 +629,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) * to follow the tb pointer! 
*/ #ifdef ERTS_SMP - ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab); - if (tb->common.type & DB_FINE_LOCKED) { if (kind == LCK_WRITE) { ASSERT(tb->common.is_thread_safe); @@ -354,20 +654,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) #endif } - -static ERTS_INLINE void db_meta_lock(DbTable* tb, db_lock_kind_t kind) -{ - ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab); - ASSERT(kind != LCK_WRITE); - /* As long as we only lock for READ we don't have to lock at all. */ -} - -static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind) -{ - ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab); - ASSERT(kind != LCK_WRITE); -} - static ERTS_INLINE DbTable* db_get_table_aux(Process *p, Eterm id, @@ -375,7 +661,7 @@ DbTable* db_get_table_aux(Process *p, db_lock_kind_t kind, int meta_already_locked) { - DbTable *tb = NULL; + DbTable *tb; erts_smp_rwmtx_t *mtl = NULL; /* @@ -385,23 +671,7 @@ DbTable* db_get_table_aux(Process *p, */ ASSERT(erts_get_scheduler_data()); - if (is_small(id)) { - Uint slot = unsigned_val(id) & meta_main_tab_slot_mask; - if (!meta_already_locked) { - mtl = get_meta_main_tab_lock(slot); - erts_smp_rwmtx_rlock(mtl); - } -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - else { - erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot); - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl) - || erts_lc_rwmtx_is_rwlocked(test_mtl)); - } -#endif - if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) - tb = meta_main_tab[slot].u.tb; - } - else if (is_atom(id)) { + if (is_atom(id)) { struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl); if (!meta_already_locked) erts_smp_rwmtx_rlock(mtl); @@ -410,7 +680,7 @@ DbTable* db_get_table_aux(Process *p, || erts_lc_rwmtx_is_rwlocked(mtl)); mtl = NULL; } - + tb = NULL; if (bucket->pu.tb != NULL) { if (is_atom(bucket->u.name_atom)) { /* single */ if (bucket->u.name_atom == id) @@ -428,11 +698,13 @@ DbTable* db_get_table_aux(Process *p, } } } + else + tb = tid2tab(id); + if (tb) { db_lock(tb, kind); - if (tb->common.id != id - || ((tb->common.status & what) == 0 - && p->common.id != tb->common.owner)) { + if ((tb->common.status & what) == 0 + && p->common.id != tb->common.owner) { db_unlock(tb, kind); tb = NULL; } @@ -451,18 +723,6 @@ DbTable* db_get_table(Process *p, return db_get_table_aux(p, id, what, kind, 0); } -/* Requires meta_main_tab_locks[slot] locked. 
-*/ -static ERTS_INLINE void free_slot(int slot) -{ - ASSERT(!IS_SLOT_FREE(slot)); - erts_smp_spin_lock(&meta_main_tab_main_lock); - SET_NEXT_FREE_SLOT(slot,meta_main_tab_first_free); - meta_main_tab_first_free = slot; - meta_main_tab_cnt--; - erts_smp_spin_unlock(&meta_main_tab_main_lock); -} - static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock) { int ret = 0; @@ -527,9 +787,10 @@ static int remove_named_tab(DbTable *tb, int have_lock) { int ret = 0; erts_smp_rwmtx_t* rwlock; - Eterm name_atom = tb->common.id; + Eterm name_atom = tb->common.the_name; struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom, &rwlock); + ASSERT(is_table_named(tb)); #ifdef ERTS_SMP if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) { db_unlock(tb, LCK_WRITE); @@ -600,11 +861,11 @@ done: */ static ERTS_INLINE void local_fix_table(DbTable* tb) { - erts_smp_refc_inc(&tb->common.ref, 1); + erts_smp_refc_inc(&tb->common.fix_count, 1); } static ERTS_INLINE void local_unfix_table(DbTable* tb) { - if (erts_smp_refc_dectest(&tb->common.ref, 0) == 0) { + if (erts_smp_refc_dectest(&tb->common.fix_count, 0) == 0) { ASSERT(IS_HASH_TABLE(tb->common.status)); db_unfix_table_hash(&(tb->hash)); } @@ -1244,6 +1505,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) { DbTable* tb; Eterm ret; + Eterm old_name; erts_smp_rwmtx_t *lck1, *lck2; #ifdef HARDDEBUG @@ -1260,12 +1522,10 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) (void) meta_name_tab_bucket(BIF_ARG_2, &lck1); - if (is_small(BIF_ARG_1)) { - Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask; - lck2 = get_meta_main_tab_lock(slot); - } - else if (is_atom(BIF_ARG_1)) { - (void) meta_name_tab_bucket(BIF_ARG_1, &lck2); + if (is_atom(BIF_ARG_1)) { + old_name = BIF_ARG_1; + named_tab: + (void) meta_name_tab_bucket(old_name, &lck2); if (lck1 == lck2) lck2 = NULL; else if (lck1 > lck2) { @@ -1275,7 +1535,16 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) } } else { - BIF_ERROR(BIF_P, BADARG); + tb = tid2tab(BIF_ARG_1); + if (!tb) + BIF_ERROR(BIF_P, BADARG); + else { + if (is_table_named(tb)) { + old_name = tb->common.the_name; + goto named_tab; + } + lck2 = NULL; + } } erts_smp_rwmtx_rwlock(lck1); @@ -1286,21 +1555,19 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) if (!tb) goto badarg; - if (is_not_atom(tb->common.id)) { /* Not a named table */ - tb->common.the_name = BIF_ARG_2; - goto done; - } + if (is_table_named(tb)) { + if (!insert_named_tab(BIF_ARG_2, tb, 1)) + goto badarg; - if (!insert_named_tab(BIF_ARG_2, tb, 1)) - goto badarg; - - if (!remove_named_tab(tb, 1)) - erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.id); - - tb->common.id = tb->common.the_name = BIF_ARG_2; + if (!remove_named_tab(tb, 1)) + erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.the_name); + ret = BIF_ARG_2; + } + else { /* Not a named table */ + ret = BIF_ARG_1; + } + tb->common.the_name = BIF_ARG_2; - done: - ret = tb->common.id; db_unlock(tb, LCK_WRITE); erts_smp_rwmtx_rwunlock(lck1); if (lck2) @@ -1324,7 +1591,6 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) BIF_RETTYPE ets_new_2(BIF_ALIST_2) { DbTable* tb = NULL; - int slot; Eterm list; Eterm val; Eterm ret; @@ -1339,9 +1605,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) #ifdef DEBUG int cret; #endif - DeclareTmpHeap(meta_tuple,3,BIF_P); DbTableMethod* meth; - erts_smp_rwmtx_t *mmtl; if (is_not_atom(BIF_ARG_1)) { BIF_ERROR(BIF_P, BADARG); @@ -1350,7 +1614,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } - status = DB_NORMAL | DB_SET | DB_PROTECTED; + status = DB_SET | 
DB_PROTECTED; keypos = 1; is_named = 0; #ifdef ERTS_SMP @@ -1433,6 +1697,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) } else if (val == am_named_table) { is_named = 1; + status |= DB_NAMED_TABLE; } else if (val == am_compressed) { is_compressed = 1; @@ -1487,7 +1752,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) tb->common.type = status & ERTS_ETS_TABLE_TYPES; /* Note, 'type' is *read only* from now on... */ #endif - erts_smp_refc_init(&tb->common.ref, 0); + erts_smp_refc_init(&tb->common.fix_count, 0); db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ), "db_tab", "db_tab_fix"); tb->common.keypos = keypos; @@ -1496,7 +1761,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) erts_smp_atomic_init_nob(&tb->common.nitems, 0); - tb->common.fixations = NULL; + tb->common.fixing_procs = NULL; tb->common.compress = is_compressed; #ifdef DEBUG @@ -1505,87 +1770,36 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) meth->db_create(BIF_P, tb); ASSERT(cret == DB_ERROR_NONE); - erts_smp_spin_lock(&meta_main_tab_main_lock); - - if (meta_main_tab_cnt >= db_max_tabs) { - erts_smp_spin_unlock(&meta_main_tab_main_lock); - erts_send_error_to_logger_str(BIF_P->group_leader, - "** Too many db tables **\n"); - free_heir_data(tb); - tb->common.meth->db_free_table(tb); - free_dbtable((void *) tb); - BIF_ERROR(BIF_P, SYSTEM_LIMIT); - } - - slot = meta_main_tab_first_free; - ASSERT(slot>=0 && slot<db_max_tabs); - meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot); - meta_main_tab_cnt++; - if (slot >= meta_main_tab_top) { - ASSERT(slot == meta_main_tab_top); - meta_main_tab_top = slot + 1; - } - - if (is_named) { - ret = BIF_ARG_1; - } - else { - ret = make_small(slot | meta_main_tab_seq_cnt); - meta_main_tab_seq_cnt += meta_main_tab_seq_incr; - ASSERT((unsigned_val(ret) & meta_main_tab_slot_mask) == slot); - } - erts_smp_spin_unlock(&meta_main_tab_main_lock); + make_btid(tb); - tb->common.id = ret; - tb->common.slot = slot; /* store slot for erase */ + if (is_named) + ret = BIF_ARG_1; + else + ret = make_tid(BIF_P, tb); - mmtl = get_meta_main_tab_lock(slot); - erts_smp_rwmtx_rwlock(mmtl); - meta_main_tab[slot].u.tb = tb; - ASSERT(IS_SLOT_ALIVE(slot)); - erts_smp_rwmtx_rwunlock(mmtl); + save_sched_table(BIF_P, tb); if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) { - mmtl = get_meta_main_tab_lock(slot); - erts_smp_rwmtx_rwlock(mmtl); - free_slot(slot); - erts_smp_rwmtx_rwunlock(mmtl); + tid_clear(BIF_P, tb); db_lock(tb,LCK_WRITE); free_heir_data(tb); tb->common.meth->db_free_table(tb); - schedule_free_dbtable(tb); db_unlock(tb,LCK_WRITE); + table_dec_refc(tb, 0); BIF_ERROR(BIF_P, BADARG); } BIF_P->flags |= F_USING_DB; /* So we can remove tb if p dies */ + save_owned_table(BIF_P, tb); #ifdef HARDDEBUG erts_fprintf(stderr, "ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n", BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id, BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]); - erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n", - erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size)); - erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n", - erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size)); #endif - UseTmpHeap(3,BIF_P); - - db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC); - if (db_put_hash(meta_pid_to_tab, - TUPLE2(meta_tuple, - BIF_P->common.id, - make_small(slot)), - 0) != DB_ERROR_NONE) { - erts_exit(ERTS_ERROR_EXIT,"Could not update ets metadata."); - } - db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC); - - UnUseTmpHeap(3,BIF_P); - BIF_RET(ret); } @@ 
-1690,9 +1904,9 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3) */ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) { - int trap; + SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P); + SWord reds = initial_reds; DbTable* tb; - erts_smp_rwmtx_t *mmtl; #ifdef HARDDEBUG erts_fprintf(stderr, @@ -1715,7 +1929,6 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) tb->common.status |= DB_DELETE; if (tb->common.owner != BIF_P->common.id) { - DeclareTmpHeap(meta_tuple,3,BIF_P); /* * The table is being deleted by a process other than its owner. @@ -1723,50 +1936,33 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) * current process will be killed (e.g. by an EXIT signal), we will * now transfer the ownership to the current process. */ - UseTmpHeap(3,BIF_P); - db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC); - db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner, - make_small(tb->common.slot)); - - BIF_P->flags |= F_USING_DB; - tb->common.owner = BIF_P->common.id; - - db_put_hash(meta_pid_to_tab, - TUPLE2(meta_tuple, - BIF_P->common.id, - make_small(tb->common.slot)), - 0); - db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC); - UnUseTmpHeap(3,BIF_P); - } - mmtl = get_meta_main_tab_lock(tb->common.slot); -#ifdef ERTS_SMP - if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) { - /* - * We keep our increased refc over this op in order to - * prevent the table from disapearing. - */ - db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwlock(mmtl); - db_lock(tb, LCK_WRITE); + Process *rp = erts_proc_lookup_raw(tb->common.owner); + /* + * Process 'rp' might be exiting, but our table lock prevents it + * from terminating as it cannot complete erts_db_process_exiting(). + */ + ASSERT(!(ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state))); + + delete_owned_table(rp, tb); + BIF_P->flags |= F_USING_DB; + tb->common.owner = BIF_P->common.id; + save_owned_table(BIF_P, tb); } -#endif - /* We must keep the slot, to be found by db_proc_dead() if process dies */ - MARK_SLOT_DEAD(tb->common.slot); - erts_smp_rwmtx_rwunlock(mmtl); - if (is_atom(tb->common.id)) + + tid_clear(BIF_P, tb); + + if (is_table_named(tb)) remove_named_tab(tb, 0); /* disable inheritance */ free_heir_data(tb); tb->common.heir = am_none; - free_fixations_locked(tb); - - trap = free_table_cont(BIF_P, tb, 1, 1); + reds -= free_fixations_locked(BIF_P, tb); db_unlock(tb, LCK_WRITE); - if (trap) { + + if (free_table_continue(BIF_P, tb, reds) < 0) { /* * Package the DbTable* pointer into a bignum so that it can be safely * passed through a trap. We used to pass the DbTable* pointer directly @@ -1776,9 +1972,11 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) Eterm *hp = HAlloc(BIF_P, 2); hp[0] = make_pos_bignum_header(1); hp[1] = (Eterm) tb; + BUMP_ALL_REDS(BIF_P); BIF_TRAP1(&ets_delete_continue_exp, BIF_P, make_big(hp)); } else { + BUMP_REDS(BIF_P, (initial_reds - reds)); BIF_RET(am_true); } } @@ -1790,7 +1988,6 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3) { Process* to_proc = NULL; ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN; - DeclareTmpHeap(buf,5,BIF_P); Eterm to_pid = BIF_ARG_2; Eterm from_pid; DbTable* tb = NULL; @@ -1812,26 +2009,14 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3) goto badarg; /* or should we be idempotent? 
return false maybe */ } - UseTmpHeap(5,BIF_P); - db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC); - db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner, - make_small(tb->common.slot)); - + delete_owned_table(BIF_P, tb); to_proc->flags |= F_USING_DB; tb->common.owner = to_pid; - - db_put_hash(meta_pid_to_tab, - TUPLE2(buf,to_pid,make_small(tb->common.slot)), - 0); - db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC); + save_owned_table(to_proc, tb); db_unlock(tb,LCK_WRITE); - erts_send_message(BIF_P, to_proc, &to_locks, - TUPLE4(buf, am_ETS_TRANSFER, - tb->common.id, - from_pid, - BIF_ARG_3), - 0); + send_ets_transfer_message(BIF_P, to_proc, &to_locks, + tb, BIF_ARG_3); erts_smp_proc_unlock(to_proc, to_locks); UnUseTmpHeap(5,BIF_P); BIF_RET(am_true); @@ -2074,7 +2259,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) if (safety == ITER_UNSAFE) { local_fix_table(tb); } - cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_2, &ret); + cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret); if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) { fix_table_locked(BIF_P,tb); @@ -2101,46 +2286,254 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) return result; } -/* -** Return a list of tables on this node -*/ -BIF_RETTYPE ets_all_0(BIF_ALIST_0) +/* + * ets:all/0 + * + * ets:all() calls ets:internal_request_all/0 which + * requests information about all tables from + * each scheduler thread. Each scheduler replies + * to the calling process with information about + * existing tables created on that specific scheduler. + */ + +struct ErtsEtsAllReq_ { + erts_smp_atomic32_t refc; + Process *proc; + ErtsOIRefStorage ref; + ErtsEtsAllReqList list[1]; /* one per scheduler */ +}; + +#define ERTS_ETS_ALL_REQ_SIZE \ + (sizeof(ErtsEtsAllReq) \ + + (sizeof(ErtsEtsAllReqList) \ + * (erts_no_schedulers - 1))) + +typedef struct { + ErtsEtsAllReq *ongoing; + ErlHeapFragment *hfrag; + DbTable *tab; + ErtsEtsAllReq *queue; +} ErtsEtsAllData; + +/* Tables handled before yielding */ +#define ERTS_ETS_ALL_TB_YCNT 200 +/* + * Min yield count required before starting + * an operation that will require yield. + */ +#define ERTS_ETS_ALL_TB_YCNT_START 10 + +#ifdef DEBUG +/* Test yielding... 
*/ +#undef ERTS_ETS_ALL_TB_YCNT +#undef ERTS_ETS_ALL_TB_YCNT_START +#define ERTS_ETS_ALL_TB_YCNT 10 +#define ERTS_ETS_ALL_TB_YCNT_START 1 +#endif + +static int +ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp, + ErlHeapFragment **hfragpp, DbTable **tablepp, + int *yield_count_p) { - DbTable* tb; - Eterm previous; - int i; - Eterm* hp; - Eterm* hendp; - int t_tabs_cnt; - int t_top; - - erts_smp_spin_lock(&meta_main_tab_main_lock); - t_tabs_cnt = meta_main_tab_cnt; - t_top = meta_main_tab_top; - erts_smp_spin_unlock(&meta_main_tab_main_lock); - - hp = HAlloc(BIF_P, 2*t_tabs_cnt); - hendp = hp + 2*t_tabs_cnt; - - previous = NIL; - for(i = 0; i < t_top; i++) { - erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i); - erts_smp_rwmtx_rlock(mmtl); - if (IS_SLOT_ALIVE(i)) { - if (hp == hendp) { - /* Racing table creator, grab some more heap space */ - t_tabs_cnt = 10; - hp = HAlloc(BIF_P, 2*t_tabs_cnt); - hendp = hp + 2*t_tabs_cnt; - } - tb = meta_main_tab[i].u.tb; - previous = CONS(hp, tb->common.id, previous); - hp += 2; - } - erts_smp_rwmtx_runlock(mmtl); + ErtsEtsAllReq *reqp = *reqpp; + ErlHeapFragment *hfragp = *hfragpp; + int ycount = *yield_count_p; + DbTable *tb, *first; + Uint sz; + Eterm list, msg, ref, *hp; + ErlOffHeap *ohp; + ErtsMessage *mp; + + /* + * - save_sched_table() inserts at end of circular list. + * + * - This function scans from the end so we know that + * the amount of tables to scan wont grow even if we + * yield. + * + * - remove_sched_table() updates the table we yielded + * on if it removes it. + */ + + if (hfragp) { + /* Restart of a yielded operation... */ + ASSERT(hfragp->used_size < hfragp->alloc_size); + ohp = &hfragp->off_heap; + hp = &hfragp->mem[hfragp->used_size]; + list = *hp; + hfragp->used_size = hfragp->alloc_size; + first = esdp->ets_tables.clist; + tb = *tablepp; + } + else { + /* A new operation... */ + ASSERT(!*tablepp); + + /* Max heap size needed... */ + sz = esdp->ets_tables.count; + sz *= ERTS_MAGIC_REF_THING_SIZE + 2; + sz += 3 + ERTS_REF_THING_SIZE; + hfragp = new_message_buffer(sz); + + hp = &hfragp->mem[0]; + ohp = &hfragp->off_heap; + list = NIL; + first = esdp->ets_tables.clist; + tb = first ? first->common.all.prev : NULL; + } + + if (tb) { + while (1) { + if (is_table_alive(tb)) { + Eterm tid; + if (is_table_named(tb)) + tid = tb->common.the_name; + else + tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid); + list = CONS(hp, tid, list); + hp += 2; + } + + if (tb == first) + break; + + tb = tb->common.all.prev; + + if (--ycount <= 0) { + sz = hp - &hfragp->mem[0]; + ASSERT(hfragp->alloc_size > sz + 1); + *hp = list; + hfragp->used_size = sz; + *hfragpp = hfragp; + *reqpp = reqp; + *tablepp = tb; + *yield_count_p = 0; + return 1; /* Yield! 
*/ + } + } } - HRelease(BIF_P, hendp, hp); - BIF_RET(previous); + + ref = erts_oiref_storage_make_ref(&reqp->ref, &hp); + msg = TUPLE2(hp, ref, list); + hp += 3; + + sz = hp - &hfragp->mem[0]; + ASSERT(sz <= hfragp->alloc_size); + + hfragp = erts_resize_message_buffer(hfragp, sz, &msg, 1); + + mp = erts_alloc_message(0, NULL); + mp->data.heap_frag = hfragp; + + erts_queue_message(reqp->proc, 0, mp, msg, am_system); + + erts_proc_dec_refc(reqp->proc); + + if (erts_smp_atomic32_dec_read_nob(&reqp->refc) == 0) + erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp); + + *reqpp = NULL; + *hfragpp = NULL; + *tablepp = NULL; + *yield_count_p = ycount; + + return 0; +} + +int +erts_handle_yielded_ets_all_request(ErtsSchedulerData *esdp, + ErtsEtsAllYieldData *eaydp) +{ + int ix = (int) esdp->no - 1; + int yc = ERTS_ETS_ALL_TB_YCNT; + + while (1) { + if (!eaydp->ongoing) { + ErtsEtsAllReq *ongoing; + + if (!eaydp->queue) + return 0; /* All work completed! */ + + if (yc < ERTS_ETS_ALL_TB_YCNT_START && yc > esdp->ets_tables.count) + return 1; /* Yield! */ + + eaydp->ongoing = ongoing = eaydp->queue; + if (ongoing->list[ix].next == ongoing) + eaydp->queue = NULL; + else { + ongoing->list[ix].next->list[ix].prev = ongoing->list[ix].prev; + ongoing->list[ix].prev->list[ix].next = ongoing->list[ix].next; + eaydp->queue = ongoing->list[ix].next; + } + ASSERT(!eaydp->hfrag); + ASSERT(!eaydp->tab); + } + + if (ets_all_reply(esdp, &eaydp->ongoing, &eaydp->hfrag, &eaydp->tab, &yc)) + return 1; /* Yield! */ + } +} + +static void +handle_ets_all_request(void *vreq) +{ + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + ErtsEtsAllYieldData *eayp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all); + ErtsEtsAllReq *req = (ErtsEtsAllReq *) vreq; + + if (!eayp->ongoing && !eayp->queue) { + /* No ets:all() operations ongoing... */ + ErlHeapFragment *hf = NULL; + DbTable *tb = NULL; + int yc = ERTS_ETS_ALL_TB_YCNT; + if (ets_all_reply(esdp, &req, &hf, &tb, &yc)) { + /* Yielded... */ + ASSERT(hf); + eayp->ongoing = req; + eayp->hfrag = hf; + eayp->tab = tb; + erts_notify_new_aux_yield_work(esdp); + } + } + else { + /* Ongoing ets:all() operations; queue up this request... 
*/ + int ix = (int) esdp->no - 1; + if (!eayp->queue) { + req->list[ix].next = req; + req->list[ix].prev = req; + eayp->queue = req; + } + else { + req->list[ix].next = eayp->queue; + req->list[ix].prev = eayp->queue->list[ix].prev; + eayp->queue->list[ix].prev = req; + req->list[ix].prev->list[ix].next = req; + } + } +} + +BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0) +{ + Eterm ref = erts_make_ref(BIF_P); + ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ, + ERTS_ETS_ALL_REQ_SIZE); + erts_smp_atomic32_init_nob(&req->refc, + (erts_aint32_t) erts_no_schedulers); + erts_oiref_storage_save(&req->ref, ref); + req->proc = BIF_P; + erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers); + +#ifdef ERTS_SMP + if (erts_no_schedulers > 1) + erts_schedule_multi_misc_aux_work(1, + erts_no_schedulers, + handle_ets_all_request, + (void *) req); +#endif + + handle_ets_all_request((void *) req); + BIF_RET(ref); } @@ -2245,7 +2638,7 @@ ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3) if (safety == ITER_UNSAFE) { local_fix_table(tb); } - cret = tb->common.meth->db_select_chunk(p, tb, + cret = tb->common.meth->db_select_chunk(p, tb, arg1, arg2, chunk_size, 0 /* not reversed */, &ret); @@ -2414,8 +2807,7 @@ ets_select2(Process* p, Eterm arg1, Eterm arg2) local_fix_table(tb); } - cret = tb->common.meth->db_select(p, tb, arg2, - 0, &ret); + cret = tb->common.meth->db_select(p, tb, arg1, arg2, 0, &ret); if (DID_TRAP(p,ret) && safety != ITER_SAFE) { fix_table_locked(p, tb); @@ -2506,7 +2898,7 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2) if (safety == ITER_UNSAFE) { local_fix_table(tb); } - cret = tb->common.meth->db_select_count(BIF_P,tb,BIF_ARG_2, &ret); + cret = tb->common.meth->db_select_count(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, &ret); if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) { fix_table_locked(BIF_P, tb); @@ -2532,6 +2924,103 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2) return result; } +/* + ** This is for trapping, cannot be called directly. 
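The continuation BIF defined next is only reached by trapping: a select_replace that cannot finish within its reduction budget re-enters through it with saved state until the work is done. A hypothetical standalone model of that control flow, not the ERTS implementation, looks like this:

    #include <stdio.h>

    struct continuation { long next_index; long limit; long replaced; };

    /* Do at most 'budget' units of work; return 1 if more work remains. */
    static int select_replace_step(struct continuation *c, long budget)
    {
        while (c->next_index < c->limit && budget-- > 0) {
            c->replaced++;                 /* "replace one object" */
            c->next_index++;
        }
        return c->next_index < c->limit;   /* 1 means: trap and come back */
    }

    int main(void)
    {
        struct continuation c = { 0, 2500, 0 };
        while (select_replace_step(&c, 1000))
            ;                              /* the emulator would reschedule here */
        printf("replaced %ld objects\n", c.replaced);
        return 0;
    }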
+ */ +static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1) +{ + Process *p = BIF_P; + Eterm a1 = BIF_ARG_1; + BIF_RETTYPE result; + DbTable* tb; + int cret; + Eterm ret; + Eterm *tptr; + db_lock_kind_t kind = LCK_WRITE_REC; + + CHECK_TABLES(); + ASSERT(is_tuple(a1)); + tptr = tuple_val(a1); + ASSERT(arityval(*tptr) >= 1); + + if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) { + BIF_ERROR(p,BADARG); + } + + cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret); + + if(!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) { + unfix_table_locked(p, tb, &kind); + } + + db_unlock(tb, kind); + + switch (cret) { + case DB_ERROR_NONE: + ERTS_BIF_PREP_RET(result, ret); + break; + default: + ERTS_BIF_PREP_ERROR(result, p, BADARG); + break; + } + erts_match_set_release_result(p); + + return result; +} + + +BIF_RETTYPE ets_select_replace_2(BIF_ALIST_2) +{ + BIF_RETTYPE result; + DbTable* tb; + int cret; + Eterm ret; + enum DbIterSafety safety; + + CHECK_TABLES(); + + if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) { + BIF_ERROR(BIF_P, BADARG); + } + + if (tb->common.status & DB_BAG) { + /* Bag implementation presented both semantic consistency + and performance issues */ + db_unlock(tb, LCK_WRITE_REC); + BIF_ERROR(BIF_P, BADARG); + } + + safety = ITERATION_SAFETY(BIF_P,tb); + if (safety == ITER_UNSAFE) { + local_fix_table(tb); + } + cret = tb->common.meth->db_select_replace(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret); + + if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) { + fix_table_locked(BIF_P,tb); + } + if (safety == ITER_UNSAFE) { + local_unfix_table(tb); + } + db_unlock(tb, LCK_WRITE_REC); + + switch (cret) { + case DB_ERROR_NONE: + ERTS_BIF_PREP_RET(result, ret); + break; + case DB_ERROR_SYSRES: + ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT); + break; + default: + ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG); + break; + } + + erts_match_set_release_result(BIF_P); + + return result; +} + BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3) { @@ -2560,7 +3049,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3) if (safety == ITER_UNSAFE) { local_fix_table(tb); } - cret = tb->common.meth->db_select_chunk(BIF_P,tb, + cret = tb->common.meth->db_select_chunk(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, chunk_size, 1 /* reversed */, &ret); if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) { @@ -2610,7 +3099,7 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2) if (safety == ITER_UNSAFE) { local_fix_table(tb); } - cret = tb->common.meth->db_select(BIF_P,tb,BIF_ARG_2, + cret = tb->common.meth->db_select(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, 1 /*reversed*/, &ret); if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) { @@ -2701,7 +3190,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) */ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) { - if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) { + if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) { BIF_RET(am_undefined); } BIF_ERROR(BIF_P, BADARG); @@ -2763,7 +3252,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2) Eterm ret = THE_NON_VALUE; if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) { - if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) { + if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) { BIF_RET(am_undefined); } BIF_ERROR(BIF_P, BADARG); @@ -2853,7 +3342,6 @@ int erts_ets_rwmtx_spin_count = -1; void init_db(ErtsDbSpinCount db_spin_count) { - DbTable init_tb; int i; Eterm *hp; unsigned bits; @@ -2902,16 +3390,6 @@ void init_db(ErtsDbSpinCount db_spin_count) if (erts_ets_rwmtx_spin_count >= 0) rwmtx_opt.main_spincount = 
erts_ets_rwmtx_spin_count; - meta_main_tab_locks = - erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES, - sizeof(erts_meta_main_tab_lock_t) - * ERTS_META_MAIN_TAB_LOCK_TAB_SIZE); - - for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) { - erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt, - "meta_main_tab_slot", make_small(i)); - } - erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main"); for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) { erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt, "meta_name_tab", make_small(i)); @@ -2931,20 +3409,6 @@ void init_db(ErtsDbSpinCount db_spin_count) erts_exit(ERTS_ERROR_EXIT,"Max limit for ets tabled too high %u (max %u).", db_max_tabs, ((Uint)1)<<SMALL_BITS); } - meta_main_tab_slot_mask = (((Uint)1)<<bits) - 1; - meta_main_tab_seq_incr = (((Uint)1)<<bits); - - size = sizeof(*meta_main_tab)*db_max_tabs; - meta_main_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size); - ERTS_ETS_MISC_MEM_ADD(size); - - meta_main_tab_cnt = 0; - meta_main_tab_top = 0; - for (i=1; i<db_max_tabs; i++) { - SET_NEXT_FREE_SLOT(i-1,i); - } - SET_NEXT_FREE_SLOT(db_max_tabs-1, (Uint)-1); - meta_main_tab_first_free = 0; meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */ size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1); @@ -2959,70 +3423,6 @@ void init_db(ErtsDbSpinCount db_spin_count) db_initialize_hash(); db_initialize_tree(); - /*TT*/ - /* Create meta table invertion. */ - erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0); - meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, - &init_tb, - sizeof(DbTable)); - erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size, - erts_smp_atomic_read_nob(&init_tb.common.memory_size)); - - meta_pid_to_tab->common.id = NIL; - meta_pid_to_tab->common.the_name = am_true; - meta_pid_to_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED); -#ifdef ERTS_SMP - meta_pid_to_tab->common.type - = meta_pid_to_tab->common.status & ERTS_ETS_TABLE_TYPES; - /* Note, 'type' is *read only* from now on... */ - meta_pid_to_tab->common.is_thread_safe = 0; -#endif - meta_pid_to_tab->common.keypos = 1; - meta_pid_to_tab->common.owner = NIL; - erts_smp_atomic_init_nob(&meta_pid_to_tab->common.nitems, 0); - meta_pid_to_tab->common.slot = -1; - meta_pid_to_tab->common.meth = &db_hash; - meta_pid_to_tab->common.compress = 0; - - erts_smp_refc_init(&meta_pid_to_tab->common.ref, 0); - /* Neither rwlock or fixlock used - db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/ - - if (db_create_hash(NULL, meta_pid_to_tab) != DB_ERROR_NONE) { - erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables."); - } - - erts_smp_atomic_set_nob(&init_tb.common.memory_size, 0); - meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, - &init_tb, - sizeof(DbTable)); - erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size, - erts_smp_atomic_read_nob(&init_tb.common.memory_size)); - - meta_pid_to_fixed_tab->common.id = NIL; - meta_pid_to_fixed_tab->common.the_name = am_true; - meta_pid_to_fixed_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED); -#ifdef ERTS_SMP - meta_pid_to_fixed_tab->common.type - = meta_pid_to_fixed_tab->common.status & ERTS_ETS_TABLE_TYPES; - /* Note, 'type' is *read only* from now on... 
*/ - meta_pid_to_fixed_tab->common.is_thread_safe = 0; -#endif - meta_pid_to_fixed_tab->common.keypos = 1; - meta_pid_to_fixed_tab->common.owner = NIL; - erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.nitems, 0); - meta_pid_to_fixed_tab->common.slot = -1; - meta_pid_to_fixed_tab->common.meth = &db_hash; - meta_pid_to_fixed_tab->common.compress = 0; - - erts_smp_refc_init(&meta_pid_to_fixed_tab->common.ref, 0); - /* Neither rwlock or fixlock used - db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/ - - if (db_create_hash(NULL, meta_pid_to_fixed_tab) != DB_ERROR_NONE) { - erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables."); - } - /* Non visual BIF to trap to. */ erts_init_trap_export(&ets_select_delete_continue_exp, am_ets, am_atom_put("delete_trap",11), 1, @@ -3034,6 +3434,11 @@ void init_db(ErtsDbSpinCount db_spin_count) &ets_select_count_1); /* Non visual BIF to trap to. */ + erts_init_trap_export(&ets_select_replace_continue_exp, + am_ets, am_atom_put("replace_trap",11), 1, + &ets_select_replace_1); + + /* Non visual BIF to trap to. */ erts_init_trap_export(&ets_select_continue_exp, am_ets, am_atom_put("select_trap",11), 1, &ets_select_trap_1); @@ -3051,81 +3456,18 @@ void init_db(ErtsDbSpinCount db_spin_count) ms_delete_all = CONS(hp, ms_delete_all,NIL); } -#define ARRAY_CHUNK 100 - -typedef enum { - ErtsDbProcCleanupProgressTables, - ErtsDbProcCleanupProgressFixations, - ErtsDbProcCleanupProgressDone, -} ErtsDbProcCleanupProgress; - -typedef enum { - ErtsDbProcCleanupOpGetTables, - ErtsDbProcCleanupOpDeleteTables, - ErtsDbProcCleanupOpGetFixations, - ErtsDbProcCleanupOpDeleteFixations, - ErtsDbProcCleanupOpDone -} ErtsDbProcCleanupOperation; - -typedef struct { - ErtsDbProcCleanupProgress progress; - ErtsDbProcCleanupOperation op; - struct { - Eterm arr[ARRAY_CHUNK]; - int size; - int ix; - int clean_ix; - } slots; -} ErtsDbProcCleanupState; - - -static void -proc_exit_cleanup_tables_meta_data(Eterm pid, ErtsDbProcCleanupState *state) +void +erts_ets_sched_spec_data_init(ErtsSchedulerData *esdp) { - ASSERT(state->slots.clean_ix <= state->slots.ix); - if (state->slots.clean_ix < state->slots.ix) { - db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC); - if (state->slots.size < ARRAY_CHUNK - && state->slots.ix == state->slots.size) { - Eterm dummy; - db_erase_hash(meta_pid_to_tab,pid,&dummy); - } - else { - int ix; - /* Need to erase each explicitly */ - for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++) - db_erase_bag_exact2(meta_pid_to_tab, - pid, - state->slots.arr[ix]); - } - db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC); - state->slots.clean_ix = state->slots.ix; - } + ErtsEtsAllYieldData *eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all); + eaydp->ongoing = NULL; + eaydp->hfrag = NULL; + eaydp->tab = NULL; + eaydp->queue = NULL; + esdp->ets_tables.clist = NULL; + esdp->ets_tables.count = 0; } -static void -proc_exit_cleanup_fixations_meta_data(Eterm pid, ErtsDbProcCleanupState *state) -{ - ASSERT(state->slots.clean_ix <= state->slots.ix); - if (state->slots.clean_ix < state->slots.ix) { - db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC); - if (state->slots.size < ARRAY_CHUNK - && state->slots.ix == state->slots.size) { - Eterm dummy; - db_erase_hash(meta_pid_to_fixed_tab,pid,&dummy); - } - else { - int ix; - /* Need to erase each explicitly */ - for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++) - db_erase_bag_exact2(meta_pid_to_fixed_tab, - pid, - state->slots.arr[ix]); - } - 
db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC); - state->slots.clean_ix = state->slots.ix; - } -} /* In: Table LCK_WRITE ** Return TRUE : ok, table not mine and NOT locked anymore. @@ -3135,7 +3477,6 @@ static int give_away_to_heir(Process* p, DbTable* tb) { Process* to_proc; ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN; - DeclareTmpHeap(buf,5,p); Eterm to_pid; UWord heir_data; @@ -3179,19 +3520,12 @@ retry: erts_smp_proc_unlock(to_proc, to_locks); return 0; /* heir dead and pid reused, table still mine */ } - UseTmpHeap(5,p); - db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC); - db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner, - make_small(tb->common.slot)); + delete_owned_table(p, tb); to_proc->flags |= F_USING_DB; tb->common.owner = to_pid; - - db_put_hash(meta_pid_to_tab, - TUPLE2(buf,to_pid,make_small(tb->common.slot)), - 0); - db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC); - UnUseTmpHeap(5,p); + save_owned_table(to_proc, tb); + db_unlock(tb,LCK_WRITE); heir_data = tb->common.heir_data; if (!is_immed(heir_data)) { @@ -3199,17 +3533,100 @@ retry: ASSERT(arityval(*tpv) == 1); heir_data = tpv[1]; } - erts_send_message(p, to_proc, &to_locks, - TUPLE4(buf, - am_ETS_TRANSFER, - tb->common.id, - p->common.id, - heir_data), - 0); + send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data); erts_smp_proc_unlock(to_proc, to_locks); return !0; } +static void +send_ets_transfer_message(Process *c_p, Process *proc, + ErtsProcLocks *locks, + DbTable *tb, Eterm heir_data) +{ + Uint hsz, hd_sz; + ErtsMessage *mp; + Eterm *hp; + ErlOffHeap *ohp; + Eterm tid, hd_copy, msg, sender; + + hsz = 5; + if (!is_table_named(tb)) + hsz += ERTS_MAGIC_REF_THING_SIZE; + if (is_immed(heir_data)) + hd_sz = 0; + else { + hd_sz = size_object(heir_data); + hsz += hd_sz; + } + + mp = erts_alloc_message_heap(proc, locks, hsz, &hp, &ohp); + if (is_table_named(tb)) + tid = tb->common.the_name; + else + tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid); + if (!hd_sz) + hd_copy = heir_data; + else + hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp); + sender = c_p->common.id; + msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy); + erts_queue_message(proc, *locks, mp, msg, sender); +} + + +/* Auto-release fixation from exiting process */ +static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix) +{ + DbTable* tb = btid2tab(fix->tabs.btid); + SWord work = 0; + + ASSERT(fix->procs.p == p); (void)p; + if (tb) { + db_lock(tb, LCK_WRITE_REC); + if (!(tb->common.status & DB_DELETE)) { + erts_aint_t diff; + #ifdef ERTS_SMP + erts_smp_mtx_lock(&tb->common.fixlock); + #endif + + ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p)); + + diff = -((erts_aint_t) fix->counter); + erts_smp_refc_add(&tb->common.fix_count,diff,0); + fix->counter = 0; + + fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); + + #ifdef ERTS_SMP + erts_smp_mtx_unlock(&tb->common.fixlock); + #endif + if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) { + work += db_unfix_table_hash(&(tb->hash)); + } + + ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix)); + ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof(DbFixation), 0); + } + else { + ASSERT(fix->counter == 0); + } + db_unlock(tb, LCK_WRITE_REC); + } + else { + ASSERT(fix->counter == 0); + } + + if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) { + erts_bin_free(fix->tabs.btid); + } + erts_free(ERTS_ALC_T_DB_FIXATION, fix); + ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); + ++work; + + return work; +} + + /* * erts_db_process_exiting() is called when a process terminates. 
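The rewritten cleanup below is a reduction-bounded state machine: each invocation burns part of a budget, moves between phases, and reports whether it finished or wants to be rescheduled. A simplified standalone sketch under invented names (the real phases are the GET_OWNED_TABLE, FREE_OWNED_TABLE and UNFIX_TABLES cases that follow):

    enum phase { PHASE_TABLES, PHASE_FIXATIONS, PHASE_DONE };
    struct cleanup { enum phase phase; int tables_left; int fixations_left; };

    /* Returns 0 when completely done, 1 when it wants to be called again. */
    static int cleanup_step(struct cleanup *c, long reds)
    {
        while (reds > 0) {
            switch (c->phase) {
            case PHASE_TABLES:
                if (c->tables_left == 0) { c->phase = PHASE_FIXATIONS; break; }
                c->tables_left--;
                reds -= 50;                /* freeing a table costs many reductions */
                break;
            case PHASE_FIXATIONS:
                if (c->fixations_left == 0) { c->phase = PHASE_DONE; break; }
                c->fixations_left--;
                reds -= 10;
                break;
            case PHASE_DONE:
                return 0;
            }
        }
        return 1;                          /* budget spent: yield */
    }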
* It returns 0 when completely done, and !0 when it wants to @@ -3223,276 +3640,160 @@ retry: int erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks) { - ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate; + typedef struct { + enum { + GET_OWNED_TABLE, + FREE_OWNED_TABLE, + UNFIX_TABLES, + }op; + DbTable *tb; + } CleanupState; + CleanupState *state = (CleanupState *) c_p->u.terminate; Eterm pid = c_p->common.id; - ErtsDbProcCleanupState default_state; - int ret; + CleanupState default_state; + SWord initial_reds = ERTS_BIF_REDS_LEFT(c_p); + SWord reds = initial_reds; if (!state) { state = &default_state; - state->progress = ErtsDbProcCleanupProgressTables; - state->op = ErtsDbProcCleanupOpGetTables; + state->op = GET_OWNED_TABLE; + state->tb = NULL; } - while (!0) { + do { switch (state->op) { - case ErtsDbProcCleanupOpGetTables: - state->slots.size = ARRAY_CHUNK; - db_meta_lock(meta_pid_to_tab, LCK_READ); - ret = db_get_element_array(meta_pid_to_tab, - pid, - 2, - state->slots.arr, - &state->slots.size); - db_meta_unlock(meta_pid_to_tab, LCK_READ); - if (ret == DB_ERROR_BADKEY) { - /* Done with tables; now fixations */ - state->progress = ErtsDbProcCleanupProgressFixations; - state->op = ErtsDbProcCleanupOpGetFixations; - break; - } else if (ret != DB_ERROR_NONE) { - ERTS_DB_INTERNAL_ERROR("Inconsistent ets table metadata"); - } - - state->slots.ix = 0; - state->slots.clean_ix = 0; - state->op = ErtsDbProcCleanupOpDeleteTables; - /* Fall through */ - - case ErtsDbProcCleanupOpDeleteTables: - - while (state->slots.ix < state->slots.size) { - DbTable *tb = NULL; - Sint ix = unsigned_val(state->slots.arr[state->slots.ix]); - erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix); - erts_smp_rwmtx_rlock(mmtl); - if (!IS_SLOT_FREE(ix)) { - tb = GET_ANY_SLOT_TAB(ix); - ASSERT(tb); - } - erts_smp_rwmtx_runlock(mmtl); - if (tb) { - int do_yield; - db_lock(tb, LCK_WRITE); - /* Ownership may have changed since - we looked up the table. */ - if (tb->common.owner != pid) { - do_yield = 0; - db_unlock(tb, LCK_WRITE); - } - else if (tb->common.heir != am_none - && tb->common.heir != pid - && give_away_to_heir(c_p, tb)) { - do_yield = 0; - } - else { - int first_call; -#ifdef HARDDEBUG - erts_fprintf(stderr, - "erts_db_process_exiting(); Table: %T, " - "Process: %T\n", - tb->common.id, pid); -#endif - first_call = (tb->common.status & DB_DELETE) == 0; - if (first_call) { - /* Clear all access bits. */ - tb->common.status &= ~(DB_PROTECTED - | DB_PUBLIC - | DB_PRIVATE); - tb->common.status |= DB_DELETE; - - if (is_atom(tb->common.id)) - remove_named_tab(tb, 0); - - free_heir_data(tb); - free_fixations_locked(tb); - } - - do_yield = free_table_cont(c_p, tb, first_call, 0); - db_unlock(tb, LCK_WRITE); - } - if (do_yield) - goto yield; - } - state->slots.ix++; - if (ERTS_BIF_REDS_LEFT(c_p) <= 0) - goto yield; - } + case GET_OWNED_TABLE: { + DbTable* tb; + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + + if (!tb) { + /* Done with owned tables; now fixations */ + state->op = UNFIX_TABLES; + break; + } - proc_exit_cleanup_tables_meta_data(pid, state); - state->op = ErtsDbProcCleanupOpGetTables; - break; + ASSERT(tb != state->tb); + state->tb = tb; + db_lock(tb, LCK_WRITE); + /* + * Ownership may have changed since we looked up the table. 
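The ownership test that follows is the usual validate-after-locking idiom: the table was picked up without holding its lock, so the check must be repeated once the write lock is taken. A minimal pthread-based sketch of the idiom (an assumed example, not ERTS code):

    #include <pthread.h>

    struct table { pthread_mutex_t lock; unsigned long owner; };

    /* Returns 1 with the lock held if we still own the table, 0 otherwise. */
    static int lock_if_still_owner(struct table *t, unsigned long self)
    {
        pthread_mutex_lock(&t->lock);
        if (t->owner != self) {            /* someone took it over meanwhile */
            pthread_mutex_unlock(&t->lock);
            return 0;
        }
        return 1;                          /* caller must unlock when done */
    }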
+ */ + if (tb->common.owner != pid) { + db_unlock(tb, LCK_WRITE); + break; + } + if (tb->common.heir != am_none + && tb->common.heir != pid + && give_away_to_heir(c_p, tb)) { + break; + } + tid_clear(c_p, tb); + /* Clear all access bits. */ + tb->common.status &= ~(DB_PROTECTED | DB_PUBLIC | DB_PRIVATE); + tb->common.status |= DB_DELETE; + + if (is_table_named(tb)) + remove_named_tab(tb, 0); + + free_heir_data(tb); + reds -= free_fixations_locked(c_p, tb); + db_unlock(tb, LCK_WRITE); + state->op = FREE_OWNED_TABLE; + break; + } + case FREE_OWNED_TABLE: + reds = free_table_continue(c_p, state->tb, reds); + if (reds < 0) + goto yield; - case ErtsDbProcCleanupOpGetFixations: - state->slots.size = ARRAY_CHUNK; - db_meta_lock(meta_pid_to_fixed_tab, LCK_READ); - ret = db_get_element_array(meta_pid_to_fixed_tab, - pid, - 2, - state->slots.arr, - &state->slots.size); - db_meta_unlock(meta_pid_to_fixed_tab, LCK_READ); - - if (ret == DB_ERROR_BADKEY) { - /* Done */ - state->progress = ErtsDbProcCleanupProgressDone; - state->op = ErtsDbProcCleanupOpDone; - break; - } else if (ret != DB_ERROR_NONE) { - ERTS_DB_INTERNAL_ERROR("Inconsistent ets fix table metadata"); - } + state->op = GET_OWNED_TABLE; + break; - state->slots.ix = 0; - state->slots.clean_ix = 0; - state->op = ErtsDbProcCleanupOpDeleteFixations; - /* Fall through */ + case UNFIX_TABLES: { + DbFixation* fix; - case ErtsDbProcCleanupOpDeleteFixations: + fix = (DbFixation*) erts_psd_get(c_p, ERTS_PSD_ETS_FIXED_TABLES); - while (state->slots.ix < state->slots.size) { - DbTable *tb = NULL; - Sint ix = unsigned_val(state->slots.arr[state->slots.ix]); - erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix); - erts_smp_rwmtx_rlock(mmtl); - if (IS_SLOT_ALIVE(ix)) { - tb = meta_main_tab[ix].u.tb; - ASSERT(tb); - } - erts_smp_rwmtx_runlock(mmtl); - if (tb) { - int reds = 0; - - db_lock(tb, LCK_WRITE_REC); - if (!(tb->common.status & DB_DELETE)) { - DbFixation** pp; - - #ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); - #endif - reds = 10; - - for (pp = &tb->common.fixations; *pp != NULL; - pp = &(*pp)->next) { - if ((*pp)->pid == pid) { - DbFixation* fix = *pp; - erts_aint_t diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&tb->common.ref,diff,0); - *pp = fix->next; - erts_db_free(ERTS_ALC_T_DB_FIXATION, - tb, fix, sizeof(DbFixation)); - ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); - break; - } - } - #ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); - #endif - if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) { - db_unfix_table_hash(&(tb->hash)); - reds += 40; - } - } - db_unlock(tb, LCK_WRITE_REC); - BUMP_REDS(c_p, reds); - } - state->slots.ix++; - if (ERTS_BIF_REDS_LEFT(c_p) <= 0) - goto yield; - } + if (!fix) { + /* Done */ - proc_exit_cleanup_fixations_meta_data(pid, state); - state->op = ErtsDbProcCleanupOpGetFixations; - break; + if (state != &default_state) + erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state); + c_p->u.terminate = NULL; - case ErtsDbProcCleanupOpDone: + BUMP_REDS(c_p, (initial_reds - reds)); + return 0; + } - if (state != &default_state) - erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state); - c_p->u.terminate = NULL; - return 0; + fixed_tabs_delete(c_p, fix); + reds -= proc_cleanup_fixed_table(c_p, fix); + break; + } default: ERTS_DB_INTERNAL_ERROR("Bad internal state"); - } - } - - yield: + } - switch (state->progress) { - case ErtsDbProcCleanupProgressTables: - proc_exit_cleanup_tables_meta_data(pid, state); - break; - case ErtsDbProcCleanupProgressFixations: - proc_exit_cleanup_fixations_meta_data(pid, 
state); - break; - default: - break; - } + } while (reds > 0); - ASSERT(c_p->u.terminate == (void *) state - || state == &default_state); + yield: if (state == &default_state) { c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP, - sizeof(ErtsDbProcCleanupState)); - sys_memcpy(c_p->u.terminate, - (void*) state, - sizeof(ErtsDbProcCleanupState)); + sizeof(CleanupState)); + sys_memcpy(c_p->u.terminate, (void*) state, sizeof(CleanupState)); } + else + ASSERT(state == c_p->u.terminate); return !0; } + /* SMP note: table only need to be LCK_READ locked */ static void fix_table_locked(Process* p, DbTable* tb) { DbFixation *fix; - DeclareTmpHeap(meta_tuple,3,p); #ifdef ERTS_SMP erts_smp_mtx_lock(&tb->common.fixlock); #endif - erts_smp_refc_inc(&tb->common.ref,1); - fix = tb->common.fixations; + erts_smp_refc_inc(&tb->common.fix_count,1); + fix = tb->common.fixing_procs; if (fix == NULL) { tb->common.time.monotonic = erts_get_monotonic_time(erts_proc_sched_data(p)); tb->common.time.offset = erts_get_time_offset(); } else { - for (; fix != NULL; fix = fix->next) { - if (fix->pid == p->common.id) { - ++(fix->counter); + fix = fixing_procs_rbt_lookup(fix, p); + if (fix) { + ASSERT(fixed_tabs_find(NULL, fix)); + ++(fix->counter); + #ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); + erts_smp_mtx_unlock(&tb->common.fixlock); #endif - return; - } + return; } } fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION, tb, sizeof(DbFixation)); ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation)); - fix->pid = p->common.id; + fix->tabs.btid = tb->common.btid; + erts_refc_inc(&fix->tabs.btid->refc, 2); + fix->procs.p = p; fix->counter = 1; - fix->next = tb->common.fixations; - tb->common.fixations = fix; + fixing_procs_rbt_insert(&tb->common.fixing_procs, fix); + #ifdef ERTS_SMP erts_smp_mtx_unlock(&tb->common.fixlock); #endif - p->flags |= F_USING_DB; - UseTmpHeap(3,p); - db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC); - if (db_put_hash(meta_pid_to_fixed_tab, - TUPLE2(meta_tuple, - p->common.id, - make_small(tb->common.slot)), - 0) != DB_ERROR_NONE) { - UnUseTmpHeap(3,p); - erts_exit(ERTS_ERROR_EXIT,"Could not insert ets metadata in safe_fixtable."); - } - UnUseTmpHeap(3,p); - db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC); + p->flags |= F_USING_DB; + + fixed_tabs_insert(p, fix); } /* SMP note: May re-lock table @@ -3500,28 +3801,26 @@ static void fix_table_locked(Process* p, DbTable* tb) static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind_p) { - DbFixation** pp; + DbFixation* fix; #ifdef ERTS_SMP erts_smp_mtx_lock(&tb->common.fixlock); #endif - for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) { - if ((*pp)->pid == p->common.id) { - DbFixation* fix = *pp; - erts_smp_refc_dec(&tb->common.ref,0); - --(fix->counter); - ASSERT(fix->counter >= 0); - if (fix->counter > 0) { - break; - } - *pp = fix->next; + fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p); + + if (fix) { + erts_smp_refc_dec(&tb->common.fix_count,0); + --(fix->counter); + ASSERT(fix->counter >= 0); + if (fix->counter == 0) { + fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); #ifdef ERTS_SMP erts_smp_mtx_unlock(&tb->common.fixlock); #endif - db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC); - db_erase_bag_exact2(meta_pid_to_fixed_tab, - p->common.id, make_small(tb->common.slot)); - db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC); + fixed_tabs_delete(p, fix); + + erts_refc_dec(&fix->tabs.btid->refc, 1); + erts_db_free(ERTS_ALC_T_DB_FIXATION, tb, (void *) fix, 
sizeof(DbFixation)); ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); @@ -3548,29 +3847,87 @@ unlocked: } } -/* Assume that tb is WRITE locked */ -static void free_fixations_locked(DbTable *tb) +struct free_fixations_ctx { - DbFixation *fix; - DbFixation *next_fix; + Process* p; + DbTable* tb; + SWord cnt; +}; + +static void free_fixations_op(DbFixation* fix, void* vctx) +{ + struct free_fixations_ctx* ctx = (struct free_fixations_ctx*) vctx; + erts_aint_t diff; +#ifdef DEBUG + DbTable* dbg_tb = btid2tab(fix->tabs.btid); +#endif + + ASSERT(!dbg_tb || dbg_tb == ctx->tb); + ASSERT(fix->counter > 0); + ASSERT(ctx->tb->common.status & DB_DELETE); + + diff = -((erts_aint_t) fix->counter); + erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0); + +#ifdef ERTS_SMP + if (fix->procs.p != ctx->p) { /* Fixated by other process */ + fix->counter = 0; - fix = tb->common.fixations; - while (fix != NULL) { - erts_aint_t diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&tb->common.ref,diff,0); - next_fix = fix->next; - db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC); - db_erase_bag_exact2(meta_pid_to_fixed_tab, - fix->pid, - make_small(tb->common.slot)); - db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC); - erts_db_free(ERTS_ALC_T_DB_FIXATION, - tb, (void *) fix, sizeof(DbFixation)); - ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); + /* Fake memory stats for table */ + ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix)); + ERTS_DB_ALC_MEM_UPDATE_(ctx->tb, sizeof(DbFixation), 0); - fix = next_fix; + erts_schedule_ets_free_fixation(fix->procs.p->common.id, fix); + /* + * Either sys task is scheduled and erts_db_execute_free_fixation() + * will remove 'fix' or process will exit, drop sys task and + * proc_cleanup_fixed_table() will remove 'fix'. + */ } - tb->common.fixations = NULL; + else +#endif + { + fixed_tabs_delete(fix->procs.p, fix); + + if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) { + erts_bin_free(fix->tabs.btid); + } + + erts_db_free(ERTS_ALC_T_DB_FIXATION, + ctx->tb, (void *) fix, sizeof(DbFixation)); + ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); + } + ctx->cnt++; +} + +#ifdef ERTS_SMP +int erts_db_execute_free_fixation(Process* p, DbFixation* fix) +{ + ASSERT(fix->counter == 0); + fixed_tabs_delete(p, fix); + + if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) { + erts_bin_free(fix->tabs.btid); + } + erts_free(ERTS_ALC_T_DB_FIXATION, fix); + ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); + return 1; +} +#endif + +static SWord free_fixations_locked(Process* p, DbTable *tb) +{ + struct free_fixations_ctx ctx; + + ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); + + ctx.p = p; + ctx.tb = tb; + ctx.cnt = 0; + fixing_procs_rbt_foreach_destroy(&tb->common.fixing_procs, + free_fixations_op, &ctx); + tb->common.fixing_procs = NULL; + return ctx.cnt; } static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data) @@ -3634,55 +3991,40 @@ static void free_heir_data(DbTable* tb) static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1) { - Process *p = BIF_P; + SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P); + SWord reds = initial_reds; Eterm cont = BIF_ARG_1; - int trap; Eterm* ptr = big_val(cont); DbTable *tb = *((DbTable **) (UWord) (ptr + 1)); ASSERT(*ptr == make_pos_bignum_header(1)); - db_lock(tb, LCK_WRITE); - trap = free_table_cont(p, tb, 0, 1); - db_unlock(tb, LCK_WRITE); - - if (trap) { - BIF_TRAP1(&ets_delete_continue_exp, p, cont); + if (free_table_continue(BIF_P, tb, reds) < 0) { + BUMP_ALL_REDS(BIF_P); + BIF_TRAP1(&ets_delete_continue_exp, BIF_P, 
cont); } else { + BUMP_REDS(BIF_P, (initial_reds - reds)); BIF_RET(am_true); } } /* - * free_table_cont() returns 0 when done and !0 when more work is needed. + * free_table_continue() returns reductions left + * done if >= 0 + * yield if < 0 */ -static int free_table_cont(Process *p, - DbTable *tb, - int first, - int clean_meta_tab) +static SWord free_table_continue(Process *p, DbTable *tb, SWord reds) { - Eterm result; - erts_smp_rwmtx_t *mmtl; - -#ifdef HARDDEBUG - if (!first) { - erts_fprintf(stderr,"ets: free_table_cont %T (continue)\r\n", - tb->common.id); - } -#endif - - result = tb->common.meth->db_free_table_continue(tb); + reds = tb->common.meth->db_free_table_continue(tb, reds); - if (result == 0) { + if (reds < 0) { #ifdef HARDDEBUG erts_fprintf(stderr,"ets: free_table_cont %T (continue begin)\r\n", tb->common.id); #endif /* More work to be done. Let other processes work and call us again. */ - BUMP_ALL_REDS(p); - return !0; } else { #ifdef HARDDEBUG @@ -3690,27 +4032,28 @@ static int free_table_cont(Process *p, tb->common.id); #endif /* Completely done - we will not get called again. */ - mmtl = get_meta_main_tab_lock(tb->common.slot); -#ifdef ERTS_SMP - if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) { - erts_smp_rwmtx_rwunlock(&tb->common.rwlock); - erts_smp_rwmtx_rwlock(mmtl); - erts_smp_rwmtx_rwlock(&tb->common.rwlock); - } -#endif - free_slot(tb->common.slot); - erts_smp_rwmtx_rwunlock(mmtl); - - if (clean_meta_tab) { - db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC); - db_erase_bag_exact2(meta_pid_to_tab,tb->common.owner, - make_small(tb->common.slot)); - db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC); - } - schedule_free_dbtable(tb); - BUMP_REDS(p, 100); - return 0; + delete_owned_table(p, tb); + table_dec_refc(tb, 0); } + return reds; +} + +struct fixing_procs_info_ctx +{ + Process* p; + Eterm list; +}; + +static void fixing_procs_info_op(DbFixation* fix, void* vctx) +{ + struct fixing_procs_info_ctx* ctx = (struct fixing_procs_info_ctx*) vctx; + Eterm* hp; + Eterm tpl; + + hp = HAllocX(ctx->p, 5, 100); + tpl = TUPLE2(hp, fix->procs.p->common.id, make_small(fix->counter)); + hp += 3; + ctx->list = CONS(hp, tpl, ctx->list); } static Eterm table_info(Process* p, DbTable* tb, Eterm What) @@ -3759,7 +4102,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) } else if (What == am_node) { ret = erts_this_dist_entry->sysname; } else if (What == am_named_table) { - ret = is_atom(tb->common.id) ? am_true : am_false; + ret = is_table_named(tb) ? am_true : am_false; } else if (What == am_compressed) { ret = tb->common.compress ? am_true : am_false; } @@ -3784,9 +4127,9 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) if (IS_FIXED(tb)) { Uint need; Eterm *hp; - Eterm tpl, lst; - DbFixation *fix; + Eterm time; Sint64 mtime; + struct fixing_procs_info_ctx ctx; need = 3; if (use_monotonic) { @@ -3799,19 +4142,15 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) mtime = 0; need += 4; } - for (fix = tb->common.fixations; fix != NULL; fix = fix->next) { - need += 5; - } + ctx.p = p; + ctx.list = NIL; + fixing_procs_rbt_foreach(tb->common.fixing_procs, + fixing_procs_info_op, + &ctx); + hp = HAlloc(p, need); - lst = NIL; - for (fix = tb->common.fixations; fix != NULL; fix = fix->next) { - tpl = TUPLE2(hp,fix->pid,make_small(fix->counter)); - hp += 3; - lst = CONS(hp,tpl,lst); - hp += 2; - } if (use_monotonic) - tpl = (IS_SSMALL(mtime) + time = (IS_SSMALL(mtime) ? 
make_small(mtime) : erts_sint64_to_big(mtime, &hp)); else { @@ -3819,10 +4158,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) erts_make_timestamp_value(&ms, &s, &us, tb->common.time.monotonic, tb->common.time.offset); - tpl = TUPLE3(hp, make_small(ms), make_small(s), make_small(us)); + time = TUPLE3(hp, make_small(ms), make_small(s), make_small(us)); hp += 4; } - ret = TUPLE2(hp, tpl, lst); + ret = TUPLE2(hp, time, ctx.list); } else { ret = am_false; } @@ -3867,7 +4206,19 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb) { - erts_print(to, to_arg, "Table: %T\n", tb->common.id); + Eterm tid; + Eterm heap[ERTS_MAGIC_REF_THING_SIZE]; + + if (is_table_named(tb)) { + tid = tb->common.the_name; + } else { + ErlOffHeap oh; + ERTS_INIT_OFF_HEAP(&oh); + write_magic_ref_thing(heap, &oh, (ErtsMagicBinary *) tb->common.btid); + tid = make_internal_ref(heap); + } + + erts_print(to, to_arg, "Table: %T\n", tid); erts_print(to, to_arg, "Name: %T\n", tb->common.the_name); tb->common.meth->db_print(to, to_arg, show, tb); @@ -3885,21 +4236,30 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb) erts_print(to, to_arg, "Read Concurrency: %T\n", table_info(NULL, tb, am_read_concurrency)); } +typedef struct { + fmtfn_t to; + void *to_arg; + int show; +} ErtsPrintDbInfo; + +static void +db_info_print(DbTable *tb, void *vpdbip) +{ + ErtsPrintDbInfo *pdbip = (ErtsPrintDbInfo *) vpdbip; + erts_print(pdbip->to, pdbip->to_arg, "=ets:%T\n", tb->common.owner); + erts_print(pdbip->to, pdbip->to_arg, "Slot: %bpu\n", (Uint) tb); + print_table(pdbip->to, pdbip->to_arg, pdbip->show, tb); +} + void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler */ { - int i; - for (i=0; i < db_max_tabs; i++) - if (IS_SLOT_ALIVE(i)) { - erts_print(to, to_arg, "=ets:%T\n", meta_main_tab[i].u.tb->common.owner); - erts_print(to, to_arg, "Slot: %d\n", i); - print_table(to, to_arg, show, meta_main_tab[i].u.tb); - } -#ifdef DEBUG - erts_print(to, to_arg, "=internal_ets: Process to table index\n"); - print_table(to, to_arg, show, meta_pid_to_tab); - erts_print(to, to_arg, "=internal_ets: Process to fixation index\n"); - print_table(to, to_arg, show, meta_pid_to_fixed_tab); -#endif + ErtsPrintDbInfo pdbi; + + pdbi.to = to; + pdbi.to_arg = to_arg; + pdbi.show = show; + + erts_db_foreach_table(db_info_print, &pdbi); } Uint @@ -3914,15 +4274,22 @@ erts_get_ets_misc_mem_size(void) void erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg) { - int i, j; - j = 0; - for(i = 0; (i < db_max_tabs && j < meta_main_tab_cnt); i++) { - if (IS_SLOT_ALIVE(i)) { - j++; - (*func)(meta_main_tab[i].u.tb, arg); - } + int ix; + + ASSERT(erts_smp_thr_progress_is_blocking()); + + for (ix = 0; ix < erts_no_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix); + DbTable *first = esdp->ets_tables.clist; + if (first) { + DbTable *tb = first; + do { + if (is_table_alive(tb)) + (*func)(tb, arg); + tb = tb->common.all.next; + } while (tb != first); + } } - ASSERT(j == meta_main_tab_cnt); } /* SMP Note: May only be used when system is locked */ @@ -3972,53 +4339,3 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt) return list; } -/* - * For testing only - * Retreive meta table size state - */ -Eterm erts_ets_get_meta_state(Process* p) -{ - Eterm* hp = HAlloc(p, 3); - return TUPLE2(hp, - erts_ets_hash_get_memstate(p, &meta_pid_to_tab->hash), - erts_ets_hash_get_memstate(p, 
&meta_pid_to_fixed_tab->hash)); -} -/* - * For testing only - * Restore a previously retrieved meta table size state. - * We do this to suppress failed memory checks - * caused by the hysteresis of meta tables grow/shrink limits. - */ -Eterm erts_ets_restore_meta_state(Process* p, Eterm meta_state) -{ - Eterm* tv; - Eterm* hp; - if (!is_tuple_arity(meta_state, 2)) - return am_badarg; - - tv = tuple_val(meta_state); - hp = HAlloc(p, 3); - return TUPLE2(hp, - erts_ets_hash_restore_memstate(&meta_pid_to_tab->hash, tv[1]), - erts_ets_hash_restore_memstate(&meta_pid_to_fixed_tab->hash, tv[2])); -} - -#ifdef HARDDEBUG /* Here comes some debug functions */ - -void db_check_tables(void) -{ -#ifdef ERTS_SMP - return; -#else - int i; - - for (i = 0; i < db_max_tabs; i++) { - if (IS_SLOT_ALIVE(i)) { - DbTable* tb = meta_main_tab[i].t; - tb->common.meth->db_check_table(tb); - } - } -#endif -} - -#endif /* HARDDEBUG */ diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 852440ff31..4ff9f224e8 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -24,8 +24,37 @@ * */ -#ifndef __DB_H__ -#define __DB_H__ +#ifndef ERTS_DB_SCHED_SPEC_TYPES__ +#define ERTS_DB_SCHED_SPEC_TYPES__ + +union db_table; +typedef union db_table DbTable; + +typedef struct ErtsEtsAllReq_ ErtsEtsAllReq; + +typedef struct { + ErtsEtsAllReq *next; + ErtsEtsAllReq *prev; +} ErtsEtsAllReqList; + +typedef struct { + ErtsEtsAllReq *ongoing; + ErlHeapFragment *hfrag; + DbTable *tab; + ErtsEtsAllReq *queue; +} ErtsEtsAllYieldData; + +typedef struct { + Uint count; + DbTable *clist; +} ErtsEtsTables; + +#endif /* ERTS_DB_SCHED_SPEC_TYPES__ */ + +#ifndef ERTS_ONLY_SCHED_SPEC_ETS_DATA + +#ifndef ERL_DB_H__ +#define ERL_DB_H__ #include "sys.h" #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY @@ -46,6 +75,12 @@ typedef struct { ErtsThrPrgrLaterOp data; } DbTableRelease; +struct ErtsSchedulerData_; +int erts_handle_yielded_ets_all_request(struct ErtsSchedulerData_ *esdp, + ErtsEtsAllYieldData *eadp); + +void erts_ets_sched_spec_data_init(struct ErtsSchedulerData_ *esdp); + /* * So, the structure for a database table, NB this is only * interesting in db.c. 
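The header is now split into two guarded regions so that an includer can pull in just the scheduler-specific ETS types by defining a macro before inclusion, without dragging in the full database API. A sketch of the same layout with hypothetical file and macro names:

    /* my_db.h -- layout sketch only */
    #ifndef MY_DB_SCHED_SPEC_TYPES__
    #define MY_DB_SCHED_SPEC_TYPES__

    typedef struct my_table_ my_table;     /* opaque forward declaration */
    typedef struct { my_table *clist; unsigned count; } my_sched_tables;

    #endif /* MY_DB_SCHED_SPEC_TYPES__ */

    #ifndef MY_ONLY_SCHED_SPEC_DATA
    #ifndef MY_DB_H__
    #define MY_DB_H__
    /* ... the full database API would live here ... */
    #endif /* MY_DB_H__ */
    #endif /* !MY_ONLY_SCHED_SPEC_DATA */

An includer that only needs the light-weight types would define MY_ONLY_SCHED_SPEC_DATA before including the header; everything else gets the whole file as before.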
@@ -74,6 +109,7 @@ typedef enum { void init_db(ErtsDbSpinCount); int erts_db_process_exiting(Process *, ErtsProcLocks); +int erts_db_execute_free_fixation(Process*, DbFixation*); void db_info(fmtfn_t, void *, int); void erts_db_foreach_table(void (*)(DbTable *, void *), void *); void erts_db_foreach_offheap(DbTable *, @@ -86,16 +122,14 @@ extern int erts_ets_realloc_always_moves; /* set in erl_init */ extern int erts_ets_always_compress; /* set in erl_init */ extern Export ets_select_delete_continue_exp; extern Export ets_select_count_continue_exp; +extern Export ets_select_replace_continue_exp; extern Export ets_select_continue_exp; extern erts_smp_atomic_t erts_ets_misc_mem_size; Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt); -Eterm erts_ets_get_meta_state(Process* p); -Eterm erts_ets_restore_meta_state(Process* p, Eterm target_state); - Uint erts_db_get_max_tabs(void); -#endif +#endif /* ERL_DB_H__ */ #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) #define ERTS_HAVE_DB_INTERNAL__ @@ -271,3 +305,4 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size) #endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */ +#endif /* !ERTS_ONLY_SCHED_SPEC_ETS_DATA */ diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index bb29d56aa7..7ab27df00c 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -288,6 +288,9 @@ static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix, #endif } +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif /* * Some special binary flags @@ -352,7 +355,8 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); } - +/* Used by select_replace on analyze_pattern */ +typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body); /* ** Forward decl's (static functions) @@ -368,8 +372,9 @@ static void shrink(DbTableHash* tb, int nitems); static void grow(DbTableHash* tb, int nitems); static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, Uint sz, DbTableHash*); -static int analyze_pattern(DbTableHash *tb, Eterm pattern, - struct mp_info *mpi); +static int analyze_pattern(DbTableHash *tb, Eterm pattern, + extra_match_validator_t extra_validator, /* Optional callback */ + struct mp_info *mpi); /* * Method interface functions @@ -393,24 +398,29 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object,Eterm *ret); static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret); -static int db_select_chunk_hash(Process *p, DbTable *tbl, +static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size, int reverse, Eterm *ret); -static int db_select_hash(Process *p, DbTable *tbl, +static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reverse, Eterm *ret); -static int db_select_count_hash(Process *p, DbTable *tbl, - Eterm pattern, Eterm *ret); -static int db_select_delete_hash(Process *p, DbTable *tbl, - Eterm pattern, Eterm *ret); - -static int db_select_continue_hash(Process *p, DbTable *tbl, +static int db_select_continue_hash(Process *p, DbTable *tbl, Eterm continuation, Eterm *ret); -static int db_select_count_continue_hash(Process *p, DbTable *tbl, +static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret); +static int db_select_count_continue_hash(Process *p, DbTable *tbl, Eterm continuation, Eterm 
*ret); +static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret); static int db_select_delete_continue_hash(Process *p, DbTable *tbl, Eterm continuation, Eterm *ret); + +static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret); +static int db_select_replace_continue_hash(Process *p, DbTable *tbl, + Eterm continuation, Eterm *ret); + static int db_take_hash(Process *, DbTable *, Eterm, Eterm *); static void db_print_hash(fmtfn_t to, void *to_arg, @@ -418,7 +428,7 @@ static void db_print_hash(fmtfn_t to, DbTable *tbl); static int db_free_table_hash(DbTable *tbl); -static int db_free_table_continue_hash(DbTable *tbl); +static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds); static void db_foreach_offheap_hash(DbTable *, @@ -523,17 +533,14 @@ DbTableMethod db_hash = db_select_delete_continue_hash, db_select_count_hash, db_select_count_continue_hash, + db_select_replace_hash, + db_select_replace_continue_hash, db_take_hash, db_delete_all_objects_hash, db_free_table_hash, db_free_table_continue_hash, db_print_hash, db_foreach_offheap_hash, -#ifdef HARDDEBUG - db_check_table_hash, -#else - NULL, -#endif db_lookup_dbterm_hash, db_finalize_dbterm_hash }; @@ -581,9 +588,10 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) ** Table interface routines ie what's called by the bif's */ -void db_unfix_table_hash(DbTableHash *tb) +SWord db_unfix_table_hash(DbTableHash *tb) { FixedDeletion* fixdel; + SWord work = 0; ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock) || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock) @@ -604,7 +612,7 @@ restart: if (!IS_FIXED(tb)) { goto restart; /* unfixed again! */ } - return; + return work; } if (ix < NACTIVE(tb)) { bp = &BUCKET(tb, ix); @@ -614,6 +622,7 @@ restart: if (b->hvalue == INVALID_HASH) { *bp = b->next; free_term(tb, b); + work++; b = *bp; } else { bp = &b->next; @@ -629,9 +638,12 @@ restart: (void *) fx, sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion)); + work++; } /* ToDo: Maybe try grow/shrink the table as well */ + + return work; } int db_create_hash(Process *p, DbTable *tbl) @@ -846,7 +858,6 @@ Lnew: grow(tb, nitems); } } - CHECK_TABLES(); return DB_ERROR_NONE; Ldone: @@ -871,7 +882,6 @@ get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval, } } copy = build_term_list(p, b1, b2, sz, tb); - CHECK_TABLES(); if (bend) { *bend = b2; } @@ -903,70 +913,6 @@ done: RUNLOCK_HASH(lck); return DB_ERROR_NONE; } - -int db_get_element_array(DbTable *tbl, - Eterm key, - int ndex, - Eterm *ret, - int *num_ret) -{ - DbTableHash *tb = &tbl->hash; - HashValue hval; - int ix; - HashDbTerm* b1; - int num = 0; - int retval; - erts_smp_rwmtx_t* lck; - - ASSERT(!IS_FIXED(tbl)); /* no support for fixed tables here */ - - hval = MAKE_HASH(key); - lck = RLOCK_HASH(tb, hval); - ix = hash_to_ix(tb, hval); - b1 = BUCKET(tb, ix); - - while(b1 != 0) { - if (has_live_key(tb,b1,key,hval)) { - if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) { - HashDbTerm* b; - HashDbTerm* b2 = b1->next; - - while(b2 != NULL && has_live_key(tb,b2,key,hval)) { - if (ndex > arityval(b2->dbterm.tpl[0])) { - retval = DB_ERROR_BADITEM; - goto done; - } - b2 = b2->next; - } - - b = b1; - while(b != b2) { - if (num < *num_ret) { - ret[num++] = b->dbterm.tpl[ndex]; - } else { - retval = DB_ERROR_NONE; - goto done; - } - b = b->next; - } - *num_ret = num; - } - else { - ASSERT(*num_ret > 0); - ret[0] = b1->dbterm.tpl[ndex]; - *num_ret = 1; - 
} - retval = DB_ERROR_NONE; - goto done; - } - b1 = b1->next; - } - retval = DB_ERROR_BADKEY; -done: - RUNLOCK_HASH(lck); - return retval; -} - static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret) { @@ -1059,54 +1005,6 @@ done: } /* - * Very internal interface, removes elements of arity two from - * BAG. Used for the PID meta table - */ -int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value) -{ - DbTableHash *tb = &tbl->hash; - HashValue hval; - int ix; - HashDbTerm** bp; - HashDbTerm* b; - erts_smp_rwmtx_t* lck; - int found = 0; - - hval = MAKE_HASH(key); - lck = WLOCK_HASH(tb,hval); - ix = hash_to_ix(tb, hval); - bp = &BUCKET(tb, ix); - b = *bp; - - ASSERT(!IS_FIXED(tb)); - ASSERT((tb->common.status & DB_BAG)); - ASSERT(!tb->common.compress); - - while(b != 0) { - if (has_live_key(tb,b,key,hval)) { - found = 1; - if ((arityval(b->dbterm.tpl[0]) == 2) && - EQ(value, b->dbterm.tpl[2])) { - *bp = b->next; - free_term(tb, b); - erts_smp_atomic_dec_nob(&tb->common.nitems); - b = *bp; - break; - } - } else if (found) { - break; - } - bp = &b->next; - b = b->next; - } - WUNLOCK_HASH(lck); - if (found) { - try_shrink(tb); - } - return DB_ERROR_NONE; -} - -/* ** NB, this is for the db_erase/2 bif. */ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) @@ -1253,814 +1151,1104 @@ static BIF_RETTYPE bif_trap1(Export *bif, { BIF_TRAP1(bif, p, p1); } - + + /* - * Continue collecting select matches, this may happen either due to a trap - * or when the user calls ets:select/1 + * Match traversal callbacks */ -static int db_select_continue_hash(Process *p, - DbTable *tbl, - Eterm continuation, - Eterm *ret) -{ - DbTableHash *tb = &tbl->hash; - Sint slot_ix; - Sint save_slot_ix; - Sint chunk_size; - int all_objects; - Binary *mp; - int num_left = 1000; - HashDbTerm *current = 0; - Eterm match_list; - Eterm *hp; - Eterm match_res; - Sint got; - Eterm *tptr; - erts_smp_rwmtx_t* lck; -#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0); +/* Called when no match is possible. + * context_ptr: Pointer to context + * ret: Pointer to traversal function term return. + * + * Both the direct return value and 'ret' are used as the traversal function return values. + */ +typedef int (*mtraversal_on_nothing_can_match_t)(void* context_ptr, Eterm* ret); + +/* Called for each match result. + * context_ptr: Pointer to context + * slot_ix: Current slot index + * current_ptr_ptr: Triple pointer to either the bucket or the 'next' pointer in the previous element; + * can be (carefully) used to adjust iteration when deleting or replacing elements. + * match_res: The result of running the match program against the current term. + * + * Should return 1 for successful match, 0 otherwise. + */ +typedef int (*mtraversal_on_match_res_t)(void* context_ptr, Sint slot_ix, HashDbTerm*** current_ptr_ptr, + Eterm match_res); + +/* Called when either we've matched enough elements in this cycle or EOT was reached. + * context_ptr: Pointer to context + * slot_ix: Current slot index + * got: How many elements have been matched so far + * iterations_left: Number of intended iterations (down from an initial max.) left in this traversal cycle + * mpp: Double pointer to the compiled match program + * ret: Pointer to traversal function term return. + * + * Both the direct return value and 'ret' are used as the traversal function return values. + * If *mpp is set to NULL, it won't be deallocated (useful for trapping.) 
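These typedefs turn the hash-table walk into a generic engine: the walker owns iteration and locking, while each ets operation plugs in its own behaviour through callbacks plus an opaque context pointer. A toy standalone version of that structure (plain C, invented names):

    #include <stddef.h>

    typedef int  (*on_match_fn)(void *ctx, int value);   /* 1 = count this hit */
    typedef void (*on_done_fn)(void *ctx, size_t matched);

    static void traverse(const int *v, size_t n, int wanted,
                         on_match_fn on_match, on_done_fn on_done, void *ctx)
    {
        size_t matched = 0, i;
        for (i = 0; i < n; i++)
            if (v[i] == wanted && on_match(ctx, v[i]))
                matched++;
        on_done(ctx, matched);
    }

    /* One concrete "operation": just count the matches. */
    struct count_ctx { size_t count; };
    static int  count_on_match(void *ctx, int value) { (void)ctx; (void)value; return 1; }
    static void count_on_done(void *ctx, size_t matched)
    {
        ((struct count_ctx *)ctx)->count = matched;
    }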
+ */ +typedef int (*mtraversal_on_loop_ended_t)(void* context_ptr, Sint slot_ix, Sint got, + Sint iterations_left, Binary** mpp, Eterm* ret); + +/* Called when it's time to trap + * context_ptr: Pointer to context + * slot_ix: Current slot index + * got: How many elements have been matched so far + * mpp: Double pointer to the compiled match program + * ret: Pointer to traversal function term return. + * + * Both the direct return value and 'ret' are used as the traversal function return values. + * If *mpp is set to NULL, it won't be deallocated (useful for trapping.) + */ +typedef int (*mtraversal_on_trap_t)(void* context_ptr, Sint slot_ix, Sint got, Binary** mpp, Eterm* ret); - /* Decode continuation. We know it's a tuple but not the arity or anything else */ +/* + * Begin hash table match traversal + */ +static int match_traverse(Process* p, DbTableHash* tb, + Eterm pattern, + extra_match_validator_t extra_match_validator, /* Optional */ + Sint chunk_size, /* If 0, no chunking */ + Sint iterations_left, /* Nr. of iterations left */ + Eterm** hpp, /* Heap */ + int lock_for_write, /* Set to 1 if we're going to delete or + modify existing terms */ + mtraversal_on_nothing_can_match_t on_nothing_can_match, + mtraversal_on_match_res_t on_match_res, + mtraversal_on_loop_ended_t on_loop_ended, + mtraversal_on_trap_t on_trap, + void* context_ptr, /* State for callbacks above */ + Eterm* ret) +{ + Sint slot_ix; /* Slot index */ + HashDbTerm** current_ptr; /* Refers to either the bucket pointer or + * the 'next' pointer in the previous term + */ + HashDbTerm* saved_current; /* Helper to avoid double skip on match */ + struct mp_info mpi; + unsigned current_list_pos = 0; /* Prefound buckets list index */ + Eterm match_res; + Sint got = 0; /* Matched terms counter */ + erts_smp_rwmtx_t* lck; /* Slot lock */ + int ret_value; +#ifdef ERTS_SMP + erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); + void (*unlock_hash_function)(erts_smp_rwmtx_t*) + = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); +#else + #define lock_hash_function(tb, hval) NULL + #define unlock_hash_function(lck) ((void)lck) +#endif + Sint (*next_slot_function)(DbTableHash*, Uint, erts_smp_rwmtx_t**) + = (lock_for_write ? 
next_slot_w : next_slot); - tptr = tuple_val(continuation); + if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi)) + != DB_ERROR_NONE) + { + *ret = NIL; + goto done; + } - if (arityval(*tptr) != 6) - RET_TO_BIF(NIL,DB_ERROR_BADPARAM); - - if (!is_small(tptr[2]) || !is_small(tptr[3]) || - !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6])) - RET_TO_BIF(NIL,DB_ERROR_BADPARAM); - if ((chunk_size = signed_val(tptr[3])) < 0) - RET_TO_BIF(NIL,DB_ERROR_BADPARAM); - mp = erts_db_get_match_prog_binary(tptr[4]); - if (!mp) - RET_TO_BIF(NIL,DB_ERROR_BADPARAM); - all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS; - match_list = tptr[5]; - if ((got = signed_val(tptr[6])) < 0) - RET_TO_BIF(NIL,DB_ERROR_BADPARAM); + if (!mpi.something_can_match) { + /* Can't possibly match anything */ + ret_value = on_nothing_can_match(context_ptr, ret); + goto done; + } - slot_ix = signed_val(tptr[2]); - if (slot_ix < 0 /* EOT */ - || (chunk_size && got >= chunk_size)) { - goto done; /* Already got all or enough in the match_list */ + if (mpi.all_objects) { + mpi.mp->flags |= BIN_FLAG_ALL_OBJECTS; } - lck = RLOCK_HASH(tb,slot_ix); - if (slot_ix >= NACTIVE(tb)) { - RUNLOCK_HASH(lck); - RET_TO_BIF(NIL,DB_ERROR_BADPARAM); + /* + * Look for initial slot / bucket + */ + if (!mpi.key_given) { + /* Run this code if pattern is variable or GETKEY(pattern) */ + /* is a variable */ + slot_ix = 0; + lck = lock_hash_function(tb,slot_ix); + for (;;) { + ASSERT(slot_ix < NACTIVE(tb)); + if (*(current_ptr = &BUCKET(tb,slot_ix)) != NULL) { + break; + } + slot_ix = next_slot_function(tb,slot_ix,&lck); + if (slot_ix == 0) { + ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret); + goto done; + } + } + } else { + /* We have at least one */ + slot_ix = mpi.lists[current_list_pos].ix; + lck = lock_hash_function(tb, slot_ix); + current_ptr = mpi.lists[current_list_pos].bucket; + ASSERT(*current_ptr == BUCKET(tb,slot_ix)); + ++current_list_pos; } - while ((current = BUCKET(tb,slot_ix)) == NULL) { - slot_ix = next_slot(tb, slot_ix, &lck); - if (slot_ix == 0) { - slot_ix = -1; /* EOT */ - goto done; - } - } + /* + * Execute traversal cycle + */ for(;;) { - if (current->hvalue != INVALID_HASH && - (match_res = db_match_dbterm(&tb->common, p, mp, all_objects, - ¤t->dbterm, &hp, 2), - is_value(match_res))) { + if (*current_ptr != NULL) { + if ((*current_ptr)->hvalue != INVALID_HASH) { + match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0, + &(*current_ptr)->dbterm, hpp, 2); + saved_current = *current_ptr; + if (on_match_res(context_ptr, slot_ix, ¤t_ptr, match_res)) { + ++got; + } + --iterations_left; + if (*current_ptr != saved_current) { + /* Don't advance to next, the callback did it already */ + continue; + } + } + current_ptr = &((*current_ptr)->next); + } + else if (mpi.key_given) { /* Key is bound */ + unlock_hash_function(lck); + if (current_list_pos == mpi.num_lists) { + ret_value = on_loop_ended(context_ptr, -1, got, iterations_left, &mpi.mp, ret); + goto done; + } else { + slot_ix = mpi.lists[current_list_pos].ix; + lck = lock_hash_function(tb, slot_ix); + current_ptr = mpi.lists[current_list_pos].bucket; + ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix)); + ++current_list_pos; + } + } + else { /* Key is variable */ + if ((slot_ix = next_slot_function(tb,slot_ix,&lck)) == 0) { + slot_ix = -1; + break; + } + if (chunk_size && got >= chunk_size) { + unlock_hash_function(lck); + break; + } + if (iterations_left <= 0 || MBUF(p)) { + /* + * We have either reached 
our limit, or just created some heap fragments. + * Since many heap fragments will make the GC slower, trap and GC now. + */ + unlock_hash_function(lck); + ret_value = on_trap(context_ptr, slot_ix, got, &mpi.mp, ret); + goto done; + } + current_ptr = &BUCKET(tb,slot_ix); + } + } - match_list = CONS(hp, match_res, match_list); - ++got; - } + ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret); - --num_left; - save_slot_ix = slot_ix; - if ((current = next(tb, (Uint*)&slot_ix, &lck, current)) == NULL) { - slot_ix = -1; /* EOT */ - break; - } - if (slot_ix != save_slot_ix) { - if (chunk_size && got >= chunk_size) { - RUNLOCK_HASH(lck); - break; - } - if (num_left <= 0 || MBUF(p)) { - /* - * We have either reached our limit, or just created some heap fragments. - * Since many heap fragments will make the GC slower, trap and GC now. - */ - RUNLOCK_HASH(lck); - goto trap; - } - } - } done: - BUMP_REDS(p, 1000 - num_left); - if (chunk_size) { - Eterm continuation; - Eterm rest = NIL; - Sint rest_size = 0; - - if (got > chunk_size) { /* Cannot write destructively here, - the list may have - been in user space */ - rest = NIL; - hp = HAlloc(p, (got - chunk_size) * 2); - while (got-- > chunk_size) { - rest = CONS(hp, CAR(list_val(match_list)), rest); - hp += 2; - match_list = CDR(list_val(match_list)); - ++rest_size; - } - } - if (rest != NIL || slot_ix >= 0) { - hp = HAlloc(p,3+7); - continuation = TUPLE6(hp, tptr[1], make_small(slot_ix), - tptr[3], tptr[4], rest, - make_small(rest_size)); - hp += 7; - RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE); - } else { - if (match_list != NIL) { - hp = HAlloc(p, 3); - RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE); - } else { - RET_TO_BIF(am_EOT, DB_ERROR_NONE); - } - } + /* We should only jump directly to this label if + * we've already called on_nothing_can_match / on_loop_ended / on_trap + */ + if (mpi.mp != NULL) { + erts_bin_free(mpi.mp); } - RET_TO_BIF(match_list,DB_ERROR_NONE); - -trap: - BUMP_ALL_REDS(p); - - hp = HAlloc(p,7); - continuation = TUPLE6(hp, tptr[1], make_small(slot_ix), tptr[3], - tptr[4], match_list, make_small(got)); - RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p, - continuation), - DB_ERROR_NONE); - -#undef RET_TO_BIF - -} + if (mpi.lists != mpi.dlists) { + erts_free(ERTS_ALC_T_DB_SEL_LIST, + (void *) mpi.lists); + } + return ret_value; -static int db_select_hash(Process *p, DbTable *tbl, - Eterm pattern, int reverse, - Eterm *ret) -{ - return db_select_chunk_hash(p, tbl, pattern, 0, reverse, ret); +#ifndef SMP +#undef lock_hash_function +#undef unlock_hash_function +#endif } -static int db_select_chunk_hash(Process *p, DbTable *tbl, - Eterm pattern, Sint chunk_size, - int reverse, /* not used */ - Eterm *ret) -{ - DbTableHash *tb = &tbl->hash; - struct mp_info mpi; - Sint slot_ix; - HashDbTerm *current = 0; - unsigned current_list_pos = 0; - Eterm match_list; +/* + * Continue hash table match traversal + */ +static int match_traverse_continue(Process* p, DbTableHash* tb, + Sint chunk_size, /* If 0, no chunking */ + Sint iterations_left, /* Nr. 
of iterations left */ + Eterm** hpp, /* Heap */ + Sint slot_ix, /* Slot index to resume traversal from */ + Sint got, /* Matched terms counter */ + Binary** mpp, /* Existing match program */ + int lock_for_write, /* Set to 1 if we're going to delete or + modify existing terms */ + mtraversal_on_match_res_t on_match_res, + mtraversal_on_loop_ended_t on_loop_ended, + mtraversal_on_trap_t on_trap, + void* context_ptr, /* For callbacks */ + Eterm* ret) +{ + int all_objects = (*mpp)->flags & BIN_FLAG_ALL_OBJECTS; + HashDbTerm** current_ptr; /* Refers to either the bucket pointer or + * the 'next' pointer in the previous term + */ + HashDbTerm* saved_current; /* Helper to avoid double skip on match */ Eterm match_res; - Eterm *hp; - int num_left = 1000; - Uint got = 0; - Eterm continuation; - int errcode; - Eterm mpb; erts_smp_rwmtx_t* lck; + int ret_value; +#ifdef ERTS_SMP + erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); + void (*unlock_hash_function)(erts_smp_rwmtx_t*) + = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); +#else + #define lock_hash_function(tb, hval) NULL + #define unlock_hash_function(lck) ((void)lck) +#endif + Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_smp_rwmtx_t** lck_ptr) + = (lock_for_write ? next_slot_w : next_slot); - -#define RET_TO_BIF(Term,RetVal) do { \ - if (mpi.mp != NULL) { \ - erts_bin_free(mpi.mp); \ - } \ - if (mpi.lists != mpi.dlists) { \ - erts_free(ERTS_ALC_T_DB_SEL_LIST, \ - (void *) mpi.lists); \ - } \ - *ret = (Term); \ - return RetVal; \ - } while(0) - - - if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) { - RET_TO_BIF(NIL,errcode); + if (got < 0) { + *ret = NIL; + return DB_ERROR_BADPARAM; } - if (!mpi.something_can_match) { - if (chunk_size) { - RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */ - } - RET_TO_BIF(NIL, DB_ERROR_NONE); - /* can't possibly match anything */ + if (slot_ix < 0 /* EOT */ + || (chunk_size && got >= chunk_size)) + { + /* Already got all or enough in the match_list */ + ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret); + goto done; } - if (!mpi.key_given) { - /* Run this code if pattern is variable or GETKEY(pattern) */ - /* is a variable */ - slot_ix = 0; - lck = RLOCK_HASH(tb,slot_ix); - for (;;) { - ASSERT(slot_ix < NACTIVE(tb)); - if ((current = BUCKET(tb,slot_ix)) != NULL) { - break; - } - slot_ix = next_slot(tb,slot_ix,&lck); - if (slot_ix == 0) { - if (chunk_size) { - RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */ - } - RET_TO_BIF(NIL,DB_ERROR_NONE); - } - } - } else { - /* We have at least one */ - slot_ix = mpi.lists[current_list_pos].ix; - lck = RLOCK_HASH(tb, slot_ix); - current = *(mpi.lists[current_list_pos].bucket); - ASSERT(current == BUCKET(tb,slot_ix)); - ++current_list_pos; + lck = lock_hash_function(tb, slot_ix); + if (slot_ix >= NACTIVE(tb)) { /* Is this possible? 
*/ + unlock_hash_function(lck); + *ret = NIL; + ret_value = DB_ERROR_BADPARAM; + goto done; } - match_list = NIL; - + /* + * Resume traversal cycle from where we left + */ + current_ptr = &BUCKET(tb,slot_ix); for(;;) { - if (current != NULL) { - if (current->hvalue != INVALID_HASH) { - match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0, - ¤t->dbterm, &hp, 2); - if (is_value(match_res)) { - match_list = CONS(hp, match_res, match_list); - ++got; - } - --num_left; - } - current = current->next; - } - else if (mpi.key_given) { /* Key is bound */ - RUNLOCK_HASH(lck); - if (current_list_pos == mpi.num_lists) { - slot_ix = -1; /* EOT */ - goto done; - } else { - slot_ix = mpi.lists[current_list_pos].ix; - lck = RLOCK_HASH(tb, slot_ix); - current = *(mpi.lists[current_list_pos].bucket); - ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix)); - ++current_list_pos; - } - } - else { /* Key is variable */ - - if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) { - slot_ix = -1; - break; - } - if (chunk_size && got >= chunk_size) { - RUNLOCK_HASH(lck); - break; - } - if (num_left <= 0 || MBUF(p)) { - /* - * We have either reached our limit, or just created some heap fragments. - * Since many heap fragments will make the GC slower, trap and GC now. - */ - RUNLOCK_HASH(lck); - goto trap; - } - current = BUCKET(tb,slot_ix); + if (*current_ptr != NULL) { + if ((*current_ptr)->hvalue != INVALID_HASH) { + match_res = db_match_dbterm(&tb->common, p, *mpp, all_objects, + &(*current_ptr)->dbterm, hpp, 2); + saved_current = *current_ptr; + if (on_match_res(context_ptr, slot_ix, ¤t_ptr, match_res)) { + ++got; + } + --iterations_left; + if (*current_ptr != saved_current) { + /* Don't advance to next, the callback did it already */ + continue; + } + } + current_ptr = &((*current_ptr)->next); + } + else { + if ((slot_ix=next_slot_function(tb,slot_ix,&lck)) == 0) { + slot_ix = -1; + break; + } + if (chunk_size && got >= chunk_size) { + unlock_hash_function(lck); + break; + } + if (iterations_left <= 0 || MBUF(p)) { + /* + * We have either reached our limit, or just created some heap fragments. + * Since many heap fragments will make the GC slower, trap and GC now. 
+ */ + unlock_hash_function(lck); + ret_value = on_trap(context_ptr, slot_ix, got, mpp, ret); + goto done; + } + current_ptr = &BUCKET(tb,slot_ix); } } -done: - BUMP_REDS(p, 1000 - num_left); - if (chunk_size) { - Eterm continuation; - Eterm rest = NIL; - Sint rest_size = 0; - - if (mpi.all_objects) - (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS; - if (got > chunk_size) { /* Split list in return value and 'rest' */ - Eterm tmp = match_list; - rest = match_list; - while (got-- > chunk_size + 1) { - tmp = CDR(list_val(tmp)); - ++rest_size; - } - ++rest_size; - match_list = CDR(list_val(tmp)); - CDR(list_val(tmp)) = NIL; /* Destructive, the list has never - been in 'user space' */ - } - if (rest != NIL || slot_ix >= 0) { /* Need more calls */ - hp = HAlloc(p,3+7+ERTS_MAGIC_REF_THING_SIZE); - mpb = erts_db_make_match_prog_ref(p,(mpi.mp),&hp); - if (mpi.all_objects) - (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS; - continuation = TUPLE6(hp, tb->common.id,make_small(slot_ix), - make_small(chunk_size), - mpb, rest, - make_small(rest_size)); - mpi.mp = NULL; /*otherwise the return macro will destroy it */ - hp += 7; - RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE); - } else { /* All data is exhausted */ - if (match_list != NIL) { /* No more data to search but still a - result to return to the caller */ - hp = HAlloc(p, 3); - RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE); - } else { /* Reached the end of the ttable with no data to return */ - RET_TO_BIF(am_EOT, DB_ERROR_NONE); - } - } - } - RET_TO_BIF(match_list,DB_ERROR_NONE); -trap: - BUMP_ALL_REDS(p); - if (mpi.all_objects) - (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS; - hp = HAlloc(p,7+ERTS_MAGIC_REF_THING_SIZE); - mpb = erts_db_make_match_prog_ref(p,(mpi.mp),&hp); - continuation = TUPLE6(hp, tb->common.id, make_small(slot_ix), - make_small(chunk_size), - mpb, match_list, - make_small(got)); - mpi.mp = NULL; /*otherwise the return macro will destroy it */ - RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p, - continuation), - DB_ERROR_NONE); -#undef RET_TO_BIF - -} + ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret); -static int db_select_count_hash(Process *p, - DbTable *tbl, - Eterm pattern, - Eterm *ret) -{ - DbTableHash *tb = &tbl->hash; - struct mp_info mpi; - Uint slot_ix = 0; - HashDbTerm* current = NULL; - unsigned current_list_pos = 0; - Eterm *hp; - int num_left = 1000; - Uint got = 0; - Eterm continuation; - int errcode; - Eterm egot; - Eterm mpb; - erts_smp_rwmtx_t* lck; - -#define RET_TO_BIF(Term,RetVal) do { \ - if (mpi.mp != NULL) { \ - erts_bin_free(mpi.mp); \ - } \ - if (mpi.lists != mpi.dlists) { \ - erts_free(ERTS_ALC_T_DB_SEL_LIST, \ - (void *) mpi.lists); \ - } \ - *ret = (Term); \ - return RetVal; \ - } while(0) +done: + /* We should only jump directly to this label if + * we've already called on_loop_ended / on_trap + */ + return ret_value; +#ifndef SMP +#undef lock_hash_function +#undef unlock_hash_function +#endif +} - if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) { - RET_TO_BIF(NIL,errcode); - } - if (!mpi.something_can_match) { - RET_TO_BIF(make_small(0), DB_ERROR_NONE); - /* can't possibly match anything */ - } +/* + * Common traversal trapping/continuation code; + * used by select_count, select_delete and select_replace, + * as well as their continuation-handling counterparts. 
+ */ - if (!mpi.key_given) { - /* Run this code if pattern is variable or GETKEY(pattern) */ - /* is a variable */ - slot_ix = 0; - lck = RLOCK_HASH(tb,slot_ix); - current = BUCKET(tb,slot_ix); - } else { - /* We have at least one */ - slot_ix = mpi.lists[current_list_pos].ix; - lck = RLOCK_HASH(tb, slot_ix); - current = *(mpi.lists[current_list_pos].bucket); - ASSERT(current == BUCKET(tb,slot_ix)); - ++current_list_pos; - } +static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function, + Process* p, + DbTableHash* tb, + Eterm tid, + Eterm* prev_continuation_tptr, + Sint slot_ix, + Sint got, + Binary** mpp, + Eterm* ret) +{ + Eterm* hp; + Eterm egot; + Eterm mpb; + Eterm continuation; + int is_first_trap = (prev_continuation_tptr == NULL); + size_t base_halloc_sz = (is_first_trap ? ERTS_MAGIC_REF_THING_SIZE : 0); - for(;;) { - if (current != NULL) { - if (current->hvalue != INVALID_HASH) { - if (db_match_dbterm(&tb->common, p, mpi.mp, 0, - ¤t->dbterm, NULL,0) == am_true) { - ++got; - } - --num_left; - } - current = current->next; - } - else { /* next bucket */ - if (mpi.key_given) { /* Key is bound */ - RUNLOCK_HASH(lck); - if (current_list_pos == mpi.num_lists) { - goto done; - } else { - slot_ix = mpi.lists[current_list_pos].ix; - lck = RLOCK_HASH(tb, slot_ix); - current = *(mpi.lists[current_list_pos].bucket); - ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix)); - ++current_list_pos; - } - } - else { - if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) { - goto done; - } - if (num_left <= 0) { - RUNLOCK_HASH(lck); - goto trap; - } - current = BUCKET(tb,slot_ix); - } - } - } -done: - BUMP_REDS(p, 1000 - num_left); - RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE); -trap: BUMP_ALL_REDS(p); if (IS_USMALL(0, got)) { - hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + 5); + hp = HAlloc(p, base_halloc_sz + 5); egot = make_small(got); } else { - hp = HAlloc(p, BIG_UINT_HEAP_SIZE + ERTS_MAGIC_REF_THING_SIZE + 5); + hp = HAlloc(p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 5); egot = uint_to_big(got, hp); hp += BIG_UINT_HEAP_SIZE; } - mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp); - continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix), - mpb, - egot); - mpi.mp = NULL; /*otherwise the return macro will destroy it */ - RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p, - continuation), - DB_ERROR_NONE); -#undef RET_TO_BIF + if (is_first_trap) { + mpb = erts_db_make_match_prog_ref(p, *mpp, &hp); + *mpp = NULL; /* otherwise the caller will destroy it */ + } + else { + mpb = prev_continuation_tptr[3]; + } + + continuation = TUPLE4( + hp, + tid, + make_small(slot_ix), + mpb, + egot); + *ret = bif_trap1(trap_function, p, continuation); + return DB_ERROR_NONE; } -static int db_select_delete_hash(Process *p, - DbTable *tbl, - Eterm pattern, - Eterm *ret) +static ERTS_INLINE int unpack_simple_mtraversal_continuation(Eterm continuation, + Eterm** tptr_ptr, + Eterm* tid_ptr, + Sint* slot_ix_p, + Binary** mpp, + Sint* got_p) { - DbTableHash *tb = &tbl->hash; - struct mp_info mpi; - Uint slot_ix = 0; - HashDbTerm **current = NULL; - unsigned current_list_pos = 0; - Eterm *hp; - int num_left = 1000; - Uint got = 0; - Eterm continuation; - int errcode; - Uint last_pseudo_delete = (Uint)-1; - Eterm mpb; - Eterm egot; -#ifdef ERTS_SMP - erts_aint_t fixated_by_me = tb->common.is_thread_safe ? 
0 : 1; /* ToDo: something nicer */ -#else - erts_aint_t fixated_by_me = 0; -#endif - erts_smp_rwmtx_t* lck; - -#define RET_TO_BIF(Term,RetVal) do { \ - if (mpi.mp != NULL) { \ - erts_bin_free(mpi.mp); \ - } \ - if (mpi.lists != mpi.dlists) { \ - erts_free(ERTS_ALC_T_DB_SEL_LIST, \ - (void *) mpi.lists); \ - } \ - *ret = (Term); \ - return RetVal; \ - } while(0) - + Eterm* tptr; + ASSERT(is_tuple(continuation)); + tptr = tuple_val(continuation); + if (arityval(*tptr) != 4) + return 1; - if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) { - RET_TO_BIF(NIL,errcode); + if (! is_small(tptr[2]) || !(is_big(tptr[4]) || is_small(tptr[4]))) { + return 1; } - if (!mpi.something_can_match) { - RET_TO_BIF(make_small(0), DB_ERROR_NONE); - /* can't possibly match anything */ + *tptr_ptr = tptr; + *tid_ptr = tptr[1]; + *slot_ix_p = unsigned_val(tptr[2]); + *mpp = erts_db_get_match_prog_binary_unchecked(tptr[3]); + if (is_big(tptr[4])) { + *got_p = big_to_uint32(tptr[4]); + } + else { + *got_p = unsigned_val(tptr[4]); } + return 0; +} - if (!mpi.key_given) { - /* Run this code if pattern is variable or GETKEY(pattern) */ - /* is a variable */ - lck = WLOCK_HASH(tb,slot_ix); - current = &BUCKET(tb,slot_ix); - } else { - /* We have at least one */ - slot_ix = mpi.lists[current_list_pos].ix; - lck = WLOCK_HASH(tb, slot_ix); - current = mpi.lists[current_list_pos++].bucket; - ASSERT(*current == BUCKET(tb,slot_ix)); + +/* + * + * select / select_chunk match traversal + * + */ + +#define MAX_SELECT_CHUNK_ITERATIONS 1000 + +typedef struct { + Process* p; + DbTableHash* tb; + Eterm tid; + Eterm* hp; + Sint chunk_size; + Eterm match_list; + Eterm* prev_continuation_tptr; +} mtraversal_select_chunk_context_t; + +static int mtraversal_select_chunk_on_nothing_can_match(void* context_ptr, Eterm* ret) { + mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr; + *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL); + return DB_ERROR_NONE; +} + +static int mtraversal_select_chunk_on_match_res(void* context_ptr, Sint slot_ix, + HashDbTerm*** current_ptr_ptr, + Eterm match_res) +{ + mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr; + if (is_value(match_res)) { + sc_context_ptr->match_list = CONS(sc_context_ptr->hp, match_res, sc_context_ptr->match_list); + return 1; } + return 0; +} +static int mtraversal_select_chunk_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got, + Sint iterations_left, Binary** mpp, Eterm* ret) +{ + mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr; + Eterm mpb; - for(;;) { - if ((*current) == NULL) { - if (mpi.key_given) { /* Key is bound */ - WUNLOCK_HASH(lck); - if (current_list_pos == mpi.num_lists) { - goto done; - } else { - slot_ix = mpi.lists[current_list_pos].ix; - lck = WLOCK_HASH(tb, slot_ix); - current = mpi.lists[current_list_pos].bucket; - ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix)); - ++current_list_pos; - } - } else { - if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) { - goto done; - } - if (num_left <= 0) { - WUNLOCK_HASH(lck); - goto trap; - } - current = &BUCKET(tb,slot_ix); - } - } - else if ((*current)->hvalue == INVALID_HASH) { - current = &((*current)->next); - } - else { - int did_erase = 0; - if (db_match_dbterm(&tb->common, p, mpi.mp, 0, - &(*current)->dbterm, NULL, 0) == am_true) { - HashDbTerm *del; - if (NFIXED(tb) > fixated_by_me) { /* fixated by others? 
*/ - if (slot_ix != last_pseudo_delete) { - if (!add_fixed_deletion(tb, slot_ix, fixated_by_me)) - goto do_erase; - last_pseudo_delete = slot_ix; - } - (*current)->hvalue = INVALID_HASH; - } else { - do_erase: - del = *current; - *current = (*current)->next; - free_term(tb, del); - did_erase = 1; - } - erts_smp_atomic_dec_nob(&tb->common.nitems); - ++got; - } - --num_left; - if (!did_erase) { - current = &((*current)->next); - } - } + if (iterations_left == MAX_SELECT_CHUNK_ITERATIONS) { + /* We didn't get to iterate a single time, which means EOT */ + ASSERT(sc_context_ptr->match_list == NIL); + *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL); + return DB_ERROR_NONE; } -done: - BUMP_REDS(p, 1000 - num_left); - if (got) { - try_shrink(tb); + else { + ASSERT(iterations_left < MAX_SELECT_CHUNK_ITERATIONS); + BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left); + if (sc_context_ptr->chunk_size) { + Eterm continuation; + Eterm rest = NIL; + Sint rest_size = 0; + + if (got > sc_context_ptr->chunk_size) { /* Split list in return value and 'rest' */ + Eterm tmp = sc_context_ptr->match_list; + rest = sc_context_ptr->match_list; + while (got-- > sc_context_ptr->chunk_size + 1) { + tmp = CDR(list_val(tmp)); + ++rest_size; + } + ++rest_size; + sc_context_ptr->match_list = CDR(list_val(tmp)); + CDR(list_val(tmp)) = NIL; /* Destructive, the list has never + been in 'user space' */ + } + if (rest != NIL || slot_ix >= 0) { /* Need more calls */ + sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3 + 7 + ERTS_MAGIC_REF_THING_SIZE); + mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &sc_context_ptr->hp); + continuation = TUPLE6( + sc_context_ptr->hp, + sc_context_ptr->tid, + make_small(slot_ix), + make_small(sc_context_ptr->chunk_size), + mpb, rest, + make_small(rest_size)); + *mpp = NULL; /* Otherwise the caller will destroy it */ + sc_context_ptr->hp += 7; + *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, continuation); + return DB_ERROR_NONE; + } else { /* All data is exhausted */ + if (sc_context_ptr->match_list != NIL) { /* No more data to search but still a + result to return to the caller */ + sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3); + *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, am_EOT); + return DB_ERROR_NONE; + } else { /* Reached the end of the ttable with no data to return */ + *ret = am_EOT; + return DB_ERROR_NONE; + } + } + } + *ret = sc_context_ptr->match_list; + return DB_ERROR_NONE; } - RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE); -trap: - BUMP_ALL_REDS(p); - if (IS_USMALL(0, got)) { - hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + 5); - egot = make_small(got); +} + +static int mtraversal_select_chunk_on_trap(void* context_ptr, Sint slot_ix, Sint got, + Binary** mpp, Eterm* ret) +{ + mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr; + Eterm mpb; + Eterm continuation; + Eterm* hp; + + BUMP_ALL_REDS(sc_context_ptr->p); + + if (sc_context_ptr->prev_continuation_tptr == NULL) { + /* First time we're trapping */ + hp = HAlloc(sc_context_ptr->p, 7 + ERTS_MAGIC_REF_THING_SIZE); + mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &hp); + continuation = TUPLE6( + hp, + sc_context_ptr->tid, + make_small(slot_ix), + make_small(sc_context_ptr->chunk_size), + mpb, + sc_context_ptr->match_list, + make_small(got)); + *mpp = NULL; /* otherwise the caller will destroy it */ } else { - hp = HAlloc(p, BIG_UINT_HEAP_SIZE + ERTS_MAGIC_REF_THING_SIZE + 5); - egot = 
uint_to_big(got, hp); - hp += BIG_UINT_HEAP_SIZE; - } - mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp); - continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix), - mpb, - egot); - mpi.mp = NULL; /*otherwise the return macro will destroy it */ - RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p, - continuation), - DB_ERROR_NONE); + /* Not the first time we're trapping; reuse continuation terms */ + hp = HAlloc(sc_context_ptr->p, 7); + continuation = TUPLE6( + hp, + sc_context_ptr->prev_continuation_tptr[1], + make_small(slot_ix), + sc_context_ptr->prev_continuation_tptr[3], + sc_context_ptr->prev_continuation_tptr[4], + sc_context_ptr->match_list, + make_small(got)); + } + *ret = bif_trap1(&ets_select_continue_exp, sc_context_ptr->p, continuation); + return DB_ERROR_NONE; +} -#undef RET_TO_BIF +static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reverse, Eterm *ret) { + return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret); +} +static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size, + int reverse, Eterm *ret) +{ + mtraversal_select_chunk_context_t sc_context; + sc_context.p = p; + sc_context.tb = &tbl->hash; + sc_context.tid = tid; + sc_context.hp = NULL; + sc_context.chunk_size = chunk_size; + sc_context.match_list = NIL; + sc_context.prev_continuation_tptr = NULL; + + return match_traverse( + sc_context.p, sc_context.tb, + pattern, NULL, + sc_context.chunk_size, + MAX_SELECT_CHUNK_ITERATIONS, + &sc_context.hp, 0, + mtraversal_select_chunk_on_nothing_can_match, + mtraversal_select_chunk_on_match_res, + mtraversal_select_chunk_on_loop_ended, + mtraversal_select_chunk_on_trap, + &sc_context, ret); } + /* -** This is called when select_delete traps -*/ -static int db_select_delete_continue_hash(Process *p, - DbTable *tbl, - Eterm continuation, - Eterm *ret) + * + * select_continue match traversal + * + */ + +static int mtraversal_select_chunk_continue_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got, + Sint iterations_left, Binary** mpp, Eterm* ret) { - DbTableHash *tb = &tbl->hash; - Uint slot_ix; - Uint last_pseudo_delete = (Uint)-1; - HashDbTerm **current = NULL; - Eterm *hp; - int num_left = 1000; - Uint got; - Eterm *tptr; - Binary *mp; - Eterm egot; - int fixated_by_me = ONLY_WRITER(p,tb) ? 
0 : 1; /* ToDo: something nicer */ - erts_smp_rwmtx_t* lck; + mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr; + Eterm continuation; + Eterm rest = NIL; + Eterm* hp; + + ASSERT(iterations_left <= MAX_SELECT_CHUNK_ITERATIONS); + BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left); + if (sc_context_ptr->chunk_size) { + Sint rest_size = 0; + if (got > sc_context_ptr->chunk_size) { + /* Cannot write destructively here, + the list may have + been in user space */ + hp = HAlloc(sc_context_ptr->p, (got - sc_context_ptr->chunk_size) * 2); + while (got-- > sc_context_ptr->chunk_size) { + rest = CONS(hp, CAR(list_val(sc_context_ptr->match_list)), rest); + hp += 2; + sc_context_ptr->match_list = CDR(list_val(sc_context_ptr->match_list)); + ++rest_size; + } + } + if (rest != NIL || slot_ix >= 0) { + hp = HAlloc(sc_context_ptr->p, 3 + 7); + continuation = TUPLE6( + hp, + sc_context_ptr->prev_continuation_tptr[1], + make_small(slot_ix), + sc_context_ptr->prev_continuation_tptr[3], + sc_context_ptr->prev_continuation_tptr[4], + rest, + make_small(rest_size)); + hp += 7; + *ret = TUPLE2(hp, sc_context_ptr->match_list, continuation); + return DB_ERROR_NONE; + } else { + if (sc_context_ptr->match_list != NIL) { + hp = HAlloc(sc_context_ptr->p, 3); + *ret = TUPLE2(hp, sc_context_ptr->match_list, am_EOT); + return DB_ERROR_NONE; + } else { + *ret = am_EOT; + return DB_ERROR_NONE; + } + } + } + *ret = sc_context_ptr->match_list; + return DB_ERROR_NONE; +} -#define RET_TO_BIF(Term,RetVal) do { \ - *ret = (Term); \ - return RetVal; \ - } while(0) +/* + * This is called when select traps + */ +static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) { + mtraversal_select_chunk_context_t sc_context = {0}; + Eterm* tptr; + Eterm tid; + Binary* mp; + Sint got; + Sint slot_ix; + Sint chunk_size; + Eterm match_list; + Sint iterations_left = MAX_SELECT_CHUNK_ITERATIONS; - + /* Decode continuation. We know it's a tuple but not the arity or anything else */ + ASSERT(is_tuple(continuation)); tptr = tuple_val(continuation); - slot_ix = unsigned_val(tptr[2]); - mp = erts_db_get_match_prog_binary_unchecked(tptr[3]); - if (is_big(tptr[4])) { - got = big_to_uint32(tptr[4]); - } else { - got = unsigned_val(tptr[4]); - } - - lck = WLOCK_HASH(tb,slot_ix); - if (slot_ix >= NACTIVE(tb)) { - WUNLOCK_HASH(lck); - goto done; - } - current = &BUCKET(tb,slot_ix); - for(;;) { - if ((*current) == NULL) { - if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) { - goto done; - } - if (num_left <= 0) { - WUNLOCK_HASH(lck); - goto trap; - } - current = &BUCKET(tb,slot_ix); - } - else if ((*current)->hvalue == INVALID_HASH) { - current = &((*current)->next); - } - else { - int did_erase = 0; - if (db_match_dbterm(&tb->common, p, mp, 0, - &(*current)->dbterm, NULL, 0) == am_true) { - HashDbTerm *del; - if (NFIXED(tb) > fixated_by_me) { /* fixated by others? 
*/ - if (slot_ix != last_pseudo_delete) { - if (!add_fixed_deletion(tb, slot_ix, fixated_by_me)) - goto do_erase; - last_pseudo_delete = slot_ix; - } - (*current)->hvalue = INVALID_HASH; - } else { - do_erase: - del = *current; - *current = (*current)->next; - free_term(tb, del); - did_erase = 1; - } - erts_smp_atomic_dec_nob(&tb->common.nitems); - ++got; - } - - --num_left; - if (!did_erase) { - current = &((*current)->next); - } - } - } -done: - BUMP_REDS(p, 1000 - num_left); - if (got) { - try_shrink(tb); - } - RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE); -trap: - BUMP_ALL_REDS(p); - if (IS_USMALL(0, got)) { - hp = HAlloc(p, 5); - egot = make_small(got); - } - else { - hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5); - egot = uint_to_big(got, hp); - hp += BIG_UINT_HEAP_SIZE; - } - continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix), - tptr[3], - egot); - RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p, - continuation), - DB_ERROR_NONE); + if (arityval(*tptr) != 6) + goto badparam; + + if (!is_small(tptr[2]) || !is_small(tptr[3]) || + !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6])) + goto badparam; + if ((chunk_size = signed_val(tptr[3])) < 0) + goto badparam; + + mp = erts_db_get_match_prog_binary(tptr[4]); + if (mp == NULL) + goto badparam; + + if ((got = signed_val(tptr[6])) < 0) + goto badparam; -#undef RET_TO_BIF + tid = tptr[1]; + slot_ix = signed_val(tptr[2]); + match_list = tptr[5]; + /* Proceed */ + sc_context.p = p; + sc_context.tb = &tbl->hash; + sc_context.tid = tid; + sc_context.hp = NULL; + sc_context.chunk_size = chunk_size; + sc_context.match_list = match_list; + sc_context.prev_continuation_tptr = tptr; + + return match_traverse_continue( + sc_context.p, sc_context.tb, sc_context.chunk_size, + iterations_left, &sc_context.hp, slot_ix, got, &mp, 0, + mtraversal_select_chunk_on_match_res, /* Reuse callback */ + mtraversal_select_chunk_continue_on_loop_ended, + mtraversal_select_chunk_on_trap, /* Reuse callback */ + &sc_context, ret); + +badparam: + *ret = NIL; + return DB_ERROR_BADPARAM; } - + +#undef MAX_SELECT_CHUNK_ITERATIONS + + /* -** This is called when select_count traps -*/ -static int db_select_count_continue_hash(Process *p, - DbTable *tbl, - Eterm continuation, - Eterm *ret) + * + * select_count match traversal + * + */ + +#define MAX_SELECT_COUNT_ITERATIONS 1000 + +typedef struct { + Process* p; + DbTableHash* tb; + Eterm tid; + Eterm* hp; + Eterm* prev_continuation_tptr; +} mtraversal_select_count_context_t; + +static int mtraversal_select_count_on_nothing_can_match(void* context_ptr, Eterm* ret) { + *ret = make_small(0); + return DB_ERROR_NONE; +} + +static int mtraversal_select_count_on_match_res(void* context_ptr, Sint slot_ix, + HashDbTerm*** current_ptr_ptr, + Eterm match_res) { - DbTableHash *tb = &tbl->hash; - Uint slot_ix; - HashDbTerm* current; - Eterm *hp; - int num_left = 1000; - Uint got; - Eterm *tptr; - Binary *mp; - Eterm egot; - erts_smp_rwmtx_t* lck; + return (match_res == am_true); +} -#define RET_TO_BIF(Term,RetVal) do { \ - *ret = (Term); \ - return RetVal; \ - } while(0) +static int mtraversal_select_count_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got, + Sint iterations_left, Binary** mpp, Eterm* ret) +{ + mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr; + ASSERT(iterations_left <= MAX_SELECT_COUNT_ITERATIONS); + BUMP_REDS(scnt_context_ptr->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left); + *ret = erts_make_integer(got, scnt_context_ptr->p); + 
return DB_ERROR_NONE; +} - - tptr = tuple_val(continuation); - slot_ix = unsigned_val(tptr[2]); - mp = erts_db_get_match_prog_binary_unchecked(tptr[3]); - if (is_big(tptr[4])) { - got = big_to_uint32(tptr[4]); - } else { - got = unsigned_val(tptr[4]); - } - +static int mtraversal_select_count_on_trap(void* context_ptr, Sint slot_ix, Sint got, + Binary** mpp, Eterm* ret) +{ + mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr; + return on_mtraversal_simple_trap( + &ets_select_count_continue_exp, + scnt_context_ptr->p, + scnt_context_ptr->tb, + scnt_context_ptr->tid, + scnt_context_ptr->prev_continuation_tptr, + slot_ix, got, mpp, ret); +} - lck = RLOCK_HASH(tb, slot_ix); - if (slot_ix >= NACTIVE(tb)) { /* Is this posible? */ - RUNLOCK_HASH(lck); - goto done; - } - current = BUCKET(tb,slot_ix); - - for(;;) { - if (current != NULL) { - if (current->hvalue == INVALID_HASH) { - current = current->next; - continue; - } - if (db_match_dbterm(&tb->common, p, mp, 0, ¤t->dbterm, - NULL, 0) == am_true) { - ++got; - } - --num_left; - current = current->next; - } - else { /* next bucket */ - if ((slot_ix = next_slot(tb,slot_ix,&lck)) == 0) { - goto done; - } - if (num_left <= 0) { - RUNLOCK_HASH(lck); - goto trap; - } - current = BUCKET(tb,slot_ix); - } - } -done: - BUMP_REDS(p, 1000 - num_left); - RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE); -trap: - BUMP_ALL_REDS(p); - if (IS_USMALL(0, got)) { - hp = HAlloc(p, 5); - egot = make_small(got); +static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) { + mtraversal_select_count_context_t scnt_context = {0}; + Sint iterations_left = MAX_SELECT_COUNT_ITERATIONS; + Sint chunk_size = 0; + + scnt_context.p = p; + scnt_context.tb = &tbl->hash; + scnt_context.tid = tid; + scnt_context.hp = NULL; + scnt_context.prev_continuation_tptr = NULL; + + return match_traverse( + scnt_context.p, scnt_context.tb, + pattern, NULL, + chunk_size, iterations_left, NULL, 0, + mtraversal_select_count_on_nothing_can_match, + mtraversal_select_count_on_match_res, + mtraversal_select_count_on_loop_ended, + mtraversal_select_count_on_trap, + &scnt_context, ret); +} + +/* + * This is called when select_count traps + */ +static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) { + mtraversal_select_count_context_t scnt_context = {0}; + Eterm* tptr; + Eterm tid; + Binary* mp; + Sint got; + Sint slot_ix; + Sint chunk_size = 0; + *ret = NIL; + + if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) { + *ret = NIL; + return DB_ERROR_BADPARAM; + } + + scnt_context.p = p; + scnt_context.tb = &tbl->hash; + scnt_context.tid = tid; + scnt_context.hp = NULL; + scnt_context.prev_continuation_tptr = tptr; + + return match_traverse_continue( + scnt_context.p, scnt_context.tb, chunk_size, + MAX_SELECT_COUNT_ITERATIONS, + NULL, slot_ix, got, &mp, 0, + mtraversal_select_count_on_match_res, /* Reuse callback */ + mtraversal_select_count_on_loop_ended, /* Reuse callback */ + mtraversal_select_count_on_trap, /* Reuse callback */ + &scnt_context, ret); +} + +#undef MAX_SELECT_COUNT_ITERATIONS + + +/* + * + * select_delete match traversal + * + */ + +#define MAX_SELECT_DELETE_ITERATIONS 1000 + +typedef struct { + Process* p; + DbTableHash* tb; + Eterm tid; + Eterm* hp; + Eterm* prev_continuation_tptr; + erts_aint_t fixated_by_me; + Uint last_pseudo_delete; +} mtraversal_select_delete_context_t; + +static int 
mtraversal_select_delete_on_nothing_can_match(void* context_ptr, Eterm* ret) { + *ret = make_small(0); + return DB_ERROR_NONE; +} + +static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix, + HashDbTerm*** current_ptr_ptr, + Eterm match_res) +{ + HashDbTerm** current_ptr = *current_ptr_ptr; + mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr; + HashDbTerm* del; + if (match_res != am_true) + return 0; + + if (NFIXED(sd_context_ptr->tb) > sd_context_ptr->fixated_by_me) { /* fixated by others? */ + if (slot_ix != sd_context_ptr->last_pseudo_delete) { + if (!add_fixed_deletion(sd_context_ptr->tb, slot_ix, sd_context_ptr->fixated_by_me)) + goto do_erase; + sd_context_ptr->last_pseudo_delete = slot_ix; + } + (*current_ptr)->hvalue = INVALID_HASH; } else { - hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5); - egot = uint_to_big(got, hp); - hp += BIG_UINT_HEAP_SIZE; + do_erase: + del = *current_ptr; + *current_ptr = (*current_ptr)->next; // replace pointer to term using next + free_term(sd_context_ptr->tb, del); } - continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix), - tptr[3], - egot); - RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p, - continuation), - DB_ERROR_NONE); + erts_smp_atomic_dec_nob(&sd_context_ptr->tb->common.nitems); -#undef RET_TO_BIF + return 1; +} +static int mtraversal_select_delete_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got, + Sint iterations_left, Binary** mpp, Eterm* ret) +{ + mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr; + ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS); + BUMP_REDS(sd_context_ptr->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left); + if (got) { + try_shrink(sd_context_ptr->tb); + } + *ret = erts_make_integer(got, sd_context_ptr->p); + return DB_ERROR_NONE; } - + +static int mtraversal_select_delete_on_trap(void* context_ptr, Sint slot_ix, Sint got, + Binary** mpp, Eterm* ret) +{ + mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr; + return on_mtraversal_simple_trap( + &ets_select_delete_continue_exp, + sd_context_ptr->p, + sd_context_ptr->tb, + sd_context_ptr->tid, + sd_context_ptr->prev_continuation_tptr, + slot_ix, got, mpp, ret); +} + +static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) { + mtraversal_select_delete_context_t sd_context = {0}; + Sint chunk_size = 0; + + sd_context.p = p; + sd_context.tb = &tbl->hash; + sd_context.tid = tid; + sd_context.hp = NULL; + sd_context.prev_continuation_tptr = NULL; +#ifdef ERTS_SMP + sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 
0 : 1; /* TODO: something nicer */ +#else + sd_context.fixated_by_me = 0; +#endif + sd_context.last_pseudo_delete = (Uint) -1; + + return match_traverse( + sd_context.p, sd_context.tb, + pattern, NULL, + chunk_size, + MAX_SELECT_DELETE_ITERATIONS, NULL, 1, + mtraversal_select_delete_on_nothing_can_match, + mtraversal_select_delete_on_match_res, + mtraversal_select_delete_on_loop_ended, + mtraversal_select_delete_on_trap, + &sd_context, ret); +} + +/* + * This is called when select_delete traps + */ +static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) { + mtraversal_select_delete_context_t sd_context = {0}; + Eterm* tptr; + Eterm tid; + Binary* mp; + Sint got; + Sint slot_ix; + Sint chunk_size = 0; + + if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) { + *ret = NIL; + return DB_ERROR_BADPARAM; + } + + sd_context.p = p; + sd_context.tb = &tbl->hash; + sd_context.tid = tid; + sd_context.hp = NULL; + sd_context.prev_continuation_tptr = tptr; + sd_context.fixated_by_me = ONLY_WRITER(p, sd_context.tb) ? 0 : 1; /* TODO: something nicer */ + sd_context.last_pseudo_delete = (Uint) -1; + + return match_traverse_continue( + sd_context.p, sd_context.tb, chunk_size, + MAX_SELECT_DELETE_ITERATIONS, + NULL, slot_ix, got, &mp, 1, + mtraversal_select_delete_on_match_res, /* Reuse callback */ + mtraversal_select_delete_on_loop_ended, /* Reuse callback */ + mtraversal_select_delete_on_trap, /* Reuse callback */ + &sd_context, ret); +} + +#undef MAX_SELECT_DELETE_ITERATIONS + + +/* + * + * select_replace match traversal + * + */ + +#define MAX_SELECT_REPLACE_ITERATIONS 1000 + +typedef struct { + Process* p; + DbTableHash* tb; + Eterm tid; + Eterm* hp; + Eterm* prev_continuation_tptr; +} mtraversal_select_replace_context_t; + +static int mtraversal_select_replace_on_nothing_can_match(void* context_ptr, Eterm* ret) { + *ret = make_small(0); + return DB_ERROR_NONE; +} + +static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_ix, + HashDbTerm*** current_ptr_ptr, + Eterm match_res) +{ + mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr; + DbTableHash* tb = sr_context_ptr->tb; + HashDbTerm* new; + HashDbTerm* next; + HashValue hval; + + if (is_value(match_res)) { +#ifdef DEBUG + Eterm key = db_getkey(tb->common.keypos, match_res); + ASSERT(is_value(key)); + ASSERT(eq(key, GETKEY(tb, (**current_ptr_ptr)->dbterm.tpl))); +#endif + next = (**current_ptr_ptr)->next; + hval = (**current_ptr_ptr)->hvalue; + new = new_dbterm(tb, match_res); + new->next = next; + new->hvalue = hval; + free_term(tb, **current_ptr_ptr); + **current_ptr_ptr = new; /* replace 'next' pointer in previous object */ + *current_ptr_ptr = &((**current_ptr_ptr)->next); /* advance to next object */ + return 1; + } + return 0; +} + +static int mtraversal_select_replace_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got, + Sint iterations_left, Binary** mpp, Eterm* ret) +{ + mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr; + ASSERT(iterations_left <= MAX_SELECT_REPLACE_ITERATIONS); + /* the more objects we've replaced, the more reductions we've consumed */ + BUMP_REDS(sr_context_ptr->p, + MIN(MAX_SELECT_REPLACE_ITERATIONS * 2, + (MAX_SELECT_REPLACE_ITERATIONS - iterations_left) + (int)got)); + *ret = erts_make_integer(got, sr_context_ptr->p); + return DB_ERROR_NONE; +} + +static int mtraversal_select_replace_on_trap(void* 
context_ptr, Sint slot_ix, Sint got, + Binary** mpp, Eterm* ret) +{ + mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr; + return on_mtraversal_simple_trap( + &ets_select_replace_continue_exp, + sr_context_ptr->p, + sr_context_ptr->tb, + sr_context_ptr->tid, + sr_context_ptr->prev_continuation_tptr, + slot_ix, got, mpp, ret); +} + +static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) +{ + mtraversal_select_replace_context_t sr_context = {0}; + Sint chunk_size = 0; + + /* Bag implementation presented both semantic consistency and performance issues, + * unsupported for now + */ + ASSERT(!(tbl->hash.common.status & DB_BAG)); + + sr_context.p = p; + sr_context.tb = &tbl->hash; + sr_context.tid = tid; + sr_context.hp = NULL; + sr_context.prev_continuation_tptr = NULL; + + return match_traverse( + sr_context.p, sr_context.tb, + pattern, db_match_keeps_key, + chunk_size, + MAX_SELECT_REPLACE_ITERATIONS, NULL, 1, + mtraversal_select_replace_on_nothing_can_match, + mtraversal_select_replace_on_match_res, + mtraversal_select_replace_on_loop_ended, + mtraversal_select_replace_on_trap, + &sr_context, ret); +} + +/* + * This is called when select_replace traps + */ +static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) +{ + mtraversal_select_replace_context_t sr_context = {0}; + Eterm* tptr; + Eterm tid ; + Binary* mp; + Sint got; + Sint slot_ix; + Sint chunk_size = 0; + *ret = NIL; + + if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) { + *ret = NIL; + return DB_ERROR_BADPARAM; + } + + /* Proceed */ + sr_context.p = p; + sr_context.tb = &tbl->hash; + sr_context.tid = tid; + sr_context.hp = NULL; + sr_context.prev_continuation_tptr = tptr; + + return match_traverse_continue( + sr_context.p, sr_context.tb, chunk_size, + MAX_SELECT_REPLACE_ITERATIONS, + NULL, slot_ix, got, &mp, 1, + mtraversal_select_replace_on_match_res, /* Reuse callback */ + mtraversal_select_replace_on_loop_ended, /* Reuse callback */ + mtraversal_select_replace_on_trap, /* Reuse callback */ + &sr_context, ret); +} + + static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) { DbTableHash *tb = &tbl->hash; @@ -2101,6 +2289,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) return DB_ERROR_NONE; } + /* ** Other interface routines (not directly coupled to one bif) */ @@ -2197,19 +2386,17 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl) /* release all memory occupied by a single table */ static int db_free_table_hash(DbTable *tbl) { - while (!db_free_table_continue_hash(tbl)) + while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0) ; return 0; } -static int db_free_table_continue_hash(DbTable *tbl) +static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) { DbTableHash *tb = &tbl->hash; - int done; FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel); - ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb)); + ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE)); - done = 0; while (fixdel != NULL) { FixedDeletion *fx = fixdel; @@ -2219,22 +2406,21 @@ static int db_free_table_continue_hash(DbTable *tbl) (void *) fx, sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion)); - if (++done >= 2*DELETE_RECORD_LIMIT) { + if (--reds < 0) { erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); - return 0; /* Not done */ + 
return reds; /* Not done */ } } erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); - done /= 2; while(tb->nslots != 0) { - done += 1 + EXT_SEGSZ/64 + free_seg(tb, 1); + reds -= EXT_SEGSZ/64 + free_seg(tb, 1); /* * If we have done enough work, get out here. */ - if (done >= DELETE_RECORD_LIMIT) { - return 0; /* Not done */ + if (reds < 0) { + return reds; /* Not done */ } } #ifdef ERTS_SMP @@ -2249,7 +2435,7 @@ static int db_free_table_continue_hash(DbTable *tbl) } #endif ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); - return 1; /* Done */ + return reds; /* Done */ } @@ -2262,7 +2448,8 @@ static int db_free_table_continue_hash(DbTable *tbl) ** slots should be searched. Also compiles the match program */ static int analyze_pattern(DbTableHash *tb, Eterm pattern, - struct mp_info *mpi) + extra_match_validator_t extra_validator, /* Optional callback */ + struct mp_info *mpi) { Eterm *ptpl; Eterm lst, tpl, ttpl; @@ -2300,7 +2487,10 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern, i = 0; for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) { - Eterm body; + Eterm match; + Eterm guard; + Eterm body; + ttpl = CAR(list_val(lst)); if (!is_tuple(ttpl)) { if (buff != sbuff) { @@ -2315,9 +2505,17 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern, } return DB_ERROR_BADPARAM; } - matches[i] = tpl = ptpl[1]; - guards[i] = ptpl[2]; + matches[i] = match = tpl = ptpl[1]; + guards[i] = guard = ptpl[2]; bodies[i] = body = ptpl[3]; + + if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) { + if (buff != sbuff) { + erts_free(ERTS_ALC_T_DB_TMP, buff); + } + return DB_ERROR_BADPARAM; + } + if (!is_list(body) || CDR(list_val(body)) != NIL || CAR(list_val(body)) != am_DollarUnderscore) { mpi->all_objects = 0; @@ -3007,63 +3205,4 @@ Eterm erts_ets_hash_sizeof_ext_segtab(void) { return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1); } -/* For testing only */ -Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb) -{ - Eterm seg_cnt; - while (!begin_resizing(tb)) - /*spinn*/; - - seg_cnt = make_small(SLOT_IX_TO_SEG_IX(tb->nslots)); - done_resizing(tb); - return seg_cnt; -} -/* For testing only */ -Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) -{ - int seg_cnt, target; - - if (!is_small(memstate)) - return make_small(__LINE__); - - target = signed_val(memstate); - if (target < 1) - return make_small(__LINE__); - while (1) { - while (!begin_resizing(tb)) - /*spin*/; - seg_cnt = SLOT_IX_TO_SEG_IX(tb->nslots); - done_resizing(tb); - - if (target == seg_cnt) - return am_ok; - if (IS_FIXED(tb)) - return make_small(__LINE__); - if (target < seg_cnt) - shrink(tb, 0); - else - grow(tb, INT_MAX); - } -} - -#ifdef HARDDEBUG - -void db_check_table_hash(DbTable *tbl) -{ - DbTableHash *tb = &tbl->hash; - HashDbTerm* list; - int j; - - for (j = 0; j < NACTIVE(tb); j++) { - if ((list = BUCKET(tb,j)) != 0) { - while (list != 0) { - if (!is_tuple(make_tuple(list->dbterm.tpl))) { - erts_exit(ERTS_ERROR_EXIT, "Bad term in slot %d of ets table", j); - } - list = list->next; - } - } - } -} -#endif diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 6d25c73549..c340c72311 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -75,7 +75,7 @@ typedef struct db_table_hash { ** table types. The process is always an [in out] parameter. 
*/ void db_initialize_hash(void); -void db_unfix_table_hash(DbTableHash *tb /* [in out] */); +SWord db_unfix_table_hash(DbTableHash *tb); Uint db_kept_items_hash(DbTableHash *tb); /* Interface for meta pid table */ @@ -88,14 +88,6 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret); int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret); -int db_get_element_array(DbTable *tbl, - Eterm key, - int ndex, - Eterm *ret, - int *num_ret); - -int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value); - /* not yet in method table */ int db_mark_all_deleted_hash(DbTable *tbl); @@ -110,7 +102,5 @@ typedef struct { void db_calc_stats_hash(DbTableHash* tb, DbHashStats*); Eterm erts_ets_hash_sizeof_ext_segtab(void); -Eterm erts_ets_hash_get_memstate(Process*, DbTableHash* tb); -Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate); #endif /* _DB_HASH_H */ diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index c4ecd2ba37..ab8da6ccf6 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -76,9 +76,18 @@ ((Dtt->pos) ? \ (Dtt)->array[(Dtt)->pos - 1] : NULL) -#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL) +#define TOPN_NODE(Dtt, Pos) \ + (((Pos) < Dtt->pos) ? \ + (Dtt)->array[(Dtt)->pos - ((Pos) + 1)] : NULL) + +#define REPLACE_TOP_NODE(Dtt, Node) \ + if ((Dtt)->pos) (Dtt)->array[(Dtt)->pos - 1] = (Node) +#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL) +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif /* Obtain table static stack if available. NULL if not. ** Must be released with release_stack() @@ -180,7 +189,6 @@ static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old, static TreeDbTerm *traverse_until(TreeDbTerm *t, int *current, int to); static void check_slot_pos(DbTableTree *tb); static void check_saved_stack(DbTableTree *tb); -static int check_table_tree(DbTableTree* tb, TreeDbTerm *t); #define TREE_DEBUG #endif @@ -226,9 +234,9 @@ struct mp_info { Eterm most; /* The highest matching key (possibly * partially bound expression) */ - TreeDbTerm *save_term; /* If the key is completely bound, this - * will be the Tree node we're searching - * for, otherwise it will be useless */ + TreeDbTerm **save_term; /* If the key is completely bound, this + * will be the Tree node we're searching + * for, otherwise it will be useless */ Binary *mp; /* The compiled match program */ }; @@ -278,12 +286,30 @@ struct select_delete_context { }; /* + * Used by doit_select_replace + */ +struct select_replace_context { + Process *p; + DbTableTree *tb; + Binary *mp; + Eterm end_condition; + Eterm *lastobj; + Sint32 max; + int keypos; + int all_objects; + Sint replaced; +}; + +/* Used by select_replace on analyze_pattern */ +typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body); + +/* ** Forward declarations */ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key); static TreeDbTerm *linkout_object_tree(DbTableTree *tb, Eterm object); -static int do_free_tree_cont(DbTableTree *tb, int num_left); +static SWord do_free_tree_continue(DbTableTree *tb, SWord reds); static void free_term(DbTableTree *tb, TreeDbTerm* p); static int balance_left(TreeDbTerm **this); static int balance_right(TreeDbTerm **this); @@ -291,6 +317,7 @@ static int delsub(TreeDbTerm **this); static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot); static TreeDbTerm *find_node(DbTableTree *tb, Eterm key); static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key); 
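As a minimal illustration of the double-indirection technique the refactor leans on throughout this patch (the HashDbTerm** current_ptr threaded through the hash traversal, and the TreeDbTerm **save_term / find_ptr changes here), the standalone sketch below shows how walking a pointer to the previous link lets a callback delete or replace the current element in place, without tracking a separate "previous" pointer. The list type and function names are invented for the example and are not ERTS APIs.

#include <stdio.h>
#include <stdlib.h>

typedef struct node { int val; struct node *next; } node;

/* Remove every element for which keep() returns false, editing the list
 * through the previous link (the head pointer or the previous node's next). */
static void filter_in_place(node **link, int (*keep)(int))
{
    while (*link != NULL) {
        if (!keep((*link)->val)) {
            node *del = *link;
            *link = del->next;          /* unlink via the previous link */
            free(del);                  /* *link is already the next node */
        } else {
            link = &(*link)->next;      /* advance to the next link */
        }
    }
}

static int keep_even(int v) { return v % 2 == 0; }

int main(void)
{
    node *head = NULL, **tail = &head;
    for (int i = 1; i <= 6; i++) {
        node *n = malloc(sizeof(node));
        n->val = i;
        n->next = NULL;
        *tail = n;
        tail = &n->next;
    }
    filter_in_place(&head, keep_even);
    for (node *n = head; n != NULL; n = n->next)
        printf("%d ", n->val);          /* prints: 2 4 6 */
    printf("\n");
    return 0;
}

The same idea is what lets the on_match_res callbacks above erase or swap a term and then decide whether the caller should advance, since the caller only ever holds the link, never the node itself.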
+static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack*, TreeDbTerm *this); static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key); static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key); static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*, @@ -312,14 +339,23 @@ static void traverse_forward(DbTableTree *tb, TreeDbTerm *, void *, int), - void *context); -static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret, + void *context); +static void traverse_update_backwards(DbTableTree *tb, + DbTreeStack*, + Eterm lastkey, + int (*doit)(DbTableTree *tb, + TreeDbTerm **, // out + void *, + int), + void *context); +static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret, Eterm *partly_bound_key); static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key); static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done); static int analyze_pattern(DbTableTree *tb, Eterm pattern, - struct mp_info *mpi); + extra_match_validator_t extra_validator, /* Optional callback */ + struct mp_info *mpi); static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr, @@ -336,6 +372,10 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr, int forward); +static int doit_select_replace(DbTableTree *tb, + TreeDbTerm **this_ptr, + void *ptr, + int forward); static int partly_bound_can_match_lesser(Eterm partly_bound_1, Eterm partly_bound_2); @@ -369,27 +409,31 @@ static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret); static int db_erase_object_tree(DbTable *tbl, Eterm object,Eterm *ret); static int db_slot_tree(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret); -static int db_select_tree(Process *p, DbTable *tbl, +static int db_select_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reversed, Eterm *ret); -static int db_select_count_tree(Process *p, DbTable *tbl, +static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret); -static int db_select_chunk_tree(Process *p, DbTable *tbl, +static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size, int reversed, Eterm *ret); static int db_select_continue_tree(Process *p, DbTable *tbl, Eterm continuation, Eterm *ret); static int db_select_count_continue_tree(Process *p, DbTable *tbl, Eterm continuation, Eterm *ret); -static int db_select_delete_tree(Process *p, DbTable *tbl, +static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret); static int db_select_delete_continue_tree(Process *p, DbTable *tbl, Eterm continuation, Eterm *ret); +static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret); +static int db_select_replace_continue_tree(Process *p, DbTable *tbl, + Eterm continuation, Eterm *ret); static int db_take_tree(Process *, DbTable *, Eterm, Eterm *); static void db_print_tree(fmtfn_t to, void *to_arg, int show, DbTable *tbl); static int db_free_table_tree(DbTable *tbl); -static int db_free_table_continue_tree(DbTable *tbl); +static SWord db_free_table_continue_tree(DbTable *tbl, SWord); static void db_foreach_offheap_tree(DbTable *, void (*)(ErlOffHeap *, void *), @@ -436,17 +480,14 @@ DbTableMethod db_tree = db_select_delete_continue_tree, db_select_count_tree, db_select_count_continue_tree, + db_select_replace_tree, + db_select_replace_continue_tree, db_take_tree, db_delete_all_objects_tree, db_free_table_tree, db_free_table_continue_tree, db_print_tree, 
db_foreach_offheap_tree, -#ifdef HARDDEBUG - db_check_table_tree, -#else - NULL, -#endif db_lookup_dbterm_tree, db_finalize_dbterm_tree @@ -1058,7 +1099,7 @@ static int db_select_continue_tree(Process *p, } -static int db_select_tree(Process *p, DbTable *tbl, +static int db_select_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reverse, Eterm *ret) { /* Strategy: Traverse backwards to build resulting list from tail to head */ @@ -1095,7 +1136,7 @@ static int db_select_tree(Process *p, DbTable *tbl, sc.got = 0; sc.chunk_size = 0; - if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(NIL,errcode); } @@ -1109,7 +1150,7 @@ static int db_select_tree(Process *p, DbTable *tbl, if (!mpi.got_partial && mpi.some_limitation && CMP_EQ(mpi.least,mpi.most)) { - doit_select(tb,mpi.save_term,&sc,0 /* direction doesn't matter */); + doit_select(tb,*(mpi.save_term),&sc,0 /* direction doesn't matter */); RET_TO_BIF(sc.accum,DB_ERROR_NONE); } @@ -1151,7 +1192,7 @@ static int db_select_tree(Process *p, DbTable *tbl, continuation = TUPLE8 (hp, - tb->common.id, + tid, key, sc.end_condition, /* From the match program, needn't be copied */ make_small(0), /* Chunk size of zero means not chunked to the @@ -1263,7 +1304,7 @@ static int db_select_count_continue_tree(Process *p, } -static int db_select_count_tree(Process *p, DbTable *tbl, +static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) { DbTableTree *tb = &tbl->tree; @@ -1298,7 +1339,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl, sc.keypos = tb->common.keypos; sc.got = 0; - if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(NIL,errcode); } @@ -1312,7 +1353,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl, if (!mpi.got_partial && mpi.some_limitation && CMP_EQ(mpi.least,mpi.most)) { - doit_select_count(tb,mpi.save_term,&sc,0 /* dummy */); + doit_select_count(tb,*(mpi.save_term),&sc,0 /* dummy */); RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE); } @@ -1349,7 +1390,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl, continuation = TUPLE5 (hp, - tb->common.id, + tid, key, sc.end_condition, /* From the match program, needn't be copied */ mpb, @@ -1363,7 +1404,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl, } -static int db_select_chunk_tree(Process *p, DbTable *tbl, +static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size, int reverse, Eterm *ret) @@ -1401,7 +1442,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, sc.got = 0; sc.chunk_size = chunk_size; - if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(NIL,errcode); } @@ -1415,7 +1456,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, if (!mpi.got_partial && mpi.some_limitation && CMP_EQ(mpi.least,mpi.most)) { - doit_select(tb,mpi.save_term,&sc, 0 /* direction doesn't matter */); + doit_select(tb,*(mpi.save_term),&sc, 0 /* direction doesn't matter */); if (sc.accum != NIL) { hp=HAlloc(p, 3); RET_TO_BIF(TUPLE2(hp,sc.accum,am_EOT),DB_ERROR_NONE); @@ -1474,7 +1515,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, continuation = TUPLE8 (hp, - tb->common.id, + tid, key, sc.end_condition, /* From the 
match program, needn't be copied */ @@ -1499,7 +1540,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp); continuation = TUPLE8 (hp, - tb->common.id, + tid, key, sc.end_condition, /* From the match program, needn't be copied */ make_small(chunk_size), @@ -1605,7 +1646,7 @@ static int db_select_delete_continue_tree(Process *p, #undef RET_TO_BIF } -static int db_select_delete_tree(Process *p, DbTable *tbl, +static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) { DbTableTree *tb = &tbl->tree; @@ -1643,7 +1684,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, sc.keypos = tb->common.keypos; sc.tb = tb; - if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(0,errcode); } @@ -1656,7 +1697,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, if (!mpi.got_partial && mpi.some_limitation && CMP_EQ(mpi.least,mpi.most)) { - doit_select_delete(tb,mpi.save_term,&sc, 0 /* direction doesn't + doit_select_delete(tb,*(mpi.save_term),&sc, 0 /* direction doesn't matter */); RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE); } @@ -1691,7 +1732,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, continuation = TUPLE5 (hp, - tb->common.id, + tid, key, sc.end_condition, /* From the match program, needn't be copied */ mpb, @@ -1708,6 +1749,208 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, } +static int db_select_replace_continue_tree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) +{ + DbTableTree *tb = &tbl->tree; + DbTreeStack* stack; + struct select_replace_context sc; + unsigned sz; + Eterm *hp; + Eterm lastkey; + Eterm end_condition; + Binary *mp; + Eterm key; + Eterm *tptr; + Eterm ereplaced; + Sint prev_replaced; + + +#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0); + + /* Decode continuation. We know it's a tuple and everything else as + this is only called by ourselves */ + + /* continuation: + {Table, Lastkey, EndCondition, MatchProgBin, HowManyReplaced}*/ + + tptr = tuple_val(continuation); + + if (arityval(*tptr) != 5) + erts_exit(ERTS_ERROR_EXIT,"Internal error in ets:select_replace/1"); + + lastkey = tptr[2]; + end_condition = tptr[3]; + mp = erts_db_get_match_prog_binary_unchecked(tptr[4]); + + sc.p = p; + sc.mp = mp; + sc.end_condition = NIL; + sc.lastobj = NULL; + sc.max = 1000; + sc.keypos = tb->common.keypos; + if (is_big(tptr[5])) { + sc.replaced = big_to_uint32(tptr[5]); + } else { + sc.replaced = unsigned_val(tptr[5]); + } + prev_replaced = sc.replaced; + + stack = get_any_stack(tb); + traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc); + release_stack(tb,stack); + + // the more objects we've replaced, the more reductions we've consumed + BUMP_REDS(p, MIN(2000, (1000 - sc.max) + (sc.replaced - prev_replaced))); + + if (sc.max > 0) { + RET_TO_BIF(erts_make_integer(sc.replaced,p), DB_ERROR_NONE); + } + key = GETKEY(tb, sc.lastobj); + if (end_condition != NIL && + (cmp_partly_bound(end_condition,key) > 0)) { + /* done anyway */ + RET_TO_BIF(make_small(sc.replaced),DB_ERROR_NONE); + } + /* Not done yet, let's trap. 
*/ + sz = size_object(key); + if (IS_USMALL(0, sc.replaced)) { + hp = HAlloc(p, sz + 6); + ereplaced = make_small(sc.replaced); + } + else { + hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + 6); + ereplaced = uint_to_big(sc.replaced, hp); + hp += BIG_UINT_HEAP_SIZE; + } + key = copy_struct(key, sz, &hp, &MSO(p)); + continuation = TUPLE5 + (hp, + tptr[1], + key, + tptr[3], + tptr[4], + ereplaced); + RET_TO_BIF(bif_trap1(&ets_select_replace_continue_exp, p, continuation), + DB_ERROR_NONE); + +#undef RET_TO_BIF +} + +static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret) +{ + DbTableTree *tb = &tbl->tree; + DbTreeStack* stack; + struct select_replace_context sc; + struct mp_info mpi; + Eterm lastkey = THE_NON_VALUE; + Eterm key; + Eterm continuation; + unsigned sz; + Eterm *hp; + TreeDbTerm *this; + int errcode; + Eterm ereplaced; + Eterm mpb; + + +#define RET_TO_BIF(Term,RetVal) do { \ + if (mpi.mp != NULL) { \ + erts_bin_free(mpi.mp); \ + } \ + *ret = (Term); \ + return RetVal; \ + } while(0) + + mpi.mp = NULL; + + sc.lastobj = NULL; + sc.p = p; + sc.tb = tb; + sc.max = 1000; + sc.end_condition = NIL; + sc.keypos = tb->common.keypos; + sc.replaced = 0; + + if ((errcode = analyze_pattern(tb, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) { + RET_TO_BIF(NIL,errcode); + } + + if (!mpi.something_can_match) { + RET_TO_BIF(make_small(0),DB_ERROR_NONE); + /* can't possibly match anything */ + } + + sc.mp = mpi.mp; + sc.all_objects = mpi.all_objects; + + stack = get_static_stack(tb); + if (!mpi.got_partial && mpi.some_limitation && + CMP_EQ(mpi.least,mpi.most)) { + TreeDbTerm* term = *(mpi.save_term); + doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */); + if (stack != NULL) { + if (TOP_NODE(stack) == term) + // throw away potentially invalid reference + REPLACE_TOP_NODE(stack, *(mpi.save_term)); + release_stack(tb, stack); + } + RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE); + } + + if (stack == NULL) + stack = get_any_stack(tb); + + if (mpi.some_limitation) { + if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) { + lastkey = GETKEY(tb, this->dbterm.tpl); + } + sc.end_condition = mpi.least; + } + + traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc); + release_stack(tb,stack); + // the more objects we've replaced, the more reductions we've consumed + BUMP_REDS(p, MIN(2000, (1000 - sc.max) + sc.replaced)); + if (sc.max > 0) { + RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE); + } + + key = GETKEY(tb, sc.lastobj); + sz = size_object(key); + if (IS_USMALL(0, sc.replaced)) { + hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6); + ereplaced = make_small(sc.replaced); + } + else { + hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6); + ereplaced = uint_to_big(sc.replaced, hp); + hp += BIG_UINT_HEAP_SIZE; + } + key = copy_struct(key, sz, &hp, &MSO(p)); + if (mpi.all_objects) + (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS; + mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp); + + continuation = TUPLE5 + (hp, + tid, + key, + sc.end_condition, /* From the match program, needn't be copied */ + mpb, + ereplaced); + + /* Don't free mpi.mp, so don't use macro */ + *ret = bif_trap1(&ets_select_replace_continue_exp, p, continuation); + return DB_ERROR_NONE; + +#undef RET_TO_BIF + +} + static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) { DbTableTree *tb = &tbl->tree; @@ -1757,23 +2000,22 @@ static void db_print_tree(fmtfn_t to, void *to_arg, /* release all memory occupied 
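/*
 * A minimal sketch, not part of this patch: the trap/continuation idea
 * used by db_select_replace_tree() and db_select_replace_continue_tree().
 * Work is bounded per call; when the budget runs out, just enough state
 * to resume (position reached plus running count) is handed back,
 * mirroring the {Table, LastKey, EndCondition, MatchProg, Replaced}
 * tuple.  Everything below is invented for the example.
 */
#include <stdio.h>

struct continuation {
    size_t next;              /* where to resume, cf. the saved last key */
    unsigned long replaced;   /* running total, cf. HowManyReplaced      */
    int done;
};

/* Process at most 'budget' elements per call, like sc.max = 1000. */
static void run_slice(const int *tab, size_t len, unsigned budget,
                      struct continuation *c)
{
    while (c->next < len && budget-- > 0) {
        if (tab[c->next] % 2 == 0)   /* stand-in for a match-spec hit */
            c->replaced++;
        c->next++;
    }
    c->done = (c->next == len);
}

int main(void)
{
    static int tab[10000];
    struct continuation c = {0, 0, 0};
    for (size_t i = 0; i < 10000; i++)
        tab[i] = (int)i;
    while (!c.done)
        run_slice(tab, 10000, 1000, &c);   /* each call is one "trap" */
    printf("replaced %lu\n", c.replaced);
    return 0;
}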
by a single table */ static int db_free_table_tree(DbTable *tbl) { - while (!db_free_table_continue_tree(tbl)) + while (db_free_table_continue_tree(tbl, ERTS_SWORD_MAX) < 0) ; return 1; } -static int db_free_table_continue_tree(DbTable *tbl) +static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds) { DbTableTree *tb = &tbl->tree; - int result; if (!tb->deletion) { tb->static_stack.pos = 0; tb->deletion = 1; PUSH_NODE(&tb->static_stack, tb->root); } - result = do_free_tree_cont(tb, DELETE_RECORD_LIMIT); - if (result) { /* Completely done. */ + reds = do_free_tree_continue(tb, reds); + if (reds >= 0) { /* Completely done. */ erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb, (void *) tb->static_stack.array, @@ -1781,7 +2023,7 @@ static int db_free_table_continue_tree(DbTable *tbl) ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); } - return result; + return reds; } static int db_delete_all_objects_tree(Process* p, DbTable* tbl) @@ -1954,8 +2196,9 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, ** For the select functions, analyzes the pattern and determines which ** part of the tree should be searched. Also compiles the match program */ -static int analyze_pattern(DbTableTree *tb, Eterm pattern, - struct mp_info *mpi) +static int analyze_pattern(DbTableTree *tb, Eterm pattern, + extra_match_validator_t extra_validator, /* Optional callback */ + struct mp_info *mpi) { Eterm lst, tpl, ttpl; Eterm *matches,*guards, *bodies; @@ -1993,7 +2236,10 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern, i = 0; for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) { - Eterm body; + Eterm match; + Eterm guard; + Eterm body; + ttpl = CAR(list_val(lst)); if (!is_tuple(ttpl)) { if (buff != sbuff) { @@ -2008,9 +2254,17 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern, } return DB_ERROR_BADPARAM; } - matches[i] = tpl = ptpl[1]; - guards[i] = ptpl[2]; + matches[i] = match = tpl = ptpl[1]; + guards[i] = guard = ptpl[2]; bodies[i] = body = ptpl[3]; + + if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) { + if (buff != sbuff) { + erts_free(ERTS_ALC_T_DB_TMP, buff); + } + return DB_ERROR_BADPARAM; + } + if (!is_list(body) || CDR(list_val(body)) != NIL || CAR(list_val(body)) != am_DollarUnderscore) { mpi->all_objects = 0; @@ -2018,7 +2272,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern, ++i; partly_bound = NIL; - res = key_given(tb, tpl, &mpi->save_term, &partly_bound); + res = key_given(tb, tpl, &(mpi->save_term), &partly_bound); if ( res >= 0 ) { /* Can match something */ key = 0; mpi->something_can_match = 1; @@ -2064,7 +2318,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern, return DB_ERROR_NONE; } -static int do_free_tree_cont(DbTableTree *tb, int num_left) +static SWord do_free_tree_continue(DbTableTree *tb, SWord reds) { TreeDbTerm *root; TreeDbTerm *p; @@ -2083,15 +2337,14 @@ static int do_free_tree_cont(DbTableTree *tb, int num_left) root = p; } else { free_term(tb, root); - if (--num_left > 0) { - break; - } else { - return 0; /* Done enough for now */ - } + if (--reds < 0) { + return reds; /* Done enough for now */ + } + break; } } } - return 1; + return reds; } /* @@ -2522,6 +2775,58 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key) return this; } +/* + * Find node and return the address of the node pointer (NULL if not found) + * Tries to reuse the existing stack for performance. 
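/*
 * A minimal sketch, not part of this patch: the reds-in/reds-out
 * convention that db_free_table_continue_tree()/do_free_tree_continue()
 * switch to.  A negative return means "budget exhausted, call again";
 * a non-negative return is the unused budget and means the work is done.
 * A toy list stands in for the AVL tree; names are invented.
 */
#include <stdlib.h>

struct cell { struct cell *next; };

/* Free at most 'reds' cells. */
long free_some(struct cell **head, long reds)
{
    while (*head) {
        struct cell *p = *head;
        *head = p->next;
        free(p);
        if (--reds < 0)
            return reds;   /* yield: call again with a fresh budget */
    }
    return reds;           /* >= 0: everything has been released */
}

/* A caller that cannot yield simply loops, as db_free_table_tree() does:
 *
 *     while (free_some(&head, some_large_budget) < 0)
 *         ;
 */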
+ */ + +static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *this) { + Eterm key = GETKEY(tb, this->dbterm.tpl); + TreeDbTerm *tmp; + TreeDbTerm *parent; + Sint c; + + if(( tmp = TOP_NODE(stack)) != NULL) { + if (!cmp_key_eq(tb,key,tmp)) { + /* Start from the beginning */ + stack->pos = stack->slot = 0; + } + } + if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */ + if (( tmp = tb->root ) == NULL) + return NULL; + for (;;) { + PUSH_NODE(stack, tmp); + if (( c = cmp_key(tb,key,tmp) ) < 0) { + if (tmp->left == NULL) /* We are at the next + and the element does + not exist */ + break; + else + tmp = tmp->left; + } else if (c > 0) { + if (tmp->right == NULL) /* Done */ + return NULL; + else + tmp = tmp->right; + } else + break; + } + } + + if (TOP_NODE(stack) != this) + return NULL; + + parent = TOPN_NODE(stack, 1); + if (parent == NULL) + return ((this != tb->root) ? NULL : &(tb->root)); + if (parent->left == this) + return &(parent->left); + if (parent->right == this) + return &(parent->right); + return NULL; +} + static int db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj, DbUpdateHandle* handle) @@ -2663,13 +2968,60 @@ static void traverse_forward(DbTableTree *tb, } /* + * Traverse the tree with an update callback function, used by db_select_replace + */ +static void traverse_update_backwards(DbTableTree *tb, + DbTreeStack* stack, + Eterm lastkey, + int (*doit)(DbTableTree*, + TreeDbTerm**, + void*, + int), + void* context) +{ + int res; + TreeDbTerm *this, *next, **this_ptr; + + if (lastkey == THE_NON_VALUE) { + stack->pos = stack->slot = 0; + if (( this = tb->root ) == NULL) { + return; + } + while (this != NULL) { + PUSH_NODE(stack, this); + this = this->right; + } + this = TOP_NODE(stack); + this_ptr = find_ptr(tb, stack, this); + ASSERT(this_ptr != NULL); + res = (*doit)(tb, this_ptr, context, 0); + REPLACE_TOP_NODE(stack, *this_ptr); + next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl)); + if (!res) + return; + } else { + next = find_prev(tb, stack, lastkey); + } + + while ((this = next) != NULL) { + this_ptr = find_ptr(tb, stack, this); + ASSERT(this_ptr != NULL); + res = (*doit)(tb, this_ptr, context, 0); + REPLACE_TOP_NODE(stack, *this_ptr); + next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl)); + if (!res) + return; + } +} + +/* * Returns 0 if not given 1 if given and -1 on no possible match * if key is given; *ret is set to point to the object concerned. 
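/*
 * A minimal sketch, not part of this patch: why find_ptr() returns a
 * TreeDbTerm** rather than a TreeDbTerm*.  Holding the address of the
 * owning slot (&root, &parent->left or &parent->right) lets the caller
 * splice a replacement node into the tree without re-balancing, which is
 * what the select_replace traversal relies on.  Toy unbalanced tree,
 * invented names.
 */
#include <stdlib.h>

struct node { int key; struct node *left, *right; };

/* Return the address of the pointer that owns the node with 'key',
 * or NULL if the key is not in the tree. */
struct node **find_slot(struct node **root, int key)
{
    struct node **slot = root;
    while (*slot) {
        if (key < (*slot)->key)
            slot = &(*slot)->left;
        else if (key > (*slot)->key)
            slot = &(*slot)->right;
        else
            return slot;
    }
    return NULL;
}

/* Replace the node in '*slot' with a fresh one carrying 'newkey',
 * keeping the children (and, in the real code, the balance factor).
 * 'newkey' must sort exactly like the old key for this to be safe. */
void replace_in_place(struct node **slot, int newkey)
{
    struct node *old = *slot;
    struct node *copy = malloc(sizeof *copy);
    if (!copy)
        return;
    copy->key = newkey;
    copy->left = old->left;
    copy->right = old->right;
    *slot = copy;
    free(old);
}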
*/ -static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret, +static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret, Eterm *partly_bound) { - TreeDbTerm *this; + TreeDbTerm **this; Eterm key; ASSERT(ret != NULL); @@ -2679,7 +3031,7 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret, if (is_non_value(key)) return -1; /* can't possibly match anything */ if (!db_has_variable(key)) { /* Bound key */ - if (( this = find_node(tb, key) ) == NULL) { + if (( this = find_node2(tb, key) ) == NULL) { return -1; } *ret = this; @@ -3102,6 +3454,46 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr, return 1; } +static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr, + int forward) +{ + struct select_replace_context *sc = (struct select_replace_context *) ptr; + Eterm ret; + + sc->lastobj = (*this)->dbterm.tpl; + + /* Always backwards traversing */ + if (sc->end_condition != NIL && + (cmp_partly_bound(sc->end_condition, + GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) { + return 0; + } + ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0, + &(*this)->dbterm, NULL, 0); + + if (is_value(ret)) { + TreeDbTerm* new; + TreeDbTerm* old = *this; +#ifdef DEBUG + Eterm key = db_getkey(tb->common.keypos, ret); + ASSERT(is_value(key)); + ASSERT(cmp_key(tb, key, old) == 0); +#endif + new = new_dbterm(tb, ret); + new->left = old->left; + new->right = old->right; + new->balance = old->balance; + sc->lastobj = new->dbterm.tpl; + *this = new; + free_term(tb, old); + ++(sc->replaced); + } + if (--(sc->max) <= 0) { + return 0; + } + return 1; +} + #ifdef TREE_DEBUG static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show, TreeDbTerm *t, int offset) @@ -3130,6 +3522,9 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show, #ifdef HARDDEBUG +/* + * No called, but kept as it might come to use + */ void db_check_table_tree(DbTable *tbl) { DbTableTree *tb = &tbl->tree; diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index 6f30b1d3dd..03cc11bdc4 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -1119,6 +1119,177 @@ error: return NULL; } +/* + * Compare a matching term 'a' with a constructing term 'b' for equality. + * + * Returns true if 'b' is guaranteed to always construct + * the same term as 'a' has matched. 
+ */ +static int db_match_eq_body(Eterm a, Eterm b) +{ + DECLARE_ESTACK(s); + Uint arity; + Eterm *ap, *bp; + int const_mode = 0; + const Eterm CONST_MODE_OFF = THE_NON_VALUE; + + while (1) { + switch(b & _TAG_PRIMARY_MASK) { + case TAG_PRIMARY_LIST: + if (!is_list(a)) + return 0; + ESTACK_PUSH2(s, CDR(list_val(a)), CDR(list_val(b))); + a = CAR(list_val(a)); + b = CAR(list_val(b)); + continue; /* loop without pop */ + + case TAG_PRIMARY_BOXED: + if (is_tuple(b)) { + bp = tuple_val(b); + if (!const_mode) { + if (bp[0] == make_arityval(1) && is_tuple(bp[1])) { + b = bp[1]; /* double-tuple syntax */ + } + else if (bp[0] == make_arityval(2) && bp[1] == am_const) { + ESTACK_PUSH(s, CONST_MODE_OFF); + const_mode = 1; /* {const, term()} syntax */ + b = bp[2]; + continue; /* loop without pop */ + } + else + return 0; /* function call or invalid tuple syntax */ + } + if (!is_tuple(a)) + return 0; + + ap = tuple_val(a); + bp = tuple_val(b); + if (ap[0] != bp[0]) + return 0; + arity = arityval(ap[0]); + if (arity > 0) { + a = *(++ap); + b = *(++bp); + while(--arity) { + ESTACK_PUSH2(s, *(++ap), *(++bp)); + } + continue; /* loop without pop */ + } + } + else if (is_map(b)) { + /* We don't know what other pairs the matched map may contain */ + return 0; + } + else if (!eq(a,b)) /* other boxed */ + return 0; + break; + + case TAG_PRIMARY_IMMED1: + if (a != b || a == am_Underscore || a == am_DollarDollar + || a == am_DollarUnderscore + || (const_mode && db_is_variable(a) >= 0)) { + + return 0; + } + break; + default: + erts_exit(ERTS_ABORT_EXIT, "db_compare: " + "Bad object on ESTACK: 0x%bex\n", b); + } + +pop_next: + if (ESTACK_ISEMPTY(s)) + break; /* done */ + + b = ESTACK_POP(s); + if (b == CONST_MODE_OFF) { + ASSERT(const_mode); + const_mode = 0; + goto pop_next; + } + a = ESTACK_POP(s); + } + + DESTROY_ESTACK(s); + return 1; +} + +/* This is used by select_replace */ +int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body) +{ + Eterm match_key; + Eterm* body_list; + Eterm single_body_term; + Eterm* single_body_term_tpl; + Eterm single_body_subterm; + Eterm single_body_subterm_key; + Eterm* single_body_subterm_key_tpl; + + if (!is_list(body)) { + return 0; + } + + body_list = list_val(body); + if (CDR(body_list) != NIL) { + return 0; + } + + single_body_term = CAR(body_list); + if (single_body_term == am_DollarUnderscore) { + /* same tuple is returned */ + return 1; + } + + if (!is_tuple(single_body_term)) { + return 0; + } + + single_body_term_tpl = tuple_val(single_body_term); + if (arityval(*single_body_term_tpl) != 1) { + // not the 1-element tuple we're expecting + return 0; + } + + match_key = db_getkey(keypos, match); + if (!is_value(match_key)) { + // can't get key out of match + return 0; + } + + single_body_subterm = single_body_term_tpl[1]; + single_body_subterm_key = db_getkey(keypos, single_body_subterm); + if (!is_value(single_body_subterm_key)) { + // can't get key out of single body subterm + return 0; + } + + if (db_match_eq_body(match_key, single_body_subterm_key)) { + /* tuple with same key is returned */ + return 1; + } + + if (!is_tuple(single_body_subterm_key)) { + /* can't possibly be an element instruction */ + return 0; + } + + single_body_subterm_key_tpl = tuple_val(single_body_subterm_key); + if (arityval(*single_body_subterm_key_tpl) != 3) { + /* can't possibly be an element instruction */ + return 0; + } + + if (single_body_subterm_key_tpl[1] == am_element && + single_body_subterm_key_tpl[3] == am_DollarUnderscore && + single_body_subterm_key_tpl[2] == 
make_small(keypos)) + { + /* {element, KeyPos, '$_'} */ + return 1; + } + + return 0; +} + /* This is used when tracing */ Eterm erts_match_set_lint(Process *p, Eterm matchexpr) { return db_match_set_lint(p, matchexpr, DCOMP_TRACE); @@ -2172,7 +2343,7 @@ restart: } } else { - *esp = term; + *esp++ = term; } break; case matchPushArrayAsList: @@ -5161,6 +5332,7 @@ void db_free_tmp_uncompressed(DbTerm* obj) Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog, int all, DbTerm* obj, Eterm** hpp, Uint extra) { + enum erts_pam_run_flags flags; Uint32 dummy; Eterm res; @@ -5168,9 +5340,13 @@ Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog, obj = db_alloc_tmp_uncompressed(tb, obj); } + flags = (hpp ? + ERTS_PAM_COPY_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE : + ERTS_PAM_TMP_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE); + res = db_prog_match(c_p, c_p, bprog, make_tuple(obj->tpl), NULL, 0, - ERTS_PAM_COPY_RESULT|ERTS_PAM_CONTIGUOUS_TUPLE, &dummy); + flags, &dummy); if (is_value(res) && hpp!=NULL) { *hpp = HAlloc(c_p, extra); diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 471fefe3cb..9be77fcefa 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -75,9 +75,6 @@ typedef struct db_term { */ } DbTerm; -union db_table; -typedef union db_table DbTable; - #define DB_MUST_RESIZE 1 #define DB_NEW_OBJECT 2 #define DB_INC_TRY_GROW 4 @@ -138,43 +135,56 @@ typedef struct db_table_method Eterm slot, Eterm* ret); int (*db_select_chunk)(Process* p, - DbTable* tb, /* [in out] */ + DbTable* tb, /* [in out] */ + Eterm tid, Eterm pattern, Sint chunk_size, int reverse, Eterm* ret); int (*db_select)(Process* p, - DbTable* tb, /* [in out] */ + DbTable* tb, /* [in out] */ + Eterm tid, Eterm pattern, int reverse, Eterm* ret); int (*db_select_delete)(Process* p, - DbTable* tb, /* [in out] */ + DbTable* tb, /* [in out] */ + Eterm tid, Eterm pattern, Eterm* ret); int (*db_select_continue)(Process* p, - DbTable* tb, /* [in out] */ + DbTable* tb, /* [in out] */ Eterm continuation, Eterm* ret); int (*db_select_delete_continue)(Process* p, - DbTable* tb, /* [in out] */ + DbTable* tb, /* [in out] */ Eterm continuation, Eterm* ret); int (*db_select_count)(Process* p, - DbTable* tb, /* [in out] */ + DbTable* tb, /* [in out] */ + Eterm tid, Eterm pattern, Eterm* ret); int (*db_select_count_continue)(Process* p, DbTable* tb, /* [in out] */ Eterm continuation, Eterm* ret); + int (*db_select_replace)(Process* p, + DbTable* tb, /* [in out] */ + Eterm tid, + Eterm pattern, + Eterm* ret); + int (*db_select_replace_continue)(Process* p, + DbTable* tb, /* [in out] */ + Eterm continuation, + Eterm* ret); int (*db_take)(Process *, DbTable *, Eterm, Eterm *); int (*db_delete_all_objects)(Process* p, DbTable* db /* [in out] */ ); int (*db_free_table)(DbTable* db /* [in out] */ ); - int (*db_free_table_continue)(DbTable* db); /* [in out] */ + SWord (*db_free_table_continue)(DbTable* db, SWord reds); void (*db_print)(fmtfn_t to, void* to_arg, @@ -184,7 +194,6 @@ typedef struct db_table_method void (*db_foreach_offheap)(DbTable* db, /* [in out] */ void (*func)(ErlOffHeap *, void *), void *arg); - void (*db_check_table)(DbTable* tb); /* Lookup a dbterm for updating. Return false if not found. 
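/*
 * A minimal sketch, not part of this patch: the explicit-stack walk that
 * db_match_eq_body() uses instead of recursion (the real code pushes
 * Eterm pairs on an ESTACK).  Two toy trees are compared structurally
 * with a manual stack; types and names are invented for the example.
 */
#include <stddef.h>

struct term { int tag; struct term *left, *right; };

int struct_eq(const struct term *a, const struct term *b)
{
    const struct term *stack[128];   /* room for 64 postponed pairs */
    size_t sp = 0;

    if (a == NULL || b == NULL)
        return a == b;

    for (;;) {
        if (a->tag != b->tag)
            return 0;
        if ((a->left == NULL) != (b->left == NULL) ||
            (a->right == NULL) != (b->right == NULL))
            return 0;
        if (a->right != NULL) {
            if (sp + 2 > sizeof stack / sizeof stack[0])
                return 0;            /* give up on very deep terms */
            stack[sp++] = a->right;  /* postpone the right-hand pair */
            stack[sp++] = b->right;
        }
        if (a->left != NULL) {       /* descend left without popping */
            a = a->left;
            b = b->left;
            continue;
        }
        if (sp == 0)
            return 1;                /* nothing postponed: equal */
        b = stack[--sp];
        a = stack[--sp];
    }
}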
*/ int (*db_lookup_dbterm)(Process *, DbTable *, Eterm key, Eterm obj, @@ -198,11 +207,27 @@ typedef struct db_table_method } DbTableMethod; typedef struct db_fixation { - Eterm pid; + /* Node in fixed_tabs list */ + struct { + struct db_fixation *next, *prev; + Binary* btid; + } tabs; + + /* Node in fixing_procs tree */ + struct { + struct db_fixation *left, *right, *parent; + int is_red; + Process* p; + } procs; + Uint counter; - struct db_fixation *next; } DbFixation; +typedef struct { + DbTable *next; + DbTable *prev; +} DbTableList; + /* * This structure contains data for all different types of database * tables. Note that these fields must match the same fields @@ -212,10 +237,13 @@ typedef struct db_fixation { */ typedef struct db_table_common { - erts_smp_refc_t ref; /* fixation counter */ + erts_smp_refc_t refc; /* reference count of table struct */ + erts_smp_refc_t fix_count;/* fixation counter */ + DbTableList all; + DbTableList owned; #ifdef ERTS_SMP erts_smp_rwmtx_t rwlock; /* rw lock on table */ - erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */ + erts_smp_mtx_t fixlock; /* Protects fixing_procs and time */ int is_thread_safe; /* No fine locking inside table needed */ Uint32 type; /* table type, *read only* after creation */ #endif @@ -224,7 +252,7 @@ typedef struct db_table_common { UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */ Uint64 heir_started_interval; /* To further identify the heir */ Eterm the_name; /* an atom */ - Eterm id; /* atom | integer */ + Binary *btid; DbTableMethod* meth; /* table methods */ erts_smp_atomic_t nitems; /* Total number of items in table */ erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */ @@ -232,36 +260,35 @@ typedef struct db_table_common { ErtsMonotonicTime monotonic; ErtsMonotonicTime offset; } time; - DbFixation* fixations; /* List of processes who have done safe_fixtable, + DbFixation* fixing_procs; /* Tree of processes who have done safe_fixtable, "local" fixations not included. 
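/*
 * A minimal sketch, not part of this patch: one object linked into two
 * intrusive structures at once, as the reworked DbFixation now is (the
 * 'tabs' node hangs off the table's fixation list, the 'procs' node off
 * the fixing process).  A plain circular doubly linked list stands in
 * for both; all names are invented.
 */
#include <stddef.h>

struct list_node { struct list_node *next, *prev; };

struct fixation {
    struct list_node tabs;    /* membership in the table's fixation list */
    struct list_node procs;   /* membership in the process' fixation set */
    unsigned long counter;
};

void list_init(struct list_node *head)
{
    head->next = head->prev = head;
}

void list_insert(struct list_node *head, struct list_node *n)
{
    n->next = head->next;
    n->prev = head;
    head->next->prev = n;
    head->next = n;
}

void list_remove(struct list_node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

/* Recover the containing fixation from either of its embedded nodes:
 *     struct fixation *f = CONTAINER(node, struct fixation, tabs);   */
#define CONTAINER(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))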
*/ /* All 32-bit fields */ Uint32 status; /* bit masks defined below */ - int slot; /* slot index in meta_main_tab */ int keypos; /* defaults to 1 */ int compress; } DbTableCommon; /* These are status bit patterns */ -#define DB_NORMAL (1 << 0) -#define DB_PRIVATE (1 << 1) -#define DB_PROTECTED (1 << 2) -#define DB_PUBLIC (1 << 3) -#define DB_BAG (1 << 4) -#define DB_SET (1 << 5) -/*#define DB_LHASH (1 << 6)*/ -#define DB_FINE_LOCKED (1 << 7) /* fine grained locking enabled */ -#define DB_DUPLICATE_BAG (1 << 8) -#define DB_ORDERED_SET (1 << 9) -#define DB_DELETE (1 << 10) /* table is being deleted */ -#define DB_FREQ_READ (1 << 11) - -#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ) +#define DB_PRIVATE (1 << 0) +#define DB_PROTECTED (1 << 1) +#define DB_PUBLIC (1 << 2) +#define DB_DELETE (1 << 3) /* table is being deleted */ +#define DB_SET (1 << 4) +#define DB_BAG (1 << 5) +#define DB_DUPLICATE_BAG (1 << 6) +#define DB_ORDERED_SET (1 << 7) +#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */ +#define DB_FREQ_READ (1 << 9) /* read_concurrency */ +#define DB_NAMED_TABLE (1 << 10) + +#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET\ + |DB_FINE_LOCKED|DB_FREQ_READ|DB_NAMED_TABLE) #define IS_HASH_TABLE(Status) (!!((Status) & \ (DB_BAG | DB_SET | DB_DUPLICATE_BAG))) #define IS_TREE_TABLE(Status) (!!((Status) & \ DB_ORDERED_SET)) -#define NFIXED(T) (erts_smp_refc_read(&(T)->common.ref,0)) +#define NFIXED(T) (erts_smp_refc_read(&(T)->common.fix_count,0)) #define IS_FIXED(T) (NFIXED(T) != 0) /* @@ -356,6 +383,7 @@ Eterm db_add_counter(Eterm** hpp, Wterm counter, Eterm incr); Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags); Binary *db_match_set_compile(Process *p, Eterm matchexpr, Uint flags); +int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body); int erts_db_match_prog_destructor(Binary *); typedef struct match_prog { @@ -506,14 +534,5 @@ erts_db_get_match_prog_binary(Eterm term) #define Binary2MatchProg(BP) \ (ASSERT(IsMatchProgBinary((BP))), \ ((MatchProg *) ERTS_MAGIC_BIN_DATA((BP)))) -/* -** Debugging -*/ -#ifdef HARDDEBUG -void db_check_tables(void); /* in db.c */ -#define CHECK_TABLES() db_check_tables() -#else -#define CHECK_TABLES() -#endif #endif /* _DB_UTIL_H */ diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index b2c307e826..61477af316 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -2307,6 +2307,7 @@ erl_start(int argc, char **argv) #endif set_main_stack_size(); erts_sched_init_time_sup(esdp); + erts_ets_sched_spec_data_init(esdp); process_main(esdp->x_reg_array, esdp->f_reg_array); } #endif diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 6ff9aea5ab..da73469516 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -100,14 +100,12 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "dist_entry_links", "address" }, { "code_write_permission", NULL }, { "purge_state", NULL }, + { "meta_name_tab", "address" }, + { "db_tab", "address" }, { "proc_status", "pid" }, { "proc_trace", "pid" }, { "ports_snapshot", NULL }, - { "meta_name_tab", "address" }, - { "meta_main_tab_slot", "address" }, - { "db_tab", "address" }, { "db_tab_fix", "address" }, - { "meta_main_tab_main", NULL }, { "db_hash_slot", "address" }, { "node_table", NULL }, { "dist_table", NULL }, diff --git a/erts/emulator/beam/erl_mtrace.c 
b/erts/emulator/beam/erl_mtrace.c index e275867928..bb6f8660f1 100644 --- a/erts/emulator/beam/erl_mtrace.c +++ b/erts/emulator/beam/erl_mtrace.c @@ -572,7 +572,7 @@ void erts_mtrace_pre_init(void) void erts_mtrace_init(char *receiver, char *nodename) { - char hostname[MAXHOSTNAMELEN]; + char hostname[MAXHOSTNAMELEN + 1]; char pid[21]; /* enough for a 64 bit number */ socket_desc = ERTS_SOCK_INVALID_SOCKET; @@ -613,9 +613,10 @@ void erts_mtrace_init(char *receiver, char *nodename) } tracep = trace_buffer; endp = trace_buffer + TRACE_BUF_SZ; - if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0) + /* gethostname requires that the len is max(hostname) + 1 */ + if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN + 1) != 0) hostname[0] = '\0'; - hostname[MAXHOSTNAMELEN-1] = '\0'; + hostname[MAXHOSTNAMELEN] = '\0'; sys_get_pid(pid, sizeof(pid)); write_trace_header(nodename ? nodename : "", pid, hostname); erts_mtrace_update_heap_size(); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index e6da4c1a76..63a4a997da 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -998,16 +998,14 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin) byte* raw_ptr; }u; - if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) { - ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term); - if (sb->is_writable) { - ProcBin* pb = (ProcBin*) binary_val(sb->orig); - ASSERT(pb->thing_word == HEADER_PROC_BIN); - if (pb->flags) { - erts_emasculate_writable_binary(pb); - sb->is_writable = 0; - } - } + if (is_binary(bin_term)) { + ProcBin *pb = (ProcBin*) binary_val(bin_term); + if (pb->thing_word == HEADER_SUB_BIN) { + ErlSubBin* sb = (ErlSubBin*) pb; + pb = (ProcBin*) binary_val(sb->orig); + } + if (pb->thing_word == HEADER_PROC_BIN && pb->flags) + erts_emasculate_writable_binary(pb); } u.tmp = NULL; bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator, @@ -1024,7 +1022,7 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin) bin->bin_term = bin_term; bin->size = binary_size(bin_term); bin->ref_bin = NULL; - ADD_READONLY_CHECK(env, bin->data, bin->size); + ADD_READONLY_CHECK(env, bin->data, bin->size); return 1; } diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index e2072fe30f..89b627aaf5 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -850,14 +850,15 @@ static Eterm AM_timer; static Eterm AM_delayed_delete_timer; static void setup_reference_table(void); -static Eterm reference_table_term(Uint **hpp, Uint *szp); +static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp); static void delete_reference_table(void); -#if BIG_UINT_HEAP_SIZE > 3 /* 2-tuple */ -#define ID_HEAP_SIZE BIG_UINT_HEAP_SIZE -#else -#define ID_HEAP_SIZE 3 /* 2-tuple */ -#endif +#undef ERTS_MAX__ +#define ERTS_MAX__(A, B) ((A) > (B) ? 
(A) : (B)) + +#define ID_HEAP_SIZE \ + ERTS_MAX__(ERTS_MAGIC_REF_THING_SIZE, \ + ERTS_MAX__(BIG_UINT_HEAP_SIZE, 3)) typedef struct node_referrer_ { struct node_referrer_ *next; @@ -870,6 +871,7 @@ typedef struct node_referrer_ { int system_ref; Eterm id; Uint id_heap[ID_HEAP_SIZE]; + ErlOffHeap off_heap; } NodeReferrer; typedef struct { @@ -942,7 +944,7 @@ erts_get_node_and_dist_references(struct process *proc) /* Get term size */ size = 0; - (void) reference_table_term(NULL, &size); + (void) reference_table_term(NULL, NULL, &size); hp = HAlloc(proc, size); #ifdef DEBUG @@ -951,7 +953,7 @@ erts_get_node_and_dist_references(struct process *proc) #endif /* Write term */ - res = reference_table_term(&hp, NULL); + res = reference_table_term(&hp, &proc->off_heap, NULL); ASSERT(endp == hp); @@ -1048,13 +1050,14 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id) nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(NodeReferrer)); nrp->next = referred_node->referrers; + ERTS_INIT_OFF_HEAP(&nrp->off_heap); referred_node->referrers = nrp; if(IS_CONST(id)) nrp->id = id; else { Uint *hp = &nrp->id_heap[0]; - ASSERT(is_big(id) || is_tuple(id)); - nrp->id = copy_struct(id, size_object(id), &hp, NULL); + ASSERT(is_big(id) || is_tuple(id) || is_internal_magic_ref(id)); + nrp->id = copy_struct(id, size_object(id), &hp, &nrp->off_heap); } nrp->heap_ref = 0; nrp->link_ref = 0; @@ -1211,10 +1214,20 @@ insert_links2(ErtsLink *lnk, Eterm id) static void insert_ets_table(DbTable *tab, void *unused) { + ErlOffHeap off_heap; + Eterm heap[ERTS_MAGIC_REF_THING_SIZE]; struct insert_offheap2_arg a; a.type = ETS_REF; - a.id = tab->common.id; + if (tab->common.status & DB_NAMED_TABLE) + a.id = tab->common.the_name; + else { + Eterm *hp = &heap[0]; + ERTS_INIT_OFF_HEAP(&off_heap); + a.id = erts_mk_magic_ref(&hp, &off_heap, tab->common.btid); + } erts_db_foreach_offheap(tab, insert_offheap2, (void *) &a); + if (is_not_atom(a.id)) + erts_cleanup_offheap(&off_heap); } static void @@ -1518,7 +1531,7 @@ setup_reference_table(void) */ static Eterm -reference_table_term(Uint **hpp, Uint *szp) +reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp) { #undef MK_2TUP #undef MK_3TUP @@ -1573,12 +1586,11 @@ reference_table_term(Uint **hpp, Uint *szp) nrid = nrp->id; if (!IS_CONST(nrp->id)) { - Uint nrid_sz = size_object(nrp->id); if (szp) *szp += nrid_sz; if (hpp) - nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL); + nrid = copy_struct(nrp->id, nrid_sz, hpp, ohp); } if (is_internal_pid(nrid) || nrid == am_error_logger) { @@ -1713,6 +1725,7 @@ delete_reference_table(void) NodeReferrer *tnrp; nrp = referred_nodes[i].referrers; while(nrp) { + erts_cleanup_offheap(&nrp->off_heap); tnrp = nrp; nrp = nrp->next; erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp); diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 52d9f9ddf7..88fae30845 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -449,7 +449,8 @@ typedef enum { ERTS_PSTT_CPC, /* Check Process Code */ ERTS_PSTT_CLA, /* Copy Literal Area */ ERTS_PSTT_COHMQ, /* Change off heap message queue */ - ERTS_PSTT_FTMQ /* Flush trace msg queue */ + ERTS_PSTT_FTMQ, /* Flush trace msg queue */ + ERTS_PSTT_ETS_FREE_FIXATION } ErtsProcSysTaskType; #define ERTS_MAX_PROC_SYS_TASK_ARGS 2 @@ -602,6 +603,7 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_REAP_PORTS; #endif valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; + valid |= ERTS_SSI_AUX_WORK_YIELD; if (~valid & value) 
erts_exit(ERTS_ABORT_EXIT, @@ -690,6 +692,8 @@ erts_pre_init_process(void) = "SET_TMO"; erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX] = "MSEG_CACHE_CHECK"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_YIELD_IX] + = "YIELD"; erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX] = "REAP_PORTS"; erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX] @@ -726,6 +730,16 @@ erts_pre_init_process(void) = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS; erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS; + + erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].get_locks + = ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].set_locks + = ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS; + + erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].get_locks + = ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].set_locks + = ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS; #endif } @@ -2557,6 +2571,48 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) return aux_work & ~ERTS_SSI_AUX_WORK_REAP_PORTS; } +void +erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp) +{ + ASSERT(esdp == erts_get_scheduler_data()); + /* Always called by the scheduler itself... */ + set_aux_work_flags_wakeup_nob(esdp->ssi, ERTS_SSI_AUX_WORK_YIELD); +} + +static ERTS_INLINE erts_aint32_t +handle_yield(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) +{ + int yield = 0; + /* + * Yield operations are always requested by the scheduler itself. + * + * The following handlers should *not* set the ERTS_SSI_AUX_WORK_YIELD + * flag in order to indicate more work. They should instead return + * information so this "main handler" can manipulate the flag... + * + * The following handlers should be able to handle being called + * even though no work is to be done... + */ + + /* Various yielding operations... */ + + yield |= erts_handle_yielded_ets_all_request(awdp->esdp, + &awdp->yield.ets_all); + + /* + * Other yielding operations... 
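/*
 * A minimal sketch, not part of this patch: the shape of the new yield
 * aux-work handling.  Each yielding sub-operation reports whether it
 * still has work; the aggregate handler clears the YIELD bit only when
 * none of them do, so the scheduler keeps coming back until every
 * operation has finished.  Flag value and handler names are invented.
 */
#define AUX_WORK_YIELD (1u << 5)

/* Each returns non-zero while it still needs to be called again. */
static int handle_yielded_ets_all(void) { return 0; /* toy: done */ }
static int handle_other_yield_op(void)  { return 0; /* toy: done */ }

unsigned handle_yield_sketch(unsigned aux_work)
{
    int yield = 0;

    yield |= handle_yielded_ets_all();
    yield |= handle_other_yield_op();

    if (!yield)
        aux_work &= ~AUX_WORK_YIELD;   /* nothing left: drop the flag */

    return aux_work;                   /* flag still set: run again */
}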
+ * + */ + + if (!yield) { + unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_YIELD); + return aux_work & ~ERTS_SSI_AUX_WORK_YIELD; + } + + return aux_work; +} + + #if HAVE_ERTS_MSEG static ERTS_INLINE erts_aint32_t @@ -2697,6 +2753,9 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) handle_mseg_cache_check); #endif + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_YIELD, + handle_yield); + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS, handle_reap_ports); @@ -8727,6 +8786,8 @@ sched_thread_func(void *vesdp) ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL); #endif + erts_ets_sched_spec_data_init(esdp); + process_main(esdp->x_reg_array, esdp->f_reg_array); /* No schedulers should *ever* terminate */ @@ -11148,6 +11209,12 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) st_res = am_true; break; #endif +#ifdef ERTS_SMP + case ERTS_PSTT_ETS_FREE_FIXATION: + reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]); + st_res = am_true; + break; +#endif default: ERTS_INTERNAL_ERROR("Invalid process sys task type"); st_res = am_false; @@ -11199,7 +11266,8 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) case ERTS_PSTT_GC_MAJOR: case ERTS_PSTT_GC_MINOR: case ERTS_PSTT_CPC: - case ERTS_PSTT_COHMQ: + case ERTS_PSTT_COHMQ: + case ERTS_PSTT_ETS_FREE_FIXATION: st_res = am_false; break; case ERTS_PSTT_CLA: @@ -11553,13 +11621,12 @@ erts_internal_request_system_task_4(BIF_ALIST_4) } static void -erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type) +erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg) { Process *rp = erts_proc_lookup(pid); if (rp) { ErtsProcSysTask *st; erts_aint32_t state, fail_state; - int i; st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK, ERTS_PROC_SYS_TASK_SIZE(0)); @@ -11568,8 +11635,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type) st->reply_tag = NIL; st->req_id = NIL; st->req_id_sz = 0; - for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++) - st->arg[i] = NIL; + st->arg[0] = (Eterm)arg; ERTS_INIT_OFF_HEAP(&st->off_heap); state = erts_smp_atomic32_read_nob(&rp->state); @@ -11585,7 +11651,13 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type) void erts_schedule_complete_off_heap_message_queue_change(Eterm pid) { - erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ); + erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ, NULL); +} + +void +erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix) +{ + erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix); } #ifdef ERTS_DIRTY_SCHEDULERS @@ -11633,7 +11705,7 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) dhndl = erts_thr_progress_unmanaged_delay(); #endif - erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ); + erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL); #ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 8bf372dad5..baf830615d 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -63,6 +63,9 @@ typedef struct process Process; #define ERTS_ONLY_INCLUDE_TRACE_FLAGS #include "erl_trace.h" #undef ERTS_ONLY_INCLUDE_TRACE_FLAGS +#define ERTS_ONLY_SCHED_SPEC_ETS_DATA +#include "erl_db.h" +#undef ERTS_ONLY_SCHED_SPEC_ETS_DATA #ifdef HIPE #include "hipe_process.h" @@ -312,6 +315,7 @@ typedef enum { ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX, ERTS_SSI_AUX_WORK_SET_TMO_IX, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX, + 
ERTS_SSI_AUX_WORK_YIELD_IX, ERTS_SSI_AUX_WORK_REAP_PORTS_IX, ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX, /* SHOULD be last flag index */ @@ -348,6 +352,8 @@ typedef enum { (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX) #define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX) +#define ERTS_SSI_AUX_WORK_YIELD \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_YIELD_IX) #define ERTS_SSI_AUX_WORK_REAP_PORTS \ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_REAP_PORTS_IX) #define ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED \ @@ -613,6 +619,10 @@ typedef struct { } delayed_wakeup; #endif struct { + ErtsEtsAllYieldData ets_all; + /* Other yielding operations... */ + } yield; + struct { struct { erts_aint32_t flags; void (*callback)(void *); @@ -621,6 +631,10 @@ typedef struct { } debug; } ErtsAuxWorkData; +#define ERTS_SCHED_AUX_YIELD_DATA(ESDP, NAME) \ + (&(ESDP)->aux_work_data.yield.NAME) +void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp); + #ifdef ERTS_DIRTY_SCHEDULERS typedef enum { ERTS_DIRTY_CPU_SCHEDULER, @@ -688,7 +702,7 @@ struct ErtsSchedulerData_ { ErtsSchedWallTime sched_wall_time; ErtsGCInfo gc_info; ErtsPortTaskHandle nosuspend_port_task_handle; - + ErtsEtsTables ets_tables; #ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC erts_alloc_verify_func_t verify_unused_temp_alloc; Allctr_t *verify_unused_temp_alloc_data; @@ -822,14 +836,16 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) #define ERTS_PSD_CALL_TIME_BP 3 #define ERTS_PSD_DELAYED_GC_TASK_QS 4 #define ERTS_PSD_NIF_TRAP_EXPORT 5 -#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6 +#define ERTS_PSD_ETS_OWNED_TABLES 6 +#define ERTS_PSD_ETS_FIXED_TABLES 7 +#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 8 -#define ERTS_PSD_SIZE 7 +#define ERTS_PSD_SIZE 9 #if !defined(HIPE) # undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF # undef ERTS_PSD_SIZE -# define ERTS_PSD_SIZE 6 +# define ERTS_PSD_SIZE 8 #endif typedef struct { @@ -857,6 +873,12 @@ typedef struct { #define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN #define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS ERTS_PROC_LOCK_STATUS +#define ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS ERTS_PROC_LOCK_STATUS + +#define ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS ERTS_PROC_LOCK_MAIN + typedef struct { ErtsProcLocks get_locks; ErtsProcLocks set_locks; @@ -1782,6 +1804,8 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *), ErtsThrPrgrLaterOp *, UWord); void erts_schedule_complete_off_heap_message_queue_change(Eterm pid); +struct db_fixation; +void erts_schedule_ets_free_fixation(Eterm pid, struct db_fixation*); void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc); int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks); diff --git a/erts/emulator/beam/erl_rbtree.h b/erts/emulator/beam/erl_rbtree.h index 5fefaea978..6a42853957 100644 --- a/erts/emulator/beam/erl_rbtree.h +++ b/erts/emulator/beam/erl_rbtree.h @@ -105,7 +105,10 @@ * <ERTS_RBT_PREFIX>_rbt_yield_state_t. * * The yield state should be statically initialized by - * ERTS_RBT_YIELD_STAT_INITER. + * ERTS_RBT_YIELD_STAT_INITER + * + * or dynamically initialized with + * ERTS_RBT_YIELD_STAT_INIT(<ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate) * * * The following API functions are implemented if corresponding @@ -178,8 +181,8 @@ * Operate by calling the operator 'op' on each element. * Order is undefined. 
* - * Yield when 'ylimit' elements has been processed. Zero is - * returned when yielding, and a non-zero value is returned when + * Yield when 'ylimit' elements has been processed. True is + * returned when yielding, and false is returned when * the whole tree has been processed. The tree should not be * modified until all of it has been processed. * @@ -195,8 +198,8 @@ * Order is undefined. Each element should be destroyed * by 'op'. * - * Yield when 'ylimit' elements has been processed. Zero is - * returned when yielding, and a non-zero value is returned when + * Yield when 'ylimit' elements has been processed. True is + * returned when yielding, and false is returned when * the whole tree has been processed. * * 'arg' is passed as argument to 'op'. @@ -228,8 +231,8 @@ * Operate by calling the operator 'op' on each element from * smallest towards larger elements. * - * Yield when 'ylimit' elements has been processed. Zero is - * returned when yielding, and a non-zero value is returned when + * Yield when 'ylimit' elements has been processed. True is + * returned when yielding, and false is returned when * the whole tree has been processed. The tree should not be * modified until all of it has been processed. * @@ -244,8 +247,8 @@ * Operate by calling the operator 'op' on each element from * largest towards smaller elements. * - * Yield when 'ylimit' elements has been processed. Zero is - * returned when yielding, and a non-zero value is returned when + * Yield when 'ylimit' elements has been processed. True is + * returned when yielding, and false is returned when * the whole tree has been processed. The tree should not be * modified until all of it has been processed. * @@ -296,8 +299,8 @@ * Note that elements are often destroyed in another order * than the order that the elements are operated on. * - * Yield when 'ylimit' elements has been processed. Zero is - * returned when yielding, and a non-zero value is returned when + * Yield when 'ylimit' elements has been processed. True is + * returned when yielding, and false is returned when * the whole tree has been processed. The tree should not be * modified until all of it has been processed. * @@ -318,8 +321,8 @@ * Note that elements are often destroyed in another order * than the order that the elements are operated on. * - * Yield when 'ylimit' elements has been processed. Zero is - * returned when yielding, and a non-zero value is returned when + * Yield when 'ylimit' elements has been processed. True is + * returned when yielding, and false is returned when * the whole tree has been processed. The tree should not be * modified until all of it has been processed. 
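/*
 * A minimal sketch, not part of this patch: the "true while yielding,
 * false when done" convention the foreach_*_yielding() routines now
 * document, together with a yield state that can also be initialized at
 * run time.  A list cursor stands in for the red-black tree state; all
 * names are invented.
 */
#include <stddef.h>

struct elem { struct elem *next; };

struct yield_state { struct elem *cursor; };

#define YIELD_STATE_INIT(ys) do { (ys)->cursor = NULL; } while (0)

/* Visit at most 'ylimit' elements per call.
 * Returns 1 when it yields, 0 when the whole list has been processed. */
int foreach_yielding(struct elem *first, struct yield_state *ys,
                     void (*op)(struct elem *, void *), void *arg,
                     unsigned ylimit)
{
    struct elem *e = ys->cursor ? ys->cursor : first;

    while (e) {
        if (ylimit-- == 0) {
            ys->cursor = e;   /* remember where to pick up again */
            return 1;         /* yielded */
        }
        op(e, arg);
        e = e->next;
    }
    ys->cursor = NULL;
    return 0;                 /* done */
}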
* @@ -422,6 +425,13 @@ #ifndef ERTS_RBT_YIELD_STAT_INITER # define ERTS_RBT_YIELD_STAT_INITER {NULL, 0} #endif +#ifndef ERTS_RBT_YIELD_STAT_INIT +# define ERTS_RBT_YIELD_STAT_INIT(YS) \ + do { \ + (YS)->x = NULL; \ + (YS)->up = 0; \ + } while (0) +#endif #define ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y) \ X ## Y @@ -476,12 +486,12 @@ typedef struct { #if defined(ERTS_RBT_HARD_DEBUG) \ && (defined(ERTS_RBT_WANT_DELETE) \ || defined(ERTS_RBT_NEED_INSERT__)) -static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root); +static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *node); # define ERTS_RBT_NEED_HDBG_CHECK_TREE__ -# define ERTS_RBT_HDBG_CHECK_TREE__(R) \ - ERTS_RBT_FUNC__(hdbg_check_tree)((R)) +# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) \ + ERTS_RBT_FUNC__(hdbg_check_tree)((R),(N)) #else -# define ERTS_RBT_HDBG_CHECK_TREE__(R) ((void) 1) +# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) ((void) 1) #endif #ifdef ERTS_RBT_NEED_ROTATE__ @@ -634,7 +644,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n) ERTS_RBT_T null_x; /* null_x is used to get the fixup started when we splice out a node without children. */ - ERTS_RBT_HDBG_CHECK_TREE__(*root); + ERTS_RBT_HDBG_CHECK_TREE__(*root, n); ERTS_RBT_INIT_EMPTY_TNODE(&null_x); @@ -852,7 +862,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n) } } - ERTS_RBT_HDBG_CHECK_TREE__(*root); + ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL); } @@ -982,7 +992,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup) { ERTS_RBT_KEY_T kn = ERTS_RBT_GET_KEY(n); - ERTS_RBT_HDBG_CHECK_TREE__(*root); + ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL); ERTS_RBT_INIT_EMPTY_TNODE(n); @@ -1004,7 +1014,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup) if (lookup && ERTS_RBT_IS_EQ(kn, kx)) { - ERTS_RBT_HDBG_CHECK_TREE__(*root); + ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL); return x; } @@ -1038,7 +1048,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup) ERTS_RBT_FUNC__(insert_fixup__)(root, n); } - ERTS_RBT_HDBG_CHECK_TREE__(*root); + ERTS_RBT_HDBG_CHECK_TREE__(*root, n); return NULL; } @@ -1364,7 +1374,7 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root, ystate->x = NULL; ystate->up = 0; } - return 1; /* Done */ + return 0; /* Done */ } x = p; } @@ -1579,15 +1589,17 @@ ERTS_RBT_FUNC__(debug_print)(FILE *filep, ERTS_RBT_T *x, int indent, #ifdef ERTS_RBT_NEED_HDBG_CHECK_TREE__ static void -ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root) +ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *n) { int black_depth = -1, no_black = 0; ERTS_RBT_T *c, *p, *x = root; ERTS_RBT_KEY_T kx; ERTS_RBT_KEY_T kc; - if (!x) + if (!x) { + ERTS_RBT_ASSERT(!n); return; + } ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x)); @@ -1597,6 +1609,9 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root) while (1) { + if (x == n) + n = NULL; + if (ERTS_RBT_IS_BLACK(x)) no_black++; else { @@ -1668,6 +1683,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root) if (!p) { ERTS_RBT_ASSERT(root == x); ERTS_RBT_ASSERT(no_black == 0); + ERTS_RBT_ASSERT(!n); return; /* Done */ } diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h index a602a8f7c6..097d580d99 100644 --- a/erts/emulator/beam/erl_term.h +++ b/erts/emulator/beam/erl_term.h @@ -873,6 +873,12 @@ typedef union { ErtsORefThing o; } ErtsRefThing; +/* for copy sharing */ +#define BOXED_VISITED_MASK ((Eterm) 3) +#define BOXED_VISITED ((Eterm) 1) +#define BOXED_SHARED_UNPROCESSED ((Eterm) 2) +#define BOXED_SHARED_PROCESSED 
((Eterm) 3) + #define ERTS_REF_THING_SIZE (sizeof(ErtsORefThing)/sizeof(Uint)) #define ERTS_MAGIC_REF_THING_SIZE (sizeof(ErtsMRefThing)/sizeof(Uint)) #define ERTS_MAX_INTERNAL_REF_SIZE (sizeof(ErtsRefThing)/sizeof(Uint)) @@ -888,9 +894,14 @@ typedef union { # define is_ref_thing_header(x) ((x) == ERTS_REF_THING_HEADER) -#define is_ordinary_ref_thing(x) \ - (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \ +#ifdef SHCOPY +#define is_ordinary_ref_thing(x) \ + (((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER) +#else +#define is_ordinary_ref_thing(x) \ + (ASSERT(is_ref_thing_header((*((Eterm *)(x))) & ~BOXED_VISITED_MASK)), \ ((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER) +#endif #define is_magic_ref_thing(x) \ (!is_ordinary_ref_thing((x))) diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index ec36b23059..9b5bd7a749 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -620,6 +620,20 @@ test_heap Need u==1 | put_list Y=y x==0 x==0 => test_heap_1_put_list Need Y %macro: test_heap_1_put_list TestHeapPutList -pack test_heap_1_put_list I y +# +# is_tagged_tuple Fail=f Src=rxy Arity Atom=a +# + +is_tagged_tuple Fail Literal=q Arity Atom => \ + move Literal x | is_tagged_tuple Fail x Arity Atom +is_tagged_tuple Fail=f c Arity Atom => jump Fail + +%macro:is_tagged_tuple IsTaggedTuple -fail_action + +is_tagged_tuple f r A a +is_tagged_tuple f x A a +is_tagged_tuple f y A a + # Test tuple & arity (head) is_tuple Fail Literal=q => move Literal x | is_tuple Fail x diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index c6ea8049c3..144dd60d21 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -487,6 +487,12 @@ extern volatile int erts_break_requested; void erts_do_break_handling(void); #endif +#if !defined(ERTS_SMP) && !defined(__WIN32__) +extern volatile Uint erts_signal_state; +#define ERTS_SIGNAL_STATE erts_signal_state +void erts_handle_signal_state(void); +#endif + #ifdef ERTS_SMP extern erts_smp_atomic32_t erts_writing_erl_crash_dump; extern erts_tsd_key_t erts_is_crash_dumping_key; diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 278abe4e00..0fe5183b42 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -8345,10 +8345,10 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol) return (ErlDrvData)desc; } - -#ifndef MAXHOSTNAMELEN -#define MAXHOSTNAMELEN 256 -#endif +/* MAXHOSTNAMELEN could be 64 or 255 depending +on the platform. 
Instead, use INET_MAXHOSTNAMELEN +which is always 255 across all platforms */ +#define INET_MAXHOSTNAMELEN 255 /* ** common TCP/UDP/SCTP control command @@ -8525,13 +8525,14 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, } case INET_REQ_GETHOSTNAME: { /* get host name */ - char tbuf[MAXHOSTNAMELEN]; + char tbuf[INET_MAXHOSTNAMELEN + 1]; DEBUGF(("inet_ctl(%ld): GETHOSTNAME\r\n", (long)desc->port)); if (len != 0) return ctl_error(EINVAL, rbuf, rsize); - if (IS_SOCKET_ERROR(sock_hostname(tbuf, MAXHOSTNAMELEN))) + /* gethostname requires len to be max(hostname) + 1 */ + if (IS_SOCKET_ERROR(sock_hostname(tbuf, INET_MAXHOSTNAMELEN + 1))) return ctl_error(sock_errno(), rbuf, rsize); return ctl_reply(INET_REP_OK, tbuf, strlen(tbuf), rbuf, rsize); } diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c index 3ff68a8859..0acc2432a7 100644 --- a/erts/emulator/drivers/unix/unix_efile.c +++ b/erts/emulator/drivers/unix/unix_efile.c @@ -430,6 +430,9 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */ if ( (stat("/dev/null", &nullstatbuf) < 0) || (statbuf.st_ino != nullstatbuf.st_ino) || (statbuf.st_dev != nullstatbuf.st_dev) ) { +#ifdef HAVE_FSTAT + efile_closefile(fd); +#endif errno = EISDIR; return check_error(-1, errInfo); } diff --git a/erts/emulator/hipe/elf64ppc.x b/erts/emulator/hipe/elf64ppc.x index 46d2632970..bb14a6cd29 100644 --- a/erts/emulator/hipe/elf64ppc.x +++ b/erts/emulator/hipe/elf64ppc.x @@ -28,7 +28,7 @@ SEARCH_DIR("/mnt/archive/cross-ppc64/ppc64-unknown-linux/lib"); SECTIONS { /* Read-only sections, merged into text segment: */ - PROVIDE (__executable_start = 0x0180000); . = 0x01800000 + SIZEOF_HEADERS; + PROVIDE (__executable_start = 0x01800000); . = 0x01800000 + SIZEOF_HEADERS; .interp : { *(.interp) } .hash : { *(.hash) } .dynsym : { *(.dynsym) } diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c index 688c82ab7a..8b420b9e9b 100644 --- a/erts/emulator/hipe/hipe_bif0.c +++ b/erts/emulator/hipe/hipe_bif0.c @@ -447,6 +447,7 @@ BIF_RETTYPE hipe_bifs_alloc_data_3(BIF_ALIST_3) { Uint align; HipeLoaderState *stp; + void *aligned_block; if (is_not_small(BIF_ARG_1) || is_not_small(BIF_ARG_2) || (!(stp = get_loader_state(BIF_ARG_3))) || @@ -459,18 +460,14 @@ BIF_RETTYPE hipe_bifs_alloc_data_3(BIF_ALIST_3) stp->data_segment_size = unsigned_val(BIF_ARG_2); if (stp->data_segment_size == 0) BIF_RET(make_small(0)); - stp->data_segment = erts_alloc(ERTS_ALC_T_HIPE, stp->data_segment_size); - if ((unsigned long)stp->data_segment & (align-1)) { - fprintf(stderr, "%s: erts_alloc(%lu) returned %p which is not %lu-byte " - "aligned\r\n", - __FUNCTION__, (unsigned long)stp->data_segment_size, - stp->data_segment, (unsigned long)align); - erts_free(ERTS_ALC_T_HIPE, stp->data_segment); - stp->data_segment = NULL; - stp->data_segment_size = 0; - BIF_ERROR(BIF_P, EXC_NOTSUP); - } - BIF_RET(address_to_term(stp->data_segment, BIF_P)); + + stp->data_segment_size += align-1; /* Make room to align the pointer */ + stp->data_segment = erts_alloc(ERTS_ALC_T_HIPE_LL, stp->data_segment_size); + + /* Align the pointer */ + aligned_block = (void*)((UWord)(stp->data_segment + align - 1) + & ~(UWord)(align-1)); + BIF_RET(address_to_term(aligned_block, BIF_P)); } /* @@ -545,7 +542,7 @@ static void init_const_term_table(void) f.meta_alloc = (HMALLOC_FUN) erts_alloc; f.meta_free = (HMFREE_FUN) erts_free; f.meta_print = (HMPRINT_FUN) erts_print; - hash_init(ERTS_ALC_T_HIPE, &const_term_table, 
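/*
 * A minimal sketch, not part of this patch: the buffer-sizing rule the
 * erl_mtrace.c and inet_drv.c fixes follow.  gethostname() may truncate
 * and is not guaranteed to NUL-terminate, so the buffer is one byte
 * larger than the longest allowed name and terminated explicitly.
 * The 255 limit mirrors the new INET_MAXHOSTNAMELEN.
 */
#include <stdio.h>
#include <unistd.h>

#define MAX_NAME 255

int main(void)
{
    char host[MAX_NAME + 1];         /* room for the terminator */

    if (gethostname(host, sizeof host) != 0)
        host[0] = '\0';              /* fall back to an empty name */
    host[MAX_NAME] = '\0';           /* make sure it is terminated */

    printf("%s\n", host);
    return 0;
}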
"const_term_table", 97, f); + hash_init(ERTS_ALC_T_HIPE_LL, &const_term_table, "const_term_table", 97, f); } BIF_RETTYPE hipe_bifs_merge_term_1(BIF_ALIST_1) @@ -859,7 +856,7 @@ static void init_primop_table(void) f.meta_free = (HMFREE_FUN) erts_free; f.meta_print = (HMPRINT_FUN) erts_print; - hash_init(ERTS_ALC_T_HIPE, &primop_table, "primop_table", 50, f); + hash_init(ERTS_ALC_T_HIPE_LL, &primop_table, "primop_table", 50, f); for (i = 0; i < sizeof(primops)/sizeof(primops[0]); ++i) hash_put(&primop_table, &primops[i]); @@ -1094,7 +1091,7 @@ static void mod2mfa_tab_init(void) f.meta_free = (HMFREE_FUN) erts_free; f.meta_print = (HMPRINT_FUN) erts_print; - hash_init(ERTS_ALC_T_HIPE, &mod2mfa_tab, "mod2mfa_tab", 50, f); + hash_init(ERTS_ALC_T_HIPE_LL, &mod2mfa_tab, "mod2mfa_tab", 50, f); } static struct hipe_mfa_info* mod2mfa_get(Module* modp) @@ -1172,7 +1169,7 @@ struct hipe_mfa_info* mod2mfa_get_safe(Module* modp) static struct hipe_mfa_info **hipe_mfa_info_table_alloc_bucket(unsigned int size) { unsigned long nbytes = size * sizeof(struct hipe_mfa_info*); - struct hipe_mfa_info **bucket = erts_alloc(ERTS_ALC_T_HIPE, nbytes); + struct hipe_mfa_info **bucket = erts_alloc(ERTS_ALC_T_HIPE_LL, nbytes); sys_memzero(bucket, nbytes); return bucket; } @@ -1201,14 +1198,14 @@ static void hipe_mfa_info_table_grow(void) b = next; } } - erts_free(ERTS_ALC_T_HIPE, old_bucket); + erts_free(ERTS_ALC_T_HIPE_LL, old_bucket); } static struct hipe_mfa_info *hipe_mfa_info_table_alloc(Eterm m, Eterm f, unsigned int arity) { struct hipe_mfa_info *res; - res = (struct hipe_mfa_info*)erts_alloc(ERTS_ALC_T_HIPE, sizeof(*res)); + res = (struct hipe_mfa_info*)erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*res)); res->m = m; res->f = f; res->a = arity; @@ -1547,7 +1544,7 @@ BIF_RETTYPE hipe_bifs_add_ref_2(BIF_ALIST_2) hipe_mfa_info_table_rwlock(); callee_mfa = hipe_mfa_info_table_put_rwlocked(callee.mod, callee.fun, callee.ari); - ref = erts_alloc(ERTS_ALC_T_HIPE, sizeof(struct hipe_ref)); + ref = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(struct hipe_ref)); ref->address = address; #if defined(__arm__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) ref->trampoline = trampoline; @@ -1618,7 +1615,7 @@ static void purge_mfa(struct hipe_mfa_info* p) ASSERT(p->is_stub); remove_mfa_info(p); hipe_free_native_stub(p->remote_address); - erts_free(ERTS_ALC_T_HIPE, p); + erts_free(ERTS_ALC_T_HIPE_LL, p); } /* Called by init:restart after unloading all hipe compiled modules @@ -1643,7 +1640,7 @@ static void hipe_purge_all_refs(void) bucket[i] = mfa->bucket.next; hash_erase(&mod2mfa_tab, mfa); - erts_free(ERTS_ALC_T_HIPE, mfa); + erts_free(ERTS_ALC_T_HIPE_LL, mfa); } } hipe_mfa_info_table.used = 0; @@ -1717,7 +1714,7 @@ void hipe_purge_refs(struct hipe_ref* first_ref, Eterm caller_module, } ref = ref->next_from_modi; - erts_free(ERTS_ALC_T_HIPE, free_ref); + erts_free(ERTS_ALC_T_HIPE_LL, free_ref); } } diff --git a/erts/emulator/hipe/hipe_bif1.c b/erts/emulator/hipe/hipe_bif1.c index 0c66eb6abe..0ba0fa5172 100644 --- a/erts/emulator/hipe/hipe_bif1.c +++ b/erts/emulator/hipe/hipe_bif1.c @@ -50,7 +50,7 @@ BIF_RETTYPE hipe_bifs_call_count_on_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); if (pc[0] == BeamOpCode(op_hipe_call_count)) BIF_RET(NIL); - hcc = erts_alloc(ERTS_ALC_T_HIPE, sizeof(*hcc)); + hcc = erts_alloc(ERTS_ALC_T_HIPE_SL, sizeof(*hcc)); hcc->count = 0; hcc->opcode = pc[0]; pc[-4] = (Eterm)hcc; @@ -74,7 +74,7 @@ BIF_RETTYPE hipe_bifs_call_count_off_1(BIF_ALIST_1) count = hcc->count; pc[0] = hcc->opcode; pc[-4] = 
(Eterm)NULL; - erts_free(ERTS_ALC_T_HIPE, hcc); + erts_free(ERTS_ALC_T_HIPE_SL, hcc); BIF_RET(make_small(count)); } diff --git a/erts/emulator/hipe/hipe_load.c b/erts/emulator/hipe/hipe_load.c index 87c5004d2b..0b53880628 100644 --- a/erts/emulator/hipe/hipe_load.c +++ b/erts/emulator/hipe/hipe_load.c @@ -45,7 +45,7 @@ void hipe_free_loader_state(HipeLoaderState *stp) stp->text_segment_size = 0; if (stp->data_segment) - erts_free(ERTS_ALC_T_HIPE, stp->data_segment); + erts_free(ERTS_ALC_T_HIPE_LL, stp->data_segment); stp->data_segment = NULL; stp->data_segment_size = 0; diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c index f11223d8b0..712f65f629 100644 --- a/erts/emulator/hipe/hipe_mode_switch.c +++ b/erts/emulator/hipe/hipe_mode_switch.c @@ -664,7 +664,7 @@ void hipe_inc_nstack(Process *p) { unsigned old_size = p->hipe.nstend - p->hipe.nstack; unsigned new_size = hipe_next_nstack_size(old_size); - Eterm *new_nstack = erts_alloc(ERTS_ALC_T_HIPE, new_size*sizeof(Eterm)); + Eterm *new_nstack = erts_alloc(ERTS_ALC_T_HIPE_STK, new_size*sizeof(Eterm)); unsigned used_size = p->hipe.nstend - p->hipe.nsp; sys_memcpy(new_nstack+new_size-used_size, p->hipe.nsp, used_size*sizeof(Eterm)); @@ -673,7 +673,7 @@ void hipe_inc_nstack(Process *p) if (p->hipe.nstblacklim) p->hipe.nstblacklim = new_nstack + new_size - (p->hipe.nstend - p->hipe.nstblacklim); if (p->hipe.nstack) - erts_free(ERTS_ALC_T_HIPE, p->hipe.nstack); + erts_free(ERTS_ALC_T_HIPE_STK, p->hipe.nstack); p->hipe.nstack = new_nstack; p->hipe.nstend = new_nstack + new_size; p->hipe.nsp = new_nstack + new_size - used_size; @@ -683,7 +683,7 @@ void hipe_inc_nstack(Process *p) void hipe_empty_nstack(Process *p) { if (p->hipe.nstack) { - erts_free(ERTS_ALC_T_HIPE, p->hipe.nstack); + erts_free(ERTS_ALC_T_HIPE_STK, p->hipe.nstack); } p->hipe.nstgraylim = NULL; p->hipe.nsp = NULL; diff --git a/erts/emulator/hipe/hipe_module.c b/erts/emulator/hipe/hipe_module.c index 469f077dd2..2e99a30556 100644 --- a/erts/emulator/hipe/hipe_module.c +++ b/erts/emulator/hipe/hipe_module.c @@ -29,7 +29,7 @@ void hipe_free_module(HipeModule *mod) { hipe_free_code(mod->text_segment, mod->text_segment_size); if (mod->data_segment) /* Some modules lack data segments */ - erts_free(ERTS_ALC_T_HIPE, mod->data_segment); + erts_free(ERTS_ALC_T_HIPE_LL, mod->data_segment); - erts_free(ERTS_ALC_T_HIPE, mod); + erts_free(ERTS_ALC_T_HIPE_LL, mod); } diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h index a8d5972280..36b6ffc021 100644 --- a/erts/emulator/hipe/hipe_process.h +++ b/erts/emulator/hipe/hipe_process.h @@ -79,7 +79,7 @@ static __inline__ void hipe_init_process(struct hipe_process_state *p) static __inline__ void hipe_delete_process(struct hipe_process_state *p) { if (p->nstack) - erts_free(ERTS_ALC_T_HIPE, (void*)p->nstack); + erts_free(ERTS_ALC_T_HIPE_STK, (void*)p->nstack); } #ifdef ERTS_SMP diff --git a/erts/emulator/hipe/hipe_stack.c b/erts/emulator/hipe/hipe_stack.c index b80e44bc37..d0f0407489 100644 --- a/erts/emulator/hipe/hipe_stack.c +++ b/erts/emulator/hipe/hipe_stack.c @@ -46,7 +46,7 @@ struct hipe_sdesc_table hipe_sdesc_table; static struct hipe_sdesc **alloc_bucket(unsigned int size) { unsigned long nbytes = size * sizeof(struct hipe_sdesc*); - struct hipe_sdesc **bucket = erts_alloc(ERTS_ALC_T_HIPE, nbytes); + struct hipe_sdesc **bucket = erts_alloc(ERTS_ALC_T_HIPE_LL, nbytes); sys_memzero(bucket, nbytes); return bucket; } @@ -75,7 +75,7 @@ static void hipe_grow_sdesc_table(void) b 
= next; } } - erts_free(ERTS_ALC_T_HIPE, old_bucket); + erts_free(ERTS_ALC_T_HIPE_LL, old_bucket); } struct hipe_sdesc *hipe_put_sdesc(struct hipe_sdesc *sdesc) @@ -121,7 +121,7 @@ void hipe_destruct_sdesc(struct hipe_sdesc *sdesc) free_me = ErtsContainerStruct(sdesc, struct hipe_sdesc_with_exnra, sdesc); else free_me = sdesc; - erts_free(ERTS_ALC_T_HIPE, free_me); + erts_free(ERTS_ALC_T_HIPE_LL, free_me); } void hipe_init_sdesc_table(struct hipe_sdesc *sdesc) @@ -199,7 +199,7 @@ struct hipe_sdesc *hipe_decode_sdesc(Eterm arg) ? offsetof(struct hipe_sdesc_with_exnra, sdesc.livebits) : offsetof(struct hipe_sdesc, livebits)) + livebitswords * sizeof(int); - p = erts_alloc(ERTS_ALC_T_HIPE, sdescbytes); + p = erts_alloc(ERTS_ALC_T_HIPE_LL, sdescbytes); /* If we have an exception handler use the special sdesc_with_exnra structure. */ if (exnra) { diff --git a/erts/emulator/hipe/hipe_x86_signal.c b/erts/emulator/hipe/hipe_x86_signal.c index 1a34ce786c..b24b9148a2 100644 --- a/erts/emulator/hipe/hipe_x86_signal.c +++ b/erts/emulator/hipe/hipe_x86_signal.c @@ -267,7 +267,7 @@ void hipe_thread_signal_init(void) { /* Stack don't really need to be cache aligned. We use it to suppress false leak report from valgrind */ - hipe_sigaltstack(erts_alloc_permanent_cache_aligned(ERTS_ALC_T_HIPE, SIGSTKSZ)); + hipe_sigaltstack(erts_alloc_permanent_cache_aligned(ERTS_ALC_T_HIPE_LL, SIGSTKSZ)); } #endif diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 1ef9fa2a05..00c70268df 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -2169,6 +2169,12 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) erts_do_break_handling(); #endif +#ifdef ERTS_SIGNAL_STATE /* ifndef ERTS_SMP */ + if (ERTS_SIGNAL_STATE) { + erts_handle_signal_state(); + } +#endif + /* Figure out timeout value */ timeout_time = (do_wait ? 
erts_check_next_timeout_time(esdp) @@ -2207,6 +2213,14 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) erts_do_break_handling(); #endif + +#ifdef ERTS_SIGNAL_STATE /* ifndef ERTS_SMP */ + if (ERTS_SIGNAL_STATE) { + erts_handle_signal_state(); + } +#endif + + if (poll_ret != 0) { erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0); forget_removed(&pollset); diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c index 0d1ed17449..b48b3f8804 100644 --- a/erts/emulator/sys/unix/sys.c +++ b/erts/emulator/sys/unix/sys.c @@ -139,6 +139,28 @@ volatile int erts_break_requested = 0; #define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1) #define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0) #endif + +#ifndef ERTS_SMP +static Eterm signalstate_sigterm[] = { + am_sigint, /* 0 */ + am_sighup, /* 1 */ + am_sigquit, /* 2 */ + am_sigabrt, /* 3 */ + am_sigalrm, /* 4 */ + am_sigterm, /* 5 */ + am_sigusr1, /* 6 */ + am_sigusr2, /* 7 */ + am_sigchld, /* 8 */ + am_sigstop, /* 9 */ + am_sigtstp /* 10 */ +}; + +volatile Uint erts_signal_state = 0; +#define ERTS_SET_SIGNAL_STATE(S) (erts_signal_state |= signum_to_signalstate(S)) +#define ERTS_CLEAR_SIGNAL_STATE (erts_signal_state = 0) +static ERTS_INLINE Uint signum_to_signalstate(int signum); +#endif + /* set early so the break handler has access to initial mode */ static struct termios initial_tty_mode; static int replace_intr = 0; @@ -770,13 +792,34 @@ signum_to_signalterm(int signum) } } +#ifndef ERTS_SMP +static ERTS_INLINE Uint +signum_to_signalstate(int signum) +{ + switch (signum) { + case SIGINT: return (1 << 0); + case SIGHUP: return (1 << 1); + case SIGQUIT: return (1 << 2); + case SIGABRT: return (1 << 3); + case SIGALRM: return (1 << 4); + case SIGTERM: return (1 << 5); + case SIGUSR1: return (1 << 6); + case SIGUSR2: return (1 << 7); + case SIGCHLD: return (1 << 8); + case SIGSTOP: return (1 << 9); + case SIGTSTP: return (1 << 10); + default: return 0; + } +} +#endif + static RETSIGTYPE generic_signal_handler(int signum) { #ifdef ERTS_SMP smp_sig_notify(signum); #else - Eterm signal = signum_to_signalterm(signum); - signal_notify_requested(signal); + ERTS_SET_SIGNAL_STATE(signum); + ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */ #endif } @@ -962,6 +1005,23 @@ void erts_do_break_handling(void) erts_smp_thr_progress_unblock(); } +#ifdef ERTS_SIGNAL_STATE +void erts_handle_signal_state(void) { + Uint signal_state = ERTS_SIGNAL_STATE; + Uint i = 0; + + ERTS_CLEAR_SIGNAL_STATE; + + while (signal_state) { + if (signal_state & 0x1) { + signal_notify_requested(signalstate_sigterm[i]); + } + i++; + signal_state = signal_state >> 1; + } +} +#endif + /* Fills in the systems representation of the jam/beam process identifier. ** The Pid is put in STRING representation in the supplied buffer, ** no interpretatione of this should be done by the rest of the @@ -1274,8 +1334,8 @@ signal_dispatcher_thread_func(void *unused) do { res = read(sig_notify_fds[0], (void *) &sb.buf[i], sizeof(int) - i); - i += res; - } while ((i != sizeof(int) && res >= 0) || (res < 0 && errno == EINTR)); + i += res > 0 ? 
res : 0; + } while ((i < sizeof(int) && res >= 0) || (res < 0 && errno == EINTR)); if (res < 0) { erts_exit(ERTS_ABORT_EXIT, diff --git a/erts/emulator/test/os_signal_SUITE.erl b/erts/emulator/test/os_signal_SUITE.erl index 9aa49a453e..6bafb0e18c 100644 --- a/erts/emulator/test/os_signal_SUITE.erl +++ b/erts/emulator/test/os_signal_SUITE.erl @@ -323,7 +323,10 @@ kill(Signal, Pid) -> load_nif(Config) -> Path = proplists:get_value(data_dir, Config), - ok = erlang:load_nif(filename:join(Path,"os_signal_nif"), 0). + case erlang:load_nif(filename:join(Path,"os_signal_nif"), 0) of + ok -> ok; + {error,{reload,_}} -> ok + end. run(Program0, Args) -> run(".", Program0, Args). run(Cwd, Program0, Args) when is_list(Cwd) -> diff --git a/erts/epmd/src/epmd_cli.c b/erts/epmd/src/epmd_cli.c index 6fd27d46ea..b10b331cb5 100644 --- a/erts/epmd/src/epmd_cli.c +++ b/erts/epmd/src/epmd_cli.c @@ -46,10 +46,11 @@ void kill_epmd(EpmdVars *g) if ((rval = read_fill(fd,buf,2)) == 2) { if (buf[0] == 'O' && buf[1] == 'K') { printf("Killed\n"); + epmd_cleanup_exit(g,0); } else { printf("Killing not allowed - living nodes in database.\n"); + epmd_cleanup_exit(g,1); } - epmd_cleanup_exit(g,0); } else if (rval < 0) { printf("epmd: failed to read answer from local epmd\n"); epmd_cleanup_exit(g,1); diff --git a/erts/etc/common/Makefile.in b/erts/etc/common/Makefile.in index cb053a1b7c..d3af634729 100644 --- a/erts/etc/common/Makefile.in +++ b/erts/etc/common/Makefile.in @@ -143,7 +143,7 @@ MC_OUTPUTS=$(OBJDIR)/erlsrv_logmess.h $(OBJDIR)/erlsrv_logmess.res MT_FLAG="-MD" endif INET_GETHOST = $(BINDIR)/inet_gethost.exe -INSTALL_EMBEDDED_PROGS += $(BINDIR)/typer.exe $(BINDIR)/dialyzer.exe $(BINDIR)/erlc.exe $(BINDIR)/start_erl.exe $(BINDIR)/escript.exe $(BINDIR)/ct_run.exe +INSTALL_EMBEDDED_PROGS += $(BINDIR)/dialyzer.exe $(BINDIR)/erlc.exe $(BINDIR)/start_erl.exe $(BINDIR)/escript.exe $(BINDIR)/ct_run.exe INSTALL_SRC = $(WINETC)/start_erl.c $(WINETC)/Nmakefile.start_erl ERLEXECDIR=. 
INSTALL_LIBS = @@ -176,7 +176,7 @@ ENTRY_OBJ= ERLSRV_OBJECTS= MC_OUTPUTS= INET_GETHOST = $(BINDIR)/inet_gethost@EXEEXT@ -INSTALL_EMBEDDED_PROGS += $(BINDIR)/typer@EXEEXT@ $(BINDIR)/dialyzer@EXEEXT@ \ +INSTALL_EMBEDDED_PROGS += $(BINDIR)/dialyzer@EXEEXT@ \ $(BINDIR)/erlc@EXEEXT@ $(BINDIR)/escript@EXEEXT@ $(BINDIR)/ct_run@EXEEXT@ \ $(BINDIR)/run_erl $(BINDIR)/to_erl $(BINDIR)/dyn_erl INSTALL_EMBEDDED_DATA = $(UXETC)/start.src $(UXETC)/start_erl.src @@ -242,7 +242,6 @@ endif rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/safe_string.o rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/run_erl.o rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/to_erl.o - rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/typer.o rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/ct_run.o rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/vxcall.o rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/erl.o @@ -433,12 +432,6 @@ $(BINDIR)/dialyzer@EXEEXT@: $(OBJDIR)/dialyzer.o $(ERTS_LIB) $(OBJDIR)/dialyzer.o: dialyzer.c $(RC_GENERATED) $(V_CC) $(CFLAGS) -o $@ -c dialyzer.c -$(BINDIR)/typer@EXEEXT@: $(OBJDIR)/typer.o $(ERTS_LIB) - $(ld_verbose)$(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/typer.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS) - -$(OBJDIR)/typer.o: typer.c $(RC_GENERATED) - $(V_CC) $(CFLAGS) -o $@ -c typer.c - $(BINDIR)/escript@EXEEXT@: $(OBJDIR)/escript.o $(ERTS_LIB) $(ld_verbose)$(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/escript.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS) diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c index 2b2e0e480a..ee59759940 100644 --- a/erts/etc/common/erlexec.c +++ b/erts/etc/common/erlexec.c @@ -1044,7 +1044,7 @@ int main(int argc, char **argv) start_epmd(epmd_prog); #if (! defined(__WIN32__)) && defined(DEBUG) - if (start_detached) { + if (start_detached && get_env("ERL_CONSOLE_MODE")) { /* Start the emulator within an xterm. * Move up all arguments and insert * "xterm -e " first. diff --git a/erts/etc/common/typer.c b/erts/etc/common/typer.c deleted file mode 100644 index 77a95ccded..0000000000 --- a/erts/etc/common/typer.c +++ /dev/null @@ -1,455 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2006-2016. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * %CopyrightEnd% - */ -/* - * Purpose: Typer front-end. - */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#ifdef __WIN32__ -#include <winbase.h> -#endif - -#include <ctype.h> - -#define NO 0 -#define YES 1 - -#define ASIZE(a) (sizeof(a)/sizeof(a[0])) - -static int debug = 0; /* Bit flags for debug printouts. */ - -static char** eargv_base; /* Base of vector. */ -static char** eargv; /* First argument for erl. */ - -static int eargc; /* Number of arguments in eargv. 
*/ - -#ifdef __WIN32__ -# define QUOTE(s) possibly_quote(s) -# define IS_DIRSEP(c) ((c) == '/' || (c) == '\\') -# define ERL_NAME "erl.exe" -#else -# define QUOTE(s) s -# define IS_DIRSEP(c) ((c) == '/') -# define ERL_NAME "erl" -#endif - -#define UNSHIFT(s) eargc++, eargv--; eargv[0] = QUOTE(s) -#define PUSH(s) eargv[eargc++] = QUOTE(s) -#define PUSH2(s, t) PUSH(s); PUSH(t) -#define PUSH3(s, t, u) PUSH2(s, t); PUSH(u) - -/* - * Local functions. - */ - -static void error(char* format, ...); -static void* emalloc(size_t size); -static char* strsave(char* string); -static void push_words(char* src); -static int run_erlang(char* name, char** argv); -static char* get_default_emulator(char* progname); -#ifdef __WIN32__ -static char* possibly_quote(char* arg); -static void* erealloc(void *p, size_t size); -#endif - -/* - * Supply a strerror() function if libc doesn't. - */ -#ifndef HAVE_STRERROR - -extern int sys_nerr; - -#ifndef SYS_ERRLIST_DECLARED -extern const char * const sys_errlist[]; -#endif /* !SYS_ERRLIST_DECLARED */ - -char *strerror(int errnum) -{ - static char *emsg[1024]; - - if (errnum != 0) { - if (errnum > 0 && errnum < sys_nerr) - sprintf((char *) &emsg[0], "(%s)", sys_errlist[errnum]); - else - sprintf((char *) &emsg[0], "errnum = %d ", errnum); - } - else { - emsg[0] = '\0'; - } - return (char *) &emsg[0]; -} -#endif /* !HAVE_STRERROR */ - -#ifdef __WIN32__ -int wmain(int argc, wchar_t **wcargv) -{ - char** argv; -#else -int -main(int argc, char** argv) -{ -#endif - int eargv_size; - int eargc_base; /* How many arguments in the base of eargv. */ - char* emulator; - int need_shell = 0; - -#ifdef __WIN32__ - int i; - int len; - /* Convert argv to utf8 */ - argv = emalloc((argc+1) * sizeof(char*)); - for (i=0; i<argc; i++) { - len = WideCharToMultiByte(CP_UTF8, 0, wcargv[i], -1, NULL, 0, NULL, NULL); - argv[i] = emalloc(len*sizeof(char)); - WideCharToMultiByte(CP_UTF8, 0, wcargv[i], -1, argv[i], len, NULL, NULL); - } - argv[argc] = NULL; -#endif - - emulator = get_default_emulator(argv[0]); - - /* - * Allocate the argv vector to be used for arguments to Erlang. - * Arrange for starting to pushing information in the middle of - * the array, to allow easy addition of commands in the beginning. - */ - - eargv_size = argc*4+100; - eargv_base = (char **) emalloc(eargv_size*sizeof(char*)); - eargv = eargv_base; - eargc = 0; - push_words(emulator); - eargc_base = eargc; - eargv = eargv + eargv_size/2; - eargc = 0; - - /* - * Push initial arguments. - */ - - if (argc > 1 && strcmp(argv[1], "-smp") == 0) { - PUSH("-smpauto"); - argc--, argv++; - } - - PUSH("+B"); - PUSH2("-boot", "start_clean"); - PUSH3("-run", "typer", "start"); - PUSH("-extra"); - - /* - * Push everything except --shell. - */ - - while (argc > 1) { - if (strcmp(argv[1], "--shell") == 0) { - need_shell = 1; - } else { - PUSH(argv[1]); - } - argc--, argv++; - } - - if (!need_shell) { - UNSHIFT("-noinput"); - } - - /* - * Move up the commands for invoking the emulator and adjust eargv - * accordingly. - */ - - while (--eargc_base >= 0) { - UNSHIFT(eargv_base[eargc_base]); - } - - /* - * Invoke Erlang with the collected options. 
- */ - - PUSH(NULL); - return run_erlang(eargv[0], eargv); -} - -static void -push_words(char* src) -{ - char sbuf[MAXPATHLEN]; - char* dst; - - dst = sbuf; - while ((*dst++ = *src++) != '\0') { - if (isspace((int)*src)) { - *dst = '\0'; - PUSH(strsave(sbuf)); - dst = sbuf; - do { - src++; - } while (isspace((int)*src)); - } - } - if (sbuf[0]) - PUSH(strsave(sbuf)); -} -#ifdef __WIN32__ -wchar_t *make_commandline(char **argv) -{ - static wchar_t *buff = NULL; - static int siz = 0; - int num = 0, len; - char **arg; - wchar_t *p; - - if (*argv == NULL) { - return L""; - } - for (arg = argv; *arg != NULL; ++arg) { - num += strlen(*arg)+1; - } - if (!siz) { - siz = num; - buff = (wchar_t *) emalloc(siz*sizeof(wchar_t)); - } else if (siz < num) { - siz = num; - buff = (wchar_t *) erealloc(buff,siz*sizeof(wchar_t)); - } - p = buff; - num=0; - for (arg = argv; *arg != NULL; ++arg) { - len = MultiByteToWideChar(CP_UTF8, 0, *arg, -1, p, siz); - p+=(len-1); - *p++=L' '; - } - *(--p) = L'\0'; - - if (debug) { - printf("Processed command line:%S\n",buff); - } - return buff; -} - -int my_spawnvp(char **argv) -{ - STARTUPINFOW siStartInfo; - PROCESS_INFORMATION piProcInfo; - DWORD ec; - - memset(&siStartInfo,0,sizeof(STARTUPINFOW)); - siStartInfo.cb = sizeof(STARTUPINFOW); - siStartInfo.dwFlags = STARTF_USESTDHANDLES; - siStartInfo.hStdInput = GetStdHandle(STD_INPUT_HANDLE); - siStartInfo.hStdOutput = GetStdHandle(STD_OUTPUT_HANDLE); - siStartInfo.hStdError = GetStdHandle(STD_ERROR_HANDLE); - - if (!CreateProcessW(NULL, - make_commandline(argv), - NULL, - NULL, - TRUE, - 0, - NULL, - NULL, - &siStartInfo, - &piProcInfo)) { - return -1; - } - CloseHandle(piProcInfo.hThread); - - WaitForSingleObject(piProcInfo.hProcess,INFINITE); - if (!GetExitCodeProcess(piProcInfo.hProcess,&ec)) { - return 0; - } - return (int) ec; -} -#endif /* __WIN32__ */ - - -static int -run_erlang(char* progname, char** argv) -{ -#ifdef __WIN32__ - int status; -#endif - - if (debug) { - int i = 0; - while (argv[i] != NULL) - printf(" %s", argv[i++]); - printf("\n"); - } - -#ifdef __WIN32__ - /* - * Alas, we must wait here for the program to finish. - * Otherwise, the shell from which we were executed will think - * we are finished and print a prompt and read keyboard input. - */ - - status = my_spawnvp(argv)/*_spawnvp(_P_WAIT,progname,argv)*/; - if (status == -1) { - fprintf(stderr, "typer: Error executing '%s': %d", progname, - GetLastError()); - } - return status; -#else - execvp(progname, argv); - error("Error %d executing \'%s\'.", errno, progname); - return 2; -#endif -} - -static void -error(char* format, ...) 
-{ - char sbuf[1024]; - va_list ap; - - va_start(ap, format); - erts_vsnprintf(sbuf, sizeof(sbuf), format, ap); - va_end(ap); - fprintf(stderr, "typer: %s\n", sbuf); - exit(1); -} - -static void* -emalloc(size_t size) -{ - void *p = malloc(size); - if (p == NULL) - error("Insufficient memory"); - return p; -} - -#ifdef __WIN32__ -static void * -erealloc(void *p, size_t size) -{ - void *res = realloc(p, size); - if (res == NULL) - error("Insufficient memory"); - return res; -} -#endif - -static char* -strsave(char* string) -{ - char* p = emalloc(strlen(string)+1); - strcpy(p, string); - return p; -} - -static int -file_exists(char *progname) -{ -#ifdef __WIN32__ - wchar_t wcsbuf[MAXPATHLEN]; - MultiByteToWideChar(CP_UTF8, 0, progname, -1, wcsbuf, MAXPATHLEN); - return (_waccess(wcsbuf, 0) != -1); -#else - return (access(progname, 1) != -1); -#endif -} - -static char* -get_default_emulator(char* progname) -{ - char sbuf[MAXPATHLEN]; - char* s; - - if (strlen(progname) >= sizeof(sbuf)) - return ERL_NAME; - - strcpy(sbuf, progname); - for (s = sbuf+strlen(sbuf); s >= sbuf; s--) { - if (IS_DIRSEP(*s)) { - strcpy(s+1, ERL_NAME); - if(file_exists(sbuf)) - return strsave(sbuf); - break; - } - } - return ERL_NAME; -} - -#ifdef __WIN32__ -static char* -possibly_quote(char* arg) -{ - int mustQuote = NO; - int n = 0; - char* s; - char* narg; - - if (arg == NULL) { - return arg; - } - - /* - * Scan the string to find out if it needs quoting and return - * the original argument if not. - */ - - for (s = arg; *s; s++, n++) { - switch(*s) { - case ' ': - mustQuote = YES; - continue; - case '"': - mustQuote = YES; - n++; - continue; - case '\\': - if(s[1] == '"') - n++; - continue; - default: - continue; - } - } - if (!mustQuote) { - return arg; - } - - /* - * Insert the quotes and put a backslash in front of every quote - * inside the string. - */ - - s = narg = emalloc(n+2+1); - for (*s++ = '"'; *arg; arg++, s++) { - if (*arg == '"' || (*arg == '\\' && arg[1] == '"')) { - *s++ = '\\'; - } - *s = *arg; - } - if (s[-1] == '\\') { - *s++ ='\\'; - } - *s++ = '"'; - *s = '\0'; - return narg; -} -#endif /* __WIN32__ */ diff --git a/erts/etc/unix/Install.src b/erts/etc/unix/Install.src index e71308edbe..8be696b16f 100644 --- a/erts/etc/unix/Install.src +++ b/erts/etc/unix/Install.src @@ -89,7 +89,6 @@ cd "$ERL_ROOT/bin" cp -p "$ERL_ROOT/erts-%I_VSN%/bin/erl" . cp -p "$ERL_ROOT/erts-%I_VSN%/bin/erlc" . cp -p "$ERL_ROOT/erts-%I_VSN%/bin/dialyzer" . -cp -p "$ERL_ROOT/erts-%I_VSN%/bin/typer" . cp -p "$ERL_ROOT/erts-%I_VSN%/bin/ct_run" . cp -p "$ERL_ROOT/erts-%I_VSN%/bin/escript" . 
diff --git a/erts/etc/win32/Install.c b/erts/etc/win32/Install.c index 43930ff284..04522a0779 100644 --- a/erts/etc/win32/Install.c +++ b/erts/etc/win32/Install.c @@ -48,7 +48,7 @@ int wmain(int argc, wchar_t **argv) InitSection *ini_section; HANDLE module = GetModuleHandle(NULL); wchar_t *binaries[] = { L"erl.exe", L"werl.exe", L"erlc.exe", - L"dialyzer.exe", L"typer.exe", + L"dialyzer.exe", L"escript.exe", L"ct_run.exe", NULL }; wchar_t *scripts[] = { L"start_clean.boot", L"start_sasl.boot", L"no_dot_erlang.boot", NULL }; wchar_t fromname[MAX_PATH]; diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam Binary files differindex 149d30d299..5d6f36b222 100644 --- a/erts/preloaded/ebin/erlang.beam +++ b/erts/preloaded/ebin/erlang.beam diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam Binary files differindex fdd87ef739..92eedd73d8 100644 --- a/erts/preloaded/ebin/init.beam +++ b/erts/preloaded/ebin/init.beam diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl index 2b0c9ff2af..888d2beee0 100644 --- a/erts/preloaded/src/erlang.erl +++ b/erts/preloaded/src/erlang.erl @@ -129,7 +129,7 @@ -export([list_to_atom/1, list_to_binary/1]). -export([list_to_bitstring/1, list_to_existing_atom/1, list_to_float/1]). -export([list_to_integer/1, list_to_integer/2]). --export([list_to_pid/1, list_to_tuple/1, loaded/0]). +-export([list_to_pid/1, list_to_ref/1, list_to_tuple/1, loaded/0]). -export([localtime/0, make_ref/0]). -export([map_size/1, match_spec_test/3, md5/1, md5_final/1]). -export([md5_init/0, md5_update/2, module_loaded/1, monitor/2]). @@ -1159,6 +1159,12 @@ list_to_integer(_String,_Base) -> String :: string(). list_to_pid(_String) -> erlang:nif_error(undefined). + +%% list_to_ref/1 +-spec erlang:list_to_ref(String) -> reference() when + String :: string(). +list_to_ref(_String) -> + erlang:nif_error(undefined). %% list_to_tuple/1 -spec list_to_tuple(List) -> tuple() when diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl index 5c7cae3ebf..c12a5b159a 100644 --- a/erts/preloaded/src/init.erl +++ b/erts/preloaded/src/init.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2017. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -1099,7 +1099,7 @@ start_it({eval,Bin}) -> {ok,Ts,_} = erl_scan:string(Str), Ts1 = case reverse(Ts) of [{dot,_}|_] -> Ts; - TsR -> reverse([{dot,1} | TsR]) + TsR -> reverse([{dot,erl_anno:new(1)} | TsR]) end, {ok,Expr} = erl_parse:parse_exprs(Ts1), {value, _Value, _Bs} = erl_eval:exprs(Expr, erl_eval:new_bindings()), diff --git a/erts/test/upgrade_SUITE.erl b/erts/test/upgrade_SUITE.erl index f93da8955f..086e54f8a4 100644 --- a/erts/test/upgrade_SUITE.erl +++ b/erts/test/upgrade_SUITE.erl @@ -37,10 +37,9 @@ %% In specific: %% - hipe does not support any upgrade at all %% - dialyzer requires hipe (in the .app file) -%% - typer requires hipe (in the .app file) %% - erl_interface, jinterface support no upgrade -define(appup_exclude, - [dialyzer,hipe,typer,erl_interface,jinterface,ose]). + [dialyzer,hipe,erl_interface,jinterface,ose]). init_per_suite(Config) -> %% Check that a real release is running, not e.g. cerl |
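A related detail from earlier in this diff: in the SMP build, signal_dispatcher_thread_func reads the signal number from the notification pipe, and the fix there ("i += res > 0 ? res : 0" together with the "i < sizeof(int)" loop condition) ensures a failed read can no longer advance the byte counter or end the loop before a full int has arrived. Below is a generic sketch of that "read exactly N bytes" loop; read_exact is an illustrative helper name, not an ERTS function.

/*
 * Sketch: accumulate exactly len bytes, tolerating short reads and
 * EINTR. Short or failed reads never advance the count.
 */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static int read_exact(int fd, void *buf, size_t len)
{
    size_t got = 0;

    while (got < len) {
        ssize_t res = read(fd, (char *)buf + got, len - got);
        if (res > 0) {
            got += (size_t) res;        /* only count bytes actually read */
        } else if (res == 0) {
            return -1;                  /* unexpected end of stream */
        } else if (errno != EINTR) {
            return -1;                  /* real error */
        }
        /* res < 0 && errno == EINTR: interrupted, just retry */
    }
    return 0;
}

A caller would use it as, for example, "int signum; read_exact(fd, &signum, sizeof signum);" — the emulator itself keeps the inline loop shown in the hunk, but the corrected loop follows this same shape.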