Diffstat (limited to 'erts/emulator')
67 files changed, 4877 insertions, 1498 deletions
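A recurring cleanup in the hunks below (bif.c, dist.c): return values that were previously captured only under "#ifdef DEBUG" are now always assigned, asserted, and then explicitly cast to void. A minimal sketch of the pattern, using the standard assert() in place of the emulator's ASSERT macro; insert_item() is a hypothetical stand-in for calls like erts_link_dist_insert():

#include <assert.h>

/* stand-in for erts_link_dist_insert() and friends */
static int insert_item(int x) { return x > 0; }

void demo(void)
{
    int inserted = insert_item(1);
    assert(inserted);
    /* in release builds assert() expands to nothing, so without this
     * cast the variable would trigger an unused-variable warning */
    (void)inserted;
}

This removes the conditional-compilation noise while keeping the debug check, at the cost of the (usually optimized-away) assignment in release builds.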
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index 054692819e..f84c124a14 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -633,8 +633,8 @@ GENERATE += $(TTF_DIR)/driver_tab.c # This list must be consistent with PRE_LOADED_MODULES in # erts/preloaded/src/Makefile. -PRELOAD_BEAM = $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erts_code_purger.beam \ +PRELOAD_BEAM = $(ERL_TOP)/erts/preloaded/ebin/erts_code_purger.beam \ + $(ERL_TOP)/erts/preloaded/ebin/erl_init.beam \ $(ERL_TOP)/erts/preloaded/ebin/init.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_buffer.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_eval.beam \ @@ -874,7 +874,7 @@ RUN_OBJS += \ $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o \ $(OBJDIR)/erl_ptab.o $(OBJDIR)/erl_map.o \ $(OBJDIR)/erl_msacc.o $(OBJDIR)/erl_lock_flags.o \ - $(OBJDIR)/erl_io_queue.o + $(OBJDIR)/erl_io_queue.o $(OBJDIR)/erl_db_catree.o LTTNG_OBJS = $(OBJDIR)/erlang_lttng.o NIF_OBJS = \ diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 45b7540aeb..fb223d2a5d 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -208,7 +208,6 @@ atom dirty_nif_finalizer atom disable_trace atom disabled atom discard -atom display_items atom dist atom dist_cmd atom dist_ctrl_put_data @@ -237,6 +236,7 @@ atom eof atom eol atom Eq='=:=' atom Eqeq='==' +atom erl_init atom erl_tracer atom erlang atom erl_signal_server diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index 9633de2021..b33aab7eee 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -786,8 +786,8 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } } break; - case op_i_put_tuple_xI: - case op_i_put_tuple_yI: + case op_put_tuple2_xI: + case op_put_tuple2_yI: case op_new_map_dtI: case op_update_map_assoc_sdtI: case op_update_map_exact_jsdtI: diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index ab5920a67e..aa61a2d7f9 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -3061,12 +3061,14 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p Uint need; flatmap_t *old_mp, *mp; Eterm res; + Eterm* old_hp; Eterm* hp; Eterm* E; Eterm* old_keys; Eterm* old_vals; Eterm new_key; Eterm map; + int changed = 0; n /= 2; /* Number of values to be updated */ ASSERT(n > 0); @@ -3133,6 +3135,7 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p * Update map, keeping the old key tuple. */ + old_hp = p->htop; hp = p->htop; E = p->stop; @@ -3155,20 +3158,26 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p /* Not same keys */ *hp++ = *old_vals; } else { - GET_TERM(new_p[1], *hp); - hp++; - n--; + GET_TERM(new_p[1], *hp); + if(*hp != *old_vals) changed = 1; + hp++; + n--; if (n == 0) { - /* - * All updates done. Copy remaining values - * and return the result. - */ - for (i++, old_vals++; i < num_old; i++) { - *hp++ = *old_vals++; - } - ASSERT(hp == p->htop + need); - p->htop = hp; - return res; + /* + * All updates done. Copy remaining values + * if any changed or return the original one. 
+ */ + if(changed) { + for (i++, old_vals++; i < num_old; i++) { + *hp++ = *old_vals++; + } + ASSERT(hp == p->htop + need); + p->htop = hp; + return res; + } else { + p->htop = old_hp; + return map; + } } else { new_p += 2; GET_TERM(*new_p, new_key); diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index e61199a8fd..c28a5c57e6 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -2868,6 +2868,7 @@ load_code(LoaderState* stp) break; case op_bs_put_string_WW: case op_i_bs_match_string_xfWW: + case op_i_bs_match_string_yfWW: new_string_patch(stp, ci-1); break; @@ -4245,21 +4246,55 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx) { ErlFunEntry* fe; GenOp* op; + Uint arity, num_free; if (idx.val >= stp->num_lambdas) { - stp->lambda_error = "missing or short chunk 'FunT'"; - fe = 0; + stp->lambda_error = "missing or short chunk 'FunT'"; + fe = 0; + num_free = 0; + arity = 0; } else { - fe = stp->lambdas[idx.val].fe; + fe = stp->lambdas[idx.val].fe; + num_free = stp->lambdas[idx.val].num_free; + arity = fe->arity; } NEW_GENOP(stp, op); - op->op = genop_i_make_fun_2; - op->arity = 2; - op->a[0].type = TAG_u; - op->a[0].val = (BeamInstr) fe; - op->a[1].type = TAG_u; - op->a[1].val = stp->lambdas[idx.val].num_free; + + /* + * It's possible this is called before init process is started, + * skip the optimisation in such case. + */ + if (num_free == 0 && erts_init_process_id != ERTS_INVALID_PID) { + Uint lit; + Eterm* hp; + ErlFunThing* funp; + + lit = new_literal(stp, &hp, ERL_FUN_SIZE); + funp = (ErlFunThing *) hp; + erts_refc_inc(&fe->refc, 2); + funp->thing_word = HEADER_FUN; + funp->next = NULL; + funp->fe = fe; + funp->num_free = 0; + funp->creator = erts_init_process_id; + funp->arity = arity; + + op->op = genop_move_2; + op->arity = 2; + op->a[0].type = TAG_q; + op->a[0].val = lit; + op->a[1].type = TAG_x; + op->a[1].val = 0; + } else { + op->op = genop_i_make_fun_2; + op->arity = 2; + op->a[0].type = TAG_u; + op->a[0].val = (BeamInstr) fe; + op->a[1].type = TAG_u; + op->a[1].val = num_free; + } + op->next = NULL; return op; } diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index f18af8bcd7..3b45f968cf 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -218,11 +218,8 @@ BIF_RETTYPE link_1(BIF_ALIST_1) * We have (pending) connection. * Setup link and enqueue link signal. 
*/ -#ifdef DEBUG - int inserted = -#endif - erts_link_dist_insert(&ldp->b, dep->mld); - ASSERT(inserted); + int inserted = erts_link_dist_insert(&ldp->b, dep->mld); + ASSERT(inserted); (void)inserted; erts_de_runlock(dep); code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1); @@ -567,12 +564,8 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2) case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: { -#ifdef DEBUG - int inserted = -#endif - - erts_monitor_dist_insert(&mdp->target, dep->mld); - ASSERT(inserted); + int inserted = erts_monitor_dist_insert(&mdp->target, dep->mld); + ASSERT(inserted); (void)inserted; erts_de_runlock(dep); code = erts_dsig_send_monitor(&dsd, BIF_P->common.id, target, ref); @@ -1803,6 +1796,7 @@ ebif_bang_2(BIF_ALIST_2) #define SEND_INTERNAL_ERROR (-6) #define SEND_AWAIT_RESULT (-7) #define SEND_YIELD_CONTINUE (-8) +#define SEND_SYSTEM_LIMIT (-9) static Sint remote_send(Process *p, DistEntry *dep, @@ -1842,6 +1836,8 @@ static Sint remote_send(Process *p, DistEntry *dep, res = SEND_YIELD_RETURN; else if (code == ERTS_DSIG_SEND_CONTINUE) res = SEND_YIELD_CONTINUE; + else if (code == ERTS_DSIG_SEND_TOO_LRG) + res = SEND_SYSTEM_LIMIT; else res = 0; break; @@ -2162,6 +2158,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3) case SEND_BADARG: ERTS_BIF_PREP_ERROR(retval, p, BADARG); break; + case SEND_SYSTEM_LIMIT: + ERTS_BIF_PREP_ERROR(retval, p, SYSTEM_LIMIT); + break; case SEND_USER_ERROR: ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR); break; @@ -2218,6 +2217,10 @@ static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1) BUMP_ALL_REDS(BIF_P); BIF_TRAP1(&dsend_continue_trap_export, BIF_P, BIF_ARG_1); } + case ERTS_DSIG_SEND_TOO_LRG: { /*SEND_SYSTEM_LIMIT*/ + erts_set_gc_state(BIF_P, 1); + BIF_ERROR(BIF_P, SYSTEM_LIMIT); + } default: erts_exit(ERTS_ABORT_EXIT, "dsend_continue_trap invalid result %d\n", (int)result); break; @@ -2275,6 +2278,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg) case SEND_BADARG: ERTS_BIF_PREP_ERROR(retval, p, BADARG); break; + case SEND_SYSTEM_LIMIT: + ERTS_BIF_PREP_ERROR(retval, p, SYSTEM_LIMIT); + break; case SEND_USER_ERROR: ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR); break; @@ -2732,9 +2738,7 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1) Uint num_chars, num_built, num_eaten; byte* err_pos; Eterm res; -#ifdef DEBUG int ares; -#endif if (is_not_atom(BIF_ARG_1)) BIF_ERROR(BIF_P, BADARG); @@ -2744,11 +2748,9 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1) if (ap->len == 0) BIF_RET(NIL); /* the empty atom */ -#ifdef DEBUG ares = -#endif erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL); - ASSERT(ares == ERTS_UTF8_OK); + ASSERT(ares == ERTS_UTF8_OK); (void)ares; res = erts_utf8_to_list(BIF_P, num_chars, ap->name, ap->len, ap->len, &num_built, &num_eaten, NIL); @@ -4438,13 +4440,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(old_value); - } else if (BIF_ARG_1 == am_display_items) { - int oval = display_items; - if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) { - goto error; - } - display_items = n < 32 ? 
32 : n; - BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_debug_flags) { BIF_RET(am_true); } else if (BIF_ARG_1 == am_backtrace_depth) { @@ -5147,17 +5142,11 @@ BIF_RETTYPE send_to_logger_2(BIF_ALIST_2) else if (len == 0) buf = ""; else { -#ifdef DEBUG ErlDrvSizeT len2; -#endif buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, len+1); -#ifdef DEBUG len2 = -#else - (void) -#endif erts_iolist_to_buf(BIF_ARG_2, buf, len); - ASSERT(len2 == len); + ASSERT(len2 == len); (void)len2; buf[len] = '\0'; switch (BIF_ARG_1) { case am_info: diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 7548924178..a770524221 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -629,7 +629,6 @@ bif maps:from_list/1 bif maps:is_key/2 bif maps:keys/1 bif maps:merge/2 -bif maps:new/0 bif maps:put/3 bif maps:remove/2 bif maps:update/3 diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index 7556205063..a1ad75708c 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -42,7 +42,7 @@ typedef Uint16 ErtsHalfDigit; #undef BIG_HAVE_DOUBLE_DIGIT typedef Uint32 ErtsHalfDigit; #else -#error "can not determine machine size" +#error "cannot determine machine size" #endif typedef Uint dsize_t; /* Vector size type */ diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 9ff52c92b8..81531f6cc8 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -108,6 +108,7 @@ process_killer(void) erts_exit(0, ""); switch(j) { case 'k': + ASSERT(erts_init_process_id != ERTS_INVALID_PID); /* Send a 'kill' exit signal from init process */ erts_proc_sig_send_exit(NULL, erts_init_process_id, rp->common.id, am_kill, NIL, diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab index 61eb02a7a2..2dde70c2e1 100644 --- a/erts/emulator/beam/bs_instrs.tab +++ b/erts/emulator/beam/bs_instrs.tab @@ -102,6 +102,7 @@ i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) { LIGHT_SWAPIN; HEAP_SPACE_VERIFIED(0); ASSERT(is_value(_result)); + $REFRESH_GEN_DEST(); $Dst = _result; } else { HEAP_SPACE_VERIFIED(0); @@ -123,6 +124,7 @@ i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) { if (is_non_value(_result)) { $FAIL($Fail); } else { + $REFRESH_GEN_DEST(); $Dst = _result; } } @@ -139,6 +141,7 @@ i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) { if (is_non_value(_result)) { $FAIL($Fail); } else { + $REFRESH_GEN_DEST(); $Dst = _result; } } @@ -161,6 +164,7 @@ i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) { if (is_non_value(_result)) { $FAIL($Fail); } else { + $REFRESH_GEN_DEST(); $Dst = _result; } } @@ -724,26 +728,34 @@ bs_start_match.execute(Fail, Live, Slots, Dst) { $FAIL($Fail); } header = *boxed_val(context); - slots = $Slots; + + /* Reserve a slot for the start position. */ + slots = $Slots + 1; live = $Live; + if (header_is_bin_matchstate(header)) { ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context); Uint actual_slots = HEADER_NUM_SLOTS(header); + + /* We're not compatible with contexts created by bs_start_match3. 
*/ + ASSERT(actual_slots >= 1); + ms->save_offset[0] = ms->mb.offset; - if (actual_slots < slots) { - ErlBinMatchState* dst; + if (ERTS_UNLIKELY(actual_slots < slots)) { + ErlBinMatchState* expanded; Uint live = $Live; Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); - $GC_TEST_PRESERVE(wordsneeded, live, context); ms = (ErlBinMatchState *) boxed_val(context); - dst = (ErlBinMatchState *) HTOP; - *dst = *ms; + expanded = (ErlBinMatchState *) HTOP; + *expanded = *ms; *HTOP = HEADER_BIN_MATCHSTATE(slots); HTOP += wordsneeded; HEAP_SPACE_VERIFIED(0); - $Dst = make_matchstate(dst); + context = make_matchstate(expanded); + $REFRESH_GEN_DEST(); } + $Dst = context; } else if (is_binary_header(header)) { Eterm result; Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); @@ -758,6 +770,7 @@ bs_start_match.execute(Fail, Live, Slots, Dst) { if (is_non_value(result)) { $FAIL($Fail); } + $REFRESH_GEN_DEST(); $Dst = result; } else { $FAIL($Fail); @@ -906,6 +919,7 @@ i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) { } wordsneeded = 1+WSIZE(NBYTES((Uint) size)); $GC_TEST_PRESERVE(wordsneeded, $Live, ms); + $REFRESH_GEN_DEST(); } mb = ms_matchbuffer(ms); LIGHT_SWAPOUT; @@ -939,6 +953,7 @@ i_bs_get_utf8(Ctx, Fail, Dst) { if (is_non_value(result)) { $FAIL($Fail); } + $REFRESH_GEN_DEST(); $Dst = result; } @@ -949,6 +964,7 @@ i_bs_get_utf16(Ctx, Fail, Flags, Dst) { if (is_non_value(result)) { $FAIL($Fail); } + $REFRESH_GEN_DEST(); $Dst = result; } @@ -1029,10 +1045,289 @@ i_bs_match_string(Ctx, Fail, Bits, Ptr) { i_bs_save2(Src, Slot) { ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src); + ASSERT(HEADER_NUM_SLOTS(_ms->thing_word) > $Slot); _ms->save_offset[$Slot] = _ms->mb.offset; } i_bs_restore2(Src, Slot) { ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src); + ASSERT(HEADER_NUM_SLOTS(_ms->thing_word) > $Slot); _ms->mb.offset = _ms->save_offset[$Slot]; } + +bs_get_tail(Src, Dst, Live) { + ErlBinMatchBuffer* mb; + Uint size, offs; + ErlSubBin* sb; + Eterm context; + + context = $Src; + + ASSERT(header_is_bin_matchstate(*boxed_val(context))); + + $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context); + + mb = ms_matchbuffer(context); + + offs = mb->offset; + size = mb->size - offs; + + sb = (ErlSubBin *) HTOP; + HTOP += ERL_SUB_BIN_SIZE; + + sb->thing_word = HEADER_SUB_BIN; + sb->size = BYTE_OFFSET(size); + sb->bitsize = BIT_OFFSET(size); + sb->offs = BYTE_OFFSET(offs); + sb->bitoffs = BIT_OFFSET(offs); + sb->is_writable = 0; + sb->orig = mb->orig; + + $REFRESH_GEN_DEST(); + $Dst = make_binary(sb); +} + + +%if ARCH_64 + +i_bs_start_match3_gp(Src, Live, Fail, Dst, Pos) { + Eterm context, header; + Uint position, live; + + context = $Src; + live = $Live; + + if (!is_boxed(context)) { + $FAIL($Fail); + } + + header = *boxed_val(context); + + if (header_is_bin_matchstate(header)) { + ErlBinMatchBuffer *mb; + + ASSERT(HEADER_NUM_SLOTS(header) == 0); + + mb = ms_matchbuffer(context); + position = mb->offset; + + $Dst = context; + } else if (is_binary_header(header)) { + ErlBinMatchState *ms; + + $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(0), live, context); + HEAP_TOP(c_p) = HTOP; +#ifdef DEBUG + c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). 
*/ +#endif + ms = erts_bs_start_match_3(c_p, context); + HTOP = HEAP_TOP(c_p); + HEAP_SPACE_VERIFIED(0); + + if (ms == NULL) { + $FAIL($Fail); + } + + $REFRESH_GEN_DEST(); + $Dst = make_matchstate(ms); + position = ms->mb.offset; + } else { + $FAIL($Fail); + } + + ASSERT(IS_USMALL(0, position)); + $Pos = make_small(position); +} + +i_bs_start_match3(Src, Live, Fail, Dst) { + Eterm context, header; + Uint live; + + context = $Src; + live = $Live; + + if (!is_boxed(context)) { + $FAIL($Fail); + } + + header = *boxed_val(context); + + if (header_is_bin_matchstate(header)) { + ASSERT(HEADER_NUM_SLOTS(header) == 0); + $Dst = context; + } else if (is_binary_header(header)) { + ErlBinMatchState *ms; + + $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(0), live, context); + HEAP_TOP(c_p) = HTOP; +#ifdef DEBUG + c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */ +#endif + ms = erts_bs_start_match_3(c_p, context); + HTOP = HEAP_TOP(c_p); + HEAP_SPACE_VERIFIED(0); + + if (ms == NULL) { + $FAIL($Fail); + } + + $REFRESH_GEN_DEST(); + $Dst = make_matchstate(ms); + } else { + $FAIL($Fail); + } +} + +bs_set_position(Ctx, Pos) { + ErlBinMatchBuffer* mb; + Eterm context; + + context = $Ctx; + ASSERT(header_is_bin_matchstate(*boxed_val(context))); + + mb = ms_matchbuffer(context); + mb->offset = unsigned_val($Pos); +} + +i_bs_get_position(Ctx, Dst) { + ErlBinMatchBuffer* mb; + Eterm context; + + context = $Ctx; + ASSERT(header_is_bin_matchstate(*boxed_val(context))); + + mb = ms_matchbuffer(context); + $Dst = make_small(mb->offset); +} + +%else + +# +# Unlike their 64-bit counterparts, the 32-bit position instructions operate on +# an offset from the "base position" of the context because storing raw +# positions would lead to the creation of far too many bigints. +# +# When a match context is reused we check whether its position fits into an +# immediate, and create a new match context if it does not. This means we only +# have to allocate stuff roughly once every 16MB rather than every time we +# match at a position beyond 16MB. 
+# + +bs_set_position(Ctx, Pos) { + Eterm context, position; + ErlBinMatchState *ms; + + context = $Ctx; + position = $Pos; + + ASSERT(header_is_bin_matchstate(*boxed_val(context))); + ms = (ErlBinMatchState*)boxed_val(context); + + if (ERTS_LIKELY(is_small(position))) { + ms->mb.offset = ms->save_offset[0] + unsigned_val(position); + } else { + ASSERT(is_big(position)); + ms->mb.offset = ms->save_offset[0] + *BIG_V(big_val(position)); + } +} + +bs_get_position(Ctx, Dst, Live) { + ErlBinMatchState *ms; + Eterm context; + Uint position; + + context = $Ctx; + + ASSERT(header_is_bin_matchstate(*boxed_val(context))); + ms = (ErlBinMatchState*)boxed_val(context); + + position = ms->mb.offset - ms->save_offset[0]; + + if (ERTS_LIKELY(IS_USMALL(0, position))) { + $Dst = make_small(position); + } else { + Eterm *hp; + + $GC_TEST_PRESERVE(BIG_UINT_HEAP_SIZE, $Live, context); + + hp = HTOP; + HTOP += BIG_UINT_HEAP_SIZE; + + *hp = make_pos_bignum_header(1); + BIG_DIGIT(hp, 0) = position; + + $REFRESH_GEN_DEST(); + $Dst = make_big(hp); + } +} + +i_bs_start_match3(Src, Live, Fail, Dst) { + Eterm context, header; + Uint live; + + context = $Src; + live = $Live; + + if (!is_boxed(context)) { + $FAIL($Fail); + } + + header = *boxed_val(context); + + if (header_is_bin_matchstate(header)) { + ErlBinMatchState *current_ms; + Uint position; + + ASSERT(HEADER_NUM_SLOTS(header) == 1); + + current_ms = (ErlBinMatchState*)boxed_val(context); + position = current_ms->mb.offset - current_ms->save_offset[0]; + + if (ERTS_LIKELY(IS_USMALL(0, position))) { + $Dst = context; + } else { + ErlBinMatchState *new_ms; + + $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(1), live, context); + current_ms = (ErlBinMatchState*)boxed_val(context); + + new_ms = (ErlBinMatchState*)HTOP; + HTOP += ERL_BIN_MATCHSTATE_SIZE(1); + + new_ms->thing_word = HEADER_BIN_MATCHSTATE(1); + new_ms->save_offset[0] = current_ms->mb.offset; + new_ms->mb = current_ms->mb; + + $REFRESH_GEN_DEST(); + $Dst = make_matchstate(new_ms); + } + } else if (is_binary_header(header)) { + Eterm result; + + $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(1), live, context); + HEAP_TOP(c_p) = HTOP; + +#ifdef DEBUG + c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */ +#endif + + /* We intentionally use erts_bs_start_match_2 so that we can use + * save_offset as a base for all saved positions on this context, + * allowing us to avoid bigints for much longer. */ + result = erts_bs_start_match_2(c_p, context, 1); + + HTOP = HEAP_TOP(c_p); + HEAP_SPACE_VERIFIED(0); + + if (is_non_value(result)) { + $FAIL($Fail); + } + + $REFRESH_GEN_DEST(); + $Dst = result; + } else { + $FAIL($Fail); + } +} + +%endif diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 0633bff3c2..15642e1669 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -574,9 +574,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) } if (dep->state == ERTS_DE_STATE_EXITING) { -#ifdef DEBUG ASSERT(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT); -#endif } else { dep->state = ERTS_DE_STATE_EXITING; @@ -1343,10 +1341,7 @@ int erts_net_message(Port *prt, from, to); ASSERT(ldp->a.other.item == to); ASSERT(eq(ldp->b.other.item, from)); -#ifdef DEBUG - code = -#endif - erts_link_dist_insert(&ldp->a, dep->mld); + code = erts_link_dist_insert(&ldp->a, dep->mld); ASSERT(code); if (erts_proc_sig_send_link(NULL, to, &ldp->b)) @@ -1354,10 +1349,7 @@ int erts_net_message(Port *prt, /* Failed to send signal; cleanup and reply noproc... 
*/ -#ifdef DEBUG - code = -#endif - erts_link_dist_delete(&ldp->a); + code = erts_link_dist_delete(&ldp->a); ASSERT(code); erts_link_release_both(ldp); } @@ -1904,6 +1896,12 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) ASSERT(ctx->obuf->ext_endp <= &ctx->obuf->data[0] + ctx->data_size); ctx->data_size = ctx->obuf->ext_endp - ctx->obuf->extp; + if (ctx->data_size > (Uint) INT_MAX) { + free_dist_obuf(ctx->obuf); + ctx->obuf = NULL; + retval = ERTS_DSIG_SEND_TOO_LRG; + goto done; + } ctx->obuf->hopefull_flags = ctx->u.ec.hopefull_flags; /* @@ -3881,28 +3879,22 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) Node); mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon); if (created) { -#ifdef DEBUG int inserted = -#endif erts_monitor_dist_insert(&mdep->md.target, dep->mld); - ASSERT(inserted); + ASSERT(inserted); (void)inserted; ASSERT(mdep->dist->connection_id == dep->connection_id); } else if (mdep->dist->connection_id != dep->connection_id) { ErtsMonitorDataExtended *mdep2; ErtsMonitor *mon2; -#ifdef DEBUG int inserted; -#endif mdep2 = ((ErtsMonitorDataExtended *) erts_monitor_create(ERTS_MON_TYPE_NODE, NIL, p->common.id, Node, NIL)); mon2 = &mdep2->md.origin; -#ifdef DEBUG inserted = -#endif erts_monitor_dist_insert(&mdep->md.target, dep->mld); - ASSERT(inserted); + ASSERT(inserted); (void)inserted; ASSERT(mdep2->dist->connection_id == dep->connection_id); mdep2->uptr.node_monitors = mdep->uptr.node_monitors; diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index d4d7874a70..845fab229a 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -378,6 +378,7 @@ typedef struct { #define ERTS_DSIG_SEND_OK 0 #define ERTS_DSIG_SEND_YIELD 1 #define ERTS_DSIG_SEND_CONTINUE 2 +#define ERTS_DSIG_SEND_TOO_LRG 3 extern int erts_dsig_send_link(ErtsDSigData *, Eterm, Eterm); extern int erts_dsig_send_msg(Eterm, Eterm, ErtsSendContext*); diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 8fe1ccb758..36c46fd7aa 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -64,9 +64,6 @@ # error "Too many schedulers; cannot create that many pref alloc instances" #endif -#define ERTS_ALC_FIX_TYPE_IX(T) \ - (ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE) - #define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS #if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND) @@ -156,20 +153,13 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq, ErtsAlcType_t erts_fix_core_allocator_ix; -enum allctr_type { - GOODFIT, - BESTFIT, - AFIT, - FIRSTFIT -}; - struct au_init { int enable; int thr_spec; int disable_allowed; int thr_spec_allowed; int carrier_migration_allowed; - enum allctr_type atype; + ErtsAlcStrat_t astrat; struct { AllctrInit_t util; GFAllctrInit_t gf; @@ -219,7 +209,9 @@ typedef struct { struct au_init test_alloc; } erts_alc_hndl_args_init_t; -#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}} +#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, \ + ERTS_ALC_S_GOODFIT, DEFAULT_ALLCTR_INIT, \ + {1,1,1,1}} #define SET_DEFAULT_ALLOC_OPTS(IP) \ do { \ @@ -233,7 +225,7 @@ set_default_sl_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; - ip->atype = GOODFIT; + ip->astrat = ERTS_ALC_S_GOODFIT; ip->init.util.name_prefix = "sl_"; ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED; #ifndef SMALL_MEMORY @@ -252,7 +244,7 @@ set_default_std_alloc_opts(struct au_init *ip) 
SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.util.name_prefix = "std_"; ip->init.util.alloc_no = ERTS_ALC_A_STANDARD; #ifndef SMALL_MEMORY @@ -270,7 +262,7 @@ set_default_ll_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 0; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.bf.ao = 1; ip->init.util.ramv = 0; ip->init.util.mmsbc = 0; @@ -299,7 +291,7 @@ set_default_literal_alloc_opts(struct au_init *ip) ip->disable_allowed = 0; ip->thr_spec_allowed = 0; ip->carrier_migration_allowed = 0; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.bf.ao = 1; ip->init.util.ramv = 0; ip->init.util.mmsbc = 0; @@ -349,7 +341,7 @@ set_default_exec_alloc_opts(struct au_init *ip) ip->disable_allowed = 0; ip->thr_spec_allowed = 0; ip->carrier_migration_allowed = 0; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.bf.ao = 1; ip->init.util.ramv = 0; ip->init.util.mmsbc = 0; @@ -378,7 +370,7 @@ set_default_temp_alloc_opts(struct au_init *ip) ip->thr_spec = 1; ip->disable_allowed = 0; ip->carrier_migration_allowed = 0; - ip->atype = AFIT; + ip->astrat = ERTS_ALC_S_AFIT; ip->init.util.name_prefix = "temp_"; ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY; #ifndef SMALL_MEMORY @@ -397,7 +389,7 @@ set_default_eheap_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; - ip->atype = GOODFIT; + ip->astrat = ERTS_ALC_S_GOODFIT; ip->init.util.name_prefix = "eheap_"; ip->init.util.alloc_no = ERTS_ALC_A_EHEAP; #ifndef SMALL_MEMORY @@ -416,7 +408,7 @@ set_default_binary_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.util.name_prefix = "binary_"; ip->init.util.alloc_no = ERTS_ALC_A_BINARY; #ifndef SMALL_MEMORY @@ -435,7 +427,7 @@ set_default_ets_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.util.name_prefix = "ets_"; ip->init.util.alloc_no = ERTS_ALC_A_ETS; #ifndef SMALL_MEMORY @@ -453,7 +445,7 @@ set_default_driver_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.util.name_prefix = "driver_"; ip->init.util.alloc_no = ERTS_ALC_A_DRIVER; #ifndef SMALL_MEMORY @@ -473,7 +465,7 @@ set_default_fix_alloc_opts(struct au_init *ip, SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; - ip->atype = BESTFIT; + ip->astrat = ERTS_ALC_S_BESTFIT; ip->init.bf.ao = 1; ip->init.util.name_prefix = "fix_"; ip->init.util.fix_type_size = fix_type_sizes; @@ -493,7 +485,7 @@ set_default_test_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = 0; /* Disabled by default */ ip->thr_spec = -1 * erts_no_schedulers; - ip->atype = FIRSTFIT; + ip->astrat = ERTS_ALC_S_FIRSTFIT; ip->init.aoff.crr_order = FF_AOFF; ip->init.aoff.blk_order = FF_BF; ip->init.util.name_prefix = "test_"; @@ -552,8 +544,8 @@ start_au_allocator(ErtsAlcType_t alctr_n, static void refuse_af_strategy(struct au_init *init) { - if (init->atype == AFIT) - init->atype = GOODFIT; + if (init->astrat == ERTS_ALC_S_AFIT) + init->astrat = 
ERTS_ALC_S_GOODFIT; } #ifdef HARD_DEBUG @@ -576,7 +568,10 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size) for (i=0; i < tspec->size; i++) { Allctr_t* allctr = tspec->allctr[i]; for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) { - allctr->fix[j].type_size += extra_block_size; + size_t size = allctr->fix[j].type_size; + size = MAX(size + extra_block_size, + sizeof(ErtsAllctrDDBlock_t)); + allctr->fix[j].type_size = size; } } } @@ -584,8 +579,11 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size) { Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra; for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) { - allctr->fix[j].type_size += extra_block_size; - } + size_t size = allctr->fix[j].type_size; + size = MAX(size + extra_block_size, + sizeof(ErtsAllctrDDBlock_t)); + allctr->fix[j].type_size = size; + } } } } @@ -597,7 +595,7 @@ strategy_support_carrier_migration(struct au_init *auip) * Currently only aoff* and ageff* support carrier * migration, i.e, type AOFIRSTFIT. */ - return auip->atype == FIRSTFIT; + return auip->astrat == ERTS_ALC_S_FIRSTFIT; } static ERTS_INLINE void @@ -612,7 +610,7 @@ adjust_carrier_migration_support(struct au_init *auip) */ if (!strategy_support_carrier_migration(auip)) { /* Default to aoffcbf */ - auip->atype = FIRSTFIT; + auip->astrat = ERTS_ALC_S_FIRSTFIT; auip->init.aoff.crr_order = FF_AOFF; auip->init.aoff.blk_order = FF_BF; } @@ -1018,7 +1016,7 @@ start_au_allocator(ErtsAlcType_t alctr_n, int i; int size = 1; void *as0; - enum allctr_type atype; + ErtsAlcStrat_t astrat; ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n]; ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n]; ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n]; @@ -1077,7 +1075,7 @@ start_au_allocator(ErtsAlcType_t alctr_n, for (i = 0; i < size; i++) { Allctr_t *as; - atype = init->atype; + astrat = init->astrat; if (!init->thr_spec) as0 = state; @@ -1094,8 +1092,8 @@ start_au_allocator(ErtsAlcType_t alctr_n, if (i != 0) init->init.util.ts = 0; else { - if (atype == AFIT) - atype = GOODFIT; + if (astrat == ERTS_ALC_S_AFIT) + astrat = ERTS_ALC_S_GOODFIT; init->init.util.ts = 1; } init->init.util.tspec = init->thr_spec + 1; @@ -1109,25 +1107,26 @@ start_au_allocator(ErtsAlcType_t alctr_n, (((char *) fix_lists) + fix_list_size)); } + init->init.util.alloc_strat = astrat; init->init.util.ix = i; - switch (atype) { - case GOODFIT: + switch (astrat) { + case ERTS_ALC_S_GOODFIT: as = erts_gfalc_start((GFAllctr_t *) as0, &init->init.gf, &init->init.util); break; - case BESTFIT: + case ERTS_ALC_S_BESTFIT: as = erts_bfalc_start((BFAllctr_t *) as0, &init->init.bf, &init->init.util); break; - case AFIT: + case ERTS_ALC_S_AFIT: as = erts_afalc_start((AFAllctr_t *) as0, &init->init.af, &init->init.util); break; - case FIRSTFIT: + case ERTS_ALC_S_FIRSTFIT: as = erts_aoffalc_start((AOFFAllctr_t *) as0, &init->init.aoff, &init->init.util); @@ -1363,51 +1362,59 @@ handle_au_arg(struct au_init *auip, else if(has_prefix("as", sub_param)) { char *alg = get_value(sub_param + 2, argv, ip); if (sys_strcmp("bf", alg) == 0) { - auip->atype = BESTFIT; + auip->astrat = ERTS_ALC_S_BESTFIT; auip->init.bf.ao = 0; } else if (sys_strcmp("aobf", alg) == 0) { - auip->atype = BESTFIT; + auip->astrat = ERTS_ALC_S_BESTFIT; auip->init.bf.ao = 1; } else if (sys_strcmp("gf", alg) == 0) { - auip->atype = GOODFIT; + auip->astrat = ERTS_ALC_S_GOODFIT; } else if (sys_strcmp("af", alg) == 0) { - auip->atype = AFIT; + auip->astrat = ERTS_ALC_S_AFIT; } else if (sys_strcmp("aoff", alg) == 0) { - 
auip->atype = FIRSTFIT; + auip->astrat = ERTS_ALC_S_FIRSTFIT; auip->init.aoff.crr_order = FF_AOFF; auip->init.aoff.blk_order = FF_AOFF; } else if (sys_strcmp("aoffcbf", alg) == 0) { - auip->atype = FIRSTFIT; + auip->astrat = ERTS_ALC_S_FIRSTFIT; auip->init.aoff.crr_order = FF_AOFF; auip->init.aoff.blk_order = FF_BF; } else if (sys_strcmp("aoffcaobf", alg) == 0) { - auip->atype = FIRSTFIT; + auip->astrat = ERTS_ALC_S_FIRSTFIT; auip->init.aoff.crr_order = FF_AOFF; auip->init.aoff.blk_order = FF_AOBF; } else if (sys_strcmp("ageffcaoff", alg) == 0) { - auip->atype = FIRSTFIT; + auip->astrat = ERTS_ALC_S_FIRSTFIT; auip->init.aoff.crr_order = FF_AGEFF; auip->init.aoff.blk_order = FF_AOFF; } else if (sys_strcmp("ageffcbf", alg) == 0) { - auip->atype = FIRSTFIT; + auip->astrat = ERTS_ALC_S_FIRSTFIT; auip->init.aoff.crr_order = FF_AGEFF; auip->init.aoff.blk_order = FF_BF; } else if (sys_strcmp("ageffcaobf", alg) == 0) { - auip->atype = FIRSTFIT; + auip->astrat = ERTS_ALC_S_FIRSTFIT; auip->init.aoff.crr_order = FF_AGEFF; auip->init.aoff.blk_order = FF_AOBF; } else { - bad_value(param, sub_param + 1, alg); + if (auip->init.util.alloc_no == ERTS_ALC_A_TEST + && sys_strcmp("chaosff", alg) == 0) { + auip->astrat = ERTS_ALC_S_FIRSTFIT; + auip->init.aoff.crr_order = FF_CHAOS; + auip->init.aoff.blk_order = FF_CHAOS; + } + else { + bad_value(param, sub_param + 1, alg); + } } if (!strategy_support_carrier_migration(auip)) auip->init.util.acul = 0; @@ -2030,33 +2037,55 @@ erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size) } static ERTS_INLINE UWord -alcu_size(ErtsAlcType_t ai, ErtsAlcUFixInfo_t *fi, int fisz) +alcu_size(ErtsAlcType_t alloc_no, ErtsAlcUFixInfo_t *fi, int fisz) { - UWord res = 0; + UWord res; + int ai; - ASSERT(erts_allctrs_info[ai].enabled); - ASSERT(erts_allctrs_info[ai].alloc_util); + if (!erts_allctrs_info[alloc_no].thr_spec) { + AllctrSize_t size; + Allctr_t *allctr; - if (!erts_allctrs_info[ai].thr_spec) { - Allctr_t *allctr = erts_allctrs_info[ai].extra; - AllctrSize_t asize; - erts_alcu_current_size(allctr, &asize, fi, fisz); - res += asize.blocks; + allctr = erts_allctrs_info[alloc_no].extra; + erts_alcu_current_size(allctr, &size, fi, fisz); + + return size.blocks; } - else { - ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai]; - int i; - ASSERT(tspec->enabled); + res = 0; - for (i = tspec->size - 1; i >= 0; i--) { - Allctr_t *allctr = tspec->allctr[i]; - AllctrSize_t asize; - if (allctr) { - erts_alcu_current_size(allctr, &asize, fi, fisz); - res += asize.blocks; - } - } + /* Thread-specific allocators can migrate carriers across types, so we have + * to visit every allocator type to gather information on blocks that were + * allocated by us. 
*/ + for (ai = ERTS_ALC_A_MIN; ai < ERTS_ALC_A_MAX; ai++) { + ErtsAllocatorThrSpec_t *tspec; + Allctr_t *allctr; + int i; + + if (!erts_allctrs_info[ai].thr_spec) { + continue; + } + + tspec = &erts_allctr_thr_spec[ai]; + ASSERT(tspec->enabled); + + for (i = tspec->size - 1; i >= 0; i--) { + allctr = tspec->allctr[i]; + + if (allctr) { + AllctrSize_t size; + + if (ai == alloc_no) { + erts_alcu_current_size(allctr, &size, fi, fisz); + } else { + erts_alcu_foreign_size(allctr, alloc_no, &size); + } + + ASSERT(((SWord)size.blocks) >= 0); + + res += size.blocks; + } + } } return res; @@ -2400,6 +2429,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) } if (want_tot_or_sys) { + ASSERT(size.total >= size.processes); size.system = size.total - size.processes; } @@ -3459,29 +3489,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) switch (op) { case 0xf00: if (((Allctr_t *) a1)->thread_safe) - return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF, + return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_TEST, (void *) a1, (Uint) a2); else - return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF, + return (UWord) erts_alcu_alloc(ERTS_ALC_T_TEST, (void *) a1, (Uint) a2); case 0xf01: if (((Allctr_t *) a1)->thread_safe) - return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF, + return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2, (Uint) a3); else - return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF, + return (UWord) erts_alcu_realloc(ERTS_ALC_T_TEST, (void *) a1, (void *) a2, (Uint) a3); case 0xf02: if (((Allctr_t *) a1)->thread_safe) - erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); + erts_alcu_free_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2); else - erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); + erts_alcu_free(ERTS_ALC_T_TEST, (void *) a1, (void *) a2); return 0; case 0xf03: { Allctr_t *allctr; @@ -3489,8 +3519,10 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) SET_DEFAULT_ALLOC_OPTS(&init); init.enable = 1; - init.atype = GOODFIT; + init.astrat = ERTS_ALC_S_GOODFIT; init.init.util.name_prefix = (char *) a1; + init.init.util.alloc_no = ERTS_ALC_A_TEST; + init.init.util.alloc_strat = init.astrat; init.init.util.ts = 1; if ((char **) a3) { char **argv = (char **) a3; @@ -3504,31 +3536,31 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) } } - switch (init.atype) { - case GOODFIT: + switch (init.astrat) { + case ERTS_ALC_S_GOODFIT: allctr = erts_gfalc_start((GFAllctr_t *) - erts_alloc(ERTS_ALC_T_UNDEF, + erts_alloc(ERTS_ALC_T_TEST, sizeof(GFAllctr_t)), &init.init.gf, &init.init.util); break; - case BESTFIT: + case ERTS_ALC_S_BESTFIT: allctr = erts_bfalc_start((BFAllctr_t *) - erts_alloc(ERTS_ALC_T_UNDEF, + erts_alloc(ERTS_ALC_T_TEST, sizeof(BFAllctr_t)), &init.init.bf, &init.init.util); break; - case AFIT: + case ERTS_ALC_S_AFIT: allctr = erts_afalc_start((AFAllctr_t *) - erts_alloc(ERTS_ALC_T_UNDEF, + erts_alloc(ERTS_ALC_T_TEST, sizeof(AFAllctr_t)), &init.init.af, &init.init.util); break; - case FIRSTFIT: + case ERTS_ALC_S_FIRSTFIT: allctr = erts_aoffalc_start((AOFFAllctr_t *) - erts_alloc(ERTS_ALC_T_UNDEF, + erts_alloc(ERTS_ALC_T_TEST, sizeof(AOFFAllctr_t)), &init.init.aoff, &init.init.util); @@ -3544,7 +3576,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) } case 0xf04: erts_alcu_stop((Allctr_t *) a1); - erts_free(ERTS_ALC_T_UNDEF, (void *) a1); + erts_free(ERTS_ALC_T_TEST, (void *) a1); break; case 0xf05: return (UWord) 1; case 0xf06: return (UWord) ((Allctr_t *) 
a1)->thread_safe; @@ -3554,7 +3586,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe; #endif case 0xf08: { - ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_mutex)); + ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_mutex)); if (ethr_mutex_init(mtx) != 0) ERTS_ALC_TEST_ABORT; return (UWord) mtx; @@ -3563,7 +3595,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) ethr_mutex *mtx = (ethr_mutex *) a1; if (ethr_mutex_destroy(mtx) != 0) ERTS_ALC_TEST_ABORT; - erts_free(ERTS_ALC_T_UNDEF, (void *) mtx); + erts_free(ERTS_ALC_T_TEST, (void *) mtx); break; } case 0xf0a: @@ -3573,7 +3605,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) ethr_mutex_unlock((ethr_mutex *) a1); break; case 0xf0c: { - ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond)); + ethr_cond *cnd = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_cond)); if (ethr_cond_init(cnd) != 0) ERTS_ALC_TEST_ABORT; return (UWord) cnd; @@ -3582,7 +3614,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) ethr_cond *cnd = (ethr_cond *) a1; if (ethr_cond_destroy(cnd) != 0) ERTS_ALC_TEST_ABORT; - erts_free(ERTS_ALC_T_UNDEF, (void *) cnd); + erts_free(ERTS_ALC_T_TEST, (void *) cnd); break; } case 0xf0e: @@ -3596,7 +3628,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) break; } case 0xf10: { - ethr_tid *tid = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_tid)); + ethr_tid *tid = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_tid)); if (ethr_thr_create(tid, (void * (*)(void *)) a1, (void *) a2, @@ -3608,7 +3640,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) ethr_tid *tid = (ethr_tid *) a1; if (ethr_thr_join(*tid, NULL) != 0) ERTS_ALC_TEST_ABORT; - erts_free(ERTS_ALC_T_UNDEF, (void *) tid); + erts_free(ERTS_ALC_T_TEST, (void *) tid); break; } case 0xf12: @@ -3960,9 +3992,10 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func) static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1]; static void * -debug_alloc(ErtsAlcType_t n, void *extra, Uint size) +debug_alloc(ErtsAlcType_t type, void *extra, Uint size) { ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; + ErtsAlcType_t n; Uint dsize; void *res; @@ -3970,9 +4003,11 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size) erts_hdbg_chk_blks(); #endif + n = ERTS_ALC_T2N(type); + ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX); dsize = size + FENCE_SZ; - res = (*real_af->alloc)(n, real_af->extra, dsize); + res = (*real_af->alloc)(type, real_af->extra, dsize); res = set_memory_fence(res, size, n); @@ -3986,14 +4021,17 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size) static void * -debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size) +debug_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size) { ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; + ErtsAlcType_t n; Uint dsize; Uint old_size; void *dptr; void *res; + n = ERTS_ALC_T2N(type); + ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX); dsize = size + FENCE_SZ; @@ -4008,7 +4046,7 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size) 0xf, sizeof(Uint) + old_size - size); - res = (*real_af->realloc)(n, real_af->extra, dptr, dsize); + res = (*real_af->realloc)(type, real_af->extra, dptr, dsize); res = set_memory_fence(res, size, n); @@ -4021,12 +4059,16 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size) } static void -debug_free(ErtsAlcType_t n, void *extra, void 
*ptr) +debug_free(ErtsAlcType_t type, void *extra, void *ptr) { ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; + ErtsAlcType_t n; void *dptr; Uint size; - int free_pattern = n; + int free_pattern; + + n = ERTS_ALC_T2N(type); + free_pattern = n; ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX); @@ -4041,7 +4083,7 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr) #endif sys_memset((void *) dptr, free_pattern, size + FENCE_SZ); - (*real_af->free)(n, real_af->extra, dptr); + (*real_af->free)(type, real_af->extra, dptr); #ifdef PRINT_OPS fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr); diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index fcb58ff58a..c13cf3f5b0 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -26,10 +26,23 @@ #define ERL_THR_PROGRESS_TSD_TYPE_ONLY #include "erl_thr_progress.h" #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY -#include "erl_alloc_util.h" #include "erl_threads.h" #include "erl_mmap.h" +typedef enum { + ERTS_ALC_S_INVALID = 0, + + ERTS_ALC_S_GOODFIT, + ERTS_ALC_S_BESTFIT, + ERTS_ALC_S_AFIT, + ERTS_ALC_S_FIRSTFIT, + + ERTS_ALC_S_MIN = ERTS_ALC_S_GOODFIT, + ERTS_ALC_S_MAX = ERTS_ALC_S_FIRSTFIT +} ErtsAlcStrat_t; + +#include "erl_alloc_util.h" + #ifdef DEBUG # undef ERTS_ALC_WANT_INLINE # define ERTS_ALC_WANT_INLINE 0 @@ -52,6 +65,14 @@ #define ERTS_ALC_NO_FIXED_SIZES \ (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1) +#define ERTS_ALC_IS_FIX_TYPE(T) \ + (ERTS_ALC_T2N(T) >= ERTS_ALC_N_MIN_A_FIXED_SIZE && \ + ERTS_ALC_T2N(T) <= ERTS_ALC_N_MAX_A_FIXED_SIZE) + +#define ERTS_ALC_FIX_TYPE_IX(T) \ + (ASSERT(ERTS_ALC_IS_FIX_TYPE(T)), \ + ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE) + void erts_sys_alloc_init(void); void *erts_sys_alloc(ErtsAlcType_t, void *, Uint); void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint); @@ -228,7 +249,7 @@ void *erts_alloc(ErtsAlcType_t type, Uint size) void *res; ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC); res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)( - ERTS_ALC_T2N(type), + type, erts_allctrs[ERTS_ALC_T2A(type)].extra, size); if (!res) @@ -243,7 +264,7 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size) void *res; ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC); res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)( - ERTS_ALC_T2N(type), + type, erts_allctrs[ERTS_ALC_T2A(type)].extra, ptr, size); @@ -258,7 +279,7 @@ void erts_free(ErtsAlcType_t type, void *ptr) { ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC); (*erts_allctrs[ERTS_ALC_T2A(type)].free)( - ERTS_ALC_T2N(type), + type, erts_allctrs[ERTS_ALC_T2A(type)].extra, ptr); ERTS_MSACC_POP_STATE_X(); @@ -271,7 +292,7 @@ void *erts_alloc_fnf(ErtsAlcType_t type, Uint size) void *res; ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC); res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)( - ERTS_ALC_T2N(type), + type, erts_allctrs[ERTS_ALC_T2A(type)].extra, size); ERTS_MSACC_POP_STATE_X(); @@ -285,7 +306,7 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size) void *res; ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC); res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)( - ERTS_ALC_T2N(type), + type, erts_allctrs[ERTS_ALC_T2A(type)].extra, ptr, size); diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 5409b89bab..f1e99820af 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -30,10 +30,10 @@ # name space). 
# * Types, allocators, classes, and descriptions have different name # spaces. -# * The type, allocator, and class names INVALID are reserved and can -# not be used. +# * The type, allocator, and class names INVALID are reserved and +# cannot be used. # * The descriptions invalid_allocator, invalid_class, and invalid_type -# are reserved and can not be used. +# are reserved and cannot be used. # * Declarations can be done conditionally by use of a # +if <boolean_variable> # diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index a5740a08cf..b7a8b9c2d0 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -96,7 +96,6 @@ static int initialized = 0; #define MBC_REALLOC_ALWAYS_MOVES #endif - /* alloc_util global parameters */ static Uint sys_alloc_carrier_size; #if HAVE_ERTS_MSEG @@ -113,32 +112,38 @@ static int allow_sys_alloc_carriers; #define DEC_CC(CC) ((CC)--) -/* Multi block carrier (MBC) memory layout in R16: +/* Multi block carrier (MBC) memory layout in OTP 22: Empty MBC: -[Carrier_t|pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t L0T0|fhdr| free... ] MBC after allocating first block: -[Carrier_t|pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t 0000| udata |pad|Block_t L0T0|fhdr| free... ] MBC after allocating second block: -[Carrier_t|pad|Block_t 000| udata |pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t 0000| udata |pad|Block_t 0000| udata |pad|Block_t L0T0|fhdr| free... ] MBC after deallocating first block: -[Carrier_t|pad|Block_t 00T|fhdr| free |FreeBlkFtr_t|Block_t 0P0| udata |pad|Block_t L0T|fhdr| free... ] +[Carrier_t|pad|Block_t 00T0|fhdr| free |FreeBlkFtr_t|Block_t 0P00| udata |pad|Block_t L0T0|fhdr| free... ] +MBC after allocating first block, with allocation tagging enabled: +[Carrier_t|pad|Block_t 000A| udata |atag|pad|Block_t L0T0|fhdr| free... ] udata = Allocated user data + atag = A tag with basic metadata about this allocation pad = Padding to ensure correct alignment for user data fhdr = Allocator specific header to keep track of free block free = Unused free memory T = This block is free (THIS_FREE_BLK_HDR_FLG) P = Previous block is free (PREV_FREE_BLK_HDR_FLG) L = Last block in carrier (LAST_BLK_HDR_FLG) + A = Block has an allocation tag footer, only valid for allocated blocks + (ATAG_BLK_HDR_FLG) */ /* Single block carrier (SBC): -[Carrier_t|pad|Block_t 111| udata... ] +[Carrier_t|pad|Block_t 1110| udata... ] +[Carrier_t|pad|Block_t 111A| udata | atag] */ /* Allocation tags ... @@ -154,20 +159,20 @@ MBC after deallocating first block: typedef UWord alcu_atag_t; -#define MAKE_ATAG(IdAtom, Type) \ - (ASSERT((Type) >= ERTS_ALC_N_MIN && (Type) <= ERTS_ALC_N_MAX), \ +#define MAKE_ATAG(IdAtom, TypeNum) \ + (ASSERT((TypeNum) >= ERTS_ALC_N_MIN && (TypeNum) <= ERTS_ALC_N_MAX), \ ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \ - (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (Type)) + (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (TypeNum)) #define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS)) #define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK) #define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS) -#define DBG_IS_VALID_ATAG(Allocator, AT) \ +#define DBG_IS_VALID_ATAG(AT) \ (ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \ ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \ - (Allocator)->alloc_no == ERTS_ALC_T2A(ERTS_ALC_N2T(ATAG_TYPE(AT)))) + ATAG_ID(AT) <= MAX_ATAG_ATOM_ID) /* Blocks ... 
*/ @@ -182,10 +187,15 @@ typedef UWord alcu_atag_t; #endif #define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t)) +#define BLK_HAS_ATAG(B) \ + (!!((B)->bhdr & ATAG_BLK_HDR_FLG)) + #define GET_BLK_ATAG(B) \ - (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1]) + (ASSERT(BLK_HAS_ATAG(B)), \ + ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1]) #define SET_BLK_ATAG(B, T) \ - (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T)) + ((B)->bhdr |= ATAG_BLK_HDR_FLG, \ + ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T)) #define BLK_ATAG_SZ(AP) ((AP)->atags ? sizeof(alcu_atag_t) : 0) @@ -203,13 +213,13 @@ typedef UWord alcu_atag_t; (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ)) #define SET_MBC_ABLK_SZ(B, SZ) \ - (ASSERT(((SZ) & FLG_MASK) == 0), \ + (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \ (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ)) #define SET_MBC_FBLK_SZ(B, SZ) \ - (ASSERT(((SZ) & FLG_MASK) == 0), \ + (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \ (B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ)) #define SET_SBC_BLK_SZ(B, SZ) \ - (ASSERT(((SZ) & FLG_MASK) == 0), \ + (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \ (B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ)) #define SET_PREV_BLK_FREE(AP,B) \ (ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \ @@ -235,12 +245,12 @@ typedef UWord alcu_atag_t; # define SET_MBC_ABLK_HDR(B, Sz, F, C) \ (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \ - ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ + ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ (B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT))) # define SET_MBC_FBLK_HDR(B, Sz, F, C) \ (ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \ - ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ + ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ (B)->bhdr = ((Sz) | (F)), \ (B)->u.carrier = (C)) @@ -257,8 +267,8 @@ typedef UWord alcu_atag_t; # define SET_BLK_FREE(B) \ (ASSERT(!IS_PREV_BLK_FREE(B)), \ (B)->u.carrier = ABLK_TO_MBC(B), \ - (B)->bhdr |= THIS_FREE_BLK_HDR_FLG, \ - (B)->bhdr &= (MBC_ABLK_SZ_MASK|FLG_MASK)) + (B)->bhdr &= (MBC_ABLK_SZ_MASK|LAST_BLK_HDR_FLG), \ + (B)->bhdr |= THIS_FREE_BLK_HDR_FLG) # define SET_BLK_ALLOCED(B) \ (ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ @@ -270,15 +280,16 @@ typedef UWord alcu_atag_t; # define MBC_SZ_MAX_LIMIT ((UWord)~0) # define SET_MBC_ABLK_HDR(B, Sz, F, C) \ - (ASSERT(((Sz) & FLG_MASK) == 0), \ - ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ - ASSERT((UWord)(F) < SBC_BLK_HDR_FLG), \ + (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \ + ASSERT(((F) & ~BLK_FLG_MASK) == 0), \ + ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ (B)->bhdr = ((Sz) | (F)), \ (B)->carrier = (C)) # define SET_MBC_FBLK_HDR(B, Sz, F, C) \ - (ASSERT(((Sz) & FLG_MASK) == 0), \ - ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ + (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \ + ASSERT(((F) & ~BLK_FLG_MASK) == 0), \ + ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ (B)->bhdr = ((Sz) | (F)), \ (B)->carrier = (C)) @@ -297,7 +308,7 @@ typedef UWord alcu_atag_t; #endif /* !MBC_ABLK_OFFSET_BITS */ #define SET_SBC_BLK_HDR(B, Sz) \ - (ASSERT(((Sz) & FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG))) + (ASSERT(((Sz) & BLK_FLG_MASK) == 0), (B)->bhdr = ((Sz) | 
(SBC_BLK_HDR_FLG))) #define BLK_UMEM_SZ(B) \ @@ -320,7 +331,7 @@ typedef UWord alcu_atag_t; #define GET_PREV_FREE_BLK_HDR_FLG(B) \ ((B)->bhdr & PREV_FREE_BLK_HDR_FLG) #define GET_BLK_HDR_FLGS(B) \ - ((B)->bhdr & FLG_MASK) + ((B)->bhdr & BLK_FLG_MASK) #define NXT_BLK(B) \ (ASSERT(IS_MBC_BLK(B)), \ @@ -419,7 +430,7 @@ do { \ #define SCH_SBC SBC_CARRIER_HDR_FLAG #define SET_CARRIER_HDR(C, Sz, F, AP) \ - (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \ + (ASSERT(((Sz) & CRR_FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \ erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) #define BLK_TO_SBC(B) \ @@ -444,8 +455,8 @@ do { \ (!IS_SB_CARRIER((C))) #define SET_CARRIER_SZ(C, SZ) \ - (ASSERT(((SZ) & FLG_MASK) == 0), \ - ((C)->chdr = ((C)->chdr & FLG_MASK) | (SZ))) + (ASSERT(((SZ) & CRR_FLG_MASK) == 0), \ + ((C)->chdr = ((C)->chdr & CRR_FLG_MASK) | (SZ))) #define CFLG_SBC (1 << 0) #define CFLG_MBC (1 << 1) @@ -575,10 +586,12 @@ do { \ STAT_MSEG_MBC_ALLOC((AP), csz__); \ else \ STAT_SYS_ALLOC_MBC_ALLOC((AP), csz__); \ - (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks; \ + set_new_allctr_abandon_limit(AP); \ + (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks[(AP)->alloc_no]; \ if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no) \ (AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no; \ - (AP)->mbcs.blocks.curr.size += (CRR)->cpool.blocks_size; \ + (AP)->mbcs.blocks.curr.size += \ + (CRR)->cpool.blocks_size[(AP)->alloc_no]; \ if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size) \ (AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size; \ } while (0) @@ -601,25 +614,33 @@ do { \ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \ } while (0) -#define STAT_MBC_ABANDON(AP, CRR) \ -do { \ - UWord csz__ = CARRIER_SZ((CRR)); \ - if (IS_MSEG_CARRIER((CRR))) \ - STAT_MSEG_MBC_FREE((AP), csz__); \ - else \ - STAT_SYS_ALLOC_MBC_FREE((AP), csz__); \ - ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no \ - >= (CRR)->cpool.blocks); \ - (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks; \ - ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size \ - >= (CRR)->cpool.blocks_size); \ - (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \ +#define STAT_MBC_FREE(AP, CRR) \ +do { \ + UWord csz__ = CARRIER_SZ((CRR)); \ + if (IS_MSEG_CARRIER((CRR))) { \ + STAT_MSEG_MBC_FREE((AP), csz__); \ + } else { \ + STAT_SYS_ALLOC_MBC_FREE((AP), csz__); \ + } \ + set_new_allctr_abandon_limit(AP); \ } while (0) -#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \ +#define STAT_MBC_ABANDON(AP, CRR) \ +do { \ + STAT_MBC_FREE(AP, CRR); \ + ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no \ + >= (CRR)->cpool.blocks[(AP)->alloc_no]); \ + (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks[(AP)->alloc_no]; \ + ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size \ + >= (CRR)->cpool.blocks_size[(AP)->alloc_no]); \ + (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size[(AP)->alloc_no]; \ +} while (0) + +#define STAT_MBC_BLK_ALLOC_CRR(AP, CRR, BSZ) \ do { \ - (CRR)->cpool.blocks++; \ - (CRR)->cpool.blocks_size += (BSZ); \ + (CRR)->cpool.blocks[(AP)->alloc_no]++; \ + (CRR)->cpool.blocks_size[(AP)->alloc_no] += (BSZ); \ + (CRR)->cpool.total_blocks_size += (BSZ); \ } while (0) #define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \ @@ -631,50 +652,67 @@ do { \ cstats__->blocks.curr.size += (BSZ); \ if (cstats__->blocks.max.size < cstats__->blocks.curr.size) \ cstats__->blocks.max.size = cstats__->blocks.curr.size; \ - STAT_MBC_BLK_ALLOC_CRR((CRR), (BSZ)); \ + STAT_MBC_BLK_ALLOC_CRR((AP), (CRR), (BSZ)); \ } while (0) static ERTS_INLINE int 
stat_cpool_mbc_blk_free(Allctr_t *allctr, + ErtsAlcType_t type, Carrier_t *crr, Carrier_t **busy_pcrr_pp, UWord blksz) { + Allctr_t *orig_allctr; + int alloc_no; - ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0); - crr->cpool.blocks--; - ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size >= blksz); - crr->cpool.blocks_size -= blksz; + alloc_no = ERTS_ALC_T2A(type); - if (!busy_pcrr_pp || !*busy_pcrr_pp) - return 0; + ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] > 0); + crr->cpool.blocks[alloc_no]--; + ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] >= blksz); + crr->cpool.blocks_size[alloc_no] -= blksz; + ERTS_ALC_CPOOL_ASSERT(crr->cpool.total_blocks_size >= blksz); + crr->cpool.total_blocks_size -= blksz; - ERTS_ALC_CPOOL_ASSERT(crr == *busy_pcrr_pp); + if (allctr->alloc_no == alloc_no && (!busy_pcrr_pp || !*busy_pcrr_pp)) { + /* This is a local block, so we should not update the pool + * statistics. */ + return 0; + } + + /* This is either a foreign block that's been fetched from the pool, or any + * block that's in the pool. The carrier's owner keeps the statistics for + * both pooled and foreign blocks. */ + + orig_allctr = crr->cpool.orig_allctr; + + ERTS_ALC_CPOOL_ASSERT(alloc_no != allctr->alloc_no || + (crr == *busy_pcrr_pp && allctr == orig_allctr)); #ifdef ERTS_ALC_CPOOL_DEBUG ERTS_ALC_CPOOL_ASSERT( - erts_atomic_dec_read_nob(&allctr->cpool.stat.no_blocks) >= 0); + erts_atomic_dec_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0); ERTS_ALC_CPOOL_ASSERT( - erts_atomic_add_read_nob(&allctr->cpool.stat.blocks_size, + erts_atomic_add_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no], -((erts_aint_t) blksz)) >= 0); #else - erts_atomic_dec_nob(&allctr->cpool.stat.no_blocks); - erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, + erts_atomic_dec_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]); + erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no], -((erts_aint_t) blksz)); #endif return 1; } -#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \ -do { \ - if (!stat_cpool_mbc_blk_free((AP), (CRR), (BPCRRPP), (BSZ))) { \ - CarriersStats_t *cstats__ = &(AP)->mbcs; \ - ASSERT(cstats__->blocks.curr.no > 0); \ - cstats__->blocks.curr.no--; \ - ASSERT(cstats__->blocks.curr.size >= (BSZ)); \ - cstats__->blocks.curr.size -= (BSZ); \ - } \ +#define STAT_MBC_BLK_FREE(AP, TYPE, CRR, BPCRRPP, BSZ, FLGS) \ +do { \ + if (!stat_cpool_mbc_blk_free((AP), (TYPE), (CRR), (BPCRRPP), (BSZ))) { \ + CarriersStats_t *cstats__ = &(AP)->mbcs; \ + ASSERT(cstats__->blocks.curr.no > 0); \ + cstats__->blocks.curr.no--; \ + ASSERT(cstats__->blocks.curr.size >= (BSZ)); \ + cstats__->blocks.curr.size -= (BSZ); \ + } \ } while (0) /* Debug stuff... 
*/ @@ -721,8 +759,8 @@ static void make_name_atoms(Allctr_t *allctr); static Block_t *create_carrier(Allctr_t *, Uint, UWord); static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **); -static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp); -static void dealloc_block(Allctr_t *, void *, ErtsAlcFixList_t *, int); +static void mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp); +static void dealloc_block(Allctr_t *, ErtsAlcType_t, Uint32, void *, ErtsAlcFixList_t *); static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type) { @@ -764,14 +802,14 @@ static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type) } } - return MAKE_ATAG(id, type); + return MAKE_ATAG(id, ERTS_ALC_T2N(type)); } static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag) { Block_t *block; - ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); + ASSERT(DBG_IS_VALID_ATAG(tag)); ASSERT(allocator->atags && p); (void)allocator; @@ -1308,28 +1346,9 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before) #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) #endif +static ERTS_INLINE Allctr_t *get_pref_allctr(void *extra); static void *mbc_alloc(Allctr_t *allctr, Uint size); -typedef struct { - ErtsAllctrDDBlock_t ddblock__; /* must be first */ - ErtsAlcType_t fix_type; -} ErtsAllctrFixDDBlock_t; - -#define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS) - -static ERTS_INLINE void -dealloc_fix_block(Allctr_t *allctr, - ErtsAlcType_t type, - void *ptr, - ErtsAlcFixList_t *fix, - int dec_cc_on_redirect) -{ - /* May be redirected... */ - ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0); - ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE; - dealloc_block(allctr, ptr, fix, dec_cc_on_redirect); -} - static ERTS_INLINE void sched_fix_shrink(Allctr_t *allctr, int on) { @@ -1371,7 +1390,7 @@ fix_cpool_check_shrink(Allctr_t *allctr, if (fix->u.cpool.min_list_size > fix->list_size) fix->u.cpool.min_list_size = fix->list_size; - dealloc_fix_block(allctr, type, p, fix, 0); + dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, p, fix); } } } @@ -1382,11 +1401,9 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size) void *res; ErtsAlcFixList_t *fix; - ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type - && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); - - fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; - ASSERT(size == fix->type_size); + fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)]; + ASSERT(type == fix->type && size == fix->type_size); + ASSERT(size >= sizeof(ErtsAllctrDDBlock_t)); res = fix->list; if (res) { @@ -1415,21 +1432,39 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size) static ERTS_INLINE void fix_cpool_free(Allctr_t *allctr, ErtsAlcType_t type, + Uint32 flags, void *p, - Carrier_t **busy_pcrr_pp, - int unuse) + Carrier_t **busy_pcrr_pp) { ErtsAlcFixList_t *fix; + Allctr_t *fix_allctr; + + /* If this isn't a fix allocator we need to update the fix list of our + * neighboring fix_alloc to keep the statistics consistent. 
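+ * E.g. a fix block whose carrier has migrated to a binary_alloc instance is freed through that instance, but the fix list and counters live in the fix_alloc instance preferred by this thread (a per-scheduler instance, hence the !thread_safe assertion below).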
*/ + if (!allctr->fix) { + ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE]; + fix_allctr = get_pref_allctr(tspec); + ASSERT(!fix_allctr->thread_safe); + ASSERT(allctr != fix_allctr); + } + else { + fix_allctr = allctr; + } - ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type - && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); + ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(fix_allctr)); + ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr)); - fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; + fix = &fix_allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)]; + ASSERT(type == fix->type); - if (unuse) - fix->u.cpool.used--; + if (!(flags & DEALLOC_FLG_FIX_SHRINK)) { + fix->u.cpool.used--; + } - if ((!busy_pcrr_pp || !*busy_pcrr_pp) + /* We don't want foreign blocks to be long-lived, so we skip recycling if + * allctr != fix_allctr. */ + if (allctr == fix_allctr + && (!busy_pcrr_pp || !*busy_pcrr_pp) && !fix->u.cpool.shrink_list && fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) { *((void **) p) = fix->list; @@ -1442,7 +1477,7 @@ fix_cpool_free(Allctr_t *allctr, if (IS_SBC_BLK(blk)) destroy_carrier(allctr, blk, NULL); else - mbc_free(allctr, p, busy_pcrr_pp); + mbc_free(allctr, type, p, busy_pcrr_pp); fix->u.cpool.allocated--; fix_cpool_check_shrink(allctr, type, fix, busy_pcrr_pp); } @@ -1469,7 +1504,7 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) fix->u.cpool.shrink_list = fix->u.cpool.min_list_size; fix->u.cpool.min_list_size = fix->list_size; } - type = (ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE); + type = ERTS_ALC_N2T((ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE)); for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) { void *ptr; @@ -1483,7 +1518,7 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) fix->list = *((void **) ptr); fix->list_size--; fix->u.cpool.shrink_list--; - dealloc_fix_block(allctr, type, ptr, fix, 0); + dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, ptr, fix); } if (fix->u.cpool.min_list_size > fix->list_size) fix->u.cpool.min_list_size = fix->list_size; @@ -1509,11 +1544,9 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size) ErtsAlcFixList_t *fix; void *res; - ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type - && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); - - fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; - ASSERT(size == fix->type_size); + fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)]; + ASSERT(type == fix->type && size == fix->type_size); + ASSERT(size >= sizeof(ErtsAllctrDDBlock_t)); ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); fix->u.nocpool.used++; @@ -1530,7 +1563,7 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size) if (IS_SBC_BLK(blk)) destroy_carrier(allctr, blk, NULL); else - mbc_free(allctr, p, NULL); + mbc_free(allctr, type, p, NULL); fix->u.nocpool.allocated--; } ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); @@ -1565,10 +1598,8 @@ fix_nocpool_free(Allctr_t *allctr, Block_t *blk; ErtsAlcFixList_t *fix; - ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type - && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE); - - fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE]; + fix = &allctr->fix[ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE]; + ASSERT(fix->type == type); ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1); fix->u.nocpool.used--; @@ -1587,7 +1618,7 @@ fix_nocpool_free(Allctr_t *allctr, if (IS_SBC_BLK(blk)) destroy_carrier(allctr, blk, NULL); else - mbc_free(allctr, p, NULL); + mbc_free(allctr, type, p, NULL); p = fix->list; fix->list = *((void **) p); fix->list_size--; @@ -1598,7 +1629,7 @@ fix_nocpool_free(Allctr_t 
*allctr, if (IS_SBC_BLK(blk)) destroy_carrier(allctr, blk, NULL); else - mbc_free(allctr, p, NULL); + mbc_free(allctr, type, p, NULL); ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0); } @@ -1639,7 +1670,7 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) ptr = fix->list; fix->list = *((void **) ptr); fix->list_size--; - dealloc_block(allctr, ptr, NULL, 0); + dealloc_block(allctr, fix->type, 0, ptr, NULL); fix->u.nocpool.allocated--; } if (fix->list_size != 0) { @@ -1681,6 +1712,7 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr) } +static UWord allctr_abandon_limit(Allctr_t *allctr); static void set_new_allctr_abandon_limit(Allctr_t*); static void abandon_carrier(Allctr_t*, Carrier_t*); static void poolify_my_carrier(Allctr_t*, Carrier_t*); @@ -1802,7 +1834,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, static void init_dd_queue(ErtsAllctrDDQueue_t *ddq) { - erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL); + erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL); erts_atomic_init_nob(&ddq->tail.data.last, (erts_aint_t) &ddq->tail.data.marker); erts_atomic_init_nob(&ddq->tail.data.um_refc[0], 0); @@ -1823,17 +1855,17 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit) erts_aint_t itmp; ErtsAllctrDDBlock_t *enq, *this = ptr; - erts_atomic_init_nob(&this->atmc_next, ERTS_AINT_NULL); + erts_atomic_init_nob(&this->u.atmc_next, ERTS_AINT_NULL); /* Enqueue at end of list... */ enq = (ErtsAllctrDDBlock_t *) erts_atomic_read_nob(&ddq->tail.data.last); - itmp = erts_atomic_cmpxchg_relb(&enq->atmc_next, + itmp = erts_atomic_cmpxchg_relb(&enq->u.atmc_next, (erts_aint_t) this, ERTS_AINT_NULL); if (itmp == ERTS_AINT_NULL) { /* We are required to move last pointer */ #ifdef DEBUG - ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->atmc_next)); + ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->u.atmc_next)); ASSERT(((erts_aint_t) enq) == erts_atomic_xchg_relb(&ddq->tail.data.last, (erts_aint_t) this)); @@ -1851,8 +1883,8 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit) while (1) { erts_aint_t itmp2; - erts_atomic_set_nob(&this->atmc_next, itmp); - itmp2 = erts_atomic_cmpxchg_relb(&enq->atmc_next, + erts_atomic_set_nob(&this->u.atmc_next, itmp); + itmp2 = erts_atomic_cmpxchg_relb(&enq->u.atmc_next, (erts_aint_t) this, itmp); if (itmp == itmp2) @@ -1861,7 +1893,7 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit) itmp = itmp2; else { enq = (ErtsAllctrDDBlock_t *) itmp2; - itmp = erts_atomic_read_acqb(&enq->atmc_next); + itmp = erts_atomic_read_acqb(&enq->u.atmc_next); ASSERT(itmp != ERTS_AINT_NULL); } i++; @@ -1877,8 +1909,8 @@ check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast) erts_aint_t itmp; ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast; - erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL); - itmp = erts_atomic_cmpxchg_relb(&last->atmc_next, + erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL); + itmp = erts_atomic_cmpxchg_relb(&last->u.atmc_next, (erts_aint_t) &ddq->tail.data.marker, ERTS_AINT_NULL); if (itmp == ERTS_AINT_NULL) { @@ -1929,7 +1961,7 @@ ddq_dequeue(ErtsAllctrDDQueue_t *ddq) ASSERT(ddq->head.used_marker); ddq->head.used_marker = 0; blk = ((ErtsAllctrDDBlock_t *) - erts_atomic_read_nob(&blk->atmc_next)); + erts_atomic_read_nob(&blk->u.atmc_next)); if (blk == ddq->head.unref_end) { ddq->head.first = blk; return NULL; @@ -1937,7 +1969,7 @@ 
ddq_dequeue(ErtsAllctrDDQueue_t *ddq) } ddq->head.first = ((ErtsAllctrDDBlock_t *) - erts_atomic_read_nob(&blk->atmc_next)); + erts_atomic_read_nob(&blk->u.atmc_next)); ASSERT(ddq->head.first); @@ -1999,19 +2031,13 @@ check_pending_dealloc_carrier(Allctr_t *allctr, int *need_more_work); static void -handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr) +handle_delayed_fix_dealloc(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags, + void *ptr) { - ErtsAlcType_t type; - - type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; - - ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE - <= (type & ~ERTS_ALC_FIX_NO_UNUSE)); - ASSERT((type & ~ERTS_ALC_FIX_NO_UNUSE) - <= ERTS_ALC_N_MAX_A_FIXED_SIZE); + ASSERT(ERTS_ALC_IS_FIX_TYPE(type)); if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) - fix_nocpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE), ptr); + fix_nocpool_free(allctr, type, ptr); else { Block_t *blk = UMEM2BLK(ptr); Carrier_t *busy_pcrr_p; @@ -2026,20 +2052,24 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr) NULL, &busy_pcrr_p); if (used_allctr == allctr) { doit: - fix_cpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE), - ptr, &busy_pcrr_p, - !(type & ERTS_ALC_FIX_NO_UNUSE)); + fix_cpool_free(allctr, type, flags, ptr, &busy_pcrr_p); clear_busy_pool_carrier(allctr, busy_pcrr_p); } else { /* Carrier migrated; need to redirect block to new owner... */ - int cinit = used_allctr->dd.ix - allctr->dd.ix; + ErtsAllctrDDBlock_t *dd_block; + int cinit; + + dd_block = (ErtsAllctrDDBlock_t*)ptr; + dd_block->flags = flags; + dd_block->type = type; ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); DEC_CC(allctr->calls.this_free); - ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type; + cinit = used_allctr->dd.ix - allctr->dd.ix; + if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit)) erts_alloc_notify_delayed_dealloc(used_allctr->ix); } @@ -2063,7 +2093,6 @@ handle_delayed_dealloc(Allctr_t *allctr, int need_mr_wrk = 0; int have_checked_incoming = 0; int ops = 0; - ErtsAlcFixList_t *fix; int res; ErtsAllctrDDQueue_t *ddq; @@ -2072,8 +2101,6 @@ handle_delayed_dealloc(Allctr_t *allctr, ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); - fix = allctr->fix; - ddq = &allctr->dd.q; res = 0; @@ -2162,16 +2189,27 @@ handle_delayed_dealloc(Allctr_t *allctr, } } else { + ErtsAllctrDDBlock_t *dd_block; + ErtsAlcType_t type; + Uint32 flags; + + dd_block = (ErtsAllctrDDBlock_t*)ptr; + flags = dd_block->flags; + type = dd_block->type; + + flags |= DEALLOC_FLG_REDIRECTED; + ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) != ErtsContainerStruct(blk, Carrier_t, cpool.homecoming_dd.blk))); INC_CC(allctr->calls.this_free); - if (fix) - handle_delayed_fix_dealloc(allctr, ptr); - else - dealloc_block(allctr, ptr, NULL, 1); + if (ERTS_ALC_IS_FIX_TYPE(type)) { + handle_delayed_fix_dealloc(allctr, type, flags, ptr); + } else { + dealloc_block(allctr, type, flags, ptr, NULL); + } } } @@ -2199,8 +2237,10 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type, void *ptr, int cinit) { - if (allctr->fix) - ((ErtsAllctrFixDDBlock_t*) ptr)->fix_type = type; + ErtsAllctrDDBlock_t *dd_block = ((ErtsAllctrDDBlock_t*)ptr); + + dd_block->type = type; + dd_block->flags = 0; if (ddq_enqueue(&allctr->dd.q, ptr, cinit)) erts_alloc_notify_delayed_dealloc(allctr->ix); @@ -2230,10 +2270,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp) if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) return; - allctr->cpool.check_limit_count--; - if (--allctr->cpool.check_limit_count <= 0) - set_new_allctr_abandon_limit(allctr); - + ASSERT(allctr->cpool.abandon_limit == 
allctr_abandon_limit(allctr)); ASSERT(erts_thr_progress_is_managed_thread()); if (allctr->cpool.disable_abandon) @@ -2251,7 +2288,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp) if (allctr->main_carrier == crr) return; - if (crr->cpool.blocks_size > crr->cpool.abandon_limit) + if (crr->cpool.total_blocks_size > crr->cpool.abandon_limit) return; if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID @@ -2287,24 +2324,26 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr, ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL) static void -dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_redirect) +dealloc_block(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags, void *ptr, + ErtsAlcFixList_t *fix) { Block_t *blk = UMEM2BLK(ptr); + ASSERT(!fix || type == fix->type); + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); if (IS_SBC_BLK(blk)) { destroy_carrier(allctr, blk, NULL); if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { - ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; - if (!(type & ERTS_ALC_FIX_NO_UNUSE)) + if (!(flags & DEALLOC_FLG_FIX_SHRINK)) fix->u.cpool.used--; fix->u.cpool.allocated--; } } else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) - mbc_free(allctr, ptr, NULL); + mbc_free(allctr, type, ptr, NULL); else { Carrier_t *busy_pcrr_p; Allctr_t *used_allctr; @@ -2313,22 +2352,29 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ NULL, &busy_pcrr_p); if (used_allctr == allctr) { if (fix) { - ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; - if (!(type & ERTS_ALC_FIX_NO_UNUSE)) + if (!(flags & DEALLOC_FLG_FIX_SHRINK)) fix->u.cpool.used--; fix->u.cpool.allocated--; } - mbc_free(allctr, ptr, &busy_pcrr_p); + mbc_free(allctr, type, ptr, &busy_pcrr_p); clear_busy_pool_carrier(allctr, busy_pcrr_p); } else { /* Carrier migrated; need to redirect block to new owner... 
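 * The block's type and flags are stashed in the block itself (as an ErtsAllctrDDBlock_t) so that the new owner can pick the right fix list and see which parts of the accounting have already been done here.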
*/ - int cinit = used_allctr->dd.ix - allctr->dd.ix; + ErtsAllctrDDBlock_t *dd_block; + int cinit; + + dd_block = (ErtsAllctrDDBlock_t*)ptr; + dd_block->flags = flags; + dd_block->type = type; ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); - if (dec_cc_on_redirect) + if (flags & DEALLOC_FLG_REDIRECTED) DEC_CC(allctr->calls.this_free); + + cinit = used_allctr->dd.ix - allctr->dd.ix; + if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit)) erts_alloc_notify_delayed_dealloc(used_allctr->ix); } @@ -2495,7 +2541,7 @@ mbc_alloc(Allctr_t *allctr, Uint size) } static void -mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) +mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp) { Uint is_first_blk; Uint is_last_blk; @@ -2517,7 +2563,8 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) crr = ABLK_TO_MBC(blk); ERTS_ALC_CPOOL_FREE_OP(allctr); - STAT_MBC_BLK_FREE(allctr, crr, busy_pcrr_pp, blk_sz, alcu_flgs); + + STAT_MBC_BLK_FREE(allctr, type, crr, busy_pcrr_pp, blk_sz, alcu_flgs); is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk); is_last_blk = IS_LAST_BLK(blk); @@ -2586,8 +2633,8 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) } static void * -mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, - Carrier_t **busy_pcrr_pp) +mbc_realloc(Allctr_t *allctr, ErtsAlcType_t type, void *p, Uint size, + Uint32 alcu_flgs, Carrier_t **busy_pcrr_pp) { void *new_p; Uint old_blk_sz; @@ -2625,7 +2672,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, new_blk = UMEM2BLK(new_p); ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp)); sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); - mbc_free(allctr, p, busy_pcrr_pp); + mbc_free(allctr, type, p, busy_pcrr_pp); return new_p; } @@ -2702,7 +2749,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, crr = ABLK_TO_MBC(blk); ERTS_ALC_CPOOL_REALLOC_OP(allctr); - STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); + STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs); STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs); ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size); @@ -2806,7 +2853,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, } ERTS_ALC_CPOOL_REALLOC_OP(allctr); - STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); + STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs); STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs); ASSERT(IS_ALLOCED_BLK(blk)); @@ -2867,7 +2914,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, if (!new_p) return NULL; sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); - mbc_free(allctr, p, busy_pcrr_pp); + mbc_free(allctr, type, p, busy_pcrr_pp); return new_p; @@ -2897,7 +2944,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, 1); new_p = BLK2UMEM(new_blk); sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); - mbc_free(allctr, p, NULL); + mbc_free(allctr, type, p, NULL); return new_p; } else { @@ -2954,7 +3001,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, 0); ERTS_ALC_CPOOL_FREE_OP(allctr); - STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs); + STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs); return new_p; } @@ -2965,7 +3012,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, #define ERTS_ALC_MAX_DEALLOC_CARRIER 10 #define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 100 -#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100 #define 
ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3 #define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0) @@ -2992,14 +3038,11 @@ typedef union { # error "Carrier pool implementation assumes ERTS_ALC_A_MIN > ERTS_ALC_A_INVALID" #endif -/* - * The pool is only allowed to be manipulated by managed - * threads except in the alloc_SUITE:cpool case. In this - * test case carrier_pool[ERTS_ALC_A_INVALID] will be - * used. - */ +/* The pools are only allowed to be manipulated by managed threads except in + * the alloc_SUITE:cpool test, where only test_carrier_pool is used. */ -static ErtsAlcCrrPool_t carrier_pool[ERTS_ALC_A_MAX+1] erts_align_attribute(ERTS_CACHE_LINE_SIZE); +static ErtsAlcCrrPool_t firstfit_carrier_pool; +static ErtsAlcCrrPool_t test_carrier_pool; #define ERTS_ALC_CPOOL_MAX_BACKOFF (1 << 8) @@ -3020,12 +3063,12 @@ backoff(int n) static int cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr) { - ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel; ErtsAlcCPoolData_t *cpdp = sentinel; Carrier_t *tmp_crr; while (1) { - cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~FLG_MASK); + cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~CRR_FLG_MASK); if (cpdp == sentinel) return 0; tmp_crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool)); @@ -3037,7 +3080,7 @@ cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr) static int cpool_is_empty(Allctr_t *allctr) { - ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel; return ((erts_atomic_read_rb(&sentinel->next) == (erts_aint_t) sentinel) && (erts_atomic_read_rb(&sentinel->prev) == (erts_aint_t) sentinel)); } @@ -3127,16 +3170,31 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) { ErtsAlcCPoolData_t *cpd1p, *cpd2p; erts_aint_t val; - ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel; Allctr_t *orig_allctr = crr->cpool.orig_allctr; - ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ + ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */ || erts_thr_progress_is_managed_thread()); - erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size, - (erts_aint_t) crr->cpool.blocks_size); - erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks, - (erts_aint_t) crr->cpool.blocks); + { + int alloc_no = allctr->alloc_no; + + ERTS_ALC_CPOOL_ASSERT( + erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]) >= 0 && + crr->cpool.blocks_size[alloc_no] >= 0); + + ERTS_ALC_CPOOL_ASSERT( + erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0 && + crr->cpool.blocks[alloc_no] >= 0); + + /* We only modify the counter for our current type since the others are + * conceptually still in the pool. 
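+ * E.g. if we are an ets_alloc instance abandoning a carrier that also holds binary_alloc blocks, only our own share of the counters is added back; the binary_alloc share was never deducted when we fetched the carrier.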
*/ + erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no], + ((erts_aint_t) crr->cpool.blocks_size[alloc_no])); + erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no], + ((erts_aint_t) crr->cpool.blocks[alloc_no])); + } + erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size, (erts_aint_t) CARRIER_SZ(crr)); erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers); @@ -3209,10 +3267,10 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr) ErtsAlcCPoolData_t *cpd1p, *cpd2p; erts_aint_t val; #ifdef ERTS_ALC_CPOOL_DEBUG - ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel; #endif - ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ + ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */ || erts_thr_progress_is_managed_thread()); ERTS_ALC_CPOOL_ASSERT(sentinel != &crr->cpool); @@ -3288,28 +3346,43 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr) crr->cpool.thr_prgr = erts_thr_progress_later(NULL); - erts_atomic_add_nob(&prev_allctr->cpool.stat.blocks_size, - -((erts_aint_t) crr->cpool.blocks_size)); - erts_atomic_add_nob(&prev_allctr->cpool.stat.no_blocks, - -((erts_aint_t) crr->cpool.blocks)); - erts_atomic_add_nob(&prev_allctr->cpool.stat.carriers_size, + { + Allctr_t *orig_allctr = crr->cpool.orig_allctr; + int alloc_no = allctr->alloc_no; + + ERTS_ALC_CPOOL_ASSERT(orig_allctr == prev_allctr); + + ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] <= + erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no])); + + ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] <= + erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no])); + + /* We only modify the counters for our current type since the others + * were, conceptually, never taken out of the pool. */ + erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no], + -((erts_aint_t) crr->cpool.blocks_size[alloc_no])); + erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no], + -((erts_aint_t) crr->cpool.blocks[alloc_no])); + + erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size, -((erts_aint_t) CARRIER_SZ(crr))); - erts_atomic_dec_wb(&prev_allctr->cpool.stat.no_carriers); + erts_atomic_dec_wb(&orig_allctr->cpool.stat.no_carriers); + } } static Carrier_t * cpool_fetch(Allctr_t *allctr, UWord size) { - enum { IGNORANT, HAS_SEEN_SENTINEL, THE_LAST_ONE } loop_state; - int i; + int i, seen_sentinel; Carrier_t *crr; Carrier_t *reinsert_crr = NULL; ErtsAlcCPoolData_t *cpdp; ErtsAlcCPoolData_t *cpool_entrance = NULL; ErtsAlcCPoolData_t *sentinel; - ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ + ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */ || erts_thr_progress_is_managed_thread()); i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT; @@ -3411,48 +3484,39 @@ cpool_fetch(Allctr_t *allctr, UWord size) /* * Finally search the shared pool and try to employ foreign carriers */ - sentinel = &carrier_pool[allctr->alloc_no].sentinel; + sentinel = allctr->cpool.sentinel; if (cpool_entrance) { /* * We saw a pooled carrier above, use it as entrance into the pool */ - cpdp = cpool_entrance; } else { /* - * No pooled carried seen above. Start search at cpool sentinel, + * No pooled carrier seen above. Start search at cpool sentinel, * but begin by passing one element before trying to fetch. * This is in order to avoid contention with threads inserting elements.
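 * (The pool is a circular doubly linked list around a sentinel, roughly sentinel <-> crr 1 <-> ... <-> crr N <-> sentinel, and inserters work on the links nearest the sentinel, which makes the first element the most contended one.)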
*/ - cpool_entrance = sentinel; - cpdp = cpool_aint2cpd(cpool_read(&cpool_entrance->prev)); - if (cpdp == sentinel) + cpool_entrance = cpool_aint2cpd(cpool_read(&sentinel->prev)); + if (cpool_entrance == sentinel) goto check_dc_list; } - loop_state = IGNORANT; + cpdp = cpool_entrance; + seen_sentinel = 0; do { erts_aint_t exp; cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); - if (cpdp == cpool_entrance) { - if (cpool_entrance == sentinel) { - cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); - if (cpdp == sentinel) - break; - } - loop_state = THE_LAST_ONE; - } - else if (cpdp == sentinel) { - if (loop_state == HAS_SEEN_SENTINEL) { + if (cpdp == sentinel) { + if (seen_sentinel) { /* We've been here before. cpool_entrance must have been removed */ INC_CC(allctr->cpool.stat.entrance_removed); break; } - cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); - if (cpdp == sentinel) - break; - loop_state = HAS_SEEN_SENTINEL; + seen_sentinel = 1; + continue; } + ASSERT(cpdp != cpool_entrance || seen_sentinel); + crr = ErtsContainerStruct(cpdp, Carrier_t, cpool); exp = erts_atomic_read_rb(&crr->allctr); @@ -3485,7 +3549,7 @@ cpool_fetch(Allctr_t *allctr, UWord size) INC_CC(allctr->cpool.stat.fail_shared); return NULL; } - }while (loop_state != THE_LAST_ONE); + }while (cpdp != cpool_entrance); check_dc_list: /* Last; check our own pending dealloc carrier list... */ @@ -3664,8 +3728,9 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr) crr->cpool.orig_allctr = allctr; crr->cpool.thr_prgr = ERTS_THR_PRGR_INVALID; erts_atomic_init_nob(&crr->cpool.max_size, 0); - crr->cpool.blocks = 0; - crr->cpool.blocks_size = 0; + sys_memset(&crr->cpool.blocks_size, 0, sizeof(crr->cpool.blocks_size)); + sys_memset(&crr->cpool.blocks, 0, sizeof(crr->cpool.blocks)); + crr->cpool.total_blocks_size = 0; if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) crr->cpool.abandon_limit = 0; else { @@ -3680,14 +3745,14 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr) crr->cpool.state = ERTS_MBC_IS_HOME; } -static void -set_new_allctr_abandon_limit(Allctr_t *allctr) + + +static UWord +allctr_abandon_limit(Allctr_t *allctr) { UWord limit; UWord csz; - allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; - csz = allctr->mbcs.curr.norm.mseg.size; csz += allctr->mbcs.curr.norm.sys_alloc.size; @@ -3697,7 +3762,13 @@ set_new_allctr_abandon_limit(Allctr_t *allctr) else limit = (csz/100)*allctr->cpool.util_limit; - allctr->cpool.abandon_limit = limit; + return limit; +} + +static void ERTS_INLINE +set_new_allctr_abandon_limit(Allctr_t *allctr) +{ + allctr->cpool.abandon_limit = allctr_abandon_limit(allctr); } static void @@ -3709,7 +3780,6 @@ abandon_carrier(Allctr_t *allctr, Carrier_t *crr) unlink_carrier(&allctr->mbc_list, crr); allctr->remove_mbc(allctr, crr); - set_new_allctr_abandon_limit(allctr); cpool_insert(allctr, crr); @@ -3762,7 +3832,8 @@ poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr) } static void -cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp) +cpool_read_stat(Allctr_t *allctr, int alloc_no, + UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp) { int i; UWord noc = 0, csz = 0, nob = 0, bsz = 0; @@ -3782,10 +3853,10 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord * ? erts_atomic_read_nob(&allctr->cpool.stat.carriers_size) : 0); tnob = (UWord) (nobp - ? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks) + ? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks[alloc_no]) : 0); tbsz = (UWord) (bszp - ?
erts_atomic_read_nob(&allctr->cpool.stat.blocks_size) + ? erts_atomic_read_nob(&allctr->cpool.stat.blocks_size[alloc_no]) : 0); if (tnoc == noc && tcsz == csz && tnob == nob && tbsz == bsz) break; @@ -4040,6 +4111,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) #if HAVE_ERTS_MSEG mbc_final_touch: #endif + set_new_allctr_abandon_limit(allctr); blk = MBC_TO_FIRST_BLK(allctr, crr); @@ -4258,7 +4330,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) else { ASSERT(IS_MBC_FIRST_FBLK(allctr, blk)); crr = FIRST_BLK_TO_MBC(allctr, blk); - crr_sz = CARRIER_SZ(crr); #ifdef DEBUG if (!allctr->stopped) { @@ -4290,15 +4361,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) else { unlink_carrier(&allctr->mbc_list, crr); -#if HAVE_ERTS_MSEG - if (IS_MSEG_CARRIER(crr)) { - ASSERT(crr_sz % ERTS_SACRR_UNIT_SZ == 0); - STAT_MSEG_MBC_FREE(allctr, crr_sz); - } - else -#endif - STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz); - + STAT_MBC_FREE(allctr, crr); if (allctr->remove_mbc) allctr->remove_mbc(allctr, crr); } @@ -4312,7 +4375,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) LTTNG5(carrier_destroy, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, - crr_sz, + CARRIER_SZ(crr), mbc_stats, sbc_stats); } @@ -4390,6 +4453,8 @@ static struct { Eterm blocks_size; Eterm blocks; + Eterm foreign_blocks; + Eterm calls; Eterm sys_alloc; Eterm sys_free; @@ -4490,6 +4555,7 @@ init_atoms(Allctr_t *allctr) AM_INIT(carriers); AM_INIT(blocks_size); AM_INIT(blocks); + AM_INIT(foreign_blocks); AM_INIT(calls); AM_INIT(sys_alloc); @@ -4625,7 +4691,6 @@ sz_info_fix(Allctr_t *allctr, ErtsAlcFixList_t *fix = &allctr->fix[ix]; UWord alloced = fix->type_size * fix->u.cpool.allocated; UWord used = fix->type_size * fix->u.cpool.used; - ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix; if (print_to_p) { fmtfn_t to = *print_to_p; @@ -4633,14 +4698,14 @@ sz_info_fix(Allctr_t *allctr, erts_print(to, arg, "fix type internal: %s %bpu %bpu\n", - (char *) ERTS_ALC_N2TD(n), + (char *) ERTS_ALC_T2TD(fix->type), alloced, used); } if (hpp || szp) { add_3tup(hpp, szp, &res, - alloc_type_atoms[n], + alloc_type_atoms[ERTS_ALC_T2N(fix->type)], bld_unstable_uint(hpp, szp, alloced), bld_unstable_uint(hpp, szp, used)); } @@ -4653,7 +4718,6 @@ sz_info_fix(Allctr_t *allctr, ErtsAlcFixList_t *fix = &allctr->fix[ix]; UWord alloced = fix->type_size * fix->u.nocpool.allocated; UWord used = fix->type_size*fix->u.nocpool.used; - ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix; if (print_to_p) { fmtfn_t to = *print_to_p; @@ -4661,14 +4725,14 @@ sz_info_fix(Allctr_t *allctr, erts_print(to, arg, "fix type: %s %bpu %bpu\n", - (char *) ERTS_ALC_N2TD(n), + (char *) ERTS_ALC_T2TD(fix->type), alloced, used); } if (hpp || szp) { add_3tup(hpp, szp, &res, - alloc_type_atoms[n], + alloc_type_atoms[ERTS_ALC_T2N(fix->type)], bld_unstable_uint(hpp, szp, alloced), bld_unstable_uint(hpp, szp, used)); } @@ -4741,9 +4805,9 @@ info_cpool(Allctr_t *allctr, noc = csz = nob = bsz = ~0; if (print_to_p || hpp) { if (sz_only) - cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); + cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz); else - cpool_read_stat(allctr, &noc, &csz, &nob, &bsz); + cpool_read_stat(allctr, allctr->alloc_no, &noc, &csz, &nob, &bsz); } if (print_to_p) { @@ -4758,6 +4822,10 @@ info_cpool(Allctr_t *allctr, } if (hpp || szp) { + Eterm foreign_blocks; + int i; + + foreign_blocks = NIL; res = NIL; if (!sz_only) { @@ -4804,22 +4872,61 @@ info_cpool(Allctr_t *allctr, 
add_3tup(hpp, szp, &res, am.entrance_removed, bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)), bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed))); + } add_2tup(hpp, szp, &res, am.carriers_size, bld_unstable_uint(hpp, szp, csz)); - } - if (!sz_only) - add_2tup(hpp, szp, &res, - am.carriers, - bld_unstable_uint(hpp, szp, noc)); + + if (!sz_only) { + add_2tup(hpp, szp, &res, + am.carriers, + bld_unstable_uint(hpp, szp, noc)); + } + add_2tup(hpp, szp, &res, am.blocks_size, bld_unstable_uint(hpp, szp, bsz)); - if (!sz_only) + + if (!sz_only) { add_2tup(hpp, szp, &res, am.blocks, bld_unstable_uint(hpp, szp, nob)); + } + + for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { + const char *name_str; + Eterm name, info; + + if (i == allctr->alloc_no) { + continue; + } + + cpool_read_stat(allctr, i, NULL, NULL, &nob, &bsz); + + if (bsz == 0 && (nob == 0 || sz_only)) { + continue; + } + + name_str = ERTS_ALC_A2AD(i); + info = NIL; + + add_2tup(hpp, szp, &info, + am.blocks_size, + bld_unstable_uint(hpp, szp, bsz)); + + if (!sz_only) { + add_2tup(hpp, szp, &info, + am.blocks, + bld_unstable_uint(hpp, szp, nob)); + } + + name = am_atom_put(name_str, sys_strlen(name_str)); + + add_2tup(hpp, szp, &foreign_blocks, name, info); + } + + add_2tup(hpp, szp, &res, am.foreign_blocks, foreign_blocks); } return res; @@ -5455,6 +5562,19 @@ erts_alcu_info(Allctr_t *allctr, return res; } +void +erts_alcu_foreign_size(Allctr_t *allctr, ErtsAlcType_t alloc_no, AllctrSize_t *size) +{ + if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { + UWord csz, bsz; + cpool_read_stat(allctr, alloc_no, NULL, &csz, NULL, &bsz); + size->carriers = csz; + size->blocks = bsz; + } else { + size->carriers = 0; + size->blocks = 0; + } +} void erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz) @@ -5473,7 +5593,7 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t * if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { UWord csz, bsz; - cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); + cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz); size->blocks += bsz; size->carriers += csz; } @@ -5518,6 +5638,11 @@ do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size) ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); + /* Reject sizes that can't fit into the header word. */ + if (size > ~BLK_FLG_MASK) { + return NULL; + } + #if ALLOC_ZERO_EQ_NULL if (!size) return NULL; @@ -5684,12 +5809,11 @@ do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p, ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); if (p) { - INC_CC(allctr->calls.this_free); - if (allctr->fix) { + if (ERTS_ALC_IS_FIX_TYPE(type)) { if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) - fix_cpool_free(allctr, type, p, busy_pcrr_pp, 1); + fix_cpool_free(allctr, type, 0, p, busy_pcrr_pp); else fix_nocpool_free(allctr, type, p); } @@ -5698,7 +5822,7 @@ do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p, if (IS_SBC_BLK(blk)) destroy_carrier(allctr, blk, NULL); else - mbc_free(allctr, p, busy_pcrr_pp); + mbc_free(allctr, type, p, busy_pcrr_pp); } } } @@ -5800,6 +5924,11 @@ do_erts_alcu_realloc(ErtsAlcType_t type, return res; } + /* Reject sizes that can't fit into the header word. 
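+ * E.g. on a 64-bit system with 8-byte units, ~BLK_FLG_MASK is 0x7ffffffffffffff8, capping block sizes just below 2^63 bytes; anything larger would clobber the flag bits, including ATAG_BLK_HDR_FLG in the top bit.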
*/ + if (size > ~BLK_FLG_MASK) { + return NULL; + } + #if ALLOC_ZERO_EQ_NULL if (!size) { ASSERT(p); @@ -5816,7 +5945,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, if (size < allctr->sbc_threshold) { if (IS_MBC_BLK(blk)) - res = mbc_realloc(allctr, p, size, alcu_flgs, busy_pcrr_pp); + res = mbc_realloc(allctr, type, p, size, alcu_flgs, busy_pcrr_pp); else { Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size; Uint crr_sz; @@ -5875,7 +6004,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, sys_memcpy((void *) res, (void *) p, MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size)); - mbc_free(allctr, p, busy_pcrr_pp); + mbc_free(allctr, type, p, busy_pcrr_pp); } else res = NULL; @@ -6243,6 +6372,7 @@ int erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) { /* erts_alcu_start assumes that allctr has been zeroed */ + int i; if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) { erts_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n", @@ -6266,6 +6396,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->ix = init->ix; allctr->alloc_no = init->alloc_no; + allctr->alloc_strat = init->alloc_strat; + + ASSERT(allctr->alloc_no >= ERTS_ALC_A_MIN && + allctr->alloc_no <= ERTS_ALC_A_MAX); + if (allctr->alloc_no < ERTS_ALC_A_MIN || ERTS_ALC_A_MAX < allctr->alloc_no) allctr->alloc_no = ERTS_ALC_A_INVALID; @@ -6318,8 +6453,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) + sizeof(FreeBlkFtr_t)); if (init->tpref) { Uint sz = ABLK_HDR_SZ; - sz += (init->fix ? - sizeof(ErtsAllctrFixDDBlock_t) : sizeof(ErtsAllctrDDBlock_t)); + sz += sizeof(ErtsAllctrDDBlock_t); sz = UNIT_CEILING(sz); if (sz > allctr->min_block_size) allctr->min_block_size = sz; @@ -6330,15 +6464,23 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->cpool.dc_list.last = NULL; allctr->cpool.abandon_limit = 0; allctr->cpool.disable_abandon = 0; - erts_atomic_init_nob(&allctr->cpool.stat.blocks_size, 0); - erts_atomic_init_nob(&allctr->cpool.stat.no_blocks, 0); + for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { + erts_atomic_init_nob(&allctr->cpool.stat.blocks_size[i], 0); + erts_atomic_init_nob(&allctr->cpool.stat.no_blocks[i], 0); + } erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0); erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0); - allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; if (!init->ts && init->acul && init->acnl) { allctr->cpool.util_limit = init->acul; allctr->cpool.in_pool_limit = init->acnl; allctr->cpool.fblk_min_limit = init->acfml; + + if (allctr->alloc_strat == ERTS_ALC_S_FIRSTFIT) { + allctr->cpool.sentinel = &firstfit_carrier_pool.sentinel; + } + else if (allctr->alloc_no != ERTS_ALC_A_TEST) { + ERTS_INTERNAL_ERROR("Impossible carrier migration config."); + } } else { allctr->cpool.util_limit = 0; @@ -6346,6 +6488,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->cpool.fblk_min_limit = 0; } + /* The invasive tests don't really care whether the pool is enabled or not, + * so we need to set this unconditionally for this allocator type. 
*/ + if (allctr->alloc_no == ERTS_ALC_A_TEST) { + allctr->cpool.sentinel = &test_carrier_pool.sentinel; + } + allctr->sbc_threshold = adjust_sbct(allctr, init->sbct); #if HAVE_ERTS_MSEG @@ -6457,9 +6605,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->fix_shrink_scheduled = 0; for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) { allctr->fix[i].type_size = init->fix_type_size[i]; + allctr->fix[i].type = ERTS_ALC_N2T(i + ERTS_ALC_N_MIN_A_FIXED_SIZE); allctr->fix[i].list_size = 0; allctr->fix[i].list = NULL; - ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t)); if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { allctr->fix[i].u.cpool.min_list_size = 0; allctr->fix[i].u.cpool.shrink_list = 0; @@ -6508,12 +6656,16 @@ erts_alcu_stop(Allctr_t *allctr) void erts_alcu_init(AlcUInit_t *init) { - int i; - for (i = 0; i <= ERTS_ALC_A_MAX; i++) { - ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel; - erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); - erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); - } + ErtsAlcCPoolData_t *sentinel; + + sentinel = &firstfit_carrier_pool.sentinel; + erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); + erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); + + sentinel = &test_carrier_pool.sentinel; + erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); + erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); + ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */ #if HAVE_ERTS_MSEG ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ); @@ -6695,7 +6847,7 @@ static int blockscan_cpool_yielding(blockscan_t *state) { ErtsAlcCPoolData_t *sentinel, *cursor; - sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel; + sentinel = (state->allocator)->cpool.sentinel; cursor = blockscan_restore_cpool_cursor(state); if (ERTS_PROC_IS_EXITING(state->process)) { @@ -6827,11 +6979,8 @@ static int blockscan_sweep_mbcs(blockscan_t *state) static int blockscan_sweep_cpool(blockscan_t *state) { if (state->current_op != blockscan_sweep_cpool) { - ErtsAlcCPoolData_t *sentinel; - SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator); - sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel; - state->cpool_cursor = sentinel; + state->cpool_cursor = (state->allocator)->cpool.sentinel; } state->current_op = blockscan_sweep_cpool; @@ -7115,11 +7264,14 @@ static int gather_ahist_scan(Allctr_t *allocator, alcu_atag_t tag; block = SBC2BLK(allocator, carrier); - tag = GET_BLK_ATAG(block); - ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); + if (BLK_HAS_ATAG(block)) { + tag = GET_BLK_ATAG(block); - gather_ahist_update(state, tag, SBC_BLK_SZ(block)); + ASSERT(DBG_IS_VALID_ATAG(tag)); + + gather_ahist_update(state, tag, SBC_BLK_SZ(block)); + } } else { UWord scanned_bytes = MBC_HEADER_SIZE(allocator); @@ -7130,10 +7282,10 @@ static int gather_ahist_scan(Allctr_t *allocator, while (1) { UWord block_size = MBC_BLK_SZ(block); - if (IS_ALLOCED_BLK(block)) { + if (IS_ALLOCED_BLK(block) && BLK_HAS_ATAG(block)) { alcu_atag_t tag = GET_BLK_ATAG(block); - ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); + ASSERT(DBG_IS_VALID_ATAG(tag)); gather_ahist_update(state, tag, block_size); } @@ -7293,8 +7445,6 @@ int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num, sched_id, &allocator)) { return 0; - } else if (!allocator->atags) { - return 0; } ensure_atoms_initialized(allocator); diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index 
f26ace1534..9ab8589bf3 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -24,6 +24,7 @@ #define ERTS_ALCU_VSN_STR "3.0" #include "erl_alloc_types.h" +#include "erl_alloc.h" #define ERL_THREADS_EMU_INTERNAL__ #include "erl_threads.h" @@ -44,6 +45,7 @@ typedef struct { typedef struct { char *name_prefix; ErtsAlcType_t alloc_no; + ErtsAlcStrat_t alloc_strat; int force; int ix; int ts; @@ -101,6 +103,7 @@ typedef struct { #define ERTS_DEFAULT_ALLCTR_INIT { \ NULL, \ ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\ + ERTS_ALC_S_INVALID, /* (number) alloc_strat: allocator strategy */\ 0, /* (bool) force: force enabled */\ 0, /* (number) ix: instance index */\ 1, /* (bool) ts: thread safe */\ @@ -138,6 +141,7 @@ typedef struct { #define ERTS_DEFAULT_ALLCTR_INIT { \ NULL, \ ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\ + ERTS_ALC_S_INVALID, /* (number) alloc_strat: allocator strategy */\ 0, /* (bool) force: force enabled */\ 0, /* (number) ix: instance index */\ 1, /* (bool) ts: thread safe */\ @@ -188,6 +192,7 @@ Eterm erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *); void erts_alcu_init(AlcUInit_t *); void erts_alcu_current_size(Allctr_t *, AllctrSize_t *, ErtsAlcUFixInfo_t *, int); +void erts_alcu_foreign_size(Allctr_t *, ErtsAlcType_t, AllctrSize_t *); void erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *); erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); @@ -286,10 +291,18 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp); #define UNIT_FLOOR(X) ((X) & UNIT_MASK) #define UNIT_CEILING(X) UNIT_FLOOR((X) + INV_UNIT_MASK) -#define FLG_MASK INV_UNIT_MASK -#define SBC_BLK_SZ_MASK UNIT_MASK -#define MBC_FBLK_SZ_MASK UNIT_MASK -#define CARRIER_SZ_MASK UNIT_MASK +/* We store flags in the bits that no one will ever use. Generally these are + * the bits below the alignment size, but for blocks we also steal the highest + * bit since the header's a size and no one can expect to be able to allocate + * objects that large. 
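+ * E.g. with 8-byte units on a 64-bit system, BLK_FLG_MASK works out to 0x8000000000000007 (the three low alignment bits plus the top bit) and SBC_BLK_SZ_MASK to 0x7ffffffffffffff8.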
*/ +#define HIGHEST_WORD_BIT (((UWord) 1) << (sizeof(UWord) * CHAR_BIT - 1)) + +#define BLK_FLG_MASK (INV_UNIT_MASK | HIGHEST_WORD_BIT) +#define SBC_BLK_SZ_MASK (~BLK_FLG_MASK) +#define MBC_FBLK_SZ_MASK (~BLK_FLG_MASK) + +#define CRR_FLG_MASK INV_UNIT_MASK +#define CRR_SZ_MASK UNIT_MASK #if ERTS_HAVE_MSEG_SUPER_ALIGNED \ || (!HAVE_ERTS_MSEG && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC) @@ -299,9 +312,9 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp); # define ERTS_SUPER_ALIGN_BITS 18 # endif # ifdef ARCH_64 -# define MBC_ABLK_OFFSET_BITS 24 +# define MBC_ABLK_OFFSET_BITS 23 # else -# define MBC_ABLK_OFFSET_BITS 9 +# define MBC_ABLK_OFFSET_BITS 8 /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */ # endif # define ERTS_SACRR_UNIT_SHIFT ERTS_SUPER_ALIGN_BITS @@ -322,18 +335,17 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp); #if MBC_ABLK_OFFSET_BITS # define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS) -# define MBC_ABLK_OFFSET_MASK (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT) -# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK) +# define MBC_ABLK_OFFSET_MASK ((~((UWord)0) << MBC_ABLK_OFFSET_SHIFT) & ~BLK_FLG_MASK) +# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~BLK_FLG_MASK) #else -# define MBC_ABLK_SZ_MASK (~FLG_MASK) +# define MBC_ABLK_SZ_MASK (~BLK_FLG_MASK) #endif #define MBC_ABLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK) #define MBC_FBLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK) #define SBC_BLK_SZ(B) (ASSERT(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK) -#define CARRIER_SZ(C) \ - ((C)->chdr & CARRIER_SZ_MASK) +#define CARRIER_SZ(C) ((C)->chdr & CRR_SZ_MASK) typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t; @@ -351,12 +363,20 @@ typedef struct { #endif } Block_t; -typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t; +typedef struct ErtsAllctrDDBlock__ { + union { + struct ErtsAllctrDDBlock__ *ptr_next; + erts_atomic_t atmc_next; + } u; + ErtsAlcType_t type; + Uint32 flags; +} ErtsAllctrDDBlock_t; -union ErtsAllctrDDBlock_t_ { - erts_atomic_t atmc_next; - ErtsAllctrDDBlock_t *ptr_next; -}; +/* Deallocation was caused by shrinking a fix-list, so usage statistics have + * already been updated. */ +#define DEALLOC_FLG_FIX_SHRINK (1 << 0) +/* Deallocation was redirected to another instance.
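+ * The redirecting instance has already counted the free call, so dealloc_block() reverts its own calls.this_free bump if the block has to be forwarded to yet another instance.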
*/ +#define DEALLOC_FLG_REDIRECTED (1 << 1) typedef struct { Block_t blk; @@ -365,11 +385,10 @@ typedef struct { #endif } ErtsFakeDDBlock_t; - - #define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0) #define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1) #define LAST_BLK_HDR_FLG (((UWord) 1) << 2) +#define ATAG_BLK_HDR_FLG HIGHEST_WORD_BIT #define SBC_BLK_HDR_FLG /* Special flag combo for (allocated) SBC blocks */\ (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) @@ -381,9 +400,9 @@ typedef struct { #define HOMECOMING_MBC_BLK_HDR (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) #define IS_FREE_LAST_MBC_BLK(B) \ - (((B)->bhdr & FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)) + (((B)->bhdr & BLK_FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)) -#define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG) +#define IS_SBC_BLK(B) (((B)->bhdr & SBC_BLK_HDR_FLG) == SBC_BLK_HDR_FLG) #define IS_MBC_BLK(B) (!IS_SBC_BLK((B))) #define IS_FREE_BLK(B) (ASSERT(IS_MBC_BLK(B)), \ (B)->bhdr & THIS_FREE_BLK_HDR_FLG) @@ -394,7 +413,8 @@ typedef struct { # define ABLK_TO_MBC(B) \ (ASSERT(IS_MBC_BLK(B) && !IS_FREE_BLK(B)), \ (Carrier_t*)((ERTS_SACRR_UNIT_FLOOR((UWord)(B)) - \ - (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << ERTS_SACRR_UNIT_SHIFT)))) + ((((B)->bhdr & ~BLK_FLG_MASK) >> MBC_ABLK_OFFSET_SHIFT) \ + << ERTS_SACRR_UNIT_SHIFT)))) # define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B)) #else # define FBLK_TO_MBC(B) ((B)->carrier) @@ -433,8 +453,9 @@ typedef struct { ErtsThrPrgrVal thr_prgr; erts_atomic_t max_size; UWord abandon_limit; - UWord blocks; - UWord blocks_size; + UWord blocks[ERTS_ALC_A_MAX + 1]; + UWord blocks_size[ERTS_ALC_A_MAX + 1]; + UWord total_blocks_size; enum { ERTS_MBC_IS_HOME, ERTS_MBC_WAS_POOLED, @@ -452,7 +473,7 @@ struct Carrier_t_ { }; #define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ - ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) + ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~CRR_FLG_MASK)) typedef struct { Carrier_t *first; @@ -530,7 +551,6 @@ typedef struct { } head; } ErtsAllctrDDQueue_t; - typedef struct { size_t type_size; SWord list_size; @@ -549,6 +569,7 @@ typedef struct { UWord used; } cpool; } u; + ErtsAlcType_t type; } ErtsAlcFixList_t; struct Allctr_t_ { @@ -569,6 +590,9 @@ struct Allctr_t_ { /* Allocator number */ ErtsAlcType_t alloc_no; + /* Allocator strategy */ + ErtsAlcStrat_t alloc_strat; + /* Instance index */ int ix; @@ -617,6 +641,9 @@ struct Allctr_t_ { AOFF_RBTree_t* pooled_tree; CarrierList_t dc_list; + /* the sentinel of the cpool we're attached to */ + ErtsAlcCPoolData_t *sentinel; + UWord abandon_limit; int disable_abandon; int check_limit_count; @@ -624,8 +651,8 @@ struct Allctr_t_ { UWord in_pool_limit; /* acnl */ UWord fblk_min_limit; /* acmfl */ struct { - erts_atomic_t blocks_size; - erts_atomic_t no_blocks; + erts_atomic_t blocks_size[ERTS_ALC_A_MAX + 1]; + erts_atomic_t no_blocks[ERTS_ALC_A_MAX + 1]; erts_atomic_t carriers_size; erts_atomic_t no_carriers; CallCounter_t fail_pooled; diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c index 3f0ab33597..0e3e4c890a 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.c +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c @@ -107,9 +107,11 @@ typedef struct AOFF_Carrier_t_ AOFF_Carrier_t; struct AOFF_Carrier_t_ { Carrier_t crr; - AOFF_RBTree_t rbt_node; /* My node in the carrier tree */ - AOFF_RBTree_t* root; /* Root of my block tree */ + AOFF_RBTree_t rbt_node; /* My node in the carrier tree */ + 
AOFF_RBTree_t* root; /* Root of my block tree */ + enum AOFFSortOrder blk_order; }; + #define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node) /* @@ -281,15 +283,28 @@ erts_aoffalc_start(AOFFAllctr_t *alc, sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t)); + if (aoffinit->blk_order == FF_CHAOS) { + const enum AOFFSortOrder orders[3] = {FF_AOFF, FF_AOBF, FF_BF}; + int index = init->ix % (sizeof(orders) / sizeof(orders[0])); + + ASSERT(init->alloc_no == ERTS_ALC_A_TEST); + aoffinit->blk_order = orders[index]; + } + + if (aoffinit->crr_order == FF_CHAOS) { + const enum AOFFSortOrder orders[2] = {FF_AGEFF, FF_AOFF}; + int index = init->ix % (sizeof(orders) / sizeof(orders[0])); + + ASSERT(init->alloc_no == ERTS_ALC_A_TEST); + aoffinit->crr_order = orders[index]; + } + alc->blk_order = aoffinit->blk_order; alc->crr_order = aoffinit->crr_order; allctr->mbc_header_size = sizeof(AOFF_Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; - allctr->min_block_size = (aoffinit->blk_order == FF_BF - ? (offsetof(AOFF_RBTree_t, u.next) - + ErtsSizeofMember(AOFF_RBTree_t, u.next)) - : offsetof(AOFF_RBTree_t, u)); + allctr->min_block_size = sizeof(AOFF_RBTree_t); allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR; @@ -512,14 +527,15 @@ tree_insert_fixup(AOFF_RBTree_t** root, AOFF_RBTree_t *blk) static void aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk) { - AOFFAllctr_t* alc = (AOFFAllctr_t*)allctr; AOFF_RBTree_t* del = (AOFF_RBTree_t*)blk; AOFF_Carrier_t *crr = (AOFF_Carrier_t*) FBLK_TO_MBC(&del->hdr); + (void)allctr; + ASSERT(crr->rbt_node.hdr.bhdr == crr->root->max_sz); - HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); + HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0); - if (alc->blk_order == FF_BF) { + if (crr->blk_order == FF_BF) { ASSERT(del->flags & IS_BF_FLG); if (IS_LIST_ELEM(del)) { /* Remove from list */ @@ -540,14 +556,14 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk) replace(&crr->root, (AOFF_RBTree_t*)del, LIST_NEXT(del)); - HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); + HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0); return; } } rbt_delete(&crr->root, (AOFF_RBTree_t*)del); - HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); + HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0); /* Update the carrier tree with a potentially new (lower) max_sz */ @@ -737,17 +753,18 @@ rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del) static void aoff_link_free_block(Allctr_t *allctr, Block_t *block) { - AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr; AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block; AOFF_RBTree_t *crr_node; AOFF_Carrier_t *blk_crr = (AOFF_Carrier_t*) FBLK_TO_MBC(block); Uint blk_sz = AOFF_BLK_SZ(blk); + (void)allctr; + ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(&blk_crr->crr)); ASSERT(blk_crr->rbt_node.hdr.bhdr == (blk_crr->root ? 
blk_crr->root->max_sz : 0)); - HARD_CHECK_TREE(&blk_crr->crr, alc->blk_order, blk_crr->root, 0); + HARD_CHECK_TREE(&blk_crr->crr, blk_crr->blk_order, blk_crr->root, 0); - rbt_insert(alc->blk_order, &blk_crr->root, blk); + rbt_insert(blk_crr->blk_order, &blk_crr->root, blk); /* * Update carrier tree with a potentially new (larger) max_sz @@ -891,7 +908,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, /* Get block within carrier tree */ #ifdef HARD_DEBUG - dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, size); + dbg_blk = HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, size); #endif blk = rbt_search(crr->root, size); @@ -904,7 +921,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, if (!blk) return NULL; - if (cand_blk && cmp_cand_blk(alc->blk_order, cand_blk, blk) < 0) { + if (cand_blk && cmp_cand_blk(crr->blk_order, cand_blk, blk) < 0) { return NULL; /* cand_blk was better */ } @@ -927,21 +944,28 @@ static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier) AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; AOFF_RBTree_t **root = &alc->mbc_root; + Sint64 bt = get_birth_time(); HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); crr->rbt_node.hdr.bhdr = 0; - if (alc->crr_order == FF_AGEFF || IS_DEBUG) { - Sint64 bt = get_birth_time(); - crr->rbt_node.u.birth_time = bt; - crr->crr.cpool.pooled.u.birth_time = bt; - } + + /* While birth time is only used for FF_AGEFF, we have to set it for all + * types as we can be migrated to an instance that uses it and we don't + * want to mess its order up. */ + crr->rbt_node.u.birth_time = bt; + crr->crr.cpool.pooled.u.birth_time = bt; + rbt_insert(alc->crr_order, root, &crr->rbt_node); /* aoff_link_free_block will add free block later */ crr->root = NULL; HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); + + /* When a carrier has been migrated, its block order may differ from that + * of the allocator it's been migrated to. */ + crr->blk_order = alc->blk_order; } #define IS_CRR_IN_TREE(CRR,ROOT) \ diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h index 68df9e0a49..9c9b98da86 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.h +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h @@ -32,7 +32,12 @@ enum AOFFSortOrder { FF_AGEFF = 0, /* carrier trees only */ FF_AOFF = 1, FF_AOBF = 2, /* block trees only */ - FF_BF = 3 /* block trees only */ + FF_BF = 3, /* block trees only */ + + FF_CHAOS = -1 /* A test-specific sort order that picks any of the above + * after instance id. Used to test that carriers created + * under one order will work fine after being migrated + * to another. */ }; typedef struct { diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index a2610bf2e1..5b3f091ccc 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -208,8 +208,8 @@ typedef struct _ac_trie { typedef struct _bm_data { byte *x; Sint len; + Sint *badshift; Sint *goodshift; - Sint badshift[ALPHABET_SIZE]; } BMData; typedef struct _ac_find_all_state { @@ -319,16 +319,104 @@ static void dump_ac_node(ACNode *node, int indent, int ch); * The needed size of binary data for a search structure - given the * accumulated string lengths. 
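 * E.g. with 64-bit Uint and ALPHABET_SIZE == 256, BM_SIZE_MULTI(5) reserves MYALIGN(5*8) bytes for goodshift, MYALIGN(256*8) bytes for badshift, MYALIGN(5) bytes for the saved pattern and MYALIGN(sizeof(BMData)) for the struct; the badshift table dominates for short patterns.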
*/ -#define BM_SIZE(StrLen) /* StrLen: length of searchstring */ \ -((MYALIGN(sizeof(Sint) * (StrLen))) + /* goodshift array */ \ - MYALIGN(StrLen) + /* searchstring saved */ \ - (MYALIGN(sizeof(BMData)))) /* Structure */ +#define BM_SIZE_SINGLE() /* Single byte search string */ \ +(MYALIGN(1) + /* searchstring saved */ \ + (MYALIGN(sizeof(BMData)))) /* Structure */ + +#define BM_SIZE_MULTI(StrLen) /* StrLen: length of searchstring */ \ +((MYALIGN(sizeof(Uint) * (StrLen))) + /* goodshift array */ \ + (MYALIGN(sizeof(Uint) * ALPHABET_SIZE)) + /* badshift array */ \ + MYALIGN(StrLen) + /* searchstring saved */ \ + (MYALIGN(sizeof(BMData)))) /* Structure */ #define AC_SIZE(StrLens) /* StrLens: sum of all searchstring lengths */ \ ((MYALIGN(sizeof(ACNode)) * \ ((StrLens)+1)) + /* The actual nodes (including rootnode) */ \ MYALIGN(sizeof(ACTrie))) /* Structure */ +/* + * Boyer Moore - most obviously implemented more or less exactly as + * Christian Charras and Thierry Lecroq describe it in "Handbook of + * Exact String-Matching Algorithms" + * http://www-igm.univ-mlv.fr/~lecroq/string/ + */ + +/* + * Call this to compute badshifts array + */ +static void compute_badshifts(BMData *bmd) +{ + Sint i; + Sint m = bmd->len; + + for (i = 0; i < ALPHABET_SIZE; ++i) { + bmd->badshift[i] = m; + } + for (i = 0; i < m - 1; ++i) { + bmd->badshift[bmd->x[i]] = m - i - 1; + } +} + +/* Helper for "compute_goodshifts" */ +static void compute_suffixes(byte *x, Sint m, Sint *suffixes) +{ + int f,g,i; + + suffixes[m - 1] = m; + + f = 0; /* To avoid use before set warning */ + + g = m - 1; + + for (i = m - 2; i >= 0; --i) { + if (i > g && suffixes[i + m - 1 - f] < i - g) { + suffixes[i] = suffixes[i + m - 1 - f]; + } else { + if (i < g) { + g = i; + } + f = i; + while ( g >= 0 && x[g] == x[g + m - 1 - f] ) { + --g; + } + suffixes[i] = f - g; + } + } +} + +/* + * Call this to compute goodshift array + */ +static void compute_goodshifts(BMData *bmd) +{ + Sint m = bmd->len; + byte *x = bmd->x; + Sint i, j; + Sint *suffixes = erts_alloc(ERTS_ALC_T_TMP, m * sizeof(Sint)); + + compute_suffixes(x, m, suffixes); + + for (i = 0; i < m; ++i) { + bmd->goodshift[i] = m; + } + + j = 0; + + for (i = m - 1; i >= -1; --i) { + if (i == -1 || suffixes[i] == i + 1) { + while (j < m - 1 - i) { + if (bmd->goodshift[j] == m) { + bmd->goodshift[j] = m - 1 - i; + } + ++j; + } + } + } + for (i = 0; i <= m - 2; ++i) { + bmd->goodshift[m - 1 - suffixes[i]] = m - 1 - i; + } + erts_free(ERTS_ALC_T_TMP, suffixes); +} /* * Callback for the magic binary @@ -377,20 +465,37 @@ static ACTrie *create_acdata(MyAllocator *my, Uint len, /* * The same initialization of allocator and basic data for Boyer-Moore. + * For single byte, we don't use goodshift and badshift, only memchr. 
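+ * (With a one-byte needle every computed shift would be 1, so the shift + * tables could never skip anything; plain memchr(), which most libc + * implementations vectorize, is strictly better in that case.)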
*/ static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len, Binary **the_bin /* out */) { - Uint datasize = BM_SIZE(len); + Uint datasize; BMData *bmd; - Binary *mb = erts_create_magic_binary(datasize,cleanup_my_data_bm); - byte *data = ERTS_MAGIC_BIN_DATA(mb); + Binary *mb; + byte *data; + + if(len > 1) { + datasize = BM_SIZE_MULTI(len); + } else { + datasize = BM_SIZE_SINGLE(); + } + + mb = erts_create_magic_binary(datasize,cleanup_my_data_bm); + data = ERTS_MAGIC_BIN_DATA(mb); init_my_allocator(my, datasize, data); bmd = my_alloc(my, sizeof(BMData)); bmd->x = my_alloc(my,len); sys_memcpy(bmd->x,x,len); bmd->len = len; - bmd->goodshift = my_alloc(my,sizeof(Uint) * len); + + if(len > 1) { + bmd->goodshift = my_alloc(my, sizeof(Uint) * len); + bmd->badshift = my_alloc(my, sizeof(Uint) * ALPHABET_SIZE); + compute_badshifts(bmd); + compute_goodshifts(bmd); + } + *the_bin = mb; return bmd; } @@ -711,91 +816,8 @@ static BFReturn ac_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta return (m == 0) ? BF_NOT_FOUND : BF_OK; } -/* - * Boyer Moore - most obviously implemented more or less exactly as - * Christian Charras and Thierry Lecroq describe it in "Handbook of - * Exact String-Matching Algorithms" - * http://www-igm.univ-mlv.fr/~lecroq/string/ - */ - -/* - * Call this to compute badshifts array - */ -static void compute_badshifts(BMData *bmd) -{ - Sint i; - Sint m = bmd->len; - - for (i = 0; i < ALPHABET_SIZE; ++i) { - bmd->badshift[i] = m; - } - for (i = 0; i < m - 1; ++i) { - bmd->badshift[bmd->x[i]] = m - i - 1; - } -} - -/* Helper for "compute_goodshifts" */ -static void compute_suffixes(byte *x, Sint m, Sint *suffixes) -{ - int f,g,i; - - suffixes[m - 1] = m; - - f = 0; /* To avoid use before set warning */ - - g = m - 1; - - for (i = m - 2; i >= 0; --i) { - if (i > g && suffixes[i + m - 1 - f] < i - g) { - suffixes[i] = suffixes[i + m - 1 - f]; - } else { - if (i < g) { - g = i; - } - f = i; - while ( g >= 0 && x[g] == x[g + m - 1 - f] ) { - --g; - } - suffixes[i] = f - g; - } - } -} - -/* - * Call this to compute goodshift array - */ -static void compute_goodshifts(BMData *bmd) -{ - Sint m = bmd->len; - byte *x = bmd->x; - Sint i, j; - Sint *suffixes = erts_alloc(ERTS_ALC_T_TMP, m * sizeof(Sint)); - - compute_suffixes(x, m, suffixes); - - for (i = 0; i < m; ++i) { - bmd->goodshift[i] = m; - } - - j = 0; - - for (i = m - 1; i >= -1; --i) { - if (i == -1 || suffixes[i] == i + 1) { - while (j < m - 1 - i) { - if (bmd->goodshift[j] == m) { - bmd->goodshift[j] = m - 1 - i; - } - ++j; - } - } - } - for (i = 0; i <= m - 2; ++i) { - bmd->goodshift[m - 1 - suffixes[i]] = m - 1 - i; - } - erts_free(ERTS_ALC_T_TMP, suffixes); -} - #define BM_LOOP_FACTOR 10 /* Should we have a higher value? 
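 * These loop factors translate scanned work into scheduler reductions: * roughly BM_LOOP_FACTOR alignment attempts, or MC_LOOP_FACTOR bytes * covered by memchr() below, are charged as one reduction, bounding how * long a single search call runs before trapping with BF_RESTART.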
*/ +#define MC_LOOP_FACTOR 8 static void bm_init_find_first_match(BinaryFindContext *ctx) { @@ -819,13 +841,38 @@ static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack) Sint i; Sint j = state->pos; register Uint reds = *reductions; + byte *pos_pointer; + Sint needle_last = blen - 1; + Sint mem_read = len - needle_last - j; - while (j <= len - blen) { + if (mem_read <= 0) { + return BF_NOT_FOUND; + } + mem_read = MIN(mem_read, reds * MC_LOOP_FACTOR); + ASSERT(mem_read > 0); + + pos_pointer = memchr(&haystack[j + needle_last], needle[needle_last], mem_read); + if (pos_pointer == NULL) { + reds -= mem_read / MC_LOOP_FACTOR; + j += mem_read; + } else { + reds -= (pos_pointer - &haystack[j]) / MC_LOOP_FACTOR; + j = pos_pointer - haystack - needle_last; + } + + /* Ensure we have at least one reduction before entering the loop */ + ++reds; + + for(;;) { + if (j > len - blen) { + *reductions = reds; + return BF_NOT_FOUND; + } if (--reds == 0) { state->pos = j; return BF_RESTART; } - for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) + for (i = needle_last; i >= 0 && needle[i] == haystack[i + j]; --i) ; if (i < 0) { /* found */ *reductions = reds; @@ -835,8 +882,6 @@ static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack) } j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i); } - *reductions = reds; - return BF_NOT_FOUND; } static void bm_init_find_all(BinaryFindContext *ctx) @@ -875,14 +920,38 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta Sint *gs = bmd->goodshift; Sint *bs = bmd->badshift; byte *needle = bmd->x; - Sint i; + Sint i = -1; /* Use memchr on start and on every match */ Sint j = state->pos; Uint m = state->m; Uint allocated = state->allocated; FindallData *out = state->out; register Uint reds = *reductions; + byte *pos_pointer; + Sint needle_last = blen - 1; + Sint mem_read; - while (j <= len - blen) { + for(;;) { + if (i < 0) { + mem_read = len - needle_last - j; + if(mem_read <= 0) { + goto done; + } + mem_read = MIN(mem_read, reds * MC_LOOP_FACTOR); + ASSERT(mem_read > 0); + pos_pointer = memchr(&haystack[j + needle_last], needle[needle_last], mem_read); + if (pos_pointer == NULL) { + reds -= mem_read / MC_LOOP_FACTOR; + j += mem_read; + } else { + reds -= (pos_pointer - &haystack[j]) / MC_LOOP_FACTOR; + j = pos_pointer - haystack - needle_last; + } + /* Ensure we have at least one reduction when resuming the loop */ + ++reds; + } + if (j > len - blen) { + goto done; + } if (--reds == 0) { state->pos = j; state->m = m; @@ -890,7 +959,7 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta state->out = out; return BF_RESTART; } - for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) + for (i = needle_last; i >= 0 && needle[i] == haystack[i + j]; --i) ; if (i < 0) { /* found */ if (m >= allocated) { @@ -912,6 +981,7 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i); } } + done: state->m = m; state->out = out; *reductions = reds; @@ -931,6 +1001,7 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp) Eterm t, b, comp_term = NIL; Uint characters; Uint words; + Uint size; characters = 0; words = 0; @@ -946,11 +1017,12 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp) if (binary_bitsize(b) != 0) { goto badarg; } - if (binary_size(b) == 0) { + size = binary_size(b); + if (size == 0) { goto badarg; } ++words; - characters += 
binary_size(b); + characters += size; } if (is_not_nil(t)) { goto badarg; } @@ -987,8 +1059,6 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp) bytes = erts_get_aligned_binary_bytes(comp_term, &temp_alloc); } bmd = create_bmdata(&my, bytes, characters, &bin); - compute_badshifts(bmd); - compute_goodshifts(bmd); erts_free_aligned_binary_bytes(temp_alloc); CHECK_ALLOCATOR(my); *tag = am_bm; @@ -3012,17 +3082,19 @@ static void dump_bm_data(BMData *bm) } } erts_printf(">>\n"); - erts_printf("GoodShift array:\n"); - for (i = 0; i < bm->len; ++i) { - erts_printf("GoodShift[%d]: %ld\n", i, bm->goodshift[i]); - } - erts_printf("BadShift array:\n"); - j = 0; - for (i = 0; i < ALPHABET_SIZE; i += j) { - for (j = 0; i + j < ALPHABET_SIZE && j < 6; ++j) { - erts_printf("BS[%03d]:%02ld, ", i+j, bm->badshift[i+j]); + if(bm->len > 1) { + erts_printf("GoodShift array:\n"); + for (i = 0; i < bm->len; ++i) { + erts_printf("GoodShift[%d]: %ld\n", i, bm->goodshift[i]); + } + erts_printf("BadShift array:\n"); + j = 0; + for (i = 0; i < ALPHABET_SIZE; i += j) { + for (j = 0; i + j < ALPHABET_SIZE && j < 6; ++j) { + erts_printf("BS[%03d]:%02ld, ", i+j, bm->badshift[i+j]); + } + erts_printf("\n"); } - erts_printf("\n"); } } diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index 4cda0948a0..639aee29dc 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -829,7 +829,7 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1) "cannot be loaded/unloaded"; break; case am_permanent: - errstring = "DDLL driver is permanent an can not be unloaded/loaded"; + errstring = "DDLL driver is permanent and cannot be unloaded/loaded"; break; case am_not_loaded: errstring = "DDLL driver is not loaded"; break; diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 7fada0d548..2a8e7e8858 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -2705,9 +2705,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) goto bld_instruction_counts; } -#ifdef DEBUG ASSERT(endp == hp); -#endif BIF_RET(res); #endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */ diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c index bbc64eb9aa..e0b9202fe7 100644 --- a/erts/emulator/beam/erl_bif_re.c +++ b/erts/emulator/beam/erl_bif_re.c @@ -532,10 +532,7 @@ re_compile(Process* p, Eterm arg1, Eterm arg2) int options = 0; int pflags = 0; int unicode = 0; -#ifdef DEBUG int buffres; -#endif - if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL,NULL,NULL) < 0) { @@ -556,12 +553,8 @@ re_compile(Process* p, Eterm arg1, Eterm arg2) BIF_ERROR(p,BADARG); } expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1); -#ifdef DEBUG - buffres = -#endif - erts_iolist_to_buf(arg1, expr, slen); - - ASSERT(buffres >= 0); + buffres = erts_iolist_to_buf(arg1, expr, slen); + ASSERT(buffres >= 0); (void)buffres; expr[slen]='\0'; result = erts_pcre_compile2(expr, options, &errcode, @@ -1052,9 +1045,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) tmpb[ap->len] = '\0'; } else { ErlDrvSizeT slen; -#ifdef DEBUG int buffres; -#endif if (erts_iolist_size(val, &slen)) { goto error; } @@ -1068,11 +1059,8 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) } } -#ifdef DEBUG - buffres = -#endif - erts_iolist_to_buf(val, tmpb, slen); - ASSERT(buffres >= 0); + buffres = erts_iolist_to_buf(val, tmpb, slen); + ASSERT(buffres >= 0); (void)buffres; tmpb[slen] = '\0'; }
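The erl_bif_re.c hunks above and below repeat one cleanup: instead of compiling the assignment itself conditionally under #ifdef DEBUG, the result is always assigned and then consumed by ASSERT plus a (void) cast. A minimal standalone sketch of the pattern follows (MY_DEBUG, MY_ASSERT, produce and consume are invented names for illustration; ERTS's own ASSERT behaves analogously, expanding to nothing unless a debug build enables it):

#include <stdlib.h>

#ifdef MY_DEBUG
# define MY_ASSERT(Expr) ((Expr) ? (void)0 : abort())  /* active in debug builds */
#else
# define MY_ASSERT(Expr) ((void)0)                     /* compiled out otherwise */
#endif

/* stand-in for a call like erts_iolist_to_buf(); returns >= 0 on success */
static int produce(char *buf, size_t len)
{
    (void)buf; (void)len;
    return 0;
}

void consume(char *buf, size_t len)
{
    int res = produce(buf, len);  /* always assigned, one code path       */
    MY_ASSERT(res >= 0);          /* checked only in debug builds         */
    (void)res;                    /* counts as a read, so release builds
                                   * emit no set-but-unused warning       */
}

The (void) cast is what lets the single unconditional assignment replace the earlier #ifdef DEBUG fragments without introducing compiler warnings in production builds.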
build_one_capture(code,&ri,&sallocated,has_dupnames,tmpb); @@ -1145,9 +1133,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) const char *errstr = ""; int errofset = 0; int capture_count; -#ifdef DEBUG int buffres; -#endif if (pflags & PARSE_FLAG_UNICODE && (!is_binary(arg2) || !is_binary(arg1) || @@ -1161,12 +1147,8 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1); -#ifdef DEBUG - buffres = -#endif - erts_iolist_to_buf(arg2, expr, slen); - - ASSERT(buffres >= 0); + buffres = erts_iolist_to_buf(arg2, expr, slen); + ASSERT(buffres >= 0); (void)buffres; expr[slen]='\0'; result = erts_pcre_compile2(expr, comp_options, &errcode, @@ -1317,9 +1299,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) restart.subject = (char *) (pb->bytes+offset); restart.flags |= RESTART_FLAG_SUBJECT_IN_BINARY; } else { -#ifdef DEBUG int buffres; -#endif handle_iolist: if (erts_iolist_size(arg1, &slength)) { erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector); @@ -1331,11 +1311,8 @@ handle_iolist: } restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength); -#ifdef DEBUG - buffres = -#endif - erts_iolist_to_buf(arg1, restart.subject, slength); - ASSERT(buffres >= 0); + buffres = erts_iolist_to_buf(arg1, restart.subject, slength); + ASSERT(buffres >= 0); (void)buffres; } if (pflags & PARSE_FLAG_REPORT_ERRORS) { @@ -1457,10 +1434,7 @@ re_inspect_2(BIF_ALIST_2) Eterm res; const pcre *code; byte *temp_alloc = NULL; -#ifdef DEBUG - int infores; -#endif - + int infores; if (is_not_tuple(BIF_ARG_1) || (arityval(*tuple_val(BIF_ARG_1)) != 5)) { goto error; @@ -1484,12 +1458,8 @@ re_inspect_2(BIF_ALIST_2) if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0) goto error; -#ifdef DEBUG - infores = -#endif - erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top); - - ASSERT(infores == 0); + infores = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top); + ASSERT(infores == 0); (void)infores; if (top <= 0) { hp = HAlloc(BIF_P, 3); @@ -1497,18 +1467,10 @@ re_inspect_2(BIF_ALIST_2) erts_free_aligned_binary_bytes(temp_alloc); BIF_RET(res); } -#ifdef DEBUG - infores = -#endif - erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize); - + infores = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize); ASSERT(infores == 0); -#ifdef DEBUG - infores = -#endif - erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable); - + infores = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable); ASSERT(infores == 0); has_dupnames = ((options & PCRE_DUPNAMES) != 0); diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index 3a16913473..e82c776e70 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -144,6 +144,42 @@ erts_bs_start_match_2(Process *p, Eterm Binary, Uint Max) return make_matchstate(ms); } +ErlBinMatchState *erts_bs_start_match_3(Process *p, Eterm Binary) +{ + Eterm Orig; + Uint offs; + Uint* hp; + Uint NeededSize; + ErlBinMatchState *ms; + Uint bitoffs; + Uint bitsize; + Uint total_bin_size; + ProcBin* pb; + + ASSERT(is_binary(Binary)); + total_bin_size = binary_size(Binary); + if ((total_bin_size >> (8*sizeof(Uint)-3)) != 0) { + return NULL; + } + + NeededSize = ERL_BIN_MATCHSTATE_SIZE(0); + hp = HeapOnlyAlloc(p, NeededSize); + ms = (ErlBinMatchState *) hp; + ERTS_GET_REAL_BIN(Binary, Orig, offs, bitoffs, bitsize); + pb = (ProcBin *) boxed_val(Orig); + if (pb->thing_word == HEADER_PROC_BIN && pb->flags != 0) { + 
erts_emasculate_writable_binary(pb); + } + + ms->thing_word = HEADER_BIN_MATCHSTATE(0); + (ms->mb).orig = Orig; + (ms->mb).base = binary_bytes(Orig); + (ms->mb).offset = 8 * offs + bitoffs; + (ms->mb).size = total_bin_size * 8 + (ms->mb).offset + bitsize; + + return ms; +} + #ifdef DEBUG # define CHECK_MATCH_BUFFER(MB) check_match_buffer(MB) diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h index 7beef5cfda..50d353e1fa 100644 --- a/erts/emulator/beam/erl_bits.h +++ b/erts/emulator/beam/erl_bits.h @@ -73,12 +73,16 @@ struct erl_bits_state { typedef struct erl_bin_match_struct{ Eterm thing_word; ErlBinMatchBuffer mb; /* Present match buffer */ - Eterm save_offset[1]; /* Saved offsets */ + Eterm save_offset[1]; /* Saved offsets, only valid for contexts + * created through bs_start_match2. */ } ErlBinMatchState; -#define ERL_BIN_MATCHSTATE_SIZE(_Max) ((sizeof(ErlBinMatchState) + (_Max)*sizeof(Eterm))/sizeof(Eterm)) -#define HEADER_BIN_MATCHSTATE(_Max) _make_header(ERL_BIN_MATCHSTATE_SIZE((_Max))-1, _TAG_HEADER_BIN_MATCHSTATE) -#define HEADER_NUM_SLOTS(hdr) (header_arity(hdr)-sizeof(ErlBinMatchState)/sizeof(Eterm)+1) +#define ERL_BIN_MATCHSTATE_SIZE(_Max) \ + ((offsetof(ErlBinMatchState, save_offset) + (_Max)*sizeof(Eterm))/sizeof(Eterm)) +#define HEADER_BIN_MATCHSTATE(_Max) \ + _make_header(ERL_BIN_MATCHSTATE_SIZE((_Max)) - 1, _TAG_HEADER_BIN_MATCHSTATE) +#define HEADER_NUM_SLOTS(hdr) \ + (header_arity(hdr) - (offsetof(ErlBinMatchState, save_offset) / sizeof(Eterm)) + 1) #define make_matchstate(_Ms) make_boxed((Eterm*)(_Ms)) #define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb) @@ -144,6 +148,7 @@ void erts_bits_destroy_state(ERL_BITS_PROTO_0); */ Eterm erts_bs_start_match_2(Process *p, Eterm Bin, Uint Max); +ErlBinMatchState *erts_bs_start_match_3(Process *p, Eterm Bin); Eterm erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb); Eterm erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb); Eterm erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index c009a3bde8..3653c0bf7c 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -90,7 +90,8 @@ enum DbIterSafety { ITER_SAFE /* No need to fixate at all */ }; # define ITERATION_SAFETY(Proc,Tab) \ - ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \ + ((IS_TREE_TABLE((Tab)->common.status) || IS_CATREE_TABLE((Tab)->common.status) \ + || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \ : (((Tab)->common.status & DB_FINE_LOCKED) ? 
ITER_UNSAFE : ITER_SAFE_LOCKED)) #define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP)) @@ -359,6 +360,7 @@ typedef enum { extern DbTableMethod db_hash; extern DbTableMethod db_tree; +extern DbTableMethod db_catree; int user_requested_db_max_tabs; int erts_ets_realloc_always_moves; @@ -407,21 +409,17 @@ static void free_dbtable(void *vtb) { DbTable *tb = (DbTable *) vtb; -#ifdef HARDDEBUG - if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { - erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n", - erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), - tb->common.fixations); - } -#endif - erts_rwmtx_destroy(&tb->common.rwlock); - erts_mtx_destroy(&tb->common.fixlock); - ASSERT(is_immed(tb->common.heir_data)); - if (tb->common.btid) - erts_bin_release(tb->common.btid); + ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); + + erts_rwmtx_destroy(&tb->common.rwlock); + erts_mtx_destroy(&tb->common.fixlock); + ASSERT(is_immed(tb->common.heir_data)); - erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable)); + if (tb->common.btid) + erts_bin_release(tb->common.btid); + + erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable)); } static void schedule_free_dbtable(DbTable* tb) @@ -1076,7 +1074,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3) DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_element_3); UseTmpHeap(2,BIF_P); - if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) { + if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) { goto bail_out; } if (is_tuple(BIF_ARG_3)) { @@ -1165,7 +1163,7 @@ do_update_counter(Process *p, DbTable* tb, UseTmpHeap(5, p); - if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) { + if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) { goto bail_out; } if (is_integer(arg3)) { /* Incr */ @@ -1647,15 +1645,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) val = CAR(list_val(list)); if (val == am_bag) { status |= DB_BAG; - status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET); + status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET); } else if (val == am_duplicate_bag) { status |= DB_DUPLICATE_BAG; - status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET); + status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET); } else if (val == am_ordered_set) { status |= DB_ORDERED_SET; - status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG); + status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_CA_ORDERED_SET); } else if (is_tuple(val)) { Eterm *tp = tuple_val(val); @@ -1716,7 +1714,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) if (is_not_nil(list)) { /* bad opt or not a well formed list */ BIF_ERROR(BIF_P, BADARG); } - if (IS_HASH_TABLE(status)) { + if (IS_TREE_TABLE(status) && is_fine_locked && !(status & DB_PRIVATE)) { + meth = &db_catree; + status |= DB_CA_ORDERED_SET; + status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_ORDERED_SET); + status |= DB_FINE_LOCKED; + } + else if (IS_HASH_TABLE(status)) { meth = &db_hash; if (is_fine_locked && !(status & DB_PRIVATE)) { status |= DB_FINE_LOCKED; @@ -3506,6 +3510,7 @@ void init_db(ErtsDbSpinCount db_spin_count) db_initialize_hash(); db_initialize_tree(); + db_initialize_catree(); /* Non visual BIF to trap to. 
*/ erts_init_trap_export(&ets_select_delete_continue_exp, @@ -4114,6 +4119,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) ret = am_duplicate_bag; } else if (tb->common.status & DB_ORDERED_SET) { ret = am_ordered_set; + } else if (tb->common.status & DB_CA_ORDERED_SET) { + ret = am_ordered_set; } else { /*TT*/ ASSERT(tb->common.status & DB_BAG); ret = am_bag; @@ -4409,6 +4416,12 @@ void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) { if(IS_HASH_TABLE(tb->common.status)) { erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable); + } else if(IS_CATREE_TABLE(tb->common.status)) { + /* erts_lcnt_enable_db_catree_lock_count is not thread safe so + the table needs to get locked */ + db_lock(tb, LCK_WRITE); + erts_lcnt_enable_db_catree_lock_count(&tb->catree, enable); + db_unlock(tb, LCK_WRITE); } } diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 23975d208f..7a915ccea2 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -66,6 +66,7 @@ typedef struct { #include "erl_db_util.h" /* Flags */ #include "erl_db_hash.h" /* DbTableHash */ #include "erl_db_tree.h" /* DbTableTree */ +#include "erl_db_catree.h" /* DbTableCATree */ /*TT*/ Uint erts_get_ets_misc_mem_size(void); @@ -90,6 +91,7 @@ union db_table { DbTableCommon common; /* Any type of db table */ DbTableHash hash; /* Linear hash array specific data */ DbTableTree tree; /* AVL tree specific data */ + DbTableCATree catree; /* CA tree specific data */ DbTableRelease release; /*TT*/ }; @@ -284,6 +286,12 @@ ERTS_GLB_INLINE void erts_db_free(ErtsAlcType_t type, void *ptr, Uint size); +ERTS_GLB_INLINE void erts_schedule_db_free(DbTableCommon* tab, + void (*free_func)(void *), + void *ptr, + ErtsThrPrgrLaterOp *lop, + Uint size); + ERTS_GLB_INLINE void erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size); @@ -304,6 +312,26 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size) } ERTS_GLB_INLINE void +erts_schedule_db_free(DbTableCommon* tab, + void (*free_func)(void *), + void *ptr, + ErtsThrPrgrLaterOp *lop, + Uint size) +{ + ASSERT(ptr != 0); + ASSERT(((void *) tab) != ptr); + ASSERT(size == ERTS_ALC_DBG_BLK_SZ(ptr)); + + /* + * We update table memory stats here as table may already be gone + * when 'free_func' is finally called. + */ + ERTS_DB_ALC_MEM_UPDATE_((DbTable*)tab, size, 0); + + erts_schedule_thr_prgr_later_cleanup_op(free_func, ptr, lop, size); +} + +ERTS_GLB_INLINE void erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size) { ASSERT(ptr != 0); diff --git a/erts/emulator/beam/erl_db_catree.c b/erts/emulator/beam/erl_db_catree.c new file mode 100644 index 0000000000..a8e48bce1b --- /dev/null +++ b/erts/emulator/beam/erl_db_catree.c @@ -0,0 +1,2173 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB and Kjell Winblad 1998-2018. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * %CopyrightEnd% + */ + +/* + * Description: Implementation of ETS ordered_set table type with + * fine-grained synchronization. + * + * Author: Kjell Winblad + * + * This implementation is based on the contention adapting search tree + * (CA tree). The CA tree is a concurrent data structure that + * dynamically adapts its synchronization granularity based on how + * much contention is detected in locks. The following publication + * contains a detailed description of CA trees: + * + * A Contention Adapting Approach to Concurrent Ordered Sets + * Journal of Parallel and Distributed Computing, 2018 + * Kjell Winblad and Konstantinos Sagonas + * https://doi.org/10.1016/j.jpdc.2017.11.007 + * + * The following publication may also be interesting as it discusses + * how the CA tree can be used as an ETS ordered_set table type + * backend: + * + * More Scalable Ordered Set for ETS Using Adaptation + * In Thirteenth ACM SIGPLAN workshop on Erlang (2014) + * Kjell Winblad and Konstantinos Sagonas + * https://doi.org/10.1145/2633448.2633455 + * + * This implementation of the ordered_set ETS table type is only + * activated when the options {write_concurrency, true}, public and + * ordered_set are passed to the ets:new/2 function. This + * implementation is expected to scale better than the default + * implementation (located in "erl_db_tree.c") when concurrent + * processes use the following ETS operations to operate on a table: + * + * delete/2, delete_object/2, first/1, insert/2 (single object), + * insert_new/2 (single object), lookup/2, lookup_element/2, member/2, + * next/2, take/2 and update_element/3 (single object). + * + * Currently, the implementation does not have scalable support for + * the other operations (e.g., select/2). These operations are handled + * by merging all locks so that all terms get protected by a single + * lock. This implementation may thus perform worse than the default + * implementation in some scenarios. For example, when concurrent + * processes access a table with the operations insert/2, delete/2 and + * select/2, the insert/2 and delete/2 operations will trigger splits + * of locks (to get more fine-grained synchronization) but this will + * quickly be undone by the select/2 operation if this operation is + * also called frequently. + * + * The default implementation has a static stack optimization (see + * get_static_stack in erl_db_tree.c). This implementation does not + * have such an optimization as it induces bad scalability when + * concurrent read operations are frequent (they all try to get hold + * of the same stack). The default implementation may thus perform + * better compared to this implementation in scenarios where the + * static stack optimization is useful. One such scenario is when only + * one process is accessing the table and this process is traversing + * the table with a sequence of next/2 calls. 
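+ * + * In outline, each single-key write operation ends with an adaptation + * decision driven by the per-base-node lock statistics (see the + * ERL_DB_CATREE_*_CONTRIBUTION/_LIMIT constants and the + * ..._ADAPT_AND_UNLOCK_PART macro below): a contended lock acquisition + * adds a large positive contribution, an uncontended one a small + * negative contribution, and the accumulated value crossing a limit + * triggers, roughly: + * + * if (stats > HIGH_CONTENTION_LIMIT && level < MAX_LAYER_HEIGHT) + * split_catree(...); + * else if (stats < LOW_CONTENTION_LIMIT) + * join_catree(...); + * else + * wunlock_base_node(...);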
+ */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "sys.h" +#include "erl_vm.h" +#include "global.h" +#include "erl_process.h" +#include "error.h" +#define ERTS_WANT_DB_INTERNAL__ +#include "erl_db.h" +#include "bif.h" +#include "big.h" +#include "erl_binary.h" + +#include "erl_db_catree.h" +#include "erl_db_tree.h" +#include "erl_db_tree_util.h" + +/* +** Forward declarations +*/ + +static SWord do_free_base_node_cont(DbTableCATree *tb, SWord num_left); +static SWord do_free_routing_nodes_catree_cont(DbTableCATree *tb, SWord num_left); +static DbTableCATreeNode *catree_first_base_node_from_free_list(DbTableCATree *tb); + +/* Method interface functions */ +static int db_first_catree(Process *p, DbTable *tbl, + Eterm *ret); +static int db_next_catree(Process *p, DbTable *tbl, + Eterm key, Eterm *ret); +static int db_last_catree(Process *p, DbTable *tbl, + Eterm *ret); +static int db_prev_catree(Process *p, DbTable *tbl, + Eterm key, + Eterm *ret); +static int db_put_catree(DbTable *tbl, Eterm obj, int key_clash_fail); +static int db_get_catree(Process *p, DbTable *tbl, + Eterm key, Eterm *ret); +static int db_member_catree(DbTable *tbl, Eterm key, Eterm *ret); +static int db_get_element_catree(Process *p, DbTable *tbl, + Eterm key,int ndex, + Eterm *ret); +static int db_erase_catree(DbTable *tbl, Eterm key, Eterm *ret); +static int db_erase_object_catree(DbTable *tbl, Eterm object,Eterm *ret); +static int db_slot_catree(Process *p, DbTable *tbl, + Eterm slot_term, Eterm *ret); +static int db_select_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, int reversed, Eterm *ret); +static int db_select_count_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret); +static int db_select_chunk_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Sint chunk_size, + int reversed, Eterm *ret); +static int db_select_continue_catree(Process *p, DbTable *tbl, + Eterm continuation, Eterm *ret); +static int db_select_count_continue_catree(Process *p, DbTable *tbl, + Eterm continuation, Eterm *ret); +static int db_select_delete_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret); +static int db_select_delete_continue_catree(Process *p, DbTable *tbl, + Eterm continuation, Eterm *ret); +static int db_select_replace_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret); +static int db_select_replace_continue_catree(Process *p, DbTable *tbl, + Eterm continuation, Eterm *ret); +static int db_take_catree(Process *, DbTable *, Eterm, Eterm *); +static void db_print_catree(fmtfn_t to, void *to_arg, + int show, DbTable *tbl); +static int db_free_table_catree(DbTable *tbl); +static SWord db_free_table_continue_catree(DbTable *tbl, SWord); +static void db_foreach_offheap_catree(DbTable *, + void (*)(ErlOffHeap *, void *), + void *); +static SWord db_delete_all_objects_catree(Process* p, DbTable* tbl, SWord reds); +static int +db_lookup_dbterm_catree(Process *, DbTable *, Eterm key, Eterm obj, + DbUpdateHandle*); +static void db_finalize_dbterm_catree(int cret, DbUpdateHandle *); + +/* +** External interface +*/ +DbTableMethod db_catree = +{ + db_create_catree, + db_first_catree, + db_next_catree, + db_last_catree, + db_prev_catree, + db_put_catree, + db_get_catree, + db_get_element_catree, + db_member_catree, + db_erase_catree, + db_erase_object_catree, + db_slot_catree, + db_select_chunk_catree, + db_select_catree, + db_select_delete_catree, + db_select_continue_catree, + db_select_delete_continue_catree, + 
db_select_count_catree, + db_select_count_continue_catree, + db_select_replace_catree, + db_select_replace_continue_catree, + db_take_catree, + db_delete_all_objects_catree, + db_free_table_catree, + db_free_table_continue_catree, + db_print_catree, + db_foreach_offheap_catree, + db_lookup_dbterm_catree, + db_finalize_dbterm_catree + +}; + +/* + * Constants + */ + +#define ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION 200 +#define ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION (-1) +#define ERL_DB_CATREE_LOCK_MORE_THAN_ONE_CONTRIBUTION (-10) +#define ERL_DB_CATREE_HIGH_CONTENTION_LIMIT 1000 +#define ERL_DB_CATREE_LOW_CONTENTION_LIMIT (-1000) +#define ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT 14 + +/* + * Internal CA tree related helper functions and macros + */ + +#define GET_ROUTE_NODE_KEY(node) (node->u.route.key.term) +#define GET_BASE_NODE_LOCK(node) (&(node->u.base.lock)) +#define GET_ROUTE_NODE_LOCK(node) (&(node->u.route.lock)) + + +/* Helpers for reading and writing shared atomic variables */ + +/* No memory barrier */ +#define GET_ROOT(tb) ((DbTableCATreeNode*)erts_atomic_read_nob(&(tb->root))) +#define GET_LEFT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->u.route.left))) +#define GET_RIGHT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->u.route.right))) +#define SET_ROOT(tb, v) erts_atomic_set_nob(&((tb)->root), (erts_aint_t)(v)) +#define SET_LEFT(ca_tree_route_node, v) erts_atomic_set_nob(&(ca_tree_route_node->u.route.left), (erts_aint_t)(v)); +#define SET_RIGHT(ca_tree_route_node, v) erts_atomic_set_nob(&(ca_tree_route_node->u.route.right), (erts_aint_t)(v)); + + +/* Release or acquire barriers */ +#define GET_ROOT_ACQB(tb) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(tb->root))) +#define GET_LEFT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->u.route.left))) +#define GET_RIGHT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->u.route.right))) +#define SET_ROOT_RELB(tb, v) erts_atomic_set_relb(&((tb)->root), (erts_aint_t)(v)) +#define SET_LEFT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->u.route.left), (erts_aint_t)(v)); +#define SET_RIGHT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->u.route.right), (erts_aint_t)(v)); + +/* Compares a key to the key in a route node */ +static ERTS_INLINE Sint cmp_key_route(Eterm key, + DbTableCATreeNode *obj) +{ + return CMP(key, GET_ROUTE_NODE_KEY(obj)); +} + +static ERTS_INLINE void push_node_dyn_array(DbTable *tb, + CATreeNodeStack *stack, + DbTableCATreeNode *node) +{ + int i; + if (stack->pos == stack->size) { + DbTableCATreeNode **newArray = + erts_db_alloc(ERTS_ALC_T_DB_STK, tb, + sizeof(DbTableCATreeNode*) * (stack->size*2)); + for (i = 0; i < stack->pos; i++) { + newArray[i] = stack->array[i]; + } + if (stack->size > STACK_NEED) { + /* Dynamically allocated array that needs to be deallocated */ + erts_db_free(ERTS_ALC_T_DB_STK, tb, + stack->array, + sizeof(DbTableCATreeNode *) * stack->size); + } + stack->array = newArray; + stack->size = stack->size*2; + } + PUSH_NODE(stack, node); +} + +/* + * Used by the split_tree function + */ +static ERTS_INLINE +int less_than_two_elements(TreeDbTerm *root) +{ + return root == NULL || (root->left == NULL && root->right == NULL); +} + +/* + * Inserts a TreeDbTerm into a tree. Returns the new root. 
+ */ +static ERTS_INLINE +TreeDbTerm* insert_TreeDbTerm(DbTableCATree *tb, + TreeDbTerm *insert_to_root, + TreeDbTerm *value_to_insert) { + /* Non recursive insertion in AVL tree, building our own stack */ + TreeDbTerm **tstack[STACK_NEED]; + int tpos = 0; + int dstack[STACK_NEED+1]; + int dpos = 0; + int state = 0; + TreeDbTerm * base = insert_to_root; + TreeDbTerm **this = &base; + Sint c; + Eterm key; + int dir; + TreeDbTerm *p1, *p2, *p; + + key = GETKEY(tb, value_to_insert->dbterm.tpl); + + dstack[dpos++] = DIR_END; + for (;;) + if (!*this) { /* Found our place */ + state = 1; + *this = value_to_insert; + (*this)->balance = 0; + (*this)->left = (*this)->right = NULL; + break; + } else if ((c = cmp_key(&tb->common, key, *this)) < 0) { + /* go left */ + dstack[dpos++] = DIR_LEFT; + tstack[tpos++] = this; + this = &((*this)->left); + } else { /* go right */ + dstack[dpos++] = DIR_RIGHT; + tstack[tpos++] = this; + this = &((*this)->right); + } + + while (state && ( dir = dstack[--dpos] ) != DIR_END) { + this = tstack[--tpos]; + p = *this; + if (dir == DIR_LEFT) { + switch (p->balance) { + case 1: + p->balance = 0; + state = 0; + break; + case 0: + p->balance = -1; + break; + case -1: /* The icky case */ + p1 = p->left; + if (p1->balance == -1) { /* Single LL rotation */ + p->left = p1->right; + p1->right = p; + p->balance = 0; + (*this) = p1; + } else { /* Double RR rotation */ + p2 = p1->right; + p1->right = p2->left; + p2->left = p1; + p->left = p2->right; + p2->right = p; + p->balance = (p2->balance == -1) ? +1 : 0; + p1->balance = (p2->balance == 1) ? -1 : 0; + (*this) = p2; + } + (*this)->balance = 0; + state = 0; + break; + } + } else { /* dir == DIR_RIGHT */ + switch (p->balance) { + case -1: + p->balance = 0; + state = 0; + break; + case 0: + p->balance = 1; + break; + case 1: + p1 = p->right; + if (p1->balance == 1) { /* Single RR rotation */ + p->right = p1->left; + p1->left = p; + p->balance = 0; + (*this) = p1; + } else { /* Double RL rotation */ + p2 = p1->left; + p1->left = p2->right; + p2->right = p1; + p->right = p2->left; + p2->left = p; + p->balance = (p2->balance == 1) ? -1 : 0; + p1->balance = (p2->balance == -1) ? 1 : 0; + (*this) = p2; + } + (*this)->balance = 0; + state = 0; + break; + } + } + } + return base; +} + +/* + * Split an AVL tree into two trees. The function stores the node + * containing the "split key" in the write back parameter + * split_key_node_wb. The function stores the left tree containing the keys + * that are smaller than the "split key" in the write back parameter + * left_wb and the tree containing the rest of the keys in the write + * back parameter right_wb.
+ */ +static void split_tree(DbTableCATree *tb, + TreeDbTerm *root, + TreeDbTerm **split_key_node_wb, + TreeDbTerm **left_wb, + TreeDbTerm **right_wb) { + TreeDbTerm * split_node = NULL; + TreeDbTerm * left_root; + TreeDbTerm * right_root; + if (root->left == NULL) { /* To get a non-empty split */ + *right_wb = root->right; + *split_key_node_wb = root->right; + root->right = NULL; + root->balance = 0; + *left_wb = root; + return; + } + split_node = root; + left_root = split_node->left; + split_node->left = NULL; + right_root = split_node->right; + split_node->right = NULL; + right_root = insert_TreeDbTerm(tb, right_root, split_node); + *split_key_node_wb = split_node; + *left_wb = left_root; + *right_wb = right_root; +} + +/* + * Used by the join_trees function + */ +static ERTS_INLINE int compute_tree_height(TreeDbTerm * root) +{ + if(root == NULL) { + return 0; + } else { + TreeDbTerm * current_node = root; + int height_so_far = 1; + while (current_node->left != NULL || current_node->right != NULL) { + if (current_node->balance == -1) { + current_node = current_node->left; + } else { + current_node = current_node->right; + } + height_so_far = height_so_far + 1; + } + return height_so_far; + } +} + +/* + * Used by the join_trees function + */ +static ERTS_INLINE +TreeDbTerm* linkout_min_or_max_tree_node(TreeDbTerm **root, int is_min) +{ + TreeDbTerm **tstack[STACK_NEED]; + int tpos = 0; + int dstack[STACK_NEED+1]; + int dpos = 0; + int state = 0; + TreeDbTerm **this = root; + int dir; + TreeDbTerm *q = NULL; + + dstack[dpos++] = DIR_END; + for (;;) { + if (!*this) { /* Failure */ + return NULL; + } else if (is_min && (*this)->left != NULL) { + dstack[dpos++] = DIR_LEFT; + tstack[tpos++] = this; + this = &((*this)->left); + } else if (!is_min && (*this)->right != NULL) { + dstack[dpos++] = DIR_RIGHT; + tstack[tpos++] = this; + this = &((*this)->right); + } else { /* Min value, found the one to splice out */ + q = (*this); + if (q->right == NULL) { + (*this) = q->left; + state = 1; + } else if (q->left == NULL) { + (*this) = q->right; + state = 1; + } + break; + } + } + while (state && ( dir = dstack[--dpos] ) != DIR_END) { + this = tstack[--tpos]; + if (dir == DIR_LEFT) { + state = tree_balance_left(this); + } else { + state = tree_balance_right(this); + } + } + return q; +} + +#define LINKOUT_MIN_TREE_NODE(root) linkout_min_or_max_tree_node(root, 1) +#define LINKOUT_MAX_TREE_NODE(root) linkout_min_or_max_tree_node(root, 0) + +/* + * Joins two AVL trees where all the keys in the left one are smaller + * than the keys in the right one and returns the resulting tree. + * + * The algorithm is described on page 474 in D. E. Knuth. The Art of + * Computer Programming: Sorting and Searching, + * vol. 3. Addison-Wesley, 2nd edition, 1998.
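+ * + * In outline (matching the code below): when the left tree is at least + * as tall, the minimum node of the right tree is linked out to become + * the new subtree root, the right spine of the left tree is descended, + * estimating heights from the balance factors, until a node whose + * height is within one of the remaining right tree is reached, and the + * linked-out node is planted there with the two trees as children; + * ordinary insertion rebalancing then runs back up the recorded path. + * The opposite case is handled symmetrically.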
+ */ +static TreeDbTerm* join_trees(TreeDbTerm *left_root_param, + TreeDbTerm *right_root_param) +{ + TreeDbTerm **tstack[STACK_NEED]; + int tpos = 0; + int dstack[STACK_NEED+1]; + int dpos = 0; + int state = 1; + TreeDbTerm **this; + int dir; + TreeDbTerm *p1, *p2, *p; + TreeDbTerm *left_root = left_root_param; + TreeDbTerm *right_root = right_root_param; + int left_height; + int right_height; + int current_height; + dstack[dpos++] = DIR_END; + if (left_root == NULL) { + return right_root; + } else if (right_root == NULL) { + return left_root; + } + + left_height = compute_tree_height(left_root); + right_height = compute_tree_height(right_root); + if (left_height >= right_height) { + TreeDbTerm * new_root = + LINKOUT_MIN_TREE_NODE(&right_root); + int new_right_height = compute_tree_height(right_root); + TreeDbTerm * current_node = left_root; + this = &left_root; + current_height = left_height; + while(current_height > new_right_height + 1) { + if (current_node->balance == -1) { + current_height = current_height - 2; + } else { + current_height = current_height - 1; + } + dstack[dpos++] = DIR_RIGHT; + tstack[tpos++] = this; + this = &((*this)->right); + current_node = current_node->right; + } + new_root->left = current_node; + new_root->right = right_root; + new_root->balance = new_right_height - current_height; + *this = new_root; + } else { + /* This case is symmetric to the previous case */ + TreeDbTerm * new_root = + LINKOUT_MAX_TREE_NODE(&left_root); + int new_left_height = compute_tree_height(left_root); + TreeDbTerm * current_node = right_root; + this = &right_root; + current_height = right_height; + while (current_height > new_left_height + 1) { + if (current_node->balance == 1) { + current_height = current_height - 2; + } else { + current_height = current_height - 1; + } + dstack[dpos++] = DIR_LEFT; + tstack[tpos++] = this; + this = &((*this)->left); + current_node = current_node->left; + } + new_root->right = current_node; + new_root->left = left_root; + new_root->balance = current_height - new_left_height; + *this = new_root; + } + /* Now we need to continue as if this was during the insert */ + while (state && ( dir = dstack[--dpos] ) != DIR_END) { + this = tstack[--tpos]; + p = *this; + if (dir == DIR_LEFT) { + switch (p->balance) { + case 1: + p->balance = 0; + state = 0; + break; + case 0: + p->balance = -1; + break; + case -1: /* The icky case */ + p1 = p->left; + if (p1->balance == -1) { /* Single LL rotation */ + p->left = p1->right; + p1->right = p; + p->balance = 0; + (*this) = p1; + } else { /* Double RR rotation */ + p2 = p1->right; + p1->right = p2->left; + p2->left = p1; + p->left = p2->right; + p2->right = p; + p->balance = (p2->balance == -1) ? +1 : 0; + p1->balance = (p2->balance == 1) ? -1 : 0; + (*this) = p2; + } + (*this)->balance = 0; + state = 0; + break; + } + } else { /* dir == DIR_RIGHT */ + switch (p->balance) { + case -1: + p->balance = 0; + state = 0; + break; + case 0: + p->balance = 1; + break; + case 1: + p1 = p->right; + if (p1->balance == 1) { /* Single RR rotation */ + p->right = p1->left; + p1->left = p; + p->balance = 0; + (*this) = p1; + } else { /* Double RL rotation */ + p2 = p1->left; + p1->left = p2->right; + p2->right = p1; + p->right = p2->left; + p2->left = p; + p->balance = (p2->balance == 1) ? -1 : 0; + p1->balance = (p2->balance == -1) ? 
1 : 0; + (*this) = p2; + } + (*this)->balance = 0; + state = 0; + break; + } + } + } + /* Return the joined tree */ + if (left_height >= right_height) { + return left_root; + } else { + return right_root; + } +} + + +static ERTS_INLINE +int try_wlock_base_node(DbTableCATreeBaseNode *base_node) +{ + return EBUSY == erts_rwmtx_tryrwlock(&base_node->lock); +} + +/* + * Locks a base node without adjusting the lock statistics + */ +static ERTS_INLINE +void wlock_base_node_no_stats(DbTableCATreeBaseNode *base_node) +{ + erts_rwmtx_rwlock(&base_node->lock); +} + +/* + * Locks a base node and adjusts the lock statistics according to if + * the lock was contended or not + */ +static ERTS_INLINE +void wlock_base_node(DbTableCATreeBaseNode *base_node) +{ + if (try_wlock_base_node(base_node)) { + /* The lock is contended */ + wlock_base_node_no_stats(base_node); + base_node->lock_statistics += ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION; + } else { + base_node->lock_statistics += ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION; + } +} + +static ERTS_INLINE +void wunlock_base_node(DbTableCATreeBaseNode *base_node) +{ + erts_rwmtx_rwunlock(&base_node->lock); +} + +static ERTS_INLINE +void rlock_base_node(DbTableCATreeBaseNode *base_node) +{ + erts_rwmtx_rlock(&base_node->lock); +} + +static ERTS_INLINE +void runlock_base_node(DbTableCATreeBaseNode *base_node) +{ + erts_rwmtx_runlock(&base_node->lock); +} + +static ERTS_INLINE +void lock_route_node(DbTableCATreeNode *route_node) +{ + ASSERT(!route_node->is_base_node); + erts_mtx_lock(&route_node->u.route.lock); +} + +static ERTS_INLINE +void unlock_route_node(DbTableCATreeNode *route_node) +{ + ASSERT(!route_node->is_base_node); + erts_mtx_unlock(&route_node->u.route.lock); +} + + +/* + * The following macros are used to create the ETS functions that only + * need to access one element (e.g. db_put_catree, db_get_catree and + * db_erase_catree). + */ + +#define ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(LOCK,UNLOCK) \ + int retry; \ + DbTableCATreeNode *current_node; \ + DbTableCATreeNode *prev_node; \ + DbTableCATreeBaseNode *base_node; \ + int current_level; \ + (void)prev_node; \ + do { \ + retry = 0; \ + current_node = GET_ROOT_ACQB(tb); \ + prev_node = NULL; \ + current_level = 0; \ + while ( ! current_node->is_base_node ) { \ + current_level = current_level + 1; \ + prev_node = current_node; \ + if (cmp_key_route(key,current_node) < 0) { \ + current_node = GET_LEFT_ACQB(current_node); \ + } else { \ + current_node = GET_RIGHT_ACQB(current_node); \ + } \ + } \ + base_node = &current_node->u.base; \ + LOCK(base_node); \ + if ( ! 
base_node->is_valid ) { \ + /* Retry */ \ + UNLOCK(base_node); \ + retry = 1; \ + } \ + } while(retry); + + +#define ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART \ + if (base_node->lock_statistics > ERL_DB_CATREE_HIGH_CONTENTION_LIMIT \ + && current_level < ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT) { \ + split_catree(tb, prev_node, current_node); \ + } else if (base_node->lock_statistics < ERL_DB_CATREE_LOW_CONTENTION_LIMIT) { \ + join_catree(tb, prev_node, current_node); \ + } else { \ + wunlock_base_node(base_node); \ + } + +#define ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(FUN_POSTFIX,PARAMETERS,SEQUENTIAL_CALL) \ + static int erl_db_catree_do_operation_##FUN_POSTFIX(DbTableCATree *tb, \ + Eterm key, \ + PARAMETERS){ \ + int result; \ + ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(wlock_base_node,wunlock_base_node) \ + result = SEQUENTIAL_CALL; \ + ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART \ + return result; \ + } + + +#define ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(FUN_POSTFIX,PARAMETERS,SEQUENTIAL_CALL) \ + static int erl_db_catree_do_operation_##FUN_POSTFIX(DbTableCATree *tb, \ + Eterm key, \ + PARAMETERS){ \ + int result; \ + ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(rlock_base_node,runlock_base_node) \ + result = SEQUENTIAL_CALL; \ + runlock_base_node(base_node); \ + return result; \ + } + + +static ERTS_INLINE +void copy_route_key(DbRouteKey* dst, Eterm key, Uint key_size) +{ + dst->size = key_size; + if (key_size != 0) { + Eterm* hp = &dst->heap[0]; + ErlOffHeap tmp_offheap; + tmp_offheap.first = NULL; + dst->term = copy_struct(key, key_size, &hp, &tmp_offheap); + dst->oh = tmp_offheap.first; + } + else { + ASSERT(is_immed(key)); + dst->term = key; + dst->oh = NULL; + } +} + +static ERTS_INLINE +void destroy_route_key(DbRouteKey* key) +{ + if (key->oh) { + ErlOffHeap oh; + oh.first = key->oh; + erts_cleanup_offheap(&oh); + } +} + + +#ifdef ERTS_ENABLE_LOCK_CHECK +# define sizeof_base_node(KEY_SZ) \ + (offsetof(DbTableCATreeNode, u.base.lc_key.heap) \ + + (KEY_SZ)*sizeof(Eterm)) +# define LC_ORDER(ORDER) ORDER +#else +# define sizeof_base_node(KEY_SZ) \ + offsetof(DbTableCATreeNode, u.base.end_of_struct__) +# define LC_ORDER(ORDER) NIL +#endif + +static DbTableCATreeNode *create_base_node(DbTableCATree *tb, + TreeDbTerm* root, + Eterm lc_key) +{ + DbTableCATreeNode *p; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; +#ifdef ERTS_ENABLE_LOCK_CHECK + Eterm lc_key_size = size_object(lc_key); +#endif + p = erts_db_alloc(ERTS_ALC_T_DB_TABLE, (DbTable *) tb, + sizeof_base_node(lc_key_size)); + + p->is_base_node = 1; + p->u.base.root = root; + if (tb->common.type & DB_FREQ_READ) + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + if (erts_ets_rwmtx_spin_count >= 0) + rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; + +#ifdef ERTS_ENABLE_LOCK_CHECK + copy_route_key(&p->u.base.lc_key, lc_key, lc_key_size); +#endif + erts_rwmtx_init_opt(&p->u.base.lock, &rwmtx_opt, + "erl_db_catree_base_node", + lc_key, + ERTS_LOCK_FLAGS_CATEGORY_DB); + p->u.base.lock_statistics = 0; + p->u.base.is_valid = 1; + return p; +} + +static ERTS_INLINE +DbTableCATreeNode *create_wlocked_base_node(DbTableCATree *tb, + TreeDbTerm* root, + Eterm lc_key) +{ + DbTableCATreeNode* p = create_base_node(tb, root, lc_key); + ethr_rwmutex_rwlock(&p->u.base.lock.rwmtx); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_lc_trylock_flg(-1, &p->u.base.lock.lc, ERTS_LOCK_OPTIONS_RDWR); +#endif + return p; +} + + +static ERTS_INLINE 
Uint sizeof_route_node(Uint key_size) +{ + return (offsetof(DbTableCATreeNode, u.route.key.heap) + + key_size*sizeof(Eterm)); +} + +static DbTableCATreeNode* +create_route_node(DbTableCATree *tb, + DbTableCATreeNode *left, + DbTableCATreeNode *right, + DbTerm * keyTerm, + DbTableCATreeNode* lc_parent) +{ + Eterm key = GETKEY(tb,keyTerm->tpl); + int key_size = size_object(key); + DbTableCATreeNode* p = erts_db_alloc(ERTS_ALC_T_DB_TABLE, + (DbTable *) tb, + sizeof_route_node(key_size)); + + copy_route_key(&p->u.route.key, key, key_size); + p->is_base_node = 0; + p->u.route.is_valid = 1; + erts_atomic_init_nob(&p->u.route.left, (erts_aint_t)left); + erts_atomic_init_nob(&p->u.route.right, (erts_aint_t)right); +#ifdef ERTS_ENABLE_LOCK_CHECK + /* Route node lock order is inverse tree depth (from leafs toward root) */ + p->u.route.lc_order = (lc_parent == NULL ? MAX_SMALL : + lc_parent->u.route.lc_order - 1); + /* + * This assert may eventually fail as we don't increase 'lc_order' in join + * operations when route nodes move up in the tree. + * Tough luck if you run a lock-checking VM for such a long time on 32-bit. + */ + ERTS_LC_ASSERT(p->u.route.lc_order >= 0); +#endif + erts_mtx_init(&p->u.route.lock, "erl_db_catree_route_node", + LC_ORDER(make_small(p->u.route.lc_order)), + ERTS_LOCK_FLAGS_CATEGORY_DB); + return p; +} + +static void do_free_base_node(void* vptr) +{ + DbTableCATreeNode *p = (DbTableCATreeNode *)vptr; + ASSERT(p->is_base_node); + erts_rwmtx_destroy(&p->u.base.lock); +#ifdef ERTS_ENABLE_LOCK_CHECK + destroy_route_key(&p->u.base.lc_key); +#endif + erts_free(ERTS_ALC_T_DB_TABLE, p); +} + +static void free_catree_base_node(DbTableCATree* tb, DbTableCATreeNode* p) +{ + ASSERT(p->is_base_node); + ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof_base_node(p->u.base.lc_key.size), 0); + do_free_base_node(p); +} + +static void do_free_route_node(void *vptr) +{ + DbTableCATreeNode *p = (DbTableCATreeNode *)vptr; + ASSERT(!p->is_base_node); + erts_mtx_destroy(&p->u.route.lock); + destroy_route_key(&p->u.route.key); + erts_free(ERTS_ALC_T_DB_TABLE, p); +} + +static void free_catree_route_node(DbTableCATree* tb, DbTableCATreeNode* p) +{ + ASSERT(!p->is_base_node); + ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof_route_node(p->u.route.key.size), 0); + do_free_route_node(p); +} + + +/* + * Returns the parent routing node of the specified + * route node 'child' if such a parent exists + * or NULL if 'child' is attached to the root. 
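+ * + * Note that the parent is found by re-descending from the root with the + * child's route key, so a node that is concurrently being spliced out + * may be returned; callers therefore lock the result, re-check its + * is_valid flag and retry, as erl_db_catree_force_join_right and + * join_catree below do.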
+ */ +static ERTS_INLINE DbTableCATreeNode * +parent_of(DbTableCATree *tb, + DbTableCATreeNode *child) +{ + Eterm key = GET_ROUTE_NODE_KEY(child); + DbTableCATreeNode *current = GET_ROOT_ACQB(tb); + DbTableCATreeNode *prev = NULL; + + while (current != child) { + prev = current; + if (cmp_key_route(key, current) < 0) { + current = GET_LEFT_ACQB(current); + } else { + current = GET_RIGHT_ACQB(current); + } + } + return prev; +} + + +static ERTS_INLINE DbTableCATreeNode * +leftmost_base_node(DbTableCATreeNode *root) +{ + DbTableCATreeNode *node = root; + while (!node->is_base_node) { + node = GET_LEFT_ACQB(node); + } + return node; +} + + +static ERTS_INLINE DbTableCATreeNode * +rightmost_base_node(DbTableCATreeNode *root) +{ + DbTableCATreeNode *node = root; + while (!node->is_base_node) { + node = GET_RIGHT_ACQB(node); + } + return node; +} + + +static ERTS_INLINE DbTableCATreeNode * +leftmost_route_node(DbTableCATreeNode *root) +{ + DbTableCATreeNode *node = root; + DbTableCATreeNode *prev_node = NULL; + while (!node->is_base_node) { + prev_node = node; + node = GET_LEFT_ACQB(node); + } + return prev_node; +} + +static ERTS_INLINE DbTableCATreeNode* +rightmost_route_node(DbTableCATreeNode *root) +{ + DbTableCATreeNode * node = root; + DbTableCATreeNode * prev_node = NULL; + while (!node->is_base_node) { + prev_node = node; + node = GET_RIGHT_ACQB(node); + } + return prev_node; +} + +static ERTS_INLINE DbTableCATreeNode* +leftmost_base_node_and_path(DbTableCATreeNode *root, CATreeNodeStack * stack) +{ + DbTableCATreeNode * node = root; + while (!node->is_base_node) { + PUSH_NODE(stack, node); + node = GET_LEFT_ACQB(node); + } + return node; +} + +static ERTS_INLINE DbTableCATreeNode* +get_next_base_node_and_path(DbTableCATreeNode *base_node, + CATreeNodeStack *stack) +{ + if (EMPTY_NODE(stack)) { /* The parent of b is the root */ + return NULL; + } else { + if (GET_LEFT(TOP_NODE(stack)) == base_node) { + return leftmost_base_node_and_path( + GET_RIGHT_ACQB(TOP_NODE(stack)), + stack); + } else { + Eterm pkey = + TOP_NODE(stack)->u.route.key.term; /* pKey = key of parent */ + POP_NODE(stack); + while (!EMPTY_NODE(stack)) { + if (TOP_NODE(stack)->u.route.is_valid && + cmp_key_route(pkey, TOP_NODE(stack)) <= 0) { + return leftmost_base_node_and_path(GET_RIGHT_ACQB(TOP_NODE(stack)), stack); + } else { + POP_NODE(stack); + } + } + } + return NULL; + } +} + +static ERTS_INLINE void +clone_stack(CATreeNodeStack *search_stack_ptr, + CATreeNodeStack *search_stack_copy_ptr) +{ + int i; + search_stack_copy_ptr->pos = search_stack_ptr->pos; + for (i = 0; i < search_stack_ptr->pos; i++) { + search_stack_copy_ptr->array[i] = search_stack_ptr->array[i]; + } +} + + + +static ERTS_INLINE DbTableCATreeNode* +lock_first_base_node(DbTable *tbl, + CATreeNodeStack *search_stack_ptr, + CATreeNodeStack *locked_base_nodes_stack_ptr) +{ + DbTableCATreeNode *current_node; + DbTableCATreeBaseNode *base_node; + DbTableCATree* tb = &tbl->catree; + while (1) { + current_node = GET_ROOT_ACQB(tb); + while ( ! 
current_node->is_base_node ) { + PUSH_NODE(search_stack_ptr, current_node); + current_node = GET_LEFT_ACQB(current_node); + } + base_node = ¤t_node->u.base; + rlock_base_node(base_node); + if (base_node->is_valid) + break; + /* Retry */ + runlock_base_node(base_node); + search_stack_ptr->pos = 0; + } + push_node_dyn_array(tbl, locked_base_nodes_stack_ptr, current_node); + return current_node; +} + +static ERTS_INLINE DbTableCATreeBaseNode* +find_and_lock_next_base_node_and_path(DbTable *tbl, + CATreeNodeStack **search_stack_ptr_ptr, + CATreeNodeStack **search_stack_copy_ptr_ptr, + CATreeNodeStack *locked_base_nodes_stack_ptr) +{ + DbTableCATreeNode *current_node; + DbTableCATreeBaseNode *base_node; + CATreeNodeStack * tmp_stack_ptr; + + while (1) { + current_node = TOP_NODE(locked_base_nodes_stack_ptr); + clone_stack(*search_stack_ptr_ptr, *search_stack_copy_ptr_ptr); + current_node = + get_next_base_node_and_path(current_node, *search_stack_ptr_ptr); + if (current_node == NULL) { + return NULL; + } + base_node = ¤t_node->u.base; + rlock_base_node(base_node); + if (base_node->is_valid) + break; + + /* Retry */ + runlock_base_node(base_node); + /* Revert to previous state */ + current_node = TOP_NODE(locked_base_nodes_stack_ptr); + tmp_stack_ptr = *search_stack_ptr_ptr; + *search_stack_ptr_ptr = *search_stack_copy_ptr_ptr; + *search_stack_copy_ptr_ptr = tmp_stack_ptr; + } + + push_node_dyn_array(tbl, locked_base_nodes_stack_ptr, current_node); + return base_node; +} + +static ERTS_INLINE +void unlock_and_release_locked_base_node_stack(DbTable *tbl, + CATreeNodeStack *locked_base_nodes_stack_ptr) +{ + DbTableCATreeNode *current_node; + DbTableCATreeBaseNode *base_node; + int i; + for (i = 0; i < locked_base_nodes_stack_ptr->pos; i++) { + current_node = locked_base_nodes_stack_ptr->array[i]; + base_node = ¤t_node->u.base; + if (locked_base_nodes_stack_ptr->pos > 1) { + base_node->lock_statistics = /* This is not atomic which is fine as */ + base_node->lock_statistics + /* correctness does not depend on that. */ + ERL_DB_CATREE_LOCK_MORE_THAN_ONE_CONTRIBUTION; + } + runlock_base_node(base_node); + } + if (locked_base_nodes_stack_ptr->size > STACK_NEED) { + erts_db_free(ERTS_ALC_T_DB_STK, tbl, + locked_base_nodes_stack_ptr->array, + sizeof(DbTableCATreeNode *) * locked_base_nodes_stack_ptr->size); + } +} + +static ERTS_INLINE +void init_stack(CATreeNodeStack *stack, + DbTableCATreeNode **stack_array, + Uint init_size) +{ + stack->array = stack_array; + stack->pos = 0; + stack->size = init_size; +} + +static ERTS_INLINE +void init_tree_stack(DbTreeStack *stack, + TreeDbTerm **stack_array, + Uint init_slot) +{ + stack->array = stack_array; + stack->pos = 0; + stack->slot = init_slot; +} + +#define DEC_ROUTE_NODE_STACK_AND_ARRAY(STACK_NAME) \ + CATreeNodeStack STACK_NAME; \ + CATreeNodeStack * STACK_NAME##_ptr = &(STACK_NAME); \ + DbTableCATreeNode *STACK_NAME##_array[STACK_NEED]; + +#define DECLARE_AND_INIT_BASE_NODE_SEARCH_STACKS \ +DEC_ROUTE_NODE_STACK_AND_ARRAY(search_stack) \ +DEC_ROUTE_NODE_STACK_AND_ARRAY(search_stack_copy) \ +DEC_ROUTE_NODE_STACK_AND_ARRAY(locked_base_nodes_stack) \ +init_stack(&search_stack, search_stack_array, 0); \ +init_stack(&search_stack_copy, search_stack_copy_array, 0); \ +init_stack(&locked_base_nodes_stack, locked_base_nodes_stack_array, STACK_NEED);/* Abuse as stack array size*/ + +/* + * Locks and returns the base node that contains the specified key if + * it is present. 
+ * The function saves the search path to the found base
+ * node in search_stack_ptr and adds the found base node to
+ * locked_base_nodes_stack_ptr.
+ */
+static ERTS_INLINE DbTableCATreeBaseNode *
+lock_base_node_with_key(DbTable *tbl,
+                        Eterm key,
+                        CATreeNodeStack * search_stack_ptr,
+                        CATreeNodeStack * locked_base_nodes_stack_ptr)
+{
+    int retry;
+    DbTableCATreeNode *current_node;
+    DbTableCATreeBaseNode *base_node;
+    DbTableCATree* tb = &tbl->catree;
+    do {
+        retry = 0;
+        current_node = GET_ROOT_ACQB(tb);
+        while ( ! current_node->is_base_node ) {
+            PUSH_NODE(search_stack_ptr, current_node);
+            if( cmp_key_route(key,current_node) < 0 ) {
+                current_node = GET_LEFT_ACQB(current_node);
+            } else {
+                current_node = GET_RIGHT_ACQB(current_node);
+            }
+        }
+        base_node = &current_node->u.base;
+        rlock_base_node(base_node);
+        if ( ! base_node->is_valid ) {
+            /* Retry */
+            runlock_base_node(base_node);
+            retry = 1;
+        }
+    } while(retry);
+    push_node_dyn_array(tbl, locked_base_nodes_stack_ptr, current_node);
+    return base_node;
+}
+
+/*
+ * Joins a base node with its right neighbor. Returns the base node
+ * resulting from the join in locked state or NULL if there is no base
+ * node to join with.
+ */
+static DbTableCATreeNode*
+erl_db_catree_force_join_right(DbTableCATree *tb,
+                               DbTableCATreeNode *parent,
+                               DbTableCATreeNode *thiz,
+                               DbTableCATreeNode **result_parent_wb)
+{
+    DbTableCATreeNode *gparent;
+    DbTableCATreeNode *neighbor;
+    DbTableCATreeNode *new_neighbor;
+    DbTableCATreeNode *neighbor_parent;
+    TreeDbTerm* new_root;
+
+    if (parent == NULL) {
+        return NULL;
+    }
+    ASSERT(thiz == GET_LEFT(parent));
+    while (1) {
+        neighbor = leftmost_base_node(GET_RIGHT_ACQB(parent));
+        wlock_base_node_no_stats(&neighbor->u.base);
+        if (neighbor->u.base.is_valid)
+            break;
+        wunlock_base_node(&neighbor->u.base);
+    }
+    lock_route_node(parent);
+    parent->u.route.is_valid = 0;
+    neighbor->u.base.is_valid = 0;
+    thiz->u.base.is_valid = 0;
+    while (1) {
+        gparent = parent_of(tb, parent);
+        if (gparent == NULL)
+            break;
+        lock_route_node(gparent);
+        if (gparent->u.route.is_valid)
+            break;
+        unlock_route_node(gparent);
+    }
+    if (gparent == NULL) {
+        SET_ROOT_RELB(tb, GET_RIGHT(parent));
+    } else if (GET_LEFT(gparent) == parent) {
+        SET_LEFT_RELB(gparent, GET_RIGHT(parent));
+    } else {
+        SET_RIGHT_RELB(gparent, GET_RIGHT(parent));
+    }
+    unlock_route_node(parent);
+    if (gparent != NULL) {
+        unlock_route_node(gparent);
+    }
+
+    new_root = join_trees(thiz->u.base.root, neighbor->u.base.root);
+    new_neighbor = create_wlocked_base_node(tb, new_root,
+                                            LC_ORDER(thiz->u.base.lc_key.term));
+
+    if (GET_RIGHT(parent) == neighbor) {
+        neighbor_parent = gparent;
+    } else {
+        neighbor_parent = leftmost_route_node(GET_RIGHT(parent));
+    }
+    if(neighbor_parent == NULL) {
+        SET_ROOT_RELB(tb, new_neighbor);
+    } else if (GET_LEFT(neighbor_parent) == neighbor) {
+        SET_LEFT_RELB(neighbor_parent, new_neighbor);
+    } else {
+        SET_RIGHT_RELB(neighbor_parent, new_neighbor);
+    }
+    wunlock_base_node(&thiz->u.base);
+    wunlock_base_node(&neighbor->u.base);
+    /* Free the parent and base */
+    erts_schedule_db_free(&tb->common,
+                          do_free_route_node,
+                          parent,
+                          &parent->u.route.free_item,
+                          sizeof_route_node(parent->u.route.key.size));
+    erts_schedule_db_free(&tb->common,
+                          do_free_base_node,
+                          thiz,
+                          &thiz->u.base.free_item,
+                          sizeof_base_node(thiz->u.base.lc_key.size));
+    erts_schedule_db_free(&tb->common,
+                          do_free_base_node,
+                          neighbor,
+                          &neighbor->u.base.free_item,
+                          sizeof_base_node(neighbor->u.base.lc_key.size));
+
+    *result_parent_wb = neighbor_parent;
+    return new_neighbor;
+}
+
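+/*
+ * A minimal sketch of the merge-then-operate pattern that is built on
+ * merge_to_one_locked_base_node() below, mirroring db_last_catree():
+ *
+ *   DbTableCATreeNode *base = merge_to_one_locked_base_node(&tbl->catree);
+ *   result = db_last_tree_common(p, tbl, base->u.base.root, ret, NULL);
+ *   wunlock_base_node(&(base->u.base));
+ */
+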
+/*
+ * Used to merge together all base nodes for operations such as last
+ * and select_*. Returns the base node resulting from the merge in
+ * locked state.
+ */
+static DbTableCATreeNode *
+merge_to_one_locked_base_node(DbTableCATree* tb)
+{
+    DbTableCATreeNode *parent;
+    DbTableCATreeNode *new_parent;
+    DbTableCATreeNode *base;
+    DbTableCATreeNode *new_base;
+    int is_not_valid;
+    /* Find first base node */
+    do {
+        parent = NULL;
+        base = GET_ROOT_ACQB(tb);
+        while ( ! base->is_base_node ) {
+            parent = base;
+            base = GET_LEFT_ACQB(base);
+        }
+        wlock_base_node_no_stats(&(base->u.base));
+        is_not_valid = ! base->u.base.is_valid;
+        if (is_not_valid) {
+            wunlock_base_node(&(base->u.base));
+        }
+    } while(is_not_valid);
+    do {
+        new_base = erl_db_catree_force_join_right(tb,
+                                                  parent,
+                                                  base,
+                                                  &new_parent);
+        if (new_base != NULL) {
+            base = new_base;
+            parent = new_parent;
+        }
+    } while(new_base != NULL);
+    return base;
+}
+
+
+static void join_catree(DbTableCATree *tb,
+                        DbTableCATreeNode *parent,
+                        DbTableCATreeNode *thiz)
+{
+    DbTableCATreeNode *gparent;
+    DbTableCATreeNode *neighbor;
+    DbTableCATreeNode *new_neighbor;
+    DbTableCATreeNode *neighbor_parent;
+
+    ASSERT(thiz->is_base_node);
+    if (parent == NULL) {
+        thiz->u.base.lock_statistics = 0;
+        wunlock_base_node(&thiz->u.base);
+        return;
+    }
+    ASSERT(!parent->is_base_node);
+    if (GET_LEFT(parent) == thiz) {
+        neighbor = leftmost_base_node(GET_RIGHT_ACQB(parent));
+        if (try_wlock_base_node(&neighbor->u.base)) {
+            /* Failed to acquire lock */
+            thiz->u.base.lock_statistics = 0;
+            wunlock_base_node(&thiz->u.base);
+            return;
+        } else if (!neighbor->u.base.is_valid) {
+            thiz->u.base.lock_statistics = 0;
+            wunlock_base_node(&thiz->u.base);
+            wunlock_base_node(&neighbor->u.base);
+            return;
+        } else {
+            lock_route_node(parent);
+            parent->u.route.is_valid = 0;
+            neighbor->u.base.is_valid = 0;
+            thiz->u.base.is_valid = 0;
+            gparent = NULL;
+            do {
+                if (gparent != NULL) {
+                    unlock_route_node(gparent);
+                }
+                gparent = parent_of(tb, parent);
+                if (gparent != NULL)
+                    lock_route_node(gparent);
+            } while (gparent != NULL && !gparent->u.route.is_valid);
+
+            if (gparent == NULL) {
+                SET_ROOT_RELB(tb, GET_RIGHT(parent));
+            } else if (GET_LEFT(gparent) == parent) {
+                SET_LEFT_RELB(gparent, GET_RIGHT(parent));
+            } else {
+                SET_RIGHT_RELB(gparent, GET_RIGHT(parent));
+            }
+            unlock_route_node(parent);
+            if (gparent != NULL) {
+                unlock_route_node(gparent);
+            }
+            {
+                TreeDbTerm* new_root = join_trees(thiz->u.base.root,
+                                                  neighbor->u.base.root);
+                new_neighbor = create_base_node(tb, new_root,
+                                                LC_ORDER(thiz->u.base.lc_key.term));
+            }
+            if (GET_RIGHT(parent) == neighbor) {
+                neighbor_parent = gparent;
+            } else {
+                neighbor_parent = leftmost_route_node(GET_RIGHT(parent));
+            }
+        }
+    } else { /* Symmetric case */
+        ASSERT(GET_RIGHT(parent) == thiz);
+        neighbor = rightmost_base_node(GET_LEFT_ACQB(parent));
+        if (try_wlock_base_node(&neighbor->u.base)) {
+            /* Failed to acquire lock */
+            thiz->u.base.lock_statistics = 0;
+            wunlock_base_node(&thiz->u.base);
+            return;
+        } else if (!neighbor->u.base.is_valid) {
+            thiz->u.base.lock_statistics = 0;
+            wunlock_base_node(&thiz->u.base);
+            wunlock_base_node(&neighbor->u.base);
+            return;
+        } else {
+            lock_route_node(parent);
+            parent->u.route.is_valid = 0;
+            neighbor->u.base.is_valid = 0;
+            thiz->u.base.is_valid = 0;
+            gparent = NULL;
+            do {
+                if (gparent != NULL) {
+                    unlock_route_node(gparent);
+                }
+                gparent = parent_of(tb, parent);
+                if (gparent != NULL) {
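+                    /* Lock the candidate grandparent; the loop retries
+                     * with a fresh parent_of() lookup if it turns out to
+                     * have been invalidated by a concurrent join. */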
lock_route_node(gparent); + } else { + gparent = NULL; + } + } while (gparent != NULL && !gparent->u.route.is_valid); + if (gparent == NULL) { + SET_ROOT_RELB(tb, GET_LEFT(parent)); + } else if (GET_RIGHT(gparent) == parent) { + SET_RIGHT_RELB(gparent, GET_LEFT(parent)); + } else { + SET_LEFT_RELB(gparent, GET_LEFT(parent)); + } + unlock_route_node(parent); + if (gparent != NULL) { + unlock_route_node(gparent); + } + { + TreeDbTerm* new_root = join_trees(neighbor->u.base.root, + thiz->u.base.root); + new_neighbor = create_base_node(tb, new_root, + LC_ORDER(thiz->u.base.lc_key.term)); + } + if (GET_LEFT(parent) == neighbor) { + neighbor_parent = gparent; + } else { + neighbor_parent = + rightmost_route_node(GET_LEFT(parent)); + } + } + } + /* Link in new neighbor and free nodes that are no longer in the tree */ + if (neighbor_parent == NULL) { + SET_ROOT_RELB(tb, new_neighbor); + } else if (GET_LEFT(neighbor_parent) == neighbor) { + SET_LEFT_RELB(neighbor_parent, new_neighbor); + } else { + SET_RIGHT_RELB(neighbor_parent, new_neighbor); + } + wunlock_base_node(&thiz->u.base); + wunlock_base_node(&neighbor->u.base); + /* Free the parent and base */ + erts_schedule_db_free(&tb->common, + do_free_route_node, + parent, + &parent->u.route.free_item, + sizeof_route_node(parent->u.route.key.size)); + erts_schedule_db_free(&tb->common, + do_free_base_node, + thiz, + &thiz->u.base.free_item, + sizeof_base_node(thiz->u.base.lc_key.size)); + erts_schedule_db_free(&tb->common, + do_free_base_node, + neighbor, + &neighbor->u.base.free_item, + sizeof_base_node(neighbor->u.base.lc_key.size)); +} + +static void split_catree(DbTableCATree *tb, + DbTableCATreeNode* ERTS_RESTRICT parent, + DbTableCATreeNode* ERTS_RESTRICT base) { + TreeDbTerm *splitOutWriteBack; + DbTableCATreeNode* ERTS_RESTRICT new_left; + DbTableCATreeNode* ERTS_RESTRICT new_right; + DbTableCATreeNode* ERTS_RESTRICT new_route; + + if (less_than_two_elements(base->u.base.root)) { + base->u.base.lock_statistics = 0; + wunlock_base_node(&base->u.base); + return; + } else { + TreeDbTerm *left_tree; + TreeDbTerm *right_tree; + + split_tree(tb, base->u.base.root, &splitOutWriteBack, + &left_tree, &right_tree); + + new_left = create_base_node(tb, left_tree, + LC_ORDER(GETKEY(tb, left_tree->dbterm.tpl))); + new_right = create_base_node(tb, right_tree, + LC_ORDER(GETKEY(tb, right_tree->dbterm.tpl))); + new_route = create_route_node(tb, + new_left, + new_right, + &splitOutWriteBack->dbterm, + parent); + if (parent == NULL) { + SET_ROOT_RELB(tb, new_route); + } else if(GET_LEFT(parent) == base) { + SET_LEFT_RELB(parent, new_route); + } else { + SET_RIGHT_RELB(parent, new_route); + } + base->u.base.is_valid = 0; + wunlock_base_node(&base->u.base); + erts_schedule_db_free(&tb->common, + do_free_base_node, + base, + &base->u.base.free_item, + sizeof_base_node(base->u.base.lc_key.size)); + } +} + +/* + * Helper functions for removing the table + */ + +static void catree_add_base_node_to_free_list( + DbTableCATree *tb, + DbTableCATreeNode *base_node_container) +{ + base_node_container->u.base.next = + tb->base_nodes_to_free_list; + tb->base_nodes_to_free_list = base_node_container; +} + +static void catree_deque_base_node_from_free_list(DbTableCATree *tb) +{ + if (tb->base_nodes_to_free_list == NULL) { + return; /* List empty */ + } else { + DbTableCATreeNode *first = tb->base_nodes_to_free_list; + tb->base_nodes_to_free_list = first->u.base.next; + } +} + +static DbTableCATreeNode *catree_first_base_node_from_free_list( + DbTableCATree *tb) +{ + return 
tb->base_nodes_to_free_list; +} + +static SWord do_free_routing_nodes_catree_cont(DbTableCATree *tb, SWord num_left) +{ + DbTableCATreeNode *root; + DbTableCATreeNode *p; + for (;;) { + root = POP_NODE(&tb->free_stack_rnodes); + if (root == NULL) break; + else if(root->is_base_node) { + catree_add_base_node_to_free_list(tb, root); + break; + } + for (;;) { + if ((GET_LEFT(root) != NULL) && + (p = GET_LEFT(root))->is_base_node) { + SET_LEFT(root, NULL); + catree_add_base_node_to_free_list(tb, p); + } else if ((GET_RIGHT(root) != NULL) && + (p = GET_RIGHT(root))->is_base_node) { + SET_RIGHT(root, NULL); + catree_add_base_node_to_free_list(tb, p); + } else if ((p = GET_LEFT(root)) != NULL) { + SET_LEFT(root, NULL); + PUSH_NODE(&tb->free_stack_rnodes, root); + root = p; + } else if ((p = GET_RIGHT(root)) != NULL) { + SET_RIGHT(root, NULL); + PUSH_NODE(&tb->free_stack_rnodes, root); + root = p; + } else { + free_catree_route_node(tb, root); + if (--num_left >= 0) { + break; + } else { + return num_left; /* Done enough for now */ + } + } + } + } + return num_left; +} + +static SWord do_free_base_node_cont(DbTableCATree *tb, SWord num_left) +{ + TreeDbTerm *root; + TreeDbTerm *p; + DbTableCATreeNode *base_node_container = + catree_first_base_node_from_free_list(tb); + for (;;) { + root = POP_NODE(&tb->free_stack_elems); + if (root == NULL) break; + for (;;) { + if ((p = root->left) != NULL) { + root->left = NULL; + PUSH_NODE(&tb->free_stack_elems, root); + root = p; + } else if ((p = root->right) != NULL) { + root->right = NULL; + PUSH_NODE(&tb->free_stack_elems, root); + root = p; + } else { + free_term((DbTable*)tb, root); + if (--num_left >= 0) { + break; + } else { + return num_left; /* Done enough for now */ + } + } + } + } + catree_deque_base_node_from_free_list(tb); + free_catree_base_node(tb, base_node_container); + base_node_container = catree_first_base_node_from_free_list(tb); + if (base_node_container != NULL) { + PUSH_NODE(&tb->free_stack_elems, base_node_container->u.base.root); + } + return num_left; +} + + +/* +** Initialization function +*/ + +void db_initialize_catree(void) +{ + return; +}; + +/* +** Table interface routines (i.e., what's called by the bif's) +*/ + +int db_create_catree(Process *p, DbTable *tbl) +{ + DbTableCATree *tb = &tbl->catree; + DbTableCATreeNode *root = create_base_node(tb, NULL, NIL); + tb->deletion = 0; + tb->base_nodes_to_free_list = NULL; + erts_atomic_init_relb(&(tb->root), (erts_aint_t)root); + return DB_ERROR_NONE; +} + +static int db_first_catree(Process *p, DbTable *tbl, Eterm *ret) +{ + DbTableCATreeBaseNode *base_node; + int result; + DECLARE_AND_INIT_BASE_NODE_SEARCH_STACKS; + /* Find first base node */ + base_node = &(lock_first_base_node(tbl, &search_stack, &locked_base_nodes_stack)->u.base); + /* Find next base node until non-empty base node is found */ + while (base_node != NULL && base_node->root == NULL) { + base_node = find_and_lock_next_base_node_and_path(tbl, &search_stack_ptr, &search_stack_copy_ptr, locked_base_nodes_stack_ptr); + } + /* Get the return value */ + result = db_first_tree_common(p, tbl, (base_node == NULL ? 
NULL : base_node->root), ret, NULL); + /* Unlock base nodes */ + unlock_and_release_locked_base_node_stack(tbl, locked_base_nodes_stack_ptr); + return result; +} + +static int db_next_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTreeStack next_search_stack; + TreeDbTerm *next_search_stack_array[STACK_NEED]; + DbTableCATreeBaseNode *base_node; + int result = 0; + DECLARE_AND_INIT_BASE_NODE_SEARCH_STACKS; + init_tree_stack(&next_search_stack, next_search_stack_array, 0); + /* Lock base node with key */ + base_node = lock_base_node_with_key(tbl, key, &search_stack, &locked_base_nodes_stack); + /* Continue until key is not >= greatest key in base_node */ + while (base_node != NULL) { + result = db_next_tree_common(p, tbl, base_node->root, key, + ret, &next_search_stack); + if (result != DB_ERROR_NONE || *ret != am_EOT) { + break; + } + base_node = + find_and_lock_next_base_node_and_path(tbl, + &search_stack_ptr, + &search_stack_copy_ptr, + locked_base_nodes_stack_ptr); + } + unlock_and_release_locked_base_node_stack(tbl, &locked_base_nodes_stack); + return result; +} + +static int db_last_catree(Process *p, DbTable *tbl, Eterm *ret) +{ + DbTableCATreeNode *base = merge_to_one_locked_base_node(&tbl->catree); + int result = db_last_tree_common(p, tbl, base->u.base.root, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; + +} + +static int db_prev_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTreeStack stack; + TreeDbTerm * stack_array[STACK_NEED]; + int result; + DbTableCATreeNode *base; + init_tree_stack(&stack, stack_array, 0); + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_prev_tree_common(p, tbl, base->u.base.root, key, ret, &stack); + wunlock_base_node(&(base->u.base)); + return result; +} + +#define ERL_DB_CATREE_DO_OPERATION_PUT_PARAMS Eterm obj, int key_clash_fail +ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(put, + ERL_DB_CATREE_DO_OPERATION_PUT_PARAMS, + db_put_tree_common(&tb->common, + &base_node->root, + obj, + key_clash_fail, + NULL);) + +static int db_put_catree(DbTable *tbl, Eterm obj, int key_clash_fail) +{ + DbTableCATree *tb = &tbl->catree; + Eterm key = GETKEY(&tb->common, tuple_val(obj)); + return erl_db_catree_do_operation_put(tb, + key, + obj, + key_clash_fail); +} + +#define ERL_DB_CATREE_DO_OPERATION_GET_PARAMS Process *p, Eterm *ret +ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(get, + ERL_DB_CATREE_DO_OPERATION_GET_PARAMS, + db_get_tree_common(p, &tb->common, + base_node->root, + key, ret, NULL);) + +static int db_get_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTableCATree *tb = &tbl->catree; + return erl_db_catree_do_operation_get(tb, key, p, ret); +} + +#define ERL_DB_CATREE_DO_OPERATION_MEMBER_PARAMS Eterm *ret +ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(member, + ERL_DB_CATREE_DO_OPERATION_MEMBER_PARAMS, + db_member_tree_common(&tb->common, + base_node->root, + key, + ret, + NULL);) + +static int db_member_catree(DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTableCATree *tb = &tbl->catree; + return erl_db_catree_do_operation_member(tb, key, ret); +} + +#define ERL_DB_CATREE_DO_OPERATION_GET_ELEMENT_PARAMS Process *p, int ndex, Eterm *ret +ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(get_element, + ERL_DB_CATREE_DO_OPERATION_GET_ELEMENT_PARAMS, + db_get_element_tree_common(p, &tb->common, + base_node->root, + key, ndex, + ret, NULL)) + +static int db_get_element_catree(Process *p, DbTable *tbl, + Eterm key, int ndex, Eterm *ret) +{ + DbTableCATree *tb = &tbl->catree; + 
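+    /* erl_db_catree_do_operation_get_element() is generated by the
+     * ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(get_element, ...)
+     * invocation above. */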
return erl_db_catree_do_operation_get_element(tb, key, p, ndex, ret); +} + +#define ERL_DB_CATREE_DO_OPERATION_ERASE_PARAMS DbTable *tbl, Eterm *ret +ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(erase, + ERL_DB_CATREE_DO_OPERATION_ERASE_PARAMS, + db_erase_tree_common(tbl, + &base_node->root, + key, + ret, + NULL);) + +static int db_erase_catree(DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTableCATree *tb = &tbl->catree; + return erl_db_catree_do_operation_erase(tb, key, tbl, ret); +} + +#define ERL_DB_CATREE_DO_OPERATION_ERASE_OBJECT_PARAMS DbTable *tbl, Eterm object, Eterm *ret +ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(erase_object, + ERL_DB_CATREE_DO_OPERATION_ERASE_OBJECT_PARAMS, + db_erase_object_tree_common(tbl, + &base_node->root, + object, + ret, + NULL);) + +static int db_erase_object_catree(DbTable *tbl, Eterm object, Eterm *ret) +{ + DbTableCATree *tb = &tbl->catree; + Eterm key = GETKEY(&tb->common, tuple_val(object)); + return erl_db_catree_do_operation_erase_object(tb, key, tbl, object, ret); +} + + +static int db_slot_catree(Process *p, DbTable *tbl, + Eterm slot_term, Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_slot_tree_common(p, tbl, base->u.base.root, + slot_term, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_continue_catree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_continue_tree_common(p, &tbl->common, &base->u.base.root, + continuation, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + + +static int db_select_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, int reverse, Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_tree_common(p, &tbl->common, &base->u.base.root, + tid, pattern, reverse, ret, + NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_count_continue_catree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_count_continue_tree_common(p, &tbl->common, + &base->u.base.root, + continuation, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_count_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_count_tree_common(p, &tbl->common, &base->u.base.root, + tid, pattern, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_chunk_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Sint chunk_size, + int reversed, Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_chunk_tree_common(p, &tbl->common, &base->u.base.root, + tid, pattern, chunk_size, reversed, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_delete_continue_catree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) +{ + DbTreeStack stack; + TreeDbTerm * stack_array[STACK_NEED]; + int result; + DbTableCATreeNode *base; + init_tree_stack(&stack, stack_array, 0); + base = 
merge_to_one_locked_base_node(&tbl->catree); + result = db_select_delete_continue_tree_common(p, tbl, &base->u.base.root, + continuation, ret, &stack); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_delete_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret) +{ + DbTreeStack stack; + TreeDbTerm * stack_array[STACK_NEED]; + int result; + DbTableCATreeNode *base; + init_tree_stack(&stack, stack_array, 0); + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_delete_tree_common(p, tbl, &base->u.base.root, + tid, pattern, ret, &stack); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_replace_catree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_replace_tree_common(p, &tbl->common, + &base->u.base.root, + tid, pattern, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + +static int db_select_replace_continue_catree(Process *p, DbTable *tbl, + Eterm continuation, Eterm *ret) +{ + int result; + DbTableCATreeNode *base; + base = merge_to_one_locked_base_node(&tbl->catree); + result = db_select_replace_continue_tree_common(p, &tbl->common, + &base->u.base.root, + continuation, ret, NULL); + wunlock_base_node(&(base->u.base)); + return result; +} + +#define ERL_DB_CATREE_DO_OPERATION_TAKE_PARAMS Process *p, Eterm *ret, DbTable *tbl +ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(take, + ERL_DB_CATREE_DO_OPERATION_TAKE_PARAMS, + db_take_tree_common(p, tbl, + &base_node->root, + key, ret, NULL);) + +static int db_take_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTableCATree *tb = &tbl->catree; + return erl_db_catree_do_operation_take(tb, key, p, ret, tbl); +} + +/* +** Other interface routines (not directly coupled to one bif) +*/ + + +/* Display tree contents (for dump) */ +static void db_print_catree(fmtfn_t to, void *to_arg, + int show, DbTable *tbl) +{ + DbTableCATreeNode *base = merge_to_one_locked_base_node(&tbl->catree); + db_print_tree_common(to, to_arg, show, base->u.base.root, tbl); + wunlock_base_node(&(base->u.base)); +} + +/* Release all memory occupied by a single table */ +static int db_free_table_catree(DbTable *tbl) +{ + while (db_free_table_continue_catree(tbl, ERTS_SWORD_MAX) < 0) + ; + return 1; +} + +static SWord db_free_table_continue_catree(DbTable *tbl, SWord reds) +{ + DbTableCATreeNode *first_base_node; + DbTableCATree *tb = &tbl->catree; + if (!tb->deletion) { + tb->deletion = 1; + tb->free_stack_elems.array = + erts_db_alloc(ERTS_ALC_T_DB_STK, + (DbTable *) tb, + sizeof(TreeDbTerm *) * STACK_NEED); + tb->free_stack_elems.pos = 0; + tb->free_stack_elems.slot = 0; + tb->free_stack_rnodes.array = + erts_db_alloc(ERTS_ALC_T_DB_STK, + (DbTable *) tb, + sizeof(DbTableCATreeNode *) * STACK_NEED); + tb->free_stack_rnodes.pos = 0; + tb->free_stack_rnodes.size = STACK_NEED; + PUSH_NODE(&tb->free_stack_rnodes, GET_ROOT(tb)); + tb->is_routing_nodes_freed = 0; + tb->base_nodes_to_free_list = NULL; + } + if ( ! 
tb->is_routing_nodes_freed ) {
+        reds = do_free_routing_nodes_catree_cont(tb, reds);
+        if (reds < 0) {
+            return reds; /* Not finished */
+        } else {
+            tb->is_routing_nodes_freed = 1; /* Ready with the routing nodes */
+            first_base_node = catree_first_base_node_from_free_list(tb);
+            PUSH_NODE(&tb->free_stack_elems, first_base_node->u.base.root);
+        }
+    }
+    while (catree_first_base_node_from_free_list(tb) != NULL) {
+        reds = do_free_base_node_cont(tb, reds);
+        if (reds < 0) {
+            return reds; /* Continue later */
+        }
+    }
+    /* Time to free the main structure */
+    erts_db_free(ERTS_ALC_T_DB_STK,
+                 (DbTable *) tb,
+                 (void *) tb->free_stack_elems.array,
+                 sizeof(TreeDbTerm *) * STACK_NEED);
+    erts_db_free(ERTS_ALC_T_DB_STK,
+                 (DbTable *) tb,
+                 (void *) tb->free_stack_rnodes.array,
+                 sizeof(DbTableCATreeNode *) * STACK_NEED);
+    return 1;
+}
+
+static SWord db_delete_all_objects_catree(Process* p, DbTable* tbl, SWord reds)
+{
+    reds = db_free_table_continue_catree(tbl, reds);
+    if (reds < 0)
+        return reds;
+    db_create_catree(p, tbl);
+    erts_atomic_set_nob(&tbl->catree.common.nitems, 0);
+    return reds;
+}
+
+
+static void db_foreach_offheap_catree(DbTable *tbl,
+                                      void (*func)(ErlOffHeap *, void *),
+                                      void *arg)
+{
+    DbTableCATreeNode *base = merge_to_one_locked_base_node(&tbl->catree);
+    db_foreach_offheap_tree_common(base->u.base.root, func, arg);
+    wunlock_base_node(&(base->u.base));
+}
+
+static int db_lookup_dbterm_catree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+                                   DbUpdateHandle *handle)
+{
+    DbTableCATree *tb = &tbl->catree;
+    int res;
+    ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(wlock_base_node,wunlock_base_node);
+    res = db_lookup_dbterm_tree_common(p, tbl, &base_node->root, key, obj, handle, NULL);
+    if (res == 0) {
+        ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART;
+    } else {
+        /* db_finalize_dbterm_catree will unlock */
+        handle->lck = prev_node;
+        handle->lck2 = current_node;
+        handle->current_level = current_level;
+    }
+    return res;
+}
+
+static void db_finalize_dbterm_catree(int cret, DbUpdateHandle *handle)
+{
+    DbTableCATree *tb = &(handle->tb->catree);
+    DbTableCATreeNode *prev_node = handle->lck;
+    DbTableCATreeNode *current_node = handle->lck2;
+    int current_level = handle->current_level;
+    DbTableCATreeBaseNode *base_node = &current_node->u.base;
+    db_finalize_dbterm_tree_common(cret, handle, NULL);
+    ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART;
+    return;
+}
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+static void erts_lcnt_enable_db_catree_lock_count_helper(DbTableCATree *tb,
+                                                         DbTableCATreeNode *node,
+                                                         int enable)
+{
+    erts_lcnt_ref_t *lcnt_ref;
+    erts_lock_flags_t lock_type;
+    if (node->is_base_node) {
+        lcnt_ref = &GET_BASE_NODE_LOCK(node)->lcnt;
+        lock_type = ERTS_LOCK_TYPE_RWMUTEX;
+    } else {
+        erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_LEFT(node), enable);
+        erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_RIGHT(node), enable);
+        lcnt_ref = &GET_ROUTE_NODE_LOCK(node)->lcnt;
+        lock_type = ERTS_LOCK_TYPE_MUTEX;
+    }
+    if (enable) {
+        erts_lcnt_install_new_lock_info(lcnt_ref, "db_hash_slot", tb->common.the_name,
+                                        lock_type | ERTS_LOCK_FLAGS_CATEGORY_DB);
+    } else {
+        erts_lcnt_uninstall(lcnt_ref);
+    }
+}
+
+void erts_lcnt_enable_db_catree_lock_count(DbTableCATree *tb, int enable)
+{
+    erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_ROOT(tb), enable);
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
+
+#ifdef HARDDEBUG
+
+/*
+ * Not called, but kept as it might come into use
+ */
+static inline int
my_check_table_tree(TreeDbTerm *t) +{ + int lh, rh; + if (t == NULL) + return 0; + lh = my_check_table_tree(t->left); + rh = my_check_table_tree(t->right); + if ((rh - lh) != t->balance) { + erts_fprintf(stderr, "Invalid tree balance for this node:\n"); + erts_fprintf(stderr,"balance = %d, left = 0x%08X, right = 0x%08X\n", + t->balance, t->left, t->right); + erts_fprintf(stderr,"\nDump:\n---------------------------------\n"); + erts_fprintf(stderr,"\n---------------------------------\n"); + abort(); + } + return ((rh > lh) ? rh : lh) + 1; +} + +#endif diff --git a/erts/emulator/beam/erl_db_catree.h b/erts/emulator/beam/erl_db_catree.h new file mode 100644 index 0000000000..f9c0784289 --- /dev/null +++ b/erts/emulator/beam/erl_db_catree.h @@ -0,0 +1,104 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 1998-2016. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * %CopyrightEnd% + */ + +/* + * Description: Implementation of ETS ordered_set table type with + * fine-grained synchronization. + * + * Author: Kjell Winblad + * + * "erl_db_catree.c" contains more details about the implementation. + * + */ + +#ifndef _DB_CATREE_H +#define _DB_CATREE_H + +struct DbTableCATreeNode; + +typedef struct { + Eterm term; + struct erl_off_heap_header* oh; + Uint size; + Eterm heap[1]; +} DbRouteKey; + +typedef struct { + erts_rwmtx_t lock; /* The lock for this base node */ + Sint lock_statistics; + int is_valid; /* If this base node is still valid */ + TreeDbTerm *root; /* The root of the sequential tree */ + ErtsThrPrgrLaterOp free_item; /* Used when freeing using thread progress */ + struct DbTableCATreeNode * next; /* Used when gradually deleting */ + +#ifdef ERTS_ENABLE_LOCK_CHECK + DbRouteKey lc_key; +#endif + char end_of_struct__; +} DbTableCATreeBaseNode; + +typedef struct { +#ifdef ERTS_ENABLE_LOCK_CHECK + Sint lc_order; +#endif + ErtsThrPrgrLaterOp free_item; /* Used when freeing using thread progress */ + erts_mtx_t lock; /* Used when joining route nodes */ + int is_valid; /* If this route node is still valid */ + erts_atomic_t left; + erts_atomic_t right; + DbRouteKey key; +} DbTableCATreeRouteNode; + +typedef struct DbTableCATreeNode { + int is_base_node; + union { + DbTableCATreeRouteNode route; + DbTableCATreeBaseNode base; + } u; +} DbTableCATreeNode; + +typedef struct { + Uint pos; /* Current position on stack */ + Uint size; /* The size of the stack array */ + DbTableCATreeNode** array; /* The stack */ +} CATreeNodeStack; + +typedef struct db_table_catree { + DbTableCommon common; + + /* CA Tree-specific fields */ + erts_atomic_t root; /* The tree root (DbTableCATreeNode*) */ + Uint deletion; /* Being deleted */ + DbTreeStack free_stack_elems;/* Used for deletion ...*/ + CATreeNodeStack free_stack_rnodes; + DbTableCATreeNode *base_nodes_to_free_list; + int is_routing_nodes_freed; +} DbTableCATree; + +void db_initialize_catree(void); + +int db_create_catree(Process *p, DbTable *tbl); + + +#ifdef ERTS_ENABLE_LOCK_COUNT +void 
erts_lcnt_enable_db_catree_lock_count(DbTableCATree *tb, int enable); +#endif + +#endif /* _DB_CATREE_H */ diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 752d3ae3a8..61806876a7 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -2731,13 +2731,9 @@ static int free_seg(DbTableHash *tb, int free_records) * sure no lingering threads are still hanging in BUCKET macro * with an old segtab pointer. */ - Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs); - ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est)); - ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0); - erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab, - est, - &est->lop, - sz); + erts_schedule_db_free(&tb->common, dealloc_ext_segtab, + est, &est->lop, + SIZEOF_EXT_SEGTAB(est->nsegs)); } else erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est, diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index 45e4be2426..c6f1a0fc6d 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -48,34 +48,13 @@ #include "erl_binary.h" #include "erl_db_tree.h" +#include "erl_db_tree_util.h" #define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos)) #define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems)) -/* -** A stack of this size is enough for an AVL tree with more than -** 0xFFFFFFFF elements. May be subject to change if -** the datatype of the element counter is changed to a 64 bit integer. -** The Maximal height of an AVL tree is calculated as: -** h(n) <= 1.4404 * log(n + 2) - 0.328 -** Where n denotes the number of nodes, h(n) the height of the tree -** with n nodes and log is the binary logarithm. -*/ - -#define STACK_NEED 50 #define TREE_MAX_ELEMENTS 0xFFFFFFFFUL -#define PUSH_NODE(Dtt, Tdt) \ - ((Dtt)->array[(Dtt)->pos++] = Tdt) - -#define POP_NODE(Dtt) \ - (((Dtt)->pos) ? \ - (Dtt)->array[--((Dtt)->pos)] : NULL) - -#define TOP_NODE(Dtt) \ - ((Dtt->pos) ? \ - (Dtt)->array[(Dtt)->pos - 1] : NULL) - #define TOPN_NODE(Dtt, Pos) \ (((Pos) < Dtt->pos) ? \ (Dtt)->array[(Dtt)->pos - ((Pos) + 1)] : NULL) @@ -89,9 +68,9 @@ /* Obtain table static stack if available. NULL if not. ** Must be released with release_stack() */ -static DbTreeStack* get_static_stack(DbTableTree* tb) +ERTS_INLINE static DbTreeStack* get_static_stack(DbTableTree* tb) { - if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { + if (tb != NULL && !erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { return &tb->static_stack; } return NULL; @@ -100,13 +79,14 @@ static DbTreeStack* get_static_stack(DbTableTree* tb) /* Obtain static stack if available, otherwise empty dynamic stack. 
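** The tb argument is the table that gets charged for a dynamically
** allocated stack. stack_container may be NULL (as it is for CA tree
** base nodes), in which case a dynamic stack is always allocated.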
** Must be released with release_stack() */ -static DbTreeStack* get_any_stack(DbTableTree* tb) +static DbTreeStack* get_any_stack(DbTable* tb, DbTableTree* stack_container) { DbTreeStack* stack; - if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { - return &tb->static_stack; + if (stack_container != NULL && + !erts_atomic_xchg_acqb(&stack_container->is_stack_busy, 1)) { + return &stack_container->static_stack; } - stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb, + stack = erts_db_alloc(ERTS_ALC_T_DB_STK, tb, sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED); stack->pos = 0; stack->slot = 0; @@ -114,62 +94,59 @@ static DbTreeStack* get_any_stack(DbTableTree* tb) return stack; } -static void release_stack(DbTableTree* tb, DbTreeStack* stack) +static void release_stack(DbTable* tb, DbTableTree* stack_container, DbTreeStack* stack) { - if (stack == &tb->static_stack) { - ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1); - erts_atomic_set_relb(&tb->is_stack_busy, 0); + if (stack_container != NULL && stack == &stack_container->static_stack) { + ASSERT(erts_atomic_read_nob(&stack_container->is_stack_busy) == 1); + erts_atomic_set_relb(&stack_container->is_stack_busy, 0); } else { - erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb, + erts_db_free(ERTS_ALC_T_DB_STK, tb, (void *) stack, sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED); } } -static ERTS_INLINE void reset_static_stack(DbTableTree* tb) +static ERTS_INLINE void reset_stack(DbTreeStack* stack) { - tb->static_stack.pos = 0; - tb->static_stack.slot = 0; + if (stack != NULL) { + stack->pos = 0; + stack->slot = 0; + } } -static ERTS_INLINE void free_term(DbTableTree *tb, TreeDbTerm* p) +static ERTS_INLINE void reset_static_stack(DbTableTree* tb) { - db_free_term((DbTable*)tb, p, offsetof(TreeDbTerm, dbterm)); + if (tb != NULL) { + reset_stack(&tb->static_stack); + } } -static ERTS_INLINE TreeDbTerm* new_dbterm(DbTableTree *tb, Eterm obj) +static ERTS_INLINE TreeDbTerm* new_dbterm(DbTableCommon *tb, Eterm obj) { TreeDbTerm* p; - if (tb->common.compress) { - p = db_store_term_comp(&tb->common, NULL, offsetof(TreeDbTerm,dbterm), obj); + if (tb->compress) { + p = db_store_term_comp(tb, NULL, offsetof(TreeDbTerm,dbterm), obj); } else { - p = db_store_term(&tb->common, NULL, offsetof(TreeDbTerm,dbterm), obj); + p = db_store_term(tb, NULL, offsetof(TreeDbTerm,dbterm), obj); } return p; } -static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old, +static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableCommon *tb, TreeDbTerm* old, Eterm obj) { TreeDbTerm* p; ASSERT(old != NULL); - if (tb->common.compress) { - p = db_store_term_comp(&tb->common, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj); + if (tb->compress) { + p = db_store_term_comp(tb, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj); } else { - p = db_store_term(&tb->common, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj); + p = db_store_term(tb, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj); } return p; } /* -** Some macros for "direction stacks" -*/ -#define DIR_LEFT 0 -#define DIR_RIGHT 1 -#define DIR_END 2 - -/* * Number of records to delete before trapping. 
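 * That is, a select_delete traverses at most this many records before
 * it yields by trapping to itself.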
*/ #define DELETE_RECORD_LIMIT 12000 @@ -262,7 +239,9 @@ struct select_count_context { */ struct select_delete_context { Process *p; - DbTableTree *tb; + DbTableCommon *tb; + TreeDbTerm **root; + DbTreeStack *stack; Uint accum; Binary *mp; Eterm end_condition; @@ -277,7 +256,8 @@ struct select_delete_context { */ struct select_replace_context { Process *p; - DbTableTree *tb; + DbTableCommon *tb; + TreeDbTerm **root; Binary *mp; Eterm end_condition; Eterm *lastobj; @@ -292,73 +272,82 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete /* ** Forward declarations */ -static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key); -static TreeDbTerm *linkout_object_tree(DbTableTree *tb, - Eterm object); +static TreeDbTerm *linkout_tree(DbTableCommon *tb, TreeDbTerm **root, + Eterm key, DbTreeStack *stack); +static TreeDbTerm *linkout_object_tree(DbTableCommon *tb, TreeDbTerm **root, + Eterm object, DbTableTree *stack); static SWord do_free_tree_continue(DbTableTree *tb, SWord reds); -static void free_term(DbTableTree *tb, TreeDbTerm* p); -static int balance_left(TreeDbTerm **this); -static int balance_right(TreeDbTerm **this); +static void free_term(DbTable *tb, TreeDbTerm* p); +int tree_balance_left(TreeDbTerm **this); +int tree_balance_right(TreeDbTerm **this); static int delsub(TreeDbTerm **this); -static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot); -static TreeDbTerm *find_node(DbTableTree *tb, Eterm key); -static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key); -static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack*, TreeDbTerm *this); -static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key); -static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key); -static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*, - Eterm key); -static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack*, - Eterm key); -static void traverse_backwards(DbTableTree *tb, +static TreeDbTerm *slot_search(Process *p, TreeDbTerm *root, Sint slot, + DbTable *tb, DbTableTree *stack_container); +static TreeDbTerm *find_node(DbTableCommon *tb, TreeDbTerm *root, + Eterm key, DbTableTree *stack_container); +static TreeDbTerm **find_node2(DbTableCommon *tb, TreeDbTerm **root, Eterm key); +static TreeDbTerm **find_ptr(DbTableCommon *tb, TreeDbTerm **root, + DbTreeStack *stack, TreeDbTerm *this); +static TreeDbTerm *find_next(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key); +static TreeDbTerm *find_prev(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key); +static TreeDbTerm *find_next_from_pb_key(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key); +static TreeDbTerm *find_prev_from_pb_key(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key); +static void traverse_backwards(DbTableCommon *tb, + TreeDbTerm **root, DbTreeStack*, Eterm lastkey, - int (*doit)(DbTableTree *tb, + int (*doit)(DbTableCommon *, TreeDbTerm *, void *, int), void *context); -static void traverse_forward(DbTableTree *tb, +static void traverse_forward(DbTableCommon *tb, + TreeDbTerm **root, DbTreeStack*, Eterm lastkey, - int (*doit)(DbTableTree *tb, + int (*doit)(DbTableCommon *tb, TreeDbTerm *, void *, int), void *context); -static void traverse_update_backwards(DbTableTree *tb, +static void traverse_update_backwards(DbTableCommon *tb, + TreeDbTerm **root, DbTreeStack*, Eterm lastkey, - int (*doit)(DbTableTree *tb, + int (*doit)(DbTableCommon *tb, TreeDbTerm **, // out void 
*, int), void *context); -static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret, - Eterm *partly_bound_key); +static int key_given(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern, + TreeDbTerm ***ret, Eterm *partly_bound); static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key); static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done); -static int analyze_pattern(DbTableTree *tb, Eterm pattern, +static int analyze_pattern(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern, extra_match_validator_t extra_validator, /* Optional callback */ struct mp_info *mpi); -static int doit_select(DbTableTree *tb, +static int doit_select(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward); -static int doit_select_count(DbTableTree *tb, +static int doit_select_count(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward); -static int doit_select_chunk(DbTableTree *tb, +static int doit_select_chunk(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward); -static int doit_select_delete(DbTableTree *tb, +static int doit_select_delete(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward); -static int doit_select_replace(DbTableTree *tb, +static int doit_select_replace(DbTableCommon *tb, TreeDbTerm **this_ptr, void *ptr, int forward); @@ -508,18 +497,18 @@ int db_create_tree(Process *p, DbTable *tbl) return DB_ERROR_NONE; } -static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret) +int db_first_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, + Eterm *ret, DbTableTree *stack_container) { - DbTableTree *tb = &tbl->tree; DbTreeStack* stack; TreeDbTerm *this; - if (( this = tb->root ) == NULL) { + if (( this = root ) == NULL) { *ret = am_EOT; return DB_ERROR_NONE; } /* Walk down the tree to the left */ - if ((stack = get_static_stack(tb)) != NULL) { + if ((stack = get_static_stack(stack_container)) != NULL) { stack->pos = stack->slot = 0; } while (this->left != NULL) { @@ -529,23 +518,27 @@ static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret) if (stack) { PUSH_NODE(stack, this); stack->slot = 1; - release_stack(tb,stack); + release_stack(tbl,stack_container,stack); } *ret = db_copy_key(p, tbl, &this->dbterm); return DB_ERROR_NONE; } -static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret) { DbTableTree *tb = &tbl->tree; - DbTreeStack* stack; + return db_first_tree_common(p, tbl, tb->root, ret, tb); +} + +int db_next_tree_common(Process *p, DbTable *tbl, + TreeDbTerm *root, Eterm key, + Eterm *ret, DbTreeStack* stack) +{ TreeDbTerm *this; if (is_atom(key) && key == am_EOT) return DB_ERROR_BADKEY; - stack = get_any_stack(tb); - this = find_next(tb, stack, key); - release_stack(tb,stack); + this = find_next(&tbl->common, root, stack, key); if (this == NULL) { *ret = am_EOT; return DB_ERROR_NONE; @@ -554,18 +547,27 @@ static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) return DB_ERROR_NONE; } -static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret) +static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) { DbTableTree *tb = &tbl->tree; + DbTreeStack* stack = get_any_stack(tbl, tb); + int ret_val = db_next_tree_common(p, tbl, tb->root, key, ret, stack); + release_stack(tbl,tb,stack); + return ret_val; +} + +int db_last_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, + Eterm *ret, DbTableTree *stack_container) +{ TreeDbTerm *this; DbTreeStack* stack; - if (( this = tb->root ) == NULL) { + if (( this = 
root ) == NULL) { *ret = am_EOT; return DB_ERROR_NONE; } /* Walk down the tree to the right */ - if ((stack = get_static_stack(tb)) != NULL) { + if ((stack = get_static_stack(stack_container)) != NULL) { stack->pos = stack->slot = 0; } while (this->right != NULL) { @@ -574,24 +576,27 @@ static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret) } if (stack) { PUSH_NODE(stack, this); - stack->slot = NITEMS(tb); - release_stack(tb,stack); + stack->slot = NITEMS(tbl); + release_stack(tbl,stack_container,stack); } *ret = db_copy_key(p, tbl, &this->dbterm); return DB_ERROR_NONE; } -static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_last_tree_common(p, tbl, tb->root, ret, tb); +} + +int db_prev_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, Eterm key, + Eterm *ret, DbTreeStack* stack) +{ TreeDbTerm *this; - DbTreeStack* stack; if (is_atom(key) && key == am_EOT) return DB_ERROR_BADKEY; - stack = get_any_stack(tb); - this = find_prev(tb, stack, key); - release_stack(tb,stack); + this = find_prev(&tbl->common, root, stack, key); if (this == NULL) { *ret = am_EOT; return DB_ERROR_NONE; @@ -600,25 +605,30 @@ static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) return DB_ERROR_NONE; } -static ERTS_INLINE Sint cmp_key(DbTableTree* tb, Eterm key, TreeDbTerm* obj) { - return CMP(key, GETKEY(tb,obj->dbterm.tpl)); +static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTableTree *tb = &tbl->tree; + DbTreeStack* stack = get_any_stack(tbl, tb); + int res = db_prev_tree_common(p, tbl, tb->root, key, ret, stack); + release_stack(tbl,tb,stack); + return res; } -static ERTS_INLINE int cmp_key_eq(DbTableTree* tb, Eterm key, TreeDbTerm* obj) { +static ERTS_INLINE int cmp_key_eq(DbTableCommon* tb, Eterm key, TreeDbTerm* obj) { Eterm obj_key = GETKEY(tb,obj->dbterm.tpl); return is_same(key, obj_key) || CMP(key, obj_key) == 0; } -static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail) +int db_put_tree_common(DbTableCommon *tb, TreeDbTerm **root, Eterm obj, + int key_clash_fail, DbTableTree *stack_container) { - DbTableTree *tb = &tbl->tree; /* Non recursive insertion in AVL tree, building our own stack */ TreeDbTerm **tstack[STACK_NEED]; int tpos = 0; int dstack[STACK_NEED+1]; int dpos = 0; int state = 0; - TreeDbTerm **this = &tb->root; + TreeDbTerm **this = root; Sint c; Eterm key; int dir; @@ -626,14 +636,14 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail) key = GETKEY(tb, tuple_val(obj)); - reset_static_stack(tb); + reset_static_stack(stack_container); dstack[dpos++] = DIR_END; for (;;) if (!*this) { /* Found our place */ state = 1; - if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) { - erts_atomic_dec_nob(&tb->common.nitems); + if (erts_atomic_inc_read_nob(&tb->nitems) >= TREE_MAX_ELEMENTS) { + erts_atomic_dec_nob(&tb->nitems); return DB_ERROR_SYSRES; } *this = new_dbterm(tb, obj); @@ -724,9 +734,15 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail) return DB_ERROR_NONE; } -static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail) { DbTableTree *tb = &tbl->tree; + return db_put_tree_common(&tb->common, &tb->root, obj, key_clash_fail, tb); +} + +int db_get_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key, + Eterm *ret, DbTableTree *stack_container) +{ 
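+    /* The tree root is passed in explicitly (rather than taken from the
+     * table) so that the CA tree can run the same lookup against an
+     * individual base node's subtree. */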
Eterm copy; Eterm *hp, *hend; TreeDbTerm *this; @@ -737,13 +753,13 @@ static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) * The list created around it is purely for interface conformance. */ - this = find_node(tb,key); + this = find_node(tb,root,key,stack_container); if (this == NULL) { *ret = NIL; } else { hp = HAlloc(p, this->dbterm.size + 2); hend = hp + this->dbterm.size + 2; - copy = db_copy_object_from_ets(&tb->common, &this->dbterm, &hp, &MSO(p)); + copy = db_copy_object_from_ets(tb, &this->dbterm, &hp, &MSO(p)); *ret = CONS(hp, copy, NIL); hp += 2; HRelease(p,hend,hp); @@ -751,18 +767,28 @@ static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) return DB_ERROR_NONE; } -static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret) +static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_get_tree_common(p, &tb->common, tb->root, key, ret, tb); +} - *ret = (find_node(tb,key) == NULL) ? am_false : am_true; +int db_member_tree_common(DbTableCommon *tb, TreeDbTerm *root, Eterm key, Eterm *ret, + DbTableTree *stack_container) +{ + *ret = (find_node(tb,root,key,stack_container) == NULL) ? am_false : am_true; return DB_ERROR_NONE; } -static int db_get_element_tree(Process *p, DbTable *tbl, - Eterm key, int ndex, Eterm *ret) +static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_member_tree_common(&tb->common, tb->root, key, ret, tb); +} + +int db_get_element_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key, + int ndex, Eterm *ret, DbTableTree *stack_container) +{ /* * Look the node up: */ @@ -776,49 +802,68 @@ static int db_get_element_tree(Process *p, DbTable *tbl, * around the element here either. 
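 * Note that stack_container may be NULL here; the CA tree passes NULL
 * when it operates on a single base node (see erl_db_catree.c).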
*/ - this = find_node(tb,key); + this = find_node(tb,root,key,stack_container); if (this == NULL) { return DB_ERROR_BADKEY; } else { if (ndex > arityval(this->dbterm.tpl[0])) { return DB_ERROR_BADPARAM; } - *ret = db_copy_element_from_ets(&tb->common, p, &this->dbterm, ndex, &hp, 0); + *ret = db_copy_element_from_ets(tb, p, &this->dbterm, ndex, &hp, 0); } return DB_ERROR_NONE; } -static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret) +static int db_get_element_tree(Process *p, DbTable *tbl, + Eterm key, int ndex, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_get_element_tree_common(p, &tb->common, tb->root, key, + ndex, ret, tb); +} + +int db_erase_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm key, Eterm *ret, + DbTreeStack *stack /* NULL if no static stack */) +{ TreeDbTerm *res; *ret = am_true; - if ((res = linkout_tree(tb, key)) != NULL) { - free_term(tb, res); + if ((res = linkout_tree(&tbl->common, root,key, stack)) != NULL) { + free_term(tbl, res); } return DB_ERROR_NONE; } -static int db_erase_object_tree(DbTable *tbl, Eterm object, Eterm *ret) +static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_erase_tree_common(tbl, &tb->root, key, ret, &tb->static_stack); +} + +int db_erase_object_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm object, + Eterm *ret, DbTableTree *stack_container) +{ TreeDbTerm *res; *ret = am_true; - if ((res = linkout_object_tree(tb, object)) != NULL) { - free_term(tb, res); + if ((res = linkout_object_tree(&tbl->common, root, object, stack_container)) != NULL) { + free_term(tbl, res); } return DB_ERROR_NONE; } - -static int db_slot_tree(Process *p, DbTable *tbl, - Eterm slot_term, Eterm *ret) +static int db_erase_object_tree(DbTable *tbl, Eterm object, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_erase_object_tree_common(tbl, &tb->root, object, ret, tb); +} + +int db_slot_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, + Eterm slot_term, Eterm *ret, + DbTableTree *stack_container) +{ Sint slot; TreeDbTerm *st; Eterm *hp, *hend; @@ -834,10 +879,10 @@ static int db_slot_tree(Process *p, DbTable *tbl, if (is_not_small(slot_term) || ((slot = signed_val(slot_term)) < 0) || - (slot > NITEMS(tb))) + (slot > NITEMS(tbl))) return DB_ERROR_BADPARAM; - if (slot == NITEMS(tb)) { + if (slot == NITEMS(tbl)) { *ret = am_EOT; return DB_ERROR_NONE; } @@ -847,20 +892,27 @@ static int db_slot_tree(Process *p, DbTable *tbl, * are counted from 1 and up. */ ++slot; - st = slot_search(p, tb, slot); + st = slot_search(p, root, slot, tbl, stack_container); if (st == NULL) { *ret = am_false; return DB_ERROR_UNSPEC; } hp = HAlloc(p, st->dbterm.size + 2); hend = hp + st->dbterm.size + 2; - copy = db_copy_object_from_ets(&tb->common, &st->dbterm, &hp, &MSO(p)); + copy = db_copy_object_from_ets(&tbl->common, &st->dbterm, &hp, &MSO(p)); *ret = CONS(hp, copy, NIL); hp += 2; HRelease(p,hend,hp); return DB_ERROR_NONE; } +static int db_slot_tree(Process *p, DbTable *tbl, + Eterm slot_term, Eterm *ret) +{ + DbTableTree *tb = &tbl->tree; + return db_slot_tree_common(p, tbl, tb->root, slot_term, ret, tb); +} + static BIF_RETTYPE ets_select_reverse(BIF_ALIST_3) @@ -926,19 +978,14 @@ static BIF_RETTYPE bif_trap3(Export *bif, { BIF_TRAP3(bif, p, p1, p2, p3); } - -/* -** This is called either when the select bif traps or when ets:select/1 -** is called. It does mostly the same as db_select_tree and may in either case -** trap to itself again (via the ets:select/1 bif). 
-** Note that this is common for db_select_tree and db_select_chunk_tree. -*/ -static int db_select_continue_tree(Process *p, - DbTable *tbl, - Eterm continuation, - Eterm *ret) + +int db_select_continue_tree_common(Process *p, + DbTableCommon *tb, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTableTree *stack_container) { - DbTableTree *tb = &tbl->tree; DbTreeStack* stack; struct select_context sc; unsigned sz; @@ -980,26 +1027,26 @@ static int db_select_continue_tree(Process *p, sc.end_condition = NIL; sc.lastobj = NULL; sc.max = 1000; - sc.keypos = tb->common.keypos; + sc.keypos = tb->keypos; sc.chunk_size = chunk_size; reverse = unsigned_val(tptr[7]); sc.got = signed_val(tptr[8]); - stack = get_any_stack(tb); + stack = get_any_stack((DbTable*)tb, stack_container); if (chunk_size) { if (reverse) { - traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc); + traverse_backwards(tb, root, stack, lastkey, &doit_select_chunk, &sc); } else { - traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc); + traverse_forward(tb, root, stack, lastkey, &doit_select_chunk, &sc); } } else { if (reverse) { - traverse_forward(tb, stack, lastkey, &doit_select, &sc); + traverse_forward(tb, root, stack, lastkey, &doit_select, &sc); } else { - traverse_backwards(tb, stack, lastkey, &doit_select, &sc); + traverse_backwards(tb, root, stack, lastkey, &doit_select, &sc); } } - release_stack(tb,stack); + release_stack((DbTable*)tb,stack_container,stack); BUMP_REDS(p, 1000 - sc.max); @@ -1082,13 +1129,28 @@ static int db_select_continue_tree(Process *p, #undef RET_TO_BIF } + +/* +** This is called either when the select bif traps or when ets:select/1 +** is called. It does mostly the same as db_select_tree and may in either case +** trap to itself again (via the ets:select/1 bif). +** Note that this is common for db_select_tree and db_select_chunk_tree. 
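+** The _common variant operates on an explicitly passed root so that the
+** CA tree can apply a continuation to a merged base node (see
+** db_select_continue_catree() in erl_db_catree.c).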
+*/ +static int db_select_continue_tree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) +{ + DbTableTree *tb = &tbl->tree; + return db_select_continue_tree_common(p, &tb->common, &tb->root, + continuation, ret, tb); +} - -static int db_select_tree(Process *p, DbTable *tbl, Eterm tid, - Eterm pattern, int reverse, Eterm *ret) +int db_select_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, int reverse, Eterm *ret, + DbTableTree *stack_container) { /* Strategy: Traverse backwards to build resulting list from tail to head */ - DbTableTree *tb = &tbl->tree; DbTreeStack* stack; struct select_context sc; struct mp_info mpi; @@ -1117,11 +1179,11 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid, sc.p = p; sc.max = 1000; sc.end_condition = NIL; - sc.keypos = tb->common.keypos; + sc.keypos = tb->keypos; sc.got = 0; sc.chunk_size = 0; - if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(NIL,errcode); } @@ -1138,25 +1200,25 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid, RET_TO_BIF(sc.accum,DB_ERROR_NONE); } - stack = get_any_stack(tb); + stack = get_any_stack((DbTable*)tb,stack_container); if (reverse) { if (mpi.some_limitation) { - if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) { + if ((this = find_prev_from_pb_key(tb, *root, stack, mpi.least)) != NULL) { lastkey = GETKEY(tb, this->dbterm.tpl); } sc.end_condition = mpi.most; } - traverse_forward(tb, stack, lastkey, &doit_select, &sc); + traverse_forward(tb, root, stack, lastkey, &doit_select, &sc); } else { if (mpi.some_limitation) { - if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) { + if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) { lastkey = GETKEY(tb, this->dbterm.tpl); } sc.end_condition = mpi.least; } - traverse_backwards(tb, stack, lastkey, &doit_select, &sc); + traverse_backwards(tb, root, stack, lastkey, &doit_select, &sc); } - release_stack(tb,stack); + release_stack((DbTable*)tb,stack_container,stack); #ifdef HARDDEBUG erts_fprintf(stderr,"Least: %T\n", mpi.least); erts_fprintf(stderr,"Most: %T\n", mpi.most); @@ -1192,16 +1254,21 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid, } - -/* -** This is called either when the select_count bif traps. 
-*/ -static int db_select_count_continue_tree(Process *p, - DbTable *tbl, - Eterm continuation, - Eterm *ret) +static int db_select_tree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, int reverse, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_select_tree_common(p, &tb->common, &tb->root, tid, + pattern, reverse, ret, tb); +} + +int db_select_count_continue_tree_common(Process *p, + DbTableCommon *tb, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTableTree *stack_container) +{ DbTreeStack* stack; struct select_count_context sc; unsigned sz; @@ -1238,16 +1305,16 @@ static int db_select_count_continue_tree(Process *p, sc.end_condition = NIL; sc.lastobj = NULL; sc.max = 1000; - sc.keypos = tb->common.keypos; + sc.keypos = tb->keypos; if (is_big(tptr[5])) { sc.got = big_to_uint32(tptr[5]); } else { sc.got = unsigned_val(tptr[5]); } - stack = get_any_stack(tb); - traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc); - release_stack(tb,stack); + stack = get_any_stack((DbTable*)tb, stack_container); + traverse_backwards(tb, root, stack, lastkey, &doit_select_count, &sc); + release_stack((DbTable*)tb,stack_container,stack); BUMP_REDS(p, 1000 - sc.max); @@ -1285,11 +1352,24 @@ static int db_select_count_continue_tree(Process *p, #undef RET_TO_BIF } - -static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid, - Eterm pattern, Eterm *ret) +/* +** This is called either when the select_count bif traps. +*/ +static int db_select_count_continue_tree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_select_count_continue_tree_common(p, &tb->common, &tb->root, + continuation, ret, tb); +} + + +int db_select_count_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, Eterm *ret, + DbTableTree *stack_container) +{ DbTreeStack* stack; struct select_count_context sc; struct mp_info mpi; @@ -1318,10 +1398,10 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid, sc.p = p; sc.max = 1000; sc.end_condition = NIL; - sc.keypos = tb->common.keypos; + sc.keypos = tb->keypos; sc.got = 0; - if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(NIL,errcode); } @@ -1338,16 +1418,16 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid, RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE); } - stack = get_any_stack(tb); + stack = get_any_stack((DbTable*)tb, stack_container); if (mpi.some_limitation) { - if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) { + if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) { lastkey = GETKEY(tb, this->dbterm.tpl); } sc.end_condition = mpi.least; } - traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc); - release_stack(tb,stack); + traverse_backwards(tb, root, stack, lastkey, &doit_select_count, &sc); + release_stack((DbTable*)tb,stack_container,stack); BUMP_REDS(p, 1000 - sc.max); if (sc.max > 0) { RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE); @@ -1383,12 +1463,20 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid, } -static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid, - Eterm pattern, Sint chunk_size, - int reverse, - Eterm *ret) +static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_select_count_tree_common(p, 
&tb->common, &tb->root, + tid, pattern, ret, tb); +} + + +int db_select_chunk_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, Sint chunk_size, + int reverse, Eterm *ret, + DbTableTree *stack_container) +{ DbTreeStack* stack; struct select_context sc; struct mp_info mpi; @@ -1417,11 +1505,11 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid, sc.p = p; sc.max = 1000; sc.end_condition = NIL; - sc.keypos = tb->common.keypos; + sc.keypos = tb->keypos; sc.got = 0; sc.chunk_size = chunk_size; - if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(NIL,errcode); } @@ -1443,25 +1531,25 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid, } } - stack = get_any_stack(tb); + stack = get_any_stack((DbTable*)tb,stack_container); if (reverse) { if (mpi.some_limitation) { - if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) { + if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) { lastkey = GETKEY(tb, this->dbterm.tpl); } sc.end_condition = mpi.least; } - traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc); + traverse_backwards(tb, root, stack, lastkey, &doit_select_chunk, &sc); } else { if (mpi.some_limitation) { - if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) { + if ((this = find_prev_from_pb_key(tb, *root, stack, mpi.least)) != NULL) { lastkey = GETKEY(tb, this->dbterm.tpl); } sc.end_condition = mpi.most; } - traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc); + traverse_forward(tb, root, stack, lastkey, &doit_select_chunk, &sc); } - release_stack(tb,stack); + release_stack((DbTable*)tb,stack_container,stack); BUMP_REDS(p, 1000 - sc.max); if (sc.max > 0 || sc.got == chunk_size) { @@ -1530,15 +1618,25 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid, } -/* -** This is called when select_delete traps -*/ -static int db_select_delete_continue_tree(Process *p, - DbTable *tbl, - Eterm continuation, - Eterm *ret) +static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Sint chunk_size, + int reverse, + Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_select_chunk_tree_common(p, &tb->common, &tb->root, + tid, pattern, chunk_size, + reverse, ret, tb); +} + + +int db_select_delete_continue_tree_common(Process *p, + DbTable *tbl, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTreeStack* stack) +{ struct select_delete_context sc; unsigned sz; Eterm *hp; @@ -1552,7 +1650,7 @@ static int db_select_delete_continue_tree(Process *p, #define RET_TO_BIF(Term, State) do { \ if (sc.erase_lastterm) { \ - free_term(tb, sc.lastterm); \ + free_term(tbl, sc.lastterm); \ } \ *ret = (Term); \ return State; \ @@ -1571,7 +1669,9 @@ static int db_select_delete_continue_tree(Process *p, mp = erts_db_get_match_prog_binary_unchecked(tptr[4]); sc.p = p; - sc.tb = tb; + sc.tb = &tbl->common; + sc.root = root; + sc.stack = stack; if (is_big(tptr[5])) { sc.accum = big_to_uint32(tptr[5]); } else { @@ -1580,17 +1680,16 @@ static int db_select_delete_continue_tree(Process *p, sc.mp = mp; sc.end_condition = NIL; sc.max = 1000; - sc.keypos = tb->common.keypos; + sc.keypos = tbl->common.keypos; - ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy)); - traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc); + traverse_backwards(&tbl->common, root, stack, lastkey, 
&doit_select_delete, &sc); BUMP_REDS(p, 1000 - sc.max); if (sc.max > 0) { RET_TO_BIF(erts_make_integer(sc.accum, p), DB_ERROR_NONE); } - key = GETKEY(tb, (sc.lastterm)->dbterm.tpl); + key = GETKEY(&tbl->common, (sc.lastterm)->dbterm.tpl); if (end_condition != NIL && cmp_partly_bound(end_condition,key) > 0) { /* done anyway */ RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE); @@ -1620,10 +1719,24 @@ static int db_select_delete_continue_tree(Process *p, #undef RET_TO_BIF } -static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, - Eterm pattern, Eterm *ret) +static int db_select_delete_continue_tree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) { DbTableTree *tb = &tbl->tree; + ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy)); + return db_select_delete_continue_tree_common(p, tbl, &tb->root, + continuation, ret, + &tb->static_stack); +} + +int db_select_delete_tree_common(Process *p, DbTable *tbl, + TreeDbTerm **root, + Eterm tid, Eterm pattern, + Eterm *ret, + DbTreeStack* stack) +{ struct select_delete_context sc; struct mp_info mpi; Eterm lastkey = THE_NON_VALUE; @@ -1641,7 +1754,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, erts_bin_free(mpi.mp); \ } \ if (sc.erase_lastterm) { \ - free_term(tb, sc.lastterm); \ + free_term(tbl, sc.lastterm); \ } \ *ret = (Term); \ return RetVal; \ @@ -1655,10 +1768,12 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, sc.p = p; sc.max = 1000; sc.end_condition = NIL; - sc.keypos = tb->common.keypos; - sc.tb = tb; + sc.keypos = tbl->common.keypos; + sc.tb = &tbl->common; + sc.root = root; + sc.stack = stack; - if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(&tbl->common, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(0,errcode); } @@ -1671,26 +1786,26 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, if (!mpi.got_partial && mpi.some_limitation && CMP_EQ(mpi.least,mpi.most)) { - doit_select_delete(tb,*(mpi.save_term),&sc, 0 /* direction doesn't + doit_select_delete(&tbl->common,*(mpi.save_term),&sc, 0 /* direction doesn't matter */); RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE); } if (mpi.some_limitation) { - if ((this = find_next_from_pb_key(tb, &tb->static_stack, mpi.most)) != NULL) { - lastkey = GETKEY(tb, this->dbterm.tpl); + if ((this = find_next_from_pb_key(&tbl->common, *root, stack, mpi.most)) != NULL) { + lastkey = GETKEY(&tbl->common, this->dbterm.tpl); } sc.end_condition = mpi.least; } - traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc); + traverse_backwards(&tbl->common, root, stack, lastkey, &doit_select_delete, &sc); BUMP_REDS(p, 1000 - sc.max); if (sc.max > 0) { RET_TO_BIF(erts_make_integer(sc.accum,p), DB_ERROR_NONE); } - key = GETKEY(tb, (sc.lastterm)->dbterm.tpl); + key = GETKEY(&tbl->common, (sc.lastterm)->dbterm.tpl); sz = size_object(key); if (IS_USMALL(0, sc.accum)) { hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6); @@ -1714,7 +1829,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, /* Don't free mpi.mp, so don't use macro */ if (sc.erase_lastterm) { - free_term(tb, sc.lastterm); + free_term(tbl, sc.lastterm); } *ret = bif_trap1(&ets_select_delete_continue_exp, p, continuation); return DB_ERROR_NONE; @@ -1723,12 +1838,21 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, } -static int db_select_replace_continue_tree(Process *p, - DbTable *tbl, - Eterm continuation, - 
Eterm *ret) +static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_select_delete_tree_common(p, tbl, &tb->root, tid, pattern, ret, + &tb->static_stack); +} + +int db_select_replace_continue_tree_common(Process *p, + DbTableCommon *tb, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTableTree *stack_container) +{ DbTreeStack* stack; struct select_replace_context sc; unsigned sz; @@ -1764,7 +1888,7 @@ static int db_select_replace_continue_tree(Process *p, sc.end_condition = NIL; sc.lastobj = NULL; sc.max = 1000; - sc.keypos = tb->common.keypos; + sc.keypos = tb->keypos; if (is_big(tptr[5])) { sc.replaced = big_to_uint32(tptr[5]); } else { @@ -1772,9 +1896,9 @@ static int db_select_replace_continue_tree(Process *p, } prev_replaced = sc.replaced; - stack = get_any_stack(tb); - traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc); - release_stack(tb,stack); + stack = get_any_stack((DbTable*)tb,stack_container); + traverse_update_backwards(tb, root, stack, lastkey, &doit_select_replace, &sc); + release_stack((DbTable*)tb,stack_container,stack); // the more objects we've replaced, the more reductions we've consumed BUMP_REDS(p, MIN(2000, (1000 - sc.max) + (sc.replaced - prev_replaced))); @@ -1813,10 +1937,20 @@ static int db_select_replace_continue_tree(Process *p, #undef RET_TO_BIF } -static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid, - Eterm pattern, Eterm *ret) +static int db_select_replace_continue_tree(Process *p, + DbTable *tbl, + Eterm continuation, + Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_select_replace_continue_tree_common(p, &tb->common, &tb->root, + continuation, ret, tb); +} + +int db_select_replace_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, Eterm *ret, + DbTableTree *stack_container) +{ DbTreeStack* stack; struct select_replace_context sc; struct mp_info mpi; @@ -1844,12 +1978,13 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid, sc.lastobj = NULL; sc.p = p; sc.tb = tb; + sc.root = root; sc.max = 1000; sc.end_condition = NIL; - sc.keypos = tb->common.keypos; + sc.keypos = tb->keypos; sc.replaced = 0; - if ((errcode = analyze_pattern(tb, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) { + if ((errcode = analyze_pattern(tb, root, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) { RET_TO_BIF(NIL,errcode); } @@ -1863,21 +1998,21 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid, if (!mpi.got_partial && mpi.some_limitation && CMP_EQ(mpi.least,mpi.most)) { doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */); - reset_static_stack(tb); /* may refer replaced term */ + reset_static_stack(stack_container); /* may refer replaced term */ RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE); } - stack = get_any_stack(tb); + stack = get_any_stack((DbTable*)tb,stack_container); if (mpi.some_limitation) { - if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) { + if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) { lastkey = GETKEY(tb, this->dbterm.tpl); } sc.end_condition = mpi.least; } - traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc); - release_stack(tb,stack); + traverse_update_backwards(tb, root, stack, lastkey, &doit_select_replace, &sc); + release_stack((DbTable*)tb,stack_container,stack); // the more objects we've replaced, the more reductions we've consumed 
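 // Roughly: sc.max starts at 1000 and is decremented once per node the
 // callback examines, so (1000 - sc.max) approximates traversal work and
 // sc.replaced adds one reduction per rewritten object. E.g. examining
 // 600 nodes and replacing 250 of them charges MIN(2000, 600 + 250) = 850
 // reductions.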
BUMP_REDS(p, MIN(2000, (1000 - sc.max) + sc.replaced)); if (sc.max > 0) { @@ -1914,52 +2049,73 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid, } -static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid, + Eterm pattern, Eterm *ret) { DbTableTree *tb = &tbl->tree; + return db_select_replace_tree_common(p, &tb->common, &tb->root, + tid, pattern, ret, tb); +} + +int db_take_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root, + Eterm key, Eterm *ret, + DbTreeStack *stack /* NULL if no static stack */) +{ TreeDbTerm *this; *ret = NIL; - this = linkout_tree(tb, key); + this = linkout_tree(&tbl->common, root, key, stack); if (this) { Eterm copy, *hp, *hend; hp = HAlloc(p, this->dbterm.size + 2); hend = hp + this->dbterm.size + 2; - copy = db_copy_object_from_ets(&tb->common, + copy = db_copy_object_from_ets(&tbl->common, &this->dbterm, &hp, &MSO(p)); *ret = CONS(hp, copy, NIL); hp += 2; HRelease(p, hend, hp); - free_term(tb, this); + free_term(tbl, this); } return DB_ERROR_NONE; } +static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret) +{ + DbTableTree *tb = &tbl->tree; + return db_take_tree_common(p, tbl, &tb->root, + key, ret, &tb->static_stack); +} + /* ** Other interface routines (not directly coupled to one bif) */ - -/* Display tree contents (for dump) */ -static void db_print_tree(fmtfn_t to, void *to_arg, - int show, - DbTable *tbl) +void db_print_tree_common(fmtfn_t to, void *to_arg, + int show, TreeDbTerm *root, DbTable *tbl) { - DbTableTree *tb = &tbl->tree; #ifdef TREE_DEBUG if (show) erts_print(to, to_arg, "\nTree data dump:\n" "------------------------------------------------\n"); - do_dump_tree2(&tbl->tree, to, to_arg, show, tb->root, 0); + do_dump_tree2(&tbl->common, to, to_arg, show, root, 0); if (show) erts_print(to, to_arg, "\n" "------------------------------------------------\n"); #else - erts_print(to, to_arg, "Ordered set (AVL tree), Elements: %d\n", NITEMS(tb)); + erts_print(to, to_arg, "Ordered set (AVL tree), Elements: %d\n", NITEMS(tbl)); #endif } +/* Display tree contents (for dump) */ +static void db_print_tree(fmtfn_t to, void *to_arg, + int show, + DbTable *tbl) +{ + DbTableTree *tb = &tbl->tree; + db_print_tree_common(to, to_arg, show, tb->root, tbl); +} + /* release all memory occupied by a single table */ static int db_free_empty_table_tree(DbTable *tbl) { @@ -1984,8 +2140,10 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds) (DbTable *) tb, (void *) tb->static_stack.array, sizeof(TreeDbTerm *) * STACK_NEED); - ASSERT(erts_atomic_read_nob(&tb->common.memory_size) - == sizeof(DbTable)); + ASSERT((erts_atomic_read_nob(&tb->common.memory_size) + == sizeof(DbTable)) || + (erts_atomic_read_nob(&tb->common.memory_size) + == (sizeof(DbTable) + sizeof(DbFixation)))); } return reds; } @@ -2004,11 +2162,18 @@ static void do_db_tree_foreach_offheap(TreeDbTerm *, void (*)(ErlOffHeap *, void *), void *); +void db_foreach_offheap_tree_common(TreeDbTerm *root, + void (*func)(ErlOffHeap *, void *), + void * arg) +{ + do_db_tree_foreach_offheap(root, func, arg); +} + static void db_foreach_offheap_tree(DbTable *tbl, void (*func)(ErlOffHeap *, void *), void * arg) { - do_db_tree_foreach_offheap(tbl->tree.root, func, arg); + db_foreach_offheap_tree_common(tbl->tree.root, func, arg); } @@ -2033,13 +2198,14 @@ do_db_tree_foreach_offheap(TreeDbTerm *tdbt, do_db_tree_foreach_offheap(tdbt->right, func, arg); } -static TreeDbTerm 
*linkout_tree(DbTableTree *tb, Eterm key) { +static TreeDbTerm *linkout_tree(DbTableCommon *tb, TreeDbTerm **root, + Eterm key, DbTreeStack *stack) { TreeDbTerm **tstack[STACK_NEED]; int tpos = 0; int dstack[STACK_NEED+1]; int dpos = 0; int state = 0; - TreeDbTerm **this = &tb->root; + TreeDbTerm **this = root; Sint c; int dir; TreeDbTerm *q = NULL; @@ -2050,7 +2216,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) { * keep the balance. As in insert, we do the stacking ourselves. */ - reset_static_stack(tb); + reset_stack(stack); dstack[dpos++] = DIR_END; for (;;) { if (!*this) { /* Failure */ @@ -2076,30 +2242,30 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) { tstack[tpos++] = this; state = delsub(this); } - erts_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->nitems); break; } } while (state && ( dir = dstack[--dpos] ) != DIR_END) { this = tstack[--tpos]; if (dir == DIR_LEFT) { - state = balance_left(this); + state = tree_balance_left(this); } else { - state = balance_right(this); + state = tree_balance_right(this); } } return q; } -static TreeDbTerm *linkout_object_tree(DbTableTree *tb, - Eterm object) +static TreeDbTerm *linkout_object_tree(DbTableCommon *tb, TreeDbTerm **root, + Eterm object, DbTableTree *stack) { TreeDbTerm **tstack[STACK_NEED]; int tpos = 0; int dstack[STACK_NEED+1]; int dpos = 0; int state = 0; - TreeDbTerm **this = &tb->root; + TreeDbTerm **this = root; Sint c; int dir; TreeDbTerm *q = NULL; @@ -2114,7 +2280,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, key = GETKEY(tb, tuple_val(object)); - reset_static_stack(tb); + reset_static_stack(stack); dstack[dpos++] = DIR_END; for (;;) { if (!*this) { /* Failure */ @@ -2128,7 +2294,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, tstack[tpos++] = this; this = &((*this)->right); } else { /* Equal key, found the only possible matching object*/ - if (!db_eq(&tb->common,object,&(*this)->dbterm)) { + if (!db_eq(tb,object,&(*this)->dbterm)) { return NULL; } q = (*this); @@ -2143,16 +2309,16 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, tstack[tpos++] = this; state = delsub(this); } - erts_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->nitems); break; } } while (state && ( dir = dstack[--dpos] ) != DIR_END) { this = tstack[--tpos]; if (dir == DIR_LEFT) { - state = balance_left(this); + state = tree_balance_left(this); } else { - state = balance_right(this); + state = tree_balance_right(this); } } return q; @@ -2162,7 +2328,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, ** For the select functions, analyzes the pattern and determines which ** part of the tree should be searched. 
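 ** Roughly: a fully bound key narrows the search to a single node, a
 ** partly bound key bounds it to a subrange via mpi->least and mpi->most,
 ** and anything else forces a traversal of the whole tree.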
Also compiles the match program */ -static int analyze_pattern(DbTableTree *tb, Eterm pattern, +static int analyze_pattern(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern, extra_match_validator_t extra_validator, /* Optional callback */ struct mp_info *mpi) { @@ -2223,7 +2389,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern, guards[i] = guard = ptpl[2]; bodies[i] = body = ptpl[3]; - if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) { + if(extra_validator != NULL && !extra_validator(tb->keypos, match, guard, body)) { if (buff != sbuff) { erts_free(ERTS_ALC_T_DB_TMP, buff); } @@ -2236,7 +2402,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern, ++i; partly_bound = NIL; - res = key_given(tb, tpl, &(mpi->save_term), &partly_bound); + res = key_given(tb, root, tpl, &(mpi->save_term), &partly_bound); if ( res >= 0 ) { /* Can match something */ key = 0; mpi->something_can_match = 1; @@ -2300,7 +2466,7 @@ static SWord do_free_tree_continue(DbTableTree *tb, SWord reds) PUSH_NODE(&tb->static_stack, root); root = p; } else { - free_term(tb, root); + free_term((DbTable*)tb, root); if (--reds < 0) { return reds; /* Done enough for now */ } @@ -2314,7 +2480,7 @@ static SWord do_free_tree_continue(DbTableTree *tb, SWord reds) /* * Deletion helpers */ -static int balance_left(TreeDbTerm **this) +int tree_balance_left(TreeDbTerm **this) { TreeDbTerm *p, *p1, *p2; int b1, b2, h = 1; @@ -2359,7 +2525,7 @@ static int balance_left(TreeDbTerm **this) return h; } -static int balance_right(TreeDbTerm **this) +int tree_balance_right(TreeDbTerm **this) { TreeDbTerm *p, *p1, *p2; int b1, b2, h = 1; @@ -2431,7 +2597,7 @@ static int delsub(TreeDbTerm **this) h = 1; while (tpos && h) { r = tstack[--tpos]; - h = balance_right(r); + h = tree_balance_right(r); } return h; } @@ -2440,11 +2606,13 @@ static int delsub(TreeDbTerm **this) * Helper for db_slot */ -static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot) +static TreeDbTerm *slot_search(Process *p, TreeDbTerm *root, + Sint slot, DbTable *tb, + DbTableTree *stack_container) { TreeDbTerm *this; TreeDbTerm *tmp; - DbTreeStack* stack = get_any_stack(tb); + DbTreeStack* stack = get_any_stack(tb,stack_container); ASSERT(stack != NULL); if (slot == 1) { /* Don't search from where we are if we are @@ -2457,7 +2625,7 @@ static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot) stack->pos = 0; } if (EMPTY_NODE(stack)) { - this = tb->root; + this = root; if (this == NULL) goto done; while (this->left != NULL){ @@ -2506,7 +2674,7 @@ static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot) } } done: - release_stack(tb,stack); + release_stack(tb,stack_container,stack); return this; } @@ -2514,7 +2682,8 @@ done: * Find next and previous in sort order */ -static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) { +static TreeDbTerm *find_next(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key) { TreeDbTerm *this; TreeDbTerm *tmp; Sint c; @@ -2526,7 +2695,7 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) { } } if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */ - if (( this = tb->root ) == NULL) + if (( this = root ) == NULL) return NULL; for (;;) { PUSH_NODE(stack, this); @@ -2570,7 +2739,8 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) { return this; } -static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) { +static TreeDbTerm 
*find_prev(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key) { TreeDbTerm *this; TreeDbTerm *tmp; Sint c; @@ -2582,7 +2752,7 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) { } } if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */ - if (( this = tb->root ) == NULL) + if (( this = root ) == NULL) return NULL; for (;;) { PUSH_NODE(stack, this); @@ -2626,8 +2796,8 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) { return this; } -static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack, - Eterm key) +static TreeDbTerm *find_next_from_pb_key(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key) { TreeDbTerm *this; TreeDbTerm *tmp; @@ -2635,7 +2805,7 @@ static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack, /* spool the stack, we have to "re-search" */ stack->pos = stack->slot = 0; - if (( this = tb->root ) == NULL) + if (( this = root ) == NULL) return NULL; for (;;) { PUSH_NODE(stack, this); @@ -2659,8 +2829,8 @@ static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack, } } -static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack, - Eterm key) +static TreeDbTerm *find_prev_from_pb_key(DbTableCommon *tb, TreeDbTerm *root, + DbTreeStack* stack, Eterm key) { TreeDbTerm *this; TreeDbTerm *tmp; @@ -2668,7 +2838,7 @@ static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack, /* spool the stack, we have to "re-search" */ stack->pos = stack->slot = 0; - if (( this = tb->root ) == NULL) + if (( this = root ) == NULL) return NULL; for (;;) { PUSH_NODE(stack, this); @@ -2696,16 +2866,17 @@ static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack, /* * Just lookup a node */ -static TreeDbTerm *find_node(DbTableTree *tb, Eterm key) +static TreeDbTerm *find_node(DbTableCommon *tb, TreeDbTerm *root, + Eterm key, DbTableTree *stack_container) { TreeDbTerm *this; Sint res; - DbTreeStack* stack = get_static_stack(tb); + DbTreeStack* stack = get_static_stack(stack_container); if(!stack || EMPTY_NODE(stack) || !cmp_key_eq(tb, key, (this=TOP_NODE(stack)))) { - this = tb->root; + this = root; while (this != NULL && (res = cmp_key(tb,key,this)) != 0) { if (res < 0) this = this->left; @@ -2714,7 +2885,7 @@ static TreeDbTerm *find_node(DbTableTree *tb, Eterm key) } } if (stack) { - release_stack(tb,stack); + release_stack((DbTable*)tb,stack_container,stack); } return this; } @@ -2722,12 +2893,12 @@ static TreeDbTerm *find_node(DbTableTree *tb, Eterm key) /* * Lookup a node and return the address of the node pointer in the tree */ -static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key) +static TreeDbTerm **find_node2(DbTableCommon *tb, TreeDbTerm **root, Eterm key) { TreeDbTerm **this; Sint res; - this = &tb->root; + this = root; while ((*this) != NULL && (res = cmp_key(tb, key, *this)) != 0) { if (res < 0) this = &((*this)->left); @@ -2744,7 +2915,8 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key) * Tries to reuse the existing stack for performance. 
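 * Sketch of the approach below: pop stack entries that no longer lead to
 * the wanted key, rebuild the stack from *root if it runs empty, and
 * answer with the address of the parent's left or right child pointer
 * (or with root itself when the node is the root).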
*/ -static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *this) { +static TreeDbTerm **find_ptr(DbTableCommon *tb, TreeDbTerm **root, + DbTreeStack *stack, TreeDbTerm *this) { Eterm key = GETKEY(tb, this->dbterm.tpl); TreeDbTerm *tmp; TreeDbTerm *parent; @@ -2757,7 +2929,7 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th } } if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */ - if (( tmp = tb->root ) == NULL) + if (( tmp = *root ) == NULL) return NULL; for (;;) { PUSH_NODE(stack, tmp); @@ -2783,7 +2955,7 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th parent = TOPN_NODE(stack, 1); if (parent == NULL) - return ((this != tb->root) ? NULL : &(tb->root)); + return ((this != *root) ? NULL : root); if (parent->left == this) return &(parent->left); if (parent->right == this) @@ -2791,12 +2963,11 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th return NULL; } -static int -db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj, - DbUpdateHandle* handle) +int db_lookup_dbterm_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root, + Eterm key, Eterm obj, DbUpdateHandle* handle, + DbTableTree *stack_container) { - DbTableTree *tb = &tbl->tree; - TreeDbTerm **pp = find_node2(tb, key); + TreeDbTerm **pp = find_node2(&tbl->common, root, key); int flags = 0; if (pp == NULL) { @@ -2807,18 +2978,19 @@ db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj, int arity = arityval(*objp); Eterm *htop, *hend; - ASSERT(arity >= tb->common.keypos); + ASSERT(arity >= tbl->common.keypos); htop = HAlloc(p, arity + 1); hend = htop + arity + 1; sys_memcpy(htop, objp, sizeof(Eterm) * (arity + 1)); - htop[tb->common.keypos] = key; + htop[tbl->common.keypos] = key; obj = make_tuple(htop); - if (db_put_tree(tbl, obj, 1) != DB_ERROR_NONE) { + if (db_put_tree_common(&tbl->common, root, + obj, 1, stack_container) != DB_ERROR_NONE) { return 0; } - pp = find_node2(tb, key); + pp = find_node2(&tbl->common, root, key); ASSERT(pp != NULL); HRelease(p, hend, htop); flags |= DB_NEW_OBJECT; @@ -2833,21 +3005,28 @@ db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj, return 1; } -static void -db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle) +static int +db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj, + DbUpdateHandle* handle) { - DbTable *tbl = handle->tb; DbTableTree *tb = &tbl->tree; + return db_lookup_dbterm_tree_common(p, tbl, &tb->root, key, obj, handle, tb); +} + +void db_finalize_dbterm_tree_common(int cret, DbUpdateHandle *handle, + DbTableTree *stack_container) +{ + DbTable *tbl = handle->tb; TreeDbTerm *bp = (TreeDbTerm *) *handle->bp; if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) { Eterm ret; - db_erase_tree(tbl, GETKEY(tb, bp->dbterm.tpl), &ret); + db_erase_tree(tbl, GETKEY(&tbl->common, bp->dbterm.tpl), &ret); } else if (handle->flags & DB_MUST_RESIZE) { db_finalize_resize(handle, offsetof(TreeDbTerm,dbterm)); - reset_static_stack(tb); + reset_static_stack(stack_container); - free_term(tb, bp); + free_term(tbl, bp); } #ifdef DEBUG handle->dbterm = 0; @@ -2855,13 +3034,22 @@ db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle) return; } +static void +db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle) +{ + DbTable *tbl = handle->tb; + DbTableTree *tb = &tbl->tree; + db_finalize_dbterm_tree_common(cret, handle, tb); +} + /* * Traverse the tree with a callback function, used by db_match_xxx 
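 * A lastkey of THE_NON_VALUE starts the walk from the rightmost node;
 * otherwise it resumes just before lastkey via find_prev. The callback
 * may abort the traversal by returning 0.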
*/ -static void traverse_backwards(DbTableTree *tb, +static void traverse_backwards(DbTableCommon *tb, + TreeDbTerm **root, DbTreeStack* stack, Eterm lastkey, - int (*doit)(DbTableTree *, + int (*doit)(DbTableCommon *, TreeDbTerm *, void *, int), @@ -2871,7 +3059,7 @@ static void traverse_backwards(DbTableTree *tb, if (lastkey == THE_NON_VALUE) { stack->pos = stack->slot = 0; - if (( this = tb->root ) == NULL) { + if (( this = *root ) == NULL) { return; } while (this != NULL) { @@ -2879,15 +3067,15 @@ static void traverse_backwards(DbTableTree *tb, this = this->right; } this = TOP_NODE(stack); - next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl)); + next = find_prev(tb, *root, stack, GETKEY(tb, this->dbterm.tpl)); if (!((*doit)(tb, this, context, 0))) return; } else { - next = find_prev(tb, stack, lastkey); + next = find_prev(tb, *root, stack, lastkey); } while ((this = next) != NULL) { - next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl)); + next = find_prev(tb, *root, stack, GETKEY(tb, this->dbterm.tpl)); if (!((*doit)(tb, this, context, 0))) return; } @@ -2896,10 +3084,11 @@ static void traverse_backwards(DbTableTree *tb, /* * Traverse the tree with a callback function, used by db_match_xxx */ -static void traverse_forward(DbTableTree *tb, +static void traverse_forward(DbTableCommon *tb, + TreeDbTerm **root, DbTreeStack* stack, Eterm lastkey, - int (*doit)(DbTableTree *, + int (*doit)(DbTableCommon *, TreeDbTerm *, void *, int), @@ -2909,7 +3098,7 @@ static void traverse_forward(DbTableTree *tb, if (lastkey == THE_NON_VALUE) { stack->pos = stack->slot = 0; - if (( this = tb->root ) == NULL) { + if (( this = *root ) == NULL) { return; } while (this != NULL) { @@ -2917,15 +3106,15 @@ static void traverse_forward(DbTableTree *tb, this = this->left; } this = TOP_NODE(stack); - next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl)); + next = find_next(tb, *root, stack, GETKEY(tb, this->dbterm.tpl)); if (!((*doit)(tb, this, context, 1))) return; } else { - next = find_next(tb, stack, lastkey); + next = find_next(tb, *root, stack, lastkey); } while ((this = next) != NULL) { - next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl)); + next = find_next(tb, *root, stack, GETKEY(tb, this->dbterm.tpl)); if (!((*doit)(tb, this, context, 1))) return; } @@ -2934,10 +3123,11 @@ static void traverse_forward(DbTableTree *tb, /* * Traverse the tree with an update callback function, used by db_select_replace */ -static void traverse_update_backwards(DbTableTree *tb, +static void traverse_update_backwards(DbTableCommon *tb, + TreeDbTerm **root, DbTreeStack* stack, Eterm lastkey, - int (*doit)(DbTableTree*, + int (*doit)(DbTableCommon*, TreeDbTerm**, void*, int), @@ -2948,7 +3138,7 @@ static void traverse_update_backwards(DbTableTree *tb, if (lastkey == THE_NON_VALUE) { stack->pos = stack->slot = 0; - if (( this = tb->root ) == NULL) { + if (( this = *root ) == NULL) { return; } while (this != NULL) { @@ -2956,23 +3146,23 @@ static void traverse_update_backwards(DbTableTree *tb, this = this->right; } this = TOP_NODE(stack); - this_ptr = find_ptr(tb, stack, this); + this_ptr = find_ptr(tb, root, stack, this); ASSERT(this_ptr != NULL); res = (*doit)(tb, this_ptr, context, 0); REPLACE_TOP_NODE(stack, *this_ptr); - next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl)); + next = find_prev(tb, *root, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl)); if (!res) return; } else { - next = find_prev(tb, stack, lastkey); + next = find_prev(tb, *root, stack, lastkey); } while ((this = next) != 
NULL) { - this_ptr = find_ptr(tb, stack, this); + this_ptr = find_ptr(tb, root, stack, this); ASSERT(this_ptr != NULL); res = (*doit)(tb, this_ptr, context, 0); REPLACE_TOP_NODE(stack, *this_ptr); - next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl)); + next = find_prev(tb, *root, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl)); if (!res) return; } @@ -2982,8 +3172,8 @@ static void traverse_update_backwards(DbTableTree *tb, * Returns 0 if not given 1 if given and -1 on no possible match * if key is given; *ret is set to point to the object concerned. */ -static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret, - Eterm *partly_bound) +static int key_given(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern, + TreeDbTerm ***ret, Eterm *partly_bound) { TreeDbTerm **this; Eterm key; @@ -2991,11 +3181,11 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret, ASSERT(ret != NULL); if (pattern == am_Underscore || db_is_variable(pattern) != -1) return 0; - key = db_getkey(tb->common.keypos, pattern); + key = db_getkey(tb->keypos, pattern); if (is_non_value(key)) return -1; /* can't possibly match anything */ if (!db_has_variable(key)) { /* Bound key */ - if (( this = find_node2(tb, key) ) == NULL) { + if (( this = find_node2(tb, root, key) ) == NULL) { return -1; } *ret = this; @@ -3118,7 +3308,7 @@ static int partly_bound_can_match_lesser(Eterm partly_bound_1, if (ret) erts_fprintf(stderr," can match lesser than "); else - erts_fprintf(stderr," can not match lesser than "); + erts_fprintf(stderr," cannot match lesser than "); erts_fprintf(stderr,"%T\n",partly_bound_2); #endif return ret; @@ -3136,7 +3326,7 @@ static int partly_bound_can_match_greater(Eterm partly_bound_1, if (ret) erts_fprintf(stderr," can match greater than "); else - erts_fprintf(stderr," can not match greater than "); + erts_fprintf(stderr," cannot match greater than "); erts_fprintf(stderr,"%T\n",partly_bound_2); #endif return ret; @@ -3288,7 +3478,7 @@ static int do_partly_bound_can_match_greater(Eterm a, Eterm b, * Callback functions for the different match functions */ -static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr, +static int doit_select(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward) { struct select_context *sc = (struct select_context *) ptr; @@ -3306,7 +3496,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr, GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0))) { return 0; } - ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2); + ret = db_match_dbterm(tb, sc->p,sc->mp, &this->dbterm, &hp, 2); if (is_value(ret)) { sc->accum = CONS(hp, ret, sc->accum); } @@ -3323,7 +3513,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr, return 1; } -static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr, +static int doit_select_count(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward) { struct select_count_context *sc = (struct select_count_context *) ptr; @@ -3337,7 +3527,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr, GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)) { return 0; } - ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0); + ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, NULL, 0); if (ret == am_true) { ++(sc->got); } @@ -3347,7 +3537,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr, return 1; } -static int doit_select_chunk(DbTableTree *tb, TreeDbTerm 
*this, void *ptr, +static int doit_select_chunk(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward) { struct select_context *sc = (struct select_context *) ptr; @@ -3366,7 +3556,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr, return 0; } - ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2); + ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, &hp, 2); if (is_value(ret)) { ++(sc->got); sc->accum = CONS(hp, ret, sc->accum); @@ -3385,7 +3575,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr, } -static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr, +static int doit_select_delete(DbTableCommon *tb, TreeDbTerm *this, void *ptr, int forward) { struct select_delete_context *sc = (struct select_delete_context *) ptr; @@ -3393,7 +3583,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr, Eterm key; if (sc->erase_lastterm) - free_term(tb, sc->lastterm); + free_term((DbTable*)tb, sc->lastterm); sc->erase_lastterm = 0; sc->lastterm = this; @@ -3401,10 +3591,10 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr, cmp_partly_bound(sc->end_condition, GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0) return 0; - ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0); + ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, NULL, 0); if (ret == am_true) { key = GETKEY(sc->tb, this->dbterm.tpl); - linkout_tree(sc->tb, key); + linkout_tree(sc->tb, sc->root, key, sc->stack); sc->erase_lastterm = 1; ++sc->accum; } @@ -3414,7 +3604,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr, return 1; } -static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr, +static int doit_select_replace(DbTableCommon *tb, TreeDbTerm **this, void *ptr, int forward) { struct select_replace_context *sc = (struct select_replace_context *) ptr; @@ -3428,13 +3618,13 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr, GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) { return 0; } - ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &(*this)->dbterm, NULL, 0); + ret = db_match_dbterm(tb, sc->p, sc->mp, &(*this)->dbterm, NULL, 0); if (is_value(ret)) { TreeDbTerm* new; TreeDbTerm* old = *this; #ifdef DEBUG - Eterm key = db_getkey(tb->common.keypos, ret); + Eterm key = db_getkey(tb->keypos, ret); ASSERT(is_value(key)); ASSERT(cmp_key(tb, key, old) == 0); #endif @@ -3444,7 +3634,7 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr, new->balance = old->balance; sc->lastobj = new->dbterm.tpl; *this = new; - free_term(tb, old); + free_term((DbTable*)tb, old); ++(sc->replaced); } if (--(sc->max) <= 0) { @@ -3454,7 +3644,7 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr, } #ifdef TREE_DEBUG -static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show, +static void do_dump_tree2(DbTableCommon* tb, int to, void *to_arg, int show, TreeDbTerm *t, int offset) { if (t == NULL) @@ -3463,7 +3653,7 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show, if (show) { const char* prefix; Eterm term; - if (tb->common.compress) { + if (tb->compress) { prefix = "key="; term = GETKEY(tb, t->dbterm.tpl); } @@ -3518,7 +3708,7 @@ static void check_slot_pos(DbTableTree *tb) "element position %d is really 0x%08X, when stack says " "it's 0x%08X\n", tb->stack.slot, t, tb->stack.array[tb->stack.pos - 
1]); - do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); + do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); } } @@ -3533,14 +3723,14 @@ static void check_saved_stack(DbTableTree *tb) if (t != stack->array[0]) { erts_fprintf(stderr,"tb->stack[0] is 0x%08X, should be 0x%08X\n", stack->array[0], t); - do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); + do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); return; } while (n < stack->pos) { if (t == NULL) { erts_fprintf(stderr, "NULL pointer in tree when stack not empty," " stack depth is %d\n", n); - do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); + do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); return; } n++; @@ -3554,7 +3744,7 @@ static void check_saved_stack(DbTableTree *tb) "represent child pointer in tree!" "(left == 0x%08X, right == 0x%08X\n", n, tb->stack[n], t->left, t->right); - do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); + do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0); return; } } @@ -3573,7 +3763,7 @@ static int check_table_tree(DbTableTree* tb, TreeDbTerm *t) erts_fprintf(stderr,"balance = %d, left = 0x%08X, right = 0x%08X\n", t->balance, t->left, t->right); erts_fprintf(stderr,"\nDump:\n---------------------------------\n"); - do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, t, 0); + do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, t, 0); erts_fprintf(stderr,"\n---------------------------------\n"); } return ((rh > lh) ? rh : lh) + 1; diff --git a/erts/emulator/beam/erl_db_tree_util.h b/erts/emulator/beam/erl_db_tree_util.h new file mode 100644 index 0000000000..fbd9a9124a --- /dev/null +++ b/erts/emulator/beam/erl_db_tree_util.h @@ -0,0 +1,151 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 1998-2016. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * %CopyrightEnd% + */ + +#ifndef _DB_TREE_UTIL_H +#define _DB_TREE_UTIL_H + +/* +** Internal functions and macros used by both the CA tree and the AVL tree +*/ + +/* +** A stack of this size is enough for an AVL tree with more than +** 0xFFFFFFFF elements. May be subject to change if +** the datatype of the element counter is changed to a 64 bit integer. +** The Maximal height of an AVL tree is calculated as: +** h(n) <= 1.4404 * log(n + 2) - 0.328 +** Where n denotes the number of nodes, h(n) the height of the tree +** with n nodes and log is the binary logarithm. +*/ + +#define STACK_NEED 50 + +#define PUSH_NODE(Dtt, Tdt) \ + ((Dtt)->array[(Dtt)->pos++] = Tdt) + +#define POP_NODE(Dtt) \ + (((Dtt)->pos) ? \ + (Dtt)->array[--((Dtt)->pos)] : NULL) + +#define TOP_NODE(Dtt) \ + ((Dtt->pos) ? 
\ + (Dtt)->array[(Dtt)->pos - 1] : NULL) + +#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL) + +static ERTS_INLINE void free_term(DbTable *tb, TreeDbTerm* p) +{ + db_free_term(tb, p, offsetof(TreeDbTerm, dbterm)); +} + +/* +** Some macros for "direction stacks" +*/ +#define DIR_LEFT 0 +#define DIR_RIGHT 1 +#define DIR_END 2 + +static ERTS_INLINE Sint cmp_key(DbTableCommon* tb, Eterm key, TreeDbTerm* obj) { + return CMP(key, GETKEY(tb,obj->dbterm.tpl)); +} + +int tree_balance_left(TreeDbTerm **this); +int tree_balance_right(TreeDbTerm **this); + +int db_first_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, + Eterm *ret, DbTableTree *stack_container); +int db_next_tree_common(Process *p, DbTable *tbl, + TreeDbTerm *root, Eterm key, + Eterm *ret, DbTreeStack* stack); +int db_last_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, + Eterm *ret, DbTableTree *stack_container); +int db_prev_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, Eterm key, + Eterm *ret, DbTreeStack* stack); +int db_put_tree_common(DbTableCommon *tb, TreeDbTerm **root, Eterm obj, + int key_clash_fail, DbTableTree *stack_container); +int db_get_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key, + Eterm *ret, DbTableTree *stack_container); +int db_get_element_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key, + int ndex, Eterm *ret, DbTableTree *stack_container); +int db_member_tree_common(DbTableCommon *tb, TreeDbTerm *root, Eterm key, Eterm *ret, + DbTableTree *stack_container); +int db_erase_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm key, Eterm *ret, + DbTreeStack *stack /* NULL if no static stack */); +int db_erase_object_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm object, + Eterm *ret, DbTableTree *stack_container); +int db_slot_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, + Eterm slot_term, Eterm *ret, + DbTableTree *stack_container); +int db_select_chunk_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, Sint chunk_size, + int reverse, Eterm *ret, + DbTableTree *stack_container); +int db_select_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, int reverse, Eterm *ret, + DbTableTree *stack_container); +int db_select_delete_tree_common(Process *p, DbTable *tbl, + TreeDbTerm **root, + Eterm tid, Eterm pattern, + Eterm *ret, + DbTreeStack* stack); +int db_select_continue_tree_common(Process *p, + DbTableCommon *tb, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTableTree *stack_container); +int db_select_delete_continue_tree_common(Process *p, + DbTable *tbl, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTreeStack* stack); +int db_select_count_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, Eterm *ret, + DbTableTree *stack_container); +int db_select_count_continue_tree_common(Process *p, + DbTableCommon *tb, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTableTree *stack_container); +int db_select_replace_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root, + Eterm tid, Eterm pattern, Eterm *ret, + DbTableTree *stack_container); +int db_select_replace_continue_tree_common(Process *p, + DbTableCommon *tb, + TreeDbTerm **root, + Eterm continuation, + Eterm *ret, + DbTableTree *stack_container); +int db_take_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root, + Eterm key, Eterm *ret, + DbTreeStack *stack /* NULL if no static stack */); +void 
db_print_tree_common(fmtfn_t to, void *to_arg, + int show, TreeDbTerm *root, DbTable *tbl); +void db_foreach_offheap_tree_common(TreeDbTerm *root, + void (*func)(ErlOffHeap *, void *), + void * arg); +int db_lookup_dbterm_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root, + Eterm key, Eterm obj, DbUpdateHandle* handle, + DbTableTree *stack_container); +void db_finalize_dbterm_tree_common(int cret, DbUpdateHandle *handle, + DbTableTree *stack_container); +#endif /* _DB_TREE_UTIL_H */ diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index f1d47326b4..a78623f490 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -3118,9 +3118,7 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj) Uint new_sz = offset + db_size_dbterm_comp(tb, obj); byte* basep; DbTerm* newp; -#ifdef DEBUG byte* top; -#endif ASSERT(tb->compress); if (old != 0) { @@ -3142,11 +3140,8 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj) } newp->size = size_object(obj); -#ifdef DEBUG - top = -#endif - copy_to_comp(tb, obj, newp, new_sz); - ASSERT(top <= basep + new_sz); + top = copy_to_comp(tb, obj, newp, new_sz); + ASSERT(top <= basep + new_sz); (void)top; /* ToDo: Maybe realloc if ((basep+new_sz) - top) > WASTED_SPACE_LIMIT */ diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 6ec3b4f98f..be74bc795c 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -90,6 +90,8 @@ typedef struct { Uint new_size; int flags; void* lck; + void* lck2; + int current_level; } DbUpdateHandle; @@ -274,23 +276,26 @@ typedef struct db_table_common { } DbTableCommon; /* These are status bit patterns */ -#define DB_PRIVATE (1 << 0) -#define DB_PROTECTED (1 << 1) -#define DB_PUBLIC (1 << 2) -#define DB_DELETE (1 << 3) /* table is being deleted */ -#define DB_SET (1 << 4) -#define DB_BAG (1 << 5) -#define DB_DUPLICATE_BAG (1 << 6) -#define DB_ORDERED_SET (1 << 7) -#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */ -#define DB_FREQ_READ (1 << 9) /* read_concurrency */ -#define DB_NAMED_TABLE (1 << 10) -#define DB_BUSY (1 << 11) +#define DB_PRIVATE (1 << 0) +#define DB_PROTECTED (1 << 1) +#define DB_PUBLIC (1 << 2) +#define DB_DELETE (1 << 3) /* table is being deleted */ +#define DB_SET (1 << 4) +#define DB_BAG (1 << 5) +#define DB_DUPLICATE_BAG (1 << 6) +#define DB_ORDERED_SET (1 << 7) +#define DB_CA_ORDERED_SET (1 << 8) +#define DB_FINE_LOCKED (1 << 9) /* write_concurrency */ +#define DB_FREQ_READ (1 << 10) /* read_concurrency */ +#define DB_NAMED_TABLE (1 << 11) +#define DB_BUSY (1 << 12) #define IS_HASH_TABLE(Status) (!!((Status) & \ (DB_BAG | DB_SET | DB_DUPLICATE_BAG))) #define IS_TREE_TABLE(Status) (!!((Status) & \ DB_ORDERED_SET)) +#define IS_CATREE_TABLE(Status) (!!((Status) & \ + DB_CA_ORDERED_SET)) #define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0)) #define IS_FIXED(T) (NFIXED(T) != 0) diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h index 31b4817fb1..9ef7c39d41 100644 --- a/erts/emulator/beam/erl_drv_nif.h +++ b/erts/emulator/beam/erl_drv_nif.h @@ -53,7 +53,8 @@ typedef enum { enum ErlNifSelectFlags { ERL_NIF_SELECT_READ = (1 << 0), ERL_NIF_SELECT_WRITE = (1 << 1), - ERL_NIF_SELECT_STOP = (1 << 2) + ERL_NIF_SELECT_STOP = (1 << 2), + ERL_NIF_SELECT_CANCEL = (1 << 3) }; /* diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 57c6c10c7f..9ba3ae226e 100644 --- 
a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -128,7 +128,7 @@ const Eterm etp_hole_marker = 0; static int modified_sched_thread_suggested_stack_size = 0; -Eterm erts_init_process_id; +Eterm erts_init_process_id = ERTS_INVALID_PID; /* * Note about VxWorks: All variables must be initialized by executable code, @@ -163,7 +163,6 @@ int erts_initialized = 0; * Configurable parameters. */ -Uint display_items; /* no of items to display in traces etc */ int H_MIN_SIZE; /* The minimum heap grain */ int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/ int H_MAX_SIZE; /* The maximum heap size */ @@ -375,21 +374,19 @@ erl_init(int ncpu, } static Eterm -erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv) +erl_first_process_otp(char* mod_name, int argc, char** argv) { int i; - Eterm start_mod; Eterm args; Eterm res; Eterm* hp; Process parent; ErlSpawnOpts so; - Eterm env; - - start_mod = erts_atom_put((byte *) modname, sys_strlen(modname), ERTS_ATOM_ENC_LATIN1, 1); - if (erts_find_function(start_mod, am_start, 2, + Eterm boot_mod; + + if (erts_find_function(am_erl_init, am_start, 2, erts_active_code_ix()) == NULL) { - erts_exit(ERTS_ERROR_EXIT, "No function %s:start/2\n", modname); + erts_exit(ERTS_ERROR_EXIT, "No function erl_init:start/2\n"); } /* @@ -405,13 +402,13 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** args = CONS(hp, new_binary(&parent, (byte*)argv[i], len), args); hp += 2; } - env = new_binary(&parent, code, size); + boot_mod = erts_atom_put((byte *) mod_name, sys_strlen(mod_name), ERTS_ATOM_ENC_LATIN1, 1); args = CONS(hp, args, NIL); hp += 2; - args = CONS(hp, env, args); + args = CONS(hp, boot_mod, args); so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC; - res = erl_create_process(&parent, start_mod, am_start, args, &so); + res = erl_create_process(&parent, am_erl_init, am_start, args, &so); erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN); erts_cleanup_empty_process(&parent); return res; @@ -481,7 +478,6 @@ erts_preloaded(Process* p) /* static variables that must not change (use same values at restart) */ static char* program; static char* init = "init"; -static char* boot = "boot"; static int boot_argc; static char** boot_argv; @@ -535,9 +531,6 @@ void erts_usage(void) int this_rel = this_rel_num(); erts_fprintf(stderr, "Usage: %s [flags] [ -- [init_args] ]\n", progname(program)); erts_fprintf(stderr, "The flags are:\n\n"); - - /* erts_fprintf(stderr, "-# number set the number of items to be used in traces etc\n"); */ - erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n"); erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n", ERTS_ASYNC_THREAD_MIN_STACK_SIZE, @@ -545,13 +538,9 @@ void erts_usage(void) erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n"); erts_fprintf(stderr, " valid range is [0-%d]\n", ERTS_MAX_NO_OF_ASYNC_THREADS); - erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n"); erts_fprintf(stderr, " d (or no extra option) to disable the break\n"); erts_fprintf(stderr, " handler, i to ignore break signals\n"); - - /* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */ - erts_fprintf(stderr, "-c bool enable or disable time correction\n"); erts_fprintf(stderr, "-C mode set time warp mode; valid modes are:\n"); erts_fprintf(stderr, " no_time_warp|single_time_warp|multi_time_warp\n"); @@ -570,7 +559,6 @@ void erts_usage(void) erts_pd_initial_size); 
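 /* A worked example for the I/O poll flags described below: with 10
  * schedulers, -IOPt 50 requests fifty percent, i.e. 5 poll threads,
  * and -IOp must then be 5 or less. */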
erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n"); erts_fprintf(stderr, " valid values are: off_heap | on_heap\n"); - erts_fprintf(stderr, "-IOp number set number of pollsets to be used to poll for I/O,\n"); erts_fprintf(stderr, " This value has to be equal or smaller than the\n"); erts_fprintf(stderr, " number of poll threads. If the current platform\n"); @@ -581,9 +569,7 @@ void erts_usage(void) erts_fprintf(stderr, " number of poll threads."); erts_fprintf(stderr, "-IOPt number set number of threads to be used to poll for I/O\n"); erts_fprintf(stderr, " as a percentage of the number of schedulers."); - - /* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */ - + erts_fprintf(stderr, "-i module set the boot module (default init)\n"); erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n"); erts_fprintf(stderr, " Note that this flag is deprecated!\n"); erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n"); @@ -598,7 +584,6 @@ void erts_usage(void) erts_fprintf(stderr, "-R number set compatibility release number,\n"); erts_fprintf(stderr, " valid range [%d-%d]\n", this_rel-2, this_rel); - erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n"); erts_fprintf(stderr, "-rg amount set reader groups limit\n"); erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n"); @@ -669,9 +654,7 @@ void erts_usage(void) erts_fprintf(stderr, "-T number set modified timing level, valid range is [0-%d]\n", ERTS_MODIFIED_TIMING_LEVELS-1); erts_fprintf(stderr, "-V print Erlang version\n"); - erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n"); - erts_fprintf(stderr, "-W<i|w|e> set error logger warnings mapping,\n"); erts_fprintf(stderr, " see error_logger documentation for details\n"); erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n"); @@ -762,7 +745,6 @@ early_init(int *argc, char **argv) /* erts_sched_compact_load = 1; erts_printf_eterm_func = erts_printf_term; - display_items = 200; erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE; erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS; erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE; @@ -1269,25 +1251,9 @@ erl_start(int argc, char **argv) /* * NOTE: -M flags are handled (and removed from argv) by - * erts_alloc_init(). - * - * The -d, -m, -S, -t, and -T flags was removed in - * Erlang 5.3/OTP R9C. - * - * -S, and -T has been reused in Erlang 5.5/OTP R11B. - * - * -d has been reused in a patch R12B-4. + * erts_alloc_init(). 
*/ - case '#' : - arg = get_arg(argv[i]+2, argv[i+1], &i); - if ((display_items = atoi(arg)) == 0) { - erts_fprintf(stderr, "bad display items%s\n", arg); - erts_usage(); - } - VERBOSE(DEBUG_SYSTEM, - ("using display items %d\n",display_items)); - break; case 'p': if (!sys_strncmp(argv[i],"-pc",3)) { int printable_chars = ERL_PRINTABLE_CHARACTERS_LATIN1; @@ -1566,11 +1532,6 @@ erl_start(int argc, char **argv) init = get_arg(argv[i]+2, argv[i+1], &i); break; - case 'b': - /* define name of initial function */ - boot = get_arg(argv[i]+2, argv[i+1], &i); - break; - case 'B': if (argv[i][2] == 'i') /* +Bi */ ignore_break = 1; @@ -2256,8 +2217,8 @@ erl_start(int argc, char **argv) erts_initialized = 1; - erts_init_process_id = erl_first_process_otp("otp_ring0", NULL, 0, - boot_argc, boot_argv); + erts_init_process_id = erl_first_process_otp(init, boot_argc, boot_argv); + ASSERT(erts_init_process_id != ERTS_INVALID_PID); { /* diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 463ae898a3..987e370341 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -42,6 +42,7 @@ #include "erl_term.h" #include "erl_threads.h" #include "erl_atom_table.h" +#include "erl_utils.h" typedef struct { char *name; @@ -91,6 +92,8 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "db_tab", "address" }, { "db_tab_fix", "address" }, { "db_hash_slot", "address" }, + { "erl_db_catree_base_node", "term" }, + { "erl_db_catree_route_node", "index" }, { "resource_monitors", "address" }, { "driver_list", NULL }, { "proc_msgq", "pid" }, @@ -707,6 +710,14 @@ erts_lc_get_lock_order_id(char *name) return (Sint16) -1; } +static int +lc_is_term_order(Sint16 id) +{ + return erts_lock_order[id].internal_order != NULL + && sys_strcmp(erts_lock_order[id].internal_order, "term") == 0; +} + + static int compare_locked_by_id(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand) { if(locked_lock->id < comparand->id) { @@ -718,18 +729,23 @@ static int compare_locked_by_id(lc_locked_lock_t *locked_lock, erts_lc_lock_t *c return 0; } -static int compare_locked_by_id_extra(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand) +static int compare_locked_by_id_extra(lc_locked_lock_t *ll, erts_lc_lock_t *comparand) { - int order = compare_locked_by_id(locked_lock, comparand); + int order = compare_locked_by_id(ll, comparand); if(order) { return order; - } else if(locked_lock->extra < comparand->extra) { + } + if (ll->flags & ERTS_LOCK_FLAGS_PROPERTY_TERM_ORDER) { + ASSERT(!is_header(ll->extra) && !is_header(comparand->extra)); + return CMP(ll->extra, comparand->extra); + } + + if(ll->extra < comparand->extra) { return -1; - } else if(locked_lock->extra > comparand->extra) { + } else if(ll->extra > comparand->extra) { return 1; } - return 0; } @@ -968,7 +984,8 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options) return 0; } else { - lc_locked_lock_t *tl_lck; + lc_locked_lock_t *ll; + int order; ASSERT(thr->locked.last); @@ -977,25 +994,28 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options) type_order_violation("trylocking ", thr, lck); #endif - if (thr->locked.last->id < lck->id - || (thr->locked.last->id == lck->id - && thr->locked.last->extra < lck->extra)) - return 0; + ll = thr->locked.last; + order = compare_locked_by_id_extra(ll, lck); + + if (order < 0) + return 0; /* - * Lock order violation + * TryLock order violation */ - - /* Check that we are not trying to lock this lock twice */ - 
for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) { - if (tl_lck->id < lck->id - || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) { - if (tl_lck->id == lck->id && tl_lck->extra == lck->extra) - lock_twice("Trylocking", thr, lck, options); - break; - } - } + /* Check that we are not trying to lock this lock twice */ + while (1) { + if (order <= 0) { + if (order == 0) + lock_twice("Trylocking", thr, lck, options); + break; + } + ll = ll->prev; + if (!ll) + break; + order = compare_locked_by_id_extra(ll, lck); + } #ifndef ERTS_LC_ALLWAYS_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION /* We only force busy if a lock order violation would occur @@ -1014,6 +1034,11 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options) #endif } +/* + * locked = 0 trylock failed + * locked > 0 trylock succeeded + * locked < 0 prelocking of newly created lock (no lock order check) + */ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options, char *file, unsigned int line) { @@ -1042,9 +1067,9 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t #endif for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) { - if (tl_lck->id < lck->id - || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) { - if (tl_lck->id == lck->id && tl_lck->extra == lck->extra) + int order = compare_locked_by_id_extra(tl_lck, lck); + if (order <= 0) { + if (order == 0 && locked >= 0) lock_twice("Trylocking", thr, lck, options); if (locked) { ll->next = tl_lck->next; @@ -1087,10 +1112,10 @@ void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options, for (l_lck2 = thr->required.last; l_lck2; l_lck2 = l_lck2->prev) { - if (l_lck2->id < lck->id - || (l_lck2->id == lck->id && l_lck2->extra < lck->extra)) + int order = compare_locked_by_id_extra(l_lck2, lck); + if (order < 0) break; - else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra) + if (order == 0) require_twice(thr, lck); } if (!l_lck2) { @@ -1148,6 +1173,7 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options, { lc_thread_t *thr; lc_locked_lock_t *new_ll; + int order; if (lck->inited != ERTS_LC_INITITALIZED) uninitialized_lock(); @@ -1163,10 +1189,10 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options, thr->locked.last = thr->locked.first = new_ll; ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE); thr->matrix.m[lck->id][0] = 1; + return; } - else if (thr->locked.last->id < lck->id - || (thr->locked.last->id == lck->id - && thr->locked.last->extra < lck->extra)) { + order = compare_locked_by_id_extra(thr->locked.last, lck); + if (order < 0) { lc_locked_lock_t* ll; if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags)) { type_order_violation("locking ", thr, lck); @@ -1296,7 +1322,6 @@ void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags) { lck->id = erts_lc_get_lock_order_id(name); - lck->extra = (UWord) &lck->extra; ASSERT(is_not_immed(lck->extra)); lck->flags = flags; @@ -1309,8 +1334,13 @@ erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Et { lck->id = erts_lc_get_lock_order_id(name); lck->extra = extra; - ASSERT(is_immed(lck->extra)); lck->flags = flags; + if (lc_is_term_order(lck->id)) { + lck->flags |= ERTS_LOCK_FLAGS_PROPERTY_TERM_ORDER; + ASSERT(!is_header(lck->extra)); + } + else + ASSERT(is_immed(lck->extra)); lck->taken_options = 0; lck->inited = ERTS_LC_INITITALIZED; } diff --git 
a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h index d10e32985a..b32f27d9f9 100644 --- a/erts/emulator/beam/erl_lock_check.h +++ b/erts/emulator/beam/erl_lock_check.h @@ -104,7 +104,7 @@ Eterm erts_lc_dump_graph(void); #define erts_lc_lock(lck) erts_lc_lock_x(lck,__FILE__,__LINE__) #define erts_lc_trylock(res,lck) erts_lc_trylock_x(res,lck,__FILE__,__LINE__) -#define erts_lc_lock_flg(lck) erts_lc_lock_flg_x(lck,__FILE__,__LINE__) -#define erts_lc_trylock_flg(res,lck) erts_lc_trylock_flg_x(res,lck,__FILE__,__LINE__) +#define erts_lc_lock_flg(lck,flg) erts_lc_lock_flg_x(lck,flg,__FILE__,__LINE__) +#define erts_lc_trylock_flg(res,lck,flg) erts_lc_trylock_flg_x(res,lck,flg,__FILE__,__LINE__) #endif /* #ifndef ERTS_LOCK_CHECK_H__ */ diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h index 89d95a73cf..0d47b16e0b 100644 --- a/erts/emulator/beam/erl_lock_count.h +++ b/erts/emulator/beam/erl_lock_count.h @@ -532,7 +532,7 @@ ERTS_GLB_INLINE void lcnt_dec_lock_state__(ethr_atomic_t *l_state) { ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state); - /* We can not assume that state is >= -1 here; unlock and unacquire might + /* We cannot assume that state is >= -1 here; unlock and unacquire might * bring it below -1 and race to increment it back. */ if(state < 0) { diff --git a/erts/emulator/beam/erl_lock_flags.h b/erts/emulator/beam/erl_lock_flags.h index d711f69456..2db133b598 100644 --- a/erts/emulator/beam/erl_lock_flags.h +++ b/erts/emulator/beam/erl_lock_flags.h @@ -28,15 +28,17 @@ /* Property/category are bitfields to simplify their use in masks. */ #define ERTS_LOCK_FLAGS_MASK_CATEGORY (0xFFC0) -#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0030) +#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0038) /* Type is a plain number. */ -#define ERTS_LOCK_FLAGS_MASK_TYPE (0x000F) +#define ERTS_LOCK_FLAGS_MASK_TYPE (0x0007) #define ERTS_LOCK_FLAGS_TYPE_SPINLOCK (1) #define ERTS_LOCK_FLAGS_TYPE_MUTEX (2) #define ERTS_LOCK_FLAGS_TYPE_PROCLOCK (3) +/* Lock checker use real term order instead of raw word compare */ +#define ERTS_LOCK_FLAGS_PROPERTY_TERM_ORDER (1 << 3) /* "Static" guarantees that the lock will never be destroyed once created. */ #define ERTS_LOCK_FLAGS_PROPERTY_STATIC (1 << 4) #define ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE (1 << 5) diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c index cba17d3e6a..3d6c9eb43f 100644 --- a/erts/emulator/beam/erl_map.c +++ b/erts/emulator/beam/erl_map.c @@ -1505,25 +1505,6 @@ int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp) return ap ? 
-1 : 1; } -/* maps:new/0 */ - -BIF_RETTYPE maps_new_0(BIF_ALIST_0) { - Eterm* hp; - Eterm tup; - flatmap_t *mp; - - hp = HAlloc(BIF_P, (MAP_HEADER_FLATMAP_SZ + 1)); - tup = make_tuple(hp); - *hp++ = make_arityval(0); - - mp = (flatmap_t*)hp; - mp->thing_word = MAP_HEADER_FLATMAP; - mp->size = 0; - mp->keys = tup; - - BIF_RET(make_flatmap(mp)); -} - /* maps:put/3 */ BIF_RETTYPE maps_put_3(BIF_ALIST_3) { @@ -1707,11 +1688,16 @@ int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res) return 0; found_key: - *hp++ = value; - vs++; - if (++i < n) - sys_memcpy(hp, vs, (n - i)*sizeof(Eterm)); - *res = make_flatmap(shp); + if(*vs == value) { + HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp); + *res = map; + } else { + *hp++ = value; + vs++; + if (++i < n) + sys_memcpy(hp, vs, (n - i)*sizeof(Eterm)); + *res = make_flatmap(shp); + } return 1; } @@ -1767,9 +1753,7 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) { if (is_immed(key)) { for( i = 0; i < n; i ++) { if (ks[i] == key) { - *hp++ = value; - vs++; - c = 1; + goto found_key; } else { *hp++ = *vs++; } @@ -1777,18 +1761,13 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) { } else { for( i = 0; i < n; i ++) { if (EQ(ks[i], key)) { - *hp++ = value; - vs++; - c = 1; + goto found_key; } else { *hp++ = *vs++; } } } - if (c) - return res; - /* the map will grow */ if (n >= MAP_SMALL_MAP_LIMIT) { @@ -1843,6 +1822,18 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) { */ *shp = make_pos_bignum_header(0); return res; + +found_key: + if(*vs == value) { + HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp); + return map; + } else { + *hp++ = value; + vs++; + if (++i < n) + sys_memcpy(hp, vs, (n - i)*sizeof(Eterm)); + return res; + } } ASSERT(is_hashmap(map)); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index ee6e6085b6..7339aa8874 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -1040,7 +1040,7 @@ ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term) Eterm* hp; /* * No preserved sharing allowed as long as literals are also preserved. - * Process independent environment can not be reached by purge. + * Process independent environment cannot be reached by purge. 
*/ sz = size_object(src_term); hp = alloc_heap(dst_env, sz); diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index 4c09496ef1..c5227a0c23 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -160,6 +160,8 @@ typedef int ErlNifEvent; #define ERL_NIF_SELECT_STOP_SCHEDULED (1 << 1) #define ERL_NIF_SELECT_INVALID_EVENT (1 << 2) #define ERL_NIF_SELECT_FAILED (1 << 3) +#define ERL_NIF_SELECT_READ_CANCELLED (1 << 4) +#define ERL_NIF_SELECT_WRITE_CANCELLED (1 << 5) typedef enum { diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 0f7f1598fd..a21acb9a57 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -4022,9 +4022,7 @@ schedule_bound_processes(ErtsRunQueue *rq, static ERTS_INLINE void clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) { -#ifdef DEBUG erts_aint32_t old; -#endif erts_aint32_t qb = prio_bit; if (rq == ERTS_DIRTY_CPU_RUNQ) qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET; @@ -4032,13 +4030,8 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) ASSERT(rq == ERTS_DIRTY_IO_RUNQ); qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET; } -#ifdef DEBUG - old = (int) -#else - (void) -#endif - erts_atomic32_read_band_mb(&p->dirty_state, ~qb); - ASSERT(old & qb); + old = (int) erts_atomic32_read_band_mb(&p->dirty_state, ~qb); + ASSERT(old & qb); (void)old; } @@ -7175,9 +7168,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, Uint32 nrml_prio, dcpu_prio, dio_prio; ErtsSchedType exec_type; ErtsRunQueue *exec_rq; -#ifdef DEBUG erts_aint32_t dbg_val; -#endif ASSERT(schdlr_sspnd.msb.ongoing); @@ -7292,16 +7283,12 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, * Suspend this scheduler and wake up scheduler * number one of another type... 
*/ -#ifdef DEBUG dbg_val = -#else - (void) -#endif erts_atomic32_read_bset_mb(&esdp->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), ERTS_SSI_FLG_SUSPENDED); - ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC); + ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC); (void)dbg_val; switch (exec_type) { case ERTS_SCHED_NORMAL: @@ -7319,11 +7306,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, break; } -#ifdef DEBUG dbg_val = -#else - (void) -#endif erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), @@ -8888,11 +8871,8 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) suspend = 1; if (suspend) { -#ifdef DEBUG - int res = -#endif - suspend_process(c_p, c_p); - ASSERT(res); + int res = suspend_process(c_p, c_p); + ASSERT(res); (void)res; } if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) @@ -12490,9 +12470,7 @@ erts_continue_exit_process(Process *p) yield: -#ifdef DEBUG ASSERT(yield_allowed); -#endif ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index 243db4c734..706530023b 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -963,12 +963,16 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area) } erts_putc(to, to_arg, '\n'); } - } else if (is_export_header(w)) { + } else if (is_export_header(w) || is_fun_header(w)) { dump_externally(to, to_arg, term); erts_putc(to, to_arg, '\n'); } size = 1 + header_arity(w); switch (w & _HEADER_SUBTAG_MASK) { + case FUN_SUBTAG: + ASSERT(((ErlFunThing*)(htop))->num_free == 0); + size += 1; + break; case MAP_SUBTAG: if (is_flatmap_header(w)) { size += 1 + flatmap_get_size(htop); diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c index d225916ac5..1d6869a7cd 100644 --- a/erts/emulator/beam/erl_unicode.c +++ b/erts/emulator/beam/erl_unicode.c @@ -1358,11 +1358,9 @@ Uint erts_atom_to_string_length(Eterm atom) else { byte* err_pos; Uint num_chars; -#ifdef DEBUG int ares = -#endif erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL); - ASSERT(ares == ERTS_UTF8_OK); + ASSERT(ares == ERTS_UTF8_OK); (void)ares; return num_chars; } diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 21ae205237..187d2635ea 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -292,7 +292,6 @@ union erl_off_heap_ptr { /* controls warning mapping in error_logger */ extern Eterm node_cookie; -extern Uint display_items; /* no of items to display in traces etc */ extern int erts_backtrace_depth; extern erts_atomic32_t erts_max_gen_gcs; diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab index 42c1168f85..da1dd3dc45 100644 --- a/erts/emulator/beam/instrs.tab +++ b/erts/emulator/beam/instrs.tab @@ -559,17 +559,19 @@ update_list(Hd, Dst) { HTOP += 2; } -i_put_tuple := i_put_tuple.make.fill; - -i_put_tuple.make(Dst) { - $Dst = make_tuple(HTOP); -} - -i_put_tuple.fill(Arity) { +put_tuple2(Dst, Arity) { Eterm* hp = HTOP; Eterm arity = $Arity; + /* + * If operands are not packed (in the 32-bit VM), + * it is not safe to use $Dst directly after I + * has been updated.
+ */ + Eterm* dst_ptr = &($Dst); + //| -no_next + ASSERT(arity != 0); *hp++ = make_arityval(arity); I = $NEXT_INSTRUCTION; do { @@ -586,6 +588,7 @@ i_put_tuple.fill(Arity) { break; } } while (--arity != 0); + *dst_ptr = make_tuple(HTOP); HTOP = hp; ASSERT(VALID_INSTR(* (Eterm *)I)); Goto(*I); diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index e76d896ffc..349034e8ac 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -483,9 +483,16 @@ is_eq f? s s is_ne f? s s # -# Putting things. +# Putting tuples. +# +# Code compiled with OTP 22 and later uses put_tuple2 to +# to construct a tuple. +# +# Code compiled before OTP 22 uses put_tuple + one put instruction +# per element. Translate to put_tuple2. # +i_put_tuple/2 put_tuple Arity Dst => i_put_tuple Dst u i_put_tuple Dst Arity Puts=* | put S1 | put S2 | \ @@ -495,11 +502,13 @@ i_put_tuple Dst Arity Puts=* | put S1 | put S2 | \ i_put_tuple Dst Arity Puts=* | put S => \ tuple_append_put(Arity, Dst, Puts, S) -i_put_tuple/2 +i_put_tuple Dst Arity Puts=* => put_tuple2 Dst Arity Puts -i_put_tuple xy I +put_tuple2 xy I # +# Putting lists. +# # The instruction "put_list Const [] Dst" were generated in rare # circumstances up to and including OTP 18. Starting with OTP 19, # AFAIK, it should never be generated. @@ -1080,67 +1089,73 @@ func_info M F A => i_func_info u M F A %warm bs_start_match2 Fail=f ica X Y D => jump Fail bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D -i_bs_start_match2 xy f t t x +i_bs_start_match2 xy f t t d bs_save2 Reg Index => gen_bs_save(Reg, Index) -i_bs_save2 x t +i_bs_save2 xy t bs_restore2 Reg Index => gen_bs_restore(Reg, Index) -i_bs_restore2 x t +i_bs_restore2 xy t # Matching integers bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val -i_bs_match_string x f W W +i_bs_match_string xy f W W # Fetching integers from binaries. -bs_get_integer2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \ +bs_get_integer2 Fail=f Ms=xy Live=u Sz=sq Unit=u Flags=u Dst=d => \ gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst) -i_bs_get_integer_small_imm x W f? t x -i_bs_get_integer_imm x W t f? t x -i_bs_get_integer f? t t x s x -i_bs_get_integer_8 x f? x -i_bs_get_integer_16 x f? x +i_bs_get_integer_small_imm Ms Bits Fail Flags Y=y => \ + i_bs_get_integer_small_imm Ms Bits Fail Flags x | move x Y + +i_bs_get_integer_imm Ms Bits Live Fail Flags Y=y => \ + i_bs_get_integer_imm Ms Bits Live Fail Flags x | move x Y + +i_bs_get_integer_small_imm xy W f? t x +i_bs_get_integer_imm xy W t f? t x +i_bs_get_integer f? t t xy s d +i_bs_get_integer_8 xy f? d +i_bs_get_integer_16 xy f? d %if ARCH_64 -i_bs_get_integer_32 x f? x +i_bs_get_integer_32 xy f? d %endif # Fetching binaries from binaries. -bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \ +bs_get_binary2 Fail=f Ms=xy Live=u Sz=sq Unit=u Flags=u Dst=d => \ gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst) -i_bs_get_binary_imm2 f? x t W t x -i_bs_get_binary2 f x t? s t x -i_bs_get_binary_all2 f? x t t x -i_bs_get_binary_all_reuse x f? t +i_bs_get_binary_imm2 f? xy t W t d +i_bs_get_binary2 f xy t? s t d +i_bs_get_binary_all2 f? xy t t d +i_bs_get_binary_all_reuse xy f? t # Fetching float from binaries. -bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \ +bs_get_float2 Fail=f Ms=xy Live=u Sz=s Unit=u Flags=u Dst=d => \ gen_get_float2(Fail, Ms, Live, Sz, Unit, Flags, Dst) bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail -i_bs_get_float2 f? 
x t s t x +i_bs_get_float2 f? xy t s t d # Miscellanous -bs_skip_bits2 Fail=f Ms=x Sz=sq Unit=u Flags=u => \ +bs_skip_bits2 Fail=f Ms=xy Sz=sq Unit=u Flags=u => \ gen_skip_bits2(Fail, Ms, Sz, Unit, Flags) -i_bs_skip_bits_imm2 f? x W -i_bs_skip_bits2 f? x xy t -i_bs_skip_bits_all2 f? x t +i_bs_skip_bits_imm2 f? xy W +i_bs_skip_bits2 f? xy xy t +i_bs_skip_bits_all2 f? xy t -bs_test_tail2 Fail=f Ms=x Bits=u==0 => bs_test_zero_tail2 Fail Ms -bs_test_tail2 Fail=f Ms=x Bits=u => bs_test_tail_imm2 Fail Ms Bits -bs_test_zero_tail2 f? x -bs_test_tail_imm2 f? x W +bs_test_tail2 Fail=f Ms=xy Bits=u==0 => bs_test_zero_tail2 Fail Ms +bs_test_tail2 Fail=f Ms=xy Bits=u => bs_test_tail_imm2 Fail Ms Bits +bs_test_zero_tail2 f? xy +bs_test_tail_imm2 f? xy W bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms -bs_test_unit f? x t -bs_test_unit8 f? x +bs_test_unit f? xy t +bs_test_unit8 f? xy # An y register operand for bs_context_to_binary is rare, # but can happen because of inlining. @@ -1152,23 +1167,55 @@ bs_context_to_binary Y=y => move Y x | bs_context_to_binary x bs_context_to_binary x +# Gets a bitstring from the tail of a context. +bs_get_tail xy d t + +# New bs_start_match variant for contexts with external position storage. +# +# bs_get/set_position is used to save positions into registers instead of +# "slots" in the context itself, which lets us continue matching even after +# we've passed it off to another function. + +%if ARCH_64 +bs_start_match3 Fail Bin Live Ctx | bs_get_position Ctx Pos=x Ignored => \ + i_bs_start_match3_gp Bin Live Fail Ctx Pos +i_bs_start_match3_gp xy t f d x +%endif + +bs_start_match3 Fail=f ica Live Dst => jump Fail +bs_start_match3 Fail Bin Live Dst => i_bs_start_match3 Bin Live Fail Dst + +i_bs_start_match3 xy t f d + +# Match context position instructions. 64-bit assumes that all positions can +# fit into an unsigned small. + +%if ARCH_64 + bs_get_position Src Dst Live => i_bs_get_position Src Dst + i_bs_get_position xy xy + bs_set_position xy xy +%else + bs_get_position xy d t? + bs_set_position xy xy +%endif + # # Utf8/utf16/utf32 support. (R12B-5) # -bs_get_utf8 Fail=f Ms=x u u Dst=d => i_bs_get_utf8 Ms Fail Dst -i_bs_get_utf8 x f? x +bs_get_utf8 Fail=f Ms=xy u u Dst=d => i_bs_get_utf8 Ms Fail Dst +i_bs_get_utf8 xy f? d -bs_skip_utf8 Fail=f Ms=x u u => i_bs_get_utf8 Ms Fail x +bs_skip_utf8 Fail=f Ms=xy u u => i_bs_get_utf8 Ms Fail x -bs_get_utf16 Fail=f Ms=x u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst -bs_skip_utf16 Fail=f Ms=x u Flags=u => i_bs_get_utf16 Ms Fail Flags x +bs_get_utf16 Fail=f Ms=xy u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst +bs_skip_utf16 Fail=f Ms=xy u Flags=u => i_bs_get_utf16 Ms Fail Flags x -i_bs_get_utf16 x f? t x +i_bs_get_utf16 xy f? t d -bs_get_utf32 Fail=f Ms=x Live=u Flags=u Dst=d => \ +bs_get_utf32 Fail=f Ms=xy Live=u Flags=u Dst=d => \ bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \ i_bs_validate_unicode_retract Fail Dst Ms -bs_skip_utf32 Fail=f Ms=x Live=u Flags=u => \ +bs_skip_utf32 Fail=f Ms=xy Live=u Flags=u => \ bs_get_integer2 Fail Ms Live i=32 u=1 Flags x | \ i_bs_validate_unicode_retract Fail x Ms @@ -1182,6 +1229,9 @@ i_bs_validate_unicode_retract j s S bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail +bs_init2 Fail Sz Words Regs Flags Dst=y => \ + bs_init2 Fail Sz Words Regs Flags x | move x Dst + bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst bs_init2 Fail Sz=u Words Regs Flags Dst => \ @@ -1202,6 +1252,8 @@ i_bs_init_heap W I t? 
x bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail +bs_init_bits Fail Sz Words Regs Flags Dst=y => \ + bs_init_bits Fail Sz Words Regs Flags x | move x Dst bs_init_bits Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init_bits Sz Regs Dst bs_init_bits Fail Sz=u Words Regs Flags Dst => i_bs_init_bits_heap Sz Words Regs Dst @@ -1230,7 +1282,7 @@ bs_private_append Fail Size Unit Bin Flags Dst => \ bs_init_writable -i_bs_append j? I t? t s x +i_bs_append j? I t? t s xy i_bs_private_append j? t s S x # diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 08f8ca9788..996757ef43 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -1569,7 +1569,7 @@ make_hash2(Eterm term) * MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a * chance of hashing different when salted: hash([Salt|A]) vs hash([Salt|B]). * - * This is why we can not use cached hash values for atoms for example. + * This is why we cannot use cached hash values for atoms for example. * */ diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 7f20477363..c9f7006384 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -4552,7 +4552,7 @@ static void desc_close_read(inet_descriptor* desc) { if (desc->s != INVALID_SOCKET) { #ifdef __WIN32__ - /* This call can not be right??? + /* This call cannot be right??? * We want to turn off read events but keep any write events. * But on windows driver_select(...,READ,1) is only used as a * way to hook into the pollset. sock_select is used to control diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c index 28c6cc0f94..11bb4373d8 100644 --- a/erts/emulator/drivers/unix/ttsl_drv.c +++ b/erts/emulator/drivers/unix/ttsl_drv.c @@ -31,7 +31,7 @@ static int ttysl_init(void); static ErlDrvData ttysl_start(ErlDrvPort, char*); -#ifdef HAVE_TERMCAP /* else make an empty driver that can not be opened */ +#ifdef HAVE_TERMCAP /* else make an empty driver that cannot be opened */ #ifndef WANT_NONBLOCKING #define WANT_NONBLOCKING diff --git a/erts/emulator/internal_doc/CarrierMigration.md b/erts/emulator/internal_doc/CarrierMigration.md index 3a796d11b7..bb3d8aac28 100644 --- a/erts/emulator/internal_doc/CarrierMigration.md +++ b/erts/emulator/internal_doc/CarrierMigration.md @@ -34,8 +34,7 @@ Solution -------- In order to prevent scenarios like this we've implemented support for -migration of multi-block carriers between allocator instances of the -same type. +migration of multi-block carriers between allocator instances. ### Management of Free Blocks ### @@ -130,10 +129,6 @@ threads may have references to it via the pool. ### Migration ### -There exists one pool for each allocator type enabling migration of -carriers between scheduler specific allocator instances of the same -allocator type. - Each allocator instance keeps track of the current utilization of its multi-block carriers. When the total utilization falls below the "abandon carrier utilization limit" it starts to inspect the utilization of the @@ -208,8 +203,8 @@ limited. We only inspect a limited number of carriers. If none of those carriers had a free block large enough to satisfy the allocation request, the search will fail. A carrier in the pool can also be BUSY if another thread is currently doing block deallocation work on the -carrier. A BUSY carrier will also be skipped by the search as it can -not satisfy the request. 
The pool is lock-free and we do not want to +carrier. A BUSY carrier will also be skipped by the search as it cannot +satisfy the request. The pool is lock-free and we do not want to block, waiting for the other thread to finish. ### The bad cluster problem ### @@ -287,11 +282,3 @@ reduced using the `aoffcbf` strategy. A trade off between memory consumption and performance is however inevitable, and it is up to the user to decide what is most important. -Further work ------------- - -It would be quite easy to extend this to allow migration of multi-block -carriers between all allocator types. More or less the only obstacle -is maintenance of the statistics information. - - diff --git a/erts/emulator/nifs/common/prim_file_nif.c b/erts/emulator/nifs/common/prim_file_nif.c index a05d50b333..9b98ac3f9a 100644 --- a/erts/emulator/nifs/common/prim_file_nif.c +++ b/erts/emulator/nifs/common/prim_file_nif.c @@ -442,7 +442,8 @@ static ERL_NIF_TERM open_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[] ERL_NIF_TERM result; efile_path_t path; - if(argc != 2 || !enif_is_list(env, argv[1])) { + ASSERT(argc == 2); + if(!enif_is_list(env, argv[1])) { return enif_make_badarg(env); } @@ -469,9 +470,7 @@ static ERL_NIF_TERM open_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[] static ERL_NIF_TERM close_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { enum efile_state_t previous_state; - if(argc != 0) { - return enif_make_badarg(env); - } + ASSERT(argc == 0); previous_state = erts_atomic32_cmpxchg_acqb(&d->state, EFILE_STATE_CLOSED, EFILE_STATE_BUSY); @@ -495,7 +494,8 @@ static ERL_NIF_TERM read_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, con SysIOVec read_vec[1]; ErlNifBinary result; - if(argc != 1 || !enif_is_number(env, argv[0])) { + ASSERT(argc == 1); + if(!enif_is_number(env, argv[0])) { return enif_make_badarg(env); } @@ -532,7 +532,8 @@ static ERL_NIF_TERM write_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, co Sint64 bytes_written; ERL_NIF_TERM tail; - if(argc != 1 || !enif_inspect_iovec(env, 64, argv[0], &tail, &input)) { + ASSERT(argc == 1); + if(!enif_inspect_iovec(env, 64, argv[0], &tail, &input)) { return enif_make_badarg(env); } @@ -555,8 +556,8 @@ static ERL_NIF_TERM pread_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, co SysIOVec read_vec[1]; ErlNifBinary result; - if(argc != 2 || !enif_is_number(env, argv[0]) - || !enif_is_number(env, argv[1])) { + ASSERT(argc == 2); + if(!enif_is_number(env, argv[0]) || !enif_is_number(env, argv[1])) { return enif_make_badarg(env); } @@ -594,8 +595,9 @@ static ERL_NIF_TERM pwrite_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, c Sint64 bytes_written, offset; ERL_NIF_TERM tail; - if(argc != 2 || !enif_is_number(env, argv[0]) - || !enif_inspect_iovec(env, 64, argv[1], &tail, &input)) { + ASSERT(argc == 2); + if(!enif_is_number(env, argv[0]) + || !enif_inspect_iovec(env, 64, argv[1], &tail, &input)) { return enif_make_badarg(env); } @@ -622,7 +624,8 @@ static ERL_NIF_TERM seek_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, con Sint64 new_position, offset; enum efile_seek_t seek; - if(argc != 2 || !enif_get_int64(env, argv[1], &offset)) { + ASSERT(argc == 2); + if(!enif_get_int64(env, argv[1], &offset)) { return enif_make_badarg(env); } @@ -646,7 +649,8 @@ static ERL_NIF_TERM seek_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, con static ERL_NIF_TERM sync_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { int data_only; - if(argc != 1 || !enif_get_int(env, 
argv[0], &data_only)) { + ASSERT(argc == 1); + if(!enif_get_int(env, argv[0], &data_only)) { return enif_make_badarg(env); } @@ -658,9 +662,7 @@ static ERL_NIF_TERM sync_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, con } static ERL_NIF_TERM truncate_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { - if(argc != 0) { - return enif_make_badarg(env); - } + ASSERT(argc == 0); if(!efile_truncate(d)) { return posix_error_to_tuple(env, d->posix_errno); @@ -672,8 +674,8 @@ static ERL_NIF_TERM truncate_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, static ERL_NIF_TERM allocate_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { Sint64 offset, length; - if(argc != 2 || !enif_is_number(env, argv[0]) - || !enif_is_number(env, argv[1])) { + ASSERT(argc == 2); + if(!enif_is_number(env, argv[0]) || !enif_is_number(env, argv[1])) { return enif_make_badarg(env); } @@ -694,8 +696,8 @@ static ERL_NIF_TERM advise_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, c enum efile_advise_t advise; Sint64 offset, length; - if(argc != 3 || !enif_is_number(env, argv[0]) - || !enif_is_number(env, argv[1])) { + ASSERT(argc == 3); + if(!enif_is_number(env, argv[0]) || !enif_is_number(env, argv[1])) { return enif_make_badarg(env); } @@ -758,8 +760,8 @@ static ERL_NIF_TERM ipread_s32bu_p32bu_nif_impl(efile_data_t *d, ErlNifEnv *env, ErlNifBinary payload; - if(argc != 2 || !enif_is_number(env, argv[0]) - || !enif_is_number(env, argv[1])) { + ASSERT(argc == 2); + if(!enif_is_number(env, argv[0]) || !enif_is_number(env, argv[1])) { return enif_make_badarg(env); } @@ -825,9 +827,7 @@ static ERL_NIF_TERM ipread_s32bu_p32bu_nif_impl(efile_data_t *d, ErlNifEnv *env, } static ERL_NIF_TERM get_handle_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { - if(argc != 0) { - return enif_make_badarg(env); - } + ASSERT(argc == 0); return efile_get_handle(env, d); } @@ -839,7 +839,8 @@ static ERL_NIF_TERM read_info_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM a efile_path_t path; int follow_links; - if(argc != 2 || !enif_get_int(env, argv[1], &follow_links)) { + ASSERT(argc == 2); + if(!enif_get_int(env, argv[1], &follow_links)) { return enif_make_badarg(env); } @@ -874,7 +875,8 @@ static ERL_NIF_TERM set_permissions_nif(ErlNifEnv *env, int argc, const ERL_NIF_ efile_path_t path; Uint32 permissions; - if(argc != 2 || !enif_get_uint(env, argv[1], &permissions)) { + ASSERT(argc == 2); + if(!enif_get_uint(env, argv[1], &permissions)) { return enif_make_badarg(env); } @@ -893,8 +895,8 @@ static ERL_NIF_TERM set_owner_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM a efile_path_t path; Sint32 uid, gid; - if(argc != 3 || !enif_get_int(env, argv[1], &uid) - || !enif_get_int(env, argv[2], &gid)) { + ASSERT(argc == 3); + if(!enif_get_int(env, argv[1], &uid) || !enif_get_int(env, argv[2], &gid)) { return enif_make_badarg(env); } @@ -913,9 +915,10 @@ static ERL_NIF_TERM set_time_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM ar Sint64 accessed, modified, created; efile_path_t path; - if(argc != 4 || !enif_get_int64(env, argv[1], &accessed) - || !enif_get_int64(env, argv[2], &modified) - || !enif_get_int64(env, argv[3], &created)) { + ASSERT(argc == 4); + if(!enif_get_int64(env, argv[1], &accessed) + || !enif_get_int64(env, argv[2], &modified) + || !enif_get_int64(env, argv[3], &created)) { return enif_make_badarg(env); } @@ -934,9 +937,7 @@ static ERL_NIF_TERM read_link_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM a efile_path_t path; 
ERL_NIF_TERM result; - if(argc != 1) { - return enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); @@ -953,9 +954,7 @@ static ERL_NIF_TERM list_dir_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM ar efile_path_t path; ERL_NIF_TERM result; - if(argc != 1) { - return enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); @@ -971,9 +970,7 @@ static ERL_NIF_TERM rename_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv efile_path_t existing_path, new_path; - if(argc != 2) { - return enif_make_badarg(env); - } + ASSERT(argc == 2); if((posix_errno = efile_marshal_path(env, argv[0], &existing_path))) { return posix_error_to_tuple(env, posix_errno); @@ -991,9 +988,7 @@ static ERL_NIF_TERM make_hard_link_nif(ErlNifEnv *env, int argc, const ERL_NIF_T efile_path_t existing_path, new_path; - if(argc != 2) { - return enif_make_badarg(env); - } + ASSERT(argc == 2); if((posix_errno = efile_marshal_path(env, argv[0], &existing_path))) { return posix_error_to_tuple(env, posix_errno); @@ -1011,9 +1006,7 @@ static ERL_NIF_TERM make_soft_link_nif(ErlNifEnv *env, int argc, const ERL_NIF_T efile_path_t existing_path, new_path; - if(argc != 2) { - return enif_make_badarg(env); - } + ASSERT(argc == 2); if((posix_errno = efile_marshal_path(env, argv[0], &existing_path))) { return posix_error_to_tuple(env, posix_errno); @@ -1031,9 +1024,7 @@ static ERL_NIF_TERM make_dir_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM ar efile_path_t path; - if(argc != 1) { - return enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); @@ -1049,9 +1040,7 @@ static ERL_NIF_TERM del_file_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM ar efile_path_t path; - if(argc != 1) { - return enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); @@ -1067,9 +1056,7 @@ static ERL_NIF_TERM del_dir_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM arg efile_path_t path; - if(argc != 1) { - return enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); @@ -1086,7 +1073,8 @@ static ERL_NIF_TERM get_device_cwd_nif(ErlNifEnv *env, int argc, const ERL_NIF_T ERL_NIF_TERM result; int device_index; - if(argc != 1 || !enif_get_int(env, argv[0], &device_index)) { + ASSERT(argc == 1); + if(!enif_get_int(env, argv[0], &device_index)) { return enif_make_badarg(env); } @@ -1101,9 +1089,7 @@ static ERL_NIF_TERM get_cwd_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM arg posix_errno_t posix_errno; ERL_NIF_TERM result; - if(argc != 0) { - return enif_make_badarg(env); - } + ASSERT(argc == 0); if((posix_errno = efile_get_cwd(env, &result))) { return posix_error_to_tuple(env, posix_errno); @@ -1117,9 +1103,7 @@ static ERL_NIF_TERM set_cwd_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM arg efile_path_t path; - if(argc != 1) { - return enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); @@ -1195,9 +1179,7 @@ static ERL_NIF_TERM read_file_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM a ErlNifBinary result; - if(argc != 1) { - return 
enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); @@ -1223,9 +1205,7 @@ static ERL_NIF_TERM altname_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM arg efile_path_t path; ERL_NIF_TERM result; - if(argc != 1) { - return enif_make_badarg(env); - } + ASSERT(argc == 1); if((posix_errno = efile_marshal_path(env, argv[0], &path))) { return posix_error_to_tuple(env, posix_errno); diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 9f115706dc..f421794f91 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -916,7 +916,7 @@ enif_select(ErlNifEnv* env, ctl_op = ERTS_POLL_OP_DEL; } else { - on = 1; + on = !(mode & ERL_NIF_SELECT_CANCEL); ASSERT(mode); if (mode & ERL_DRV_READ) { ctl_events |= ERTS_POLL_EV_IN; @@ -1042,38 +1042,51 @@ enif_select(ErlNifEnv* env, ret = 0; } else { /* off */ + ret = 0; if (state->type == ERTS_EV_TYPE_NIF) { - state->driver.nif->in.pid = NIL; - state->driver.nif->out.pid = NIL; - } - ASSERT(state->events==0); - if (!wake_poller) { - /* - * Safe to close fd now as it is not in pollset - * or there was no need to eject fd (kernel poll) - */ - if (state->type == ERTS_EV_TYPE_NIF) { - ASSERT(state->driver.stop.resource == resource); - call_stop = CALL_STOP_AND_RELEASE; - state->driver.stop.resource = NULL; + if (mode & ERL_NIF_SELECT_READ + && is_not_nil(state->driver.nif->in.pid)) { + state->driver.nif->in.pid = NIL; + ret |= ERL_NIF_SELECT_READ_CANCELLED; } - else { - ASSERT(!state->driver.stop.resource); - call_stop = CALL_STOP; + if (mode & ERL_NIF_SELECT_WRITE + && is_not_nil(state->driver.nif->out.pid)) { + state->driver.nif->out.pid = NIL; + ret |= ERL_NIF_SELECT_WRITE_CANCELLED; } - state->type = ERTS_EV_TYPE_NONE; - ret = ERL_NIF_SELECT_STOP_CALLED; } - else { - /* Not safe to close fd, postpone stop_select callback. */ - if (state->type == ERTS_EV_TYPE_NONE) { - ASSERT(!state->driver.stop.resource); - state->driver.stop.resource = resource; - enif_keep_resource(resource); + if (mode & ERL_NIF_SELECT_STOP) { + ASSERT(state->events==0); + if (!wake_poller) { + /* + * Safe to close fd now as it is not in pollset + * or there was no need to eject fd (kernel poll) + */ + if (state->type == ERTS_EV_TYPE_NIF) { + ASSERT(state->driver.stop.resource == resource); + call_stop = CALL_STOP_AND_RELEASE; + state->driver.stop.resource = NULL; + } + else { + ASSERT(!state->driver.stop.resource); + call_stop = CALL_STOP; + } + state->type = ERTS_EV_TYPE_NONE; + ret |= ERL_NIF_SELECT_STOP_CALLED; + } + else { + /* Not safe to close fd, postpone stop_select callback. */ + if (state->type == ERTS_EV_TYPE_NONE) { + ASSERT(!state->driver.stop.resource); + state->driver.stop.resource = resource; + enif_keep_resource(resource); + } + state->type = ERTS_EV_TYPE_STOP_NIF; + ret |= ERL_NIF_SELECT_STOP_SCHEDULED; } - state->type = ERTS_EV_TYPE_STOP_NIF; - ret = ERL_NIF_SELECT_STOP_SCHEDULED; } + else + ASSERT(mode & ERL_NIF_SELECT_CANCEL); } done: @@ -1251,7 +1264,8 @@ print_nif_select_op(erts_dsprintf_buf_t *dsbufp, (int) fd, mode & ERL_NIF_SELECT_READ ? " READ" : "", mode & ERL_NIF_SELECT_WRITE ? " WRITE" : "", - mode & ERL_NIF_SELECT_STOP ? " STOP" : "", + (mode & ERL_NIF_SELECT_STOP ? " STOP" + : (mode & ERL_NIF_SELECT_CANCEL ? 
" CANCEL" : "")), resource->type->module, resource->type->name, ref); @@ -2332,10 +2346,16 @@ drvmode2str(int mode) { static ERTS_INLINE char * nifmode2str(enum ErlNifSelectFlags mode) { + if (mode & ERL_NIF_SELECT_STOP) + return "STOP"; switch (mode) { case ERL_NIF_SELECT_READ: return "READ"; case ERL_NIF_SELECT_WRITE: return "WRITE"; - case ERL_NIF_SELECT_STOP: return "STOP"; + case ERL_NIF_SELECT_READ|ERL_NIF_SELECT_WRITE: return "READ|WRITE"; + case ERL_NIF_SELECT_CANCEL|ERL_NIF_SELECT_READ: return "CANCEL|READ"; + case ERL_NIF_SELECT_CANCEL|ERL_NIF_SELECT_WRITE: return "CANCEL|WRITE"; + case ERL_NIF_SELECT_CANCEL|ERL_NIF_SELECT_READ|ERL_NIF_SELECT_WRITE: + return "CANCEL|READ|WRITE"; default: return "UNKNOWN"; } } diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl index 343afe85e6..4e0243c1cd 100644 --- a/erts/emulator/test/alloc_SUITE.erl +++ b/erts/emulator/test/alloc_SUITE.erl @@ -71,7 +71,8 @@ migration(Cfg) -> %% Disable driver_alloc to avoid recursive alloc_util calls %% through enif_mutex_create() in my_creating_mbc(). drv_case(Cfg, concurrent, "+MZe true +MRe false"), - drv_case(Cfg, concurrent, "+MZe true +MRe false +MZas ageffcbf"). + drv_case(Cfg, concurrent, "+MZe true +MRe false +MZas ageffcbf"), + drv_case(Cfg, concurrent, "+MZe true +MRe false +MZas chaosff"). erts_mmap(Config) when is_list(Config) -> case {os:type(), mmsc_flags()} of diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl index d19f7f81ad..742592f88e 100644 --- a/erts/emulator/test/call_trace_SUITE.erl +++ b/erts/emulator/test/call_trace_SUITE.erl @@ -1395,7 +1395,7 @@ seq(M, N, R) when M =< N -> seq(M, N-1, [N|R]); seq(_, _, R) -> R. -%% lists:reverse can not be called since it is traced +%% lists:reverse cannot be called since it is traced reverse(L) -> reverse(L, []). %% diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl index 9c6dc3ff83..7e690fd870 100644 --- a/erts/emulator/test/code_SUITE.erl +++ b/erts/emulator/test/code_SUITE.erl @@ -332,6 +332,7 @@ constant_pools(Config) when is_list(Config) -> A = literals:a(), B = literals:b(), C = literals:huge_bignum(), + D = literals:funs(), process_flag(trap_exit, true), Self = self(), @@ -345,7 +346,7 @@ constant_pools(Config) when is_list(Config) -> true = erlang:purge_module(literals), NoOldHeap ! done, receive - {'EXIT',NoOldHeap,{A,B,C}} -> + {'EXIT',NoOldHeap,{A,B,C,D}} -> ok; Other -> ct:fail({unexpected,Other}) @@ -362,7 +363,7 @@ constant_pools(Config) when is_list(Config) -> erlang:purge_module(literals), OldHeap ! done, receive - {'EXIT',OldHeap,{A,B,C,[1,2,3|_]=Seq}} when length(Seq) =:= 16 -> + {'EXIT',OldHeap,{A,B,C,D,[1,2,3|_]=Seq}} when length(Seq) =:= 16 -> ok end, @@ -390,7 +391,7 @@ constant_pools(Config) when is_list(Config) -> {'DOWN', Mon, process, Hib, Reason} -> {undef, [{no_module, no_function, - [{A,B,C,[1,2,3|_]=Seq}], _}]} = Reason, + [{A,B,C,D,[1,2,3|_]=Seq}], _}]} = Reason, 16 = length(Seq) end, HeapSz = TotHeapSz, %% Ensure restored to hibernated state... @@ -400,7 +401,9 @@ constant_pools(Config) when is_list(Config) -> no_old_heap(Parent) -> A = literals:a(), B = literals:b(), - Res = {A,B,literals:huge_bignum()}, + C = literals:huge_bignum(), + D = literals:funs(), + Res = {A,B,C,D}, Parent ! 
go, receive done -> @@ -410,7 +413,9 @@ no_old_heap(Parent) -> old_heap(Parent) -> A = literals:a(), B = literals:b(), - Res = {A,B,literals:huge_bignum(),lists:seq(1, 16)}, + C = literals:huge_bignum(), + D = literals:funs(), + Res = {A,B,C,D,lists:seq(1, 16)}, create_old_heap(), Parent ! go, receive @@ -421,7 +426,9 @@ old_heap(Parent) -> hibernated(Parent) -> A = literals:a(), B = literals:b(), - Res = {A,B,literals:huge_bignum(),lists:seq(1, 16)}, + C = literals:huge_bignum(), + D = literals:funs(), + Res = {A,B,C,D,lists:seq(1, 16)}, Parent ! go, erlang:hibernate(no_module, no_function, [Res]). @@ -755,7 +762,8 @@ t_copy_literals_frags(Config) when is_list(Config) -> 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15, 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9,10,11,12,13,14,15>>}]), + 8, 9,10,11,12,13,14,15>>}, + {f, fun ?MODULE:all/0}]), {module, ?mod} = erlang:load_module(?mod, Bin), N = 6000, @@ -796,6 +804,7 @@ literal_receiver() -> C = ?mod:c(), D = ?mod:d(), E = ?mod:e(), + F = ?mod:f(), literal_receiver(); {Pid, sender_confirm} -> io:format("sender confirm ~w~n", [Pid]), @@ -811,7 +820,8 @@ literal_sender(N, Recv) -> ?mod:b(), ?mod:c(), ?mod:d(), - ?mod:e()]}, + ?mod:e(), + ?mod:f()]}, literal_sender(N - 1, Recv). literal_switcher() -> diff --git a/erts/emulator/test/code_SUITE_data/literals.erl b/erts/emulator/test/code_SUITE_data/literals.erl index 7c3b0ebe73..13c8b412b0 100644 --- a/erts/emulator/test/code_SUITE_data/literals.erl +++ b/erts/emulator/test/code_SUITE_data/literals.erl @@ -19,7 +19,8 @@ %% -module(literals). --export([a/0,b/0,huge_bignum/0,binary/0,unused_binaries/0,bits/0]). +-export([a/0,b/0,huge_bignum/0,funs/0, + binary/0,unused_binaries/0,bits/0]). -export([msg1/0,msg2/0,msg3/0,msg4/0,msg5/0]). a() -> @@ -108,3 +109,8 @@ msg2() -> {"hello","world"}. msg3() -> <<"halloj">>. msg4() -> #{ 1=> "hello", b => "world"}. msg5() -> {1,2,3,4,5,6}. + +funs() -> + %% Literal funs (in a non-literal list). + [fun ?MODULE:a/0, + fun() -> ok end]. %No environment. diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index 6f5d639d04..9ffb484eb4 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -2645,24 +2645,7 @@ wait_deallocations() -> driver_alloc_size() -> wait_deallocations(), - case erlang:system_info({allocator_sizes, driver_alloc}) of - false -> - undefined; - MemInfo -> - CS = lists:foldl( - fun ({instance, _, L}, Acc) -> - {value,{_,MBCS}} = lists:keysearch(mbcs, 1, L), - {value,{_,SBCS}} = lists:keysearch(sbcs, 1, L), - [MBCS,SBCS | Acc] - end, - [], - MemInfo), - lists:foldl( - fun(L, Sz0) -> - {value,{_,Sz,_,_}} = lists:keysearch(blocks_size, 1, L), - Sz0+Sz - end, 0, CS) - end. + erts_debug:alloc_blocks_size(driver_alloc). rpc(Config, Fun) -> case proplists:get_value(node, Config) of diff --git a/erts/emulator/test/erts_debug_SUITE.erl b/erts/emulator/test/erts_debug_SUITE.erl index 6aa7a445b5..f39dbedd8f 100644 --- a/erts/emulator/test/erts_debug_SUITE.erl +++ b/erts/emulator/test/erts_debug_SUITE.erl @@ -22,8 +22,10 @@ -include_lib("common_test/include/ct.hrl"). -export([all/0, suite/0, - test_size/1,flat_size_big/1,df/1,term_type/1, - instructions/1, stack_check/1]). + test_size/1,flat_size_big/1,df/1,term_type/1, + instructions/1, stack_check/1, alloc_blocks_size/1]). + +-export([do_alloc_blocks_size/0]). suite() -> [{ct_hooks,[ts_install_cth]}, @@ -31,7 +33,7 @@ suite() -> all() -> [test_size, flat_size_big, df, instructions, term_type, - stack_check]. + stack_check, alloc_blocks_size]. 
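For context on the two test changes around here: erts_debug:alloc_blocks_size/1 replaces the open-coded traversal that driver_SUITE used to do by hand. Judging from that removed code, it sums the current blocks_size of both the multi-block (mbcs) and single-block (sbcs) carriers across all instances of the given allocator, and the undefined result for a disabled allocator carries over from the old false branch. A plain-Erlang reconstruction of the expected computation, as a sketch rather than the BIF's actual implementation:

    alloc_blocks_size(Alloc) ->
        case erlang:system_info({allocator_sizes, Alloc}) of
            false ->
                undefined;
            MemInfo ->
                %% Collect the mbcs and sbcs property lists of every
                %% allocator instance, then sum their blocks_size.
                Carriers = [CL || {instance, _, L} <- MemInfo,
                                  {Tag, CL} <- L,
                                  Tag =:= mbcs orelse Tag =:= sbcs],
                lists:sum([Sz || CL <- Carriers,
                                 {blocks_size, Sz, _, _} <-
                                     [lists:keyfind(blocks_size, 1, CL)]])
        end.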
test_size(Config) when is_list(Config) -> ConsCell1 = id([a|b]), @@ -210,5 +212,28 @@ instructions(Config) when is_list(Config) -> _ = [list_to_atom(I) || I <- Is], ok. +alloc_blocks_size(Config) when is_list(Config) -> + F = fun(Args) -> + Node = start_slave(Args), + ok = rpc:call(Node, ?MODULE, do_alloc_blocks_size, []), + true = test_server:stop_node(Node) + end, + F("+Meamax"), + F("+Meamin"), + F(""), + ok. + +do_alloc_blocks_size() -> + _ = erts_debug:alloc_blocks_size(binary_alloc), + ok. + +start_slave(Args) -> + Name = ?MODULE_STRING ++ "_slave", + Pa = filename:dirname(code:which(?MODULE)), + {ok, Node} = test_server:start_node(list_to_atom(Name), + slave, + [{args, "-pa " ++ Pa ++ " " ++ Args}]), + Node. + id(I) -> I. diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl index 73fe9b0d8f..f8a879182e 100644 --- a/erts/emulator/test/fun_SUITE.erl +++ b/erts/emulator/test/fun_SUITE.erl @@ -576,7 +576,7 @@ refc_dist(Config) when is_list(Config) -> process_flag(trap_exit, true), Pid = spawn_link(Node, fun() -> receive Fun when is_function(Fun) -> - 2 = fun_refc(Fun), + 3 = fun_refc(Fun), exit({normal,Fun}) end end), F = fun() -> 42 end, @@ -598,7 +598,7 @@ refc_dist_send(Node, F) -> Pid = spawn_link(Node, fun() -> receive {To,Fun} when is_function(Fun) -> wait_until(fun () -> - 2 =:= fun_refc(Fun) + 3 =:= fun_refc(Fun) end), To ! Fun end @@ -626,7 +626,7 @@ refc_dist_reg_send(Node, F) -> Me ! Ref, receive {Me,Fun} when is_function(Fun) -> - 2 = fun_refc(Fun), + 3 = fun_refc(Fun), Me ! Fun end end), @@ -806,11 +806,13 @@ verify_not_undef(Fun, Tag) -> ct:fail("tag ~w not defined in fun_info", [Tag]); {Tag,_} -> ok end. - + id(X) -> X. spawn_call(Node, AFun) -> + Parent = self(), + Init = erlang:whereis(init), Pid = spawn_link(Node, fun() -> receive @@ -821,8 +823,10 @@ spawn_call(Node, AFun) -> _ -> lists:seq(0, Arity-1) end, Res = apply(Fun, Args), - {pid,Creator} = erlang:fun_info(Fun, pid), - Creator ! {result,Res} + case erlang:fun_info(Fun, pid) of + {pid,Init} -> Parent ! {result,Res}; + {pid,Creator} -> Creator ! {result,Res} + end end end), Pid ! {AFun,AFun,AFun}, diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl index a2f3489943..edad62a9fb 100644 --- a/erts/emulator/test/nif_SUITE.erl +++ b/erts/emulator/test/nif_SUITE.erl @@ -485,12 +485,14 @@ t_on_load(Config) when is_list(Config) -> -define(ERL_NIF_SELECT_READ, (1 bsl 0)). -define(ERL_NIF_SELECT_WRITE, (1 bsl 1)). -define(ERL_NIF_SELECT_STOP, (1 bsl 2)). +-define(ERL_NIF_SELECT_CANCEL, (1 bsl 3)). -define(ERL_NIF_SELECT_STOP_CALLED, (1 bsl 0)). -define(ERL_NIF_SELECT_STOP_SCHEDULED, (1 bsl 1)). -define(ERL_NIF_SELECT_INVALID_EVENT, (1 bsl 2)). -define(ERL_NIF_SELECT_FAILED, (1 bsl 3)). - +-define(ERL_NIF_SELECT_READ_CANCELLED, (1 bsl 4)). +-define(ERL_NIF_SELECT_WRITE_CANCELLED, (1 bsl 5)). 
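The ERL_NIF_SELECT_CANCEL flag defined above is ORed into the mode argument together with READ and/or WRITE to withdraw interest in a file descriptor without going through the STOP protocol; the READ_CANCELLED and WRITE_CANCELLED bits in the return value then tell whether a notification was actually pending for that direction. A small helper in the style of this suite, as a sketch (select_nif/5 is the suite's wrapper around enif_select, used in the test below; the helper itself is illustrative and not part of the patch):

    %% Cancel a read select on Fd; returns true if a pending read
    %% notification was actually dropped, false if none was armed.
    cancel_read(Fd, Ref) ->
        Ret = select_nif(Fd, ?ERL_NIF_SELECT_READ bor ?ERL_NIF_SELECT_CANCEL,
                         Fd, null, Ref),
        Ret band ?ERL_NIF_SELECT_READ_CANCELLED =/= 0.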
select(Config) when is_list(Config) -> ensure_lib_loaded(Config), @@ -516,7 +518,16 @@ select(Config) when is_list(Config) -> end), 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,Pid,Ref), {Pid, done} = receive_any(1000), + + %% Cancel read + 0 = select_nif(R,?ERL_NIF_SELECT_READ bor ?ERL_NIF_SELECT_CANCEL,R,null,Ref), <<"hej">> = read_nif(R, 3), + 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,null,Ref), + ?ERL_NIF_SELECT_READ_CANCELLED = + select_nif(R,?ERL_NIF_SELECT_READ bor ?ERL_NIF_SELECT_CANCEL,R,null,Ref), + ok = write_nif(W, <<"hej again">>), + [] = flush(0), + <<"hej again">> = read_nif(R, 9), %% Wait for write Written = write_full(W, $a), @@ -525,6 +536,15 @@ select(Config) when is_list(Config) -> Written = read_nif(R,byte_size(Written)), [{select, W, Ref, ready_output}] = flush(), + %% Cancel write + 0 = select_nif(W,?ERL_NIF_SELECT_WRITE bor ?ERL_NIF_SELECT_CANCEL,W,null,Ref), + Written2 = write_full(W, $b), + 0 = select_nif(W,?ERL_NIF_SELECT_WRITE,W,null,Ref), + ?ERL_NIF_SELECT_WRITE_CANCELLED = + select_nif(W,?ERL_NIF_SELECT_WRITE bor ?ERL_NIF_SELECT_CANCEL,W,null,Ref), + Written2 = read_nif(R,byte_size(Written2)), + [] = flush(0), + %% Close write and wait for EOF eagain = read_nif(R, 1), check_stop_ret(select_nif(W,?ERL_NIF_SELECT_STOP,W,null,Ref)), diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl index 57eb082d64..f4b1d885fe 100644 --- a/erts/emulator/test/process_SUITE.erl +++ b/erts/emulator/test/process_SUITE.erl @@ -2233,8 +2233,8 @@ processes_term_proc_list(Config) when is_list(Config) -> %% We have to run this test case with +S1 since instrument:allocations() %% will report a free()'d block as present until it's actually deallocated %% by its employer. - Run("+MSe true +MSatags false +S1"), - Run("+MSe true +MSatags true +S1"), + Run("+MSe true +Muatags false +S1"), + Run("+MSe true +Muatags true +S1"), ok. @@ -2242,10 +2242,12 @@ processes_term_proc_list(Config) when is_list(Config) -> chk_term_proc_list(?LINE, MC, XB)). chk_term_proc_list(Line, MustChk, ExpectBlks) -> - Allocs = instrument:allocations(#{ allocator_types => [sl_alloc] }), + Allocs = instrument:allocations(), case {MustChk, Allocs} of {false, {error, not_enabled}} -> not_enabled; + {false, {ok, {_Shift, _Unscanned, ByOrigin}}} when ByOrigin =:= #{} -> + not_enabled; {_, {ok, {_Shift, _Unscanned, ByOrigin}}} -> ByType = maps:get(system, ByOrigin, #{}), Hist = maps:get(ptab_list_deleted_el, ByType, {}), diff --git a/erts/emulator/test/smoke_test_SUITE.erl b/erts/emulator/test/smoke_test_SUITE.erl index 26c610e3a8..5b46342127 100644 --- a/erts/emulator/test/smoke_test_SUITE.erl +++ b/erts/emulator/test/smoke_test_SUITE.erl @@ -56,7 +56,7 @@ end_per_testcase(_Case, Config) when is_list(Config) -> %%% boot_combo(Config) when is_list(Config) -> - ZFlags = os:getenv("ERL_ZFLAGS"), + ZFlags = os:getenv("ERL_ZFLAGS", ""), NOOP = fun () -> ok end, A42 = fun () -> case erlang:system_info(threads) of @@ -87,10 +87,7 @@ boot_combo(Config) when is_list(Config) -> %% A lot more combos could be implemented... ok after - os:putenv("ERL_ZFLAGS", case ZFlags of - false -> ""; - _ -> ZFlags - end) + os:putenv("ERL_ZFLAGS", ZFlags) end. 
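In the system_info_SUITE change below, the fixed 1.05 comparison factor becomes one scaled by scheduler count; with 8 schedulers online, for instance, it works out to 1.05 + 0.01 * 8 = 1.13, allowing the two memory readings to differ by up to 13%. Since cmp_memory/4's body is not part of this diff, the shape of the check is an assumption, sketched here:

    %% Assumed form of the tolerance test: each reading must lie
    %% within a factor Tol of the other.
    within_tolerance(A, B, Tol) ->
        A =< B * Tol andalso B =< A * Tol.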
native_atomics(Config) when is_list(Config) -> diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl index 21ab6b378a..8ea2d88ec4 100644 --- a/erts/emulator/test/system_info_SUITE.erl +++ b/erts/emulator/test/system_info_SUITE.erl @@ -457,11 +457,16 @@ cmp_memory(MWs, Str) -> %% Total, processes, processes_used, and system will seldom %% give us exactly the same result since the two readings %% aren't taken atomically. + %% + %% Tolerance is scaled according to the number of schedulers + %% to match spawn_mem_workers. + + Tolerance = 1.05 + 0.01 * erlang:system_info(schedulers_online), - cmp_memory(total, EM, EDM, 1.05), - cmp_memory(processes, EM, EDM, 1.05), - cmp_memory(processes_used, EM, EDM, 1.05), - cmp_memory(system, EM, EDM, 1.05), + cmp_memory(total, EM, EDM, Tolerance), + cmp_memory(processes, EM, EDM, Tolerance), + cmp_memory(processes_used, EM, EDM, Tolerance), + cmp_memory(system, EM, EDM, Tolerance), ok. diff --git a/erts/emulator/test/timer_bif_SUITE.erl b/erts/emulator/test/timer_bif_SUITE.erl index fc11a04a31..15fe13c8c0 100644 --- a/erts/emulator/test/timer_bif_SUITE.erl +++ b/erts/emulator/test/timer_bif_SUITE.erl @@ -361,7 +361,7 @@ evil_timers(Config) when is_list(Config) -> %% %% 1. A timer started with erlang:start_timer(Time, Receiver, Msg), %% where Msg is a composite term, expires, and the receivers main - %% lock *can not* be acquired immediately (typically when the + %% lock *cannot* be acquired immediately (typically when the %% receiver *is* running). %% %% The wrap tuple ({timeout, TRef, Msg}) will in this case @@ -372,7 +372,7 @@ evil_timers(Config) when is_list(Config) -> RecvTimeOutMsgs0 = evil_recv_timeouts(200), %% 2. A timer started with erlang:start_timer(Time, Receiver, Msg), %% where Msg is an immediate term, expires, and the receivers main - %% lock *can not* be acquired immediately (typically when the + %% lock *cannot* be acquired immediately (typically when the %% receiver *is* running). %% %% The wrap tuple will in this case be allocated in a new