Diffstat (limited to 'erts/emulator/beam')
79 files changed, 9132 insertions, 1717 deletions
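Two changes dominate this diff: the first flat ("small") map representation with its BEAM instructions, and the plumbing for dirty schedulers and process system tasks. A `map_t` keeps a header word, the pair count, and a pointer to a tuple of keys sorted in term order, with the values laid out right after the header; the helpers `get_map_element()` and `has_not_map_field()` added in beam_emu.c below are plain linear scans over that key array. A minimal standalone sketch of that lookup, under simplifying assumptions (plain `long` values and `==` stand in for tagged `Eterm`s and `EQ()`; all names here are illustrative, not the emulator's):

```c
#include <stddef.h>

/* Simplified stand-in for the flat map layout introduced in this diff:
 * the real map_t holds a tagged header word and a pointer to a key
 * tuple on the process heap, with the values following the header. */
typedef struct {
    size_t size;       /* number of key/value pairs */
    const long *keys;  /* keys, sorted by term order */
    const long *vals;  /* values, parallel to keys */
} flat_map;

/* Mirrors get_map_element(): linear scan over the keys, reporting
 * found/not-found instead of returning THE_NON_VALUE. */
static int flat_map_get(const flat_map *mp, long key, long *out)
{
    size_t i;
    for (i = 0; i < mp->size; i++) {
        if (mp->keys[i] == key) {
            *out = mp->vals[i];
            return 1;
        }
    }
    return 0; /* caller branches to its fail label, as the opcodes do */
}
```

The multi-key opcodes `i_has_map_fields_fsI` and `i_get_map_elements_fsI` exploit the shared sort order the same way, advancing the pattern list and the key array together in a single pass.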
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index c2f32ba089..d28e519ae1 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -71,6 +71,7 @@ atom ac atom active atom all atom all_but_first +atom all_names atom alloc_info atom alloc_sizes atom allocated @@ -78,6 +79,7 @@ atom allocated_areas atom allocator atom allocator_sizes atom alloc_util_allocators +atom allow_gc atom allow_passive_connect atom already_loaded atom amd64 @@ -114,6 +116,7 @@ atom binary_longest_prefix_trap atom binary_longest_suffix_trap atom binary_match_trap atom binary_matches_trap +atom binary_to_term_trap atom block atom blocked atom bm @@ -176,6 +179,7 @@ atom dexit atom depth atom dgroup_leader atom dictionary +atom dirty_cpu_schedulers_online atom disable_trace atom disabled atom display_items @@ -322,6 +326,8 @@ atom low atom Lt='<' atom machine atom match +atom match_limit +atom match_limit_recursion atom match_spec atom max atom maximum @@ -351,11 +357,13 @@ atom multi_scheduling atom multiline atom name atom named_table +atom namelist atom native_addresses atom Neq='=/=' atom Neqeq='/=' atom net_kernel atom net_kernel_terminated +atom never_utf atom new atom new_index atom new_uniq @@ -381,6 +389,7 @@ atom nosuspend atom no_float atom no_integer atom no_network +atom no_start_optimize atom not atom not_a_list atom not_loaded @@ -391,6 +400,7 @@ atom notalive atom notbol atom noteol atom notempty +atom notempty_atstart atom notify atom notsup atom nouse_stdio @@ -475,6 +485,7 @@ atom register atom registered_name atom reload atom rem +atom report_errors atom reset atom restart atom return_from @@ -557,6 +568,7 @@ atom true atom tuple atom type atom ucompile +atom ucp atom undef atom ungreedy atom unicode diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 649594a334..df1983a83d 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -36,7 +36,7 @@ #include "erl_thr_progress.h" static void set_default_trace_pattern(Eterm module); -static Eterm check_process_code(Process* rp, Module* modp); +static Eterm check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp); static void delete_code(Module* modp); static void decrement_refc(BeamInstr* code); static int is_native(BeamInstr* code); @@ -201,7 +201,7 @@ finish_loading_1(BIF_ALIST_1) * to keep the elements in. */ - n = list_length(BIF_ARG_1); + n = erts_list_length(BIF_ARG_1); if (n == -1) { ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); goto done; @@ -427,69 +427,82 @@ check_old_code_1(BIF_ALIST_1) } Eterm -check_process_code_2(BIF_ALIST_2) +erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp) { - Process* rp; Module* modp; + Eterm res; + ErtsCodeIndex code_ix; - if (is_not_atom(BIF_ARG_2)) { - goto error; - } - if (is_internal_pid(BIF_ARG_1)) { - Eterm res; - ErtsCodeIndex code_ix; - - code_ix = erts_active_code_ix(); - modp = erts_get_module(BIF_ARG_2, code_ix); - if (modp == NULL) { /* Doesn't exist. */ - return am_false; - } - erts_rlock_old_code(code_ix); - if (modp->old.code == NULL) { /* No old code. 
*/ - erts_runlock_old_code(code_ix); - return am_false; - } - erts_runlock_old_code(code_ix); - -#ifdef ERTS_SMP - rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, - BIF_ARG_1, ERTS_PROC_LOCK_MAIN); -#else - rp = erts_pid2proc(BIF_P, 0, BIF_ARG_1, 0); -#endif - if (!rp) { - BIF_RET(am_false); - } - if (rp == ERTS_PROC_LOCK_BUSY) { - ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P, - BIF_ARG_1, BIF_ARG_2); - } - erts_rlock_old_code(code_ix); - if (modp->old.code != NULL) { /* must check again */ - res = check_process_code(rp, modp); - } - else { - res = am_false; - } - erts_runlock_old_code(code_ix); -#ifdef ERTS_SMP - if (BIF_P != rp) { - erts_resume(rp, ERTS_PROC_LOCK_MAIN); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + (*redsp)++; + + ASSERT(is_atom(module)); + + code_ix = erts_active_code_ix(); + modp = erts_get_module(module, code_ix); + if (!modp) + return am_false; + erts_rlock_old_code(code_ix); + res = modp->old.code ? check_process_code(c_p, modp, allow_gc, redsp) : am_false; + erts_runlock_old_code(code_ix); + + return res; +} + +BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2) +{ + int reds = 0; + Eterm res; + Eterm olist = BIF_ARG_2; + int allow_gc = 1; + + if (is_not_atom(BIF_ARG_1)) + goto badarg; + + while (is_list(olist)) { + Eterm *lp = list_val(olist); + Eterm opt = CAR(lp); + if (is_tuple(opt)) { + Eterm* tp = tuple_val(opt); + switch (arityval(tp[0])) { + case 2: + switch (tp[1]) { + case am_allow_gc: + switch (tp[2]) { + case am_false: + allow_gc = 0; + break; + case am_true: + allow_gc = 1; + break; + default: + goto badarg; + } + break; + default: + goto badarg; + } + break; + default: + goto badarg; + } } -#endif - BIF_RET(res); - } - else if (is_external_pid(BIF_ARG_1) - && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) { - BIF_RET(am_false); + else + goto badarg; + olist = CDR(lp); } + if (is_not_nil(olist)) + goto badarg; + + res = erts_check_process_code(BIF_P, BIF_ARG_1, allow_gc, &reds); + + ASSERT(is_value(res)); + + BIF_RET2(res, reds); - error: +badarg: BIF_ERROR(BIF_P, BADARG); } - BIF_RETTYPE delete_module_1(BIF_ALIST_1) { ErtsCodeIndex code_ix; @@ -710,7 +723,7 @@ set_default_trace_pattern(Eterm module) } static Eterm -check_process_code(Process* rp, Module* modp) +check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp) { BeamInstr* start; char* mod_start; @@ -773,6 +786,16 @@ check_process_code(Process* rp, Module* modp) } } + if (rp->flags & F_DISABLE_GC) { + /* + * Cannot proceed. Process has disabled gc in order to + * safely leave inconsistent data on the heap and/or + * off heap lists. Need to wait for gc to be enabled + * again. + */ + return THE_NON_VALUE; + } + /* * See if there are funs that refer to the old version of the module. */ @@ -786,6 +809,8 @@ check_process_code(Process* rp, Module* modp) if (done_gc) { return am_true; } else { + if (!allow_gc) + return am_aborted; /* * Try to get rid of this fun by garbage collecting. * Clear both fvalue and ftrace to make sure they @@ -796,7 +821,7 @@ check_process_code(Process* rp, Module* modp) rp->ftrace = NIL; done_gc = 1; FLAGS(rp) |= F_NEED_FULLSWEEP; - (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity); + *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity); goto rescan; } } @@ -850,6 +875,9 @@ check_process_code(Process* rp, Module* modp) Uint lit_size; struct erl_off_heap_header* oh; + if (!allow_gc) + return am_aborted; + /* * Try to get rid of constants by by garbage collecting. * Clear both fvalue and ftrace. 
@@ -859,11 +887,12 @@ check_process_code(Process* rp, Module* modp) rp->ftrace = NIL; done_gc = 1; FLAGS(rp) |= F_NEED_FULLSWEEP; - (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity); + *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity); literals = (Eterm *) modp->old.code[MI_LITERALS_START]; lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals; oh = (struct erl_off_heap_header *) modp->old.code[MI_LITERALS_OFF_HEAP]; + *redsp += lit_size / 10; /* Need, better value... */ erts_garbage_collect_literals(rp, literals, lit_size, oh); } } diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 68907a771a..49a34ab4ad 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -46,7 +46,8 @@ #if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ - if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN) + if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ + __FILE__, __LINE__) # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index e36ec2a93e..a3cd08834f 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -635,6 +635,11 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr) case op_i_put_tuple_rI: case op_i_put_tuple_xI: case op_i_put_tuple_yI: + case op_new_map_jdII: + case op_update_map_assoc_jsdII: + case op_update_map_exact_jsdII: + case op_i_has_map_fields_fsI: + case op_i_get_map_elements_fsI: { int n = unpacked[-1]; diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 78ab6fa30f..0cec9ea3ec 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -31,6 +31,7 @@ #include "big.h" #include "beam_load.h" #include "erl_binary.h" +#include "erl_map.h" #include "erl_bits.h" #include "dist.h" #include "beam_bp.h" @@ -48,7 +49,7 @@ # define OpCase(OpCode) case op_##OpCode # define CountCase(OpCode) case op_count_##OpCode # define OpCode(OpCode) ((Uint*)op_##OpCode) -# define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;} +# define Goto(Rel) {Go = (int)(UWord)(Rel); goto emulator_loop;} # define LabelAddr(Addr) &&##Addr #else # define OpCase(OpCode) lb_##OpCode @@ -70,7 +71,8 @@ do { \ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \ } while (0) # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ - if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN) + if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ + __FILE__, __LINE__) # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) # else @@ -133,7 +135,7 @@ do { \ /* We don't check the range if an ordinary switch is used */ #ifdef NO_JUMP_TABLE -#define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10))) +#define VALID_INSTR(IP) ((UWord)(IP) < (NUMBER_OF_OPCODES*2+10)) #else #define VALID_INSTR(IP) \ ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \ @@ -217,6 +219,7 @@ BeamInstr beam_continue_exit[1]; BeamInstr* em_call_error_handler; BeamInstr* em_apply_bif; +BeamInstr* em_call_nif; /* NOTE These should be the only variables containing trace instructions. @@ -700,6 +703,19 @@ extern int count_instructions; Fail; \ } +#define IsMap(Src, Fail) if (is_not_map(Src)) { Fail; } + +#define HasMapField(Src, Key, Fail) if (has_not_map_field(Src, Key)) { Fail; } + +#define GetMapElement(Src, Key, Dst, Fail) \ + do { \ + Eterm _res = get_map_element(Src, Key); \ + if (is_non_value(_res)) { \ + Fail; \ + } \ + Dst = _res; \ + } while (0) + #define IsFunction(X, Action) \ do { \ if ( !(is_any_fun(X)) ) { \ @@ -943,7 +959,13 @@ static BeamInstr* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg) NOINLINE; static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) NOINLINE; - +static Eterm new_map(Process* p, Eterm* reg, BeamInstr* I) NOINLINE; +static Eterm update_map_assoc(Process* p, Eterm* reg, + Eterm map, BeamInstr* I) NOINLINE; +static Eterm update_map_exact(Process* p, Eterm* reg, + Eterm map, BeamInstr* I) NOINLINE; +static int has_not_map_field(Eterm map, Eterm key); +static Eterm get_map_element(Eterm map, Eterm key); /* * Functions not directly called by process_main(). OK to inline. @@ -1169,11 +1191,16 @@ void process_main(void) * c_p->arg_reg before calling the scheduler. */ if (!init_done) { + /* This should only be reached during the init phase when only the main + * process is running. I.e. there is no race for init_done. 
+ */ init_done = 1; goto init_emulator; } + c_p = NULL; reds_used = 0; + goto do_schedule1; do_schedule: @@ -1182,7 +1209,11 @@ void process_main(void) if (start_time != 0) { Sint64 diff = erts_timestamp_millis() - start_time; - if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) { + if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule +#ifdef ERTS_DIRTY_SCHEDULERS + && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data) +#endif + ) { BeamInstr *inptr = find_function_from_pc(start_time_i); BeamInstr *outptr = find_function_from_pc(c_p->i); monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff); @@ -2322,6 +2353,175 @@ void process_main(void) Goto(*I); } + OpCase(new_map_jdII): { + Eterm res; + + x(0) = r(0); + SWAPOUT; + res = new_map(c_p, reg, I); + SWAPIN; + r(0) = x(0); + StoreResult(res, Arg(1)); + Next(4+Arg(3)); + } + + OpCase(i_has_map_fields_fsI): { + map_t* mp; + Eterm map; + Eterm field; + Eterm *ks; + BeamInstr* fs; + Uint sz,n; + + GetArg1(1, map); + + /* this instruction assumes Arg1 is a map, + * i.e. that it follows a test is_map if needed. + */ + + mp = (map_t *)map_val(map); + sz = map_get_size(mp); + + if (sz == 0) { + SET_I((BeamInstr *) Arg(0)); + goto has_map_fields_fail; + } + + ks = map_get_keys(mp); + n = (Uint)Arg(2); + fs = &Arg(3); /* pattern fields */ + + ASSERT(n>0); + + while(sz) { + field = (Eterm)*fs; + if (EQ(field,*ks)) { + n--; + fs++; + if (n == 0) break; + } + ks++; sz--; + } + + if (n) { + SET_I((BeamInstr *) Arg(0)); + goto has_map_fields_fail; + } + + I += 4 + Arg(2); +has_map_fields_fail: + ASSERT(VALID_INSTR(*I)); + Goto(*I); + } + +#define PUT_TERM_REG(term, desc) \ +do { \ + switch ((desc) & _TAG_IMMED1_MASK) { \ + case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + r(0) = (term); \ + break; \ + case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + x((desc) >> _TAG_IMMED1_SIZE) = (term); \ + break; \ + case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + y((desc) >> _TAG_IMMED1_SIZE) = (term); \ + break; \ + default: \ + ASSERT(0); \ + break; \ + } \ +} while(0) + + OpCase(i_get_map_elements_fsI): { + Eterm map; + map_t *mp; + Eterm field; + Eterm *ks; + Eterm *vs; + BeamInstr *fs; + Uint sz,n; + + GetArg1(1, map); + + /* this instruction assumes Arg1 is a map, + * i.e. that it follows a test is_map if needed. 
+ */ + + mp = (map_t *)map_val(map); + sz = map_get_size(mp); + + if (sz == 0) { + SET_I((BeamInstr *) Arg(0)); + goto get_map_elements_fail; + } + + n = (Uint)Arg(2) / 2; + fs = &Arg(3); /* pattern fields and target registers */ + ks = map_get_keys(mp); + vs = map_get_values(mp); + + while(sz) { + field = (Eterm)*fs; + if (EQ(field,*ks)) { + PUT_TERM_REG(*vs, fs[1]); + n--; + fs += 2; + /* no more values to fetch, we are done */ + if (n == 0) break; + } + ks++; sz--; + vs++; + } + + if (n) { + SET_I((BeamInstr *) Arg(0)); + goto get_map_elements_fail; + } + + I += 4 + Arg(2); +get_map_elements_fail: + ASSERT(VALID_INSTR(*I)); + Goto(*I); + } +#undef PUT_TERM_REG + + OpCase(update_map_assoc_jsdII): { + Eterm res; + Eterm map; + + GetArg1(1, map); + x(0) = r(0); + SWAPOUT; + res = update_map_assoc(c_p, reg, map, I); + SWAPIN; + if (is_value(res)) { + r(0) = x(0); + StoreResult(res, Arg(2)); + Next(5+Arg(4)); + } else { + goto badarg; + } + } + + OpCase(update_map_exact_jsdII): { + Eterm res; + Eterm map; + + GetArg1(1, map); + x(0) = r(0); + SWAPOUT; + res = update_map_exact(c_p, reg, map, I); + SWAPIN; + if (is_value(res)) { + r(0) = x(0); + StoreResult(res, Arg(2)); + Next(5+Arg(4)); + } else { + goto badarg; + } + } + + /* * All guards with zero arguments have special instructions: * self/0 @@ -3323,6 +3523,13 @@ void process_main(void) reg[0] = r(0); nif_bif_result = (*fp)(&env, bif_nif_arity, reg); erts_post_nif(&env); +#ifdef ERTS_DIRTY_SCHEDULERS + if (is_non_value(nif_bif_result) && c_p->freason == TRAP) { + Export* ep = (Export*) c_p->psd->data[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT]; + ep->code[0] = I[-3]; + ep->code[1] = I[-2]; + } +#endif } ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result)); PROCESS_MAIN_CHK_LOCKS(c_p); @@ -4326,7 +4533,19 @@ void process_main(void) flags = Arg(2); BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size); if (size >= SMALL_BITS) { - Uint wordsneeded = 1+WSIZE(NBYTES((Uint) size)); + Uint wordsneeded; + /* check bits size before potential gc. + * We do not want a gc and then realize we don't need + * the allocated space (i.e. if the op fails) + * + * remember to reacquire the matchbuffer after gc. 
+ */ + + mb = ms_matchbuffer(tmp_arg1); + if (mb->size - mb->offset < size) { + ClauseFail(); + } + wordsneeded = 1+WSIZE(NBYTES((Uint) size)); TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1); } mb = ms_matchbuffer(tmp_arg1); @@ -4952,6 +5171,7 @@ void process_main(void) em_call_error_handler = OpCode(call_error_handler); em_apply_bif = OpCode(apply_bif); + em_call_nif = OpCode(call_nif); beam_apply[0] = (BeamInstr) OpCode(i_apply); beam_apply[1] = (BeamInstr) OpCode(normal_exit); @@ -5010,6 +5230,8 @@ translate_gc_bif(void* gcf) return bit_size_1; } else if (gcf == erts_gc_byte_size_1) { return byte_size_1; + } else if (gcf == erts_gc_map_size_1) { + return map_size_1; } else if (gcf == erts_gc_abs_1) { return abs_1; } else if (gcf == erts_gc_float_1) { @@ -6206,6 +6428,397 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) return make_fun(funp); } +static int has_not_map_field(Eterm map, Eterm key) +{ + map_t* mp; + Eterm* keys; + Uint i; + Uint n; + + mp = (map_t *)map_val(map); + keys = map_get_keys(mp); + n = map_get_size(mp); + if (is_immed(key)) { + for (i = 0; i < n; i++) { + if (keys[i] == key) { + return 0; + } + } + } else { + for (i = 0; i < n; i++) { + if (EQ(keys[i], key)) { + return 0; + } + } + } + return 1; +} + +static Eterm get_map_element(Eterm map, Eterm key) +{ + map_t *mp; + Eterm* ks, *vs; + Uint i; + Uint n; + + mp = (map_t *)map_val(map); + ks = map_get_keys(mp); + vs = map_get_values(mp); + n = map_get_size(mp); + if (is_immed(key)) { + for (i = 0; i < n; i++) { + if (ks[i] == key) { + return vs[i]; + } + } + } else { + for (i = 0; i < n; i++) { + if (EQ(ks[i], key)) { + return vs[i]; + } + } + } + return THE_NON_VALUE; +} + +#define GET_TERM(term, dest) \ +do { \ + Eterm src = (Eterm)(term); \ + switch (src & _TAG_IMMED1_MASK) { \ + case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + dest = x(0); \ + break; \ + case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + dest = x(src >> _TAG_IMMED1_SIZE); \ + break; \ + case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + dest = y(src >> _TAG_IMMED1_SIZE); \ + break; \ + default: \ + dest = src; \ + break; \ + } \ +} while(0) + + +static Eterm +new_map(Process* p, Eterm* reg, BeamInstr* I) +{ + Uint n = Arg(3); + Uint i; + Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */; + Eterm keys; + Eterm *mhp,*thp; + Eterm *E; + BeamInstr *ptr; + map_t *mp; + + if (HeapWordsLeft(p) < need) { + erts_garbage_collect(p, need, reg, Arg(2)); + } + + thp = p->htop; + mhp = thp + 1 + n/2; + E = p->stop; + ptr = &Arg(4); + keys = make_tuple(thp); + *thp++ = make_arityval(n/2); + + mp = (map_t *)mhp; mhp += MAP_HEADER_SIZE; + mp->thing_word = MAP_HEADER; + mp->size = n/2; + mp->keys = keys; + + for (i = 0; i < n/2; i++) { + GET_TERM(*ptr++, *thp++); + GET_TERM(*ptr++, *mhp++); + } + p->htop = mhp; + return make_map(mp); +} + +static Eterm +update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) +{ + Uint n; + Uint num_old; + Uint num_updates; + Uint need; + map_t *old_mp, *mp; + Eterm res; + Eterm* hp; + Eterm* E; + Eterm* old_keys; + Eterm* old_vals; + BeamInstr* new_p; + Eterm new_key; + Eterm* kp; + + if (is_not_map(map)) { + return THE_NON_VALUE; + } + + old_mp = (map_t *) map_val(map); + num_old = map_get_size(old_mp); + + /* + * If the old map is empty, create a new map. + */ + + if (num_old == 0) { + return new_map(p, reg, I+1); + } + + /* + * Allocate heap space for the worst case (i.e. all keys in the + * update list are new). 
+ */ + + num_updates = Arg(4) / 2; + need = 2*(num_old+num_updates) + 1 + MAP_HEADER_SIZE; + if (HeapWordsLeft(p) < need) { + Uint live = Arg(3); + reg[live] = map; + erts_garbage_collect(p, need, reg, live+1); + map = reg[live]; + old_mp = (map_t *)map_val(map); + } + + /* + * Build the skeleton for the map, ready to be filled in. + * + * +-----------------------------------+ + * | (Space for aritvyal for keys) | <-----------+ + * +-----------------------------------+ | + * | (Space for key 1) | | <-- kp + * +-----------------------------------+ | + * . | + * . | + * . | + * +-----------------------------------+ | + * | (Space for last key) | | + * +-----------------------------------+ | + * | MAP_HEADER | | + * +-----------------------------------+ | + * | (Space for number of keys/values) | | + * +-----------------------------------+ | + * | Boxed tuple pointer >----------------+ + * +-----------------------------------+ + * | (Space for value 1) | <-- hp + * +-----------------------------------+ + */ + + E = p->stop; + kp = p->htop + 1; /* Point to first key */ + hp = kp + num_old + num_updates; + + res = make_map(hp); + mp = (map_t *)hp; + hp += MAP_HEADER_SIZE; + mp->thing_word = MAP_HEADER; + mp->keys = make_tuple(kp-1); + + old_vals = map_get_values(old_mp); + old_keys = map_get_keys(old_mp); + + new_p = &Arg(5); + GET_TERM(*new_p, new_key); + n = num_updates; + + /* + * Fill in keys and values, until we run out of either updates + * or old values and keys. + */ + + for (;;) { + Eterm key; + Sint c; + + ASSERT(kp < (Eterm *)mp); + key = *old_keys; + if ((c = CMP_TERM(key, new_key)) < 0) { + /* Copy old key and value */ + *kp++ = key; + *hp++ = *old_vals; + old_keys++, old_vals++, num_old--; + } else { /* Replace or insert new */ + GET_TERM(new_p[1], *hp++); + if (c > 0) { /* If new new key */ + *kp++ = new_key; + } else { /* If replacement */ + *kp++ = key; + old_keys++, old_vals++, num_old--; + } + n--; + if (n == 0) { + break; + } else { + new_p += 2; + GET_TERM(*new_p, new_key); + } + } + if (num_old == 0) { + break; + } + } + + /* + * At this point, we have run out of either old keys and values, + * or the update list. In other words, at least of one n and + * num_old must be zero. + */ + + if (n > 0) { + /* + * All old keys and values have been copied, but there + * are still new keys and values in the update list that + * must be copied. + */ + ASSERT(num_old == 0); + while (n-- > 0) { + GET_TERM(new_p[0], *kp++); + GET_TERM(new_p[1], *hp++); + new_p += 2; + } + } else { + /* + * All updates are now done. We may still have old + * keys and values that we must copy. + */ + ASSERT(n == 0); + while (num_old-- > 0) { + ASSERT(kp < (Eterm *)mp); + *kp++ = *old_keys++; + *hp++ = *old_vals++; + } + } + + /* + * Calculate how many values that are unused at the end of the + * key tuple and fill it out with a bignum header. + */ + if ((n = (Eterm *)mp - kp) > 0) { + *kp = make_pos_bignum_header(n-1); + } + + /* + * Fill in the size of the map in both the key tuple and in the map. + */ + + n = kp - p->htop - 1; /* Actual number of keys/values */ + *p->htop = make_arityval(n); + mp->size = n; + p->htop = hp; + return res; +} + +/* + * Update values for keys that already exist in the map. 
+ */ + +static Eterm +update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) +{ + Uint n; + Uint i; + Uint num_old; + Uint need; + map_t *old_mp, *mp; + Eterm res; + Eterm* hp; + Eterm* E; + Eterm* old_keys; + Eterm* old_vals; + BeamInstr* new_p; + Eterm new_key; + + if (is_not_map(map)) { + return THE_NON_VALUE; + } + + old_mp = (map_t *) map_val(map); + num_old = map_get_size(old_mp); + + /* + * If the old map is empty, create a new map. + */ + + if (num_old == 0) { + return THE_NON_VALUE; + } + + /* + * Allocate the exact heap space needed. + */ + + need = num_old + MAP_HEADER_SIZE; + if (HeapWordsLeft(p) < need) { + Uint live = Arg(3); + reg[live] = map; + erts_garbage_collect(p, need, reg, live+1); + map = reg[live]; + old_mp = (map_t *)map_val(map); + } + + /* + * Update map, keeping the old key tuple. + */ + + hp = p->htop; + E = p->stop; + + old_vals = map_get_values(old_mp); + old_keys = map_get_keys(old_mp); + + res = make_map(hp); + mp = (map_t *)hp; + hp += MAP_HEADER_SIZE; + mp->thing_word = MAP_HEADER; + mp->size = num_old; + mp->keys = old_mp->keys; + + /* Get array of key/value pairs to be updated */ + new_p = &Arg(5); + GET_TERM(*new_p, new_key); + + /* Update all values */ + n = Arg(4) / 2; /* Number of values to be updated */ + ASSERT(n > 0); + for (i = 0; i < num_old; i++) { + if (!EQ(*old_keys, new_key)) { + /* Not same keys */ + *hp++ = *old_vals; + } else { + GET_TERM(new_p[1], *hp); + hp++; + n--; + if (n == 0) { + /* + * All updates done. Copy remaining values + * and return the result. + */ + for (i++, old_vals++; i < num_old; i++) { + *hp++ = *old_vals++; + } + ASSERT(hp == p->htop + need); + p->htop = hp; + return res; + } else { + new_p += 2; + GET_TERM(*new_p, new_key); + } + } + old_vals++, old_keys++; + } + + /* + * Updates left. That means that at least one the keys in the + * update list did not previously exist. + */ + ASSERT(hp == p->htop + need); + return THE_NON_VALUE; +} +#undef GET_TERM int catchlevel(Process *p) { diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 938fd8f2c9..e96177cfd9 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -409,7 +409,7 @@ typedef struct LoaderState { __result = __result << 8 | *Stp->file_p++; \ } \ Dest = __result; \ - } while (0) + } #define GetByte(Stp, Dest) \ if ((Stp)->file_left < 1) { \ @@ -506,6 +506,9 @@ static GenOp* gen_select_literals(LoaderState* stp, GenOpArg S, static GenOp* const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpArg* Rest); +static GenOp* gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src, + GenOpArg Size, GenOpArg* Rest); + static int freeze_code(LoaderState* stp); static void final_touch(LoaderState* stp); @@ -3783,6 +3786,8 @@ gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif, op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1; } else if (bf == byte_size_1) { op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1; + } else if (bf == map_size_1) { + op->a[1].val = (BeamInstr) (void *) erts_gc_map_size_1; } else if (bf == abs_1) { op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1; } else if (bf == float_1) { @@ -3949,6 +3954,49 @@ tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst, return op; } +/* + * Replace a get_map_elements with one key to an instruction with one + * element + */ + +static GenOp* +gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src, + GenOpArg Size, GenOpArg* Rest) +{ + GenOp* op; + + ASSERT(Size.type == TAG_u); + + NEW_GENOP(stp, op); + op->next = NULL; + op->op = genop_get_map_element_4; + op->arity = 4; + + op->a[0] = Fail; + op->a[1] = Src; + op->a[2] = Rest[0]; + op->a[3] = Rest[1]; + return op; +} + +static GenOp* +gen_has_map_field(LoaderState* stp, GenOpArg Fail, GenOpArg Src, + GenOpArg Size, GenOpArg* Rest) +{ + GenOp* op; + + ASSERT(Size.type == TAG_u); + + NEW_GENOP(stp, op); + op->next = NULL; + op->op = genop_has_map_field_3; + op->arity = 4; + + op->a[0] = Fail; + op->a[1] = Src; + op->a[2] = Rest[0]; + return op; +} /* * Freeze the code in memory, move the string table into place, @@ -4376,6 +4424,7 @@ transform_engine(LoaderState* st) Uint* restart; /* Where to restart if current match fails. */ GenOpArg def_vars[TE_MAX_VARS]; /* Default buffer for variables. */ GenOpArg* var = def_vars; + int num_vars = 0; int i; /* General index. 
*/ Uint mask; GenOp* instr; @@ -4578,9 +4627,9 @@ transform_engine(LoaderState* st) { int n = *pc++; int formal_arity = gen_opc[instr->op].arity; - int num_vars = n + (instr->arity - formal_arity); int j = formal_arity; + num_vars = n + (instr->arity - formal_arity); var = erts_alloc(ERTS_ALC_T_LOADER_TMP, num_vars * sizeof(GenOpArg)); for (i = 0; i < n; i++) { @@ -4592,7 +4641,6 @@ transform_engine(LoaderState* st) } break; #endif - case TOP_next_arg: ap++; break; @@ -4680,6 +4728,20 @@ transform_engine(LoaderState* st) instr->a[ap].val = var[i].val; ap++; break; +#if defined(TOP_store_rest_args) + case TOP_store_rest_args: + { + int n = *pc++; + int num_extra = num_vars - n; + + ASSERT(n <= num_vars); + GENOP_ARITY(instr, instr->arity+num_extra); + memcpy(instr->a, instr->def_args, ap*sizeof(GenOpArg)); + memcpy(instr->a+ap, var+n, num_extra*sizeof(GenOpArg)); + ap += num_extra; + } + break; +#endif case TOP_try_me_else: restart = pc + 1; restart += *pc++; @@ -5808,7 +5870,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info) Funcs = tp[1]; Patchlist = tp[2]; - if ((n = list_length(Funcs)) < 0) { + if ((n = erts_list_length(Funcs)) < 0) { goto error; } if ((bytes = erts_get_aligned_binary_bytes(Beam, &temp_alloc)) == NULL) { diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h index 65a8f26d7c..bd22b0c4de 100644 --- a/erts/emulator/beam/beam_load.h +++ b/erts/emulator/beam/beam_load.h @@ -49,6 +49,7 @@ extern void** beam_ops; extern BeamInstr beam_debug_apply[]; extern BeamInstr* em_call_error_handler; extern BeamInstr* em_apply_bif; +extern BeamInstr* em_call_nif; /* * The following variables keep a sorted list of address ranges for diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 13d31285b2..06a1230ca0 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -2675,7 +2675,7 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1) if (i < 0) { erts_free(ERTS_ALC_T_TMP, (void *) buf); - i = list_length(BIF_ARG_1); + i = erts_list_length(BIF_ARG_1); if (i > MAX_ATOM_CHARACTERS) { BIF_ERROR(BIF_P, SYSTEM_LIMIT); } @@ -2953,7 +2953,7 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2) char *buf = NULL; int base; - i = list_length(BIF_ARG_1); + i = erts_list_length(BIF_ARG_1); if (i < 0) BIF_ERROR(BIF_P, BADARG); @@ -3292,7 +3292,7 @@ BIF_RETTYPE list_to_float_1(BIF_ALIST_1) Eterm res; char *buf = NULL; - i = list_length(BIF_ARG_1); + i = erts_list_length(BIF_ARG_1); if (i < 0) BIF_ERROR(BIF_P, BADARG); @@ -3407,7 +3407,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1) Eterm* hp; int len; - if ((len = list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) { + if ((len = erts_list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) { BIF_ERROR(BIF_P, BADARG); } @@ -3764,45 +3764,6 @@ BIF_RETTYPE now_0(BIF_ALIST_0) /**********************************************************************/ -BIF_RETTYPE garbage_collect_1(BIF_ALIST_1) -{ - int reds; - Process *rp; - - if (is_not_pid(BIF_ARG_1)) { - BIF_ERROR(BIF_P, BADARG); - } - - if (BIF_P->common.id == BIF_ARG_1) - rp = BIF_P; - else { -#ifdef ERTS_SMP - rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, - BIF_ARG_1, ERTS_PROC_LOCK_MAIN); - if (rp == ERTS_PROC_LOCK_BUSY) - ERTS_BIF_YIELD1(bif_export[BIF_garbage_collect_1], BIF_P, BIF_ARG_1); -#else - rp = erts_proc_lookup(BIF_ARG_1); -#endif - if (!rp) - BIF_RET(am_false); - } - - /* The GC cost is taken for the process executing this BIF. 
*/ - - FLAGS(rp) |= F_NEED_FULLSWEEP; - reds = erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity); - -#ifdef ERTS_SMP - if (BIF_P != rp) { - erts_resume(rp, ERTS_PROC_LOCK_MAIN); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); - } -#endif - - BIF_RET2(am_true, reds); -} - BIF_RETTYPE garbage_collect_0(BIF_ALIST_0) { int reds; @@ -4372,7 +4333,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) switch (erts_set_schedulers_online(BIF_P, ERTS_PROC_LOCK_MAIN, signed_val(BIF_ARG_2), - &old_no)) { + &old_no +#ifdef ERTS_DIRTY_SCHEDULERS + , 0 +#endif + )) { case ERTS_SCHDLR_SSPND_DONE: BIF_RET(make_small(old_no)); case ERTS_SCHDLR_SSPND_YIELD_RESTART: @@ -4504,6 +4469,33 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) ref, old ? am_true : am_false); } +#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS) + } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) { + Sint old_no; + if (!is_small(BIF_ARG_2)) + goto error; + switch (erts_set_schedulers_online(BIF_P, + ERTS_PROC_LOCK_MAIN, + signed_val(BIF_ARG_2), + &old_no, + 1)) { + case ERTS_SCHDLR_SSPND_DONE: + BIF_RET(make_small(old_no)); + case ERTS_SCHDLR_SSPND_YIELD_RESTART: + ERTS_VBUMP_ALL_REDS(BIF_P); + BIF_TRAP2(bif_export[BIF_system_flag_2], + BIF_P, BIF_ARG_1, BIF_ARG_2); + case ERTS_SCHDLR_SSPND_YIELD_DONE: + ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no), + am_dirty_cpu_schedulers_online); + case ERTS_SCHDLR_SSPND_EINVAL: + goto error; + default: + ASSERT(0); + BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); + break; + } +#endif } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) { int what; if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2)) @@ -4527,7 +4519,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_P->group_leader, "A call to erlang:system_flag(cpu_topology, _) was made.\n" "The cpu_topology argument is deprecated and scheduled\n" - "for removal in erts-5.10/OTP-R16. For more information\n" + "for removal in Erlang/OTP 18. For more information\n" "see the erlang:system_flag/2 documentation.\n"); BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2); } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) { @@ -4535,7 +4527,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_P->group_leader, "A call to erlang:system_flag(scheduler_bind_type, _) was\n" "made. The scheduler_bind_type argument is deprecated and\n" - "scheduled for removal in erts-5.10/OTP-R16. For more\n" + "scheduled for removal in Erlang/OTP 18. For more\n" "information see the erlang:system_flag/2 documentation.\n"); return erts_bind_schedulers(BIF_P, BIF_ARG_2); } @@ -4654,6 +4646,17 @@ BIF_RETTYPE bump_reductions_1(BIF_ALIST_1) BIF_RET2(am_true, reds); } +BIF_RETTYPE erts_internal_cmp_term_2(BIF_ALIST_2) { + int res = CMP_TERM(BIF_ARG_1,BIF_ARG_2); + + /* ensure -1, 0, 1 result */ + if (res < 0) { + BIF_RET(make_small(-1)); + } else if (res > 0) { + BIF_RET(make_small(1)); + } + BIF_RET(make_small(0)); +} /* * Processes doing yield on return in a bif ends up in bif_return_trap(). 
*/ diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 6037c08dd8..2d888862bf 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -45,8 +45,6 @@ bif erlang:apply/3 bif erlang:atom_to_list/1 bif erlang:binary_to_list/1 bif erlang:binary_to_list/3 -bif erlang:binary_to_term/1 -bif erlang:check_process_code/2 bif erlang:crc32/1 bif erlang:crc32/2 bif erlang:crc32_combine/3 @@ -67,7 +65,6 @@ bif erlang:float_to_list/1 bif erlang:float_to_list/2 bif erlang:fun_info/2 bif erlang:garbage_collect/0 -bif erlang:garbage_collect/1 bif erlang:get/0 bif erlang:get/1 bif erlang:get_keys/1 @@ -154,6 +151,12 @@ bif erts_internal:port_command/3 bif erts_internal:port_control/3 bif erts_internal:port_close/1 bif erts_internal:port_connect/2 +bif erts_internal:binary_to_term/1 +bif erts_internal:binary_to_term/2 + +bif erts_internal:request_system_task/3 +bif erts_internal:check_process_code/2 + # inet_db support bif erlang:port_set_data/2 @@ -477,11 +480,6 @@ bif erlang:call_on_load_function/1 bif erlang:finish_after_on_load/2 # -# New Bifs in R13B4 -# -bif erlang:binary_to_term/2 - -# # The binary match bifs (New in R14A - EEP9) # @@ -575,6 +573,29 @@ bif io:printable_range/0 bif os:unsetenv/1 # +# New in R17A +# + +bif re:inspect/2 + +ubif erlang:is_map/1 +ubif erlang:map_size/1 +bif maps:to_list/1 +bif maps:find/2 +bif maps:get/2 +bif maps:from_list/1 +bif maps:is_key/2 +bif maps:keys/1 +bif maps:merge/2 +bif maps:new/0 +bif maps:put/3 +bif maps:remove/2 +bif maps:update/3 +bif maps:values/1 + +bif erts_internal:cmp_term/2 + +# # Obsolete # diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c index 2b27b111d8..41a041eba6 100644 --- a/erts/emulator/beam/big.c +++ b/erts/emulator/beam/big.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -1603,9 +1603,11 @@ big_to_double(Wterm x, double* resp) /* * Logic has been copied from erl_bif_guard.c and slightly * modified to use a static instead of dynamic heap + * + * HALFWORD: Return relative term with 'heap' as base. */ Eterm -double_to_big(double x, Eterm *heap) +double_to_big(double x, Eterm *heap, Uint hsz) { int is_negative; int ds; @@ -1633,9 +1635,10 @@ double_to_big(double x, Eterm *heap) sz = BIG_NEED_SIZE(ds); /* number of words including arity */ hp = heap; - res = make_big(hp); + res = make_big_rel(hp, heap); xp = (ErtsDigit*) (hp + 1); + ASSERT(ds < hsz); for (i = ds - 1; i >= 0; i--) { ErtsDigit d; diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index 1a7b14170f..d80111822e 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -141,7 +141,7 @@ Eterm big_lshift(Eterm, Sint, Eterm*); int big_comp (Wterm, Wterm); int big_ucomp (Eterm, Eterm); int big_to_double(Wterm x, double* resp); -Eterm double_to_big(double, Eterm*); +Eterm double_to_big(double, Eterm*, Uint hsz); Eterm small_to_big(Sint, Eterm*); Eterm uint_to_big(Uint, Eterm*); Eterm uword_to_big(UWord, Eterm*); diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c index 33abac2f3d..c7926f18af 100644 --- a/erts/emulator/beam/binary.c +++ b/erts/emulator/beam/binary.c @@ -447,6 +447,7 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1) BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg) { Eterm bin; + Eterm h,t; ErlDrvSizeT size; byte* bytes; #ifdef DEBUG @@ -459,6 +460,16 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg) if (is_not_list(arg)) { goto error; } + /* check for [binary()] case */ + h = CAR(list_val(arg)); + t = CDR(list_val(arg)); + if (is_binary(h) && is_nil(t) && !( + HEADER_SUB_BIN == *(binary_val(h)) && ( + ((ErlSubBin *)binary_val(h))->bitoffs != 0 || + ((ErlSubBin *)binary_val(h))->bitsize != 0 + ))) { + return h; + } switch (erts_iolist_size(arg, &size)) { case ERTS_IOLIST_OVERFLOW: BIF_ERROR(p, SYSTEM_LIMIT); case ERTS_IOLIST_TYPE: goto error; diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index b7e1092907..7d4f52ee23 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -112,10 +112,12 @@ process_killer(void) erts_smp_proc_lock(rp, rp_locks); state = erts_smp_atomic32_read_acqb(&rp->state); if (state & (ERTS_PSFLG_FREE - | ERTS_PSFLG_EXITING - | ERTS_PSFLG_ACTIVE - | ERTS_PSFLG_IN_RUNQ - | ERTS_PSFLG_RUNNING)) { + | ERTS_PSFLG_EXITING + | ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_IN_RUNQ + | ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS)) { erts_printf("Can only kill WAITING processes this way\n"); } else { diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c index c66d5a2f05..4344558348 100644 --- a/erts/emulator/beam/code_ix.c +++ b/erts/emulator/beam/code_ix.c @@ -58,7 +58,8 @@ void erts_code_ix_init(void) erts_smp_atomic32_init_nob(&the_staging_code_index, 0); erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission"); #ifdef ERTS_ENABLE_LOCK_CHECK - erts_tsd_key_create(&has_code_write_permission); + erts_tsd_key_create(&has_code_write_permission, + "erts_has_code_write_permission"); #endif CIX_TRACE("init"); } diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index 23c0fca6aa..3a987e213b 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -27,6 +27,7 @@ #include "erl_process.h" #include "erl_gc.h" #include "big.h" +#include "erl_map.h" #include "erl_binary.h" #include "erl_bits.h" #include "dtrace-wrapper.h" @@ -150,6 +151,24 @@ Uint size_object(Eterm obj) goto pop_next; } break; + case MAP_SUBTAG: + { + Uint n; + map_t *mp; + mp = (map_t*)map_val_rel(obj,base); + ptr = (Eterm *)mp; + n = map_get_size(mp) + 1; + sum += n + 2; + ptr += 2; /* hdr + size words */ + while (n--) { + obj = *ptr++; + if (!IS_CONST(obj)) { + ESTACK_PUSH(s, obj); + } + } + goto pop_next; + } + break; case BIN_MATCHSTATE_SUBTAG: erl_exit(ERTS_ABORT_EXIT, "size_object: matchstate term not allowed"); @@ -318,6 +337,15 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) } } break; + case MAP_SUBTAG: + { + i = map_get_size(objp) + 3; + 
*argp = make_map_rel(htop, dst_base); + while (i--) { + *htop++ = *objp++; + } + } + break; case REFC_BINARY_SUBTAG: { ProcBin* pb; @@ -537,6 +565,10 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) } goto off_heap_common; + case MAP_SUBTAG: + *hp++ = *tp++; + sz--; + break; case EXTERNAL_PID_SUBTAG: case EXTERNAL_PORT_SUBTAG: case EXTERNAL_REF_SUBTAG: diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index ff8f5e106f..f32b999198 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -40,13 +40,16 @@ #define DFLAG_SMALL_ATOM_TAGS 0x4000 #define DFLAG_INTERNAL_TAGS 0x8000 #define DFLAG_UTF8_ATOMS 0x10000 +#define DFLAG_MAP_TAG 0x20000 /* All flags that should be enabled when term_to_binary/1 is used. */ #define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \ | DFLAG_NEW_FUN_TAGS \ + | DFLAG_NEW_FLOATS \ | DFLAG_EXTENDED_PIDS_PORTS \ | DFLAG_EXPORT_PTR_TAG \ - | DFLAG_BIT_BINARIES) + | DFLAG_BIT_BINARIES \ + | DFLAG_MAP_TAG) /* opcodes used in distribution messages */ #define DOP_LINK 1 diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index b5ba9bb94a..05ac24e04d 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -75,9 +75,9 @@ #define ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC 45 #define ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC 85 -#define ERTS_ALC_DEFAULT_ACUL 0 -#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0 -#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0 +#define ERTS_ALC_DEFAULT_ACUL ERTS_ALC_DEFAULT_ENABLED_ACUL +#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC +#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC #ifndef ERTS_SMP # undef ERTS_ALC_DEFAULT_ACUL @@ -242,32 +242,13 @@ do { \ sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \ } while (0) -#if ERTS_ALC_DEFAULT_ACUL \ - || ERTS_ALC_DEFAULT_ACUL_LL_ALLOC \ - || ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC - -static ERTS_INLINE void -set_default_acul(struct au_init *ip, int acul) -{ - ip->thr_spec = 1; - ip->atype = AOFIRSTFIT; - ip->init.aoff.flavor = AOFF_BF; - ip->init.util.acul = acul; -} - -#endif - static void set_default_sl_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); -#else ip->thr_spec = 1; ip->atype = GOODFIT; -#endif ip->init.util.name_prefix = "sl_"; ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED; #ifndef SMALL_MEMORY @@ -281,7 +262,7 @@ set_default_sl_alloc_opts(struct au_init *ip) ip->init.util.force = 1; ip->init.util.low_mem = 1; #endif - + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL; } static void @@ -289,12 +270,8 @@ set_default_std_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); -#else ip->thr_spec = 1; ip->atype = BESTFIT; -#endif ip->init.util.name_prefix = "std_"; ip->init.util.alloc_no = ERTS_ALC_A_STANDARD; #ifndef SMALL_MEMORY @@ -303,6 +280,7 @@ set_default_std_alloc_opts(struct au_init *ip) ip->init.util.mmbcs = 32*1024; /* Main carrier size */ #endif ip->init.util.ts = 
ERTS_ALC_MTA_STANDARD; + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL; } static void @@ -310,13 +288,9 @@ set_default_ll_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL_LL_ALLOC - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL_LL_ALLOC); -#else ip->thr_spec = 0; ip->atype = BESTFIT; ip->init.bf.ao = 1; -#endif ip->init.util.ramv = 0; ip->init.util.mmsbc = 0; ip->init.util.sbct = ~((UWord) 0); @@ -332,6 +306,7 @@ set_default_ll_alloc_opts(struct au_init *ip) ip->init.util.rsbcst = 0; ip->init.util.rsbcmt = 0; ip->init.util.rmbcmt = 0; + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_LL_ALLOC; } static void @@ -363,12 +338,8 @@ set_default_eheap_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC); -#else ip->thr_spec = 1; ip->atype = GOODFIT; -#endif ip->init.util.name_prefix = "eheap_"; ip->init.util.alloc_no = ERTS_ALC_A_EHEAP; #ifndef SMALL_MEMORY @@ -382,6 +353,7 @@ set_default_eheap_alloc_opts(struct au_init *ip) ip->init.util.force = 1; ip->init.util.low_mem = 1; #endif + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC; } static void @@ -389,12 +361,8 @@ set_default_binary_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); -#else ip->thr_spec = 1; ip->atype = BESTFIT; -#endif ip->init.util.name_prefix = "binary_"; ip->init.util.alloc_no = ERTS_ALC_A_BINARY; #ifndef SMALL_MEMORY @@ -403,6 +371,7 @@ set_default_binary_alloc_opts(struct au_init *ip) ip->init.util.mmbcs = 32*1024; /* Main carrier size */ #endif ip->init.util.ts = ERTS_ALC_MTA_BINARY; + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL; } static void @@ -410,12 +379,8 @@ set_default_ets_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); -#else ip->thr_spec = 1; ip->atype = BESTFIT; -#endif ip->init.util.name_prefix = "ets_"; ip->init.util.alloc_no = ERTS_ALC_A_ETS; #ifndef SMALL_MEMORY @@ -424,6 +389,7 @@ set_default_ets_alloc_opts(struct au_init *ip) ip->init.util.mmbcs = 32*1024; /* Main carrier size */ #endif ip->init.util.ts = ERTS_ALC_MTA_ETS; + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL; } static void @@ -431,12 +397,8 @@ set_default_driver_alloc_opts(struct au_init *ip) { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); -#else ip->thr_spec = 1; ip->atype = BESTFIT; -#endif ip->init.util.name_prefix = "driver_"; ip->init.util.alloc_no = ERTS_ALC_A_DRIVER; #ifndef SMALL_MEMORY @@ -445,6 +407,7 @@ set_default_driver_alloc_opts(struct au_init *ip) ip->init.util.mmbcs = 32*1024; /* Main carrier size */ #endif ip->init.util.ts = ERTS_ALC_MTA_DRIVER; + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL; } static void @@ -453,12 +416,8 @@ set_default_fix_alloc_opts(struct au_init *ip, { SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); -#if ERTS_ALC_DEFAULT_ACUL - set_default_acul(ip, ERTS_ALC_DEFAULT_ACUL); -#else ip->thr_spec = 1; ip->atype = BESTFIT; -#endif ip->init.bf.ao = 1; ip->init.util.name_prefix = "fix_"; ip->init.util.fix_type_size = fix_type_sizes; @@ -469,6 +428,7 @@ set_default_fix_alloc_opts(struct au_init *ip, ip->init.util.mmbcs = 128*1024; /* 
Main carrier size */ #endif ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE; + ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL; } #ifdef ERTS_SMP @@ -560,26 +520,25 @@ strategy_support_carrier_migration(struct au_init *auip) } static ERTS_INLINE void -check_disable_carrier_migration(struct au_init *auip) -{ - if (!strategy_support_carrier_migration(auip) || !auip->thr_spec) - auip->init.util.acul = 0; -} - -static ERTS_INLINE void -ensure_carrier_migration_support(struct au_init *auip) +adjust_carrier_migration_support(struct au_init *auip) { - auip->thr_spec = 1; /* Need thread preferred */ +#ifdef ERTS_SMP + if (auip->init.util.acul) { + auip->thr_spec = -1; /* Need thread preferred */ - /* - * If strategy cannot handle carrier migration, - * default to a strategy that can... - */ - if (!strategy_support_carrier_migration(auip)) { - /* Default to aoffcbf */ - auip->atype = AOFIRSTFIT; - auip->init.aoff.flavor = AOFF_BF; + /* + * If strategy cannot handle carrier migration, + * default to a strategy that can... + */ + if (!strategy_support_carrier_migration(auip)) { + /* Default to aoffcbf */ + auip->atype = AOFIRSTFIT; + auip->init.aoff.flavor = AOFF_BF; + } } +#else + auip->init.util.acul = 0; +#endif } void @@ -626,7 +585,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) if (ncpu < 1) ncpu = 1; - erts_tsd_key_create(&erts_allctr_prelock_tsd_key); + erts_tsd_key_create(&erts_allctr_prelock_tsd_key, + "erts_allctr_prelock_tsd_key"); erts_sys_alloc_init(); erts_init_utils_mem(); @@ -670,8 +630,21 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) init.fix_alloc.thr_spec = 0; #endif + /* Make adjustments for carrier migration support */ + init.temp_alloc.init.util.acul = 0; + adjust_carrier_migration_support(&init.sl_alloc); + adjust_carrier_migration_support(&init.std_alloc); + adjust_carrier_migration_support(&init.ll_alloc); + adjust_carrier_migration_support(&init.eheap_alloc); + adjust_carrier_migration_support(&init.binary_alloc); + adjust_carrier_migration_support(&init.ets_alloc); + adjust_carrier_migration_support(&init.driver_alloc); + adjust_carrier_migration_support(&init.fix_alloc); + if (init.erts_alloc_config) { /* Adjust flags that erts_alloc_config won't like */ + + /* No thread specific instances */ init.temp_alloc.thr_spec = 0; init.sl_alloc.thr_spec = 0; init.std_alloc.thr_spec = 0; @@ -680,18 +653,19 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) init.binary_alloc.thr_spec = 0; init.ets_alloc.thr_spec = 0; init.driver_alloc.thr_spec = 0; - init.fix_alloc.thr_spec = 0; - } - - check_disable_carrier_migration(&init.sl_alloc); - check_disable_carrier_migration(&init.std_alloc); - check_disable_carrier_migration(&init.ll_alloc); - check_disable_carrier_migration(&init.eheap_alloc); - check_disable_carrier_migration(&init.binary_alloc); - check_disable_carrier_migration(&init.ets_alloc); - check_disable_carrier_migration(&init.driver_alloc); - check_disable_carrier_migration(&init.fix_alloc); + init.fix_alloc.thr_spec = 0; + /* No carrier migration */ + init.temp_alloc.init.util.acul = 0; + init.sl_alloc.init.util.acul = 0; + init.std_alloc.init.util.acul = 0; + init.ll_alloc.init.util.acul = 0; + init.eheap_alloc.init.util.acul = 0; + init.binary_alloc.init.util.acul = 0; + init.ets_alloc.init.util.acul = 0; + init.driver_alloc.init.util.acul = 0; + init.fix_alloc.init.util.acul = 0; + } #ifdef ERTS_SMP /* Only temp_alloc can use thread specific interface */ @@ -1290,8 +1264,6 @@ handle_au_arg(struct au_init *auip, break; 
} } - ensure_carrier_migration_support(auip); - auip->init.util.acul = get_acul_value(auip, sub_param + 4, argv, ip); } else if(has_prefix("asbcst", sub_param)) { @@ -1328,7 +1300,8 @@ handle_au_arg(struct au_init *auip, else { bad_value(param, sub_param + 1, alg); } - check_disable_carrier_migration(auip); + if (!strategy_support_carrier_migration(auip)) + auip->init.util.acul = 0; } else goto bad_switch; @@ -1409,7 +1382,7 @@ handle_au_arg(struct au_init *auip, } else if (res == 0) { auip->thr_spec = 0; - check_disable_carrier_migration(auip); + auip->init.util.acul = 0; break; } goto bad_switch; @@ -1607,7 +1580,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) for (a = 0; a < aui_sz; a++) { aui[a]->thr_spec = 0; - check_disable_carrier_migration(aui[a]); + aui[a]->init.util.acul = 0; aui[a]->init.util.ramv = 0; aui[a]->init.util.lmbcs = 5*1024*1024; } @@ -1754,6 +1727,9 @@ erts_alloc_register_scheduler(void *vesdp) int ix = (int) esdp->no; int aix; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); +#endif for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) { ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix]; esdp->alloc_data.deallctr[aix] = NULL; diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index f83f6b39cf..942eaa47d0 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -209,8 +209,8 @@ int erts_is_allctr_wrapper_prelocked(void); void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size); #ifndef ERTS_CACHE_LINE_SIZE -/* Assume a cache line size of 64 bytes */ -# define ERTS_CACHE_LINE_SIZE ((UWord) 64) +/* Assumed cache line size */ +# define ERTS_CACHE_LINE_SIZE ((UWord) ASSUMED_CACHE_LINE_SIZE) # define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1) #endif diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index bb5eba80be..17ac6316b7 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -1,7 +1,7 @@ # # %CopyrightBegin% # -# Copyright Ericsson AB 2003-2013. All Rights Reserved. +# Copyright Ericsson AB 2003-2014. All Rights Reserved. 
# # The contents of this file are subject to the Erlang Public License, # Version 1.1, (the "License"); you may not use this file except in @@ -150,7 +150,7 @@ type LINK_LH STANDARD PROCESSES link_lh type SUSPEND_MON STANDARD PROCESSES suspend_monitor type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend type PROC_LIST SHORT_LIVED PROCESSES proc_list -type EXTRA_ROOT SHORT_LIVED PROCESSES extra_root +type SAVED_ESTACK SHORT_LIVED PROCESSES saved_estack type FUN_ENTRY LONG_LIVED CODE fun_entry type ATOM_TXT LONG_LIVED ATOM atom_text type BEAM_REGISTER EHEAP PROCESSES beam_register @@ -268,6 +268,8 @@ type PROC_INTERVAL LONG_LIVED SYSTEM process_interval type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap +type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task +type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues +if threads_no_smp # Need thread safe allocs, but std_alloc and fix_alloc are not; @@ -412,6 +414,21 @@ type PRT_REP_EXIT STANDARD SYSTEM port_report_exit +endif ++if ose + +type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf +type FD_TAB LONG_LIVED SYSTEM fd_tab +type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf +type FD_SIG_LIST SHORT_LIVED SYSTEM fd_sig_list +type DRV_EV STANDARD SYSTEM driver_event +type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path +type ENVIRONMENT TEMPORARY SYSTEM environment +type PUTENV_STR SYSTEM SYSTEM putenv_string +type PRT_REP_EXIT STANDARD SYSTEM port_report_exit + ++endif + + +if win32 type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index c6cea0185f..45f0cc4312 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -5561,11 +5561,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) erts_mtx_init_x_opt(&allctr->mutex, "alcu_allocator", make_small(allctr->alloc_no), - ERTS_LCNT_LT_ALLOC); + ERTS_LCNT_LT_ALLOC,1); #else erts_mtx_init_x(&allctr->mutex, "alcu_allocator", - make_small(allctr->alloc_no)); + make_small(allctr->alloc_no),1); #endif /*ERTS_ENABLE_LOCK_COUNT*/ #ifdef DEBUG diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index e6d72f569b..b3dc327704 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -166,6 +166,7 @@ async_ready_q(Uint sched_id) #endif + void erts_init_async(void) { @@ -226,11 +227,23 @@ erts_init_async(void) thr_opts.suggested_stack_size = erts_async_thread_suggested_stack_size; +#ifdef ETHR_HAVE_THREAD_NAMES + thr_opts.name = malloc(sizeof(char)*(strlen("async_XXXX")+1)); +#endif + for (i = 0; i < erts_async_max_threads; i++) { ErtsAsyncQ *aq = async_q(i); + +#ifdef ETHR_HAVE_THREAD_NAMES + sprintf(thr_opts.name, "async_%d", i+1); +#endif + erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts); } +#ifdef ETHR_HAVE_THREAD_NAMES + free(thr_opts.name); +#endif /* Wait for async threads to initialize... */ erts_mtx_lock(&async->init.data.mtx); @@ -602,7 +615,7 @@ unsigned int driver_async_port_key(ErlDrvPort port) ** return values: ** 0 completed ** -1 error -** N handle value (used with async_cancel) +** N handle value ** arguments: ** ix driver index ** key pointer to secedule queue (NULL means round robin) @@ -687,23 +700,3 @@ long driver_async(ErlDrvPort ix, unsigned int* key, return id; } - -int driver_async_cancel(unsigned int id) -{ - /* - * Not supported anymore. Always fail (which is backward - * compatible). 
- * - * This functionality could be implemented again. However, - * it is (and always has been) completely useless since - * it doesn't give you any guarantees whatsoever. The user - * needs to (and always have had to) synchronize in his/her - * own code in order to get any guarantees. - */ - return 0; -} - - - - - diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index 1c3e955f47..1728b200f7 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -182,7 +182,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) Eterm name_term = BIF_ARG_2; Eterm options = BIF_ARG_3; char *path = NULL; - ErlDrvSizeT path_len; + Sint path_len; char *name = NULL; DE_Handle *dh; erts_driver_t *drv; @@ -198,6 +198,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) int kill_ports = 0; int do_build_load_error = 0; int build_this_load_error = 0; + int encoding; for(l = options; is_list(l); l = CDR(list_val(l))) { Eterm opt = CAR(list_val(l)); @@ -257,18 +258,23 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) goto error; } - if (erts_iolist_size(path_term, &path_len)) { - goto error; + encoding = erts_get_native_filename_encoding(); + if (encoding == ERL_FILENAME_WIN_WCHAR) { + /* Do not convert the lib name to utf-16le yet, do that in win32 specific code */ + /* since lib_name is used in error messages */ + encoding = ERL_FILENAME_UTF8; } - path = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, path_len + 1 /* might need path separator */ + sys_strlen(name) + 1); - if (erts_iolist_to_buf(path_term, path, path_len) != 0) { + path = erts_convert_filename_to_encoding(path_term, NULL, 0, + ERTS_ALC_T_DDLL_TMP_BUF, 1, 0, + encoding, &path_len, + sys_strlen(name) + 2); /* might need path separator */ + if (!path) { goto error; } - while (path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/')) { - --path_len; - } + ASSERT(path_len > 0 && path[path_len-1] == 0); + while (--path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/')) + ; path[path_len++] = '/'; - /*path[path_len] = '\0';*/ sys_strcpy(path+path_len,name); #if DDLL_SMP @@ -1524,7 +1530,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) assert_drv_list_rwlocked(); - if ((res = erts_sys_ddll_open(path, &(dh->handle))) != ERL_DE_NO_ERROR) { + if ((res = erts_sys_ddll_open(path, &(dh->handle), NULL)) != ERL_DE_NO_ERROR) { return res; } diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c index a715756c15..bbd8aa31d9 100644 --- a/erts/emulator/beam/erl_bif_guard.c +++ b/erts/emulator/beam/erl_bif_guard.c @@ -33,6 +33,7 @@ #include "bif.h" #include "big.h" #include "erl_binary.h" +#include "erl_map.h" static Eterm gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live); @@ -455,6 +456,28 @@ Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live) } } +Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live) +{ + Eterm arg = reg[live]; + if (is_map(arg)) { + map_t *mp = (map_t*)map_val(arg); + Uint size = map_get_size(mp); + if (IS_USMALL(0, size)) { + return make_small(size); + } else { + Eterm* hp; + if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) { + erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live); + } + hp = p->htop; + p->htop += BIG_UINT_HEAP_SIZE; + return uint_to_big(size, hp); + } + } else { + BIF_ERROR(p, BADARG); + } +} + Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live) { Eterm arg; diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index d7f1e2d971..2adba9b240 100755 
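The separator handling in the erl_bif_ddll.c hunk above is easy to misread: path_len as returned by erts_convert_filename_to_encoding counts the terminating NUL (the ASSERT checks path[path_len-1] == 0), so the first decrement drops the NUL and the loop then strips any trailing slashes before exactly one '/' and the driver name are appended. A standalone sketch under those assumptions (function name invented for illustration):

    #include <string.h>

    /* `path` is NUL-terminated, `path_len` includes the NUL, and the
     * buffer has at least strlen(name) + 2 bytes of headroom, as the
     * patch reserves when converting the path. */
    static void append_driver_name(char *path, long path_len, const char *name)
    {
        while (--path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/'))
            ; /* drop the NUL, then any trailing separators */
        path[path_len++] = '/'; /* exactly one separator */
        strcpy(path + path_len, name);
    }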
--- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -64,9 +64,11 @@ static Export *gather_gc_info_res_trap; #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) +static char otp_version[] = ERLANG_OTP_VERSION; /* Keep erts_system_version as a global variable for easy access from a core */ -static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE - " (erts-" ERLANG_VERSION ")" +static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE + "%s" + " [erts-" ERLANG_VERSION "]" #if !HEAP_ON_C_STACK && !HALFWORD_HEAP " [no-c-stack-objects]" #endif @@ -88,6 +90,9 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE " [smp:%beu:%beu]" #endif #ifdef USE_THREADS +#ifdef ERTS_DIRTY_SCHEDULERS + " [ds:%beu:%beu:%beu]" +#endif " [async-threads:%d]" #endif #ifdef HIPE @@ -304,13 +309,39 @@ make_link_list(Process *p, ErtsLink *root, Eterm tail) int erts_print_system_version(int to, void *arg, Process *c_p) { + int i, rc = -1; + char *rc_str = ""; + char rc_buf[100]; + char *ov = otp_version; #ifdef ERTS_SMP Uint total, online, active; - (void) erts_schedulers_state(&total, &online, &active, 0); +#ifdef ERTS_DIRTY_SCHEDULERS + Uint dirty_cpu, dirty_cpu_onln, dirty_io; + + (void) erts_schedulers_state(&total, &online, &active, &dirty_cpu, &dirty_cpu_onln, &dirty_io, 0); +#else + (void) erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 0); #endif - return erts_print(to, arg, erts_system_version +#endif + for (i = 0; i < sizeof(otp_version)-4; i++) { + if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c') + rc = atoi(&ov[i+3]); + } + if (rc >= 0) { + if (rc == 0) + rc_str = " [DEVELOPMENT]"; + else { + erts_snprintf(rc_buf, sizeof(rc_buf), " [RELEASE CANDIDATE %d]", rc); + rc_str = rc_buf; + } + } + return erts_print(to, arg, erts_system_version, + rc_str #ifdef ERTS_SMP , total, online +#ifdef ERTS_DIRTY_SCHEDULERS + , dirty_cpu, dirty_cpu_onln, dirty_io +#endif #endif #ifdef USE_THREADS , erts_async_max_threads @@ -2454,6 +2485,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) switch (erts_schedulers_state(&total, &online, &active, + NULL, + NULL, + NULL, 1)) { case ERTS_SCHDLR_SSPND_DONE: { Eterm *hp = HAlloc(BIF_P, 4); @@ -2477,7 +2511,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(make_small(1)); #else Uint total, online, active; - switch (erts_schedulers_state(&total, &online, &active, 1)) { + switch (erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 1)) { case ERTS_SCHDLR_SSPND_DONE: BIF_RET(make_small(online)); case ERTS_SCHDLR_SSPND_YIELD_RESTART: @@ -2494,7 +2528,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(make_small(1)); #else Uint total, online, active; - switch (erts_schedulers_state(&total, &online, &active, 1)) { + switch (erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 1)) { case ERTS_SCHDLR_SSPND_DONE: BIF_RET(make_small(active)); case ERTS_SCHDLR_SSPND_YIELD_RESTART: @@ -2506,6 +2540,20 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); } #endif +#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS) + } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) { + Uint dirty_cpu; + erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, 1); + BIF_RET(make_small(dirty_cpu)); + } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) { + Uint dirty_cpu_onln; + erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, 1); + BIF_RET(make_small(dirty_cpu_onln)); + } else if 
(ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) { + Uint dirty_io; + erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, &dirty_io, 1); + BIF_RET(make_small(dirty_io)); +#endif } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) { res = make_small(erts_no_run_queues); BIF_RET(res); @@ -3603,6 +3651,20 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(am_true); } } + else if (ERTS_IS_ATOM_STR("gc_state", BIF_ARG_1)) { + /* Used by process_SUITE (emulator) */ + int res, enable; + + switch (BIF_ARG_2) { + case am_true: enable = 1; break; + case am_false: enable = 0; break; + default: BIF_ERROR(BIF_P, BADARG); break; + } + + res = (BIF_P->flags & F_DISABLE_GC) ? am_false : am_true; + erts_set_gc_state(BIF_P, enable); + BIF_RET(res); + } else if (ERTS_IS_ATOM_STR("send_fake_exit_signal", BIF_ARG_1)) { /* Used by signal_SUITE (emulator) */ diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c index 1805366cfe..820ed2385d 100644 --- a/erts/emulator/beam/erl_bif_lists.c +++ b/erts/emulator/beam/erl_bif_lists.c @@ -43,7 +43,7 @@ static BIF_RETTYPE append(Process* p, Eterm A, Eterm B) Eterm* hp; int i; - if ((i = list_length(A)) < 0) { + if ((i = erts_list_length(A)) < 0) { BIF_ERROR(p, BADARG); } if (i == 0) { @@ -102,10 +102,10 @@ static Eterm subtract(Process* p, Eterm A, Eterm B) int n; int m; - if ((n = list_length(A)) < 0) { + if ((n = erts_list_length(A)) < 0) { BIF_ERROR(p, BADARG); } - if ((m = list_length(B)) < 0) { + if ((m = erts_list_length(B)) < 0) { BIF_ERROR(p, BADARG); } diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c index adac0052d6..37dd6457db 100644 --- a/erts/emulator/beam/erl_bif_op.c +++ b/erts/emulator/beam/erl_bif_op.c @@ -36,6 +36,7 @@ #include "dist.h" #include "erl_version.h" #include "erl_binary.h" +#include "erl_map.h" BIF_RETTYPE and_2(BIF_ALIST_2) { @@ -321,7 +322,10 @@ BIF_RETTYPE is_record_3(BIF_ALIST_3) BIF_RET(am_false); } - - - - +BIF_RETTYPE is_map_1(BIF_ALIST_1) +{ + if (is_map(BIF_ARG_1)) { + BIF_RET(am_true); + } + BIF_RET(am_false); +} diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index 3cd53ef65d..77627a6897 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -796,43 +796,29 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) goto badarg; } - if (*tp == am_spawn || *tp == am_spawn_driver) { /* A process port */ + if (*tp == am_spawn || *tp == am_spawn_driver || *tp == am_spawn_executable) { /* A process port */ + int encoding; if (arity != make_arityval(2)) { goto badarg; } name = tp[1]; - if (is_atom(name)) { - name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, - atom_tab(atom_val(name))->len+1); - sys_memcpy((void *) name_buf, - (void *) atom_tab(atom_val(name))->name, - atom_tab(atom_val(name))->len); - name_buf[atom_tab(atom_val(name))->len] = '\0'; - } else if ((i = is_string(name))) { - name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1); - if (intlist_to_buf(name, name_buf, i) != i) - erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__); - name_buf[i] = '\0'; - } else { + encoding = erts_get_native_filename_encoding(); + /* Do not convert the command to utf-16le yet, do that in win32 specific code */ + /* since the cmd is used for comparsion with drivers names and copied to port info */ + if (encoding == ERL_FILENAME_WIN_WCHAR) { + encoding = ERL_FILENAME_UTF8; + } + if ((name_buf = erts_convert_filename_to_encoding(name, NULL, 0, ERTS_ALC_T_TMP,0,1, 
encoding, NULL, 0)) + == NULL) { goto badarg; } + if (*tp == am_spawn_driver) { opts.spawn_type = ERTS_SPAWN_DRIVER; + } else if (*tp == am_spawn_executable) { + opts.spawn_type = ERTS_SPAWN_EXECUTABLE; } - driver = &spawn_driver; - } else if (*tp == am_spawn_executable) { /* A program */ - /* - * {spawn_executable,Progname} - */ - - if (arity != make_arityval(2)) { - goto badarg; - } - name = tp[1]; - if ((name_buf = erts_convert_filename_to_native(name, NULL, 0, ERTS_ALC_T_TMP,0,1, NULL)) == NULL) { - goto badarg; - } - opts.spawn_type = ERTS_SPAWN_EXECUTABLE; + driver = &spawn_driver; } else if (*tp == am_fd) { /* An fd port */ int n; @@ -873,29 +859,8 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) } if (edir != NIL) { - /* A working directory is expressed differently if spawn_executable, i.e. Unicode is handles - for spawn_executable... */ - if (opts.spawn_type != ERTS_SPAWN_EXECUTABLE) { - Eterm iolist; - DeclareTmpHeap(heap,4,p); - int r; - - UseTmpHeap(4,p); - heap[0] = edir; - heap[1] = make_list(heap+2); - heap[2] = make_small(0); - heap[3] = NIL; - iolist = make_list(heap); - r = erts_iolist_to_buf(iolist, (char*) dir, MAXPATHLEN); - UnUseTmpHeap(4,p); - if (ERTS_IOLIST_TO_BUF_FAILED(r)) { - goto badarg; - } - opts.wd = (char *) dir; - } else { - if ((opts.wd = erts_convert_filename_to_native(edir, NULL, 0, ERTS_ALC_T_TMP,0,1,NULL)) == NULL) { - goto badarg; - } + if ((opts.wd = erts_convert_filename_to_native(edir, NULL, 0, ERTS_ALC_T_TMP,0,1,NULL)) == NULL) { + goto badarg; } } @@ -973,11 +938,12 @@ static char **convert_args(Eterm l) int n; int i = 0; Eterm str; - /* We require at least one element in list (argv[0]) */ if (is_not_list(l) && is_not_nil(l)) { return NULL; } - n = list_length(l); + + n = erts_list_length(l); + /* We require at least one element in argv[0] + NULL at end */ pp = erts_alloc(ERTS_ALC_T_TMP, (n + 2) * sizeof(char **)); pp[i++] = erts_default_arg0; while (is_list(l)) { @@ -1021,7 +987,7 @@ static byte* convert_environment(Process* p, Eterm env) byte* bytes; int encoding = erts_get_native_filename_encoding(); - if ((n = list_length(env)) < 0) { + if ((n = erts_list_length(env)) < 0) { return NULL; } heap_size = 2*(5*n+1); diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c index 3d34c2a77f..448c6f6f6d 100644 --- a/erts/emulator/beam/erl_bif_re.c +++ b/erts/emulator/beam/erl_bif_re.c @@ -180,6 +180,9 @@ static Eterm make_signed_integer(int x, Process *p) #define PARSE_FLAG_STARTOFFSET 8 #define PARSE_FLAG_CAPTURE_OPT 16 #define PARSE_FLAG_GLOBAL 32 +#define PARSE_FLAG_REPORT_ERRORS 64 +#define PARSE_FLAG_MATCH_LIMIT 128 +#define PARSE_FLAG_MATCH_LIMIT_RECURSION 256 #define CAPSPEC_VALUES 0 #define CAPSPEC_TYPE 1 @@ -192,7 +195,9 @@ parse_options(Eterm listp, /* in */ int *exec_options, /* out */ int *flags,/* out */ int *startoffset, /* out */ - Eterm *capture_spec) /* capture_spec[CAPSPEC_SIZE] */ /* out */ + Eterm *capture_spec, /* capture_spec[CAPSPEC_SIZE] */ /* out */ + int *match_limit, /* out */ + int *match_limit_recursion) /* out */ { int copt,eopt,fl; Eterm item; @@ -234,7 +239,7 @@ parse_options(Eterm listp, /* in */ case am_offset: { int tmp; - if (!term_to_int(tp[2],&tmp)) { + if (!term_to_int(tp[2],&tmp) || tmp < 0) { return -1; } if (startoffset != NULL) { @@ -243,6 +248,31 @@ parse_options(Eterm listp, /* in */ } fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT|PARSE_FLAG_STARTOFFSET); break; + case am_match_limit: + { + int tmp; + if (!term_to_int(tp[2],&tmp) || tmp < 0) { + return -1; + } 
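    /* Aside, illustration only (not part of the patch): the limits
     * collected by these option clauses are later transferred to the
     * pcre_extra block used at exec time, as the re_run() hunk further
     * down shows. Field and flag names below are from the public PCRE
     * API (<pcre.h>); the helper itself is invented. */
    static void apply_limits(pcre_extra *extra,
                             int have_limit, int match_limit,
                             int have_rec, int match_limit_recursion)
    {
        if (have_limit) {
            extra->flags |= PCRE_EXTRA_MATCH_LIMIT;
            extra->match_limit = match_limit;
        }
        if (have_rec) {
            extra->flags |= PCRE_EXTRA_MATCH_LIMIT_RECURSION;
            extra->match_limit_recursion = match_limit_recursion;
        }
    }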
+ if (match_limit != NULL) { + *match_limit = tmp; + } + } + fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT|PARSE_FLAG_MATCH_LIMIT); + break; + case am_match_limit_recursion: + { + int tmp; + if (!term_to_int(tp[2],&tmp) || tmp < 0) { + return -1; + } + if (match_limit_recursion != NULL) { + *match_limit_recursion = tmp; + } + } + fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT| + PARSE_FLAG_MATCH_LIMIT_RECURSION); + break; case am_newline: if (!is_atom(tp[2])) { return -1; @@ -276,7 +306,7 @@ parse_options(Eterm listp, /* in */ default: return -1; } - }else if (is_not_atom(item)) { + } else if (is_not_atom(item)) { return -1; } else { switch(item) { @@ -288,6 +318,10 @@ parse_options(Eterm listp, /* in */ eopt |= PCRE_NOTEMPTY; fl |= PARSE_FLAG_UNIQUE_EXEC_OPT; break; + case am_notempty_atstart: + eopt |= PCRE_NOTEMPTY_ATSTART; + fl |= PARSE_FLAG_UNIQUE_EXEC_OPT; + break; case am_notbol: eopt |= PCRE_NOTBOL; fl |= PARSE_FLAG_UNIQUE_EXEC_OPT; @@ -296,6 +330,10 @@ parse_options(Eterm listp, /* in */ eopt |= PCRE_NOTEOL; fl |= PARSE_FLAG_UNIQUE_EXEC_OPT; break; + case am_no_start_optimize: + copt |= PCRE_NO_START_OPTIMIZE; + fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT; + break; case am_caseless: copt |= PCRE_CASELESS; fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT; @@ -332,6 +370,18 @@ parse_options(Eterm listp, /* in */ copt |= PCRE_UNGREEDY; fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT; break; + case am_ucp: + copt |= PCRE_UCP; + fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT; + break; + case am_never_utf: + copt |= PCRE_NEVER_UTF; + fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT; + break; + case am_report_errors: + fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT | + PARSE_FLAG_REPORT_ERRORS); + break; case am_unicode: copt |= PCRE_UTF8; fl |= (PARSE_FLAG_UNIQUE_COMPILE_OPT | PARSE_FLAG_UNICODE); @@ -359,7 +409,7 @@ parse_options(Eterm listp, /* in */ if (compile_options != NULL) { *compile_options = copt; } - if (exec_options != NULL) { + if (exec_options != NULL) { *exec_options = eopt; } if (flags != NULL) { @@ -373,34 +423,49 @@ parse_options(Eterm listp, /* in */ */ static Eterm -build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, const char *errstr, int errofset, int unicode, int with_ok) +build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, const char *errstr, int errofset, int unicode, int with_ok, Eterm extra_err_tag) { Eterm *hp; Eterm ret; size_t pattern_size; int capture_count; + int use_crlf; + unsigned long options; if (!result) { /* Return {error_tag, {Code, String, Offset}} */ int elen = sys_strlen(errstr); int need = 3 /* tuple of 2 */ + 3 /* tuple of 2 */ + - (2 * elen) /* The error string list */; + (2 * elen) /* The error string list */ + + ((extra_err_tag != NIL) ? 
3 : 0); hp = HAlloc(p, need); ret = buf_to_intlist(&hp, (char *) errstr, elen, NIL); ret = TUPLE2(hp, ret, make_small(errofset)); hp += 3; + if (extra_err_tag != NIL) { + /* Return {error_tag, {extra_tag, + {Code, String, Offset}}} instead */ + ret = TUPLE2(hp, extra_err_tag, ret); + hp += 3; + } ret = TUPLE2(hp, error_tag, ret); } else { erts_pcre_fullinfo(result, NULL, PCRE_INFO_SIZE, &pattern_size); erts_pcre_fullinfo(result, NULL, PCRE_INFO_CAPTURECOUNT, &capture_count); + erts_pcre_fullinfo(result, NULL, PCRE_INFO_OPTIONS, &options); + options &= PCRE_NEWLINE_CR|PCRE_NEWLINE_LF | PCRE_NEWLINE_CRLF | + PCRE_NEWLINE_ANY | PCRE_NEWLINE_ANYCRLF; + use_crlf = (options == PCRE_NEWLINE_ANY || + options == PCRE_NEWLINE_CRLF || + options == PCRE_NEWLINE_ANYCRLF); /* XXX: Optimize - keep in offheap binary to allow this to be kept across traps w/o need of copying */ ret = new_binary(p, (byte *) result, pattern_size); erts_pcre_free(result); - hp = HAlloc(p, (with_ok) ? (3+5) : 5); - ret = TUPLE4(hp,am_re_pattern, make_small(capture_count), make_small(unicode),ret); + hp = HAlloc(p, (with_ok) ? (3+6) : 6); + ret = TUPLE5(hp,am_re_pattern, make_small(capture_count), make_small(unicode),make_small(use_crlf),ret); if (with_ok) { - hp += 5; + hp += 6; ret = TUPLE2(hp,am_ok,ret); } } @@ -424,9 +489,12 @@ re_compile(Process* p, Eterm arg1, Eterm arg2) int options = 0; int pflags = 0; int unicode = 0; +#ifdef DEBUG + int buffres; +#endif - if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL) + if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL,NULL,NULL) < 0) { BIF_ERROR(p,BADARG); } @@ -445,16 +513,19 @@ re_compile(Process* p, Eterm arg1, Eterm arg2) BIF_ERROR(p,BADARG); } expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1); - if (erts_iolist_to_buf(arg1, expr, slen) != 0) { - erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); - BIF_ERROR(p,BADARG); - } +#ifdef DEBUG + buffres = +#endif + erts_iolist_to_buf(arg1, expr, slen); + + ASSERT(buffres >= 0); + expr[slen]='\0'; result = erts_pcre_compile2(expr, options, &errcode, &errstr, &errofset, default_table); ret = build_compile_result(p, am_error, result, errcode, - errstr, errofset, unicode, 1); + errstr, errofset, unicode, 1, NIL); erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); BIF_RET(ret); } @@ -492,7 +563,7 @@ typedef struct _return_info { } ReturnInfo; typedef struct _restart_context { - pcre_extra extra; + erts_pcre_extra extra; void *restart_data; Uint32 flags; char *subject; /* to be able to free it when done */ @@ -502,6 +573,7 @@ typedef struct _restart_context { } RestartContext; #define RESTART_FLAG_SUBJECT_IN_BINARY 0x1 +#define RESTART_FLAG_REPORT_MATCH_LIMIT 0x2 static void cleanup_restart_context(RestartContext *rc) { @@ -542,13 +614,29 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete Eterm res; Eterm *hp; if (rc <= 0) { - res = am_nomatch; + if (restartp->flags & RESTART_FLAG_REPORT_MATCH_LIMIT) { + if (rc == PCRE_ERROR_MATCHLIMIT) { + hp = HAlloc(p,3); + res = TUPLE2(hp,am_error,am_match_limit); + } else if (rc == PCRE_ERROR_RECURSIONLIMIT) { + hp = HAlloc(p,3); + res = TUPLE2(hp,am_error,am_match_limit_recursion); + } else { + res = am_nomatch; + } + } else { + res = am_nomatch; + } } else { - ReturnInfo *ri = restartp->ret_info; + ReturnInfo *ri; ReturnInfo defri = {RetIndex,0,{0}}; - if (ri == NULL) { + + if (restartp->ret_info == NULL) { ri = &defri; + } else { + ri = restartp->ret_info; } + if (ri->type == RetNone) { res = am_match; } else if (ri->type == RetIndex){ @@ -577,6 +665,17 @@ static Eterm 
build_exec_return(Process *p, int rc, RestartContext *restartp, Ete ri->num_spec * 2 * sizeof(Eterm)); for (i = 0; i < ri->num_spec; ++i) { x = ri->v[i]; + if (x < -1) { + int n = i-x+1; + int j; + for (j = i+1; j < ri->num_spec && j < n; ++j) { + if (restartp->ovector[(ri->v[j])*2] >= 0) { + x = ri->v[j]; + break; + } + } + i = n-1; + } if (x < rc && x >= 0) { tmp_vect[n*2] = make_signed_integer(restartp->ovector[x*2],p); tmp_vect[n*2+1] = make_signed_integer(restartp->ovector[x*2+1]-restartp->ovector[x*2],p); @@ -658,6 +757,17 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete ri->num_spec * sizeof(Eterm)); for (i = 0; i < ri->num_spec; ++i) { x = ri->v[i]; + if (x < -1) { + int n = i-x+1; + int j; + for (j = i+1; j < ri->num_spec && j < n; ++j) { + if (restartp->ovector[(ri->v[j])*2] >= 0) { + x = ri->v[j]; + break; + } + } + i = n-1; + } if (x < rc && x >= 0) { char *cp; int len; @@ -722,6 +832,49 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete */ #define RINFO_SIZ(Num) (sizeof(ReturnInfo) + (sizeof(int) * (Num - 1))) +#define PICK_INDEX(NameEntry) \ + ((int) ((((unsigned) ((unsigned char *) (NameEntry))[0]) << 8) + \ + ((unsigned) ((unsigned char *) (NameEntry))[1]))) + + +static void build_one_capture(const pcre *code, ReturnInfo **ri, int *sallocated, int has_dupnames, char *name) +{ + ReturnInfo *r = (*ri); + if (has_dupnames) { + /* Build a sequence of positions, starting with -size if + more than one, otherwise just put the index there... */ + char *first,*last; + int esize = erts_pcre_get_stringtable_entries(code,name,&first,&last); + if (esize == PCRE_ERROR_NOSUBSTRING) { + r->v[r->num_spec - 1] = -1; + } else if(last == first) { + r->v[r->num_spec - 1] = PICK_INDEX(first); + } else { + int num = ((last - first) / esize) + 1; + int i; + ASSERT(num > 1); + r->v[r->num_spec - 1] = -num; /* A value less than -1 means + multiple indexes for same name */ + for (i = 0; i < num; ++i) { + ++(r->num_spec); + if(r->num_spec > (*sallocated)) { + (*sallocated) += 10; + r = erts_realloc(ERTS_ALC_T_RE_SUBJECT, r, + RINFO_SIZ((*sallocated))); + } + r->v[r->num_spec - 1] = PICK_INDEX(first); + first += esize; + } + } + } else { + /* Use the faster binary search if no duplicate names are present */ + if ((r->v[r->num_spec - 1] = erts_pcre_get_stringnumber(code,name)) == + PCRE_ERROR_NOSUBSTRING) { + r->v[r->num_spec - 1] = -1; + } + } + *ri = r; +} static ReturnInfo * build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) @@ -770,13 +923,58 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) } ri->v[ri->num_spec - 1] = 0; break; + case am_all_names: + { + int rc,i,top; + int entrysize; + unsigned char *nametable, *last = NULL; + int has_dupnames; + unsigned long options; + + if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0) + goto error; + if ((rc = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top)) != 0) + goto error; + if (top <= 0) { + ri->num_spec = 0; + ri->type = RetNone; + break; + } + if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize) != 0) + goto error; + if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable) != 0) + goto error; + + has_dupnames = ((options & PCRE_DUPNAMES) != 0); + + for(i=0;i<top;++i) { + if (last == NULL || !has_dupnames || strcmp((char *) last+2,(char *) nametable+2)) { + ASSERT(ri->num_spec >= 0); + ++(ri->num_spec); + if(ri->num_spec > sallocated) { + sallocated += 10; + ri = 
erts_realloc(ERTS_ALC_T_RE_SUBJECT, ri, RINFO_SIZ(sallocated)); + } + if (has_dupnames) { + /* This could be more effective, we actually have + the names and could fill in the vector + immediately. Now we lookup the name again. */ + build_one_capture(code,&ri,&sallocated,has_dupnames,(char *) nametable+2); + } else { + ri->v[ri->num_spec - 1] = PICK_INDEX(nametable); + } + } + last = nametable; + nametable += entrysize; + } + break; + } default: if (is_list(capture_spec[CAPSPEC_VALUES])) { for(l=capture_spec[CAPSPEC_VALUES];is_list(l);l = CDR(list_val(l))) { int x; Eterm val = CAR(list_val(l)); - if (ri->num_spec < 0) - ri->num_spec = 0; + ASSERT(ri->num_spec >= 0); ++(ri->num_spec); if(ri->num_spec > sallocated) { sallocated += 10; @@ -785,6 +983,11 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) if (term_to_int(val,&x)) { ri->v[ri->num_spec - 1] = x; } else if (is_atom(val) || is_binary(val) || is_list(val)) { + int has_dupnames; + unsigned long options; + if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0) + goto error; + has_dupnames = ((options & PCRE_DUPNAMES) != 0); if (is_atom(val)) { Atom *ap = atom_tab(atom_val(val)); if ((ap->len + 1) > tmpbsiz) { @@ -799,6 +1002,10 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) tmpb[ap->len] = '\0'; } else { ErlDrvSizeT slen; +#ifdef DEBUG + int buffres; +#endif + if (erts_iolist_size(val, &slen)) { goto error; } @@ -810,15 +1017,15 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) (tmpbsiz = slen + 1)); } } - if (erts_iolist_to_buf(val, tmpb, slen) != 0) { - goto error; - } + +#ifdef DEBUG + buffres = +#endif + erts_iolist_to_buf(val, tmpb, slen); + ASSERT(buffres >= 0); tmpb[slen] = '\0'; } - if ((ri->v[ri->num_spec - 1] = erts_pcre_get_stringnumber(code,tmpb)) == - PCRE_ERROR_NOSUBSTRING) { - ri->v[ri->num_spec - 1] = -1; - } + build_one_capture(code,&ri,&sallocated,has_dupnames,tmpb); } else { goto error; } @@ -867,15 +1074,18 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) unsigned long loop_count; Eterm capture[CAPSPEC_SIZE] = CAPSPEC_INIT; int is_list_cap; + int match_limit = 0; + int match_limit_recursion = 0; - if (parse_options(arg3,&comp_options,&options,&pflags,&startoffset,capture) + if (parse_options(arg3,&comp_options,&options,&pflags,&startoffset,capture, + &match_limit,&match_limit_recursion) < 0) { BIF_ERROR(p,BADARG); } is_list_cap = ((pflags & PARSE_FLAG_CAPTURE_OPT) && (capture[CAPSPEC_TYPE] == am_list)); - if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 4)) { + if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 5)) { if (is_binary(arg2) || is_list(arg2) || is_nil(arg2)) { /* Compile from textual RE */ ErlDrvSizeT slen; @@ -885,6 +1095,9 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) const char *errstr = ""; int errofset = 0; int capture_count; +#ifdef DEBUG + int buffres; +#endif if (pflags & PARSE_FLAG_UNICODE && (!is_binary(arg2) || !is_binary(arg1) || @@ -897,18 +1110,32 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) } expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1); - if (erts_iolist_to_buf(arg2, expr, slen) != 0) { - erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); - BIF_ERROR(p,BADARG); - } + +#ifdef DEBUG + buffres = +#endif + erts_iolist_to_buf(arg2, expr, slen); + + ASSERT(buffres >= 0); + expr[slen]='\0'; result = erts_pcre_compile2(expr, comp_options, &errcode, &errstr, &errofset, default_table); if (!result) { - erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); /* Compilation error gives badarg except 
in the compile - function */ - BIF_ERROR(p,BADARG); + function or if we have PARSE_FLAG_REPORT_ERRORS */ + if (pflags & PARSE_FLAG_REPORT_ERRORS) { + res = build_compile_result(p, am_error, result, errcode, + errstr, errofset, + (pflags & + PARSE_FLAG_UNICODE) ? 1 : 0, + 1, am_compile); + erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); + BIF_RET(res); + } else { + erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); + BIF_ERROR(p,BADARG); + } } if (pflags & PARSE_FLAG_GLOBAL) { Eterm precompiled = @@ -917,7 +1144,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) errstr, errofset, (pflags & PARSE_FLAG_UNICODE) ? 1 : 0, - 0); + 0, NIL); Eterm *hp,r; erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); hp = HAlloc(p,4); @@ -947,7 +1174,8 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) tp = tuple_val(arg2); if (tp[1] != am_re_pattern || is_not_small(tp[2]) || - is_not_small(tp[3]) || is_not_binary(tp[4])) { + is_not_small(tp[3]) || is_not_small(tp[4]) || + is_not_binary(tp[5])) { BIF_ERROR(p,BADARG); } @@ -967,9 +1195,9 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) } ovsize = 3*(unsigned_val(tp[2])+1); - code_size = binary_size(tp[4]); - if ((code_tmp = (const pcre *) - erts_get_aligned_binary_bytes(tp[4], &temp_alloc)) == NULL) { + code_size = binary_size(tp[5]); + code_tmp = (const pcre *) erts_get_aligned_binary_bytes(tp[5], &temp_alloc); + if (code_tmp == NULL || code_size < 4) { erts_free_aligned_binary_bytes(temp_alloc); BIF_ERROR(p, BADARG); } @@ -994,6 +1222,16 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) restart.extra.restart_flags = 0; restart.extra.loop_counter_return = &loop_count; restart.ret_info = NULL; + + if (pflags & PARSE_FLAG_MATCH_LIMIT) { + restart.extra.flags |= PCRE_EXTRA_MATCH_LIMIT; + restart.extra.match_limit = match_limit; + } + + if (pflags & PARSE_FLAG_MATCH_LIMIT_RECURSION) { + restart.extra.flags |= PCRE_EXTRA_MATCH_LIMIT_RECURSION; + restart.extra.match_limit_recursion = match_limit_recursion; + } if (pflags & PARSE_FLAG_CAPTURE_OPT) { if ((restart.ret_info = build_capture(capture,restart.code)) == NULL) { @@ -1002,7 +1240,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) BIF_ERROR(p,BADARG); } } - + /* Optimized - if already in binary off heap, keep that and avoid copying, also binary returns can be sub binaries in that case */ @@ -1029,6 +1267,9 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) restart.subject = (char *) (pb->bytes+offset); restart.flags |= RESTART_FLAG_SUBJECT_IN_BINARY; } else { +#ifdef DEBUG + int buffres; +#endif handle_iolist: if (erts_iolist_size(arg1, &slength)) { erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector); @@ -1040,24 +1281,30 @@ handle_iolist: } restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength); - if (erts_iolist_to_buf(arg1, restart.subject, slength) != 0) { - erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector); - erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code); - erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject); - if (restart.ret_info != NULL) { - erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info); - } - BIF_ERROR(p,BADARG); - } +#ifdef DEBUG + buffres = +#endif + erts_iolist_to_buf(arg1, restart.subject, slength); + ASSERT(buffres >= 0); } + if (pflags & PARSE_FLAG_REPORT_ERRORS) { + restart.flags |= RESTART_FLAG_REPORT_MATCH_LIMIT; + } #ifdef DEBUG loop_count = 0xFFFFFFFF; #endif + + rc = erts_pcre_exec(restart.code, &(restart.extra), restart.subject, + slength, startoffset, + options, restart.ovector, ovsize); + + if (rc == PCRE_ERROR_BADENDIANNESS || rc == PCRE_ERROR_BADMAGIC) { + 
cleanup_restart_context(&restart); + BIF_ERROR(p,BADARG); + } - rc = erts_pcre_exec(restart.code, &(restart.extra), restart.subject, slength, startoffset, - options, restart.ovector, ovsize); ASSERT(loop_count != 0xFFFFFFFF); BUMP_REDS(p, loop_count / LOOP_FACTOR); if (rc == PCRE_ERROR_LOOP_LIMIT) { @@ -1077,7 +1324,7 @@ handle_iolist: arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */, magic_bin); } - + res = build_exec_return(p, rc, &restart, arg1); cleanup_restart_context(&restart); @@ -1149,6 +1396,120 @@ static BIF_RETTYPE re_exec_trap(BIF_ALIST_3) BIF_RET(res); } +BIF_RETTYPE +re_inspect_2(BIF_ALIST_2) +{ + Eterm *tp,*tmp_vec,*hp; + int i,top,j; + int entrysize; + unsigned char *nametable, *last,*name; + int has_dupnames; + unsigned long options; + int num_names; + Eterm res; + const pcre *code; + byte *temp_alloc = NULL; +#ifdef DEBUG + int infores; +#endif + + + if (is_not_tuple(BIF_ARG_1) || (arityval(*tuple_val(BIF_ARG_1)) != 5)) { + goto error; + } + tp = tuple_val(BIF_ARG_1); + if (tp[1] != am_re_pattern || is_not_small(tp[2]) || + is_not_small(tp[3]) || is_not_small(tp[4]) || + is_not_binary(tp[5])) { + goto error; + } + if (BIF_ARG_2 != am_namelist) { + goto error; + } + if ((code = (const pcre *) + erts_get_aligned_binary_bytes(tp[5], &temp_alloc)) == NULL) { + goto error; + } + + /* OK, so let's try to get some info */ + + if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0) + goto error; + +#ifdef DEBUG + infores = +#endif + erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top); + + ASSERT(infores == 0); + + if (top <= 0) { + hp = HAlloc(BIF_P, 3); + res = TUPLE2(hp,am_namelist,NIL); + erts_free_aligned_binary_bytes(temp_alloc); + BIF_RET(res); + } +#ifdef DEBUG + infores = +#endif + erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize); + + ASSERT(infores == 0); + +#ifdef DEBUG + infores = +#endif + erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable); + + ASSERT(infores == 0); + + has_dupnames = ((options & PCRE_DUPNAMES) != 0); + /* First, count the names */ + num_names = 0; + last = NULL; + name = nametable; + for(i=0;i<top;++i) { + if (last == NULL || !has_dupnames || strcmp((char *) last+2, + (char *) name+2)) { + ++num_names; + } + last = name; + name += entrysize; + } + tmp_vec = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, + num_names * sizeof(Eterm)); + /* Re-iterate and fill tmp_vec */ + last = NULL; + name = nametable; + j = 0; + for(i=0;i<top;++i) { + if (last == NULL || !has_dupnames || strcmp((char *) last+2, + (char *) name+2)) { + tmp_vec[j++] = new_binary(BIF_P, (byte *) name+2, strlen((char *) name+2)); + } + last = name; + name += entrysize; + } + ASSERT(j == num_names); + hp = HAlloc(BIF_P, 3+2*j); + res = NIL; + for(i = j-1 ;i >= 0; --i) { + res = CONS(hp,tmp_vec[i],res); + hp += 2; + } + res = TUPLE2(hp,am_namelist,res); + erts_free_aligned_binary_bytes(temp_alloc); + erts_free(ERTS_ALC_T_RE_TMP_BUF, tmp_vec); + BIF_RET(res); + + error: + /* tmp_vec never allocated when we reach here */ + erts_free_aligned_binary_bytes(temp_alloc); + BIF_ERROR(BIF_P,BADARG); +} + + + diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c index d67695e533..03ac97283c 100644 --- a/erts/emulator/beam/erl_bif_timer.c +++ b/erts/emulator/beam/erl_bif_timer.c @@ -616,7 +616,7 @@ erts_print_bif_timer_info(int to, void *to_arg) : btm->receiver.proc.ess->common.id); erts_print(to, to_arg, "=timer:%T\n", receiver); erts_print(to, to_arg, "Message: %T\n", btm->message); - erts_print(to, to_arg, 
"Time left: %u ms\n", + erts_print(to, to_arg, "Time left: %u\n", erts_time_left(&btm->tm)); } } diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c index 88c6c34881..f594cb9392 100644 --- a/erts/emulator/beam/erl_cpu_topology.c +++ b/erts/emulator/beam/erl_cpu_topology.c @@ -1699,7 +1699,7 @@ erts_early_init_cpu_topology(int no_schedulers, } max_main_threads = erts_get_cpu_configured(cpuinfo); - if (max_main_threads > no_schedulers) + if (max_main_threads > no_schedulers || max_main_threads < 0) max_main_threads = no_schedulers; *max_main_threads_p = max_main_threads; diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 41e64fcd4f..a5d67571e2 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -125,6 +125,7 @@ get_meta_main_tab_lock(unsigned slot) static erts_smp_spinlock_t meta_main_tab_main_lock; static Uint meta_main_tab_first_free; /* Index of first free slot */ static int meta_main_tab_cnt; /* Number of active tables */ +static int meta_main_tab_top; /* Highest ever used slot + 1 */ static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */ static Uint meta_main_tab_seq_incr; static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */ @@ -1469,6 +1470,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) ASSERT(slot>=0 && slot<db_max_tabs); meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot); meta_main_tab_cnt++; + if (slot >= meta_main_tab_top) { + ASSERT(slot == meta_main_tab_top); + meta_main_tab_top = slot + 1; + } if (is_named) { ret = BIF_ARG_1; @@ -2058,27 +2063,31 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0) { DbTable* tb; Eterm previous; - int i, j; + int i; Eterm* hp; Eterm* hendp; int t_tabs_cnt; - int t_max_tabs; + int t_top; erts_smp_spin_lock(&meta_main_tab_main_lock); t_tabs_cnt = meta_main_tab_cnt; - t_max_tabs = db_max_tabs; + t_top = meta_main_tab_top; erts_smp_spin_unlock(&meta_main_tab_main_lock); hp = HAlloc(BIF_P, 2*t_tabs_cnt); hendp = hp + 2*t_tabs_cnt; previous = NIL; - j = 0; - for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) { + for(i = 0; i < t_top; i++) { erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i); erts_smp_rwmtx_rlock(mmtl); if (IS_SLOT_ALIVE(i)) { - j++; + if (hp == hendp) { + /* Racing table creator, grab some more heap space */ + t_tabs_cnt = 10; + hp = HAlloc(BIF_P, 2*t_tabs_cnt); + hendp = hp + 2*t_tabs_cnt; + } tb = meta_main_tab[i].u.tb; previous = CONS(hp, tb->common.id, previous); hp += 2; @@ -2849,6 +2858,7 @@ void init_db(void) ERTS_ETS_MISC_MEM_ADD(size); meta_main_tab_cnt = 0; + meta_main_tab_top = 0; for (i=1; i<db_max_tabs; i++) { SET_NEXT_FREE_SLOT(i-1,i); } diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 2fea4671e1..06dac8f161 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -2106,7 +2106,7 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl) DbTableHash *tb = &tbl->hash; int i; - erts_print(to, to_arg, "Buckets: %d \n", NACTIVE(tb)); + erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb)); if (show) { for (i = 0; i < NACTIVE(tb); i++) { diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h 
index d17bd9f693..908cec11d4 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -33,7 +33,12 @@ typedef struct hash_db_term { DbTerm dbterm; /* The actual term */ } HashDbTerm; +#ifdef ERTS_DB_HASH_LOCK_CNT +#define DB_HASH_LOCK_CNT ERTS_DB_HASH_LOCK_CNT +#else #define DB_HASH_LOCK_CNT 64 +#endif + typedef struct db_table_hash_fine_locks { union { erts_smp_rwmtx_t lck; diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index 25029ba90f..a62a83a928 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -485,7 +485,7 @@ static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret) *ret = am_EOT; return DB_ERROR_NONE; } - /* Walk down to the tree to the left */ + /* Walk down the tree to the left */ if ((stack = get_static_stack(tb)) != NULL) { stack->pos = stack->slot = 0; } @@ -531,7 +531,7 @@ static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret) *ret = am_EOT; return DB_ERROR_NONE; } - /* Walk down to the tree to the left */ + /* Walk down the tree to the right */ if ((stack = get_static_stack(tb)) != NULL) { stack->pos = stack->slot = 0; } diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index ef3749a2c4..3927615e04 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1998-2013. All Rights Reserved. + * Copyright Ericsson AB 1998-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -35,6 +35,7 @@ #include "bif.h" #include "big.h" #include "erl_binary.h" +#include "erl_map.h" #include "erl_thr_progress.h" #include "erl_db_util.h" @@ -482,7 +483,8 @@ void match_pseudo_process_init(void) { #ifdef ERTS_SMP - erts_smp_tsd_key_create(&match_pseudo_process_key); + erts_smp_tsd_key_create(&match_pseudo_process_key, + "erts_match_pseudo_process_key"); erts_smp_install_exit_handler(destroy_match_pseudo_process); #else match_pseudo_process = create_match_pseudo_process(); @@ -565,6 +567,12 @@ static DMCGuardBif guard_tab[] = DBIF_ALL }, { + am_is_map, + &is_map_1, + 1, + DBIF_ALL + }, + { am_is_binary, &is_binary_1, 1, @@ -631,6 +639,12 @@ static DMCGuardBif guard_tab[] = DBIF_ALL }, { + am_map_size, + &map_size_1, + 1, + DBIF_ALL + }, + { am_bit_size, &bit_size_1, 1, @@ -1838,7 +1852,7 @@ restart: ep = termp; break; case matchArrayBind: /* When the array size is unknown. */ - ASSERT(termp); + ASSERT(termp || arity==0); n = *pc++; variables[n].term = dpm_array_to_list(psp, termp, arity); break; diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c index b90d00f236..50bdc79506 100644 --- a/erts/emulator/beam/erl_debug.c +++ b/erts/emulator/beam/erl_debug.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1998-2012. All Rights Reserved. + * Copyright Ericsson AB 1998-2013. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -29,6 +29,7 @@ #include "bif.h" #include "beam_catches.h" #include "erl_debug.h" +#include "erl_map.h" #define WITHIN(ptr, x, y) ((x) <= (ptr) && (ptr) < (y)) @@ -299,6 +300,9 @@ void erts_check_for_holes(Process* p) ErlHeapFragment* hf; Eterm* start; + if (p->flags & F_DISABLE_GC) + return; + start = p->last_htop ? 
p->last_htop : HEAP_START(p); check_memory(start, HEAP_TOP(p)); p->last_htop = HEAP_TOP(p); diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h index b68fd46fcc..5517c26ba4 100644 --- a/erts/emulator/beam/erl_driver.h +++ b/erts/emulator/beam/erl_driver.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1999-2013. All Rights Reserved. + * Copyright Ericsson AB 1999-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -132,8 +132,8 @@ typedef struct { #define DO_WRITE ERL_DRV_WRITE #define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed) -#define ERL_DRV_EXTENDED_MAJOR_VERSION 2 -#define ERL_DRV_EXTENDED_MINOR_VERSION 2 +#define ERL_DRV_EXTENDED_MAJOR_VERSION 3 +#define ERL_DRV_EXTENDED_MINOR_VERSION 0 /* * The emulator will refuse to load a driver with different major @@ -271,7 +271,6 @@ typedef struct ErlDrvCond_ ErlDrvCond; typedef struct ErlDrvRWLock_ ErlDrvRWLock; typedef int ErlDrvTSDKey; - /* * */ @@ -365,17 +364,23 @@ typedef struct erl_drv_entry { * It must initialize a ErlDrvEntry structure and return a pointer to it. */ +#ifdef STATIC_ERLANG_DRIVER +# define ERLANG_DRIVER_NAME(NAME) NAME ## _driver_init +#else +# define ERLANG_DRIVER_NAME(NAME) driver_init +#endif + /* For windows dynamic drivers */ #ifndef ERL_DRIVER_TYPES_ONLY #if defined(__WIN32__) # define DRIVER_INIT(DRIVER_NAME) \ - __declspec(dllexport) ErlDrvEntry* driver_init(void); \ - __declspec(dllexport) ErlDrvEntry* driver_init(void) + __declspec(dllexport) ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void); \ + __declspec(dllexport) ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void) #else # define DRIVER_INIT(DRIVER_NAME) \ - ErlDrvEntry* driver_init(void); \ - ErlDrvEntry* driver_init(void) + ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void); \ + ErlDrvEntry* ERLANG_DRIVER_NAME(DRIVER_NAME)(void) #endif #define ERL_DRV_BUSY_MSGQ_DISABLED (~((ErlDrvSizeT) 0)) @@ -599,6 +604,8 @@ EXTERN int null_func(void); #define ERL_DRV_INT64 ((ErlDrvTermData) 15) /* ErlDrvSInt64 * */ #define ERL_DRV_UINT64 ((ErlDrvTermData) 16) /* ErlDrvUInt64 * */ +#define ERL_DRV_MAP ((ErlDrvTermData) 17) /* ErlDrvUInt */ + #ifndef ERL_DRIVER_TYPES_ONLY /* make terms for driver_output_term and driver_send_term */ @@ -651,12 +658,6 @@ EXTERN long driver_async(ErlDrvPort ix, void* async_data, void (*async_free)(void*)); -/* - * driver_async_cancel() is deprecated. It is scheduled for removal - * in OTP-R16. For more information see the erl_driver(3) documentation. - */ -EXTERN int driver_async_cancel(unsigned int key) ERL_DRV_DEPRECATED_FUNC; - /* Locks the driver in the machine "forever", there is no unlock function. 
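    /* Aside on the new ERL_DRV_MAP tag above (illustration only; the
     * pair-ordering is an assumption based on how ERL_DRV_TUPLE and
     * ERL_DRV_LIST work): a driver builds a map for erl_drv_output_term()
     * by emitting the key/value terms first and then the map constructor
     * with the pair count, e.g. #{a => 1}: */
    static void send_map_example(ErlDrvTermData port_term)
    {
        ErlDrvTermData spec[] = {
            ERL_DRV_ATOM, driver_mk_atom("a"),
            ERL_DRV_INT,  (ErlDrvTermData) 1,
            ERL_DRV_MAP,  1 /* one key/value pair */
        };
        erl_drv_output_term(port_term, spec, sizeof(spec) / sizeof(spec[0]));
    }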
Note that this is almost never useful, as an open port towards the driver locks it until the port is closed, why unexpected @@ -678,6 +679,16 @@ EXTERN char *driver_dl_error(void); EXTERN int erl_drv_putenv(char *key, char *value); EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size); +#ifdef __OSE__ +typedef ErlDrvUInt ErlDrvOseEventId; +EXTERN union SIGNAL *erl_drv_ose_get_signal(ErlDrvEvent ev); +EXTERN ErlDrvEvent erl_drv_ose_event_alloc(SIGSELECT sig, ErlDrvOseEventId handle, + ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig), void *extra); +EXTERN void erl_drv_ose_event_free(ErlDrvEvent ev); +EXTERN void erl_drv_ose_event_fetch(ErlDrvEvent ev, SIGSELECT *sig, + ErlDrvOseEventId *handle, void **extra); +#endif + #endif /* !ERL_DRIVER_TYPES_ONLY */ #ifdef WIN32_DYNAMIC_ERL_DRIVER diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h index ea013a49a3..3f829ea7ea 100644 --- a/erts/emulator/beam/erl_drv_nif.h +++ b/erts/emulator/beam/erl_drv_nif.h @@ -41,6 +41,13 @@ typedef struct { int suggested_stack_size; } ErlDrvThreadOpts; +#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) +typedef enum { + ERL_DRV_DIRTY_JOB_CPU_BOUND = 1, + ERL_DRV_DIRTY_JOB_IO_BOUND = 2 +} ErlDrvDirtyJobFlags; +#endif + #endif /* __ERL_DRV_NIF_H__ */ diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c index 4f1bba8657..147249f751 100644 --- a/erts/emulator/beam/erl_drv_thread.c +++ b/erts/emulator/beam/erl_drv_thread.c @@ -78,8 +78,6 @@ struct ErlDrvTid_ { static ethr_tsd_key tid_key; -static ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER; - #else /* USE_THREADS */ static Uint tsd_len; static void **tsd; @@ -123,7 +121,7 @@ void erl_drv_thr_init(void) { int i; #ifdef USE_THREADS - int res = ethr_tsd_key_create(&tid_key); + int res = ethr_tsd_key_create(&tid_key,"erts_tid_key"); if (res == 0) res = ethr_install_exit_handler(thread_exit_handler); if (res != 0) @@ -605,6 +603,7 @@ erl_drv_thread_create(char *name, struct ErlDrvTid_ *dtid; ethr_thr_opts ethr_opts; ethr_thr_opts *use_opts; + ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER; if (!opts) use_opts = NULL; diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index e89725c190..aa15d2cc57 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2002-2013. All Rights Reserved. + * Copyright Ericsson AB 2002-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -28,6 +28,7 @@ #include "beam_catches.h" #include "erl_binary.h" #include "erl_bits.h" +#include "erl_map.h" #include "error.h" #include "big.h" #include "erl_gc.h" @@ -400,10 +401,18 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) Uint reclaimed_now = 0; int done = 0; Uint ms1, s1, us1; - ErtsSchedulerData *esdp = erts_get_scheduler_data(); + ErtsSchedulerData *esdp; #ifdef USE_VM_PROBES DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE); #endif + + if (p->flags & F_DISABLE_GC) { + ASSERT(need == 0); + return 1; + } + + esdp = erts_get_scheduler_data(); + if (IS_TRACED_FL(p, F_TRACE_GC)) { trace_gc(p, am_gc_start); } @@ -532,6 +541,9 @@ erts_garbage_collect_hibernate(Process* p) Uint area_size; Sint offs; + if (p->flags & F_DISABLE_GC) + ERTS_INTERNAL_ERROR("GC disabled"); + /* * Preliminaries. 
*/ @@ -667,6 +679,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint n; struct erl_off_heap_header** prev; + if (p->flags & F_DISABLE_GC) + return; /* * Set GC state. */ @@ -1146,7 +1160,7 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj) old_htop = sweep_one_area(OLD_HTOP(p), old_htop, heap, heap_size); } OLD_HTOP(p) = old_htop; - HIGH_WATER(p) = (HEAP_START(p) != HIGH_WATER(p)) ? n_heap : n_htop; + HIGH_WATER(p) = n_htop; if (MSO(p).first) { sweep_off_heap(p, 0); @@ -1964,17 +1978,6 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset) ++n; } - /* - * A trapping BIF can add to rootset by setting the extra_root - * in the process_structure. - */ - if (p->extra_root != NULL) { - roots[n].v = p->extra_root->objv; - roots[n].sz = p->extra_root->sz; - ++n; - } - - ASSERT((is_nil(p->seq_trace_token) || is_tuple(follow_moved(p->seq_trace_token)) || is_atom(p->seq_trace_token))); @@ -2552,11 +2555,6 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size, p->dictionary->used, offs, area, area_size); } - if (p->extra_root != NULL) { - offset_heap_ptr(p->extra_root->objv, - p->extra_root->sz, - offs, area, area_size); - } offset_heap_ptr(&p->fvalue, 1, offs, area, area_size); offset_heap_ptr(&p->ftrace, 1, offs, area, area_size); diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h index 1801df359a..5203dda263 100644 --- a/erts/emulator/beam/erl_gc.h +++ b/erts/emulator/beam/erl_gc.h @@ -20,6 +20,8 @@ #ifndef __ERL_GC_H__ #define __ERL_GC_H__ +#include "erl_map.h" + /* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */ #if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -42,23 +44,24 @@ do { \ HTOP += 2; /* update tospace htop */ \ } while(0) -#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \ -do { \ - Eterm gval; \ - Sint nelts; \ - \ - ASSERT(is_header(HDR)); \ - gval = make_boxed(HTOP); \ - *ORIG = gval; \ - *HTOP++ = HDR; \ - *PTR++ = gval; \ - nelts = header_arity(HDR); \ - switch ((HDR) & _HEADER_SUBTAG_MASK) { \ - case SUB_BINARY_SUBTAG: nelts++; break; \ - case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR-1))->num_free+1; break; \ - } \ - while (nelts--) \ - *HTOP++ = *PTR++; \ +#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \ +do { \ + Eterm gval; \ + Sint nelts; \ + \ + ASSERT(is_header(HDR)); \ + nelts = header_arity(HDR); \ + switch ((HDR) & _HEADER_SUBTAG_MASK) { \ + case SUB_BINARY_SUBTAG: nelts++; break; \ + case MAP_SUBTAG: nelts+=map_get_size(PTR) + 1; break; \ + case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR))->num_free+1; break; \ + } \ + gval = make_boxed(HTOP); \ + *ORIG = gval; \ + *HTOP++ = HDR; \ + *PTR++ = gval; \ + while (nelts--) *HTOP++ = *PTR++; \ + \ } while(0) #define in_area(ptr,start,nbytes) \ diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 8c4fffa75b..d54658f1ea 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -178,6 +178,11 @@ int erts_compat_rel; static int no_schedulers; static int no_schedulers_online; +#ifdef ERTS_DIRTY_SCHEDULERS +static int no_dirty_cpu_schedulers; +static int no_dirty_cpu_schedulers_online; +static int no_dirty_io_schedulers; +#endif #ifdef DEBUG Uint32 verbose; /* See erl_debug.h for information about verbose */ @@ -304,7 +309,13 @@ erl_init(int ncpu, erts_init_sys_common_misc(); erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab); erts_init_scheduling(no_schedulers, - no_schedulers_online); + no_schedulers_online +#ifdef ERTS_DIRTY_SCHEDULERS + , no_dirty_cpu_schedulers, + no_dirty_cpu_schedulers_online, + 
no_dirty_io_schedulers +#endif + ); erts_init_cpu_topology(); /* Must be after init_scheduling */ erts_init_gc(); /* Must be after init_scheduling */ erts_alloc_late_init(); @@ -484,92 +495,107 @@ void erts_usage(void) /* erts_fprintf(stderr, "-# number set the number of items to be used in traces etc\n"); */ - erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n"); - erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n", + erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n"); + erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n", ERTS_ASYNC_THREAD_MIN_STACK_SIZE, ERTS_ASYNC_THREAD_MAX_STACK_SIZE); - erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n"); - erts_fprintf(stderr, " valid range is [0-%d]\n", + erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n"); + erts_fprintf(stderr, " valid range is [0-%d]\n", ERTS_MAX_NO_OF_ASYNC_THREADS); - erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n"); - erts_fprintf(stderr, " d (or no extra option) to disable the break\n"); - erts_fprintf(stderr, " handler, i to ignore break signals\n"); + erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n"); + erts_fprintf(stderr, " d (or no extra option) to disable the break\n"); + erts_fprintf(stderr, " handler, i to ignore break signals\n"); /* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */ - erts_fprintf(stderr, "-c disable continuous date/time correction with\n"); - erts_fprintf(stderr, " respect to uptime\n"); + erts_fprintf(stderr, "-c disable continuous date/time correction with\n"); + erts_fprintf(stderr, " respect to uptime\n"); - erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n"); - erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n"); - erts_fprintf(stderr, "-fn[u|a|l] Control how filenames are interpreted\n"); - erts_fprintf(stderr, "-hms size set minimum heap size in words (default %d)\n", + erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n"); + erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n"); + erts_fprintf(stderr, "-fn[u|a|l] Control how filenames are interpreted\n"); + erts_fprintf(stderr, "-hms size set minimum heap size in words (default %d)\n", H_DEFAULT_SIZE); - erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n", + erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n", VH_DEFAULT_SIZE); /* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */ - erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n"); - erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n"); - erts_fprintf(stderr, " Note that this flag is deprecated!\n"); - erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n"); - erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n"); - erts_fprintf(stderr, "-pc <set> Control what characters are considered printable (default latin1)\n"); - erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n"); - erts_fprintf(stderr, " valid range is [%d-%d]\n", + erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n"); + erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n"); + erts_fprintf(stderr, " Note that this flag is deprecated!\n"); + 
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n"); + erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n"); + erts_fprintf(stderr, "-pc <set> Control what characters are considered printable (default latin1)\n"); + erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n"); + erts_fprintf(stderr, " valid range is [%d-%d]\n", ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES); - erts_fprintf(stderr, "-Q number set maximum number of ports on this node,\n"); - erts_fprintf(stderr, " valid range is [%d-%d]\n", + erts_fprintf(stderr, "-Q number set maximum number of ports on this node,\n"); + erts_fprintf(stderr, " valid range is [%d-%d]\n", ERTS_MIN_PORTS, ERTS_MAX_PORTS); - erts_fprintf(stderr, "-R number set compatibility release number,\n"); - erts_fprintf(stderr, " valid range [%d-%d]\n", + erts_fprintf(stderr, "-R number set compatibility release number,\n"); + erts_fprintf(stderr, " valid range [%d-%d]\n", this_rel-2, this_rel); - erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n"); - erts_fprintf(stderr, "-rg amount set reader groups limit\n"); - erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n"); - erts_fprintf(stderr, "-stbt type u|ns|ts|ps|s|nnts|nnps|tnnps|db\n"); - erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n"); - erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n"); - erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n"); - erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); - erts_fprintf(stderr, "-sct cput set cpu topology,\n"); - erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); - erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n"); - erts_fprintf(stderr, " default|legacy.\n"); - erts_fprintf(stderr, "-swct val set scheduler wake cleanup threshold, valid values are:\n"); - erts_fprintf(stderr, " very_lazy|lazy|medium|eager|very_eager.\n"); - erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n"); - erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n"); - erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n"); - erts_fprintf(stderr, " valid range is [%d-%d]\n", + erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n"); + erts_fprintf(stderr, "-rg amount set reader groups limit\n"); + erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n"); + erts_fprintf(stderr, "-stbt type u|ns|ts|ps|s|nnts|nnps|tnnps|db\n"); + erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n"); + erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n"); + erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n"); + erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); + erts_fprintf(stderr, "-sct cput set cpu topology,\n"); + erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT + erts_fprintf(stderr, "-sub bool enable/disable scheduler utilization balancing,\n"); +#else + erts_fprintf(stderr, "-sub false disable scheduler utilization balancing,\n"); +#endif + erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); + erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n"); + erts_fprintf(stderr, " default|legacy.\n"); + 
erts_fprintf(stderr, "-swct val set scheduler wake cleanup threshold, valid values are:\n"); + erts_fprintf(stderr, " very_lazy|lazy|medium|eager|very_eager.\n"); + erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n"); + erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n"); + erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n"); + erts_fprintf(stderr, " valid range is [%d-%d]\n", ERTS_SCHED_THREAD_MIN_STACK_SIZE, ERTS_SCHED_THREAD_MAX_STACK_SIZE); - erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n"); - erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n"); - erts_fprintf(stderr, " schedulers online (n2), maximum for both\n"); - erts_fprintf(stderr, " numbers is %d\n", + erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n"); + erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n"); + erts_fprintf(stderr, " schedulers online (n2), maximum for both\n"); + erts_fprintf(stderr, " numbers is %d\n", ERTS_MAX_NO_OF_SCHEDULERS); - erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n"); - erts_fprintf(stderr, " as percentages of logical processors configured and logical\n"); - erts_fprintf(stderr, " processors available, respectively\n"); - erts_fprintf(stderr, "-t size set the maximum number of atoms the " - "emulator can handle\n"); - erts_fprintf(stderr, " valid range is [%d-%d]\n", + erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n"); + erts_fprintf(stderr, " as percentages of logical processors configured and logical\n"); + erts_fprintf(stderr, " processors available, respectively\n"); +#ifdef ERTS_DIRTY_SCHEDULERS + erts_fprintf(stderr, "-SDcpu n1:n2 set number of dirty CPU schedulers (n1), and number of\n"); + erts_fprintf(stderr, " dirty CPU schedulers online (n2), valid range for both\n"); + erts_fprintf(stderr, " numbers is [1-%d], and n2 must be less than or equal to n1\n", + ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS); + erts_fprintf(stderr, "-SDPcpu p1:p2 specify dirty CPU schedulers (p1) and dirty CPU schedulers\n"); + erts_fprintf(stderr, " online (p2) as percentages of logical processors configured\n"); + erts_fprintf(stderr, " and logical processors available, respectively\n"); + erts_fprintf(stderr, "-SDio n set number of dirty I/O schedulers, valid range is [0-%d]\n", + ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS); +#endif + erts_fprintf(stderr, "-t size set the maximum number of atoms the emulator can handle\n"); + erts_fprintf(stderr, " valid range is [%d-%d]\n", MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE); - erts_fprintf(stderr, "-T number set modified timing level,\n"); - erts_fprintf(stderr, " valid range is [0-%d]\n", + erts_fprintf(stderr, "-T number set modified timing level, valid range is [0-%d]\n", ERTS_MODIFIED_TIMING_LEVELS-1); - erts_fprintf(stderr, "-V print Erlang version\n"); + erts_fprintf(stderr, "-V print Erlang version\n"); - erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n"); + erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n"); - erts_fprintf(stderr, "-W<i|w> set error logger warnings mapping,\n"); - erts_fprintf(stderr, " see error_logger documentation for details\n"); - erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n"); - erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024); + erts_fprintf(stderr, "-W<i|w> set error logger warnings 
mapping,\n"); + erts_fprintf(stderr, " see error_logger documentation for details\n"); + erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n"); + erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024); erts_fprintf(stderr, "\n"); erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n"); erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n"); @@ -637,6 +663,13 @@ early_init(int *argc, char **argv) /* int schdlrs_percentage = 100; int schdlrs_onln_percentage = 100; int max_main_threads; +#ifdef ERTS_DIRTY_SCHEDULERS + int dirty_cpu_scheds; + int dirty_cpu_scheds_online; + int dirty_cpu_scheds_pctg = 100; + int dirty_cpu_scheds_onln_pctg = 100; + int dirty_io_scheds; +#endif int max_reader_groups; int reader_groups; char envbuf[21]; /* enough for any 64-bit integer */ @@ -687,7 +720,7 @@ early_init(int *argc, char **argv) /* #endif #ifdef ERTS_SMP erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L); - erts_tsd_key_create(&erts_is_crash_dumping_key); + erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key"); #else erts_writing_erl_crash_dump = 0; #endif @@ -712,6 +745,12 @@ early_init(int *argc, char **argv) /* schdlrs = no_schedulers; schdlrs_onln = no_schedulers_online; +#ifdef ERTS_DIRTY_SCHEDULERS + dirty_cpu_scheds = no_schedulers; + dirty_cpu_scheds_online = no_schedulers_online; + dirty_io_scheds = 10; +#endif + envbufsz = sizeof(envbuf); /* erts_sys_getenv(_raw)() not initialized yet; need erts_sys_getenv__() */ @@ -752,7 +791,7 @@ early_init(int *argc, char **argv) /* case 'A': { /* set number of threads in thread pool */ char *arg = get_arg(argv[i]+2, argv[i+1], &i); - if (((erts_async_max_threads = atoi(arg)) < 0) || + if (((erts_async_max_threads = atoi(arg)) < ERTS_MIN_NO_OF_ASYNC_THREADS) || (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) { erts_fprintf(stderr, "bad number of async threads %s\n", @@ -802,7 +841,121 @@ early_init(int *argc, char **argv) /* VERBOSE(DEBUG_SYSTEM, ("using %d:%d scheduler percentages\n", schdlrs_percentage, schdlrs_onln_percentage)); - } else { + } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (argv[i][2] == 'D') { + char *arg; + char *type = argv[i]+3; + if (strncmp(type, "Pcpu", 4) == 0) { + int ptot, ponln; + arg = get_arg(argv[i]+7, argv[i+1], &i); + switch (sscanf(arg, "%d:%d", &ptot, &ponln)) { + case 0: + switch (sscanf(arg, ":%d", &ponln)) { + case 1: + if (ponln < 0) + goto bad_SDPcpu; + ptot = 100; + goto chk_SDPcpu; + default: + goto bad_SDPcpu; + } + case 1: + if (ptot < 0) + goto bad_SDPcpu; + ponln = ptot < 100 ? ptot : 100; + goto chk_SDPcpu; + case 2: + if (ptot < 0 || ponln < 0) + goto bad_SDPcpu; + chk_SDPcpu: + dirty_cpu_scheds_pctg = ptot; + dirty_cpu_scheds_onln_pctg = ponln; + break; + default: + bad_SDPcpu: + erts_fprintf(stderr, + "bad dirty CPU schedulers percentage specifier %s\n", + arg); + erts_usage(); + break; + } + VERBOSE(DEBUG_SYSTEM, + ("using %d:%d dirty CPU scheduler percentages\n", + dirty_cpu_scheds_pctg, dirty_cpu_scheds_onln_pctg)); + } else if (strncmp(type, "cpu", 3) == 0) { + int tot, onln; + arg = get_arg(argv[i]+6, argv[i+1], &i); + switch (sscanf(arg, "%d:%d", &tot, &onln)) { + case 0: + switch (sscanf(arg, ":%d", &onln)) { + case 1: + tot = no_schedulers; + goto chk_SDcpu; + default: + goto bad_SDcpu; + } + case 1: + onln = tot < dirty_cpu_scheds_online ? 
+ tot : dirty_cpu_scheds_online; + case 2: + chk_SDcpu: + if (tot > 0) + dirty_cpu_scheds = tot; + else + dirty_cpu_scheds = no_schedulers + tot; + if (onln > 0) + dirty_cpu_scheds_online = onln; + else + dirty_cpu_scheds_online = no_schedulers_online + onln; + if (dirty_cpu_scheds < 1 || + ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS < dirty_cpu_scheds) { + erts_fprintf(stderr, + "bad number of dirty CPU schedulers %d\n", + tot); + erts_usage(); + } + if (dirty_cpu_scheds_online < 1 || + dirty_cpu_scheds < dirty_cpu_scheds_online) { + erts_fprintf(stderr, + "bad number of dirty CPU schedulers online %d " + "(total number of dirty CPU schedulers %d)\n", + dirty_cpu_scheds_online, dirty_cpu_scheds); + erts_usage(); + } + break; + default: + bad_SDcpu: + erts_fprintf(stderr, + "bad number of dirty CPU schedulers %s\n", + arg); + erts_usage(); + break; + } + VERBOSE(DEBUG_SYSTEM, + ("using %d:%d dirty CPU scheduler(s)\n", tot, onln)); + } else if (strncmp(type, "io", 2) == 0) { + arg = get_arg(argv[i]+5, argv[i+1], &i); + dirty_io_scheds = atoi(arg); + if (dirty_io_scheds < 0 || + dirty_io_scheds > ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS) { + erts_fprintf(stderr, + "bad number of dirty I/O schedulers %s\n", + arg); + erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, + ("using %d dirty I/O scheduler(s)\n", dirty_io_scheds)); + } else { + erts_fprintf(stderr, + "bad or missing dirty scheduler specifier: %s\n", + argv[i]); + erts_usage(); + break; + } + } +#endif + else { int tot, onln; char *arg = get_arg(argv[i]+2, argv[i+1], &i); switch (sscanf(arg, "%d:%d", &tot, &onln)) { @@ -889,6 +1042,17 @@ early_init(int *argc, char **argv) /* (void)schdlrs_percentage; (void)schdlrs_onln_percentage; #endif +#ifdef ERTS_DIRTY_SCHEDULERS + /* apply any dirty scheduler percentages */ + if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) { + dirty_cpu_scheds = dirty_cpu_scheds * dirty_cpu_scheds_pctg / 100; + dirty_cpu_scheds_online = dirty_cpu_scheds_online * dirty_cpu_scheds_onln_pctg / 100; + } + if (dirty_cpu_scheds > schdlrs) + dirty_cpu_scheds = schdlrs; + if (dirty_cpu_scheds_online > schdlrs_onln) + dirty_cpu_scheds_online = schdlrs_onln; +#endif } #ifndef USE_THREADS @@ -901,6 +1065,11 @@ early_init(int *argc, char **argv) /* erts_no_schedulers = (Uint) no_schedulers; #endif +#ifdef ERTS_DIRTY_SCHEDULERS + erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds; + no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online; + erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds; +#endif erts_early_init_scheduling(no_schedulers); alloc_opts.ncpu = ncpu; @@ -918,10 +1087,18 @@ early_init(int *argc, char **argv) /* * * * Unmanaged threads that need to register: * ** Async threads (see erl_async.c) + * ** Dirty scheduler threads */ erts_thr_progress_init(no_schedulers, no_schedulers+2, - erts_async_max_threads); +#ifndef ERTS_DIRTY_SCHEDULERS + erts_async_max_threads +#else + erts_async_max_threads + + erts_no_dirty_cpu_schedulers + + erts_no_dirty_io_schedulers +#endif + ); #endif erts_thr_q_init(); erts_init_utils(); @@ -1386,7 +1563,15 @@ erl_start(int argc, char **argv) break; case 'S' : /* Was handled in early_init() just read past it */ - if (argv[i][2] == 'P') + if (argv[i][2] == 'D') { + char* type = argv[i]+3; + if (strcmp(type, "Pcpu") == 0) + (void) get_arg(argv[i]+7, argv[i+1], &i); + else if (strcmp(type, "cpu") == 0) + (void) get_arg(argv[i]+6, argv[i+1], &i); + else if (strcmp(type, "io") == 0) + (void) get_arg(argv[i]+5, argv[i+1], &i); + } else if (argv[i][2] 
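+ /* The percentage adjustment above, worked through: assuming
+  * dirty_cpu_scheds = dirty_cpu_scheds_online = 8 (their defaults follow
+  * no_schedulers), "-SDPcpu 100:50" gives 8*100/100 = 8 dirty CPU
+  * schedulers of which 8*50/100 = 4 are online; both results are then
+  * capped at the number of normal schedulers (schdlrs) and normal
+  * schedulers online (schdlrs_onln).
+  */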
== 'P') (void) get_arg(argv[i]+3, argv[i+1], &i); else (void) get_arg(argv[i]+2, argv[i+1], &i); break; @@ -1433,8 +1618,10 @@ erl_start(int argc, char **argv) } else if (has_prefix("cl", sub_param)) { arg = get_arg(sub_param+2, argv[i+1], &i); - if (sys_strcmp("true", arg) == 0) + if (sys_strcmp("true", arg) == 0) { erts_sched_compact_load = 1; + erts_sched_balance_util = 0; + } else if (sys_strcmp("false", arg) == 0) erts_sched_compact_load = 0; else { @@ -1512,6 +1699,26 @@ erl_start(int argc, char **argv) erts_usage(); } } + else if (has_prefix("ub", sub_param)) { + arg = get_arg(sub_param+2, argv[i+1], &i); + if (sys_strcmp("true", arg) == 0) { +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT + erts_sched_balance_util = 1; +#else + erts_fprintf(stderr, + "scheduler utilization balancing not " + "supported on this system\n"); + erts_usage(); +#endif + } + else if (sys_strcmp("false", arg) == 0) + erts_sched_balance_util = 0; + else { + erts_fprintf(stderr, "bad scheduler utilization balancing " + "value '%s'\n", arg); + erts_usage(); + } + } else if (has_prefix("wct", sub_param)) { arg = get_arg(sub_param+3, argv[i+1], &i); if (erts_sched_set_wake_cleanup_threshold(arg) != 0) { diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 0dd83fa6ed..7e3a90779d 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -123,6 +123,9 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "schdlr_sspnd", NULL }, { "migration_info_update", NULL }, { "run_queue", "address" }, +#ifdef ERTS_DIRTY_SCHEDULERS + { "dirty_run_queue_sleep_list", "address" }, +#endif { "process_table", NULL }, { "cpu_info", NULL }, { "pollset", "address" }, @@ -238,6 +241,8 @@ struct erts_lc_locked_lock_t_ { erts_lc_locked_lock_t *prev; UWord extra; Sint16 id; + char *file; + unsigned int line; Uint16 flags; }; @@ -427,47 +432,51 @@ make_my_locked_locks(void) } static ERTS_INLINE erts_lc_locked_lock_t * -new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags) +new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags, + char *file, unsigned int line) { erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc(); l_lck->next = NULL; l_lck->prev = NULL; l_lck->id = lck->id; l_lck->extra = lck->extra; + l_lck->file = file; + l_lck->line = line; l_lck->flags = lck->flags | op_flags; return l_lck; } static void -print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix) +raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags, + char* file, unsigned int line, char *suffix) { char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE ? 
erts_lock_order[id].name : "unknown"); + erts_fprintf(stderr,"%s'%s:",prefix,lname); + if (is_not_immed(extra)) - erts_fprintf(stderr, - "%s'%s:%p%s'%s%s", - prefix, - lname, - _unchecked_boxed_val(extra), - lock_type(flags), - rw_op_str(flags), - suffix); + erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra)); else - erts_fprintf(stderr, - "%s'%s:%T%s'%s%s", - prefix, - lname, - extra, - lock_type(flags), - rw_op_str(flags), - suffix); + erts_fprintf(stderr,"%T",extra); + erts_fprintf(stderr,"%s",lock_type(flags)); + + if (file) + erts_fprintf(stderr,"(%s:%d)",file,line); + + erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix); +} + +static void +print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix) +{ + raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix); } static void print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix) { - print_lock2(prefix, lck->id, lck->extra, lck->flags, suffix); + raw_print_lock(prefix, lck->id, lck->extra, lck->flags, NULL, 0, suffix); } static void @@ -483,7 +492,8 @@ print_curr_locks(erts_lc_locked_locks_t *l_lcks) "Currently these locks are locked by the %s thread:\n", l_lcks->thread_name); for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) - print_lock2(" ", l_lck->id, l_lck->extra, l_lck->flags, "\n"); + raw_print_lock(" ", l_lck->id, l_lck->extra, l_lck->flags, + l_lck->file, l_lck->line, "\n"); } } @@ -999,7 +1009,8 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags) #endif } -void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags) +void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags, + char *file, unsigned int line) { erts_lc_locked_locks_t *l_lcks; erts_lc_locked_lock_t *l_lck; @@ -1011,7 +1022,7 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags) return; l_lcks = make_my_locked_locks(); - l_lck = locked ? new_locked_lock(lck, op_flags) : NULL; + l_lck = locked ? 
new_locked_lock(lck, op_flags, file, line) : NULL; if (!l_lcks->locked.last) { ASSERT(!l_lcks->locked.first); @@ -1052,13 +1063,14 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags) } -void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags) +void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags, + char *file, unsigned int line) { erts_lc_locked_locks_t *l_lcks = make_my_locked_locks(); erts_lc_locked_lock_t *l_lck = l_lcks->locked.first; if (!find_lock(&l_lck, lck)) required_not_locked(l_lcks, lck); - l_lck = new_locked_lock(lck, op_flags); + l_lck = new_locked_lock(lck, op_flags, file, line); if (!l_lcks->required.last) { ASSERT(!l_lcks->required.first); l_lck->next = l_lck->prev = NULL; @@ -1126,7 +1138,8 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags) lc_free((void *) l_lck); } -void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags) +void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags, + char *file, unsigned int line) { erts_lc_locked_locks_t *l_lcks; erts_lc_locked_lock_t *l_lck; @@ -1138,7 +1151,7 @@ void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags) return; l_lcks = make_my_locked_locks(); - l_lck = new_locked_lock(lck, op_flags); + l_lck = new_locked_lock(lck, op_flags, file, line); if (!l_lcks->locked.last) { ASSERT(!l_lcks->locked.first); @@ -1229,15 +1242,15 @@ erts_lc_trylock_force_busy(erts_lc_lock_t *lck) } void -erts_lc_trylock(int locked, erts_lc_lock_t *lck) +erts_lc_trylock_x(int locked, erts_lc_lock_t *lck, char *file, unsigned int line) { - erts_lc_trylock_flg(locked, lck, 0); + erts_lc_trylock_flg_x(locked, lck, 0, file, line); } void -erts_lc_lock(erts_lc_lock_t *lck) +erts_lc_lock_x(erts_lc_lock_t *lck, char *file, unsigned int line) { - erts_lc_lock_flg(lck, 0); + erts_lc_lock_flg_x(lck, 0, file, line); } void @@ -1251,9 +1264,9 @@ void erts_lc_might_unlock(erts_lc_lock_t *lck) erts_lc_might_unlock_flg(lck, 0); } -void erts_lc_require_lock(erts_lc_lock_t *lck) +void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line) { - erts_lc_require_lock_flg(lck, 0); + erts_lc_require_lock_flg(lck, 0, file, line); } void erts_lc_unrequire_lock(erts_lc_lock_t *lck) @@ -1319,7 +1332,7 @@ erts_lc_init(void) if (ethr_spinlock_init(&free_blocks_lock) != 0) lc_abort(); - erts_tsd_key_create(&locks_key); + erts_tsd_key_create(&locks_key,"erts_lock_check_key"); } void diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h index 068340abe7..3f7f417e61 100644 --- a/erts/emulator/beam/erl_lock_check.h +++ b/erts/emulator/beam/erl_lock_check.h @@ -35,6 +35,11 @@ #ifdef ERTS_ENABLE_LOCK_CHECK +#ifndef ERTS_ENABLE_LOCK_POSITION +/* Enable in order for _x variants of mtx functions to be used. 
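+ * With ERTS_ENABLE_LOCK_POSITION defined, the erts_lc_lock() and
+ * erts_lc_trylock() macros at the bottom of this header expand to the _x
+ * variants with __FILE__ and __LINE__ appended, so every held lock records
+ * where it was taken and print_curr_locks() can report entries such as
+ *     'run_queue:1(erl_process.c:1234)'[rw]
+ * (the lock, file and line shown here are made-up examples; the exact
+ * annotations come from lock_type() and rw_op_str()).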
*/ +#define ERTS_ENABLE_LOCK_POSITION 1 +#endif + typedef struct { int inited; Sint16 id; @@ -79,13 +84,16 @@ void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len); void erts_lc_have_lock_ids(int *resv, int *ids, int len); void erts_lc_check_no_locked_of_type(Uint16 flags); int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags); -void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags); -void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags); +void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags, + char *file, unsigned int line); +void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags, + char *file, unsigned int line); void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags); void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags); int erts_lc_trylock_force_busy(erts_lc_lock_t *lck); -void erts_lc_trylock(int locked, erts_lc_lock_t *lck); -void erts_lc_lock(erts_lc_lock_t *lck); +void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck, + char* file, unsigned int line); +void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line); void erts_lc_unlock(erts_lc_lock_t *lck); void erts_lc_might_unlock(erts_lc_lock_t *lck); void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags); @@ -96,10 +104,11 @@ int erts_lc_assert_failed(char *file, int line, char *assertion); void erts_lc_set_thread_name(char *thread_name); void erts_lc_pll(void); -void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags); +void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags, + char *file, unsigned int line); void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags); -void erts_lc_require_lock(erts_lc_lock_t *lck); +void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line); void erts_lc_unrequire_lock(erts_lc_lock_t *lck); int erts_lc_is_emu_thr(void); @@ -116,4 +125,9 @@ int erts_lc_is_emu_thr(void); #define ERTS_LC_ASSERT(A) ((void) 1) #endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */ +#define erts_lc_lock(lck) erts_lc_lock_x(lck,__FILE__,__LINE__) +#define erts_lc_trylock(res,lck) erts_lc_trylock_x(res,lck,__FILE__,__LINE__) +#define erts_lc_lock_flg(lck) erts_lc_lock_flg_x(lck,__FILE__,__LINE__) +#define erts_lc_trylock_flg(res,lck) erts_lc_trylock_flg_x(res,lck,__FILE__,__LINE__) + #endif /* #ifndef ERTS_LOCK_CHECK_H__ */ diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c index 5f75b0ac0b..6f44bf097b 100644 --- a/erts/emulator/beam/erl_lock_count.c +++ b/erts/emulator/beam/erl_lock_count.c @@ -236,7 +236,7 @@ void erts_lcnt_init() { /* init tsd */ lcnt_n_thr = 0; - ethr_tsd_key_create(&lcnt_thr_data_key); + ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data"); lcnt_lock(); diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h index a4fc91b510..75f7cd028b 100644 --- a/erts/emulator/beam/erl_lock_count.h +++ b/erts/emulator/beam/erl_lock_count.h @@ -61,8 +61,14 @@ #define ERTS_LOCK_COUNT_H__ #ifdef ERTS_ENABLE_LOCK_COUNT +#ifndef ERTS_ENABLE_LOCK_POSITION +/* Enable in order for _x variants of mtx functions to be used. 
*/ +#define ERTS_ENABLE_LOCK_POSITION 1 +#endif + #include "ethread.h" + #define ERTS_LCNT_MAX_LOCK_LOCATIONS (10) #define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0) diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c new file mode 100644 index 0000000000..2fff7f9390 --- /dev/null +++ b/erts/emulator/beam/erl_map.c @@ -0,0 +1,819 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2014. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + * + * Author: Björn-Egil Dahlberg + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "sys.h" +#include "erl_vm.h" +#include "global.h" +#include "erl_process.h" +#include "error.h" +#include "bif.h" + +#include "erl_map.h" + +/* BIFs + * + * DONE: + * - erlang:is_map/1 + * - erlang:map_size/1 + * + * - maps:find/2 + * - maps:from_list/1 + * - maps:get/2 + * - maps:is_key/2 + * - maps:keys/1 + * - maps:merge/2 + * - maps:new/0 + * - maps:put/3 + * - maps:remove/2 + * - maps:to_list/1 + * - maps:update/3 + * - maps:values/1 + * + * TODO: + * - maps:foldl/3 + * - maps:foldr/3 + * - maps:map/3 + * - maps:size/1 + * - maps:without/2 + * + */ + +/* erlang:map_size/1 + * the corresponding instruction is implemented in: + * beam/erl_bif_guard.c + */ + +BIF_RETTYPE map_size_1(BIF_ALIST_1) { + if (is_map(BIF_ARG_1)) { + Eterm *hp; + Uint hsz = 0; + map_t *mp = (map_t*)map_val(BIF_ARG_1); + Uint n = map_get_size(mp); + + erts_bld_uint(NULL, &hsz, n); + hp = HAlloc(BIF_P, hsz); + BIF_RET(erts_bld_uint(&hp, NULL, n)); + } + + BIF_ERROR(BIF_P, BADARG); +} + +/* maps:to_list/1 + */ + +BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) { + if (is_map(BIF_ARG_1)) { + Uint n; + Eterm* hp; + Eterm *ks,*vs, res, tup; + map_t *mp = (map_t*)map_val(BIF_ARG_1); + + ks = map_get_keys(mp); + vs = map_get_values(mp); + n = map_get_size(mp); + hp = HAlloc(BIF_P, (2 + 3) * n); + res = NIL; + + while(n--) { + tup = TUPLE2(hp, ks[n], vs[n]); hp += 3; + res = CONS(hp, tup, res); hp += 2; + } + + BIF_RET(res); + } + + BIF_ERROR(BIF_P, BADARG); +} + +/* maps:find/2 + * return value if key *matches* a key in the map + */ + +int erts_maps_find(Eterm key, Eterm map, Eterm *value) { + + Eterm *ks,*vs; + map_t *mp; + Uint n,i; + + mp = (map_t*)map_val(map); + n = map_get_size(mp); + ks = map_get_keys(mp); + vs = map_get_values(mp); + + for( i = 0; i < n; i++) { + if (EQ(ks[i], key)) { + *value = vs[i]; + return 1; + } + } + return 0; +} + +BIF_RETTYPE maps_find_2(BIF_ALIST_2) { + if (is_map(BIF_ARG_2)) { + Eterm *hp, value,res; + + if (erts_maps_find(BIF_ARG_1, BIF_ARG_2, &value)) { + hp = HAlloc(BIF_P, 3); + res = make_tuple(hp); + *hp++ = make_arityval(2); + *hp++ = am_ok; + *hp++ = value; + BIF_RET(res); + } + + BIF_RET(am_error); + } + BIF_ERROR(BIF_P, BADARG); +} +/* maps:get/2 + * return value if key *matches* a key in the map + * exception bad_key if none matches + */ + + +int erts_maps_get(Eterm key, Eterm map, Eterm *value) { + Eterm *ks,*vs; + map_t *mp; + Uint n,i; + + mp = 
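+ /* A map is a sorted key tuple plus a parallel value array (see
+  * erl_map.h), so lookup is a linear scan over the keys. For immediate
+  * terms (atoms, small integers, local pids, ...) the first loop below can
+  * use a plain word comparison, since two immediates are equal exactly
+  * when their tagged words are identical; other terms fall back to the
+  * structural EQ() comparison. */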
(map_t*)map_val(map); + n = map_get_size(mp); + + if (n == 0) + return 0; + + ks = map_get_keys(mp); + vs = map_get_values(mp); + + if (is_immed(key)) { + for( i = 0; i < n; i++) { + if (ks[i] == key) { + *value = vs[i]; + return 1; + } + } + } + + for( i = 0; i < n; i++) { + if (EQ(ks[i], key)) { + *value = vs[i]; + return 1; + } + } + return 0; +} + +BIF_RETTYPE maps_get_2(BIF_ALIST_2) { + if (is_map(BIF_ARG_2)) { + Eterm *hp; + Eterm value, error; + char *s_error; + + if (erts_maps_get(BIF_ARG_1, BIF_ARG_2, &value)) { + BIF_RET(value); + } + + s_error = "bad_key"; + error = am_atom_put(s_error, sys_strlen(s_error)); + + hp = HAlloc(BIF_P, 3); + BIF_P->fvalue = TUPLE2(hp, error, BIF_ARG_1); + BIF_ERROR(BIF_P, EXC_ERROR_2); + } + BIF_ERROR(BIF_P, BADARG); +} + +/* maps:from_list/1 + * List may be unsorted [{K,V}] + */ + +BIF_RETTYPE maps_from_list_1(BIF_ALIST_1) { + Eterm *kv, item = BIF_ARG_1; + Eterm *hp, *thp,*vs, *ks, keys, res; + map_t *mp; + Uint size = 0, unused_size = 0; + Sint c = 0; + Sint idx = 0; + + if (is_list(item) || is_nil(item)) { + + /* Calculate size and check validity */ + + while(is_list(item)) { + res = CAR(list_val(item)); + if (is_not_tuple(res)) + goto error; + + kv = tuple_val(res); + if (*kv != make_arityval(2)) + goto error; + + size++; + item = CDR(list_val(item)); + } + + if (is_not_nil(item)) + goto error; + + hp = HAlloc(BIF_P, 3 + 1 + (2 * size)); + thp = hp; + keys = make_tuple(hp); + *hp++ = make_arityval(size); + ks = hp; + hp += size; + mp = (map_t*)hp; + res = make_map(mp); + hp += MAP_HEADER_SIZE; + vs = hp; + + mp->thing_word = MAP_HEADER; + mp->size = size; /* set later, might shrink */ + mp->keys = keys; + + if (size == 0) + BIF_RET(res); + + item = BIF_ARG_1; + + /* first entry */ + kv = tuple_val(CAR(list_val(item))); + ks[0] = kv[1]; + vs[0] = kv[2]; + size = 1; + item = CDR(list_val(item)); + + /* insert sort key/value pairs */ + while(is_list(item)) { + + kv = tuple_val(CAR(list_val(item))); + + /* compare ks backwards + * idx represents the word index to be written (hole position). + * We cannot copy the elements when searching since we might + * have an equal key. So we search for just the index first =( + * + * It is perhaps faster to move the values in the first pass. + * Check for uniqueness during insert phase and then have a + * second phase compacting the map if duplicates are found + * during insert... or do some other sort; shell-sort perhaps. + */ + + idx = size; + + while(idx > 0 && (c = CMP_TERM(kv[1],ks[idx-1])) < 0) { idx--; } + + if (c == 0) { + /* last compare was equal, + * i.e. we have to release memory + * and overwrite that key/value + */ + ks[idx-1] = kv[1]; + vs[idx-1] = kv[2]; + unused_size++; + } else { + Uint i = size; + while(i > idx) { + ks[i] = ks[i-1]; + vs[i] = vs[i-1]; + i--; + } + ks[idx] = kv[1]; + vs[idx] = kv[2]; + size++; + } + item = CDR(list_val(item)); + } + + if (unused_size) { + /* the key tuple is embedded in the heap + * write a bignum to clear it. 
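+ * The unused_size - 1 arity in make_pos_bignum_header() below makes the
+ * header plus the dead key slots look like one boxed term covering exactly
+ * unused_size words, so heap walkers and the garbage collector skip the
+ * hole as a single opaque object, while the surplus value words at the top
+ * of the heap can simply be handed back with HRelease().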
+ */ + /* release values as normal since they are on the top of the heap */ + + ks[size] = make_pos_bignum_header(unused_size - 1); + HRelease(BIF_P, vs + size + unused_size, vs + size); + } + + *thp = make_arityval(size); + mp->size = size; + BIF_RET(res); + } + +error: + + BIF_ERROR(BIF_P, BADARG); +} + +/* maps:is_key/2 + */ + +BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) { + if (is_map(BIF_ARG_2)) { + Eterm *ks, key; + map_t *mp; + Uint n,i; + + mp = (map_t*)map_val(BIF_ARG_2); + key = BIF_ARG_1; + n = map_get_size(mp); + ks = map_get_keys(mp); + + if (n == 0) + BIF_RET(am_false); + + if (is_immed(key)) { + for( i = 0; i < n; i++) { + if (ks[i] == key) { + BIF_RET(am_true); + } + } + } + + for( i = 0; i < n; i++) { + if (EQ(ks[i], key)) { + BIF_RET(am_true); + } + } + BIF_RET(am_false); + } + BIF_ERROR(BIF_P, BADARG); +} + +/* maps:keys/1 + */ + +BIF_RETTYPE maps_keys_1(BIF_ALIST_1) { + if (is_map(BIF_ARG_1)) { + Eterm *hp, *ks, res = NIL; + map_t *mp; + Uint n; + + mp = (map_t*)map_val(BIF_ARG_1); + n = map_get_size(mp); + + if (n == 0) + BIF_RET(res); + + hp = HAlloc(BIF_P, (2 * n)); + ks = map_get_keys(mp); + + while(n--) { + res = CONS(hp, ks[n], res); hp += 2; + } + + BIF_RET(res); + } + BIF_ERROR(BIF_P, BADARG); +} +/* maps:merge/2 + */ + +BIF_RETTYPE maps_merge_2(BIF_ALIST_2) { + if (is_map(BIF_ARG_1) && is_map(BIF_ARG_2)) { + Eterm *hp,*thp; + Eterm tup; + Eterm *ks,*vs,*ks1,*vs1,*ks2,*vs2; + map_t *mp1,*mp2,*mp_new; + Uint n1,n2,i1,i2,need,unused_size=0; + int c = 0; + + mp1 = (map_t*)map_val(BIF_ARG_1); + mp2 = (map_t*)map_val(BIF_ARG_2); + n1 = map_get_size(mp1); + n2 = map_get_size(mp2); + + need = MAP_HEADER_SIZE + 1 + 2*(n1 + n2); + + hp = HAlloc(BIF_P, need); + thp = hp; + tup = make_tuple(thp); + ks = hp + 1; hp += 1 + n1 + n2; + mp_new = (map_t*)hp; hp += MAP_HEADER_SIZE; + vs = hp; hp += n1 + n2; + + mp_new->thing_word = MAP_HEADER; + mp_new->size = 0; + mp_new->keys = tup; + + i1 = 0; i2 = 0; + ks1 = map_get_keys(mp1); + vs1 = map_get_values(mp1); + ks2 = map_get_keys(mp2); + vs2 = map_get_values(mp2); + + while(i1 < n1 && i2 < n2) { + c = CMP_TERM(ks1[i1],ks2[i2]); + if ( c == 0) { + /* use righthand side arguments map value, + * but advance both maps */ + *ks++ = ks2[i2]; + *vs++ = vs2[i2]; + i1++, i2++, unused_size++; + } else if ( c < 0) { + *ks++ = ks1[i1]; + *vs++ = vs1[i1]; + i1++; + } else { + *ks++ = ks2[i2]; + *vs++ = vs2[i2]; + i2++; + } + } + + /* copy remaining */ + while (i1 < n1) { + *ks++ = ks1[i1]; + *vs++ = vs1[i1]; + i1++; + } + + while (i2 < n2) { + *ks++ = ks2[i2]; + *vs++ = vs2[i2]; + i2++; + } + + if (unused_size) { + /* the key tuple is embedded in the heap, write a bignum to clear it. 
+ * + * release values as normal since they are on the top of the heap + * size = n1 + n2 - unused_size + */ + + *ks = make_pos_bignum_header(unused_size - 1); + HRelease(BIF_P, vs + unused_size, vs); + } + + mp_new->size = n1 + n2 - unused_size; + *thp = make_arityval(n1 + n2 - unused_size); + + BIF_RET(make_map(mp_new)); + } + BIF_ERROR(BIF_P, BADARG); +} +/* maps:new/0 + */ + +BIF_RETTYPE maps_new_0(BIF_ALIST_0) { + Eterm* hp; + Eterm tup; + map_t *mp; + + hp = HAlloc(BIF_P, (MAP_HEADER_SIZE + 1)); + tup = make_tuple(hp); + *hp++ = make_arityval(0); + + mp = (map_t*)hp; + mp->thing_word = MAP_HEADER; + mp->size = 0; + mp->keys = tup; + + BIF_RET(make_map(mp)); +} + +/* maps:put/3 + */ + +Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) { + Sint n,i; + Sint c = 0; + Eterm* hp, *shp; + Eterm *ks,*vs, res, tup; + map_t *mp = (map_t*)map_val(map); + + n = map_get_size(mp); + + if (n == 0) { + hp = HAlloc(p, MAP_HEADER_SIZE + 1 + 2); + tup = make_tuple(hp); + *hp++ = make_arityval(1); + *hp++ = key; + res = make_map(hp); + *hp++ = MAP_HEADER; + *hp++ = 1; + *hp++ = tup; + *hp++ = value; + + return res; + } + + ks = map_get_keys(mp); + vs = map_get_values(mp); + + /* only allocate for values, + * assume key-tuple will be intact + */ + + hp = HAlloc(p, MAP_HEADER_SIZE + n); + shp = hp; /* save hp, used if optimistic update fails */ + res = make_map(hp); + *hp++ = MAP_HEADER; + *hp++ = n; + *hp++ = mp->keys; + + if (is_immed(key)) { + for( i = 0; i < n; i ++) { + if (ks[i] == key) { + *hp++ = value; + vs++; + c = 1; + } else { + *hp++ = *vs++; + } + } + } else { + for( i = 0; i < n; i ++) { + if (EQ(ks[i], key)) { + *hp++ = value; + vs++; + c = 1; + } else { + *hp++ = *vs++; + } + } + } + + if (c) + return res; + + /* need to make a new tuple, + * use old hp since it needs to be recreated anyway. + */ + tup = make_tuple(shp); + *shp++ = make_arityval(n+1); + + hp = HAlloc(p, 3 + n + 1); + res = make_map(hp); + *hp++ = MAP_HEADER; + *hp++ = n + 1; + *hp++ = tup; + + ks = map_get_keys(mp); + vs = map_get_values(mp); + + ASSERT(n >= 0); + + /* copy map in order */ + while (n && ((c = CMP_TERM(*ks, key)) < 0)) { + *shp++ = *ks++; + *hp++ = *vs++; + n--; + } + + *shp++ = key; + *hp++ = value; + + ASSERT(n >= 0); + + while(n--) { + *shp++ = *ks++; + *hp++ = *vs++; + } + /* we have one word remaining + * this will work out fine once we get the size word + * in the header. + */ + *shp = make_pos_bignum_header(0); + return res; +} + +BIF_RETTYPE maps_put_3(BIF_ALIST_3) { + if (is_map(BIF_ARG_3)) { + BIF_RET(erts_maps_put(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3)); + } + BIF_ERROR(BIF_P, BADARG); +} + +/* maps:remove/2 + */ + +int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res) { + Sint n; + Uint need; + Eterm *hp_start; + Eterm *thp, *mhp; + Eterm *ks, *vs, tup; + map_t *mp = (map_t*)map_val(map); + + n = map_get_size(mp); + + if (n == 0) { + *res = map; + return 1; + } + + ks = map_get_keys(mp); + vs = map_get_values(mp); + + /* Assume key exists. + * Release allocated if it didn't. + * Allocate key tuple first. 
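+ * The allocation below covers both new objects at once: the key tuple
+ * needs an arity word plus n-1 keys (n + 1 - 1 words) and the map needs
+ * its MAP_HEADER_SIZE (3) words plus n-1 values (3 + n - 1 words),
+ * 2n + 2 words in total; for n = 3 that is 8 heap words. If the key is
+ * not found, the whole block is released again with HRelease().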
+ */ + + need = n + 1 - 1 + 3 + n - 1; /* tuple - 1 + map - 1 */ + hp_start = HAlloc(p, need); + thp = hp_start; + mhp = thp + n; /* offset with tuple heap size */ + + tup = make_tuple(thp); + *thp++ = make_arityval(n - 1); + + *res = make_map(mhp); + *mhp++ = MAP_HEADER; + *mhp++ = n - 1; + *mhp++ = tup; + + if (is_immed(key)) { + while(n--) { + if (*ks == key) { + goto found_key; + } else { + *mhp++ = *vs++; + *thp++ = *ks++; + } + } + } else { + while(n--) { + if (EQ(*ks, key)) { + goto found_key; + } else { + *mhp++ = *vs++; + *thp++ = *ks++; + } + } + } + + /* Not found, remove allocated memory + * and return previous map. + */ + HRelease(p, hp_start + need, hp_start); + + *res = map; + return 1; + +found_key: + /* Copy rest of keys and values */ + if (n) { + sys_memcpy(mhp, vs+1, n*sizeof(Eterm)); + sys_memcpy(thp, ks+1, n*sizeof(Eterm)); + } + return 1; +} + +BIF_RETTYPE maps_remove_2(BIF_ALIST_2) { + if (is_map(BIF_ARG_2)) { + Eterm res; + if (erts_maps_remove(BIF_P, BIF_ARG_1, BIF_ARG_2, &res)) { + BIF_RET(res); + } + } + BIF_ERROR(BIF_P, BADARG); +} + +/* maps:update/3 + */ + +int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res) { + Sint n,i; + Eterm* hp,*shp; + Eterm *ks,*vs; + map_t *mp = (map_t*)map_val(map); + + if ((n = map_get_size(mp)) == 0) { + return 0; + } + + ks = map_get_keys(mp); + vs = map_get_values(mp); + + /* only allocate for values, + * assume key-tuple will be intact + */ + + hp = HAlloc(p, MAP_HEADER_SIZE + n); + shp = hp; + *hp++ = MAP_HEADER; + *hp++ = n; + *hp++ = mp->keys; + + if (is_immed(key)) { + for( i = 0; i < n; i ++) { + if (ks[i] == key) { + goto found_key; + } else { + *hp++ = *vs++; + } + } + } else { + for( i = 0; i < n; i ++) { + if (EQ(ks[i], key)) { + goto found_key; + } else { + *hp++ = *vs++; + } + } + } + + HRelease(p, shp + MAP_HEADER_SIZE + n, shp); + return 0; + +found_key: + *hp++ = value; + vs++; + if (++i < n) + sys_memcpy(hp, vs, (n - i)*sizeof(Eterm)); + *res = make_map(shp); + return 1; +} + +BIF_RETTYPE maps_update_3(BIF_ALIST_3) { + if (is_map(BIF_ARG_3)) { + Eterm res; + if (erts_maps_update(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, &res)) { + BIF_RET(res); + } + } + BIF_ERROR(BIF_P, BADARG); +} + + +/* maps:values/1 + */ + +BIF_RETTYPE maps_values_1(BIF_ALIST_1) { + if (is_map(BIF_ARG_1)) { + Eterm *hp, *vs, res = NIL; + map_t *mp; + Uint n; + + mp = (map_t*)map_val(BIF_ARG_1); + n = map_get_size(mp); + + if (n == 0) + BIF_RET(res); + + hp = HAlloc(BIF_P, (2 * n)); + vs = map_get_values(mp); + + while(n--) { + res = CONS(hp, vs[n], res); hp += 2; + } + + BIF_RET(res); + } + BIF_ERROR(BIF_P, BADARG); +} + +int erts_validate_and_sort_map(map_t* mp) +{ + Eterm *ks = map_get_keys(mp); + Eterm *vs = map_get_values(mp); + Uint sz = map_get_size(mp); + Uint ix,jx; + Eterm tmp; + int c; + + /* sort */ + + for (ix = 1; ix < sz; ix++) { + jx = ix; + while( jx > 0 && (c = CMP_TERM(ks[jx],ks[jx-1])) <= 0 ) { + /* identical key -> error */ + if (c == 0) return 0; + + tmp = ks[jx]; + ks[jx] = ks[jx - 1]; + ks[jx - 1] = tmp; + + tmp = vs[jx]; + vs[jx] = vs[jx - 1]; + vs[jx - 1] = tmp; + + jx--; + } + } + return 1; +} diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h new file mode 100644 index 0000000000..cfacb2ec28 --- /dev/null +++ b/erts/emulator/beam/erl_map.h @@ -0,0 +1,72 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2014. All Rights Reserved. 
+ * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + + +#ifndef __ERL_MAP_H__ +#define __ERL_MAP_H__ + +#include "sys.h" +/* MAP */ + +typedef struct map_s { + Eterm thing_word; + Uint size; + Eterm keys; /* tuple */ +} map_t; +/* map node + * + * ----------- + * Eterm THING + * Uint size + * Eterm Keys -> {K1, K2, K3, ..., Kn} where n = size + * ---- + * Eterm V1 + * ... + * Eterm Vn, where n = size + * ----------- + */ + + + +/* erl_term.h stuff */ +#define make_map(x) make_boxed((Eterm*)(x)) +#define make_map_rel(x, BASE) make_boxed_rel((Eterm*)(x),(BASE)) +#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val((x)))) +#define is_map_rel(RTERM,BASE) is_map(rterm2wterm(RTERM,BASE)) +#define is_not_map(x) (!is_map((x))) +#define is_map_header(x) (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_MAP) +#define header_is_map(x) ((((x) & (_HEADER_SUBTAG_MASK)) == MAP_SUBTAG)) +#define map_val(x) (_unchecked_boxed_val((x))) +#define map_val_rel(RTERM, BASE) map_val(rterm2wterm(RTERM, BASE)) + +#define map_get_values(x) (((Eterm *)(x)) + 3) +#define map_get_keys(x) (((Eterm *)tuple_val(((map_t *)(x))->keys)) + 1) +#define map_get_size(x) (((map_t*)(x))->size) + +#define MAP_HEADER _make_header(1,_TAG_HEADER_MAP) +#define MAP_HEADER_SIZE (sizeof(map_t) / sizeof(Eterm)) + +Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map); +int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res); +int erts_maps_find(Eterm key, Eterm map, Eterm *value); +int erts_maps_get(Eterm key, Eterm map, Eterm *value); +int erts_maps_remove(Process *p, Eterm key, Eterm map, Eterm *res); +int erts_validate_and_sort_map(map_t* map); +#endif + diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 771eba431f..0f3bb8d281 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -46,6 +46,11 @@ typedef struct erl_off_heap { Uint64 overhead; /* Administrative overhead (used to force GC). */ } ErlOffHeap; +#define ERTS_INIT_OFF_HEAP(OHP) \ + do { \ + (OHP)->first = NULL; \ + (OHP)->overhead = 0; \ + } while (0) #include "external.h" #include "erl_process.h" diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index 48f8be8dd3..40860e141c 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2009-2013. All Rights Reserved. + * Copyright Ericsson AB 2009-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -31,9 +31,11 @@ #include "bif.h" #include "error.h" #include "big.h" +#include "erl_map.h" #include "beam_bp.h" #include "erl_thr_progress.h" #include "dtrace-wrapper.h" +#include "erl_process.h" #if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP)) #define HAVE_USE_DTRACE 1 #endif @@ -874,7 +876,7 @@ int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail) int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len) { if (is_not_list(term) && is_not_nil(term)) return 0; - *len = list_length(term); + *len = erts_list_length(term); return 1; } @@ -1213,7 +1215,8 @@ static void close_lib(struct erl_module_nif* lib) lib->entry->unload(&env, lib->priv_data); post_nif_noproc(&env); } - erts_sys_ddll_close(lib->handle); + if (!erts_is_static_nif(lib->handle)) + erts_sys_ddll_close(lib->handle); lib->handle = NULL; } @@ -1234,6 +1237,19 @@ static void steal_resource_type(ErlNifResourceType* type) } } +/* The opened_rt_list is used by enif_open_resource_type() + * in order to rollback "creates" and "take-overs" in case the load fails. + */ +struct opened_resource_type +{ + struct opened_resource_type* next; + + ErlNifResourceFlags op; + ErlNifResourceType* type; + ErlNifResourceDtor* new_dtor; +}; +static struct opened_resource_type* opened_rt_list = NULL; + ErlNifResourceType* enif_open_resource_type(ErlNifEnv* env, const char* module_str, @@ -1255,22 +1271,21 @@ enif_open_resource_type(ErlNifEnv* env, if (type == NULL) { if (flags & ERL_NIF_RT_CREATE) { type = erts_alloc(ERTS_ALC_T_NIF, - sizeof(struct enif_resource_type_t)); - type->dtor = dtor; + sizeof(struct enif_resource_type_t)); type->module = module_am; type->name = name_am; erts_refc_init(&type->refc, 1); - type->owner = env->mod_nif; - type->prev = &resource_type_list; - type->next = resource_type_list.next; - type->next->prev = type; - type->prev->next = type; op = ERL_NIF_RT_CREATE; + #ifdef DEBUG + type->dtor = (void*)1; + type->owner = (void*)2; + type->prev = (void*)3; + type->next = (void*)4; + #endif } } else { - if (flags & ERL_NIF_RT_TAKEOVER) { - steal_resource_type(type); + if (flags & ERL_NIF_RT_TAKEOVER) { op = ERL_NIF_RT_TAKEOVER; } else { @@ -1278,12 +1293,13 @@ enif_open_resource_type(ErlNifEnv* env, } } if (type != NULL) { - type->owner = env->mod_nif; - type->dtor = dtor; - if (type->dtor != NULL) { - erts_refc_inc(&type->owner->rt_dtor_cnt, 1); - } - erts_refc_inc(&type->owner->rt_cnt, 1); + struct opened_resource_type* ort = erts_alloc(ERTS_ALC_T_TMP, + sizeof(struct opened_resource_type)); + ort->op = op; + ort->type = type; + ort->new_dtor = dtor; + ort->next = opened_rt_list; + opened_rt_list = ort; } if (tried != NULL) { *tried = op; @@ -1291,6 +1307,51 @@ enif_open_resource_type(ErlNifEnv* env, return type; } +static void commit_opened_resource_types(struct erl_module_nif* lib) +{ + while (opened_rt_list) { + struct opened_resource_type* ort = opened_rt_list; + + ErlNifResourceType* type = ort->type; + + if (ort->op == ERL_NIF_RT_CREATE) { + type->prev = &resource_type_list; + type->next = resource_type_list.next; + type->next->prev = type; + type->prev->next = type; + } + else { /* ERL_NIF_RT_TAKEOVER */ + steal_resource_type(type); + } + + type->owner = lib; + type->dtor = ort->new_dtor; + + if (type->dtor != NULL) { + erts_refc_inc(&lib->rt_dtor_cnt, 1); + } + erts_refc_inc(&lib->rt_cnt, 1); + + opened_rt_list = 
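+ /* enif_open_resource_type() above therefore only stages creates and
+  * take-overs on opened_rt_list; this function links them into
+  * resource_type_list once the load/upgrade/reload callback has succeeded,
+  * and rollback_opened_resource_types() below frees them when it has not.
+  * A minimal sketch of the NIF load callback this protects (the module
+  * name "frob" and its dtor are hypothetical):
+  *
+  *     static ErlNifResourceType* frob_rt;
+  *     static void frob_dtor(ErlNifEnv* env, void* obj) { }
+  *     static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
+  *     {
+  *         frob_rt = enif_open_resource_type(env, NULL, "frob", frob_dtor,
+  *                                           ERL_NIF_RT_CREATE, NULL);
+  *         return frob_rt == NULL; // non-zero veto rolls the open back
+  *     }
+  */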
ort->next; + erts_free(ERTS_ALC_T_TMP, ort); + } +} + +static void rollback_opened_resource_types(void) +{ + while (opened_rt_list) { + struct opened_resource_type* ort = opened_rt_list; + + if (ort->op == ERL_NIF_RT_CREATE) { + erts_free(ERTS_ALC_T_NIF, ort->type); + } + + opened_rt_list = ort->next; + erts_free(ERTS_ALC_T_TMP, ort); + } +} + + static void nif_resource_dtor(Binary* bin) { ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(bin); @@ -1316,6 +1377,8 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t size) { Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor); ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin); + + ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */ resource->type = type; erts_refc_inc(&bin->refc, 1); #ifdef DEBUG @@ -1406,7 +1469,7 @@ void* enif_dlopen(const char* lib, ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; void* handle; void* init_func; - if (erts_sys_ddll_open2(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) { + if (erts_sys_ddll_open(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) { if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) { erts_sys_ddll_call_nif_init(init_func); } @@ -1450,6 +1513,347 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent) return ERTS_BIF_REDS_LEFT(env->proc) == 0; } +#ifdef ERTS_DIRTY_SCHEDULERS + +static void +alloc_proc_psd(Process* proc, Export **ep) +{ + int i; + if (!*ep) { + *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(Export)); + sys_memset((void*) *ep, 0, sizeof(Export)); + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + (*ep)->addressv[i] = &(*ep)->code[3]; + } + (*ep)->code[3] = (BeamInstr) em_call_nif; + } + (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, *ep); +} + +static ERL_NIF_TERM +execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array; + ERL_NIF_TERM result = (ERL_NIF_TERM) reg[0]; + typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM); + FinalizerFP fp; +#if HAVE_INT64 && SIZEOF_LONG != 8 + ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64)); + enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp); +#else + ASSERT(sizeof(fp) <= sizeof(unsigned long)); + enif_get_ulong(env, reg[1], (unsigned long *) &fp); +#endif + return (*fp)(env, result); +} + +#endif /* ERTS_DIRTY_SCHEDULERS */ + +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + +ERL_NIF_TERM +enif_schedule_dirty_nif(ErlNifEnv* env, int flags, + ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]), + int argc, const ERL_NIF_TERM argv[]) +{ +#ifdef USE_THREADS + erts_aint32_t state, n, a; + Process* proc = env->proc; + Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; + Export* ep = NULL; + int i; + + int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND)); + if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND) + return enif_make_badarg(env); + + a = erts_smp_atomic32_read_acqb(&proc->state); + while (1) { + n = state = a; + /* + * clear any current dirty flags and dirty queue indicators, + * in case the application is shifting a job from one type + * of dirty scheduler to the other + */ + n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC + |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q); + if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND) + n |= ERTS_PSFLG_DIRTY_CPU_PROC; + else + n |= ERTS_PSFLG_DIRTY_IO_PROC; + a = 
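+ /* A minimal sketch of how a NIF library would use this experimental API,
+  * with do_heavy_work() and the wrapper names being hypothetical:
+  *
+  *     static ERL_NIF_TERM heavy(ErlNifEnv* env, int argc,
+  *                               const ERL_NIF_TERM argv[])
+  *     {
+  *         ERL_NIF_TERM result = do_heavy_work(env, argc, argv);
+  *         return enif_schedule_dirty_nif_finalizer(env, result,
+  *                                                  enif_dirty_nif_finalizer);
+  *     }
+  *
+  *     static ERL_NIF_TERM my_nif(ErlNifEnv* env, int argc,
+  *                                const ERL_NIF_TERM argv[])
+  *     {
+  *         return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND,
+  *                                        heavy, argc, argv);
+  *     }
+  *
+  * my_nif() traps so that heavy() runs on a dirty CPU scheduler, and the
+  * finalizer then delivers the result back on a normal scheduler.
+  */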
erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state); + if (a == state) + break; + } + if (!(ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc))) + alloc_proc_psd(proc, &ep); + ERTS_VBUMP_ALL_REDS(proc); + ep->code[2] = argc; + for (i = 0; i < argc; i++) { + reg[i] = (Eterm) argv[i]; + } + proc->i = (BeamInstr*) ep->addressv[0]; + ep->code[4] = (BeamInstr) fp; + proc->freason = TRAP; + + return THE_NON_VALUE; +#else + return (*fp)(env, argc, argv); +#endif +} + +ERL_NIF_TERM +enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result, + ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM)) +{ +#ifdef USE_THREADS + Process* proc = env->proc; + Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; + Export* ep; + + erts_smp_atomic32_read_band_mb(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + |ERTS_PSFLG_DIRTY_IO_PROC + |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q + |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); + if (!(ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc))) + alloc_proc_psd(proc, &ep); + ERTS_VBUMP_ALL_REDS(proc); + ep->code[2] = 2; + reg[0] = (Eterm) result; +#if HAVE_INT64 && SIZEOF_LONG != 8 + ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64)); + reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp); +#else + ASSERT(sizeof(fp) <= sizeof(unsigned long)); + reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp); +#endif + proc->i = (BeamInstr*) ep->addressv[0]; + ep->code[4] = (BeamInstr) execute_dirty_nif_finalizer; + proc->freason = TRAP; + + return THE_NON_VALUE; +#else + return (*fp)(env, result); +#endif +} + +/* A simple finalizer that just returns its result argument */ +ERL_NIF_TERM +enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result) +{ + return result; +} + +int +enif_is_on_dirty_scheduler(ErlNifEnv* env) +{ + return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data); +} + +int +enif_have_dirty_schedulers() +{ +#ifdef USE_THREADS + return 1; +#else + return 0; +#endif +} + +#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */ + +/* Maps */ + +int enif_is_map(ErlNifEnv* env, ERL_NIF_TERM term) +{ + return is_map(term); +} + +int enif_get_map_size(ErlNifEnv* env, ERL_NIF_TERM term, size_t *size) +{ + if (is_map(term)) { + map_t *mp; + mp = (map_t*)map_val(term); + *size = map_get_size(mp); + return 1; + } + return 0; +} + +ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env) +{ + Eterm* hp = alloc_heap(env,MAP_HEADER_SIZE+1); + Eterm tup; + map_t *mp; + + tup = make_tuple(hp); + *hp++ = make_arityval(0); + mp = (map_t*)hp; + mp->thing_word = MAP_HEADER; + mp->size = 0; + mp->keys = tup; + + return make_map(mp); +} + +int enif_make_map_put(ErlNifEnv* env, + Eterm map_in, + Eterm key, + Eterm value, + Eterm *map_out) +{ + if (is_not_map(map_in)) { + return 0; + } + flush_env(env); + *map_out = erts_maps_put(env->proc, key, value, map_in); + cache_env(env); + return 1; +} + +int enif_get_map_value(ErlNifEnv* env, + Eterm map, + Eterm key, + Eterm *value) +{ + if (is_not_map(map)) { + return 0; + } + return erts_maps_get(key, map, value); +} + +int enif_make_map_update(ErlNifEnv* env, + Eterm map_in, + Eterm key, + Eterm value, + Eterm *map_out) +{ + int res; + if (is_not_map(map_in)) { + return 0; + } + + flush_env(env); + res = erts_maps_update(env->proc, key, value, map_in, map_out); + cache_env(env); + return res; +} + +int enif_make_map_remove(ErlNifEnv* env, + Eterm map_in, + Eterm key, + Eterm *map_out) +{ + int res; + if (is_not_map(map_in)) { + return 0; + } + flush_env(env); + res = erts_maps_remove(env->proc, key, map_in, map_out); + cache_env(env); + return res; +} + +int 
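+ /* Typical iteration over all key/value pairs with the iterator API
+  * implemented below (a minimal sketch):
+  *
+  *     ErlNifMapIterator iter;
+  *     ERL_NIF_TERM key, value;
+  *
+  *     if (enif_map_iterator_create(env, map, &iter,
+  *                                  ERL_NIF_MAP_ITERATOR_HEAD)) {
+  *         while (enif_map_iterator_get_pair(env, &iter, &key, &value)) {
+  *             // ... use key and value ...
+  *             enif_map_iterator_next(env, &iter);
+  *         }
+  *         enif_map_iterator_destroy(env, &iter);
+  *     }
+  */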
enif_map_iterator_create(ErlNifEnv *env, + Eterm map, + ErlNifMapIterator *iter, + ErlNifMapIteratorEntry entry) +{ + if (is_map(map)) { + map_t *mp = (map_t*)map_val(map); + size_t offset; + + switch (entry) { + case ERL_NIF_MAP_ITERATOR_HEAD: offset = 0; break; + case ERL_NIF_MAP_ITERATOR_TAIL: offset = map_get_size(mp) - 1; break; + default: goto error; + } + + /* empty maps are ok but will leave the iterator + * in bad shape. + */ + + iter->map = map; + iter->ks = ((Eterm *)map_get_keys(mp)) + offset; + iter->vs = ((Eterm *)map_get_values(mp)) + offset; + iter->t_limit = map_get_size(mp) + 1; + iter->idx = offset + 1; + + return 1; + } + +error: +#ifdef DEBUG + iter->map = THE_NON_VALUE; +#endif + return 0; +} + +void enif_map_iterator_destroy(ErlNifEnv *env, ErlNifMapIterator *iter) +{ + /* not used */ +#ifdef DEBUG + iter->map = THE_NON_VALUE; +#endif + +} + +int enif_map_iterator_is_tail(ErlNifEnv *env, ErlNifMapIterator *iter) +{ + ASSERT(iter && is_map(iter->map)); + ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1)); + return (iter->t_limit == 1 || iter->idx == iter->t_limit); +} + +int enif_map_iterator_is_head(ErlNifEnv *env, ErlNifMapIterator *iter) +{ + ASSERT(iter && is_map(iter->map)); + ASSERT(iter->idx >= 0 && (iter->idx <= map_get_size(map_val(iter->map)) + 1)); + return (iter->t_limit == 1 || iter->idx == 0); +} + + +int enif_map_iterator_next(ErlNifEnv *env, ErlNifMapIterator *iter) +{ + ASSERT(iter && is_map(iter->map)); + if (iter->idx < iter->t_limit) { + iter->idx++; + iter->ks++; + iter->vs++; + } + return (iter->idx != iter->t_limit); +} + +int enif_map_iterator_prev(ErlNifEnv *env, ErlNifMapIterator *iter) +{ + ASSERT(iter && is_map(iter->map)); + if (iter->idx > 0) { + iter->idx--; + iter->ks--; + iter->vs--; + } + return (iter->idx > 0); +} + +int enif_map_iterator_get_pair(ErlNifEnv *env, + ErlNifMapIterator *iter, + Eterm *key, + Eterm *value) +{ + ASSERT(iter && is_map(iter->map)); + if (iter->idx > 0 && iter->idx < iter->t_limit) { + ASSERT(iter->ks >= map_get_keys(map_val(iter->map)) && + iter->ks < (map_get_keys(map_val(iter->map)) + map_get_size(map_val(iter->map)))); + ASSERT(iter->vs >= map_get_values(map_val(iter->map)) && + iter->vs < (map_get_values(map_val(iter->map)) + map_get_size(map_val(iter->map)))); + *key = *(iter->ks); + *value = *(iter->vs); + return 1; + } + return 0; +} + /*************************************************************************** ** load_nif/2 ** ***************************************************************************/ @@ -1564,12 +1968,13 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) static const char upgrade[] = "upgrade"; char* lib_name = NULL; void* handle = NULL; - void* init_func; + void* init_func = NULL; ErlNifEntry* entry = NULL; ErlNifEnv env; - int len, i, err; + int i, err, encoding; Module* mod; Eterm mod_atom; + const Atom* mod_atomp; Eterm f_atom; BeamInstr* caller; ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; @@ -1578,18 +1983,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) struct erl_module_nif* lib = NULL; int reload_warning = 0; - len = list_length(BIF_ARG_1); - if (len < 0) { - BIF_ERROR(BIF_P, BADARG); + encoding = erts_get_native_filename_encoding(); + if (encoding == ERL_FILENAME_WIN_WCHAR) { + /* Do not convert the lib name to utf-16le yet, do that in win32 specific code */ + /* since lib_name is used in error messages */ + encoding = ERL_FILENAME_UTF8; } - - lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1); - - if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) 
{ - erts_free(ERTS_ALC_T_TMP, lib_name); + lib_name = erts_convert_filename_to_encoding(BIF_ARG_1, NULL, 0, + ERTS_ALC_T_TMP, 1, 0, encoding, + NULL, 0); + if (!lib_name) { BIF_ERROR(BIF_P, BADARG); } - lib_name[len] = '\0'; if (!erts_try_seize_code_write_permission(BIF_P)) { erts_free(ERTS_ALC_T_TMP, lib_name); @@ -1613,13 +2018,19 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) mod=erts_get_module(mod_atom, erts_active_code_ix()); ASSERT(mod != NULL); + mod_atomp = atom_tab(atom_val(mod_atom)); + init_func = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len); + if (init_func != NULL) + handle = init_func; + if (!in_area(caller, mod->curr.code, mod->curr.code_length)) { ASSERT(in_area(caller, mod->old.code, mod->old.code_length)); ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old " "module '%T' not allowed", mod_atom); } - else if ((err=erts_sys_ddll_open2(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) { + else if (init_func == NULL && + (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) { const char slogan[] = "Failed to load NIF library"; if (strstr(errdesc.str, lib_name) != NULL) { ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str); @@ -1628,7 +2039,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) ret = load_nif_error(BIF_P, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str); } } - else if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) { + else if (init_func == NULL && + erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) { ret = load_nif_error(BIF_P, bad_lib, "Failed to find library init" " function: '%s'", errdesc.str); @@ -1638,7 +2050,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful"); } else if (entry->major != ERL_NIF_MAJOR_VERSION - || entry->minor > ERL_NIF_MINOR_VERSION) { + || entry->minor > ERL_NIF_MINOR_VERSION + || (entry->major==2 && entry->minor == 5)) { /* experimental maps */ ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).", entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION); @@ -1687,9 +2100,15 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) lib->entry = entry; erts_refc_init(&lib->rt_cnt, 0); erts_refc_init(&lib->rt_dtor_cnt, 0); + ASSERT(opened_rt_list == NULL); lib->mod = mod; env.mod_nif = lib; - if (mod->curr.nif != NULL) { /* Reload */ + if (mod->curr.nif != NULL) { /*************** Reload ******************/ + /* + * Repeated load_nif calls from the same Erlang module instance ("reload") + * are deprecated and were only meant as a development feature, not to + * be used in production systems. 
(See warning below) + */ int k; lib->priv_data = mod->curr.nif->priv_data; @@ -1721,6 +2140,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful."); } else { + commit_opened_resource_types(lib); mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */ erts_unload_nif(mod->curr.nif); reload_warning = 1; @@ -1728,7 +2148,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } else { lib->priv_data = NULL; - if (mod->old.nif != NULL) { /* Upgrade */ + if (mod->old.nif != NULL) { /**************** Upgrade ***************/ void* prev_old_data = mod->old.nif->priv_data; if (entry->upgrade == NULL) { ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library."); @@ -1741,17 +2161,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) mod->old.nif->priv_data = prev_old_data; ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful."); } - /*else if (mod->old_nif->priv_data != prev_old_data) { - refresh_cached_nif_data(mod->old_code, mod->old_nif); - }*/ + else + commit_opened_resource_types(lib); } - else if (entry->load != NULL) { /* Initial load */ + else if (entry->load != NULL) { /********* Initial load ***********/ erts_pre_nif(&env, BIF_P, lib); veto = entry->load(&env, &lib->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful."); } + else + commit_opened_resource_types(lib); } } if (ret == am_ok) { @@ -1780,11 +2201,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } else { error: + rollback_opened_resource_types(); ASSERT(ret != am_ok); if (lib != NULL) { erts_free(ERTS_ALC_T_NIF, lib); } - if (handle != NULL) { + if (handle != NULL && !erts_is_static_nif(handle)) { erts_sys_ddll_close(handle); } erts_sys_ddll_free_error(&errdesc); diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index 8006741a63..c12ba4d554 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2009-2013. All Rights Reserved. + * Copyright Ericsson AB 2009-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -23,7 +23,11 @@ #ifndef __ERL_NIF_H__ #define __ERL_NIF_H__ +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif +#include "erl_native_features_config.h" #include "erl_drv_nif.h" /* Version history: @@ -34,9 +38,13 @@ ** 2.2: R14B03 enif_is_exception ** 2.3: R15 enif_make_reverse_list, enif_is_number ** 2.4: R16 enif_consume_timeslice +** 2.5: First experimental maps API additions (libs of this version are not compatible with any other VM) +** 2.5: R17 Maps API additions +** 2.6: R17 with maps +** R17 dirty schedulers */ #define ERL_NIF_MAJOR_VERSION 2 -#define ERL_NIF_MINOR_VERSION 4 +#define ERL_NIF_MINOR_VERSION 6 #include <stdlib.h> @@ -95,6 +103,8 @@ typedef unsigned long long ERL_NIF_TERM; # endif #endif +typedef ERL_NIF_TERM ERL_NIF_UINT; + struct enif_environment_t; typedef struct enif_environment_t ErlNifEnv; @@ -159,6 +169,29 @@ typedef int ErlNifTSDKey; typedef ErlDrvThreadOpts ErlNifThreadOpts; +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +typedef enum +{ + ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND, + ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND +}ErlNifDirtyTaskFlags; +#endif + +typedef struct /* All fields are internal and may change */ +{ + ERL_NIF_TERM map; + ERL_NIF_UINT t_limit; + ERL_NIF_UINT idx; + ERL_NIF_TERM *ks; + ERL_NIF_TERM *vs; + void* __spare__[2]; /* for future additions to be ABI compatible (same struct size) */ +} ErlNifMapIterator; + +typedef enum { + ERL_NIF_MAP_ITERATOR_HEAD = 1, + ERL_NIF_MAP_ITERATOR_TAIL = 2 +} ErlNifMapIteratorEntry; + #if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) # define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) RET_TYPE (*NAME) ARGS typedef struct { @@ -168,7 +201,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks; # undef ERL_NIF_API_FUNC_DECL #endif -#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) && !defined(STATIC_ERLANG_DRIVER) +#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) && !defined(STATIC_ERLANG_DRIVER) && !defined(STATIC_ERLANG_NIF) # define ERL_NIF_API_FUNC_MACRO(NAME) (WinDynNifCallbacks.NAME) # include "erl_nif_api_funcs.h" /* note that we have to keep ERL_NIF_API_FUNC_MACRO defined */ @@ -180,15 +213,22 @@ extern TWinDynNifCallbacks WinDynNifCallbacks; # undef ERL_NIF_API_FUNC_DECL #endif - #if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) # define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks; -# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks) +# ifdef STATIC_ERLANG_NIF +# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* MODNAME ## _nif_init(TWinDynNifCallbacks* callbacks) +# else +# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks) +# endif # define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)) #else # define ERL_NIF_INIT_GLOB # define ERL_NIF_INIT_BODY -# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void) +# ifdef STATIC_ERLANG_NIF +# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void) +# else +# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void) +# endif #endif diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h index 2f841645e1..d7c554e60b 100644 --- a/erts/emulator/beam/erl_nif_api_funcs.h +++ 
b/erts/emulator/beam/erl_nif_api_funcs.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2009-2013. All Rights Reserved. + * Copyright Ericsson AB 2009-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -141,6 +141,29 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term)); ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg)); ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg)); ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent)); +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[])); +ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM))); +ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM)); +ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); +ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void)); +#endif + +ERL_NIF_API_FUNC_DECL(int, enif_is_map, (ErlNifEnv* env, ERL_NIF_TERM term)); +ERL_NIF_API_FUNC_DECL(int, enif_get_map_size, (ErlNifEnv* env, ERL_NIF_TERM term, size_t *size)); +ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_new_map, (ErlNifEnv* env)); +ERL_NIF_API_FUNC_DECL(int, enif_make_map_put, (ErlNifEnv* env, ERL_NIF_TERM map_in, ERL_NIF_TERM key, ERL_NIF_TERM value, ERL_NIF_TERM* map_out)); +ERL_NIF_API_FUNC_DECL(int, enif_get_map_value, (ErlNifEnv* env, ERL_NIF_TERM map, ERL_NIF_TERM key, ERL_NIF_TERM* value)); +ERL_NIF_API_FUNC_DECL(int, enif_make_map_update, (ErlNifEnv* env, ERL_NIF_TERM map_in, ERL_NIF_TERM key, ERL_NIF_TERM value, ERL_NIF_TERM* map_out)); +ERL_NIF_API_FUNC_DECL(int, enif_make_map_remove, (ErlNifEnv* env, ERL_NIF_TERM map_in, ERL_NIF_TERM key, ERL_NIF_TERM* map_out)); +ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_create, (ErlNifEnv *env, ERL_NIF_TERM map, ErlNifMapIterator *iter, ErlNifMapIteratorEntry entry)); +ERL_NIF_API_FUNC_DECL(void, enif_map_iterator_destroy, (ErlNifEnv *env, ErlNifMapIterator *iter)); +ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_head, (ErlNifEnv *env, ErlNifMapIterator *iter)); +ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_tail, (ErlNifEnv *env, ErlNifMapIterator *iter)); +ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_next, (ErlNifEnv *env, ErlNifMapIterator *iter)); +ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_prev, (ErlNifEnv *env, ErlNifMapIterator *iter)); +ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMapIterator *iter, ERL_NIF_TERM *key, ERL_NIF_TERM *value)); + /* ** Add new entries here to keep compatibility on Windows!!! 
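The hunk above gives the exact signatures of the new map entry points; as a quick illustration of how they compose, here is a minimal sketch of a NIF that folds over a map using the iterator calls just declared. It is not part of the patch: sum_map_values and its module are hypothetical, and error handling is reduced to the essentials.

#include "erl_nif.h"

/* Sum the integer values of a map, e.g. #{a => 1, b => 2} gives 3. */
static ERL_NIF_TERM sum_map_values(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifMapIterator iter;
    ERL_NIF_TERM key, value;
    long sum = 0;

    if (argc != 1 || !enif_is_map(env, argv[0]))
        return enif_make_badarg(env);

    /* Start at the first association and walk towards the tail. */
    if (!enif_map_iterator_create(env, argv[0], &iter, ERL_NIF_MAP_ITERATOR_HEAD))
        return enif_make_badarg(env);

    while (enif_map_iterator_get_pair(env, &iter, &key, &value)) {
        long v;
        if (enif_get_long(env, value, &v))  /* skip non-integer values */
            sum += v;
        enif_map_iterator_next(env, &iter);
    }
    enif_map_iterator_destroy(env, &iter);

    return enif_make_long(env, sum);
}

Note that the iterator must always be destroyed, and that enif_map_iterator_get_pair() failing once the iterator has moved past the last association is what terminates the loop.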
@@ -266,6 +289,28 @@ ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent)); # define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen) # define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym) # define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice) +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif) +# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer) +# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer) +# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler) +# define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers) +#endif + +# define enif_is_map ERL_NIF_API_FUNC_MACRO(enif_is_map) +# define enif_get_map_size ERL_NIF_API_FUNC_MACRO(enif_get_map_size) +# define enif_make_new_map ERL_NIF_API_FUNC_MACRO(enif_make_new_map) +# define enif_make_map_put ERL_NIF_API_FUNC_MACRO(enif_make_map_put) +# define enif_get_map_value ERL_NIF_API_FUNC_MACRO(enif_get_map_value) +# define enif_make_map_update ERL_NIF_API_FUNC_MACRO(enif_make_map_update) +# define enif_make_map_remove ERL_NIF_API_FUNC_MACRO(enif_make_map_remove) +# define enif_map_iterator_create ERL_NIF_API_FUNC_MACRO(enif_map_iterator_create) +# define enif_map_iterator_destroy ERL_NIF_API_FUNC_MACRO(enif_map_iterator_destroy) +# define enif_map_iterator_is_head ERL_NIF_API_FUNC_MACRO(enif_map_iterator_is_head) +# define enif_map_iterator_is_tail ERL_NIF_API_FUNC_MACRO(enif_map_iterator_is_tail) +# define enif_map_iterator_next ERL_NIF_API_FUNC_MACRO(enif_map_iterator_next) +# define enif_map_iterator_prev ERL_NIF_API_FUNC_MACRO(enif_map_iterator_prev) +# define enif_map_iterator_get_pair ERL_NIF_API_FUNC_MACRO(enif_map_iterator_get_pair) /* ** Add new entries here diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index 547a42beb2..fb6048b41f 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -877,6 +877,11 @@ enqueue_port(ErtsRunQueue *runq, Port *pp) ASSERT(runq->ports.start && runq->ports.end); erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); + +#ifdef ERTS_SMP + if (runq->halt_in_progress) + erts_non_empty_runq(runq); +#endif } static ERTS_INLINE Port * @@ -1676,7 +1681,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) reds = ERTS_PORT_REDS_INPUT; ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0); DTRACE_DRIVER(driver_ready_input, pp); - /* NOTE some windows drivers use ->ready_input for input and output */ + /* NOTE some windows/ose drivers use ->ready_input + for input and output */ (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->u.alive.td.io.event); io_tasks_executed++; diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index 123253a057..1d30465ec9 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -185,11 +185,13 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id) ptsp->taskq.in.last = NULL; erts_smp_atomic32_init_nob(&ptsp->flags, 0); #ifdef ERTS_SMP + erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id, #ifdef ERTS_ENABLE_LOCK_COUNT - if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)) - lock_str = NULL; + (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) +#else + 1 #endif - erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id); + ); #endif } diff --git 
a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c index 436147749e..d18760dc43 100644 --- a/erts/emulator/beam/erl_printf_term.c +++ b/erts/emulator/beam/erl_printf_term.c @@ -24,6 +24,7 @@ #include "erl_printf_term.h" #include "sys.h" #include "big.h" +#include "erl_map.h" #define PRINT_CHAR(CNT, FN, ARG, C) \ do { \ @@ -216,14 +217,15 @@ static int print_atom_name(fmtfn_t fn, void* arg, Eterm atom, long *dcount) } -#define PRT_BAR ((Eterm) 0) -#define PRT_COMMA ((Eterm) 1) -#define PRT_CLOSE_LIST ((Eterm) 2) -#define PRT_CLOSE_TUPLE ((Eterm) 3) -#define PRT_TERM ((Eterm) 4) -#define PRT_ONE_CONS ((Eterm) 5) -#define PRT_PATCH_FUN_SIZE ((Eterm) 6) -#define PRT_LAST_ARRAY_ELEMENT ((Eterm) 7) /* Note! Must be last... */ +#define PRT_BAR ((Eterm) 0) +#define PRT_COMMA ((Eterm) 1) +#define PRT_CLOSE_LIST ((Eterm) 2) +#define PRT_CLOSE_TUPLE ((Eterm) 3) +#define PRT_ASSOC ((Eterm) 4) +#define PRT_TERM ((Eterm) 5) +#define PRT_ONE_CONS ((Eterm) 6) +#define PRT_PATCH_FUN_SIZE ((Eterm) 7) +#define PRT_LAST_ARRAY_ELEMENT ((Eterm) 8) /* Note! Must be last... */ static int print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount, @@ -260,6 +262,9 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount, case PRT_CLOSE_TUPLE: PRINT_CHAR(res, fn, arg, '}'); goto L_outer_loop; + case PRT_ASSOC: + PRINT_STRING(res, fn, arg, "=>"); + goto L_outer_loop; default: popped.word = WSTACK_POP(s); @@ -483,6 +488,37 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount, PRINT_CHAR(res, fn, arg, '>'); } break; + case MAP_DEF: + { + Uint n; + Eterm *ks, *vs; + map_t *mp = (map_t *)map_val(wobj); + n = map_get_size(mp); + ks = map_get_keys(mp); + vs = map_get_values(mp); + + PRINT_CHAR(res, fn, arg, '#'); + PRINT_CHAR(res, fn, arg, '{'); + WSTACK_PUSH(s, PRT_CLOSE_TUPLE); + if (n > 0) { + n--; + WSTACK_PUSH(s, vs[n]); + WSTACK_PUSH(s, PRT_TERM); + WSTACK_PUSH(s, PRT_ASSOC); + WSTACK_PUSH(s, ks[n]); + WSTACK_PUSH(s, PRT_TERM); + + while (n--) { + WSTACK_PUSH(s, PRT_COMMA); + WSTACK_PUSH(s, vs[n]); + WSTACK_PUSH(s, PRT_TERM); + WSTACK_PUSH(s, PRT_ASSOC); + WSTACK_PUSH(s, ks[n]); + WSTACK_PUSH(s, PRT_TERM); + } + } + } + break; default: PRINT_STRING(res, fn, arg, "<unknown:"); PRINT_POINTER(res, fn, arg, wobj); diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 1efd070afd..37e1d07107 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -44,6 +44,7 @@ #include "dtrace-wrapper.h" #include "erl_ptab.h" + #define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0) #define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2) @@ -53,7 +54,11 @@ #define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10) +#ifndef ERTS_SCHED_MIN_SPIN #define ERTS_SCHED_SPIN_UNTIL_YIELD 100 +#else +#define ERTS_SCHED_SPIN_UNTIL_YIELD 1 +#endif #define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40 #define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000 @@ -144,7 +149,12 @@ extern BeamInstr beam_exit[]; extern BeamInstr beam_continue_exit[]; int erts_sched_compact_load; +int erts_sched_balance_util = 0; Uint erts_no_schedulers; +#ifdef ERTS_DIRTY_SCHEDULERS +Uint erts_no_dirty_cpu_schedulers; +Uint erts_no_dirty_io_schedulers; +#endif #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY (4*1024*1024) #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_LAZY (512*1024) @@ -182,6 +192,13 @@ static ErtsAuxWorkData *aux_thread_aux_work_data; #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \ erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL)) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \ + erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_cpu_changing, (VAL)) +#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \ + erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_io_changing, (VAL)) +#endif + #else #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \ @@ -192,6 +209,23 @@ do { \ ASSERT(old_val__ == (OLD_VAL)); \ } while (0) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \ +do { \ + erts_aint32_t old_val__; \ + old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_cpu_changing, \ + (VAL)); \ + ASSERT(old_val__ == (OLD_VAL)); \ +} while (0) +#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \ +do { \ + erts_aint32_t old_val__; \ + old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_io_changing, \ + (VAL)); \ + ASSERT(old_val__ == (OLD_VAL)); \ +} while (0) +#endif + #endif @@ -201,11 +235,29 @@ static struct { int online; int curr_online; int wait_curr_online; +#ifdef ERTS_DIRTY_SCHEDULERS + int dirty_cpu_online; + int dirty_cpu_curr_online; + int dirty_cpu_wait_curr_online; + int dirty_io_online; + int dirty_io_curr_online; + int dirty_io_wait_curr_online; +#endif erts_smp_atomic32_t changing; erts_smp_atomic32_t active; +#ifdef ERTS_DIRTY_SCHEDULERS + erts_smp_atomic32_t dirty_cpu_changing; + erts_smp_atomic32_t dirty_cpu_active; + erts_smp_atomic32_t dirty_io_changing; + erts_smp_atomic32_t dirty_io_active; +#endif struct { int ongoing; long wait_active; +#ifdef ERTS_DIRTY_SCHEDULERS + long dirty_cpu_wait_active; + long dirty_io_wait_active; +#endif ErtsProcList *procs; } msb; /* Multi Scheduling Block */ } schdlr_sspnd; @@ -258,6 +310,10 @@ ErtsAlignedRunQueue *erts_aligned_run_queues; Uint erts_no_run_queues; ErtsAlignedSchedulerData *erts_aligned_scheduler_data; +#ifdef ERTS_DIRTY_SCHEDULERS +ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data; +ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data; +#endif typedef union { ErtsSchedulerSleepInfo ssi; @@ -265,6 +321,12 @@ typedef union { } ErtsAlignedSchedulerSleepInfo; static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info; +#ifdef ERTS_DIRTY_SCHEDULERS +#ifdef ERTS_SMP +static ErtsAlignedSchedulerSleepInfo 
*aligned_dirty_cpu_sched_sleep_info; +static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info; +#endif +#endif static Uint last_reductions; static Uint last_exact_reductions; @@ -283,6 +345,40 @@ struct erts_system_profile_flags_t erts_system_profile_flags; #error "Need to store process_count in another type" #endif +typedef enum { + ERTS_PSTT_GC, /* Garbage Collect */ + ERTS_PSTT_CPC /* Check Process Code */ +} ErtsProcSysTaskType; + +#define ERTS_MAX_PROC_SYS_TASK_ARGS 2 + +struct ErtsProcSysTask_ { + ErtsProcSysTask *next; + ErtsProcSysTask *prev; + ErtsProcSysTaskType type; + Eterm requester; + Eterm reply_tag; + Eterm req_id; + Uint req_id_sz; + Eterm arg[ERTS_MAX_PROC_SYS_TASK_ARGS]; + ErlOffHeap off_heap; + Eterm heap[1]; +}; + +#define ERTS_PROC_SYS_TASK_SIZE(HSz) \ + (sizeof(ErtsProcSysTask) - sizeof(Eterm) + sizeof(Eterm)*(HSz)) + +struct ErtsProcSysTaskQs_ { + int qmask; + int ncount; + ErtsProcSysTask *q[ERTS_NO_PROC_PRIO_LEVELS]; +}; + +ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proc_sys_task_queues, + ErtsProcSysTaskQs, + 50, + ERTS_ALC_T_PROC_SYS_TSK_QS) + ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list, ErtsMiscOpList, 10, @@ -297,6 +393,16 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist, (ASSERT(-1 <= ((int) (IX)) \ && ((int) (IX)) < ((int) erts_no_schedulers)), \ &aligned_sched_sleep_info[(IX)].ssi) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \ + (ASSERT(0 <= ((int) (IX)) \ + && ((int) (IX)) < ((int) erts_no_dirty_cpu_schedulers)), \ + &aligned_dirty_cpu_sched_sleep_info[(IX)].ssi) +#define ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(IX) \ + (ASSERT(0 <= ((int) (IX)) \ + && ((int) (IX)) < ((int) erts_no_dirty_io_schedulers)), \ + &aligned_dirty_io_sched_sleep_info[(IX)].ssi) +#endif #define ERTS_FOREACH_RUNQ(RQVAR, DO) \ do { \ @@ -353,6 +459,14 @@ static void aux_work_timeout_early_init(int no_schedulers); static void aux_work_timeout_late_init(void); static void setup_aux_work_timer(void); +static int execute_sys_tasks(Process *c_p, + erts_aint32_t *statep, + int in_reds); +static int cleanup_sys_tasks(Process *c_p, + erts_aint32_t in_state, + int in_reds); + + #if defined(DEBUG) || 0 #define ERTS_DBG_CHK_AUX_WORK_VAL(V) dbg_chk_aux_work_val((V)) static void @@ -399,7 +513,7 @@ dbg_chk_aux_work_val(erts_aint32_t value) #ifdef ERTS_SMP static void handle_pending_exiters(ErtsProcList *); - +static void wake_scheduler(ErtsRunQueue *rq); #endif #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) @@ -439,7 +553,7 @@ void erts_pre_init_process(void) { #ifdef USE_THREADS - erts_tsd_key_create(&sched_data_key); + erts_tsd_key_create(&sched_data_key, "erts_sched_data_key"); #endif #ifdef ERTS_ENABLE_LOCK_CHECK @@ -471,6 +585,18 @@ erts_pre_init_process(void) erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks = ERTS_PSD_CALL_TIME_BP_SET_LOCKS; + erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].get_locks + = ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks + = ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS; + +#ifdef ERTS_DIRTY_SCHEDULERS + erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks + = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks + = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS; +#endif + /* Check that we have locks for all entries */ for (ix = 0; ix < ERTS_PSD_SIZE; ix++) { ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks); @@ -561,6 +687,7 @@ erts_late_init_process(void) static void 
init_sched_wall_time(ErtsSchedWallTime *swtp) { + swtp->need = erts_sched_balance_util; swtp->enabled = 0; swtp->start = 0; swtp->working.total = 0; @@ -583,27 +710,253 @@ sched_wall_time_ts(void) #endif } +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + +#ifdef ARCH_64 + +static ERTS_INLINE Uint64 +aschedtime_read(ErtsAtomicSchedTime *var) +{ + return (Uint64) erts_atomic_read_nob((erts_atomic_t *) var); +} + +static ERTS_INLINE void +aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val) +{ + erts_atomic_set_nob((erts_atomic_t *) var, (erts_aint_t) val); +} + +static ERTS_INLINE void +aschedtime_init(ErtsAtomicSchedTime *var) +{ + erts_atomic_init_nob((erts_atomic_t *) var, (erts_aint_t) 0); +} + +#elif defined(ARCH_32) + +static ERTS_INLINE Uint64 +aschedtime_read(ErtsAtomicSchedTime *var) +{ + erts_dw_aint_t dw; + erts_dw_atomic_read_nob((erts_dw_atomic_t *) var, &dw); +#ifdef ETHR_SU_DW_NAINT_T__ + return (Uint64) dw.dw_sint; +#else + { + Uint64 res; + res = (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_HIGH_WORD]); + res <<= 32; + res |= (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_LOW_WORD]); + return res; + } +#endif +} + +static ERTS_INLINE void +aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val) +{ + erts_dw_aint_t dw; +#ifdef ETHR_SU_DW_NAINT_T__ + dw.dw_sint = (ETHR_SU_DW_NAINT_T__) val; +#else + dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff); + dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff); +#endif + erts_dw_atomic_set_nob((erts_dw_atomic_t *) var, &dw); +} + +static ERTS_INLINE void +aschedtime_init(ErtsAtomicSchedTime *var) +{ + erts_dw_aint_t dw; + dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) 0; + dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) 0; + erts_dw_atomic_init_nob((erts_dw_atomic_t *) var, &dw); +} + +#else +# error :-/ +#endif + +#define ERTS_GET_AVG_MAX_UNLOCKED_TRY 50 +#define ERTS_SCHED_AVG_UTIL_WRITE_MARKER (~((Uint64) 0)) + +/* Intervals in nanoseconds */ +#define ERTS_SCHED_UTIL_SHORT_INTERVAL ((Uint64) 1*1000*1000*1000) +#define ERTS_SCHED_UTIL_LONG_INTERVAL ((Uint64) 10*1000*1000*1000) + + +#define ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF 5000 /* ppm */ + +static ERTS_INLINE Uint64 +calc_sched_worktime(int is_working, Uint64 now, Uint64 last, + Uint64 interval, Uint64 old_worktime) +{ + Uint64 worktime; + Uint64 new; + + if (now <= last) + return old_worktime; + + new = now - last; + + if (new >= interval) + return is_working ? interval : (Uint64) 0; + + + /* + * Division by 1000 in order to avoid + * overflow. If changed update assertions + * in init_runq_sched_util(). 
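+ * Illustrative example (numbers are not from the patch): with + * interval = 1s (1e9 ns), old_worktime = 6e8 ns and a newly + * elapsed slice new = 2e8 ns, the computation below yields + * 6e8 * ((1e9 - 2e8)/1000) / (1e9/1000) = 4.8e8 ns; if the + * scheduler was working, new is then added back, giving 6.8e8 ns + * of work over the last second, i.e. 68% utilization.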
+ */ + worktime = old_worktime; + worktime *= (interval - new)/1000; + worktime /= (interval/1000); + if (is_working) + worktime += new; + + ASSERT(0 <= worktime && worktime <= interval); + + return worktime; +} + +static ERTS_INLINE void +update_avg_sched_util(ErtsSchedulerData *esdp, Uint64 now, int is_working) +{ + ErtsRunQueue *rq; + int worked; + Uint64 swt, lwt, last; + + rq = esdp->run_queue; + last = aschedtime_read(&rq->sched_util.last); + + if (now <= last) { + ASSERT(last == ERTS_SCHED_AVG_UTIL_WRITE_MARKER); + return; + } + + ASSERT(now >= last); + + worked = rq->sched_util.is_working; + + swt = calc_sched_worktime(worked, now, last, ERTS_SCHED_UTIL_SHORT_INTERVAL, + rq->sched_util.worktime.short_interval); + lwt = calc_sched_worktime(worked, now, last, ERTS_SCHED_UTIL_LONG_INTERVAL, + rq->sched_util.worktime.long_interval); + + aschedtime_set(&rq->sched_util.last, ERTS_SCHED_AVG_UTIL_WRITE_MARKER); + ERTS_THR_WRITE_MEMORY_BARRIER; + rq->sched_util.is_working = is_working; + rq->sched_util.worktime.short_interval = swt; + rq->sched_util.worktime.long_interval = lwt; + ERTS_THR_WRITE_MEMORY_BARRIER; + aschedtime_set(&rq->sched_util.last, now); +} + +int +erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval) +{ + /* Average scheduler utilization in ppm */ + int util, is_working, try = 0, locked = initially_locked; + Uint64 worktime, old_worktime, now, last, interval, *old_worktimep; + + if (short_interval) { + old_worktimep = &rq->sched_util.worktime.short_interval; + interval = ERTS_SCHED_UTIL_SHORT_INTERVAL; + } + else { + old_worktimep = &rq->sched_util.worktime.long_interval; + interval = ERTS_SCHED_UTIL_LONG_INTERVAL; + } + + while (1) { + Uint64 chk_last; + last = aschedtime_read(&rq->sched_util.last); + ERTS_THR_READ_MEMORY_BARRIER; + is_working = rq->sched_util.is_working; + old_worktime = *old_worktimep; + ERTS_THR_READ_MEMORY_BARRIER; + chk_last = aschedtime_read(&rq->sched_util.last); + if (chk_last == last) + break; + if (!locked) { + if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) { + /* Writer will eventually block on runq-lock */ + erts_smp_runq_lock(rq); + locked = 1; + } + } + } + + if (!initially_locked && locked) + erts_smp_runq_unlock(rq); + + now = sched_wall_time_ts(); + worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime); + + util = (int) ((worktime * 1000000)/interval); + + ASSERT(0 <= util && util <= 1000000); + + return util; +} + +static void +init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled) +{ + aschedtime_init(&rqsu->last); + if (!enabled) + aschedtime_set(&rqsu->last, ERTS_SCHED_AVG_UTIL_WRITE_MARKER); + rqsu->is_working = 0; + rqsu->worktime.short_interval = (Uint64) 0; + rqsu->worktime.long_interval = (Uint64) 0; + +#ifdef DEBUG + { + Uint64 intrvl; + /* + * If one of these asserts fails we may have + * overflow in calc_sched_worktime(), which + * has to be fixed either by shrinking the + * interval size, or by fixing the calculation of + * worktime in calc_sched_worktime(). 
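+ * (Worst case in calc_sched_worktime() is old_worktime close to + * interval, where the intermediate product approaches + * interval*(interval/1000); the asserts below check that this + * product still fits in a Uint64 without wrapping.)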
+ */ + intrvl = ERTS_SCHED_UTIL_SHORT_INTERVAL; + ASSERT(intrvl*(intrvl/1000) > intrvl); + intrvl = ERTS_SCHED_UTIL_LONG_INTERVAL; + ASSERT(intrvl*(intrvl/1000) > intrvl); + } +#endif +} + +#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */ + static ERTS_INLINE void sched_wall_time_change(ErtsSchedulerData *esdp, int working) { - if (esdp->sched_wall_time.enabled) { + if (esdp->sched_wall_time.need) { Uint64 ts = sched_wall_time_ts(); - if (working) { +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + update_avg_sched_util(esdp, ts, working); +#endif + if (esdp->sched_wall_time.enabled) { + if (working) { #ifdef DEBUG - ASSERT(!esdp->sched_wall_time.working.currently); - esdp->sched_wall_time.working.currently = 1; + ASSERT(!esdp->sched_wall_time.working.currently); + esdp->sched_wall_time.working.currently = 1; #endif - ts -= esdp->sched_wall_time.start; - esdp->sched_wall_time.working.start = ts; - } - else { + ts -= esdp->sched_wall_time.start; + esdp->sched_wall_time.working.start = ts; + } + else { #ifdef DEBUG - ASSERT(esdp->sched_wall_time.working.currently); - esdp->sched_wall_time.working.currently = 0; + ASSERT(esdp->sched_wall_time.working.currently); + esdp->sched_wall_time.working.currently = 0; #endif - ts -= esdp->sched_wall_time.start; - ts -= esdp->sched_wall_time.working.start; - esdp->sched_wall_time.working.total += ts; + ts -= esdp->sched_wall_time.start; + ts -= esdp->sched_wall_time.working.start; + esdp->sched_wall_time.working.total += ts; + } } } } @@ -656,12 +1009,17 @@ reply_sched_wall_time(void *vswtrp) ErlHeapFragment *bp = NULL; ASSERT(esdp); - +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); +#endif if (swtrp->set) { - if (!swtrp->enable && esdp->sched_wall_time.enabled) + if (!swtrp->enable && esdp->sched_wall_time.enabled) { + esdp->sched_wall_time.need = erts_sched_balance_util; esdp->sched_wall_time.enabled = 0; + } else if (swtrp->enable && !esdp->sched_wall_time.enabled) { Uint64 ts = sched_wall_time_ts(); + esdp->sched_wall_time.need = 1; esdp->sched_wall_time.enabled = 1; esdp->sched_wall_time.start = ts; esdp->sched_wall_time.working.total = 0; @@ -737,6 +1095,9 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable) if (!set && !esdp->sched_wall_time.enabled) return THE_NON_VALUE; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); +#endif swtrp = swtreq_alloc(); ref = erts_make_ref(c_p); @@ -991,6 +1352,9 @@ static ERTS_INLINE void haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp) { ErtsThrPrgrVal current = awdp->current_thr_prgr; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif if (current != ERTS_THR_PRGR_INVALID && !erts_thr_progress_equal(current, erts_thr_progress_current())) { /* @@ -1007,6 +1371,10 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in { int jix, max_jix; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif + ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY); if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions) @@ -1162,6 +1530,9 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->misc.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR; @@ -1214,6 +1585,9 @@ 
erts_schedule_multi_misc_aux_work(int ignore_self, if (ignore_self) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); +#endif if (esdp) self = (int) esdp->no; } @@ -1243,6 +1617,9 @@ handle_async_ready(ErtsAuxWorkData *awdp, int waiting) { ErtsSchedulerSleepInfo *ssi = awdp->ssi; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY); if (erts_check_async_ready(awdp->async_ready.queue)) { if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY) @@ -1267,6 +1644,9 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, { void *thr_prgr_p; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif #ifdef ERTS_SMP if (awdp->async_ready.need_thr_prgr && !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), @@ -1304,6 +1684,9 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) ErtsSchedulerSleepInfo *ssi = awdp->ssi; erts_aint32_t res; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC)); aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM @@ -1323,7 +1706,7 @@ void erts_alloc_notify_delayed_dealloc(int ix) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); - if (esdp) + if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) schedule_aux_work_wakeup(&esdp->aux_work_data, ix, ERTS_SSI_AUX_WORK_DD); @@ -1337,7 +1720,7 @@ erts_alloc_ensure_handle_delayed_dealloc_call(int ix) { #ifdef DEBUG ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ASSERT(!esdp || ix == (int) esdp->no); + ASSERT(!esdp || (ERTS_SCHEDULER_IS_DIRTY(esdp) || ix == (int) esdp->no)); #endif set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1), ERTS_SSI_AUX_WORK_DD); @@ -1351,6 +1734,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; int more_work = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD); erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp, &need_thr_progress, @@ -1390,6 +1776,9 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR; @@ -1437,6 +1826,9 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait int lops; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) { ErtsThrPrgrLaterOp *lop = awdp->later_op.first; if (!erts_thr_progress_has_reached_this(current, lop->later)) @@ -1595,6 +1987,14 @@ erts_smp_notify_check_children_needed(void) for (i = 0; i < erts_no_schedulers; i++) set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i), ERTS_SSI_AUX_WORK_CHECK_CHILDREN); +#ifdef ERTS_DIRTY_SCHEDULERS + for (i = 0; i < erts_no_dirty_cpu_schedulers; i++) + 
set_aux_work_flags_wakeup_nob(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(i), + ERTS_SSI_AUX_WORK_CHECK_CHILDREN); + for (i = 0; i < erts_no_dirty_io_schedulers; i++) + set_aux_work_flags_wakeup_nob(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(i), + ERTS_SSI_AUX_WORK_CHECK_CHILDREN); +#endif } static ERTS_INLINE erts_aint32_t @@ -1942,6 +2342,9 @@ static ERTS_INLINE void sched_active_sys(Uint no, ErtsRunQueue *rq) { ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); +#endif ASSERT(rq->waiting < 0); rq->waiting *= -1; rq->waiting--; @@ -1977,14 +2380,24 @@ try_set_sys_scheduling(void) #endif static ERTS_INLINE int -prepare_for_sys_schedule(void) +prepare_for_sys_schedule(ErtsSchedulerData *esdp) { #ifdef ERTS_SMP while (!erts_port_task_have_outstanding_io_tasks() && try_set_sys_scheduling()) { - if (!erts_port_task_have_outstanding_io_tasks()) - return 1; +#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1 + if (esdp->no != 1) { + /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used + then we make sure to wake scheduler 1 */ + ErtsRunQueue *rq = ERTS_RUNQ_IX(0); clear_sys_scheduling(); + wake_scheduler(rq); + return 0; + } +#endif + if (!erts_port_task_have_outstanding_io_tasks()) + return 1; + clear_sys_scheduling(); } return 0; #else @@ -1998,6 +2411,9 @@ static ERTS_INLINE void sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq) { ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); +#endif ASSERT(rq->waiting < 0); rq->waiting *= -1; } @@ -2013,7 +2429,7 @@ sched_waiting(Uint no, ErtsRunQueue *rq) else rq->waiting++; rq->woken = 0; - if (erts_system_profile_flags.scheduler) + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && erts_system_profile_flags.scheduler) profile_scheduler(make_small(no), am_inactive); } @@ -2025,7 +2441,7 @@ sched_active(Uint no, ErtsRunQueue *rq) rq->waiting++; else rq->waiting--; - if (erts_system_profile_flags.scheduler) + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && erts_system_profile_flags.scheduler) profile_scheduler(make_small(no), am_active); } @@ -2037,10 +2453,9 @@ ongoing_multi_scheduling_block(void) } static ERTS_INLINE void -empty_runq(ErtsRunQueue *rq) +empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags) { - Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED); - if (old_flags & ERTS_RUNQ_FLG_NONEMPTY) { + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && old_flags & ERTS_RUNQ_FLG_NONEMPTY) { #ifdef DEBUG erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); /* @@ -2060,10 +2475,27 @@ empty_runq(ErtsRunQueue *rq) } static ERTS_INLINE void +empty_runq(ErtsRunQueue *rq) +{ + Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED); + empty_runq_aux(rq, old_flags); +} + +static ERTS_INLINE Uint32 +empty_protected_runq(ErtsRunQueue *rq) +{ + Uint32 old_flags = ERTS_RUNQ_FLGS_BSET(rq, + ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED, + ERTS_RUNQ_FLG_PROTECTED); + empty_runq_aux(rq, old_flags); + return old_flags; +} + +static ERTS_INLINE void non_empty_runq(ErtsRunQueue *rq) { Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY); - if (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY)) { + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY))) { #ifdef DEBUG erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); /* @@ -2083,6 +2515,18 @@ non_empty_runq(ErtsRunQueue *rq) } } +void +erts_empty_runq(ErtsRunQueue *rq) +{ + 
empty_runq(rq); +} + +void +erts_non_empty_runq(ErtsRunQueue *rq) +{ + non_empty_runq(rq); +} + static erts_aint32_t sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi) { @@ -2259,6 +2703,8 @@ aux_thread(void *unused) erts_thr_progress_active(NULL, thr_prgr_active = 0); erts_thr_progress_prepare_wait(NULL); + ERTS_SCHED_FAIR_YIELD(); + flgs = sched_spin_wait(ssi, 0); if (flgs & ERTS_SSI_FLG_SLEEPING) { @@ -2296,18 +2742,37 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) + erts_smp_spin_lock(&rq->sleepers.lock); +#endif flgs = sched_prep_spin_wait(ssi); if (flgs & ERTS_SSI_FLG_SUSPENDED) { /* Go suspend instead... */ +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) + erts_smp_spin_unlock(&rq->sleepers.lock); +#endif return; } +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { + ssi->prev = NULL; + ssi->next = rq->sleepers.list; + if (rq->sleepers.list) + rq->sleepers.list->prev = ssi; + rq->sleepers.list = ssi; + erts_smp_spin_unlock(&rq->sleepers.lock); + } +#endif + /* * If all schedulers are waiting, one of them *should* * be waiting in erl_sys_schedule() */ - if (!prepare_for_sys_schedule()) { + if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp)) { sched_waiting(esdp->no, rq); @@ -2317,30 +2782,35 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) tse_wait: - if (thr_prgr_active != working) + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && thr_prgr_active != working) sched_wall_time_change(esdp, thr_prgr_active); while (1) { aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work) { - if (!thr_prgr_active) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); sched_wall_time_change(esdp, 1); } aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); - if (aux_work && erts_thr_progress_update(esdp)) + if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp) + && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); } if (aux_work) flgs = erts_smp_atomic32_read_acqb(&ssi->flags); else { - if (thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 0); - sched_wall_time_change(esdp, 0); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } + erts_thr_progress_prepare_wait(esdp); } - erts_thr_progress_prepare_wait(esdp); + + ERTS_SCHED_FAIR_YIELD(); flgs = sched_spin_wait(ssi, spincount); if (flgs & ERTS_SSI_FLG_SLEEPING) { @@ -2355,7 +2825,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } while (res == EINTR); } } - erts_thr_progress_finalize_wait(esdp); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) + erts_thr_progress_finalize_wait(esdp); } if (!(flgs & ERTS_SSI_FLG_WAITING)) { @@ -2376,7 +2847,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (flgs & ~ERTS_SSI_FLG_SUSPENDED) erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); - if (!thr_prgr_active) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); sched_wall_time_change(esdp, 1); } @@ -2393,6 +2864,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_smp_atomic32_set_relb(&function_calls, 0); *fcalls = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + 
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); +#endif + +#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1 + ASSERT(esdp->no == 1); +#endif sched_waiting_sys(esdp->no, rq); @@ -2413,7 +2891,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sched_wall_time_change(esdp, working = 0); ASSERT(!erts_port_task_have_outstanding_io_tasks()); - erl_sys_schedule(1); /* Might give us something to do */ dt = erts_do_time_read_and_reset(); @@ -2459,7 +2936,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) * Got to check that we still got I/O tasks; otherwise * we have to continue checking for I/O... */ - if (!prepare_for_sys_schedule()) { + if (!prepare_for_sys_schedule(esdp)) { spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT; goto tse_wait; } @@ -2481,7 +2958,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) * Got to check that we still got I/O tasks; otherwise * we have to wait in erl_sys_schedule() after all... */ - if (!prepare_for_sys_schedule()) { + if (!prepare_for_sys_schedule(esdp)) { /* * Not allowed to wait in erl_sys_schedule; * do tse wait instead... @@ -2585,7 +3062,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi) } static void -wake_scheduler(ErtsRunQueue *rq, int incq) +wake_scheduler(ErtsRunQueue *rq) { ErtsSchedulerSleepInfo *ssi; erts_aint32_t flgs; @@ -2604,10 +3081,53 @@ wake_scheduler(ErtsRunQueue *rq, int incq) flgs = ssi_flags_set_wake(ssi); erts_sched_finish_poke(ssi, flgs); +} + +#ifdef ERTS_DIRTY_SCHEDULERS +static void +wake_dirty_schedulers(ErtsRunQueue *rq, int one) +{ + ErtsSchedulerSleepInfo *ssi; + ErtsSchedulerSleepList *sl; + + ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); + + sl = &rq->sleepers; + erts_smp_spin_lock(&sl->lock); + ssi = sl->list; + if (!ssi) { + erts_smp_spin_unlock(&sl->lock); + if (one) + wake_scheduler(rq); + } else if (one) { + erts_aint32_t flgs; + if (ssi->prev) + ssi->prev->next = ssi->next; + else { + ASSERT(sl->list == ssi); + sl->list = ssi->next; + } + if (ssi->next) + ssi->next->prev = ssi->prev; + + erts_smp_spin_unlock(&sl->lock); + + ERTS_THR_MEMORY_BARRIER; + flgs = ssi_flags_set_wake(ssi); + erts_sched_finish_poke(ssi, flgs); + } else { + sl->list = NULL; + erts_smp_spin_unlock(&sl->lock); - if (incq && (flgs & ERTS_SSI_FLG_WAITING)) - non_empty_runq(rq); + ERTS_THR_MEMORY_BARRIER; + do { + ErtsSchedulerSleepInfo *wake_ssi = ssi; + ssi = ssi->next; + erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi)); + } while (ssi); + } } +#endif #define ERTS_NO_USED_RUNQS_SHIFT 16 #define ERTS_NO_RUNQS_MASK 0xffff @@ -2697,7 +3217,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate) if (try_inc_no_active_runqs(ix+1)) (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE); } - wake_scheduler(wrq, 0); + wake_scheduler(wrq); return 1; } return 0; @@ -2744,8 +3264,14 @@ static ERTS_INLINE void smp_notify_inc_runq(ErtsRunQueue *runq) { #ifdef ERTS_SMP - if (runq) - wake_scheduler(runq, 1); + if (runq) { +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) + wake_dirty_schedulers(runq, 1); + else +#endif + wake_scheduler(runq); + } #endif } @@ -2763,7 +3289,7 @@ erts_sched_notify_check_cpu_bind(void) for (ix = 0; ix < erts_no_run_queues; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); - wake_scheduler(rq, 0); + wake_scheduler(rq); } #else erts_sched_check_cpu_bind(erts_get_scheduler_data()); @@ -2848,7 +3374,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) if (statep) *statep = 
state; - prio = (int) (ERTS_PSFLG_PRIO_MASK & state); + prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); rqi = &runq->procs.prio_info[prio]; @@ -2891,6 +3417,11 @@ check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio) if (!f_rq) return NULL; +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + if (mp->sched_util) + return NULL; +#endif + f_rq_flags = ERTS_RUNQ_FLGS_GET(f_rq); if (f_rq_flags & ERTS_RUNQ_FLG_PROTECTED) return NULL; @@ -2997,7 +3528,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) erts_aint32_t state; state = erts_smp_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state) - && (prio == (int) (ERTS_PSFLG_PRIO_MASK & state))) { + && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) { ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio]; unqueue_process(rq, rpq, rqi, prio, prev_proc, proc); erts_smp_runq_unlock(rq); @@ -3030,10 +3561,11 @@ suspend_run_queue(ErtsRunQueue *rq) ERTS_SSI_FLG_SUSPENDED); (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED); - wake_scheduler(rq, 0); + wake_scheduler(rq); } static void scheduler_ix_resume_wake(Uint ix); +static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi); static ERTS_INLINE void resume_run_queue(ErtsRunQueue *rq) @@ -3060,7 +3592,10 @@ resume_run_queue(ErtsRunQueue *rq) erts_smp_runq_unlock(rq); - scheduler_ix_resume_wake(rq->ix); +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) +#endif + scheduler_ix_resume_wake(rq->ix); } typedef struct { @@ -3079,7 +3614,7 @@ schedule_bound_processes(ErtsRunQueue *rq, while (proc) { erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); next = proc->next; - enqueue_process(rq, (int) (ERTS_PSFLG_PRIO_MASK & state), proc); + enqueue_process(rq, (int) ERTS_PSFLGS_GET_PRQ_PRIO(state), proc); proc = next; } } @@ -3091,20 +3626,28 @@ evacuate_run_queue(ErtsRunQueue *rq, int prio_q; ErtsRunQueue *to_rq; ErtsMigrationPaths *mps; - ErtsMigrationPath *mp; + ErtsMigrationPath *mp = NULL; ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - mps = erts_get_migration_paths_managed(); - mp = &mps->mpath[rq->ix]; +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) +#endif + { + mps = erts_get_migration_paths_managed(); + mp = &mps->mpath[rq->ix]; + } /* Evacuate scheduled misc ops */ if (rq->misc.start) { ErtsMiscOpList *start, *end; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); +#endif to_rq = mp->misc_evac_runq; if (!to_rq) return; @@ -3122,6 +3665,9 @@ evacuate_run_queue(ErtsRunQueue *rq, to_rq->misc.start = start; to_rq->misc.end = end; + + non_empty_runq(to_rq); + erts_smp_runq_unlock(to_rq); smp_notify_inc_runq(to_rq); erts_smp_runq_lock(to_rq); @@ -3130,6 +3676,9 @@ evacuate_run_queue(ErtsRunQueue *rq, if (rq->ports.start) { Port *prt; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); +#endif to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq; if (!to_rq) return; @@ -3165,15 +3714,26 @@ evacuate_run_queue(ErtsRunQueue *rq, erts_aint32_t state; Process *proc; int notify = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + int requeue; +#endif to_rq = NULL; - if (!mp->prio[prio_q].runq) - return; - if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq) - return; +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) +#endif + { + if (!mp->prio[prio_q].runq) + return; + if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq) + return; + } proc = dequeue_process(rq, prio_q, &state); while (proc) { +#ifdef 
ERTS_DIRTY_SCHEDULERS + requeue = 1; +#endif if (ERTS_PSFLG_BOUND & state) { /* Bound processes get stuck here... */ proc->next = NULL; @@ -3182,12 +3742,43 @@ evacuate_run_queue(ErtsRunQueue *rq, else sbpp->first = proc; sbpp->last = proc; +#ifdef ERTS_DIRTY_SCHEDULERS + requeue = 0; +#endif + } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) { + erts_aint32_t old; + old = erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)); + /* assert that no other dirty flags are set */ + ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))); + } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) { + erts_aint32_t old; + old = erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_IO_PROC + | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); + /* assert that no other dirty flags are set */ + ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q))); } + if (requeue) { +#else else { - int prio = (int) (ERTS_PSFLG_PRIO_MASK & state); +#endif + int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); erts_smp_runq_unlock(rq); - to_rq = mp->prio[prio].runq; +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) + /* + * dirty run queues evacuate only to run + * queue 0 during multi-scheduling blocking + */ + to_rq = ERTS_RUNQ_IX(0); + else +#endif + to_rq = mp->prio[prio].runq; RUNQ_SET_RQ(&proc->run_queue, to_rq); erts_smp_runq_lock(to_rq); @@ -3257,7 +3848,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state)) { /* Steal process */ - int prio = (int) (ERTS_PSFLG_PRIO_MASK & state); + int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio]; unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc); erts_smp_runq_unlock(vrq); @@ -3334,7 +3925,7 @@ try_steal_task(ErtsRunQueue *rq) Uint32 flags; /* Protect jobs we steal from getting stolen from us... */ - flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_PROTECTED); + flags = empty_protected_runq(rq); if (flags & ERTS_RUNQ_FLG_SUSPENDED) return 0; /* go suspend instead... 
*/ @@ -3413,6 +4004,9 @@ typedef struct { int full_reds_history_change; int oowc; int max_len; +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + int sched_util; +#endif } ErtsRunQueueBalance; static ErtsRunQueueBalance *run_queue_info; @@ -3576,6 +4170,9 @@ check_balance(ErtsRunQueue *c_rq) Sint64 scheds_reds, full_scheds_reds; int forced, active, current_active, oowc, half_full_scheds, full_scheds, mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix; +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + int sched_util_balancing; +#endif if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { c_rq->check_balance_reds = INT_MAX; @@ -3631,6 +4228,10 @@ check_balance(ErtsRunQueue *c_rq) return; } +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + sched_util_balancing = 0; +#endif + freds_hist_ix = balance_info.full_reds_history_index; balance_info.full_reds_history_index++; if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE) @@ -3661,7 +4262,12 @@ check_balance(ErtsRunQueue *c_rq) run_queue_info[qix].oowc = rq->out_of_work_count; run_queue_info[qix].max_len = rq->max_len; rq->check_balance_reds = INT_MAX; - + +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + if (erts_sched_balance_util) + run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0); +#endif + erts_smp_runq_unlock(rq); } @@ -3731,8 +4337,38 @@ check_balance(ErtsRunQueue *c_rq) mmax_len = run_queue_info[qix].max_len; } - if (!erts_sched_compact_load) + if (!erts_sched_compact_load) { +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + if (erts_sched_balance_util && full_scheds < blnc_no_rqs) { + int avg_util = 0; + + for (qix = 0; qix < blnc_no_rqs; qix++) + avg_util += run_queue_info[qix].sched_util; + + avg_util /= blnc_no_rqs; /* in ppm */ + + sched_util_balancing = 1; + /* + * In order to avoid renaming a large number of fields + * we write utilization values instead of length values + * in the 'max_len' and 'migration_limit' fields... + */ + for (qix = 0; qix < blnc_no_rqs; qix++) { + run_queue_info[qix].flags = 0; /* Reset for later use... 
*/ + for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { + run_queue_info[qix].prio[pix].emigrate_to = -1; + run_queue_info[qix].prio[pix].immigrate_from = -1; + run_queue_info[qix].prio[pix].avail = 100; + run_queue_info[qix].prio[pix].max_len = run_queue_info[qix].sched_util; + run_queue_info[qix].prio[pix].migration_limit = avg_util; + } + } + active = blnc_no_rqs; + goto setup_migration_paths; + } +#endif goto all_active; + } if (!forced && half_full_scheds != blnc_no_rqs) { int min = 1; @@ -3849,15 +4485,30 @@ check_balance(ErtsRunQueue *c_rq) } } +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + setup_migration_paths: +#endif + /* Setup migration paths for all priorities */ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { int low = 0, high = 0; for (qix = 0; qix < blnc_no_rqs; qix++) { int len_diff = run_queue_info[qix].prio[pix].max_len; len_diff -= run_queue_info[qix].prio[pix].migration_limit; + #ifdef DBG_PRINT if (pix == 2) erts_fprintf(stderr, "%d ", len_diff); #endif + +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + if (sched_util_balancing + && -ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF <= len_diff + && len_diff <= ERTS_SCHED_UTIL_IGNORE_IMBALANCE_DIFF) { + /* ignore minor imbalance */ + len_diff = 0; + } +#endif + run_queue_compare[qix].qix = qix; run_queue_compare[qix].len = len_diff; if (len_diff != 0) { @@ -3984,6 +4635,9 @@ erts_fprintf(stderr, "--------------------------------\n"); Uint32 flags = run_queue_info[qix].flags; ErtsMigrationPath *mp = &new_mpaths->mpath[qix]; +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + mp->sched_util = sched_util_balancing; +#endif mp->flags = flags; mp->misc_evac_runq = NULL; @@ -4238,13 +4892,20 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) rq->wakeup_other += (left_len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC); if (rq->wakeup_other > wakeup_other.limit) { - int empty_rqs = - erts_smp_atomic32_read_acqb(&no_empty_run_queues); - if (flags & ERTS_RUNQ_FLG_PROTECTED) - (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - if (empty_rqs != 0) - wake_scheduler_on_empty_runq(rq); - rq->wakeup_other = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting) + wake_dirty_schedulers(rq, 1); + else +#endif + { + int empty_rqs = + erts_smp_atomic32_read_acqb(&no_empty_run_queues); + if (flags & ERTS_RUNQ_FLG_PROTECTED) + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); + if (empty_rqs != 0) + wake_scheduler_on_empty_runq(rq); + rq->wakeup_other = 0; + } } } rq->wakeup_other_reds = 0; @@ -4405,11 +5066,17 @@ erts_early_init_scheduling(int no_schedulers) wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT; #endif +#ifndef ERTS_SCHED_MIN_SPIN sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT); sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM * ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM); +#else + sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE; + sched_busy_wait.tse = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE; + sched_busy_wait.aux_work = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE; +#endif } int @@ -4525,7 +5192,14 @@ erts_sched_set_wake_cleanup_threshold(char *str) static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) { - awdp->sched_id = esdp ? 
(int) esdp->no : 0; + if (!esdp) + awdp->sched_id = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + else if (ERTS_SCHEDULER_IS_DIRTY(esdp)) + awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp); +#endif + else + awdp->sched_id = (int) esdp->no; awdp->esdp = esdp; awdp->ssi = esdp ? esdp->ssi : NULL; #ifdef ERTS_SMP @@ -4565,41 +5239,122 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) #endif } +static void +init_scheduler_data(ErtsSchedulerData* esdp, int num, + ErtsSchedulerSleepInfo* ssi, + ErtsRunQueue* runq, + char** daww_ptr, size_t daww_sz) +{ +#ifdef ERTS_SMP + erts_bits_init_state(&esdp->erl_bits_state); + esdp->match_pseudo_process = NULL; + esdp->free_process = NULL; +#endif + esdp->x_reg_array = + erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, + ERTS_X_REGS_ALLOCATED * + sizeof(Eterm)); + esdp->f_reg_array = + erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, + MAX_REG * sizeof(FloatDef)); +#if !HEAP_ON_C_STACK + esdp->num_tmp_heap_used = 0; +#endif +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) { + esdp->no = 0; + ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num; + } + else { + esdp->no = (Uint) num; + ERTS_DIRTY_SCHEDULER_NO(esdp) = 0; + } +#else + esdp->no = (Uint) num; +#endif + esdp->ssi = ssi; + esdp->current_process = NULL; + esdp->current_port = NULL; + + esdp->virtual_reds = 0; + esdp->cpu_id = -1; + + erts_init_atom_cache_map(&esdp->atom_cache_map); + + esdp->run_queue = runq; + esdp->run_queue->scheduler = esdp; + + if (daww_ptr) { + init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr); +#ifdef ERTS_SMP + *daww_ptr += daww_sz; +#endif + } + + esdp->reductions = 0; + + init_sched_wall_time(&esdp->sched_wall_time); + erts_port_task_handle_init(&esdp->nosuspend_port_task_handle); +} + void -erts_init_scheduling(int no_schedulers, int no_schedulers_online) +erts_init_scheduling(int no_schedulers, int no_schedulers_online +#ifdef ERTS_DIRTY_SCHEDULERS + , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online, + int no_dirty_io_schedulers +#endif + ) { int ix, n, no_ssi; char *daww_ptr; -#ifdef ERTS_SMP size_t daww_sz; -#endif + size_t size_runqs; init_misc_op_list_alloc(); + init_proc_sys_task_queues_alloc(); #ifdef ERTS_SMP set_wakeup_other_data(); #endif +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + if (erts_sched_balance_util) + erts_sched_compact_load = 0; +#endif + ASSERT(no_schedulers_online <= no_schedulers); ASSERT(no_schedulers_online >= 1); ASSERT(no_schedulers >= 1); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(no_dirty_cpu_schedulers <= no_schedulers); + ASSERT(no_dirty_cpu_schedulers >= 1); + ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online); + ASSERT(no_dirty_cpu_schedulers_online >= 1); +#endif /* Create and initialize run queues */ n = no_schedulers; - - erts_aligned_run_queues = - erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, - sizeof(ErtsAlignedRunQueue) * n); + size_runqs = sizeof(ErtsAlignedRunQueue) * (n + ERTS_NUM_DIRTY_RUNQS); + erts_aligned_run_queues = + erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs); #ifdef ERTS_SMP +#ifdef ERTS_DIRTY_SCHEDULERS + erts_aligned_run_queues += ERTS_NUM_DIRTY_RUNQS; +#endif erts_smp_atomic32_init_nob(&no_empty_run_queues, 0); #endif erts_no_run_queues = n; - for (ix = 0; ix < n; ix++) { + for (ix = -(ERTS_NUM_DIRTY_RUNQS); ix < n; ix++) { int pix, rix; +#ifdef ERTS_DIRTY_SCHEDULERS + ErtsRunQueue *rq = ERTS_RUNQ_IX_IS_DIRTY(ix) ? 
+ ERTS_DIRTY_RUNQ_IX(ix) : ERTS_RUNQ_IX(ix); +#else ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); +#endif rq->ix = ix; @@ -4610,6 +5365,14 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1)); erts_smp_cnd_init(&rq->cnd); +#ifdef ERTS_DIRTY_SCHEDULERS +#ifdef ERTS_SMP + if (ERTS_RUNQ_IX_IS_DIRTY(ix)) + erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list"); + rq->sleepers.list = NULL; +#endif +#endif + rq->waiting = 0; rq->woken = 0; ERTS_RUNQ_FLGS_INIT(rq, ERTS_RUNQ_FLG_NONEMPTY); @@ -4648,6 +5411,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) rq->ports.info.reds = 0; rq->ports.start = NULL; rq->ports.end = NULL; + +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + init_runq_sched_util(&rq->sched_util, erts_sched_balance_util); +#endif + } #ifdef ERTS_SMP @@ -4665,6 +5433,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) n = (int) no_schedulers; erts_no_schedulers = n; +#ifdef ERTS_DIRTY_SCHEDULERS + erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers; + erts_no_dirty_io_schedulers = no_dirty_io_schedulers; +#endif /* Create and initialize scheduler sleep info */ #ifdef ERTS_SMP @@ -4691,6 +5463,29 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) #ifdef ERTS_SMP aligned_sched_sleep_info++; + +#ifdef ERTS_DIRTY_SCHEDULERS + aligned_dirty_cpu_sched_sleep_info = + erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_SCHDLR_SLP_INFO, + no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); + for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { + ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi; + erts_smp_atomic32_init_nob(&ssi->flags, 0); + ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */ + erts_atomic32_init_nob(&ssi->aux_work, 0); + } + aligned_dirty_io_sched_sleep_info = + erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_SCHDLR_SLP_INFO, + no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); + for (ix = 0; ix < no_dirty_io_schedulers; ix++) { + ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi; + erts_smp_atomic32_init_nob(&ssi->flags, 0); + ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */ + erts_atomic32_init_nob(&ssi->aux_work, 0); + } +#endif #endif /* Create and initialize scheduler specific data */ @@ -4701,6 +5496,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, daww_sz*n); #else + daww_sz = 0; daww_ptr = NULL; #endif @@ -4710,45 +5506,32 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) for (ix = 0; ix < n; ix++) { ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix); -#ifdef ERTS_SMP - erts_bits_init_state(&esdp->erl_bits_state); - esdp->match_pseudo_process = NULL; - esdp->free_process = NULL; -#endif - esdp->x_reg_array = - erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, - ERTS_X_REGS_ALLOCATED * - sizeof(Eterm)); - esdp->f_reg_array = - erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, - MAX_REG * sizeof(FloatDef)); -#if !HEAP_ON_C_STACK - esdp->num_tmp_heap_used = 0; -#endif - esdp->no = (Uint) ix+1; - esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix); - esdp->current_process = NULL; - esdp->current_port = NULL; - - esdp->virtual_reds = 0; - esdp->cpu_id = -1; - - erts_init_atom_cache_map(&esdp->atom_cache_map); - - esdp->run_queue = ERTS_RUNQ_IX(ix); - esdp->run_queue->scheduler = esdp; + 
init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix), + ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz); + } - init_aux_work_data(&esdp->aux_work_data, esdp, daww_ptr); +#ifdef ERTS_DIRTY_SCHEDULERS #ifdef ERTS_SMP - daww_ptr += daww_sz; -#endif - - esdp->reductions = 0; - - init_sched_wall_time(&esdp->sched_wall_time); - erts_port_task_handle_init(&esdp->nosuspend_port_task_handle); - + erts_aligned_dirty_cpu_scheduler_data = + erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_SCHDLR_DATA, + no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData)); + for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); + init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix), + ERTS_DIRTY_CPU_RUNQ, NULL, 0); + } + erts_aligned_dirty_io_scheduler_data = + erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_SCHDLR_DATA, + no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData)); + for (ix = 0; ix < no_dirty_io_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix); + init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix), + ERTS_DIRTY_IO_RUNQ, NULL, 0); } +#endif +#endif init_misc_aux_work(); #if !HALFWORD_HEAP @@ -4772,6 +5555,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) schdlr_sspnd.curr_online = no_schedulers; schdlr_sspnd.msb.ongoing = 0; erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers); +#ifdef ERTS_DIRTY_SCHEDULERS + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_changing, 0); + schdlr_sspnd.dirty_cpu_online = no_dirty_cpu_schedulers_online; + schdlr_sspnd.dirty_cpu_curr_online = no_dirty_cpu_schedulers; + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_active, no_dirty_cpu_schedulers); + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_changing, 0); + schdlr_sspnd.dirty_io_online = no_dirty_io_schedulers; + schdlr_sspnd.dirty_io_curr_online = no_dirty_io_schedulers; + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_active, no_dirty_io_schedulers); +#endif schdlr_sspnd.msb.procs = NULL; init_no_runqs(no_schedulers_online, no_schedulers_online); balance_info.last_active_runqs = no_schedulers; @@ -4797,6 +5590,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) schdlr_sspnd.curr_online *= 2; /* Boot strapping... 
*/ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); +#ifdef ERTS_DIRTY_SCHEDULERS + schdlr_sspnd.dirty_cpu_wait_curr_online = no_dirty_cpu_schedulers_online; + schdlr_sspnd.dirty_cpu_curr_online *= 2; + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) { + ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); + erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); + } + + schdlr_sspnd.dirty_io_wait_curr_online = no_dirty_io_schedulers; + schdlr_sspnd.dirty_io_curr_online *= 2; + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); +#endif erts_smp_atomic32_init_nob(&doing_sys_schedule, 0); @@ -4812,6 +5620,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) #endif } erts_no_schedulers = 1; +#ifdef ERTS_DIRTY_SCHEDULERS + erts_no_dirty_cpu_schedulers = 0; + erts_no_dirty_io_schedulers = 0; +#endif #endif erts_smp_atomic32_init_nob(&function_calls, 0); @@ -4858,76 +5670,293 @@ erts_get_scheduler_data(void) #endif +static Process * +make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) +{ + erts_aint32_t state; + Process *proxy; +#ifdef ERTS_SMP + ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue); +#endif + + state = (ERTS_PSFLG_PROXY + | ERTS_PSFLG_IN_RUNQ + | (((erts_aint32_t) 1) << (prio + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET)) + | (prio << ERTS_PSFLGS_PRQ_PRIO_OFFSET) + | (prio << ERTS_PSFLGS_USR_PRIO_OFFSET) + | (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET)); + + if (prev_proxy) { + proxy = prev_proxy; + ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + erts_smp_atomic32_set_nob(&proxy->state, state); +#ifdef ERTS_SMP + RUNQ_SET_RQ(&proc->run_queue, rq); +#endif + } + else { + proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process)); +#ifdef DEBUG + { + int i; + Uint32 *ui32 = (Uint32 *) (char *) proxy; + for (i = 0; i < sizeof(Process)/sizeof(Uint32); i++) + ui32[i] = (Uint32) 0xdeadbeef; + } +#endif + erts_smp_atomic32_init_nob(&proxy->state, state); +#ifdef ERTS_SMP + erts_smp_atomic_init_nob(&proxy->run_queue, + erts_smp_atomic_read_nob(&proc->run_queue)); +#endif + } + + proxy->common.id = proc->common.id; + + return proxy; +} + +static ERTS_INLINE void +free_proxy_proc(Process *proxy) +{ + ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + erts_free(ERTS_ALC_T_PROC, proxy); +} + +#define ERTS_ENQUEUE_NOT 0 +#define ERTS_ENQUEUE_NORMAL_QUEUE 1 +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2 +#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3 +#endif + +static ERTS_INLINE int +check_enqueue_in_prio_queue(Process *c_p, + erts_aint32_t *prq_prio_p, + erts_aint32_t *newp, + erts_aint32_t actual) +{ + erts_aint32_t aprio, qbit, max_qbit; + + aprio = (actual >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK; + qbit = 1 << aprio; + + *prq_prio_p = aprio; + +#ifdef ERTS_DIRTY_SCHEDULERS + if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) { + /* + * If we have system tasks of a priority higher + * or equal to the user priority, we enqueue + * on ordinary run-queue and take care of + * those system tasks first. 
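+	     * For example: a process flagged ERTS_PSFLG_DIRTY_CPU_PROC with
+	     * user priority 'normal' but a pending 'high' priority system
+	     * task is placed on an ordinary run queue, so the system task
+	     * runs before the process is handed over to a dirty scheduler.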
+ */
+	if (actual & ERTS_PSFLG_ACTIVE_SYS) {
+	    erts_aint32_t uprio, stprio, qmask;
+	    uprio = (actual >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
+	    if (aprio < uprio)
+		goto enqueue_normal_runq; /* system tasks with higher prio */
+	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+	    qmask = c_p->sys_task_qs->qmask;
+	    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+	    switch (qmask & -qmask) {
+	    case MAX_BIT:
+		stprio = PRIORITY_MAX;
+		break;
+	    case HIGH_BIT:
+		stprio = PRIORITY_HIGH;
+		break;
+	    case NORMAL_BIT:
+		stprio = PRIORITY_NORMAL;
+		break;
+	    case LOW_BIT:
+		stprio = PRIORITY_LOW;
+		break;
+	    default:
+		stprio = PRIORITY_LOW+1;
+		break;
+	    }
+	    if (stprio <= uprio)
+		goto enqueue_normal_runq; /* system tasks with equal or higher prio */
+	}
+
+	/* Enqueue in dirty run queue if not already enqueued */
+	if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
+	    return ERTS_ENQUEUE_NOT; /* already in queue */
+	if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) {
+	    *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
+	    if (actual & ERTS_PSFLG_IN_RUNQ)
+		return -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* use proxy */
+	    *newp |= ERTS_PSFLG_IN_RUNQ;
+	    return ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
+	}
+	*newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
+	if (actual & ERTS_PSFLG_IN_RUNQ)
+	    return -ERTS_ENQUEUE_DIRTY_IO_QUEUE; /* use proxy */
+	*newp |= ERTS_PSFLG_IN_RUNQ;
+	return ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+    }
+
+ enqueue_normal_runq:
+#endif
+    max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
+    max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+    max_qbit &= -max_qbit;
+    /*
+     * max_qbit now either contains the bit set for the highest prio queue
+     * or a bit out of range (which will have a value larger than any valid
+     * queue bit).
+     */
+
+    if (qbit >= max_qbit)
+	return ERTS_ENQUEUE_NOT; /* Already queued in higher or equal prio */
+
+    /* Need to enqueue (if already enqueued, it is in lower prio) */
+    *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
+
+    if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
+	!= (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
+	/*
+	 * Process struct already enqueued, or actual prio not
+	 * equal to user prio, i.e., enqueue using proxy.
+	 */
+	return -ERTS_ENQUEUE_NORMAL_QUEUE;
+    }
+
+    /*
+     * Enqueue using process struct.
+     */
+    *newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
+    *newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
+    return ERTS_ENQUEUE_NORMAL_QUEUE;
+}
+
 /*
- * scheduler_out_process() return with c_rq locked.
+ * schedule_out_process() returns with c_rq locked.
*/ static ERTS_INLINE int -schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p) +schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy) { - erts_aint32_t a, e, n; - int res = 0; + erts_aint32_t a, e, n, enq_prio = -1; + int enqueue; /* < 0 -> use proxy */ + Process* sched_p; + ErtsRunQueue* runq; +#ifdef ERTS_SMP + int check_emigration_need; +#endif a = state; while (1) { n = e = a; - ASSERT(a & ERTS_PSFLG_RUNNING); - ASSERT(!(a & ERTS_PSFLG_IN_RUNQ)); + ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)); + + enqueue = ERTS_ENQUEUE_NOT; - n &= ~ERTS_PSFLG_RUNNING; - if ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) - n |= ERTS_PSFLG_IN_RUNQ; + n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS); + if (a & ERTS_PSFLG_ACTIVE_SYS + || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { + enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); + } a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } - if (!(n & ERTS_PSFLG_IN_RUNQ)) { - if (erts_system_profile_flags.runnable_procs) - profile_runnable_proc(p, am_inactive); + switch (enqueue) { + case ERTS_ENQUEUE_NOT: + if (erts_system_profile_flags.runnable_procs) { + + if (!(a & ERTS_PSFLG_ACTIVE_SYS) + && (!(a & ERTS_PSFLG_ACTIVE) + || (a & ERTS_PSFLG_SUSPENDED))) { + /* Process inactive */ + profile_runnable_proc(p, am_inactive); + } + } + + if (proxy) + free_proxy_proc(proxy); + + erts_smp_runq_lock(c_rq); + return 0; + +#ifdef ERTS_DIRTY_SCHEDULERS +#ifdef ERTS_SMP + case ERTS_ENQUEUE_DIRTY_CPU_QUEUE: + case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE: + runq = ERTS_DIRTY_CPU_RUNQ; + ASSERT(ERTS_SCHEDULER_IS_DIRTY_CPU(runq->scheduler)); +#ifdef ERTS_SMP + check_emigration_need = 0; +#endif + break; + + case ERTS_ENQUEUE_DIRTY_IO_QUEUE: + case -ERTS_ENQUEUE_DIRTY_IO_QUEUE: + runq = ERTS_DIRTY_IO_RUNQ; + ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(runq->scheduler)); +#ifdef ERTS_SMP + check_emigration_need = 0; +#endif + break; +#endif +#endif + + default: + ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE + || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE); + + runq = erts_get_runq_proc(p); +#ifdef ERTS_SMP + check_emigration_need = !(ERTS_PSFLG_BOUND & n); +#endif + break; } - else { - int prio = (int) (ERTS_PSFLG_PRIO_MASK & n); - ErtsRunQueue *runq = erts_get_runq_proc(p); - ASSERT(!(n & ERTS_PSFLG_SUSPENDED)); + ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS)); + + if (enqueue < 0) + sched_p = make_proxy_proc(proxy, p, enq_prio); + else { + sched_p = p; + if (proxy) + free_proxy_proc(proxy); + } #ifdef ERTS_SMP - if (!(ERTS_PSFLG_BOUND & n)) { - ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio); - if (new_runq) { - RUNQ_SET_RQ(&p->run_queue, new_runq); - runq = new_runq; - } + if (check_emigration_need) { + ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio); + if (new_runq) { + RUNQ_SET_RQ(&sched_p->run_queue, new_runq); + runq = new_runq; } + } #endif - ASSERT(runq); - res = 1; - erts_smp_runq_lock(runq); + ASSERT(runq); - /* Enqueue the process */ - enqueue_process(runq, prio, p); + erts_smp_runq_lock(runq); - if (runq == c_rq) - return res; - erts_smp_runq_unlock(runq); - smp_notify_inc_runq(runq); - } + /* Enqueue the process */ + enqueue_process(runq, (int) enq_prio, sched_p); + + if (runq == c_rq) + return 1; + erts_smp_runq_unlock(runq); + smp_notify_inc_runq(runq); erts_smp_runq_lock(c_rq); - return res; + return 1; } static ERTS_INLINE void -add2runq(Process *p, erts_aint32_t state) 
+add2runq(Process *p, erts_aint32_t state, erts_aint32_t prio)
 {
-    int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
     ErtsRunQueue *runq = erts_get_runq_proc(p);
 
 #ifdef ERTS_SMP
     if (!(ERTS_PSFLG_BOUND & state)) {
-	ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio);
+	ErtsRunQueue *new_runq = erts_check_emigration_need(runq, (int) prio);
 	if (new_runq) {
 	    RUNQ_SET_RQ(&p->run_queue, new_runq);
 	    runq = new_runq;
@@ -4939,101 +5968,236 @@ add2runq(Process *p, erts_aint32_t state)
     erts_smp_runq_lock(runq);
 
     /* Enqueue the process */
-    enqueue_process(runq, prio, p);
+    enqueue_process(runq, (int) prio, p);
 
     erts_smp_runq_unlock(runq);
     smp_notify_inc_runq(runq);
 }
 
-static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t state, int active_enq)
+static ERTS_INLINE int
+change_proc_schedule_state(Process *p,
+			   erts_aint32_t clear_state_flags,
+			   erts_aint32_t set_state_flags,
+			   erts_aint32_t *statep,
+			   erts_aint32_t *enq_prio_p)
 {
-    erts_aint32_t a = state, n;
+    /*
+     * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
+     *       ERTS_PSFLG_ACTIVE_SYS are not allowed to be
+     *       altered by this function!
+     */
+    erts_aint32_t a = *statep, n;
+    int enqueue; /* < 0 -> use proxy */
+
+    ASSERT(!(a & ERTS_PSFLG_PROXY));
+    ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
+				 | ERTS_PSFLG_RUNNING_SYS
+				 | ERTS_PSFLG_ACTIVE_SYS)) == 0);
+    ASSERT((set_state_flags & (ERTS_PSFLG_RUNNING
+			       | ERTS_PSFLG_RUNNING_SYS
+			       | ERTS_PSFLG_ACTIVE_SYS)) == 0);
 
     while (1) {
 	erts_aint32_t e;
 	n = e = a;
 
+	enqueue = ERTS_ENQUEUE_NOT;
+
 	if (a & ERTS_PSFLG_FREE)
-	    return; /* We don't want to schedule free processes... */
-	n |= ERTS_PSFLG_ACTIVE;
-	if (!(a & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_RUNNING)))
-	    n |= ERTS_PSFLG_IN_RUNQ;
+	    break; /* We don't want to schedule free processes... */
+
+	if (clear_state_flags)
+	    n &= ~clear_state_flags;
+
+	if (set_state_flags)
+	    n |= set_state_flags;
+
+	if ((n & (ERTS_PSFLG_SUSPENDED
+		  | ERTS_PSFLG_RUNNING
+		  | ERTS_PSFLG_RUNNING_SYS
+		  | ERTS_PSFLG_IN_RUNQ
+		  | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) {
+	    /*
+	     * Active and seemingly need to be enqueued, but
+	     * process may be in a run queue via proxy, need
+	     * further inspection...
+	     */
+	    enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a);
+	}
+
 	a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
 	if (a == e)
 	    break;
-	if (!active_enq && (a & ERTS_PSFLG_ACTIVE))
-	    return; /* Someone else activated process ... */
+	if (enqueue == ERTS_ENQUEUE_NOT && n == a)
+	    break;
     }
 
-    if (erts_system_profile_flags.runnable_procs
-	&& !(a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) {
-	profile_runnable_proc(p, am_active);
+    if (erts_system_profile_flags.runnable_procs) {
+
+	if (((n & (ERTS_PSFLG_SUSPENDED
+		   | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
+	    && (!(a & (ERTS_PSFLG_ACTIVE_SYS
+		       | ERTS_PSFLG_RUNNING
+		       | ERTS_PSFLG_RUNNING_SYS)
+		  && (!(a & ERTS_PSFLG_ACTIVE)
+		      || (a & ERTS_PSFLG_SUSPENDED))))) {
+	    /* We activated a previously inactive process */
+	    profile_runnable_proc(p, am_active);
+	}
+
     }
 
-    if ((n & ERTS_PSFLG_IN_RUNQ) && !(a & ERTS_PSFLG_IN_RUNQ))
-	add2runq(p, n);
+    *statep = a;
+
+    return enqueue;
+}
+
+static ERTS_INLINE void
+schedule_process(Process *p, erts_aint32_t in_state)
+{
+    erts_aint32_t enq_prio = -1;
+    erts_aint32_t state = in_state;
+    int enqueue = change_proc_schedule_state(p,
+					     0,
+					     ERTS_PSFLG_ACTIVE,
+					     &state,
+					     &enq_prio);
+    if (enqueue != ERTS_ENQUEUE_NOT)
+	add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
+		 state,
+		 enq_prio);
 }
 
 void
 erts_schedule_process(Process *p, erts_aint32_t state)
 {
-    schedule_process(p, state, 0);
+    schedule_process(p, state);
+}
+
+static void
+schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
+{
+    erts_aint32_t a = state, n, enq_prio = -1;
+    int enqueue; /* < 0 -> use proxy */
+
+    ASSERT(!(state & ERTS_PSFLG_PROXY));
+
+    while (1) {
+	erts_aint32_t e;
+	n = e = a;
+
+	if (a & ERTS_PSFLG_FREE)
+	    return; /* We don't want to schedule free processes... */
+
+	enqueue = ERTS_ENQUEUE_NOT;
+	n |= ERTS_PSFLG_ACTIVE_SYS;
+	if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
+	    enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
+	a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+	if (a == e)
+	    break;
+	if (a == n && enqueue == ERTS_ENQUEUE_NOT)
+	    goto cleanup;
+    }
+
+    if (erts_system_profile_flags.runnable_procs) {
+
+	if (!(a & (ERTS_PSFLG_ACTIVE_SYS
+		   | ERTS_PSFLG_RUNNING
+		   | ERTS_PSFLG_RUNNING_SYS))
+	    && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
+	    /* We activated a previously inactive process */
+	    profile_runnable_proc(p, am_active);
+	}
+
+    }
+
+    if (enqueue != ERTS_ENQUEUE_NOT) {
+	Process *sched_p;
+	if (enqueue > 0)
+	    sched_p = p;
+	else {
+	    sched_p = make_proxy_proc(proxy, p, enq_prio);
+	    proxy = NULL;
+	}
+	add2runq(sched_p, n, enq_prio);
+    }
+
+cleanup:
+    if (proxy)
+	free_proxy_proc(proxy);
 }
 
 static ERTS_INLINE int
 suspend_process(Process *c_p, Process *p)
 {
-    erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+    erts_aint32_t state;
     int suspended = 0;
     ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
 
+    state = erts_smp_atomic32_read_acqb(&p->state);
+
     if ((state & ERTS_PSFLG_SUSPENDED))
 	suspended = -1;
     else {
 	if (c_p == p) {
 	    state = erts_smp_atomic32_read_bor_relb(&p->state,
 						    ERTS_PSFLG_SUSPENDED);
-	    state |= ERTS_PSFLG_SUSPENDED;
 	    ASSERT(state & ERTS_PSFLG_RUNNING);
-	    suspended = 1;
+	    suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1;
 	}
 	else {
 	    while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) {
-		erts_aint32_t e, n;
+		erts_aint32_t n, e;
+
 		n = e = state;
 		n |= ERTS_PSFLG_SUSPENDED;
 		state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
 		if (state == e) {
-		    state = n;
 		    suspended = 1;
 		    break;
 		}
+		if (state & ERTS_PSFLG_SUSPENDED) {
+		    suspended = -1;
+		    break;
+		}
 	    }
 	}
     }
 
-    if (state & ERTS_PSFLG_SUSPENDED) {
+    if (suspended) {
 
 	ASSERT(!(ERTS_PSFLG_RUNNING & state)
 	       || p == erts_get_current_process());
 
-	if (erts_system_profile_flags.runnable_procs
-	    && (p->rcount == 0)
-	    && (state & ERTS_PSFLG_ACTIVE)) {
-	    profile_runnable_proc(p, am_inactive);
+	if (suspended > 0 && erts_system_profile_flags.runnable_procs) {
+
+	    /* 'state' is before our change...
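+	     * i.e., the flags tested below are those the process had before
+	     * ERTS_PSFLG_SUSPENDED was set above; the test asks whether the
+	     * process was active and neither running nor already suspended.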
*/ + + if ((state & (ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { + /* We made process inactive */ + profile_runnable_proc(p, am_inactive); + } + } p->rcount++; /* count number of suspend */ } + return suspended; } static ERTS_INLINE void resume_process(Process *p) { - erts_aint32_t state; + erts_aint32_t state, enq_prio = -1; + int enqueue; + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); ASSERT(p->rcount > 0); @@ -5041,14 +6205,16 @@ resume_process(Process *p) if (--p->rcount > 0) /* multiple suspend */ return; - state = erts_smp_atomic32_read_band_mb(&p->state, ~ERTS_PSFLG_SUSPENDED); - state &= ~ERTS_PSFLG_SUSPENDED; - if ((state & (ERTS_PSFLG_EXITING - | ERTS_PSFLG_ACTIVE - | ERTS_PSFLG_IN_RUNQ - | ERTS_PSFLG_RUNNING)) == ERTS_PSFLG_ACTIVE) { - schedule_process(p, state, 1); - } + state = erts_smp_atomic32_read_nob(&p->state); + enqueue = change_proc_schedule_state(p, + ERTS_PSFLG_SUSPENDED, + 0, + &state, + &enq_prio); + if (enqueue) + add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio), + state, + enq_prio); } int @@ -5070,6 +6236,12 @@ static void scheduler_ix_resume_wake(Uint ix) { ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); +} + +static void +scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi) +{ erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_TSE_SLEEPING | ERTS_SSI_FLG_WAITING @@ -5160,6 +6332,301 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) } } +#ifdef ERTS_DIRTY_SCHEDULERS + +static void +suspend_scheduler(ErtsSchedulerData *esdp) +{ + erts_aint32_t flgs; + erts_aint32_t changing; +#ifdef ERTS_DIRTY_SCHEDULERS + long no = (long) (ERTS_SCHEDULER_IS_DIRTY(esdp) + ? ERTS_DIRTY_SCHEDULER_NO(esdp) + : esdp->no); +#else + long no = (long) esdp->no; +#endif + ErtsSchedulerSleepInfo *ssi = esdp->ssi; + long active_schedulers; + int curr_online = 1; + int wake = 0; + erts_aint32_t aux_work; + int thr_prgr_active = 1; + ErtsStuckBoundProcesses sbp = {NULL, NULL}; + int* ss_onlinep; + int* ss_curr_onlinep; + int* ss_wait_curr_onlinep; + long* ss_wait_activep; + long ss_wait_active_target; + erts_smp_atomic32_t* ss_changingp; + erts_smp_atomic32_t* ss_activep; + + /* + * Schedulers may be suspended in two different ways: + * - A scheduler may be suspended since it is not online. + * All schedulers with scheduler ids greater than + * schdlr_sspnd.online are suspended; same for dirty + * schedulers and schdlr_sspnd.dirty_cpu_online and + * schdlr_sspnd.dirty_io_online. + * - Multi scheduling is blocked. All schedulers except the + * scheduler with scheduler id 1 are suspended, and all + * dirty CPU and dirty I/O schedulers are suspended. + * + * Regardless of why a scheduler is suspended, it ends up here. 
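+ * For example, with 8 normal schedulers and schdlr_sspnd.online == 4,
+ * the schedulers with ids 5..8 are the ones suspended here until the
+ * online count is raised again.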
+ */ + + ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) || no != 1); + +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) { + erts_smp_runq_unlock(esdp->run_queue); + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_smp_runq_lock(esdp->run_queue); + } + if (ongoing_multi_scheduling_block()) + evacuate_run_queue(esdp->run_queue, &sbp); + } else +#endif + evacuate_run_queue(esdp->run_queue, &sbp); + + erts_smp_runq_unlock(esdp->run_queue); + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + { + erts_sched_check_cpu_bind_prep_suspend(esdp); + + if (erts_system_profile_flags.scheduler) + profile_scheduler(make_small(esdp->no), am_inactive); + + sched_wall_time_change(esdp, 0); + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + } + + flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED); + if (flgs & ERTS_SSI_FLG_SUSPENDED) { + +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) { + active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_cpu_active); + ASSERT(active_schedulers >= 0); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing); + ss_onlinep = &schdlr_sspnd.dirty_cpu_online; + ss_curr_onlinep = &schdlr_sspnd.dirty_cpu_curr_online; + ss_wait_curr_onlinep = &schdlr_sspnd.dirty_cpu_wait_curr_online; + ss_changingp = &schdlr_sspnd.dirty_cpu_changing; + ss_wait_activep = &schdlr_sspnd.msb.dirty_cpu_wait_active; + ss_activep = &schdlr_sspnd.dirty_cpu_active; + } else { + active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_io_active); + ASSERT(active_schedulers >= 0); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing); + ss_onlinep = &schdlr_sspnd.dirty_io_online; + ss_curr_onlinep = &schdlr_sspnd.dirty_io_curr_online; + ss_wait_curr_onlinep = &schdlr_sspnd.dirty_io_wait_curr_online; + ss_changingp = &schdlr_sspnd.dirty_io_changing; + ss_wait_activep = &schdlr_sspnd.msb.dirty_io_wait_active; + ss_activep = &schdlr_sspnd.dirty_io_active; + } + ss_wait_active_target = 0; + } + else +#endif + { + active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active); + ASSERT(active_schedulers >= 1); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + ss_onlinep = &schdlr_sspnd.online; + ss_curr_onlinep = &schdlr_sspnd.curr_online; + ss_wait_curr_onlinep = &schdlr_sspnd.wait_curr_online; + ss_changingp = &schdlr_sspnd.changing; + ss_wait_activep = &schdlr_sspnd.msb.wait_active; + ss_activep = &schdlr_sspnd.active; + ss_wait_active_target = 1; + } + if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) { + if (active_schedulers == *ss_wait_activep) + wake = 1; + if (active_schedulers == ss_wait_active_target) { + changing = erts_smp_atomic32_read_band_nob(ss_changingp, + ~ERTS_SCHDLR_SSPND_CHNG_MSB); + changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB; + } + } + + while (1) { + if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) { + int changed = 0; + if (no > *ss_onlinep && curr_online) { + (*ss_curr_onlinep)--; + curr_online = 0; + changed = 1; + } + else if (no <= *ss_onlinep && !curr_online) { + (*ss_curr_onlinep)++; + curr_online = 1; + changed = 1; + } + if (changed + && *ss_curr_onlinep == *ss_wait_curr_onlinep) + wake = 1; + if (*ss_onlinep == *ss_curr_onlinep) { + changing = erts_smp_atomic32_read_band_nob(ss_changingp, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN; + } + } + + if (wake) { + erts_smp_cnd_signal(&schdlr_sspnd.cnd); + wake = 
0; + } + + if (curr_online && !ongoing_multi_scheduling_block()) { + flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) + break; + } + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + + while (1) { + erts_aint32_t qmask; + erts_aint32_t flgs; + + qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue) + & ERTS_RUNQ_FLGS_QMASK); + aux_work = erts_atomic32_read_acqb(&ssi->aux_work); + if (aux_work|qmask) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + if (aux_work) + aux_work = handle_aux_work(&esdp->aux_work_data, + aux_work, + 1); + + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && + (aux_work && erts_thr_progress_update(esdp))) + erts_thr_progress_leader_update(esdp); + if (qmask) { +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_smp_runq_lock(esdp->run_queue); + if (ongoing_multi_scheduling_block()) + evacuate_run_queue(esdp->run_queue, &sbp); + erts_smp_runq_unlock(esdp->run_queue); + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + } else +#endif + { + erts_smp_runq_lock(esdp->run_queue); + evacuate_run_queue(esdp->run_queue, &sbp); + erts_smp_runq_unlock(esdp->run_queue); + } + } + } + + if (!aux_work) { +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + { + if (thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } + erts_thr_progress_prepare_wait(esdp); + } + flgs = sched_spin_suspended(ssi, + ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT); + if (flgs == (ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)) { + flgs = sched_set_suspended_sleeptype(ssi); + if (flgs == (ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_TSE_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)) { + int res; + + do { + res = erts_tse_wait(ssi->event); + } while (res == EINTR); + } + } +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + erts_thr_progress_finalize_wait(esdp); + } + + flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)); + if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) + break; + changing = erts_smp_atomic32_read_nob(ss_changingp); + if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER) + break; + } + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + changing = erts_smp_atomic32_read_nob(ss_changingp); + } + + active_schedulers = erts_smp_atomic32_inc_read_nob(ss_activep); + changing = erts_smp_atomic32_read_nob(ss_changingp); + if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB) + && *ss_onlinep == active_schedulers) { + erts_smp_atomic32_read_band_nob(ss_changingp, + ~ERTS_SCHDLR_SSPND_CHNG_MSB); + } + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + ASSERT(no <= *ss_onlinep); + ASSERT(!ongoing_multi_scheduling_block()); + + } + + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + + ASSERT(curr_online); + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + { + if (erts_system_profile_flags.scheduler) + profile_scheduler(make_small(esdp->no), am_active); + + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + } + + erts_smp_runq_lock(esdp->run_queue); + non_empty_runq(esdp->run_queue); + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + { + schedule_bound_processes(esdp->run_queue, &sbp); + + erts_sched_check_cpu_bind_post_suspend(esdp); + } +} + 
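The online/suspend handshake used above (and again in erts_set_schedulers_online() below) follows one pattern: each suspended scheduler adjusts a shared current-online counter as it crosses the online threshold and signals the coordinating thread once the counter reaches the value being waited for, while the coordinator blocks on a condition variable until its target is met. The following is a minimal plain-pthreads sketch of that pattern, not ERTS code; the names sched_handshake, scheduler_ack_online_change and coordinator_set_online are invented for illustration.

#include <pthread.h>

typedef struct {
    pthread_mutex_t mtx;   /* assume PTHREAD_MUTEX_INITIALIZER */
    pthread_cond_t  cnd;   /* assume PTHREAD_COND_INITIALIZER */
    int online;            /* target: schedulers with id <= online stay up */
    int curr_online;       /* schedulers currently counting themselves online */
    int wait_curr_online;  /* value the coordinator is waiting for */
} sched_handshake;

/* Suspended-scheduler side; mirrors the (*ss_curr_onlinep)++ and
 * (*ss_curr_onlinep)-- steps in suspend_scheduler() above.  'no' is
 * this scheduler's id, *am_online its cached notion of being online. */
static void
scheduler_ack_online_change(sched_handshake *hs, int no, int *am_online)
{
    pthread_mutex_lock(&hs->mtx);
    if (no > hs->online && *am_online) {
	hs->curr_online--;             /* this scheduler drops out */
	*am_online = 0;
    }
    else if (no <= hs->online && !*am_online) {
	hs->curr_online++;             /* this scheduler comes back */
	*am_online = 1;
    }
    if (hs->curr_online == hs->wait_curr_online)
	pthread_cond_signal(&hs->cnd); /* wake the coordinator */
    pthread_mutex_unlock(&hs->mtx);
}

/* Coordinator side; mirrors the wait loop near the end of
 * erts_set_schedulers_online() below. */
static void
coordinator_set_online(sched_handshake *hs, int new_online)
{
    pthread_mutex_lock(&hs->mtx);
    hs->online = new_online;
    hs->wait_curr_online = new_online;
    /* ...wake or suspend the affected scheduler threads here... */
    while (hs->curr_online != hs->wait_curr_online)
	pthread_cond_wait(&hs->cnd, &hs->mtx);
    pthread_mutex_unlock(&hs->mtx);
}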
+#else /* !ERTS_DIRTY_SCHEDULERS */ + static void suspend_scheduler(ErtsSchedulerData *esdp) { @@ -5348,29 +6815,328 @@ suspend_scheduler(ErtsSchedulerData *esdp) erts_sched_check_cpu_bind_post_suspend(esdp); } +#endif + ErtsSchedSuspendResult erts_schedulers_state(Uint *total, Uint *online, Uint *active, + Uint *dirty_cpu, + Uint *dirty_cpu_online, + Uint *dirty_io, int yield_allowed) { - int res; + int res = ERTS_SCHDLR_SSPND_EINVAL; erts_aint32_t changing; erts_smp_mtx_lock(&schdlr_sspnd.mtx); changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); +#ifdef ERTS_DIRTY_SCHEDULERS + changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing) + | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)); +#endif if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)) res = ERTS_SCHDLR_SSPND_YIELD_RESTART; else { - *active = *online = schdlr_sspnd.online; - if (ongoing_multi_scheduling_block()) + if (active) + *active = schdlr_sspnd.online; + if (online) + *online = schdlr_sspnd.online; + if (ongoing_multi_scheduling_block() && active) *active = 1; +#ifdef ERTS_DIRTY_SCHEDULERS + if (dirty_cpu_online) + *dirty_cpu_online = schdlr_sspnd.dirty_cpu_online; +#endif res = ERTS_SCHDLR_SSPND_DONE; } erts_smp_mtx_unlock(&schdlr_sspnd.mtx); - *total = erts_no_schedulers; + if (total) + *total = erts_no_schedulers; +#ifdef ERTS_DIRTY_SCHEDULERS + if (dirty_cpu) + *dirty_cpu = erts_no_dirty_cpu_schedulers; + if (dirty_io) + *dirty_io = erts_no_dirty_io_schedulers; +#endif + return res; +} + +#ifdef ERTS_DIRTY_SCHEDULERS + +ErtsSchedSuspendResult +erts_set_schedulers_online(Process *p, + ErtsProcLocks plocks, + Sint new_no, + Sint *old_no +#ifdef ERTS_DIRTY_SCHEDULERS + , int dirty_only +#endif + ) +{ + ErtsSchedulerData *esdp; + int ix, res = -1, no, have_unlocked_plocks, end_wait; + erts_aint32_t changing = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + ErtsSchedulerSleepInfo* ssi; + int dirty_no, change_dirty; +#endif + + if (new_no < 1) + return ERTS_SCHDLR_SSPND_EINVAL; +#ifdef ERTS_DIRTY_SCHEDULERS + else if (dirty_only && erts_no_dirty_cpu_schedulers < new_no) + return ERTS_SCHDLR_SSPND_EINVAL; +#endif + else if (erts_no_schedulers < new_no) + return ERTS_SCHDLR_SSPND_EINVAL; + + esdp = ERTS_PROC_GET_SCHDATA(p); + end_wait = 0; + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + + have_unlocked_plocks = 0; + no = (int) new_no; + +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(schdlr_sspnd.dirty_cpu_online <= erts_no_dirty_cpu_schedulers); + if (dirty_only) { + if (no > schdlr_sspnd.online) { + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + return ERTS_SCHDLR_SSPND_EINVAL; + } + dirty_no = no; + } else { + /* + * Adjust the number of dirty CPU schedulers online relative to the + * adjustment made to the number of normal schedulers online. 
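+	 * For example (illustrative numbers): with 8 normal and 8 dirty
+	 * CPU schedulers, total_pct is 100; taking the normal schedulers
+	 * online from 8 down to 4 gives onln_pct == 50, so dirty_no drops
+	 * from 8 to 4 as well.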
+ */ + int total_pct = erts_no_dirty_cpu_schedulers*100/erts_no_schedulers; + int onln_pct = no*total_pct/schdlr_sspnd.online; + dirty_no = schdlr_sspnd.dirty_cpu_online*onln_pct/100; + if (dirty_no == 0) + dirty_no = 1; + ASSERT(dirty_no <= erts_no_dirty_cpu_schedulers); + } +#endif + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); +#ifdef ERTS_DIRTY_SCHEDULERS + changing |= erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing); +#endif + if (changing) { + res = ERTS_SCHDLR_SSPND_YIELD_RESTART; + } + else { + int online = *old_no = schdlr_sspnd.online; +#ifdef ERTS_DIRTY_SCHEDULERS + int dirty_online = schdlr_sspnd.dirty_cpu_online; + + if (dirty_only) { + *old_no = schdlr_sspnd.dirty_cpu_online; + if (dirty_no == schdlr_sspnd.dirty_cpu_online) { + res = ERTS_SCHDLR_SSPND_DONE; + } + change_dirty = 1; + } else { +#endif + if (no == schdlr_sspnd.online) { +#ifdef ERTS_DIRTY_SCHEDULERS + dirty_only = 1; + if (dirty_no == schdlr_sspnd.dirty_cpu_online) +#endif + res = ERTS_SCHDLR_SSPND_DONE; +#ifdef ERTS_DIRTY_SCHEDULERS + else + change_dirty = 1; +#endif + } +#ifdef ERTS_DIRTY_SCHEDULERS + else + change_dirty = (dirty_no != schdlr_sspnd.dirty_cpu_online); + } +#endif + if (res == -1) + { + int increase = (no > online); +#ifdef ERTS_DIRTY_SCHEDULERS + if (!dirty_only) { +#endif + ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + schdlr_sspnd.online = no; +#ifdef ERTS_DIRTY_SCHEDULERS + } else + increase = (dirty_no > dirty_online); + if (change_dirty) { + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + schdlr_sspnd.dirty_cpu_online = dirty_no; + } +#endif + if (increase) { + int ix; +#ifdef ERTS_DIRTY_SCHEDULERS + if (!dirty_only) { +#endif + schdlr_sspnd.wait_curr_online = no; + if (ongoing_multi_scheduling_block()) { + for (ix = online; ix < no; ix++) + erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix)); + } + else { + if (plocks) { + have_unlocked_plocks = 1; + erts_smp_proc_unlock(p, plocks); + } + change_no_used_runqs(no); + + for (ix = online; ix < no; ix++) + resume_run_queue(ERTS_RUNQ_IX(ix)); + + for (ix = no; ix < erts_no_run_queues; ix++) + suspend_run_queue(ERTS_RUNQ_IX(ix)); + } +#ifdef ERTS_DIRTY_SCHEDULERS + } + if (change_dirty) { + schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no; + ASSERT(schdlr_sspnd.dirty_cpu_curr_online != + schdlr_sspnd.dirty_cpu_wait_curr_online); + if (ongoing_multi_scheduling_block()) { + for (ix = dirty_online; ix < dirty_no; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_sched_poke(ssi); + } + } else { + for (ix = dirty_online; ix < dirty_no; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); + erts_smp_atomic32_read_band_nob(&ssi->flags, + ~ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + } + } +#endif + res = ERTS_SCHDLR_SSPND_DONE; + } + else /* if (no < online) */ { +#ifdef ERTS_DIRTY_SCHEDULERS + if (change_dirty) { + schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no; + ASSERT(schdlr_sspnd.dirty_cpu_curr_online != + schdlr_sspnd.dirty_cpu_wait_curr_online); + if (ongoing_multi_scheduling_block()) { + for (ix = dirty_no; ix < dirty_online; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_sched_poke(ssi); + } + } else { + for (ix = dirty_no; ix < dirty_online; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + 
wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + } + } + if (dirty_only) { + res = ERTS_SCHDLR_SSPND_DONE; + } + else +#endif + { + if (p->scheduler_data->no <= no) { + res = ERTS_SCHDLR_SSPND_DONE; + schdlr_sspnd.wait_curr_online = no; + } + else { + /* + * Yield! Current process needs to migrate + * before bif returns. + */ + res = ERTS_SCHDLR_SSPND_YIELD_DONE; + schdlr_sspnd.wait_curr_online = no+1; + } + + if (ongoing_multi_scheduling_block()) { + for (ix = no; ix < online; ix++) + erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix)); + } + else { + if (plocks) { + have_unlocked_plocks = 1; + erts_smp_proc_unlock(p, plocks); + } + + change_no_used_runqs(no); + for (ix = no; ix < erts_no_run_queues; ix++) + suspend_run_queue(ERTS_RUNQ_IX(ix)); + + for (ix = no; ix < online; ix++) { + ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); + wake_scheduler(rq); + } + } + } + } + +#ifdef ERTS_DIRTY_SCHEDULERS + if (change_dirty) { + while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing, + ~ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + if (!dirty_only) +#endif + { + if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) { + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + if (plocks && !have_unlocked_plocks) { + have_unlocked_plocks = 1; + erts_smp_proc_unlock(p, plocks); + } + erts_thr_progress_active(esdp, 0); + erts_thr_progress_prepare_wait(esdp); + end_wait = 1; + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + } + + while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + + ASSERT(res != ERTS_SCHDLR_SSPND_DONE + ? 
(ERTS_SCHDLR_SSPND_CHNG_WAITER + & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)) + : (ERTS_SCHDLR_SSPND_CHNG_WAITER + == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + } + } + + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(schdlr_sspnd.dirty_cpu_online <= schdlr_sspnd.online); + if (!dirty_only) +#endif + { + if (end_wait) { + erts_thr_progress_finalize_wait(esdp); + erts_thr_progress_active(esdp, 1); + } + if (have_unlocked_plocks) + erts_smp_proc_lock(p, plocks); + } + return res; } +#else /* !ERTS_DIRTY_SCHEDULERS */ + ErtsSchedSuspendResult erts_set_schedulers_online(Process *p, ErtsProcLocks plocks, @@ -5457,7 +7223,7 @@ erts_set_schedulers_online(Process *p, for (ix = no; ix < online; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); - wake_scheduler(rq, 0); + wake_scheduler(rq); } } } @@ -5499,15 +7265,24 @@ erts_set_schedulers_online(Process *p, return res; } +#endif + ErtsSchedSuspendResult erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) { - int ix, res, have_unlocked_plocks = 0; + int ix, res, have_unlocked_plocks = 0, online; erts_aint32_t changing; ErtsProcList *plp; +#ifdef ERTS_DIRTY_SCHEDULERS + ErtsSchedulerSleepInfo* ssi; +#endif erts_smp_mtx_lock(&schdlr_sspnd.mtx); changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); +#ifdef ERTS_DIRTY_SCHEDULERS + changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing) + | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)); +#endif if (changing) { res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */ } @@ -5517,10 +7292,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp); p->flags |= F_HAVE_BLCKD_MSCHED; ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 0); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) == 0); +#endif ASSERT(p->scheduler_data->no == 1); res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; - } - else { + } else { int online = schdlr_sspnd.online; p->flags |= F_HAVE_BLCKD_MSCHED; if (plocks) { @@ -5532,6 +7310,35 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) if (online == 1) { res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 1); + ASSERT(!(erts_smp_atomic32_read_nob(&ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0)->flags) + & ERTS_SSI_FLG_SUSPENDED)); + schdlr_sspnd.msb.dirty_cpu_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0); + erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) + != schdlr_sspnd.msb.dirty_cpu_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + + schdlr_sspnd.msb.dirty_io_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + ssi = 
ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) + != schdlr_sspnd.msb.dirty_io_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); +#endif ASSERT(p->scheduler_data->no == 1); } else { @@ -5550,13 +7357,44 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) schdlr_sspnd.msb.wait_active = 2; } +#ifdef ERTS_DIRTY_SCHEDULERS + schdlr_sspnd.msb.dirty_cpu_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) + != schdlr_sspnd.msb.dirty_cpu_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + ASSERT(schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_online); + + schdlr_sspnd.msb.dirty_io_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) + != schdlr_sspnd.msb.dirty_io_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + ASSERT(schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_online); +#endif change_no_used_runqs(1); for (ix = 1; ix < erts_no_run_queues; ix++) suspend_run_queue(ERTS_RUNQ_IX(ix)); for (ix = 1; ix < online; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); - wake_scheduler(rq, 0); + wake_scheduler(rq); } if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active) @@ -5590,6 +7428,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) erts_smp_mtx_lock(&schdlr_sspnd.mtx); } + ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED ? 
(ERTS_SCHDLR_SSPND_CHNG_WAITER & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)) @@ -5630,12 +7469,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) p->flags &= ~F_HAVE_BLCKD_MSCHED; schdlr_sspnd.msb.ongoing = 0; if (schdlr_sspnd.online == 1) { - /* No schedulers to resume */ + /* No normal schedulers to resume */ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB); } else { - int online = schdlr_sspnd.online; + online = schdlr_sspnd.online; if (plocks) { have_unlocked_plocks = 1; erts_smp_proc_unlock(p, plocks); @@ -5650,6 +7489,27 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) for (ix = online; ix < erts_no_run_queues; ix++) suspend_run_queue(ERTS_RUNQ_IX(ix)); } +#ifdef ERTS_DIRTY_SCHEDULERS + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0); + schdlr_sspnd.msb.dirty_cpu_wait_active = schdlr_sspnd.dirty_cpu_online; + for (ix = 0; ix < schdlr_sspnd.dirty_cpu_online; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); + erts_smp_atomic32_read_band_nob(&ssi->flags, + ~ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0); + schdlr_sspnd.msb.dirty_io_wait_active = erts_no_dirty_io_schedulers; + for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); + erts_smp_atomic32_read_band_nob(&ssi->flags, + ~ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0); +#endif res = ERTS_SCHDLR_SSPND_DONE; } } @@ -5776,7 +7636,11 @@ sched_thread_func(void *vesdp) erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_ONLN); if (no != 1) +#ifdef ERTS_DIRTY_SCHEDULERS + erts_smp_cnd_broadcast(&schdlr_sspnd.cnd); +#else erts_smp_cnd_signal(&schdlr_sspnd.cnd); +#endif } if (no == 1) { @@ -5806,22 +7670,155 @@ sched_thread_func(void *vesdp) return NULL; } +#ifdef ERTS_DIRTY_SCHEDULERS +#ifdef ERTS_SMP +static void* +sched_dirty_cpu_thread_func(void *vesdp) +{ + ErtsThrPrgrCallbacks callbacks; + ErtsSchedulerData *esdp = vesdp; + Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp); + ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER; + ASSERT(no != 0); + ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch(); + callbacks.arg = (void *) esdp->ssi; + callbacks.wakeup = thr_prgr_wakeup; + callbacks.prepare_wait = NULL; + callbacks.wait = NULL; + callbacks.finalize_wait = NULL; + + erts_thr_progress_register_unmanaged_thread(&callbacks); +#ifdef ERTS_ENABLE_LOCK_CHECK + { + char buf[31]; + erts_snprintf(&buf[0], 31, "dirty cpu scheduler %beu", no); + erts_lc_set_thread_name(&buf[0]); + } +#endif + erts_tsd_set(sched_data_key, vesdp); +#if ERTS_USE_ASYNC_READY_Q + esdp->aux_work_data.async_ready.queue = NULL; +#endif + + erts_proc_lock_prepare_proc_lock_waiter(); + +#ifdef HIPE + hipe_thread_signal_init(); +#endif + erts_thread_init_float(); + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing) + & ERTS_SCHDLR_SSPND_CHNG_ONLN); + + if (--schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_wait_curr_online) { + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + if (no != 1) + erts_smp_cnd_broadcast(&schdlr_sspnd.cnd); + } + + if (no == 1) { + while 
(schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + + process_main(); + /* No schedulers should *ever* terminate */ + erl_exit(ERTS_ABORT_EXIT, + "Dirty CPU scheduler thread number %beu terminated\n", + no); + return NULL; +} + +static void* +sched_dirty_io_thread_func(void *vesdp) +{ + ErtsThrPrgrCallbacks callbacks; + ErtsSchedulerData *esdp = vesdp; + Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp); + ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_IO_SCHEDULER; + ASSERT(no != 0); + ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch(); + callbacks.arg = (void *) esdp->ssi; + callbacks.wakeup = thr_prgr_wakeup; + callbacks.prepare_wait = NULL; + callbacks.wait = NULL; + callbacks.finalize_wait = NULL; + + erts_thr_progress_register_unmanaged_thread(&callbacks); +#ifdef ERTS_ENABLE_LOCK_CHECK + { + char buf[31]; + erts_snprintf(&buf[0], 31, "dirty io scheduler %beu", no); + erts_lc_set_thread_name(&buf[0]); + } +#endif + erts_tsd_set(sched_data_key, vesdp); +#if ERTS_USE_ASYNC_READY_Q + esdp->aux_work_data.async_ready.queue = NULL; +#endif + + erts_proc_lock_prepare_proc_lock_waiter(); + +#ifdef HIPE + hipe_thread_signal_init(); +#endif + erts_thread_init_float(); + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing) + & ERTS_SCHDLR_SSPND_CHNG_ONLN); + + if (--schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_wait_curr_online) { + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_io_changing, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + if (no != 1) + erts_smp_cnd_broadcast(&schdlr_sspnd.cnd); + } + + if (no == 1) { + while (schdlr_sspnd.dirty_io_curr_online != schdlr_sspnd.dirty_io_wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + + process_main(); + /* No schedulers should *ever* terminate */ + erl_exit(ERTS_ABORT_EXIT, + "Dirty I/O scheduler thread number %beu terminated\n", + no); + return NULL; +} +#endif +#endif + static ethr_tid aux_tid; void erts_start_schedulers(void) { int res = 0; - Uint actual = 0; + Uint actual; Uint wanted = erts_no_schedulers; Uint wanted_no_schedulers = erts_no_schedulers; ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER; opts.detached = 1; +#ifdef ETHR_HAVE_THREAD_NAMES + opts.name = malloc(80); +#endif + #ifdef ERTS_SMP if (erts_runq_supervision_interval) { opts.suggested_stack_size = 16; +#ifdef ETHR_HAVE_THREAD_NAMES + sprintf(opts.name, "runq_supervisor"); +#endif erts_atomic_init_nob(&runq_supervisor_sleeping, 0); if (0 != ethr_event_init(&runq_supervision_event)) erl_exit(1, "Failed to create run-queue supervision event\n"); @@ -5843,21 +7840,65 @@ erts_start_schedulers(void) res = ENOTSUP; } - while (actual < wanted) { + for (actual = 0; actual < wanted; actual++) { ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual); - actual++; - ASSERT(actual == esdp->no); - res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts); + + ASSERT(actual == esdp->no - 1); + +#ifdef ETHR_HAVE_THREAD_NAMES + sprintf(opts.name, "scheduler_%d", actual + 1); +#endif + +#ifdef __OSE__ + /* This should be done in the bind strategy */ + opts.coreNo = (actual+1) % ose_num_cpus(); +#endif + + res = ethr_thr_create(&esdp->tid, sched_thread_func, 
(void*)esdp, &opts); + if (res != 0) { - actual--; - break; + break; } } - + erts_no_schedulers = actual; +#ifdef ERTS_DIRTY_SCHEDULERS +#ifdef ERTS_SMP + { + int ix; + for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); +#ifdef ETHR_HAVE_THREAD_NAMES + sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1); +#endif + res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts); + if (res != 0) + erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix); + } + for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix); +#ifdef ETHR_HAVE_THREAD_NAMES + sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1); +#endif + res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts); + if (res != 0) + erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix); + } + } +#endif +#endif + ERTS_THR_MEMORY_BARRIER; +#ifdef ETHR_HAVE_THREAD_NAMES + sprintf(opts.name, "aux"); +#endif + +#ifdef __OSE__ + opts.coreNo = 0; +#endif /* __OSE__ */ + res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts); if (res != 0) erl_exit(1, "Failed to create aux thread\n"); @@ -5877,6 +7918,10 @@ erts_start_schedulers(void) actual, actual == 1 ? " was" : "s were"); erts_send_error_to_logger_nogl(dsbufp); } + +#ifdef ETHR_HAVE_THREAD_NAMES + free(opts.name); +#endif } #endif /* ERTS_SMP */ @@ -6032,7 +8077,8 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, goto done; } else { - if (!(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&rp->state))) + if (!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) + & erts_smp_atomic32_read_acqb(&rp->state))) goto done; } @@ -6695,7 +8741,7 @@ Eterm erts_get_process_priority(Process *p) { erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); - switch (state & ERTS_PSFLG_PRIO_MASK) { + switch (ERTS_PSFLGS_GET_USR_PRIO(state)) { case PRIORITY_MAX: return am_max; case PRIORITY_HIGH: return am_high; case PRIORITY_NORMAL: return am_normal; @@ -6718,18 +8764,68 @@ erts_set_process_priority(Process *p, Eterm value) } a = erts_smp_atomic32_read_nob(&p->state); - if (nprio == (a & ERTS_PSFLG_PRIO_MASK)) + if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a)) oprio = nprio; else { - erts_aint32_t e, n; + int slocked = 0; + erts_aint32_t e, n, aprio; + + if (a & ERTS_PSFLG_ACTIVE_SYS) { + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + slocked = 1; + } + do { - oprio = a & ERTS_PSFLG_PRIO_MASK; + oprio = ERTS_PSFLGS_GET_USR_PRIO(a); n = e = a; - ASSERT(!(a & ERTS_PSFLG_IN_RUNQ)); + if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DELAYED_SYS))) + aprio = nprio; + else { + int max_qbit; + + if (!slocked) { + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + slocked = 1; + } + + max_qbit = 0; + if (a & ERTS_PSFLG_ACTIVE_SYS) + max_qbit |= p->sys_task_qs->qmask; + if (a & ERTS_PSFLG_DELAYED_SYS) { + ErtsProcSysTaskQs *qs; + qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(p); + ASSERT(qs); + max_qbit |= qs->qmask; + } + max_qbit &= -max_qbit; + switch (max_qbit) { + case MAX_BIT: + aprio = PRIORITY_MAX; + break; + case HIGH_BIT: + aprio = PRIORITY_HIGH; + break; + case NORMAL_BIT: + aprio = PRIORITY_NORMAL; + break; + case LOW_BIT: + aprio = PRIORITY_LOW; + break; + default: + ERTS_INTERNAL_ERROR("Invalid qmask"); + aprio = -1; + } + + if (aprio > nprio) /* low value -> high prio */ + aprio = nprio; + } + + n &= ~(ERTS_PSFLGS_USR_PRIO_MASK + | ERTS_PSFLGS_ACT_PRIO_MASK); + n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET) + | (aprio << 
ERTS_PSFLGS_ACT_PRIO_OFFSET)); - n &= ~ERTS_PSFLG_PRIO_MASK; - n |= nprio; a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); } while (a != e); } @@ -6763,6 +8859,7 @@ erts_set_process_priority(Process *p, Eterm value) Process *schedule(Process *p, int calls) { + Process *proxy_p = NULL; ErtsRunQueue *rq; erts_aint_t dt; ErtsSchedulerData *esdp; @@ -6792,7 +8889,8 @@ Process *schedule(Process *p, int calls) input_reductions = INPUT_REDUCTIONS; } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()) + || !erts_thr_progress_is_blocking()); /* * Clean up after the process being scheduled out. @@ -6805,6 +8903,8 @@ Process *schedule(Process *p, int calls) actual_reds = reds = 0; erts_smp_runq_lock(rq); } else { + sched_out_proc: + #ifdef ERTS_SMP ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); esdp = p->scheduler_data; @@ -6858,10 +8958,11 @@ Process *schedule(Process *p, int calls) esdp->reductions += reds; - schedule_out_process(rq, state, p); /* Returns with rq locked! */ + schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */ + proxy_p = NULL; ERTS_PROC_REDUCTIONS_EXECUTED(rq, - (int) (state & ERTS_PSFLG_PRIO_MASK), + (int) ERTS_PSFLGS_GET_USR_PRIO(state), reds, actual_reds); @@ -6870,18 +8971,19 @@ Process *schedule(Process *p, int calls) p->scheduler_data = NULL; #endif + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); if (state & ERTS_PSFLG_FREE) { #ifdef ERTS_SMP ASSERT(esdp->free_process == p); esdp->free_process = NULL; -#else - erts_free_proc(p); +#else + state = erts_smp_atomic32_read_nob(&p->state); + if (!(state & ERTS_PSFLG_IN_RUNQ)) + erts_free_proc(p); #endif } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - #ifdef ERTS_SMP ASSERT(!esdp->free_process); #endif @@ -6899,45 +9001,42 @@ Process *schedule(Process *p, int calls) } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) + || !erts_thr_progress_is_blocking()); check_activities_to_run: { #ifdef ERTS_SMP ErtsMigrationPaths *mps; ErtsMigrationPath *mp; - -#ifdef ERTS_SMP - { - ErtsProcList *pnd_xtrs = rq->procs.pending_exiters; - if (erts_proclist_fetch(&pnd_xtrs, NULL)) { - rq->procs.pending_exiters = NULL; - erts_smp_runq_unlock(rq); - handle_pending_exiters(pnd_xtrs); - erts_smp_runq_lock(rq); - } - + ErtsProcList *pnd_xtrs = rq->procs.pending_exiters; + if (erts_proclist_fetch(&pnd_xtrs, NULL)) { + rq->procs.pending_exiters = NULL; + erts_smp_runq_unlock(rq); + handle_pending_exiters(pnd_xtrs); + erts_smp_runq_lock(rq); } -#endif - if (rq->check_balance_reds <= 0) - check_balance(rq); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (rq->check_balance_reds <= 0) + check_balance(rq); - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); - mps = erts_get_migration_paths_managed(); - mp = &mps->mpath[rq->ix]; + mps = erts_get_migration_paths_managed(); + mp = &mps->mpath[rq->ix]; - if (mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK) - immigrate(rq, mp); + if (mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK) + immigrate(rq, mp); + } + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); continue_check_activities_to_run: flags = ERTS_RUNQ_FLGS_GET_NOB(rq); continue_check_activities_to_run_known_flags: - + ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) + || flags & ERTS_RUNQ_FLG_NONEMPTY); if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) { 
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) { suspend_scheduler(esdp); flags = ERTS_RUNQ_FLGS_GET_NOB(rq); @@ -6948,22 +9047,32 @@ Process *schedule(Process *p, int calls) erts_sched_check_cpu_bind(esdp); } } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (ERTS_SCHEDULER_IS_DIRTY(esdp) + && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + & ERTS_SSI_FLG_SUSPENDED)) + suspend_scheduler(esdp); +#endif { erts_aint32_t aux_work; - int leader_update = erts_thr_progress_update(esdp); + int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 + : erts_thr_progress_update(esdp); aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); - if (aux_work | leader_update) { + if (aux_work | leader_update | ERTS_SCHED_FAIR) { erts_smp_runq_unlock(rq); if (leader_update) erts_thr_progress_leader_update(esdp); + else if (ERTS_SCHED_FAIR) + ERTS_SCHED_FAIR_YIELD(); if (aux_work) handle_aux_work(&esdp->aux_work_data, aux_work, 0); erts_smp_runq_lock(rq); } - } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) + || !erts_thr_progress_is_blocking()); + } ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); #else /* ERTS_SMP */ @@ -6977,6 +9086,17 @@ Process *schedule(Process *p, int calls) flags = ERTS_RUNQ_FLGS_GET_NOB(rq); +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) { + /* + * TODO: if halt in progress, need to put the dirty scheduler + * to sleep somewhere around here to prevent it from picking up + * new work + */ + } + else +#endif + if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start) || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) { /* Prepare for scheduler wait */ @@ -6986,20 +9106,16 @@ Process *schedule(Process *p, int calls) rq->wakeup_other = 0; rq->wakeup_other_reds = 0; - empty_runq(rq); - flags = ERTS_RUNQ_FLGS_GET_NOB(rq); - if (flags & ERTS_RUNQ_FLG_SUSPENDED) { - non_empty_runq(rq); + if (flags & ERTS_RUNQ_FLG_SUSPENDED) goto continue_check_activities_to_run_known_flags; - } - else if (!(flags & ERTS_RUNQ_FLG_INACTIVE)) { - if (try_steal_task(rq)) { - non_empty_runq(rq); + if (flags & ERTS_RUNQ_FLG_INACTIVE) + empty_runq(rq); + else { + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && try_steal_task(rq)) goto continue_check_activities_to_run; - } - (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); + empty_runq(rq); /* * Check for ERTS_RUNQ_FLG_SUSPENDED has to be done @@ -7008,10 +9124,10 @@ Process *schedule(Process *p, int calls) flags = ERTS_RUNQ_FLGS_GET_NOB(rq); if (flags & ERTS_RUNQ_FLG_SUSPENDED) { non_empty_runq(rq); + flags |= ERTS_RUNQ_FLG_NONEMPTY; goto continue_check_activities_to_run_known_flags; } } - #endif scheduler_wait(&fcalls, esdp, rq); @@ -7022,7 +9138,9 @@ Process *schedule(Process *p, int calls) goto check_activities_to_run; } - else if (fcalls > input_reductions && prepare_for_sys_schedule()) { + else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && + (fcalls > input_reductions && + prepare_for_sys_schedule(esdp))) { /* * Schedule system-level activities. */ @@ -7088,6 +9206,8 @@ Process *schedule(Process *p, int calls) * Find a new process to run. */ pick_next_process: { + erts_aint32_t psflg_band_mask; + erts_aint32_t running_flag; int prio_q; int qmask; @@ -7121,18 +9241,74 @@ Process *schedule(Process *p, int calls) ASSERT(p); /* Wrong qmask in rq->flags? 
*/ + psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state) + + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET)); + +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT((state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) != + (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)); + if (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) { + ASSERT((ERTS_SCHEDULER_IS_DIRTY_CPU(esdp) && (state & ERTS_PSFLG_DIRTY_CPU_PROC)) || + (ERTS_SCHEDULER_IS_DIRTY_IO(esdp) && (state & ERTS_PSFLG_DIRTY_IO_PROC))); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !(state & ERTS_PSFLG_ACTIVE_SYS)) + goto pick_next_process; + state &= ~(ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q); + } +#endif + + if (!(state & ERTS_PSFLG_PROXY)) + psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ; + else { + proxy_p = p; + p = erts_proc_lookup_raw(proxy_p->common.id); + if (!p) { + free_proxy_proc(proxy_p); + proxy_p = NULL; + goto pick_next_process; + } + state = erts_smp_atomic32_read_nob(&p->state); + } + + + if (state & ERTS_PSFLG_ACTIVE_SYS) + running_flag = ERTS_PSFLG_RUNNING_SYS; + else + running_flag = ERTS_PSFLG_RUNNING; + while (1) { erts_aint32_t exp, new, tmp; tmp = new = exp = state; - new &= ~ERTS_PSFLG_IN_RUNQ; - tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT); - if (tmp != ERTS_PSFLG_SUSPENDED) - new |= ERTS_PSFLG_RUNNING; + new &= psflg_band_mask; + if (!(state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS))) { + tmp = state & (ERTS_PSFLG_SUSPENDED + | ERTS_PSFLG_PENDING_EXIT + | ERTS_PSFLG_ACTIVE_SYS); + if (tmp != ERTS_PSFLG_SUSPENDED) + new |= running_flag; + } state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp); if (state == exp) { - tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT); - if (tmp == ERTS_PSFLG_SUSPENDED) + if ((state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_FREE)) + || ((state & (ERTS_PSFLG_SUSPENDED + | ERTS_PSFLG_PENDING_EXIT + | ERTS_PSFLG_ACTIVE_SYS)) + == ERTS_PSFLG_SUSPENDED)) { + if (state & ERTS_PSFLG_FREE) { +#ifdef ERTS_SMP + erts_smp_proc_dec_refc(p); +#else + erts_free_proc(p); +#endif + } + if (proxy_p) { + free_proxy_proc(proxy_p); + proxy_p = NULL; + } goto pick_next_process; + } state = new; break; } @@ -7162,7 +9338,11 @@ Process *schedule(Process *p, int calls) (UWord) esdp->no); int migrated = old && old != esdp->no; - prio = (int) (state & ERTS_PSFLG_PRIO_MASK); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); +#endif + + prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); erts_smp_spin_lock(&erts_sched_stat.lock); erts_sched_stat.prio[prio].total_executed++; @@ -7182,9 +9362,6 @@ Process *schedule(Process *p, int calls) ASSERT(!p->scheduler_data); p->scheduler_data = esdp; #endif - /* Never run a suspended process */ - ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state))); - reds = context_reds; if (IS_TRACED(p)) { @@ -7210,21 +9387,784 @@ Process *schedule(Process *p, int calls) erts_check_my_tracer_proc(p); #endif + if (state & ERTS_PSFLG_RUNNING_SYS) { + reds -= execute_sys_tasks(p, &state, reds); + if (reds <= 0 +#ifdef ERTS_DIRTY_SCHEDULERS + || (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) +#endif + ) { + p->fcalls = reds; + goto sched_out_proc; + } + + ASSERT(state & ERTS_PSFLG_RUNNING_SYS); + ASSERT(!(state & ERTS_PSFLG_RUNNING)); + + while (1) { + erts_aint32_t n, e; + + if (((state & (ERTS_PSFLG_SUSPENDED + | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE) + && !(state & ERTS_PSFLG_EXITING)) + goto sched_out_proc; + + n = e = state; + n &= 
~ERTS_PSFLG_RUNNING_SYS; + n |= ERTS_PSFLG_RUNNING; + + state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + if (state == e) { + state = n; + break; + } + + ASSERT(state & ERTS_PSFLG_RUNNING_SYS); + ASSERT(!(state & ERTS_PSFLG_RUNNING)); + } + } + if (!(state & ERTS_PSFLG_EXITING) && ((FLAGS(p) & F_FORCE_GC) || (MSO(p).overhead > BIN_VHEAP_SZ(p)))) { reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity); - if (reds < 0) { - reds = 1; + if (reds <= 0) { + p->fcalls = reds; + goto sched_out_proc; } } + + if (proxy_p) { + free_proxy_proc(proxy_p); + proxy_p = NULL; + } p->fcalls = reds; ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + + /* Never run a suspended process */ + ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state))); + return p; } } +static int +notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result) +{ + Process *rp = erts_proc_lookup(st->requester); + if (rp) { + ErtsProcLocks rp_locks; + ErlOffHeap *ohp; + ErlHeapFragment* bp; + Eterm *hp, msg, req_id, result; + Uint st_result_sz, hsz; +#ifdef DEBUG + Eterm *hp_start; +#endif + + rp_locks = (c_p == rp) ? ERTS_PROC_LOCK_MAIN : 0; + + st_result_sz = is_immed(st_result) ? 0 : size_object(st_result); + hsz = st->req_id_sz + st_result_sz + 4 /* 3-tuple */; + + hp = erts_alloc_message_heap(hsz, + &bp, + &ohp, + rp, + &rp_locks); + +#ifdef DEBUG + hp_start = hp; +#endif + + req_id = st->req_id_sz == 0 ? st->req_id : copy_struct(st->req_id, + st->req_id_sz, + &hp, + ohp); + + result = st_result_sz == 0 ? st_result : copy_struct(st_result, + st_result_sz, + &hp, + ohp); + + ASSERT(is_immed(st->reply_tag)); + + msg = TUPLE3(hp, st->reply_tag, req_id, result); + +#ifdef DEBUG + hp += 4; + ASSERT(hp_start + hsz == hp); +#endif + + erts_queue_message(rp, + &rp_locks, + bp, + msg, + NIL +#ifdef USE_VM_PROBES + , NIL +#endif + ); + + if (c_p == rp) + rp_locks &= ~ERTS_PROC_LOCK_MAIN; + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } + + erts_cleanup_offheap(&st->off_heap); + + erts_free(ERTS_ALC_T_PROC_SYS_TSK, st); + + return rp ? 
1 : 0; +} + +static ERTS_INLINE ErtsProcSysTask * +fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop) +{ + ErtsProcSysTaskQs *unused_qs = NULL; + int qbit, qmask; + ErtsProcSysTask *st, **qp; + + *priop = -1; /* Shut up annoying erroneous warning */ + + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + + if (!c_p->sys_task_qs) { + qmask = 0; + st = NULL; + goto update_state; + } + + qmask = c_p->sys_task_qs->qmask; + + if ((state & (ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_EXITING + | ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { + /* No sys tasks if we got exclusively higher prio user work to do */ + st = NULL; + switch (ERTS_PSFLGS_GET_USR_PRIO(state)) { + case PRIORITY_MAX: + if (!(qmask & MAX_BIT)) + goto done; + break; + case PRIORITY_HIGH: + if (!(qmask & (MAX_BIT|HIGH_BIT))) + goto done; + break; + default: + break; + } + } + + qbit = qmask & -qmask; + switch (qbit) { + case MAX_BIT: + qp = &c_p->sys_task_qs->q[PRIORITY_MAX]; + *priop = PRIORITY_MAX; + break; + case HIGH_BIT: + qp = &c_p->sys_task_qs->q[PRIORITY_HIGH]; + *priop = PRIORITY_HIGH; + break; + case NORMAL_BIT: + if (!(qmask & PRIORITY_LOW) + || ++c_p->sys_task_qs->ncount <= RESCHEDULE_LOW) { + qp = &c_p->sys_task_qs->q[PRIORITY_NORMAL]; + *priop = PRIORITY_NORMAL; + break; + } + c_p->sys_task_qs->ncount = 0; + /* Fall through */ + case LOW_BIT: + qp = &c_p->sys_task_qs->q[PRIORITY_LOW]; + *priop = PRIORITY_LOW; + break; + default: + ERTS_INTERNAL_ERROR("Invalid qmask"); + } + + st = *qp; + ASSERT(st); + if (st->next != st) { + *qp = st->next; + st->next->prev = st->prev; + st->prev->next = st->next; + } + else { + erts_aint32_t a, e, n, st_prio, qmask2; + + *qp = NULL; + qmask &= ~qbit; + c_p->sys_task_qs->qmask = qmask; + + update_state: + + qmask2 = qmask; + + if (state & ERTS_PSFLG_DELAYED_SYS) { + ErtsProcSysTaskQs *qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p); + ASSERT(qs); + qmask2 |= qs->qmask; + } + + switch (qmask2 & -qmask2) { + case MAX_BIT: + st_prio = PRIORITY_MAX; + break; + case HIGH_BIT: + st_prio = PRIORITY_HIGH; + break; + case NORMAL_BIT: + st_prio = PRIORITY_NORMAL; + break; + case LOW_BIT: + case 0: + st_prio = PRIORITY_LOW; + break; + default: + ERTS_INTERNAL_ERROR("Invalid qmask"); + } + + if (!qmask) { + unused_qs = c_p->sys_task_qs; + c_p->sys_task_qs = NULL; + } + + a = state; + do { + erts_aint32_t prio = ERTS_PSFLGS_GET_USR_PRIO(a); + + if (prio > st_prio) + prio = st_prio; + + n = e = a; + + n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; + n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET); + + if (!qmask) + n &= ~ERTS_PSFLG_ACTIVE_SYS; + + if (a == n) + break; + a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e); + } while (a != e); + } + +done: + + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + + if (unused_qs) + proc_sys_task_queues_free(unused_qs); + + *qmaskp = qmask; + + return st; +} + +static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio); + +static int +execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) +{ + int garbage_collected = 0; + erts_aint32_t state = *statep; + int max_reds = in_reds; + int reds = 0; + int qmask = 0; + + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + + do { + ErtsProcSysTask *st; + int st_prio; + Eterm st_res; + + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { +#ifdef ERTS_SMP + if (state & ERTS_PSFLG_PENDING_EXIT) + erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN); +#endif + ASSERT(ERTS_PROC_IS_EXITING(c_p)); + break; + } + + st = fetch_sys_task(c_p, state, &qmask, &st_prio); + if 
(!st) + break; + + switch (st->type) { + case ERTS_PSTT_GC: + if (c_p->flags & F_DISABLE_GC) { + save_gc_task(c_p, st, st_prio); + st = NULL; + reds++; + } + else { + if (!garbage_collected) { + FLAGS(c_p) |= F_NEED_FULLSWEEP; + reds += erts_garbage_collect(c_p, + 0, + c_p->arg_reg, + c_p->arity); + garbage_collected = 1; + } + st_res = am_true; + } + break; + case ERTS_PSTT_CPC: + st_res = erts_check_process_code(c_p, + st->arg[0], + st->arg[1] == am_true, + &reds); + if (is_non_value(st_res)) { + /* Needed gc, but gc was disabled */ + save_gc_task(c_p, st, st_prio); + st = NULL; + } + break; + default: + ERTS_INTERNAL_ERROR("Invalid process sys task type"); + st_res = am_false; + } + + if (st) + reds += notify_sys_task_executed(c_p, st, st_res); + + state = erts_smp_atomic32_read_acqb(&c_p->state); + } while (qmask && reds < max_reds); + + *statep = state; + + return reds; +} + +static int +cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) +{ + erts_aint32_t state = in_state; + int max_reds = in_reds; + int reds = 0; + int qmask = 0; + + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + + do { + ErtsProcSysTask *st; + Eterm st_res; + int st_prio; + + st = fetch_sys_task(c_p, state, &qmask, &st_prio); + if (!st) + break; + + switch (st->type) { + case ERTS_PSTT_GC: + st_res = am_false; + break; + case ERTS_PSTT_CPC: + st_res = am_false; + break; + default: + ERTS_INTERNAL_ERROR("Invalid process sys task type"); + st_res = am_false; + break; + } + + reds += notify_sys_task_executed(c_p, st, st_res); + + state = erts_smp_atomic32_read_acqb(&c_p->state); + } while (qmask && reds < max_reds); + + return reds; +} + +BIF_RETTYPE +erts_internal_request_system_task_3(BIF_ALIST_3) +{ + Process *rp = erts_proc_lookup(BIF_ARG_1); + ErtsProcSysTaskQs *stqs, *free_stqs = NULL; + ErtsProcSysTask *st = NULL; + erts_aint32_t prio, rp_state; + int rp_locked; + Eterm noproc_res, req_type; + + if (!rp && !is_internal_pid(BIF_ARG_1)) { + if (!is_external_pid(BIF_ARG_1)) + goto badarg; + if (external_pid_dist_entry(BIF_ARG_1) != erts_this_dist_entry) + goto badarg; + } + + switch (BIF_ARG_2) { + case am_max: prio = PRIORITY_MAX; break; + case am_high: prio = PRIORITY_HIGH; break; + case am_normal: prio = PRIORITY_NORMAL; break; + case am_low: prio = PRIORITY_LOW; break; + default: goto badarg; + } + + if (is_not_tuple(BIF_ARG_3)) + goto badarg; + else { + int i; + Eterm *tp = tuple_val(BIF_ARG_3); + Uint arity = arityval(*tp); + Eterm req_id; + Uint req_id_sz; + Eterm arg[ERTS_MAX_PROC_SYS_TASK_ARGS]; + Uint arg_sz[ERTS_MAX_PROC_SYS_TASK_ARGS]; + Uint tot_sz; + Eterm *hp; + + if (arity < 2) + goto badarg; + if (arity > 2 + ERTS_MAX_PROC_SYS_TASK_ARGS) + goto badarg; + req_type = tp[1]; + req_id = tp[2]; + req_id_sz = is_immed(req_id) ? req_id : size_object(req_id); + tot_sz = req_id_sz; + for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++) { + int tix = 3 + i; + if (tix > arity) { + arg[i] = THE_NON_VALUE; + arg_sz[i] = 0; + } + else { + arg[i] = tp[tix]; + if (is_immed(arg[i])) + arg_sz[i] = 0; + else { + arg_sz[i] = size_object(arg[i]); + tot_sz += arg_sz[i]; + } + } + } + st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK, + ERTS_PROC_SYS_TASK_SIZE(tot_sz)); + st->next = st->prev = st; /* Prep for empty prio queue */ + ERTS_INIT_OFF_HEAP(&st->off_heap); + hp = &st->heap[0]; + + st->requester = BIF_P->common.id; + st->reply_tag = req_type; + st->req_id_sz = req_id_sz; + st->req_id = req_id_sz == 0 ? 
req_id : copy_struct(req_id, + req_id_sz, + &hp, + &st->off_heap); + + for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++) + st->arg[i] = arg_sz[i] == 0 ? arg[i] : copy_struct(arg[i], + arg_sz[i], + &hp, + &st->off_heap); + ASSERT(&st->heap[0] + tot_sz == hp); + } + + switch (req_type) { + + case am_garbage_collect: + st->type = ERTS_PSTT_GC; + noproc_res = am_false; + if (!rp) + goto noproc; + break; + + case am_check_process_code: + if (is_not_atom(st->arg[0])) + goto badarg; + if (st->arg[1] != am_true && st->arg[1] != am_false) + goto badarg; + noproc_res = am_false; + st->type = ERTS_PSTT_CPC; + if (!rp) + goto noproc; + break; + + default: + goto badarg; + } + + rp_state = erts_smp_atomic32_read_nob(&rp->state); + + rp_locked = 0; + + free_stqs = NULL; + if (rp_state & ERTS_PSFLG_ACTIVE_SYS) + stqs = NULL; + else { + alloc_qs: + stqs = proc_sys_task_queues_alloc(); + stqs->qmask = 1 << prio; + stqs->ncount = 0; + stqs->q[PRIORITY_MAX] = NULL; + stqs->q[PRIORITY_HIGH] = NULL; + stqs->q[PRIORITY_NORMAL] = NULL; + stqs->q[PRIORITY_LOW] = NULL; + stqs->q[prio] = st; + } + + if (!rp_locked) { + rp_locked = 1; + erts_smp_proc_lock(rp, ERTS_PROC_LOCK_STATUS); + + rp_state = erts_smp_atomic32_read_nob(&rp->state); + if (rp_state & ERTS_PSFLG_EXITING) { + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + rp = NULL; + free_stqs = stqs; + goto noproc; + } + } + + if (!rp->sys_task_qs) { + if (stqs) + rp->sys_task_qs = stqs; + else + goto alloc_qs; + } + else { + if (stqs) + free_stqs = stqs; + stqs = rp->sys_task_qs; + if (!stqs->q[prio]) { + stqs->q[prio] = st; + stqs->qmask |= 1 << prio; + } + else { + st->next = stqs->q[prio]; + st->prev = stqs->q[prio]->prev; + st->next->prev = st; + st->prev->next = st; + ASSERT(stqs->qmask & (1 << prio)); + } + } + + if (ERTS_PSFLGS_GET_ACT_PRIO(rp_state) > prio) { + erts_aint32_t n, a, e; + /* Need to elevate actual prio */ + + a = rp_state; + do { + if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) { + n = a; + break; + } + n = e = a; + n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; + n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET); + a = erts_smp_atomic32_cmpxchg_nob(&rp->state, n, e); + } while (a != e); + rp_state = n; + } + + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + + schedule_process_sys_task(rp, rp_state, NULL); + + if (free_stqs) + proc_sys_task_queues_free(free_stqs); + + BIF_RET(am_ok); + +noproc: + + notify_sys_task_executed(BIF_P, st, noproc_res); + if (free_stqs) + proc_sys_task_queues_free(free_stqs); + BIF_RET(am_ok); + +badarg: + + if (st) { + erts_cleanup_offheap(&st->off_heap); + erts_free(ERTS_ALC_T_PROC_SYS_TSK, st); + } + if (free_stqs) + proc_sys_task_queues_free(free_stqs); + BIF_ERROR(BIF_P, BADARG); +} + +static void +save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) +{ + erts_aint32_t state; + ErtsProcSysTaskQs *qs; + + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + + qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p); + if (!qs) { + st->next = st->prev = st; + qs = proc_sys_task_queues_alloc(); + qs->qmask = 1 << prio; + qs->ncount = 0; + qs->q[PRIORITY_MAX] = NULL; + qs->q[PRIORITY_HIGH] = NULL; + qs->q[PRIORITY_NORMAL] = NULL; + qs->q[PRIORITY_LOW] = NULL; + qs->q[prio] = st; + (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, ERTS_PROC_LOCK_MAIN, qs); + } + else { + if (!qs->q[prio]) { + st->next = st->prev = st; + qs->q[prio] = st; + qs->qmask |= 1 << prio; + } + else { + st->next = qs->q[prio]; + st->prev = qs->q[prio]->prev; + st->next->prev = st; + st->prev->next = st; + ASSERT(qs->qmask & (1 << prio)); + } + } + 
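The system-task queues manipulated by fetch_sys_task and save_gc_task rest on two small idioms: a qmask word with one bit per priority level, where qmask & -qmask isolates the lowest set bit and therefore the highest-priority non-empty queue, and circular doubly linked rings in which a lone task links to itself and inserting in front of the head appends at the tail. A self-contained sketch of both, under hypothetical names (task, task_qs and friends are illustrative, not the ERTS types):

    #include <stddef.h>

    typedef struct task { struct task *next, *prev; } task;

    enum { PRIO_MAX, PRIO_HIGH, PRIO_NORMAL, PRIO_LOW, NPRIO };

    typedef struct {
        unsigned qmask;            /* bit i set <=> q[i] is non-empty */
        task *q[NPRIO];
    } task_qs;

    static void enqueue(task_qs *qs, task *st, int prio)
    {
        if (!qs->q[prio]) {
            st->next = st->prev = st;      /* singleton ring */
            qs->q[prio] = st;
            qs->qmask |= 1u << prio;
        } else {
            st->next = qs->q[prio];        /* before head == at tail */
            st->prev = qs->q[prio]->prev;
            st->next->prev = st;
            st->prev->next = st;
        }
    }

    static task *dequeue_highest(task_qs *qs)
    {
        unsigned qbit;
        int prio;
        task *st;

        if (!qs->qmask)
            return NULL;
        qbit = qs->qmask & -qs->qmask;     /* lowest bit = highest prio */
        for (prio = 0; !(qbit & (1u << prio)); prio++)
            ;
        st = qs->q[prio];
        if (st->next != st) {              /* unlink head from the ring */
            qs->q[prio] = st->next;
            st->next->prev = st->prev;
            st->prev->next = st->next;
        } else {                           /* last task at this prio */
            qs->q[prio] = NULL;
            qs->qmask &= ~qbit;
        }
        return st;
    }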
+ state = erts_smp_atomic32_read_nob(&c_p->state); + ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) & state); + + while (!(state & ERTS_PSFLG_DELAYED_SYS) + || prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) { + erts_aint32_t n, e; + + n = e = state; + n |= ERTS_PSFLG_DELAYED_SYS; + if (prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) { + n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; + n |= prio << ERTS_PSFLGS_ACT_PRIO_OFFSET; + } + state = erts_smp_atomic32_cmpxchg_relb(&c_p->state, n, e); + if (state == e) + break; + } +} + +int +erts_set_gc_state(Process *c_p, int enable) +{ + ErtsProcSysTaskQs *dgc_tsk_qs; + ASSERT(c_p == erts_get_current_process()); + ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) + & erts_smp_atomic32_read_nob(&c_p->state)); + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + + if (!enable) { + c_p->flags |= F_DISABLE_GC; + return 0; + } + + c_p->flags &= ~F_DISABLE_GC; + + dgc_tsk_qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p); + if (!dgc_tsk_qs) + return 0; + + /* Move delayed gc tasks into sys tasks queues. */ + + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + + if (!c_p->sys_task_qs) { + c_p->sys_task_qs = dgc_tsk_qs; + dgc_tsk_qs = NULL; + } + else { + ErtsProcSysTaskQs *stsk_qs; + int prio; + + /* + * We push delayed tasks to the front of the queue + * since they have already made it to the front + * once and then been delayed after that. + */ + + stsk_qs = c_p->sys_task_qs; + + while (dgc_tsk_qs->qmask) { + int qbit = dgc_tsk_qs->qmask & -dgc_tsk_qs->qmask; + dgc_tsk_qs->qmask &= ~qbit; + switch (qbit) { + case MAX_BIT: + prio = PRIORITY_MAX; + break; + case HIGH_BIT: + prio = PRIORITY_HIGH; + break; + case NORMAL_BIT: + prio = PRIORITY_NORMAL; + break; + case LOW_BIT: + prio = PRIORITY_LOW; + break; + default: + ERTS_INTERNAL_ERROR("Invalid qmask"); + prio = -1; + break; + } + + ASSERT(dgc_tsk_qs->q[prio]); + + if (!stsk_qs->q[prio]) { + stsk_qs->q[prio] = dgc_tsk_qs->q[prio]; + stsk_qs->qmask |= 1 << prio; + } + else { + ErtsProcSysTask *first1, *last1, *first2, *last2; + + ASSERT(stsk_qs->qmask & (1 << prio)); + first1 = dgc_tsk_qs->q[prio]; + last1 = first1->prev; + first2 = stsk_qs->q[prio]; + last2 = first1->prev; + + last1->next = first2; + first2->prev = last1; + + first1->prev = last2; + last2->next = first1; + + stsk_qs->q[prio] = first1; + } + + } + } + +#ifdef DEBUG + { + int qmask; + erts_aint32_t aprio, state = +#endif + + erts_smp_atomic32_read_bset_nob(&c_p->state, + (ERTS_PSFLG_DELAYED_SYS + | ERTS_PSFLG_ACTIVE_SYS), + ERTS_PSFLG_ACTIVE_SYS); + +#ifdef DEBUG + ASSERT(state & ERTS_PSFLG_DELAYED_SYS); + qmask = c_p->sys_task_qs->qmask; + aprio = ERTS_PSFLGS_GET_ACT_PRIO(state); + ASSERT(ERTS_PSFLGS_GET_USR_PRIO(state) >= aprio); + ASSERT((qmask & -qmask) >= (1 << aprio)); + } +#endif + + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + + (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, ERTS_PROC_LOCK_MAIN, NULL); + + if (dgc_tsk_qs) + proc_sys_task_queues_free(dgc_tsk_qs); + + return 1; +} + void erts_sched_stat_modify(int what) { @@ -7324,6 +10264,10 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) rq->misc.start = molp; rq->misc.end = molp; +#ifdef ERTS_SMP + non_empty_runq(rq); +#endif + erts_smp_runq_unlock(rq); smp_notify_inc_runq(rq); @@ -7506,7 +10450,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). * Check for errors. 
*/ - if (is_not_atom(mod) || is_not_atom(func) || ((arity = list_length(args)) < 0)) { + if (is_not_atom(mod) || is_not_atom(func) || ((arity = erts_list_length(args)) < 0)) { so->error_code = BADARG; goto error; } @@ -7521,7 +10465,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). prio = (erts_aint32_t) so->priority; } - state |= (prio & ERTS_PSFLG_PRIO_MASK); + state |= (((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_ACT_PRIO_OFFSET) + | ((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_USR_PRIO_OFFSET)); if (!rq) rq = erts_get_runq_proc(parent); @@ -7592,13 +10537,14 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->htop = p->heap; p->heap_sz = sz; p->catches = 0; - p->extra_root = NULL; p->bin_vheap_sz = p->min_vheap_size; p->bin_old_vheap_sz = p->min_vheap_size; p->bin_old_vheap = 0; p->bin_vheap_mature = 0; + p->sys_task_qs = NULL; + /* No need to initialize p->fcalls. */ p->current = p->initial+INITIAL_MOD; @@ -7764,7 +10710,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). * Schedule process for execution. */ - schedule_process(p, state, 0); + schedule_process(p, state); VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id)); @@ -7815,6 +10761,7 @@ void erts_init_empty_process(Process *p) p->bin_vheap_sz = BIN_VH_MIN_SIZE; p->bin_old_vheap_sz = BIN_VH_MIN_SIZE; p->bin_old_vheap = 0; + p->sys_task_qs = NULL; p->bin_vheap_mature = 0; #ifdef ERTS_SMP p->common.u.alive.ptimer = NULL; @@ -8070,33 +11017,21 @@ delete_process(Process* p) p->fvalue = NIL; } -static ERTS_INLINE erts_aint32_t -set_proc_exiting_state(Process *p, erts_aint32_t state) -{ - erts_aint32_t a, n, e; - a = state; - while (1) { - n = e = a; - n &= ~(ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT); - n |= ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE; - if (!(a & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING))) - n |= ERTS_PSFLG_IN_RUNQ; - a = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e); - if (a == e) - break; - } - return a; -} - static ERTS_INLINE void set_proc_exiting(Process *p, - erts_aint32_t state, + erts_aint32_t in_state, Eterm reason, ErlHeapFragment *bp) { + erts_aint32_t state = in_state, enq_prio = -1; + int enqueue; ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); - state = set_proc_exiting_state(p, state); + enqueue = change_proc_schedule_state(p, + ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT, + ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE, + &state, + &enq_prio); p->fvalue = reason; if (bp) @@ -8111,15 +11046,37 @@ set_proc_exiting(Process *p, cancel_timer(p); p->i = (BeamInstr *) beam_exit; - if (erts_system_profile_flags.runnable_procs - && !(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) { - profile_runnable_proc(p, am_active); - } - - if (!(state & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING))) - add2runq(p, state); + if (enqueue) + add2runq(enqueue > 0 ? 
p : make_proxy_proc(NULL, p, enq_prio), + state, + enq_prio); } +static ERTS_INLINE erts_aint32_t +set_proc_self_exiting(Process *c_p) +{ +#ifdef DEBUG + int enqueue; +#endif + erts_aint32_t state, enq_prio = -1; + + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); + + state = erts_smp_atomic32_read_nob(&c_p->state); + ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)); + +#ifdef DEBUG + enqueue = +#endif + change_proc_schedule_state(c_p, + ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT, + ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE, + &state, + &enq_prio); + + ASSERT(!enqueue); + return state; +} #ifdef ERTS_SMP @@ -8167,7 +11124,7 @@ handle_pending_exiters(ErtsProcList *pnd_xtrs) if (p) { if (erts_proclist_same(plp, p)) { erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); - if (!(state & ERTS_PSFLG_RUNNING)) { + if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) { ASSERT(state & ERTS_PSFLG_PENDING_EXIT); erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); } @@ -8196,8 +11153,15 @@ save_pending_exiter(Process *p) erts_proclist_store_last(&rq->procs.pending_exiters, plp); + non_empty_runq(rq); + erts_smp_runq_unlock(rq); - wake_scheduler(rq, 1); +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) + wake_dirty_schedulers(rq, 0); + else +#endif + wake_scheduler(rq); } #endif @@ -8388,7 +11352,7 @@ send_exit_signal(Process *c_p, /* current process if and only } set_proc_exiting(c_p, state, rsn, NULL); } - else if (!(state & ERTS_PSFLG_RUNNING)) { + else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) { /* Process not running ... */ ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL; if (need_locks @@ -8765,9 +11729,6 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) void erts_do_exit_process(Process* p, Eterm reason) { -#ifdef ERTS_SMP - erts_aint32_t state; -#endif p->arity = 0; /* No live registers */ p->fvalue = reason; @@ -8792,10 +11753,9 @@ erts_do_exit_process(Process* p, Eterm reason) #endif #ifndef ERTS_SMP - set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state)); + set_proc_self_exiting(p); #else - state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state)); - if (state & ERTS_PSFLG_PENDING_EXIT) { + if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) { /* Process exited before pending exit was received... 
*/ p->pending_exit.reason = THE_NON_VALUE; if (p->pending_exit.bp) { @@ -8849,6 +11809,7 @@ erts_continue_exit_process(Process *p) DistEntry *dep; struct saved_calls *scb; process_breakpoint_time_t *pbt; + erts_aint32_t state; #ifdef DEBUG int yield_allowed = 1; @@ -8885,6 +11846,13 @@ erts_continue_exit_process(Process *p) p->flags &= ~F_USING_DB; } + erts_set_gc_state(p, 1); + state = erts_smp_atomic32_read_acqb(&p->state); + if (state & ERTS_PSFLG_ACTIVE_SYS) { + if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2) + goto yield; + } + if (p->flags & F_USING_DDLL) { erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN); p->flags &= ~F_USING_DDLL; @@ -8962,17 +11930,31 @@ erts_continue_exit_process(Process *p) { /* Inactivate and notify free */ erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state); +#ifdef ERTS_SMP + int refc_inced = 0; +#endif while (1) { n = e = a; ASSERT(a & ERTS_PSFLG_EXITING); n |= ERTS_PSFLG_FREE; n &= ~ERTS_PSFLG_ACTIVE; +#ifdef ERTS_SMP + if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) { + erts_smp_proc_inc_refc(p); + refc_inced = 1; + } +#endif a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } - } +#ifdef ERTS_SMP + if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ)) + erts_smp_proc_dec_refc(p); +#endif + } + dep = ((p->flags & F_DISTRIBUTION) ? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL) : NULL); @@ -9025,12 +12007,6 @@ erts_continue_exit_process(Process *p) if (pbt) erts_free(ERTS_ALC_T_BPD, (void *) pbt); - if (p->extra_root != NULL) { - (p->extra_root->cleanup)(p->extra_root); /* Should deallocate - whole structure */ - p->extra_root = NULL; - } - delete_process(p); #ifdef ERTS_SMP @@ -9075,7 +12051,7 @@ timeout_proc(Process* p) state = erts_smp_atomic32_read_acqb(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) - schedule_process(p, state, 0); + schedule_process(p, state); } @@ -9153,7 +12129,9 @@ erts_program_counter_info(int to, void *to_arg, Process *p) print_function_from_pc(to, to_arg, p->cp); erts_print(to, to_arg, ")\n"); state = erts_smp_atomic32_read_acqb(&p->state); - if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_GC))) { + if (!(state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_GC))) { erts_print(to, to_arg, "arity = %d\n",p->arity); if (!ERTS_IS_CRASH_DUMPING) { /* @@ -9241,6 +12219,10 @@ void erl_halt(int code) if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress, erts_no_schedulers, -1)) { +#ifdef ERTS_DIRTY_SCHEDULERS + ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1; + ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1; +#endif erts_halt_code = code; notify_reap_ports_relb(); } diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 8d136f6e8b..ed6dadbffa 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -70,7 +70,14 @@ typedef struct process Process; struct ErtsNodesMonitor_; +#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 0 +#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT 0 + #define ERTS_MAX_NO_OF_SCHEDULERS 1024 +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS ERTS_MAX_NO_OF_SCHEDULERS +#define ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS ERTS_MAX_NO_OF_SCHEDULERS +#endif #define ERTS_DEFAULT_MAX_PROCESSES (1 << 18) @@ -98,7 +105,12 @@ struct saved_calls { extern Export exp_send, exp_receive, exp_timeout; extern int erts_sched_compact_load; +extern int erts_sched_balance_util; extern Uint erts_no_schedulers; +#ifdef ERTS_DIRTY_SCHEDULERS +extern Uint erts_no_dirty_cpu_schedulers; +extern Uint erts_no_dirty_io_schedulers; +#endif extern Uint erts_no_run_queues; extern int erts_sched_thread_suggested_stack_size; #define ERTS_SCHED_THREAD_MIN_STACK_SIZE 4 /* Kilo words */ @@ -198,6 +210,10 @@ extern int erts_sched_thread_suggested_stack_size; #define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \ ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \ (erts_aint32_t) (FLGS))) +#define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \ + ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + (erts_aint32_t) (MSK), \ + (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \ ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \ (erts_aint32_t) ~(FLGS))) @@ -267,6 +283,13 @@ typedef enum { typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo; +#ifdef ERTS_DIRTY_SCHEDULERS +typedef struct { + erts_smp_spinlock_t lock; + ErtsSchedulerSleepInfo *list; +} ErtsSchedulerSleepList; +#endif + struct ErtsSchedulerSleepInfo_ { #ifdef ERTS_SMP ErtsSchedulerSleepInfo *next; @@ -316,9 +339,40 @@ typedef struct { int reds; } ErtsRunQueueInfo; + +#ifdef HAVE_GETHRTIME +# undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT +# define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1 +#endif + #ifdef ERTS_SMP +#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT +#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT + +#ifdef ARCH_64 +typedef erts_atomic_t ErtsAtomicSchedTime; +#elif defined(ARCH_32) +typedef erts_dw_atomic_t ErtsAtomicSchedTime; +#else +# error :-/ +#endif + +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT +typedef struct { + ErtsAtomicSchedTime last; + struct { + Uint64 short_interval; + Uint64 long_interval; + } worktime; + int is_working; +} ErtsRunQueueSchedUtil; +#endif + typedef struct { +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + int sched_util; +#endif Uint32 flags; ErtsRunQueue *misc_evac_runq; struct { @@ -348,6 +402,12 @@ struct ErtsRunQueue_ { erts_smp_mtx_t mtx; erts_smp_cnd_t cnd; +#ifdef ERTS_DIRTY_SCHEDULERS +#ifdef ERTS_SMP + ErtsSchedulerSleepList sleepers; +#endif +#endif + ErtsSchedulerData *scheduler; int waiting; /* < 0 in sys schedule; > 0 on cnd variable */ int woken; @@ -385,6 +445,9 @@ struct ErtsRunQueue_ { Port *start; Port *end; } ports; +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + ErtsRunQueueSchedUtil sched_util; +#endif }; #ifdef ERTS_SMP @@ -414,6 +477,7 @@ do { \ } while (0) typedef struct { + int need; /* "+sbu true" or scheduler_wall_time enabled */ int enabled; Uint64 start; struct { @@ -479,6 +543,21 @@ typedef struct { #endif } ErtsAuxWorkData; +#ifdef ERTS_DIRTY_SCHEDULERS +typedef enum { + ERTS_DIRTY_CPU_SCHEDULER, + ERTS_DIRTY_IO_SCHEDULER +} ErtsDirtySchedulerType; + 
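The ErtsDirtySchedId union that follows packs the dirty-scheduler type and number into one word, so an identity can be stored, copied and tested as a single value (ERTS_SCHEDULER_IS_DIRTY, defined later in this file, just checks num != 0). A standalone sketch of the same bitfield-union packing, with hypothetical names and a plain unsigned in place of ERTS' Uint:

    #include <stdio.h>

    typedef enum { SCHED_DIRTY_CPU = 0, SCHED_DIRTY_IO = 1 } dirty_type;

    typedef union {
        struct {
            unsigned type : 1;     /* SCHED_DIRTY_CPU or SCHED_DIRTY_IO */
            unsigned num  : 31;    /* scheduler number; 0 = not dirty */
        } s;
        unsigned no;               /* the packed id as one word */
    } dirty_sched_id;

    int main(void)
    {
        dirty_sched_id id = { .no = 0 };
        id.s.type = SCHED_DIRTY_IO;
        id.s.num  = 3;

        dirty_sched_id copy;
        copy.no = id.no;           /* whole identity moves as one word */

        printf("dirty=%d type=%u num=%u\n",
               copy.s.num != 0,
               (unsigned) copy.s.type,
               (unsigned) copy.s.num);
        return 0;
    }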
+typedef union { + struct { + ErtsDirtySchedulerType type: 1; + unsigned num: 31; + } s; + Uint no; +} ErtsDirtySchedId; +#endif + struct ErtsSchedulerData_ { /* * Keep X registers first (so we get as many low @@ -499,12 +578,14 @@ struct ErtsSchedulerData_ { Eterm tmp_heap[TMP_HEAP_SIZE]; int num_tmp_heap_used; Eterm beam_emu_tmp_heap[BEAM_EMU_TMP_HEAP_SIZE]; - Eterm cmp_tmp_heap[CMP_TMP_HEAP_SIZE]; Eterm erl_arith_tmp_heap[ERL_ARITH_TMP_HEAP_SIZE]; #endif ErtsSchedulerSleepInfo *ssi; Process *current_process; - Uint no; /* Scheduler number */ + Uint no; /* Scheduler number for normal schedulers */ +#ifdef ERTS_DIRTY_SCHEDULERS + ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */ +#endif Port *current_port; ErtsRunQueue *run_queue; int virtual_reds; @@ -531,17 +612,34 @@ typedef union { } ErtsAlignedSchedulerData; extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data; +#ifdef ERTS_DIRTY_SCHEDULERS +extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data; +extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data; +#endif #ifndef ERTS_SMP extern ErtsSchedulerData *erts_scheduler_data; #endif +#ifdef ERTS_SCHED_FAIR +#define ERTS_SCHED_FAIR_YIELD() ETHR_YIELD() +#else +#define ERTS_SCHED_FAIR 0 +#define ERTS_SCHED_FAIR_YIELD() +#endif + #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) int erts_smp_lc_runq_is_locked(ErtsRunQueue *); #endif #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS +#ifdef ERTS_SMP +void erts_empty_runq(ErtsRunQueue *rq); +void erts_non_empty_runq(ErtsRunQueue *rq); +#endif + + /* * Run queue locked during modifications. We use atomic ops since * other threads peek at values without run queue lock. @@ -574,6 +672,10 @@ erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) erts_smp_atomic32_set_relb(&rqi->len, len); +#ifdef ERTS_SMP + if (rq->len == 0) + erts_non_empty_runq(rq); +#endif rq->len++; if (rq->max_len < rq->len) rq->max_len = len; @@ -631,8 +733,14 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) #define ERTS_PSD_SCHED_ID 2 #define ERTS_PSD_DIST_ENTRY 3 #define ERTS_PSD_CALL_TIME_BP 4 +#define ERTS_PSD_DELAYED_GC_TASK_QS 5 +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6 -#define ERTS_PSD_SIZE 5 +#define ERTS_PSD_SIZE 7 +#else +#define ERTS_PSD_SIZE 6 +#endif typedef struct { void *data[ERTS_PSD_SIZE]; @@ -656,6 +764,14 @@ typedef struct { #define ERTS_PSD_CALL_TIME_BP_GET_LOCKS ERTS_PROC_LOCK_MAIN #define ERTS_PSD_CALL_TIME_BP_SET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN + +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN +#endif + typedef struct { ErtsProcLocks get_locks; ErtsProcLocks set_locks; @@ -688,6 +804,9 @@ typedef struct { ErlHeapFragment *bp; } ErtsPendExit; +typedef struct ErtsProcSysTask_ ErtsProcSysTask; +typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs; + #ifdef ERTS_SMP typedef struct ErtsPendingSuspend_ ErtsPendingSuspend; @@ -704,13 +823,6 @@ struct ErtsPendingSuspend_ { #endif -typedef struct ErlExtraRootSet_ ErlExtraRootSet; -struct ErlExtraRootSet_ { - Eterm *objv; - Uint sz; - void (*cleanup)(ErlExtraRootSet *); -}; - /* Defines to ease the change of memory architecture */ # define HEAP_START(p) (p)->heap # define HEAP_TOP(p) (p)->htop @@ -804,8 +916,6 @@ struct process { 
ErlMessageQueue msg; /* Message queue */ - ErlExtraRootSet *extra_root; /* Used by trapping BIF's */ - union { ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */ void *terminate; @@ -855,6 +965,8 @@ struct process { Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */ Uint64 bin_old_vheap; /* Virtual old heap size for binaries */ + ErtsProcSysTaskQs *sys_task_qs; + erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ #ifdef ERTS_SMP @@ -924,24 +1036,73 @@ void erts_check_for_holes(Process* p); # error "Need to increase ERTS_PSFLG_PRIO_SHIFT" #endif -#define ERTS_PSFLG_PRIO_SHIFT 2 +#define ERTS_PSFLGS_PRIO_BITS 2 +#define ERTS_PSFLGS_PRIO_MASK \ + ((((erts_aint32_t) 1) << ERTS_PSFLGS_PRIO_BITS) - 1) -#define ERTS_PSFLG_BIT(N) \ - (((erts_aint32_t) 1) << (ERTS_PSFLG_PRIO_SHIFT + (N))) +#define ERTS_PSFLGS_ACT_PRIO_OFFSET (0*ERTS_PSFLGS_PRIO_BITS) +#define ERTS_PSFLGS_USR_PRIO_OFFSET (1*ERTS_PSFLGS_PRIO_BITS) +#define ERTS_PSFLGS_PRQ_PRIO_OFFSET (2*ERTS_PSFLGS_PRIO_BITS) +#define ERTS_PSFLGS_ZERO_BIT_OFFSET (3*ERTS_PSFLGS_PRIO_BITS) -#define ERTS_PSFLG_PRIO_MASK (ERTS_PSFLG_BIT(0) - 1) +#define ERTS_PSFLGS_QMASK_BITS 4 +#define ERTS_PSFLGS_QMASK \ + ((((erts_aint32_t) 1) << ERTS_PSFLGS_QMASK_BITS) - 1) +#define ERTS_PSFLGS_IN_PRQ_MASK_OFFSET \ + ERTS_PSFLGS_ZERO_BIT_OFFSET -#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(0) -#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(1) -#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(2) -#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(3) -#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(4) -#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(5) -#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(6) -#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(7) -#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(8) -#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(9) +#define ERTS_PSFLG_BIT(N) \ + (((erts_aint32_t) 1) << (ERTS_PSFLGS_ZERO_BIT_OFFSET + (N))) +/* + * ACT_PRIO -> Active prio, i.e., currently active prio. This + * prio may be higher than user prio. + * USR_PRIO -> User prio. i.e., prio the user has set. + * PRQ_PRIO -> Prio queue prio, i.e., prio queue currently + * enqueued in. 
+ */ +#define ERTS_PSFLGS_ACT_PRIO_MASK \ + (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_ACT_PRIO_OFFSET) +#define ERTS_PSFLGS_USR_PRIO_MASK \ + (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_USR_PRIO_OFFSET) +#define ERTS_PSFLGS_PRQ_PRIO_MASK \ + (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_PRQ_PRIO_OFFSET) +#define ERTS_PSFLG_IN_PRQ_MAX ERTS_PSFLG_BIT(0) +#define ERTS_PSFLG_IN_PRQ_HIGH ERTS_PSFLG_BIT(1) +#define ERTS_PSFLG_IN_PRQ_NORMAL ERTS_PSFLG_BIT(2) +#define ERTS_PSFLG_IN_PRQ_LOW ERTS_PSFLG_BIT(3) +#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(4) +#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(5) +#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(6) +#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(7) +#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(8) +#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(9) +#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(10) +#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(11) +#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(12) +#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(13) +#define ERTS_PSFLG_ACTIVE_SYS ERTS_PSFLG_BIT(14) +#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15) +#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16) +#define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(18) +#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(19) +#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(20) +#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(21) +#endif + +#define ERTS_PSFLGS_IN_PRQ_MASK (ERTS_PSFLG_IN_PRQ_MAX \ + | ERTS_PSFLG_IN_PRQ_HIGH \ + | ERTS_PSFLG_IN_PRQ_NORMAL \ + | ERTS_PSFLG_IN_PRQ_LOW) + +#define ERTS_PSFLGS_GET_ACT_PRIO(PSFLGS) \ + (((PSFLGS) >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK) +#define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \ + (((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK) +#define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \ + (((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK) /* The sequential tracing token is a tuple of size 5: * @@ -1056,6 +1217,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags; #define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */ #define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */ #define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */ +#define F_DISABLE_GC (1 << 11) /* Disable GC */ /* process trace_flags */ #define F_SENSITIVE (1 << 0) @@ -1134,18 +1296,71 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags; (p)->flags &= ~F_TIMO; \ } while (0) +#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP) +#define ERTS_NUM_DIRTY_RUNQS 2 +#else +#define ERTS_NUM_DIRTY_RUNQS 0 +#endif + #define ERTS_RUNQ_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues), \ &erts_aligned_run_queues[(IX)].runq) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_RUNQ_IX_IS_DIRTY(IX) \ + (-(ERTS_NUM_DIRTY_RUNQS) <= (IX) && (IX) < 0) +#define ERTS_DIRTY_RUNQ_IX(IX) \ + (ASSERT(ERTS_RUNQ_IX_IS_DIRTY(IX)), \ + &erts_aligned_run_queues[(IX)].runq) +#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq) +#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq) +#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1) +#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2) +#else +#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 +#endif #define ERTS_SCHEDULER_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \ &erts_aligned_scheduler_data[(IX)].esd) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_DIRTY_CPU_SCHEDULER_IX(IX) \ + (ASSERT(0 <= (IX) && (IX) < 
erts_no_dirty_cpu_schedulers), \ + &erts_aligned_dirty_cpu_scheduler_data[(IX)].esd) +#define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \ + (ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \ + &erts_aligned_dirty_io_scheduler_data[(IX)].esd) +#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \ + ((ESDP)->dirty_no.s.num) +#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \ + ((ESDP)->dirty_no.s.type) +#ifdef ERTS_SMP +#define ERTS_SCHEDULER_IS_DIRTY(ESDP) \ + ((ESDP)->dirty_no.s.num != 0) +#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \ + ((ESDP)->dirty_no.s.type == 0) +#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \ + ((ESDP)->dirty_no.s.type == 1) +#else +#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0 +#endif +#else +#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 +#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0 +#endif void erts_pre_init_process(void); void erts_late_init_process(void); void erts_early_init_scheduling(int); -void erts_init_scheduling(int, int); +void erts_init_scheduling(int, int +#ifdef ERTS_DIRTY_SCHEDULERS + , int, int, int +#endif + ); +int erts_set_gc_state(Process *c_p, int enable); Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable); Eterm erts_gc_info_request(Process *c_p); Uint64 erts_get_proc_interval(void); @@ -1341,14 +1556,20 @@ int erts_dbg_check_halloc_lock(Process *p); void erts_dbg_multi_scheduling_return_trap(Process *, Eterm); #endif int erts_get_max_no_executing_schedulers(void); -#ifdef ERTS_SMP +#if defined(ERTS_SMP) || defined(ERTS_DIRTY_SCHEDULERS) ErtsSchedSuspendResult -erts_schedulers_state(Uint *, Uint *, Uint *, int); +erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, int); +#endif +#ifdef ERTS_SMP ErtsSchedSuspendResult erts_set_schedulers_online(Process *p, ErtsProcLocks plocks, Sint new_no, - Sint *old_no); + Sint *old_no +#ifdef ERTS_DIRTY_SCHEDULERS + , int dirty_only +#endif + ); ErtsSchedSuspendResult erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int); int erts_is_multi_scheduling_blocked(void); @@ -1461,7 +1682,7 @@ do { \ ErtsSchedulerData *esdp__ = ((P) \ ? 
ERTS_PROC_GET_SCHDATA((Process *) (P)) \ : erts_get_scheduler_data()); \ - if (esdp__) \ + if (esdp__ && !ERTS_SCHEDULER_IS_DIRTY(esdp__)) \ esdp__->verify_unused_temp_alloc( \ esdp__->verify_unused_temp_alloc_data); \ } while (0) @@ -1591,6 +1812,18 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data) #define ERTS_PROC_SET_CALL_TIME(P, L, PBT) \ ((process_breakpoint_time_t *) erts_psd_set((P), (L), ERTS_PSD_CALL_TIME_BP, (void *) (PBT))) +#define ERTS_PROC_GET_DELAYED_GC_TASK_QS(P) \ + ((ErtsProcSysTaskQs *) erts_psd_get((P), ERTS_PSD_DELAYED_GC_TASK_QS)) +#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \ + ((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT))) + +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \ + ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT)) +#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \ + ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE))) +#endif + ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p); ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, @@ -1636,6 +1869,13 @@ erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler) extern erts_atomic_t erts_migration_paths; +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT +int erts_get_sched_util(ErtsRunQueue *rq, + int initially_locked, + int short_interval); +#endif + + ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths_managed(void); ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths(void); ERTS_GLB_INLINE ErtsRunQueue *erts_check_emigration_need(ErtsRunQueue *c_rq, @@ -1687,22 +1927,36 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio) return mp->prio[prio].runq; } - - if (prio == ERTS_PORT_PRIO_LEVEL) - len = RUNQ_READ_LEN(&c_rq->ports.info.len); +#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT + if (mp->sched_util) { + ErtsRunQueue *rq = mp->prio[prio].runq; + /* No migration if other is non-empty */ + if (!(ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) + && erts_get_sched_util(rq, 0, 1) < mp->prio[prio].limit.other + && erts_get_sched_util(c_rq, 0, 1) > mp->prio[prio].limit.this) { + return rq; + } + } else - len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len); - - if (len > mp->prio[prio].limit.this) { - ErtsRunQueue *n_rq = mp->prio[prio].runq; - if (n_rq) { - if (prio == ERTS_PORT_PRIO_LEVEL) - len = RUNQ_READ_LEN(&n_rq->ports.info.len); - else - len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len); - - if (len < mp->prio[prio].limit.other) - return n_rq; +#endif + { + + if (prio == ERTS_PORT_PRIO_LEVEL) + len = RUNQ_READ_LEN(&c_rq->ports.info.len); + else + len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len); + + if (len > mp->prio[prio].limit.this) { + ErtsRunQueue *n_rq = mp->prio[prio].runq; + if (n_rq) { + if (prio == ERTS_PORT_PRIO_LEVEL) + len = RUNQ_READ_LEN(&n_rq->ports.info.len); + else + len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len); + + if (len < mp->prio[prio].limit.other) + return n_rq; + } } } } @@ -1763,7 +2017,12 @@ Uint erts_get_scheduler_id(void) { #ifdef ERTS_SMP ErtsSchedulerData *esdp = erts_get_scheduler_data(); - return esdp ? esdp->no : (Uint) 0; +#ifdef ERTS_DIRTY_SCHEDULERS + if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp)) + return 0; + else +#endif + return esdp ? esdp->no : (Uint) 0; #else return erts_get_scheduler_data() ? 
(Uint) 1 : (Uint) 0; #endif @@ -1793,12 +2052,6 @@ erts_get_runq_current(ErtsSchedulerData *esdp) #endif } -#ifdef ERTS_ENABLE_LOCK_COUNT - -#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__) - -#else - ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq) { @@ -1807,6 +2060,10 @@ erts_smp_runq_lock(ErtsRunQueue *rq) #endif } +#ifdef ERTS_ENABLE_LOCK_COUNT + +#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__) + #endif ERTS_GLB_INLINE int diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c index a611b52af2..23e5bf737f 100644 --- a/erts/emulator/beam/erl_process_dict.c +++ b/erts/emulator/beam/erl_process_dict.c @@ -659,7 +659,7 @@ static void shrink(Process *p, Eterm* ret) } else { int needed = 4; if (is_list(hi) && is_list(lo)) { - needed = 2*list_length(hi); + needed = 2*erts_list_length(hi); } if (HeapWordsLeft(p) < needed) { BUMP_REDS(p, erts_garbage_collect(p, needed, ret, 1)); diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index 2db5df06b4..82cc68222d 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -117,7 +117,7 @@ erts_init_proc_lock(int cpus) for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) { #ifdef ERTS_ENABLE_LOCK_COUNT erts_mtx_init_x(&erts_pix_locks[i].u.mtx, - "pix_lock", make_small(i)); + "pix_lock", make_small(i), 1); #else erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock"); #endif @@ -901,7 +901,7 @@ erts_pid2proc_opt(Process *c_p, busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks); #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK) - erts_proc_lc_trylock(proc, need_locks, !busy); + erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__); #endif #ifdef ERTS_PROC_LOCK_DEBUG if (!busy) @@ -1001,8 +1001,8 @@ erts_pid2proc_opt(Process *c_p, void erts_proc_lock_init(Process *p) { -#if ERTS_PROC_LOCK_OWN_IMPL int i; +#if ERTS_PROC_LOCK_OWN_IMPL /* We always start with all locks locked */ #if ERTS_PROC_LOCK_ATOMIC_IMPL erts_smp_atomic32_init_nob(&p->lock.flags, @@ -1013,25 +1013,33 @@ erts_proc_lock_init(Process *p) for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) p->lock.queue[i] = NULL; #ifdef ERTS_ENABLE_LOCK_CHECK - erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1); + erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1,__FILE__,__LINE__); #endif #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL - erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id); + +#ifdef ERTS_ENABLE_LOCK_COUNT + int do_lock_count = 1; +#else + int do_lock_count = 0; +#endif + + erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count); ethr_mutex_lock(&p->lock.main.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.main.lc); #endif - erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id); + erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count); ethr_mutex_lock(&p->lock.link.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.link.lc); #endif - erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id); + erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count); ethr_mutex_lock(&p->lock.msgq.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.msgq.lc); #endif - erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id); + erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id, + do_lock_count); ethr_mutex_lock(&p->lock.status.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.status.lc); @@ 
-1210,50 +1218,51 @@ void erts_lcnt_enable_proc_lock_count(int enable) #if ERTS_PROC_LOCK_OWN_IMPL void -erts_proc_lc_lock(Process *p, ErtsProcLocks locks) +erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line) { erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_MAIN) { lck.id = lc_id.proc_lock_main; - erts_lc_lock(&lck); + erts_lc_lock_x(&lck,file,line); } if (locks & ERTS_PROC_LOCK_LINK) { lck.id = lc_id.proc_lock_link; - erts_lc_lock(&lck); + erts_lc_lock_x(&lck,file,line); } if (locks & ERTS_PROC_LOCK_MSGQ) { lck.id = lc_id.proc_lock_msgq; - erts_lc_lock(&lck); + erts_lc_lock_x(&lck,file,line); } if (locks & ERTS_PROC_LOCK_STATUS) { lck.id = lc_id.proc_lock_status; - erts_lc_lock(&lck); + erts_lc_lock_x(&lck,file,line); } } void -erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked) +erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked, + char* file, unsigned int line) { erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_MAIN) { lck.id = lc_id.proc_lock_main; - erts_lc_trylock(locked, &lck); + erts_lc_trylock_x(locked, &lck, file, line); } if (locks & ERTS_PROC_LOCK_LINK) { lck.id = lc_id.proc_lock_link; - erts_lc_trylock(locked, &lck); + erts_lc_trylock_x(locked, &lck, file, line); } if (locks & ERTS_PROC_LOCK_MSGQ) { lck.id = lc_id.proc_lock_msgq; - erts_lc_trylock(locked, &lck); + erts_lc_trylock_x(locked, &lck, file, line); } if (locks & ERTS_PROC_LOCK_STATUS) { lck.id = lc_id.proc_lock_status; - erts_lc_trylock(locked, &lck); + erts_lc_trylock_x(locked, &lck, file, line); } } @@ -1319,7 +1328,8 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks) } void -erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks) +erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file, + unsigned int line) { #if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, @@ -1327,29 +1337,29 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks) ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_MAIN) { lck.id = lc_id.proc_lock_main; - erts_lc_require_lock(&lck); + erts_lc_require_lock(&lck, file, line); } if (locks & ERTS_PROC_LOCK_LINK) { lck.id = lc_id.proc_lock_link; - erts_lc_require_lock(&lck); + erts_lc_require_lock(&lck, file, line); } if (locks & ERTS_PROC_LOCK_MSGQ) { lck.id = lc_id.proc_lock_msgq; - erts_lc_require_lock(&lck); + erts_lc_require_lock(&lck, file, line); } if (locks & ERTS_PROC_LOCK_STATUS) { lck.id = lc_id.proc_lock_status; - erts_lc_require_lock(&lck); + erts_lc_require_lock(&lck, file, line); } #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL if (locks & ERTS_PROC_LOCK_MAIN) - erts_lc_require_lock(&p->lock.main.lc); + erts_lc_require_lock(&p->lock.main.lc, file, line); if (locks & ERTS_PROC_LOCK_LINK) - erts_lc_require_lock(&p->lock.link.lc); + erts_lc_require_lock(&p->lock.link.lc, file, line); if (locks & ERTS_PROC_LOCK_MSGQ) - erts_lc_require_lock(&p->lock.msgq.lc); + erts_lc_require_lock(&p->lock.msgq.lc, file, line); if (locks & ERTS_PROC_LOCK_STATUS) - erts_lc_require_lock(&p->lock.status.lc); + erts_lc_require_lock(&p->lock.status.lc, file, line); #endif } diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 9dd503f3cb..052d992d3f 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -215,7 +215,7 @@ typedef struct erts_proc_lock_t_ { /* Lock counter implemetation */ -#ifdef 
ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION #define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__) #define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__) #endif @@ -243,8 +243,10 @@ void erts_lcnt_enable_proc_lock_count(int enable); erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__) #define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \ erts_proc_lc_chk_only_proc_main((P)) -void erts_proc_lc_lock(Process *p, ErtsProcLocks locks); -void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked); +void erts_proc_lc_lock(Process *p, ErtsProcLocks locks, + char *file, unsigned int line); +void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked, + char *file, unsigned int line); void erts_proc_lc_unlock(Process *p, ErtsProcLocks locks); void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks); void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks); @@ -253,7 +255,8 @@ void erts_proc_lc_chk_only_proc_main(Process *p); void erts_proc_lc_chk_no_proc_locks(char *file, int line); ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p); int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks); -void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks); +void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, + char* file, unsigned int line); void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks); #else #define ERTS_SMP_CHK_NO_PROC_LOCKS @@ -372,7 +375,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *); ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks); -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *, erts_pix_lock_t *, ErtsProcLocks, @@ -482,7 +485,7 @@ busy_main: } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_smp_proc_lock_x__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks, @@ -528,7 +531,7 @@ erts_smp_proc_lock__(Process *p, erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line); #endif #ifdef ERTS_ENABLE_LOCK_CHECK - erts_proc_lc_lock(p, locks); + erts_proc_lc_lock(p, locks, file, line); #endif #ifdef ERTS_PROC_LOCK_DEBUG @@ -695,7 +698,7 @@ erts_smp_proc_trylock__(Process *p, #endif #ifdef ERTS_ENABLE_LOCK_CHECK - erts_proc_lc_trylock(p, locks, res == 0); + erts_proc_lc_trylock(p, locks, res == 0, __FILE__, __LINE__); #endif #if ERTS_PROC_LOCK_ATOMIC_IMPL @@ -741,7 +744,7 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) #endif /* ERTS_SMP */ -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line); #else ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks); @@ -756,13 +759,13 @@ ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line) #else erts_smp_proc_lock(Process *p, ErtsProcLocks locks) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) erts_smp_proc_lock_x__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c index fa5482b841..eabf016081 100644 --- a/erts/emulator/beam/erl_ptab.c 
+++ b/erts/emulator/beam/erl_ptab.c @@ -756,7 +756,8 @@ erts_ptab_delete_element(ErtsPTab *ptab, pix = erts_ptab_id2pix(ptab, ptab_el->id); - ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */ + /* *Need* to be a managed thread */ + ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_ptab_rlock(ptab); maybe_save = ptab->list.data.deleted.end != NULL; diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h index ecb5525022..c38ef47d87 100644 --- a/erts/emulator/beam/erl_smp.h +++ b/erts/emulator/beam/erl_smp.h @@ -26,10 +26,13 @@ #define ERL_SMP_H #include "erl_threads.h" -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION #define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__) +#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__) #define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__) +#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__) #define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__) +#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__) #define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__) #define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__) #define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__) @@ -131,10 +134,11 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name); ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name); ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx); -#ifdef ERTS_ENABLE_LOCK_COUNT -ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line); +#ifdef ERTS_ENABLE_LOCK_POSITION +ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); +ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); #else +ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx); ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx); #endif ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx); @@ -159,16 +163,18 @@ ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx, ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name); ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx); -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION +ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); +ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); #else +ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx); +ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx); #endif ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx); ERTS_GLB_INLINE void
erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx); ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx); ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx); @@ -179,7 +185,7 @@ ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name); ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock); ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line); #else ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock); @@ -192,7 +198,7 @@ ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name); ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock); ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line); ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line); #else @@ -202,7 +208,8 @@ ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock); ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock); ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock); ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp); +ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, + char *keyname); ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key); ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value); ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key); @@ -835,7 +842,7 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra) { #ifdef ERTS_SMP - erts_mtx_init_x(mtx, name, extra); + erts_mtx_init_x(mtx, name, extra, 1); #endif } @@ -843,7 +850,7 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra) { #ifdef ERTS_SMP - erts_mtx_init_locked_x(mtx, name, extra); + erts_mtx_init_locked_x(mtx, name, extra, 1); #endif } @@ -872,9 +879,15 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx) } ERTS_GLB_INLINE int +#ifdef ERTS_ENABLE_LOCK_POSITION +erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line) +#else erts_smp_mtx_trylock(erts_smp_mtx_t *mtx) +#endif { -#ifdef ERTS_SMP +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) + return erts_mtx_trylock_x(mtx,file,line); +#elif defined(ERTS_SMP) return erts_mtx_trylock(mtx); #else return 0; @@ -884,13 +897,13 @@ erts_smp_mtx_trylock(erts_smp_mtx_t *mtx) ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT -erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line) +#ifdef ERTS_ENABLE_LOCK_POSITION +erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line) #else erts_smp_mtx_lock(erts_smp_mtx_t *mtx) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) erts_mtx_lock_x(mtx, file, line); #elif defined(ERTS_SMP) erts_mtx_lock(mtx); @@ -1020,9 +1033,15 @@ erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx) } ERTS_GLB_INLINE int +#ifdef ERTS_ENABLE_LOCK_POSITION +erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) +#else 
erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx) +#endif { -#ifdef ERTS_SMP +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) + return erts_rwmtx_tryrlock_x(rwmtx, file, line); +#elif defined(ERTS_SMP) return erts_rwmtx_tryrlock(rwmtx); #else return 0; @@ -1030,13 +1049,13 @@ erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) #else erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) erts_rwmtx_rlock_x(rwmtx, file, line); #elif defined(ERTS_SMP) erts_rwmtx_rlock(rwmtx); @@ -1053,9 +1072,15 @@ erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx) ERTS_GLB_INLINE int +#ifdef ERTS_ENABLE_LOCK_POSITION +erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) +#else erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx) +#endif { -#ifdef ERTS_SMP +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) + return erts_rwmtx_tryrwlock_x(rwmtx, file, line); +#elif defined(ERTS_SMP) return erts_rwmtx_tryrwlock(rwmtx); #else return 0; @@ -1063,13 +1088,13 @@ erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) #else erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) erts_rwmtx_rwlock_x(rwmtx, file, line); #elif defined(ERTS_SMP) erts_rwmtx_rwlock(rwmtx); @@ -1171,13 +1196,13 @@ erts_smp_spin_unlock(erts_smp_spinlock_t *lock) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line) #else erts_smp_spin_lock(erts_smp_spinlock_t *lock) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) erts_spin_lock_x(lock, file, line); #elif defined(ERTS_SMP) erts_spin_lock(lock); @@ -1237,13 +1262,13 @@ erts_smp_read_unlock(erts_smp_rwlock_t *lock) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) #else erts_smp_read_lock(erts_smp_rwlock_t *lock) #endif { -#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP) erts_read_lock_x(lock, file, line); #elif defined(ERTS_SMP) erts_read_lock(lock); @@ -1263,13 +1288,13 @@ erts_smp_write_unlock(erts_smp_rwlock_t *lock) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) #else erts_smp_write_lock(erts_smp_rwlock_t *lock) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) erts_write_lock_x(lock, file, line); #elif defined(ERTS_SMP) erts_write_lock(lock); @@ -1299,10 +1324,10 @@ erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock) } ERTS_GLB_INLINE void -erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp) +erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname) { #ifdef ERTS_SMP - erts_tsd_key_create(keyp); + 
erts_tsd_key_create(keyp,keyname); #endif } diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c index 2f206ffbec..28cbe7004f 100644 --- a/erts/emulator/beam/erl_term.c +++ b/erts/emulator/beam/erl_term.c @@ -23,6 +23,7 @@ #include "sys.h" #include "erl_vm.h" #include "global.h" +#include "erl_map.h" #include <stdlib.h> #include <stdio.h> @@ -85,7 +86,10 @@ unsigned tag_val_def(Wterm x) case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE): return EXTERNAL_PID_DEF; case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE): return EXTERNAL_PORT_DEF; case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE): return EXTERNAL_REF_DEF; - default: return BINARY_DEF; + case (_TAG_HEADER_REFC_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF; + case (_TAG_HEADER_HEAP_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF; + case (_TAG_HEADER_SUB_BIN >> _TAG_PRIMARY_SIZE): return BINARY_DEF; + case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE): return MAP_DEF; } break; } diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h index 953edf79ea..f10a3a9d38 100644 --- a/erts/emulator/beam/erl_term.h +++ b/erts/emulator/beam/erl_term.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2000-2013. All Rights Reserved. + * Copyright Ericsson AB 2000-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -135,11 +135,12 @@ struct erl_node_; /* Declared in erl_node_tables.h */ #define REF_SUBTAG (0x4 << _TAG_PRIMARY_SIZE) /* REF */ #define FUN_SUBTAG (0x5 << _TAG_PRIMARY_SIZE) /* FUN */ #define FLOAT_SUBTAG (0x6 << _TAG_PRIMARY_SIZE) /* FLOAT */ -#define EXPORT_SUBTAG (0x7 << _TAG_PRIMARY_SIZE) /* FLOAT */ +#define EXPORT_SUBTAG (0x7 << _TAG_PRIMARY_SIZE) /* FLOAT */ #define _BINARY_XXX_MASK (0x3 << _TAG_PRIMARY_SIZE) #define REFC_BINARY_SUBTAG (0x8 << _TAG_PRIMARY_SIZE) /* BINARY */ #define HEAP_BINARY_SUBTAG (0x9 << _TAG_PRIMARY_SIZE) /* BINARY */ #define SUB_BINARY_SUBTAG (0xA << _TAG_PRIMARY_SIZE) /* BINARY */ +#define MAP_SUBTAG (0xB << _TAG_PRIMARY_SIZE) /* MAP */ #define EXTERNAL_PID_SUBTAG (0xC << _TAG_PRIMARY_SIZE) /* EXTERNAL_PID */ #define EXTERNAL_PORT_SUBTAG (0xD << _TAG_PRIMARY_SIZE) /* EXTERNAL_PORT */ #define EXTERNAL_REF_SUBTAG (0xE << _TAG_PRIMARY_SIZE) /* EXTERNAL_REF */ @@ -155,6 +156,7 @@ struct erl_node_; /* Declared in erl_node_tables.h */ #define _TAG_HEADER_REFC_BIN (TAG_PRIMARY_HEADER|REFC_BINARY_SUBTAG) #define _TAG_HEADER_HEAP_BIN (TAG_PRIMARY_HEADER|HEAP_BINARY_SUBTAG) #define _TAG_HEADER_SUB_BIN (TAG_PRIMARY_HEADER|SUB_BINARY_SUBTAG) +#define _TAG_HEADER_MAP (TAG_PRIMARY_HEADER|MAP_SUBTAG) #define _TAG_HEADER_EXTERNAL_PID (TAG_PRIMARY_HEADER|EXTERNAL_PID_SUBTAG) #define _TAG_HEADER_EXTERNAL_PORT (TAG_PRIMARY_HEADER|EXTERNAL_PORT_SUBTAG) #define _TAG_HEADER_EXTERNAL_REF (TAG_PRIMARY_HEADER|EXTERNAL_REF_SUBTAG) @@ -354,7 +356,10 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm) #define is_value(x) ((x) != THE_NON_VALUE) /* binary object access methods */ -#define is_binary_header(x) (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_REFC_BIN) +#define is_binary_header(x) \ + ((((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_REFC_BIN) || \ + (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_HEAP_BIN) || \ + (((x) & (_TAG_HEADER_MASK)) == _TAG_HEADER_SUB_BIN)) #define make_binary(x) make_boxed((Eterm*)(x)) #define is_binary(x) (is_boxed((x)) && is_binary_header(*boxed_val((x)))) #define is_not_binary(x) (!is_binary((x))) @@ -1064,8 +1069,8 @@ 
_ET_DECLARE_CHECKED(Uint,y_reg_index,Uint) /* * Backwards compatibility definitions: - * - #define virtal *_DEF constants with values that fit term order: - * number < atom < ref < fun < port < pid < tuple < nil < cons < binary + * - #define virtual *_DEF constants with values that fit term order: + * number < atom < ref < fun < port < pid < tuple < map < nil < cons < binary * - tag_val_def() function generates virtual _DEF tag * - not_eq_tags() and NUMBER_CODE() defined in terms * of the tag_val_def() function @@ -1074,19 +1079,20 @@ _ET_DECLARE_CHECKED(Uint,y_reg_index,Uint) #define BINARY_DEF 0x0 #define LIST_DEF 0x1 #define NIL_DEF 0x2 -#define TUPLE_DEF 0x3 -#define PID_DEF 0x4 -#define EXTERNAL_PID_DEF 0x5 -#define PORT_DEF 0x6 -#define EXTERNAL_PORT_DEF 0x7 -#define EXPORT_DEF 0x8 -#define FUN_DEF 0x9 -#define REF_DEF 0xa -#define EXTERNAL_REF_DEF 0xb -#define ATOM_DEF 0xc -#define FLOAT_DEF 0xd -#define BIG_DEF 0xe -#define SMALL_DEF 0xf +#define MAP_DEF 0x3 +#define TUPLE_DEF 0x4 +#define PID_DEF 0x5 +#define EXTERNAL_PID_DEF 0x6 +#define PORT_DEF 0x7 +#define EXTERNAL_PORT_DEF 0x8 +#define EXPORT_DEF 0x9 +#define FUN_DEF 0xa +#define REF_DEF 0xb +#define EXTERNAL_REF_DEF 0xc +#define ATOM_DEF 0xd +#define FLOAT_DEF 0xe +#define BIG_DEF 0xf +#define SMALL_DEF 0x10 #if ET_DEBUG extern unsigned tag_val_def_debug(Wterm, const char*, unsigned); @@ -1096,8 +1102,8 @@ extern unsigned tag_val_def(Wterm); #endif #define not_eq_tags(X,Y) (tag_val_def((X)) ^ tag_val_def((Y))) -#define NUMBER_CODE(x,y) ((tag_val_def(x) << 4) | tag_val_def(y)) -#define _NUMBER_CODE(TX,TY) ((TX << 4) | TY) +#define NUMBER_CODE(x,y) ((tag_val_def(x) << 5) | tag_val_def(y)) +#define _NUMBER_CODE(TX,TY) ((TX << 5) | TY) #define SMALL_SMALL _NUMBER_CODE(SMALL_DEF,SMALL_DEF) #define SMALL_BIG _NUMBER_CODE(SMALL_DEF,BIG_DEF) #define SMALL_FLOAT _NUMBER_CODE(SMALL_DEF,FLOAT_DEF) @@ -1126,6 +1132,7 @@ extern unsigned tag_val_def(Wterm); #define make_tuple_rel make_boxed_rel #define make_external_rel make_boxed_rel #define make_internal_ref_rel make_boxed_rel +#define make_big_rel make_boxed_rel #define binary_val_rel(RTERM, BASE) binary_val(rterm2wterm(RTERM, BASE)) #define list_val_rel(RTERM, BASE) list_val(rterm2wterm(RTERM, BASE)) diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c index cf5e3dc012..545a0343d0 100644 --- a/erts/emulator/beam/erl_thr_progress.c +++ b/erts/emulator/beam/erl_thr_progress.c @@ -417,7 +417,8 @@ void erts_thr_progress_pre_init(void) { intrnl = NULL; - erts_tsd_key_create(&erts_thr_prgr_data_key__); + erts_tsd_key_create(&erts_thr_prgr_data_key__, + "erts_thr_prgr_data_key"); init_nob(&erts_thr_prgr__.current, ERTS_THR_PRGR_VAL_FIRST); } diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h index 759c8f4c33..80026104db 100644 --- a/erts/emulator/beam/erl_threads.h +++ b/erts/emulator/beam/erl_threads.h @@ -281,10 +281,13 @@ #define ERTS_THR_READ_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER #define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER ETHR_READ_DEPEND_MEMORY_BARRIER -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION #define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__) +#define erts_mtx_trylock(L) erts_mtx_trylock_x(L, __FILE__, __LINE__) #define erts_spin_lock(L) erts_spin_lock_x(L, __FILE__, __LINE__) +#define erts_rwmtx_tryrlock(L) erts_rwmtx_tryrlock_x(L, __FILE__, __LINE__) #define erts_rwmtx_rlock(L) erts_rwmtx_rlock_x(L, __FILE__, __LINE__) +#define erts_rwmtx_tryrwlock(L) 
erts_rwmtx_tryrwlock_x(L, __FILE__, __LINE__) #define erts_rwmtx_rwlock(L) erts_rwmtx_rwlock_x(L, __FILE__, __LINE__) #define erts_read_lock(L) erts_read_lock_x(L, __FILE__, __LINE__) #define erts_write_lock(L) erts_write_lock_x(L, __FILE__, __LINE__) @@ -461,18 +464,24 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res); ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void)); ERTS_GLB_INLINE erts_tid_t erts_thr_self(void); ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y); -ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra); -ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt); +ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, + int enable_lcnt); +ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, + Uint16 opt, int enable_lcnt); ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, - Eterm extra); + Eterm extra, + int enable_lcnt); ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name); ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name); ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx); -ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx); -#ifdef ERTS_ENABLE_LOCK_COUNT -ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line); +#ifdef ERTS_ENABLE_LOCK_POSITION +ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, + unsigned int line); +ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, + unsigned int line); #else +ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx); ERTS_GLB_INLINE void erts_mtx_lock(erts_mtx_t *mtx); #endif ERTS_GLB_INLINE void erts_mtx_unlock(erts_mtx_t *mtx); @@ -496,16 +505,18 @@ ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name); ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx); -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION +ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line); ERTS_GLB_INLINE void erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line); ERTS_GLB_INLINE void erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line); +ERTS_GLB_INLINE int erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line); #else +ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_rwmtx_rlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx); +ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx); #endif ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx); @@ -571,7 +582,7 @@ ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock, char *name); ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock); ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line); #else ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock); @@ -584,7 
+595,7 @@ ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock, char *name); ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock); ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION ERTS_GLB_INLINE void erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line); ERTS_GLB_INLINE void erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line); #else @@ -594,7 +605,7 @@ ERTS_GLB_INLINE void erts_write_lock(erts_rwlock_t *lock); ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock); ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock); ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock); -ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp); +ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname); ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key); ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value); ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key); @@ -1549,7 +1560,7 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y) } ERTS_GLB_INLINE void -erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra) +erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt) { #ifdef USE_THREADS int res = ethr_mutex_init(&mtx->mtx); @@ -1559,13 +1570,17 @@ erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra) erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra); #endif #ifdef ERTS_ENABLE_LOCK_COUNT - erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra); + if (enable_lcnt) + erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra); + else + erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra); #endif #endif } ERTS_GLB_INLINE void -erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt) +erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt, + int enable_lcnt) { #ifdef USE_THREADS int res = ethr_mutex_init(&mtx->mtx); @@ -1575,14 +1590,17 @@ erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt) erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra); #endif #ifdef ERTS_ENABLE_LOCK_COUNT - erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra); + if (enable_lcnt) + erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra); + else + erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra); #endif #endif } ERTS_GLB_INLINE void -erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra) +erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt) { #ifdef USE_THREADS int res = ethr_mutex_init(&mtx->mtx); @@ -1592,7 +1610,10 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra) erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra); #endif #ifdef ERTS_ENABLE_LOCK_COUNT - erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra); + if (enable_lcnt) + erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra); + else + erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra); #endif ethr_mutex_lock(&mtx->mtx); #ifdef ERTS_ENABLE_LOCK_CHECK @@ -1670,7 +1691,11 @@ erts_mtx_destroy(erts_mtx_t *mtx) } ERTS_GLB_INLINE int +#ifdef ERTS_ENABLE_LOCK_POSITION +erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line) +#else erts_mtx_trylock(erts_mtx_t *mtx) +#endif { #ifdef USE_THREADS int res; @@ -1684,8 +1709,12 @@ erts_mtx_trylock(erts_mtx_t *mtx) res = 
ethr_mutex_trylock(&mtx->mtx); #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_trylock_x(res == 0, &mtx->lc,file,line); +#else erts_lc_trylock(res == 0, &mtx->lc); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_trylock(&mtx->lcnt, res); #endif @@ -1697,7 +1726,7 @@ erts_mtx_trylock(erts_mtx_t *mtx) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line) #else erts_mtx_lock(erts_mtx_t *mtx) @@ -1705,8 +1734,12 @@ erts_mtx_lock(erts_mtx_t *mtx) { #ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_lock_x(&mtx->lc, file, line); +#else erts_lc_lock(&mtx->lc); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock(&mtx->lcnt); #endif @@ -1857,7 +1890,10 @@ erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx, erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra); #endif #ifdef ERTS_ENABLE_LOCK_COUNT - erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra); + if (name && name[0] == '\0') + erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra); + else + erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra); #endif #endif } @@ -1921,7 +1957,11 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx) } ERTS_GLB_INLINE int +#ifdef ERTS_ENABLE_LOCK_POSITION +erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) +#else erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx) +#endif { #ifdef USE_THREADS int res; @@ -1935,8 +1975,12 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx) res = ethr_rwmutex_tryrlock(&rwmtx->rwmtx); #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line); +#else erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ); #endif @@ -1948,7 +1992,7 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) #else erts_rwmtx_rlock(erts_rwmtx_t *rwmtx) @@ -1956,8 +2000,12 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx) { #ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line); +#else erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ); #endif @@ -1984,7 +2032,11 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx) ERTS_GLB_INLINE int +#ifdef ERTS_ENABLE_LOCK_POSITION +erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) +#else erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx) +#endif { #ifdef USE_THREADS int res; @@ -1998,8 +2050,12 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx) res = ethr_rwmutex_tryrwlock(&rwmtx->rwmtx); #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line); +#else erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE); #endif @@ -2011,7 +2067,7 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, 
unsigned int line) #else erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx) @@ -2019,8 +2075,12 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx) { #ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line); +#else erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE); #endif @@ -2426,7 +2486,7 @@ erts_spin_unlock(erts_spinlock_t *lock) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line) #else erts_spin_lock(erts_spinlock_t *lock) @@ -2434,8 +2494,12 @@ erts_spin_lock(erts_spinlock_t *lock) { #ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_lock_x(&lock->lc,file,line); +#else erts_lc_lock(&lock->lc); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock(&lock->lcnt); #endif @@ -2545,7 +2609,7 @@ erts_read_unlock(erts_rwlock_t *lock) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line) #else erts_read_lock(erts_rwlock_t *lock) @@ -2553,8 +2617,12 @@ erts_read_lock(erts_rwlock_t *lock) { #ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line); +#else erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ); #endif @@ -2584,7 +2652,7 @@ erts_write_unlock(erts_rwlock_t *lock) } ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_COUNT +#ifdef ERTS_ENABLE_LOCK_POSITION erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line) #else erts_write_lock(erts_rwlock_t *lock) @@ -2592,8 +2660,12 @@ erts_write_lock(erts_rwlock_t *lock) { #ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK +#ifdef ERTS_ENABLE_LOCK_POSITION + erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line); +#else erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE); #endif +#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE); #endif @@ -2635,10 +2707,10 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock) } ERTS_GLB_INLINE void -erts_tsd_key_create(erts_tsd_key_t *keyp) +erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname) { #ifdef USE_THREADS - int res = ethr_tsd_key_create(keyp); + int res = ethr_tsd_key_create(keyp, keyname); if (res) erts_thr_fatal_error(res, "create thread specific data key"); #endif diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index ff7fdfcfca..6978a5f11a 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -3309,6 +3309,8 @@ sys_msg_dispatcher_func(void *unused) if (erts_thr_progress_update(NULL)) erts_thr_progress_leader_update(NULL); + ERTS_SCHED_FAIR_YIELD(); + #ifdef DEBUG_PRINTOUTS print_msg_type(smqp); #endif @@ -3467,12 +3469,20 @@ static void init_sys_msg_dispatcher(void) { erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER; +#ifdef __OSE__ + thr_opts.coreNo = 0; +#endif thr_opts.detached = 1; init_smq_element_alloc(); sys_message_queue = NULL; sys_message_queue_end = NULL; erts_smp_cnd_init(&smq_cnd); erts_smp_mtx_init(&smq_mtx, "sys_msg_q"); + +#ifdef ETHR_HAVE_THREAD_NAMES + thr_opts.name = "sys_msg_dispatcher"; +#endif + 
erts_smp_thr_create(&sys_msg_dispatcher_tid, sys_msg_dispatcher_func, NULL, diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c index 569c0a7d31..3a968594f3 100644 --- a/erts/emulator/beam/erl_unicode.c +++ b/erts/emulator/beam/erl_unicode.c @@ -1984,9 +1984,21 @@ BIF_RETTYPE binary_to_existing_atom_2(BIF_ALIST_2) * string routines, that will certainly fail on some OS. */ -char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_size, ErtsAlcType_t alloc_type, int allow_empty, int allow_atom, Sint *used) +char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_size, + ErtsAlcType_t alloc_type, int allow_empty, + int allow_atom, Sint *used) { int encoding = erts_get_native_filename_encoding(); + return erts_convert_filename_to_encoding(name, statbuf, statbuf_size, alloc_type, + allow_empty, allow_atom, encoding, + used, 0); +} + +char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, size_t statbuf_size, + ErtsAlcType_t alloc_type, int allow_empty, + int allow_atom, int encoding, Sint *used, + Uint extra) +{ char* name_buf = NULL; if ((allow_atom && is_atom(name)) || @@ -1998,13 +2010,14 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_ } if (encoding == ERL_FILENAME_WIN_WCHAR) { need += 2; + extra *= 2; } else { ++need; } if (used) *used = (Sint) need; - if (need > statbuf_size) { - name_buf = (char *) erts_alloc(alloc_type, need); + if (need+extra > statbuf_size) { + name_buf = (char *) erts_alloc(alloc_type, need+extra); } else { name_buf = statbuf; } @@ -2016,52 +2029,27 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_ } else if (is_binary(name)) { byte *temp_alloc = NULL; byte *bytes; - byte *err_pos; - Uint size,num_chars; + Uint size; size = binary_size(name); bytes = erts_get_aligned_binary_bytes(name, &temp_alloc); + if (encoding != ERL_FILENAME_WIN_WCHAR) { /*Add 0 termination only*/ if (used) *used = (Sint) size+1; - if (size+1 > statbuf_size) { - name_buf = (char *) erts_alloc(alloc_type, size+1); + if (size+1+extra > statbuf_size) { + name_buf = (char *) erts_alloc(alloc_type, size+1+extra); } else { name_buf = statbuf; } memcpy(name_buf,bytes,size); name_buf[size]=0; - } else if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK || - erts_get_user_requested_filename_encoding() == ERL_FILENAME_LATIN1) { - byte *p; - /* What to do now? 
Maybe latin1, so just take byte for byte instead */ - if (used) - *used = (Sint) (size+1)*2; - if ((size+1)*2 > statbuf_size) { - name_buf = (char *) erts_alloc(alloc_type, (size+1)*2); - } else { - name_buf = statbuf; - } - p = (byte *) name_buf; - while (size--) { - *p++ = *bytes++; - *p++ = 0; - } - *p++ = 0; - *p++ = 0; - } else { /* WIN_WCHAR and valid UTF8 */ - if (used) - *used = (Sint) (num_chars+1)*2; - if ((num_chars+1)*2 > statbuf_size) { - name_buf = (char *) erts_alloc(alloc_type, (num_chars+1)*2); - } else { - name_buf = statbuf; - } - erts_copy_utf8_to_utf16_little((byte *) name_buf, bytes, num_chars); - name_buf[num_chars*2] = 0; - name_buf[num_chars*2+1] = 0; - } + } else { + name_buf = erts_convert_filename_to_wchar(bytes, size, + statbuf, statbuf_size, + alloc_type, used, extra); + } erts_free_aligned_binary_bytes(temp_alloc); } else { return NULL; @@ -2069,6 +2057,50 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf, size_t statbuf_ return name_buf; } +char* erts_convert_filename_to_wchar(byte* bytes, Uint size, + char *statbuf, size_t statbuf_size, + ErtsAlcType_t alloc_type, Sint* used, + Uint extra_wchars) +{ + byte *err_pos; + Uint num_chars; + char* name_buf = NULL; + Sint need; + char *p; + + if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK || + erts_get_user_requested_filename_encoding() == ERL_FILENAME_LATIN1) { + + /* What to do now? Maybe latin1, so just take byte for byte instead */ + need = (Sint) (size + extra_wchars + 1) * 2; + if (need > statbuf_size) { + name_buf = (char *) erts_alloc(alloc_type, need); + } else { + name_buf = statbuf; + } + p = name_buf; + while (size--) { + *p++ = *bytes++; + *p++ = 0; + } + } else { /* WIN_WCHAR and valid UTF8 */ + need = (Sint) (num_chars + extra_wchars + 1) * 2; + if (need > statbuf_size) { + name_buf = (char *) erts_alloc(alloc_type, need); + } else { + name_buf = statbuf; + } + erts_copy_utf8_to_utf16_little((byte *) name_buf, bytes, num_chars); + p = name_buf + num_chars*2; + } + *p++ = 0; + *p++ = 0; + if (used) + *used = p - name_buf; + return name_buf; +} + + static int filename_len_16bit(byte *str) { byte *p = str; @@ -2148,16 +2180,31 @@ Sint erts_native_filename_need(Eterm ioterm, int encoding) ap = atom_tab(atom_val(ioterm)); switch (encoding) { case ERL_FILENAME_LATIN1: - need = ap->len; + need = ap->latin1_chars; /* May be -1 */ break; case ERL_FILENAME_UTF8_MAC: case ERL_FILENAME_UTF8: - for (i = 0; i < ap->len; i++) { - need += (ap->name[i] >= 0x80) ? 
2 : 1; - } + need = ap->len; break; case ERL_FILENAME_WIN_WCHAR: - need = 2*(ap->len); + if (ap->latin1_chars >= 0) { + need = 2* ap->latin1_chars; + } + else { + for (i = 0; i < ap->len; ) { + if (ap->name[i] < 0x80) { + i++; + } else if (ap->name[i] < 0xE0) { + i += 2; + } else if (ap->name[i] < 0xF0) { + i += 3; + } else { + need = -1; + break; + } + need += 2; + } + } break; default: need = -1; @@ -2287,26 +2334,36 @@ void erts_native_filename_put(Eterm ioterm, int encoding, byte *p) switch (encoding) { case ERL_FILENAME_LATIN1: for (i = 0; i < ap->len; i++) { - *p++ = ap->name[i]; - } - break; - case ERL_FILENAME_UTF8_MAC: - case ERL_FILENAME_UTF8: - for (i = 0; i < ap->len; i++) { - if(ap->name[i] < 0x80) { + if (ap->name[i] < 0x80) { *p++ = ap->name[i]; } else { - *p++ = (((ap->name[i]) >> 6) | ((byte) 0xC0)); - *p++ = (((ap->name[i]) & 0x3F) | ((byte) 0x80)); + ASSERT(ap->name[i] < 0xC4); + *p++ = ((ap->name[i] & 3) << 6) | (ap->name[i+1] & 0x3F); + i++; } } break; + case ERL_FILENAME_UTF8_MAC: + case ERL_FILENAME_UTF8: + sys_memcpy(p, ap->name, ap->len); + break; case ERL_FILENAME_WIN_WCHAR: for (i = 0; i < ap->len; i++) { /* Little endian */ - *p++ = ap->name[i]; - *p++ = 0; - } + if (ap->name[i] < 0x80) { + *p++ = ap->name[i]; + *p++ = 0; + } else if (ap->name[i] < 0xE0) { + *p++ = ((ap->name[i] & 3) << 6) | (ap->name[i+1] & 0x3F); + *p++ = ((ap->name[i] & 0x1C) >> 2); + i++; + } else { + ASSERT(ap->name[i] < 0xF0); + *p++ = ((ap->name[i+1] & 3) << 6) | (ap->name[i+2] & 0x3C); + *p++ = ((ap->name[i] & 0xF) << 4) | ((ap->name[i+1] & 0x3C) >> 2); + i += 2; + } + } break; default: ASSERT(0); diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index 292d135946..0807649ea1 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2012-2013. All Rights Reserved. + * Copyright Ericsson AB 2012-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -150,7 +150,7 @@ void erts_silence_warn_unused_result(long unused); int erts_fit_in_bits_int64(Sint64); int erts_fit_in_bits_int32(Sint32); -int list_length(Eterm); +int erts_list_length(Eterm); int erts_is_builtin(Eterm, Eterm, int); Uint32 make_broken_hash(Eterm); Uint32 block_hash(byte *, unsigned, Uint32); @@ -202,23 +202,38 @@ int eq(Eterm, Eterm); #define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y)))) #if HALFWORD_HEAP -Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*); -#define CMP(A,B) cmp_rel(A,NULL,B,NULL) +Sint erts_cmp_rel_opt(Eterm, Eterm*, Eterm, Eterm*, int); +#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,0) +#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,1) +#define CMP(A,B) erts_cmp_rel_opt(A,NULL,B,NULL,0) +#define CMP_TERM(A,B) erts_cmp_rel_opt(A,NULL,B,NULL,1) #else Sint cmp(Eterm, Eterm); -#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B) -#define CMP(A,B) cmp(A,B) +Sint erts_cmp(Eterm, Eterm, int); +#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp(A,B,0) +#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp(A,B,1) +#define CMP(A,B) erts_cmp(A,B,0) +#define CMP_TERM(A,B) erts_cmp(A,B,1) #endif -#define cmp_lt(a,b) (CMP((a),(b)) < 0) -#define cmp_le(a,b) (CMP((a),(b)) <= 0) -#define cmp_eq(a,b) (CMP((a),(b)) == 0) -#define cmp_ne(a,b) (CMP((a),(b)) != 0) -#define cmp_ge(a,b) (CMP((a),(b)) >= 0) -#define cmp_gt(a,b) (CMP((a),(b)) > 0) - -#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b))) -#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b))) -#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b))) -#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b))) + +#define cmp_lt(a,b) (CMP((a),(b)) < 0) +#define cmp_le(a,b) (CMP((a),(b)) <= 0) +#define cmp_eq(a,b) (CMP((a),(b)) == 0) +#define cmp_ne(a,b) (CMP((a),(b)) != 0) +#define cmp_ge(a,b) (CMP((a),(b)) >= 0) +#define cmp_gt(a,b) (CMP((a),(b)) > 0) + +#define cmp_lt_term(a,b) (CMP_TERM((a),(b)) < 0) +#define cmp_le_term(a,b) (CMP_TERM((a),(b)) <= 0) +#define cmp_ge_term(a,b) (CMP_TERM((a),(b)) >= 0) +#define cmp_gt_term(a,b) (CMP_TERM((a),(b)) > 0) + +#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b))) +#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b))) +#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b))) +#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b))) + +#define CMP_LT_TERM(a,b) ((a) != (b) && cmp_lt_term((a),(b))) +#define CMP_GE_TERM(a,b) ((a) == (b) || cmp_ge_term((a),(b))) #endif diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 337422eead..b7de8208ad 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -46,7 +46,6 @@ heap data on the C stack or if we use the buffers in the scheduler data. */ #define TMP_HEAP_SIZE 128 /* Number of Eterm in the schedulers small heap for transient heap data */ -#define CMP_TMP_HEAP_SIZE 32 /* cmp wants its own tmp-heap... */ #define ERL_ARITH_TMP_HEAP_SIZE 4 /* as does erl_arith... */ #define BEAM_EMU_TMP_HEAP_SIZE 2 /* and beam_emu... 
*/ diff --git a/erts/emulator/beam/erl_zlib.c b/erts/emulator/beam/erl_zlib.c index 47fd92988e..8e33144f96 100644 --- a/erts/emulator/beam/erl_zlib.c +++ b/erts/emulator/beam/erl_zlib.c @@ -87,6 +87,46 @@ int ZEXPORT erl_zlib_deflate_finish(z_stream *streamp) return deflateEnd(streamp); } +int ZEXPORT erl_zlib_inflate_start(z_stream *streamp, const Bytef* source, + uLong sourceLen) +{ + streamp->next_in = (Bytef*)source; + streamp->avail_in = (uInt)sourceLen; + streamp->total_out = streamp->avail_out = 0; + streamp->next_out = NULL; + erl_zlib_alloc_init(streamp); + return inflateInit(streamp); +} +/* + * Inflate a chunk. The destination length is the limit. + * Returns Z_OK if more to process, Z_STREAM_END if we are done. + */ +int ZEXPORT erl_zlib_inflate_chunk(z_stream *streamp, Bytef* dest, uLongf* destLen) +{ + int err; + uLongf last_tot = streamp->total_out; + + streamp->next_out = dest; + streamp->avail_out = (uInt)*destLen; + + if ((uLong)streamp->avail_out != *destLen) return Z_BUF_ERROR; + + err = inflate(streamp, Z_NO_FLUSH); + ASSERT(err != Z_STREAM_ERROR); + *destLen = streamp->total_out - last_tot; + return err; +} + +/* + * When we are done, free up the inflate structure + * Returns Z_OK or Error + */ +int ZEXPORT erl_zlib_inflate_finish(z_stream *streamp) +{ + return inflateEnd(streamp); +} + + int ZEXPORT erl_zlib_compress2 (Bytef* dest, uLongf* destLen, const Bytef* source, uLong sourceLen, int level) diff --git a/erts/emulator/beam/erl_zlib.h b/erts/emulator/beam/erl_zlib.h index 5ac849d21c..160166c66b 100644 --- a/erts/emulator/beam/erl_zlib.h +++ b/erts/emulator/beam/erl_zlib.h @@ -39,6 +39,12 @@ int ZEXPORT erl_zlib_deflate_start(z_stream *streamp, const Bytef* source, int ZEXPORT erl_zlib_deflate_chunk(z_stream *streamp, Bytef* dest, uLongf* destLen); int ZEXPORT erl_zlib_deflate_finish(z_stream *streamp); +int ZEXPORT erl_zlib_inflate_start(z_stream *streamp, const Bytef* source, + uLong sourceLen); +int ZEXPORT erl_zlib_inflate_chunk(z_stream *streamp, Bytef* dest, uLongf* destLen); +int ZEXPORT erl_zlib_inflate_finish(z_stream *streamp); + + /* Use instead of compress */ #define erl_zlib_compress(dest,destLen,source,sourceLen) \ diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 22b0a02937..656de7c49a 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -42,6 +42,7 @@ #include "erl_binary.h" #include "erl_bits.h" #include "erl_zlib.h" +#include "erl_map.h" #ifdef HIPE #include "hipe_mode_switch.h" @@ -61,6 +62,9 @@ */ # define ERTS_DEBUG_USE_DIST_SEP # endif +# define IF_DEBUG(X) X +#else +# define IF_DEBUG(X) #endif /* Does Sint fit in Sint32?
@@ -84,29 +88,40 @@ static Export term_to_binary_trap_export; static byte* enc_term(ErtsAtomCacheMap *, Eterm, byte*, Uint32, struct erl_off_heap_header** off_heap); -static int enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags, +struct TTBEncodeContext_; +static int enc_term_int(struct TTBEncodeContext_*,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags, struct erl_off_heap_header** off_heap, Sint *reds, byte **res); static Uint is_external_string(Eterm obj, int* p_is_string); static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32); static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32); -static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*); +struct B2TContext_t; +static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*, struct B2TContext_t*); static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*); static byte* dec_pid(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*); -static Sint decoded_size(byte *ep, byte* endp, int internal_tags); +static Sint decoded_size(byte *ep, byte* endp, int internal_tags, struct B2TContext_t*); static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1); static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags, Binary *context_b); static Uint encode_size_struct2(ErtsAtomCacheMap *, Eterm, unsigned); -static int encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj, +struct TTBSizeContext_; +static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags, Sint *reds, Uint *res); +static Export binary_to_term_trap_export; +static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1); +static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b); + void erts_init_external(void) { #if 1 /* In R16 */ erts_init_trap_export(&term_to_binary_trap_export, am_erlang, am_term_to_binary_trap, 1, &term_to_binary_trap_1); + + erts_init_trap_export(&binary_to_term_trap_export, + am_erlang, am_binary_to_term_trap, 1, + &binary_to_term_trap_1); #else sys_memset((void *) &term_to_binary_trap_export, 0, sizeof(Export)); term_to_binary_trap_export.address = &term_to_binary_trap_export.code[3]; @@ -514,7 +529,7 @@ Uint erts_encode_ext_size(Eterm term) Uint erts_encode_ext_size_2(Eterm term, unsigned dflags) { - return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS|dflags) + return encode_size_struct2(NULL, term, dflags) + 1 /* VERSION_MAGIC */; } @@ -877,7 +892,7 @@ erts_decode_dist_ext_size(ErtsDistExternal *edep) goto fail; ep = edep->extp+1; } - res = decoded_size(ep, edep->ext_endp, 0); + res = decoded_size(ep, edep->ext_endp, 0, NULL); if (res >= 0) return res; fail: @@ -889,12 +904,12 @@ Sint erts_decode_ext_size(byte *ext, Uint size) { if (size == 0 || *ext != VERSION_MAGIC) return -1; - return decoded_size(ext+1, ext+size, 0); + return decoded_size(ext+1, ext+size, 0, NULL); } Sint erts_decode_ext_size_ets(byte *ext, Uint size) { - Sint sz = decoded_size(ext, ext+size, 1); + Sint sz = decoded_size(ext, ext+size, 1, NULL); ASSERT(sz >= 0); return sz; } @@ -927,7 +942,7 @@ erts_decode_dist_ext(Eterm** hpp, goto error; ep++; } - ep = dec_term(edep, hpp, ep, off_heap, &obj); + ep = dec_term(edep, hpp, ep, off_heap, &obj, NULL); if (!ep) goto error; @@ -948,7 +963,7 @@ Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext) byte *ep = *ext; if (*ep++ != VERSION_MAGIC) return THE_NON_VALUE; - ep = dec_term(NULL, hpp, ep, off_heap, &obj); + ep = 
dec_term(NULL, hpp, ep, off_heap, &obj, NULL); if (!ep) { #ifdef DEBUG bin_write(ERTS_PRINT_STDERR,NULL,*ext,500); @@ -962,7 +977,7 @@ Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext) Eterm erts_decode_ext_ets(Eterm **hpp, ErlOffHeap *off_heap, byte *ext) { Eterm obj; - ext = dec_term(NULL, hpp, ext, off_heap, &obj); + ext = dec_term(NULL, hpp, ext, off_heap, &obj, NULL); ASSERT(ext); return obj; } @@ -1043,18 +1058,25 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1) Binary *bin = ((ProcBin *) binary_val(bt))->val; Eterm res = erts_term_to_binary_int(BIF_P, Term, 0, 0,bin); if (is_tuple(res)) { + ASSERT(BIF_P->flags & F_DISABLE_GC); BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res); } else { - BIF_RET(res); + if (erts_set_gc_state(BIF_P, 1) + || MSO(BIF_P).overhead > BIN_VHEAP_SZ(BIF_P)) + ERTS_BIF_YIELD_RETURN(BIF_P, res); + else + BIF_RET(res); } } - + BIF_RETTYPE term_to_binary_1(BIF_ALIST_1) { Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS, NULL); if (is_tuple(res)) { + erts_set_gc_state(BIF_P, 0); BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res); } else { + ASSERT(!(BIF_P->flags & F_DISABLE_GC)); BIF_RET(res); } } @@ -1067,7 +1089,6 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2) int level = 0; Uint flags = TERM_TO_BINARY_DFLAGS; Eterm res; - Binary *bin = NULL; while (is_list(Flags)) { Eterm arg = CAR(list_val(Flags)); @@ -1078,10 +1099,10 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2) if (tp[1] == am_minor_version && is_small(tp[2])) { switch (signed_val(tp[2])) { case 0: - flags = TERM_TO_BINARY_DFLAGS; + flags = TERM_TO_BINARY_DFLAGS & ~DFLAG_NEW_FLOATS; break; case 1: - flags = TERM_TO_BINARY_DFLAGS|DFLAG_NEW_FLOATS; + flags = TERM_TO_BINARY_DFLAGS; break; default: goto error; @@ -1104,14 +1125,75 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2) goto error; } - res = erts_term_to_binary_int(p, Term, level, flags, bin); + res = erts_term_to_binary_int(p, Term, level, flags, NULL); if (is_tuple(res)) { + erts_set_gc_state(p, 0); BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res); } else { + ASSERT(!(BIF_P->flags & F_DISABLE_GC)); BIF_RET(res); } } + +enum B2TState { /* order is somewhat significant */ + B2TPrepare, + B2TUncompressChunk, + B2TSizeInit, + B2TSize, + B2TDecodeInit, + B2TDecode, + B2TDecodeList, + B2TDecodeTuple, + B2TDecodeString, + B2TDecodeBinary, + + B2TDone, + B2TDecodeFail, + B2TBadArg +}; + +typedef struct { + int heap_size; + int terms; + byte* ep; + int atom_extra_skip; +} B2TSizeContext; + +typedef struct { + byte* ep; + Eterm res; + Eterm* next; + Eterm* hp_start; + Eterm* hp; + Eterm* hp_end; + int remaining_n; + char* remaining_bytes; + Eterm* maps_head; +} B2TDecodeContext; + +typedef struct { + z_stream stream; + byte* dbytes; + Uint dleft; +} B2TUncompressContext; + +typedef struct B2TContext_t { + Sint heap_size; + byte* aligned_alloc; + ErtsBinary2TermState b2ts; + Uint32 flags; + SWord reds; + Eterm trap_bin; + enum B2TState state; + union { + B2TSizeContext sc; + B2TDecodeContext dc; + B2TUncompressContext uc; + } u; +} B2TContext; + + static uLongf binary2term_uncomp_size(byte* data, Sint size) { z_stream stream; @@ -1141,48 +1223,62 @@ static uLongf binary2term_uncomp_size(byte* data, Sint size) return err == Z_STREAM_END ? 
uncomp_size : 0; } -static ERTS_INLINE Sint -binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size) +static ERTS_INLINE int +binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size, + B2TContext* ctx) { - Sint res; byte *bytes = data; Sint size = data_size; state->exttmp = 0; if (size < 1 || *bytes != VERSION_MAGIC) { - error: - if (state->exttmp) - erts_free(ERTS_ALC_T_TMP, state->extp); - state->extp = NULL; - state->exttmp = 0; return -1; } bytes++; size--; if (size < 5 || *bytes != COMPRESSED) { state->extp = bytes; + if (ctx) + ctx->state = B2TSizeInit; } else { uLongf dest_len = (Uint32) get_int32(bytes+1); bytes += 5; size -= 5; if (dest_len > 32*1024*1024 - || (state->extp = erts_alloc_fnf(ERTS_ALC_T_TMP, dest_len)) == NULL) { + || (state->extp = erts_alloc_fnf(ERTS_ALC_T_EXT_TERM_DATA, dest_len)) == NULL) { + /* + * Try to avoid out-of-memory crash due to corrupted 'dest_len' + * by checking the actual length of the uncompressed data. + * The only way to do that is to uncompress it. Sad but true. + */ if (dest_len != binary2term_uncomp_size(bytes, size)) { - goto error; + return -1; } - state->extp = erts_alloc(ERTS_ALC_T_TMP, dest_len); + state->extp = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, dest_len); + ctx->reds -= dest_len; } state->exttmp = 1; - if (erl_zlib_uncompress(state->extp, &dest_len, bytes, size) != Z_OK) - goto error; + if (ctx) { + if (erl_zlib_inflate_start(&ctx->u.uc.stream, bytes, size) != Z_OK) + return -1; + + ctx->u.uc.dbytes = state->extp; + ctx->u.uc.dleft = dest_len; + ctx->state = B2TUncompressChunk; + } + else { + uLongf dlen = dest_len; + if (erl_zlib_uncompress(state->extp, &dlen, bytes, size) != Z_OK + || dlen != dest_len) { + return -1; + } + } size = (Sint) dest_len; } - res = decoded_size(state->extp, state->extp + size, 0); - if (res < 0) - goto error; - return res; + state->extsize = size; + return 0; } static ERTS_INLINE void @@ -1190,7 +1286,7 @@ binary2term_abort(ErtsBinary2TermState *state) { if (state->exttmp) { state->exttmp = 0; - erts_free(ERTS_ALC_T_TMP, state->extp); + erts_free(ERTS_ALC_T_EXT_TERM_DATA, state->extp); } } @@ -1198,11 +1294,11 @@ static ERTS_INLINE Eterm binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *ohp) { Eterm res; - if (!dec_term(edep, hpp, state->extp, ohp, &res)) + if (!dec_term(edep, hpp, state->extp, ohp, &res, NULL)) res = THE_NON_VALUE; if (state->exttmp) { state->exttmp = 0; - erts_free(ERTS_ALC_T_TMP, state->extp); + erts_free(ERTS_ALC_T_EXT_TERM_DATA, state->extp); } return res; } @@ -1210,7 +1306,18 @@ binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state, Eterm ** Sint erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size) { - return binary2term_prepare(state, data, data_size); + Sint res; + + if (binary2term_prepare(state, data, data_size, NULL) < 0 || + (res=decoded_size(state->extp, state->extp + state->extsize, 0, NULL)) < 0) { + + if (state->exttmp) + erts_free(ERTS_ALC_T_EXT_TERM_DATA, state->extp); + state->extp = NULL; + state->exttmp = 0; + return -1; + } + return res; } void @@ -1225,68 +1332,234 @@ erts_binary2term_create(ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *oh return binary2term_create(NULL,state, hpp, ohp); } -BIF_RETTYPE binary_to_term_1(BIF_ALIST_1) +static void b2t_destroy_context(B2TContext* context) { - Sint heap_size; - Eterm res; + erts_free_aligned_binary_bytes_extra(context->aligned_alloc, + ERTS_ALC_T_EXT_TERM_DATA); +
context->aligned_alloc = NULL; + binary2term_abort(&context->b2ts); + if (context->state == B2TUncompressChunk) { + erl_zlib_inflate_finish(&context->u.uc.stream); + } +} + +static void b2t_context_destructor(Binary *context_bin) +{ + B2TContext* ctx = (B2TContext*) ERTS_MAGIC_BIN_DATA(context_bin); + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor); + + b2t_destroy_context(ctx); +} + +static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1) +{ + Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val; + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor); + + return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin); +} + + +#define B2T_BYTES_PER_REDUCTION 128 +#define B2T_MEMCPY_FACTOR 8 + +/* Define for testing */ +/*#define EXTREME_B2T_TRAPPING 1*/ + +#ifdef EXTREME_B2T_TRAPPING +static unsigned b2t_rand(void) +{ + static unsigned prev = 17; + prev = (prev * 214013 + 2531011); + return prev; +} +#endif + + +static B2TContext* b2t_export_context(Process* p, B2TContext* src) +{ + Binary* context_b = erts_create_magic_binary(sizeof(B2TContext), + b2t_context_destructor); + B2TContext* ctx = ERTS_MAGIC_BIN_DATA(context_b); Eterm* hp; - Eterm* endp; - Sint size; - byte* bytes; - byte* temp_alloc = NULL; - ErtsBinary2TermState b2ts; + sys_memcpy(ctx, src, sizeof(B2TContext)); + if (ctx->state >= B2TDecode && ctx->u.dc.next == &src->u.dc.res) { + ctx->u.dc.next = &ctx->u.dc.res; + } + hp = HAlloc(p, PROC_BIN_SIZE); + ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), context_b); + return ctx; +} - if ((bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc)) == NULL) { - error: - erts_free_aligned_binary_bytes(temp_alloc); - BIF_ERROR(BIF_P, BADARG); +static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b) +{ +#ifdef EXTREME_B2T_TRAPPING + SWord initial_reds = 1 + b2t_rand() % 4; +#else + SWord initial_reds = (Uint)(ERTS_BIF_REDS_LEFT(p) * B2T_BYTES_PER_REDUCTION); +#endif + B2TContext c_buff; + B2TContext *ctx; + int is_first_call; + + if (context_b == NULL) { + /* Setup enough to get started */ + is_first_call = 1; + ctx = &c_buff; + ctx->state = B2TPrepare; + ctx->aligned_alloc = NULL; + ctx->flags = flags; + IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;) + } else { + is_first_call = 0; + ctx = ERTS_MAGIC_BIN_DATA(context_b); + ASSERT(ctx->state != B2TPrepare); } - size = binary_size(BIF_ARG_1); + ctx->reds = initial_reds; + + do { + switch (ctx->state) { + case B2TPrepare: { + byte* bytes; + Uint bin_size; + bytes = erts_get_aligned_binary_bytes_extra(bin, + &ctx->aligned_alloc, + ERTS_ALC_T_EXT_TERM_DATA, + 0); + if (bytes == NULL) { + ctx->b2ts.exttmp = 0; + ctx->state = B2TBadArg; + break; + } + bin_size = binary_size(bin); + if (ctx->aligned_alloc) { + ctx->reds -= bin_size / 8; + } + if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, ctx) < 0) { + ctx->state = B2TBadArg; + } + break; + } + case B2TUncompressChunk: { + uLongf chunk = ctx->reds; + int zret; + + if (chunk > ctx->u.uc.dleft) + chunk = ctx->u.uc.dleft; + zret = erl_zlib_inflate_chunk(&ctx->u.uc.stream, + ctx->u.uc.dbytes, &chunk); + ctx->u.uc.dbytes += chunk; + ctx->u.uc.dleft -= chunk; + if (zret == Z_OK && ctx->u.uc.dleft > 0) { + ctx->reds = 0; + } + else if (erl_zlib_inflate_finish(&ctx->u.uc.stream) == Z_OK + && zret == Z_STREAM_END + && ctx->u.uc.dleft == 0) { + ctx->reds -= chunk; + ctx->state = B2TSizeInit; + } + else { + ctx->state = B2TBadArg; + } + break; + } + case B2TSizeInit: + ctx->u.sc.ep = NULL; + ctx->state = 
B2TSize; + /*fall through*/ + case B2TSize: + ctx->heap_size = decoded_size(ctx->b2ts.extp, + ctx->b2ts.extp + ctx->b2ts.extsize, + 0, ctx); + break; + + case B2TDecodeInit: + if (ctx == &c_buff && ctx->b2ts.extsize > ctx->reds) { + /* dec_term will maybe trap, allocate space for magic bin + before result term to make it easy to trim with HRelease. + */ + ctx = b2t_export_context(p, &c_buff); + } + ctx->u.dc.ep = ctx->b2ts.extp; + ctx->u.dc.res = (Eterm) (UWord) NULL; + ctx->u.dc.next = &ctx->u.dc.res; + ctx->u.dc.hp_start = HAlloc(p, ctx->heap_size); + ctx->u.dc.hp = ctx->u.dc.hp_start; + ctx->u.dc.hp_end = ctx->u.dc.hp_start + ctx->heap_size; + ctx->u.dc.maps_head = NULL; + ctx->state = B2TDecode; + /*fall through*/ + case B2TDecode: + case B2TDecodeList: + case B2TDecodeTuple: + case B2TDecodeString: + case B2TDecodeBinary: { + ErtsDistExternal fakedep; + fakedep.flags = ctx->flags; + dec_term(&fakedep, NULL, NULL, &MSO(p), NULL, ctx); + break; + } + case B2TDecodeFail: + HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp_start); + /*fall through*/ + case B2TBadArg: + b2t_destroy_context(ctx); + if (!is_first_call) { + erts_set_gc_state(p, 1); + } + BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION); + BIF_ERROR(p, BADARG & ~EXF_SAVETRACE); - heap_size = binary2term_prepare(&b2ts, bytes, size); - if (heap_size < 0) - goto error; + case B2TDone: + b2t_destroy_context(ctx); - hp = HAlloc(BIF_P, heap_size); - endp = hp + heap_size; + if (ctx->u.dc.hp > ctx->u.dc.hp_end) { + erl_exit(1, ":%s, line %d: heap overrun by %d words(s)\n", + __FILE__, __LINE__, ctx->u.dc.hp - ctx->u.dc.hp_end); + } + HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp); - res = binary2term_create(NULL, &b2ts, &hp, &MSO(BIF_P)); + if (!is_first_call) { + erts_set_gc_state(p, 1); + } + BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION); + return ctx->u.dc.res; - erts_free_aligned_binary_bytes(temp_alloc); + default: + ASSERT(!"Unknown state in binary_to_term"); + } + }while (ctx->reds > 0 || ctx->state >= B2TDone); - if (hp > endp) { - erl_exit(1, ":%s, line %d: heap overrun by %d words(s)\n", - __FILE__, __LINE__, hp-endp); + if (ctx == &c_buff) { + ASSERT(ctx->trap_bin == THE_NON_VALUE); + ctx = b2t_export_context(p, &c_buff); } + ASSERT(ctx->trap_bin != THE_NON_VALUE); - HRelease(BIF_P, endp, hp); - - if (res == THE_NON_VALUE) - goto error; + if (is_first_call) { + erts_set_gc_state(p, 0); + } + BUMP_ALL_REDS(p); + BIF_TRAP1(&binary_to_term_trap_export, p, ctx->trap_bin); +} - return res; +BIF_RETTYPE erts_internal_binary_to_term_1(BIF_ALIST_1) +{ + return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL); } -BIF_RETTYPE binary_to_term_2(BIF_ALIST_2) +BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2) { - Sint heap_size; - Eterm res; Eterm opts; Eterm opt; - Eterm* hp; - Eterm* endp; - Sint size; - byte* bytes; - byte* temp_alloc = NULL; - ErtsBinary2TermState b2ts; - ErtsDistExternal fakedep; + Uint32 flags = 0; - fakedep.flags = 0; opts = BIF_ARG_2; while (is_list(opts)) { opt = CAR(list_val(opts)); if (opt == am_safe) { - fakedep.flags |= ERTS_DIST_EXT_BTT_SAFE; + flags |= ERTS_DIST_EXT_BTT_SAFE; } else { goto error; @@ -1297,35 +1570,10 @@ BIF_RETTYPE binary_to_term_2(BIF_ALIST_2) if (is_not_nil(opts)) goto error; - if ((bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc)) == NULL) { - error: - erts_free_aligned_binary_bytes(temp_alloc); - BIF_ERROR(BIF_P, BADARG); - } - size = binary_size(BIF_ARG_1); - - heap_size = binary2term_prepare(&b2ts, bytes, size); - if (heap_size < 0) - 
goto error; - - hp = HAlloc(BIF_P, heap_size); - endp = hp + heap_size; - - res = binary2term_create(&fakedep, &b2ts, &hp, &MSO(BIF_P)); + return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL); - erts_free_aligned_binary_bytes(temp_alloc); - - if (hp > endp) { - erl_exit(1, ":%s, line %d: heap overrun by %d words(s)\n", - __FILE__, __LINE__, hp-endp); - } - - HRelease(BIF_P, endp, hp); - - if (res == THE_NON_VALUE) - goto error; - - return res; +error: + BIF_ERROR(BIF_P, BADARG); } Eterm @@ -1357,9 +1605,9 @@ external_size_2(BIF_ALIST_2) if (tp[1] == am_minor_version && is_small(tp[2])) { switch (signed_val(tp[2])) { case 0: + flags &= ~DFLAG_NEW_FLOATS; break; case 1: - flags |= DFLAG_NEW_FLOATS; break; default: goto error; @@ -1473,25 +1721,29 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) { /* #define EXTREME_TTB_TRAPPING 1 */ #ifndef EXTREME_TTB_TRAPPING -#define TERM_TO_BINARY_LOOP_FACTOR 500 -#define TERM_TO_BINARY_SIZE_FACTOR 500000 -#define TERM_TO_BINARY_COMPRESS_CHUNK 500000 +#define TERM_TO_BINARY_LOOP_FACTOR 32 +#define TERM_TO_BINARY_COMPRESS_CHUNK (1 << 18) #else #define TERM_TO_BINARY_LOOP_FACTOR 1 -#define TERM_TO_BINARY_SIZE_FACTOR 10 #define TERM_TO_BINARY_COMPRESS_CHUNK 10 #endif typedef enum { TTBSize, TTBEncode, TTBCompress } TTBState; -typedef struct { +typedef struct TTBSizeContext_ { Uint flags; int level; + Uint result; + Eterm obj; + ErtsEStack estack; } TTBSizeContext; -typedef struct { +typedef struct TTBEncodeContext_ { Uint flags; int level; + byte* ep; + Eterm obj; + ErtsWStack wstack; Binary *result_bin; } TTBEncodeContext; @@ -1514,15 +1766,17 @@ typedef struct { } s; } TTBContext; -static void context_destructor(Binary *context_bin) +static void ttb_context_destructor(Binary *context_bin) { TTBContext *context = ERTS_MAGIC_BIN_DATA(context_bin); if (context->alive) { context->alive = 0; switch (context->state) { case TTBSize: + DESTROY_SAVED_ESTACK(&context->s.sc.estack); break; case TTBEncode: + DESTROY_SAVED_WSTACK(&context->s.ec.wstack); if (context->s.ec.result_bin != NULL) { /* Set to NULL if ever made alive! 
*/ ASSERT(erts_refc_read(&(context->s.ec.result_bin->refc),0) == 0); erts_bin_free(context->s.ec.result_bin); @@ -1567,7 +1821,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla do { \ if (context_b == NULL) { \ context_b = erts_create_magic_binary(sizeof(TTBContext), \ - context_destructor); \ + ttb_context_destructor); \ context = ERTS_MAGIC_BIN_DATA(context_b); \ memcpy(context,&c_buff,sizeof(TTBContext)); \ } \ @@ -1587,6 +1841,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla /* Setup enough to get started */ context->state = TTBSize; context->alive = 1; + context->s.sc.estack.start = NULL; context->s.sc.flags = flags; context->s.sc.level = level; } else { @@ -1602,7 +1857,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla int level; Uint flags; /* Try for fast path */ - if (encode_size_struct_int(p, NULL, Term, context->s.sc.flags, &reds, &size) < 0) { + if (encode_size_struct_int(&context->s.sc, NULL, Term, + context->s.sc.flags, &reds, &size) < 0) { EXPORT_CONTEXT(); /* Same state */ RETURN_STATE(); @@ -1615,7 +1871,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla /* Finish in one go */ res = erts_term_to_binary_simple(p, Term, size, level, flags); - BUMP_REDS(p, size / TERM_TO_BINARY_SIZE_FACTOR); + BUMP_REDS(p, 1); return res; } @@ -1628,6 +1884,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla context->state = TTBEncode; context->s.ec.flags = flags; context->s.ec.level = level; + context->s.ec.wstack.wstart = NULL; context->s.ec.result_bin = result_bin; break; } @@ -1639,7 +1896,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla Binary *result_bin; flags = context->s.ec.flags; - if (enc_term_int(p,NULL,Term, bytes+1, flags, NULL, &reds, &endp) < 0) { + if (enc_term_int(&context->s.ec, NULL,Term, bytes+1, flags, NULL, &reds, &endp) < 0) { EXPORT_CONTEXT(); RETURN_STATE(); } @@ -2047,27 +2304,6 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete #define ENC_PATCH_FUN_SIZE ((Eterm) 2) #define ENC_LAST_ARRAY_ELEMENT ((Eterm) 3) -/* Free extra rootset (used when trapping) */ -static void cleanup_ttb_extra_root(ErlExtraRootSet *rs) -{ - if (rs->objv != NULL) { - erts_free(ERTS_ALC_T_EXTRA_ROOT, rs->objv); - } - erts_free(ERTS_ALC_T_EXTRA_ROOT, rs); -} - -/* Same as above, but we have an extra "stack" beyond GC reach, i.e. 
an array of two extra roots */ -static void cleanup_ttb_extra_root_2(ErlExtraRootSet *rs) -{ - if (rs->objv != NULL) { - erts_free(ERTS_ALC_T_EXTRA_ROOT, rs->objv); - } - if (rs[1].objv != NULL) { - erts_free(ERTS_ALC_T_EXTRA_ROOT, rs[1].objv); - } - - erts_free(ERTS_ALC_T_EXTRA_ROOT, rs); -} static byte* enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags, @@ -2079,39 +2315,43 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags, } static int -enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags, +enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags, struct erl_off_heap_header** off_heap, Sint *reds, byte **res) { - DECLARE_ESTACK(s); - DECLARE_WSTACK(com); + DECLARE_WSTACK(s); Uint n; Uint i; Uint j; Uint* ptr; Eterm val; FloatDef f; - int count_reds = (p != NULL && reds != NULL); Sint r = 0; +#if HALFWORD_HEAP + UWord wobj; +#endif - if (count_reds) { - ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_EXTRA_ROOT); - WSTACK_CHANGE_ALLOCATOR(com, ERTS_ALC_T_EXTRA_ROOT); + + if (ctx) { + WSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); r = *reds; - } - if (p && p->extra_root) { /* restore saved stacks and byte pointer */ - ESTACK_RESTORE(s,p->extra_root[0].objv, p->extra_root[0].sz); - obj = ESTACK_POP(s); - WSTACK_RESTORE(com, p->extra_root[1].objv, p->extra_root[1].sz); - ep = (byte *) WSTACK_POP(com); + if (ctx->wstack.wstart) { /* restore saved stacks and byte pointer */ + WSTACK_RESTORE(s, &ctx->wstack); + ep = ctx->ep; + obj = ctx->obj; + } } goto L_jump_start; outer_loop: - while (!ESTACK_ISEMPTY(s)) { - obj = ESTACK_POP(s); - switch (val = WSTACK_POP(com)) { + while (!WSTACK_ISEMPTY(s)) { +#if HALFWORD_HEAP + obj = (Eterm) (wobj = WSTACK_POP(s)); +#else + obj = WSTACK_POP(s); +#endif + switch (val = WSTACK_POP(s)) { case ENC_TERM: break; case ENC_ONE_CONS: @@ -2122,55 +2362,52 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla obj = CAR(cons); tl = CDR(cons); - WSTACK_PUSH(com, is_list(tl) ? ENC_ONE_CONS : ENC_TERM); - ESTACK_PUSH(s, tl); + WSTACK_PUSH(s, is_list(tl) ? ENC_ONE_CONS : ENC_TERM); + WSTACK_PUSH(s, tl); } break; case ENC_PATCH_FUN_SIZE: - /* obj will be discarded, it was NIL */ { - byte* size_p = (byte *) WSTACK_POP(com); +#if HALFWORD_HEAP + byte* size_p = (byte *) wobj; +#else + byte* size_p = (byte *) obj; +#endif put_int32(ep - size_p, size_p); } goto outer_loop; case ENC_LAST_ARRAY_ELEMENT: /* obj is the tuple */ { - Eterm* ptr = tuple_val(obj); - i = arityval(*ptr); - obj = ptr[i]; +#if HALFWORD_HEAP + Eterm* ptr = (Eterm *) wobj; +#else + Eterm* ptr = (Eterm *) obj; +#endif + obj = *ptr; } break; default: /* ENC_LAST_ARRAY_ELEMENT+1 and upwards */ { - Eterm* ptr = tuple_val(obj); - i = arityval(*ptr); - ESTACK_PUSH(s, obj); /* put back tuple and next element index */ - WSTACK_PUSH(com, val-1); - obj = ptr[i - (val - ENC_LAST_ARRAY_ELEMENT)]; /* the index is counting down */ +#if HALFWORD_HEAP + Eterm* ptr = (Eterm *) wobj; +#else + Eterm* ptr = (Eterm *) obj; +#endif + WSTACK_PUSH(s, val-1); + obj = *ptr++; + WSTACK_PUSH(s, (UWord)ptr); } break; } L_jump_start: - if (count_reds && --r == 0) { + if (ctx && --r == 0) { *reds = r; - ESTACK_PUSH(s,obj); /* push back current object, to be popped on restore */ - WSTACK_PUSH(com,((UWord) ep)); - if (p->extra_root == NULL) { - /* NB. Allocate an array of two "extra-roots", of which only the first element - is seen and handled by the GC. Index 1 holds the Wstack. 
*/ - p->extra_root = erts_alloc(ERTS_ALC_T_EXTRA_ROOT, sizeof(ErlExtraRootSet)*2); - p->extra_root->objv = NULL; - p->extra_root->sz = 0; - p->extra_root->cleanup = cleanup_ttb_extra_root_2; - p->extra_root[1].objv = NULL; - p->extra_root[1].sz = 0; - p->extra_root[1].cleanup = NULL; /* Never used */ - } - ESTACK_SAVE(s, p->extra_root[0].objv, p->extra_root[0].sz); - WSTACK_SAVE(com, p->extra_root[1].objv, (p->extra_root[1].sz)); + ctx->obj = obj; + ctx->ep = ep; + WSTACK_SAVE(s, &ctx->wstack); return -1; } switch(tag_val_def(obj)) { @@ -2316,8 +2553,36 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla ep += 4; } if (i > 0) { - WSTACK_PUSH(com, ENC_LAST_ARRAY_ELEMENT+i-1); - ESTACK_PUSH(s, obj); + WSTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1); + WSTACK_PUSH(s, (UWord)ptr); + } + break; + + case MAP_DEF: + { + map_t *mp = (map_t*)map_val(obj); + Uint size = map_get_size(mp); + + *ep++ = MAP_EXT; + put_int32(size, ep); ep += 4; + + if (size > 0) { + Eterm *kptr = map_get_keys(mp); + Eterm *vptr = map_get_values(mp); + + for (i = size-1; i >= 1; i--) { + WSTACK_PUSH(s, ENC_TERM); + WSTACK_PUSH(s, (UWord) vptr[i]); + WSTACK_PUSH(s, ENC_TERM); + WSTACK_PUSH(s, (UWord) kptr[i]); + } + + WSTACK_PUSH(s, ENC_TERM); + WSTACK_PUSH(s, (UWord) vptr[0]); + + obj = kptr[0]; + goto L_jump_start; + } } break; @@ -2461,9 +2726,8 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla int ei; *ep++ = NEW_FUN_EXT; - WSTACK_PUSH(com, (UWord) ep); /* Position for patching in size */ - WSTACK_PUSH(com, ENC_PATCH_FUN_SIZE); - ESTACK_PUSH(s,NIL); /* Will be thrown away */ + WSTACK_PUSH(s, ENC_PATCH_FUN_SIZE); + WSTACK_PUSH(s, (UWord) ep); /* Position for patching in size */ ep += 4; *ep = funp->arity; ep += 1; @@ -2480,8 +2744,8 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla fun_env: for (ei = funp->num_free-1; ei > 0; ei--) { - WSTACK_PUSH(com, ENC_TERM); - ESTACK_PUSH(s, (UWord) funp->env[ei]); + WSTACK_PUSH(s, ENC_TERM); + WSTACK_PUSH(s, (UWord) funp->env[ei]); } if (funp->num_free != 0) { obj = funp->env[0]; @@ -2524,13 +2788,9 @@ enc_term_int(Process *p,ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dfla break; } } - DESTROY_ESTACK(s); - DESTROY_WSTACK(com); - if (p && p->extra_root) { - cleanup_ttb_extra_root_2(p->extra_root); - p->extra_root = NULL; - } - if (count_reds) { + DESTROY_WSTACK(s); + if (ctx) { + ASSERT(ctx->wstack.wstart == NULL); *reds = r; } *res = ep; @@ -2604,21 +2864,115 @@ undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end) #endif /* DEBUG */ } + /* Decode term from external format into *objp. ** On failure return NULL and (R13B04) *hpp will be unchanged. 
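** With a non-NULL B2TContext the decoding is also resumable: NULL is ** then returned when the reduction budget is exhausted as well, with the ** resume state saved in the context (ctx->state tells a trap apart from ** a genuine failure).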
*/ static byte* -dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Eterm* objp) +dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, + Eterm* objp, B2TContext* ctx) { - Eterm* hp_saved = *hpp; + Eterm* hp_saved; int n; ErtsAtomEncoding char_enc; - register Eterm* hp = *hpp; /* Please don't take the address of hp */ - Eterm* next = objp; + register Eterm* hp; /* Please don't take the address of hp */ + Eterm *maps_head; /* for validation of maps */ + Eterm* next; + SWord reds; + + if (ctx) { + hp_saved = ctx->u.dc.hp_start; + reds = ctx->reds; + next = ctx->u.dc.next; + ep = ctx->u.dc.ep; + hpp = &ctx->u.dc.hp; + maps_head = ctx->u.dc.maps_head; + + if (ctx->state != B2TDecode) { + int n_limit = reds; + + n = ctx->u.dc.remaining_n; + if (ctx->state == B2TDecodeBinary) { + n_limit *= B2T_MEMCPY_FACTOR; + ASSERT(n_limit >= reds); + reds -= n / B2T_MEMCPY_FACTOR; + } + else + reds -= n; - *next = (Eterm) (UWord) NULL; + if (n > n_limit) { + ctx->u.dc.remaining_n -= n_limit; + n = n_limit; + reds = 0; + } + else { + ctx->u.dc.remaining_n = 0; + } + + switch (ctx->state) { + case B2TDecodeList: + objp = next - 2; + while (n > 0) { + objp[0] = (Eterm) COMPRESS_POINTER(next); + objp[1] = make_list(next); + next = objp; + objp -= 2; + n--; + } + break; + + case B2TDecodeTuple: + objp = next - 1; + while (n-- > 0) { + objp[0] = (Eterm) COMPRESS_POINTER(next); + next = objp; + objp--; + } + break; + + case B2TDecodeString: + hp = *hpp; + hp[-1] = make_list(hp); /* overwrite the premature NIL */ + while (n-- > 0) { + hp[0] = make_small(*ep++); + hp[1] = make_list(hp+2); + hp += 2; + } + hp[-1] = NIL; + *hpp = hp; + break; + + case B2TDecodeBinary: + sys_memcpy(ctx->u.dc.remaining_bytes, ep, n); + ctx->u.dc.remaining_bytes += n; + ep += n; + break; + + default: + ASSERT(!"Unknown state"); + } + if (!ctx->u.dc.remaining_n) { + ctx->state = B2TDecode; + } + if (reds <= 0) { + ctx->u.dc.next = next; + ctx->u.dc.ep = ep; + ctx->reds = 0; + return NULL; + } + } + } + else { + hp_saved = *hpp; + reds = ERTS_SWORD_MAX; + next = objp; + *next = (Eterm) (UWord) NULL; + maps_head = NULL; + } + hp = *hpp; while (next != NULL) { + objp = next; next = (Eterm *) EXPAND_POINTER(*objp); @@ -2738,7 +3092,16 @@ dec_term_atom_common: *objp = make_tuple(hp); *hp++ = make_arityval(n); hp += n; - objp = hp - 1; + objp = hp - 1; + if (ctx) { + if (reds < n) { + ASSERT(reds > 0); + ctx->state = B2TDecodeTuple; + ctx->u.dc.remaining_n = n - reds; + n = reds; + } + reds -= n; + } while (n-- > 0) { objp[0] = (Eterm) COMPRESS_POINTER(next); next = objp; @@ -2756,17 +3119,27 @@ dec_term_atom_common: break; } *objp = make_list(hp); - hp += 2*n; + hp += 2 * n; objp = hp - 2; objp[0] = (Eterm) COMPRESS_POINTER((objp+1)); objp[1] = (Eterm) COMPRESS_POINTER(next); next = objp; objp -= 2; - while (--n > 0) { + n--; + if (ctx) { + if (reds < n) { + ctx->state = B2TDecodeList; + ctx->u.dc.remaining_n = n - reds; + n = reds; + } + reds -= n; + } + while (n > 0) { objp[0] = (Eterm) COMPRESS_POINTER(next); - objp[1] = make_list(objp + 2); + objp[1] = make_list(next); next = objp; objp -= 2; + n--; } break; case STRING_EXT: @@ -2777,6 +3150,14 @@ dec_term_atom_common: break; } *objp = make_list(hp); + if (ctx) { + if (reds < n) { + ctx->state = B2TDecodeString; + ctx->u.dc.remaining_n = n - reds; + n = reds; + } + reds -= n; + } while (n-- > 0) { hp[0] = make_small(*ep++); hp[1] = make_list(hp+2); @@ -2984,7 +3365,6 @@ dec_term_atom_common: dbin->flags = 0; dbin->orig_size = n; 
erts_refc_init(&dbin->refc, 1); - sys_memcpy(dbin->orig_bytes, ep, n); pb = (ProcBin *) hp; hp += PROC_BIN_SIZE; pb->thing_word = HEADER_PROC_BIN; @@ -2995,7 +3375,20 @@ dec_term_atom_common: pb->bytes = (byte*) dbin->orig_bytes; pb->flags = 0; *objp = make_binary(pb); - } + if (ctx) { + int n_limit = reds * B2T_MEMCPY_FACTOR; + if (n > n_limit) { + ctx->state = B2TDecodeBinary; + ctx->u.dc.remaining_n = n - n_limit; + ctx->u.dc.remaining_bytes = dbin->orig_bytes + n_limit; + n = n_limit; + reds = 0; + } + else + reds -= n / B2T_MEMCPY_FACTOR; + } + sys_memcpy(dbin->orig_bytes, ep, n); + } ep += n; break; } @@ -3018,13 +3411,14 @@ dec_term_atom_common: sys_memcpy(hb->data, ep, n); bin = make_binary(hb); hp += heap_bin_size(n); + ep += n; } else { Binary* dbin = erts_bin_nrml_alloc(n); ProcBin* pb; + dbin->flags = 0; dbin->orig_size = n; erts_refc_init(&dbin->refc, 1); - sys_memcpy(dbin->orig_bytes, ep, n); pb = (ProcBin *) hp; pb->thing_word = HEADER_PROC_BIN; pb->size = n; @@ -3035,8 +3429,23 @@ dec_term_atom_common: pb->flags = 0; bin = make_binary(pb); hp += PROC_BIN_SIZE; - } - ep += n; + if (ctx) { + int n_limit = reds * B2T_MEMCPY_FACTOR; + if (n > n_limit) { + ctx->state = B2TDecodeBinary; + ctx->u.dc.remaining_n = n - n_limit; + ctx->u.dc.remaining_bytes = dbin->orig_bytes + n_limit; + n = n_limit; + reds = 0; + } + else + reds -= n / B2T_MEMCPY_FACTOR; + } + sys_memcpy(dbin->orig_bytes, ep, n); + ep += n; + n = pb->size; + } + if (bitsize == 8 || n == 0) { *objp = bin; } else { @@ -3067,7 +3476,7 @@ dec_term_atom_common: goto error; } *hpp = hp; - ep = dec_term(edep, hpp, ep, off_heap, &temp); + ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL); hp = *hpp; if (ep == NULL) { goto error; @@ -3094,6 +3503,50 @@ dec_term_atom_common: break; } break; + case MAP_EXT: + { + map_t *mp; + Uint32 size,n; + Eterm *kptr,*vptr; + Eterm keys; + + size = get_int32(ep); ep += 4; + + keys = make_tuple(hp); + *hp++ = make_arityval(size); + hp += size; + kptr = hp - 1; + + mp = (map_t*)hp; + hp += MAP_HEADER_SIZE; + hp += size; + vptr = hp - 1; + + /* kptr, last word for keys + * vptr, last word for values + */ + + /* + * Use thing_word to link through decoded maps. + * The list of maps is for later validation. 
+ */ + + mp->thing_word = (Eterm) COMPRESS_POINTER(maps_head); + maps_head = (Eterm *) mp; + + mp->size = size; + mp->keys = keys; + *objp = make_map(mp); + + for (n = size; n; n--) { + *vptr = (Eterm) COMPRESS_POINTER(next); + *kptr = (Eterm) COMPRESS_POINTER(vptr); + next = kptr; + vptr--; + kptr--; + } + } + break; case NEW_FUN_EXT: { ErlFunThing* funp = (ErlFunThing *) hp; @@ -3127,7 +3580,7 @@ dec_term_atom_common: } *hpp = hp; /* Index */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) { + if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3136,7 +3589,7 @@ dec_term_atom_common: old_index = unsigned_val(temp); /* Uniq */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) { + if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3204,7 +3657,7 @@ dec_term_atom_common: } /* Index */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) { + if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3213,7 +3666,7 @@ dec_term_atom_common: old_index = unsigned_val(temp); /* Uniq */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) { + if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3303,21 +3756,62 @@ dec_term_atom_common: } default: - error: - /* UNDO: - * Must unlink all off-heap objects that may have been - * linked into the process. - */ - if (hp < *hpp) { /* Sometimes we used hp and sometimes *hpp */ - hp = *hpp; /* the largest must be the freshest */ - } - undo_offheap_in_area(off_heap, hp_saved, hp); - *hpp = hp_saved; - return NULL; + goto error; } + + if (--reds <= 0) { + if (ctx) { + if (next || ctx->state != B2TDecode) { + ctx->u.dc.ep = ep; + ctx->u.dc.next = next; + ctx->u.dc.hp = hp; + ctx->u.dc.maps_head = maps_head; + ctx->reds = 0; + return NULL; + } + } + else { + reds = ERTS_SWORD_MAX; + } + } } + + /* Iterate through all the maps and check for validity and sort keys + * - done here for when we know it is complete. + */ + + while (maps_head) { + next = (Eterm *)(EXPAND_POINTER(*maps_head)); + *maps_head = MAP_HEADER; + if (!erts_validate_and_sort_map((map_t*)maps_head)) + goto error; + maps_head = next; + } + + if (ctx) { + ctx->state = B2TDone; + ctx->reds = reds; + } + *hpp = hp; return ep; + +error: + /* UNDO: + * Must unlink all off-heap objects that may have been + * linked into the process. 
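+     * Otherwise the off-heap list would keep dangling references into + * the heap area that is released below.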
+ */ + if (hp < *hpp) { /* Sometimes we used hp and sometimes *hpp */ + hp = *hpp; /* the largest must be the freshest */ + } + undo_offheap_in_area(off_heap, hp_saved, hp); + *hpp = hp_saved; + if (ctx) { + ctx->state = B2TDecodeFail; + ctx->reds = reds; + } + + return NULL; } /* returns the number of bytes needed to encode an object @@ -3331,26 +3825,24 @@ static Uint encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dfla } static int -encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj, +encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags, Sint *reds, Uint *res) { DECLARE_ESTACK(s); Uint m, i, arity; Uint result = 0; - int count_reds = (p != NULL && reds != 0); Sint r = 0; - if (count_reds) { - ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_EXTRA_ROOT); + if (ctx) { + ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); r = *reds; - } - if (p && p->extra_root) { /* restore saved stack */ - ESTACK_RESTORE(s,p->extra_root->objv, p->extra_root->sz + 1); - result = ESTACK_POP(s); /*Untagged, beyond p->extra_root->sz */ - obj = ESTACK_POP(s); - - } + if (ctx->estack.start) { /* restore saved stack */ + ESTACK_RESTORE(s, &ctx->estack); + result = ctx->result; + obj = ctx->obj; + } + } goto L_jump_start; @@ -3376,18 +3868,11 @@ encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj, } L_jump_start: - if (count_reds && --r == 0) { + if (ctx && --r == 0) { *reds = r; - ESTACK_PUSH(s,obj); /* push back current object */ - ESTACK_PUSH(s,result); /* Untagged, will be out of GC reach */ - if (p->extra_root == NULL) { - p->extra_root = erts_alloc(ERTS_ALC_T_EXTRA_ROOT, sizeof(ErlExtraRootSet)); - p->extra_root->objv = NULL; - p->extra_root->sz = 0; - p->extra_root->cleanup = cleanup_ttb_extra_root; - } - ESTACK_SAVE(s, p->extra_root->objv, p->extra_root->sz); - --p->extra_root->sz; /* Hide result from GC */ + ctx->obj = obj; + ctx->result = result; + ESTACK_SAVE(s, &ctx->estack); return -1; } switch (tag_val_def(obj)) { @@ -3496,6 +3981,46 @@ encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj, goto outer_loop; } break; + case MAP_DEF: + { + map_t *mp = (map_t*)map_val(obj); + Uint size = map_get_size(mp); + Uint i; + Eterm *ptr; + + result += 1 + 4; /* tag + 4 bytes size */ + + /* push values first */ + ptr = map_get_values(mp); + i = size; + while(i--) { + if (is_list(*ptr)) { + if ((m = is_string(*ptr)) && (m < MAX_STRING_LEN)) { + result += m + 2 + 1; + } else { + result += 5; + } + } + ESTACK_PUSH(s,*ptr); + ++ptr; + } + + ptr = map_get_keys(mp); + i = size; + while(i--) { + if (is_list(*ptr)) { + if ((m = is_string(*ptr)) && (m < MAX_STRING_LEN)) { + result += m + 2 + 1; + } else { + result += 5; + } + } + ESTACK_PUSH(s,*ptr); + ++ptr; + } + goto outer_loop; + } + break; case FLOAT_DEF: if (dflags & DFLAG_NEW_FLOATS) { result += 9; @@ -3590,11 +4115,8 @@ encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj, } DESTROY_ESTACK(s); - if (p && p->extra_root) { - cleanup_ttb_extra_root(p->extra_root); - p->extra_root = NULL; - } - if (count_reds) { + if (ctx) { + ASSERT(ctx->estack.start == NULL); *reds = r; } *res = result; @@ -3602,18 +4124,37 @@ encode_size_struct_int(Process *p, ErtsAtomCacheMap *acmp, Eterm obj, } static Sint -decoded_size(byte *ep, byte* endp, int internal_tags) +decoded_size(byte *ep, byte* endp, int internal_tags, B2TContext* ctx) { - int heap_size = 0; + int heap_size; int terms; - int atom_extra_skip = 0; + int atom_extra_skip; Uint n; + SWord reds; + + if (ctx) { 
+ reds = ctx->reds; + if (ctx->u.sc.ep) { + heap_size = ctx->u.sc.heap_size; + terms = ctx->u.sc.terms; + ep = ctx->u.sc.ep; + atom_extra_skip = ctx->u.sc.atom_extra_skip; + goto init_done; + } + } + else + reds = 0; /* not used but compiler warns anyway */ + + heap_size = 0; + terms = 1; + atom_extra_skip = 0; +init_done: #define SKIP(sz) \ do { \ if ((sz) <= endp-ep) { \ ep += (sz); \ - } else { return -1; }; \ + } else { goto error; }; \ } while (0) #define SKIP2(sz1, sz2) \ @@ -3621,31 +4162,32 @@ decoded_size(byte *ep, byte* endp, int internal_tags) Uint sz = (sz1) + (sz2); \ if (sz1 < sz && (sz) <= endp-ep) { \ ep += (sz); \ - } else { return -1; } \ + } else { goto error; } \ } while (0) #define CHKSIZE(sz) \ do { \ - if ((sz) > endp-ep) { return -1; } \ + if ((sz) > endp-ep) { goto error; } \ } while (0) #define ADDTERMS(n) \ do { \ int before = terms; \ terms += (n); \ - if (terms < before) return -1; \ + if (terms < before) goto error; \ } while (0) - - for (terms=1; terms > 0; terms--) { - int tag; - + ASSERT(terms > 0); + do { + int tag; CHKSIZE(1); tag = ep++[0]; switch (tag) { case INTEGER_EXT: SKIP(4); +#if !defined(ARCH_64) || HALFWORD_HEAP heap_size += BIG_UINT_HEAP_SIZE; +#endif break; case SMALL_INTEGER_EXT: SKIP(1); @@ -3660,7 +4202,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags) CHKSIZE(4); n = get_int32(ep); if (n > BIG_ARITY_MAX*sizeof(ErtsDigit)) { - return -1; + goto error; } SKIP2(n,4+1); /* skip, size,sign,digits */ heap_size += 1+1+(n+sizeof(Eterm)-1)/sizeof(Eterm); /* XXX: 1 too much? */ @@ -3669,7 +4211,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags) CHKSIZE(2); n = get_int16(ep); if (n > MAX_ATOM_CHARACTERS) { - return -1; + goto error; } SKIP(n+2+atom_extra_skip); atom_extra_skip = 0; @@ -3679,7 +4221,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags) n = get_int16(ep); ep += 2; if (n > MAX_ATOM_SZ_LIMIT) { - return -1; + goto error; } SKIP(n+atom_extra_skip); atom_extra_skip = 0; @@ -3688,7 +4230,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags) CHKSIZE(1); n = get_int8(ep); if (n > MAX_ATOM_CHARACTERS) { - return -1; + goto error; } SKIP(n+1+atom_extra_skip); atom_extra_skip = 0; @@ -3698,7 +4240,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags) n = get_int8(ep); ep++; if (n > MAX_ATOM_SZ_LIMIT) { - return -1; + goto error; } SKIP(n+atom_extra_skip); atom_extra_skip = 0; @@ -3727,7 +4269,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags) id_words = get_int16(ep); if (id_words > ERTS_MAX_REF_NUMBERS) - return -1; + goto error; ep += 2; atom_extra_skip = 1 + 4*id_words; @@ -3769,6 +4311,13 @@ decoded_size(byte *ep, byte* endp, int internal_tags) ADDTERMS(n); heap_size += n + 1; break; + case MAP_EXT: + CHKSIZE(4); + n = get_int32(ep); + ep += 4; + ADDTERMS(2*n); + heap_size += 3 + n + 1 + n; + break; case STRING_EXT: CHKSIZE(2); n = get_int16(ep); @@ -3829,7 +4378,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags) num_free = get_int32(ep); ep += 4; if (num_free > MAX_ARG) { - return -1; + goto error; } terms += 4 + num_free; heap_size += ERL_FUN_SIZE + num_free; @@ -3846,25 +4395,111 @@ decoded_size(byte *ep, byte* endp, int internal_tags) case BINARY_INTERNAL_REF: if (!internal_tags) { - return -1; + goto error; } SKIP(sizeof(ProcBin)); heap_size += PROC_BIN_SIZE; break; case BIT_BINARY_INTERNAL_REF: if (!internal_tags) { - return -1; + goto error; } SKIP(2+sizeof(ProcBin)); heap_size += PROC_BIN_SIZE + ERL_SUB_BIN_SIZE; break; default: - return -1; + goto error; } - } + terms--; + + if (ctx && 
--reds <= 0 && terms > 0) { + ctx->u.sc.heap_size = heap_size; + ctx->u.sc.terms = terms; + ctx->u.sc.ep = ep; + ctx->u.sc.atom_extra_skip = atom_extra_skip; + ctx->reds = 0; + return 0; + } + }while (terms > 0); + /* 'terms' may be non-zero if it has wrapped around */ - return terms==0 ? heap_size : -1; + if (terms == 0) { + if (ctx) { + ctx->state = B2TDecodeInit; + ctx->reds = reds; + } + return heap_size; + } + +error: + if (ctx) { + ctx->state = B2TBadArg; + } + return -1; #undef SKIP #undef SKIP2 #undef CHKSIZE } + + +#ifdef HIPE +BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1); +BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2); +BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1); +BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2); + +/* Hipe wrappers used by native code for BIFs that disable GC while trapping. + * + * Problem: + * When native code calls a BIF that traps, hipe_mode_switch will push a + * "trap frame" on the Erlang stack in order to find its way back from beam_emu + * back to native caller when finally done. If GC is disabled and stack/heap + * is full there is no place to push the "trap frame". + * + * Solution: + * We reserve space on stack for the "trap frame" here before the BIF is called. + * If the BIF does not trap, the space is reclaimed here before returning. + * If the BIF traps, hipe_push_beam_trap_frame() will detect that a "trap frame" + * already is reserved and use it. + */ +BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1) +{ + Eterm res; + hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1); + res = term_to_binary_1(BIF_P, BIF__ARGS); + if (is_value(res) || BIF_P->freason != TRAP) { + hipe_unreserve_beam_trap_frame(BIF_P); + } + return res; +} +BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2) +{ + Eterm res; + hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2); + res = term_to_binary_2(BIF_P, BIF__ARGS); + if (is_value(res) || BIF_P->freason != TRAP) { + hipe_unreserve_beam_trap_frame(BIF_P); + } + return res; +} +BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1) +{ + Eterm res; + hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1); + res = erts_internal_binary_to_term_1(BIF_P, BIF__ARGS); + if (is_value(res) || BIF_P->freason != TRAP) { + hipe_unreserve_beam_trap_frame(BIF_P); + } + return res; +} +BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2) +{ + Eterm res; + hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2); + res = erts_internal_binary_to_term_2(BIF_P, BIF__ARGS); + if (is_value(res) || BIF_P->freason != TRAP) { + hipe_unreserve_beam_trap_frame(BIF_P); + } + return res; +} +#endif /*HIPE*/ diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h index ff29e84972..bf00958eb1 100644 --- a/erts/emulator/beam/external.h +++ b/erts/emulator/beam/external.h @@ -50,6 +50,7 @@ #define LARGE_BIG_EXT 'o' #define NEW_FUN_EXT 'p' #define EXPORT_EXT 'q' +#define MAP_EXT 't' #define FUN_EXT 'u' #define ATOM_UTF8_EXT 'v' #define SMALL_ATOM_UTF8_EXT 'w' @@ -146,6 +147,7 @@ typedef struct { typedef struct { byte *extp; int exttmp; + Uint extsize; } ErtsBinary2TermState; /* -------------------------------------------------------------------------- */ diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 063d16c0c7..8fcb95d0e2 100755 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. 
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -186,11 +186,6 @@ extern void erts_ddll_remove_monitor(Process *p, extern Eterm erts_ddll_monitor_driver(Process *p, Eterm description, ErtsProcLocks plocks); -/* - * Max no. of drivers (linked in and dynamically loaded). Each table - * entry uses 4 bytes. - */ -#define DRIVER_TAB_SIZE 32 /* ** Just like the driver binary but with initial flags @@ -375,231 +370,233 @@ extern int stackdump_on_exit; * DESTROY_ESTACK(Stack) */ +typedef struct { + Eterm* start; + Eterm* sp; + Eterm* end; + ErtsAlcType_t alloc_type; +}ErtsEStack; -void erl_grow_stack(ErtsAlcType_t a_type, Eterm** start, Eterm** sp, Eterm** end); -#define ESTK_CONCAT(a,b) a##b -#define ESTK_SUBSCRIPT(s,i) *((Eterm *)((byte *)ESTK_CONCAT(s,_start) + (i))) #define DEF_ESTACK_SIZE (16) -#define DECLARE_ESTACK(s) \ - Eterm ESTK_CONCAT(s,_default_stack)[DEF_ESTACK_SIZE]; \ - Eterm* ESTK_CONCAT(s,_start) = ESTK_CONCAT(s,_default_stack); \ - Eterm* ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start); \ - Eterm* ESTK_CONCAT(s,_end) = ESTK_CONCAT(s,_start) + DEF_ESTACK_SIZE;\ - ErtsAlcType_t ESTK_CONCAT(s,_alloc_type) = ERTS_ALC_T_ESTACK +void erl_grow_estack(ErtsEStack*, Eterm* def_stack); +#define ESTK_CONCAT(a,b) a##b +#define ESTK_DEF_STACK(s) ESTK_CONCAT(s,_default_estack) + +#define DECLARE_ESTACK(s) \ + Eterm ESTK_DEF_STACK(s)[DEF_ESTACK_SIZE]; \ + ErtsEStack s = { \ + ESTK_DEF_STACK(s), /* start */ \ + ESTK_DEF_STACK(s), /* sp */ \ + ESTK_DEF_STACK(s) + DEF_ESTACK_SIZE, /* end */ \ + ERTS_ALC_T_ESTACK /* alloc_type */ \ + } #define ESTACK_CHANGE_ALLOCATOR(s,t) \ do { \ - if (ESTK_CONCAT(s,_start) != ESTK_CONCAT(s,_default_stack)) { \ + if (s.start != ESTK_DEF_STACK(s)) { \ erl_exit(1, "Internal error - trying to change allocator " \ "type of active estack\n"); \ } \ - ESTK_CONCAT(s,_alloc_type) = (t); \ + s.alloc_type = (t); \ } while (0) +#define DESTROY_ESTACK(s) \ +do { \ + if (s.start != ESTK_DEF_STACK(s)) { \ + erts_free(s.alloc_type, s.start); \ + } \ +} while(0) + + /* - * Do not free the stack after this, it may have pointers into what - * was saved in 'v'. 'v' and 'vsize' are changed by this macro. If - * 'v' points to anything, it should have been allocated by a previous - * call to this macro. Be careful to set a correct allocator prior to - * saving. - * 'v' can be any lvalue pointer, it will point to an array of UWord - * after calling this macro. + * Do not free the stack after this, it may have pointers into what + * was saved in 'dst'. 
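+ * + * A typical suspend/resume round-trip looks roughly like this (sketch): + * + *     ESTACK_SAVE(s, &ctx->estack);        before trapping + *     ... + *     DECLARE_ESTACK(s2);                  in the next slice + *     ESTACK_RESTORE(s2, &ctx->estack);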
*/ -#define ESTACK_SAVE(s,v,vsize) /* v and vsize are "name parameters" */ \ -do { \ - Uint _esz = ESTACK_COUNT(s); \ - if (ESTK_CONCAT(s,_start) == ESTK_CONCAT(s,_default_stack)) { \ - if ((v) == NULL) { \ - (v) = erts_alloc(ESTK_CONCAT(s,_alloc_type), \ - DEF_ESTACK_SIZE * sizeof(Eterm)); \ - } \ - memcpy((v),ESTK_CONCAT(s,_start),_esz*sizeof(Eterm)); \ - } else { \ - (v) = (void *) ESTK_CONCAT(s,_start); \ - } \ - (vsize) = _esz; \ +#define ESTACK_SAVE(s,dst)\ +do {\ + if (s.start == ESTK_DEF_STACK(s)) {\ + UWord _wsz = ESTACK_COUNT(s);\ + (dst)->start = erts_alloc(s.alloc_type,\ + DEF_ESTACK_SIZE * sizeof(Eterm));\ + memcpy((dst)->start, s.start,_wsz*sizeof(Eterm));\ + (dst)->sp = (dst)->start + _wsz;\ + (dst)->end = (dst)->start + DEF_ESTACK_SIZE;\ + (dst)->alloc_type = s.alloc_type;\ + } else\ + *(dst) = s;\ } while (0) -/* - * Use on empty stack, only the allocator can be changed before this - * The vector parameter is reset to NULL if the vector is moved to stack, - * otherwise it's kept for reuse, so a saved and restored vector might - * need freeing using the correct allocator parameter. - * 'v' can be any lvalue pointer, it's cast to an (Eterm *). +#define DESTROY_SAVED_ESTACK(estack)\ +do {\ + if ((estack)->start) {\ + erts_free((estack)->alloc_type, (estack)->start);\ + (estack)->start = NULL;\ + }\ +} while(0) + +/* + * Use on empty stack, only the allocator can be changed before this. + * The src stack is reset to NULL. */ -#define ESTACK_RESTORE(s, v, vsize) /*v is a "name parameter"*/ \ -do { \ - if ((vsize) > DEF_ESTACK_SIZE) { \ - Uint _ca = DEF_ESTACK_SIZE; \ - while (_ca < (vsize)) \ - _ca = _ca * 2; \ - ESTK_CONCAT(s,_start) = (Eterm *) (v); \ - ESTK_CONCAT(s,_end) = ((Eterm *)(v)) + _ca; \ - ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start) + (vsize); \ - (v) = NULL; \ - } else { \ - memcpy(ESTK_CONCAT(s,_start),(v),(vsize)*sizeof(Eterm));\ - ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start) + (vsize); \ - } \ - } while (0) +#define ESTACK_RESTORE(s, src) \ +do { \ + ASSERT(s.start == ESTK_DEF_STACK(s)); \ + s = *(src); /* struct copy */ \ + (src)->start = NULL; \ + ASSERT(s.sp >= s.start); \ + ASSERT(s.sp <= s.end); \ +} while (0) -#define ESTACK_IS_STATIC(s) (ESTK_CONCAT(s,_start) == ESTK_CONCAT(s,_default_stack)) +#define ESTACK_IS_STATIC(s) (s.start == ESTK_DEF_STACK(s))) -#define DESTROY_ESTACK(s) \ -do { \ - if (ESTK_CONCAT(s,_start) != ESTK_CONCAT(s,_default_stack)) { \ - erts_free(ESTK_CONCAT(s,_alloc_type), ESTK_CONCAT(s,_start)); \ - } \ +#define ESTACK_PUSH(s, x) \ +do { \ + if (s.sp == s.end) { \ + erl_grow_estack(&s, ESTK_DEF_STACK(s)); \ + } \ + *s.sp++ = (x); \ } while(0) -#define ESTACK_PUSH(s, x) \ -do { \ - if (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_end)) { \ - erl_grow_stack(ESTK_CONCAT(s,_alloc_type),&ESTK_CONCAT(s,_start), \ - &ESTK_CONCAT(s,_sp), &ESTK_CONCAT(s,_end)); \ - } \ - *ESTK_CONCAT(s,_sp)++ = (x); \ +#define ESTACK_PUSH2(s, x, y) \ +do { \ + if (s.sp > s.end - 2) { \ + erl_grow_estack(&s, ESTK_DEF_STACK(s)); \ + } \ + *s.sp++ = (x); \ + *s.sp++ = (y); \ } while(0) -#define ESTACK_PUSH2(s, x, y) \ -do { \ - if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 2) { \ - erl_grow_stack(ESTK_CONCAT(s,_alloc_type),&ESTK_CONCAT(s,_start), \ - &ESTK_CONCAT(s,_sp), &ESTK_CONCAT(s,_end)); \ - } \ - *ESTK_CONCAT(s,_sp)++ = (x); \ - *ESTK_CONCAT(s,_sp)++ = (y); \ +#define ESTACK_PUSH3(s, x, y, z) \ +do { \ + if (s.sp > s.end - 3) { \ + erl_grow_estack(&s, ESTK_DEF_STACK(s)); \ + } \ + *s.sp++ = (x); \ + *s.sp++ = (y); \ + *s.sp++ = (z); \ } while(0) -#define 
ESTACK_PUSH3(s, x, y, z) \ -do { \ - if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 3) { \ - erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \ - &ESTK_CONCAT(s,_end)); \ - } \ - *ESTK_CONCAT(s,_sp)++ = (x); \ - *ESTK_CONCAT(s,_sp)++ = (y); \ - *ESTK_CONCAT(s,_sp)++ = (z); \ -} while(0) +#define ESTACK_COUNT(s) (s.sp - s.start) +#define ESTACK_ISEMPTY(s) (s.sp == s.start) +#define ESTACK_POP(s) (*(--s.sp)) -#define ESTACK_COUNT(s) (ESTK_CONCAT(s,_sp) - ESTK_CONCAT(s,_start)) -#define ESTACK_ISEMPTY(s) (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_start)) -#define ESTACK_POP(s) (*(--ESTK_CONCAT(s,_sp))) +/* + * WSTACK: same as ESTACK but with UWord instead of Eterm + */ +typedef struct { + UWord* wstart; + UWord* wsp; + UWord* wend; + ErtsAlcType_t alloc_type; +}ErtsWStack; -void erl_grow_wstack(ErtsAlcType_t a_type, UWord** start, UWord** sp, UWord** end); -#define WSTK_CONCAT(a,b) a##b -#define WSTK_SUBSCRIPT(s,i) *((UWord *)((byte *)WSTK_CONCAT(s,_start) + (i))) #define DEF_WSTACK_SIZE (16) -#define DECLARE_WSTACK(s) \ - UWord WSTK_CONCAT(s,_default_stack)[DEF_WSTACK_SIZE]; \ - UWord* WSTK_CONCAT(s,_start) = WSTK_CONCAT(s,_default_stack); \ - UWord* WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start); \ - UWord* WSTK_CONCAT(s,_end) = WSTK_CONCAT(s,_start) + DEF_WSTACK_SIZE; \ - ErtsAlcType_t WSTK_CONCAT(s,_alloc_type) = ERTS_ALC_T_ESTACK +void erl_grow_wstack(ErtsWStack*, UWord* def_stack); +#define WSTK_CONCAT(a,b) a##b +#define WSTK_DEF_STACK(s) WSTK_CONCAT(s,_default_wstack) + +#define DECLARE_WSTACK(s) \ + UWord WSTK_DEF_STACK(s)[DEF_WSTACK_SIZE]; \ + ErtsWStack s = { \ + WSTK_DEF_STACK(s), /* wstart */ \ + WSTK_DEF_STACK(s), /* wsp */ \ + WSTK_DEF_STACK(s) + DEF_WSTACK_SIZE, /* wend */ \ + ERTS_ALC_T_ESTACK /* alloc_type */ \ + } #define WSTACK_CHANGE_ALLOCATOR(s,t) \ do { \ - if (WSTK_CONCAT(s,_start) != WSTK_CONCAT(s,_default_stack)) { \ + if (s.wstart != WSTK_DEF_STACK(s)) { \ erl_exit(1, "Internal error - trying to change allocator " \ "type of active wstack\n"); \ } \ - WSTK_CONCAT(s,_alloc_type) = (t); \ + s.alloc_type = (t); \ } while (0) -#define DESTROY_WSTACK(s) \ -do { \ - if (WSTK_CONCAT(s,_start) != WSTK_CONCAT(s,_default_stack)) { \ - erts_free(WSTK_CONCAT(s,_alloc_type), WSTK_CONCAT(s,_start)); \ - } \ +#define DESTROY_WSTACK(s) \ +do { \ + if (s.wstart != WSTK_DEF_STACK(s)) { \ + erts_free(s.alloc_type, s.wstart); \ + } \ } while(0) + /* - * Do not free the stack after this, it may have pointers into what - * was saved in 'v'. 'v' and 'vsize' are changed by this macro. If - * 'v' points to anything, it should have been allocated by a previous - * call to this macro. Be careful to set a correct allocator prior to - * saving. - * 'v' can be any lvalue pointer, it will point to an array of UWord - * after calling this macro. + * Do not free the stack after this, it may have pointers into what + * was saved in 'dst'. 
*/ -#define WSTACK_SAVE(s,v,vsize) /* v and vsize are "name parameters" */ \ -do { \ - Uint _wsz = WSTACK_COUNT(s); \ - if (WSTK_CONCAT(s,_start) == WSTK_CONCAT(s,_default_stack)) { \ - if ((v) == NULL) { \ - (v) = erts_alloc(WSTK_CONCAT(s,_alloc_type), \ - DEF_WSTACK_SIZE * sizeof(UWord)); \ - } \ - memcpy((v),WSTK_CONCAT(s,_start),_wsz*sizeof(UWord)); \ - } else { \ - (v) = (void *) WSTK_CONCAT(s,_start); \ - } \ - (vsize) = _wsz; \ +#define WSTACK_SAVE(s,dst)\ +do {\ + if (s.wstart == WSTK_DEF_STACK(s)) {\ + UWord _wsz = WSTACK_COUNT(s);\ + (dst)->wstart = erts_alloc(s.alloc_type,\ + DEF_WSTACK_SIZE * sizeof(UWord));\ + memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\ + (dst)->wsp = (dst)->wstart + _wsz;\ + (dst)->wend = (dst)->wstart + DEF_WSTACK_SIZE;\ + (dst)->alloc_type = s.alloc_type;\ + } else\ + *(dst) = s;\ } while (0) -/* - * Use on empty stack, only the allocator can be changed before this - * The vector parameter is reset to NULL if the vector is moved to stack, - * otherwise it's kept for reuse, so a saved and restored vector might - * need freeing using the correct allocator parameter. - * 'v' can be any lvalue pointer, it's cast to an (UWord *). +#define DESTROY_SAVED_WSTACK(wstack)\ +do {\ + if ((wstack)->wstart) {\ + erts_free((wstack)->alloc_type, (wstack)->wstart);\ + (wstack)->wstart = NULL;\ + }\ +} while(0) + +/* + * Use on empty stack, only the allocator can be changed before this. + * The src stack is reset to NULL. */ -#define WSTACK_RESTORE(s, v, vsize) /*v is a "name parameter"*/ \ -do { \ - if ((vsize) > DEF_WSTACK_SIZE) { \ - Uint _ca = DEF_WSTACK_SIZE; \ - while (_ca < (vsize)) \ - _ca = _ca * 2; \ - WSTK_CONCAT(s,_start) = (UWord *) (v); \ - WSTK_CONCAT(s,_end) = ((UWord *)(v)) + _ca; \ - WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start) + (vsize); \ - (v) = NULL; \ - } else { \ - memcpy(WSTK_CONCAT(s,_start),(v),(vsize)*sizeof(UWord));\ - WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start) + (vsize); \ - } \ - } while (0) +#define WSTACK_RESTORE(s, src) \ +do { \ + ASSERT(s.wstart == WSTK_DEF_STACK(s)); \ + s = *(src); /* struct copy */ \ + (src)->wstart = NULL; \ + ASSERT(s.wsp >= s.wstart); \ + ASSERT(s.wsp <= s.wend); \ +} while (0) -#define WSTACK_IS_STATIC(s) (WSTK_CONCAT(s,_start) == WSTK_CONCAT(s,_default_stack)) +#define WSTACK_IS_STATIC(s) (s.wstart == WSTK_DEF_STACK(s))) -#define WSTACK_PUSH(s, x) \ -do { \ - if (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_end)) { \ - erl_grow_wstack(WSTK_CONCAT(s,_alloc_type), &WSTK_CONCAT(s,_start), \ - &WSTK_CONCAT(s,_sp), &WSTK_CONCAT(s,_end)); \ - } \ - *WSTK_CONCAT(s,_sp)++ = (x); \ +#define WSTACK_PUSH(s, x) \ +do { \ + if (s.wsp == s.wend) { \ + erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \ + } \ + *s.wsp++ = (x); \ } while(0) -#define WSTACK_PUSH2(s, x, y) \ -do { \ - if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 2) { \ - erl_grow_wstack(WSTK_CONCAT(s,_alloc_type), &WSTK_CONCAT(s,_start), \ - &WSTK_CONCAT(s,_sp), &WSTK_CONCAT(s,_end)); \ - } \ - *WSTK_CONCAT(s,_sp)++ = (x); \ - *WSTK_CONCAT(s,_sp)++ = (y); \ +#define WSTACK_PUSH2(s, x, y) \ +do { \ + if (s.wsp > s.wend - 2) { \ + erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \ + } \ + *s.wsp++ = (x); \ + *s.wsp++ = (y); \ } while(0) -#define WSTACK_PUSH3(s, x, y, z) \ -do { \ - if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 3) { \ - erl_grow_wstack(WSTK_CONCAT(s,_alloc_type), &WSTK_CONCAT(s,_start), \ - &WSTK_CONCAT(s,_sp), &WSTK_CONCAT(s,_end)); \ - } \ - *WSTK_CONCAT(s,_sp)++ = (x); \ - *WSTK_CONCAT(s,_sp)++ = (y); \ - *WSTK_CONCAT(s,_sp)++ = (z); \ +#define WSTACK_PUSH3(s, x, y, 
z) \ +do { \ + if (s.wsp > s.wend - 3) { \ + erl_grow_wstack(&s, WSTK_DEF_STACK(s)); \ + } \ + *s.wsp++ = (x); \ + *s.wsp++ = (y); \ + *s.wsp++ = (z); \ } while(0) -#define WSTACK_COUNT(s) (WSTK_CONCAT(s,_sp) - WSTK_CONCAT(s,_start)) +#define WSTACK_COUNT(s) (s.wsp - s.wstart) +#define WSTACK_ISEMPTY(s) (s.wsp == s.wstart) +#define WSTACK_POP(s) (*(--s.wsp)) -#define WSTACK_ISEMPTY(s) (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_start)) -#define WSTACK_POP(s) (*(--WSTK_CONCAT(s,_sp))) /* binary.c */ @@ -655,6 +652,10 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg); Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2); +/* beam_bif_load.c */ +Eterm erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp); + + /* beam_load.c */ typedef struct { BeamInstr* current; /* Pointer to: Mod, Name, Arity */ @@ -852,6 +853,12 @@ Port *erts_get_heart_port(void); void erts_lcnt_enable_io_lock_count(int enable); #endif +/* driver_tab.c */ +typedef void *(*ErtsStaticNifInitFPtr)(void); +ErtsStaticNifInitFPtr erts_static_nif_get_nif_init(const char *name, int len); +int erts_is_static_nif(void *handle); +void erts_init_static_drivers(void); + /* erl_drv_thread.c */ void erl_drv_thr_init(void); @@ -911,6 +918,17 @@ char *erts_convert_filename_to_native(Eterm name, char *statbuf, ErtsAlcType_t alloc_type, int allow_empty, int allow_atom, Sint *used /* out */); +char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, + size_t statbuf_size, + ErtsAlcType_t alloc_type, + int allow_empty, int allow_atom, + int encoding, + Sint *used /* out */, + Uint extra); +char* erts_convert_filename_to_wchar(byte* bytes, Uint size, + char *statbuf, size_t statbuf_size, + ErtsAlcType_t alloc_type, Sint* used, + Uint extra_wchars); Eterm erts_convert_native_to_filename(Process *p, byte *bytes); Eterm erts_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, Uint left, Uint *num_built, Uint *num_eaten, Eterm tail); @@ -980,6 +998,7 @@ Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live); Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live); Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live); Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live); +Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live); Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live); Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live); Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live); @@ -1110,7 +1129,12 @@ erts_alloc_message_heap_state(Uint size, if (statep) *statep = state; if ((state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) + || (receiver->flags & F_DISABLE_GC) || HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) { + /* + * The heap is either potentially in an inconsistent + * state, or not large enough. + */ #ifdef ERTS_SMP if (locked_main) { *receiver_locks &= ~ERTS_PROC_LOCK_MAIN; diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 9076bbe73c..cd5060ebb3 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -46,9 +46,12 @@ #define ERTS_WANT_EXTERNAL_TAGS #include "external.h" #include "dtrace-wrapper.h" +#include "erl_map.h" extern ErlDrvEntry fd_driver_entry; +#ifndef __OSE__ extern ErlDrvEntry vanilla_driver_entry; +#endif extern ErlDrvEntry spawn_driver_entry; extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */ @@ -244,11 +247,13 @@ static ERTS_INLINE void port_init_instr(Port *prt ASSERT(prt->drv_ptr && prt->lock); if (!prt->drv_ptr->lock) { char *lock_str = "port_lock"; + erts_mtx_init_locked_x(prt->lock, lock_str, id, #ifdef ERTS_ENABLE_LOCK_COUNT - if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)) - lock_str = NULL; + (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) +#else + 0 #endif - erts_mtx_init_locked_x(prt->lock, lock_str, id); + ); } #endif erts_port_task_init_sched(&prt->sched, id); @@ -2742,8 +2747,10 @@ void erts_init_io(int port_tab_size, &drv_list_rwmtx_opts, "driver_list"); driver_list = NULL; - erts_smp_tsd_key_create(&driver_list_lock_status_key); - erts_smp_tsd_key_create(&driver_list_last_error_key); + erts_smp_tsd_key_create(&driver_list_lock_status_key, + "erts_driver_list_lock_status_key"); + erts_smp_tsd_key_create(&driver_list_last_error_key, + "erts_driver_list_last_error_key"); erts_ptab_init_table(&erts_port, ERTS_ALC_T_PORT_TABLE, @@ -2763,8 +2770,11 @@ void erts_init_io(int port_tab_size, erts_smp_rwmtx_rwlock(&erts_driver_list_lock); init_driver(&fd_driver, &fd_driver_entry, NULL); +#ifndef __OSE__ init_driver(&vanilla_driver, &vanilla_driver_entry, NULL); +#endif init_driver(&spawn_driver, &spawn_driver_entry, NULL); + erts_init_static_drivers(); for (dp = driver_tab; *dp != NULL; dp++) erts_add_driver_entry(*dp, NULL, 1); @@ -5292,6 +5302,17 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) depth++; break; } + case ERL_DRV_MAP: { /* int */ + ERTS_DDT_CHK_ENOUGH_ARGS(1); + if ((int) ptr[0] < 0) ERTS_DDT_FAIL; + need += MAP_HEADER_SIZE + 1 + 2*ptr[0]; + depth -= 2*ptr[0]; + if (depth < 0) ERTS_DDT_FAIL; + ptr++; + depth++; + break; + } + default: ERTS_DDT_FAIL; } @@ -5528,6 +5549,36 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) ptr += 2; break; + case ERL_DRV_MAP: { /* int */ + int size = (int)ptr[0]; + Eterm* tp = hp; + Eterm* vp; + map_t *mp; + + *tp = make_arityval(size); + + hp += 1 + size; + mp = (map_t*)hp; + mp->thing_word = MAP_HEADER; + mp->size = size; + mp->keys = make_tuple(tp); + mess = make_map(mp); + + hp += MAP_HEADER_SIZE + size; /* advance "heap" pointer */ + + tp += size; /* point at last key */ + vp = hp - 1; /* point at last value */ + + while(size--) { + *vp-- = ESTACK_POP(stack); + *tp-- = ESTACK_POP(stack); + } + if (!erts_validate_and_sort_map(mp)) + ERTS_DDT_FAIL; + ptr++; + break; + } + } ESTACK_PUSH(stack, mess); } @@ -7060,7 +7111,7 @@ void *driver_dl_open(char * path) int res; int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key); int locked = maybe_lock_driver_list(); - if ((res = erts_sys_ddll_open(path, &ptr)) == 0) { + if ((res = erts_sys_ddll_open(path, &ptr, NULL)) == 0) { maybe_unlock_driver_list(locked); return ptr; } else { @@ -7263,10 +7314,11 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) erts_atom_put((byte *) drv->name, sys_strlen(drv->name), ERTS_ATOM_ENC_LATIN1, - 1) + 1), #else - NIL + NIL, #endif + 1 ); } #endif diff --git a/erts/emulator/beam/ops.tab 
b/erts/emulator/beam/ops.tab index 1e5ae46bfa..68fcc177ae 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -763,17 +763,17 @@ allocate_init t I y ################################################################# # -# The BIFs erlang:check_process_code/2 must be called like a function, +# The BIFs erts_internal:check_process_code/2 must be called like a function, # to ensure that c_p->i (program counter) is set correctly (an ordinary # BIF call doesn't set it). # -call_ext u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext Bif -call_ext_last u==2 Bif=u$bif:erlang:check_process_code/2 D => i_call_ext_last Bif D -call_ext_only u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext_only Bif +call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif +call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D +call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif # -# The BIFs erlang:garbage_collect/0,1 must be called like functions, +# The BIFs erlang:garbage_collect/0 must be called like a function, # to allow them to invoke the garbage collector. (The stack pointer must # be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.) # @@ -782,10 +782,6 @@ call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif -call_ext u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext Bif -call_ext_last u==1 Bif=u$bif:erlang:garbage_collect/1 D => i_call_ext_last Bif D -call_ext_only u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext_only Bif - # # put/2 and erase/1 must be able to do garbage collection, so we must call # them like functions. @@ -1470,6 +1466,93 @@ apply I apply_last I P # +# Map instructions in R17. 
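+# Roughly: a nil source map (n) builds a fresh map with new_map, a source +# operand the instruction can address directly (s) is updated in place, +# and any other source is first moved to an x register.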
+# + +put_map_assoc F n Dst Live Size Rest=* => new_map F Dst Live Size Rest +put_map_assoc F Src=s Dst Live Size Rest=* => \ + update_map_assoc F Src Dst Live Size Rest +put_map_assoc F Src Dst Live Size Rest=* => \ + move Src x | update_map_assoc F x Dst Live Size Rest +put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest +put_map_exact F Src=s Dst Live Size Rest=* => \ + update_map_exact F Src Dst Live Size Rest +put_map_exact F Src Dst Live Size Rest=* => \ + move Src x | update_map_exact F x Dst Live Size Rest + +new_map j d I I +update_map_assoc j s d I I +update_map_exact j s d I I + +is_map Fail Literal=q => move Literal x | is_map Fail x +is_map Fail c => jump Fail + +%macro: is_map IsMap -fail_action +is_map f r +is_map f x +is_map f y + +## Transform has_map_field(s) #{ K1 := _, K2 := _ } + +has_map_field/3 + +has_map_fields Fail Src Size=u==1 Rest=* => gen_has_map_field(Fail,Src,Size,Rest) +has_map_fields Fail Src Size Rest=* => i_has_map_fields Fail Src Size Rest + +i_has_map_fields f s I + +has_map_field Fail Src=rxy Key=arxy => i_has_map_field Fail Src Key +has_map_field Fail Src Key => move Key x | i_has_map_field Fail Src x + +%macro: i_has_map_field HasMapField -fail_action +i_has_map_field f r a +i_has_map_field f x a +i_has_map_field f y a +i_has_map_field f r r +i_has_map_field f x r +i_has_map_field f y r +i_has_map_field f r x +i_has_map_field f x x +i_has_map_field f y x +i_has_map_field f r y +i_has_map_field f x y +i_has_map_field f y y + +## Transform get_map_elements(s) #{ K1 := V1, K2 := V2 } + +get_map_element/4 + +get_map_elements Fail Src=rxy Size=u==2 Rest=* => gen_get_map_element(Fail,Src,Size,Rest) +get_map_elements Fail Src Size Rest=* => i_get_map_elements Fail Src Size Rest + +i_get_map_elements f s I + +get_map_element Fail Src=rxy Key=ax Dst => i_get_map_element Fail Src Key Dst +get_map_element Fail Src=rxy Key=rycq Dst => \ + move Key x | i_get_map_element Fail Src x Dst +get_map_element Fail Src Key Dst => jump Fail + +%macro: i_get_map_element GetMapElement -fail_action +i_get_map_element f r a r +i_get_map_element f x a r +i_get_map_element f y a r +i_get_map_element f r a x +i_get_map_element f x a x +i_get_map_element f y a x +i_get_map_element f r a y +i_get_map_element f x a y +i_get_map_element f y a y +i_get_map_element f r x r +i_get_map_element f x x r +i_get_map_element f y x r +i_get_map_element f r x x +i_get_map_element f x x x +i_get_map_element f y x x +i_get_map_element f r x y +i_get_map_element f x x y +i_get_map_element f y x y + +# # Optimize addition and subtraction of small literals using # the i_increment/4 instruction (in bodies, not in guards). 
# diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 31252ed78f..e273056a2b 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -38,6 +38,8 @@ #if defined (__WIN32__) # include "erl_win_sys.h" +#elif defined (__OSE__) +# include "erl_ose_sys.h" #else # include "erl_unix_sys.h" #ifndef UNIX @@ -279,18 +281,21 @@ typedef unsigned long UWord; typedef long SWord; #define SWORD_CONSTANT(Const) Const##L #define UWORD_CONSTANT(Const) Const##UL +#define ERTS_UWORD_MAX ULONG_MAX #define ERTS_SWORD_MAX LONG_MAX #elif SIZEOF_VOID_P == SIZEOF_INT typedef unsigned int UWord; typedef int SWord; #define SWORD_CONSTANT(Const) Const #define UWORD_CONSTANT(Const) Const##U +#define ERTS_UWORD_MAX UINT_MAX #define ERTS_SWORD_MAX INT_MAX #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG typedef unsigned long long UWord; typedef long long SWord; #define SWORD_CONSTANT(Const) Const##LL #define UWORD_CONSTANT(Const) Const##ULL +#define ERTS_UWORD_MAX ULLONG_MAX #define ERTS_SWORD_MAX LLONG_MAX #else #error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint' @@ -304,6 +309,7 @@ typedef unsigned long Uint; typedef long Sint; #define SWORD_CONSTANT(Const) Const##L #define UWORD_CONSTANT(Const) Const##UL +#define ERTS_UWORD_MAX ULONG_MAX #define ERTS_SWORD_MAX LONG_MAX #define ERTS_SIZEOF_ETERM SIZEOF_LONG #define ErtsStrToSint strtol @@ -313,6 +319,7 @@ typedef unsigned int Uint; typedef int Sint; #define SWORD_CONSTANT(Const) Const #define UWORD_CONSTANT(Const) Const##U +#define ERTS_UWORD_MAX UINT_MAX #define ERTS_SWORD_MAX INT_MAX #define ERTS_SIZEOF_ETERM SIZEOF_INT #define ErtsStrToSint strtol @@ -322,6 +329,7 @@ typedef unsigned long long Uint; typedef long long Sint; #define SWORD_CONSTANT(Const) Const##LL #define UWORD_CONSTANT(Const) Const##ULL +#define ERTS_UWORD_MAX ULLONG_MAX #define ERTS_SWORD_MAX LLONG_MAX #define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG #if defined(__WIN32__) @@ -661,8 +669,7 @@ typedef struct { #define ERTS_SYS_DDLL_ERROR_INIT {NULL} extern void erts_sys_ddll_free_error(ErtsSysDdllError*); extern void erl_sys_ddll_init(void); /* to initialize mutexes etc */ -extern int erts_sys_ddll_open2(const char *path, void **handle, ErtsSysDdllError*); -#define erts_sys_ddll_open(P,H) erts_sys_ddll_open2(P,H,NULL) +extern int erts_sys_ddll_open(const char *path, void **handle, ErtsSysDdllError*); extern int erts_sys_ddll_open_noext(char *path, void **handle, ErtsSysDdllError*); extern int erts_sys_ddll_load_driver_init(void *handle, void **function); extern int erts_sys_ddll_load_nif_init(void *handle, void **function,ErtsSysDdllError*); diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 0d75bbcc77..738f793020 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -31,6 +31,7 @@ #include "bif.h" #include "erl_binary.h" #include "erl_bits.h" +#include "erl_map.h" #include "packet_parser.h" #include "erl_gc.h" #define ERTS_WANT_DB_INTERNAL__ @@ -185,39 +186,41 @@ erts_set_hole_marker(Eterm* ptr, Uint sz) * Helper function for the ESTACK macros defined in global.h. 
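The added ERTS_UWORD_MAX definitions extend the existing one-branch-per-word-size pattern, whose invariant is that UWord/SWord are exactly pointer-wide. A self-contained sketch of the same selection keyed on UINTPTR_MAX instead of the configure-time SIZEOF_* results (an assumption; ERTS uses autoconf as shown above), with a compile-time width check:

#include <limits.h>
#include <stdint.h>

#if UINTPTR_MAX == UINT_MAX
typedef unsigned int UWord;
#define ERTS_UWORD_MAX UINT_MAX
#elif UINTPTR_MAX == ULONG_MAX
typedef unsigned long UWord;
#define ERTS_UWORD_MAX ULONG_MAX
#else
typedef unsigned long long UWord;
#define ERTS_UWORD_MAX ULLONG_MAX
#endif

/* Refuses to compile unless UWord is exactly pointer-sized. */
typedef char erts_uword_width_check[sizeof(UWord) == sizeof(void *) ? 1 : -1];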
*/ void -erl_grow_stack(ErtsAlcType_t a_type, Eterm** start, Eterm** sp, Eterm** end) +erl_grow_estack(ErtsEStack* s, Eterm* default_estack) { - Uint old_size = (*end - *start); + Uint old_size = (s->end - s->start); Uint new_size = old_size * 2; - Uint sp_offs = *sp - *start; - if (new_size > 2 * DEF_ESTACK_SIZE) { - *start = erts_realloc(a_type, (void *) *start, new_size*sizeof(Eterm)); + Uint sp_offs = s->sp - s->start; + if (s->start != default_estack) { + s->start = erts_realloc(s->alloc_type, s->start, + new_size*sizeof(Eterm)); } else { - Eterm* new_ptr = erts_alloc(a_type, new_size*sizeof(Eterm)); - sys_memcpy(new_ptr, *start, old_size*sizeof(Eterm)); - *start = new_ptr; + Eterm* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(Eterm)); + sys_memcpy(new_ptr, s->start, old_size*sizeof(Eterm)); + s->start = new_ptr; } - *end = *start + new_size; - *sp = *start + sp_offs; + s->end = s->start + new_size; + s->sp = s->start + sp_offs; } /* - * Helper function for the ESTACK macros defined in global.h. + * Helper function for the WSTACK macros defined in global.h. */ void -erl_grow_wstack(ErtsAlcType_t a_type, UWord** start, UWord** sp, UWord** end) +erl_grow_wstack(ErtsWStack* s, UWord* default_wstack) { - Uint old_size = (*end - *start); + Uint old_size = (s->wend - s->wstart); Uint new_size = old_size * 2; - Uint sp_offs = *sp - *start; - if (new_size > 2 * DEF_ESTACK_SIZE) { - *start = erts_realloc(a_type, (void *) *start, new_size*sizeof(UWord)); + Uint sp_offs = s->wsp - s->wstart; + if (s->wstart != default_wstack) { + s->wstart = erts_realloc(s->alloc_type, s->wstart, + new_size*sizeof(UWord)); } else { - UWord* new_ptr = erts_alloc(a_type, new_size*sizeof(UWord)); - sys_memcpy(new_ptr, *start, old_size*sizeof(UWord)); - *start = new_ptr; + UWord* new_ptr = erts_alloc(s->alloc_type, new_size*sizeof(UWord)); + sys_memcpy(new_ptr, s->wstart, old_size*sizeof(UWord)); + s->wstart = new_ptr; } - *end = *start + new_size; - *sp = *start + sp_offs; + s->wend = s->wstart + new_size; + s->wsp = s->wstart + sp_offs; } /* CTYPE macros */ @@ -255,7 +258,7 @@ erl_grow_wstack(ErtsAlcType_t a_type, UWord** start, UWord** sp, UWord** end) * Returns -1 if not a proper list (i.e. not terminated with NIL) */ int -list_length(Eterm list) +erts_list_length(Eterm list) { int i = 0; @@ -732,6 +735,8 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length, #define FUNNY_NUMBER10 268440479 #define FUNNY_NUMBER11 268440577 #define FUNNY_NUMBER12 268440581 +#define FUNNY_NUMBER13 268440593 +#define FUNNY_NUMBER14 268440611 static Uint32 hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash) @@ -783,10 +788,10 @@ Uint32 make_hash(Eterm term_arg) unsigned op; /* Must not collide with the real tag_val_def's: */ -#define MAKE_HASH_TUPLE_OP 0x10 -#define MAKE_HASH_FUN_OP 0x11 -#define MAKE_HASH_CDR_PRE_OP 0x12 -#define MAKE_HASH_CDR_POST_OP 0x13 +#define MAKE_HASH_TUPLE_OP 0x11 +#define MAKE_HASH_TERM_ARRAY_OP 0x12 +#define MAKE_HASH_CDR_PRE_OP 0x13 +#define MAKE_HASH_CDR_POST_OP 0x14 /* ** Convenience macro for calculating a bytewise hash on an unsigned 32 bit @@ -875,7 +880,7 @@ tail_recur: hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq; if (num_free > 0) { if (num_free > 1) { - WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP); + WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP); } term = funp->env[0]; goto tail_recur; @@ -965,6 +970,24 @@ tail_recur: hash *= is_neg ? 
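Compared with the old erl_grow_stack, passing the whole ErtsEStack/ErtsWStack struct lets the helper choose between copy-out and in-place realloc by checking whether the buffer is still the caller's default (typically C-stack resident) array, rather than guessing from the size. A simplified sketch of that pattern, with malloc/realloc standing in for erts_alloc/erts_realloc and error handling omitted:

#include <stdlib.h>
#include <string.h>

typedef struct {
    long *start, *sp, *end;
    long *def_buf;               /* caller-provided default buffer */
} ToyStack;

/* Double the stack. While still on the default buffer we must copy
 * out (it may live on the C stack); once heap-allocated, a plain
 * realloc preserves the contents, as erl_grow_estack does above. */
static void toy_grow(ToyStack *s)
{
    size_t old_size = (size_t)(s->end - s->start);
    size_t new_size = old_size * 2;
    size_t sp_offs  = (size_t)(s->sp - s->start);

    if (s->start != s->def_buf) {
        s->start = realloc(s->start, new_size * sizeof(long));
    } else {
        long *p = malloc(new_size * sizeof(long));
        memcpy(p, s->start, old_size * sizeof(long));
        s->start = p;
    }
    s->end = s->start + new_size;
    s->sp  = s->start + sp_offs;
}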
FUNNY_NUMBER4 : FUNNY_NUMBER3; break; } + case MAP_DEF: + { + map_t *mp = (map_t *)map_val(term); + int size = map_get_size(mp); + Eterm *ks = map_get_keys(mp); + Eterm *vs = map_get_values(mp); + + /* Use a prime with size to remedy some of + * the {} and <<>> hash problems */ + hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size; + if (size == 0) + break; + + /* push values first */ + WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP); + WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP); + break; + } case TUPLE_DEF: { Eterm* ptr = tuple_val(term); @@ -974,7 +997,7 @@ tail_recur: op = MAKE_HASH_TUPLE_OP; }/*fall through*/ case MAKE_HASH_TUPLE_OP: - case MAKE_HASH_FUN_OP: + case MAKE_HASH_TERM_ARRAY_OP: { Uint i = (Uint) WSTACK_POP(stack); Eterm* ptr = (Eterm*) WSTACK_POP(stack); @@ -1068,9 +1091,11 @@ Uint32 make_hash2(Eterm term) { Uint32 hash; + Uint32 hash_xor_keys = 0; + Uint32 hash_xor_values = 0; DeclareTmpHeapNoproc(tmp_big,2); -/* (HCONST * {2, ..., 14}) mod 2^32 */ +/* (HCONST * {2, ..., 16}) mod 2^32 */ #define HCONST_2 0x3c6ef372UL #define HCONST_3 0xdaa66d2bUL #define HCONST_4 0x78dde6e4UL @@ -1085,6 +1110,11 @@ make_hash2(Eterm term) #define HCONST_13 0x08d12e65UL #define HCONST_14 0xa708a81eUL #define HCONST_15 0x454021d7UL +#define HCONST_16 0xe3779b90UL + +#define HASH_MAP_TAIL (_make_header(1,_TAG_HEADER_REF)) +#define HASH_MAP_KEY (_make_header(2,_TAG_HEADER_REF)) +#define HASH_MAP_VAL (_make_header(3,_TAG_HEADER_REF)) #define UINT32_HASH_2(Expr1, Expr2, AConst) \ do { \ @@ -1180,11 +1210,45 @@ make_hash2(Eterm term) UINT32_HASH(arity, HCONST_9); if (arity == 0) /* Empty tuple */ goto hash2_common; - for (i = arity; i >= 2; i--) { + for (i = arity; i >= 1; i--) { tmp = elem[i]; ESTACK_PUSH(s, tmp); } - term = elem[1]; + goto hash2_common; + } + break; + case MAP_SUBTAG: + { + map_t *mp = (map_t *)map_val(term); + int i; + int size = map_get_size(mp); + Eterm *ks = map_get_keys(mp); + Eterm *vs = map_get_values(mp); + UINT32_HASH(size, HCONST_16); + if (size == 0) { + goto hash2_common; + } + ESTACK_PUSH(s, hash_xor_values); + ESTACK_PUSH(s, hash_xor_keys); + ESTACK_PUSH(s, hash); + ESTACK_PUSH(s, HASH_MAP_TAIL); + hash = 0; + hash_xor_keys = 0; + hash_xor_values = 0; + for (i = size - 1; i >= 0; i--) { + tmp = vs[i]; + ESTACK_PUSH(s, HASH_MAP_VAL); + ESTACK_PUSH(s, tmp); + } + /* We do not want to expose the tuple representation. + * Do not push the keys as a tuple. + */ + for (i = size - 1; i >= 0; i--) { + tmp = ks[i]; + ESTACK_PUSH(s, HASH_MAP_KEY); + ESTACK_PUSH(s, tmp); + } + goto hash2_common; } break; case EXPORT_SUBTAG: @@ -1378,15 +1442,47 @@ make_hash2(Eterm term) default: erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term); hash2_common: + + /* Uint32 hash always has the hash value of the previous term, + * compounded or otherwise. 
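The HASH_MAP_TAIL/KEY/VAL machinery makes a map's hash independent of internal entry order: every key and value is hashed from a fresh state, XORed into separate accumulators, and only those accumulators are mixed into the outer hash. A self-contained illustration of why that works (toy mixer, not the HCONST-based one above):

#include <stdint.h>

static uint32_t mix(uint32_t h, uint32_t x)   /* toy mixer */
{
    return (h ^ x) * 2654435761u + 0x9e3779b9u;
}

/* Each entry is hashed from a fresh state and XORed into a per-keys
 * or per-values accumulator; only the accumulators reach the outer
 * hash, so internal entry order cannot influence the result. */
static uint32_t toy_hash_map(uint32_t hash, const uint32_t *ks,
                             const uint32_t *vs, int size)
{
    uint32_t xor_keys = 0, xor_values = 0;
    int i;
    for (i = 0; i < size; i++) {
        xor_keys   ^= mix(0, ks[i]);
        xor_values ^= mix(0, vs[i]);
    }
    hash = mix(hash, (uint32_t)size);
    hash = mix(hash, xor_keys);
    return mix(hash, xor_values);
}

Since XOR is commutative and associative, permuting the (key, value) pairs leaves both accumulators, and therefore the result, unchanged.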
+ */ + if (ESTACK_ISEMPTY(s)) { DESTROY_ESTACK(s); UnUseTmpHeapNoproc(2); return hash; } + term = ESTACK_POP(s); + + switch (term) { + case HASH_MAP_TAIL: { + hash = (Uint32) ESTACK_POP(s); + UINT32_HASH(hash_xor_keys, HCONST_16); + UINT32_HASH(hash_xor_values, HCONST_16); + hash_xor_keys = (Uint32) ESTACK_POP(s); + hash_xor_values = (Uint32) ESTACK_POP(s); + goto hash2_common; + } + case HASH_MAP_KEY: + hash_xor_keys ^= hash; + hash = 0; + goto hash2_common; + case HASH_MAP_VAL: + hash_xor_values ^= hash; + hash = 0; + goto hash2_common; + default: + break; + } } } } + +#undef HASH_MAP_TAIL +#undef HASH_MAP_KEY +#undef HASH_MAP_VAL + #undef UINT32_HASH_2 #undef UINT32_HASH #undef SINT32_HASH @@ -1488,7 +1584,7 @@ tail_recur: hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq; if (num_free > 0) { if (num_free > 1) { - WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP); + WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP); } term = funp->env[0]; goto tail_recur; @@ -1601,6 +1697,24 @@ tail_recur: } break; + case MAP_DEF: + { + map_t *mp = (map_t *)map_val(term); + int size = map_get_size(mp); + Eterm *ks = map_get_keys(mp); + Eterm *vs = map_get_values(mp); + + /* Use a prime with size to remedy some of + * the {} and <<>> hash problems */ + hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + size; + if (size == 0) + break; + + /* push values first */ + WSTACK_PUSH3(stack, (UWord)vs, (UWord) size, MAKE_HASH_TERM_ARRAY_OP); + WSTACK_PUSH3(stack, (UWord)ks, (UWord) size, MAKE_HASH_TERM_ARRAY_OP); + break; + } case TUPLE_DEF: { Eterm* ptr = tuple_val(term); @@ -1610,7 +1724,7 @@ tail_recur: op = MAKE_HASH_TUPLE_OP; }/*fall through*/ case MAKE_HASH_TUPLE_OP: - case MAKE_HASH_FUN_OP: + case MAKE_HASH_TERM_ARRAY_OP: { Uint i = (Uint) WSTACK_POP(stack); Eterm* ptr = (Eterm*) WSTACK_POP(stack); @@ -1638,7 +1752,7 @@ tail_recur: return hash; #undef MAKE_HASH_TUPLE_OP -#undef MAKE_HASH_FUN_OP +#undef MAKE_HASH_TERM_ARRAY_OP #undef MAKE_HASH_CDR_PRE_OP #undef MAKE_HASH_CDR_POST_OP } @@ -1675,7 +1789,7 @@ static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len) p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0); if (p) { erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); - if (state & ERTS_PSFLG_RUNNING) + if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)) p = NULL; } } @@ -2005,6 +2119,22 @@ tailrecur_ne: ++bb; goto term_array; } + case MAP_SUBTAG: + { + aa = map_val_rel(a, a_base); + if (!is_boxed(b) || *boxed_val_rel(b,b_base) != *aa) + goto not_equal; + bb = map_val_rel(b,b_base); + sz = map_get_size((map_t*)aa); + + if (sz != map_get_size((map_t*)bb)) goto not_equal; + if (sz == 0) goto pop_next; + + aa += 2; + bb += 2; + sz += 1; /* increment for tuple-keys */ + goto term_array; + } case REFC_BINARY_SUBTAG: case HEAP_BINARY_SUBTAG: case SUB_BINARY_SUBTAG: @@ -2279,7 +2409,7 @@ static int cmpbytes(byte *s1, int l1, byte *s2, int l2) * * According to the Erlang Standard, types are orderered as follows: * numbers < (characters) < atoms < refs < funs < ports < pids < - * tuples < [] < conses < binaries. + * tuples < maps < [] < conses < binaries. * * Note that characters are currently not implemented. * @@ -2299,10 +2429,24 @@ static int cmp_atoms(Eterm a, Eterm b) bb->name+3, bb->len-3); } +#if !HALFWORD_HEAP +/* cmp(Eterm a, Eterm b) + * For compatibility with HiPE - arith-based compare. 
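The pop loop above can use HASH_MAP_TAIL/KEY/VAL as in-band markers because they are built as header words, which never occur as popped terms otherwise; a marker tells the loop where the sub-hash computed since the last marker belongs. A toy version of that stack discipline (marker values invented):

#include <stdint.h>

#define MARK_KEY 0xfffffffeu   /* invented sentinel values */
#define MARK_VAL 0xffffffffu

/* Pop items from the top; a sentinel folds the hash accumulated
 * since the previous sentinel into the matching accumulator and
 * resets it, mirroring the HASH_MAP_KEY/HASH_MAP_VAL cases above. */
static void toy_fold(const uint32_t *stack, int n,
                     uint32_t *xor_keys, uint32_t *xor_values)
{
    uint32_t h = 0;
    while (n > 0) {
        uint32_t item = stack[--n];
        if (item == MARK_KEY)      { *xor_keys   ^= h; h = 0; }
        else if (item == MARK_VAL) { *xor_values ^= h; h = 0; }
        else h = h * 2654435761u + item;   /* toy term "hash" */
    }
}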
+ */ +Sint cmp(Eterm a, Eterm b) +{ + return erts_cmp(a, b, 0); +} +#endif + +/* erts_cmp(Eterm a, Eterm b, int exact) + * exact = 1 -> term-based compare + * exact = 0 -> arith-based compare + */ #if HALFWORD_HEAP -Sint cmp_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base) +Sint erts_cmp_rel_opt(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base, int exact) #else -Sint cmp(Eterm a, Eterm b) +Sint erts_cmp(Eterm a, Eterm b, int exact) #endif { DECLARE_WSTACK(stack); @@ -2462,7 +2606,25 @@ tailrecur_ne: ++aa; ++bb; goto term_array; + case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE) : + if (!is_map_rel(b,b_base)) { + a_tag = MAP_DEF; + goto mixed_types; + } + aa = (Eterm *)map_val_rel(a,a_base); + bb = (Eterm *)map_val_rel(b,b_base); + i = map_get_size((map_t*)aa); + if (i != map_get_size((map_t*)bb)) { + RETURN_NEQ((int)(i - map_get_size((map_t*)bb))); + } + if (i == 0) { + goto pop_next; + } + aa += 2; + bb += 2; + i += 1; /* increment for tuple-keys */ + goto term_array; case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE): if (!is_float_rel(b,b_base)) { a_tag = FLOAT_DEF; @@ -2686,11 +2848,6 @@ tailrecur_ne: { FloatDef f1, f2; Eterm big; -#if HEAP_ON_C_STACK - Eterm big_buf[CMP_TMP_HEAP_SIZE]; /* If HEAP_ON_C_STACK */ -#else - Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap; -#endif #if HALFWORD_HEAP Wterm aw = is_immed(a) ? a : rterm2wterm(a,a_base); Wterm bw = is_immed(b) ? b : rterm2wterm(b,b_base); @@ -2701,6 +2858,8 @@ tailrecur_ne: #define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2)) #define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1)) #define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */ + Eterm big_buf[BIG_NEED_SIZE(BIG_ARITY_FLOAT_MAX)]; + b_tag = tag_val_def(bw); switch(_NUMBER_CODE(a_tag, b_tag)) { @@ -2711,13 +2870,15 @@ tailrecur_ne: j = big_sign(aw) ? -1 : 1; break; case SMALL_FLOAT: + if (exact) goto exact_fall_through; GET_DOUBLE(bw, f2); if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) { /* Float is within the no loss limit */ f1.fd = signed_val(aw); j = float_comp(f1.fd, f2.fd); + } #if ERTS_SIZEOF_ETERM == 8 - } else if (f2.fd > (double) (MAX_SMALL + 1)) { + else if (f2.fd > (double) (MAX_SMALL + 1)) { /* Float is a positive bignum, i.e. bigger */ j = -1; } else if (f2.fd < (double) (MIN_SMALL - 1)) { @@ -2728,19 +2889,21 @@ tailrecur_ne: j = signed_val(aw) - (Sint) f2.fd; } #else - } else { + else { /* If float is positive it is bigger than small */ j = (f2.fd > 0.0) ? -1 : 1; } #endif /* ERTS_SIZEOF_ETERM == 8 */ break; case FLOAT_BIG: + if (exact) goto exact_fall_through; { Wterm tmp = aw; aw = bw; bw = tmp; }/* fall through */ case BIG_FLOAT: + if (exact) goto exact_fall_through; GET_DOUBLE(bw, f2); if ((f2.fd < (double) (MAX_SMALL + 1)) && (f2.fd > (double) (MIN_SMALL - 1))) { @@ -2762,21 +2925,23 @@ tailrecur_ne: j = float_comp(f1.fd, f2.fd); } } else { - big = double_to_big(f2.fd, big_buf); - j = big_comp(aw, big); + big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm)); + j = big_comp(aw, rterm2wterm(big,big_buf)); } if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) { j = -j; } break; case FLOAT_SMALL: + if (exact) goto exact_fall_through; GET_DOUBLE(aw, f1); if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) { /* Float is within the no loss limit */ f2.fd = signed_val(bw); j = float_comp(f1.fd, f2.fd); + } #if ERTS_SIZEOF_ETERM == 8 - } else if (f1.fd > (double) (MAX_SMALL + 1)) { + else if (f1.fd > (double) (MAX_SMALL + 1)) { /* Float is a positive bignum, i.e. 
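The exact flag splits Erlang's two orderings: arith-based compare (==, <) treats 1 and 1.0 as equal, while term-based compare (=:=, and the key ordering maps need) must keep them apart by falling through to type order, which is what the new `if (exact) goto exact_fall_through;` guards do. A toy model of the dispatch (invented tags; the real code also guards against double precision loss around 2^53 via MAX_LOSSLESS_FLOAT, which this sketch ignores):

enum { TAG_SMALL = 1, TAG_FLOAT = 2 };        /* invented tags */
typedef struct { int tag; long i; double f; } ToyTerm;

/* exact = 0: mixed small/float compares arithmetically and may
 * report equality. exact = 1: mixed types skip straight to type
 * order, like the exact_fall_through label above. */
static long toy_cmp(ToyTerm a, ToyTerm b, int exact)
{
    if (a.tag == b.tag) {
        if (a.tag == TAG_SMALL)
            return a.i < b.i ? -1 : (a.i > b.i ? 1 : 0);
        return a.f < b.f ? -1 : (a.f > b.f ? 1 : 0);
    }
    if (!exact) {
        double x = a.tag == TAG_SMALL ? (double)a.i : a.f;
        double y = b.tag == TAG_SMALL ? (double)b.i : b.f;
        if (x != y)
            return x < y ? -1 : 1;
        return 0;                 /* arithmetically equal, e.g. 1 vs 1.0 */
    }
    return (long)(b.tag - a.tag); /* type order decides */
}

With exact = 0 the mixed small/float case can return 0; with exact = 1 it never does, so structures keyed on term identity can tell 1 from 1.0.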
bigger */ j = 1; } else if (f1.fd < (double) (MIN_SMALL - 1)) { @@ -2787,12 +2952,13 @@ tailrecur_ne: j = (Sint) f1.fd - signed_val(bw); } #else - } else { + else { /* If float is positive it is bigger than small */ j = (f1.fd > 0.0) ? 1 : -1; } #endif /* ERTS_SIZEOF_ETERM == 8 */ break; +exact_fall_through: default: j = b_tag - a_tag; } @@ -2846,7 +3012,7 @@ pop_next: return 0; not_equal: - DESTROY_ESTACK(stack); + DESTROY_WSTACK(stack); return j; #undef CMP_NODES @@ -3021,6 +3187,14 @@ buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail) ** Return remaining bytes in buffer on success ** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow ** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that result would not be a whole number of bytes) +** +** Note! +** Do not detect input errors in this function that are not detected by erts_iolist_size! +** +** A caller should be able to rely on a successful return from erts_iolist_to_buf +** if erts_iolist_size has previously been called successfully and erts_iolist_to_buf +** is called with a buffer at least as large as the value given by erts_iolist_size. +** */ ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len) @@ -3127,6 +3301,11 @@ ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len) /* * Return 0 if successful, and non-zero if unsuccessful. + * + * It is vital that if erts_iolist_to_buf would return an error for + * any type of term data, this function should do so as well. + * Any input term error detected in erts_iolist_to_buf should also + * be detected in this function! */ int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep) { @@ -4006,7 +4185,6 @@ erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) #endif } - /* * A millisecond timestamp without time correction where there's no hrtime * - for tracing on "long" things...
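The strengthened comments pin down a two-pass contract: erts_iolist_size and erts_iolist_to_buf must accept exactly the same inputs, so a successful size call guarantees that filling a buffer of that size succeeds. A self-contained toy of the same pattern, with a plain C tree standing in for Eterm iolists:

#include <stdlib.h>
#include <string.h>

/* Toy iolist: a leaf carries bytes, an inner node concatenates two
 * children. Both passes walk the structure identically, which is
 * exactly the invariant the comments above demand. */
typedef struct IO {
    const char *bytes;             /* non-NULL for a leaf */
    size_t len;
    const struct IO *left, *right; /* children of an inner node */
} IO;

static size_t io_size(const IO *t)
{
    return t->bytes ? t->len : io_size(t->left) + io_size(t->right);
}

static char *io_to_buf(const IO *t, char *p)
{
    if (t->bytes) {
        memcpy(p, t->bytes, t->len);
        return p + t->len;
    }
    p = io_to_buf(t->left, p);
    return io_to_buf(t->right, p);
}

/* Size first, allocate exactly, then fill. */
static char *io_flatten(const IO *t, size_t *szp)
{
    char *buf;
    *szp = io_size(t);
    buf = malloc(*szp);
    if (buf)
        io_to_buf(t, buf);
    return buf;
}

Because both walks traverse the same structure the same way, io_to_buf cannot fail on anything io_size accepted.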