Diffstat (limited to 'erts/emulator/beam')
85 files changed, 4833 insertions, 3310 deletions
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c index bbe1cb3e11..e5b7616a0d 100644 --- a/erts/emulator/beam/atom.c +++ b/erts/emulator/beam/atom.c @@ -452,7 +452,7 @@ init_atom_table(void) /* Ordinary atoms */ for (i = 0; erl_atom_names[i] != 0; i++) { int ix; - a.len = strlen(erl_atom_names[i]); + a.len = sys_strlen(erl_atom_names[i]); a.latin1_chars = a.len; a.name = (byte*)erl_atom_names[i]; a.slot.index = i; diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h index be998a46bd..385120a8d9 100644 --- a/erts/emulator/beam/atom.h +++ b/erts/emulator/beam/atom.h @@ -36,7 +36,7 @@ /* Internal atom cache needs MAX_ATOM_TABLE_SIZE to be less than an unsigned 32 bit integer. See external.c(erts_encode_ext_dist_header_setup) for more details. */ -#define MAX_ATOM_TABLE_SIZE ((MAX_ATOM_INDEX + 1 < (UWORD_CONSTANT(1) << 32)) ? MAX_ATOM_INDEX + 1 : (UWORD_CONSTANT(1) << 32)) +#define MAX_ATOM_TABLE_SIZE ((MAX_ATOM_INDEX + 1 < (UWORD_CONSTANT(1) << 32)) ? MAX_ATOM_INDEX + 1 : ((UWORD_CONSTANT(1) << 31) - 1)) /* Here we use maximum signed interger value to avoid integer overflow */ #else #define MAX_ATOM_TABLE_SIZE (MAX_ATOM_INDEX + 1) #endif diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index fc55b687d4..38b5f0c5e3 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -48,7 +48,7 @@ atom Empty='' # # Used in the Beam emulator loop. (Smaller literals usually means tighter code.) # -atom fun infinity timeout normal call return +atom infinity timeout normal call return atom throw error exit atom undefined @@ -69,11 +69,9 @@ atom DOWN='DOWN' atom UP='UP' atom EXIT='EXIT' atom abort -atom aborted atom abs_path atom absoluteURI atom ac -atom accessor atom active atom active_tasks atom active_tasks_all @@ -88,7 +86,6 @@ atom allocated_areas atom allocator atom allocator_sizes atom alloc_util_allocators -atom allow_gc atom allow_passive_connect atom already_loaded atom amd64 @@ -108,6 +105,7 @@ atom asynchronous atom atom atom atom_used atom attributes +atom auto_connect atom await_microstate_accounting_modifications atom await_port_send_result atom await_proc_exit @@ -121,9 +119,7 @@ atom bag atom band atom big atom bif_return_trap -atom bif_timer_server atom binary -atom binary_bin_to_list_trap atom binary_copy_trap atom binary_find_trap atom binary_longest_prefix_trap @@ -178,7 +174,6 @@ atom convert_time_unit atom connect atom connected atom connection_closed -atom cons atom const atom context_switches atom control @@ -198,9 +193,6 @@ atom debug_flags atom decimals atom default atom delay_trap -atom dexit -atom depth -atom dgroup_leader atom dictionary atom dirty_bif_exception atom dirty_bif_result @@ -221,7 +213,6 @@ atom dist_ctrl_put_data atom dist_data atom Div='/' atom div -atom dlink atom dmonitor_node atom dmonitor_p atom DollarDollar='$$' @@ -230,9 +221,7 @@ atom dollar_endonly atom dotall atom driver atom driver_options -atom dsend atom dsend_continue_trap -atom dunlink atom duplicate_bag atom duplicated atom dupnames @@ -250,15 +239,14 @@ atom Eqeq='==' atom erl_tracer atom erlang atom erl_signal_server -atom ERROR='ERROR' atom error_handler atom error_logger atom erts_code_purger atom erts_debug +atom erts_dflags atom erts_internal atom ets atom ETS_TRANSFER='ETS-TRANSFER' -atom event atom exact_reductions atom exception_from atom exception_trace @@ -285,27 +273,20 @@ atom force atom format_cpu_topology atom free atom fullsweep_after -atom fullsweep_if_old_binaries -atom fun -atom function atom 
functions atom function_clause atom garbage_collecting atom garbage_collection atom garbage_collection_info -atom gc_end atom gc_major_end atom gc_major_start atom gc_max_heap_size atom gc_minor_end atom gc_minor_start -atom gc_start atom Ge='>=' atom generational -atom get_data atom get_seq_token atom get_tcw -atom getenv atom gather_gc_info_result atom gather_io_bytes atom gather_microstate_accounting_result @@ -347,11 +328,11 @@ atom initial_call atom input atom internal atom internal_error -atom internal_status atom instruction_counts atom invalid atom is_constant atom is_seq_trace +atom iterator atom io atom keypos atom kill @@ -392,14 +373,12 @@ atom match_spec_result atom max atom maximum atom max_heap_size -atom max_tables max_processes atom mbuf_size atom md5 atom memory atom memory_internal atom memory_types atom message -atom message_binary atom message_queue_data atom message_queue_len atom messages @@ -446,21 +425,18 @@ atom new_processes atom new_ports atom new_uniq atom newline -atom next atom no atom nomatch atom none atom no_auto_capture atom noconnect atom noconnection -atom nocookie atom node atom node_type atom nodedown atom nodedown_reason atom nodeup atom noeol -atom nofile atom noproc atom normal atom nosuspend @@ -482,7 +458,6 @@ atom notempty_atstart atom notify atom notsup atom nouse_stdio -atom objects atom off_heap atom offset atom ok @@ -552,7 +527,6 @@ atom re_run_trap atom read_concurrency atom ready_input atom ready_output -atom ready_async atom reason atom receive atom recent_size @@ -580,7 +554,8 @@ atom running_procs atom runtime atom safe atom save_calls -atom scheduler +atom sbct +atom scheduler atom scheduler_id atom scheduler_wall_time atom scheduler_wall_time_all @@ -598,7 +573,6 @@ atom sequential_trace_token atom serial atom set atom set_cpu_topology -atom set_data atom set_on_first_link atom set_on_first_spawn atom set_on_link @@ -606,8 +580,6 @@ atom set_on_spawn atom set_seq_token atom set_tcw atom set_tcw_fake -atom separate -atom shared atom sighup atom sigterm atom sigusr1 @@ -623,7 +595,6 @@ atom sigtstp atom sigquit atom silent atom size -atom sl_alloc atom spawn_executable atom spawn_driver atom spawned @@ -631,7 +602,6 @@ atom ssl_tls atom stack_size atom start atom status -atom static atom stderr_to_stdout atom stop atom stream @@ -641,13 +611,11 @@ atom sunrm atom suspend atom suspended atom suspending -atom sys_misc atom system -atom system_error +atom system_flag_scheduler_wall_time atom system_limit atom system_version atom system_architecture -atom SYSTEM='SYSTEM' atom table atom term_to_binary_trap atom this @@ -665,7 +633,7 @@ atom total_heap_size atom total_run_queue_lengths atom total_run_queue_lengths_all atom tpkt -atom trace trace_ts traced +atom trace traced atom trace_control_word atom trace_status atom tracer @@ -674,7 +642,6 @@ atom trim atom trim_all atom try_clause atom true -atom tuple atom type atom ucompile atom ucp @@ -691,14 +658,11 @@ atom unblock_normal atom uniq atom unless_suspending atom unloaded -atom unloading atom unloaded_only atom unload_cancelled atom value -atom values atom version atom visible -atom wait atom waiting atom wall_clock atom warning diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index fe1e15701b..0832b3f374 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -998,7 +998,9 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, fixup_cp_before_trace(c_p, &return_to_trace); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); flags = 
erts_call_trace(c_p, info, ms, reg, local, &tracer); + ERTS_REQ_PROC_MAIN_LOCK(c_p); /* restore cp after potential fixup */ c_p->cp = cp_save; diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index 509aa2a84f..5eb68b817e 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -40,6 +40,7 @@ #include "erl_binary.h" #include "erl_thr_progress.h" #include "erl_nfunc_sched.h" +#include "beam_catches.h" #ifdef ARCH_64 # define HEXF "%016bpX" @@ -55,6 +56,7 @@ static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif); static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap); static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap); +static void print_byte_string(fmtfn_t to, void *to_arg, byte* str, Uint bytes); BIF_RETTYPE erts_debug_same_2(BIF_ALIST_2) @@ -226,11 +228,11 @@ erts_debug_instructions_0(BIF_ALIST_0) Eterm res = NIL; for (i = 0; i < num_instructions; i++) { - needed += 2*strlen(opc[i].name); + needed += 2*sys_strlen(opc[i].name); } hp = HAlloc(BIF_P, needed); for (i = num_instructions-1; i >= 0; i--) { - Eterm s = erts_bld_string_n(&hp, 0, opc[i].name, strlen(opc[i].name)); + Eterm s = erts_bld_string_n(&hp, 0, opc[i].name, sys_strlen(opc[i].name)); res = erts_bld_cons(&hp, 0, s, res); } return res; @@ -396,6 +398,7 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) BeamInstr args[8]; /* Arguments for this instruction. */ BeamInstr* ap; /* Pointer to arguments. */ BeamInstr* unpacked; /* Unpacked arguments */ + BeamInstr* first_arg; /* First argument */ start_prog = opc[op].pack; @@ -428,7 +431,7 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) * the packing program backwards and in reverse. */ - prog = start_prog + strlen(start_prog); + prog = start_prog + sys_strlen(start_prog); while (start_prog < prog) { prog--; switch (*prog) { @@ -480,6 +483,8 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) ap = args; } + first_arg = ap; + /* * Print the name and all operands of the instructions. 
*/ @@ -570,24 +575,60 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } break; } + case op_i_make_fun_Wt: + if (*sign == 'W') { + ErlFunEntry* fe = (ErlFunEntry *) *ap; + ErtsCodeMFA* cmfa = find_function_from_pc(fe->address); + erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module, + cmfa->function, cmfa->arity); + } else { + erts_print(to, to_arg, "%d", *ap); + } + break; + case op_i_bs_match_string_xfWW: + if (ap - first_arg < 3) { + erts_print(to, to_arg, "%d", *ap); + } else { + Uint bits = ap[-1]; + Uint bytes = (bits+7)/8; + byte* str = (byte *) *ap; + print_byte_string(to, to_arg, str, bytes); + } + break; + case op_bs_put_string_WW: + if (ap - first_arg == 0) { + erts_print(to, to_arg, "%d", *ap); + } else { + Uint bytes = ap[-1]; + byte* str = (byte *) ap[0]; + print_byte_string(to, to_arg, str, bytes); + } + break; default: erts_print(to, to_arg, "%d", *ap); } ap++; break; case 'f': /* Destination label */ - { - BeamInstr* target = f_to_addr(addr, op, ap); - ErtsCodeMFA* cmfa = find_function_from_pc(target); - if (!cmfa || erts_codemfa_to_code(cmfa) != target) { - erts_print(to, to_arg, "f(" HEXF ")", target); - } else { - erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module, - cmfa->function, cmfa->arity); - } - ap++; - } - break; + switch (op) { + case op_catch_yf: + erts_print(to, to_arg, "f(" HEXF ")", catch_pc((BeamInstr)*ap)); + break; + default: + { + BeamInstr* target = f_to_addr(addr, op, ap); + ErtsCodeMFA* cmfa = find_function_from_pc(target); + if (!cmfa || erts_codemfa_to_code(cmfa) != target) { + erts_print(to, to_arg, "f(" HEXF ")", target); + } else { + erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module, + cmfa->function, cmfa->arity); + } + ap++; + } + break; + } + break; case 'p': /* Pointer (to label) */ { BeamInstr* target = f_to_addr(addr, op, ap); @@ -848,6 +889,14 @@ static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap) return base - 1 + opc[op].adjust + *ap; } +static void print_byte_string(fmtfn_t to, void *to_arg, byte* str, Uint bytes) +{ + Uint i; + + for (i = 0; i < bytes; i++) { + erts_print(to, to_arg, "%02X", str[i]); + } +} /* * Dirty BIF testing. 
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 60d0008d8f..fbd0e38735 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -400,12 +400,13 @@ static BeamInstr* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg) NOINLINE; static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) NOINLINE; -static Eterm new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) NOINLINE; -static Eterm new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, +static Eterm erts_gc_new_map(Process* p, Eterm* reg, Uint live, + Uint n, BeamInstr* ptr) NOINLINE; +static Eterm erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, Uint live, BeamInstr* ptr) NOINLINE; -static Eterm update_map_assoc(Process* p, Eterm* reg, Uint live, +static Eterm erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* new_p) NOINLINE; -static Eterm update_map_exact(Process* p, Eterm* reg, Uint live, +static Eterm erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p) NOINLINE; static Eterm get_map_element(Eterm map, Eterm key); static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx); @@ -748,7 +749,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) dtrace_proc_str(c_p, process_buf); if (ERTS_PROC_IS_EXITING(c_p)) { - strcpy(fun_buf, "<exiting>"); + sys_strcpy(fun_buf, "<exiting>"); } else { ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i); if (cmfa) { @@ -1228,7 +1229,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) dtrace_proc_str(c_p, process_buf); if (ERTS_PROC_IS_EXITING(c_p)) { - strcpy(fun_buf, "<exiting>"); + sys_strcpy(fun_buf, "<exiting>"); } else { ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i); if (cmfa) { @@ -1453,6 +1454,7 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa) reg[3] = c_p->ftrace; if ((new_pc = next_catch(c_p, reg))) { c_p->cp = 0; /* To avoid keeping stale references. 
*/ + c_p->msg.saved_last = 0; /* No longer safe to use this position */ return new_pc; } if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found"); @@ -2755,7 +2757,7 @@ do { \ static Eterm -new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) +erts_gc_new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) { Uint i; Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */; @@ -2812,7 +2814,8 @@ new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) } static Eterm -new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, Uint live, BeamInstr* ptr) +erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, + Uint live, BeamInstr* ptr) { Eterm* keys = tuple_val(keys_literal); Uint n = arityval(*keys); @@ -2846,7 +2849,8 @@ new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, Uint live, BeamIns } static Eterm -update_map_assoc(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* new_p) +erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live, + Uint n, BeamInstr* new_p) { Uint num_old; Uint num_updates; @@ -2892,7 +2896,7 @@ update_map_assoc(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* new_p) */ if (num_old == 0) { - return new_map(p, reg, live, n, new_p); + return erts_gc_new_map(p, reg, live, n, new_p); } /* @@ -3048,7 +3052,7 @@ update_map_assoc(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* new_p) */ static Eterm -update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p) +erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p) { Uint i; Uint num_old; diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 7331c331a6..50498cb6cf 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2017. All Rights Reserved. + * Copyright Ericsson AB 1996-2018. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -40,6 +40,7 @@ #include "erl_zlib.h" #include "erl_map.h" #include "erl_process_dict.h" +#include "erl_unicode.h" #ifdef HIPE #include "hipe_bif0.h" @@ -458,7 +459,7 @@ typedef struct LoaderState { #ifdef DEBUG # define GARBAGE 0xCC -# define DEBUG_INIT_GENOP(Dst) memset(Dst, GARBAGE, sizeof(GenOp)) +# define DEBUG_INIT_GENOP(Dst) sys_memset(Dst, GARBAGE, sizeof(GenOp)) #else # define DEBUG_INIT_GENOP(Dst) #endif @@ -1421,7 +1422,7 @@ load_atom_table(LoaderState* stp, ErtsAtomEncoding enc) Atom* ap; ap = atom_tab(atom_val(stp->atom[1])); - memcpy(sbuf, ap->name, ap->len); + sys_memcpy(sbuf, ap->name, ap->len); sbuf[ap->len] = '\0'; LoadError1(stp, "module name in object code is %s", sbuf); } @@ -1806,7 +1807,7 @@ read_line_table(LoaderState* stp) GetInt(stp, 2, n); GetString(stp, fname, n); - stp->fname[i] = erts_atom_put(fname, n, ERTS_ATOM_ENC_LATIN1, 1); + stp->fname[i] = erts_atom_put(fname, n, ERTS_ATOM_ENC_UTF8, 1); } } @@ -2088,7 +2089,7 @@ load_code(LoaderState* stp) erts_alloc(ERTS_ALC_T_LOADER_TMP, (arity+last_op->a[arg].val) *sizeof(GenOpArg)); - memcpy(last_op->a, last_op->def_args, + sys_memcpy(last_op->a, last_op->def_args, arity*sizeof(GenOpArg)); arity += last_op->a[arg].val; break; @@ -2384,8 +2385,18 @@ load_code(LoaderState* stp) code[ci++] = NIL; break; case TAG_q: - new_literal_patch(stp, ci); - code[ci++] = tmp_op->a[arg].val; + { + BeamInstr val = tmp_op->a[arg].val; + Eterm term = stp->literals[val].term; + new_literal_patch(stp, ci); + code[ci++] = val; + switch (loader_tag(term)) { + case LOADER_X_REG: + case LOADER_Y_REG: + LoadError1(stp, "the term '%T' would be confused " + "with a register", term); + } + } break; default: LoadError1(stp, "bad tag %d for general source", @@ -4904,7 +4915,7 @@ freeze_code(LoaderState* stp) line_items[i] = codev + stp->ci - 1; line_tab->fname_ptr = (Eterm*) &line_items[i + 1]; - memcpy(line_tab->fname_ptr, stp->fname, stp->num_fnames*sizeof(Eterm)); + sys_memcpy(line_tab->fname_ptr, stp->fname, stp->num_fnames*sizeof(Eterm)); line_tab->loc_size = stp->loc_size; if (stp->loc_size == 2) { @@ -5487,8 +5498,8 @@ transform_engine(LoaderState* st) case TOP_store_rest_args: { GENOP_ARITY(instr, instr->arity+num_rest_args); - memcpy(instr->a, instr->def_args, ap*sizeof(GenOpArg)); - memcpy(instr->a+ap, rest_args, num_rest_args*sizeof(GenOpArg)); + sys_memcpy(instr->a, instr->def_args, ap*sizeof(GenOpArg)); + sys_memcpy(instr->a+ap, rest_args, num_rest_args*sizeof(GenOpArg)); ap += num_rest_args; } break; @@ -6018,13 +6029,36 @@ erts_release_literal_area(ErtsLiteralArea* literal_area) return; oh = literal_area->off_heap; - + while (oh) { - Binary* bptr; - ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG); - bptr = ((ProcBin*)oh)->val; - erts_bin_release(bptr); - oh = oh->next; + switch (thing_subtag(oh->thing_word)) { + case REFC_BINARY_SUBTAG: + { + Binary* bptr = ((ProcBin*)oh)->val; + erts_bin_release(bptr); + break; + } + case FUN_SUBTAG: + { + ErlFunEntry* fe = ((ErlFunThing*)oh)->fe; + if (erts_refc_dectest(&fe->refc, 0) == 0) { + erts_erase_fun_entry(fe); + } + break; + } + case REF_SUBTAG: + { + ErtsMagicBinary *bptr; + ASSERT(is_magic_ref_thing(oh)); + bptr = ((ErtsMRefThing *) oh)->mb; + erts_bin_release((Binary *) bptr); + break; + } + default: + ASSERT(is_external_header(oh->thing_word)); + erts_deref_node_entry(((ExternalThing*)oh)->node); + } + oh = oh->next; } erts_free(ERTS_ALC_T_LITERAL, literal_area); } @@ -6221,8 +6255,7 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p) file_term 
= buf_to_intlist(&hp, ".erl", 4, NIL); file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, file_term); } else { - Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1])); - file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, NIL); + file_term = erts_atom_to_string(&hp, (fi->fname_ptr)[file-1]); } tuple = TUPLE2(hp, am_line, make_small(line)); @@ -6414,7 +6447,7 @@ stub_copy_info(LoaderState* stp, Sint decoded_size; Uint size = stp->chunks[chunk].size; if (size != 0) { - memcpy(info, stp->chunks[chunk].start, size); + sys_memcpy(info, stp->chunks[chunk].start, size); *ptr_word = info; decoded_size = erts_decode_ext_size(info, size); if (decoded_size < 0) { @@ -6951,6 +6984,8 @@ int erts_commit_hipe_patch_load(Eterm hipe_magic_bin) hipe_stp->new_hipe_refs = NULL; hipe_stp->new_hipe_sdesc = NULL; + hipe_redirect_to_module(modp); + return 1; } @@ -6982,8 +7017,8 @@ void dbg_set_traced_mfa(const char* m, const char* f, Uint a) { unsigned i = dbg_trace_ix++; ASSERT(i < MFA_MAX); - dbg_trace_m[i] = am_atom_put(m, strlen(m)); - dbg_trace_f[i] = am_atom_put(f, strlen(f)); + dbg_trace_m[i] = am_atom_put(m, sys_strlen(m)); + dbg_trace_f[i] = am_atom_put(f, sys_strlen(f)); dbg_trace_a[i] = a; } diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c index 01bda7f3c1..f0c9496341 100644 --- a/erts/emulator/beam/beam_ranges.c +++ b/erts/emulator/beam/beam_ranges.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2012-2016. All Rights Reserved. + * Copyright Ericsson AB 2012-2018. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ #include "erl_vm.h" #include "global.h" #include "beam_load.h" +#include "erl_unicode.h" typedef struct { BeamInstr* start; /* Pointer to start of module. 
*/ @@ -341,8 +342,7 @@ lookup_loc(FunctionInfo* fi, const BeamInstr* pc, Atom* mod_atom = atom_tab(atom_val(fi->mfa->module)); fi->needed += 2*(mod_atom->len+4); } else { - Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1])); - fi->needed += 2*ap->len; + fi->needed += 2*erts_atom_to_string_length((fi->fname_ptr)[file-1]); } return; } else { diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 4b11884f38..652b95105f 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -59,6 +59,7 @@ Export *erts_convert_time_unit_trap = NULL; static Export *await_msacc_mod_trap = NULL; static erts_atomic32_t msacc; +static Export *system_flag_scheduler_wall_time_trap; static Export *await_sched_wall_time_mod_trap; static erts_atomic32_t sched_wall_time; @@ -227,17 +228,38 @@ BIF_RETTYPE link_1(BIF_ALIST_1) goto res_no_proc; } - code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_RLOCK, 0); + code = erts_dsig_prepare(&dsd, dep, BIF_P, + (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK), + ERTS_DSP_RLOCK, 0, 1); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: - /* Let the dlink trap handle it */ - case ERTS_DSIG_PREP_NOT_CONNECTED: - erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); - BIF_TRAP1(dlink_trap, BIF_P, BIF_ARG_1); - + case ERTS_DSIG_PREP_NOT_CONNECTED: { + ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK; + erts_aint32_t state; + erts_proc_lock(BIF_P, (ERTS_PROC_LOCKS_ALL & ~locks)); + locks = ERTS_PROC_LOCKS_ALL; + erts_send_exit_signal(BIF_P, BIF_ARG_1, BIF_P, &locks, + am_noconnection, NIL, NULL, 0); + erts_proc_unlock(BIF_P, locks & ERTS_PROC_LOCKS_ALL_MINOR); + + /* + * Copy-paste from old dist_exit_3, not sure if we really + * need erts_handle_pending_exit when exit_2 does not. + */ + state = erts_atomic32_read_acqb(&BIF_P->state); + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { + if (state & ERTS_PSFLG_PENDING_EXIT) + erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); + ERTS_BIF_EXITED(BIF_P); + } + BIF_RET(am_true); + } + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: - /* We are connected. Setup link and send link signal */ - + /* + * We have (pending) connection. + * Setup link and enqueue link signal. + */ erts_de_links_lock(dep); erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1); @@ -256,8 +278,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1) ERTS_BIF_YIELD_RETURN(BIF_P, am_true); BIF_RET(am_true); default: - ASSERT(! "Invalid dsig prepare result"); - BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); + ERTS_ASSERT(! "Invalid dsig prepare result"); } } } @@ -292,7 +313,8 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) ERTS_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK) == erts_proc_lc_my_proc_locks(c_p)); - code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_DSP_RLOCK, 0); + code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_PROC_LOCK_MAIN, + ERTS_DSP_RLOCK, 0, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: case ERTS_DSIG_PREP_NOT_CONNECTED: @@ -313,6 +335,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) res = am_true; break; + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: erts_de_links_lock(dep); @@ -347,8 +370,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) } break; default: - ASSERT(! "Invalid dsig prepare result"); - return am_internal_error; + ERTS_ASSERT(! 
"Invalid dsig prepare result"); } @@ -767,23 +789,20 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, BIF_RETTYPE ret; int code; + ASSERT(dep); erts_proc_lock(p, ERTS_PROC_LOCK_LINK); - code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_RLOCK, 0); + code = erts_dsig_prepare(&dsd, dep, + p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK), + ERTS_DSP_RLOCK, 0, 1); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: - /* Let the dmonitor_p trap handle it */ case ERTS_DSIG_PREP_NOT_CONNECTED: erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); ERTS_BIF_PREP_TRAP2(ret, dmonitor_p_trap, p, bifarg1, bifarg2); break; + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: - if (!(dep->flags & DFLAG_DIST_MONITOR) - || (byname && !(dep->flags & DFLAG_DIST_MONITOR_NAME))) { - erts_de_runlock(dep); - erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); - ERTS_BIF_PREP_ERROR(ret, p, BADARG); - } - else { + { Eterm p_trgt, p_name, d_name, mon_ref; mon_ref = erts_make_ref(p); @@ -818,9 +837,7 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, } break; default: - ASSERT(! "Invalid dsig prepare result"); - ERTS_BIF_PREP_ERROR(ret, p, EXC_INTERNAL_ERROR); - break; + ERTS_ASSERT(! "Invalid dsig prepare result"); } BIF_RET(ret); @@ -888,17 +905,17 @@ local_port: if (!erts_is_alive && remote_node != am_Noname) { goto badarg; /* Remote monitor from (this) undistributed node */ } - dep = erts_sysname_to_connected_dist_entry(remote_node); + dep = erts_find_or_insert_dist_entry(remote_node); if (dep == erts_this_dist_entry) { ret = local_name_monitor(BIF_P, BIF_ARG_1, name); } else { ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, name, 1); } + erts_deref_dist_entry(dep); } else { badarg: ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); } - return ret; } @@ -1158,21 +1175,14 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) BIF_RET(am_true); } - code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0); + code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_PROC_LOCK_MAIN, + ERTS_DSP_NO_LOCK, 0, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: case ERTS_DSIG_PREP_NOT_CONNECTED: -#if 1 BIF_RET(am_true); -#else - /* - * This is how we used to do it, but the link is obviously not - * active, so I see no point in setting up a connection. - * /Rickard - */ - BIF_TRAP1(dunlink_trap, BIF_P, BIF_ARG_1); -#endif + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: erts_remove_dist_link(&dld, BIF_P->common.id, BIF_ARG_1, dep); code = erts_dsig_send_unlink(&dsd, BIF_P->common.id, BIF_ARG_1); @@ -1545,22 +1555,24 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) DistEntry *dep; dep = external_pid_dist_entry(BIF_ARG_1); + ERTS_ASSERT(dep); if(dep == erts_this_dist_entry) BIF_RET(am_true); - code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0); + code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_PROC_LOCK_MAIN, + ERTS_DSP_NO_LOCK, 0, 1); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: case ERTS_DSIG_PREP_NOT_CONNECTED: - BIF_TRAP2(dexit_trap, BIF_P, BIF_ARG_1, BIF_ARG_2); + BIF_RET(am_true); + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: code = erts_dsig_send_exit2(&dsd, BIF_P->common.id, BIF_ARG_1, BIF_ARG_2); if (code == ERTS_DSIG_SEND_YIELD) ERTS_BIF_YIELD_RETURN(BIF_P, am_true); BIF_RET(am_true); default: - ASSERT(! "Invalid dsig prepare result"); - BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); + ERTS_ASSERT(! 
"Invalid dsig prepare result"); } } else if (is_not_internal_pid(BIF_ARG_1)) { @@ -1732,7 +1744,6 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) else if (BIF_ARG_1 == am_scheduler) { ErtsRunQueue *old, *new, *curr; Sint sched; - erts_aint32_t state; if (!is_small(BIF_ARG_2)) goto error; @@ -1741,23 +1752,23 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) goto error; if (sched == 0) { + old = erts_bind_runq_proc(BIF_P, 0); new = NULL; - state = erts_atomic32_read_band_mb(&BIF_P->state, - ~ERTS_PSFLG_BOUND); } else { + int bound = !0; new = erts_schedid2runq(sched); - erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new); - state = erts_atomic32_read_bor_mb(&BIF_P->state, - ERTS_PSFLG_BOUND); + old = erts_set_runq_proc(BIF_P, new, &bound); + if (!bound) + old = NULL; } + old_value = old ? make_small(old->ix+1) : make_small(0); + curr = erts_proc_sched_data(BIF_P)->run_queue; - old = (ERTS_PSFLG_BOUND & state) ? curr : NULL; ASSERT(!old || old == curr); - old_value = old ? make_small(old->ix+1) : make_small(0); if (new && new != curr) ERTS_BIF_YIELD_RETURN_X(BIF_P, old_value, am_scheduler); else @@ -1964,7 +1975,7 @@ ebif_bang_2(BIF_ALIST_2) * Send a message to Process, Port or Registered Process. * Returns non-negative reduction bump or negative result code. */ -#define SEND_TRAP (-1) +#define SEND_NOCONNECT (-1) #define SEND_YIELD (-2) #define SEND_YIELD_RETURN (-3) #define SEND_BADARG (-4) @@ -1980,20 +1991,22 @@ static Sint remote_send(Process *p, DistEntry *dep, { Sint res; int code; - ASSERT(is_atom(to) || is_external_pid(to)); ctx->dep = dep; - code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_DSP_NO_LOCK, !ctx->suspend); + code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_PROC_LOCK_MAIN, + ERTS_DSP_NO_LOCK, + !ctx->suspend, ctx->connect); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: case ERTS_DSIG_PREP_NOT_CONNECTED: - res = SEND_TRAP; + res = SEND_NOCONNECT; break; case ERTS_DSIG_PREP_WOULD_SUSPEND: ASSERT(!ctx->suspend); res = SEND_YIELD; break; + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: { if (is_atom(to)) @@ -2170,6 +2183,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) } return ret_val; } else if (is_tuple(to)) { /* Remote send */ + int deref_dep = 0; int ret; tp = tuple_val(to); if (*tp != make_arityval(2)) @@ -2177,11 +2191,10 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) if (is_not_atom(tp[1]) || is_not_atom(tp[2])) return SEND_BADARG; - /* sysname_to_connected_dist_entry will return NULL if there - is no dist_entry or the dist_entry has no port, + /* erts_find_dist_entry will return NULL if there is no dist_entry but remote_send() will handle that. 
*/ - dep = erts_sysname_to_connected_dist_entry(tp[2]); + dep = erts_find_dist_entry(tp[2]); if (dep == erts_this_dist_entry) { Eterm id; @@ -2205,13 +2218,20 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) } return 0; } + if (dep == NULL) { + dep = erts_find_or_insert_dist_entry(tp[2]); + ASSERT(dep != erts_this_dist_entry); + deref_dep = 1; + } + ctx->dsd.node = tp[2]; ret = remote_send(p, dep, tp[1], to, msg, ctx); if (ret == SEND_YIELD_CONTINUE) { - if (dep) - erts_ref_dist_entry(dep); - ctx->dep_to_deref = dep; + erts_ref_dist_entry(ctx->dep); + ctx->deref_dep = 1; } + if (deref_dep) + erts_deref_dist_entry(dep); return ret; } else { if (IS_TRACED_FL(p, F_TRACE_SEND)) @@ -2223,20 +2243,15 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) send_message: { ErtsProcLocks rp_locks = 0; - Sint res; if (p == rp) rp_locks |= ERTS_PROC_LOCK_MAIN; /* send to local process */ - res = erts_send_message(p, rp, &rp_locks, msg, 0); - if (erts_use_sender_punish) - res *= 4; - else - res = 0; + erts_send_message(p, rp, &rp_locks, msg, 0); erts_proc_unlock(rp, p == rp ? (rp_locks & ~ERTS_PROC_LOCK_MAIN) : rp_locks); - return res; + return 0; } } @@ -2251,7 +2266,6 @@ BIF_RETTYPE send_3(BIF_ALIST_3) Eterm msg = BIF_ARG_2; Eterm opts = BIF_ARG_3; - int connect = !0; Eterm l = opts; Sint result; @@ -2262,14 +2276,15 @@ BIF_RETTYPE send_3(BIF_ALIST_3) UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P); ctx->suspend = !0; - ctx->dep_to_deref = NULL; + ctx->connect = !0; + ctx->deref_dep = 0; ctx->return_term = am_ok; ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR); ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT; while (is_list(l)) { if (CAR(list_val(l)) == am_noconnect) { - connect = 0; + ctx->connect = 0; } else if (CAR(list_val(l)) == am_nosuspend) { ctx->suspend = 0; } else { @@ -2291,8 +2306,8 @@ BIF_RETTYPE send_3(BIF_ALIST_3) result = do_send(p, to, msg, &ref, ctx); ERTS_MSACC_POP_STATE_M_X(); - if (result > 0) { - ERTS_VBUMP_REDS(p, result); + if (result >= 0) { + ERTS_VBUMP_REDS(p, 4); if (ERTS_IS_PROC_OUT_OF_REDS(p)) goto yield_return; ERTS_BIF_PREP_RET(retval, am_ok); @@ -2300,15 +2315,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3) } switch (result) { - case 0: - /* May need to yield even though we do not bump reds here... */ - if (ERTS_IS_PROC_OUT_OF_REDS(p)) - goto yield_return; - ERTS_BIF_PREP_RET(retval, am_ok); - break; - case SEND_TRAP: - if (connect) { - ERTS_BIF_PREP_TRAP3(retval, dsend3_trap, p, to, msg, opts); + case SEND_NOCONNECT: + if (ctx->connect) { + ERTS_BIF_PREP_RET(retval, am_ok); } else { ERTS_BIF_PREP_RET(retval, am_noconnect); } @@ -2412,7 +2421,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg) ref = NIL; #endif ctx->suspend = !0; - ctx->dep_to_deref = NULL; + ctx->connect = !0; + ctx->deref_dep = 0; ctx->return_term = msg; ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR); ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT; @@ -2421,8 +2431,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg) ERTS_MSACC_POP_STATE_M_X(); - if (result > 0) { - ERTS_VBUMP_REDS(p, result); + if (result >= 0) { + ERTS_VBUMP_REDS(p, 4); if (ERTS_IS_PROC_OUT_OF_REDS(p)) goto yield_return; ERTS_BIF_PREP_RET(retval, msg); @@ -2430,15 +2440,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg) } switch (result) { - case 0: - /* May need to yield even though we do not bump reds here... 
*/ - if (ERTS_IS_PROC_OUT_OF_REDS(p)) - goto yield_return; + case SEND_NOCONNECT: ERTS_BIF_PREP_RET(retval, msg); break; - case SEND_TRAP: - ERTS_BIF_PREP_TRAP2(retval, dsend2_trap, p, to, msg); - break; case SEND_YIELD: ERTS_BIF_PREP_YIELD2(retval, bif_export[BIF_send_2], p, to, msg); break; @@ -3097,7 +3101,7 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2) static int do_float_to_charbuf(Process *p, Eterm efloat, Eterm list, char *fbuf, int sizeof_fbuf) { - const static int arity_two = make_arityval(2); + Eterm arity_two = make_arityval(2); int decimals = SYS_DEFAULT_FLOAT_DECIMALS; int compact = 0; enum fmt_type_ { @@ -3461,7 +3465,7 @@ BIF_RETTYPE binary_to_float_1(BIF_ALIST_1) if (bit_offs) erts_copy_bits(bytes, bit_offs, 1, buf, 0, 1, size*8); else - memcpy(buf, bytes, size); + sys_memcpy(buf, bytes, size); buf[size] = '\0'; @@ -4195,10 +4199,10 @@ BIF_RETTYPE list_to_port_1(BIF_ALIST_1) buf[i] = '\0'; /* null terminal */ cp = &buf[0]; - if (strncmp("#Port<", cp, 6) != 0) + if (sys_strncmp("#Port<", cp, 6) != 0) goto bad; - cp += 6; /* strlen("#Port<") */ + cp += 6; /* sys_strlen("#Port<") */ if (sscanf(cp, "%u.%u>", (unsigned int*)&n, (unsigned int*)&p) < 2) goto bad; @@ -4384,23 +4388,24 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) int code; ErtsDSigData dsd; dep = external_pid_dist_entry(BIF_ARG_2); + ERTS_ASSERT(dep); if(dep == erts_this_dist_entry) BIF_ERROR(BIF_P, BADARG); - code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0); + code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_PROC_LOCK_MAIN, + ERTS_DSP_NO_LOCK, 0, 1); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: - BIF_RET(am_true); case ERTS_DSIG_PREP_NOT_CONNECTED: - BIF_TRAP2(dgroup_leader_trap, BIF_P, BIF_ARG_1, BIF_ARG_2); + BIF_RET(am_true); + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: code = erts_dsig_send_group_leader(&dsd, BIF_ARG_1, BIF_ARG_2); if (code == ERTS_DSIG_SEND_YIELD) ERTS_BIF_YIELD_RETURN(BIF_P, am_true); BIF_RET(am_true); default: - ASSERT(! "Invalid dsig prepare result"); - BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); + ERTS_ASSERT(! "Invalid dsig prepare result"); } } else if (is_internal_pid(BIF_ARG_2)) { @@ -4472,7 +4477,7 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } } - + BIF_RETTYPE system_flag_2(BIF_ALIST_2) { Sint n; @@ -4690,17 +4695,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_RET(am_true); } else if (BIF_ARG_1 == am_scheduler_wall_time) { - if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { - erts_aint32_t new = BIF_ARG_2 == am_true ? 1 : 0; - erts_aint32_t old = erts_atomic32_xchg_nob(&sched_wall_time, - new); - Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0); - ASSERT(is_value(ref)); - BIF_TRAP2(await_sched_wall_time_mod_trap, - BIF_P, - ref, - old ? am_true : am_false); - } + if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) + BIF_TRAP1(system_flag_scheduler_wall_time_trap, + BIF_P, BIF_ARG_2); } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) { Sint old_no; if (!is_small(BIF_ARG_2)) @@ -4807,11 +4804,24 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) "scheduled for removal in Erlang/OTP 18. For more\n" "information see the erlang:system_flag/2 documentation.\n"); return erts_bind_schedulers(BIF_P, BIF_ARG_2); + } else if (ERTS_IS_ATOM_STR("erts_alloc", BIF_ARG_1)) { + return erts_alloc_set_dyn_param(BIF_P, BIF_ARG_2); } error: BIF_ERROR(BIF_P, BADARG); } +BIF_RETTYPE erts_internal_scheduler_wall_time_1(BIF_ALIST_1) +{ + erts_aint32_t new = BIF_ARG_1 == am_true ? 
1 : 0; + erts_aint32_t old = erts_atomic32_xchg_nob(&sched_wall_time, + new); + Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0); + ASSERT(is_value(ref)); + BIF_TRAP2(await_sched_wall_time_mod_trap, + BIF_P, ref, old ? am_true : am_false); +} + /**********************************************************************/ BIF_RETTYPE phash_2(BIF_ALIST_2) @@ -5070,8 +5080,10 @@ void erts_init_bif(void) await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3); await_port_send_result_trap = erts_export_put(am_erts_internal, am_await_port_send_result, 3); + system_flag_scheduler_wall_time_trap + = erts_export_put(am_erts_internal, am_system_flag_scheduler_wall_time, 1); await_sched_wall_time_mod_trap - = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2); + = erts_export_put(am_erts_internal, am_await_sched_wall_time_modifications, 2); await_msacc_mod_trap = erts_export_put(am_erts_internal, am_await_microstate_accounting_modifications, 3); diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index f7b4451890..be653ee2a0 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -153,14 +153,12 @@ bif erlang:whereis/1 bif erlang:spawn_opt/1 bif erlang:setnode/2 bif erlang:setnode/3 -bif erlang:dist_exit/3 bif erlang:dist_get_stat/1 bif erlang:dist_ctrl_input_handler/2 bif erlang:dist_ctrl_put_data/2 bif erlang:dist_ctrl_get_data/1 bif erlang:dist_ctrl_get_data_notification/1 - # Static native functions in erts_internal bif erts_internal:port_info/1 bif erts_internal:port_info/2 @@ -187,6 +185,8 @@ bif erts_internal:system_check/1 bif erts_internal:release_literal_area_switch/0 +bif erts_internal:scheduler_wall_time/1 + # inet_db support bif erlang:port_set_data/2 bif erlang:port_get_data/1 @@ -376,9 +376,10 @@ bif ets:match_spec_run_r/3 # Bifs in os module. 
# -bif os:putenv/2 -bif os:getenv/0 -bif os:getenv/1 +bif os:get_env_var/1 +bif os:set_env_var/2 +bif os:unset_env_var/1 +bif os:list_env_vars/0 bif os:getpid/0 bif os:timestamp/0 bif os:system_time/0 @@ -550,9 +551,6 @@ bif binary:last/1 bif binary:at/2 bif binary:part/2 binary_binary_part_2 bif binary:part/3 binary_binary_part_3 -bif binary:bin_to_list/1 -bif binary:bin_to_list/2 -bif binary:bin_to_list/3 bif binary:list_to_bin/1 bif binary:copy/1 bif binary:copy/2 @@ -619,7 +617,6 @@ bif erlang:float_to_binary/2 bif erlang:binary_to_float/1 bif io:printable_range/0 -bif os:unsetenv/1 # # New in 17.0 @@ -629,7 +626,6 @@ bif re:inspect/2 ubif erlang:is_map/1 gcbif erlang:map_size/1 -bif maps:to_list/1 bif maps:find/2 bif maps:get/2 bif maps:from_list/1 @@ -684,10 +680,18 @@ bif math:floor/1 bif math:ceil/1 bif math:fmod/2 bif os:set_signal/2 -bif erts_internal:maps_to_list/2 # # New in 20.1 # - bif erlang:iolist_to_iovec/1 + +# +# New in 21.0 +# + +bif erts_internal:get_dflags/0 +bif erts_internal:new_connection/1 +bif erts_internal:abort_connection/2 +bif erts_internal:map_next/3 +bif ets:whereis/1 diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c index 5eaf262cd8..c5cb268f09 100644 --- a/erts/emulator/beam/big.c +++ b/erts/emulator/beam/big.c @@ -2549,12 +2549,17 @@ int term_equals_2pow32(Eterm x) } } +static ERTS_INLINE int c2int_is_valid_char(byte ch, int base) { + if (base <= 10) + return (ch >= '0' && ch < ('0' + base)); + else + return (ch >= '0' && ch <= '9') + || (ch >= 'A' && ch < ('A' + base - 10)) + || (ch >= 'a' && ch < ('a' + base - 10)); +} + static ERTS_INLINE int c2int_is_invalid_char(byte ch, int base) { - return (ch < '0' - || (ch > ('0' + base - 1) - && !(base > 10 - && ((ch >= 'a' && ch < ('a' + base - 10)) - || (ch >= 'A' && ch < ('A' + base - 10)))))); + return !c2int_is_valid_char(ch, base); } static ERTS_INLINE byte c2int_digit_from_base(byte ch) { diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c index ca3e48e205..95d324d2c1 100644 --- a/erts/emulator/beam/binary.c +++ b/erts/emulator/beam/binary.c @@ -60,14 +60,36 @@ erts_init_binary(void) } +static ERTS_INLINE +Eterm build_proc_bin(ErlOffHeap* ohp, Eterm* hp, Binary* bptr) +{ + ProcBin* pb = (ProcBin *) hp; + pb->thing_word = HEADER_PROC_BIN; + pb->size = bptr->orig_size; + pb->next = ohp->first; + ohp->first = (struct erl_off_heap_header*)pb; + pb->val = bptr; + pb->bytes = (byte*) bptr->orig_bytes; + pb->flags = 0; + OH_OVERHEAD(ohp, pb->size / sizeof(Eterm)); + + return make_binary(pb); +} + +/** @brief Initiate a ProcBin for a full Binary. + * @param hp must point to PROC_BIN_SIZE available heap words. + */ +Eterm erts_build_proc_bin(ErlOffHeap* ohp, Eterm* hp, Binary* bptr) +{ + return build_proc_bin(ohp, hp, bptr); +} + /* * Create a brand new binary from scratch. */ - Eterm new_binary(Process *p, byte *buf, Uint len) { - ProcBin* pb; Binary* bptr; if (len <= ERL_ONHEAP_BIN_LIMIT) { @@ -88,23 +110,7 @@ new_binary(Process *p, byte *buf, Uint len) sys_memcpy(bptr->orig_bytes, buf, len); } - /* - * Now allocate the ProcBin on the heap. - */ - pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = len; - pb->next = MSO(p).first; - MSO(p).first = (struct erl_off_heap_header*)pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; - - /* - * Miscellaneous updates. Return the tagged binary. 
- */ - OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm)); - return make_binary(pb); + return build_proc_bin(&MSO(p), HAlloc(p, PROC_BIN_SIZE), bptr); } /* @@ -113,7 +119,6 @@ new_binary(Process *p, byte *buf, Uint len) Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len) { - ProcBin* pb; Binary* bptr; /* @@ -124,23 +129,7 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len) sys_memcpy(bptr->orig_bytes, buf, len); } - /* - * Now allocate the ProcBin on the heap. - */ - pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = len; - pb->next = MSO(p).first; - MSO(p).first = (struct erl_off_heap_header*)pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; - - /* - * Miscellaneous updates. Return the tagged binary. - */ - OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm)); - return make_binary(pb); + return build_proc_bin(&MSO(p), HAlloc(p, PROC_BIN_SIZE), bptr); } /* @@ -964,7 +953,10 @@ HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1) BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1) { if (is_binary(BIF_ARG_1)) { - BIF_RET(BIF_ARG_1); + if (binary_bitsize(BIF_ARG_1) == 0) { + BIF_RET(BIF_ARG_1); + } + BIF_ERROR(BIF_P, BADARG); } return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]); } diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index a46063142e..3967f7f7fc 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -333,6 +333,12 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) erts_print(to, to_arg, "Heap unused: %bpu\n", (p->hend - p->htop)); erts_print(to, to_arg, "OldHeap unused: %bpu\n", (OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HTOP(p)) ); + erts_print(to, to_arg, "BinVHeap: %b64u\n", p->off_heap.overhead); + erts_print(to, to_arg, "OldBinVHeap: %b64u\n", BIN_OLD_VHEAP(p)); + erts_print(to, to_arg, "BinVHeap unused: %b64u\n", + BIN_VHEAP_SZ(p) - p->off_heap.overhead); + erts_print(to, to_arg, "OldBinVHeap unused: %b64u\n", + BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p)); erts_print(to, to_arg, "Memory: %beu\n", erts_process_memory(p, !0)); if (garbing) { @@ -355,7 +361,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) static void print_garb_info(fmtfn_t to, void *to_arg, Process* p) { - /* ERTS_SMP: A scheduler is probably concurrently doing gc... */ + /* A scheduler is probably concurrently doing gc... 
*/ if (!ERTS_IS_CRASH_DUMPING) return; erts_print(to, to_arg, "New heap start: %bpX\n", p->heap); @@ -485,9 +491,7 @@ loaded(fmtfn_t to, void *to_arg) static void dump_attributes(fmtfn_t to, void *to_arg, byte* ptr, int size) { - while (size-- > 0) { - erts_print(to, to_arg, "%02X", *ptr++); - } + erts_print_base64(to, to_arg, ptr, size); erts_print(to, to_arg, "\n"); } @@ -502,7 +506,7 @@ do_break(void) /* check if we're in console mode and, if so, halt immediately if break is called */ mode = erts_read_env("ERL_CONSOLE_MODE"); - if (mode && strcmp(mode, "window") != 0) + if (mode && sys_strcmp(mode, "window") != 0) erts_exit(0, ""); erts_free_read_env(mode); #endif /* __WIN32__ */ @@ -774,16 +778,16 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) * - write dump until alarm or file is written completely */ - if (erts_sys_getenv__("ERL_CRASH_DUMP_SECONDS", env, &envsz) != 0) { - env_erl_crash_dump_seconds_set = 0; - secs = -1; + if (erts_sys_explicit_8bit_getenv("ERL_CRASH_DUMP_SECONDS", env, &envsz) == 1) { + env_erl_crash_dump_seconds_set = 1; + secs = atoi(env); } else { - env_erl_crash_dump_seconds_set = 1; - secs = atoi(env); + env_erl_crash_dump_seconds_set = 0; + secs = -1; } if (secs == 0) { - return; + return; } /* erts_sys_prepare_crash_dump returns 1 if heart port is found, otherwise 0 @@ -799,7 +803,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) crash_dump_limit = ERTS_SINT64_MAX; envsz = sizeof(env); - if (erts_sys_getenv__("ERL_CRASH_DUMP_BYTES", env, &envsz) == 0) { + if (erts_sys_explicit_8bit_getenv("ERL_CRASH_DUMP_BYTES", env, &envsz) == 1) { Sint64 limit; char* endptr; errno = 0; @@ -812,7 +816,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) } } - if (erts_sys_getenv__("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0) + if (erts_sys_explicit_8bit_getenv("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 1) dumpname = "erl_crash.dump"; else dumpname = &dumpnamebuf[0]; @@ -845,7 +849,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) } time(&now); - erts_cbprintf(to, to_arg, "=erl_crash_dump:0.4\n%s", ctime(&now)); + erts_cbprintf(to, to_arg, "=erl_crash_dump:0.5\n%s", ctime(&now)); if (file != NULL) erts_cbprintf(to, to_arg, "The error occurred in file %s, line %d\n", file, line); @@ -890,6 +894,21 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) erts_print_scheduler_info(to, to_arg, ERTS_SCHEDULER_IX(i)), erts_cbprintf(to, to_arg, "** crashed **\n")); } + for (i = 0; i < erts_no_dirty_cpu_schedulers; i++) { + ERTS_SYS_TRY_CATCH( + erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_CPU_SCHEDULER_IX(i)), + erts_cbprintf(to, to_arg, "** crashed **\n")); + } + erts_cbprintf(to, to_arg, "=dirty_cpu_run_queue\n"); + erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_CPU_RUNQ); + + for (i = 0; i < erts_no_dirty_io_schedulers; i++) { + ERTS_SYS_TRY_CATCH( + erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_IO_SCHEDULER_IX(i)), + erts_cbprintf(to, to_arg, "** crashed **\n")); + } + erts_cbprintf(to, to_arg, "=dirty_io_run_queue\n"); + erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_IO_RUNQ); #endif @@ -956,3 +975,28 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) erts_fprintf(stderr,"done\n"); } +void +erts_print_base64(fmtfn_t to, void *to_arg, byte* src, Uint size) +{ + static const byte base64_chars[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + + while (size >= 3) { + erts_putc(to, to_arg, base64_chars[src[0] >> 2]); + 
erts_putc(to, to_arg, base64_chars[((src[0] & 0x03) << 4) | (src[1] >> 4)]); + erts_putc(to, to_arg, base64_chars[((src[1] & 0x0f) << 2) | (src[2] >> 6)]); + erts_putc(to, to_arg, base64_chars[src[2] & 0x3f]); + size -= 3; + src += 3; + } + if (size == 1) { + erts_putc(to, to_arg, base64_chars[src[0] >> 2]); + erts_putc(to, to_arg, base64_chars[(src[0] & 0x03) << 4]); + erts_print(to, to_arg, "=="); + } else if (size == 2) { + erts_putc(to, to_arg, base64_chars[src[0] >> 2]); + erts_putc(to, to_arg, base64_chars[((src[0] & 0x03) << 4) | (src[1] >> 4)]); + erts_putc(to, to_arg, base64_chars[(src[1] & 0x0f) << 2]); + erts_putc(to, to_arg, '='); + } +} diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab index 9f03b19731..94e0000c8b 100644 --- a/erts/emulator/beam/bs_instrs.tab +++ b/erts/emulator/beam/bs_instrs.tab @@ -919,9 +919,23 @@ i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) { } i_bs_get_utf8(Ctx, Fail, Dst) { + Eterm result; ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx); - Eterm result = erts_bs_get_utf8(mb); + if (mb->size - mb->offset < 8) { + $FAIL($Fail); + } + if (BIT_OFFSET(mb->offset) != 0) { + result = erts_bs_get_utf8(mb); + } else { + byte b = mb->base[BYTE_OFFSET(mb->offset)]; + if (b < 128) { + result = make_small(b); + mb->offset += 8; + } else { + result = erts_bs_get_utf8(mb); + } + } if (is_non_value(result)) { $FAIL($Fail); } @@ -976,6 +990,9 @@ ctx_to_bin.execute() { Uint hole_size; Uint orig = mb->orig; ErlSubBin* sb = (ErlSubBin *) boxed_val(context); + /* Since we're going to overwrite the match state with the result, an + * ErlBinMatchState must be at least as large as an ErlSubBin. */ + ERTS_CT_ASSERT(sizeof(ErlSubBin) <= sizeof(ErlBinMatchState)); hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE; sb->thing_word = HEADER_SUB_BIN; sb->size = BYTE_OFFSET(size); diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index 10bf197405..7769a914db 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -611,7 +611,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint Eterm* htop; Eterm* hbot; Eterm* hp; - Eterm* objp; + Eterm* ERTS_RESTRICT objp; Eterm* tp; Eterm res; Eterm elem; @@ -1821,7 +1821,8 @@ all_clean: * * NOTE: Assumes that term is a tuple (ptr is an untagged tuple ptr). 
*/ -Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) +Eterm copy_shallow(Eterm* ERTS_RESTRICT ptr, Uint sz, Eterm** hpp, + ErlOffHeap* off_heap) { Eterm* tp = ptr; Eterm* hp = *hpp; @@ -1985,7 +1986,7 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite if (is_header(val)) { struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp; ASSERT(ptr + header_arity(val) < end); - move_boxed(&ptr, val, &hp, &dummy_ref); + ptr = move_boxed(ptr, val, &hp, &dummy_ref); switch (val & _HEADER_SUBTAG_MASK) { case REF_SUBTAG: if (is_ordinary_ref_thing(hdr)) @@ -2002,7 +2003,7 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite } else { /* must be a cons cell */ ASSERT(ptr+1 < end); - move_cons(&ptr, val, &hp, &dummy_ref); + move_cons(ptr, val, &hp, &dummy_ref); ptr += 2; } } diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index bc168fc58d..cd799e04b8 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -103,21 +103,12 @@ dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz) -#define PASS_THROUGH 'p' /* This code should go */ - int erts_is_alive; /* System must be blocked on change */ int erts_dist_buf_busy_limit; /* distribution trap functions */ -Export* dsend2_trap = NULL; -Export* dsend3_trap = NULL; -/*Export* dsend_nosuspend_trap = NULL;*/ -Export* dlink_trap = NULL; -Export* dunlink_trap = NULL; Export* dmonitor_node_trap = NULL; -Export* dgroup_leader_trap = NULL; -Export* dexit_trap = NULL; Export* dmonitor_p_trap = NULL; /* local variables */ @@ -129,6 +120,7 @@ static void clear_dist_entry(DistEntry*); static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy); static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm); static void init_nodes_monitors(void); +static Sint abort_connection(DistEntry* dep, Uint32 conn_id); static erts_atomic_t no_caches; static erts_atomic_t no_nodes; @@ -383,22 +375,24 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) if (!rp) goto done; erts_proc_lock(rp, rp_locks); - rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name); - if (rlnk != NULL) { - ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE)); - erts_destroy_link(rlnk); - } - n = ERTS_LINK_REFC(lnk); - for (i = 0; i < n; ++i) { - Eterm tup; - Eterm *hp; - ErtsMessage *msgp; - - msgp = erts_alloc_message_heap(rp, &rp_locks, - 3, &hp, &ohp); - tup = TUPLE2(hp, am_nodedown, name); - erts_queue_message(rp, rp_locks, msgp, tup, am_system); - } + if (!ERTS_PROC_IS_EXITING(rp)) { + rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name); + if (rlnk != NULL) { + ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE)); + erts_destroy_link(rlnk); + } + n = ERTS_LINK_REFC(lnk); + for (i = 0; i < n; ++i) { + Eterm tup; + Eterm *hp; + ErtsMessage *msgp; + + msgp = erts_alloc_message_heap(rp, &rp_locks, + 3, &hp, &ohp); + tup = TUPLE2(hp, am_nodedown, name); + erts_queue_message(rp, rp_locks, msgp, tup, am_system); + } + } erts_proc_unlock(rp, rp_locks); } done: @@ -484,41 +478,55 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */ DistEntry *tdep; - int no_dist_ctrl = 0; + int no_dist_ctrl; + int no_pending; Eterm nd_reason = (reason == am_no_network ? 
am_no_network : am_net_kernel_terminated); + int i = 0; + Eterm *dist_ctrl; + DistEntry** pending; + + ERTS_UNDEF(dist_ctrl, NULL); + ERTS_UNDEF(pending, NULL); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); - for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) - no_dist_ctrl++; - for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) - no_dist_ctrl++; + no_dist_ctrl = (erts_no_of_hidden_dist_entries + + erts_no_of_visible_dist_entries); + no_pending = erts_no_of_pending_dist_entries; /* KILL all port controllers */ - if (no_dist_ctrl == 0) - erts_rwmtx_runlock(&erts_dist_table_rwmtx); - else { - Eterm def_buf[128]; - int i = 0; - Eterm *dist_ctrl; - - if (no_dist_ctrl <= sizeof(def_buf)/sizeof(def_buf[0])) - dist_ctrl = &def_buf[0]; - else - dist_ctrl = erts_alloc(ERTS_ALC_T_TMP, - sizeof(Eterm)*no_dist_ctrl); + if (no_dist_ctrl) { + dist_ctrl = erts_alloc(ERTS_ALC_T_TMP, + sizeof(Eterm)*no_dist_ctrl); for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) { ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid)); + ASSERT(i < no_dist_ctrl); dist_ctrl[i++] = tdep->cid; } for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) { ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid)); + ASSERT(i < no_dist_ctrl); dist_ctrl[i++] = tdep->cid; } - erts_rwmtx_runlock(&erts_dist_table_rwmtx); + ASSERT(i == no_dist_ctrl); + } + if (no_pending) { + pending = erts_alloc(ERTS_ALC_T_TMP, sizeof(DistEntry*)*no_pending); + i = 0; + for (tdep = erts_pending_dist_entries; tdep; tdep = tdep->next) { + ASSERT(is_nil(tdep->cid)); + ASSERT(i < no_pending); + pending[i++] = tdep; + erts_ref_dist_entry(tdep); + } + ASSERT(i == no_pending); + } + erts_rwmtx_runlock(&erts_dist_table_rwmtx); - for (i = 0; i < no_dist_ctrl; i++) { + if (no_dist_ctrl) { + for (i = 0; i < no_dist_ctrl; i++) { if (is_internal_pid(dist_ctrl[i])) schedule_kill_dist_ctrl_proc(dist_ctrl[i]); else { @@ -532,11 +540,18 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) prt, dist_ctrl[i], nd_reason, NULL); } } - } + } + erts_free(ERTS_ALC_T_TMP, dist_ctrl); + } + + if (no_pending) { + for (i = 0; i < no_pending; i++) { + abort_connection(pending[i], pending[i]->connection_id); + erts_deref_dist_entry(pending[i]); + } + erts_free(ERTS_ALC_T_TMP, pending); + } - if (dist_ctrl != &def_buf[0]) - erts_free(ERTS_ALC_T_TMP, dist_ctrl); - } /* * When last dist ctrl exits, node will be taken @@ -573,13 +588,13 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) erts_port_task_abort(&dep->dist_cmd); } - if (dep->status & ERTS_DE_SFLG_EXITING) { + if (dep->state == ERTS_DE_STATE_EXITING) { #ifdef DEBUG ASSERT(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT); #endif } else { - dep->status |= ERTS_DE_SFLG_EXITING; + dep->state = ERTS_DE_STATE_EXITING; erts_mtx_lock(&dep->qlock); ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT)); erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_EXIT); @@ -613,7 +628,6 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) reason == am_normal ? am_connection_closed : reason); clear_dist_entry(dep); - } dec_no_nodes(); @@ -627,6 +641,14 @@ trap_function(Eterm func, int arity) return erts_export_put(am_erlang, func, arity); } +/* + * Sync with dist_util.erl: + * + * -record(erts_dflags, + * {default, mandatory, addable, rejectable, strict_order}). 
+ */ +static Eterm erts_dflags_record; + void init_dist(void) { init_nodes_monitors(); @@ -638,18 +660,21 @@ void init_dist(void) erts_atomic_init_nob(&no_caches, 0); /* Lookup/Install all references to trap functions */ - dsend2_trap = trap_function(am_dsend,2); - dsend3_trap = trap_function(am_dsend,3); - /* dsend_nosuspend_trap = trap_function(am_dsend_nosuspend,2);*/ - dlink_trap = trap_function(am_dlink,1); - dunlink_trap = trap_function(am_dunlink,1); dmonitor_node_trap = trap_function(am_dmonitor_node,3); - dgroup_leader_trap = trap_function(am_dgroup_leader,2); - dexit_trap = trap_function(am_dexit, 2); dmonitor_p_trap = trap_function(am_dmonitor_p, 2); dist_ctrl_put_data_trap = erts_export_put(am_erts_internal, am_dist_ctrl_put_data, 2); + { + Eterm* hp = erts_alloc(ERTS_ALC_T_LITERAL, (1+6)*sizeof(Eterm)); + erts_dflags_record = TUPLE6(hp, am_erts_dflags, + make_small(DFLAG_DIST_DEFAULT), + make_small(DFLAG_DIST_MANDATORY), + make_small(DFLAG_DIST_ADDABLE), + make_small(DFLAG_DIST_REJECTABLE), + make_small(DFLAG_DIST_STRICT_ORDER)); + erts_set_literal_tag(&erts_dflags_record, hp, (1+6)); + } } #define ErtsDistOutputBuf2Binary(OB) \ @@ -664,6 +689,7 @@ alloc_dist_obuf(Uint size) obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0]; #ifdef DEBUG obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN; + obuf->alloc_endp = obuf->data + size; ASSERT(bin == ErtsDistOutputBuf2Binary(obuf)); #endif return obuf; @@ -684,31 +710,11 @@ size_obuf(ErtsDistOutputBuf *obuf) return bin->orig_size; } -static void clear_dist_entry(DistEntry *dep) +static ErtsDistOutputBuf* clear_de_out_queues(DistEntry* dep) { - Sint obufsize = 0; - ErtsAtomCache *cache; - ErtsProcList *suspendees; ErtsDistOutputBuf *obuf; - erts_de_rwlock(dep); - erts_atomic_set_nob(&dep->input_handler, - (erts_aint_t) NIL); - cache = dep->cache; - dep->cache = NULL; - -#ifdef DEBUG - erts_de_links_lock(dep); - ASSERT(!dep->nlinks); - ASSERT(!dep->node_links); - ASSERT(!dep->monitors); - erts_de_links_unlock(dep); -#endif - - erts_mtx_lock(&dep->qlock); - - erts_atomic64_set_nob(&dep->in, 0); - erts_atomic64_set_nob(&dep->out, 0); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&dep->qlock)); if (!dep->out_queue.last) obuf = dep->finalized_out_queue.first; @@ -728,17 +734,13 @@ static void clear_dist_entry(DistEntry *dep) dep->tmp_out_queue.last = NULL; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - dep->status = 0; - suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL); - - erts_mtx_unlock(&dep->qlock); - erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0); - dep->send = NULL; - erts_de_rwunlock(dep); - erts_resume_processes(suspendees); + return obuf; +} - delete_cache(cache); +static void free_de_out_queues(DistEntry* dep, ErtsDistOutputBuf *obuf) +{ + Sint obufsize = 0; while (obuf) { ErtsDistOutputBuf *fobuf; @@ -750,13 +752,56 @@ static void clear_dist_entry(DistEntry *dep) if (obufsize) { erts_mtx_lock(&dep->qlock); - ASSERT(erts_atomic_read_nob(&dep->qsize) >= obufsize); + ASSERT(erts_atomic_read_nob(&dep->qsize) >= obufsize); erts_atomic_add_nob(&dep->qsize, (erts_aint_t) -obufsize); erts_mtx_unlock(&dep->qlock); } } +static void clear_dist_entry(DistEntry *dep) +{ + ErtsAtomCache *cache; + ErtsProcList *suspendees; + ErtsDistOutputBuf *obuf; + + erts_de_rwlock(dep); + erts_atomic_set_nob(&dep->input_handler, + (erts_aint_t) NIL); + cache = dep->cache; + dep->cache = NULL; + +#ifdef DEBUG + erts_de_links_lock(dep); + ASSERT(!dep->nlinks); + ASSERT(!dep->node_links); + ASSERT(!dep->monitors); + 
erts_de_links_unlock(dep); +#endif + + erts_mtx_lock(&dep->qlock); + + erts_atomic64_set_nob(&dep->in, 0); + erts_atomic64_set_nob(&dep->out, 0); + + obuf = clear_de_out_queues(dep); + dep->state = ERTS_DE_STATE_IDLE; + suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL); + + erts_mtx_unlock(&dep->qlock); + erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0); + dep->send = NULL; + erts_de_rwunlock(dep); + + erts_resume_processes(suspendees); + + delete_cache(cache); + + free_de_out_queues(dep, obuf); + if (dep->transcode_ctx) + transcode_free_ctx(dep); +} + int erts_dsend_context_dtor(Binary* ctx_bin) { ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); @@ -772,8 +817,8 @@ int erts_dsend_context_dtor(Binary* ctx_bin) if (ctx->dss.phase >= ERTS_DSIG_SEND_PHASE_ALLOC && ctx->dss.obuf) { free_dist_obuf(ctx->dss.obuf); } - if (ctx->dep_to_deref) - erts_deref_dist_entry(ctx->dep_to_deref); + if (ctx->deref_dep) + erts_deref_dist_entry(ctx->dep); return 1; } @@ -852,6 +897,13 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched, DeclareTmpHeapNoproc(ctl_heap,6); int res; + if (~dsdp->dep->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) { + /* + * Receiver does not support DOP_MONITOR_P_EXIT (see dsig_send_monitor) + */ + return ERTS_DSIG_SEND_OK; + } + UseTmpHeapNoproc(6); ctl = TUPLE5(&ctl_heap[0], make_small(DOP_MONITOR_P_EXIT), @@ -879,6 +931,16 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched, DeclareTmpHeapNoproc(ctl_heap,5); int res; + if (~dsdp->dep->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) { + /* + * Receiver does not support DOP_MONITOR_P. + * Just avoid sending it and by doing that reduce this monitor + * to only supervise the connection. This will work for simple c-nodes + * with a 1-to-1 relation between "Erlang process" and OS-process. 
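The guard added to erts_dsig_send_monitor above (and to the demonitor and monitor-exit variants around it) relies on the bitwise identity that "~flags & mask" is non-zero exactly when at least one bit of mask is missing from flags. A minimal, self-contained sketch of that test, using made-up flag values rather than the real DFLAG_* constants:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for DFLAG_DIST_MONITOR and DFLAG_DIST_MONITOR_NAME. */
#define MON      (1u << 0)
#define MON_NAME (1u << 1)

/* Non-zero iff at least one bit of 'mask' is not set in 'flags'. */
static int missing_any(unsigned flags, unsigned mask)
{
    return (~flags & mask) != 0;
}

int main(void)
{
    assert( missing_any(0u,             MON | MON_NAME)); /* both missing */
    assert( missing_any(MON,            MON | MON_NAME)); /* one missing  */
    assert(!missing_any(MON | MON_NAME, MON | MON_NAME)); /* none missing */
    puts("ok");
    return 0;
}

When either monitor capability is absent the send functions simply report ERTS_DSIG_SEND_OK without emitting anything, so the monitor degrades to plain connection supervision as the comment explains.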
+ */ + return ERTS_DSIG_SEND_OK; + } + UseTmpHeapNoproc(5); ctl = TUPLE4(&ctl_heap[0], make_small(DOP_MONITOR_P), @@ -901,6 +963,13 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher, DeclareTmpHeapNoproc(ctl_heap,5); int res; + if (~dsdp->dep->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) { + /* + * Receiver does not support DOP_DEMONITOR_P (see dsig_send_monitor) + */ + return ERTS_DSIG_SEND_OK; + } + UseTmpHeapNoproc(5); ctl = TUPLE4(&ctl_heap[0], make_small(DOP_DEMONITOR_P), @@ -1161,7 +1230,7 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote) # define PURIFY_MSG(msg) \ do { \ char buf__[1]; size_t bufsz__ = sizeof(buf__); \ - if (erts_sys_getenv_raw("VALGRIND_LOG_XML", buf__, &bufsz__) >= 0) { \ + if (erts_sys_explicit_8bit_getenv("VALGRIND_LOG_XML", buf__, &bufsz__) >= 0) { \ VALGRIND_PRINTF_XML("<erlang_error_log>" \ "%s, line %d: %s</erlang_error_log>\n", \ __FILE__, __LINE__, msg); \ @@ -1324,7 +1393,7 @@ int erts_net_message(Port *prt, /* This is tricky (we MUST force a distributed send) */ ErtsDSigData dsd; int code; - code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0); + code = erts_dsig_prepare(&dsd, dep, NULL, 0, ERTS_DSP_NO_LOCK, 0, 0); if (code == ERTS_DSIG_PREP_CONNECTED) { code = erts_dsig_send_exit(&dsd, to, from, am_noproc); ASSERT(code == ERTS_DSIG_SEND_OK); @@ -1416,7 +1485,7 @@ int erts_net_message(Port *prt, if (!rp) { ErtsDSigData dsd; int code; - code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0); + code = erts_dsig_prepare(&dsd, dep, NULL, 0, ERTS_DSP_NO_LOCK, 0, 0); if (code == ERTS_DSIG_PREP_CONNECTED) { code = erts_dsig_send_m_exit(&dsd, watcher, watched, ref, am_noproc); @@ -1879,33 +1948,32 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) if (ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE) { ctx->acmp = erts_get_atom_cache_map(ctx->c_p); - ctx->pass_through_size = 0; + ctx->max_finalize_prepend = 0; } else { ctx->acmp = NULL; - ctx->pass_through_size = 1; + ctx->max_finalize_prepend = 3; } #ifdef ERTS_DIST_MSG_DBG - erts_fprintf(stderr, ">>%s CTL: %T\n", ctx->pass_through_size ? 
"P" : " ", ctx->ctl); - if (is_value(ctx->msg)) - erts_fprintf(stderr, " MSG: %T\n", ctx->msg); + erts_fprintf(stderr, ">> CTL: %T\n", ctx->ctl); + if (is_value(ctx->msg)) + erts_fprintf(stderr, " MSG: %T\n", ctx->msg); #endif - ctx->data_size = ctx->pass_through_size; + ctx->data_size = ctx->max_finalize_prepend; erts_reset_atom_cache_map(ctx->acmp); erts_encode_dist_ext_size(ctx->ctl, ctx->flags, ctx->acmp, &ctx->data_size); - if (is_value(ctx->msg)) { - ctx->u.sc.wstack.wstart = NULL; - ctx->u.sc.flags = ctx->flags; - ctx->u.sc.level = 0; - ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_SIZE; - } else { - ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC; - } - break; + if (is_non_value(ctx->msg)) { + ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC; + break; + } + ctx->u.sc.wstack.wstart = NULL; + ctx->u.sc.flags = ctx->flags; + ctx->u.sc.level = 0; + ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_SIZE; case ERTS_DSIG_SEND_PHASE_MSG_SIZE: if (erts_encode_dist_ext_size_int(ctx->msg, ctx, &ctx->data_size)) { retval = ERTS_DSIG_SEND_CONTINUE; @@ -1920,23 +1988,25 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) ctx->data_size += ctx->dhdr_ext_size; ctx->obuf = alloc_dist_obuf(ctx->data_size); - ctx->obuf->ext_endp = &ctx->obuf->data[0] + ctx->pass_through_size + ctx->dhdr_ext_size; + ctx->obuf->ext_endp = &ctx->obuf->data[0] + ctx->max_finalize_prepend + ctx->dhdr_ext_size; /* Encode internal version of dist header */ ctx->obuf->extp = erts_encode_ext_dist_header_setup(ctx->obuf->ext_endp, ctx->acmp); /* Encode control message */ erts_encode_dist_ext(ctx->ctl, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, NULL, NULL); - if (is_value(ctx->msg)) { - ctx->u.ec.flags = ctx->flags; - ctx->u.ec.level = 0; - ctx->u.ec.wstack.wstart = NULL; - ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_ENCODE; - } else { - ctx->phase = ERTS_DSIG_SEND_PHASE_FIN; - } - break; - - case ERTS_DSIG_SEND_PHASE_MSG_ENCODE: + if (is_non_value(ctx->msg)) { + ctx->obuf->msg_start = NULL; + ctx->phase = ERTS_DSIG_SEND_PHASE_FIN; + break; + } + ctx->u.ec.flags = ctx->flags; + ctx->u.ec.hopefull_flags = 0; + ctx->u.ec.level = 0; + ctx->u.ec.wstack.wstart = NULL; + ctx->obuf->msg_start = ctx->obuf->ext_endp; + + ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_ENCODE; + case ERTS_DSIG_SEND_PHASE_MSG_ENCODE: if (erts_encode_dist_ext(ctx->msg, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, &ctx->u.ec, &ctx->reds)) { retval = ERTS_DSIG_SEND_CONTINUE; goto done; @@ -1949,11 +2019,12 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) int resume = 0; ASSERT(ctx->obuf->extp < ctx->obuf->ext_endp); - ASSERT(&ctx->obuf->data[0] <= ctx->obuf->extp - ctx->pass_through_size); + ASSERT(&ctx->obuf->data[0] <= ctx->obuf->extp - ctx->max_finalize_prepend); ASSERT(ctx->obuf->ext_endp <= &ctx->obuf->data[0] + ctx->data_size); ctx->data_size = ctx->obuf->ext_endp - ctx->obuf->extp; + ctx->obuf->hopefull_flags = ctx->u.ec.hopefull_flags; /* * Signal encoded; now verify that the connection still exists, * and if so enqueue the signal and schedule it for send. @@ -1961,9 +2032,9 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) ctx->obuf->next = NULL; erts_de_rlock(dep); cid = dep->cid; - if (cid != dsdp->cid - || dep->connection_id != dsdp->connection_id - || dep->status & ERTS_DE_SFLG_EXITING) { + if (dep->state == ERTS_DE_STATE_EXITING + || dep->state == ERTS_DE_STATE_IDLE + || dep->connection_id != dsdp->connection_id) { /* Not the same connection as when we started; drop message... 
*/ erts_de_runlock(dep); free_dist_obuf(ctx->obuf); @@ -2037,8 +2108,13 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) } erts_mtx_unlock(&dep->qlock); - if (is_internal_port(dep->cid)) - erts_schedule_dist_command(NULL, dep); + if (dep->state != ERTS_DE_STATE_PENDING) { + if (is_internal_port(dep->cid)) + erts_schedule_dist_command(NULL, dep); + } + else { + notify_proc = NIL; + } erts_de_runlock(dep); if (is_internal_pid(notify_proc)) notify_dist_data(ctx->c_p, notify_proc); @@ -2203,7 +2279,6 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf) #endif #define ERTS_PORT_REDS_DIST_CMD_START 5 -#define ERTS_PORT_REDS_DIST_CMD_FINALIZE 3 #define ERTS_PORT_REDS_DIST_CMD_EXIT 200 #define ERTS_PORT_REDS_DIST_CMD_RESUMED 5 #define ERTS_PORT_REDS_DIST_CMD_DATA(SZ) \ @@ -2212,10 +2287,10 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf) : ((((Sint) (SZ)) >> 10) & ((Sint) ERTS_PORT_REDS_MASK__))) int -erts_dist_command(Port *prt, int reds_limit) +erts_dist_command(Port *prt, int initial_reds) { - Sint reds = ERTS_PORT_REDS_DIST_CMD_START; - Uint32 status; + Sint reds = initial_reds - ERTS_PORT_REDS_DIST_CMD_START; + enum dist_entry_state state; Uint32 flags; Sint qsize, obufsize = 0; ErtsDistOutputQueue oq, foq; @@ -2230,15 +2305,18 @@ erts_dist_command(Port *prt, int reds_limit) erts_de_rlock(dep); flags = dep->flags; - status = dep->status; + state = dep->state; send = dep->send; erts_de_runlock(dep); - if (status & ERTS_DE_SFLG_EXITING) { + if (state == ERTS_DE_STATE_EXITING) { erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1); - return reds + ERTS_PORT_REDS_DIST_CMD_EXIT; + reds -= ERTS_PORT_REDS_DIST_CMD_EXIT; + return initial_reds - reds; } + ASSERT(state != ERTS_DE_STATE_PENDING); + ASSERT(send); /* @@ -2263,7 +2341,7 @@ erts_dist_command(Port *prt, int reds_limit) sched_flags = erts_atomic32_read_nob(&prt->sched.flags); - if (reds > reds_limit) + if (reds < 0) goto preempted; if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) { @@ -2278,13 +2356,13 @@ erts_dist_command(Port *prt, int reds_limit) erts_fprintf(stderr, ">> "); bw(foq.first->extp, size); #endif - reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - fob = foq.first; - obufsize += size_obuf(fob); - foq.first = foq.first->next; - free_dist_obuf(fob); + reds -= ERTS_PORT_REDS_DIST_CMD_DATA(size); + fob = foq.first; + obufsize += size_obuf(fob); + foq.first = foq.first->next; + free_dist_obuf(fob); sched_flags = erts_atomic32_read_nob(&prt->sched.flags); - preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); + preempt = reds < 0 || (sched_flags & ERTS_PTS_FLG_EXIT); if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) break; } while (foq.first && !preempt); @@ -2297,81 +2375,71 @@ erts_dist_command(Port *prt, int reds_limit) if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) { if (oq.first) { ErtsDistOutputBuf *ob; - int preempt; + ErtsDistOutputBuf *last_finalized = NULL; finalize_only: - preempt = 0; ob = oq.first; ASSERT(ob); do { - ob->extp = erts_encode_ext_dist_header_finalize(ob->extp, - dep->cache, - flags); - if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) - *--ob->extp = PASS_THROUGH; /* Old node; 'pass through' - needed */ - ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp); - reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; - preempt = reds > reds_limit; - if (preempt) - break; - ob = ob->next; + reds = erts_encode_ext_dist_header_finalize(ob, dep, flags, reds); + if (reds < 0) + break; + last_finalized = ob; + ob = ob->next; } while (ob); - /* - * At least one buffer was 
finalized; if we got preempted, - * ob points to the last buffer that we finalized. - */ - if (foq.last) - foq.last->next = oq.first; - else - foq.first = oq.first; - if (!preempt) { - /* All buffers finalized */ - foq.last = oq.last; - oq.first = oq.last = NULL; - } - else { - /* Not all buffers finalized; split oq. */ - foq.last = ob; - oq.first = ob->next; - if (oq.first) - ob->next = NULL; - else - oq.last = NULL; - } - if (preempt) - goto preempted; + if (last_finalized) { + /* + * At least one buffer was finalized; if we got preempted, + * ob points to the next buffer to continue finalize. + */ + if (foq.last) + foq.last->next = oq.first; + else + foq.first = oq.first; + foq.last = last_finalized; + if (!ob) { + /* All buffers finalized */ + ASSERT(foq.last == oq.last); + ASSERT(foq.last->next == NULL); + oq.first = oq.last = NULL; + } + else { + /* Not all buffers finalized; split oq. */ + ASSERT(foq.last->next == ob); + foq.last->next = NULL; + oq.first = ob; + } + } + if (reds <= 0) + goto preempted; } } else { int de_busy; int preempt = 0; while (oq.first && !preempt) { - ErtsDistOutputBuf *fob; - Uint size; - oq.first->extp - = erts_encode_ext_dist_header_finalize(oq.first->extp, - dep->cache, - flags); - reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; - if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) - *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through' - needed */ - ASSERT(&oq.first->data[0] <= oq.first->extp - && oq.first->extp < oq.first->ext_endp); - size = (*send)(prt, oq.first); + ErtsDistOutputBuf *fob; + Uint size; + reds = erts_encode_ext_dist_header_finalize(oq.first, dep, flags, reds); + if (reds < 0) { + preempt = 1; + break; + } + ASSERT(&oq.first->data[0] <= oq.first->extp + && oq.first->extp <= oq.first->ext_endp); + size = (*send)(prt, oq.first); erts_atomic64_inc_nob(&dep->out); - esdp->io.out += (Uint64) size; + esdp->io.out += (Uint64) size; #ifdef ERTS_RAW_DIST_MSG_DBG erts_fprintf(stderr, ">> "); bw(oq.first->extp, size); #endif - reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - fob = oq.first; - obufsize += size_obuf(fob); - oq.first = oq.first->next; - free_dist_obuf(fob); + reds -= ERTS_PORT_REDS_DIST_CMD_DATA(size); + fob = oq.first; + obufsize += size_obuf(fob); + oq.first = oq.first->next; + free_dist_obuf(fob); sched_flags = erts_atomic32_read_nob(&prt->sched.flags); - preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); + preempt = reds <= 0 || (sched_flags & ERTS_PTS_FLG_EXIT); if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt) goto finalize_only; } @@ -2411,7 +2479,7 @@ erts_dist_command(Port *prt, int reds_limit) erts_mtx_unlock(&dep->qlock); resumed = erts_resume_processes(suspendees); - reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; + reds -= resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; } else erts_mtx_unlock(&dep->qlock); @@ -2434,8 +2502,7 @@ erts_dist_command(Port *prt, int reds_limit) erts_mtx_unlock(&dep->qlock); } - ASSERT(foq.first || !foq.last); - ASSERT(!foq.first || foq.last); + ASSERT(!!foq.first == !!foq.last); ASSERT(!dep->finalized_out_queue.first); ASSERT(!dep->finalized_out_queue.last); @@ -2445,10 +2512,10 @@ erts_dist_command(Port *prt, int reds_limit) } /* Avoid wrapping reduction counter... */ - if (reds > INT_MAX/2) - reds = INT_MAX/2; + if (reds < INT_MIN/2) + reds = INT_MIN/2; - return reds; + return initial_reds - reds; preempted: /* @@ -2456,8 +2523,7 @@ erts_dist_command(Port *prt, int reds_limit) * since last call to driver. 
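The reworked erts_dist_command in this hunk switches from counting reductions upward toward a limit to counting downward from initial_reds: each unit of work subtracts its cost, a negative balance triggers the preempted path, and the function reports what it actually consumed as initial_reds - reds. A small stand-alone sketch of that accounting pattern (the names and the per-unit cost are placeholders, not the real port scheduler API):

#include <stdio.h>

#define WORK_COST 7   /* hypothetical cost per unit of work */

/* Returns the number of reductions consumed; *done tells whether all work finished. */
static int do_work(int initial_reds, int units, int *done)
{
    int reds = initial_reds;

    while (units > 0) {
        reds -= WORK_COST;        /* charge for one unit, as the dist code does */
        units--;
        if (reds < 0)
            break;                /* preempted: yield back to the scheduler     */
    }
    *done = (units == 0);
    return initial_reds - reds;   /* consumed, possibly slightly over budget    */
}

int main(void)
{
    int done;
    int used = do_work(20, 5, &done);
    printf("used=%d done=%d\n", used, done);
    return 0;
}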
*/ - ASSERT(oq.first || !oq.last); - ASSERT(!oq.first || oq.last); + ASSERT(!!oq.first == !!oq.last); if (sched_flags & ERTS_PTS_FLG_EXIT) { /* @@ -2481,12 +2547,6 @@ erts_dist_command(Port *prt, int reds_limit) foq.first = NULL; foq.last = NULL; - -#ifdef DEBUG - erts_mtx_lock(&dep->qlock); - ASSERT(erts_atomic_read_nob(&dep->qsize) == obufsize); - erts_mtx_unlock(&dep->qlock); -#endif } else { if (oq.first) { @@ -2776,7 +2836,8 @@ BIF_RETTYPE dist_ctrl_get_data_1(BIF_ALIST_1) { DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P); - int reds = 1; + const Sint initial_reds = ERTS_BIF_REDS_LEFT(BIF_P); + Sint reds = initial_reds; ErtsDistOutputBuf *obuf; Eterm *hp; ProcBin *pb; @@ -2790,7 +2851,7 @@ dist_ctrl_get_data_1(BIF_ALIST_1) erts_de_rlock(dep); - if (dep->status & ERTS_DE_SFLG_EXITING) + if (dep->state == ERTS_DE_STATE_EXITING) goto return_none; ASSERT(dep->cid == BIF_P->common.id); @@ -2807,6 +2868,7 @@ dist_ctrl_get_data_1(BIF_ALIST_1) { if (!dep->tmp_out_queue.first) { ASSERT(!dep->tmp_out_queue.last); + ASSERT(!dep->transcode_ctx); qsize = erts_atomic_read_acqb(&dep->qsize); if (qsize > 0) { erts_mtx_lock(&dep->qlock); @@ -2824,21 +2886,18 @@ dist_ctrl_get_data_1(BIF_ALIST_1) erts_de_runlock(dep); BIF_RET(am_none); } - else { - obuf = dep->tmp_out_queue.first; - dep->tmp_out_queue.first = obuf->next; - if (!obuf->next) - dep->tmp_out_queue.last = NULL; + + obuf = dep->tmp_out_queue.first; + reds = erts_encode_ext_dist_header_finalize(obuf, dep, dep->flags, reds); + if (reds < 0) { + erts_de_runlock(dep); + ERTS_BIF_YIELD1(bif_export[BIF_dist_ctrl_get_data_1], + BIF_P, BIF_ARG_1); } - obuf->extp = erts_encode_ext_dist_header_finalize(obuf->extp, - dep->cache, - dep->flags); - reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; - if (!(dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)) - *--obuf->extp = PASS_THROUGH; /* 'pass through' needed */ - ASSERT(&obuf->data[0] <= obuf->extp - && obuf->extp < obuf->ext_endp); + dep->tmp_out_queue.first = obuf->next; + if (!obuf->next) + dep->tmp_out_queue.last = NULL; } erts_atomic64_inc_nob(&dep->out); @@ -2866,11 +2925,11 @@ dist_ctrl_get_data_1(BIF_ALIST_1) erts_mtx_unlock(&dep->qlock); if (resume_procs) { int resumed = erts_resume_processes(resume_procs); - reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; + reds -= resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; } } - BIF_RET2(make_binary(pb), reds); + BIF_RET2(make_binary(pb), (initial_reds - reds)); } void @@ -2892,24 +2951,31 @@ erts_dist_port_not_busy(Port *prt) erts_schedule_dist_command(prt, NULL); } +static void kill_connection(DistEntry *dep) +{ + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); + ASSERT(dep->state == ERTS_DE_STATE_CONNECTED); + + dep->state = ERTS_DE_STATE_EXITING; + erts_mtx_lock(&dep->qlock); + ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT)); + erts_atomic32_read_bor_nob(&dep->qflgs, ERTS_DE_QFLG_EXIT); + erts_mtx_unlock(&dep->qlock); + + if (is_internal_port(dep->cid)) + erts_schedule_dist_command(NULL, dep); + else if (is_internal_pid(dep->cid)) + schedule_kill_dist_ctrl_proc(dep->cid); +} + void erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id) { erts_de_rwlock(dep); if (connection_id == dep->connection_id - && !(dep->status & ERTS_DE_SFLG_EXITING)) { - - dep->status |= ERTS_DE_SFLG_EXITING; - - erts_mtx_lock(&dep->qlock); - ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT)); - erts_atomic32_read_bor_nob(&dep->qflgs, ERTS_DE_QFLG_EXIT); - erts_mtx_unlock(&dep->qlock); + && dep->state == ERTS_DE_STATE_CONNECTED) { - if 
(is_internal_port(dep->cid)) - erts_schedule_dist_command(NULL, dep); - else if (is_internal_pid(dep->cid)) - schedule_kill_dist_ctrl_proc(dep->cid); + kill_connection(dep); } erts_de_rwunlock(dep); } @@ -3069,6 +3135,10 @@ int distribution_info(fmtfn_t to, void *arg) /* Called by break handler */ info_dist_entry(to, arg, dep, 0, 1); } + for (dep = erts_pending_dist_entries; dep; dep = dep->next) { + info_dist_entry(to, arg, dep, 0, 0); + } + for (dep = erts_not_connected_dist_entries; dep; dep = dep->next) { if (dep != erts_this_dist_entry) { info_dist_entry(to, arg, dep, 0, 0); @@ -3091,7 +3161,6 @@ int distribution_info(fmtfn_t to, void *arg) /* Called by break handler */ monitor_node -- turn on/off node monitoring node controller only: - dist_exit/3 -- send exit signals from remote to local process dist_link/2 -- link a remote process to a local dist_unlink/2 -- unlink a remote from a local ****************************************************************************/ @@ -3101,15 +3170,6 @@ int distribution_info(fmtfn_t to, void *arg) /* Called by break handler */ /********************************************************************** ** Set the node name of current node fail if node already is set. ** setnode(name@host, Creation) - ** loads functions pointer to trap_functions from module erlang. - ** erlang:dsend/2 - ** erlang:dlink/1 - ** erlang:dunlink/1 - ** erlang:dmonitor_node/3 - ** erlang:dgroup_leader/2 - ** erlang:dexit/2 - ** -- are these needed ? - ** dexit/1 ***********************************************************************/ BIF_RETTYPE setnode_2(BIF_ALIST_2) @@ -3133,15 +3193,8 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2) goto error; /* Check that all trap functions are defined !! */ - if (dsend2_trap->addressv[0] == NULL || - dsend3_trap->addressv[0] == NULL || - /* dsend_nosuspend_trap->address == NULL ||*/ - dlink_trap->addressv[0] == NULL || - dunlink_trap->addressv[0] == NULL || - dmonitor_node_trap->addressv[0] == NULL || - dgroup_leader_trap->addressv[0] == NULL || - dmonitor_p_trap->addressv[0] == NULL || - dexit_trap->addressv[0] == NULL) { + if (dmonitor_node_trap->addressv[0] == NULL || + dmonitor_p_trap->addressv[0] == NULL) { goto error; } @@ -3218,6 +3271,8 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) ErtsProcLocks proc_unlock = 0; Process *proc; Port *pp = NULL; + Eterm notify_proc; + erts_aint32_t qflgs; /* * Check and pick out arguments @@ -3245,21 +3300,25 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) if (!is_atom(ic) || !is_atom(oc)) goto badarg; - /* DFLAG_EXTENDED_REFERENCES is compulsory from R9 and forward */ - if (!(DFLAG_EXTENDED_REFERENCES & flags)) { + if (~flags & DFLAG_DIST_MANDATORY) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "%T", BIF_P->common.id); if (BIF_P->common.u.alive.reg) erts_dsprintf(dsbufp, " (%T)", BIF_P->common.u.alive.reg->name); erts_dsprintf(dsbufp, " attempted to enable connection to node %T " - "which is not able to handle extended references.\n", + "which does not support all mandatory capabilities.\n", BIF_ARG_1); erts_send_error_to_logger(BIF_P->group_leader, dsbufp); goto badarg; } /* + * ToDo: Should we not pass connection_id as well + * to make sure it's the right connection we commit. + */ + + /* * Arguments seem to be in order. 
*/ @@ -3302,6 +3361,23 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) goto badarg; } + if (dep->state == ERTS_DE_STATE_EXITING) { + /* Suspend on dist entry waiting for the exit to finish */ + ErtsProcList *plp = erts_proclist_create(BIF_P); + plp->next = NULL; + erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); + erts_mtx_lock(&dep->qlock); + erts_proclist_store_last(&dep->suspended, plp); + erts_mtx_unlock(&dep->qlock); + goto yield; + } + if (dep->state != ERTS_DE_STATE_PENDING) { + if (dep->state == ERTS_DE_STATE_IDLE) + erts_set_dist_entry_pending(dep); + else + goto badarg; + } + if (is_not_nil(dep->cid)) goto badarg; @@ -3334,7 +3410,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) goto done; /* Already set */ } - if (dep->status & ERTS_DE_SFLG_EXITING) { + if (dep->state == ERTS_DE_STATE_EXITING) { /* Suspend on dist entry waiting for the exit to finish */ ErtsProcList *plp = erts_proclist_create(BIF_P); plp->next = NULL; @@ -3344,8 +3420,12 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) erts_mtx_unlock(&dep->qlock); goto yield; } - - ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING)); + if (dep->state != ERTS_DE_STATE_PENDING) { + if (dep->state == ERTS_DE_STATE_IDLE) + erts_set_dist_entry_pending(dep); + else + goto badarg; + } if (pp->dist_entry || is_not_nil(dep->cid)) goto badarg; @@ -3376,7 +3456,8 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) dep->creation = 0; #ifdef DEBUG - ASSERT(erts_atomic_read_nob(&dep->qsize) == 0); + ASSERT(erts_atomic_read_nob(&dep->qsize) == 0 + || (dep->state == ERTS_DE_STATE_PENDING)); #endif if (flags & DFLAG_DIST_HDR_ATOM_CACHE) @@ -3384,7 +3465,26 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) erts_set_dist_entry_connected(dep, BIF_ARG_2, flags); + notify_proc = NIL; + if (erts_atomic_read_nob(&dep->qsize)) { + if (is_internal_port(dep->cid)) { + erts_schedule_dist_command(NULL, dep); + } + else { + qflgs = erts_atomic32_read_nob(&dep->qflgs); + if (qflgs & ERTS_DE_QFLG_REQ_INFO) { + qflgs = erts_atomic32_read_band_mb(&dep->qflgs, + ~ERTS_DE_QFLG_REQ_INFO); + if (qflgs & ERTS_DE_QFLG_REQ_INFO) { + notify_proc = dep->cid; + ASSERT(is_internal_pid(notify_proc)); + } + } + } + } erts_de_rwunlock(dep); + if (is_internal_pid(notify_proc)) + notify_dist_data(BIF_P, notify_proc); ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep)); @@ -3426,79 +3526,189 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) goto done; } +BIF_RETTYPE erts_internal_get_dflags_0(BIF_ALIST_0) +{ + return erts_dflags_record; +} -/**********************************************************************/ -/* dist_exit(Local, Term, Remote) -> Bool */ +BIF_RETTYPE erts_internal_new_connection_1(BIF_ALIST_1) +{ + DistEntry* dep; + Uint32 conn_id; + Eterm* hp; + Eterm dhandle; + + if (is_not_atom(BIF_ARG_1)) { + BIF_ERROR(BIF_P, BADARG); + } + dep = erts_find_or_insert_dist_entry(BIF_ARG_1); + + if (dep == erts_this_dist_entry) { + erts_deref_dist_entry(dep); + BIF_ERROR(BIF_P, BADARG); + } + + erts_de_rwlock(dep); + + switch (dep->state) { + case ERTS_DE_STATE_PENDING: + case ERTS_DE_STATE_CONNECTED: + conn_id = dep->connection_id; + break; + case ERTS_DE_STATE_IDLE: + erts_set_dist_entry_pending(dep); + conn_id = dep->connection_id; + break; + case ERTS_DE_STATE_EXITING: + conn_id = (dep->connection_id + 1) & ERTS_DIST_CON_ID_MASK; + break; + default: + erts_exit(ERTS_ABORT_EXIT, "Invalid dep->state (%d)\n", dep->state); + } + erts_de_rwunlock(dep); + hp = HAlloc(BIF_P, 3 + ERTS_MAGIC_REF_THING_SIZE); + dhandle = erts_build_dhandle(&hp, &BIF_P->off_heap, dep); + erts_deref_dist_entry(dep); + BIF_RET(TUPLE2(hp, make_small(conn_id), dhandle)); 
+} -BIF_RETTYPE dist_exit_3(BIF_ALIST_3) +static Sint abort_connection(DistEntry* dep, Uint32 conn_id) { - Eterm local; - Eterm remote; - DistEntry *rdep; + erts_de_rwlock(dep); - local = BIF_ARG_1; - remote = BIF_ARG_3; + if (dep->connection_id != conn_id) + ; + else if (dep->state == ERTS_DE_STATE_CONNECTED) { + kill_connection(dep); + } + else if (dep->state == ERTS_DE_STATE_PENDING) { + NetExitsContext nec = {dep}; + ErtsLink *nlinks; + ErtsLink *node_links; + ErtsMonitor *monitors; + ErtsAtomCache *cache; + ErtsDistOutputBuf *obuf; + ErtsProcList *resume_procs; + Sint reds = 0; + + ASSERT(is_nil(dep->cid)); - /* Check that remote is a remote process */ - if (is_not_external_pid(remote)) - goto error; + erts_de_links_lock(dep); + monitors = dep->monitors; + nlinks = dep->nlinks; + node_links = dep->node_links; + dep->monitors = NULL; + dep->nlinks = NULL; + dep->node_links = NULL; + erts_de_links_unlock(dep); - rdep = external_dist_entry(remote); - - if(rdep == erts_this_dist_entry) - goto error; + cache = dep->cache; + dep->cache = NULL; + erts_mtx_lock(&dep->qlock); + obuf = dep->out_queue.first; + dep->out_queue.first = NULL; + dep->out_queue.last = NULL; + ASSERT(!dep->tmp_out_queue.first); + ASSERT(!dep->finalized_out_queue.first); + resume_procs = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL); + erts_mtx_unlock(&dep->qlock); + erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0); + dep->send = NULL; - /* Check that local is local */ - if (is_internal_pid(local)) { - Process *lp; - ErtsProcLocks lp_locks; - if (BIF_P->common.id == local) { - lp_locks = ERTS_PROC_LOCKS_ALL; - lp = BIF_P; - erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); - } - else { - lp_locks = ERTS_PROC_LOCKS_XSIG_SEND; - lp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, - local, lp_locks); - if (!lp) { - BIF_RET(am_true); /* ignore */ - } - } - - (void) erts_send_exit_signal(BIF_P, - remote, - lp, - &lp_locks, - BIF_ARG_2, - NIL, - NULL, - 0); - if (lp == BIF_P) - lp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_proc_unlock(lp, lp_locks); - if (lp == BIF_P) { - erts_aint32_t state = erts_atomic32_read_acqb(&BIF_P->state); - /* - * We may have exited current process and may have to take action. - */ - if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { - if (state & ERTS_PSFLG_PENDING_EXIT) - erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); - ERTS_BIF_EXITED(BIF_P); - } - } + erts_set_dist_entry_not_connected(dep); + + erts_de_rwunlock(dep); + + erts_sweep_monitors(monitors, &doit_monitor_net_exits, &nec); + erts_sweep_links(nlinks, &doit_link_net_exits, &nec); + erts_sweep_links(node_links, &doit_node_link_net_exits, &nec); + + if (resume_procs) { + int resumed = erts_resume_processes(resume_procs); + reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; + } + + delete_cache(cache); + free_de_out_queues(dep, obuf); + + /* + * We wait to make DistEntry idle and accept new connection attempts + * until all is cleared and deallocated. This to get some back pressure + * against repeated failing connection attempts saturating all CPUs + * with cleanup jobs. 
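The patch replaces the old ERTS_DE_SFLG_* status bits with an explicit state field on the DistEntry. Pieced together from the transitions visible in this diff (erts_internal_new_connection_1, setnode_3, kill_connection, abort_connection and clear_dist_entry), the life cycle looks roughly like the sketch below; it is an illustration of the intended flow under those assumptions, not the actual erl_node_tables code.

#include <stdio.h>

typedef enum { DE_IDLE, DE_PENDING, DE_CONNECTED, DE_EXITING } de_state;
typedef enum { EV_START_SETUP, EV_CONTROLLER_SET,
               EV_ABORT_OR_EXIT, EV_CLEANUP_DONE } de_event;

/* One transition of the simplified life cycle; other events leave the state alone. */
static de_state de_next(de_state s, de_event e)
{
    switch (s) {
    case DE_IDLE:      return e == EV_START_SETUP    ? DE_PENDING   : s;
    case DE_PENDING:   return e == EV_CONTROLLER_SET ? DE_CONNECTED
                            : e == EV_ABORT_OR_EXIT  ? DE_EXITING   : s;
    case DE_CONNECTED: return e == EV_ABORT_OR_EXIT  ? DE_EXITING   : s;
    case DE_EXITING:   return e == EV_CLEANUP_DONE   ? DE_IDLE      : s;
    }
    return s;
}

int main(void)
{
    /* A failed setup: new connection -> abort -> cleanup -> idle again. */
    de_state s = DE_IDLE;
    s = de_next(s, EV_START_SETUP);    /* IDLE    -> PENDING */
    s = de_next(s, EV_ABORT_OR_EXIT);  /* PENDING -> EXITING */
    s = de_next(s, EV_CLEANUP_DONE);   /* EXITING -> IDLE    */
    printf("final state: %d (0 = idle)\n", s);
    return 0;
}

Keeping an entry out of IDLE until abort_connection has finished its cleanup is what provides the back pressure described in the comment above: a new setup attempt cannot begin until the previous one has been fully torn down.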
+ */ + erts_de_rwlock(dep); + ASSERT(dep->state == ERTS_DE_STATE_EXITING); + dep->state = ERTS_DE_STATE_IDLE; + erts_de_rwunlock(dep); + return reds; } - else if (is_external_pid(local) - && external_dist_entry(local) == erts_this_dist_entry) { - BIF_RET(am_true); /* ignore */ + erts_de_rwunlock(dep); + return 0; +} + +BIF_RETTYPE erts_internal_abort_connection_2(BIF_ALIST_2) +{ + DistEntry* dep; + Eterm* tp; + + if (is_not_atom(BIF_ARG_1) || is_not_tuple_arity(BIF_ARG_2, 2)) { + BIF_ERROR(BIF_P, BADARG); + } + tp = tuple_val(BIF_ARG_2); + dep = erts_dhandle_to_dist_entry(tp[2]); + if (is_not_small(tp[1]) || dep != erts_find_dist_entry(BIF_ARG_1) + || dep == erts_this_dist_entry) { + BIF_ERROR(BIF_P, BADARG); + } + + if (dep) { + Sint reds = abort_connection(dep, unsigned_val(tp[1])); + BUMP_REDS(BIF_P, reds); } - else - goto error; BIF_RET(am_true); +} - error: - BIF_ERROR(BIF_P, BADARG); +int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks) +{ + erts_de_rwlock(dep); + if (dep->state != ERTS_DE_STATE_IDLE) { + erts_de_rwunlock(dep); + } + else { + Process* net_kernel; + ErtsProcLocks nk_locks = ERTS_PROC_LOCK_MSGQ; + Eterm *hp; + ErlOffHeap *ohp; + ErtsMessage *mp; + Eterm msg, dhandle; + Uint32 conn_id; + + erts_set_dist_entry_pending(dep); + conn_id = dep->connection_id; + erts_de_rwunlock(dep); + + net_kernel = erts_whereis_process(proc, proc_locks, + am_net_kernel, nk_locks, 0); + if (!net_kernel) { + abort_connection(dep, conn_id); + return 0; + } + + /* + * Send {auto_connect, Node, ConnId, DHandle} to net_kernel + */ + mp = erts_alloc_message_heap(net_kernel, &nk_locks, + 5 + ERTS_MAGIC_REF_THING_SIZE, + &hp, &ohp); + dhandle = erts_build_dhandle(&hp, ohp, dep); + msg = TUPLE4(hp, am_auto_connect, dep->sysname, make_small(conn_id), + dhandle); + erts_queue_message(net_kernel, nk_locks, mp, msg, proc->common.id); + erts_proc_unlock(net_kernel, nk_locks); + } + + return 1; } /**********************************************************************/ @@ -3574,9 +3784,11 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) ASSERT(erts_no_of_not_connected_dist_entries > 0); ASSERT(erts_no_of_hidden_dist_entries >= 0); + ASSERT(erts_no_of_pending_dist_entries >= 0); ASSERT(erts_no_of_visible_dist_entries >= 0); if(not_connected) - length += (erts_no_of_not_connected_dist_entries - 1); + length += ((erts_no_of_not_connected_dist_entries - 1) + + erts_no_of_pending_dist_entries); if(hidden) length += erts_no_of_hidden_dist_entries; if(visible) @@ -3596,13 +3808,18 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) #ifdef DEBUG endp = hp + length*2; #endif - if(not_connected) + if(not_connected) { for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) { if (dep != erts_this_dist_entry) { result = CONS(hp, dep->sysname, result); hp += 2; } + } + for(dep = erts_pending_dist_entries; dep; dep = dep->next) { + result = CONS(hp, dep->sysname, result); + hp += 2; } + } if(hidden) for(dep = erts_hidden_dist_entries; dep; dep = dep->next) { result = CONS(hp, dep->sysname, result); @@ -3647,11 +3864,17 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) DistEntry *dep; ErtsLink *lnk; Eterm l; + int async_connect = 1; for (l = Options; l != NIL && is_list(l); l = CDR(list_val(l))) { Eterm t = CAR(list_val(l)); - /* allow_passive_connect the only available option right now */ - if (t != am_allow_passive_connect) { + if (t == am_allow_passive_connect) { + /* + * Handle this horrible feature by falling back on old synchronous + * auto-connect (if needed) + */ + async_connect = 0; + } 
else { BIF_ERROR(p, BADARG); } } @@ -3665,50 +3888,91 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) && (Node != erts_this_node->sysname))) { BIF_ERROR(p, BADARG); } - dep = erts_sysname_to_connected_dist_entry(Node); - if (!dep) { - do_trap: - BIF_TRAP3(dmonitor_node_trap, p, Node, Bool, Options); - } - if (dep == erts_this_dist_entry) - goto done; - - erts_proc_lock(p, ERTS_PROC_LOCK_LINK); - erts_de_rlock(dep); - if (ERTS_DE_IS_NOT_CONNECTED(dep)) { - erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); - erts_de_runlock(dep); - goto do_trap; - } - erts_de_links_lock(dep); - erts_de_runlock(dep); if (Bool == am_true) { - ASSERT(dep->cid != NIL); - lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE, - p->common.id); - ++ERTS_LINK_REFC(lnk); - lnk = erts_add_or_lookup_link(&ERTS_P_LINKS(p), LINK_NODE, Node); - ++ERTS_LINK_REFC(lnk); - } - else { - lnk = erts_lookup_link(dep->node_links, p->common.id); - if (lnk != NULL) { - if ((--ERTS_LINK_REFC(lnk)) == 0) { - erts_destroy_link(erts_remove_link(&(dep->node_links), - p->common.id)); - } - } - lnk = erts_lookup_link(ERTS_P_LINKS(p), Node); - if (lnk != NULL) { - if ((--ERTS_LINK_REFC(lnk)) == 0) { - erts_destroy_link(erts_remove_link(&ERTS_P_LINKS(p), - Node)); + ErtsDSigData dsd; + dsd.node = Node; + dep = erts_find_or_insert_dist_entry(Node); + if (dep == erts_this_dist_entry) + goto done; + + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); + + switch (erts_dsig_prepare(&dsd, dep, p, + (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK), + ERTS_DSP_RLOCK, 0, async_connect)) { + case ERTS_DSIG_PREP_NOT_ALIVE: + case ERTS_DSIG_PREP_NOT_CONNECTED: + /* Trap to either send 'nodedown' or do passive connection attempt */ + trap: + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_deref_dist_entry(dep); + BIF_TRAP3(dmonitor_node_trap, p, Node, Bool, Options); + case ERTS_DSIG_PREP_PENDING: + if (!async_connect) { + /* + * Pending connection may fail, so we must trap + * to ensure passive connection attempt + */ + erts_de_runlock(dep); + goto trap; } - } + /*fall through*/ + case ERTS_DSIG_PREP_CONNECTED: + erts_de_links_lock(dep); + erts_de_runlock(dep); + lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE, + p->common.id); + ++ERTS_LINK_REFC(lnk); + lnk = erts_add_or_lookup_link(&ERTS_P_LINKS(p), LINK_NODE, Node); + ++ERTS_LINK_REFC(lnk); + erts_de_links_unlock(dep); + break; + default: + ERTS_ASSERT(! "Invalid dsig prepare result"); + } + erts_deref_dist_entry(dep); + } + else { /* Bool == false */ + dep = erts_sysname_to_connected_dist_entry(Node); + if (!dep) { + /* + * Before OTP-21 this case triggered auto-connect + * and a 'nodedown' message if that failed. + * Now it's a simple no-op which feels more reasonable. 
+ */ + BIF_RET(am_true); + } + if (dep == erts_this_dist_entry) + goto done; + + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_de_rlock(dep); + if (dep->state == ERTS_DE_STATE_IDLE) { + ASSERT(!dep->node_links); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_runlock(dep); + goto done; + } + erts_de_links_lock(dep); + erts_de_runlock(dep); + lnk = erts_lookup_link(dep->node_links, p->common.id); + if (lnk != NULL) { + if ((--ERTS_LINK_REFC(lnk)) == 0) { + erts_destroy_link(erts_remove_link(&(dep->node_links), + p->common.id)); + } + } + lnk = erts_lookup_link(ERTS_P_LINKS(p), Node); + if (lnk != NULL) { + if ((--ERTS_LINK_REFC(lnk)) == 0) { + erts_destroy_link(erts_remove_link(&ERTS_P_LINKS(p), + Node)); + } + } + erts_de_links_unlock(dep); } - erts_de_links_unlock(dep); erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); done: diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index d4765c50b8..b1b7ce9c78 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -40,11 +40,51 @@ #define DFLAG_UNICODE_IO 0x1000 #define DFLAG_DIST_HDR_ATOM_CACHE 0x2000 #define DFLAG_SMALL_ATOM_TAGS 0x4000 -#define DFLAG_INTERNAL_TAGS 0x8000 +#define DFLAG_INTERNAL_TAGS 0x8000 /* used by ETS 'compressed' option */ #define DFLAG_UTF8_ATOMS 0x10000 #define DFLAG_MAP_TAG 0x20000 #define DFLAG_BIG_CREATION 0x40000 #define DFLAG_SEND_SENDER 0x80000 +#define DFLAG_NO_MAGIC 0x100000 /* internal for pending connection */ + +/* Mandatory flags for distribution */ +#define DFLAG_DIST_MANDATORY (DFLAG_EXTENDED_REFERENCES \ + | DFLAG_EXTENDED_PIDS_PORTS \ + | DFLAG_UTF8_ATOMS \ + | DFLAG_NEW_FUN_TAGS) + +/* + * Additional optimistic flags when encoding toward pending connection. + * If remote node (erl_interface) does not supporting these then we may need + * to transcode messages enqueued before connection setup was finished. + */ +#define DFLAG_DIST_HOPEFULLY (DFLAG_EXPORT_PTR_TAG \ + | DFLAG_BIT_BINARIES \ + | DFLAG_DIST_MONITOR \ + | DFLAG_DIST_MONITOR_NAME) + +/* Our preferred set of flags. Used for connection setup handshake */ +#define DFLAG_DIST_DEFAULT (DFLAG_DIST_MANDATORY | DFLAG_DIST_HOPEFULLY \ + | DFLAG_FUN_TAGS \ + | DFLAG_NEW_FLOATS \ + | DFLAG_UNICODE_IO \ + | DFLAG_DIST_HDR_ATOM_CACHE \ + | DFLAG_SMALL_ATOM_TAGS \ + | DFLAG_UTF8_ATOMS \ + | DFLAG_MAP_TAG \ + | DFLAG_BIG_CREATION \ + | DFLAG_SEND_SENDER) + +/* Flags addable by local distr implementations */ +#define DFLAG_DIST_ADDABLE DFLAG_DIST_DEFAULT + +/* Flags rejectable by local distr implementation */ +#define DFLAG_DIST_REJECTABLE (DFLAG_DIST_HDR_ATOM_CACHE \ + | DFLAG_HIDDEN_ATOM_CACHE \ + | DFLAG_ATOM_CACHE) + +/* Flags for all features needing strict order delivery */ +#define DFLAG_DIST_STRICT_ORDER DFLAG_DIST_HDR_ATOM_CACHE /* All flags that should be enabled when term_to_binary/1 is used. 
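The new flag classes in dist.h are easiest to read as set relations: every mandatory and every "hopefull" capability is contained in the default set offered during the handshake, and the strict-order set is contained in the default set as well. A small stand-alone check of those inclusions, with placeholder bit values instead of the real DFLAG_* constants:

#include <assert.h>
#include <stdio.h>

/* Placeholder capability bits; the real values live in dist.h. */
#define F_EXT_REFS     (1u << 0)
#define F_UTF8_ATOMS   (1u << 1)
#define F_EXPORT_PTR   (1u << 2)
#define F_HDR_CACHE    (1u << 3)
#define F_BIG_CREATE   (1u << 4)

#define F_MANDATORY    (F_EXT_REFS | F_UTF8_ATOMS)
#define F_HOPEFULLY    (F_EXPORT_PTR)
#define F_DEFAULT      (F_MANDATORY | F_HOPEFULLY | F_HDR_CACHE | F_BIG_CREATE)
#define F_STRICT_ORDER (F_HDR_CACHE)

/* True iff every bit of 'sub' is also set in 'super'. */
static int is_subset(unsigned sub, unsigned super)
{
    return (sub & ~super) == 0;
}

int main(void)
{
    assert(is_subset(F_MANDATORY,    F_DEFAULT));
    assert(is_subset(F_HOPEFULLY,    F_DEFAULT));
    assert(is_subset(F_STRICT_ORDER, F_DEFAULT));
    puts("flag classes are consistent");
    return 0;
}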
*/ #define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \ @@ -79,52 +119,34 @@ #define DOP_SEND_SENDER_TT 23 /* distribution trap functions */ -extern Export* dsend2_trap; -extern Export* dsend3_trap; -extern Export* dlink_trap; -extern Export* dunlink_trap; extern Export* dmonitor_node_trap; -extern Export* dgroup_leader_trap; -extern Export* dexit_trap; extern Export* dmonitor_p_trap; typedef enum { ERTS_DSP_NO_LOCK, - ERTS_DSP_RLOCK, - ERTS_DSP_RWLOCK + ERTS_DSP_RLOCK } ErtsDSigPrepLock; typedef struct { Process *proc; DistEntry *dep; + Eterm node; /* used if dep == NULL */ Eterm cid; Eterm connection_id; int no_suspend; } ErtsDSigData; -#define ERTS_DE_IS_NOT_CONNECTED(DEP) \ - (ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \ - || erts_lc_rwmtx_is_rwlocked(&(DEP)->rwmtx)), \ - (is_nil((DEP)->cid) || ((DEP)->status & ERTS_DE_SFLG_EXITING))) - -#define ERTS_DE_IS_CONNECTED(DEP) \ - (!ERTS_DE_IS_NOT_CONNECTED((DEP))) - #define ERTS_DE_BUSY_LIMIT (1024*1024) extern int erts_dist_buf_busy_limit; extern int erts_is_alive; /* * erts_dsig_prepare() prepares a send of a distributed signal. - * One of the values defined below are returned. If the returned - * value is another than ERTS_DSIG_PREP_CONNECTED, the - * distributed signal cannot be sent before appropriate actions - * have been taken. Appropriate actions would typically be setting - * up the connection. + * One of the values defined below are returned. */ -/* Connected; signal can be sent. */ +/* Connected; signals can be enqueued and sent. */ #define ERTS_DSIG_PREP_CONNECTED 0 /* Not connected; connection needs to be set up. */ #define ERTS_DSIG_PREP_NOT_CONNECTED 1 @@ -132,43 +154,80 @@ extern int erts_is_alive; #define ERTS_DSIG_PREP_WOULD_SUSPEND 2 /* System not alive (distributed) */ #define ERTS_DSIG_PREP_NOT_ALIVE 3 +/* Pending connection; signals can be enqueued */ +#define ERTS_DSIG_PREP_PENDING 4 ERTS_GLB_INLINE int erts_dsig_prepare(ErtsDSigData *, - DistEntry *, + DistEntry*, Process *, + ErtsProcLocks, ErtsDSigPrepLock, + int, int); ERTS_GLB_INLINE void erts_schedule_dist_command(Port *, DistEntry *); +int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks); + #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE int erts_dsig_prepare(ErtsDSigData *dsdp, DistEntry *dep, Process *proc, + ErtsProcLocks proc_locks, ErtsDSigPrepLock dspl, - int no_suspend) + int no_suspend, + int connect) { - int failure; + int res; + if (!erts_is_alive) return ERTS_DSIG_PREP_NOT_ALIVE; - if (!dep) - return ERTS_DSIG_PREP_NOT_CONNECTED; - if (dspl == ERTS_DSP_RWLOCK) - erts_de_rwlock(dep); - else - erts_de_rlock(dep); - if (ERTS_DE_IS_NOT_CONNECTED(dep)) { - failure = ERTS_DSIG_PREP_NOT_CONNECTED; + if (!dep) { + ASSERT(!connect); + return ERTS_DSIG_PREP_NOT_CONNECTED; + } + +#ifdef ERTS_ENABLE_LOCK_CHECK + if (connect) { + erts_proc_lc_might_unlock(proc, proc_locks); + } +#endif + +retry: + erts_de_rlock(dep); + + if (dep->state == ERTS_DE_STATE_CONNECTED) { + res = ERTS_DSIG_PREP_CONNECTED; + } + else if (dep->state == ERTS_DE_STATE_PENDING) { + res = ERTS_DSIG_PREP_PENDING; + } + else if (dep->state == ERTS_DE_STATE_EXITING) { + res = ERTS_DSIG_PREP_NOT_CONNECTED; + goto fail; + } + else if (connect) { + ASSERT(dep->state == ERTS_DE_STATE_IDLE); + erts_de_runlock(dep); + if (!erts_auto_connect(dep, proc, proc_locks)) { + return ERTS_DSIG_PREP_NOT_ALIVE; + } + goto retry; + } + else { + ASSERT(dep->state == ERTS_DE_STATE_IDLE); + res = ERTS_DSIG_PREP_NOT_CONNECTED; goto fail; } + if (no_suspend) { - if 
(erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) { - failure = ERTS_DSIG_PREP_WOULD_SUSPEND; + if (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) { + res = ERTS_DSIG_PREP_WOULD_SUSPEND; goto fail; - } + } } dsdp->proc = proc; dsdp->dep = dep; @@ -177,15 +236,11 @@ erts_dsig_prepare(ErtsDSigData *dsdp, dsdp->no_suspend = no_suspend; if (dspl == ERTS_DSP_NO_LOCK) erts_de_runlock(dep); - return ERTS_DSIG_PREP_CONNECTED; + return res; fail: - if (dspl == ERTS_DSP_RWLOCK) - erts_de_rwunlock(dep); - else - erts_de_runlock(dep); - return failure; - + erts_de_runlock(dep); + return res; } ERTS_GLB_INLINE @@ -291,6 +346,7 @@ typedef struct TTBSizeContext_ { typedef struct TTBEncodeContext_ { Uint flags; + Uint hopefull_flags; int level; byte* ep; Eterm obj; @@ -332,7 +388,7 @@ struct erts_dsig_send_context { Eterm ctl; Eterm msg; int force_busy; - Uint32 pass_through_size; + Uint32 max_finalize_prepend; Uint data_size, dhdr_ext_size; ErtsAtomCacheMap *acmp; ErtsDistOutputBuf *obuf; @@ -346,11 +402,12 @@ struct erts_dsig_send_context { typedef struct { int suspend; + int connect; Eterm ctl_heap[6]; ErtsDSigData dsd; - DistEntry* dep_to_deref; DistEntry *dep; + int deref_dep; struct erts_dsig_send_context dss; Eterm return_term; diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c index 4ebe37ee1d..23efe3bba4 100644 --- a/erts/emulator/beam/erl_afit_alloc.c +++ b/erts/emulator/beam/erl_afit_alloc.c @@ -181,7 +181,7 @@ static struct { static void ERTS_INLINE atom_init(Eterm *atom, char *name) { - *atom = am_atom_put(name, strlen(name)); + *atom = am_atom_put(name, sys_strlen(name)); } #define AM_INIT(AM) atom_init(&am.AM, #AM) diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 88285d8be6..fa49096d2c 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -160,7 +160,7 @@ enum allctr_type { GOODFIT, BESTFIT, AFIT, - AOFIRSTFIT + FIRSTFIT }; struct au_init { @@ -500,8 +500,9 @@ set_default_test_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = 0; /* Disabled by default */ ip->thr_spec = -1 * erts_no_schedulers; - ip->atype = AOFIRSTFIT; - ip->init.aoff.flavor = AOFF_BF; + ip->atype = FIRSTFIT; + ip->init.aoff.crr_order = FF_AOFF; + ip->init.aoff.blk_order = FF_BF; ip->init.util.name_prefix = "test_"; ip->init.util.alloc_no = ERTS_ALC_A_TEST; ip->init.util.mmbcs = 0; /* Main carrier size */ @@ -599,10 +600,10 @@ static ERTS_INLINE int strategy_support_carrier_migration(struct au_init *auip) { /* - * Currently only aoff, aoffcbf and aoffcaobf support carrier + * Currently only aoff* and ageff* support carrier * migration, i.e, type AOFIRSTFIT. 
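The allocator changes split the old single "flavor" into two independent orderings: how carriers are chosen (address order or age order) and how free blocks inside a carrier are chosen. A compact sketch of the mapping that the -M...as argument parsing implements further below, written as a lookup table; the strategy names follow the diff, while the enum values are stand-ins:

#include <stdio.h>
#include <string.h>

typedef enum { CRR_AOFF, CRR_AGEFF } crr_order;          /* carrier selection */
typedef enum { BLK_AOFF, BLK_AOBF, BLK_BF } blk_order;   /* block selection   */

static const struct { const char *name; crr_order crr; blk_order blk; } strategies[] = {
    { "aoff",       CRR_AOFF,  BLK_AOFF },
    { "aoffcbf",    CRR_AOFF,  BLK_BF   },
    { "aoffcaobf",  CRR_AOFF,  BLK_AOBF },
    { "ageffcaoff", CRR_AGEFF, BLK_AOFF },
    { "ageffcbf",   CRR_AGEFF, BLK_BF   },
    { "ageffcaobf", CRR_AGEFF, BLK_AOBF },
};

int main(void)
{
    const char *alg = "ageffcbf";
    for (size_t i = 0; i < sizeof(strategies)/sizeof(strategies[0]); i++)
        if (strcmp(strategies[i].name, alg) == 0)
            printf("%s -> carrier order %d, block order %d\n",
                   alg, (int)strategies[i].crr, (int)strategies[i].blk);
    return 0;
}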
*/ - return auip->atype == AOFIRSTFIT; + return auip->atype == FIRSTFIT; } static ERTS_INLINE void @@ -617,8 +618,9 @@ adjust_carrier_migration_support(struct au_init *auip) */ if (!strategy_support_carrier_migration(auip)) { /* Default to aoffcbf */ - auip->atype = AOFIRSTFIT; - auip->init.aoff.flavor = AOFF_BF; + auip->atype = FIRSTFIT; + auip->init.aoff.crr_order = FF_AOFF; + auip->init.aoff.blk_order = FF_BF; } } } @@ -1132,7 +1134,7 @@ start_au_allocator(ErtsAlcType_t alctr_n, &init->init.af, &init->init.util); break; - case AOFIRSTFIT: + case FIRSTFIT: as = erts_aoffalc_start((AOFFAllctr_t *) as0, &init->init.aoff, &init->init.util); @@ -1217,31 +1219,41 @@ get_bool_value(char *param_end, char** argv, int* ip) { char *param = argv[*ip]+1; char *value = get_value(param_end, argv, ip); - if (strcmp(value, "true") == 0) + if (sys_strcmp(value, "true") == 0) return 1; - else if (strcmp(value, "false") == 0) + else if (sys_strcmp(value, "false") == 0) return 0; else bad_value(param, param_end, value); return -1; } +static Uint kb_to_bytes(Sint kb, Uint *bytes) +{ + const Uint max = ((~((Uint) 0))/1024) + 1; + + if (kb < 0 || (Uint)kb > max) + return 0; + if ((Uint)kb == max) + *bytes = ~((Uint) 0); + else + *bytes = ((Uint) kb)*1024; + return 1; +} + static Uint get_kb_value(char *param_end, char** argv, int* ip) { Sint tmp; - Uint max = ((~((Uint) 0))/1024) + 1; + Uint bytes = 0; char *rest; char *param = argv[*ip]+1; char *value = get_value(param_end, argv, ip); errno = 0; tmp = (Sint) ErtsStrToSint(value, &rest, 10); - if (errno != 0 || rest == value || tmp < 0 || max < ((Uint) tmp)) + if (errno != 0 || rest == value || !kb_to_bytes(tmp, &bytes)) bad_value(param, param_end, value); - if (max == (Uint) tmp) - return ~((Uint) 0); - else - return ((Uint) tmp)*1024; + return bytes; } static UWord @@ -1328,49 +1340,79 @@ handle_au_arg(struct au_init *auip, switch (sub_param[0]) { case 'a': - if (has_prefix("acul", sub_param)) { - if (!auip->carrier_migration_allowed) { - if (!u_switch) - goto bad_switch; - else { - /* ignore */ - (void) get_acul_value(auip, sub_param + 4, argv, ip); - break; - } - } - auip->init.util.acul = get_acul_value(auip, sub_param + 4, argv, ip); - } + if (sub_param[1] == 'c') { /* Migration parameters "ac*" */ + UWord value; + UWord* wp; + if (!auip->carrier_migration_allowed && !u_switch) + goto bad_switch; + + if (has_prefix("acul", sub_param)) { + value = get_acul_value(auip, sub_param + 4, argv, ip); + wp = &auip->init.util.acul; + } + else if (has_prefix("acnl", sub_param)) { + value = get_amount_value(sub_param + 4, argv, ip); + wp = &auip->init.util.acnl; + } + else if (has_prefix("acfml", sub_param)) { + value = get_amount_value(sub_param + 5, argv, ip); + wp = &auip->init.util.acfml; + } + else + goto bad_switch; + + if (auip->carrier_migration_allowed) + *wp = value; + } else if(has_prefix("asbcst", sub_param)) { auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip); } else if(has_prefix("as", sub_param)) { char *alg = get_value(sub_param + 2, argv, ip); - if (strcmp("bf", alg) == 0) { + if (sys_strcmp("bf", alg) == 0) { auip->atype = BESTFIT; auip->init.bf.ao = 0; } - else if (strcmp("aobf", alg) == 0) { + else if (sys_strcmp("aobf", alg) == 0) { auip->atype = BESTFIT; auip->init.bf.ao = 1; } - else if (strcmp("gf", alg) == 0) { + else if (sys_strcmp("gf", alg) == 0) { auip->atype = GOODFIT; } - else if (strcmp("af", alg) == 0) { + else if (sys_strcmp("af", alg) == 0) { auip->atype = AFIT; } - else if (strcmp("aoff", alg) == 0) { - auip->atype = 
AOFIRSTFIT; - auip->init.aoff.flavor = AOFF_AOFF; + else if (sys_strcmp("aoff", alg) == 0) { + auip->atype = FIRSTFIT; + auip->init.aoff.crr_order = FF_AOFF; + auip->init.aoff.blk_order = FF_AOFF; } - else if (strcmp("aoffcbf", alg) == 0) { - auip->atype = AOFIRSTFIT; - auip->init.aoff.flavor = AOFF_BF; + else if (sys_strcmp("aoffcbf", alg) == 0) { + auip->atype = FIRSTFIT; + auip->init.aoff.crr_order = FF_AOFF; + auip->init.aoff.blk_order = FF_BF; } - else if (strcmp("aoffcaobf", alg) == 0) { - auip->atype = AOFIRSTFIT; - auip->init.aoff.flavor = AOFF_AOBF; + else if (sys_strcmp("aoffcaobf", alg) == 0) { + auip->atype = FIRSTFIT; + auip->init.aoff.crr_order = FF_AOFF; + auip->init.aoff.blk_order = FF_AOBF; } + else if (sys_strcmp("ageffcaoff", alg) == 0) { + auip->atype = FIRSTFIT; + auip->init.aoff.crr_order = FF_AGEFF; + auip->init.aoff.blk_order = FF_AOFF; + } + else if (sys_strcmp("ageffcbf", alg) == 0) { + auip->atype = FIRSTFIT; + auip->init.aoff.crr_order = FF_AGEFF; + auip->init.aoff.blk_order = FF_BF; + } + else if (sys_strcmp("ageffcaobf", alg) == 0) { + auip->atype = FIRSTFIT; + auip->init.aoff.crr_order = FF_AGEFF; + auip->init.aoff.blk_order = FF_AOBF; + } else { bad_value(param, sub_param + 1, alg); } @@ -1646,7 +1688,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) } else if (has_prefix("e", param+2)) { arg = get_value(param+3, argv, &i); - if (strcmp("true", arg) != 0) + if (sys_strcmp("true", arg) != 0) bad_value(param, param+3, arg); } else @@ -1658,20 +1700,20 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) case 'a': { int a; arg = get_value(argv[i]+4, argv, &i); - if (strcmp("min", arg) == 0) { + if (sys_strcmp("min", arg) == 0) { for (a = 0; a < aui_sz; a++) aui[a]->enable = 0; } - else if (strcmp("max", arg) == 0) { + else if (sys_strcmp("max", arg) == 0) { for (a = 0; a < aui_sz; a++) aui[a]->enable = 1; } - else if (strcmp("config", arg) == 0) { + else if (sys_strcmp("config", arg) == 0) { init->erts_alloc_config = 1; } - else if (strcmp("r9c", arg) == 0 - || strcmp("r10b", arg) == 0 - || strcmp("r11b", arg) == 0) { + else if (sys_strcmp("r9c", arg) == 0 + || sys_strcmp("r10b", arg) == 0 + || sys_strcmp("r11b", arg) == 0) { set_default_sl_alloc_opts(&init->sl_alloc); set_default_std_alloc_opts(&init->std_alloc); set_default_ll_alloc_opts(&init->ll_alloc); @@ -1683,7 +1725,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) set_default_driver_alloc_opts(&init->fix_alloc); init->driver_alloc.enable = 0; - if (strcmp("r9c", arg) == 0) { + if (sys_strcmp("r9c", arg) == 0) { init->sl_alloc.enable = 0; init->std_alloc.enable = 0; init->binary_alloc.enable = 0; @@ -1710,18 +1752,18 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) switch (argv[i][3]) { case 's': arg = get_value(argv[i]+4, argv, &i); - if (strcmp("true", arg) == 0) + if (sys_strcmp("true", arg) == 0) init->instr.stat = 1; - else if (strcmp("false", arg) == 0) + else if (sys_strcmp("false", arg) == 0) init->instr.stat = 0; else bad_value(param, param+3, arg); break; case 'm': arg = get_value(argv[i]+4, argv, &i); - if (strcmp("true", arg) == 0) + if (sys_strcmp("true", arg) == 0) init->instr.map = 1; - else if (strcmp("false", arg) == 0) + else if (sys_strcmp("false", arg) == 0) init->instr.map = 0; else bad_value(param, param+3, arg); @@ -1736,9 +1778,9 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) case 'l': if (has_prefix("pm", param+2)) { arg = get_value(argv[i]+5, argv, &i); - if 
(strcmp("all", arg) == 0) + if (sys_strcmp("all", arg) == 0) lock_all_physical_memory = 1; - else if (strcmp("no", arg) == 0) + else if (sys_strcmp("no", arg) == 0) lock_all_physical_memory = 0; else bad_value(param, param+4, arg); @@ -1788,8 +1830,8 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) /* || init->instr.stat || init->instr.map */) { while (i < *argc) { - if(strcmp(argv[i], "-sname") == 0 - || strcmp(argv[i], "-name") == 0) { + if(sys_strcmp(argv[i], "-sname") == 0 + || sys_strcmp(argv[i], "-name") == 0) { if (i + 1 <*argc) { init->instr.nodename = argv[i+1]; break; @@ -2650,7 +2692,7 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc) Eterm atom; if (hpp) atom = am_atom_put(values[i].name, - (int) strlen(values[i].name)); + (int) sys_strlen(values[i].name)); else atom = am_true; @@ -2841,7 +2883,7 @@ erts_allocator_options(void *proc) for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) { Eterm tmp = NIL; atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a), - strlen(ERTS_ALC_A2AD(a))); + sys_strlen(ERTS_ALC_A2AD(a))); if (erts_allctrs_info[a].enabled) { if (erts_allctrs_info[a].alloc_util) { Allctr_t *allctr; @@ -2935,7 +2977,7 @@ erts_allocator_options(void *proc) for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) { if (erts_allctrs_info[a].enabled) { terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a), - strlen(ERTS_ALC_A2AD(a))); + sys_strlen(ERTS_ALC_A2AD(a))); } } @@ -3392,6 +3434,65 @@ erts_request_alloc_info(struct process *c_p, return 1; } +Eterm erts_alloc_set_dyn_param(Process* c_p, Eterm tuple) +{ + ErtsAllocatorThrSpec_t *tspec; + ErtsAlcType_t ai; + Allctr_t* allctr; + Eterm* tp; + Eterm res; + + if (!is_tuple_arity(tuple, 3)) + goto badarg; + + tp = tuple_val(tuple); + + /* + * Ex: {ets_alloc, sbct, 256000} + */ + if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_integer(tp[3])) + goto badarg; + + for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) + if (erts_is_atom_str(erts_alc_a2ad[ai], tp[1], 0)) + break; + + if (ai > ERTS_ALC_A_MAX) + goto badarg; + + if (!erts_allctrs_info[ai].enabled || + !erts_allctrs_info[ai].alloc_util) { + return am_notsup; + } + + if (tp[2] == am_sbct) { + Uint sbct; + int i, ok; + + if (!term_to_Uint(tp[3], &sbct)) + goto badarg; + + tspec = &erts_allctr_thr_spec[ai]; + if (tspec->enabled) { + ok = 0; + for (i = 0; i < tspec->size; i++) { + allctr = tspec->allctr[i]; + ok |= allctr->try_set_dyn_param(allctr, am_sbct, sbct); + } + } + else { + allctr = erts_allctrs_info[ai].extra; + ok = allctr->try_set_dyn_param(allctr, am_sbct, sbct); + } + return ok ? am_ok : am_notsup; + } + return am_notsup; + +badarg: + ERTS_BIF_PREP_ERROR(res, c_p, EXC_BADARG); + return res; +} + /* * The allocator wrapper prelocking stuff below is about the locking order. 
* It only affects wrappers (erl_mtrace.c and erl_instrument.c) that keep locks @@ -3528,7 +3629,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) &init.init.af, &init.init.util); break; - case AOFIRSTFIT: + case FIRSTFIT: allctr = erts_aoffalc_start((AOFFAllctr_t *) erts_alloc(ERTS_ALC_T_UNDEF, sizeof(AOFFAllctr_t)), @@ -3622,7 +3723,9 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0; - case 0xf16: { + case 0xf16: return (UWord) erts_realloc(ERTS_ALC_T_TEST, (void*)a1, (Uint)a2); + + case 0xf17: { Uint extra_hdr_sz = UNIT_CEILING((Uint)a1); ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST]; Uint offset = ts->allctr[0]->mbc_header_size; @@ -3649,7 +3752,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) *(void**)a3 = orig_destroying_mbc; return offset; } - case 0xf17: { + case 0xf18: { ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST]; return ts->allctr[0]->largest_mbc_size; } @@ -3858,7 +3961,7 @@ set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n) *(ui_ptr++) = sz; *(ui_ptr++) = pattern; - memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord)); + sys_memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord)); #ifdef HARD_DEBUG *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n); @@ -3898,7 +4001,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func) (UWord) ptr); } - memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord)); + sys_memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord)); if (post_pattern != MK_PATTERN(n) || pre_pattern != post_pattern) { diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index 117f96a4ad..578a3717d9 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -126,8 +126,10 @@ typedef struct { void *extra; } ErtsAllocatorFunctions_t; -extern ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1]; -extern ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1]; +extern ErtsAllocatorFunctions_t + ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]); +extern ErtsAllocatorInfo_t + ERTS_WRITE_UNLIKELY(erts_allctrs_info[ERTS_ALC_A_MAX+1]); typedef struct { int enabled; @@ -144,7 +146,7 @@ typedef struct ErtsAllocatorWrapper_t_ { void (*unlock)(void); struct ErtsAllocatorWrapper_t_* next; }ErtsAllocatorWrapper_t; -ErtsAllocatorWrapper_t *erts_allctr_wrappers; +extern ErtsAllocatorWrapper_t *erts_allctr_wrappers; extern int erts_allctr_wrapper_prelocked; extern erts_tsd_key_t erts_allctr_prelock_tsd_key; void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper); @@ -169,6 +171,8 @@ __decl_noreturn void erts_realloc_n_enomem(ErtsAlcType_t,void*,Uint) __decl_noreturn void erts_alc_fatal_error(int,int,ErtsAlcType_t,...) __noreturn; +Eterm erts_alloc_set_dyn_param(struct process*, Eterm); + #undef ERTS_HAVE_IS_IN_LITERAL_RANGE #if defined(ARCH_32) || defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) # define ERTS_HAVE_IS_IN_LITERAL_RANGE @@ -430,7 +434,7 @@ NAME##_free(TYPE *p) \ #ifdef DEBUG #define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? (SZ)/10 + 10 : 100) -#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T)) +#define ERTS_PRE_ALLOC_CLOBBER(P, T) sys_memset((void *) (P), 0xfd, sizeof(T)) #else #define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) > 1 ? 
(SZ) : 1) #define ERTS_PRE_ALLOC_CLOBBER(P, T) diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 2960272eab..8107f133aa 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -261,6 +261,7 @@ type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection type BINARY_FIND SHORT_LIVED PROCESSES binary_find type OPEN_PORT_ENV TEMPORARY SYSTEM open_port_env type CRASH_DUMP STANDARD SYSTEM crash_dump +type DIST_TRANSCODE SHORT_LIVED SYSTEM dist_transcode_context type THR_Q_EL STANDARD SYSTEM thr_q_element type THR_Q_EL_SL FIXED_SIZE SYSTEM sl_thr_q_element @@ -282,6 +283,8 @@ type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data type T_THR_PRGR_DATA SHORT_LIVED SYSTEM temp_thr_prgr_data type RELEASE_LAREA SHORT_LIVED SYSTEM release_literal_area +type ENVIRONMENT SYSTEM SYSTEM environment + # # Types used for special emulators # @@ -369,8 +372,6 @@ type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf type FD_TAB LONG_LIVED SYSTEM fd_tab type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path -type ENVIRONMENT TEMPORARY SYSTEM environment -type PUTENV_STR SYSTEM SYSTEM putenv_string type PRT_REP_EXIT STANDARD SYSTEM port_report_exit type SYS_BLOCKING STANDARD SYSTEM sys_blocking @@ -382,9 +383,7 @@ type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf type PRELOADED LONG_LIVED SYSTEM preloaded -type PUTENV_STR SYSTEM SYSTEM putenv_string type WAITER_OBJ LONG_LIVED SYSTEM waiter_object -type ENVIRONMENT SYSTEM SYSTEM environment type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf +endif diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index 4d4bddb93f..d178c2c2c2 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -362,8 +362,10 @@ do { \ #define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0) #define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1) +#define ERTS_CRR_ALCTR_FLG_HOMECOMING (((erts_aint_t) 1) << 2) #define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \ - ERTS_CRR_ALCTR_FLG_BUSY) + ERTS_CRR_ALCTR_FLG_BUSY | \ + ERTS_CRR_ALCTR_FLG_HOMECOMING) #define SBC_HEADER_SIZE \ (UNIT_CEILING(offsetof(Carrier_t, cpool) \ @@ -563,7 +565,7 @@ do { \ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \ } while (0) -#define STAT_MBC_CPOOL_INSERT(AP, CRR) \ +#define STAT_MBC_ABANDON(AP, CRR) \ do { \ UWord csz__ = CARRIER_SZ((CRR)); \ if (IS_MSEG_CARRIER((CRR))) \ @@ -1153,89 +1155,23 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr) ASSERT(crr->next); crr->next->prev = crr->prev; } -} - - #ifdef DEBUG -static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node) -{ - ErtsDoubleLink_t* p; - - ASSERT(node != sentinel); - for (p = sentinel->next; p != sentinel; p = p->next) { - if (p == node) - return 1; - } - return 0; -} -#endif /* DEBUG */ - -static ERTS_INLINE void -link_edl_after(ErtsDoubleLink_t* after_me, ErtsDoubleLink_t* node) -{ - ErtsDoubleLink_t* before_me = after_me->next; - ASSERT(node != after_me && node != before_me); - node->next = before_me; - node->prev = after_me; - before_me->prev = node; - after_me->next = node; -} - -static ERTS_INLINE void -link_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node) -{ - ErtsDoubleLink_t* after_me = before_me->prev; - ASSERT(node != before_me && node != after_me); - node->next = before_me; - node->prev = after_me; - before_me->prev = node; - after_me->next = node; -} - -static ERTS_INLINE void 
-unlink_edl(ErtsDoubleLink_t* node) -{ - node->next->prev = node->prev; - node->prev->next = node->next; -} - -static ERTS_INLINE void -relink_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node) -{ - if (node != before_me && node != before_me->prev) { - unlink_edl(node); - link_edl_before(before_me, node); - } + crr->next = crr; + crr->prev = crr; +#endif } static ERTS_INLINE int is_abandoned(Carrier_t *crr) { - return crr->cpool.abandoned.next != NULL; -} - -static ERTS_INLINE void -link_abandoned_carrier(ErtsDoubleLink_t* list, Carrier_t *crr) -{ - ASSERT(!is_abandoned(crr)); - - link_edl_after(list, &crr->cpool.abandoned); - - ASSERT(crr->cpool.abandoned.next != &crr->cpool.abandoned); - ASSERT(crr->cpool.abandoned.prev != &crr->cpool.abandoned); + return crr->cpool.state != ERTS_MBC_IS_HOME; } static ERTS_INLINE void unlink_abandoned_carrier(Carrier_t *crr) { - ASSERT(is_in_list(&crr->cpool.orig_allctr->cpool.pooled_list, - &crr->cpool.abandoned) || - is_in_list(&crr->cpool.orig_allctr->cpool.traitor_list, - &crr->cpool.abandoned)); - - unlink_edl(&crr->cpool.abandoned); - - crr->cpool.abandoned.next = NULL; - crr->cpool.abandoned.prev = NULL; + if (crr->cpool.state == ERTS_MBC_WAS_POOLED) { + aoff_remove_pooled_mbc(crr->cpool.orig_allctr, crr); + } } static ERTS_INLINE void @@ -1243,24 +1179,19 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr) { if (crr) { erts_aint_t max_size; - erts_aint_t new_val; + erts_aint_t iallctr; max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr); erts_atomic_set_nob(&crr->cpool.max_size, max_size); - new_val = (((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL); + iallctr = erts_atomic_read_nob(&crr->allctr); + ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) + == ((erts_aint_t)allctr | + ERTS_CRR_ALCTR_FLG_IN_POOL | + ERTS_CRR_ALCTR_FLG_BUSY)); -#ifdef ERTS_ALC_CPOOL_DEBUG - { - erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY; - - ERTS_ALC_CPOOL_ASSERT(old_val - == erts_atomic_xchg_relb(&crr->allctr, - new_val)); - } -#else - erts_atomic_set_relb(&crr->allctr, new_val); -#endif + iallctr &= ~ERTS_CRR_ALCTR_FLG_BUSY; + erts_atomic_set_relb(&crr->allctr, iallctr); } } @@ -1658,6 +1589,11 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr) } +static void set_new_allctr_abandon_limit(Allctr_t*); +static void abandon_carrier(Allctr_t*, Carrier_t*); +static void poolify_my_carrier(Allctr_t*, Carrier_t*); +static void enqueue_homecoming(Allctr_t*, Carrier_t*); + static ERTS_INLINE Allctr_t* get_pref_allctr(void *extra) { @@ -1724,9 +1660,23 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, erts_aint_t act; ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY)); - act = erts_atomic_cmpxchg_ddrb(&crr->allctr, - iallctr|ERTS_CRR_ALCTR_FLG_BUSY, - iallctr); + if (iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING) { + /* + * This carrier has just been given back to us by writing + * to crr->allctr with a write barrier (see abandon_carrier). + * + * We need a mathing read barrier to guarantee a correct view + * of the carrier for deallocation work. 
+ */ + act = erts_atomic_cmpxchg_rb(&crr->allctr, + iallctr|ERTS_CRR_ALCTR_FLG_BUSY, + iallctr); + } + else { + act = erts_atomic_cmpxchg_ddrb(&crr->allctr, + iallctr|ERTS_CRR_ALCTR_FLG_BUSY, + iallctr); + } if (act == iallctr) { *busy_pcrr_pp = crr; break; @@ -1742,13 +1692,6 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, erts_mtx_unlock(&pref_allctr->mutex); } } - - ERTS_ALC_CPOOL_ASSERT( - (((iallctr & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t) pref_allctr) - ? (((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL) - || ((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == 0)) - : 1)); - return used_allctr; } } @@ -2000,9 +1943,9 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr) /* Carrier migrated; need to redirect block to new owner... */ int cinit = used_allctr->dd.ix - allctr->dd.ix; - ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); + ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); - DEC_CC(allctr->calls.this_free); + DEC_CC(allctr->calls.this_free); ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type; if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit)) @@ -2011,8 +1954,9 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr) } } -static void -schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr); +static void schedule_dealloc_carrier(Allctr_t*, Carrier_t*); +static void dealloc_my_carrier(Allctr_t*, Carrier_t*); + static ERTS_INLINE int handle_delayed_dealloc(Allctr_t *allctr, @@ -2074,39 +2018,61 @@ handle_delayed_dealloc(Allctr_t *allctr, res = 1; blk = UMEM2BLK(ptr); - if (IS_FREE_LAST_MBC_BLK(blk)) { + if (blk->bhdr == HOMECOMING_MBC_BLK_HDR) { /* * A multiblock carrier that previously has been migrated away - * from us and now is back to be deallocated. For more info - * see schedule_dealloc_carrier(). - * - * Note that we cannot use FBLK_TO_MBC(blk) since it - * data has been overwritten by the queue. 
+ * from us, was sent back to us either because + * - it became empty and we need to deallocated it, or + * - it was inserted into the pool and we need to update our pooled_tree */ - Carrier_t *crr = FIRST_BLK_TO_MBC(allctr, blk); - - /* Restore word overwritten by the dd-queue as it will be read - * if this carrier is pulled from dc_list by cpool_fetch() - */ - ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr); - ERTS_CT_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*)); -#ifdef MBC_ABLK_OFFSET_BITS - blk->u.carrier = crr; -#else - blk->carrier = crr; -#endif + Carrier_t *crr = ErtsContainerStruct(blk, Carrier_t, + cpool.homecoming_dd.blk); + Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr); + erts_aint_t iallctr; ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr)); ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); - ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - != (erts_atomic_read_nob(&crr->allctr) - & ~ERTS_CRR_ALCTR_FLG_MASK)); - - erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); - schedule_dealloc_carrier(allctr, crr); + iallctr = erts_atomic_read_nob(&crr->allctr); + ASSERT(iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING); + while (1) { + if ((iallctr & (~ERTS_CRR_ALCTR_FLG_MASK | + ERTS_CRR_ALCTR_FLG_IN_POOL)) + == (erts_aint_t)allctr) { + /* + * Carrier is home (mine and not in pool) + */ + ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY)); + erts_atomic_set_nob(&crr->allctr, (erts_aint_t)allctr); + if (IS_FREE_LAST_MBC_BLK(first_blk)) + dealloc_my_carrier(allctr, crr); + else + ASSERT(crr->cpool.state == ERTS_MBC_IS_HOME); + } + else { + erts_aint_t exp = iallctr; + erts_aint_t want = iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING; + + iallctr = erts_atomic_cmpxchg_nob(&crr->allctr, + want, + exp); + if (iallctr != exp) + continue; /* retry */ + + ASSERT(crr->cpool.state != ERTS_MBC_IS_HOME); + unlink_abandoned_carrier(crr); + if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL) + poolify_my_carrier(allctr, crr); + else + crr->cpool.state = ERTS_MBC_WAS_TRAITOR; + } + break; + } } else { + ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) != + ErtsContainerStruct(blk, Carrier_t, + cpool.homecoming_dd.blk))); INC_CC(allctr->calls.this_free); @@ -2148,20 +2114,26 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type, erts_alloc_notify_delayed_dealloc(allctr->ix); } - -static void -set_new_allctr_abandon_limit(Allctr_t *allctr); -static void -abandon_carrier(Allctr_t *allctr, Carrier_t *crr); - +static ERTS_INLINE void +update_pooled_tree(Allctr_t *allctr, Carrier_t *crr, Uint blk_sz) +{ + if (allctr == crr->cpool.orig_allctr && crr->cpool.state == ERTS_MBC_WAS_POOLED) { + /* + * Update pooled_tree with a potentially new (larger) max_sz + */ + AOFF_RBTree_t* crr_node = &crr->cpool.pooled; + if (blk_sz > crr_node->hdr.bhdr) { + crr_node->hdr.bhdr = blk_sz; + erts_aoff_larger_max_size(crr_node); + } + } +} static ERTS_INLINE void check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp) { Carrier_t *crr; - - if (busy_pcrr_pp && *busy_pcrr_pp) - return; + UWord ncrr_in_pool, largest_fblk; if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) return; @@ -2170,8 +2142,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp) if (--allctr->cpool.check_limit_count <= 0) set_new_allctr_abandon_limit(allctr); - if (!erts_thr_progress_is_managed_thread()) - return; + ASSERT(erts_thr_progress_is_managed_thread()); if (allctr->cpool.disable_abandon) return; @@ -2179,6 +2150,9 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp) if 
(allctr->mbcs.blocks.curr.size > allctr->cpool.abandon_limit) return; + ncrr_in_pool = erts_atomic_read_nob(&allctr->cpool.stat.no_carriers); + if (ncrr_in_pool >= allctr->cpool.in_pool_limit) + return; crr = FBLK_TO_MBC(fblk); @@ -2189,9 +2163,14 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp) return; if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID - && !erts_thr_progress_has_reached(crr->cpool.thr_prgr)) - return; + && !erts_thr_progress_has_reached(crr->cpool.thr_prgr)) + return; + + largest_fblk = allctr->largest_fblk_in_mbc(allctr, crr); + if (largest_fblk < allctr->cpool.fblk_min_limit) + return; + erts_atomic_set_nob(&crr->cpool.max_size, largest_fblk); abandon_carrier(allctr, crr); } @@ -2237,6 +2216,7 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ else { Carrier_t *busy_pcrr_p; Allctr_t *used_allctr; + used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr, NULL, &busy_pcrr_p); if (used_allctr == allctr) { @@ -2253,10 +2233,10 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ /* Carrier migrated; need to redirect block to new owner... */ int cinit = used_allctr->dd.ix - allctr->dd.ix; - ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); + ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p); - if (dec_cc_on_redirect) - DEC_CC(allctr->calls.this_free); + if (dec_cc_on_redirect) + DEC_CC(allctr->calls.this_free); if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit)) erts_alloc_notify_delayed_dealloc(used_allctr->ix); } @@ -2500,15 +2480,16 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) ASSERT(blk_sz % sizeof(Unit_t) == 0); ASSERT(IS_MBC_BLK(blk)); - if (is_first_blk - && is_last_blk - && allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) { - destroy_carrier(allctr, blk, busy_pcrr_pp); + if (is_first_blk && is_last_blk && crr != allctr->main_carrier) { + destroy_carrier(allctr, blk, busy_pcrr_pp); } else { (*allctr->link_free_block)(allctr, blk); HARD_CHECK_BLK_CARRIER(allctr, blk); - check_abandon_carrier(allctr, blk, busy_pcrr_pp); + if (busy_pcrr_pp && *busy_pcrr_pp) + update_pooled_tree(allctr, crr, blk_sz); + else + check_abandon_carrier(allctr, blk, busy_pcrr_pp); } } @@ -2542,8 +2523,19 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, return NULL; #else /* !MBC_REALLOC_ALWAYS_MOVES */ - if (busy_pcrr_pp && *busy_pcrr_pp) - goto realloc_move; /* Don't want to use carrier in pool */ + if (busy_pcrr_pp && *busy_pcrr_pp) { + /* + * Don't want to use carrier in pool + */ + new_p = mbc_alloc(allctr, size); + if (!new_p) + return NULL; + new_blk = UMEM2BLK(new_p); + ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp)); + sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ)); + mbc_free(allctr, p, busy_pcrr_pp); + return new_p; + } get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size); @@ -2776,7 +2768,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, if (cand_blk_sz < get_blk_sz) { /* We wont fit in cand_blk get a new one */ - realloc_move: + #endif /* !MBC_REALLOC_ALWAYS_MOVES */ new_p = mbc_alloc(allctr, size); @@ -2880,8 +2872,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, #define ERTS_ALC_MAX_DEALLOC_CARRIER 10 -#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20 -#define ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT 10 +#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 100 #define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100 #define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3 @@ -3045,19 +3036,18 @@ cpool_insert(Allctr_t 
*allctr, Carrier_t *crr) ErtsAlcCPoolData_t *cpd1p, *cpd2p; erts_aint_t val; ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel; + Allctr_t *orig_allctr = crr->cpool.orig_allctr; ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ || erts_thr_progress_is_managed_thread()); - ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) - == (erts_aint_t) allctr); - erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, + erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size, (erts_aint_t) crr->cpool.blocks_size); - erts_atomic_add_nob(&allctr->cpool.stat.no_blocks, + erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks, (erts_aint_t) crr->cpool.blocks); - erts_atomic_add_nob(&allctr->cpool.stat.carriers_size, + erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size, (erts_aint_t) CARRIER_SZ(crr)); - erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers); + erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers); /* * We search in 'next' direction and begin by passing @@ -3118,8 +3108,6 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) (erts_aint_t) &crr->cpool, (erts_aint_t) cpd1p); - erts_atomic_set_wb(&crr->allctr, - ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL); LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr)); } @@ -3221,130 +3209,126 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr) static Carrier_t * cpool_fetch(Allctr_t *allctr, UWord size) { - int i, i_stop, has_passed_sentinel; + enum { IGNORANT, HAS_SEEN_SENTINEL, THE_LAST_ONE } loop_state; + int i; Carrier_t *crr; + Carrier_t *reinsert_crr = NULL; ErtsAlcCPoolData_t *cpdp; - ErtsAlcCPoolData_t *cpool_entrance; + ErtsAlcCPoolData_t *cpool_entrance = NULL; ErtsAlcCPoolData_t *sentinel; - ErtsDoubleLink_t* dl; - ErtsDoubleLink_t* first_old_traitor; ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ || erts_thr_progress_is_managed_thread()); i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT; - first_old_traitor = allctr->cpool.traitor_list.next; - cpool_entrance = NULL; LTTNG3(carrier_pool_get, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, (unsigned long)size); /* - * Search my own pooled_list, + * Search my own pooled_tree, * i.e my abandoned carriers that were in the pool last time I checked. */ + do { + erts_aint_t exp, act; + + crr = aoff_lookup_pooled_mbc(allctr, size); + if (!crr) + break; + + ASSERT(crr->cpool.state == ERTS_MBC_WAS_POOLED); + ASSERT(crr->cpool.orig_allctr == allctr); + + aoff_remove_pooled_mbc(allctr, crr); + + exp = erts_atomic_read_nob(&crr->allctr); + if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { + ASSERT((exp & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t)allctr); + if (erts_atomic_read_nob(&crr->cpool.max_size) < size) { + /* + * This carrier has been fetched and inserted back again + * by a foreign allocator. That's why it has a stale search size. + */ + ASSERT(exp & ERTS_CRR_ALCTR_FLG_HOMECOMING); + crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size); + aoff_add_pooled_mbc(allctr, crr); + INC_CC(allctr->cpool.stat.skip_size); + continue; + } + else if (exp & ERTS_CRR_ALCTR_FLG_BUSY) { + /* + * This must be our own carrier as part of a realloc call. + * Skip it to make things simpler. + * Must wait to re-insert to not be found again by lookup. 
+ */ + ASSERT(!reinsert_crr); + reinsert_crr = crr; + INC_CC(allctr->cpool.stat.skip_busy); + continue; + } - dl = allctr->cpool.pooled_list.next; - while(dl != &allctr->cpool.pooled_list) { - erts_aint_t exp, act; - crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned)); - - ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl)); - ASSERT(crr->cpool.orig_allctr == allctr); - dl = dl->next; - exp = erts_atomic_read_rb(&crr->allctr); - if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL - && erts_atomic_read_nob(&crr->cpool.max_size) >= size) { - /* Try to fetch it... */ - act = erts_atomic_cmpxchg_mb(&crr->allctr, - (erts_aint_t) allctr, - exp); - if (act == exp) { - cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr); - unlink_abandoned_carrier(crr); - - /* Move sentinel to continue next search from here */ - relink_edl_before(dl, &allctr->cpool.pooled_list); - return crr; - } - exp = act; - } - if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { - if (!cpool_entrance) - cpool_entrance = &crr->cpool; - } - else { /* Not in pool, move to traitor_list */ - unlink_abandoned_carrier(crr); - link_abandoned_carrier(&allctr->cpool.traitor_list, crr); - } - if (--i <= 0) { - /* Move sentinel to continue next search from here */ - relink_edl_before(dl, &allctr->cpool.pooled_list); - return NULL; - } - } + /* Try to fetch it... */ + act = erts_atomic_cmpxchg_mb(&crr->allctr, + exp & ~ERTS_CRR_ALCTR_FLG_IN_POOL, + exp); + if (act == exp) { + cpool_delete(allctr, allctr, crr); + crr->cpool.state = ERTS_MBC_IS_HOME; + + if (reinsert_crr) + aoff_add_pooled_mbc(allctr, reinsert_crr); + return crr; + } + exp = act; + INC_CC(allctr->cpool.stat.skip_race); + } + else + INC_CC(allctr->cpool.stat.skip_not_pooled); - /* Now search traitor_list. - * i.e carriers employed by other allocators last time I checked. - * They might have been abandoned since then. - */ + /* Not in pool anymore */ + ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY)); + crr->cpool.state = ERTS_MBC_WAS_TRAITOR; - i_stop = (i < ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT ? - 0 : i - ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT); - dl = first_old_traitor; - while(dl != &allctr->cpool.traitor_list) { - erts_aint_t exp, act; - crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned)); - ASSERT(dl != &allctr->cpool.pooled_list); - ASSERT(crr->cpool.orig_allctr == allctr); - dl = dl->next; - exp = erts_atomic_read_rb(&crr->allctr); - if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { - if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY) - && erts_atomic_read_nob(&crr->cpool.max_size) >= size) { - /* Try to fetch it... 
*/ - act = erts_atomic_cmpxchg_mb(&crr->allctr, - (erts_aint_t) allctr, - exp); - if (act == exp) { - cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr); - unlink_abandoned_carrier(crr); + }while (--i > 0); - /* Move sentinel to continue next search from here */ - relink_edl_before(dl, &allctr->cpool.traitor_list); - return crr; - } - exp = act; - } - if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { - if (!cpool_entrance) - cpool_entrance = &crr->cpool; + if (reinsert_crr) + aoff_add_pooled_mbc(allctr, reinsert_crr); - /* Move to pooled_list */ - unlink_abandoned_carrier(crr); - link_abandoned_carrier(&allctr->cpool.pooled_list, crr); - } - } - if (--i <= i_stop) { - /* Move sentinel to continue next search from here */ - relink_edl_before(dl, &allctr->cpool.traitor_list); - if (i > 0) - break; - else - return NULL; - } + /* + * Try find a nice cpool_entrance + */ + while (allctr->cpool.pooled_tree) { + erts_aint_t iallctr; + + crr = ErtsContainerStruct(allctr->cpool.pooled_tree, Carrier_t, cpool.pooled); + iallctr = erts_atomic_read_nob(&crr->allctr); + if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL) { + cpool_entrance = &crr->cpool; + break; + } + /* Not in pool anymore */ + ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY)); + aoff_remove_pooled_mbc(allctr, crr); + crr->cpool.state = ERTS_MBC_WAS_TRAITOR; + + if (--i <= 0) { + INC_CC(allctr->cpool.stat.fail_pooled); + return NULL; + } } + /* * Finally search the shared pool and try employ foreign carriers */ - sentinel = &carrier_pool[allctr->alloc_no].sentinel; if (cpool_entrance) { - /* We saw a pooled carried above, use it as entrance into the pool + /* + * We saw a pooled carried above, use it as entrance into the pool */ cpdp = cpool_entrance; } else { - /* No pooled carried seen above. Start search at cpool sentinel, + /* + * No pooled carried seen above. Start search at cpool sentinel, * but begin by passing one element before trying to fetch. * This in order to avoid contention with threads inserting elements. */ @@ -3354,8 +3338,8 @@ cpool_fetch(Allctr_t *allctr, UWord size) goto check_dc_list; } - has_passed_sentinel = 0; - while (1) { + loop_state = IGNORANT; + do { erts_aint_t exp; cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); if (cpdp == cpool_entrance) { @@ -3364,38 +3348,52 @@ cpool_fetch(Allctr_t *allctr, UWord size) if (cpdp == sentinel) break; } - i = 0; /* Last one to inspect */ + loop_state = THE_LAST_ONE; } else if (cpdp == sentinel) { - if (has_passed_sentinel) { + if (loop_state == HAS_SEEN_SENTINEL) { /* We been here before. cpool_entrance must have been removed */ + INC_CC(allctr->cpool.stat.entrance_removed); break; } cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev)); if (cpdp == sentinel) break; - has_passed_sentinel = 1; + loop_state = HAS_SEEN_SENTINEL; } - crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool)); + crr = ErtsContainerStruct(cpdp, Carrier_t, cpool); exp = erts_atomic_read_rb(&crr->allctr); - if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL) - && (erts_atomic_read_nob(&cpdp->max_size) >= size)) { + + if (erts_atomic_read_nob(&cpdp->max_size) < size) { + INC_CC(allctr->cpool.stat.skip_size); + } + else if ((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY)) + == ERTS_CRR_ALCTR_FLG_IN_POOL) { erts_aint_t act; - /* Try to fetch it... */ - act = erts_atomic_cmpxchg_mb(&crr->allctr, - (erts_aint_t) allctr, - exp); + erts_aint_t want = (((erts_aint_t) allctr) + | (exp & ERTS_CRR_ALCTR_FLG_HOMECOMING)); + /* Try to fetch it... 
*/ + act = erts_atomic_cmpxchg_mb(&crr->allctr, want, exp); if (act == exp) { cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr); if (crr->cpool.orig_allctr == allctr) { unlink_abandoned_carrier(crr); - } + crr->cpool.state = ERTS_MBC_IS_HOME; + } return crr; } } - if (--i <= 0) + + if (exp & ERTS_CRR_ALCTR_FLG_BUSY) + INC_CC(allctr->cpool.stat.skip_busy); + else + INC_CC(allctr->cpool.stat.skip_race); + + if (--i <= 0) { + INC_CC(allctr->cpool.stat.fail_shared); return NULL; - } + } + }while (loop_state != THE_LAST_ONE); check_dc_list: /* Last; check our own pending dealloc carrier list... */ @@ -3404,23 +3402,23 @@ check_dc_list: if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) { Block_t* blk; unlink_carrier(&allctr->cpool.dc_list, crr); -#ifdef ERTS_ALC_CPOOL_DEBUG - ERTS_ALC_CPOOL_ASSERT(erts_atomic_xchg_nob(&crr->allctr, - ((erts_aint_t) allctr)) - == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); -#else - erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); -#endif + ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) + == ((erts_aint_t) allctr)); blk = MBC_TO_FIRST_BLK(allctr, crr); ASSERT(FBLK_TO_MBC(blk) == crr); allctr->link_free_block(allctr, blk); return crr; } crr = crr->prev; - if (--i <= 0) + if (--i <= 0) { + INC_CC(allctr->cpool.stat.fail_pend_dealloc); return NULL; + } } + if (i != ERTS_ALC_CPOOL_MAX_FETCH_INSPECT) + INC_CC(allctr->cpool.stat.fail); + return NULL; } @@ -3475,9 +3473,6 @@ static void schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) { Allctr_t *orig_allctr; - Block_t *blk; - int check_pending_dealloc; - erts_aint_t max_size; ASSERT(IS_MB_CARRIER(crr)); @@ -3488,9 +3483,17 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) orig_allctr = crr->cpool.orig_allctr; - if (allctr != orig_allctr) { - int cinit = orig_allctr->dd.ix - allctr->dd.ix; - + if (allctr == orig_allctr) { + if (!(erts_atomic_read_nob(&crr->allctr) & ERTS_CRR_ALCTR_FLG_HOMECOMING)) { + dealloc_my_carrier(allctr, crr); + } + /*else + * Carrier was abandoned earlier by other thread and + * is still waiting for us in dd-queue. + * handle_delayed_dealloc() will handle it when crr is dequeued. + */ + } + else { /* * We send the carrier to its origin for deallocation. * This in order: @@ -3499,29 +3502,39 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) * - to ensure that we always only reuse empty carriers * originating from our own thread specific mseg_alloc * instance which is beneficial on NUMA systems. - * - * The receiver will recognize that this is a carrier to - * deallocate (and not a block which is the common case) - * since the block is an mbc block that is free and last - * in the carrier. 
*/ - blk = MBC_TO_FIRST_BLK(allctr, crr); - ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(blk)); - - ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, blk)); - ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk)); - ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk)); - ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - == (erts_atomic_read_nob(&crr->allctr) - & ~ERTS_CRR_ALCTR_FLG_MASK)); + erts_aint_t iallctr; +#ifdef ERTS_ALC_CPOOL_DEBUG + Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr); + ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(first_blk)); + + ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, first_blk)); + ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(first_blk)); + ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, first_blk)); + ERTS_ALC_CPOOL_ASSERT((erts_atomic_read_nob(&crr->allctr) + & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) + == (erts_aint_t) allctr); +#endif - if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit)) - erts_alloc_notify_delayed_dealloc(orig_allctr->ix); - return; + iallctr = (erts_aint_t)orig_allctr | ERTS_CRR_ALCTR_FLG_HOMECOMING; + if (!(erts_atomic_xchg_nob(&crr->allctr, iallctr) + & ERTS_CRR_ALCTR_FLG_HOMECOMING)) { + enqueue_homecoming(allctr, crr); + } } +} + +static void dealloc_my_carrier(Allctr_t *allctr, Carrier_t *crr) +{ + Block_t *blk; + int check_pending_dealloc; + erts_aint_t max_size; - if (is_abandoned(crr)) - unlink_abandoned_carrier(crr); + ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); + if (is_abandoned(crr)) { + unlink_abandoned_carrier(crr); + crr->cpool.state = ERTS_MBC_IS_HOME; + } if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID || erts_thr_progress_has_reached(crr->cpool.thr_prgr)) { @@ -3553,6 +3566,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) static ERTS_INLINE void cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr) { + crr->cpool.homecoming_dd.blk.bhdr = HOMECOMING_MBC_BLK_HDR; erts_atomic_init_nob(&crr->cpool.next, ERTS_AINT_NULL); erts_atomic_init_nob(&crr->cpool.prev, ERTS_AINT_NULL); crr->cpool.orig_allctr = allctr; @@ -3571,8 +3585,7 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr) limit = (csz/100)*allctr->cpool.util_limit; crr->cpool.abandon_limit = limit; } - crr->cpool.abandoned.next = NULL; - crr->cpool.abandoned.prev = NULL; + crr->cpool.state = ERTS_MBC_IS_HOME; } static void @@ -3598,23 +3611,62 @@ set_new_allctr_abandon_limit(Allctr_t *allctr) static void abandon_carrier(Allctr_t *allctr, Carrier_t *crr) { - erts_aint_t max_size; + erts_aint_t iallctr; - STAT_MBC_CPOOL_INSERT(allctr, crr); + STAT_MBC_ABANDON(allctr, crr); unlink_carrier(&allctr->mbc_list, crr); - if (crr->cpool.orig_allctr == allctr) { - link_abandoned_carrier(&allctr->cpool.pooled_list, crr); + allctr->remove_mbc(allctr, crr); + set_new_allctr_abandon_limit(allctr); + + cpool_insert(allctr, crr); + + + iallctr = erts_atomic_read_nob(&crr->allctr); + if (allctr == crr->cpool.orig_allctr) { + /* preserve HOMECOMING flag */ + ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr); + erts_atomic_set_wb(&crr->allctr, iallctr | ERTS_CRR_ALCTR_FLG_IN_POOL); + poolify_my_carrier(allctr, crr); } + else { + ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr); + iallctr = ((erts_aint_t)crr->cpool.orig_allctr | + ERTS_CRR_ALCTR_FLG_HOMECOMING | + ERTS_CRR_ALCTR_FLG_IN_POOL); + if (!(erts_atomic_xchg_wb(&crr->allctr, iallctr) + & ERTS_CRR_ALCTR_FLG_HOMECOMING)) { + + enqueue_homecoming(allctr, crr); + } + } +} - allctr->remove_mbc(allctr, crr); +static void 
+enqueue_homecoming(Allctr_t* allctr, Carrier_t* crr) +{ + Allctr_t* orig_allctr = crr->cpool.orig_allctr; + const int cinit = orig_allctr->dd.ix - allctr->dd.ix; + Block_t* dd_blk = &crr->cpool.homecoming_dd.blk; - max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr); - erts_atomic_set_nob(&crr->cpool.max_size, max_size); + /* + * The receiver will recognize this as a carrier + * (and not a block which is the common case) + * since the block header is HOMECOMING_MBC_BLK_HDR. + */ + ASSERT(dd_blk->bhdr == HOMECOMING_MBC_BLK_HDR); + if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(dd_blk), cinit)) + erts_alloc_notify_delayed_dealloc(orig_allctr->ix); +} - cpool_insert(allctr, crr); +static void +poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr) +{ + ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); - set_new_allctr_abandon_limit(allctr); + crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size); + aoff_add_pooled_mbc(allctr, crr); + crr->cpool.state = ERTS_MBC_WAS_POOLED; } static void @@ -3771,6 +3823,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) crr = cpool_fetch(allctr, blk_sz); if (crr) { STAT_MBC_CPOOL_FETCH(allctr, crr); + INC_CC(allctr->cpool.stat.fetch); link_carrier(&allctr->mbc_list, crr); (*allctr->add_mbc)(allctr, crr); blk = (*allctr->get_free_block)(allctr, blk_sz, NULL, 0); @@ -4128,13 +4181,18 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) #endif if (busy_pcrr_pp && *busy_pcrr_pp) { + erts_aint_t iallctr = erts_atomic_read_nob(&crr->allctr); ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr); - *busy_pcrr_pp = NULL; - ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) - == (((erts_aint_t) allctr) - | ERTS_CRR_ALCTR_FLG_IN_POOL - | ERTS_CRR_ALCTR_FLG_BUSY)); - erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) + == (((erts_aint_t) allctr) + | ERTS_CRR_ALCTR_FLG_IN_POOL + | ERTS_CRR_ALCTR_FLG_BUSY)); + ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); + + *busy_pcrr_pp = NULL; + erts_atomic_set_nob(&crr->allctr, + (iallctr & ~(ERTS_CRR_ALCTR_FLG_IN_POOL | + ERTS_CRR_ALCTR_FLG_BUSY))); cpool_delete(allctr, allctr, crr); } else @@ -4184,7 +4242,6 @@ static struct { Eterm e; Eterm t; Eterm ramv; - Eterm sbct; #if HAVE_ERTS_MSEG Eterm asbcst; Eterm rsbcst; @@ -4201,6 +4258,8 @@ static struct { Eterm smbcs; Eterm mbcgs; Eterm acul; + Eterm acnl; + Eterm acfml; #if HAVE_ERTS_MSEG Eterm mmc; @@ -4212,6 +4271,17 @@ static struct { Eterm mbcs; Eterm mbcs_pool; + Eterm fetch; + Eterm fail_pooled; + Eterm fail_shared; + Eterm fail_pend_dealloc; + Eterm fail; + Eterm skip_size; + Eterm skip_busy; + Eterm skip_not_pooled; + Eterm skip_homecoming; + Eterm skip_race; + Eterm entrance_removed; Eterm sbcs; Eterm sys_alloc_carriers_size; @@ -4245,7 +4315,7 @@ static Eterm fix_type_atoms[ERTS_ALC_NO_FIXED_SIZES]; static ERTS_INLINE void atom_init(Eterm *atom, char *name) { - *atom = am_atom_put(name, strlen(name)); + *atom = am_atom_put(name, sys_strlen(name)); } #define AM_INIT(AM) atom_init(&am.AM, #AM) @@ -4272,7 +4342,6 @@ init_atoms(Allctr_t *allctr) AM_INIT(e); AM_INIT(t); AM_INIT(ramv); - AM_INIT(sbct); #if HAVE_ERTS_MSEG AM_INIT(asbcst); AM_INIT(rsbcst); @@ -4289,6 +4358,8 @@ init_atoms(Allctr_t *allctr) AM_INIT(smbcs); AM_INIT(mbcgs); AM_INIT(acul); + AM_INIT(acnl); + AM_INIT(acfml); #if HAVE_ERTS_MSEG AM_INIT(mmc); @@ -4300,6 +4371,17 @@ init_atoms(Allctr_t *allctr) AM_INIT(mbcs); AM_INIT(mbcs_pool); + AM_INIT(fetch); + 
AM_INIT(fail_pooled); + AM_INIT(fail_shared); + AM_INIT(fail_pend_dealloc); + AM_INIT(fail); + AM_INIT(skip_size); + AM_INIT(skip_busy); + AM_INIT(skip_not_pooled); + AM_INIT(skip_homecoming); + AM_INIT(skip_race); + AM_INIT(entrance_removed); AM_INIT(sbcs); AM_INIT(sys_alloc_carriers_size); @@ -4334,7 +4416,7 @@ init_atoms(Allctr_t *allctr) for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) { ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix; char *name = (char *) ERTS_ALC_N2TD(n); - size_t len = strlen(name); + size_t len = sys_strlen(name); fix_type_atoms[ix] = am_atom_put(name, len); } } @@ -4583,9 +4665,56 @@ info_cpool(Allctr_t *allctr, if (hpp || szp) { res = NIL; + + if (!sz_only) { + add_3tup(hpp, szp, &res, am.fail_pooled, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pooled)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pooled))); + + add_3tup(hpp, szp, &res, am.fail_shared, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_shared)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_shared))); + + add_3tup(hpp, szp, &res, am.fail_pend_dealloc, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pend_dealloc)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pend_dealloc))); + + add_3tup(hpp, szp, &res, am.fail, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail))); + + add_3tup(hpp, szp, &res, am.fetch, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fetch)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fetch))); + + add_3tup(hpp, szp, &res, am.skip_size, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_size)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_size))); + + add_3tup(hpp, szp, &res, am.skip_busy, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_busy)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_busy))); + + add_3tup(hpp, szp, &res, am.skip_not_pooled, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_not_pooled)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_not_pooled))); + + add_3tup(hpp, szp, &res, am.skip_homecoming, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_homecoming)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_homecoming))); + + add_3tup(hpp, szp, &res, am.skip_race, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_race)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_race))); + + add_3tup(hpp, szp, &res, am.entrance_removed, + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)), + bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed))); + add_2tup(hpp, szp, &res, am.carriers_size, bld_unstable_uint(hpp, szp, csz)); + } if (!sz_only) add_2tup(hpp, szp, &res, am.carriers, @@ -4725,20 +4854,20 @@ make_name_atoms(Allctr_t *allctr) char realloc[] = "realloc"; char free[] = "free"; char buf[MAX_ATOM_CHARACTERS]; - size_t prefix_len = strlen(allctr->name_prefix); + size_t prefix_len = sys_strlen(allctr->name_prefix); if (prefix_len > MAX_ATOM_CHARACTERS + sizeof(realloc) - 1) erts_exit(ERTS_ERROR_EXIT,"Too long allocator name: %salloc\n",allctr->name_prefix); - 
memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len); + sys_memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len); - memcpy((void *) &buf[prefix_len], (void *) alloc, sizeof(alloc) - 1); + sys_memcpy((void *) &buf[prefix_len], (void *) alloc, sizeof(alloc) - 1); allctr->name.alloc = am_atom_put(buf, prefix_len + sizeof(alloc) - 1); - memcpy((void *) &buf[prefix_len], (void *) realloc, sizeof(realloc) - 1); + sys_memcpy((void *) &buf[prefix_len], (void *) realloc, sizeof(realloc) - 1); allctr->name.realloc = am_atom_put(buf, prefix_len + sizeof(realloc) - 1); - memcpy((void *) &buf[prefix_len], (void *) free, sizeof(free) - 1); + sys_memcpy((void *) &buf[prefix_len], (void *) free, sizeof(free) - 1); allctr->name.free = am_atom_put(buf, prefix_len + sizeof(free) - 1); } @@ -4844,7 +4973,7 @@ info_options(Allctr_t *allctr, Uint *szp) { Eterm res = THE_NON_VALUE; - int acul; + UWord acul, acnl, acfml; if (!allctr) { if (print_to_p) @@ -4857,6 +4986,8 @@ info_options(Allctr_t *allctr, } acul = allctr->cpool.util_limit; + acnl = allctr->cpool.in_pool_limit; + acfml = allctr->cpool.fblk_min_limit; if (print_to_p) { char topt[21]; /* Enough for any 64-bit integer */ @@ -4884,7 +5015,7 @@ info_options(Allctr_t *allctr, "option lmbcs: %beu\n" "option smbcs: %beu\n" "option mbcgs: %beu\n" - "option acul: %d\n", + "option acul: %bpu\n", topt, allctr->ramv ? "true" : "false", allctr->sbc_threshold, @@ -4909,9 +5040,15 @@ info_options(Allctr_t *allctr, hpp, szp); if (hpp || szp) { + add_2tup(hpp, szp, &res, + am.acfml, + bld_uint(hpp, szp, acfml)); + add_2tup(hpp, szp, &res, + am.acnl, + bld_uint(hpp, szp, acnl)); add_2tup(hpp, szp, &res, am.acul, - bld_uint(hpp, szp, (UWord) acul)); + bld_uint(hpp, szp, acul)); add_2tup(hpp, szp, &res, am.mbcgs, bld_uint(hpp, szp, allctr->mbc_growth_stages)); @@ -4947,7 +5084,7 @@ info_options(Allctr_t *allctr, bld_uint(hpp, szp, allctr->mseg_opt.abs_shrink_th)); #endif add_2tup(hpp, szp, &res, - am.sbct, + am_sbct, bld_uint(hpp, szp, allctr->sbc_threshold)); add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false); add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false)); @@ -5481,12 +5618,13 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p) pref_allctr = get_pref_allctr(extra); used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_IF_USED, p, NULL, &busy_pcrr_p); - if (pref_allctr != used_allctr) + if (pref_allctr != used_allctr) { enqueue_dealloc_other_instance(type, - used_allctr, - p, - (used_allctr->dd.ix - - pref_allctr->dd.ix)); + used_allctr, + p, + (used_allctr->dd.ix + - pref_allctr->dd.ix)); + } else { ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr); do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p); @@ -5854,6 +5992,37 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra, +static Uint adjust_sbct(Allctr_t* allctr, Uint sbct) +{ +#ifndef ARCH_64 + if (sbct > 0) { + Uint max_mbc_block_sz = UNIT_CEILING(sbct - 1 + ABLK_HDR_SZ); + if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK + || max_mbc_block_sz < sbct) { /* wrap around */ + /* + * By limiting sbc_threshold to (hard limit - min_block_size) + * we avoid having to split off free "residue blocks" + * smaller than min_block_size. 
+ */ + max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1); + sbct = max_mbc_block_sz - ABLK_HDR_SZ + 1; + } + } +#endif + return sbct; +} + +int erts_alcu_try_set_dyn_param(Allctr_t* allctr, Eterm param, Uint value) +{ + const Uint MIN_DYN_SBCT = 4000; /* a lame catastrophe prevention */ + + if (param == am_sbct && value >= MIN_DYN_SBCT) { + allctr->sbc_threshold = adjust_sbct(allctr, value); + return 1; + } + return 0; +} + /* ------------------------------------------------------------------------- */ int @@ -5941,10 +6110,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->min_block_size = sz; } - allctr->cpool.pooled_list.next = &allctr->cpool.pooled_list; - allctr->cpool.pooled_list.prev = &allctr->cpool.pooled_list; - allctr->cpool.traitor_list.next = &allctr->cpool.traitor_list; - allctr->cpool.traitor_list.prev = &allctr->cpool.traitor_list; + allctr->cpool.pooled_tree = NULL; allctr->cpool.dc_list.first = NULL; allctr->cpool.dc_list.last = NULL; allctr->cpool.abandon_limit = 0; @@ -5954,24 +6120,18 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0); erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0); allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; - allctr->cpool.util_limit = init->ts ? 0 : init->acul; - - allctr->sbc_threshold = init->sbct; -#ifndef ARCH_64 - if (allctr->sbc_threshold > 0) { - Uint max_mbc_block_sz = UNIT_CEILING(allctr->sbc_threshold - 1 + ABLK_HDR_SZ); - if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK - || max_mbc_block_sz < allctr->sbc_threshold) { /* wrap around */ - /* - * By limiting sbc_threshold to (hard limit - min_block_size) - * we avoid having to split off free "residue blocks" - * smaller than min_block_size. 
- */ - max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1); - allctr->sbc_threshold = max_mbc_block_sz - ABLK_HDR_SZ + 1; - } + if (!init->ts && init->acul && init->acnl) { + allctr->cpool.util_limit = init->acul; + allctr->cpool.in_pool_limit = init->acnl; + allctr->cpool.fblk_min_limit = init->acfml; } -#endif + else { + allctr->cpool.util_limit = 0; + allctr->cpool.in_pool_limit = 0; + allctr->cpool.fblk_min_limit = 0; + } + + allctr->sbc_threshold = adjust_sbct(allctr, init->sbct); #if HAVE_ERTS_MSEG if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100) @@ -6022,6 +6182,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->sys_realloc = &erts_alcu_sys_realloc; allctr->sys_dealloc = &erts_alcu_sys_dealloc; } + + allctr->try_set_dyn_param = &erts_alcu_try_set_dyn_param; + #if HAVE_ERTS_MSEG if (init->mseg_alloc) { ASSERT(init->mseg_realloc && init->mseg_dealloc); @@ -6036,6 +6199,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->mseg_realloc = &erts_alcu_mseg_realloc; allctr->mseg_dealloc = &erts_alcu_mseg_dealloc; } + /* If a custom carrier alloc function is specified, make sure it's used */ if (init->mseg_alloc && !init->sys_alloc) { allctr->crr_set_flgs = CFLG_FORCE_MSEG; diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index faeb5ef368..9a6de2bb75 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -61,7 +61,9 @@ typedef struct { UWord lmbcs; UWord smbcs; UWord mbcgs; - int acul; + UWord acul; + UWord acnl; + UWord acfml; void *fix; size_t *fix_type_size; @@ -116,6 +118,8 @@ typedef struct { 1024*1024, /* (bytes) smbcs: smallest mbc size */\ 10, /* (amount) mbcgs: mbc growth stages */\ 0, /* (%) acul: abandon carrier utilization limit */\ + 1000, /* (amount) acnl: abandoned carriers number limit */\ + 0, /* (bytes) acfml: abandoned carrier fblk min limit */\ /* --- Data not options -------------------------------------------- */\ NULL, /* (ptr) fix */\ NULL /* (ptr) fix_type_size */\ @@ -149,6 +153,8 @@ typedef struct { 128*1024, /* (bytes) smbcs: smallest mbc size */\ 10, /* (amount) mbcgs: mbc growth stages */\ 0, /* (%) acul: abandon carrier utilization limit */\ + 1000, /* (amount) acnl: abandoned carriers number limit */\ + 0, /* (bytes) acfml: abandoned carrier fblk min limit */\ /* --- Data not options -------------------------------------------- */\ NULL, /* (ptr) fix */\ NULL /* (ptr) fix_type_size */\ @@ -216,6 +222,8 @@ void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int supe void erts_lcnt_update_allocator_locks(int enable); #endif +int erts_alcu_try_set_dyn_param(Allctr_t*, Eterm param, Uint value); + #endif /* !ERL_ALLOC_UTIL__ */ #if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__) @@ -296,41 +304,7 @@ void erts_lcnt_update_allocator_locks(int enable); typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t; - -typedef struct ErtsDoubleLink_t_ { - struct ErtsDoubleLink_t_ *next; - struct ErtsDoubleLink_t_ *prev; -}ErtsDoubleLink_t; - -typedef struct { - erts_atomic_t next; - erts_atomic_t prev; - Allctr_t *orig_allctr; /* read-only while carrier is alive */ - ErtsThrPrgrVal thr_prgr; - erts_atomic_t max_size; - UWord abandon_limit; - UWord blocks; - UWord blocks_size; - ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */ -} ErtsAlcCPoolData_t; - - typedef struct Carrier_t_ Carrier_t; -struct Carrier_t_ { - UWord chdr; - Carrier_t *next; - Carrier_t *prev; - 
erts_atomic_t allctr; - ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */ -}; - -#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ - ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) - -typedef struct { - Carrier_t *first; - Carrier_t *last; -} CarrierList_t; typedef struct { UWord bhdr; @@ -344,6 +318,22 @@ typedef struct { #endif } Block_t; +typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t; + +union ErtsAllctrDDBlock_t_ { + erts_atomic_t atmc_next; + ErtsAllctrDDBlock_t *ptr_next; +}; + +typedef struct { + Block_t blk; +#if !MBC_ABLK_OFFSET_BITS + ErtsAllctrDDBlock_t umem_; +#endif +} ErtsFakeDDBlock_t; + + + #define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0) #define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1) #define LAST_BLK_HDR_FLG (((UWord) 1) << 2) @@ -352,14 +342,13 @@ typedef struct { (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) /* - * FREE_LAST_MBC_BLK_HDR_FLGS is a special flag combo used for - * distinguishing empty mbc's from allocated blocks in - * handle_delayed_dealloc(). + * HOMECOMING_MBC_BLK_HDR is a special block header combo used for + * distinguishing MBC's from allocated blocks in handle_delayed_dealloc(). */ -#define FREE_LAST_MBC_BLK_HDR_FLGS (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) +#define HOMECOMING_MBC_BLK_HDR (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) #define IS_FREE_LAST_MBC_BLK(B) \ - (((B)->bhdr & FLG_MASK) == FREE_LAST_MBC_BLK_HDR_FLGS) + (((B)->bhdr & FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)) #define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG) #define IS_MBC_BLK(B) (!IS_SBC_BLK((B))) @@ -383,6 +372,57 @@ typedef struct { typedef UWord FreeBlkFtr_t; /* Footer of a free block */ +/* This AOFF stuff really belong in erl_ao_firstfit_alloc.h */ +typedef struct AOFF_RBTree_t_ AOFF_RBTree_t; +struct AOFF_RBTree_t_ { + Block_t hdr; + AOFF_RBTree_t *parent; + AOFF_RBTree_t *left; + AOFF_RBTree_t *right; + Uint32 flags; + Uint32 max_sz; /* of all blocks in this sub-tree */ +}; + +void aoff_add_pooled_mbc(Allctr_t*, Carrier_t*); +void aoff_remove_pooled_mbc(Allctr_t*, Carrier_t*); +Carrier_t* aoff_lookup_pooled_mbc(Allctr_t*, Uint size); +void erts_aoff_larger_max_size(AOFF_RBTree_t *node); + +typedef struct { + ErtsFakeDDBlock_t homecoming_dd; + erts_atomic_t next; + erts_atomic_t prev; + Allctr_t *orig_allctr; /* read-only while carrier is alive */ + ErtsThrPrgrVal thr_prgr; + erts_atomic_t max_size; + UWord abandon_limit; + UWord blocks; + UWord blocks_size; + enum { + ERTS_MBC_IS_HOME, + ERTS_MBC_WAS_POOLED, + ERTS_MBC_WAS_TRAITOR + } state; + AOFF_RBTree_t pooled; /* node in pooled_tree */ +} ErtsAlcCPoolData_t; + +struct Carrier_t_ { + UWord chdr; + Carrier_t *next; + Carrier_t *prev; + erts_atomic_t allctr; + ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */ +}; + +#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ + ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) + +typedef struct { + Carrier_t *first; + Carrier_t *last; +} CarrierList_t; + + typedef Uint64 CallCounter_t; typedef struct { @@ -419,13 +459,6 @@ typedef struct { #endif -typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t; - -union ErtsAllctrDDBlock_t_ { - erts_atomic_t atmc_next; - ErtsAllctrDDBlock_t *ptr_next; -}; - typedef struct { ErtsAllctrDDBlock_t marker; erts_atomic_t last; @@ -537,25 +570,37 @@ struct Allctr_t_ { UWord crr_set_flgs; UWord crr_clr_flgs; - /* Carriers */ + /* Carriers *employed* by this allocator */ CarrierList_t mbc_list; CarrierList_t sbc_list; struct { - /* pooled_list, traitor list 
and dc_list contain only - carriers _created_ by this allocator */ - ErtsDoubleLink_t pooled_list; - ErtsDoubleLink_t traitor_list; + /* pooled_tree and dc_list contain only + carriers *created* by this allocator */ + AOFF_RBTree_t* pooled_tree; CarrierList_t dc_list; UWord abandon_limit; int disable_abandon; int check_limit_count; - int util_limit; + UWord util_limit; /* acul */ + UWord in_pool_limit; /* acnl */ + UWord fblk_min_limit; /* acmfl */ struct { erts_atomic_t blocks_size; erts_atomic_t no_blocks; erts_atomic_t carriers_size; erts_atomic_t no_carriers; + CallCounter_t fail_pooled; + CallCounter_t fail_shared; + CallCounter_t fail_pend_dealloc; + CallCounter_t fail; + CallCounter_t fetch; + CallCounter_t skip_size; + CallCounter_t skip_busy; + CallCounter_t skip_not_pooled; + CallCounter_t skip_homecoming; + CallCounter_t skip_race; + CallCounter_t entrance_removed; } stat; } cpool; @@ -589,6 +634,8 @@ struct Allctr_t_ { void* (*sys_realloc)(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign); void (*sys_dealloc)(Allctr_t *allctr, void *ptr, Uint size, int superalign); + int (*try_set_dyn_param)(Allctr_t*, Eterm param, Uint value); + void (*init_atoms) (void); #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c index 05ba1f9891..0c5545401a 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.c +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c @@ -20,7 +20,7 @@ /* - * Description: An "address order first fit" allocator + * Description: A family of "first fit" allocator strategies * based on a Red-Black (binary search) Tree. The search, * insert, and delete operations are all O(log n) operations * on a Red-Black Tree. @@ -40,6 +40,10 @@ * sorting order. Blocks within the same carrier are sorted * wrt size instead of address. The 'max_sz' field is maintained * in order to dismiss entire carriers with too small blocks. + * Age Order: + * Carriers are ordered by creation time instead of address. + * Oldest carrier with a large enough free block is chosen. + * No age order supported for blocks. * * Authors: Rickard Green/Sverker Eriksson */ @@ -53,10 +57,12 @@ #include "erl_ao_firstfit_alloc.h" #ifdef DEBUG +# define IS_DEBUG 1 #if 0 #define HARD_DEBUG #endif #else +# define IS_DEBUG 0 #undef HARD_DEBUG #endif @@ -92,18 +98,6 @@ #define RBT_ASSERT(x) #endif - -/* Types... 
*/ -typedef struct AOFF_RBTree_t_ AOFF_RBTree_t; - -struct AOFF_RBTree_t_ { - Block_t hdr; - AOFF_RBTree_t *parent; - AOFF_RBTree_t *left; - AOFF_RBTree_t *right; - Uint32 flags; - Uint32 max_sz; /* of all blocks in this sub-tree */ -}; #define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr) /* BF block nodes keeps list of all with equal size @@ -121,6 +115,7 @@ typedef struct AOFF_Carrier_t_ AOFF_Carrier_t; struct AOFF_Carrier_t_ { Carrier_t crr; AOFF_RBTree_t rbt_node; /* My node in the carrier tree */ + Sint64 birth_time; AOFF_RBTree_t* root; /* Root of my block tree */ }; #define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node) @@ -136,12 +131,12 @@ struct AOFF_Carrier_t_ { */ #ifdef HARD_DEBUG -# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE) -# define HARD_CHECK_TREE(CRR,FLV,ROOT,SZ) check_tree(CRR, FLV, ROOT, SZ) -static AOFF_RBTree_t * check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, Uint); +# define HARD_CHECK_IS_MEMBER(ROOT,NODE) ASSERT(rbt_is_member(ROOT,NODE)) +# define HARD_CHECK_TREE(CRR,ORDER,ROOT,SZ) check_tree(CRR, ORDER, ROOT, SZ) +static AOFF_RBTree_t * check_tree(Carrier_t*, enum AOFFSortOrder, AOFF_RBTree_t*, Uint); #else # define HARD_CHECK_IS_MEMBER(ROOT,NODE) -# define HARD_CHECK_TREE(CRR,FLV,ROOT,SZ) +# define HARD_CHECK_TREE(CRR,ORDER,ROOT,SZ) #endif @@ -179,25 +174,63 @@ static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node, else ASSERT(new_max == old_max); } -static ERTS_INLINE SWord cmp_blocks(enum AOFF_Flavor flavor, +/* + * Set possibly new larger 'max_sz' of node and propagate change toward root + */ +void erts_aoff_larger_max_size(AOFF_RBTree_t *node) +{ + AOFF_RBTree_t* x = node; + const Uint new_sz = node->hdr.bhdr; + + ASSERT(!x->left || x->left->max_sz <= x->max_sz); + ASSERT(!x->right || x->right->max_sz <= x->max_sz); + + while (new_sz > x->max_sz) { + x->max_sz = new_sz; + x = x->parent; + if (!x) + break; + } +} + +/* Compare nodes for both carrier and block trees */ +static ERTS_INLINE SWord cmp_blocks(enum AOFFSortOrder order, AOFF_RBTree_t* lhs, AOFF_RBTree_t* rhs) { ASSERT(lhs != rhs); - ASSERT(flavor == AOFF_AOFF || FBLK_TO_MBC(&lhs->hdr) == FBLK_TO_MBC(&rhs->hdr)); - if (flavor != AOFF_AOFF) { - SWord diff = (SWord)AOFF_BLK_SZ(lhs) - (SWord)AOFF_BLK_SZ(rhs); - if (diff || flavor == AOFF_BF) return diff; + if (order == FF_AGEFF) { + AOFF_Carrier_t* lc = RBT_NODE_TO_MBC(lhs); + AOFF_Carrier_t* rc = RBT_NODE_TO_MBC(rhs); + Sint64 diff = lc->birth_time - rc->birth_time; + #ifdef ARCH_64 + if (diff) + return diff; + #else + if (diff < 0) + return -1; + else if (diff > 0) + return 1; + #endif + } + else { + ASSERT(order == FF_AOFF || FBLK_TO_MBC(&lhs->hdr) == FBLK_TO_MBC(&rhs->hdr)); + if (order != FF_AOFF) { + SWord diff = (SWord)AOFF_BLK_SZ(lhs) - (SWord)AOFF_BLK_SZ(rhs); + if (diff || order == FF_BF) return diff; + } } return (char*)lhs - (char*)rhs; } -static ERTS_INLINE SWord cmp_cand_blk(enum AOFF_Flavor flavor, +/* Compare candidate block. 
Only for block tree */ +static ERTS_INLINE SWord cmp_cand_blk(enum AOFFSortOrder order, Block_t* cand_blk, AOFF_RBTree_t* rhs) { - if (flavor != AOFF_AOFF) { + ASSERT(order != FF_AGEFF); + if (order != FF_AOFF) { if (BLK_TO_MBC(cand_blk) == FBLK_TO_MBC(&rhs->hdr)) { SWord diff = (SWord)MBC_BLK_SZ(cand_blk) - (SWord)MBC_FBLK_SZ(&rhs->hdr); - if (diff || flavor == AOFF_BF) return diff; + if (diff || order == FF_BF) return diff; } } return (char*)cand_blk - (char*)rhs; @@ -218,11 +251,8 @@ static UWord aoff_largest_fblk_in_mbc(Allctr_t*, Carrier_t*); /* Generic tree functions used by both carrier and block trees. */ static void rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del); -static void rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk); +static void rbt_insert(enum AOFFSortOrder, AOFF_RBTree_t** root, AOFF_RBTree_t* blk); static AOFF_RBTree_t* rbt_search(AOFF_RBTree_t* root, Uint size); -#ifdef HARD_DEBUG -static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node); -#endif static Eterm info_options(Allctr_t *, char *, fmtfn_t *, void *, Uint **, Uint *); static void init_atoms(void); @@ -230,10 +260,17 @@ static void init_atoms(void); static int atoms_initialized = 0; +#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT +static erts_atomic64_t birth_time_counter; +#endif + void erts_aoffalc_init(void) { atoms_initialized = 0; +#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT + erts_atomic64_init_nob(&birth_time_counter, 0); +#endif } Allctr_t * @@ -254,11 +291,12 @@ erts_aoffalc_start(AOFFAllctr_t *alc, sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t)); - alc->flavor = aoffinit->flavor; + alc->blk_order = aoffinit->blk_order; + alc->crr_order = aoffinit->crr_order; allctr->mbc_header_size = sizeof(AOFF_Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; - allctr->min_block_size = (aoffinit->flavor == AOFF_BF ? + allctr->min_block_size = (aoffinit->blk_order == FF_BF ? sizeof(AOFF_RBTreeList_t):sizeof(AOFF_RBTree_t)); allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR; @@ -487,9 +525,9 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk) AOFF_Carrier_t *crr = (AOFF_Carrier_t*) FBLK_TO_MBC(&del->hdr); ASSERT(crr->rbt_node.hdr.bhdr == crr->root->max_sz); - HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0); + HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); - if (alc->flavor == AOFF_BF) { + if (alc->blk_order == FF_BF) { ASSERT(del->flags & IS_BF_FLG); if (IS_LIST_ELEM(del)) { /* Remove from list */ @@ -510,14 +548,14 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk) replace(&crr->root, (AOFF_RBTree_t*)del, LIST_NEXT(del)); - HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0); + HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); return; } } rbt_delete(&crr->root, (AOFF_RBTree_t*)del); - HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0); + HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0); /* Update the carrier tree with a potentially new (lower) max_sz */ @@ -715,32 +753,33 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block) ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(&blk_crr->crr)); ASSERT(blk_crr->rbt_node.hdr.bhdr == (blk_crr->root ? 
blk_crr->root->max_sz : 0)); - HARD_CHECK_TREE(&blk_crr->crr, alc->flavor, blk_crr->root, 0); + HARD_CHECK_TREE(&blk_crr->crr, alc->blk_order, blk_crr->root, 0); - rbt_insert(alc->flavor, &blk_crr->root, blk); + rbt_insert(alc->blk_order, &blk_crr->root, blk); - /* Update the carrier tree with a potentially new (larger) max_sz - */ + /* + * Update carrier tree with a potentially new (larger) max_sz + */ crr_node = &blk_crr->rbt_node; if (blk_sz > crr_node->hdr.bhdr) { - ASSERT(blk_sz == blk_crr->root->max_sz); - crr_node->hdr.bhdr = blk_sz; - while (blk_sz > crr_node->max_sz) { - crr_node->max_sz = blk_sz; - crr_node = crr_node->parent; - if (!crr_node) break; - } + ASSERT(blk_sz == blk_crr->root->max_sz); + crr_node->hdr.bhdr = blk_sz; + while (blk_sz > crr_node->max_sz) { + crr_node->max_sz = blk_sz; + crr_node = crr_node->parent; + if (!crr_node) break; + } } - HARD_CHECK_TREE(&blk_crr->crr, alc->flavor, blk_crr->root, 0); + HARD_CHECK_TREE(NULL, alc->crr_order, alc->mbc_root, 0); } static void -rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk) +rbt_insert(enum AOFFSortOrder order, AOFF_RBTree_t** root, AOFF_RBTree_t* blk) { Uint blk_sz = AOFF_BLK_SZ(blk); #ifdef DEBUG - blk->flags = (flavor == AOFF_BF) ? IS_BF_FLG : 0; + blk->flags = (order == FF_BF) ? IS_BF_FLG : 0; #else blk->flags = 0; #endif @@ -760,7 +799,7 @@ rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk) if (x->max_sz < blk_sz) { x->max_sz = blk_sz; } - diff = cmp_blocks(flavor, blk, x); + diff = cmp_blocks(order, blk, x); if (diff < 0) { if (!x->left) { blk->parent = x; @@ -778,7 +817,7 @@ rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk) x = x->right; } else { - ASSERT(flavor == AOFF_BF); + ASSERT(order == FF_BF); ASSERT(blk->flags & IS_BF_FLG); ASSERT(x->flags & IS_BF_FLG); SET_LIST_ELEM(blk); @@ -798,7 +837,7 @@ rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk) if (IS_RED(blk->parent)) tree_insert_fixup(root, blk); } - if (flavor == AOFF_BF) { + if (order == FF_BF) { SET_TREE_NODE(blk); LIST_NEXT(blk) = NULL; } @@ -826,6 +865,16 @@ rbt_search(AOFF_RBTree_t* root, Uint size) } } +Carrier_t* aoff_lookup_pooled_mbc(Allctr_t* allctr, Uint size) +{ + AOFF_RBTree_t* node; + + if (!allctr->cpool.pooled_tree) + return NULL; + node = rbt_search(allctr->cpool.pooled_tree, size); + return node ? 
ErtsContainerStruct(node, Carrier_t, cpool.pooled) : NULL; +} + static Block_t * aoff_get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size) @@ -850,7 +899,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, /* Get block within carrier tree */ #ifdef HARD_DEBUG - dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, size); + dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, size); #endif blk = rbt_search(crr->root, size); @@ -863,7 +912,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, if (!blk) return NULL; - if (cand_blk && cmp_cand_blk(alc->flavor, cand_blk, blk) < 0) { + if (cand_blk && cmp_cand_blk(alc->blk_order, cand_blk, blk) < 0) { return NULL; /* cand_blk was better */ } @@ -872,23 +921,32 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, return (Block_t *) blk; } +static ERTS_INLINE Sint64 get_birth_time(void) +{ +#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT + return (Sint64) erts_os_monotonic_time(); +#else + return (Sint64) erts_atomic64_inc_read_nob(&birth_time_counter); +#endif +} + static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier) { AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; AOFF_RBTree_t **root = &alc->mbc_root; - HARD_CHECK_TREE(NULL, 0, *root, 0); + HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); - /* Link carrier in address order tree - */ crr->rbt_node.hdr.bhdr = 0; - rbt_insert(AOFF_AOFF, root, &crr->rbt_node); + if (alc->crr_order == FF_AGEFF || IS_DEBUG) + crr->birth_time = get_birth_time(); + rbt_insert(alc->crr_order, root, &crr->rbt_node); /* aoff_link_free_block will add free block later */ crr->root = NULL; - HARD_CHECK_TREE(NULL, 0, *root, 0); + HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); } #define IS_CRR_IN_TREE(CRR,ROOT) \ @@ -911,27 +969,38 @@ static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier) AOFF_RBTree_t **root = &alc->mbc_root; ASSERT(!IS_CRR_IN_TREE(crr, *root)); - HARD_CHECK_TREE(NULL, 0, *root, 0); + HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); + + rbt_insert(alc->crr_order, root, &crr->rbt_node); + + HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); +} + +void aoff_add_pooled_mbc(Allctr_t *allctr, Carrier_t *crr) +{ + AOFF_RBTree_t **root = &allctr->cpool.pooled_tree; + + ASSERT(allctr == crr->cpool.orig_allctr); + HARD_CHECK_TREE(NULL, 0, *root, 0); /* Link carrier in address order tree */ - rbt_insert(AOFF_AOFF, root, &crr->rbt_node); + rbt_insert(FF_AOFF, root, &crr->cpool.pooled); HARD_CHECK_TREE(NULL, 0, *root, 0); } static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier) { - AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr; - AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; - AOFF_RBTree_t **root = &alc->mbc_root; + AOFF_RBTree_t **root = &((AOFFAllctr_t*)allctr)->mbc_root; + AOFF_Carrier_t *crr = (AOFF_Carrier_t*)carrier; ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier)); if (!IS_CRR_IN_TREE(crr,*root)) - return; + return; - HARD_CHECK_TREE(NULL, 0, *root, 0); + HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); rbt_delete(root, &crr->rbt_node); crr->rbt_node.parent = NULL; @@ -939,9 +1008,27 @@ static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier) crr->rbt_node.right = NULL; crr->rbt_node.max_sz = crr->rbt_node.hdr.bhdr; - HARD_CHECK_TREE(NULL, 0, *root, 0); + HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0); +} + +void aoff_remove_pooled_mbc(Allctr_t *allctr, Carrier_t *crr) +{ + ASSERT(allctr == crr->cpool.orig_allctr); + + HARD_CHECK_TREE(NULL, 0, allctr->cpool.pooled_tree, 0); + + 
rbt_delete(&allctr->cpool.pooled_tree, &crr->cpool.pooled); +#ifdef DEBUG + crr->cpool.pooled.parent = NULL; + crr->cpool.pooled.left = NULL; + crr->cpool.pooled.right = NULL; + crr->cpool.pooled.max_sz = 0; +#endif + HARD_CHECK_TREE(NULL, 0, allctr->cpool.pooled_tree, 0); + } + static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier) { AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier; @@ -955,47 +1042,35 @@ static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier) * info_options() */ +static const char* flavor_str[2][3] = { + {"ageffcaoff", "ageffcaobf", "ageffcbf"}, + { "aoff", "aoffcaobf", "aoffcbf"} +}; +static Eterm flavor_atoms[2][3]; + static struct { Eterm as; - Eterm aoff; - Eterm aoffcaobf; - Eterm aoffcbf; -#ifdef DEBUG - Eterm end_of_atoms; -#endif } am; -static void ERTS_INLINE atom_init(Eterm *atom, char *name) +static void ERTS_INLINE atom_init(Eterm *atom, const char *name) { - *atom = am_atom_put(name, strlen(name)); + *atom = am_atom_put(name, sys_strlen(name)); } #define AM_INIT(AM) atom_init(&am.AM, #AM) static void init_atoms(void) { -#ifdef DEBUG - Eterm *atom; -#endif + int i, j; if (atoms_initialized) return; -#ifdef DEBUG - for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) { - *atom = THE_NON_VALUE; - } -#endif AM_INIT(as); - AM_INIT(aoff); - AM_INIT(aoffcaobf); - AM_INIT(aoffcbf); -#ifdef DEBUG - for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) { - ASSERT(*atom != THE_NON_VALUE); - } -#endif + for (i = 0; i < 2; i++) + for (j = 0; j < 3; j++) + atom_init(&flavor_atoms[i][j], flavor_str[i][j]); atoms_initialized = 1; } @@ -1021,15 +1096,16 @@ info_options(Allctr_t *allctr, { AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr; Eterm res = THE_NON_VALUE; - const char* flavor_str[3] = {"aoff", "aoffcaobf", "aoffcbf"}; - Eterm flavor_atom[3] = {am.aoff, am.aoffcaobf, am.aoffcbf}; + + ASSERT(alc->crr_order >= 0 && alc->crr_order <= 1); + ASSERT(alc->blk_order >= 1 && alc->blk_order <= 3); if (print_to_p) { erts_print(*print_to_p, print_to_arg, "%sas: %s\n", prefix, - flavor_str[alc->flavor]); + flavor_str[alc->crr_order][alc->blk_order-1]); } if (hpp || szp) { @@ -1039,7 +1115,8 @@ info_options(Allctr_t *allctr, __FILE__, __LINE__);; res = NIL; - add_2tup(hpp, szp, &res, am.as, flavor_atom[alc->flavor]); + add_2tup(hpp, szp, &res, am.as, + flavor_atoms[alc->crr_order][alc->blk_order-1]); } return res; @@ -1057,7 +1134,7 @@ UWord erts_aoffalc_test(UWord op, UWord a1, UWord a2) { switch (op) { - case 0x500: return (UWord) ((AOFFAllctr_t *) a1)->flavor == AOFF_AOBF; + case 0x500: return (UWord) ((AOFFAllctr_t *) a1)->blk_order == FF_AOBF; case 0x501: { AOFF_RBTree_t *node = ((AOFFAllctr_t *) a1)->mbc_root; Uint size = (Uint) a2; @@ -1072,7 +1149,7 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2) case 0x507: return (UWord) IS_TREE_NODE((AOFF_RBTree_t *) a1); case 0x508: return (UWord) 0; /* IS_BF_ALGO */ case 0x509: return (UWord) ((AOFF_RBTree_t *) a1)->max_sz; - case 0x50a: return (UWord) ((AOFFAllctr_t *) a1)->flavor == AOFF_BF; + case 0x50a: return (UWord) ((AOFFAllctr_t *) a1)->blk_order == FF_BF; case 0x50b: return (UWord) LIST_PREV(a1); default: ASSERT(0); return ~((UWord) 0); } @@ -1085,12 +1162,13 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2) #ifdef HARD_DEBUG - -static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node) +static int rbt_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node) { while (node != root) { - ASSERT(node->parent); - ASSERT(node->parent->left == node || node->parent->right 
== node); + if (!node->parent || (node->parent->left != node && + node->parent->right != node)) { + return 0; + } node = node->parent; } return 1; @@ -1132,7 +1210,7 @@ static void print_tree(AOFF_RBTree_t*); */ static AOFF_RBTree_t * -check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, Uint size) +check_tree(Carrier_t* within_crr, enum AOFFSortOrder order, AOFF_RBTree_t* root, Uint size) { AOFF_RBTree_t *res = NULL; Sint blacks; @@ -1144,7 +1222,8 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, #ifdef PRINT_TREE print_tree(root); #endif - ASSERT(within_crr || flavor == AOFF_AOFF); + ASSERT((within_crr && order >= FF_AOFF) || + (!within_crr && order <= FF_AOFF)); if (!root) return res; @@ -1202,7 +1281,7 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, ASSERT(((char*)x + AOFF_BLK_SZ(x)) <= ((char*)crr + CARRIER_SZ(crr))); } - if (flavor == AOFF_BF) { + if (order == FF_BF) { AOFF_RBTree_t* y = x; AOFF_RBTree_t* nxt = LIST_NEXT(y); ASSERT(IS_TREE_NODE(x)); @@ -1225,13 +1304,13 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, if (x->left) { ASSERT(x->left->parent == x); - ASSERT(cmp_blocks(flavor, x->left, x) < 0); + ASSERT(cmp_blocks(order, x->left, x) < 0); ASSERT(x->left->max_sz <= x->max_sz); } if (x->right) { ASSERT(x->right->parent == x); - ASSERT(cmp_blocks(flavor, x->right, x) > 0); + ASSERT(cmp_blocks(order, x->right, x) > 0); ASSERT(x->right->max_sz <= x->max_sz); } ASSERT(x->max_sz >= AOFF_BLK_SZ(x)); @@ -1240,7 +1319,7 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, || x->max_sz == (x->right ? x->right->max_sz : 0)); if (size && AOFF_BLK_SZ(x) >= size) { - if (!res || cmp_blocks(flavor, x, res) < 0) { + if (!res || cmp_blocks(order, x, res) < 0) { res = x; } } diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h index 7349c6ab19..9cf4fc81a8 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.h +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h @@ -28,14 +28,16 @@ typedef struct AOFFAllctr_t_ AOFFAllctr_t; -enum AOFF_Flavor { - AOFF_AOFF = 0, - AOFF_AOBF = 1, - AOFF_BF = 2 +enum AOFFSortOrder { + FF_AGEFF = 0, + FF_AOFF = 1, + FF_AOBF = 2, + FF_BF = 3 }; typedef struct { - enum AOFF_Flavor flavor; + enum AOFFSortOrder blk_order; + enum AOFFSortOrder crr_order; } AOFFAllctrInit_t; #define ERTS_DEFAULT_AOFF_ALLCTR_INIT {0/*dummy*/} @@ -53,12 +55,12 @@ Allctr_t *erts_aoffalc_start(AOFFAllctr_t *, AOFFAllctrInit_t*, AllctrInit_t *); #define GET_ERL_ALLOC_UTIL_IMPL #include "erl_alloc_util.h" - struct AOFFAllctr_t_ { Allctr_t allctr; /* Has to be first! 
*/ struct AOFF_RBTree_t_* mbc_root; - enum AOFF_Flavor flavor; + enum AOFFSortOrder blk_order; + enum AOFFSortOrder crr_order; }; UWord erts_aoffalc_test(UWord, UWord, UWord); diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c index 6173c408e1..85fc4c3a85 100644 --- a/erts/emulator/beam/erl_bestfit_alloc.c +++ b/erts/emulator/beam/erl_bestfit_alloc.c @@ -875,7 +875,7 @@ static struct { static void ERTS_INLINE atom_init(Eterm *atom, char *name) { - *atom = am_atom_put(name, strlen(name)); + *atom = am_atom_put(name, sys_strlen(name)); } #define AM_INIT(AM) atom_init(&am.AM, #AM) diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index 4cafa499a9..469f6a1ea8 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -61,8 +61,6 @@ static Export binary_longest_prefix_trap_export; static BIF_RETTYPE binary_longest_prefix_trap(BIF_ALIST_3); static Export binary_longest_suffix_trap_export; static BIF_RETTYPE binary_longest_suffix_trap(BIF_ALIST_3); -static Export binary_bin_to_list_trap_export; -static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3); static Export binary_copy_trap_export; static BIF_RETTYPE binary_copy_trap(BIF_ALIST_2); static Uint max_loop_limit; @@ -86,10 +84,6 @@ void erts_init_bif_binary(void) am_erlang, am_binary_longest_suffix_trap, 3, &binary_longest_suffix_trap); - erts_init_trap_export(&binary_bin_to_list_trap_export, - am_erlang, am_binary_bin_to_list_trap, 3, - &binary_bin_to_list_trap); - erts_init_trap_export(&binary_copy_trap_export, am_erlang, am_binary_copy_trap, 2, &binary_copy_trap); @@ -371,7 +365,7 @@ static ACTrie *create_acdata(MyAllocator *my, Uint len, acn->d = 0; acn->final = 0; acn->h = NULL; - memset(acn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE); + sys_memset(acn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE); #ifdef HARDDEBUG act->idc = 0; acn->id = 0; @@ -394,7 +388,7 @@ static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len, init_my_allocator(my, datasize, data); bmd = my_alloc(my, sizeof(BMData)); bmd->x = my_alloc(my,len); - memcpy(bmd->x,x,len); + sys_memcpy(bmd->x,x,len); bmd->len = len; bmd->goodshift = my_alloc(my,sizeof(Uint) * len); *the_bin = mb; @@ -439,7 +433,7 @@ static void ac_add_one_pattern(MyAllocator *my, ACTrie *act, byte *x, Uint len) nn->d = i+1; nn->h = act->root; nn->final = 0; - memset(nn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE); + sys_memset(nn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE); acn->g[x[i]] = nn; ++i; acn = nn; @@ -2270,7 +2264,7 @@ static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction) if (cd[i].type == CL_TYPE_HEAP_NOALLOC) { unsigned char *tmp = cd[i].buff; cd[i].buff = erts_alloc(ERTS_ALC_T_BINARY_BUFFER, cd[i].bufflen); - memcpy(cd[i].buff,tmp,cd[i].bufflen); + sys_memcpy(cd[i].buff,tmp,cd[i].bufflen); cd[i].type = CL_TYPE_HEAP; } } @@ -2440,191 +2434,6 @@ BIF_RETTYPE binary_at_2(BIF_ALIST_2) BIF_ERROR(BIF_P,BADARG); } -#define BIN_TO_LIST_OK 0 -#define BIN_TO_LIST_TRAP 1 -/* No badarg, checked before call */ - -#define BIN_TO_LIST_LOOP_FACTOR 10 - -static int do_bin_to_list(Process *p, byte *bytes, Uint bit_offs, - Uint start, Sint *lenp, Eterm *termp) -{ - Uint reds = get_reds(p, BIN_TO_LIST_LOOP_FACTOR); /* reds can never be 0 */ - Uint len = *lenp; - Uint loops; - Eterm *hp; - Eterm term = *termp; - Uint n; - - ASSERT(reds > 0); - - loops = MIN(reds,len); - - BUMP_REDS(p, loops / BIN_TO_LIST_LOOP_FACTOR); - - hp = HAlloc(p,2*loops); - while (loops--) { - --len; - if 
(bit_offs) { - n = ((((Uint) bytes[start+len]) << bit_offs) | - (((Uint) bytes[start+len+1]) >> (8-bit_offs))) & 0xFF; - } else { - n = bytes[start+len]; - } - - term = CONS(hp,make_small(n),term); - hp +=2; - } - *termp = term; - *lenp = len; - if (len) { - BUMP_ALL_REDS(p); - return BIN_TO_LIST_TRAP; - } - return BIN_TO_LIST_OK; -} - - -static BIF_RETTYPE do_trap_bin_to_list(Process *p, Eterm binary, - Uint start, Sint len, Eterm sofar) -{ - Eterm *hp; - Eterm blob; - - hp = HAlloc(p,3); - hp[0] = make_pos_bignum_header(2); - hp[1] = start; - hp[2] = (Uint) len; - blob = make_big(hp); - BIF_TRAP3(&binary_bin_to_list_trap_export, p, binary, blob, sofar); -} - -static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3) -{ - Eterm *ptr; - Uint start; - Sint len; - byte *bytes; - Uint bit_offs; - Uint bit_size; - Eterm res = BIF_ARG_3; - - ptr = big_val(BIF_ARG_2); - start = ptr[1]; - len = (Sint) ptr[2]; - - ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size); - if (do_bin_to_list(BIF_P, bytes, bit_offs, start, &len, &res) == - BIN_TO_LIST_OK) { - BIF_RET(res); - } - return do_trap_bin_to_list(BIF_P,BIF_ARG_1,start,len,res); -} - -static BIF_RETTYPE binary_bin_to_list_common(Process *p, - Eterm bin, - Eterm epos, - Eterm elen) -{ - Uint pos; - Sint len; - size_t sz; - byte *bytes; - Uint bit_offs; - Uint bit_size; - Eterm res = NIL; - - if (is_not_binary(bin)) { - goto badarg; - } - if (!term_to_Uint(epos, &pos)) { - goto badarg; - } - if (!term_to_Sint(elen, &len)) { - goto badarg; - } - if (len < 0) { - Uint lentmp = -(Uint)len; - /* overflow */ - if ((Sint)lentmp < 0) { - goto badarg; - } - len = lentmp; - if (len > pos) { - goto badarg; - } - pos -= len; - } - /* overflow */ - if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) { - goto badarg; - } - sz = binary_size(bin); - - if (pos+len > sz) { - goto badarg; - } - ERTS_GET_BINARY_BYTES(bin,bytes,bit_offs,bit_size); - if (bit_size != 0) { - goto badarg; - } - if(do_bin_to_list(p, bytes, bit_offs, pos, &len, &res) == - BIN_TO_LIST_OK) { - BIF_RET(res); - } - return do_trap_bin_to_list(p,bin,pos,len,res); - - badarg: - BIF_ERROR(p,BADARG); -} - -BIF_RETTYPE binary_bin_to_list_3(BIF_ALIST_3) -{ - return binary_bin_to_list_common(BIF_P,BIF_ARG_1,BIF_ARG_2,BIF_ARG_3); -} - -BIF_RETTYPE binary_bin_to_list_2(BIF_ALIST_2) -{ - Eterm *tp; - - if (is_not_tuple(BIF_ARG_2)) { - goto badarg; - } - tp = tuple_val(BIF_ARG_2); - if (arityval(*tp) != 2) { - goto badarg; - } - return binary_bin_to_list_common(BIF_P,BIF_ARG_1,tp[1],tp[2]); - badarg: - BIF_ERROR(BIF_P,BADARG); -} - -BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1) -{ - Uint pos = 0; - Sint len; - byte *bytes; - Uint bit_offs; - Uint bit_size; - Eterm res = NIL; - - if (is_not_binary(BIF_ARG_1)) { - goto badarg; - } - len = binary_size(BIF_ARG_1); - ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size); - if (bit_size != 0) { - goto badarg; - } - if(do_bin_to_list(BIF_P, bytes, bit_offs, pos, &len, &res) == - BIN_TO_LIST_OK) { - BIF_RET(res); - } - return do_trap_bin_to_list(BIF_P,BIF_ARG_1,pos,len,res); - badarg: - BIF_ERROR(BIF_P,BADARG); -} - HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1) BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1) @@ -2747,7 +2556,7 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en) 0); if (!(cbs->temp_alloc)) { /* alignment not needed, need to copy */ byte *tmp = erts_alloc(ERTS_ALC_T_BINARY_BUFFER,size); - memcpy(tmp,cbs->source,size); + sys_memcpy(tmp,cbs->source,size); cbs->source = tmp; cbs->source_type = BC_TYPE_HEAP; } else { @@ 
-2760,7 +2569,7 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en) pos = 0; i = 0; while (pos < reds) { - memcpy(t+pos,cbs->source, size); + sys_memcpy(t+pos,cbs->source, size); pos += size; ++i; } @@ -2785,7 +2594,7 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en) } pos = 0; while (pos < target_size) { - memcpy(t+pos,source, size); + sys_memcpy(t+pos,source, size); pos += size; } erts_free_aligned_binary_bytes(temp_alloc); @@ -2815,7 +2624,7 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2) if ((n-1) * size >= reds) { Uint i = 0; while ((pos - opos) < reds) { - memcpy(t+pos,cbs->source, size); + sys_memcpy(t+pos,cbs->source, size); pos += size; ++i; } @@ -2825,28 +2634,21 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2) BIF_TRAP2(&binary_copy_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2); } else { Binary *save; - ProcBin* pb; + Eterm resbin; Uint target_size = cbs->result->orig_size; while (pos < target_size) { - memcpy(t+pos,cbs->source, size); + sys_memcpy(t+pos,cbs->source, size); pos += size; } - save = cbs->result; + save = cbs->result; cbs->result = NULL; cleanup_copy_bin_state(mb); /* now cbs is dead */ - pb = (ProcBin *) HAlloc(BIF_P, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = target_size; - pb->next = MSO(BIF_P).first; - MSO(BIF_P).first = (struct erl_off_heap_header*) pb; - pb->val = save; - pb->bytes = t; - pb->flags = 0; - - OH_OVERHEAD(&(MSO(BIF_P)), target_size / sizeof(Eterm)); - BUMP_REDS(BIF_P,(pos - opos) / BINARY_COPY_LOOP_FACTOR); - BIF_RET(make_binary(pb)); + resbin = erts_build_proc_bin(&MSO(BIF_P), + HAlloc(BIF_P, PROC_BIN_SIZE), + save); + BUMP_REDS(BIF_P,(pos - opos) / BINARY_COPY_LOOP_FACTOR); + BIF_RET(resbin); } } @@ -3227,7 +3029,7 @@ static void dump_bm_data(BMData *bm) static void dump_ac_node(ACNode *node, int indent, int ch) { int i; char *spaces = erts_alloc(ERTS_ALC_T_TMP, 10 * indent + 1); - memset(spaces,' ',10*indent); + sys_memset(spaces,' ',10*indent); spaces[10*indent] = '\0'; erts_printf("%s-> %c\n",spaces,ch); erts_printf("%sId: %u\n",spaces,(unsigned) node->id); diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c index 9417803e14..9095bcd380 100644 --- a/erts/emulator/beam/erl_bif_chksum.c +++ b/erts/emulator/beam/erl_bif_chksum.c @@ -516,7 +516,7 @@ md5_2(BIF_ALIST_2) /* No need to check context, this function cannot be called with unaligned or badly sized context as it's always trapped to. 
*/ bytes = binary_bytes(BIF_ARG_1); - memcpy(&context,bytes,sizeof(MD5_CTX)); + sys_memcpy(&context,bytes,sizeof(MD5_CTX)); rest = do_chksum(&md5_wrap,BIF_P,BIF_ARG_2,100,(void *) &context,&res, &err); if (err != 0) { @@ -564,7 +564,7 @@ md5_update_2(BIF_ALIST_2) erts_free_aligned_binary_bytes(temp_alloc); BIF_ERROR(BIF_P, BADARG); } - memcpy(&context,bytes,sizeof(MD5_CTX)); + sys_memcpy(&context,bytes,sizeof(MD5_CTX)); erts_free_aligned_binary_bytes(temp_alloc); rest = do_chksum(&md5_wrap,BIF_P,BIF_ARG_2,100,(void *) &context,&res, &err); @@ -599,7 +599,7 @@ md5_final_1(BIF_ALIST_1) goto error; } bin = erts_new_heap_binary(BIF_P, (byte *)NULL, 16, &result); - memcpy(&ctx_copy, context, sizeof(MD5_CTX)); + sys_memcpy(&ctx_copy, context, sizeof(MD5_CTX)); erts_free_aligned_binary_bytes(temp_alloc); MD5Final(result, &ctx_copy); BIF_RET(bin); diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index f673ef3194..579e9b12f4 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -1412,7 +1412,7 @@ static Eterm copy_ref(Eterm ref, Eterm *hp) ErtsORefThing *ptr; ASSERT(is_internal_ordinary_ref(ref)); ptr = ordinary_ref_thing_ptr(ref); - memcpy(hp, ptr, sizeof(ErtsORefThing)); + sys_memcpy(hp, ptr, sizeof(ErtsORefThing)); return (make_internal_ref(hp)); } @@ -1454,9 +1454,9 @@ static void set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char dh->procs = p; dh->status = ERL_DE_RELOAD; dh->reload_full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1); - strcpy(dh->reload_full_path,path); + sys_strcpy(dh->reload_full_path,path); dh->reload_driver_name = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(name) + 1); - strcpy(dh->reload_driver_name,name); + sys_strcpy(dh->reload_driver_name,name); dh->reload_flags = flags; } @@ -1501,7 +1501,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) goto error; } - if (strcmp(name, dp->driver_name) != 0) { + if (sys_strcmp(name, dp->driver_name) != 0) { res = ERL_DE_LOAD_ERROR_BAD_NAME; goto error; } @@ -1799,7 +1799,7 @@ static char *pick_list_or_atom(Eterm name_term) goto error; } name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, ap->len + 1); - memcpy(name,ap->name,ap->len); + sys_memcpy(name,ap->name,ap->len); name[ap->len] = '\0'; } else { if (erts_iolist_size(name_term, &name_len)) { @@ -1870,7 +1870,7 @@ static erts_driver_t *lookup_driver(char *name) { erts_driver_t *drv; assert_drv_list_locked(); - for (drv = driver_list; drv != NULL && strcmp(drv->name, name); drv = drv->next) + for (drv = driver_list; drv != NULL && sys_strcmp(drv->name, name); drv = drv->next) ; return drv; } diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 27bbf70c0b..6475f04c56 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -1754,7 +1754,7 @@ static int check_if_xml(void) { char buf[1]; size_t bufsz = sizeof(buf); - return erts_sys_getenv_raw("VALGRIND_LOG_XML", buf, &bufsz) >= 0; + return erts_sys_explicit_8bit_getenv("VALGRIND_LOG_XML", buf, &bufsz) >= 0; } #else #define check_if_xml() 0 @@ -2378,12 +2378,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(os_version_tuple); } else if (BIF_ARG_1 == am_version) { - int n = strlen(ERLANG_VERSION); + int n = sys_strlen(ERLANG_VERSION); hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2); BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL)); } else if (BIF_ARG_1 == am_machine) { - int n = strlen(EMULATOR); + int n = sys_strlen(EMULATOR); hp = 
HAlloc(BIF_P, n*2); BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL)); } @@ -2409,7 +2409,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = erts_bld_cons(hpp, hszp, erts_bld_tuple(hpp, hszp, 2, erts_atom_put((byte *)opc[i].name, - strlen(opc[i].name), + sys_strlen(opc[i].name), ERTS_ATOM_ENC_LATIN1, 1), erts_bld_uint(hpp, hszp, @@ -2805,21 +2805,21 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) Uint sz; Eterm res = NIL, tup, text; Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */ - 2*(strlen(erts_build_flags_CONFIG_H) + - strlen(erts_build_flags_CFLAGS) + - strlen(erts_build_flags_LDFLAGS))); + 2*(sys_strlen(erts_build_flags_CONFIG_H) + + sys_strlen(erts_build_flags_CFLAGS) + + sys_strlen(erts_build_flags_LDFLAGS))); - sz = strlen(erts_build_flags_CONFIG_H); + sz = sys_strlen(erts_build_flags_CONFIG_H); text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL); tup = TUPLE2(hp, am_config_h, text); hp += 3; res = CONS(hp, tup, res); hp += 2; - sz = strlen(erts_build_flags_CFLAGS); + sz = sys_strlen(erts_build_flags_CFLAGS); text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL); tup = TUPLE2(hp, am_cflags, text); hp += 3; res = CONS(hp, tup, res); hp += 2; - sz = strlen(erts_build_flags_LDFLAGS); + sz = sys_strlen(erts_build_flags_LDFLAGS); text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL); tup = TUPLE2(hp, am_ldflags, text); hp += 3; res = CONS(hp, tup, res); hp += 2; @@ -3830,10 +3830,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) BIF_RET(res); } } - else if (ERTS_IS_ATOM_STR("term_to_binary_no_funs", tp[1])) { - Uint dflags = (DFLAG_EXTENDED_REFERENCES | - DFLAG_EXTENDED_PIDS_PORTS | - DFLAG_BIT_BINARIES); + else if (ERTS_IS_ATOM_STR("term_to_binary_tuple_fallbacks", tp[1])) { + Uint dflags = (TERM_TO_BINARY_DFLAGS + & ~DFLAG_EXPORT_PTR_TAG + & ~DFLAG_BIT_BINARIES); BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags)); } else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) { @@ -3883,7 +3883,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) while (ix >= atom_table_size()) { char tmp[20]; erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size()); - erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1); + erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1); } return make_atom(ix); } @@ -4334,6 +4334,19 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(res); } } + else if (ERTS_IS_ATOM_STR("binary", BIF_ARG_1)) { + Sint64 size; + if (term_to_Sint64(BIF_ARG_2, &size)) { + Binary* refbin = erts_bin_drv_alloc_fnf(size); + if (!refbin) + BIF_RET(am_false); + sys_memset(refbin->orig_bytes, 0, size); + BIF_RET(erts_build_proc_bin(&MSO(BIF_P), + HAlloc(BIF_P, PROC_BIN_SIZE), + refbin)); + } + } + } BIF_ERROR(BIF_P, BADARG); @@ -4436,7 +4449,7 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s file = stats->file ? 
stats->file : "undefined"; - af = erts_atom_put((byte *)file, strlen(file), ERTS_ATOM_ENC_LATIN1, 1); + af = erts_atom_put((byte *)file, sys_strlen(file), ERTS_ATOM_ENC_LATIN1, 1); uil = erts_bld_uint( hpp, szp, stats->line); tloc = erts_bld_tuple(hpp, szp, 2, af, uil); @@ -4465,7 +4478,7 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) { Eterm id = info->id; - if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_TYPE_PROCLOCK) { + if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK) { /* Use registered names as id's for process locks if available. Thread * progress is delayed since we may be running on a dirty scheduler. */ ErtsThrPrgrDelayHandle delay_handle; @@ -4482,7 +4495,7 @@ static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) { } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) { if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) { const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id)); - id = erts_atom_put((byte*)name, strlen(name), ERTS_ATOM_ENC_LATIN1, 1); + id = erts_atom_put((byte*)name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1); } } @@ -4502,8 +4515,8 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, lock_desc = erts_lock_flags_get_type_name(info->flags); - type = erts_atom_put((byte*)lock_desc, strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1); - name = erts_atom_put((byte*)info->name, strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1); + type = erts_atom_put((byte*)lock_desc, sys_strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1); + name = erts_atom_put((byte*)info->name, sys_strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1); /* Only attempt to resolve ids when actually emitting the term. This ought * to be safe since all immediates are the same size. 
*/ @@ -4539,11 +4552,11 @@ static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *du dtns = bld_unstable_uint64(hpp, szp, duration->ns); tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns); - adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1); + adur = erts_atom_put((byte *)str_duration, sys_strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1); tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt); /* lock tuple */ - aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1); + aloc = erts_atom_put((byte *)str_locks, sys_strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1); for(i = 0; i < current_locks->size; i++) { lloc = lcnt_build_lock_term(hpp, szp, ¤t_locks->elements[i], lloc); @@ -4597,7 +4610,7 @@ static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t for(i = 0; lcnt_category_map[i].name != NULL; i++) { if(mask & lcnt_category_map[i].flag) { Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name, - strlen(lcnt_category_map[i].name), + sys_strlen(lcnt_category_map[i].name), ERTS_ATOM_ENC_UTF8, 0); res = erts_bld_cons(hpp, szp, category, res); @@ -4727,14 +4740,14 @@ BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2) static void os_info_init(void) { - Eterm type = erts_atom_put((byte *) os_type, strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1); + Eterm type = erts_atom_put((byte *) os_type, sys_strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1); Eterm flav; int major, minor, build; char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */ Eterm* hp; os_flavor(buf, 1024); - flav = erts_atom_put((byte *) buf, strlen(buf), ERTS_ATOM_ENC_LATIN1, 1); + flav = erts_atom_put((byte *) buf, sys_strlen(buf), ERTS_ATOM_ENC_LATIN1, 1); erts_free(ERTS_ALC_T_TMP, (void *) buf); hp = erts_alloc(ERTS_ALC_T_LITERAL, (3+4)*sizeof(Eterm)); os_type_tuple = TUPLE2(hp, type, flav); @@ -4758,7 +4771,7 @@ erts_bif_info_init(void) alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1); alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1); gather_sched_wall_time_res_trap - = erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1); + = erts_export_put(am_erts_internal, am_gather_sched_wall_time_result, 1); gather_gc_info_res_trap = erts_export_put(am_erlang, am_gather_gc_info_result, 1); gather_io_bytes_trap diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c index 910325a2f4..ce2b27409b 100644 --- a/erts/emulator/beam/erl_bif_os.c +++ b/erts/emulator/beam/erl_bif_os.c @@ -36,8 +36,7 @@ #include "big.h" #include "dist.h" #include "erl_version.h" - -static int check_env_name(char *name); +#include "erl_osenv.h" /* * Return the pid for the Erlang process in the host OS. 
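 * The environment BIFs that follow have been rewritten on top of the
 * erts_osenv API and a global rwlock: readers take
 * erts_sys_rlock_global_osenv(), writers erts_sys_rwlock_global_osenv().
 * A rough sketch of the Erlang-level calls that end up in these BIFs
 * (the module-level wrappers are assumed here; they are not part of this
 * diff):
 *
 *   os:putenv("FOO", "bar"),   %% os_set_env_var_2, under the write lock
 *   "bar" = os:getenv("FOO"),  %% os_get_env_var_1, under the read lock
 *   os:unsetenv("FOO").        %% os_unset_env_var_1, under the write lock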
@@ -67,148 +66,78 @@ BIF_RETTYPE os_getpid_0(BIF_ALIST_0) BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL)); } -BIF_RETTYPE os_getenv_0(BIF_ALIST_0) +static void os_getenv_foreach(Process *process, Eterm *result, Eterm key, Eterm value) { - GETENV_STATE state; - char *cp; - Eterm* hp; - Eterm ret; - Eterm str; + Eterm kvp_term, *hp; - init_getenv_state(&state); + hp = HAlloc(process, 5); + kvp_term = TUPLE2(hp, key, value); + hp += 3; - ret = NIL; - while ((cp = getenv_string(&state)) != NULL) { - str = erts_convert_native_to_filename(BIF_P,(byte *)cp); - hp = HAlloc(BIF_P, 2); - ret = CONS(hp, str, ret); - } + (*result) = CONS(hp, kvp_term, (*result)); +} - fini_getenv_state(&state); +BIF_RETTYPE os_list_env_vars_0(BIF_ALIST_0) +{ + const erts_osenv_t *global_env; + Eterm result = NIL; + + global_env = erts_sys_rlock_global_osenv(); + erts_osenv_foreach_term(global_env, BIF_P, &result, (void*)&os_getenv_foreach); + erts_sys_runlock_global_osenv(); - return ret; + return result; } -#define STATIC_BUF_SIZE 1024 -BIF_RETTYPE os_getenv_1(BIF_ALIST_1) +BIF_RETTYPE os_get_env_var_1(BIF_ALIST_1) { - Process* p = BIF_P; - Eterm str; - Sint len; - int res; - char *key_str, *val; - char buf[STATIC_BUF_SIZE]; - size_t val_size = sizeof(buf); - - key_str = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE, - ERTS_ALC_T_TMP,1,0,&len); - - if (!check_env_name(key_str)) { - if (key_str && key_str != &buf[0]) - erts_free(ERTS_ALC_T_TMP, key_str); - BIF_ERROR(p, BADARG); - } + const erts_osenv_t *global_env; + Eterm out_term; + int error; - if (key_str != &buf[0]) - val = &buf[0]; - else { - /* len includes zero byte */ - val_size -= len; - val = &buf[len]; - } - res = erts_sys_getenv(key_str, val, &val_size); - - if (res < 0) { - no_var: - str = am_false; - } else { - if (res > 0) { - val = erts_alloc(ERTS_ALC_T_TMP, val_size); - while (1) { - res = erts_sys_getenv(key_str, val, &val_size); - if (res == 0) - break; - else if (res < 0) - goto no_var; - else - val = erts_realloc(ERTS_ALC_T_TMP, val, val_size); - } - } - str = erts_convert_native_to_filename(p,(byte *)val); - } - if (key_str != &buf[0]) - erts_free(ERTS_ALC_T_TMP, key_str); - if (val < &buf[0] || &buf[sizeof(buf)-1] < val) - erts_free(ERTS_ALC_T_TMP, val); - BIF_RET(str); + global_env = erts_sys_rlock_global_osenv(); + error = erts_osenv_get_term(global_env, BIF_P, BIF_ARG_1, &out_term); + erts_sys_runlock_global_osenv(); + + if (error == 0) { + return am_false; + } else if (error < 0) { + BIF_ERROR(BIF_P, BADARG); + } + + return out_term; } -BIF_RETTYPE os_putenv_2(BIF_ALIST_2) +BIF_RETTYPE os_set_env_var_2(BIF_ALIST_2) { - char def_buf_key[STATIC_BUF_SIZE]; - char def_buf_value[STATIC_BUF_SIZE]; - char *key_buf = NULL, *value_buf = NULL; - - key_buf = erts_convert_filename_to_native(BIF_ARG_1,def_buf_key, - STATIC_BUF_SIZE, - ERTS_ALC_T_TMP,0,0,NULL); - if (!check_env_name(key_buf)) - goto badarg; - - value_buf = erts_convert_filename_to_native(BIF_ARG_2,def_buf_value, - STATIC_BUF_SIZE, - ERTS_ALC_T_TMP,1,0, - NULL); - if (!value_buf) - goto badarg; - - if (erts_sys_putenv(key_buf, value_buf)) { - if (key_buf != def_buf_key) { - erts_free(ERTS_ALC_T_TMP, key_buf); - } - if (value_buf != def_buf_value) { - erts_free(ERTS_ALC_T_TMP, value_buf); - } - BIF_ERROR(BIF_P, BADARG); - } - if (key_buf != def_buf_key) { - erts_free(ERTS_ALC_T_TMP, key_buf); - } - if (value_buf != def_buf_value) { - erts_free(ERTS_ALC_T_TMP, value_buf); + erts_osenv_t *global_env; + int error; + + global_env = erts_sys_rwlock_global_osenv(); + error = 
erts_osenv_put_term(global_env, BIF_ARG_1, BIF_ARG_2); + erts_sys_rwunlock_global_osenv(); + + if (error < 0) { + BIF_ERROR(BIF_P, BADARG); } - BIF_RET(am_true); -badarg: - if (key_buf && key_buf != def_buf_key) - erts_free(ERTS_ALC_T_TMP, key_buf); - if (value_buf && value_buf != def_buf_value) - erts_free(ERTS_ALC_T_TMP, value_buf); - BIF_ERROR(BIF_P, BADARG); + BIF_RET(am_true); } -BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1) +BIF_RETTYPE os_unset_env_var_1(BIF_ALIST_1) { - char *key_buf; - char buf[STATIC_BUF_SIZE]; + erts_osenv_t *global_env; + int error; - key_buf = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE, - ERTS_ALC_T_TMP,0,0,NULL); - if (!check_env_name(key_buf)) - goto badarg; + global_env = erts_sys_rwlock_global_osenv(); + error = erts_osenv_unset_term(global_env, BIF_ARG_1); + erts_sys_rwunlock_global_osenv(); - if (erts_sys_unsetenv(key_buf)) - goto badarg; - - if (key_buf != buf) { - erts_free(ERTS_ALC_T_TMP, key_buf); + if (error < 0) { + BIF_ERROR(BIF_P, BADARG); } - BIF_RET(am_true); -badarg: - if (key_buf && key_buf != buf) - erts_free(ERTS_ALC_T_TMP, key_buf); - BIF_ERROR(BIF_P, BADARG); + BIF_RET(am_true); } BIF_RETTYPE os_set_signal_2(BIF_ALIST_2) { @@ -224,27 +153,3 @@ BIF_RETTYPE os_set_signal_2(BIF_ALIST_2) { error: BIF_ERROR(BIF_P, BADARG); } - -static int -check_env_name(char *raw_name) -{ - byte *c = (byte *) raw_name; - int encoding; - - if (!c) - return 0; - - encoding = erts_get_native_filename_encoding(); - - if (erts_raw_env_char_is_7bit_ascii_char('\0', c, encoding)) - return 0; /* Do not allow empty name... */ - - /* Verify no '=' characters in variable name... */ - do { - if (erts_raw_env_char_is_7bit_ascii_char('=', c, encoding)) - return 0; - c = erts_raw_env_next_char(c, encoding); - } while (!erts_raw_env_char_is_7bit_ascii_char('\0', c, encoding)); - - return 1; /* Seems ok... */ -} diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index c4a4dd5863..1feb892b93 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -45,7 +45,7 @@ #include "dtrace-wrapper.h" static Port *open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump); -static char* convert_environment(Eterm env); +static int merge_global_environment(erts_osenv_t *env, Eterm key_value_pairs); static char **convert_args(Eterm); static void free_args(char **); @@ -71,7 +71,7 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); } else if (err_type == -2) { str = erl_errno_id(err_num); - res = erts_atom_put((byte *) str, strlen(str), ERTS_ATOM_ENC_LATIN1, 1); + res = erts_atom_put((byte *) str, sys_strlen(str), ERTS_ATOM_ENC_LATIN1, 1); } else { res = am_einval; } @@ -639,6 +639,27 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1) BIF_RET(res); } +Eterm erts_port_data_read(Port* prt) +{ + Eterm res; + erts_aint_t data; + + data = erts_atomic_read_ddrb(&prt->data); + if (data == (erts_aint_t)NULL) + return am_undefined; /* Port terminated by racing thread */ + + if ((data & 0x3) != 0) { + res = (Eterm) (UWord) data; + ASSERT(is_immed(res)); + } + else { + ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data; + res = pdhp->data; + } + return res; +} + + /* * Open a port. Most of the work is not done here but rather in * the file io.c. 
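 * With this change the child's environment is produced by merging the
 * 'env' option into a copy of the global OS environment (see
 * merge_global_environment() below); a value of 'false' or [] unsets the
 * key, and everything else from the global environment is passed through
 * unchanged. A rough Erlang-side sketch (illustrative only, the executable
 * path is made up):
 *
 *   Port = open_port({spawn_executable, "/usr/bin/env"},
 *                    [{env, [{"FOO", "bar"},      %% added/overridden
 *                            {"HOME", false}]},   %% unset for the child
 *                     exit_status]).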
@@ -651,6 +672,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1) static Port * open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) { + int merged_environment = 0; Sint i; Eterm option; Uint arity; @@ -672,12 +694,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) opts.read_write = 0; opts.hide_window = 0; opts.wd = NULL; - opts.envir = NULL; opts.exit_status = 0; opts.overlapped_io = 0; opts.spawn_type = ERTS_SPAWN_ANY; opts.argv = NULL; opts.parallelism = erts_port_parallelism; + erts_osenv_init(&opts.envir); + linebuf = 0; *err_nump = 0; @@ -718,11 +741,16 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) goto badarg; } } else if (option == am_env) { - if (opts.envir) /* ignore previous env option... */ - erts_free(ERTS_ALC_T_OPEN_PORT_ENV, opts.envir); - opts.envir = convert_environment(*tp); - if (!opts.envir) - goto badarg; + if (merged_environment) { + /* Ignore previous env option */ + erts_osenv_clear(&opts.envir); + } + + merged_environment = 1; + + if (merge_global_environment(&opts.envir, *tp)) { + goto badarg; + } } else if (option == am_args) { char **av; char **oav = opts.argv; @@ -807,6 +835,12 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) if((linebuf && opts.packet_bytes) || (opts.redir_stderr && !opts.use_stdio)) { goto badarg; +} + + /* If we lacked an env option, fill in the global environment without + * changes. */ + if (!merged_environment) { + merge_global_environment(&opts.envir, NIL); } /* @@ -956,8 +990,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) erts_atomic32_read_bor_relb(&port->state, sflgs); do_return: - if (opts.envir) - erts_free(ERTS_ALC_T_OPEN_PORT_ENV, opts.envir); + erts_osenv_clear(&opts.envir); if (name_buf) erts_free(ERTS_ALC_T_TMP, (void *) name_buf); if (opts.argv) { @@ -977,6 +1010,45 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) goto do_return; } +/* Merges the the global environment and the given {Key, Value} list into env, + * unsetting all keys whose value is either 'false' or NIL. The behavior on + * NIL is undocumented and perhaps surprising, but the previous implementation + * worked in this manner. */ +static int merge_global_environment(erts_osenv_t *env, Eterm key_value_pairs) { + const erts_osenv_t *global_env = erts_sys_rlock_global_osenv(); + erts_osenv_merge(env, global_env, 0); + erts_sys_runlock_global_osenv(); + + while (is_list(key_value_pairs)) { + Eterm *cell, *tuple; + + cell = list_val(key_value_pairs); + + if(!is_tuple_arity(CAR(cell), 2)) { + return -1; + } + + tuple = tuple_val(CAR(cell)); + key_value_pairs = CDR(cell); + + if(is_nil(tuple[2]) || tuple[2] == am_false) { + if(erts_osenv_unset_term(env, tuple[1]) < 0) { + return -1; + } + } else { + if(erts_osenv_put_term(env, tuple[1], tuple[2]) < 0) { + return -1; + } + } + } + + if(!is_nil(key_value_pairs)) { + return -1; + } + + return 0; +} + /* Arguments can be given i unicode and as raw binaries, convert filename is used to convert */ static char **convert_args(Eterm l) { @@ -1024,130 +1096,6 @@ static void free_args(char **av) erts_free(ERTS_ALC_T_TMP, av); } -#ifdef DEBUG -#define ERTS_CONV_ENV_BUF_EXTRA 2 -#else -#define ERTS_CONV_ENV_BUF_EXTRA 1024 -#endif - -static char* convert_environment(Eterm env) -{ - /* - * Returns environment buffer in memory allocated - * as ERTS_ALC_T_OPEN_PORT_ENV. Caller *needs* - * to deallocate... 
- */ - - Sint size, alloc_size; - byte* bytes; - int encoding = erts_get_native_filename_encoding(); - - alloc_size = ERTS_CONV_ENV_BUF_EXTRA; - bytes = erts_alloc(ERTS_ALC_T_OPEN_PORT_ENV, - alloc_size); - size = 0; - - /* ERTS_CONV_ENV_BUF_EXTRA >= for end delimiter... */ - ERTS_CT_ASSERT(ERTS_CONV_ENV_BUF_EXTRA >= 2); - - while (is_list(env)) { - Sint var_sz, val_sz, need; - byte *str, *limit; - Eterm tmp, *tp, *consp; - - consp = list_val(env); - tmp = CAR(consp); - if (is_not_tuple_arity(tmp, 2)) - goto error; - - tp = tuple_val(tmp); - - /* Check encoding of env variable... */ - if (is_not_list(tp[1])) - goto error; - var_sz = erts_native_filename_need(tp[1], encoding); - if (var_sz <= 0) - goto error; - /* Check encoding of value... */ - if (tp[2] == am_false || is_nil(tp[2])) - val_sz = 0; - else if (is_not_list(tp[2])) - goto error; - else { - val_sz = erts_native_filename_need(tp[2], encoding); - if (val_sz < 0) - goto error; - } - - /* Ensure enough memory... */ - need = size; - need += var_sz + val_sz; - /* '=' and '\0' */ - need += 2 * erts_raw_env_7bit_ascii_char_need(encoding); - if (need > alloc_size) { - alloc_size = (need - alloc_size) + alloc_size; - alloc_size += ERTS_CONV_ENV_BUF_EXTRA; - bytes = erts_realloc(ERTS_ALC_T_OPEN_PORT_ENV, - bytes, alloc_size); - } - - /* Write environment variable name... */ - str = bytes + size; - erts_native_filename_put(tp[1], encoding, str); - /* empty variable name is not allowed... */ - if (erts_raw_env_char_is_7bit_ascii_char('\0', str, encoding)) - goto error; - - /* - * Drop null characters at the end and verify that we do - * not have any '=' characters in the name... - */ - limit = str + var_sz; - while (str < limit) { - if (erts_raw_env_char_is_7bit_ascii_char('\0', str, encoding)) - break; - if (erts_raw_env_char_is_7bit_ascii_char('=', str, encoding)) - goto error; - str = erts_raw_env_next_char(str, encoding); - } - - /* Write the equals sign... */ - str = erts_raw_env_7bit_ascii_char_put('=', str, encoding); - - /* Write the value... */ - if (val_sz > 0) { - limit = str + val_sz; - erts_native_filename_put(tp[2], encoding, str); - while (str < limit) { - if (erts_raw_env_char_is_7bit_ascii_char('\0', str, encoding)) - break; - str = erts_raw_env_next_char(str, encoding); - } - } - - /* Delimit... */ - str = erts_raw_env_7bit_ascii_char_put('\0', str, encoding); - - size = str - bytes; - ASSERT(size <= alloc_size); - - env = CDR(consp); - } - - /* End delimit... */ - (void) erts_raw_env_7bit_ascii_char_put('\0', &bytes[size], encoding); - - if (is_nil(env)) - return (char *) bytes; - -error: - - if (bytes) - erts_free(ERTS_ALC_T_OPEN_PORT_ENV, bytes); - - return (char *) NULL; /* error... 
*/ -} - /* ------------ decode_packet() and friends: */ struct packet_callback_args @@ -1197,7 +1145,7 @@ http_bld_string(struct packet_callback_args* pca, Uint **hpp, Uint *szp, ErlHeapBin* bin = (ErlHeapBin*) *hpp; bin->thing_word = header_heap_bin(len); bin->size = len; - memcpy(bin->data, str, len); + sys_memcpy(bin->data, str, len); *hpp += size; } } @@ -1375,9 +1323,9 @@ int ssl_tls_erl(void* arg, unsigned type, unsigned major, unsigned minor, Eterm bin = new_binary(pca->p, NULL, plen+len); byte* bin_ptr = binary_bytes(bin); - memcpy(bin_ptr+plen, buf, len); + sys_memcpy(bin_ptr+plen, buf, len); if (plen) { - memcpy(bin_ptr, prefix, plen); + sys_memcpy(bin_ptr, prefix, plen); } /* {ssl_tls,NIL,ContentType,{Major,Minor},Bin} */ diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c index bc819505e7..4d769c2d46 100644 --- a/erts/emulator/beam/erl_bif_re.c +++ b/erts/emulator/beam/erl_bif_re.c @@ -514,7 +514,7 @@ re_version_0(BIF_ALIST_0) Eterm ret; size_t version_size = 0; byte *version = (byte *) erts_pcre_version(); - version_size = strlen((const char *) version); + version_size = sys_strlen((const char *) version); ret = new_binary(BIF_P, version, version_size); BIF_RET(ret); } @@ -998,7 +998,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) has_dupnames = ((options & PCRE_DUPNAMES) != 0); for(i=0;i<top;++i) { - if (last == NULL || !has_dupnames || strcmp((char *) last+2,(char *) nametable+2)) { + if (last == NULL || !has_dupnames || sys_strcmp((char *) last+2,(char *) nametable+2)) { ASSERT(ri->num_spec >= 0); ++(ri->num_spec); if(ri->num_spec > sallocated) { @@ -1048,7 +1048,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) (tmpbsiz = ap->len + 1)); } } - memcpy(tmpb,ap->name,ap->len); + sys_memcpy(tmpb,ap->name,ap->len); tmpb[ap->len] = '\0'; } else { ErlDrvSizeT slen; @@ -1210,7 +1210,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) erts_pcre_fullinfo(result, NULL, PCRE_INFO_CAPTURECOUNT, &capture_count); ovsize = 3*(capture_count+1); restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size); - memcpy(restart.code, result, code_size); + sys_memcpy(restart.code, result, code_size); erts_pcre_free(result); erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); /*unicode = (pflags & PARSE_FLAG_UNICODE) ? 
1 : 0;*/ @@ -1252,7 +1252,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) BIF_ERROR(p, BADARG); } restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size); - memcpy(restart.code, code_tmp, code_size); + sys_memcpy(restart.code, code_tmp, code_size); erts_free_aligned_binary_bytes(temp_alloc); } @@ -1364,7 +1364,7 @@ handle_iolist: RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp); Eterm magic_ref; Eterm *hp; - memcpy(restartp,&restart,sizeof(RestartContext)); + sys_memcpy(restartp,&restart,sizeof(RestartContext)); BUMP_ALL_REDS(p); hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); magic_ref = erts_mk_magic_ref(&hp, &MSO(p), mbp); @@ -1517,7 +1517,7 @@ re_inspect_2(BIF_ALIST_2) last = NULL; name = nametable; for(i=0;i<top;++i) { - if (last == NULL || !has_dupnames || strcmp((char *) last+2, + if (last == NULL || !has_dupnames || sys_strcmp((char *) last+2, (char *) name+2)) { ++num_names; } @@ -1531,9 +1531,9 @@ re_inspect_2(BIF_ALIST_2) name = nametable; j = 0; for(i=0;i<top;++i) { - if (last == NULL || !has_dupnames || strcmp((char *) last+2, + if (last == NULL || !has_dupnames || sys_strcmp((char *) last+2, (char *) name+2)) { - tmp_vec[j++] = new_binary(BIF_P, (byte *) name+2, strlen((char *) name+2)); + tmp_vec[j++] = new_binary(BIF_P, (byte *) name+2, sys_strlen((char *) name+2)); } last = name; name += entrysize; diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h index 9aa631fde9..6af9d4ac8c 100644 --- a/erts/emulator/beam/erl_bif_unique.h +++ b/erts/emulator/beam/erl_bif_unique.h @@ -307,7 +307,7 @@ erts_iref_storage_clean(ErtsIRefStorage *iref) if (iref->is_magic && erts_refc_dectest(&iref->u.mb->intern.refc, 0) == 0) erts_ref_bin_free(iref->u.mb); #ifdef DEBUG - memset((void *) iref, 0xf, sizeof(ErtsIRefStorage)); + sys_memset((void *) iref, 0xf, sizeof(ErtsIRefStorage)); #endif } @@ -342,7 +342,7 @@ erts_iref_storage_make_ref(ErtsIRefStorage *iref, #ifdef DEBUG if (clean_storage) - memset((void *) iref, 0xf, sizeof(ErtsIRefStorage)); + sys_memset((void *) iref, 0xf, sizeof(ErtsIRefStorage)); #endif return make_internal_ref(hp); diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index 05007e864e..46653a8580 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -363,8 +363,6 @@ erts_free_aligned_binary_bytes(byte* buf) # define CHICKEN_PAD (sizeof(void*) - 1) #endif -/* Caller must initialize 'refc' -*/ ERTS_GLB_INLINE Binary * erts_bin_drv_alloc_fnf(Uint size) { @@ -383,8 +381,6 @@ erts_bin_drv_alloc_fnf(Uint size) return res; } -/* Caller must initialize 'refc' -*/ ERTS_GLB_INLINE Binary * erts_bin_drv_alloc(Uint size) { @@ -401,9 +397,6 @@ erts_bin_drv_alloc(Uint size) return res; } - -/* Caller must initialize 'refc' -*/ ERTS_GLB_INLINE Binary * erts_bin_nrml_alloc(Uint size) { diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 3ba0886464..a76d769283 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -1753,6 +1753,28 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) BIF_RET(ret); } +/* +** Retrieves the tid() of a named ets table. 
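** Returns the atom 'undefined' when no table is registered under the
** given name (see the am_undefined case below). A rough Erlang usage
** sketch (illustrative only):
**
**   ets:new(my_table, [named_table, public]),
**   Tid = ets:whereis(my_table),
**   true = ets:insert(Tid, {key, value}).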
+*/ +BIF_RETTYPE ets_whereis_1(BIF_ALIST_1) +{ + DbTable* tb; + Eterm res; + + if (is_not_atom(BIF_ARG_1)) { + BIF_ERROR(BIF_P, BADARG); + } + + if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) { + BIF_RET(am_undefined); + } + + res = make_tid(BIF_P, tb); + db_unlock(tb, LCK_READ); + + BIF_RET(res); +} + /* ** The lookup BIF */ @@ -3126,7 +3148,8 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table, am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed, am_write_concurrency, - am_read_concurrency}; + am_read_concurrency, + am_id}; Eterm results[sizeof(fields)/sizeof(Eterm)]; DbTable* tb; Eterm res; @@ -4016,7 +4039,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) ret = is_table_named(tb) ? am_true : am_false; } else if (What == am_compressed) { ret = tb->common.compress ? am_true : am_false; + } else if (What == am_id) { + ret = make_tid(p, tb); } + /* * For debugging purposes */ @@ -4235,7 +4261,7 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt) while (index >= atom_table_size()) { char tmp[20]; erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size()); - erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1); + erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1); } list = CONS(hp, make_atom(index), list); hp += 2; diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index e017b9552b..956662aefa 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -91,7 +91,7 @@ do { \ (On).siz *= 2; \ (On).data \ = (((On).def == (On).data) \ - ? memcpy(erts_alloc(ERTS_ALC_T_DB_MC_STK, \ + ? sys_memcpy(erts_alloc(ERTS_ALC_T_DB_MC_STK, \ (On).siz*sizeof(*((On).data))), \ (On).def, \ DMC_DEFAULT_SIZE*sizeof(*((On).data))) \ @@ -1518,7 +1518,7 @@ restart: context.current_match < num_progs; ++context.current_match) { /* This loop is long, too long */ - memset(heap.vars, 0, heap.size * sizeof(*heap.vars)); + sys_memset(heap.vars, 0, heap.size * sizeof(*heap.vars)); t = context.matchexpr[context.current_match]; context.stack_used = 0; structure_checked = 0; @@ -2140,7 +2140,7 @@ restart: case matchEqFloat: if (!is_float(*ep)) FAIL(); - if (memcmp(float_val(*ep) + 1, pc, sizeof(double))) + if (sys_memcmp(float_val(*ep) + 1, pc, sizeof(double))) FAIL(); pc += TermWords(2); ++ep; @@ -2742,8 +2742,8 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei) if (vnum >= 0) erts_snprintf(buff,sizeof(buff)+20,tmp->error_string, vnum); else - strcpy(buff,tmp->error_string); - sl = strlen(buff); + sys_strcpy(buff,tmp->error_string); + sl = sys_strlen(buff); shp = HAlloc(p, sl * 2 + 5); sev = (tmp->severity == dmcWarning) ? 
am_atom_put("warning",7) : @@ -5044,7 +5044,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info) ASSERT(j < x); erts_snprintf(buff+1, sizeof(buff) - 1, "%u", (unsigned) j); /* Yes, writing directly into terms, they ARE off heap */ - *p = erts_atom_put((byte *) buff, strlen(buff), + *p = erts_atom_put((byte *) buff, sys_strlen(buff), ERTS_ATOM_ENC_LATIN1, 1); } ++p; @@ -5505,7 +5505,7 @@ void db_match_dis(Binary *bp) ++t; { double num; - memcpy(&num,t,sizeof(double)); + sys_memcpy(&num,t,sizeof(double)); t += TermWords(2); erts_printf("EqFloat\t%f\n", num); } diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c index 71d4534ef9..4cf42fce57 100644 --- a/erts/emulator/beam/erl_drv_thread.c +++ b/erts/emulator/beam/erl_drv_thread.c @@ -439,7 +439,7 @@ erl_drv_tsd_key_create(char *name, ErlDrvTSDKey *key) name_copy = no_name; else { name_copy = erts_alloc_fnf(ERTS_ALC_T_DRV_TSD, - sizeof(char)*(strlen(name) + 1)); + sizeof(char)*(sys_strlen(name) + 1)); if (!name_copy) { *key = -1; return ENOMEM; @@ -750,7 +750,7 @@ erl_drv_steal_main_thread(char *name, + (name ? sys_strlen(name) + 1 : 0))); if (!dtid) return ENOMEM; - memset(dtid,0,sizeof(ErlDrvTid_)); + sys_memset(dtid,0,sizeof(ErlDrvTid_)); dtid->tid = (void * ) -1; dtid->drv_thr = 1; dtid->func = func; @@ -763,8 +763,8 @@ erl_drv_steal_main_thread(char *name, *tid = NULL; /* Ignore options and name... */ - memcpy(buff,&func,sizeof(void* (*)(void*))); - memcpy(buff + sizeof(void* (*)(void*)),&arg,sizeof(void *)); + sys_memcpy(buff,&func,sizeof(void* (*)(void*))); + sys_memcpy(buff + sizeof(void* (*)(void*)),&arg,sizeof(void *)); write(erts_darwin_main_thread_pipe[1],buff,buff_sz); return 0; } diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 97a1ca915f..1c64644efc 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -116,6 +116,7 @@ typedef struct { static Uint setup_rootset(Process*, Eterm*, int, Rootset*); static void cleanup_rootset(Rootset *rootset); static Eterm *full_sweep_heaps(Process *p, + ErlHeapFragment *live_hf_end, int hibernate, Eterm *n_heap, Eterm* n_htop, char *oh, Uint oh_size, @@ -142,7 +143,7 @@ static Eterm* sweep_literal_area(Eterm* n_hp, Eterm* n_htop, static Eterm* sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint src_size); static Eterm* collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end, - Eterm* heap, Eterm* htop, Eterm* objv, int nobj); + Eterm* htop); static int adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj); static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj); static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj); @@ -917,6 +918,7 @@ garbage_collect_hibernate(Process* p, int check_long_gc) htop = heap; htop = full_sweep_heaps(p, + ERTS_INVALID_HFRAG_PTR, 1, heap, htop, @@ -1161,7 +1163,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, roots++; - while (g_sz--) { + for ( ; g_sz--; g_ptr++) { Eterm gval = *g_ptr; switch (primary_tag(gval)) { @@ -1170,26 +1172,21 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, val = *ptr; if (IS_MOVED_BOXED(val)) { ASSERT(is_boxed(val)); - *g_ptr++ = val; + *g_ptr = val; } else if (ErtsInArea(ptr, area, area_size)) { - move_boxed(&ptr,val,&old_htop,g_ptr++); - } else { - g_ptr++; + move_boxed(ptr,val,&old_htop,g_ptr); } break; case TAG_PRIMARY_LIST: ptr = list_val(gval); val = *ptr; if (IS_MOVED_CONS(val)) { /* Moved */ - *g_ptr++ 
= ptr[1]; + *g_ptr = ptr[1]; } else if (ErtsInArea(ptr, area, area_size)) { - move_cons(&ptr,val,&old_htop,g_ptr++); - } else { - g_ptr++; - } + move_cons(ptr,val,&old_htop,g_ptr); + } break; default: - g_ptr++; break; } } @@ -1210,8 +1207,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, p->old_htop = old_htop; /* - * Prepare to sweep binaries. Since all MSOs on the new heap - * must be come before MSOs on the old heap, find the end of + * Prepare to sweep off-heap objects. Since all MSOs on the new + * heap must be come before MSOs on the old heap, find the end of * current MSO list and use that as a starting point. */ @@ -1223,25 +1220,50 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, } /* - * Sweep through all binaries in the temporary literal area. + * Sweep through all off-heap objects in the temporary literal area. */ while (oh) { if (IS_MOVED_BOXED(oh->thing_word)) { - Binary* bptr; struct erl_off_heap_header* ptr; - ptr = (struct erl_off_heap_header*) boxed_val(oh->thing_word); - ASSERT(thing_subtag(ptr->thing_word) == REFC_BINARY_SUBTAG); - bptr = ((ProcBin*)ptr)->val; - - /* - * This binary has been copied to the heap. + /* + * This off-heap object has been copied to the heap. * We must increment its reference count and * link it into the MSO list for the process. */ - erts_refc_inc(&bptr->intern.refc, 1); + ptr = (struct erl_off_heap_header*) boxed_val(oh->thing_word); + switch (thing_subtag(ptr->thing_word)) { + case REFC_BINARY_SUBTAG: + { + Binary* bptr = ((ProcBin*)ptr)->val; + erts_refc_inc(&bptr->intern.refc, 1); + break; + } + case FUN_SUBTAG: + { + ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe; + erts_refc_inc(&fe->refc, 1); + break; + } + case REF_SUBTAG: + { + ErtsMagicBinary *bptr; + ASSERT(is_magic_ref_thing(ptr)); + bptr = ((ErtsMRefThing *) ptr)->mb; + erts_refc_inc(&bptr->intern.refc, 1); + break; + } + default: + { + ExternalThing *etp; + ASSERT(is_external_header(ptr->thing_word)); + etp = (ExternalThing *) ptr; + erts_refc_inc(&etp->node->refc, 1); + break; + } + } *prev = ptr; prev = &ptr->next; } @@ -1454,25 +1476,29 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end, n_htop = n_heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*new_sz); + n = setup_rootset(p, objv, nobj, &rootset); + roots = rootset.roots; + + /* + * All allocations done. Start defile heap with move markers. + * A crash dump due to allocation failure above will see a healthy heap. + */ + if (live_hf_end != ERTS_INVALID_HFRAG_PTR) { /* * Move heap frags that we know are completely live * directly into the new young heap generation. 
*/ - n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop, - objv, nobj); + n_htop = collect_live_heap_frags(p, live_hf_end, n_htop); } - n = setup_rootset(p, objv, nobj, &rootset); - roots = rootset.roots; - GENSWEEP_NSTACK(p, old_htop, n_htop); while (n--) { Eterm* g_ptr = roots->v; Uint g_sz = roots->sz; roots++; - while (g_sz--) { + for ( ; g_sz--; g_ptr++) { gval = *g_ptr; switch (primary_tag(gval)) { @@ -1482,14 +1508,12 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end, val = *ptr; if (IS_MOVED_BOXED(val)) { ASSERT(is_boxed(val)); - *g_ptr++ = val; + *g_ptr = val; } else if (ErtsInArea(ptr, mature, mature_size)) { - move_boxed(&ptr,val,&old_htop,g_ptr++); + move_boxed(ptr,val,&old_htop,g_ptr); } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) { - move_boxed(&ptr,val,&n_htop,g_ptr++); - } else { - g_ptr++; - } + move_boxed(ptr,val,&n_htop,g_ptr); + } break; } @@ -1497,19 +1521,15 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end, ptr = list_val(gval); val = *ptr; if (IS_MOVED_CONS(val)) { /* Moved */ - *g_ptr++ = ptr[1]; + *g_ptr = ptr[1]; } else if (ErtsInArea(ptr, mature, mature_size)) { - move_cons(&ptr,val,&old_htop,g_ptr++); + move_cons(ptr,val,&old_htop,g_ptr); } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) { - move_cons(&ptr,val,&n_htop,g_ptr++); - } else { - g_ptr++; - } + move_cons(ptr,val,&n_htop,g_ptr); + } break; } - default: - g_ptr++; break; } } @@ -1543,9 +1563,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end, ASSERT(is_boxed(val)); *n_hp++ = val; } else if (ErtsInArea(ptr, mature, mature_size)) { - move_boxed(&ptr,val,&old_htop,n_hp++); + move_boxed(ptr,val,&old_htop,n_hp++); } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) { - move_boxed(&ptr,val,&n_htop,n_hp++); + move_boxed(ptr,val,&n_htop,n_hp++); } else { n_hp++; } @@ -1557,9 +1577,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end, if (IS_MOVED_CONS(val)) { *n_hp++ = ptr[1]; } else if (ErtsInArea(ptr, mature, mature_size)) { - move_cons(&ptr,val,&old_htop,n_hp++); + move_cons(ptr,val,&old_htop,n_hp++); } else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) { - move_cons(&ptr,val,&n_htop,n_hp++); + move_cons(ptr,val,&n_htop,n_hp++); } else { n_hp++; } @@ -1579,10 +1599,10 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end, *origptr = val; mb->base = binary_bytes(val); } else if (ErtsInArea(ptr, mature, mature_size)) { - move_boxed(&ptr,val,&old_htop,origptr); + move_boxed(ptr,val,&old_htop,origptr); mb->base = binary_bytes(mb->orig); } else if (ErtsInYoungGen(*origptr, ptr, oh, oh_size)) { - move_boxed(&ptr,val,&n_htop,origptr); + move_boxed(ptr,val,&n_htop,origptr); mb->base = binary_bytes(mb->orig); } } @@ -1708,16 +1728,8 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end, n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*new_sz); - if (live_hf_end != ERTS_INVALID_HFRAG_PTR) { - /* - * Move heap frags that we know are completely live - * directly into the heap. 
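For reference, a condensed sketch (not part of the patch, and assuming the surrounding erl_gc.c context) of the new root-sweep shape used in the hunks above: the destination slot now advances in the for-loop header instead of in every branch, untouched slots simply fall through, and move_boxed()/move_cons() take the source pointer by value; the erl_gc.h hunk later in this diff additionally makes move_boxed() return the advanced source pointer rather than writing it back through an Eterm**. The mature-generation branch is omitted here for brevity.

    for ( ; g_sz--; g_ptr++) {               /* slot advances once per iteration */
        Eterm gval = *g_ptr;
        switch (primary_tag(gval)) {
        case TAG_PRIMARY_BOXED: {
            Eterm *ptr = boxed_val(gval);
            Eterm val = *ptr;
            if (IS_MOVED_BOXED(val))
                *g_ptr = val;                          /* follow existing move marker */
            else if (ErtsInYoungGen(gval, ptr, oh, oh_size))
                move_boxed(ptr, val, &n_htop, g_ptr);  /* source pointer passed by value */
            break;                                     /* anything else is left in place */
        }
        case TAG_PRIMARY_LIST: {
            Eterm *ptr = list_val(gval);
            Eterm val = *ptr;
            if (IS_MOVED_CONS(val))
                *g_ptr = ptr[1];
            else if (ErtsInYoungGen(gval, ptr, oh, oh_size))
                move_cons(ptr, val, &n_htop, g_ptr);
            break;
        }
        default:
            break;
        }
    }
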
- */ - n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop, - objv, nobj); - } - - n_htop = full_sweep_heaps(p, 0, n_heap, n_htop, oh, oh_size, objv, nobj); + n_htop = full_sweep_heaps(p, live_hf_end, 0, n_heap, n_htop, oh, oh_size, + objv, nobj); /* Move the stack to the end of the heap */ stk_sz = HEAP_END(p) - p->stop; @@ -1764,6 +1776,7 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end, static Eterm * full_sweep_heaps(Process *p, + ErlHeapFragment *live_hf_end, int hibernate, Eterm *n_heap, Eterm* n_htop, char *oh, Uint oh_size, @@ -1780,6 +1793,19 @@ full_sweep_heaps(Process *p, n = setup_rootset(p, objv, nobj, &rootset); + /* + * All allocations done. Start defile heap with move markers. + * A crash dump due to allocation failure above will see a healthy heap. + */ + + if (live_hf_end != ERTS_INVALID_HFRAG_PTR) { + /* + * Move heap frags that we know are completely live + * directly into the heap. + */ + n_htop = collect_live_heap_frags(p, live_hf_end, n_htop); + } + #ifdef HIPE if (hibernate) hipe_empty_nstack(p); @@ -1793,7 +1819,7 @@ full_sweep_heaps(Process *p, Eterm g_sz = roots->sz; roots++; - while (g_sz--) { + for ( ; g_sz--; g_ptr++) { Eterm* ptr; Eterm val; Eterm gval = *g_ptr; @@ -1805,32 +1831,26 @@ full_sweep_heaps(Process *p, val = *ptr; if (IS_MOVED_BOXED(val)) { ASSERT(is_boxed(val)); - *g_ptr++ = val; + *g_ptr = val; } else if (!erts_is_literal(gval, ptr)) { - move_boxed(&ptr,val,&n_htop,g_ptr++); - } else { - g_ptr++; + move_boxed(ptr,val,&n_htop,g_ptr); } - continue; + break; } case TAG_PRIMARY_LIST: { ptr = list_val(gval); val = *ptr; if (IS_MOVED_CONS(val)) { - *g_ptr++ = ptr[1]; + *g_ptr = ptr[1]; } else if (!erts_is_literal(gval, ptr)) { - move_cons(&ptr,val,&n_htop,g_ptr++); - } else { - g_ptr++; + move_cons(ptr,val,&n_htop,g_ptr); } - continue; + break; } - default: { - g_ptr++; - continue; - } + default: + break; } } } @@ -2109,7 +2129,7 @@ sweep(Eterm *n_hp, Eterm *n_htop, ASSERT(is_boxed(val)); *n_hp++ = val; } else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) { - move_boxed(&ptr,val,&n_htop,n_hp++); + move_boxed(ptr,val,&n_htop,n_hp++); } else { n_hp++; } @@ -2121,7 +2141,7 @@ sweep(Eterm *n_hp, Eterm *n_htop, if (IS_MOVED_CONS(val)) { *n_hp++ = ptr[1]; } else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) { - move_cons(&ptr,val,&n_htop,n_hp++); + move_cons(ptr,val,&n_htop,n_hp++); } else { n_hp++; } @@ -2142,7 +2162,7 @@ sweep(Eterm *n_hp, Eterm *n_htop, *origptr = val; mb->base = binary_bytes(*origptr); } else if (ERTS_IS_IN_SWEEP_AREA(*origptr, ptr)) { - move_boxed(&ptr,val,&n_htop,origptr); + move_boxed(ptr,val,&n_htop,origptr); mb->base = binary_bytes(*origptr); } } @@ -2205,7 +2225,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, ASSERT(is_boxed(val)); *heap_ptr++ = val; } else if (ErtsInArea(ptr, src, src_size)) { - move_boxed(&ptr,val,&htop,heap_ptr++); + move_boxed(ptr,val,&htop,heap_ptr++); } else { heap_ptr++; } @@ -2217,7 +2237,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, if (IS_MOVED_CONS(val)) { *heap_ptr++ = ptr[1]; } else if (ErtsInArea(ptr, src, src_size)) { - move_cons(&ptr,val,&htop,heap_ptr++); + move_cons(ptr,val,&htop,heap_ptr++); } else { heap_ptr++; } @@ -2238,7 +2258,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, *origptr = val; mb->base = binary_bytes(*origptr); } else if (ErtsInArea(ptr, src, src_size)) { - move_boxed(&ptr,val,&htop,origptr); + move_boxed(ptr,val,&htop,origptr); mb->base = binary_bytes(*origptr); } } @@ 
-2271,11 +2291,11 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size) ASSERT(val != ERTS_HOLE_MARKER); if (is_header(val)) { ASSERT(ptr + header_arity(val) < end); - move_boxed(&ptr, val, &n_htop, &dummy_ref); + ptr = move_boxed(ptr, val, &n_htop, &dummy_ref); } else { /* must be a cons cell */ ASSERT(ptr+1 < end); - move_cons(&ptr, val, &n_htop, &dummy_ref); + move_cons(ptr, val, &n_htop, &dummy_ref); ptr += 2; } } @@ -2288,9 +2308,7 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size) */ static Eterm* -collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end, - Eterm* n_hstart, Eterm* n_htop, - Eterm* objv, int nobj) +collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end, Eterm* n_htop) { ErlHeapFragment* qb; char* frag_begin; @@ -3256,8 +3274,7 @@ reply_gc_info(void *vgcirp) gcireq_free(vgcirp); } -void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig) { - Eterm *ptr = *pp; +Eterm* erts_sub_binary_to_heap_binary(Eterm *ptr, Eterm **hpp, Eterm *orig) { Eterm *htop = *hpp; Eterm gval; ErlSubBin *sb = (ErlSubBin *)ptr; @@ -3285,7 +3302,7 @@ void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig) { htop += heap_bin_size(sb->size); *hpp = htop; - *pp = ptr; + return ptr; } diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h index 6a529b8443..dec0ab1143 100644 --- a/erts/emulator/beam/erl_gc.h +++ b/erts/emulator/beam/erl_gc.h @@ -33,14 +33,15 @@ #define IS_MOVED_BOXED(x) (!is_header((x))) #define IS_MOVED_CONS(x) (is_non_value((x))) -void erts_sub_binary_to_heap_binary(Eterm **pp, Eterm **hpp, Eterm *orig); +Eterm* erts_sub_binary_to_heap_binary(Eterm *ptr, Eterm **hpp, Eterm *orig); -ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig); +ERTS_GLB_INLINE void move_cons(Eterm *ERTS_RESTRICT ptr, Eterm car, Eterm **hpp, + Eterm *orig); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig) +ERTS_GLB_INLINE void move_cons(Eterm *ERTS_RESTRICT ptr, Eterm car, Eterm **hpp, + Eterm *orig) { - Eterm *ptr = *pp; - Eterm *htop = *hpp; + Eterm *ERTS_RESTRICT htop = *hpp; Eterm gval; htop[0] = car; /* copy car */ @@ -53,14 +54,15 @@ ERTS_GLB_INLINE void move_cons(Eterm **pp, Eterm car, Eterm **hpp, Eterm *orig) } #endif -ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig); +ERTS_GLB_INLINE Eterm* move_boxed(Eterm *ERTS_RESTRICT ptr, Eterm hdr, Eterm **hpp, + Eterm *orig); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig) +ERTS_GLB_INLINE Eterm* move_boxed(Eterm *ERTS_RESTRICT ptr, Eterm hdr, Eterm **hpp, + Eterm *orig) { Eterm gval; Sint nelts; - Eterm *ptr = *pp; - Eterm *htop = *hpp; + Eterm *ERTS_RESTRICT htop = *hpp; ASSERT(is_header(hdr)); nelts = header_arity(hdr); @@ -71,8 +73,7 @@ ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig) /* convert sub-binary to heap-binary if applicable */ if (sb->bitsize == 0 && sb->bitoffs == 0 && sb->is_writable == 0 && sb->size <= sizeof(Eterm) * 3) { - erts_sub_binary_to_heap_binary(pp, hpp, orig); - return; + return erts_sub_binary_to_heap_binary(ptr, hpp, orig); } } nelts++; @@ -90,7 +91,7 @@ ERTS_GLB_INLINE void move_boxed(Eterm **pp, Eterm hdr, Eterm **hpp, Eterm *orig) while (nelts--) *htop++ = *ptr++; *hpp = htop; - *pp = ptr; + return ptr; } #endif diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c index 
50aa41b4d2..e3ba67f0af 100644 --- a/erts/emulator/beam/erl_goodfit_alloc.c +++ b/erts/emulator/beam/erl_goodfit_alloc.c @@ -170,6 +170,7 @@ static void unlink_free_block (Allctr_t *, Block_t *); static void update_last_aux_mbc (Allctr_t *, Carrier_t *); static Eterm info_options (Allctr_t *, char *, fmtfn_t *, void *, Uint **, Uint *); +static int gfalc_try_set_dyn_param(Allctr_t*, Eterm param, Uint value); static void init_atoms (void); #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG @@ -250,6 +251,8 @@ erts_gfalc_start(GFAllctr_t *gfallctr, if (!erts_alcu_start(allctr, init)) return NULL; + allctr->try_set_dyn_param = gfalc_try_set_dyn_param; + if (allctr->min_block_size != MIN_BLK_SZ) return NULL; @@ -505,7 +508,7 @@ static struct { static void ERTS_INLINE atom_init(Eterm *atom, char *name) { - *atom = am_atom_put(name, strlen(name)); + *atom = am_atom_put(name, sys_strlen(name)); } #define AM_INIT(AM) atom_init(&am.AM, #AM) @@ -584,6 +587,15 @@ info_options(Allctr_t *allctr, return res; } +static int gfalc_try_set_dyn_param(Allctr_t* allctr, Eterm param, Uint value) +{ + if (param == am_sbct) { + /* Cannot change 'sbct' without rearranging buckets */ + return 0; + } + return erts_alcu_try_set_dyn_param(allctr, param, value); +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ * NOTE: erts_gfalc_test() is only supposed to be used for testing. * * * diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 6cef9bd0e3..8430a5559b 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -50,7 +50,7 @@ #define ERTS_WANT_TIMER_WHEEL_API #include "erl_time.h" #include "erl_check_io.h" - +#include "erl_osenv.h" #ifdef HIPE #include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */ #include "hipe_signal.h" /* for hipe_signal_init() */ @@ -155,9 +155,6 @@ erts_atomic32_t erts_writing_erl_crash_dump; erts_tsd_key_t erts_is_crash_dumping_key; int erts_initialized = 0; - -int erts_use_sender_punish; - /* * Configurable parameters. 
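The erl_init.c changes above and below replace the old erts_sys_getenv__()/erts_sys_getenv_raw() lookups with erts_sys_explicit_host_getenv() and erts_sys_explicit_8bit_getenv(). As the diff shows, the success value is inverted at every call site: the old calls were tested against 0, the new ones against 1. A minimal sketch of the new pattern, mirroring the ERL_THREAD_POOL_SIZE lookup above:

    envbufsz = sizeof(envbuf);
    /* the new lookup reports "found" as 1, where the old API reported 0 */
    if (erts_sys_explicit_host_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 1)
        erts_async_max_threads = atoi(envbuf);
    else
        erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS;
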
*/ @@ -241,7 +238,7 @@ progname(char *fullname) { int i; - i = strlen(fullname); + i = sys_strlen(fullname); while (i >= 0) { if ((fullname[i] != '/') && (fullname[i] != '\\')) i--; @@ -758,8 +755,6 @@ early_init(int *argc, char **argv) /* erts_initialized = 0; - erts_use_sender_punish = 1; - erts_pre_early_init_cpu_topology(&max_reader_groups, &ncpu, &ncpuonln, @@ -803,8 +798,9 @@ early_init(int *argc, char **argv) /* envbufsz = sizeof(envbuf); - /* erts_sys_getenv(_raw)() not initialized yet; need erts_sys_getenv__() */ - if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0) + /* erts_osenv hasn't been initialized yet, so we need to fall back to + * erts_sys_explicit_host_getenv() */ + if (erts_sys_explicit_host_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 1) erts_async_max_threads = atoi(envbuf); else erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS; @@ -814,7 +810,7 @@ early_init(int *argc, char **argv) /* if (argc && argv) { int i = 1; while (i < *argc) { - if (strcmp(argv[i], "--") == 0) { /* end of emulator options */ + if (sys_strcmp(argv[i], "--") == 0) { /* end of emulator options */ i++; break; } @@ -896,7 +892,7 @@ early_init(int *argc, char **argv) /* else if (argv[i][2] == 'D') { char *arg; char *type = argv[i]+3; - if (strncmp(type, "Pcpu", 4) == 0) { + if (sys_strncmp(type, "Pcpu", 4) == 0) { int ptot, ponln; arg = get_arg(argv[i]+7, argv[i+1], &i); switch (sscanf(arg, "%d:%d", &ptot, &ponln)) { @@ -933,7 +929,7 @@ early_init(int *argc, char **argv) /* VERBOSE(DEBUG_SYSTEM, ("using %d:%d dirty CPU scheduler percentages\n", dirty_cpu_scheds_pctg, dirty_cpu_scheds_onln_pctg)); - } else if (strncmp(type, "cpu", 3) == 0) { + } else if (sys_strncmp(type, "cpu", 3) == 0) { int tot, onln; arg = get_arg(argv[i]+6, argv[i+1], &i); switch (sscanf(arg, "%d:%d", &tot, &onln)) { @@ -984,7 +980,7 @@ early_init(int *argc, char **argv) /* } VERBOSE(DEBUG_SYSTEM, ("using %d:%d dirty CPU scheduler(s)\n", tot, onln)); - } else if (strncmp(type, "io", 2) == 0) { + } else if (sys_strncmp(type, "io", 2) == 0) { arg = get_arg(argv[i]+5, argv[i+1], &i); dirty_io_scheds = atoi(arg); if (dirty_io_scheds < 0 || @@ -1210,20 +1206,20 @@ erl_start(int argc, char **argv) &time_warp_mode); envbufsz = sizeof(envbuf); - if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0) + if (erts_sys_explicit_8bit_getenv(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 1) user_requested_db_max_tabs = atoi(envbuf); else user_requested_db_max_tabs = 0; envbufsz = sizeof(envbuf); - if (erts_sys_getenv_raw("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) { + if (erts_sys_explicit_8bit_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 1) { Uint16 max_gen_gcs = atoi(envbuf); erts_atomic32_set_nob(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs); } envbufsz = sizeof(envbuf); - if (erts_sys_getenv_raw("ERL_MAX_PORTS", envbuf, &envbufsz) == 0) { + if (erts_sys_explicit_8bit_getenv("ERL_MAX_PORTS", envbuf, &envbufsz) == 1) { port_tab_sz = atoi(envbuf); port_tab_sz_ignore_files = 1; } @@ -1246,7 +1242,7 @@ erl_start(int argc, char **argv) if (argv[i][0] != '-') { erts_usage(); } - if (strcmp(argv[i], "--") == 0) { /* end of emulator options */ + if (sys_strcmp(argv[i], "--") == 0) { /* end of emulator options */ i++; break; } @@ -1274,12 +1270,12 @@ erl_start(int argc, char **argv) ("using display items %d\n",display_items)); break; case 'p': - if (!strncmp(argv[i],"-pc",3)) { + if (!sys_strncmp(argv[i],"-pc",3)) { int printable_chars = ERL_PRINTABLE_CHARACTERS_LATIN1; arg = 
get_arg(argv[i]+3, argv[i+1], &i); - if (!strcmp(arg,"unicode")) { + if (!sys_strcmp(arg,"unicode")) { printable_chars = ERL_PRINTABLE_CHARACTERS_UNICODE; - } else if (strcmp(arg,"latin1")) { + } else if (sys_strcmp(arg,"latin1")) { erts_fprintf(stderr, "bad range of printable " "characters: %s\n", arg); erts_usage(); @@ -1291,7 +1287,7 @@ erl_start(int argc, char **argv) erts_usage(); } case 'f': - if (!strncmp(argv[i],"-fn",3)) { + if (!sys_strncmp(argv[i],"-fn",3)) { int warning_type = ERL_FILENAME_WARNING_WARNING; arg = get_arg(argv[i]+3, argv[i+1], &i); switch (*arg) { @@ -1474,9 +1470,9 @@ erl_start(int argc, char **argv) } } else if (has_prefix("maxk", sub_param)) { arg = get_arg(sub_param+4, argv[i+1], &i); - if (strcmp(arg,"true") == 0) { + if (sys_strcmp(arg,"true") == 0) { H_MAX_FLAGS |= MAX_HEAP_SIZE_KILL; - } else if (strcmp(arg,"false") == 0) { + } else if (sys_strcmp(arg,"false") == 0) { H_MAX_FLAGS &= ~MAX_HEAP_SIZE_KILL; } else { erts_fprintf(stderr, "bad max heap kill %s\n", arg); @@ -1485,9 +1481,9 @@ erl_start(int argc, char **argv) VERBOSE(DEBUG_SYSTEM, ("using max heap kill %d\n", H_MAX_FLAGS)); } else if (has_prefix("maxel", sub_param)) { arg = get_arg(sub_param+5, argv[i+1], &i); - if (strcmp(arg,"true") == 0) { + if (sys_strcmp(arg,"true") == 0) { H_MAX_FLAGS |= MAX_HEAP_SIZE_LOG; - } else if (strcmp(arg,"false") == 0) { + } else if (sys_strcmp(arg,"false") == 0) { H_MAX_FLAGS &= ~MAX_HEAP_SIZE_LOG; } else { erts_fprintf(stderr, "bad max heap error logger %s\n", arg); @@ -1605,7 +1601,7 @@ erl_start(int argc, char **argv) case 'P': /* set maximum number of processes */ arg = get_arg(argv[i]+2, argv[i+1], &i); - if (strcmp(arg, "legacy") == 0) + if (sys_strcmp(arg, "legacy") == 0) legacy_proc_tab = 1; else { errno = 0; @@ -1621,7 +1617,7 @@ erl_start(int argc, char **argv) case 'Q': /* set maximum number of ports */ arg = get_arg(argv[i]+2, argv[i+1], &i); - if (strcmp(arg, "legacy") == 0) + if (sys_strcmp(arg, "legacy") == 0) legacy_port_tab = 1; else { errno = 0; @@ -1639,11 +1635,11 @@ erl_start(int argc, char **argv) case 'S' : /* Was handled in early_init() just read past it */ if (argv[i][2] == 'D') { char* type = argv[i]+3; - if (strcmp(type, "Pcpu") == 0) + if (sys_strcmp(type, "Pcpu") == 0) (void) get_arg(argv[i]+7, argv[i+1], &i); - if (strcmp(type, "cpu") == 0) + if (sys_strcmp(type, "cpu") == 0) (void) get_arg(argv[i]+6, argv[i+1], &i); - else if (strcmp(type, "io") == 0) + else if (sys_strcmp(type, "io") == 0) (void) get_arg(argv[i]+5, argv[i+1], &i); } else if (argv[i][2] == 'P') (void) get_arg(argv[i]+3, argv[i+1], &i); @@ -1750,6 +1746,7 @@ erl_start(int argc, char **argv) } else if (has_prefix("ecio", sub_param)) { /* ignore argument, eager check io no longer used */ + arg = get_arg(sub_param+4, argv[i+1], &i); } else if (has_prefix("pp", sub_param)) { arg = get_arg(sub_param+2, argv[i+1], &i); @@ -1764,8 +1761,6 @@ erl_start(int argc, char **argv) erts_usage(); } } - else if (sys_strcmp("nsp", sub_param) == 0) - erts_use_sender_punish = 0; else if (has_prefix("tbt", sub_param)) { arg = get_arg(sub_param+3, argv[i+1], &i); res = erts_init_scheduler_bind_type_string(arg); diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c index 634509f880..2f70e7996e 100644 --- a/erts/emulator/beam/erl_instrument.c +++ b/erts/emulator/beam/erl_instrument.c @@ -103,7 +103,7 @@ static struct { static void ERTS_INLINE atom_init(Eterm *atom, const char *name) { - *atom = am_atom_put((char *) name, strlen(name)); + *atom = 
am_atom_put((char *) name, sys_strlen(name)); } #define AM_INIT(AM) atom_init(&am.AM, #AM) diff --git a/erts/emulator/beam/erl_io_queue.c b/erts/emulator/beam/erl_io_queue.c index 40d69ea6b0..d779d1031a 100644 --- a/erts/emulator/beam/erl_io_queue.c +++ b/erts/emulator/beam/erl_io_queue.c @@ -801,12 +801,11 @@ static Eterm iol2v_make_sub_bin(iol2v_state_t *state, Eterm bin_term, ERTS_GET_REAL_BIN(bin_term, orig_pb_term, byte_offset, bit_offset, bit_size); - (void)bit_offset; - (void)bit_size; + ASSERT(bit_size == 0); sb->thing_word = HEADER_SUB_BIN; + sb->bitoffs = bit_offset; sb->bitsize = 0; - sb->bitoffs = 0; sb->orig = orig_pb_term; sb->is_writable = 0; @@ -817,24 +816,15 @@ static Eterm iol2v_make_sub_bin(iol2v_state_t *state, Eterm bin_term, } static Eterm iol2v_promote_acc(iol2v_state_t *state) { - ProcBin *pb; - - state->acc = erts_bin_realloc(state->acc, state->acc_size); - - pb = (ProcBin*)HAlloc(state->process, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = state->acc_size; - pb->val = state->acc; - pb->bytes = (byte*)(state->acc)->orig_bytes; - pb->flags = 0; - pb->next = MSO(state->process).first; - OH_OVERHEAD(&(MSO(state->process)), pb->size / sizeof(Eterm)); - MSO(state->process).first = (struct erl_off_heap_header*)pb; + Eterm bin; + bin = erts_build_proc_bin(&MSO(state->process), + HAlloc(state->process, PROC_BIN_SIZE), + erts_bin_realloc(state->acc, state->acc_size)); state->acc_size = 0; state->acc = NULL; - return make_binary(pb); + return bin; } /* Destructively enqueues a term to the result list, saving us the hassle of @@ -984,7 +974,7 @@ static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) { parent_header = binary_val(parent_binary); binary_size = binary_size(bin_term); - if (bit_offset != 0 || bit_size != 0) { + if (bit_size != 0) { return 0; } else if (binary_size == 0) { state->bytereds_spent += 1; @@ -1026,8 +1016,16 @@ static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) { * then just copy it into the accumulator. 
*/ iol2v_expand_acc(state, binary_size); - sys_memcpy(&(state->acc)->orig_bytes[state->acc_size], - binary_data, binary_size); + if (ERTS_LIKELY(bit_offset == 0)) { + sys_memcpy(&(state->acc)->orig_bytes[state->acc_size], + binary_data, binary_size); + } else { + ASSERT(binary_size <= ERTS_UWORD_MAX / 8); + + erts_copy_bits(binary_data, bit_offset, 1, + (byte*)&(state->acc)->orig_bytes[state->acc_size], 0, 1, + binary_size * 8); + } state->acc_size += binary_size; } else { @@ -1038,8 +1036,16 @@ static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) { iol2v_expand_acc(state, spill); - sys_memcpy(&(state->acc)->orig_bytes[state->acc_size], - binary_data, spill); + if (ERTS_LIKELY(bit_offset == 0)) { + sys_memcpy(&(state->acc)->orig_bytes[state->acc_size], + binary_data, spill); + } else { + ASSERT(binary_size <= ERTS_UWORD_MAX / 8); + + erts_copy_bits(binary_data, bit_offset, 1, + (byte*)&(state->acc)->orig_bytes[state->acc_size], 0, 1, + spill * 8); + } state->acc_size += spill; @@ -1188,7 +1194,10 @@ BIF_RETTYPE iolist_to_iovec_1(BIF_ALIST_1) { if (is_nil(BIF_ARG_1)) { BIF_RET(NIL); } else if (is_binary(BIF_ARG_1)) { - if (binary_size(BIF_ARG_1) != 0) { + if (binary_bitsize(BIF_ARG_1) != 0) { + ASSERT(!(BIF_P->flags & F_DISABLE_GC)); + BIF_ERROR(BIF_P, BADARG); + } else if (binary_size(BIF_ARG_1) != 0) { Eterm *hp = HAlloc(BIF_P, 2); BIF_RET(CONS(hp, BIF_ARG_1, NIL)); diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 4cdef0200f..773b138d92 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -86,6 +86,10 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "hipe_mfait_lock", NULL }, #endif { "nodes_monitors", NULL }, + { "meta_name_tab", "address" }, + { "db_tab", "address" }, + { "db_tab_fix", "address" }, + { "db_hash_slot", "address" }, { "resource_monitors", "address" }, { "driver_list", NULL }, { "proc_link", "pid" }, @@ -95,12 +99,8 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "dist_entry_links", "address" }, { "code_write_permission", NULL }, { "purge_state", NULL }, - { "meta_name_tab", "address" }, - { "db_tab", "address" }, { "proc_status", "pid" }, { "proc_trace", "pid" }, - { "db_tab_fix", "address" }, - { "db_hash_slot", "address" }, { "node_table", NULL }, { "dist_table", NULL }, { "sys_tracers", NULL }, @@ -109,7 +109,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "fun_tab", NULL }, { "environ", NULL }, { "release_literal_areas", NULL }, - { "efile_drv", "address" }, { "drv_ev_state_grow", NULL, }, { "drv_ev_state", "address" }, { "safe_hash", "address" }, @@ -170,9 +169,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "save_ops_lock", NULL }, #endif #endif -#ifdef USE_VM_PROBES - { "efile_drv dtrace mutex", NULL }, -#endif { "mtrace_buf", NULL }, { "os_monotonic_time", NULL }, { "erts_alloc_hard_debug", NULL }, @@ -263,7 +259,7 @@ static ERTS_INLINE void lc_free(void *p) { erts_lc_free_block_t *fb = (erts_lc_free_block_t *) p; #ifdef DEBUG - memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t)); + sys_memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t)); #endif lc_lock(); fb->next = free_blocks; @@ -293,12 +289,12 @@ static void *lc_core_alloc(void) } for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) { #ifdef DEBUG - memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t)); + sys_memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t)); #endif fbs[i].next = &fbs[i+1]; } #ifdef DEBUG - memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1], + 
sys_memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1], 0xdf, sizeof(erts_lc_free_block_t)); #endif lc_lock(); @@ -699,7 +695,7 @@ erts_lc_get_lock_order_id(char *name) erts_fprintf(stderr, "Missing lock name\n"); else { for (i = 0; i < ERTS_LOCK_ORDER_SIZE; i++) - if (strcmp(erts_lock_order[i].name, name) == 0) + if (sys_strcmp(erts_lock_order[i].name, name) == 0) return i; erts_fprintf(stderr, "Lock name '%s' missing in lock order " @@ -1326,12 +1322,12 @@ erts_lc_init(void) static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE]; for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) { #ifdef DEBUG - memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t)); + sys_memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t)); #endif fbs[i].next = &fbs[i+1]; } #ifdef DEBUG - memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1], + sys_memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1], 0xdf, sizeof(erts_lc_free_block_t)); #endif fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL; diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c index f0c54e05f7..8047a9567f 100644 --- a/erts/emulator/beam/erl_map.c +++ b/erts/emulator/beam/erl_map.c @@ -91,7 +91,6 @@ static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_ static Export hashmap_merge_trap_export; static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1); static Uint hashmap_subtree_size(Eterm node); -static Eterm hashmap_to_list(Process *p, Eterm map, Sint n); static Eterm hashmap_keys(Process *p, Eterm map); static Eterm hashmap_values(Process *p, Eterm map); static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm node, Eterm *value); @@ -139,80 +138,6 @@ BIF_RETTYPE map_size_1(BIF_ALIST_1) { BIF_ERROR(BIF_P, BADMAP); } -/* maps:to_list/1 */ - -BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) { - if (is_flatmap(BIF_ARG_1)) { - Uint n; - Eterm* hp; - Eterm *ks,*vs, res, tup; - flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1); - - ks = flatmap_get_keys(mp); - vs = flatmap_get_values(mp); - n = flatmap_get_size(mp); - hp = HAlloc(BIF_P, (2 + 3) * n); - res = NIL; - - while(n--) { - tup = TUPLE2(hp, ks[n], vs[n]); hp += 3; - res = CONS(hp, tup, res); hp += 2; - } - - BIF_RET(res); - } else if (is_hashmap(BIF_ARG_1)) { - return hashmap_to_list(BIF_P, BIF_ARG_1, -1); - } - - BIF_P->fvalue = BIF_ARG_1; - BIF_ERROR(BIF_P, BADMAP); -} - -/* erts_internal:maps_to_list/2 - * - * This function should be removed once iterators are in place. - * Never document it. - * Never encourage its usage. - * - * A negative value in ARG 2 means the entire map. - */ - -BIF_RETTYPE erts_internal_maps_to_list_2(BIF_ALIST_2) { - Sint m; - if (term_to_Sint(BIF_ARG_2, &m)) { - if (is_flatmap(BIF_ARG_1)) { - Uint n; - Eterm* hp; - Eterm *ks,*vs, res, tup; - flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1); - - ks = flatmap_get_keys(mp); - vs = flatmap_get_values(mp); - n = flatmap_get_size(mp); - - if (m >= 0) { - n = m < n ? 
m : n; - } - - hp = HAlloc(BIF_P, (2 + 3) * n); - res = NIL; - - while(n--) { - tup = TUPLE2(hp, ks[n], vs[n]); hp += 3; - res = CONS(hp, tup, res); hp += 2; - } - - BIF_RET(res); - } else if (is_hashmap(BIF_ARG_1)) { - return hashmap_to_list(BIF_P, BIF_ARG_1, m); - } - BIF_P->fvalue = BIF_ARG_1; - BIF_ERROR(BIF_P, BADMAP); - } - BIF_ERROR(BIF_P, BADARG); -} - - /* maps:find/2 * return value if key *matches* a key in the map */ @@ -1962,45 +1887,31 @@ BIF_RETTYPE maps_values_1(BIF_ALIST_1) { BIF_ERROR(BIF_P, BADMAP); } -static Eterm hashmap_to_list(Process *p, Eterm node, Sint m) { - DECLARE_WSTACK(stack); - Eterm *hp, *kv; - Eterm tup, res = NIL; - Uint n = hashmap_size(node); - - if (m >= 0) { - n = m < n ? m : n; - } - - hp = HAlloc(p, n * (2 + 3)); - hashmap_iterator_init(&stack, node, 0); - while (n--) { - kv = hashmap_iterator_next(&stack); - ASSERT(kv != NULL); - tup = TUPLE2(hp, CAR(kv), CDR(kv)); - hp += 3; - res = CONS(hp, tup, res); - hp += 2; - } - DESTROY_WSTACK(stack); - return res; -} - -void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) { - Eterm hdr = *hashmap_val(node); +static ERTS_INLINE +Uint hashmap_node_size(Eterm hdr, Eterm **nodep) +{ Uint sz; switch(hdr & _HEADER_MAP_SUBTAG_MASK) { case HAMT_SUBTAG_HEAD_ARRAY: sz = 16; + if (nodep) ++*nodep; break; case HAMT_SUBTAG_HEAD_BITMAP: + if (nodep) ++*nodep; case HAMT_SUBTAG_NODE_BITMAP: sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); + ASSERT(sz < 17); break; default: erts_exit(ERTS_ABORT_EXIT, "bad header"); } + return sz; +} + +void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) { + Eterm hdr = *hashmap_val(node); + Uint sz = hashmap_node_size(hdr, NULL); WSTACK_PUSH3((*s), (UWord)THE_NON_VALUE, /* end marker */ (UWord)(!reverse ? 0 : sz+1), @@ -2024,20 +1935,7 @@ Eterm* hashmap_iterator_next(ErtsWStack* s) { ptr = boxed_val(node); hdr = *ptr; ASSERT(is_header(hdr)); - switch(hdr & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: - ptr++; - sz = 16; - break; - case HAMT_SUBTAG_HEAD_BITMAP: - ptr++; - case HAMT_SUBTAG_NODE_BITMAP: - sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); - ASSERT(sz < 17); - break; - default: - erts_exit(ERTS_ABORT_EXIT, "bad header"); - } + sz = hashmap_node_size(hdr, &ptr); idx++; @@ -2074,20 +1972,7 @@ Eterm* hashmap_iterator_prev(ErtsWStack* s) { ptr = boxed_val(node); hdr = *ptr; ASSERT(is_header(hdr)); - switch(hdr & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: - ptr++; - sz = 16; - break; - case HAMT_SUBTAG_HEAD_BITMAP: - ptr++; - case HAMT_SUBTAG_NODE_BITMAP: - sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); - ASSERT(sz < 17); - break; - default: - erts_exit(ERTS_ERROR_EXIT, "bad header"); - } + sz = hashmap_node_size(hdr, &ptr); if (idx > sz) idx = sz; @@ -3061,6 +2946,363 @@ static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]) } +/** + * In hashmap the Path is a bit pattern that describes + * which slot we should traverse in each hashmap node. + * Since each hashmap node can only be up to 16 elements + * large we use 4 bits per level in the path. + * + * So a Path with value 0x110 will first get the 0:th + * slot in the head node, and then the 1:st slot in the + * resulting node and then finally the 1:st slot in the + * node beneath. If that slot is not a leaf, then the path + * continues down the 0:th slot until it finds a leaf. + * + * Once the leaf has been found, the return value is created + * by traversing the tree using the the stack that was built + * when searching for the first leaf to return. 
+ * + * The index can become a bignum, which complicates the code + * a bit. However it should be very rare that this happens + * even on a 32bit system as you would need a tree of depth + * 7 or more. + * + * If the number of elements remaining in the map is greater + * than how many we want to return, we build a new Path, using + * the stack, that points to the next leaf. + * + * The third argument to this function controls how the data + * is returned. + * + * iterator: The key-value associations are to be used by + * maps:iterator. The return has this format: + * {K1,V1,{K2,V2,none | [Path | Map]}} + * this makes the maps:next function very simple + * and performant. + * + * list(): The key-value associations are to be used by + * maps:to_list. The return has this format: + * [Path, Map | [{K1,V1},{K2,V2} | BIF_ARG_3]] + * or if no more associations remain + * [{K1,V1},{K2,V2} | BIF_ARG_3] + */ + +#define PATH_ELEM_SIZE 4 +#define PATH_ELEM_MASK 0xf +#define PATH_ELEM(PATH) ((PATH) & PATH_ELEM_MASK) +#define PATH_ELEMS_PER_DIGIT (sizeof(ErtsDigit) * 8 / PATH_ELEM_SIZE) + +BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { + + Eterm path, map; + enum { iterator, list } type; + + path = BIF_ARG_1; + map = BIF_ARG_2; + + if (!is_map(map)) + BIF_ERROR(BIF_P, BADARG); + + if (BIF_ARG_3 == am_iterator) { + type = iterator; + } else if (is_nil(BIF_ARG_3) || is_list(BIF_ARG_3)) { + type = list; + } else { + BIF_ERROR(BIF_P, BADARG); + } + + if (is_flatmap(map)) { + Uint n; + Eterm *ks,*vs, res, *hp; + flatmap_t *mp = (flatmap_t*)flatmap_val(map); + + ks = flatmap_get_keys(mp); + vs = flatmap_get_values(mp); + n = flatmap_get_size(mp); + + if (!is_small(BIF_ARG_1) || n < unsigned_val(BIF_ARG_1)) + BIF_ERROR(BIF_P, BADARG); + + if (type == iterator) { + hp = HAlloc(BIF_P, 4 * n); + res = am_none; + + while(n--) { + res = TUPLE3(hp, ks[n], vs[n], res); hp += 4; + } + } else { + hp = HAlloc(BIF_P, (2 + 3) * n); + res = BIF_ARG_3; + + while(n--) { + Eterm tup = TUPLE2(hp, ks[n], vs[n]); hp += 3; + res = CONS(hp, tup, res); hp += 2; + } + } + + BIF_RET(res); + } else { + Uint curr_path; + Uint path_length = 0; + Uint *path_rest = NULL; + int i, elems, orig_elems; + Eterm node = map, res, *path_ptr = NULL, *hp; + + /* A stack WSTACK is used when traversing the hashmap. + * It contains: node, idx, sz, ptr + * + * `node` is not really needed, but it is very nice to + * have when debugging. + * + * `idx` always points to the next un-explored entry in + * a node. If there are no more un-explored entries, + * `idx` is equal to `sz`. + * + * `sz` is the number of elements in the node. + * + * `ptr` is a pointer to where the elements of the node begins. + */ + DECLARE_WSTACK(stack); + + ASSERT(is_hashmap(node)); + +/* How many elements we return in one call depends on the number of reductions + * that the process has left to run. In debug we return fewer elements to test + * the Path implementation better. + * + * Also, when the path is 0 (i.e. for the first call) we limit the number of + * elements to MAP_SMALL_MAP_LIMIT in order to not use a huge amount of heap + * when only the first X associations in the hashmap was needed. 
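The block comment above describes the path as a sequence of 4-bit slot indices consumed from the least-significant end. Using the PATH_ELEM* macros just defined, the 0x110 example from that comment decodes as below (a sketch outside the diff, assuming the surrounding definitions); once all nibbles of the leading Uint are used, the loop switches to the next ErtsDigit of a bignum path, or keeps following slot 0 if the path is exhausted.

    Uint curr_path = 0x110;

    PATH_ELEM(curr_path);              /* 0x0: take slot 0 in the head node   */
    curr_path >>= PATH_ELEM_SIZE;      /* curr_path is now 0x11               */

    PATH_ELEM(curr_path);              /* 0x1: take slot 1 in the next node   */
    curr_path >>= PATH_ELEM_SIZE;      /* curr_path is now 0x1                */

    PATH_ELEM(curr_path);              /* 0x1: take slot 1 in the node below  */
    curr_path >>= PATH_ELEM_SIZE;      /* 0 from here on, i.e. follow slot 0  */
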
+ */ +#if defined(DEBUG) +#define FCALLS_ELEMS(BIF_P) ((BIF_P->fcalls / 4) & 0xF) +#else +#define FCALLS_ELEMS(BIF_P) (BIF_P->fcalls / 4) +#endif + + if (MAX(FCALLS_ELEMS(BIF_P), 1) < hashmap_size(map)) + elems = MAX(FCALLS_ELEMS(BIF_P), 1); + else + elems = hashmap_size(map); + +#undef FCALLS_ELEMS + + if (is_small(path)) { + curr_path = unsigned_val(path); + + if (curr_path == 0 && elems > MAP_SMALL_MAP_LIMIT) { + elems = MAP_SMALL_MAP_LIMIT; + } + } else if (is_big(path)) { + Eterm *big = big_val(path); + if (bignum_header_is_neg(*big)) + BIF_ERROR(BIF_P, BADARG); + path_length = BIG_ARITY(big) - 1; + curr_path = BIG_DIGIT(big, 0); + path_rest = BIG_V(big) + 1; + } else { + BIF_ERROR(BIF_P, BADARG); + } + + if (type == iterator) { + /* iterator uses the format {K, V, {K, V, {K, V, [Path | Map]}}}, + * so each element is 4 words large */ + hp = HAlloc(BIF_P, 4 * elems); + res = am_none; + } else { + /* list used the format [Path, Map, {K,V}, {K,V} | BIF_ARG_3], + * so each element is 2+3 words large */ + hp = HAlloc(BIF_P, (2 + 3) * elems); + res = BIF_ARG_3; + } + + orig_elems = elems; + + /* First we look for the leaf to start at using the + path given. While doing so, we push each map node + and the index onto the stack to use later. */ + for (i = 1; ; i++) { + Eterm *ptr = hashmap_val(node), + hdr = *ptr++; + Uint sz; + + sz = hashmap_node_size(hdr, &ptr); + + if (PATH_ELEM(curr_path) >= sz) + goto badarg; + + WSTACK_PUSH4(stack, node, PATH_ELEM(curr_path)+1, sz, (UWord)ptr); + + /* We have found a leaf, return it and the next X elements */ + if (is_list(ptr[PATH_ELEM(curr_path)])) { + Eterm *lst = list_val(ptr[PATH_ELEM(curr_path)]); + if (type == iterator) { + res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4; + /* Note where we should patch the Iterator is needed */ + path_ptr = hp-1; + } else { + Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3; + res = CONS(hp, tup, res); hp += 2; + } + elems--; + break; + } + + node = ptr[PATH_ELEM(curr_path)]; + + curr_path >>= PATH_ELEM_SIZE; + + if (i == PATH_ELEMS_PER_DIGIT) { + /* Switch to next bignum word if available, + otherwise just follow 0 path */ + i = 0; + if (path_length) { + curr_path = *path_rest; + path_length--; + path_rest++; + } else { + curr_path = 0; + } + } + } + + /* We traverse the hashmap and return at most `elems` elements */ + while(1) { + Eterm *ptr = (Eterm*)WSTACK_POP(stack); + Uint sz = (Uint)WSTACK_POP(stack); + Uint idx = (Uint)WSTACK_POP(stack); + Eterm node = (Eterm)WSTACK_POP(stack); + + while (idx < sz && elems != 0 && is_list(ptr[idx])) { + Eterm *lst = list_val(ptr[idx]); + if (type == iterator) { + res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4; + } else { + Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3; + res = CONS(hp, tup, res); hp += 2; + } + elems--; + idx++; + } + + if (elems == 0) { + if (idx < sz) { + /* There are more elements in this node to explore */ + WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr); + } else { + /* pop stack to find the next value */ + while (!WSTACK_ISEMPTY(stack)) { + Eterm *ptr = (Eterm*)WSTACK_POP(stack); + Uint sz = (Uint)WSTACK_POP(stack); + Uint idx = (Uint)WSTACK_POP(stack); + Eterm node = (Eterm)WSTACK_POP(stack); + if (idx < sz) { + WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr); + break; + } + } + } + break; + } else { + if (idx < sz) { + Eterm hdr; + /* Push next idx in current node */ + WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr); + + /* Push first idx in child node */ + node = ptr[idx]; + ptr = hashmap_val(ptr[idx]); + hdr = *ptr++; + sz 
= hashmap_node_size(hdr, &ptr); + WSTACK_PUSH4(stack, node, 0, sz, (UWord)ptr); + } + } + + /* There are no more element in the hashmap */ + if (WSTACK_ISEMPTY(stack)) { + break; + } + + } + + if (!WSTACK_ISEMPTY(stack)) { + Uint depth = WSTACK_COUNT(stack) / 4 + 1; + /* +1 because we already have the first element in curr_path */ + Eterm *path_digits = NULL; + Uint curr_path = 0; + + /* If the path cannot fit in a small, we allocate a bignum */ + if (depth >= PATH_ELEMS_PER_DIGIT) { + /* We need multiple ErtsDigit's to represent the path */ + int big_size = BIG_NEED_FOR_BITS(depth * PATH_ELEM_SIZE); + hp = HAlloc(BIF_P, big_size); + hp[0] = make_pos_bignum_header(big_size - BIG_NEED_SIZE(0)); + path_digits = hp + big_size - 1; + } + + + /* Pop the stack to create the complete path to the next leaf */ + while(!WSTACK_ISEMPTY(stack)) { + Uint idx; + + (void)WSTACK_POP(stack); + (void)WSTACK_POP(stack); + idx = (Uint)WSTACK_POP(stack)-1; + /* idx - 1 because idx in the stack is pointing to + the next element to fetch. */ + (void)WSTACK_POP(stack); + + depth--; + if (depth % PATH_ELEMS_PER_DIGIT == 0) { + /* Switch to next bignum element */ + path_digits[0] = curr_path; + path_digits--; + curr_path = 0; + } + + curr_path <<= PATH_ELEM_SIZE; + curr_path |= idx; + } + + if (path_digits) { + path_digits[0] = curr_path; + path = make_big(hp); + } else { + /* The Uint could be too large for a small */ + path = erts_make_integer(curr_path, BIF_P); + } + + if (type == iterator) { + hp = HAlloc(BIF_P, 2); + *path_ptr = CONS(hp, path, map); hp += 2; + } else { + hp = HAlloc(BIF_P, 4); + res = CONS(hp, map, res); hp += 2; + res = CONS(hp, path, res); hp += 2; + } + } else { + if (type == iterator) { + HRelease(BIF_P, hp + 4 * elems, hp); + } else { + HRelease(BIF_P, hp + (2+3) * elems, hp); + } + } + BIF_P->fcalls -= 4 * (orig_elems - elems); + DESTROY_WSTACK(stack); + BIF_RET(res); + + badarg: + if (type == iterator) { + HRelease(BIF_P, hp + 4 * elems, hp); + } else { + HRelease(BIF_P, hp + (2+3) * elems, hp); + } + BIF_P->fcalls -= 4 * (orig_elems - elems); + DESTROY_WSTACK(stack); + BIF_ERROR(BIF_P, BADARG); + } +} + /* implementation of builtin emulations */ #if !ERTS_AT_LEAST_GCC_VSN__(3, 4, 0) diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h index c3ccf80b85..718d400e22 100644 --- a/erts/emulator/beam/erl_map.h +++ b/erts/emulator/beam/erl_map.h @@ -64,7 +64,6 @@ typedef struct flatmap_s { #define hashmap_shift_hash(Heap,Hx,Lvl,Key) \ (((++(Lvl)) & 7) ? (Hx) >> 4 : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), Key))) - /* erl_term.h stuff */ #define flatmap_get_values(x) (((Eterm *)(x)) + sizeof(flatmap_t)/sizeof(Eterm)) #define flatmap_get_keys(x) (((Eterm *)tuple_val(((flatmap_t *)(x))->keys)) + 1) diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index abf194cf94..6f7c71ef98 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -616,7 +616,7 @@ erts_try_alloc_message_on_heap(Process *pp, } else { in_message_fragment: - if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) { + if ((*psp) & ERTS_PSFLG_OFF_HEAP_MSGQ) { mp = erts_alloc_message(sz, hpp); *ohpp = sz == 0 ? 
NULL : &mp->hfrag.off_heap; } @@ -1079,8 +1079,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) case am_on_heap: c_p->flags |= F_ON_HEAP_MSGQ; c_p->flags &= ~F_OFF_HEAP_MSGQ; - erts_atomic32_read_bor_nob(&c_p->state, - ERTS_PSFLG_ON_HEAP_MSGQ); /* * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ * if a off heap change is ongoing. It will be adjusted @@ -1106,8 +1104,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) break; case am_off_heap: c_p->flags &= ~F_ON_HEAP_MSGQ; - erts_atomic32_read_band_nob(&c_p->state, - ~ERTS_PSFLG_ON_HEAP_MSGQ); goto change_to_off_heap; default: res = THE_NON_VALUE; /* badarg */ diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 9c8cf84e43..a14f4f51d8 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -167,10 +167,9 @@ typedef struct { Sint len; /* queue length */ /* - * The following two fields are used by the recv_mark/1 and + * The following field is used by the recv_mark/1 and * recv_set/1 instructions. */ - BeamInstr* mark; /* address to rec_loop/2 instruction */ ErtsMessage** saved_last; /* saved last pointer */ } ErlMessageQueue; @@ -236,12 +235,17 @@ typedef struct erl_trace_message_queue__ { (p)->msg.len--; \ if (__mp == NULL) \ (p)->msg.last = (p)->msg.save; \ - (p)->msg.mark = 0; \ } while(0) -/* Reset message save point (after receive match) */ -#define JOIN_MESSAGE(p) \ - (p)->msg.save = &(p)->msg.first +/* + * Reset message save point (after receive match). + * Also invalidate the saved position since it may no + * longer be safe to use. + */ +#define JOIN_MESSAGE(p) do { \ + (p)->msg.save = &(p)->msg.first; \ + (p)->msg.saved_last = 0; \ +} while(0) /* Save current message */ #define SAVE_MESSAGE(p) \ diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c index d659842b7e..d13d6080e1 100644 --- a/erts/emulator/beam/erl_msacc.c +++ b/erts/emulator/beam/erl_msacc.c @@ -81,7 +81,7 @@ void erts_msacc_init(void) { sizeof(Eterm)*ERTS_MSACC_STATE_COUNT); for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) { erts_msacc_state_atoms[i] = am_atom_put(erts_msacc_states[i], - strlen(erts_msacc_states[i])); + sys_strlen(erts_msacc_states[i])); } } @@ -191,7 +191,7 @@ Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory) { map->keys = key; hp[0] = state_map; hp[1] = msacc->id; - hp[2] = am_atom_put(msacc->type,strlen(msacc->type)); + hp[2] = am_atom_put(msacc->type,sys_strlen(msacc->type)); return make_flatmap(map); } diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h index 2588dec903..895b1ae319 100644 --- a/erts/emulator/beam/erl_msacc.h +++ b/erts/emulator/beam/erl_msacc.h @@ -159,12 +159,12 @@ struct erl_msacc_t_ { #ifdef ERTS_ENABLE_MSACC -extern erts_tsd_key_t erts_msacc_key; +extern erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key); #ifdef ERTS_MSACC_ALWAYS_ON #define erts_msacc_enabled 1 #else -extern int erts_msacc_enabled; +extern int ERTS_WRITE_UNLIKELY(erts_msacc_enabled); #endif #define ERTS_MSACC_TSD_GET() erts_tsd_get(erts_msacc_key) diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c index f2a660f085..2807b443a1 100644 --- a/erts/emulator/beam/erl_mtrace.c +++ b/erts/emulator/beam/erl_mtrace.c @@ -314,7 +314,7 @@ disable_trace(int error, char *reason, int eno) erts_fprintf(stderr, "%s: %s\n", mt_dis, reason); else { eno_str = erl_errno_id(eno); - if (strcmp(eno_str, "unknown") == 0) + if (sys_strcmp(eno_str, "unknown") == 0) 
erts_fprintf(stderr, "%s: %s: %d\n", mt_dis, reason, eno); else erts_fprintf(stderr, "%s: %s: %s\n", mt_dis, reason, eno_str); @@ -401,9 +401,9 @@ write_trace_header(char *nodename, char *pid, char *hostname) PUT_UI32(tracep, ERTS_MT_MAJOR_VSN); PUT_UI32(tracep, ERTS_MT_MINOR_VSN); - n_len = strlen(nodename); - h_len = strlen(hostname); - p_len = strlen(pid); + n_len = sys_strlen(nodename); + h_len = sys_strlen(hostname); + p_len = sys_strlen(pid); hdr_prolog_len = (2*UI32_SZ + 3*UI16_SZ + 3*UI32_SZ @@ -436,15 +436,15 @@ write_trace_header(char *nodename, char *pid, char *hostname) PUT_UI32(tracep, start_time.usec); PUT_UI8(tracep, (byte) n_len); - memcpy((void *) tracep, (void *) nodename, n_len); + sys_memcpy((void *) tracep, (void *) nodename, n_len); tracep += n_len; PUT_UI8(tracep, (byte) h_len); - memcpy((void *) tracep, (void *) hostname, h_len); + sys_memcpy((void *) tracep, (void *) hostname, h_len); tracep += h_len; PUT_UI8(tracep, (byte) p_len); - memcpy((void *) tracep, (void *) pid, p_len); + sys_memcpy((void *) tracep, (void *) pid, p_len); tracep += p_len; ASSERT(startp + hdr_prolog_len == tracep); @@ -472,7 +472,7 @@ write_trace_header(char *nodename, char *pid, char *hostname) str = ERTS_ALC_A2AD(i); ASSERT(str); - str_len = strlen(str); + str_len = sys_strlen(str); if (str_len >= (1 << 8)) { disable_trace(1, "Excessively large allocator string", 0); return 0; @@ -493,7 +493,7 @@ write_trace_header(char *nodename, char *pid, char *hostname) PUT_UI16(tracep, aflags); PUT_UI16(tracep, (Uint16) i); PUT_UI8( tracep, (byte) str_len); - memcpy((void *) tracep, (void *) str, str_len); + sys_memcpy((void *) tracep, (void *) str, str_len); tracep += str_len; if (erts_allctrs_info[i].alloc_util) { PUT_UI8(tracep, 2); @@ -519,7 +519,7 @@ write_trace_header(char *nodename, char *pid, char *hostname) str = ERTS_ALC_N2TD(i); ASSERT(str); - str_len = strlen(str); + str_len = sys_strlen(str); if (str_len >= (1 << 8)) { disable_trace(1, "Excessively large type string", 0); return 0; @@ -543,7 +543,7 @@ write_trace_header(char *nodename, char *pid, char *hostname) PUT_UI16(tracep, nflags); PUT_UI16(tracep, (Uint16) i); PUT_UI8(tracep, (byte) str_len); - memcpy((void *) tracep, (void *) str, str_len); + sys_memcpy((void *) tracep, (void *) str, str_len); tracep += str_len; PUT_UI16(tracep, no); ASSERT(startp + entry_sz == tracep); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index f7f12efe28..c60cc7fecf 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -237,9 +237,11 @@ static void cache_env(ErlNifEnv* env); static void full_flush_env(ErlNifEnv *env); static void flush_env(ErlNifEnv* env); -/* Temporary object header, auto-deallocated when NIF returns - * or when independent environment is cleared. - */ +/* Temporary object header, auto-deallocated when NIF returns or when + * independent environment is cleared. + * + * The payload can be accessed with &tmp_obj_ptr[1] but keep in mind that its + * first element must not require greater alignment than `next`. */ struct enif_tmp_obj_t { struct enif_tmp_obj_t* next; void (*dtor)(struct enif_tmp_obj_t*); @@ -256,6 +258,46 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env) } } +/* Whether the given environment is bound to a process and will be cleaned up + * when the NIF returns. It's safe to use temp_alloc for objects in + * env->tmp_obj_list when this is true. 
*/ +static ERTS_INLINE int is_proc_bound(ErlNifEnv *env) +{ + return env->mod_nif != NULL; +} + +/* Allocates and attaches an object to the given environment, running its + * destructor when the environment is cleared. To avoid temporary variables the + * address of the allocated object is returned instead of the enif_tmp_obj_t. + * + * The destructor *must* call `erts_free(tmp_obj->allocator, tmp_obj)` to free + * the object. If the destructor needs to refer to the allocated object its + * address will be &tmp_obj[1]. */ +static ERTS_INLINE void *alloc_tmp_obj(ErlNifEnv *env, size_t size, + void (*dtor)(struct enif_tmp_obj_t*)) { + struct enif_tmp_obj_t *tmp_obj; + ErtsAlcType_t allocator; + + allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF; + + tmp_obj = erts_alloc(allocator, sizeof(struct enif_tmp_obj_t) + MAX(1, size)); + + tmp_obj->next = env->tmp_obj_list; + tmp_obj->allocator = allocator; + tmp_obj->dtor = dtor; + + env->tmp_obj_list = tmp_obj; + + return (void*)&tmp_obj[1]; +} + +/* Generic destructor for objects allocated through alloc_tmp_obj that don't + * care about their payload. */ +static void tmp_alloc_dtor(struct enif_tmp_obj_t *tmp_obj) +{ + erts_free(tmp_obj->allocator, tmp_obj); +} + void erts_post_nif(ErlNifEnv* env) { erts_unblock_fpe(env->fpe_was_unmasked); @@ -446,6 +488,7 @@ static void cache_env(ErlNifEnv* env) env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size; } } + void* enif_priv_data(ErlNifEnv* env) { return env->mod_nif->priv_data; @@ -486,7 +529,7 @@ setup_nif_env(struct enif_msg_environment_t* msg_env, msg_env->env.tmp_obj_list = NULL; msg_env->env.proc = &msg_env->phony_proc; msg_env->env.exception_thrown = 0; - memset(&msg_env->phony_proc, 0, sizeof(Process)); + sys_memset(&msg_env->phony_proc, 0, sizeof(Process)); HEAP_START(&msg_env->phony_proc) = phony_heap; HEAP_TOP(&msg_env->phony_proc) = phony_heap; HEAP_LIMIT(&msg_env->phony_proc) = phony_heap; @@ -544,6 +587,9 @@ void enif_clear_env(ErlNifEnv* env) ASSERT(p == menv->env.proc); ASSERT(p->common.id == ERTS_INVALID_PID); ASSERT(MBUF(p) == menv->env.heap_frag); + + free_tmp_objs(env); + if (MBUF(p) != NULL) { erts_cleanup_offheap(&MSO(p)); clear_offheap(&MSO(p)); @@ -555,12 +601,11 @@ void enif_clear_env(ErlNifEnv* env) menv->env.hp = menv->env.hp_end = HEAP_TOP(p); ASSERT(!is_offheap(&MSO(p))); - free_tmp_objs(env); } #ifdef DEBUG static int enif_send_delay = 0; -#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0) +#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 32 == 0) #else #ifdef ERTS_PROC_LOCK_OWN_IMPL #define ERTS_FORCE_ENIF_SEND_DELAY() 0 @@ -1017,11 +1062,6 @@ int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term) return is_number(term); } -static ERTS_INLINE int is_proc_bound(ErlNifEnv* env) -{ - return env->mod_nif != NULL; -} - static void aligned_binary_dtor(struct enif_tmp_obj_t* obj) { erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator); @@ -1056,22 +1096,14 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin) u.tmp->dtor = &aligned_binary_dtor; env->tmp_obj_list = u.tmp; } - bin->bin_term = bin_term; bin->size = binary_size(bin_term); bin->ref_bin = NULL; ADD_READONLY_CHECK(env, bin->data, bin->size); return 1; } -static void tmp_alloc_dtor(struct enif_tmp_obj_t* obj) -{ - erts_free(obj->allocator, obj); -} - int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin) { - struct enif_tmp_obj_t* tobj; - ErtsAlcType_t allocator; ErlDrvSizeT sz; if (is_binary(term)) { return 
enif_inspect_binary(env,term,bin); @@ -1079,7 +1111,6 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin) if (is_nil(term)) { bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */ bin->size = 0; - bin->bin_term = THE_NON_VALUE; bin->ref_bin = NULL; return 1; } @@ -1087,16 +1118,8 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin) return 0; } - allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF; - tobj = erts_alloc(allocator, sz + sizeof(struct enif_tmp_obj_t)); - tobj->allocator = allocator; - tobj->next = env->tmp_obj_list; - tobj->dtor = &tmp_alloc_dtor; - env->tmp_obj_list = tobj; - - bin->data = (unsigned char*) &tobj[1]; + bin->data = alloc_tmp_obj(env, sz, &tmp_alloc_dtor); bin->size = sz; - bin->bin_term = THE_NON_VALUE; bin->ref_bin = NULL; erts_iolist_to_buf(term, (char*) bin->data, sz); ADD_READONLY_CHECK(env, bin->data, bin->size); @@ -1114,7 +1137,6 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin) bin->size = size; bin->data = (unsigned char*) refbin->orig_bytes; - bin->bin_term = THE_NON_VALUE; bin->ref_bin = refbin; return 1; } @@ -1148,12 +1170,10 @@ void enif_release_binary(ErlNifBinary* bin) { if (bin->ref_bin != NULL) { Binary* refbin = bin->ref_bin; - ASSERT(bin->bin_term == THE_NON_VALUE); erts_bin_release(refbin); } #ifdef DEBUG bin->data = NULL; - bin->bin_term = THE_NON_VALUE; bin->ref_bin = NULL; #endif } @@ -1201,11 +1221,12 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env, Sint size; ErtsHeapFactory factory; byte *bp = (byte*) data; + Uint32 flags = 0; - ERTS_CT_ASSERT(ERL_NIF_BIN2TERM_SAFE == ERTS_DIST_EXT_BTT_SAFE); - - if (opts & ~ERL_NIF_BIN2TERM_SAFE) { - return 0; + switch ((Uint32)opts) { + case 0: break; + case ERL_NIF_BIN2TERM_SAFE: flags = ERTS_DIST_EXT_BTT_SAFE; break; + default: return 0; } if ((size = erts_decode_ext_size(bp, data_sz)) < 0) return 0; @@ -1217,7 +1238,7 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env, erts_factory_dummy_init(&factory); } - *term = erts_decode_ext(&factory, &bp, (Uint32)opts); + *term = erts_decode_ext(&factory, &bp, flags); if (is_non_value(*term)) { return 0; @@ -1309,39 +1330,51 @@ int enif_get_string(ErlNifEnv *env, ERL_NIF_TERM list, char* buf, unsigned len, Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin) { - if (bin->bin_term != THE_NON_VALUE) { - return bin->bin_term; - } - else if (bin->ref_bin != NULL) { - Binary* bptr = bin->ref_bin; - ProcBin* pb; - Eterm bin_term; - - /* !! Copy-paste from new_binary() !! */ - pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = bptr->orig_size; - pb->next = MSO(env->proc).first; - MSO(env->proc).first = (struct erl_off_heap_header*) pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; - - OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm)); - bin_term = make_binary(pb); - if (erts_refc_read(&bptr->intern.refc, 1) == 1) { - /* Total ownership transfer */ - bin->ref_bin = NULL; - bin->bin_term = bin_term; - } - return bin_term; - } - else { - flush_env(env); - bin->bin_term = new_binary(env->proc, bin->data, bin->size); - cache_env(env); - return bin->bin_term; + Eterm bin_term; + + if (bin->ref_bin != NULL) { + Binary* binary = bin->ref_bin; + + /* If the binary is smaller than the heap binary limit we'll return a + * heap binary to reduce the number of small refc binaries in the + * system. 
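A hypothetical consumer of the alloc_tmp_obj()/tmp_alloc_dtor() helpers introduced in the erl_nif.c hunks above (my_tmp_dtor and the payload layout are illustrative, not part of the patch): the payload sits directly after the header at &tmp_obj[1], and the destructor must end by freeing the header with erts_free(tmp_obj->allocator, tmp_obj).

    static void my_tmp_dtor(struct enif_tmp_obj_t *tmp_obj)
    {
        byte *payload = (byte*)&tmp_obj[1];       /* data placed right after the header */
        (void)payload;                            /* release whatever the payload holds */
        erts_free(tmp_obj->allocator, tmp_obj);   /* mandatory, per the contract above  */
    }

    /* somewhere in a NIF: the buffer is freed automatically when the NIF
     * returns or the independent environment is cleared */
    byte *buf = alloc_tmp_obj(env, size, &my_tmp_dtor);
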
We can't simply release the refc binary right away however; + * the documentation states that the binary should be considered + * read-only from this point on, which implies that it should still be + * readable. + * + * We could keep it alive until we return by adding it to the temporary + * object list, but that requires an off-heap allocation which is + * potentially quite slow, so we create a dummy ProcBin instead and + * rely on the next minor GC to get rid of it. */ + if (bin->size <= ERL_ONHEAP_BIN_LIMIT) { + ErlHeapBin* hb; + + hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(bin->size)); + hb->thing_word = header_heap_bin(bin->size); + hb->size = bin->size; + + sys_memcpy(hb->data, bin->data, bin->size); + + erts_build_proc_bin(&MSO(env->proc), + alloc_heap(env, PROC_BIN_SIZE), + binary); + + bin_term = make_binary(hb); + } else { + bin_term = erts_build_proc_bin(&MSO(env->proc), + alloc_heap(env, PROC_BIN_SIZE), + binary); + } + + /* Our (possibly shared) ownership has been transferred to the term. */ + bin->ref_bin = NULL; + } else { + flush_env(env); + bin_term = new_binary(env->proc, bin->data, bin->size); + cache_env(env); } + + return bin_term; } Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term, @@ -3291,8 +3324,8 @@ typedef struct { Eterm sublist_start; Eterm sublist_end; - UWord offheap_size; - UWord onheap_size; + UWord referenced_size; + UWord copied_size; UWord iovec_len; } iovec_slice_t; @@ -3302,16 +3335,16 @@ static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *resul result->sublist_start = list; result->sublist_length = 0; - result->offheap_size = 0; - result->onheap_size = 0; + result->referenced_size = 0; + result->copied_size = 0; result->iovec_len = 0; lookahead = result->sublist_start; while (is_list(lookahead)) { - Eterm *binary_header, binary; + UWord byte_size; + Eterm binary; Eterm *cell; - UWord size; cell = list_val(lookahead); binary = CAR(cell); @@ -3320,39 +3353,42 @@ static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *resul return 0; } - size = binary_size(binary); - binary_header = binary_val(binary); + byte_size = binary_size(binary); + + if (byte_size > 0) { + int bit_offset, bit_size; + Eterm parent_binary; + UWord byte_offset; + + int requires_copying; - /* If we're a sub-binary we'll need to check our underlying binary to - * determine whether we're on-heap or not. */ - if(thing_subtag(*binary_header) == SUB_BINARY_SUBTAG) { - ErlSubBin *sb = (ErlSubBin*)binary_header; + ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, + bit_offset, bit_size); - /* Reject bitstrings */ - if((sb->bitoffs + sb->bitsize) > 0) { + (void)byte_offset; + + if (bit_size != 0) { return 0; } - ASSERT(size <= binary_size(sb->orig)); - binary_header = binary_val(sb->orig); - } - - if(thing_subtag(*binary_header) == HEAP_BINARY_SUBTAG) { - ASSERT(size <= ERL_ONHEAP_BIN_LIMIT); + /* If we're unaligned or an on-heap binary we'll need to copy + * ourselves over to a temporary buffer. 
*/ + requires_copying = (bit_offset != 0) || + thing_subtag(*binary_val(parent_binary)) == HEAP_BINARY_SUBTAG; - result->iovec_len += 1; - result->onheap_size += size; - } else { - ASSERT(thing_subtag(*binary_header) == REFC_BINARY_SUBTAG); + if (requires_copying) { + result->copied_size += byte_size; + } else { + result->referenced_size += byte_size; + } - result->iovec_len += 1 + size / MAX_SYSIOVEC_IOVLEN; - result->offheap_size += size; + result->iovec_len += 1 + byte_size / MAX_SYSIOVEC_IOVLEN; } result->sublist_length += 1; lookahead = CDR(cell); - if(result->sublist_length >= max_length) { + if (result->sublist_length >= max_length) { break; } } @@ -3366,7 +3402,9 @@ static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *resul return 1; } -static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) { +static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer, + UWord *copy_offset, ErlNifBinary *result) { + Eterm *parent_header; Eterm parent_binary; @@ -3377,14 +3415,19 @@ static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) { ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size); + ASSERT(bit_size == 0); + parent_header = binary_val(parent_binary); result->size = binary_size(binary); - result->bin_term = binary; if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) { ProcBin *pb = (ProcBin*)parent_header; + if (pb->flags & (PB_IS_WRITABLE | PB_ACTIVE_WRITER)) { + erts_emasculate_writable_binary(pb); + } + ASSERT(pb->val != NULL); ASSERT(byte_offset < pb->size); ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes); @@ -3399,24 +3442,48 @@ static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) { result->data = &((unsigned char*)&hb->data)[byte_offset]; result->ref_bin = NULL; } + + /* If this isn't an *aligned* refc binary, copy its contents to the buffer + * and reference that instead. */ + + if (result->ref_bin == NULL || bit_offset != 0) { + ASSERT(copy_buffer->ref_bin != NULL && copy_buffer->data != NULL); + ASSERT(result->size <= (copy_buffer->size - *copy_offset)); + + if (bit_offset == 0) { + sys_memcpy(©_buffer->data[*copy_offset], + result->data, result->size); + } else { + erts_copy_bits(result->data, bit_offset, 1, + (byte*)©_buffer->data[*copy_offset], 0, 1, + result->size * 8); + } + + result->data = ©_buffer->data[*copy_offset]; + result->ref_bin = copy_buffer->ref_bin; + + *copy_offset += result->size; + } } static int fill_iovec_with_slice(ErlNifEnv *env, iovec_slice_t *slice, ErlNifIOVec *iovec) { - UWord onheap_offset, iovec_idx; - ErlNifBinary onheap_data; + ErlNifBinary copy_buffer = {0}; + UWord copy_offset, iovec_idx; Eterm sublist_iterator; - /* Set up a common refc binary for all on-heap binaries. */ - if (slice->onheap_size > 0) { - if (!enif_alloc_binary(slice->onheap_size, &onheap_data)) { + /* Set up a common refc binary for all on-heap and unaligned binaries. */ + if (slice->copied_size > 0) { + if (!enif_alloc_binary(slice->copied_size, ©_buffer)) { return 0; } + + ASSERT(copy_buffer.ref_bin != NULL); } sublist_iterator = slice->sublist_start; - onheap_offset = 0; + copy_offset = 0; iovec_idx = 0; while (sublist_iterator != slice->sublist_end) { @@ -3424,27 +3491,13 @@ static int fill_iovec_with_slice(ErlNifEnv *env, Eterm *cell; cell = list_val(sublist_iterator); - inspect_raw_binary_data(CAR(cell), &raw_data); - - /* If this isn't a refc binary, copy its contents to the onheap buffer - * and reference that instead. 
*/ - if (raw_data.ref_bin == NULL) { - ASSERT(onheap_offset < onheap_data.size); - ASSERT(slice->onheap_size > 0); - - sys_memcpy(&onheap_data.data[onheap_offset], - raw_data.data, raw_data.size); - - raw_data.data = &onheap_data.data[onheap_offset]; - raw_data.ref_bin = onheap_data.ref_bin; - } - - ASSERT(raw_data.ref_bin != NULL); + marshal_iovec_binary(CAR(cell), ©_buffer, ©_offset, &raw_data); while (raw_data.size > 0) { UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN); ASSERT(iovec_idx < iovec->iovcnt); + ASSERT(raw_data.ref_bin != NULL); iovec->iov[iovec_idx].iov_base = raw_data.data; iovec->iov[iovec_idx].iov_len = chunk_len; @@ -3469,16 +3522,18 @@ static int fill_iovec_with_slice(ErlNifEnv *env, erts_refc_inc(&refc_binary->intern.refc, 1); } - if (slice->onheap_size > 0) { + if (slice->copied_size > 0) { /* Transfer ownership to the iovec; we've taken references to it in * the above loop. */ - enif_release_binary(&onheap_data); + enif_release_binary(©_buffer); } } else { - if (slice->onheap_size > 0) { - /* Attach the binary to our environment and let the GC take care of - * it after returning. */ - enif_make_binary(env, &onheap_data); + if (slice->copied_size > 0) { + /* Attach the binary to our environment and let the next minor GC + * get rid of it. This is slightly faster than using the tmp object + * list since it avoids off-heap allocations. */ + erts_build_proc_bin(&MSO(env->proc), + alloc_heap(env, PROC_BIN_SIZE), copy_buffer.ref_bin); } } @@ -3504,19 +3559,14 @@ static int create_iovec_from_slice(ErlNifEnv *env, alloc_size = binv_offset; alloc_size += slice->iovec_len * sizeof(Binary*); - /* If we have an environment we'll attach the allocated data to it. The - * GC will take care of releasing it later on. */ + /* When the user passes an environment, we attach the iovec to it so + * the user won't have to bother managing it (similar to + * enif_inspect_binary). It'll disappear once the environment is + * cleaned up. 
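For reference, a rough sketch of how a NIF consumes an ErlNifIOVec built by the code above through the public enif_inspect_iovec API; the function name count_bytes and the element limit of 64 are illustrative only.

#include <erl_nif.h>

static ERL_NIF_TERM count_bytes(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM tail;
    ErlNifIOVec vec, *iovec = &vec;
    ErlNifUInt64 total = 0;
    int i;

    /* Inspect at most 64 list elements; anything beyond that is returned
     * in 'tail' for a later call. */
    if (!enif_inspect_iovec(env, 64, argv[0], &tail, &iovec))
        return enif_make_badarg(env);

    for (i = 0; i < iovec->iovcnt; i++)
        total += iovec->iov[i].iov_len;

    /* The iovec is attached to 'env', so no explicit free is needed here. */
    return enif_make_uint64(env, total);
}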
*/ if (env != NULL) { - ErlNifBinary gc_bin; - - if (!enif_alloc_binary(alloc_size, &gc_bin)) { - return 0; - } - - alloc_base = (char*)gc_bin.data; - enif_make_binary(env, &gc_bin); + alloc_base = alloc_tmp_obj(env, alloc_size, &tmp_alloc_dtor); } else { - alloc_base = enif_alloc(alloc_size); + alloc_base = erts_alloc(ERTS_ALC_T_NIF, alloc_size); } iovec = (ErlNifIOVec*)alloc_base; @@ -3525,12 +3575,12 @@ static int create_iovec_from_slice(ErlNifEnv *env, iovec->flags = 0; } - iovec->size = slice->offheap_size + slice->onheap_size; + iovec->size = slice->referenced_size + slice->copied_size; iovec->iovcnt = slice->iovec_len; if(!fill_iovec_with_slice(env, slice, iovec)) { if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) { - enif_free(iovec); + erts_free(ERTS_ALC_T_NIF, iovec); } return 0; @@ -3597,6 +3647,55 @@ int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size) return 1; } +int enif_ioq_peek_head(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *bin_term) { + SysIOVec *iov_entry; + Binary *ref_bin; + + if (q->size == 0) { + return 0; + } + + ASSERT(q->b_head != q->b_tail && q->v_head != q->v_tail); + + ref_bin = &q->b_head[0]->nif; + iov_entry = &q->v_head[0]; + + if (size != NULL) { + *size = iov_entry->iov_len; + } + + if (iov_entry->iov_len > ERL_ONHEAP_BIN_LIMIT) { + ProcBin *pb = (ProcBin*)alloc_heap(env, PROC_BIN_SIZE); + + pb->thing_word = HEADER_PROC_BIN; + pb->next = MSO(env->proc).first; + pb->val = ref_bin; + pb->flags = 0; + + ASSERT((byte*)iov_entry->iov_base >= (byte*)ref_bin->orig_bytes); + ASSERT(iov_entry->iov_len <= ref_bin->orig_size); + + pb->bytes = (byte*)iov_entry->iov_base; + pb->size = iov_entry->iov_len; + + MSO(env->proc).first = (struct erl_off_heap_header*) pb; + OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm)); + + erts_refc_inc(&ref_bin->intern.refc, 2); + *bin_term = make_binary(pb); + } else { + ErlHeapBin* hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(iov_entry->iov_len)); + + hb->thing_word = header_heap_bin(iov_entry->iov_len); + hb->size = iov_entry->iov_len; + + sys_memcpy(hb->data, iov_entry->iov_base, iov_entry->iov_len); + *bin_term = make_binary(hb); + } + + return 1; +} + SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen) { return erts_ioq_peekq(q, iovlen); @@ -4171,34 +4270,31 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size); struct readonly_check_t { - struct enif_tmp_obj_t hdr; unsigned char* ptr; unsigned size; unsigned checksum; }; static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz) { - ErtsAlcType_t allocator = is_proc_bound(env) ? 
ERTS_ALC_T_TMP : ERTS_ALC_T_NIF; - struct readonly_check_t* obj = erts_alloc(allocator, - sizeof(struct readonly_check_t)); - obj->hdr.allocator = allocator; - obj->hdr.next = env->tmp_obj_list; - env->tmp_obj_list = &obj->hdr; - obj->hdr.dtor = &readonly_check_dtor; + struct readonly_check_t* obj; + + obj = alloc_tmp_obj(env, sizeof(struct readonly_check_t), + &readonly_check_dtor); + obj->ptr = ptr; obj->size = sz; - obj->checksum = calc_checksum(ptr, sz); + obj->checksum = calc_checksum(ptr, sz); } -static void readonly_check_dtor(struct enif_tmp_obj_t* o) +static void readonly_check_dtor(struct enif_tmp_obj_t* tmp_obj) { - struct readonly_check_t* obj = (struct readonly_check_t*) o; - unsigned chksum = calc_checksum(obj->ptr, obj->size); - if (chksum != obj->checksum) { + struct readonly_check_t* ro_check = (struct readonly_check_t*)&tmp_obj[1]; + unsigned chksum = calc_checksum(ro_check->ptr, ro_check->size); + if (chksum != ro_check->checksum) { fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ" - " %x != %x\r\nABORTING\r\n", chksum, obj->checksum); + " %x != %x\r\nABORTING\r\n", chksum, ro_check->checksum); abort(); } - erts_free(obj->hdr.allocator, obj); + erts_free(tmp_obj->allocator, tmp_obj); } static unsigned calc_checksum(unsigned char* ptr, unsigned size) { @@ -4273,7 +4369,7 @@ static void get_string_maybe(ErlNifEnv *env, const ERL_NIF_TERM term, str_bin.size > bufsiz) { *ptr = NULL; } else { - memcpy(buf, (char *) str_bin.data, str_bin.size); + sys_memcpy(buf, (char *) str_bin.data, str_bin.size); buf[str_bin.size] = '\0'; *ptr = buf; } @@ -4290,7 +4386,7 @@ ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc, message_bin.size > MESSAGE_BUFSIZ) { return am_badarg; } - memcpy(messagebuf, (char *) message_bin.data, message_bin.size); + sys_memcpy(messagebuf, (char *) message_bin.data, message_bin.size); messagebuf[message_bin.size] = '\0'; DTRACE1(user_trace_s1, messagebuf); return am_true; diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index d195721054..a99b4db705 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -50,10 +50,12 @@ ** 2.9: 18.2 enif_getenv ** 2.10: Time API ** 2.11: 19.0 enif_snprintf -** 2.12: 20.0 add enif_queue +** 2.12: 20.0 add enif_select, enif_open_resource_type_x +** 2.13: 20.1 add enif_ioq +** 2.14: 21.0 add enif_ioq_peek_head */ #define ERL_NIF_MAJOR_VERSION 2 -#define ERL_NIF_MINOR_VERSION 12 +#define ERL_NIF_MINOR_VERSION 14 /* * The emulator will refuse to load a nif-lib with a major version @@ -135,8 +137,9 @@ typedef struct unsigned char* data; /* Internals (avert your eyes) */ - ERL_NIF_TERM bin_term; void* ref_bin; + /* for future additions to be ABI compatible (same struct size) */ + void* __spare__[2]; }ErlNifBinary; #if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h index 9e573307d8..3750fd9b68 100644 --- a/erts/emulator/beam/erl_nif_api_funcs.h +++ b/erts/emulator/beam/erl_nif_api_funcs.h @@ -198,6 +198,7 @@ ERL_NIF_API_FUNC_DECL(SysIOVec*,enif_ioq_peek,(ErlNifIOQueue *q, int *iovlen)); ERL_NIF_API_FUNC_DECL(int,enif_inspect_iovec,(ErlNifEnv *env, size_t max_length, ERL_NIF_TERM iovec_term, ERL_NIF_TERM *tail, ErlNifIOVec **iovec)); ERL_NIF_API_FUNC_DECL(void,enif_free_iovec,(ErlNifIOVec *iov)); +ERL_NIF_API_FUNC_DECL(int,enif_ioq_peek_head,(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *head)); /* ** ADD NEW ENTRIES HERE (before this 
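For reference, a sketch of how the enif_ioq_peek_head entry added above is meant to be combined with enif_ioq_deq, using the four-argument signature declared in this patch; send_head, q and owner are illustrative names.

/* Send the binary at the head of an IO queue to 'owner', then drop that
 * element from the queue. Returns 0 if the queue is empty. */
static int send_head(ErlNifEnv* env, ErlNifIOQueue* q, ErlNifPid* owner)
{
    ERL_NIF_TERM bin_term;
    size_t head_size;

    if (!enif_ioq_peek_head(env, q, &head_size, &bin_term))
        return 0;

    /* With a NULL message environment the term is copied into the message,
     * and the term itself holds its own reference to (or copy of) the
     * queued data, so dequeuing below does not invalidate it. */
    enif_send(env, owner, NULL, bin_term);

    return enif_ioq_deq(q, head_size, NULL);
}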
comment) !!! @@ -373,6 +374,7 @@ ERL_NIF_API_FUNC_DECL(void,enif_free_iovec,(ErlNifIOVec *iov)); # define enif_ioq_peek ERL_NIF_API_FUNC_MACRO(enif_ioq_peek) # define enif_inspect_iovec ERL_NIF_API_FUNC_MACRO(enif_inspect_iovec) # define enif_free_iovec ERL_NIF_API_FUNC_MACRO(enif_free_iovec) +# define enif_ioq_peek_head ERL_NIF_API_FUNC_MACRO(enif_ioq_peek_head) /* ** ADD NEW ENTRIES HERE (before this comment) diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index 0f3dfa797c..e8901a652f 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -39,9 +39,11 @@ erts_rwmtx_t erts_node_table_rwmtx; DistEntry *erts_hidden_dist_entries; DistEntry *erts_visible_dist_entries; +DistEntry *erts_pending_dist_entries; DistEntry *erts_not_connected_dist_entries; /* including erts_this_dist_entry */ Sint erts_no_of_hidden_dist_entries; Sint erts_no_of_visible_dist_entries; +Sint erts_no_of_pending_dist_entries; Sint erts_no_of_not_connected_dist_entries; /* including erts_this_dist_entry */ DistEntry *erts_this_dist_entry; @@ -101,7 +103,9 @@ void erts_ref_dist_entry(DistEntry *dep) { ASSERT(dep); - de_refc_inc(dep, 1); + if (de_refc_inc_read(dep, 1) == 1) { + de_refc_inc(dep, 2); /* Pending delete */ + } } void @@ -139,9 +143,7 @@ dist_table_cmp(void *dep1, void *dep2) static void* dist_table_alloc(void *dep_tmpl) { -#ifdef DEBUG erts_aint_t refc; -#endif Eterm sysname; Binary *bin; DistEntry *dep; @@ -158,13 +160,8 @@ dist_table_alloc(void *dep_tmpl) dist_entries++; -#ifdef DEBUG - refc = -#else - (void) -#endif - de_refc_dec_read(dep, -1); - ASSERT(refc == -1); + refc = de_refc_dec_read(dep, -1); + ASSERT(refc == -1); (void)refc; dep->prev = NULL; erts_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname, @@ -173,7 +170,7 @@ dist_table_alloc(void *dep_tmpl) dep->cid = NIL; erts_atomic_init_nob(&dep->input_handler, (erts_aint_t) NIL); dep->connection_id = 0; - dep->status = 0; + dep->state = ERTS_DE_STATE_IDLE; dep->flags = 0; dep->version = 0; @@ -202,6 +199,7 @@ dist_table_alloc(void *dep_tmpl) erts_port_task_handle_init(&dep->dist_cmd); dep->send = NULL; dep->cache = NULL; + dep->transcode_ctx = NULL; /* Link in */ @@ -224,6 +222,8 @@ dist_table_free(void *vdep) { DistEntry *dep = (DistEntry *) vdep; + ASSERT(de_refc_read(dep, -1) == -1); + ASSERT(dep->state == ERTS_DE_STATE_IDLE); ASSERT(is_nil(dep->cid)); ASSERT(dep->nlinks == NULL); ASSERT(dep->node_links == NULL); @@ -384,56 +384,92 @@ erts_dhandle_to_dist_entry(Eterm dhandle) } Eterm -erts_make_dhandle(Process *c_p, DistEntry *dep) +erts_build_dhandle(Eterm **hpp, ErlOffHeap* ohp, DistEntry *dep) { - Binary *bin; - Eterm *hp; - - bin = ErtsDistEntry2Bin(dep); + Binary *bin = ErtsDistEntry2Bin(dep); ASSERT(bin); ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dist_entry_destructor); - hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE); - return erts_mk_magic_ref(&hp, &c_p->off_heap, bin); + return erts_mk_magic_ref(hpp, ohp, bin); +} + +Eterm +erts_make_dhandle(Process *c_p, DistEntry *dep) +{ + Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE); + return erts_build_dhandle(&hp, &c_p->off_heap, dep); } -static void try_delete_dist_entry(void *vbin); +static void start_timer_delete_dist_entry(void *vdep); +static void prepare_try_delete_dist_entry(void *vdep); +static void try_delete_dist_entry(DistEntry*); + +static void schedule_delete_dist_entry(DistEntry* dep) +{ + /* + * Here we need thread progress to wait for other threads, that may have + * done lookup without 
refc++, to do either refc++ or drop their refs. + * + * Note that timeouts do not guarantee thread progress. + */ + erts_schedule_thr_prgr_later_op(start_timer_delete_dist_entry, + dep, &dep->later_op); +} static void -prepare_try_delete_dist_entry(void *vbin) +start_timer_delete_dist_entry(void *vdep) { - Binary *bin = (Binary *) vbin; - DistEntry *dep = ErtsBin2DistEntry(bin); - Uint size; + if (node_tab_delete_delay == 0) { + prepare_try_delete_dist_entry(vdep); + } + else { + ASSERT(node_tab_delete_delay > 0); + erts_start_timer_callback(node_tab_delete_delay, + prepare_try_delete_dist_entry, + vdep); + } +} + +static void +prepare_try_delete_dist_entry(void *vdep) +{ + DistEntry *dep = vdep; erts_aint_t refc; + /* + * Time has passed since we decremented refc to zero and DistEntry may + * have been revived. Do a fast check without table lock first. + */ refc = de_refc_read(dep, 0); - if (refc > 0) - return; - - size = ERTS_MAGIC_BIN_SIZE(sizeof(DistEntry)); - erts_schedule_thr_prgr_later_cleanup_op(try_delete_dist_entry, - vbin, &dep->later_op, size); + if (refc == 0) { + try_delete_dist_entry(dep); + } + else { + /* + * Someone has done lookup and done refc++ for us. + */ + refc = de_refc_dec_read(dep, 0); + if (refc == 0) + schedule_delete_dist_entry(dep); + } } -static void try_delete_dist_entry(void *vbin) +static void try_delete_dist_entry(DistEntry* dep) { - Binary *bin = (Binary *) vbin; - DistEntry *dep = ErtsBin2DistEntry(bin); erts_aint_t refc; erts_rwmtx_rwlock(&erts_dist_table_rwmtx); /* * Another thread might have looked up this dist entry after * we decided to delete it (refc became zero). If so, the other - * thread incremented refc twice. Once for the new reference - * and once for this thread. + * thread incremented refc one extra step for this thread. * - * If refc reach -1, no one has used the entry since we - * set up the timer. Delete the entry. + * If refc reach -1, no one has done lookup and no one can do lookup + * as we have table lock. Delete the entry. * - * If refc reach 0, the entry is currently not in use - * but has been used since we set up the timer. Set up a - * new timer. + * If refc reach 0, someone raced us and either + * (1) did lookup with own refc++ and already released it again + * (2) did lookup without own refc++ + * Schedule new delete operation. * * If refc > 0, the entry is in use. Keep the entry. */ @@ -443,12 +479,7 @@ static void try_delete_dist_entry(void *vbin) erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); if (refc == 0) { - if (node_tab_delete_delay == 0) - prepare_try_delete_dist_entry(vbin); - else if (node_tab_delete_delay > 0) - erts_start_timer_callback(node_tab_delete_delay, - prepare_try_delete_dist_entry, - vbin); + schedule_delete_dist_entry(dep); } } @@ -462,12 +493,7 @@ int erts_dist_entry_destructor(Binary *bin) if (refc == -1) return 1; /* Allow deallocation of structure... 
*/ - if (node_tab_delete_delay == 0) - prepare_try_delete_dist_entry((void *) bin); - else if (node_tab_delete_delay > 0) - erts_start_timer_callback(node_tab_delete_delay, - prepare_try_delete_dist_entry, - (void *) bin); + schedule_delete_dist_entry(dep); return 0; } @@ -498,12 +524,17 @@ erts_dist_table_size(void) i++; ASSERT(i == erts_no_of_hidden_dist_entries); i = 0; + for(dep = erts_pending_dist_entries; dep; dep = dep->next) + i++; + ASSERT(i == erts_no_of_pending_dist_entries); + i = 0; for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) i++; ASSERT(i == erts_no_of_not_connected_dist_entries); ASSERT(dist_entries == (erts_no_of_visible_dist_entries + erts_no_of_hidden_dist_entries + + erts_no_of_pending_dist_entries + erts_no_of_not_connected_dist_entries)); #endif @@ -518,43 +549,46 @@ erts_dist_table_size(void) void erts_set_dist_entry_not_connected(DistEntry *dep) { + DistEntry** head; + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); erts_rwmtx_rwlock(&erts_dist_table_rwmtx); ASSERT(dep != erts_this_dist_entry); - ASSERT(is_internal_port(dep->cid) || is_internal_pid(dep->cid)); - - if(dep->flags & DFLAG_PUBLISHED) { - if(dep->prev) { - ASSERT(is_in_de_list(dep, erts_visible_dist_entries)); - dep->prev->next = dep->next; - } - else { - ASSERT(erts_visible_dist_entries == dep); - erts_visible_dist_entries = dep->next; - } - ASSERT(erts_no_of_visible_dist_entries > 0); - erts_no_of_visible_dist_entries--; + if (dep->state == ERTS_DE_STATE_PENDING) { + ASSERT(is_nil(dep->cid)); + ASSERT(erts_no_of_pending_dist_entries > 0); + erts_no_of_pending_dist_entries--; + head = &erts_pending_dist_entries; } else { - if(dep->prev) { - ASSERT(is_in_de_list(dep, erts_hidden_dist_entries)); - dep->prev->next = dep->next; - } - else { - ASSERT(erts_hidden_dist_entries == dep); - erts_hidden_dist_entries = dep->next; - } - - ASSERT(erts_no_of_hidden_dist_entries > 0); - erts_no_of_hidden_dist_entries--; + ASSERT(dep->state != ERTS_DE_STATE_IDLE); + ASSERT(is_internal_port(dep->cid) || is_internal_pid(dep->cid)); + if (dep->flags & DFLAG_PUBLISHED) { + ASSERT(erts_no_of_visible_dist_entries > 0); + erts_no_of_visible_dist_entries--; + head = &erts_visible_dist_entries; + } + else { + ASSERT(erts_no_of_hidden_dist_entries > 0); + erts_no_of_hidden_dist_entries--; + head = &erts_hidden_dist_entries; + } } + if(dep->prev) { + ASSERT(is_in_de_list(dep, *head)); + dep->prev->next = dep->next; + } + else { + ASSERT(*head == dep); + *head = dep->next; + } if(dep->next) dep->next->prev = dep->prev; - dep->status &= ~ERTS_DE_SFLG_CONNECTED; + dep->state = ERTS_DE_STATE_EXITING; dep->flags = 0; dep->prev = NULL; dep->cid = NIL; @@ -570,46 +604,86 @@ erts_set_dist_entry_not_connected(DistEntry *dep) } void +erts_set_dist_entry_pending(DistEntry *dep) +{ + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); + + ASSERT(dep != erts_this_dist_entry); + ASSERT(dep->state == ERTS_DE_STATE_IDLE); + ASSERT(is_nil(dep->cid)); + + if(dep->prev) { + ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries)); + dep->prev->next = dep->next; + } + else { + ASSERT(dep == erts_not_connected_dist_entries); + erts_not_connected_dist_entries = dep->next; + } + + if(dep->next) + dep->next->prev = dep->prev; + + erts_no_of_not_connected_dist_entries--; + + dep->state = ERTS_DE_STATE_PENDING; + dep->flags = (DFLAG_DIST_MANDATORY | DFLAG_DIST_HOPEFULLY | DFLAG_NO_MAGIC); + dep->connection_id = (dep->connection_id + 1) & ERTS_DIST_CON_ID_MASK; + + dep->prev = NULL; + dep->next = 
erts_pending_dist_entries; + if(erts_pending_dist_entries) { + ASSERT(erts_pending_dist_entries->prev == NULL); + erts_pending_dist_entries->prev = dep; + } + erts_pending_dist_entries = dep; + erts_no_of_pending_dist_entries++; + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); +} + +void erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags) { + erts_aint32_t set_qflgs; + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); erts_rwmtx_rwlock(&erts_dist_table_rwmtx); ASSERT(dep != erts_this_dist_entry); ASSERT(is_nil(dep->cid)); + ASSERT(dep->state == ERTS_DE_STATE_PENDING); ASSERT(is_internal_port(cid) || is_internal_pid(cid)); if(dep->prev) { - ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries)); + ASSERT(is_in_de_list(dep, erts_pending_dist_entries)); dep->prev->next = dep->next; } else { - ASSERT(erts_not_connected_dist_entries == dep); - erts_not_connected_dist_entries = dep->next; + ASSERT(erts_pending_dist_entries == dep); + erts_pending_dist_entries = dep->next; } if(dep->next) dep->next->prev = dep->prev; - ASSERT(erts_no_of_not_connected_dist_entries > 0); - erts_no_of_not_connected_dist_entries--; + ASSERT(erts_no_of_pending_dist_entries > 0); + erts_no_of_pending_dist_entries--; - dep->status |= ERTS_DE_SFLG_CONNECTED; - dep->flags = flags; + dep->state = ERTS_DE_STATE_CONNECTED; + dep->flags = flags & ~DFLAG_NO_MAGIC; dep->cid = cid; erts_atomic_set_nob(&dep->input_handler, (erts_aint_t) cid); - dep->connection_id++; - dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK; dep->prev = NULL; erts_atomic64_set_nob(&dep->in, 0); erts_atomic64_set_nob(&dep->out, 0); - erts_atomic32_set_nob(&dep->qflgs, - (is_internal_port(cid) - ? ERTS_DE_QFLG_PORT_CTRL - : ERTS_DE_QFLG_PROC_CTRL)); + set_qflgs = (is_internal_port(cid) ? + ERTS_DE_QFLG_PORT_CTRL : ERTS_DE_QFLG_PROC_CTRL); + erts_atomic32_read_bor_nob(&dep->qflgs, set_qflgs); + if(flags & DFLAG_PUBLISHED) { dep->next = erts_visible_dist_entries; if(erts_visible_dist_entries) { @@ -929,9 +1003,11 @@ void erts_init_node_tables(int dd_sec) erts_hidden_dist_entries = NULL; erts_visible_dist_entries = NULL; + erts_pending_dist_entries = NULL; erts_not_connected_dist_entries = NULL; erts_no_of_hidden_dist_entries = 0; erts_no_of_visible_dist_entries = 0; + erts_no_of_pending_dist_entries = 0; erts_no_of_not_connected_dist_entries = 0; node_tmpl.sysname = am_Noname; @@ -1057,6 +1133,7 @@ typedef struct { typedef struct dist_referrer_ { struct dist_referrer_ *next; int heap_ref; + int ets_ref; int node_ref; int ctrl_ref; int system_ref; @@ -1175,10 +1252,11 @@ insert_dist_referrer(ReferredDist *referred_dist, else { Uint *hp = &drp->id_heap[0]; ASSERT(is_tuple(id)); - drp->id = copy_struct(id, size_object(id), &hp, NULL); + drp->id = copy_struct(id, size_object(id), &hp, NULL); } drp->creation = creation; drp->heap_ref = 0; + drp->ets_ref = 0; drp->node_ref = 0; drp->ctrl_ref = 0; drp->system_ref = 0; @@ -1188,6 +1266,7 @@ insert_dist_referrer(ReferredDist *referred_dist, case NODE_REF: drp->node_ref++; break; case CTRL_REF: drp->ctrl_ref++; break; case HEAP_REF: drp->heap_ref++; break; + case ETS_REF: drp->ets_ref++; break; case SYSTEM_REF: drp->system_ref++; break; default: ASSERT(0); } @@ -1300,6 +1379,11 @@ insert_offheap2(ErlOffHeap *oh, void *arg) (((Bin)->intern.flags & BIN_FLAG_MAGIC) \ && ERTS_MAGIC_BIN_DESTRUCTOR((Bin)) == erts_dist_entry_destructor) +#define IsSendCtxBinary(Bin) \ + (((Bin)->intern.flags & BIN_FLAG_MAGIC) \ + && ERTS_MAGIC_BIN_DESTRUCTOR((Bin)) == erts_dsend_context_dtor) + + static void 
insert_offheap(ErlOffHeap *oh, int type, Eterm id) { @@ -1336,7 +1420,12 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id) inserted_bins = nib; UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE); } - } + } + else if (IsSendCtxBinary(u.mref->mb)) { + ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(u.mref->mb); + if (ctx->deref_dep) + insert_dist_entry(ctx->dep, type, id, 0); + } break; case REFC_BINARY_SUBTAG: case FUN_SUBTAG: @@ -1463,9 +1552,9 @@ insert_delayed_delete_node(void *state, } static void -insert_thr_prgr_delete_dist_entry(void *arg, ErtsThrPrgrVal thr_prgr, void *vbin) +insert_thr_prgr_delete_dist_entry(void *arg, ErtsThrPrgrVal thr_prgr, void *vdep) { - DistEntry *dep = ErtsBin2DistEntry(vbin); + DistEntry *dep = vdep; Eterm heap[3]; insert_dist_entry(dep, SYSTEM_REF, @@ -1476,9 +1565,9 @@ insert_thr_prgr_delete_dist_entry(void *arg, ErtsThrPrgrVal thr_prgr, void *vbin static void insert_delayed_delete_dist_entry(void *state, ErtsMonotonicTime timeout_pos, - void *vbin) + void *vdep) { - DistEntry *dep = ErtsBin2DistEntry(vbin); + DistEntry *dep = vdep; Eterm heap[3]; insert_dist_entry(dep, SYSTEM_REF, @@ -1520,7 +1609,7 @@ setup_reference_table(void) erts_debug_callback_timer_foreach(prepare_try_delete_dist_entry, insert_delayed_delete_dist_entry, NULL); - erts_debug_later_op_foreach(try_delete_dist_entry, + erts_debug_later_op_foreach(start_timer_delete_dist_entry, insert_thr_prgr_delete_dist_entry, NULL); @@ -1686,6 +1775,15 @@ setup_reference_table(void) insert_monitors(dep->monitors, dep->sysname); } + for(dep = erts_pending_dist_entries; dep; dep = dep->next) { + if(dep->nlinks) + insert_links2(dep->nlinks, dep->sysname); + if(dep->node_links) + insert_links(dep->node_links, dep->sysname); + if(dep->monitors) + insert_monitors(dep->monitors, dep->sysname); + } + /* Not connected dist entries should not have any links, but inspect them anyway */ for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) { @@ -1855,6 +1953,10 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp) tup = MK_2TUP(AM_heap, MK_UINT(drp->heap_ref)); drl = MK_CONS(tup, drl); } + if(drp->ets_ref) { + tup = MK_2TUP(AM_ets, MK_UINT(drp->ets_ref)); + drl = MK_CONS(tup, drl); + } if(drp->system_ref) { tup = MK_2TUP(AM_system, MK_UINT(drp->system_ref)); drl = MK_CONS(tup, drl); @@ -1871,12 +1973,17 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp) else if (is_tuple(drp->id)) { Eterm *t; ASSERT(drp->system_ref && !drp->node_ref - && !drp->ctrl_ref && !drp->heap_ref); + && !drp->ctrl_ref && !drp->heap_ref && !drp->ets_ref); t = tuple_val(drp->id); ASSERT(2 == arityval(t[0])); tup = MK_2TUP(t[1], t[2]); } - else { + else if (drp->ets_ref) { + ASSERT(!drp->heap_ref && !drp->node_ref && + !drp->ctrl_ref && !drp->system_ref); + tup = MK_2TUP(AM_ets, drp->id); + } + else { ASSERT(!drp->ctrl_ref && drp->node_ref); ASSERT(is_atom(drp->id)); tup = MK_2TUP(drp->id, MK_UINT(drp->creation)); diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h index ee8277b5ea..58279017c8 100644 --- a/erts/emulator/beam/erl_node_tables.h +++ b/erts/emulator/beam/erl_node_tables.h @@ -57,11 +57,12 @@ #define ERST_INTERNAL_CHANNEL_NO 0 -#define ERTS_DE_SFLG_CONNECTED (((Uint32) 1) << 0) -#define ERTS_DE_SFLG_EXITING (((Uint32) 1) << 1) - -#define ERTS_DE_SFLGS_ALL (ERTS_DE_SFLG_CONNECTED \ - | ERTS_DE_SFLG_EXITING) +enum dist_entry_state { + ERTS_DE_STATE_IDLE, + ERTS_DE_STATE_PENDING, + ERTS_DE_STATE_CONNECTED, + ERTS_DE_STATE_EXITING +}; #define ERTS_DE_QFLG_BUSY 
(((erts_aint32_t) 1) << 0) #define ERTS_DE_QFLG_EXIT (((erts_aint32_t) 1) << 1) @@ -85,10 +86,13 @@ typedef struct ErtsDistOutputBuf_ ErtsDistOutputBuf; struct ErtsDistOutputBuf_ { #ifdef DEBUG Uint dbg_pattern; + byte *alloc_endp; #endif ErtsDistOutputBuf *next; + Uint hopefull_flags; byte *extp; byte *ext_endp; + byte *msg_start; byte data[1]; }; @@ -109,8 +113,6 @@ struct ErtsProcList_; * unlock mutexes with higher numbers before mutexes with higher numbers. */ -struct erl_link; - typedef struct dist_entry_ { HashBucket hash_bucket; /* Hash bucket */ struct dist_entry_ *next; /* Next entry in dist_table (not sorted) */ @@ -123,7 +125,7 @@ typedef struct dist_entry_ { Eterm cid; /* connection handler (pid or port), NIL == free */ Uint32 connection_id; /* Connection id incremented on connect */ - Uint32 status; /* Slot status, like exiting reserved etc */ + enum dist_entry_state state; Uint32 flags; /* Distribution flags, like hidden, atom cache etc. */ unsigned long version; /* Protocol version */ @@ -159,6 +161,8 @@ typedef struct dist_entry_ { struct cache* cache; /* The atom cache */ ErtsThrPrgrLaterOp later_op; + + struct transcode_context* transcode_ctx; } DistEntry; typedef struct erl_node_ { @@ -177,9 +181,11 @@ extern erts_rwmtx_t erts_node_table_rwmtx; extern DistEntry *erts_hidden_dist_entries; extern DistEntry *erts_visible_dist_entries; +extern DistEntry *erts_pending_dist_entries; extern DistEntry *erts_not_connected_dist_entries; extern Sint erts_no_of_hidden_dist_entries; extern Sint erts_no_of_visible_dist_entries; +extern Sint erts_no_of_pending_dist_entries; extern Sint erts_no_of_not_connected_dist_entries; extern DistEntry *erts_this_dist_entry; @@ -195,6 +201,7 @@ void erts_schedule_delete_dist_entry(DistEntry *); Uint erts_dist_table_size(void); void erts_dist_table_info(fmtfn_t, void *); void erts_set_dist_entry_not_connected(DistEntry *); +void erts_set_dist_entry_pending(DistEntry *); void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint); ErlNode *erts_find_or_insert_node(Eterm, Uint32); void erts_schedule_delete_node(ErlNode *); @@ -210,6 +217,7 @@ int erts_lc_is_de_rlocked(DistEntry *); #endif int erts_dist_entry_destructor(Binary *bin); DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle); +Eterm erts_build_dhandle(Eterm **hpp, ErlOffHeap*, DistEntry*); Eterm erts_make_dhandle(Process *c_p, DistEntry *dep); void erts_ref_dist_entry(DistEntry *dep); void erts_deref_dist_entry(DistEntry *dep); diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h index 9117eb1f72..0d148ee048 100644 --- a/erts/emulator/beam/erl_port.h +++ b/erts/emulator/beam/erl_port.h @@ -180,6 +180,7 @@ void erts_init_port_data(Port *); void erts_cleanup_port_data(Port *); Uint erts_port_data_size(Port *); ErlOffHeap *erts_port_data_offheap(Port *); +Eterm erts_port_data_read(Port* prt); #define ERTS_PORT_GET_CONNECTED(PRT) \ ((Eterm) erts_atomic_read_nob(&(PRT)->connected)) @@ -195,26 +196,52 @@ struct erl_drv_port_data_lock { Port *prt; }; +ERTS_GLB_INLINE void erts_init_runq_port(Port *prt, ErtsRunQueue *runq); +ERTS_GLB_INLINE void erts_set_runq_port(Port *prt, ErtsRunQueue *runq); +ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_port(Port *prt); ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt); #if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE void +erts_init_runq_port(Port *prt, ErtsRunQueue *runq) +{ + if (!runq) + ERTS_INTERNAL_ERROR("Missing run-queue"); + erts_atomic_init_nob(&prt->run_queue, (erts_aint_t) runq); +} + +ERTS_GLB_INLINE void 
+erts_set_runq_port(Port *prt, ErtsRunQueue *runq) +{ + if (!runq) + ERTS_INTERNAL_ERROR("Missing run-queue"); + erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); +} + +ERTS_GLB_INLINE ErtsRunQueue * +erts_get_runq_port(Port *prt) +{ + ErtsRunQueue *runq; + runq = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue); + if (!runq) + ERTS_INTERNAL_ERROR("Missing run-queue"); + return runq; +} + + ERTS_GLB_INLINE ErtsRunQueue * erts_port_runq(Port *prt) { ErtsRunQueue *rq1, *rq2; - rq1 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue); - if (!rq1) - return NULL; + rq1 = erts_get_runq_port(prt); while (1) { erts_runq_lock(rq1); - rq2 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue); + rq2 = erts_get_runq_port(prt); if (rq1 == rq2) return rq1; erts_runq_unlock(rq1); rq1 = rq2; - if (!rq1) - return NULL; } } diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index a588477320..4a3671df0c 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -84,11 +84,10 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q #define LTTNG_DRIVER(TRACEPOINT, PP) do {} while(0) #endif -#define ERTS_LC_VERIFY_RQ(RQ, PP) \ - do { \ +#define ERTS_LC_VERIFY_RQ(RQ, PP) \ + do { \ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); \ - ERTS_LC_ASSERT((RQ) == ((ErtsRunQueue *) \ - erts_atomic_read_nob(&(PP)->run_queue))); \ + ERTS_LC_ASSERT((RQ) == erts_get_runq_port((PP))); \ } while (0) #define ERTS_PT_STATE_SCHEDULED 0 @@ -1520,19 +1519,15 @@ erts_port_task_schedule(Eterm id, /* Enqueue port on run-queue */ runq = erts_port_runq(pp); - if (!runq) - ERTS_INTERNAL_ERROR("Missing run-queue"); xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); ERTS_LC_ASSERT(runq != xrunq); ERTS_LC_VERIFY_RQ(runq, pp); if (xrunq) { /* Emigrate port ... */ - erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_set_runq_port(pp, xrunq); erts_runq_unlock(runq); runq = erts_port_runq(pp); - if (!runq) - ERTS_INTERNAL_ERROR("Missing run-queue"); } enqueue_port(runq, pp); @@ -1593,8 +1588,6 @@ erts_port_task_free_port(Port *pp) ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); runq = erts_port_runq(pp); - if (!runq) - ERTS_INTERNAL_ERROR("Missing run-queue"); erts_port_task_sched_lock(&pp->sched); flags = erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_EXIT); @@ -1805,7 +1798,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_unblock_fpe(fpe_was_unmasked); ERTS_MSACC_POP_STATE_M(); - ASSERT(runq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(runq == erts_get_runq_port(pp)); active = finalize_exec(pp, &execq, processing_busy_q); @@ -1831,11 +1824,10 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) } else { /* Emigrate port... 
*/ - erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_set_runq_port(pp, xrunq); erts_runq_unlock(runq); xrunq = erts_port_runq(pp); - ASSERT(xrunq); enqueue_port(xrunq, pp); erts_runq_unlock(xrunq); erts_notify_inc_runq(xrunq); @@ -2069,7 +2061,7 @@ void erts_enqueue_port(ErtsRunQueue *rq, Port *pp) { ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - ASSERT(rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(rq == erts_get_runq_port(pp)); ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); enqueue_port(rq, pp); } @@ -2080,8 +2072,7 @@ erts_dequeue_port(ErtsRunQueue *rq) Port *pp; ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); pp = pop_port(rq); - ASSERT(!pp - || rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(!pp || rq == erts_get_runq_port(pp)); ASSERT(!pp || (erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ)); return pp; diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 0433b4ea17..d069f62b39 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -137,36 +137,6 @@ runq_got_work_to_execute(ErtsRunQueue *rq) return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq)); } -#undef RUNQ_READ_RQ -#undef RUNQ_SET_RQ -#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_atomic_read_nob((X))) -#define RUNQ_SET_RQ(X, RQ) erts_atomic_set_nob((X), (erts_aint_t) (RQ)) - -#ifdef DEBUG -# if defined(ARCH_64) -# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \ - (RUNQ_SET_RQ((RQP), (0xdeadbeefdead0003LL | ((N) << 4))) -# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \ -do { \ - ASSERT((RQP) != NULL); \ - ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \ - ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdeadbeefdead0000LL));\ -} while (0) -# else -# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \ - (RUNQ_SET_RQ((RQP), (0xdead0003 | ((N) << 4)))) -# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \ -do { \ - ASSERT((RQP) != NULL); \ - ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \ - ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \ -} while (0) -# endif -#else -# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) -# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) -#endif - const Process erts_invalid_process = {{ERTS_INVALID_PID}}; extern BeamInstr beam_apply[]; @@ -1538,6 +1508,20 @@ erts_proclist_destroy(ErtsProcList *plp) proclist_destroy(plp); } +void +erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList *plp) +{ + ErtsProcList *first = plp; + + while (plp) { + erts_print(to, to_arg, "%T", plp->pid); + plp = plp->next; + if (plp == first) + break; + } + erts_print(to, to_arg, "\n"); +} + void * erts_psd_set_init(Process *p, int ix, void *data) { @@ -3926,21 +3910,16 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) Port *prt; prt = erts_dequeue_port(rq); if (prt) - RUNQ_SET_RQ(&prt->run_queue, c_rq); + erts_set_runq_port(prt, c_rq); erts_runq_unlock(rq); if (prt) { - /* port might terminate while we have no lock... 
*/ rq = erts_port_runq(prt); - if (rq) { - if (rq != c_rq) - erts_exit(ERTS_ABORT_EXIT, - "%s:%d:%s(): Internal error", - __FILE__, __LINE__, __func__); - erts_enqueue_port(c_rq, prt); - if (!iflag) - return; /* done */ - erts_runq_unlock(c_rq); - } + if (rq != c_rq) + ERTS_INTERNAL_ERROR("Unexpected run-queue"); + erts_enqueue_port(c_rq, prt); + if (!iflag) + return; /* done */ + erts_runq_unlock(c_rq); } } else { @@ -3954,12 +3933,11 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) while (proc) { erts_aint32_t state; state = erts_atomic32_read_acqb(&proc->state); - if (!(ERTS_PSFLG_BOUND & state) - && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) { + if (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state) + && erts_try_change_runq_proc(proc, c_rq)) { ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio]; unqueue_process(rq, rpq, rqi, prio, prev_proc, proc); erts_runq_unlock(rq); - RUNQ_SET_RQ(&proc->run_queue, c_rq); rq_locked = 0; erts_runq_lock(c_rq); @@ -4140,21 +4118,13 @@ evacuate_run_queue(ErtsRunQueue *rq, while (prt) { ErtsRunQueue *prt_rq; prt = erts_dequeue_port(rq); - RUNQ_SET_RQ(&prt->run_queue, to_rq); + erts_set_runq_port(prt, to_rq); erts_runq_unlock(rq); - /* - * The port might terminate while - * we have no lock on it... - */ prt_rq = erts_port_runq(prt); - if (prt_rq) { - if (prt_rq != to_rq) - erts_exit(ERTS_ABORT_EXIT, - "%s:%d:%s() internal error\n", - __FILE__, __LINE__, __func__); - erts_enqueue_port(to_rq, prt); - erts_runq_unlock(to_rq); - } + if (prt_rq != to_rq) + ERTS_INTERNAL_ERROR("Unexpected run-queue"); + erts_enqueue_port(to_rq, prt); + erts_runq_unlock(to_rq); erts_runq_lock(rq); prt = rq->ports.start; } @@ -4165,8 +4135,6 @@ evacuate_run_queue(ErtsRunQueue *rq, for (prio_q = 0; prio_q < ERTS_NO_PROC_PRIO_QUEUES; prio_q++) { erts_aint32_t state; Process *proc; - int notify = 0; - to_rq = NULL; if (!mp->prio[prio_q].runq) return; @@ -4177,14 +4145,13 @@ evacuate_run_queue(ErtsRunQueue *rq, while (proc) { Process *real_proc; int prio; - erts_aint32_t max_qbit, qbit, real_state; + erts_aint32_t max_qbit, qbit; prio = ERTS_PSFLGS_GET_PRQ_PRIO(state); qbit = ((erts_aint32_t) 1) << prio; if (!(state & ERTS_PSFLG_PROXY)) { real_proc = proc; - real_state = state; } else { real_proc = erts_proc_lookup_raw(proc->common.id); @@ -4192,7 +4159,6 @@ evacuate_run_queue(ErtsRunQueue *rq, free_proxy_proc(proc); goto handle_next_proc; } - real_state = erts_atomic32_read_acqb(&real_proc->state); } max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET); @@ -4227,7 +4193,13 @@ evacuate_run_queue(ErtsRunQueue *rq, goto handle_next_proc; } - if (ERTS_PSFLG_BOUND & real_state) { + prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); + to_rq = mp->prio[prio].runq; + + if (!to_rq) + goto handle_next_proc; + + if (!erts_try_change_runq_proc(proc, to_rq)) { /* Bound processes get stuck here... 
*/ proc->next = NULL; if (sbpp->last) @@ -4237,16 +4209,13 @@ evacuate_run_queue(ErtsRunQueue *rq, sbpp->last = proc; } else { - int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); erts_runq_unlock(rq); - to_rq = mp->prio[prio].runq; - RUNQ_SET_RQ(&proc->run_queue, to_rq); - erts_runq_lock(to_rq); enqueue_process(to_rq, prio, proc); erts_runq_unlock(to_rq); - notify = 1; + + smp_notify_inc_runq(to_rq); erts_runq_lock(rq); } @@ -4254,8 +4223,7 @@ evacuate_run_queue(ErtsRunQueue *rq, handle_next_proc: proc = dequeue_process(rq, prio_q, &state); } - if (notify) - smp_notify_inc_runq(to_rq); + } } @@ -4309,14 +4277,13 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, proc = rpq->first; while (proc) { - erts_aint32_t state = erts_atomic32_read_acqb(&proc->state); - if (!(ERTS_PSFLG_BOUND & state)) { + if (erts_try_change_runq_proc(proc, rq)) { + erts_aint32_t state = erts_atomic32_read_acqb(&proc->state); /* Steal process */ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio]; unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc); erts_runq_unlock(vrq); - RUNQ_SET_RQ(&proc->run_queue, rq); erts_runq_lock(rq); *rq_lockedp = 1; @@ -4341,26 +4308,14 @@ no_procs: if (vrq->ports.start) { ErtsRunQueue *prt_rq; Port *prt = erts_dequeue_port(vrq); - RUNQ_SET_RQ(&prt->run_queue, rq); + erts_set_runq_port(prt, rq); erts_runq_unlock(vrq); - - /* - * The port might terminate while - * we have no lock on it... - */ - prt_rq = erts_port_runq(prt); - if (!prt_rq) - return 0; - else { - if (prt_rq != rq) - erts_exit(ERTS_ABORT_EXIT, - "%s:%d:%s() internal error\n", - __FILE__, __LINE__, __func__); - *rq_lockedp = 1; - erts_enqueue_port(rq, prt); - return !0; - } + if (prt_rq != rq) + ERTS_INTERNAL_ERROR("Unexpected run-queue"); + *rq_lockedp = 1; + erts_enqueue_port(rq, prt); + return !0; } erts_runq_unlock(vrq); @@ -6116,7 +6071,8 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) { erts_aint32_t state; Process *proxy; - ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue); + int bound; + ErtsRunQueue *rq = erts_get_runq_proc(proc, &bound); state = (ERTS_PSFLG_PROXY | ERTS_PSFLG_IN_RUNQ @@ -6129,7 +6085,7 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) proxy = prev_proxy; ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); erts_atomic32_set_nob(&proxy->state, state); - RUNQ_SET_RQ(&proc->run_queue, rq); + (void) erts_set_runq_proc(proc, rq, &bound); } else { proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process)); @@ -6142,8 +6098,7 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) } #endif erts_atomic32_init_nob(&proxy->state, state); - erts_atomic_init_nob(&proxy->run_queue, - erts_atomic_read_nob(&proc->run_queue)); + erts_init_runq_proc(proc, rq, bound); } proxy->common.id = proc->common.id; @@ -6334,18 +6289,21 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st default: { ErtsRunQueue* runq; + int bound; ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE); - runq = erts_get_runq_proc(p); + runq = erts_get_runq_proc(p, &bound); - if (!(ERTS_PSFLG_BOUND & state)) { + if (!bound) { ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio); - if (new_runq) { - RUNQ_SET_RQ(&p->run_queue, new_runq); - runq = new_runq; - } + if (new_runq) { + if (erts_try_change_runq_proc(p, new_runq)) + runq = new_runq; + else + runq = erts_get_runq_proc(p, NULL); + } } ASSERT(runq); @@ 
-10824,7 +10782,7 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, goto badarg; req_type = tp[1]; req_id = tp[2]; - req_id_sz = is_immed(req_id) ? req_id : size_object(req_id); + req_id_sz = is_immed(req_id) ? 0 : size_object(req_id); tot_sz = req_id_sz; for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++) { int tix = 3 + i; @@ -11462,6 +11420,7 @@ typedef struct { Process *proc; erts_aint32_t state; ErtsRunQueue *run_queue; + int bound; } ErtsEarlyProcInit; static void early_init_process_struct(void *varg, Eterm data) @@ -11472,10 +11431,9 @@ static void early_init_process_struct(void *varg, Eterm data) proc->common.id = make_internal_pid(data); erts_atomic32_init_nob(&proc->dirty_state, 0); proc->dirty_sys_tasks = NULL; + erts_init_runq_proc(proc, arg->run_queue, arg->bound); erts_atomic32_init_relb(&proc->state, arg->state); - RUNQ_SET_RQ(&proc->run_queue, arg->run_queue); - erts_proc_lock_init(proc); /* All locks locked */ } @@ -11484,7 +11442,7 @@ static void early_init_process_struct(void *varg, Eterm data) ** Allocate process and find out where to place next process. */ static Process* -alloc_process(ErtsRunQueue *rq, erts_aint32_t state) +alloc_process(ErtsRunQueue *rq, int bound, erts_aint32_t state) { ErtsEarlyProcInit init_arg; Process *p; @@ -11493,9 +11451,12 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state) if (!p) return NULL; + ASSERT(rq); + init_arg.proc = (Process *) p; - init_arg.run_queue = rq; init_arg.state = state; + init_arg.run_queue = rq; + init_arg.bound = bound; ERTS_CT_ASSERT(offsetof(Process,common) == 0); @@ -11529,6 +11490,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). Eterm args, /* Arguments for function (must be well-formed list). */ ErlSpawnOpts* so) /* Options for spawn. */ { + int bound = 0; Uint flags = 0; ErtsRunQueue *rq = NULL; Process *p; @@ -11565,7 +11527,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). ASSERT(0 <= ix && ix < erts_no_run_queues); rq = ERTS_RUNQ_IX(ix); /* Unsupported feature... */ - state |= ERTS_PSFLG_BOUND; + bound = !0; } prio = (erts_aint32_t) so->priority; } @@ -11578,17 +11540,16 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). flags |= F_OFF_HEAP_MSGQ; } else if (so->flags & SPO_ON_HEAP_MSGQ) { - state |= ERTS_PSFLG_ON_HEAP_MSGQ; flags |= F_ON_HEAP_MSGQ; } ASSERT((flags & F_ON_HEAP_MSGQ) || (flags & F_OFF_HEAP_MSGQ)); if (!rq) - rq = erts_get_runq_proc(parent); + rq = erts_get_runq_proc(parent, NULL); - p = alloc_process(rq, state); /* All proc locks are locked by this thread - on success */ + p = alloc_process(rq, bound, state); /* All proc locks are locked by this thread + on success */ if (!p) { erts_send_error_to_logger_str(parent->group_leader, "Too many processes\n"); @@ -11596,11 +11557,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
goto error; } - ASSERT((erts_atomic32_read_nob(&p->state) - & ERTS_PSFLG_ON_HEAP_MSGQ) - || (erts_atomic32_read_nob(&p->state) - & ERTS_PSFLG_OFF_HEAP_MSGQ)); - #ifdef SHCOPY_SPAWN arg_size = copy_shared_calculate(args, &info); #else @@ -11974,7 +11930,7 @@ void erts_init_empty_process(Process *p) p->pending_exit.bp = NULL; erts_proc_lock_init(p); erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); - RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0)); + erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0); #if !defined(NO_FPE_SIGNALS) || defined(HIPE) p->fp_exception = 0; @@ -12296,7 +12252,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - rq = RUNQ_READ_RQ(&p->run_queue); + rq = erts_get_runq_proc(p, NULL); ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); if (!plp) @@ -12657,9 +12613,11 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; - int code = erts_dsig_prepare(&dsd, dep, NULL, - ERTS_DSP_NO_LOCK, 0); - if (code == ERTS_DSIG_PREP_CONNECTED) { + int code = erts_dsig_prepare(&dsd, dep, NULL, 0, + ERTS_DSP_NO_LOCK, 0, 0); + if (code == ERTS_DSIG_PREP_CONNECTED || + code == ERTS_DSIG_PREP_PENDING) { + code = erts_dsig_send_demonitor(&dsd, rmon->u.pid, mon->name, @@ -12703,9 +12661,11 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; - int code = erts_dsig_prepare(&dsd, dep, NULL, - ERTS_DSP_NO_LOCK, 0); - if (code == ERTS_DSIG_PREP_CONNECTED) { + int code = erts_dsig_prepare(&dsd, dep, NULL, 0, + ERTS_DSP_NO_LOCK, 0, 0); + if (code == ERTS_DSIG_PREP_CONNECTED || + code == ERTS_DSIG_PREP_PENDING) { + code = erts_dsig_send_demonitor(&dsd, rmon->u.pid, mon->u.pid, @@ -12762,8 +12722,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; - int code = erts_dsig_prepare(&dsd, dep, NULL, - ERTS_DSP_NO_LOCK, 0); + int code = erts_dsig_prepare(&dsd, dep, NULL, 0, + ERTS_DSP_NO_LOCK, 0, 0); if (code == ERTS_DSIG_PREP_CONNECTED) { code = erts_dsig_send_m_exit(&dsd, mon->u.pid, @@ -12885,14 +12845,18 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) int code; ErtsDistLinkData dld; erts_remove_dist_link(&dld, p->common.id, item, dep); - erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); - code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0); - if (code == ERTS_DSIG_PREP_CONNECTED) { - code = erts_dsig_send_exit_tt(&dsd, p->common.id, item, - reason, SEQ_TRACE_TOKEN(p)); - ASSERT(code == ERTS_DSIG_SEND_OK); - } - erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + if (dld.d_lnk) { + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); + code = erts_dsig_prepare(&dsd, dep, p, 0, ERTS_DSP_NO_LOCK, 0, 0); + if (code == ERTS_DSIG_PREP_CONNECTED || + code == ERTS_DSIG_PREP_PENDING) { + + code = erts_dsig_send_exit_tt(&dsd, p->common.id, item, + reason, SEQ_TRACE_TOKEN(p)); + ASSERT(code == ERTS_DSIG_SEND_OK); + } + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + } erts_destroy_dist_link(&dld); } } @@ -13388,16 +13352,33 @@ stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg) return yreg; } +static void print_current_process_info(fmtfn_t, void *to_arg, ErtsSchedulerData*); + /* * Print scheduler information */ void -erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { +erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) +{ int i; erts_aint32_t flg; - Process *p; - erts_print(to, to_arg, 
"=scheduler:%u\n", esdp->no); + switch (esdp->type) { + case ERTS_SCHED_NORMAL: + erts_print(to, to_arg, "=scheduler:%u\n", esdp->no); + break; + case ERTS_SCHED_DIRTY_CPU: + erts_print(to, to_arg, "=dirty_cpu_scheduler:%u\n", + (esdp->dirty_no + erts_no_schedulers)); + break; + case ERTS_SCHED_DIRTY_IO: + erts_print(to, to_arg, "=dirty_io_scheduler:%u\n", + (esdp->dirty_no + erts_no_schedulers + erts_no_dirty_cpu_schedulers)); + break; + default: + erts_print(to, to_arg, "=unknown_scheduler_type:%u\n", esdp->type); + break; + } flg = erts_atomic32_read_dirty(&esdp->ssi->flags); erts_print(to, to_arg, "Scheduler Sleep Info Flags: "); @@ -13443,10 +13424,24 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { } erts_print(to, to_arg, "\n"); - erts_print(to, to_arg, "Current Port: "); - if (esdp->current_port) - erts_print(to, to_arg, "%T", esdp->current_port->common.id); - erts_print(to, to_arg, "\n"); + if (esdp->type == ERTS_SCHED_NORMAL) { + erts_print(to, to_arg, "Current Port: "); + if (esdp->current_port) + erts_print(to, to_arg, "%T", esdp->current_port->common.id); + erts_print(to, to_arg, "\n"); + + erts_print_run_queue_info(to, to_arg, esdp->run_queue); + } + + /* This *MUST* to be the last information in scheduler block */ + print_current_process_info(to, to_arg, esdp); +} + +void erts_print_run_queue_info(fmtfn_t to, void *to_arg, + ErtsRunQueue *run_queue) +{ + erts_aint32_t flg; + int i; for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) { erts_print(to, to_arg, "Run Queue "); @@ -13468,12 +13463,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { break; } erts_print(to, to_arg, "Length: %d\n", - erts_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); + erts_atomic32_read_dirty(&run_queue->procs.prio_info[i].len)); } erts_print(to, to_arg, "Run Queue Port Length: %d\n", - erts_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); + erts_atomic32_read_dirty(&run_queue->ports.info.len)); - flg = erts_atomic32_read_dirty(&esdp->run_queue->flags); + flg = erts_atomic32_read_dirty(&run_queue->flags); erts_print(to, to_arg, "Run Queue Flags: "); for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) { erts_aint32_t chk = (1 << i); @@ -13540,9 +13535,15 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { } } erts_print(to, to_arg, "\n"); +} + + +static void print_current_process_info(fmtfn_t to, void *to_arg, + ErtsSchedulerData* esdp) +{ + Process *p = esdp->current_process; + erts_aint32_t flg; - /* This *MUST* to be the last information in scheduler block */ - p = esdp->current_process; erts_print(to, to_arg, "Current Process: "); if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) { flg = erts_atomic32_read_dirty(&p->state); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 16b3526208..ebf990c837 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -105,13 +105,13 @@ struct saved_calls { }; extern Export exp_send, exp_receive, exp_timeout; -extern int erts_sched_compact_load; -extern int erts_sched_balance_util; -extern Uint erts_no_schedulers; -extern Uint erts_no_total_schedulers; -extern Uint erts_no_dirty_cpu_schedulers; -extern Uint erts_no_dirty_io_schedulers; -extern Uint erts_no_run_queues; +extern int ERTS_WRITE_UNLIKELY(erts_sched_compact_load); +extern int ERTS_WRITE_UNLIKELY(erts_sched_balance_util); +extern Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers); +extern Uint 
ERTS_WRITE_UNLIKELY(erts_no_total_schedulers); +extern Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_cpu_schedulers); +extern Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_io_schedulers); +extern Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues); extern int erts_sched_thread_suggested_stack_size; extern int erts_dcpu_sched_thread_suggested_stack_size; extern int erts_dio_sched_thread_suggested_stack_size; @@ -244,6 +244,9 @@ extern int erts_dio_sched_thread_suggested_stack_size; (erts_aint32_t) (MSK), \ (erts_aint32_t) (FLGS))) +#define ERTS_RUNQ_POINTER_MASK (~((erts_aint_t) 3)) +#define ERTS_RUNQ_BOUND_FLAG ((erts_aint_t) 1) + typedef enum { ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED, ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED, @@ -522,7 +525,7 @@ typedef union { char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsRunQueue))]; } ErtsAlignedRunQueue; -extern ErtsAlignedRunQueue *erts_aligned_run_queues; +extern ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues); #define ERTS_PROC_REDUCTIONS_EXECUTED(SD, RQ, PRIO, REDS, AREDS)\ do { \ @@ -675,9 +678,9 @@ typedef union { char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerData))]; } ErtsAlignedSchedulerData; -extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data; -extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data; -extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data; +extern ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data); +extern ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data); +extern ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data); #if defined(ERTS_ENABLE_LOCK_CHECK) @@ -1167,14 +1170,14 @@ void erts_check_for_holes(Process* p); #define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(9) #define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(10) #define ERTS_PSFLG_GC ERTS_PSFLG_BIT(11) -#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(12) +/* #define ERTS_PSFLG_ ERTS_PSFLG_BIT(12) */ #define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(13) #define ERTS_PSFLG_ACTIVE_SYS ERTS_PSFLG_BIT(14) #define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15) #define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16) #define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17) #define ERTS_PSFLG_OFF_HEAP_MSGQ ERTS_PSFLG_BIT(18) -#define ERTS_PSFLG_ON_HEAP_MSGQ ERTS_PSFLG_BIT(19) +/* #define ERTS_PSFLG_ ERTS_PSFLG_BIT(19) */ #define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(20) #define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(21) #define ERTS_PSFLG_DIRTY_ACTIVE_SYS ERTS_PSFLG_BIT(22) @@ -1269,7 +1272,7 @@ void erts_check_for_holes(Process* p); #define SPO_OFF_HEAP_MSGQ 16 #define SPO_ON_HEAP_MSGQ 32 -extern int erts_default_spo_flags; +extern int ERTS_WRITE_UNLIKELY(erts_default_spo_flags); /* * The following struct contains options for a process to be spawned. @@ -1325,10 +1328,10 @@ extern erts_rwmtx_t erts_cpu_bind_rwmtx; ** erts_system_monitor must be != NIL, to allow testing on just ** the erts_system_monitor_* variables. 
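ERTS_WRITE_UNLIKELY, applied to these externs, marks globals that are written rarely but read often. Its real definition lives in sys.h and is not part of this hunk; a plausible shape, assuming a GCC-style section attribute, would be:

    /* Assumed definition, for illustration only: group rarely written globals
     * in their own section so they do not share cache lines with hot data. */
    #if defined(__GNUC__)
    #  define ERTS_WRITE_UNLIKELY(X) X __attribute__((section("ERTS_LOW_WRITE")))
    #else
    #  define ERTS_WRITE_UNLIKELY(X) X
    #endif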
*/ -extern Eterm erts_system_monitor; -extern Uint erts_system_monitor_long_gc; -extern Uint erts_system_monitor_long_schedule; -extern Uint erts_system_monitor_large_heap; +extern Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor); +extern Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_long_gc); +extern Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_long_schedule); +extern Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_large_heap); struct erts_system_monitor_flags_t { unsigned int busy_port : 1; unsigned int busy_dist_port : 1; @@ -1545,6 +1548,7 @@ Uint64 erts_step_proc_interval(void); ErtsProcList *erts_proclist_create(Process *); ErtsProcList *erts_proclist_copy(ErtsProcList *); void erts_proclist_destroy(ErtsProcList *); +void erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList*); ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *); ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **, ErtsProcList *); @@ -1791,6 +1795,7 @@ void erts_stack_dump(fmtfn_t to, void *to_arg, Process *); void erts_limited_stack_trace(fmtfn_t to, void *to_arg, Process *); void erts_program_counter_info(fmtfn_t to, void *to_arg, Process *); void erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp); +void erts_print_run_queue_info(fmtfn_t, void *to_arg, ErtsRunQueue*); void erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg); void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg); @@ -2166,7 +2171,12 @@ ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp); ERTS_GLB_INLINE Process *erts_get_current_process(void); ERTS_GLB_INLINE Eterm erts_get_current_pid(void); ERTS_GLB_INLINE Uint erts_get_scheduler_id(void); -ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p); +ERTS_GLB_INLINE void erts_init_runq_proc(Process *p, ErtsRunQueue *rq, int bnd); +ERTS_GLB_INLINE ErtsRunQueue *erts_set_runq_proc(Process *p, ErtsRunQueue *rq, int *boundp); +ERTS_GLB_INLINE int erts_try_change_runq_proc(Process *p, ErtsRunQueue *rq); +ERTS_GLB_INLINE ErtsRunQueue *erts_bind_runq_proc(Process *p, int bind); +ERTS_GLB_INLINE int erts_proc_runq_is_bound(Process *p); +ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p, int *boundp); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp); ERTS_GLB_INLINE void erts_runq_lock(ErtsRunQueue *rq); ERTS_GLB_INLINE int erts_runq_trylock(ErtsRunQueue *rq); @@ -2246,11 +2256,144 @@ Uint erts_get_scheduler_id(void) return esdp ? esdp->no : (Uint) 0; } +/** + * Init run-queue of process. + * + * @param p[in,out] Process + * @param rq[in] Run-queue that process will be assigned to + * @param bnd[in,out] If non-zero binds process to run-queue. + */ + +ERTS_GLB_INLINE void +erts_init_runq_proc(Process *p, ErtsRunQueue *rq, int bnd) +{ + erts_aint_t rqint = (erts_aint_t) rq; + if (bnd) + rqint |= ERTS_RUNQ_BOUND_FLAG; + erts_atomic_init_nob(&p->run_queue, rqint); +} + +/** + * Forcibly set run-queue of process. + * + * @param p[in,out] Process + * @param rq[in] Run-queue that process will be assigned to + * @param bndp[in,out] Pointer to integer. On input non-zero + * value causes the process to be bound to + * the run-queue. On output, indicating + * wether process previously was bound or + * not. + * @return Previous run-queue. 
+ */ + +ERTS_GLB_INLINE ErtsRunQueue * +erts_set_runq_proc(Process *p, ErtsRunQueue *rq, int *bndp) +{ + erts_aint_t rqint = (erts_aint_t) rq; + ASSERT(bndp); + ASSERT(rq); + if (*bndp) + rqint |= ERTS_RUNQ_BOUND_FLAG; + rqint = erts_atomic_xchg_nob(&p->run_queue, rqint); + *bndp = (int) (rqint & ERTS_RUNQ_BOUND_FLAG); + return (ErtsRunQueue *) (rqint & ERTS_RUNQ_POINTER_MASK); +} + +/** + * Try to change run-queue assignment of a process. + * + * @param p[in,out] Process + * @param rq[int] Run-queue that process will be assigned to + * @return Non-zero if the run-queue assignment was + * successfully changed. + */ + +ERTS_GLB_INLINE int +erts_try_change_runq_proc(Process *p, ErtsRunQueue *rq) +{ + erts_aint_t old_rqint, new_rqint; + + new_rqint = (erts_aint_t) rq; + old_rqint = (erts_aint_t) erts_atomic_read_nob(&p->run_queue); + while (1) { + erts_aint_t act_rqint; + + if (old_rqint & ERTS_RUNQ_BOUND_FLAG) + return 0; + + act_rqint = erts_atomic_cmpxchg_nob(&p->run_queue, + new_rqint, + old_rqint); + if (act_rqint == old_rqint) + return !0; + } +} + +/** + * + * Bind or unbind process to/from currently used run-queue. + * + * @param p Process + * @param bind Bind if non-zero; otherwise unbind + * @return Pointer to previously bound run-queue, + * or NULL if previously unbound + */ + +ERTS_GLB_INLINE ErtsRunQueue * +erts_bind_runq_proc(Process *p, int bind) +{ + erts_aint_t rqint; + if (bind) + rqint = erts_atomic_read_bor_nob(&p->run_queue, + ERTS_RUNQ_BOUND_FLAG); + else + rqint = erts_atomic_read_band_nob(&p->run_queue, + ~ERTS_RUNQ_BOUND_FLAG); + if (rqint & ERTS_RUNQ_BOUND_FLAG) + return (ErtsRunQueue *) (rqint & ERTS_RUNQ_POINTER_MASK); + else + return NULL; +} + +/** + * Determine wether a process is bound to a run-queue or not. + * + * @return Returns a non-zero value if bound, + * and zero of not bound. + */ + +ERTS_GLB_INLINE int +erts_proc_runq_is_bound(Process *p) +{ + erts_aint_t rqint = erts_atomic_read_nob(&p->run_queue); + return (int) (rqint & ERTS_RUNQ_BOUND_FLAG); +} + +/** + * Set run-queue of process. + * + * @param p[in,out] Process + * @param bndp[out] Pointer to integer. If non-NULL pointer, + * the integer will be set to a non-zero + * value if the process is bound to the + * run-queue. + * @return Pointer to the normal run-queue that + * the process currently is assigend to. + * A process is always assigned to a + * normal run-queue. 
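The bound/unbound state that used to be tracked by the ERTS_PSFLG_BOUND state flag now lives in the low bit of the atomic run-queue pointer itself (ERTS_RUNQ_BOUND_FLAG, masked off with ERTS_RUNQ_POINTER_MASK). A hypothetical caller, sketched with the functions introduced above (bind_proc_to_runq, p and rq are illustrative names, not part of the patch):

    static void bind_proc_to_runq(Process *p, ErtsRunQueue *rq)
    {
        int bound = !0;                           /* in: request binding */
        (void) erts_set_runq_proc(p, rq, &bound); /* out: was it bound before? */
        ASSERT(erts_proc_runq_is_bound(p));
    }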
+ */ + ERTS_GLB_INLINE ErtsRunQueue * -erts_get_runq_proc(Process *p) +erts_get_runq_proc(Process *p, int *bndp) { - ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue)); - return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue); + erts_aint_t rqint = erts_atomic_read_nob(&p->run_queue); + ErtsRunQueue *rq; + if (bndp) + *bndp = (int) (rqint & ERTS_RUNQ_BOUND_FLAG); + rqint &= ERTS_RUNQ_POINTER_MASK; + rq = (ErtsRunQueue *) rqint; + ASSERT(rq); + return rq; } ERTS_GLB_INLINE ErtsRunQueue * diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c index 3c80f0e0f6..aee88841ae 100644 --- a/erts/emulator/beam/erl_process_dict.c +++ b/erts/emulator/beam/erl_process_dict.c @@ -79,6 +79,8 @@ /* Array access macro */ #define ARRAY_GET(PDict, Index) (ASSERT((Index) < (PDict)->arraySize), \ (PDict)->data[Index]) +#define ARRAY_GET_PTR(PDict, Index) (ASSERT((Index) < (PDict)->arraySize), \ + &(PDict)->data[Index]) #define ARRAY_PUT(PDict, Index, Val) (ASSERT((Index) < (PDict)->arraySize), \ (PDict)->data[Index] = (Val)) @@ -92,7 +94,7 @@ static void pd_hash_erase_all(Process *p); static Eterm pd_hash_get_with_hval(Process *p, Eterm bucket, Eterm id); static Eterm pd_hash_get_keys(Process *p, Eterm value); static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd); -static Eterm pd_hash_get_all(Process *p, ProcDict *pd); +static Eterm pd_hash_get_all(Process *p, ProcDict *pd, int keep_dict); static Eterm pd_hash_put(Process *p, Eterm id, Eterm value); static void shrink(Process *p, Eterm* ret); @@ -281,7 +283,7 @@ BIF_RETTYPE get_0(BIF_ALIST_0) { Eterm ret; PD_CHECK(BIF_P->dictionary); - ret = pd_hash_get_all(BIF_P, BIF_P->dictionary); + ret = pd_hash_get_all(BIF_P, BIF_P->dictionary, 1); PD_CHECK(BIF_P->dictionary); BIF_RET(ret); } @@ -329,7 +331,7 @@ BIF_RETTYPE erase_0(BIF_ALIST_0) { Eterm ret; PD_CHECK(BIF_P->dictionary); - ret = pd_hash_get_all(BIF_P, BIF_P->dictionary); + ret = pd_hash_get_all(BIF_P, BIF_P->dictionary, 0); pd_hash_erase_all(BIF_P); PD_CHECK(BIF_P->dictionary); BIF_RET(ret); @@ -541,29 +543,46 @@ static Eterm pd_hash_get_keys(Process *p, Eterm value) static Eterm -pd_hash_get_all(Process *p, ProcDict *pd) +pd_hash_get_all(Process *p, ProcDict *pd, int keep_dict) { Eterm* hp; + Eterm* tp; Eterm res = NIL; Eterm tmp, tmp2; unsigned int i; unsigned int num; + Uint need; if (pd == NULL) { return res; } num = HASH_RANGE(pd); - hp = HAlloc(p, pd->numElements * 2); - + + /* + * If this is not erase/0, then must copy all key-value tuples + * as they may be mutated by put/2. + */ + need = pd->numElements * (keep_dict ? 
2+3 : 2); + hp = HAlloc(p, need); + for (i = 0; i < num; ++i) { tmp = ARRAY_GET(pd, i); if (is_boxed(tmp)) { - ASSERT(is_tuple(tmp)); + if (keep_dict) { + tp = tuple_val(tmp); + tmp = TUPLE2(hp, tp[1], tp[2]); + hp += 3; + } res = CONS(hp, tmp, res); hp += 2; } else if (is_list(tmp)) { while (tmp != NIL) { tmp2 = TCAR(tmp); + if (keep_dict) { + tp = tuple_val(tmp2); + tmp2 = TUPLE2(hp, tp[1], tp[2]); + hp += 3; + } res = CONS(hp, tmp2, res); hp += 2; tmp = TCDR(tmp); @@ -577,11 +596,14 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value) { unsigned int hval; Eterm *hp; + Eterm *tp; + Eterm *bucket; Eterm tpl; Eterm old; + Eterm old_val = am_undefined; Eterm tmp; int needed; - int i = 0; + int new_key = 1; #ifdef DEBUG Eterm *hp_limit; #endif @@ -595,7 +617,8 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value) p->dictionary->numElements = 0; } hval = pd_hash_value(p->dictionary, id); - old = ARRAY_GET(p->dictionary, hval); + bucket = ARRAY_GET_PTR(p->dictionary, hval); + old = *bucket; /* * Calculate the number of heap words needed and garbage @@ -603,32 +626,49 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value) */ needed = 3; /* {Key,Value} tuple */ if (is_boxed(old)) { - /* - * We don't want to compare keys twice, so we'll always - * reserve the space for two CONS cells. - */ - needed += 2+2; + ASSERT(is_tuple(old)); + tp = tuple_val(old); + if (EQ(tp[1], id)) { + old_val = tp[2]; + if (is_immed(value)) { + tp[2] = value; /* DESTRUCTIVE HEAP ASSIGNMENT */ + return old_val; + } + new_key = 0; + } + else { + needed += 2+2; + } } else if (is_list(old)) { - i = 0; - for (tmp = old; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) { - ++i; - } - if (is_nil(tmp)) { - i = -1; - needed += 2; - } else { - needed += 2*(i+1); + Eterm* prev_cdr = bucket; + + needed += 2; + for (tmp = old; tmp != NIL; prev_cdr = &TCDR(tmp), tmp = *prev_cdr) { + tp = tuple_val(TCAR(tmp)); + if (EQ(tp[1], id)) { + old_val = tp[2]; + if (is_immed(value)) { + tp[2] = value; /* DESTRUCTIVE HEAP ASSIGNMENT */ + return old_val; + } + new_key = 0; + /* Unlink old {Key,Value} from list */ + *prev_cdr = TCDR(tmp); /* maybe DESTRUCTIVE HEAP ASSIGNMENT */ + break; + } } } if (HeapWordsLeft(p) < needed) { Eterm root[3]; root[0] = id; root[1] = value; - root[2] = old; + root[2] = old_val; erts_garbage_collect(p, needed, root, 3); id = root[0]; value = root[1]; - old = root[2]; + old_val = root[2]; + ASSERT(bucket == ARRAY_GET_PTR(p->dictionary, hval)); + old = *bucket; } #ifdef DEBUG hp_limit = p->htop + needed; @@ -644,66 +684,29 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value) * Update the dictionary. */ if (is_nil(old)) { - ARRAY_PUT(p->dictionary, hval, tpl); - ++(p->dictionary->numElements); + *bucket = tpl; } else if (is_boxed(old)) { ASSERT(is_tuple(old)); - if (EQ(tuple_val(old)[1],id)) { - ARRAY_PUT(p->dictionary, hval, tpl); - return tuple_val(old)[2]; + if (!new_key) { + ASSERT(EQ(tuple_val(old)[1],id)); + *bucket = tpl; + return old_val; } else { hp = HeapOnlyAlloc(p, 4); tmp = CONS(hp, old, NIL); hp += 2; - ++(p->dictionary->numElements); - ARRAY_PUT(p->dictionary, hval, CONS(hp, tpl, tmp)); + *bucket = CONS(hp, tpl, tmp); hp += 2; ASSERT(hp <= hp_limit); } } else if (is_list(old)) { - if (i == -1) { - /* - * New key. Simply prepend the tuple to the beginning of the list. 
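put/2 now updates an existing {Key,Value} tuple in place when the new value is an immediate, which is why pd_hash_get_all() must copy every tuple unless it is called from erase/0: each returned element then costs 2 words for the cons cell plus 3 words for the copied 2-tuple, against only 2 words per element in the erase/0 case. The fast path, restated from the hunk above in simplified form:

    tp = tuple_val(old);
    if (EQ(tp[1], id) && is_immed(value)) {
        Eterm old_val = tp[2];
        tp[2] = value;            /* destructive update of the live tuple */
        return old_val;
    }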
- */ - hp = HeapOnlyAlloc(p, 2); - ARRAY_PUT(p->dictionary, hval, CONS(hp, tpl, old)); - hp += 2; - ASSERT(hp <= hp_limit); - ++(p->dictionary->numElements); - } else { - /* - * i = Number of CDRs to skip to reach the changed element in the list. - * - * Replace old value in list. To avoid pointers from the old generation - * to the new, we must rebuild the list from the beginning up to and - * including the changed element. - */ - Eterm nlist; - int j; - - hp = HeapOnlyAlloc(p, (i+1)*2); - - /* Find the list element to change. */ - for (j = 0, nlist = old; j < i; j++, nlist = TCDR(nlist)) { - ; - } - ASSERT(EQ(tuple_val(TCAR(nlist))[1], id)); - nlist = TCDR(nlist); /* Unchanged part of list. */ - - /* Rebuild list before the updated element. */ - for (tmp = old; i-- > 0; tmp = TCDR(tmp)) { - nlist = CONS(hp, TCAR(tmp), nlist); - hp += 2; - } - ASSERT(EQ(tuple_val(TCAR(tmp))[1], id)); - - /* Put the updated element first in the new list. */ - nlist = CONS(hp, tpl, nlist); - hp += 2; - ASSERT(hp <= hp_limit); - ARRAY_PUT(p->dictionary, hval, nlist); - return tuple_val(TCAR(tmp))[2]; - } + /* + * Simply prepend the tuple to the beginning of the list. + */ + hp = HeapOnlyAlloc(p, 2); + *bucket = CONS(hp, tpl, *bucket); + hp += 2; + ASSERT(hp <= hp_limit); } else { #ifdef DEBUG erts_fprintf(stderr, @@ -714,10 +717,13 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value) erts_exit(ERTS_ERROR_EXIT, "Damaged process dictionary found during put/2."); } + + p->dictionary->numElements += new_key; + if (HASH_RANGE(p->dictionary) <= p->dictionary->numElements) { grow(p); } - return am_undefined; + return old_val; } /* diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index 0b7f361622..05e7bcdea2 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -51,6 +51,8 @@ static void stack_trace_dump(fmtfn_t to, void *to_arg, Eterm* sp); static void print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x); static void heap_dump(fmtfn_t to, void *to_arg, Eterm x); static void dump_binaries(fmtfn_t to, void *to_arg, Binary* root); +void erts_print_base64(fmtfn_t to, void *to_arg, + byte* src, Uint size); static void dump_externally(fmtfn_t to, void *to_arg, Eterm term); static void mark_literal(Eterm* ptr); static void init_literal_areas(void); @@ -76,8 +78,17 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg) Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { erts_aint32_t state = erts_atomic32_read_acqb(&p->state); - if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC))) - dump_process_info(to, to_arg, p); + if (state & ERTS_PSFLG_EXITING) + continue; + if (state & ERTS_PSFLG_GC) { + ErtsSchedulerData *sdp = erts_get_scheduler_data(); + if (!sdp || p != sdp->current_process) + continue; + + /* We want to dump the garbing process that caused the dump */ + } + + dump_process_info(to, to_arg, p); } } @@ -133,9 +144,12 @@ dump_process_info(fmtfn_t to, void *to_arg, Process *p) ErtsMessage* mp; int yreg = -1; + if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) + return; + ERTS_MSGQ_MV_INQ2PRIVQ(p); - if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) { + if (p->msg.first) { erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id); for (mp = p->msg.first; mp != NULL; mp = mp->next) { Eterm mesg = ERL_MESSAGE_TERM(mp); @@ -150,38 +164,34 @@ dump_process_info(fmtfn_t to, void *to_arg, Process *p) } } - if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) { - if (p->dictionary) { - erts_print(to, to_arg, 
"=proc_dictionary:%T\n", p->common.id); - erts_deep_dictionary_dump(to, to_arg, - p->dictionary, dump_element_nl); - } + if (p->dictionary) { + erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id); + erts_deep_dictionary_dump(to, to_arg, + p->dictionary, dump_element_nl); } - if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) { - erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id); - for (sp = p->stop; sp < STACK_START(p); sp++) { - yreg = stack_element_dump(to, to_arg, sp, yreg); - } + erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id); + for (sp = p->stop; sp < STACK_START(p); sp++) { + yreg = stack_element_dump(to, to_arg, sp, yreg); + } - erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id); - for (sp = p->stop; sp < STACK_START(p); sp++) { - Eterm term = *sp; - - if (!is_catch(term) && !is_CP(term)) { - heap_dump(to, to_arg, term); - } - } - for (mp = p->msg.first; mp != NULL; mp = mp->next) { - Eterm mesg = ERL_MESSAGE_TERM(mp); - if (is_value(mesg)) - heap_dump(to, to_arg, mesg); - mesg = ERL_MESSAGE_TOKEN(mp); - heap_dump(to, to_arg, mesg); - } - if (p->dictionary) { - erts_deep_dictionary_dump(to, to_arg, p->dictionary, heap_dump); - } + erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id); + for (sp = p->stop; sp < STACK_START(p); sp++) { + Eterm term = *sp; + + if (!is_catch(term) && !is_CP(term)) { + heap_dump(to, to_arg, term); + } + } + for (mp = p->msg.first; mp != NULL; mp = mp->next) { + Eterm mesg = ERL_MESSAGE_TERM(mp); + if (is_value(mesg)) + heap_dump(to, to_arg, mesg); + mesg = ERL_MESSAGE_TOKEN(mp); + heap_dump(to, to_arg, mesg); + } + if (p->dictionary) { + erts_deep_dictionary_dump(to, to_arg, p->dictionary, heap_dump); } } @@ -193,6 +203,7 @@ dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep) else { byte *e; size_t sz; + if (!(edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB)) erts_print(to, to_arg, "D0:"); else { @@ -210,12 +221,18 @@ dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep) else { ASSERT(*e == VERSION_MAGIC); } - erts_print(to, to_arg, "E%X:", sz); - if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) - erts_print(to, to_arg, "%02X", VERSION_MAGIC); - while (e < edep->ext_endp) - erts_print(to, to_arg, "%02X", *e++); + if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) { + byte sbuf[3]; + int i = 0; + + sbuf[i++] = VERSION_MAGIC; + while (i < sizeof(sbuf) && e < edep->ext_endp) { + sbuf[i++] = *e++; + } + erts_print_base64(to, to_arg, sbuf, i); + } + erts_print_base64(to, to_arg, e, edep->ext_endp - e); } } @@ -444,16 +461,13 @@ heap_dump(fmtfn_t to, void *to_arg, Eterm x) } else if (is_binary_header(hdr)) { Uint tag = thing_subtag(hdr); Uint size = binary_size(x); - Uint i; if (tag == HEAP_BINARY_SUBTAG) { byte* p; erts_print(to, to_arg, "Yh%X:", size); p = binary_bytes(x); - for (i = 0; i < size; i++) { - erts_print(to, to_arg, "%02X", p[i]); - } + erts_print_base64(to, to_arg, p, size); } else if (tag == REFC_BINARY_SUBTAG) { ProcBin* pb = (ProcBin *) binary_val(x); Binary* val = pb->val; @@ -596,16 +610,13 @@ static void dump_binaries(fmtfn_t to, void *to_arg, Binary* current) { while (current) { - long i; - long size = current->orig_size; + SWord size = current->orig_size; byte* bytes = (byte*) current->orig_bytes; erts_print(to, to_arg, "=binary:" PTR_FMT "\n", current); erts_print(to, to_arg, "%X:", size); - for (i = 0; i < size; i++) { - erts_print(to, to_arg, "%02X", bytes[i]); - } - erts_putc(to, to_arg, '\n'); + erts_print_base64(to, to_arg, bytes, size); + erts_putc(to, to_arg, '\n'); current = (Binary *) 
current->intern.flags; } } @@ -644,9 +655,7 @@ dump_externally(fmtfn_t to, void *to_arg, Eterm term) s = p = sbuf; erts_encode_ext(term, &p); erts_print(to, to_arg, "E%X:", p-s); - while (s < p) { - erts_print(to, to_arg, "%02X", *s++); - } + erts_print_base64(to, to_arg, sbuf, p-s); } /* @@ -802,16 +811,13 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area) } else if (is_binary_header(w)) { Uint tag = thing_subtag(w); Uint size = binary_size(term); - Uint i; if (tag == HEAP_BINARY_SUBTAG) { byte* p; erts_print(to, to_arg, "Yh%X:", size); p = binary_bytes(term); - for (i = 0; i < size; i++) { - erts_print(to, to_arg, "%02X", p[i]); - } + erts_print_base64(to, to_arg, p, size); } else if (tag == REFC_BINARY_SUBTAG) { ProcBin* pb = (ProcBin *) binary_val(term); Binary* val = pb->val; @@ -1003,8 +1009,6 @@ erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg) erts_print(to, to_arg, "SUSPENDED"); break; case ERTS_PSFLG_GC: erts_print(to, to_arg, "GC"); break; - case ERTS_PSFLG_BOUND: - erts_print(to, to_arg, "BOUND"); break; case ERTS_PSFLG_TRAP_EXIT: erts_print(to, to_arg, "TRAP_EXIT"); break; case ERTS_PSFLG_ACTIVE_SYS: @@ -1017,8 +1021,6 @@ erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg) erts_print(to, to_arg, "DELAYED_SYS"); break; case ERTS_PSFLG_OFF_HEAP_MSGQ: erts_print(to, to_arg, "OFF_HEAP_MSGQ"); break; - case ERTS_PSFLG_ON_HEAP_MSGQ: - erts_print(to, to_arg, "ON_HEAP_MSGQ"); break; case ERTS_PSFLG_DIRTY_CPU_PROC: erts_print(to, to_arg, "DIRTY_CPU_PROC"); break; case ERTS_PSFLG_DIRTY_IO_PROC: diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h index 5ec6b6b44b..3dd1c2555c 100644 --- a/erts/emulator/beam/erl_term.h +++ b/erts/emulator/beam/erl_term.h @@ -55,9 +55,6 @@ struct erl_node_; /* Declared in erl_node_tables.h */ #if defined(ARCH_64) # define TAG_PTR_MASK__ 0x7 # if !defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) -# ifdef HIPE -# error Hipe on 64-bit needs a real mmap as it does not support the literal tag -# endif # define TAG_LITERAL_PTR 0x4 # else # undef TAG_LITERAL_PTR @@ -316,7 +313,8 @@ _ET_DECLARE_CHECKED(Uint,header_arity,Eterm) #define MAX_ARITYVAL ((((Uint)1) << 24) - 1) #define ERTS_MAX_TUPLE_SIZE MAX_ARITYVAL -#define make_arityval(sz) _make_header((sz),_TAG_HEADER_ARITYVAL) +#define make_arityval(sz) (ASSERT((sz) <= MAX_ARITYVAL), \ + _make_header((sz),_TAG_HEADER_ARITYVAL)) #define is_arity_value(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL) #define is_sane_arity_value(x) ((((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL) && \ (((x) >> _HEADER_ARITY_OFFS) <= MAX_ARITYVAL)) diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h index fa936b5707..8c029bcf99 100644 --- a/erts/emulator/beam/erl_thr_progress.h +++ b/erts/emulator/beam/erl_thr_progress.h @@ -28,7 +28,7 @@ * Author: Rickard Green */ -#if !defined(ERL_THR_PROGRESS_H__TSD_TYPE__) +#ifndef ERL_THR_PROGRESS_H__TSD_TYPE__ #define ERL_THR_PROGRESS_H__TSD_TYPE__ #include "sys.h" diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index 4b996d8fc2..018894f685 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -2533,7 +2533,7 @@ load_tracer_nif(const ErtsTracer tracer) for(i = 0; i < num_of_funcs; i++) { for (j = 0; j < NIF_TRACER_TYPES; j++) { - if (strcmp(tracers[j].name, funcs[i].name) == 0 && tracers[j].arity == funcs[i].arity) { + if (sys_strcmp(tracers[j].name, funcs[i].name) == 0 && 
tracers[j].arity == funcs[i].arity) { tracers[j].cb = &(funcs[i]); break; } @@ -2783,24 +2783,28 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, ASSERT(0); } - /* Only remove tracer on self() and ports */ + /* Only remove tracer on (self() or ports) AND we are on a normal scheduler */ if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) { + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsProcLocks c_p_xlocks = 0; - if (is_internal_pid(t_p->id)) { - ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); - if (c_p_locks != ERTS_PROC_LOCKS_ALL) { - c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL; - if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) { - erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); - erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (is_internal_pid(t_p->id)) { + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + if (c_p_locks != ERTS_PROC_LOCKS_ALL) { + c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL; + if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) { + erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + } } } - } - erts_tracer_replace(t_p, erts_tracer_nil); - t_p->trace_flags &= ~TRACEE_FLAGS; - if (c_p_xlocks) - erts_proc_unlock(c_p, c_p_xlocks); + erts_tracer_replace(t_p, erts_tracer_nil); + t_p->trace_flags &= ~TRACEE_FLAGS; + + if (c_p_xlocks) + erts_proc_unlock(c_p, c_p_xlocks); + } } return 0; @@ -3015,7 +3019,7 @@ static void *tracer_alloc_fun(void* tmpl) ErtsTracerNif *obj = erts_alloc(ERTS_ALC_T_TRACER_NIF, sizeof(ErtsTracerNif) + sizeof(ErtsThrPrgrLaterOp)); - memcpy(obj, tmpl, sizeof(*obj)); + sys_memcpy(obj, tmpl, sizeof(*obj)); return obj; } diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c index b7a5c45fea..8673e029e6 100644 --- a/erts/emulator/beam/erl_unicode.c +++ b/erts/emulator/beam/erl_unicode.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2008-2017. All Rights Reserved. + * Copyright Ericsson AB 2008-2018. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
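is_tracer_enabled() now only tears the tracer down when executing on a normal scheduler; on dirty schedulers the extra lock handling is skipped entirely. The guard, restated from the hunk above:

    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
        /* take any missing process locks, replace the tracer with
         * erts_tracer_nil and clear TRACEE_FLAGS */
    }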
@@ -144,7 +144,7 @@ static Eterm make_magic_bin_for_restart(Process *p, RestartContext *rc) cleanup_restart_context_bin); RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp); Eterm *hp; - memcpy(restartp,rc,sizeof(RestartContext)); + sys_memcpy(restartp,rc,sizeof(RestartContext)); hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); return erts_mk_magic_ref(&hp, &MSO(p), mbp); } @@ -254,12 +254,12 @@ static Uint copy_utf8_bin(byte *target, byte *source, Uint size, ASSERT(need > 0); ASSERT(from_source > 0); if (size < from_source) { - memcpy(leftover + (*num_leftovers), source, size); + sys_memcpy(leftover + (*num_leftovers), source, size); *num_leftovers += size; return 0; } /* leftover has room for four bytes (see bif) */ - memcpy(leftover + (*num_leftovers),source,from_source); + sys_memcpy(leftover + (*num_leftovers),source,from_source); c = copy_utf8_bin(target, leftover, need, NULL, NULL, &tmp_err_pos, characters); if (tmp_err_pos != 0) { *err_pos = source; @@ -291,7 +291,7 @@ static Uint copy_utf8_bin(byte *target, byte *source, Uint size, size -= 2; copied += 2; } else if (((*source) & ((byte) 0xF0)) == 0xE0) { if (leftover && size < 3) { - memcpy(leftover, source, (int) size); + sys_memcpy(leftover, source, (int) size); *num_leftovers = (int) size; break; } @@ -313,7 +313,7 @@ static Uint copy_utf8_bin(byte *target, byte *source, Uint size, size -= 3; copied += 3; } else if (((*source) & ((byte) 0xF8)) == 0xF0) { if (leftover && size < 4) { - memcpy(leftover, source, (int) size); + sys_memcpy(leftover, source, (int) size); *num_leftovers = (int) size; break; } @@ -1158,14 +1158,15 @@ static ERTS_INLINE int analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left, Sint *num_latin1_chars, Uint max_chars) { + int res = ERTS_UTF8_OK; Uint latin1_count; int is_latin1; + Uint nchars = 0; *err_pos = source; if (num_latin1_chars) { is_latin1 = 1; latin1_count = 0; } - *num_chars = 0; while (size) { if (((*source) & ((byte) 0x80)) == 0) { source++; @@ -1174,11 +1175,13 @@ analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left latin1_count++; } else if (((*source) & ((byte) 0xE0)) == 0xC0) { if (size < 2) { - return ERTS_UTF8_INCOMPLETE; + res = ERTS_UTF8_INCOMPLETE; + break; } if (((source[1] & ((byte) 0xC0)) != 0x80) || ((*source) < 0xC2) /* overlong */) { - return ERTS_UTF8_ERROR; + res = ERTS_UTF8_ERROR; + break; } if (num_latin1_chars) { latin1_count++; @@ -1189,16 +1192,19 @@ analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left size -= 2; } else if (((*source) & ((byte) 0xF0)) == 0xE0) { if (size < 3) { - return ERTS_UTF8_INCOMPLETE; + res = ERTS_UTF8_INCOMPLETE; + break; } if (((source[1] & ((byte) 0xC0)) != 0x80) || ((source[2] & ((byte) 0xC0)) != 0x80) || (((*source) == 0xE0) && (source[1] < 0xA0)) /* overlong */ ) { - return ERTS_UTF8_ERROR; + res = ERTS_UTF8_ERROR; + break; } if ((((*source) & ((byte) 0xF)) == 0xD) && ((source[1] & 0x20) != 0)) { - return ERTS_UTF8_ERROR; + res = ERTS_UTF8_ERROR; + break; } source += 3; size -= 3; @@ -1206,37 +1212,47 @@ analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left is_latin1 = 0; } else if (((*source) & ((byte) 0xF8)) == 0xF0) { if (size < 4) { - return ERTS_UTF8_INCOMPLETE; + res = ERTS_UTF8_INCOMPLETE; + break; } if (((source[1] & ((byte) 0xC0)) != 0x80) || ((source[2] & ((byte) 0xC0)) != 0x80) || ((source[3] & ((byte) 0xC0)) != 0x80) || (((*source) == 0xF0) && (source[1] < 0x90)) /* overlong */) { - return ERTS_UTF8_ERROR; + res = 
ERTS_UTF8_ERROR; + break; } if ((((*source) & ((byte)0x7)) > 0x4U) || ((((*source) & ((byte)0x7)) == 0x4U) && ((source[1] & ((byte)0x3F)) > 0xFU))) { - return ERTS_UTF8_ERROR; + res = ERTS_UTF8_ERROR; + break; } source += 4; size -= 4; if (num_latin1_chars) is_latin1 = 0; } else { - return ERTS_UTF8_ERROR; + res = ERTS_UTF8_ERROR; + break; } - ++(*num_chars); + ++nchars; *err_pos = source; - if (max_chars && size > 0 && *num_chars == max_chars) - return ERTS_UTF8_OK_MAX_CHARS; + if (max_chars && size > 0 && nchars == max_chars) { + res = ERTS_UTF8_OK_MAX_CHARS; + break; + } if (left && --(*left) <= 0 && size) { - return ERTS_UTF8_ANALYZE_MORE; + res = ERTS_UTF8_ANALYZE_MORE; + break; } } + + *num_chars = nchars; if (num_latin1_chars) *num_latin1_chars = is_latin1 ? latin1_count : -1; - return ERTS_UTF8_OK; + + return res; } int erts_analyze_utf8(byte *source, Uint size, @@ -1252,29 +1268,18 @@ int erts_analyze_utf8_x(byte *source, Uint size, return analyze_utf8(source, size, err_pos, num_chars, left, num_latin1_chars, max_chars); } -/* - * No errors should be able to occur - no overlongs, no malformed, no nothing - */ -static Eterm do_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, - Uint left, - Uint *num_built, Uint *num_eaten, Eterm tail) +static ERTS_INLINE Eterm +make_list_from_utf8_buf(Eterm **hpp, Uint num, + byte *bytes, Uint sz, + Uint *num_built, Uint *num_eaten, + Eterm tail) { Eterm *hp; Eterm ret; + Uint left = num; byte *source, *ssource; Uint unipoint; - - ASSERT(num > 0); - if (left < num) { - if (left > 0) - num = left; - else - num = 1; - } - - *num_built = num; /* Always */ - - hp = HAlloc(p,num * 2); + hp = *hpp; ret = tail; source = bytes + sz; ssource = source; @@ -1302,20 +1307,97 @@ static Eterm do_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, } ret = CONS(hp,make_small(unipoint),ret); hp += 2; - if (--num <= 0) { + if (--left <= 0) { break; } } + *hpp = hp; + *num_built = num; /* Always */ *num_eaten = (ssource - source); return ret; } +/* + * No errors should be able to occur - no overlongs, no malformed, no nothing + */ +static Eterm do_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, + Uint left, + Uint *num_built, Uint *num_eaten, Eterm tail) +{ + Eterm *hp; + + ASSERT(num > 0); + if (left < num) { + if (left > 0) + num = left; + else + num = 1; + } + + hp = HAlloc(p,num * 2); + + return make_list_from_utf8_buf(&hp, num, bytes, sz, + num_built, num_eaten, + tail); +} Eterm erts_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, Uint left, Uint *num_built, Uint *num_eaten, Eterm tail) { return do_utf8_to_list(p, num, bytes, sz, left, num_built, num_eaten, tail); } +Uint erts_atom_to_string_length(Eterm atom) +{ + Atom *ap; + + ASSERT(is_atom(atom)); + ap = atom_tab(atom_val(atom)); + + if (ap->latin1_chars >= 0) + return (Uint) ap->len; + else { + byte* err_pos; + Uint num_chars; +#ifdef DEBUG + int ares = +#endif + erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL); + ASSERT(ares == ERTS_UTF8_OK); + + return num_chars; + } +} + +Eterm erts_atom_to_string(Eterm **hpp, Eterm atom) +{ + Atom *ap; + + ASSERT(is_atom(atom)); + ap = atom_tab(atom_val(atom)); + if (ap->latin1_chars >= 0) + return buf_to_intlist(hpp, (char*)ap->name, ap->len, NIL); + else { + Eterm res; + byte* err_pos; + Uint num_chars, num_built, num_eaten; +#ifdef DEBUG + Eterm *hp_start = *hpp; + int ares = +#endif + erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL); + ASSERT(ares == ERTS_UTF8_OK); + + res = make_list_from_utf8_buf(hpp, 
num_chars, ap->name, ap->len, + &num_built, &num_eaten, NIL); + + ASSERT(num_built == num_chars); + ASSERT(num_eaten == ap->len); + ASSERT(*hpp - hp_start == 2*num_chars); + + return res; + } +} + static int is_candidate(Uint cp) { int index,pos; @@ -2026,7 +2108,7 @@ char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, size_t statbu } else { name_buf = statbuf; } - memcpy(name_buf,bytes,size); + sys_memcpy(name_buf,bytes,size); name_buf[size]=0; } else { name_buf = erts_convert_filename_to_wchar(bytes, size, @@ -2083,18 +2165,9 @@ char* erts_convert_filename_to_wchar(byte* bytes, Uint size, return name_buf; } - -static int filename_len_16bit(byte *str) -{ - byte *p = str; - while(*p != '\0' || p[1] != '\0') { - p += 2; - } - return (p - str); -} -Eterm erts_convert_native_to_filename(Process *p, byte *bytes) +Eterm erts_convert_native_to_filename(Process *p, size_t size, byte *bytes) { - Uint size,num_chars; + Uint num_chars; Eterm *hp; byte *err_pos; Uint num_built; /* characters */ @@ -2108,7 +2181,6 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes) case ERL_FILENAME_UTF8_MAC: mac = 1; case ERL_FILENAME_UTF8: - size = strlen((char *) bytes); if (size == 0) return NIL; if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) { @@ -2123,7 +2195,6 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes) } return ret; case ERL_FILENAME_WIN_WCHAR: - size=filename_len_16bit(bytes); if ((size % 2) != 0) { /* Panic fixup to avoid crashing the emulator */ size--; hp = HAlloc(p, size+2); @@ -2146,7 +2217,6 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes) goto noconvert; } noconvert: - size = strlen((char *) bytes); hp = HAlloc(p, 2 * size); return erts_bin_bytes_to_list(NIL, hp, bytes, size, 0); } @@ -2158,7 +2228,6 @@ Sint erts_native_filename_need(Eterm ioterm, int encoding) Eterm obj; DECLARE_ESTACK(stack); Sint need = 0; - int seen_null = 0; if (is_atom(ioterm)) { Atom* ap; @@ -2203,9 +2272,7 @@ Sint erts_native_filename_need(Eterm ioterm, int encoding) byte *name = ap->name; int len = ap->len; for (i = 0; i < len; i++) { - if (name[i] == 0) - seen_null = 1; - else if (seen_null) { + if (name[i] == 0) { need = -1; break; } @@ -2245,9 +2312,7 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */ * Do not allow null in * the middle of filenames */ - if (x == 0) - seen_null = 1; - else if (seen_null) { + if (x == 0) { DESTROY_ESTACK(stack); return ((Sint) -1); } @@ -2580,7 +2645,6 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1) BIF_ERROR(BIF_P,BADARG); } if (is_binary(BIF_ARG_1)) { - int seen_null = 0; byte *temp_alloc = NULL; byte *bytes; byte *err_pos; @@ -2597,8 +2661,6 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1) for (i = 0; i < size; i++) { /* Don't allow null in the middle of filenames... */ if (bytes[i] == 0) - seen_null = 1; - else if (seen_null) goto bin_name_error; bin_p[i] = bytes[i]; } @@ -2617,8 +2679,6 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1) while (size--) { /* Don't allow null in the middle of filenames... */ if (*bytes == 0) - seen_null = 1; - else if (seen_null) goto bin_name_error; *bin_p++ = *bytes++; *bin_p++ = 0; diff --git a/erts/emulator/beam/erl_unicode.h b/erts/emulator/beam/erl_unicode.h index e01eaa787e..31369fc8f9 100644 --- a/erts/emulator/beam/erl_unicode.h +++ b/erts/emulator/beam/erl_unicode.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2008-2016. All Rights Reserved. 
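The new erts_atom_to_string_length()/erts_atom_to_string() helpers build a list of Unicode code points from an atom. A hypothetical caller (p and atom are illustrative names); each character costs one cons cell, so the heap need is two words per character:

    Uint need = 2 * erts_atom_to_string_length(atom);
    Eterm *hp = HAlloc(p, need);
    Eterm chars = erts_atom_to_string(&hp, atom);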
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,4 +21,7 @@ #ifndef _ERL_UNICODE_H #define _ERL_UNICODE_H +Uint erts_atom_to_string_length(Eterm atom); +Eterm erts_atom_to_string(Eterm **hpp, Eterm atom); + #endif /* _ERL_UNICODE_H */ diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index 44d8c85867..e4087e0ac8 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -91,7 +91,7 @@ Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...); #define erts_bld_tuple5(H,S,E1,E2,E3,E4,E5) erts_bld_tuple(H,S,5,E1,E2,E3,E4,E5) Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]); Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len); -#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str)) +#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,sys_strlen(str)) Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]); Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp, Sint length, Eterm terms1[], Uint terms2[]); diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 76980b5871..f8391fb665 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -69,7 +69,7 @@ # ifdef CHECK_FOR_HOLES # define INIT_HEAP_MEM(p,sz) erts_set_hole_marker(HEAP_TOP(p), (sz)) # else -# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*)) +# define INIT_HEAP_MEM(p,sz) sys_memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*)) # endif #else # define INIT_HEAP_MEM(p,sz) ((void)0) diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d index 237889e0f5..d0b10e0306 100644 --- a/erts/emulator/beam/erlang_dtrace.d +++ b/erts/emulator/beam/erlang_dtrace.d @@ -634,72 +634,6 @@ provider erlang { */ probe aio_pool__get(char *, int); - /* Probes for efile_drv.c */ - - /** - * Entry into the efile_drv.c file I/O driver - * - * For a list of command numbers used by this driver, see the section - * "Guide to efile_drv.c probe arguments" in ../../../HOWTO/DTRACE.md. - * That section also contains explanation of the various integer and - * string arguments that may be present when any particular probe fires. - * - * NOTE: Not all Linux platforms (using SystemTap) can support - * arguments beyond arg9. - * - * - * TODO: Adding the port string, args[10], is a pain. Making that - * port string available to all the other efile_drv.c probes - * will be more pain. Is the pain worth it? If yes, then - * add them everywhere else and grit our teeth. If no, then - * rip it out. - * - * @param thread-id number of the scheduler Pthread arg0 - * @param tag number: {thread-id, tag} uniquely names a driver operation - * @param user-tag string arg2 - * @param command number arg3 - * @param string argument 1 arg4 - * @param string argument 2 arg5 - * @param integer argument 1 arg6 - * @param integer argument 2 arg7 - * @param integer argument 3 arg8 - * @param integer argument 4 arg9 - * @param port the port ID of the busy port args[10] - */ - probe efile_drv__entry(int, int, char *, int, char *, char *, - int64_t, int64_t, int64_t, int64_t, char *); - - /** - * Entry into the driver's internal work function. Computation here - * is performed by a async worker pool Pthread. 
- * - * @param thread-id number - * @param tag number - * @param command number - */ - probe efile_drv__int_entry(int, int, int); - - /** - * Return from the driver's internal work function. - * - * @param thread-id number - * @param tag number - * @param command number - */ - probe efile_drv__int_return(int, int, int); - - /** - * Return from the efile_drv.c file I/O driver - * - * @param thread-id number arg0 - * @param tag number arg1 - * @param user-tag string arg2 - * @param command number arg3 - * @param Success? 1 is success, 0 is failure arg4 - * @param If failure, the errno of the error. arg5 - */ - probe efile_drv__return(int, int, char *, int, int, int); - /* * The set of probes called by the erlang tracer nif backend. In order diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 970158933f..fb42969a8f 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -122,8 +122,9 @@ static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acm static Export binary_to_term_trap_export; static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1); -static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b, - Export *bif, Eterm arg0, Eterm arg1); +static Sint transcode_dist_obuf(ErtsDistOutputBuf*, DistEntry*, Uint32 dflags, Sint reds); + + void erts_init_external(void) { erts_init_trap_export(&term_to_binary_trap_export, @@ -222,23 +223,10 @@ erts_destroy_atom_cache_map(ErtsAtomCacheMap *acmp) static ERTS_INLINE void insert_acache_map(ErtsAtomCacheMap *acmp, Eterm atom, Uint32 dflags) { - /* - * If the receiver do not understand utf8 atoms - * and this atom cannot be represented in latin1, - * we are not allowed to cache it. - * - * In this case all atoms are assumed to have - * latin1 encoding in the cache. By refusing it - * in the cache we will instead encode it using - * ATOM_UTF8_EXT/SMALL_ATOM_UTF8_EXT which the - * receiver do not recognize and tear down the - * connection. - */ - if (acmp && acmp->sz < ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES - && ((dflags & DFLAG_UTF8_ATOMS) - || atom_tab(atom_val(atom))->latin1_chars >= 0)) { + if (acmp && acmp->sz < ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES) { int ix; ASSERT(acmp->hdr_sz < 0); + ASSERT(dflags & DFLAG_UTF8_ATOMS); ix = atom2cix(atom); if (acmp->cache[ix].iix < 0) { acmp->cache[ix].iix = acmp->sz; @@ -258,9 +246,7 @@ get_iix_acache_map(ErtsAtomCacheMap *acmp, Eterm atom, Uint32 dflags) ASSERT(is_atom(atom)); ix = atom2cix(atom); if (acmp->cache[ix].iix < 0) { - ASSERT(acmp->sz == ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES - || (!(dflags & DFLAG_UTF8_ATOMS) - && atom_tab(atom_val(atom))->latin1_chars < 0)); + ASSERT(acmp->sz == ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES); return -1; } else { @@ -274,7 +260,6 @@ void erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags) { if (acmp) { - int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS); int long_atoms = 0; /* !0 if one or more atoms are longer than 255. */ int i; int sz; @@ -285,6 +270,7 @@ erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags) + 1 /* number of internal cache entries */ ; int min_sz; + ASSERT(dflags & DFLAG_UTF8_ATOMS); ASSERT(acmp->hdr_sz < 0); /* Make sure cache update instructions fit */ min_sz = fix_sz+(2+4)*acmp->sz; @@ -296,7 +282,7 @@ erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags) atom = acmp->cache[acmp->cix[i]].atom; ASSERT(is_atom(atom)); a = atom_tab(atom_val(atom)); - len = (int) (utf8_atoms ? 
a->len : a->latin1_chars); + len = (int) a->len; ASSERT(len >= 0); if (!long_atoms && len > 255) long_atoms = 1; @@ -366,18 +352,77 @@ byte *erts_encode_ext_dist_header_setup(byte *ctl_ext, ErtsAtomCacheMap *acmp) } } -byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint32 dflags) + +#define PASS_THROUGH 'p' + +Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf* ob, + DistEntry* dep, + Uint32 dflags, + Sint reds) { byte *ip; byte instr_buf[(2+4)*ERTS_ATOM_CACHE_SIZE]; int ci, sz; byte dist_hdr_flags; int long_atoms; - int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS); - register byte *ep = ext; - ASSERT(ep[0] == VERSION_MAGIC); - if (ep[1] != DIST_HEADER) - return ext; + register byte *ep = ob->extp; + ASSERT(dflags & DFLAG_UTF8_ATOMS); + + /* + * The buffer can have different layouts at this point depending on + * what was known when encoded: + * + * Pending connection: CtrlTerm [, MsgTerm] + * With atom cache : VERSION_MAGIC, DIST_HEADER, ..., CtrlTerm [, MsgTerm] + * No atom cache : VERSION_MAGIC, CtrlTerm [, VERSION_MAGIC, MsgTerm] + */ + + if (ep[0] != VERSION_MAGIC || dep->transcode_ctx) { + /* + * Was encoded without atom cache toward pending connection. + */ + ASSERT(ep[0] == SMALL_TUPLE_EXT || ep[0] == LARGE_TUPLE_EXT); + + if (~dflags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME) + && ep[0] == SMALL_TUPLE_EXT + && ep[1] == 4 + && ep[2] == SMALL_INTEGER_EXT + && (ep[3] == DOP_MONITOR_P || + ep[3] == DOP_MONITOR_P_EXIT || + ep[3] == DOP_DEMONITOR_P)) { + /* + * Receiver does not support process monitoring. + * Suppress monitor control msg (see erts_dsig_send_monitor) + * by converting it to an empty (tick) packet. + */ + ob->ext_endp = ob->extp; + return reds; + } + if (~dflags & (DFLAG_BIT_BINARIES | DFLAG_EXPORT_PTR_TAG + | DFLAG_DIST_HDR_ATOM_CACHE)) { + reds = transcode_dist_obuf(ob, dep, dflags, reds); + if (reds < 0) + return reds; + ep = ob->extp; + } + if (dflags & DFLAG_DIST_HDR_ATOM_CACHE) { + /* + * Encoding was done without atom caching but receiver expects + * a dist header, so we prepend an empty one. 
+ */ + *--ep = 0; /* NumberOfAtomCacheRefs */ + *--ep = DIST_HEADER; + *--ep = VERSION_MAGIC; + } + goto done; + } + else if (ep[1] != DIST_HEADER) { + ASSERT(ep[1] == SMALL_TUPLE_EXT || ep[1] == LARGE_TUPLE_EXT); + ASSERT(!(dflags & DFLAG_DIST_HDR_ATOM_CACHE)); + /* Node without atom cache, 'pass through' needed */ + *--ep = PASS_THROUGH; + goto done; + } dist_hdr_flags = ep[2]; long_atoms = ERTS_DIST_HDR_LONG_ATOMS_FLG & ((int) dist_hdr_flags); @@ -405,6 +450,7 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint / sizeof(Uint32))+1]; register Uint32 flgs; int iix, flgs_bytes, flgs_buf_ix, used_half_bytes; + ErtsAtomCache* cache = dep->cache; #ifdef DEBUG int tot_used_half_bytes; #endif @@ -447,17 +493,9 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint Atom *a; cache->out_arr[cix] = atom; a = atom_tab(atom_val(atom)); - if (utf8_atoms) { - sz = a->len; - ep -= sz; - sys_memcpy((void *) ep, (void *) a->name, sz); - } - else { - ASSERT(0 <= a->latin1_chars && a->latin1_chars <= MAX_ATOM_CHARACTERS); - ep -= a->latin1_chars; - sz = erts_utf8_to_latin1(ep, a->name, a->len); - ASSERT(a->latin1_chars == sz); - } + sz = a->len; + ep -= sz; + sys_memcpy((void *) ep, (void *) a->name, sz); if (long_atoms) { ep -= 2; put_int16(sz, ep); @@ -504,12 +542,16 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint break; } } + reds -= 3; /*was ERTS_PORT_REDS_DIST_CMD_FINALIZE*/ } --ep; put_int8(ci, ep); *--ep = DIST_HEADER; *--ep = VERSION_MAGIC; - return ep; +done: + ob->extp = ep; + ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp); + return reds < 0 ? 0 : reds; } int erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp, @@ -520,7 +562,7 @@ int erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp, return -1; } else { #ifndef ERTS_DEBUG_USE_DIST_SEP - if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) + if (!(flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC))) #endif sz++ /* VERSION_MAGIC */; @@ -536,7 +578,7 @@ int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx return -1; } else { #ifndef ERTS_DEBUG_USE_DIST_SEP - if (!(ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE)) + if (!(ctx->flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC))) #endif sz++ /* VERSION_MAGIC */; @@ -568,7 +610,7 @@ int erts_encode_dist_ext(Eterm term, byte **ext, Uint32 flags, ErtsAtomCacheMap { if (!ctx || !ctx->wstack.wstart) { #ifndef ERTS_DEBUG_USE_DIST_SEP - if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) + if (!(flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC))) #endif *(*ext)++ = VERSION_MAGIC; } @@ -643,7 +685,7 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, #endif register byte *ep = ext; - int utf8_atoms = (int) (dep->flags & DFLAG_UTF8_ATOMS); + ASSERT(dep->flags & DFLAG_UTF8_ATOMS); edep->heap_size = -1; edep->ext_endp = ext+size; @@ -669,17 +711,16 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, erts_de_rlock(dep); - if ((dep->status & (ERTS_DE_SFLG_EXITING|ERTS_DE_SFLG_CONNECTED)) - != ERTS_DE_SFLG_CONNECTED) { + if (dep->state != ERTS_DE_STATE_CONNECTED && + dep->state != ERTS_DE_STATE_PENDING) { erts_de_runlock(dep); return ERTS_PREP_DIST_EXT_CLOSED; } - if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE) edep->flags |= ERTS_DIST_EXT_DFLAG_HDR; *connection_id = dep->connection_id; - edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK); + edep->connection_id = dep->connection_id; if (ep[1] != DIST_HEADER) { if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) 
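erts_encode_ext_dist_header_finalize() now takes the whole output buffer plus a reduction budget and returns what is left of that budget, so that transcoding a buffer encoded toward a pending connection can yield. A sketch of the assumed caller shape (the real caller lives elsewhere in the patch, e.g. in dist.c, and is not shown here):

    Sint reds_left = erts_encode_ext_dist_header_finalize(ob, dep, dflags, reds);
    if (reds_left < 0) {
        /* reduction budget exhausted while transcoding: keep the buffer
         * as-is and call finalize again later */
    }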
@@ -806,9 +847,7 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, CHKSIZE(len); atom = erts_atom_put((byte *) ep, len, - (utf8_atoms - ? ERTS_ATOM_ENC_UTF8 - : ERTS_ATOM_ENC_LATIN1), + ERTS_ATOM_ENC_UTF8, 0); if (is_non_value(atom)) ERTS_EXT_HDR_FAIL; @@ -895,7 +934,7 @@ bad_dist_ext(ErtsDistExternal *edep) erts_dsprintf(dsbufp, ", %d=%T", i, edep->attab.atom[i]); } erts_send_warning_to_logger_nogl(dsbufp); - erts_kill_dist_connection(dep, ERTS_DIST_EXT_CON_ID(edep)); + erts_kill_dist_connection(dep, edep->connection_id); } } @@ -1220,7 +1259,8 @@ typedef struct B2TContext_t { ErtsBinary2TermState b2ts; Uint32 flags; SWord reds; - Eterm trap_bin; + Uint used_bytes; /* In: boolean, Out: bytes */ + Eterm trap_bin; /* THE_NON_VALUE if not exported */ Export *bif; Eterm arg[2]; enum B2TState state; @@ -1314,6 +1354,11 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size, ctx->u.uc.dbytes = state->extp; ctx->u.uc.dleft = dest_len; + if (ctx->used_bytes) { + ASSERT(ctx->used_bytes == 1); + /* to be subtracted by stream.avail_in when done */ + ctx->used_bytes = data_size; + } ctx->state = B2TUncompressChunk; *ctxp = ctx; } @@ -1416,13 +1461,15 @@ static int b2t_context_destructor(Binary *context_bin) return 1; } +static BIF_RETTYPE binary_to_term_int(Process*, Eterm bin, B2TContext*); + + static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1) { Binary *context_bin = erts_magic_ref2bin(BIF_ARG_1); ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor); - return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL, - THE_NON_VALUE, THE_NON_VALUE); + return binary_to_term_int(BIF_P, THE_NON_VALUE, ERTS_MAGIC_BIN_DATA(context_bin)); } @@ -1448,6 +1495,8 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src) b2t_context_destructor); B2TContext* ctx = ERTS_MAGIC_BIN_DATA(context_b); Eterm* hp; + + ASSERT(is_non_value(src->trap_bin)); sys_memcpy(ctx, src, sizeof(B2TContext)); if (ctx->state >= B2TDecode && ctx->u.dc.next == &src->u.dc.res) { ctx->u.dc.next = &ctx->u.dc.res; @@ -1457,8 +1506,7 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src) return ctx; } -static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b, - Export *bif_init, Eterm arg0, Eterm arg1) +static BIF_RETTYPE binary_to_term_int(Process* p, Eterm bin, B2TContext *ctx) { BIF_RETTYPE ret_val; #ifdef EXTREME_B2T_TRAPPING @@ -1466,25 +1514,17 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar #else SWord initial_reds = (Uint)(ERTS_BIF_REDS_LEFT(p) * B2T_BYTES_PER_REDUCTION); #endif - B2TContext c_buff; - B2TContext *ctx; int is_first_call; - if (context_b == NULL) { + if (is_value(bin)) { /* Setup enough to get started */ is_first_call = 1; - ctx = &c_buff; ctx->state = B2TPrepare; ctx->aligned_alloc = NULL; - ctx->flags = flags; - ctx->bif = bif_init; - ctx->arg[0] = arg0; - ctx->arg[1] = arg1; - IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;) } else { - is_first_call = 0; - ctx = ERTS_MAGIC_BIN_DATA(context_b); + ASSERT(is_value(ctx->trap_bin)); ASSERT(ctx->state != B2TPrepare); + is_first_call = 0; } ctx->reds = initial_reds; @@ -1528,6 +1568,10 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar && zret == Z_STREAM_END && ctx->u.uc.dleft == 0) { ctx->reds -= chunk; + if (ctx->used_bytes) { + ASSERT(ctx->used_bytes > 5 + ctx->u.uc.stream.avail_in); + ctx->used_bytes -= ctx->u.uc.stream.avail_in; + } ctx->state = B2TSizeInit; } else { @@ -1546,11 +1590,11 @@ 
static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar break; case B2TDecodeInit: - if (ctx == &c_buff && ctx->b2ts.extsize > ctx->reds) { + if (is_non_value(ctx->trap_bin) && ctx->b2ts.extsize > ctx->reds) { /* dec_term will maybe trap, allocate space for magic bin before result term to make it easy to trim with HRelease. */ - ctx = b2t_export_context(p, &c_buff); + ctx = b2t_export_context(p, ctx); } ctx->u.dc.ep = ctx->b2ts.extp; ctx->u.dc.res = (Eterm) (UWord) NULL; @@ -1593,6 +1637,25 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar return ret_val; case B2TDone: + if (ctx->used_bytes) { + Eterm *hp; + Eterm used; + if (!ctx->b2ts.exttmp) { + ASSERT(ctx->used_bytes == 1); + ctx->used_bytes = (ctx->u.dc.ep - ctx->b2ts.extp + +1); /* VERSION_MAGIC */ + } + if (IS_USMALL(0, ctx->used_bytes)) { + hp = erts_produce_heap(&ctx->u.dc.factory, 3, 0); + used = make_small(ctx->used_bytes); + } + else { + hp = erts_produce_heap(&ctx->u.dc.factory, 3+BIG_UINT_HEAP_SIZE, 0); + used = uint_to_big(ctx->used_bytes, hp); + hp += BIG_UINT_HEAP_SIZE; + } + ctx->u.dc.res = TUPLE2(hp, ctx->u.dc.res, used); + } b2t_destroy_context(ctx); if (ctx->u.dc.factory.hp > ctx->u.dc.factory.hp_end) { @@ -1613,11 +1676,10 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar } }while (ctx->reds > 0 || ctx->state >= B2TDone); - if (ctx == &c_buff) { - ASSERT(ctx->trap_bin == THE_NON_VALUE); - ctx = b2t_export_context(p, &c_buff); + if (is_non_value(ctx->trap_bin)) { + ctx = b2t_export_context(p, ctx); + ASSERT(is_value(ctx->trap_bin)); } - ASSERT(ctx->trap_bin != THE_NON_VALUE); if (is_first_call) { erts_set_gc_state(p, 0); @@ -1634,23 +1696,35 @@ HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 1) BIF_RETTYPE binary_to_term_1(BIF_ALIST_1) { - return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_1], - BIF_ARG_1, THE_NON_VALUE); + B2TContext ctx; + + ctx.flags = 0; + ctx.used_bytes = 0; + ctx.trap_bin = THE_NON_VALUE; + ctx.bif = bif_export[BIF_binary_to_term_1]; + ctx.arg[0] = BIF_ARG_1; + ctx.arg[1] = THE_NON_VALUE; + return binary_to_term_int(BIF_P, BIF_ARG_1, &ctx); } HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 2) BIF_RETTYPE binary_to_term_2(BIF_ALIST_2) { + B2TContext ctx; Eterm opts; Eterm opt; - Uint32 flags = 0; + ctx.flags = 0; + ctx.used_bytes = 0; opts = BIF_ARG_2; while (is_list(opts)) { opt = CAR(list_val(opts)); if (opt == am_safe) { - flags |= ERTS_DIST_EXT_BTT_SAFE; + ctx.flags |= ERTS_DIST_EXT_BTT_SAFE; + } + else if (opt == am_used) { + ctx.used_bytes = 1; } else { goto error; @@ -1661,8 +1735,11 @@ BIF_RETTYPE binary_to_term_2(BIF_ALIST_2) if (is_not_nil(opts)) goto error; - return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_2], - BIF_ARG_1, BIF_ARG_2); + ctx.trap_bin = THE_NON_VALUE; + ctx.bif = bif_export[BIF_binary_to_term_2]; + ctx.arg[0] = BIF_ARG_1; + ctx.arg[1] = BIF_ARG_2; + return binary_to_term_int(BIF_P, BIF_ARG_1, &ctx); error: BIF_ERROR(BIF_P, BADARG); @@ -1876,7 +1953,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla context_b = erts_create_magic_binary(sizeof(TTBContext), \ ttb_context_destructor); \ context = ERTS_MAGIC_BIN_DATA(context_b); \ - memcpy(context,&c_buff,sizeof(TTBContext)); \ + sys_memcpy(context,&c_buff,sizeof(TTBContext)); \ } \ } while (0) @@ -1955,23 +2032,14 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla level = context->s.ec.level; BUMP_REDS(p, 
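binary_to_term/2 gains a 'used' option: used_bytes is a request flag on input and the number of consumed bytes on output, and the decoded term is wrapped as {Term, Used} at B2TDone (for compressed input the count is corrected by subtracting stream.avail_in). The context setup, restated from binary_to_term_2 above:

    B2TContext ctx;
    ctx.flags = 0;
    ctx.used_bytes = 1;                    /* the 'used' option was given */
    ctx.trap_bin = THE_NON_VALUE;
    ctx.bif = bif_export[BIF_binary_to_term_2];
    ctx.arg[0] = BIF_ARG_1;
    ctx.arg[1] = BIF_ARG_2;
    return binary_to_term_int(BIF_P, BIF_ARG_1, &ctx);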
(initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR); if (level == 0 || real_size < 6) { /* We are done */ - ProcBin* pb; return_normal: context->s.ec.result_bin = NULL; context->alive = 0; - pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = real_size; - pb->next = MSO(p).first; - MSO(p).first = (struct erl_off_heap_header*)pb; - pb->val = result_bin; - pb->bytes = (byte*) result_bin->orig_bytes; - pb->flags = 0; - OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm)); if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) { erts_bin_free(context_b); } - return make_binary(pb); + return erts_build_proc_bin(&MSO(p), HAlloc(p, PROC_BIN_SIZE), + result_bin); } /* Continue with compression... */ /* To make absolutely sure that zlib does not barf on a reallocated context, @@ -2028,16 +2096,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla result_bin = erts_bin_realloc(context->s.cc.destination_bin, context->s.cc.dest_len+6); context->s.cc.destination_bin = NULL; - pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = context->s.cc.dest_len+6; - pb->next = MSO(p).first; - MSO(p).first = (struct erl_off_heap_header*)pb; - pb->val = result_bin; ASSERT(erts_refc_read(&result_bin->intern.refc, 1)); - pb->bytes = (byte*) result_bin->orig_bytes; - pb->flags = 0; - OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm)); erts_bin_free(context->s.cc.result_bin); context->s.cc.result_bin = NULL; context->alive = 0; @@ -2045,7 +2104,9 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) { erts_bin_free(context_b); } - return make_binary(pb); + return erts_build_proc_bin(&MSO(p), + HAlloc(p, PROC_BIN_SIZE), + result_bin); } default: /* Compression error, revert to uncompressed binary (still in context) */ @@ -2099,7 +2160,7 @@ enc_atom(ErtsAtomCacheMap *acmp, Eterm atom, byte *ep, Uint32 dflags) { int iix; int len; - int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS); + const int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS); ASSERT(is_atom(atom)); @@ -2494,8 +2555,6 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, break; } - L_jump_start: - if (ctx && --r <= 0) { *reds = 0; ctx->obj = obj; @@ -2503,6 +2562,8 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, WSTACK_SAVE(s, &ctx->wstack); return -1; } + + L_jump_start: switch(tag_val_def(obj)) { case NIL_DEF: *ep++ = NIL_EXT; @@ -2812,6 +2873,8 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, ep[j] = 0; /* Zero unused bits at end of binary */ data_dst = ep; ep += j + 1; + if (ctx) + ctx->hopefull_flags |= DFLAG_BIT_BINARIES; } else { /* * Bit-level binary, but the receiver doesn't support it. 
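The ProcBin setup code deleted in the two return paths above (and again further down in dec_term, io.c and driver_deliver_term) is what the new erts_build_proc_bin() helper factors out: fill in a ProcBin at a caller-supplied heap position, link it onto the off-heap list, charge its off-heap overhead, and hand back the boxed term. A minimal sketch reconstructed from the deleted blocks; the helper's actual definition is not part of these hunks, and taking the size from bin->orig_size is an assumption here:

    Eterm
    erts_build_proc_bin(ErlOffHeap* ohp, Eterm* hp, Binary* bin)
    {
        ProcBin* pb = (ProcBin *) hp;   /* caller reserved PROC_BIN_SIZE words */

        pb->thing_word = HEADER_PROC_BIN;
        pb->size = bin->orig_size;      /* assumption: size of the whole Binary */
        pb->next = ohp->first;          /* push onto the off-heap list */
        ohp->first = (struct erl_off_heap_header*) pb;
        pb->val = bin;                  /* reference is transferred, not bumped */
        pb->bytes = (byte*) bin->orig_bytes;
        pb->flags = 0;

        OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));

        return make_binary(pb);
    }

The declaration added to global.h later in this patch has exactly this shape, so every caller only has to produce PROC_BIN_SIZE heap words and a Binary that already carries the reference being handed over.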
@@ -2847,6 +2910,8 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags); ep = enc_term(acmp, make_small(exp->info.mfa.arity), ep, dflags, off_heap); + if (ctx) + ctx->hopefull_flags |= DFLAG_EXPORT_PTR_TAG; } else { /* Tag, arity */ *ep++ = SMALL_TUPLE_EXT; @@ -2867,62 +2932,27 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, ErlFunThing* funp = (ErlFunThing *) fun_val(obj); int ei; - if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) { - *ep++ = NEW_FUN_EXT; - WSTACK_PUSH2(s, ENC_PATCH_FUN_SIZE, - (UWord) ep); /* Position for patching in size */ - ep += 4; - *ep = funp->arity; - ep += 1; - sys_memcpy(ep, funp->fe->uniq, 16); - ep += 16; - put_int32(funp->fe->index, ep); - ep += 4; - put_int32(funp->num_free, ep); - ep += 4; - ep = enc_atom(acmp, funp->fe->module, ep, dflags); - ep = enc_term(acmp, make_small(funp->fe->old_index), ep, dflags, off_heap); - ep = enc_term(acmp, make_small(funp->fe->old_uniq), ep, dflags, off_heap); - ep = enc_pid(acmp, funp->creator, ep, dflags); - } else { - /* - * Communicating with an obsolete erl_interface or - * jinterface node. Convert the fun to a tuple to - * avoid crasching. - */ - - /* Tag, arity */ - *ep++ = SMALL_TUPLE_EXT; - put_int8(5, ep); - ep += 1; - - /* 'fun' */ - ep = enc_atom(acmp, am_fun, ep, dflags); - - /* Module name */ - ep = enc_atom(acmp, funp->fe->module, ep, dflags); - - /* Index, Uniq */ - *ep++ = INTEGER_EXT; - put_int32(funp->fe->old_index, ep); - ep += 4; - *ep++ = INTEGER_EXT; - put_int32(funp->fe->old_uniq, ep); - ep += 4; - - /* Environment sub-tuple arity */ - ASSERT(funp->num_free < MAX_ARG); - *ep++ = SMALL_TUPLE_EXT; - put_int8(funp->num_free, ep); - ep += 1; - } - for (ei = funp->num_free-1; ei > 0; ei--) { + ASSERT(dflags & DFLAG_NEW_FUN_TAGS); + *ep++ = NEW_FUN_EXT; + WSTACK_PUSH2(s, ENC_PATCH_FUN_SIZE, + (UWord) ep); /* Position for patching in size */ + ep += 4; + *ep = funp->arity; + ep += 1; + sys_memcpy(ep, funp->fe->uniq, 16); + ep += 16; + put_int32(funp->fe->index, ep); + ep += 4; + put_int32(funp->num_free, ep); + ep += 4; + ep = enc_atom(acmp, funp->fe->module, ep, dflags); + ep = enc_term(acmp, make_small(funp->fe->old_index), ep, dflags, off_heap); + ep = enc_term(acmp, make_small(funp->fe->old_uniq), ep, dflags, off_heap); + ep = enc_pid(acmp, funp->creator, ep, dflags); + + for (ei = funp->num_free-1; ei >= 0; ei--) { WSTACK_PUSH2(s, ENC_TERM, (UWord) funp->env[ei]); } - if (funp->num_free != 0) { - obj = funp->env[0]; - goto L_jump_start; - } } break; } @@ -3260,6 +3290,7 @@ dec_term_atom_common: n--; if (ctx) { if (reds < n) { + ASSERT(reds > 0); ctx->state = B2TDecodeList; ctx->u.dc.remaining_n = n - reds; n = reds; @@ -3541,18 +3572,9 @@ dec_term_atom_common: *objp = make_binary(hb); } else { Binary* dbin = erts_bin_nrml_alloc(n); - ProcBin* pb; - pb = (ProcBin *) hp; + + *objp = erts_build_proc_bin(factory->off_heap, hp, dbin); hp += PROC_BIN_SIZE; - pb->thing_word = HEADER_PROC_BIN; - pb->size = n; - pb->next = factory->off_heap->first; - factory->off_heap->first = (struct erl_off_heap_header*)pb; - OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm)); - pb->val = dbin; - pb->bytes = (byte*) dbin->orig_bytes; - pb->flags = 0; - *objp = make_binary(pb); if (ctx) { int n_limit = reds * B2T_MEMCPY_FACTOR; if (n > n_limit) { @@ -3592,18 +3614,9 @@ dec_term_atom_common: ep += n; } else { Binary* dbin = erts_bin_nrml_alloc(n); - ProcBin* pb; + Uint n_copy = n; - pb = (ProcBin *) hp; - 
pb->thing_word = HEADER_PROC_BIN; - pb->size = n; - pb->next = factory->off_heap->first; - factory->off_heap->first = (struct erl_off_heap_header*)pb; - OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm)); - pb->val = dbin; - pb->bytes = (byte*) dbin->orig_bytes; - pb->flags = 0; - bin = make_binary(pb); + bin = erts_build_proc_bin(factory->off_heap, hp, dbin); hp += PROC_BIN_SIZE; if (ctx) { int n_limit = reds * B2T_MEMCPY_FACTOR; @@ -3611,15 +3624,15 @@ dec_term_atom_common: ctx->state = B2TDecodeBinary; ctx->u.dc.remaining_n = n - n_limit; ctx->u.dc.remaining_bytes = dbin->orig_bytes + n_limit; - n = n_limit; + n_copy = n_limit; reds = 0; } - else + else { reds -= n / B2T_MEMCPY_FACTOR; + } } - sys_memcpy(dbin->orig_bytes, ep, n); - ep += n; - n = pb->size; + sys_memcpy(dbin->orig_bytes, ep, n_copy); + ep += n_copy; } if (bitsize == 8 || n == 0) { @@ -4000,6 +4013,7 @@ dec_term_atom_common: if (ctx) { ctx->state = B2TDone; ctx->reds = reds; + ctx->u.dc.ep = ep; } return ep; @@ -4269,24 +4283,12 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, { ErlFunThing* funp = (ErlFunThing *) fun_val(obj); - if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) { - result += 20+1+1+4; /* New ID + Tag */ - result += 4; /* Length field (number of free variables */ - result += encode_size_struct2(acmp, funp->creator, dflags); - result += encode_size_struct2(acmp, funp->fe->module, dflags); - result += 2 * (1+4); /* Index, Uniq */ - } else { - /* - * Size when fun is mapped to a tuple. - */ - result += 1 + 1; /* Tuple tag, arity */ - result += 1 + 1 + 2 + - atom_tab(atom_val(am_fun))->len; /* 'fun' */ - result += 1 + 1 + 2 + - atom_tab(atom_val(funp->fe->module))->len; /* Module name */ - result += 2 * (1 + 4); /* Index + Uniq */ - result += 1 + (funp->num_free < 0x100 ? 
1 : 4); - } + ASSERT(dflags & DFLAG_NEW_FUN_TAGS); + result += 20+1+1+4; /* New ID + Tag */ + result += 4; /* Length field (number of free variables */ + result += encode_size_struct2(acmp, funp->creator, dflags); + result += encode_size_struct2(acmp, funp->fe->module, dflags); + result += 2 * (1+4); /* Index, Uniq */ if (funp->num_free > 1) { WSTACK_PUSH2(s, (UWord) (funp->env + 1), (UWord) TERM_ARRAY_OP(funp->num_free-1)); @@ -4374,7 +4376,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags, B2TContext* ctx) } } else - reds = 0; /* not used but compiler warns anyway */ + ERTS_UNDEF(reds, 0); heap_size = 0; terms = 1; @@ -4684,3 +4686,182 @@ error: #undef SKIP2 #undef CHKSIZE } + + +struct transcode_context { + enum { + TRANSCODE_DEC_MSG_SIZE, + TRANSCODE_DEC_MSG, + TRANSCODE_ENC_CTL, + TRANSCODE_ENC_MSG + }state; + Eterm ctl_term; + Eterm* ctl_heap; + ErtsHeapFactory ctl_factory; + Eterm* msg_heap; + B2TContext b2t; + TTBEncodeContext ttb; +#ifdef DEBUG + ErtsDistOutputBuf* dbg_ob; +#endif +}; + +void transcode_free_ctx(DistEntry* dep) +{ + struct transcode_context* ctx = dep->transcode_ctx; + + erts_factory_close(&ctx->ctl_factory); + erts_free(ERTS_ALC_T_DIST_TRANSCODE, ctx->ctl_heap); + + if (ctx->msg_heap) { + erts_factory_close(&ctx->b2t.u.dc.factory); + erts_free(ERTS_ALC_T_DIST_TRANSCODE, ctx->msg_heap); + } + erts_free(ERTS_ALC_T_DIST_TRANSCODE, ctx); + dep->transcode_ctx = NULL; +} + +Sint transcode_dist_obuf(ErtsDistOutputBuf* ob, + DistEntry* dep, + Uint32 dflags, + Sint reds) +{ + Sint hsz; + byte* decp; + const int have_msg = !!ob->msg_start; + int i; + struct transcode_context* ctx = dep->transcode_ctx; + + if (!ctx) { /* first call for 'ob' */ + ASSERT(!(ob->hopefull_flags & ~(Uint)(DFLAG_BIT_BINARIES | + DFLAG_EXPORT_PTR_TAG))); + if (~dflags & ob->hopefull_flags) { + /* + * Receiver does not support bitstrings and/or export funs + * and output buffer contains such message tags (hopefull_flags). + * Must transcode control and message terms to use tuple fallbacks. + */ + ctx = erts_alloc(ERTS_ALC_T_DIST_TRANSCODE, sizeof(struct transcode_context)); + dep->transcode_ctx = ctx; + #ifdef DEBUG + ctx->dbg_ob = ob; + #endif + + hsz = decoded_size(ob->extp, ob->ext_endp, 0, NULL); + ctx->ctl_heap = erts_alloc(ERTS_ALC_T_DIST_TRANSCODE, hsz*sizeof(Eterm)); + erts_factory_tmp_init(&ctx->ctl_factory, ctx->ctl_heap, hsz, ERTS_ALC_T_DIST_TRANSCODE); + ctx->msg_heap = NULL; + + decp = dec_term(NULL, &ctx->ctl_factory, ob->extp, &ctx->ctl_term, NULL); + if (have_msg) { + ASSERT(decp == ob->msg_start); (void)decp; + ctx->b2t.u.sc.ep = NULL; + ctx->b2t.state = B2TSize; + ctx->b2t.aligned_alloc = NULL; + ctx->b2t.b2ts.exttmp = 0; + ctx->state = TRANSCODE_DEC_MSG_SIZE; + } + else { + ASSERT(decp == ob->ext_endp); + ctx->state = TRANSCODE_ENC_CTL; + } + } + else if (!(dflags & DFLAG_DIST_HDR_ATOM_CACHE)) { + /* + * No need for full transcoding, but primitive receiver (erl_/jinterface) + * expects VERSION_MAGIC before both control and message terms. 
+ */ + if (ob->msg_start) { + Sint ctl_bytes = ob->msg_start - ob->extp; + ASSERT(ob->extp < ob->msg_start && ob->msg_start < ob->ext_endp); + /* Move control term back 1 byte to make room */ + sys_memmove(ob->extp-1, ob->extp, ctl_bytes); + *--(ob->msg_start) = VERSION_MAGIC; + --(ob->extp); + reds -= ctl_bytes / (B2T_BYTES_PER_REDUCTION * B2T_MEMCPY_FACTOR); + } + *--(ob->extp) = VERSION_MAGIC; + goto done; + } + else + goto done; + } + else { /* continue after yield */ + ASSERT(ctx->dbg_ob == ob); + } + ctx->b2t.reds = reds * B2T_BYTES_PER_REDUCTION; + + switch (ctx->state) { + case TRANSCODE_DEC_MSG_SIZE: + hsz = decoded_size(ob->msg_start, ob->ext_endp, 0, &ctx->b2t); + if (ctx->b2t.state == B2TSize) { + return -1; + } + ASSERT(ctx->b2t.state == B2TDecodeInit); + ctx->msg_heap = erts_alloc(ERTS_ALC_T_DIST_TRANSCODE, hsz*sizeof(Eterm)); + ctx->b2t.u.dc.ep = ob->msg_start; + ctx->b2t.u.dc.res = (Eterm) NULL; + ctx->b2t.u.dc.next = &ctx->b2t.u.dc.res; + erts_factory_tmp_init(&ctx->b2t.u.dc.factory, + ctx->msg_heap, hsz, ERTS_ALC_T_DIST_TRANSCODE); + ctx->b2t.u.dc.flat_maps.wstart = NULL; + ctx->b2t.u.dc.hamt_array.pstart = NULL; + ctx->b2t.state = B2TDecode; + + ctx->state = TRANSCODE_DEC_MSG; + case TRANSCODE_DEC_MSG: + if (ctx->b2t.reds <= 0) + ctx->b2t.reds = 1; + decp = dec_term(NULL, NULL, NULL, NULL, &ctx->b2t); + if (ctx->b2t.state < B2TDone) { + return -1; + } + ASSERT(ctx->b2t.state == B2TDone); + ASSERT(decp && decp <= ob->ext_endp); + reds = ctx->b2t.reds / B2T_BYTES_PER_REDUCTION; + b2t_destroy_context(&ctx->b2t); + + ctx->state = TRANSCODE_ENC_CTL; + case TRANSCODE_ENC_CTL: + if (!(dflags & DFLAG_DIST_HDR_ATOM_CACHE)) { + ASSERT(!(dflags & DFLAG_NO_MAGIC)); + ob->extp -= 2; /* VERSION_MAGIC x 2 */ + } + ob->ext_endp = ob->extp; + i = erts_encode_dist_ext(ctx->ctl_term, &ob->ext_endp, dflags, + NULL, NULL, NULL); + ASSERT(i == 0); (void)i; + ASSERT(ob->ext_endp <= ob->alloc_endp); + + if (!have_msg) { + break; + } + ob->msg_start = ob->ext_endp; + ctx->ttb.wstack.wstart = NULL; + ctx->ttb.flags = dflags; + ctx->ttb.hopefull_flags = 0; + ctx->ttb.level = 0; + + ctx->state = TRANSCODE_ENC_MSG; + case TRANSCODE_ENC_MSG: + reds *= TERM_TO_BINARY_LOOP_FACTOR; + if (erts_encode_dist_ext(ctx->b2t.u.dc.res, &ob->ext_endp, dflags, NULL, + &ctx->ttb, &reds)) { + return -1; + } + reds /= TERM_TO_BINARY_LOOP_FACTOR; + + ASSERT(ob->ext_endp <= ob->alloc_endp); + ASSERT(!ctx->ttb.hopefull_flags); + } + transcode_free_ctx(dep); + +done: + if (!(dflags & DFLAG_DIST_HDR_ATOM_CACHE)) + *--(ob->extp) = PASS_THROUGH; + + if (reds < 0) + reds = 0; + + return reds; +} diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h index 3c61d013da..f9f8abcc27 100644 --- a/erts/emulator/beam/external.h +++ b/erts/emulator/beam/external.h @@ -109,35 +109,26 @@ typedef struct { } ErtsAtomTranslationTable; /* - * These flags are tagged onto the high bits of a connection ID and stored in - * the ErtsDistExternal structure's flags field. They are used to indicate - * various bits of state necessary to decode binaries in a variety of - * scenarios. The mask ERTS_DIST_EXT_CON_ID_MASK is used later to separate the - * connection ID from the flags. Be careful to ensure that the mask does not - * overlap any of the bits used for flags, or ERTS will leak flags bits into - * connection IDs and leak connection ID bits into the flags. + * These flags are stored in the ErtsDistExternal structure's flags field. 
+ * They are used to indicate various bits of state necessary to decode binaries + * in a variety of scenarios. */ -#define ERTS_DIST_EXT_DFLAG_HDR ((Uint32) 0x80000000) -#define ERTS_DIST_EXT_ATOM_TRANS_TAB ((Uint32) 0x40000000) -#define ERTS_DIST_EXT_BTT_SAFE ((Uint32) 0x20000000) -#define ERTS_DIST_EXT_CON_ID_MASK ((Uint32) 0x1fffffff) +#define ERTS_DIST_EXT_DFLAG_HDR ((Uint32) 0x1) +#define ERTS_DIST_EXT_ATOM_TRANS_TAB ((Uint32) 0x2) +#define ERTS_DIST_EXT_BTT_SAFE ((Uint32) 0x4) + +#define ERTS_DIST_CON_ID_MASK ((Uint32) 0x00ffffff) /* also in net_kernel.erl */ -#define ERTS_DIST_EXT_CON_ID(DIST_EXTP) \ - ((DIST_EXTP)->flags & ERTS_DIST_EXT_CON_ID_MASK) typedef struct { DistEntry *dep; byte *extp; byte *ext_endp; Sint heap_size; + Uint32 connection_id; Uint32 flags; ErtsAtomTranslationTable attab; } ErtsDistExternal; -typedef struct { - int have_header; - int cache_entries; -} ErtsDistHeaderPeek; - #define ERTS_DIST_EXT_SIZE(EDEP) \ (sizeof(ErtsDistExternal) \ - (((EDEP)->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) \ @@ -163,7 +154,7 @@ void erts_finalize_atom_cache_map(ErtsAtomCacheMap *, Uint32); Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *); byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *); -byte *erts_encode_ext_dist_header_finalize(byte *, ErtsAtomCache *, Uint32); +Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf*, DistEntry *, Uint32 dflags, Sint reds); struct erts_dsig_send_context; int erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap*, Uint* szp); int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp); @@ -177,9 +168,6 @@ Uint erts_encode_ext_size_ets(Eterm); void erts_encode_ext(Eterm, byte **); byte* erts_encode_ext_ets(Eterm, byte *, struct erl_off_heap_header** ext_off_heap); -#ifdef ERTS_WANT_EXTERNAL_TAGS -ERTS_GLB_INLINE void erts_peek_dist_header(ErtsDistHeaderPeek *, byte *, Uint); -#endif ERTS_GLB_INLINE void erts_free_dist_ext_copy(ErtsDistExternal *); ERTS_GLB_INLINE void *erts_dist_ext_trailer(ErtsDistExternal *); ErtsDistExternal *erts_make_dist_ext_copy(ErtsDistExternal *, Uint); @@ -207,23 +195,9 @@ void erts_binary2term_abort(ErtsBinary2TermState *); Eterm erts_binary2term_create(ErtsBinary2TermState *, ErtsHeapFactory*); int erts_debug_max_atom_out_cache_index(void); int erts_debug_atom_to_out_cache_index(Eterm); - +void transcode_free_ctx(DistEntry* dep); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -#ifdef ERTS_WANT_EXTERNAL_TAGS -ERTS_GLB_INLINE void -erts_peek_dist_header(ErtsDistHeaderPeek *dhpp, byte *ext, Uint sz) -{ - if (ext[0] == VERSION_MAGIC - || ext[1] != DIST_HEADER - || sz < (1+1+1)) - dhpp->have_header = 0; - else { - dhpp->have_header = 1; - dhpp->cache_entries = (int) get_int8(&ext[2]); - } -} -#endif ERTS_GLB_INLINE void erts_free_dist_ext_copy(ErtsDistExternal *edep) diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 3dd3a60939..0f23027752 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -368,7 +368,7 @@ do {\ UWord _wsz = ESTACK_COUNT(s);\ (dst)->start = erts_alloc((s).alloc_type,\ DEF_ESTACK_SIZE * sizeof(Eterm));\ - memcpy((dst)->start, (s).start,_wsz*sizeof(Eterm));\ + sys_memcpy((dst)->start, (s).start,_wsz*sizeof(Eterm));\ (dst)->sp = (dst)->start + _wsz;\ (dst)->end = (dst)->start + DEF_ESTACK_SIZE;\ (dst)->edefault = NULL;\ @@ -536,7 +536,7 @@ do {\ UWord _wsz = WSTACK_COUNT(s);\ (dst)->wstart = erts_alloc(s.alloc_type,\ DEF_WSTACK_SIZE * sizeof(UWord));\ - memcpy((dst)->wstart, 
s.wstart,_wsz*sizeof(UWord));\ + sys_memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\ (dst)->wsp = (dst)->wstart + _wsz;\ (dst)->wend = (dst)->wstart + DEF_WSTACK_SIZE;\ (dst)->wdefault = NULL;\ @@ -861,6 +861,7 @@ Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap); Eterm erts_new_mso_binary(Process*, byte*, Uint); Eterm new_binary(Process*, byte*, Uint); Eterm erts_realloc_binary(Eterm bin, size_t size); +Eterm erts_build_proc_bin(ErlOffHeap*, Eterm*, Binary*); /* erl_bif_info.c */ @@ -947,8 +948,8 @@ void erts_update_ranges(BeamInstr* code, Uint size); void erts_remove_from_ranges(BeamInstr* code); UWord erts_ranges_sz(void); void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info); -ErtsLiteralArea** erts_dump_lit_areas; -Uint erts_dump_num_lit_areas; +extern ErtsLiteralArea** erts_dump_lit_areas; +extern Uint erts_dump_num_lit_areas; /* break.c */ void init_break_handler(void); @@ -958,6 +959,7 @@ void process_info(fmtfn_t, void *); void print_process_info(fmtfn_t, void *, Process*); void info(fmtfn_t, void *); void loaded(fmtfn_t, void *); +void erts_print_base64(fmtfn_t to, void *to_arg, byte* src, Uint size); /* sighandler sys.c */ int erts_set_signal(Eterm signal, Eterm type); @@ -1064,7 +1066,7 @@ Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_ #define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \ copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea) -Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*); +Eterm copy_shallow(Eterm* ERTS_RESTRICT, Uint, Eterm**, ErlOffHeap*); void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first, Eterm* refs, unsigned nrefs, int literals); @@ -1121,7 +1123,6 @@ extern int erts_no_line_info; extern Eterm erts_error_logger_warnings; extern int erts_initialized; extern int erts_compat_rel; -extern int erts_use_sender_punish; void erl_start(int, char**); void erts_usage(void); Eterm erts_preloaded(Process* p); @@ -1263,7 +1264,7 @@ char* erts_convert_filename_to_wchar(byte* bytes, Uint size, char *statbuf, size_t statbuf_size, ErtsAlcType_t alloc_type, Sint* used, Uint extra_wchars); -Eterm erts_convert_native_to_filename(Process *p, byte *bytes); +Eterm erts_convert_native_to_filename(Process *p, size_t size, byte *bytes); Eterm erts_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, Uint left, Uint *num_built, Uint *num_eaten, Eterm tail); int erts_utf8_to_latin1(byte* dest, const byte* source, int slen); diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c index 93d1111904..be1771b037 100644 --- a/erts/emulator/beam/index.c +++ b/erts/emulator/beam/index.c @@ -58,7 +58,7 @@ IndexTable* erts_index_init(ErtsAlcType_t type, IndexTable* t, char* name, int size, int limit, HashFunctions fun) { - Uint base_size = ((limit+INDEX_PAGE_SIZE-1)/INDEX_PAGE_SIZE)*sizeof(IndexSlot*); + Uint base_size = (((Uint)limit+INDEX_PAGE_SIZE-1)/INDEX_PAGE_SIZE)*sizeof(IndexSlot*); hash_init(type, &t->htable, name, 3*size/4, fun); t->size = 0; diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab index c17d1a8f69..6a531fcc09 100644 --- a/erts/emulator/beam/instrs.tab +++ b/erts/emulator/beam/instrs.tab @@ -322,6 +322,16 @@ get_list(Src, Hd, Tl) { $Tl = tl; } +get_hd(Src, Hd) { + Eterm* tmp_ptr = list_val($Src); + $Hd = CAR(tmp_ptr); +} + +get_tl(Src, Tl) { + Eterm* tmp_ptr = list_val($Src); + $Tl = CDR(tmp_ptr); +} + i_get(Src, Dst) { $Dst = erts_pd_hash_get(c_p, $Src); } @@ -542,6 +552,13 @@ put_list(Hd, Tl, Dst) { HTOP += 2; } 
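/*
 * The update_list instruction defined next is the specialisation introduced
 * by the ops.tab rule "put_list Src Dst=x Dst => update_list Src Dst": the
 * common prepend-to-accumulator case where the tail operand and the
 * destination are the same x register. Below is a stand-alone sketch of that
 * cons-building step with simplified stand-in types (uintptr_t terms and a
 * one-bit "list" tag are assumptions, not the real Eterm/HTOP machinery); the
 * point it illustrates is that the old register value is read as the tail
 * before the register is overwritten, so Acc = [H|Acc] needs only the two
 * fresh heap words and no scratch operand.
 */
#include <stdint.h>

typedef uintptr_t Term;                            /* stand-in for Eterm */
#define MAKE_LIST(p) ((Term)(uintptr_t)(p) | 0x1)  /* stand-in tagging   */

static Term *heap_top;                             /* stand-in for HTOP  */

static void sketch_update_list(Term head, Term *acc_reg)
{
    heap_top[0] = head;                 /* CAR of the new cell           */
    heap_top[1] = *acc_reg;             /* CDR: previous accumulator     */
    *acc_reg = MAKE_LIST(heap_top);     /* register now holds [H|Acc]    */
    heap_top += 2;
}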
+update_list(Hd, Dst) { + HTOP[0] = $Hd; + HTOP[1] = $Dst; + $Dst = make_list(HTOP); + HTOP += 2; +} + i_put_tuple := i_put_tuple.make.fill; i_put_tuple.make(Dst) { @@ -924,3 +941,33 @@ i_raise() { //| -no_next } +build_stacktrace() { + SWAPOUT; + x(0) = build_stacktrace(c_p, x(0)); + SWAPIN; +} + +raw_raise() { + Eterm class = x(0); + Eterm value = x(1); + Eterm stacktrace = x(2); + + if (class == am_error) { + c_p->freason = EXC_ERROR & ~EXF_SAVETRACE; + c_p->fvalue = value; + c_p->ftrace = stacktrace; + goto find_func_info; + } else if (class == am_exit) { + c_p->freason = EXC_EXIT & ~EXF_SAVETRACE; + c_p->fvalue = value; + c_p->ftrace = stacktrace; + goto find_func_info; + } else if (class == am_throw) { + c_p->freason = EXC_THROWN & ~EXF_SAVETRACE; + c_p->fvalue = value; + c_p->ftrace = stacktrace; + goto find_func_info; + } else { + x(0) = am_badarg; + } +} diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 85013af3ad..e4a5f2b6b6 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -298,7 +298,6 @@ static Port *create_port(char *name, erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED; erts_aint32_t x_pts_flgs = 0; - ErtsRunQueue *runq; if (!driver_lock) { /* Align size for mutex following port struct */ port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); @@ -347,11 +346,16 @@ static Port *create_port(char *name, p += sizeof(erts_mtx_t); state |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK; } - if (erts_get_scheduler_data()) - runq = erts_get_runq_current(NULL); - else - runq = ERTS_RUNQ_IX(0); - erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); + + { + ErtsRunQueue *runq; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + if (esdp) + runq = erts_get_runq_current(esdp); + else + runq = ERTS_RUNQ_IX(0); + erts_init_runq_port(prt, runq); + } prt->xports = NULL; @@ -584,7 +588,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ */ for (d = driver_list; d; d = d->next) { - if (strcmp(d->name, name) == 0 && + if (sys_strcmp(d->name, name) == 0 && erts_ddll_driver_ok(d->handle)) { driver = d; break; @@ -634,7 +638,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. 
*/ trace_port_open(port, pid, erts_atom_put((byte *) port->name, - strlen(port->name), + sys_strlen(port->name), ERTS_ATOM_ENC_LATIN1, 1)); } @@ -2925,7 +2929,7 @@ erl_drv_init_ack(ErlDrvPort ix, ErlDrvData res) { break; case -2: { char *str = erl_errno_id(errno); - resp = erts_atom_put((byte *) str, strlen(str), + resp = erts_atom_put((byte *) str, sys_strlen(str), ERTS_ATOM_ENC_LATIN1, 1); break; } @@ -3384,24 +3388,11 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) { listp = buf_to_intlist(&hp, buf, len, listp); } else if (buf != NULL) { - ProcBin* pb; - Binary* bptr; - - bptr = erts_bin_nrml_alloc(len); + Binary* bptr = erts_bin_nrml_alloc(len); sys_memcpy(bptr->orig_bytes, buf, len); - pb = (ProcBin *) hp; - pb->thing_word = HEADER_PROC_BIN; - pb->size = len; - pb->next = ohp->first; - ohp->first = (struct erl_off_heap_header*)pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; + listp = erts_build_proc_bin(ohp, hp, bptr); hp += PROC_BIN_SIZE; - - OH_OVERHEAD(ohp, pb->size / sizeof(Eterm)); - listp = make_binary(pb); } /* Prepend the header */ @@ -3829,11 +3820,12 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) ErtsDistLinkData dld; ErtsDSigData dsd; int code; - code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0); + code = erts_dsig_prepare(&dsd, dep, NULL, 0, ERTS_DSP_NO_LOCK, 0, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: case ERTS_DSIG_PREP_NOT_CONNECTED: break; + case ERTS_DSIG_PREP_PENDING: case ERTS_DSIG_PREP_CONNECTED: erts_remove_dist_link(&dld, port_id, lnk->pid, dep); erts_destroy_dist_link(&dld); @@ -4203,17 +4195,9 @@ write_port_control_result(int control_flags, else { dbin = (ErlDrvBinary *) resp_bufp; if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) { - ProcBin* pb = (ProcBin *) *hpp; + res = erts_build_proc_bin(ohp, *hpp, ErlDrvBinary2Binary(dbin)); *hpp += PROC_BIN_SIZE; - pb->thing_word = HEADER_PROC_BIN; - pb->size = dbin->orig_size; - pb->next = ohp->first; - ohp->first = (struct erl_off_heap_header *) pb; - pb->val = ErlDrvBinary2Binary(dbin); - pb->bytes = (byte*) dbin->orig_bytes; - pb->flags = 0; - OH_OVERHEAD(ohp, dbin->orig_size / sizeof(Eterm)); - return make_binary(pb); + return res; } resp_bufp = dbin->orig_bytes; resp_size = dbin->orig_size; @@ -5095,6 +5079,93 @@ static void prt_one_lnk(ErtsLink *lnk, void *vprtd) erts_print(prtd->to, prtd->arg, "%T", lnk->pid); } +static void dump_port_state(fmtfn_t to, void *arg, erts_aint32_t state) +{ + erts_aint32_t rest; + int unknown = 0; + char delim = ' '; + + erts_print(to, arg, "State:"); + + rest = state; + while (rest) { + erts_aint32_t chk = (rest ^ (rest-1)) & rest; /* lowest set bit */ + char* s; + + rest &= ~chk; + switch (chk) { + case ERTS_PORT_SFLG_CONNECTED: s = "CONNECTED"; break; + case ERTS_PORT_SFLG_EXITING: s = "EXITING"; break; + case ERTS_PORT_SFLG_DISTRIBUTION: s = "DISTR"; break; + case ERTS_PORT_SFLG_BINARY_IO: s = "BINARY_IO"; break; + case ERTS_PORT_SFLG_SOFT_EOF: s = "SOFT_EOF"; break; + case ERTS_PORT_SFLG_CLOSING: s = "CLOSING"; break; + case ERTS_PORT_SFLG_SEND_CLOSED: s = "SEND_CLOSED"; break; + case ERTS_PORT_SFLG_LINEBUF_IO: s = "LINEBUF_IO"; break; + case ERTS_PORT_SFLG_FREE: s = "FREE"; break; + case ERTS_PORT_SFLG_INITIALIZING: s = "INITIALIZING"; break; + case ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK: s = "PORT_LOCK"; break; + case ERTS_PORT_SFLG_INVALID: s = "INVALID"; break; + case ERTS_PORT_SFLG_HALT: s = "HALT"; break; +#ifdef DEBUG + case 
ERTS_PORT_SFLG_PORT_DEBUG: s = "DEBUG"; break; +#endif + default: + unknown = 1; + continue; + } + erts_print(to, arg, "%c%s", delim, s); + delim = '|'; + } + if (unknown || !state) + erts_print(to, arg, "%c0x%x\n", delim, state); + else + erts_print(to, arg, "\n"); +} + +static void dump_port_task_flags(fmtfn_t to, void *arg, Port* p) +{ + erts_aint32_t flags = erts_atomic32_read_nob(&p->sched.flags); + erts_aint32_t unknown = 0; + char delim = ' '; + + if (!flags) + return; + + erts_print(to, arg, "Task Flags:"); + + while (flags) { + erts_aint32_t chk = (flags ^ (flags-1)) & flags; /* lowest set bit */ + char* s; + + flags &= ~chk; + switch (chk) { + case ERTS_PTS_FLG_IN_RUNQ: s = "IN_RUNQ"; break; + case ERTS_PTS_FLG_EXEC: s = "EXEC"; break; + case ERTS_PTS_FLG_HAVE_TASKS: s = "HAVE_TASKS"; break; + case ERTS_PTS_FLG_EXIT: s = "EXIT"; break; + case ERTS_PTS_FLG_BUSY_PORT: s = "BUSY_PORT"; break; + case ERTS_PTS_FLG_BUSY_PORT_Q: s = "BUSY_Q"; break; + case ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q: s = "CHK_UNSET_BUSY_Q"; break; + case ERTS_PTS_FLG_HAVE_BUSY_TASKS: s = "BUSY_TASKS"; break; + case ERTS_PTS_FLG_HAVE_NS_TASKS: s = "NS_TASKS"; break; + case ERTS_PTS_FLG_PARALLELISM: s = "PARALLELISM"; break; + case ERTS_PTS_FLG_FORCE_SCHED: s = "FORCE_SCHED"; break; + case ERTS_PTS_FLG_EXITING: s = "EXITING"; break; + case ERTS_PTS_FLG_EXEC_IMM: s = "EXEC_IMM"; break; + default: + unknown |= chk; + continue; + } + erts_print(to, arg, "%c%s", delim, s); + delim = '|'; + } + if (unknown) + erts_print(to, arg, "%cUNKNOWN(0x%x)\n", delim, unknown); + else + erts_print(to, arg, "\n"); +} + void print_port_info(Port *p, fmtfn_t to, void *arg) { @@ -5104,6 +5175,8 @@ print_port_info(Port *p, fmtfn_t to, void *arg) return; erts_print(to, arg, "=port:%T\n", p->common.id); + dump_port_state(to, arg, state); + dump_port_task_flags(to, arg, p); erts_print(to, arg, "Slot: %d\n", internal_port_index(p->common.id)); if (state & ERTS_PORT_SFLG_CONNECTED) { erts_print(to, arg, "Connected: %T", ERTS_PORT_GET_CONNECTED(p)); @@ -5126,6 +5199,10 @@ print_port_info(Port *p, fmtfn_t to, void *arg) erts_doforall_monitors(ERTS_P_MONITORS(p), &prt_one_monitor, &prtd); erts_print(to, arg, "\n"); } + if (p->suspended) { + erts_print(to, arg, "Suspended: "); + erts_proclist_dump(to, arg, p->suspended); + } if (p->common.u.alive.reg != NULL) erts_print(to, arg, "Registered as: %T\n", p->common.u.alive.reg->name); @@ -5143,6 +5220,14 @@ print_port_info(Port *p, fmtfn_t to, void *arg) } else { erts_print(to, arg, "Port controls linked-in driver: %s\n",p->name); } + erts_print(to, arg, "Input: %beu\n", p->bytes_in); + erts_print(to, arg, "Output: %beu\n", p->bytes_out); + erts_print(to, arg, "Queue: %beu\n", erts_ioq_size(&p->ioq)); + { + Eterm port_data = erts_port_data_read(p); + if (port_data != am_undefined) + erts_print(to, arg, "Port Data: %T\n", port_data); + } } void @@ -5977,21 +6062,12 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len) mess = make_binary(hbp); } else { - ProcBin* pbp; + Eterm* hp; Binary* bp = erts_bin_nrml_alloc(size); ASSERT(bufp); sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size); - pbp = (ProcBin *) erts_produce_heap(&factory, - PROC_BIN_SIZE, HEAP_EXTRA); - pbp->thing_word = HEADER_PROC_BIN; - pbp->size = size; - pbp->next = factory.off_heap->first; - factory.off_heap->first = (struct erl_off_heap_header*)pbp; - pbp->val = bp; - pbp->bytes = (byte*) bp->orig_bytes; - pbp->flags = 0; - OH_OVERHEAD(factory.off_heap, pbp->size / sizeof(Eterm)); - mess = make_binary(pbp); 
+ hp = erts_produce_heap(&factory, PROC_BIN_SIZE, HEAP_EXTRA); + mess = erts_build_proc_bin(factory.off_heap, hp, bp); } ptr += 2; break; @@ -7248,7 +7324,7 @@ int driver_failure_atom(ErlDrvPort ix, char* string) { return driver_failure_term(ix, erts_atom_put((byte *) string, - strlen(string), + sys_strlen(string), ERTS_ATOM_ENC_LATIN1, 1), 0); @@ -7714,13 +7790,27 @@ int null_func(void) int erl_drv_putenv(const char *key, char *value) { - return erts_sys_putenv_raw((char*)key, value); + switch (erts_sys_explicit_8bit_putenv((char*)key, value)) { + case -1: /* Insufficient buffer space */ + return 1; + case 1: /* Success */ + return 0; + default: /* Not found */ + return -1; + } } int erl_drv_getenv(const char *key, char *value, size_t *value_size) { - return erts_sys_getenv_raw((char*)key, value, value_size); + switch (erts_sys_explicit_8bit_getenv((char*)key, value, value_size)) { + case -1: /* Insufficient buffer space */ + return 1; + case 1: /* Success */ + return 0; + default: /* Not found */ + return -1; + } } /* get heart_port diff --git a/erts/emulator/beam/lttng-wrapper.h b/erts/emulator/beam/lttng-wrapper.h index 0bc75c1552..e7f2971bf7 100644 --- a/erts/emulator/beam/lttng-wrapper.h +++ b/erts/emulator/beam/lttng-wrapper.h @@ -61,13 +61,13 @@ #define lttng_proc_to_mfa_str(p, Name) \ do { \ if (ERTS_PROC_IS_EXITING((p))) { \ - strcpy(Name, "<exiting>"); \ + sys_strcpy(Name, "<exiting>"); \ } else { \ BeamInstr *_fptr = find_function_from_pc((p)->i); \ if (_fptr) { \ lttng_mfa_to_str(_fptr[0],_fptr[1],_fptr[2], Name); \ } else { \ - strcpy(Name, "<unknown>"); \ + sys_strcpy(Name, "<unknown>"); \ } \ } \ } while(0) diff --git a/erts/emulator/beam/macros.tab b/erts/emulator/beam/macros.tab index e0b5f56b53..494fe8961e 100644 --- a/erts/emulator/beam/macros.tab +++ b/erts/emulator/beam/macros.tab @@ -20,13 +20,12 @@ // // -// Use if there is a garbage collection before storing to a -// general destination (either X or Y register). +// Define a regular expression that will match instructions that +// perform GC. That will allow beam_makeops to check for instructions +// that don't use $REFRESH_GEN_DEST() when they should. // -REFRESH_GEN_DEST() { - dst_ptr = REG_TARGET_PTR(dst); -} +GC_REGEXP=erts_garbage_collect|erts_gc|GcBifFunction; // $Offset is relative to the start of the instruction (not to the // location of the failure label reference). 
Since combined diff --git a/erts/emulator/beam/map_instrs.tab b/erts/emulator/beam/map_instrs.tab index bbb2f49b66..c594a87298 100644 --- a/erts/emulator/beam/map_instrs.tab +++ b/erts/emulator/beam/map_instrs.tab @@ -31,7 +31,7 @@ new_map(Dst, Live, N) { Eterm res; HEAVY_SWAPOUT; - res = new_map(c_p, reg, $Live, $N, $NEXT_INSTRUCTION); + res = erts_gc_new_map(c_p, reg, $Live, $N, $NEXT_INSTRUCTION); HEAVY_SWAPIN; $REFRESH_GEN_DEST(); $Dst = res; @@ -44,7 +44,7 @@ i_new_small_map_lit(Dst, Live, Keys) { Eterm keys = $Keys; HEAVY_SWAPOUT; - res = new_small_map_lit(c_p, reg, keys, $Live, $NEXT_INSTRUCTION); + res = erts_gc_new_small_map_lit(c_p, reg, keys, $Live, $NEXT_INSTRUCTION); HEAVY_SWAPIN; $REFRESH_GEN_DEST(); $Dst = res; @@ -133,7 +133,7 @@ update_map_assoc(Src, Dst, Live, N) { reg[live] = $Src; HEAVY_SWAPOUT; - res = update_map_assoc(c_p, reg, live, $N, $NEXT_INSTRUCTION); + res = erts_gc_update_map_assoc(c_p, reg, live, $N, $NEXT_INSTRUCTION); HEAVY_SWAPIN; ASSERT(is_value(res)); $REFRESH_GEN_DEST(); @@ -147,7 +147,7 @@ update_map_exact(Fail, Src, Dst, Live, N) { reg[live] = $Src; HEAVY_SWAPOUT; - res = update_map_exact(c_p, reg, live, $N, $NEXT_INSTRUCTION); + res = erts_gc_update_map_exact(c_p, reg, live, $N, $NEXT_INSTRUCTION); HEAVY_SWAPIN; if (is_value(res)) { $REFRESH_GEN_DEST(); diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab index 8055a8616f..d6d4d2fb49 100644 --- a/erts/emulator/beam/msg_instrs.tab +++ b/erts/emulator/beam/msg_instrs.tab @@ -43,27 +43,23 @@ // * // */ -recv_mark(Dest) { +i_recv_mark() { /* - * Save the current position in message buffer and the - * the label for the loop_rec/2 instruction for the - * the receive statement. + * Save the current position in message buffer. */ - $SET_REL_I(c_p->msg.mark, $Dest); c_p->msg.saved_last = c_p->msg.last; } i_recv_set() { /* - * If the mark is valid (points to the loop_rec/2 - * instruction that follows), we know that the saved - * position points to the first message that could - * possibly be matched out. + * If c_p->msg.saved_last is non-zero, it points to the first + * message that could possibly be matched out. * - * If the mark is invalid, we do nothing, meaning that - * we will look through all messages in the message queue. + * If c_p->msg.saved_last is zero, it means that it was invalidated + * because another receive was executed before this i_recv_set() + * instruction was reached. */ - if (c_p->msg.mark == (BeamInstr *) ($NEXT_INSTRUCTION)) { + if (c_p->msg.saved_last) { c_p->msg.save = c_p->msg.saved_last; } SET_I($NEXT_INSTRUCTION); @@ -131,6 +127,7 @@ i_loop_rec(Dest) { ASSERT(HTOP == c_p->htop && E == c_p->stop); /* TODO: Add DTrace probe for this bad message situation? */ UNLINK_MESSAGE(c_p, msgp); + c_p->msg.saved_last = 0; /* Better safe than sorry. */ msgp->next = NULL; erts_cleanup_messages(msgp); goto loop_rec__; diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index 7a2c39b3a8..77e375f2c0 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -182,6 +182,9 @@ get_list r x y get_list r y r get_list r x r +get_hd xy xy +get_tl xy xy + # Old-style catch. 
catch y f catch_end y @@ -503,6 +506,10 @@ i_put_tuple xy I # put_list Const=c n Dst => move Const x | put_list x n Dst +put_list Src Dst=x Dst => update_list Src Dst + +update_list xyc x + put_list x n x put_list y n x put_list x x x @@ -511,8 +518,6 @@ put_list y x x put_list y y x put_list x y x -put_list y x x - # put_list SrcReg Constant Dst put_list x c x @@ -527,8 +532,6 @@ put_list c y x # The following put_list instructions using x(0) are frequently used. -put_list y r r -put_list x r r put_list r n r put_list r n x put_list r x x @@ -539,6 +542,7 @@ put_list x x r put_list s s d %hot + # # Some more only used by the emulator # @@ -1567,8 +1571,20 @@ on_load # # R14A. # -recv_mark f +# Modified in OTP 21 because it turns out that we don't need the +# label after all. +# + +recv_mark f => i_recv_mark +i_recv_mark recv_set Fail | label Lbl | loop_rec Lf Reg => \ i_recv_set | label Lbl | loop_rec Lf Reg i_recv_set + +# +# OTP 21. +# + +build_stacktrace +raw_raise diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c index f14910bc72..de1d481105 100644 --- a/erts/emulator/beam/packet_parser.c +++ b/erts/emulator/beam/packet_parser.c @@ -200,7 +200,7 @@ static int http_init(void) for (i = 0; i < HTTP_HDR_HASH_SIZE; i++) http_hdr_hash[i] = NULL; for (i = 0; http_hdr_strings[i] != NULL; i++) { - ASSERT(strlen(http_hdr_strings[i]) <= HTTP_MAX_NAME_LEN); + ASSERT(sys_strlen(http_hdr_strings[i]) <= HTTP_MAX_NAME_LEN); http_hdr_table[i].index = i; http_hash_insert(http_hdr_strings[i], &http_hdr_table[i], @@ -516,7 +516,7 @@ static http_atom_t* http_hash_lookup(const char* name, int len, while (ap != NULL) { if ((ap->h == h) && (ap->len == len) && - (strncmp(ap->name, name, len) == 0)) + (sys_strncmp(ap->name, name, len) == 0)) return ap; ap = ap->next; } @@ -656,7 +656,7 @@ int packet_parse_http(const char* buf, int len, int* statep, if (*statep == 0) { /* start-line = Request-Line | Status-Line */ - if (n >= 5 && (strncmp(buf, "HTTP/", 5) == 0)) { + if (n >= 5 && (sys_strncmp(buf, "HTTP/", 5) == 0)) { int major = 0; int minor = 0; int status = 0; @@ -750,7 +750,7 @@ int packet_parse_http(const char* buf, int len, int* statep, } if (n < 8) return -1; - if (strncmp(ptr, "HTTP/", 5) != 0) + if (sys_strncmp(ptr, "HTTP/", 5) != 0) return -1; ptr += 5; n -= 5; diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index bf7d310568..13ae80e4a5 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -636,6 +636,8 @@ typedef struct preload { */ typedef Eterm ErtsTracer; +#include "erl_osenv.h" + /* * This structure contains options to all built in drivers. * None of the drivers use all of the fields. @@ -651,8 +653,7 @@ typedef struct _SysDriverOpts { int hide_window; /* Hide this windows (Windows). */ int exit_status; /* Report exit status of subprocess. */ int overlapped_io; /* Only has effect on windows NT et al */ - char *envir; /* Environment of the port process, */ - /* in Windows format. */ + erts_osenv_t envir; /* Environment of the port process */ char **argv; /* Argument vector in Unix'ish format. */ char *wd; /* Working directory. 
*/ unsigned spawn_type; /* Bitfield of ERTS_SPAWN_DRIVER | @@ -782,9 +783,6 @@ void set_break_quit(void (*)(void), void (*)(void)); void os_flavor(char*, unsigned); void os_version(int*, int*, int*); -void init_getenv_state(GETENV_STATE *); -char * getenv_string(GETENV_STATE *); -void fini_getenv_state(GETENV_STATE *); #define HAVE_ERTS_CHECK_IO_DEBUG typedef struct { @@ -805,20 +803,21 @@ int sys_double_to_chars_ext(double, char*, size_t, size_t); int sys_double_to_chars_fast(double, char*, int, int, int); void sys_get_pid(char *, size_t); -/* erts_sys_putenv() returns, 0 on success and a value != 0 on failure. */ -int erts_sys_putenv(char *key, char *value); -/* Simple variant used from drivers, raw eightbit interface */ -int erts_sys_putenv_raw(char *key, char *value); -/* erts_sys_getenv() returns 0 on success (length of value string in - *size), a value > 0 if value buffer is too small (*size is set to needed - size), and a value < 0 on failure. */ -int erts_sys_getenv(char *key, char *value, size_t *size); -/* Simple variant used from drivers, raw eightbit interface */ -int erts_sys_getenv_raw(char *key, char *value, size_t *size); -/* erts_sys_getenv__() is only allowed to be used in early init phase */ -int erts_sys_getenv__(char *key, char *value, size_t *size); -/* erst_sys_unsetenv() returns 0 on success and a value != 0 on failure. */ -int erts_sys_unsetenv(char *key); +/* erl_drv_get/putenv have been implicitly 8-bit for so long that we can't + * change them without breaking things on Windows. Their return values are + * identical to erts_osenv_get/putenv */ +int erts_sys_explicit_8bit_getenv(char *key, char *value, size_t *size); +int erts_sys_explicit_8bit_putenv(char *key, char *value); + +/* This is identical to erts_sys_explicit_8bit_getenv but falls down to the + * host OS implementation instead of erts_osenv. */ +int erts_sys_explicit_host_getenv(char *key, char *value, size_t *size); + +const erts_osenv_t *erts_sys_rlock_global_osenv(void); +void erts_sys_runlock_global_osenv(void); + +erts_osenv_t *erts_sys_rwlock_global_osenv(void); +void erts_sys_rwunlock_global_osenv(void); /* Easier to use, but not as efficient, environment functions */ char *erts_read_env(char *key); @@ -997,16 +996,36 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#define sys_memcpy(s1,s2,n) memcpy(s1,s2,n) -#define sys_memmove(s1,s2,n) memmove(s1,s2,n) -#define sys_memcmp(s1,s2,n) memcmp(s1,s2,n) -#define sys_memset(s,c,n) memset(s,c,n) -#define sys_memzero(s, n) memset(s,'\0',n) -#define sys_strcmp(s1,s2) strcmp(s1,s2) -#define sys_strncmp(s1,s2,n) strncmp(s1,s2,n) -#define sys_strcpy(s1,s2) strcpy(s1,s2) -#define sys_strncpy(s1,s2,n) strncpy(s1,s2,n) -#define sys_strlen(s) strlen(s) +/* Thin wrappers around memcpy and friends, which should always be used in + * place of plain memcpy, memset, etc. + * + * Passing NULL to any of these functions is undefined behavior even though it + * may seemingly work when the length (if any) is zero; a compiler can take + * this as a hint that the passed operand may *never* be NULL and then optimize + * based on that information. 
+ * + * (The weird casts in the assertions silence an "always evaluates to true" + * warning when an operand is the address of an lvalue) */ +#define sys_memcpy(s1,s2,n) \ + (ASSERT((void*)(s1) != NULL && (void*)(s2) != NULL), memcpy(s1,s2,n)) +#define sys_memmove(s1,s2,n) \ + (ASSERT((void*)(s1) != NULL && (void*)(s2) != NULL), memmove(s1,s2,n)) +#define sys_memcmp(s1,s2,n) \ + (ASSERT((void*)(s1) != NULL && (void*)(s2) != NULL), memcmp(s1,s2,n)) +#define sys_memset(s,c,n) \ + (ASSERT((void*)(s) != NULL), memset(s,c,n)) +#define sys_memzero(s, n) \ + (ASSERT((void*)(s) != NULL), memset(s,'\0',n)) +#define sys_strcmp(s1,s2) \ + (ASSERT((void*)(s1) != NULL && (void*)(s2) != NULL), strcmp(s1,s2)) +#define sys_strncmp(s1,s2,n) \ + (ASSERT((void*)(s1) != NULL && (void*)(s2) != NULL), strncmp(s1,s2,n)) +#define sys_strcpy(s1,s2) \ + (ASSERT((void*)(s1) != NULL && (void*)(s2) != NULL), strcpy(s1,s2)) +#define sys_strncpy(s1,s2,n) \ + (ASSERT((void*)(s1) != NULL && (void*)(s2) != NULL), strncpy(s1,s2,n)) +#define sys_strlen(s) \ + (ASSERT((void*)(s) != NULL), strlen(s)) /* define function symbols (needed in sys_drv_api) */ #define sys_fp_alloc sys_alloc diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 993585be10..4bf60619ba 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -3152,6 +3152,9 @@ tailrecur_ne: int cmp; byte* a_ptr; byte* b_ptr; + if (eq_only && a_size != b_size) { + RETURN_NEQ(a_size - b_size); + } ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize); ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize); if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) { @@ -4360,15 +4363,20 @@ erts_read_env(char *key) char *value = erts_alloc(ERTS_ALC_T_TMP, value_len); int res; while (1) { - res = erts_sys_getenv_raw(key, value, &value_len); - if (res <= 0) - break; - value = erts_realloc(ERTS_ALC_T_TMP, value, value_len); + res = erts_sys_explicit_8bit_getenv(key, value, &value_len); + + if (res >= 0) { + break; + } + + value = erts_realloc(ERTS_ALC_T_TMP, value, value_len); } - if (res != 0) { - erts_free(ERTS_ALC_T_TMP, value); - return NULL; + + if (res != 1) { + erts_free(ERTS_ALC_T_TMP, value); + return NULL; } + return value; } |
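The erts_read_env() rewrite at the end of utils.c is a grow-and-retry loop over the convention visible in the erl_drv_getenv()/erl_drv_putenv() mapping earlier in this patch: erts_sys_explicit_8bit_getenv() returns 1 on success, 0 when the key is not found, and -1 when the supplied buffer is too small, in which case the caller retries with a larger buffer. A self-contained sketch of the same pattern; demo_getenv() and demo_read_env() are hypothetical stand-ins backed by plain getenv(), not the real ERTS calls, and the detail that -1 updates *size to the required length is inferred from the realloc-and-retry in erts_read_env():

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in following the convention used by
     * erts_sys_explicit_8bit_getenv(): 1 = success, 0 = key not found,
     * -1 = *size too small (here *size is updated to the size needed,
     * which is what the retry loop below relies on). */
    static int demo_getenv(const char *key, char *value, size_t *size)
    {
        const char *v = getenv(key);
        size_t need;

        if (v == NULL)
            return 0;

        need = strlen(v) + 1;
        if (need > *size) {
            *size = need;
            return -1;
        }

        memcpy(value, v, need);
        return 1;
    }

    /* Grow-and-retry loop in the style of the rewritten erts_read_env():
     * keep enlarging the buffer while the lookup reports "too small",
     * and return NULL unless it finally reports success. */
    static char *demo_read_env(const char *key)
    {
        size_t len = 16;
        char *value = malloc(len);
        int res;

        if (value == NULL)
            return NULL;

        for (;;) {
            res = demo_getenv(key, value, &len);
            if (res >= 0)
                break;                      /* 1 = success, 0 = not found */

            {
                char *grown = realloc(value, len);
                if (grown == NULL) {
                    free(value);
                    return NULL;
                }
                value = grown;
            }
        }

        if (res != 1) {
            free(value);
            return NULL;
        }
        return value;
    }

As in erts_read_env(), the caller owns and eventually frees the returned buffer; the remapping in erl_drv_getenv() exists only so the long-standing driver API keeps reporting its historical 0 (success), 1 (buffer too small) and -1 (not found) values on top of this convention.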