Diffstat (limited to 'erts/emulator')
54 files changed, 3169 insertions, 1724 deletions
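A recurring change across the hunks below is the switch from decoding terms into a pre-sized raw heap (HAlloc/HRelease or a bare Eterm* buffer) to building them through an ErtsHeapFactory. The following sketch is not part of the commit; it only illustrates that pattern using calls that appear in the hunks (erts_factory_proc_prealloc_init, erts_decode_ext, erts_factory_close, is_value). The wrapper function and its name are hypothetical, and it assumes the emulator's internal headers are in scope.

    /* Hypothetical helper, sketching the factory-based decode pattern
     * that the hunks below convert attributes_for_module() and friends to. */
    static Eterm
    decode_ext_via_factory(Process *p, byte *ext, Uint heap_size_hint)
    {
        ErtsHeapFactory factory;
        Eterm term;

        /* Reserve heap on the process; the factory tracks how much is used. */
        erts_factory_proc_prealloc_init(&factory, p, heap_size_hint);

        /* The decoder allocates through the factory instead of a raw hp. */
        term = erts_decode_ext(&factory, &ext);

        if (is_value(term)) {
            /* Trim the unused part of the reservation, as the diff does. */
            erts_factory_close(&factory);
        }
        return term; /* THE_NON_VALUE on a bad external term */
    }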
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 5ec1409adf..0cf21b4635 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -265,6 +265,7 @@ atom get_seq_token atom get_tcw atom getenv atom gather_gc_info_result +atom gather_io_bytes atom gather_sched_wall_time_result atom getting_linked atom getting_unlinked @@ -350,6 +351,7 @@ atom message atom message_binary atom message_queue_len atom messages +atom merge_trap atom meta atom meta_match_spec atom micro_seconds diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 500a98195b..c769428266 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -33,6 +33,7 @@ #include "beam_catches.h" #include "erl_binary.h" #include "erl_nif.h" +#include "erl_bits.h" #include "erl_thr_progress.h" static void set_default_trace_pattern(Eterm module); @@ -937,7 +938,15 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size) break; case TAG_PRIMARY_HEADER: if (!header_is_transparent(val)) { - Eterm* new_p = p + thing_arityval(val); + Eterm* new_p; + if (header_is_bin_matchstate(val)) { + ErlBinMatchState *ms = (ErlBinMatchState*) p; + ErlBinMatchBuffer *mb = &(ms->mb); + if (in_area(EXPAND_POINTER(mb->orig), mod_start, mod_size)) { + return 1; + } + } + new_p = p + thing_arityval(val); ASSERT(start <= new_p && new_p < end); p = new_p; } diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index a21622f424..2d4bf4240c 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -6653,8 +6653,9 @@ new_map(Process* p, Eterm* reg, BeamInstr* I) p->htop = mhp; - factory.p = p; + erts_factory_proc_init(&factory, p); res = erts_hashmap_from_array(&factory, thp, n/2, 0); + erts_factory_close(&factory); if (p->mbuf) { Uint live = Arg(2); reg[live] = res; diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 0d40201934..006ba5d0db 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -205,10 +205,7 @@ typedef struct { typedef struct { Eterm term; /* The tagged term (in the heap). */ - Uint heap_size; /* (Exact) size on the heap. */ - SWord offset; /* Offset from temporary location to final. */ - ErlOffHeap off_heap; /* Start of linked list of ProcBins. */ - Eterm* heap; /* Heap for term. 
*/ + ErlHeapFragment* heap_frags; } Literal; /* @@ -477,6 +474,8 @@ typedef struct LoaderState { static void free_loader_state(Binary* magic); +static ErlHeapFragment* new_literal_fragment(Uint size); +static void free_literal_fragment(ErlHeapFragment*); static void loader_state_dtor(Binary* magic); static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks, Eterm group_leader, Eterm module, @@ -525,13 +524,16 @@ static void new_literal_patch(LoaderState* stp, int pos); static void new_string_patch(LoaderState* stp, int pos); static Uint new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size); static int genopargcompare(GenOpArg* a, GenOpArg* b); -static Eterm exported_from_module(Process* p, Eterm mod); -static Eterm functions_in_module(Process* p, Eterm mod); -static Eterm attributes_for_module(Process* p, Eterm mod); -static Eterm compilation_info_for_module(Process* p, Eterm mod); -static Eterm md5_of_module(Process* p, Eterm mod); -static Eterm has_native(Process* p, Eterm mod); -static Eterm native_addresses(Process* p, Eterm mod); +static Eterm get_module_info(Process* p, ErtsCodeIndex code_ix, + BeamInstr* code, Eterm module, Eterm what); +static Eterm exported_from_module(Process* p, ErtsCodeIndex code_ix, + Eterm mod); +static Eterm functions_in_module(Process* p, BeamInstr* code); +static Eterm attributes_for_module(Process* p, BeamInstr* code); +static Eterm compilation_info_for_module(Process* p, BeamInstr* code); +static Eterm md5_of_module(Process* p, BeamInstr* code); +static Eterm has_native(BeamInstr* code); +static Eterm native_addresses(Process* p, BeamInstr* code); int patch_funentries(Eterm Patchlist); int patch(Eterm Addresses, Uint fe); static int safe_mul(UWord a, UWord b, UWord* resp); @@ -883,6 +885,28 @@ free_loader_state(Binary* magic) } } +static ErlHeapFragment* new_literal_fragment(Uint size) +{ + ErlHeapFragment* bp; + bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(ERTS_ALC_T_PREPARED_CODE, + ERTS_HEAP_FRAG_SIZE(size)); + ERTS_INIT_HEAP_FRAG(bp, size); + return bp; +} + +static void free_literal_fragment(ErlHeapFragment* bp) +{ + ASSERT(bp != NULL); + do { + ErlHeapFragment* next_bp = bp->next; + + erts_cleanup_offheap(&bp->off_heap); + ERTS_HEAP_FREE(ERTS_ALC_T_PREPARED_CODE, (void *) bp, + ERTS_HEAP_FRAG_SIZE(bp->size)); + bp = next_bp; + }while (bp != NULL); +} + /* * This destructor function can safely be called multiple times. */ @@ -922,10 +946,9 @@ loader_state_dtor(Binary* magic) if (stp->literals != 0) { int i; for (i = 0; i < stp->num_literals; i++) { - if (stp->literals[i].heap != 0) { - erts_free(ERTS_ALC_T_PREPARED_CODE, - (void *) stp->literals[i].heap); - stp->literals[i].heap = 0; + if (stp->literals[i].heap_frags != 0) { + free_literal_fragment(stp->literals[i].heap_frags); + stp->literals[i].heap_frags = 0; } } erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literals); @@ -1450,6 +1473,7 @@ read_lambda_table(LoaderState* stp) return 0; } + static int read_literal_table(LoaderState* stp) { @@ -1471,7 +1495,7 @@ read_literal_table(LoaderState* stp) stp->allocated_literals = stp->num_literals; for (i = 0; i < stp->num_literals; i++) { - stp->literals[i].heap = 0; + stp->literals[i].heap_frags = 0; } for (i = 0; i < stp->num_literals; i++) { @@ -1479,28 +1503,38 @@ read_literal_table(LoaderState* stp) Sint heap_size; byte* p; Eterm val; - Eterm* hp; + ErtsHeapFactory factory; GetInt(stp, 4, sz); /* Size of external term format. 
*/ GetString(stp, p, sz); if ((heap_size = erts_decode_ext_size(p, sz)) < 0) { LoadError1(stp, "literal %d: bad external format", i); } - hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE, - heap_size*sizeof(Eterm)); - stp->literals[i].off_heap.first = 0; - stp->literals[i].off_heap.overhead = 0; - val = erts_decode_ext(&hp, &stp->literals[i].off_heap, &p); - stp->literals[i].heap_size = hp - stp->literals[i].heap; - if (stp->literals[i].heap_size > heap_size) { - erl_exit(1, "overrun by %d word(s) for literal heap, term %d", - stp->literals[i].heap_size - heap_size, i); - } - if (is_non_value(val)) { - LoadError1(stp, "literal %d: bad external format", i); - } - stp->literals[i].term = val; - stp->total_literal_size += stp->literals[i].heap_size; + + if (heap_size > 0) { + erts_factory_message_init(&factory, NULL, NULL, + new_literal_fragment(heap_size)); + factory.alloc_type = ERTS_ALC_T_PREPARED_CODE; + val = erts_decode_ext(&factory, &p); + + if (is_non_value(val)) { + LoadError1(stp, "literal %d: bad external format", i); + } + erts_factory_close(&factory); + stp->literals[i].heap_frags = factory.heap_frags; + stp->total_literal_size += erts_used_frag_sz(factory.heap_frags); + } + else { + erts_factory_dummy_init(&factory); + val = erts_decode_ext(&factory, &p); + if (is_non_value(val)) { + LoadError1(stp, "literal %d: bad external format", i); + } + ASSERT(is_immed(val)); + stp->literals[i].heap_frags = NULL; + } + stp->literals[i].term = val; + } erts_free(ERTS_ALC_T_TMP, uncompressed); return 1; @@ -4370,8 +4404,9 @@ freeze_code(LoaderState* stp) Uint* low; Uint* high; LiteralPatch* lp; - struct erl_off_heap_header* off_heap = 0; - struct erl_off_heap_header** off_heap_last = &off_heap; + ErlOffHeap code_off_heap; + + ERTS_INIT_OFF_HEAP(&code_off_heap); low = (Uint *) (code+stp->ci); high = low + stp->total_literal_size; @@ -4379,73 +4414,21 @@ freeze_code(LoaderState* stp) code[MI_LITERALS_END] = (BeamInstr) high; ptr = low; for (i = 0; i < stp->num_literals; i++) { - SWord offset; - struct erl_off_heap_header* t_off_heap; - - sys_memcpy(ptr, stp->literals[i].heap, - stp->literals[i].heap_size*sizeof(Eterm)); - offset = ptr - stp->literals[i].heap; - stp->literals[i].offset = offset; - high = ptr + stp->literals[i].heap_size; - while (ptr < high) { - Eterm val = *ptr; - switch (primary_tag(val)) { - case TAG_PRIMARY_LIST: - case TAG_PRIMARY_BOXED: - *ptr++ = offset_ptr(val, offset); - break; - case TAG_PRIMARY_HEADER: - if (header_is_transparent(val)) { - ptr++; - } else { - if (thing_subtag(val) == REFC_BINARY_SUBTAG) { - struct erl_off_heap_header* oh; - - oh = (struct erl_off_heap_header*) ptr; - if (oh->next) { - Eterm** uptr = (Eterm **) (void *) &oh->next; - *uptr += offset; - } - } - ptr += 1 + thing_arityval(val); - } - break; - default: - ptr++; - break; - } - } - ASSERT(ptr == high); - - /* - * Re-link the off_heap list for this term onto the - * off_heap list for the entire module. 
- */ - t_off_heap = stp->literals[i].off_heap.first; - if (t_off_heap) { - t_off_heap = (struct erl_off_heap_header *) - offset_ptr((UWord) t_off_heap, offset); - while (t_off_heap) { - *off_heap_last = t_off_heap; - off_heap_last = &t_off_heap->next; - t_off_heap = t_off_heap->next; - } - } + if (stp->literals[i].heap_frags) { + move_multi_frags(&ptr, &code_off_heap, stp->literals[i].heap_frags, + &stp->literals[i].term, 1); + } + else ASSERT(is_immed(stp->literals[i].term)); } - code[MI_LITERALS_OFF_HEAP] = (BeamInstr) off_heap; + code[MI_LITERALS_OFF_HEAP] = (BeamInstr) code_off_heap.first; lp = stp->literal_patches; while (lp != 0) { BeamInstr* op_ptr; - Uint literal; Literal* lit; op_ptr = code + lp->pos; lit = &stp->literals[op_ptr[0]]; - literal = lit->term; - if (is_boxed(literal) || is_list(literal)) { - literal = offset_ptr(literal, lit->offset); - } - op_ptr[0] = literal; + op_ptr[0] = lit->term; lp = lp->next; } literal_end += stp->total_literal_size; @@ -5376,19 +5359,18 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size) stp->total_literal_size += heap_size; lit = stp->literals + stp->num_literals; - lit->offset = 0; - lit->heap_size = heap_size; - lit->heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE, heap_size*sizeof(Eterm)); - lit->term = make_boxed(lit->heap); - lit->off_heap.first = 0; - lit->off_heap.overhead = 0; - *hpp = lit->heap; + lit->heap_frags = new_literal_fragment(heap_size); + lit->term = make_boxed(lit->heap_frags->mem); + *hpp = lit->heap_frags->mem; return stp->num_literals++; } Eterm erts_module_info_0(Process* p, Eterm module) { + Module* modp; + ErtsCodeIndex code_ix = erts_active_code_ix(); + BeamInstr* code; Eterm *hp; Eterm list = NIL; Eterm tup; @@ -5397,12 +5379,18 @@ erts_module_info_0(Process* p, Eterm module) return THE_NON_VALUE; } - if (erts_get_module(module, erts_active_code_ix()) == NULL) { + modp = erts_get_module(module, code_ix); + if (modp == NULL) { return THE_NON_VALUE; } + code = modp->curr.code; + if (code == NULL) { + return THE_NON_VALUE; + } + #define BUILD_INFO(What) \ - tup = erts_module_info_1(p, module, What); \ + tup = get_module_info(p, code_ix, code, module, What); \ hp = HAlloc(p, 5); \ tup = TUPLE2(hp, What, tup); \ hp += 3; \ @@ -5423,22 +5411,47 @@ erts_module_info_0(Process* p, Eterm module) Eterm erts_module_info_1(Process* p, Eterm module, Eterm what) { + Module* modp; + ErtsCodeIndex code_ix = erts_active_code_ix(); + BeamInstr* code; + + if (is_not_atom(module)) { + return THE_NON_VALUE; + } + + modp = erts_get_module(module, code_ix); + if (modp == NULL) { + return THE_NON_VALUE; + } + + code = modp->curr.code; + if (code == NULL) { + return THE_NON_VALUE; + } + + return get_module_info(p, code_ix, code, module, what); +} + +static Eterm +get_module_info(Process* p, ErtsCodeIndex code_ix, BeamInstr* code, + Eterm module, Eterm what) +{ if (what == am_module) { return module; } else if (what == am_md5) { - return md5_of_module(p, module); + return md5_of_module(p, code); } else if (what == am_exports) { - return exported_from_module(p, module); + return exported_from_module(p, code_ix, module); } else if (what == am_functions) { - return functions_in_module(p, module); + return functions_in_module(p, code); } else if (what == am_attributes) { - return attributes_for_module(p, module); + return attributes_for_module(p, code); } else if (what == am_compile) { - return compilation_info_for_module(p, module); + return compilation_info_for_module(p, code); } else if (what == am_native_addresses) { - return 
native_addresses(p, module); + return native_addresses(p, code); } else if (what == am_native) { - return has_native(p, module); + return has_native(code); } return THE_NON_VALUE; } @@ -5446,16 +5459,12 @@ erts_module_info_1(Process* p, Eterm module, Eterm what) /* * Builds a list of all functions in the given module: * [{Name, Arity},...] - * - * Returns a tagged term, or 0 on error. */ Eterm functions_in_module(Process* p, /* Process whose heap to use. */ - Eterm mod) /* Tagged atom for module. */ + BeamInstr* code) { - Module* modp; - BeamInstr* code; int i; Uint num_functions; Uint need; @@ -5463,15 +5472,6 @@ functions_in_module(Process* p, /* Process whose heap to use. */ Eterm* hp_end; Eterm result = NIL; - if (is_not_atom(mod)) { - return THE_NON_VALUE; - } - - modp = erts_get_module(mod, erts_active_code_ix()); - if (modp == NULL) { - return THE_NON_VALUE; - } - code = modp->curr.code; num_functions = code[MI_NUM_FUNCTIONS]; need = 5*num_functions; hp = HAlloc(p, need); @@ -5503,23 +5503,12 @@ functions_in_module(Process* p, /* Process whose heap to use. */ */ static Eterm -has_native(Process* p, Eterm mod) +has_native(BeamInstr *code) { Eterm result = am_false; #ifdef HIPE - Module* modp; - - if (is_not_atom(mod)) { - return THE_NON_VALUE; - } - - modp = erts_get_module(mod, erts_active_code_ix()); - if (modp == NULL) { - return THE_NON_VALUE; - } - - if (erts_is_module_native(modp->curr.code)) { - result = am_true; + if (erts_is_module_native(code)) { + result = am_true; } #endif return result; @@ -5548,15 +5537,11 @@ erts_is_module_native(BeamInstr* code) /* * Builds a list of all functions including native addresses. * [{Name,Arity,NativeAddress},...] - * - * Returns a tagged term, or 0 on error. */ static Eterm -native_addresses(Process* p, Eterm mod) +native_addresses(Process* p, BeamInstr* code) { - Module* modp; - BeamInstr* code; int i; Eterm* hp; Uint num_functions; @@ -5564,16 +5549,6 @@ native_addresses(Process* p, Eterm mod) Eterm* hp_end; Eterm result = NIL; - if (is_not_atom(mod)) { - return THE_NON_VALUE; - } - - modp = erts_get_module(mod, erts_active_code_ix()); - if (modp == NULL) { - return THE_NON_VALUE; - } - - code = modp->curr.code; num_functions = code[MI_NUM_FUNCTIONS]; need = (6+BIG_UINT_HEAP_SIZE)*num_functions; hp = HAlloc(p, need); @@ -5600,25 +5575,18 @@ native_addresses(Process* p, Eterm mod) /* * Builds a list of all exported functions in the given module: * [{Name, Arity},...] - * - * Returns a tagged term, or 0 on error. */ Eterm exported_from_module(Process* p, /* Process whose heap to use. */ + ErtsCodeIndex code_ix, Eterm mod) /* Tagged atom for module. */ { int i; Eterm* hp = NULL; Eterm* hend = NULL; Eterm result = NIL; - ErtsCodeIndex code_ix; - - if (is_not_atom(mod)) { - return THE_NON_VALUE; - } - code_ix = erts_active_code_ix(); for (i = 0; i < export_list_size(code_ix); i++) { Export* ep = export_list(i,code_ix); @@ -5648,112 +5616,59 @@ exported_from_module(Process* p, /* Process whose heap to use. */ /* * Returns a list of all attributes for the module. - * - * Returns a tagged term, or 0 on error. */ Eterm attributes_for_module(Process* p, /* Process whose heap to use. */ - Eterm mod) /* Tagged atom for module. 
*/ - + BeamInstr* code) { - Module* modp; - BeamInstr* code; - Eterm* hp; byte* ext; Eterm result = NIL; - Eterm* end; - if (is_not_atom(mod)) { - return THE_NON_VALUE; - } - - modp = erts_get_module(mod, erts_active_code_ix()); - if (modp == NULL) { - return THE_NON_VALUE; - } - code = modp->curr.code; ext = (byte *) code[MI_ATTR_PTR]; if (ext != NULL) { - hp = HAlloc(p, code[MI_ATTR_SIZE_ON_HEAP]); - end = hp + code[MI_ATTR_SIZE_ON_HEAP]; - result = erts_decode_ext(&hp, &MSO(p), &ext); + ErtsHeapFactory factory; + erts_factory_proc_prealloc_init(&factory, p, code[MI_ATTR_SIZE_ON_HEAP]); + result = erts_decode_ext(&factory, &ext); if (is_value(result)) { - ASSERT(hp <= end); + erts_factory_close(&factory); } - HRelease(p,end,hp); } return result; } /* * Returns a list containing compilation information. - * - * Returns a tagged term, or 0 on error. */ Eterm compilation_info_for_module(Process* p, /* Process whose heap to use. */ - Eterm mod) /* Tagged atom for module. */ + BeamInstr* code) { - Module* modp; - BeamInstr* code; - Eterm* hp; byte* ext; Eterm result = NIL; - Eterm* end; - - if (is_not_atom(mod)) { - return THE_NON_VALUE; - } - modp = erts_get_module(mod, erts_active_code_ix()); - if (modp == NULL) { - return THE_NON_VALUE; - } - code = modp->curr.code; ext = (byte *) code[MI_COMPILE_PTR]; if (ext != NULL) { - hp = HAlloc(p, code[MI_COMPILE_SIZE_ON_HEAP]); - end = hp + code[MI_COMPILE_SIZE_ON_HEAP]; - result = erts_decode_ext(&hp, &MSO(p), &ext); + ErtsHeapFactory factory; + erts_factory_proc_prealloc_init(&factory, p, code[MI_COMPILE_SIZE_ON_HEAP]); + result = erts_decode_ext(&factory, &ext); if (is_value(result)) { - ASSERT(hp <= end); + erts_factory_close(&factory); } - HRelease(p,end,hp); } return result; } /* * Returns the MD5 checksum for a module - * - * Returns a tagged term, or 0 on error. */ Eterm md5_of_module(Process* p, /* Process whose heap to use. */ - Eterm mod) /* Tagged atom for module. 
*/ + BeamInstr* code) { - Module* modp; - BeamInstr* code; - Eterm res = NIL; - - if (is_not_atom(mod)) { - return THE_NON_VALUE; - } - - modp = erts_get_module(mod, erts_active_code_ix()); - if (modp == NULL) { - return THE_NON_VALUE; - } - code = modp->curr.code; - if (code[MI_MD5_PTR] != 0) { - res = new_binary(p, (byte *) code[MI_MD5_PTR], MD5_SIZE); - } else { - res = am_undefined; - } - return res; + return new_binary(p, (byte *) code[MI_MD5_PTR], MD5_SIZE); } /* diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 2b782f4484..bfca861830 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -1914,7 +1914,6 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx) } else if (is_external_pid(to)) { dep = external_pid_dist_entry(to); if(dep == erts_this_dist_entry) { -#if DEBUG erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Discarding message %T from %T to %T in an old " @@ -1925,7 +1924,6 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx) external_pid_creation(to), erts_this_node->creation); erts_send_error_to_logger(p->group_leader, dsbufp); -#endif return 0; } return remote_send(p, dep, to, to, msg, ctx); @@ -1959,7 +1957,6 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx) } else if (is_external_port(to) && (external_port_dist_entry(to) == erts_this_dist_entry)) { -#if DEBUG erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Discarding message %T from %T to %T in an old " @@ -1970,7 +1967,6 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx) external_port_creation(to), erts_this_node->creation); erts_send_error_to_logger(p->group_leader, dsbufp); -#endif return 0; } else if (is_internal_port(to)) { int ret_val; diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index 4e4611de16..5b5550da43 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -85,6 +85,7 @@ typedef Uint dsize_t; /* Vector size type */ /* The heap size needed for a bignum */ #define BIG_NEED_SIZE(x) ((x) + 1) +#define BIG_NEED_FOR_BITS(bits) BIG_NEED_SIZE(((bits)-1)/D_EXP + 1) #define BIG_UINT_HEAP_SIZE (1 + 1) /* always, since sizeof(Uint) <= sizeof(Eterm) */ diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 02e65cb9c6..3cb605834f 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -242,7 +242,6 @@ print_process_info(int to, void *to_arg, Process *p) p->current[1], p->current[2]); } - erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix); erts_print(to, to_arg, "Spawned by: %T\n", p->parent); approx_started = (time_t) p->approx_started; diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index 850606dd86..cddadb346c 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -34,7 +34,7 @@ #include "erl_bits.h" #include "dtrace-wrapper.h" -static void move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap*); +static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*); /* * Copy object "obj" to process p. 
@@ -661,8 +661,7 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first, unsigned i; for (bp=first; bp!=NULL; bp=bp->next) { - move_one_frag(hpp, bp->mem, bp->used_size, off_heap); - OH_OVERHEAD(off_heap, bp->off_heap.overhead); + move_one_frag(hpp, bp, off_heap); } hp_end = *hpp; for (hp=hp_start; hp<hp_end; ++hp) { @@ -698,10 +697,10 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first, } static void -move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap) +move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap) { - Eterm* ptr = src; - Eterm* end = ptr + src_sz; + Eterm* ptr = frag->mem; + Eterm* end = ptr + frag->used_size; Eterm dummy_ref; Eterm* hp = *hpp; @@ -732,5 +731,7 @@ move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap) } } *hpp = hp; + OH_OVERHEAD(off_heap, frag->off_heap.overhead); + frag->off_heap.first = NULL; } diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 142fcb3c00..1354cee267 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -1149,6 +1149,7 @@ int erts_net_message(Port *prt, DeclareTmpHeapNoproc(ctl_default,DIST_CTL_DEFAULT_SIZE); Eterm* ctl = ctl_default; ErlOffHeap off_heap; + ErtsHeapFactory factory; Eterm* hp; Sint type; Eterm token; @@ -1225,7 +1226,8 @@ int erts_net_message(Port *prt, } hp = ctl; - arg = erts_decode_dist_ext(&hp, &off_heap, &ede); + erts_factory_static_init(&factory, ctl, ctl_len, &off_heap); + arg = erts_decode_dist_ext(&factory, &ede); if (is_non_value(arg)) { #ifdef ERTS_DIST_MSG_DBG erts_fprintf(stderr, "DIST MSG DEBUG: erts_dist_ext_size(CTL) failed:\n"); @@ -2086,6 +2088,7 @@ erts_dist_command(Port *prt, int reds_limit) DistEntry *dep = prt->dist_entry; Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf); erts_aint32_t sched_flags; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); @@ -2140,12 +2143,12 @@ erts_dist_command(Port *prt, int reds_limit) ErtsDistOutputBuf *fob; size = (*send)(prt, foq.first); + esdp->io.out += (Uint64) size; #ifdef ERTS_RAW_DIST_MSG_DBG erts_fprintf(stderr, ">> "); bw(foq.first->extp, size); #endif reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - erts_smp_atomic_add_nob(&erts_bytes_out, size); fob = foq.first; obufsize += size_obuf(fob); foq.first = foq.first->next; @@ -2225,12 +2228,12 @@ erts_dist_command(Port *prt, int reds_limit) ASSERT(&oq.first->data[0] <= oq.first->extp && oq.first->extp < oq.first->ext_endp); size = (*send)(prt, oq.first); + esdp->io.out += (Uint64) size; #ifdef ERTS_RAW_DIST_MSG_DBG erts_fprintf(stderr, ">> "); bw(oq.first->extp, size); #endif reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - erts_smp_atomic_add_nob(&erts_bytes_out, size); fob = oq.first; obufsize += size_obuf(fob); oq.first = oq.first->next; @@ -2522,7 +2525,7 @@ info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected) erts_print(to, arg, "Name: %T", dep->sysname); #ifdef DEBUG - erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 1)); + erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 0)); #endif erts_print(to, arg, "\n"); if (!connected && is_nil(dep->cid)) { diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index dcae5509ec..c9ac024743 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -581,8 +581,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) = 
erts_timer_type_size(ERTS_ALC_T_HL_PTIMER); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)] = erts_timer_type_size(ERTS_ALC_T_BIF_TIMER); +#ifdef ERTS_BTM_ACCESSOR_SUPPORT fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_ABIF_TIMER)] = erts_timer_type_size(ERTS_ALC_T_ABIF_TIMER); +#endif #ifdef HARD_DEBUG hdbg_init(); @@ -1882,8 +1884,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...) size = va_arg(argp, Uint); va_end(argp); erl_exit(1, - "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n", - allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX()); + "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n", + allctr_str, op, size, t_str); break; } case ERTS_ALC_E_NOALLCTR: @@ -2343,10 +2345,12 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) &size.processes_used, fi, ERTS_ALC_T_BIF_TIMER); +#ifdef ERTS_BTM_ACCESSOR_SUPPORT add_fix_values(&size.processes, &size.processes_used, fi, ERTS_ALC_T_ABIF_TIMER); +#endif } if (want.atom || want.atom_used) { diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 57c506458c..92c397ffd3 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -167,7 +167,7 @@ type TIMER_SERVICE LONG_LIVED SYSTEM timer_service type LL_PTIMER FIXED_SIZE PROCESSES ll_ptimer type HL_PTIMER FIXED_SIZE PROCESSES hl_ptimer type BIF_TIMER FIXED_SIZE PROCESSES bif_timer -type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer +# type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer type TIMER_REQUEST SHORT_LIVED PROCESSES timer_request type BTM_YIELD_STATE SHORT_LIVED PROCESSES btm_yield_state type REG_TABLE STANDARD SYSTEM reg_tab @@ -274,6 +274,7 @@ type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues type NEW_TIME_OFFSET SHORT_LIVED SYSTEM new_time_offset +type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request +if threads_no_smp # Need thread safe allocs, but std_alloc and fix_alloc are not; diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index f74aea80a7..7eb31fb80e 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -60,6 +60,7 @@ static Export* alloc_info_trap = NULL; static Export* alloc_sizes_trap = NULL; +static Export* gather_io_bytes_trap = NULL; static Export *gather_sched_wall_time_res_trap; static Export *gather_gc_info_res_trap; @@ -1106,19 +1107,18 @@ process_info_aux(Process *BIF_P, heap_need += mq[i].copy_struct_size; } else { - mq[i].copy_struct_size = 0; - if (mp->data.attached) - heap_need += erts_msg_attached_data_size(mp); + mq[i].copy_struct_size = mp->data.attached ? 
+ erts_msg_attached_data_size(mp) : 0; } i++; } - hp = HAlloc(BIF_P, heap_need); - hp_end = hp + heap_need; - ASSERT(i == n); - for (i--; i >= 0; i--) { - Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp); - if (rp != BIF_P) { + if (rp != BIF_P) { + hp = HAlloc(BIF_P, heap_need); + hp_end = hp + heap_need; + ASSERT(i == n); + for (i--; i >= 0; i--) { + Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp); if (is_value(msg)) { if (mq[i].copy_struct_size) msg = copy_struct(msg, @@ -1152,9 +1152,9 @@ process_info_aux(Process *BIF_P, } else { /* Make our copy of the message */ - ASSERT(size_object(msg) == hfp->used_size); + ASSERT(size_object(msg) == erts_used_frag_sz(hfp)); msg = copy_struct(msg, - hfp->used_size, + erts_used_frag_sz(hfp), &hp, &MSO(BIF_P)); } @@ -1164,27 +1164,38 @@ process_info_aux(Process *BIF_P, remove_bad_messages = 1; continue; } + res = CONS(hp, msg, res); + hp += 2; } - else { + HRelease(BIF_P, hp_end, hp+3); + } + else { + for (i--; i >= 0; i--) { + ErtsHeapFactory factory; + Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp); + + erts_factory_proc_prealloc_init(&factory, BIF_P, + mq[i].copy_struct_size+2); if (mq[i].msgp->data.attached) { /* Decode it on the heap */ - erts_move_msg_attached_data_to_heap(&hp, - &MSO(BIF_P), + erts_move_msg_attached_data_to_heap(&factory, mq[i].msgp); msg = ERL_MESSAGE_TERM(mq[i].msgp); ASSERT(!mq[i].msgp->data.attached); - if (is_non_value(msg)) { - /* Bad distribution message; ignore */ - remove_bad_messages = 1; - continue; - } - } + } + if (is_value(msg)) { + hp = erts_produce_heap(&factory, 2, 0); + res = CONS(hp, msg, res); + } + else { + /* Bad distribution message; ignore */ + remove_bad_messages = 1; + continue; + } + erts_factory_close(&factory); } - - res = CONS(hp, msg, res); - hp += 2; + hp = HAlloc(BIF_P, 3); } - HRelease(BIF_P, hp_end, hp+3); erts_free(ERTS_ALC_T_TMP, mq); if (remove_bad_messages) { ErlMessage **mpp; @@ -2687,6 +2698,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) hp = hsz ? HAlloc(BIF_P, hsz) : NULL; res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit); BIF_RET(res); + } else if (ERTS_IS_ATOM_STR("delayed_node_table_gc", BIF_ARG_1)) { + Uint hsz = 0; + Uint dntgc = erts_delayed_node_table_gc(); + if (dntgc == ERTS_NODE_TAB_DELAY_GC_INFINITY) + BIF_RET(am_infinity); + (void) erts_bld_uint(NULL, &hsz, dntgc); + hp = hsz ? 
HAlloc(BIF_P, hsz) : NULL; + res = erts_bld_uint(&hp, NULL, dntgc); + BIF_RET(res); } else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) { BIF_RET(erts_get_ethread_info(BIF_P)); } @@ -3201,7 +3221,6 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2) BIF_RET(am_true); } - /* this is a general call which return some possibly useful information */ BIF_RETTYPE statistics_1(BIF_ALIST_1) @@ -3274,23 +3293,8 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1) res = TUPLE2(hp, b1, b2); BIF_RET(res); } else if (BIF_ARG_1 == am_io) { - Eterm r1, r2; - Eterm in, out; - Uint hsz = 9; - Uint bytes_in = (Uint) erts_smp_atomic_read_nob(&erts_bytes_in); - Uint bytes_out = (Uint) erts_smp_atomic_read_nob(&erts_bytes_out); - - (void) erts_bld_uint(NULL, &hsz, bytes_in); - (void) erts_bld_uint(NULL, &hsz, bytes_out); - hp = HAlloc(BIF_P, hsz); - in = erts_bld_uint(&hp, NULL, bytes_in); - out = erts_bld_uint(&hp, NULL, bytes_out); - - r1 = TUPLE2(hp, am_input, in); - hp += 3; - r2 = TUPLE2(hp, am_output, out); - hp += 3; - BIF_RET(TUPLE2(hp, r1, r2)); + Eterm ref = erts_request_io_bytes(BIF_P); + BIF_TRAP2(gather_io_bytes_trap, BIF_P, ref, make_small(erts_no_schedulers)); } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) { Eterm res, *hp, **hpp; @@ -4050,7 +4054,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) { if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) { - if (erts_debug_wait_deallocations(BIF_P)) { + int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS; + if (erts_debug_wait_completed(BIF_P, flag)) { + ERTS_BIF_YIELD_RETURN(BIF_P, am_ok); + } + } + if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2)) { + int flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS; + if (erts_debug_wait_completed(BIF_P, flag)) { ERTS_BIF_YIELD_RETURN(BIF_P, am_ok); } } @@ -4062,6 +4073,17 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2); BIF_RET(res ? am_true : am_false); } + else if (ERTS_IS_ATOM_STR("node_tab_delayed_delete", BIF_ARG_1)) { + /* node_container_SUITE */ + Sint64 msecs; + if (term_to_Sint64(BIF_ARG_2, &msecs)) { + /* Negative value restore original value... */ + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_debug_test_node_tab_delayed_delete(msecs); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + BIF_RET(am_ok); + } + } } BIF_ERROR(BIF_P, BADARG); @@ -4259,59 +4281,41 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1) BIF_RET(am_ok); } else if (is_tuple(BIF_ARG_1)) { - Eterm* tp = tuple_val(BIF_ARG_1); - - switch (arityval(tp[0])) { - case 2: { - int opt = 0; - int val = 0; - if (ERTS_IS_ATOM_STR("copy_save", tp[1])) { - opt = ERTS_LCNT_OPT_COPYSAVE; - } else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) { - opt = ERTS_LCNT_OPT_PROCLOCK; - } else if (ERTS_IS_ATOM_STR("port_locks", tp[1])) { - opt = ERTS_LCNT_OPT_PORTLOCK; - } else if (ERTS_IS_ATOM_STR("suspend", tp[1])) { - opt = ERTS_LCNT_OPT_SUSPEND; - } else if (ERTS_IS_ATOM_STR("location", tp[1])) { - opt = ERTS_LCNT_OPT_LOCATION; - } else { - BIF_ERROR(BIF_P, BADARG); - } - if (tp[2] == am_true) { - val = 1; - } else if (tp[2] == am_false) { - val = 0; - } else { - BIF_ERROR(BIF_P, BADARG); - } - - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - - if (val) { - res = erts_lcnt_set_rt_opt(opt) ? am_true : am_false; - } else { - res = erts_lcnt_clear_rt_opt(opt) ? 
am_true : am_false; - } + Eterm* ptr = tuple_val(BIF_ARG_1); + + if ((arityval(ptr[0]) == 2) && (ptr[2] == am_false || ptr[2] == am_true)) { + int lock_opt = 0, enable = (ptr[2] == am_true) ? 1 : 0; + if (ERTS_IS_ATOM_STR("copy_save", ptr[1])) { + lock_opt = ERTS_LCNT_OPT_COPYSAVE; + } else if (ERTS_IS_ATOM_STR("process_locks", ptr[1])) { + lock_opt = ERTS_LCNT_OPT_PROCLOCK; + } else if (ERTS_IS_ATOM_STR("port_locks", ptr[1])) { + lock_opt = ERTS_LCNT_OPT_PORTLOCK; + } else if (ERTS_IS_ATOM_STR("suspend", ptr[1])) { + lock_opt = ERTS_LCNT_OPT_SUSPEND; + } else if (ERTS_IS_ATOM_STR("location", ptr[1])) { + lock_opt = ERTS_LCNT_OPT_LOCATION; + } else { + BIF_ERROR(BIF_P, BADARG); + } + + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + + if (enable) res = erts_lcnt_set_rt_opt(lock_opt) ? am_true : am_false; + else res = erts_lcnt_clear_rt_opt(lock_opt) ? am_true : am_false; + #ifdef ERTS_SMP - if (res != tp[2]) { - if (opt == ERTS_LCNT_OPT_PORTLOCK) { - erts_lcnt_enable_io_lock_count(val); - } else if (opt == ERTS_LCNT_OPT_PROCLOCK) { - erts_lcnt_enable_proc_lock_count(val); - } - } + if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PORTLOCK) { + erts_lcnt_enable_io_lock_count(enable); + } else if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PROCLOCK) { + erts_lcnt_enable_proc_lock_count(enable); + } #endif - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_RET(res); - break; - } - - default: - break; - } + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + BIF_RET(res); + } } #endif @@ -4351,6 +4355,8 @@ erts_bif_info_init(void) = erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1); gather_gc_info_res_trap = erts_export_put(am_erlang, am_gather_gc_info_result, 1); + gather_io_bytes_trap + = erts_export_put(am_erts_internal, am_gather_io_bytes, 2); process_info_init(); os_info_init(); } diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index 8d264d166e..6b96787d40 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -194,6 +194,9 @@ ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size); ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size); ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size); ERTS_GLB_INLINE void erts_bin_free(Binary *bp); +ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size, + void (*destructor)(Binary *), + int unaligned); ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size, void (*destructor)(Binary *)); @@ -332,21 +335,30 @@ erts_bin_free(Binary *bp) } ERTS_GLB_INLINE Binary * -erts_create_magic_binary(Uint size, void (*destructor)(Binary *)) +erts_create_magic_binary_x(Uint size, void (*destructor)(Binary *), + int unaligned) { - Uint bsize = ERTS_MAGIC_BIN_SIZE(size); + Uint bsize = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_SIZE(size) + : ERTS_MAGIC_BIN_SIZE(size); Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize); ASSERT(bsize > size); if (!bptr) erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize); ERTS_CHK_BIN_ALIGNMENT(bptr); bptr->flags = BIN_FLAG_MAGIC; - bptr->orig_size = ERTS_MAGIC_BIN_ORIG_SIZE(size); + bptr->orig_size = unaligned ? 
ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(size) + : ERTS_MAGIC_BIN_ORIG_SIZE(size); erts_refc_init(&bptr->refc, 0); ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor; return bptr; } +ERTS_GLB_INLINE Binary * +erts_create_magic_binary(Uint size, void (*destructor)(Binary *)) +{ + return erts_create_magic_binary_x(size, destructor, 0); +} + #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ #endif /* !__ERL_BINARY_H */ diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index b8ae93fa58..2e29bf8895 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -107,6 +107,14 @@ erts_bits_destroy_state(ERL_BITS_PROTO_0) void erts_init_bits(void) { + ERTS_CT_ASSERT(offsetof(Binary,orig_bytes) % 8 == 0); + ERTS_CT_ASSERT(offsetof(ErtsMagicBinary,u.aligned.data) % 8 == 0); + ERTS_CT_ASSERT(ERTS_MAGIC_BIN_BYTES_TO_ALIGN == + (offsetof(ErtsMagicBinary,u.aligned.data) + - offsetof(ErtsMagicBinary,u.unaligned.data))); + ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes) + == offsetof(Binary,orig_bytes)); + erts_smp_atomic_init_nob(&bits_bufs_size, 0); #if defined(ERTS_SMP) /* erl_process.c calls erts_bits_init_state() on all state instances */ diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 2e2cb98354..844272c699 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -279,6 +279,8 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock, erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; if (use_frequent_read_lock) rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + if (erts_ets_rwmtx_spin_count >= 0) + rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; #endif #ifdef ERTS_SMP erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt, @@ -2856,10 +2858,11 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3) ** External interface (NOT BIF's) */ +int erts_ets_rwmtx_spin_count = -1; /* Init the db */ -void init_db(void) +void init_db(ErtsDbSpinCount db_spin_count) { DbTable init_tb; int i; @@ -2868,10 +2871,48 @@ void init_db(void) size_t size; #ifdef ERTS_SMP + int max_spin_count = (1 << 15) - 1; /* internal limit */ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + switch (db_spin_count) { + case ERTS_DB_SPNCNT_NONE: + erts_ets_rwmtx_spin_count = 0; + break; + case ERTS_DB_SPNCNT_VERY_LOW: + erts_ets_rwmtx_spin_count = 100; + break; + case ERTS_DB_SPNCNT_LOW: + erts_ets_rwmtx_spin_count = 200; + erts_ets_rwmtx_spin_count += erts_no_schedulers * 50; + if (erts_ets_rwmtx_spin_count > 1000) + erts_ets_rwmtx_spin_count = 1000; + break; + case ERTS_DB_SPNCNT_HIGH: + erts_ets_rwmtx_spin_count = 2000; + erts_ets_rwmtx_spin_count += erts_no_schedulers * 100; + if (erts_ets_rwmtx_spin_count > 15000) + erts_ets_rwmtx_spin_count = 15000; + break; + case ERTS_DB_SPNCNT_VERY_HIGH: + erts_ets_rwmtx_spin_count = 15000; + erts_ets_rwmtx_spin_count += erts_no_schedulers * 500; + if (erts_ets_rwmtx_spin_count > max_spin_count) + erts_ets_rwmtx_spin_count = max_spin_count; + break; + case ERTS_DB_SPNCNT_EXTREMELY_HIGH: + erts_ets_rwmtx_spin_count = max_spin_count; + break; + case ERTS_DB_SPNCNT_NORMAL: + default: + erts_ets_rwmtx_spin_count = -1; + break; + } + + if (erts_ets_rwmtx_spin_count >= 0) + rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; + meta_main_tab_locks = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES, sizeof(erts_meta_main_tab_lock_t) diff --git 
a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 5b4681fc90..5039b71108 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -61,7 +61,17 @@ union db_table { "ERL_MAX_ETS_TABLES" */ #define ERL_MAX_ETS_TABLES_ENV "ERL_MAX_ETS_TABLES" -void init_db(void); +typedef enum { + ERTS_DB_SPNCNT_NONE, + ERTS_DB_SPNCNT_VERY_LOW, + ERTS_DB_SPNCNT_LOW, + ERTS_DB_SPNCNT_NORMAL, + ERTS_DB_SPNCNT_HIGH, + ERTS_DB_SPNCNT_VERY_HIGH, + ERTS_DB_SPNCNT_EXTREMELY_HIGH +} ErtsDbSpinCount; + +void init_db(ErtsDbSpinCount); int erts_db_process_exiting(Process *, ErtsProcLocks); void db_info(int, void *, int); void erts_db_foreach_table(void (*)(DbTable *, void *), void *); @@ -69,6 +79,7 @@ void erts_db_foreach_offheap(DbTable *, void (*func)(ErlOffHeap *, void *), void *); +extern int erts_ets_rwmtx_spin_count; extern int user_requested_db_max_tabs; /* set in erl_init */ extern int erts_ets_realloc_always_moves; /* set in erl_init */ extern int erts_ets_always_compress; /* set in erl_init */ diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 383ee7c430..dce0a3d621 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -670,6 +670,8 @@ int db_create_hash(Process *p, DbTable *tbl) int i; if (tb->common.type & DB_FREQ_READ) rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + if (erts_ets_rwmtx_spin_count >= 0) + rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */ (DbTable *) tb, sizeof(DbTableHashFineLocks)); @@ -1049,7 +1051,6 @@ static int db_get_element_hash(Process *p, DbTable *tbl, Eterm copy = db_copy_element_from_ets(&tb->common, p, &b->dbterm, ndex, &hp, 2); elem_list = CONS(hp, copy, elem_list); - hp += 2; } b = b->next; } diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index c6c3c55a7e..d47ff03a30 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -2175,11 +2175,12 @@ restart: { ErtsHeapFactory factory; Uint ix; - factory.p = build_proc; for (ix = 0; ix < 2*n; ix++){ ehp[ix] = esp[ix]; } + erts_factory_proc_init(&factory, build_proc); t = erts_hashmap_from_array(&factory, ehp, n, 0); + erts_factory_close(&factory); } *esp++ = t; break; @@ -3192,6 +3193,7 @@ Eterm db_copy_from_comp(DbTableCommon* tb, DbTerm* bp, Eterm** hpp, { Eterm* hp = *hpp; int i, arity = arityval(bp->tpl[0]); + ErtsHeapFactory factory; hp[0] = bp->tpl[0]; *hpp += arity + 1; @@ -3199,17 +3201,23 @@ Eterm db_copy_from_comp(DbTableCommon* tb, DbTerm* bp, Eterm** hpp, hp[tb->keypos] = copy_struct_rel(bp->tpl[tb->keypos], size_object_rel(bp->tpl[tb->keypos], bp->tpl), hpp, off_heap, bp->tpl, NULL); + + erts_factory_static_init(&factory, *hpp, bp->size - (arity+1), off_heap); + for (i=arity; i>0; i--) { if (i != tb->keypos) { if (is_immed(bp->tpl[i])) { hp[i] = bp->tpl[i]; } else { - hp[i] = erts_decode_ext_ets(hpp, off_heap, + hp[i] = erts_decode_ext_ets(&factory, elem2ext(bp->tpl, i)); } } } + *hpp = factory.hp; + erts_factory_close(&factory); + ASSERT((*hpp - hp) <= bp->size); #ifdef DEBUG_CLONE ASSERT(eq_rel(make_tuple(hp),NULL,make_tuple(bp->debug_clone),bp->debug_clone)); @@ -3228,12 +3236,13 @@ Eterm db_copy_element_from_ets(DbTableCommon* tb, Process* p, if (tb->compress && pos != tb->keypos) { byte* ext = elem2ext(obj->tpl, pos); Sint sz = erts_decode_ext_size_ets(ext, db_alloced_size_comp(obj)) + extra; - Eterm* hp = HAlloc(p, sz); - Eterm* endp = hp + 
sz; - Eterm copy = erts_decode_ext_ets(&hp, &MSO(p), ext); - *hpp = hp; - hp += extra; - HRelease(p, endp, hp); + Eterm copy; + ErtsHeapFactory factory; + + erts_factory_proc_prealloc_init(&factory, p, sz); + copy = erts_decode_ext_ets(&factory, ext); + *hpp = erts_produce_heap(&factory, extra, 0); + erts_factory_close(&factory); #ifdef DEBUG_CLONE ASSERT(eq_rel(copy, NULL, obj->debug_clone[pos], obj->debug_clone)); #endif diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 1785fc27be..71ca2713b2 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -108,8 +108,7 @@ static Eterm* sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint src_size); static Eterm* collect_heap_frags(Process* p, Eterm* heap, Eterm* htop, Eterm* objv, int nobj); -static Uint adjust_after_fullsweep(Process *p, Uint size_before, - int need, Eterm *objv, int nobj); +static void adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj); static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj); static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj); static void sweep_off_heap(Process *p, int fullsweep); @@ -677,7 +676,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint area_size; Eterm* old_htop; Uint n; - struct erl_off_heap_header** prev; + struct erl_off_heap_header** prev = NULL; if (p->flags & F_DISABLE_GC) return; @@ -786,10 +785,10 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, */ if (oh) { - prev = &MSO(p).first; - while (*prev) { - prev = &(*prev)->next; - } + prev = &MSO(p).first; + while (*prev) { + prev = &(*prev)->next; + } } /* @@ -818,6 +817,10 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, oh = oh->next; } + if (prev) { + *prev = NULL; + } + /* * We no longer need this temporary area. */ @@ -868,29 +871,37 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) ErlMessage *msgp; Uint size_after; Uint need_after; - Uint stack_size = STACK_SZ_ON_HEAP(p); - Uint fragments = MBUF_SIZE(p) + combined_message_size(p); - Uint size_before = fragments + (HEAP_TOP(p) - HEAP_START(p)); - Uint new_sz = next_heap_size(p, HEAP_SIZE(p) + fragments, 0); + const Uint stack_size = STACK_SZ_ON_HEAP(p); + const Uint size_before = MBUF_SIZE(p) + (HEAP_TOP(p) - HEAP_START(p)); + Uint new_sz = HEAP_SIZE(p) + MBUF_SIZE(p) + combined_message_size(p); + new_sz = next_heap_size(p, new_sz, 0); do_minor(p, new_sz, objv, nobj); - /* + size_after = HEAP_TOP(p) - HEAP_START(p); + *recl += (size_before - size_after); + + /* * Copy newly received message onto the end of the new heap. 
*/ - ErtsGcQuickSanityCheck(p); - for (msgp = p->msg.first; msgp; msgp = msgp->next) { - if (msgp->data.attached) { - erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp); - ErtsGcQuickSanityCheck(p); - } - } + ErtsGcQuickSanityCheck(p); + for (msgp = p->msg.first; msgp; msgp = msgp->next) { + if (msgp->data.attached) { + ErtsHeapFactory factory; + erts_factory_proc_prealloc_init(&factory, p, + erts_msg_attached_data_size(msgp)); + erts_move_msg_attached_data_to_heap(&factory, msgp); + erts_factory_close(&factory); + ErtsGcQuickSanityCheck(p); + } + } ErtsGcQuickSanityCheck(p); GEN_GCS(p)++; - size_after = HEAP_TOP(p) - HEAP_START(p); - need_after = size_after + need + stack_size; - *recl += (size_before - size_after); + need_after = ((HEAP_TOP(p) - HEAP_START(p)) + + erts_used_frag_sz(MBUF(p)) + + need + + stack_size); /* * Excessively large heaps should be shrunk, but @@ -925,6 +936,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) } ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0)); + ASSERT(MBUF(p) == NULL); return 1; /* We are done. */ } @@ -933,6 +945,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) * The heap size turned out to be just right. We are done. */ ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0)); + ASSERT(MBUF(p) == NULL); return 1; } } @@ -1212,7 +1225,9 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) { Rootset rootset; Roots* roots; - Uint size_before; + const Uint size_before = ((HEAP_TOP(p) - HEAP_START(p)) + + (OLD_HTOP(p) - OLD_HEAP(p)) + + MBUF_SIZE(p)); Eterm* n_heap; Eterm* n_htop; char* src = (char *) HEAP_START(p); @@ -1221,24 +1236,15 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) Uint oh_size = (char *) OLD_HTOP(p) - oh; Uint n; Uint new_sz; - Uint fragments = MBUF_SIZE(p) + combined_message_size(p); - - size_before = fragments + (HEAP_TOP(p) - HEAP_START(p)); /* * Do a fullsweep GC. First figure out the size of the heap * to receive all live data. */ - new_sz = HEAP_SIZE(p) + fragments + (OLD_HTOP(p) - OLD_HEAP(p)); - /* - * We used to do - * - * new_sz += STACK_SZ_ON_HEAP(p); - * - * here for no obvious reason. (The stack size is already counted once - * in HEAP_SIZE(p).) - */ + new_sz = (HEAP_SIZE(p) + MBUF_SIZE(p) + + combined_message_size(p) + + (OLD_HTOP(p) - OLD_HEAP(p))); new_sz = next_heap_size(p, new_sz, 0); /* @@ -1431,20 +1437,27 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) ErtsGcQuickSanityCheck(p); + *recl += size_before - (HEAP_TOP(p) - HEAP_START(p)); + { ErlMessage *msgp; + /* * Copy newly received message onto the end of the new heap. */ - for (msgp = p->msg.first; msgp; msgp = msgp->next) { + for (msgp = p->msg.first; msgp; msgp = msgp->next) { if (msgp->data.attached) { - erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp); + ErtsHeapFactory factory; + erts_factory_proc_prealloc_init(&factory, p, + erts_msg_attached_data_size(msgp)); + erts_move_msg_attached_data_to_heap(&factory, msgp); + erts_factory_close(&factory); ErtsGcQuickSanityCheck(p); } } } - *recl += adjust_after_fullsweep(p, size_before, need, objv, nobj); + adjust_after_fullsweep(p, need, objv, nobj); #ifdef HARDDEBUG disallow_heap_frag_ref_in_heap(p); @@ -1455,21 +1468,17 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) return 1; /* We are done. 
*/ } -static Uint -adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int nobj) +static void +adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj) { - Uint wanted, sz, size_after, need_after; + Uint wanted, sz, need_after; Uint stack_size = STACK_SZ_ON_HEAP(p); - Uint reclaimed_now; - - size_after = (HEAP_TOP(p) - HEAP_START(p)); - reclaimed_now = (size_before - size_after); /* * Resize the heap if needed. */ - need_after = size_after + need + stack_size; + need_after = (HEAP_TOP(p) - HEAP_START(p)) + need + stack_size; if (HEAP_SIZE(p) < need_after) { /* Too small - grow to match requested need */ sz = next_heap_size(p, need_after, 0); @@ -1492,8 +1501,6 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int shrink_new_heap(p, sz, objv, nobj); } } - - return reclaimed_now; } /* @@ -1870,6 +1877,21 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr if (!header_is_thing(gval)) { heap_ptr++; } else { + if (header_is_bin_matchstate(gval)) { + ErlBinMatchState *ms = (ErlBinMatchState*) heap_ptr; + ErlBinMatchBuffer *mb = &(ms->mb); + Eterm* origptr; + origptr = &(mb->orig); + ptr = boxed_val(*origptr); + val = *ptr; + if (IS_MOVED_BOXED(val)) { + *origptr = val; + mb->base = binary_bytes(*origptr); + } else if (in_area(ptr, src, src_size)) { + MOVE_BOXED(ptr,val,htop,origptr); + mb->base = binary_bytes(*origptr); + } + } heap_ptr += (thing_arityval(gval)+1); } break; @@ -1941,7 +1963,8 @@ collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop, * until next GC. */ qb = MBUF(p); - while (qb != NULL) { + while (qb != NULL) { + ASSERT(!qb->off_heap.first); /* process fragments use the MSO(p) list */ frag_size = qb->used_size * sizeof(Eterm); if (frag_size != 0) { frag_begin = (char *) qb->mem; diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c index 51cd843935..8eacb921fe 100644 --- a/erts/emulator/beam/erl_hl_timer.c +++ b/erts/emulator/beam/erl_hl_timer.c @@ -81,6 +81,13 @@ static void hdbg_chk_srv(ErtsHLTimerService *srv); #error "ERTS_REF_NUMBERS changed. Update me..." 
#endif +typedef enum { + ERTS_TMR_BIF, + ERTS_TMR_PROC, + ERTS_TMR_PORT, + ERTS_TMR_CALLBACK +} ErtsTmrType; + #define ERTS_BIF_TIMER_SHORT_TIME 5000 #ifdef ERTS_SMP @@ -93,11 +100,14 @@ static void hdbg_chk_srv(ErtsHLTimerService *srv); /* Bit 0 to 9 contains scheduler id (see mask below) */ #define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 10) #define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 11) -#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 12) -#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13) -#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14) -#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15) -#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16) +#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 12) +#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 13) +#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 14) +#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 15) +#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 16) +#ifdef ERTS_BTM_ACCESSOR_SUPPORT +#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 17) +#endif #define ERTS_TMR_ROFLG_SID_MASK \ (ERTS_TMR_ROFLG_HLT - (Uint32) 1) @@ -141,6 +151,7 @@ typedef struct { Uint32 roflgs; erts_smp_atomic32_t refc; union { + void *arg; erts_atomic_t next; } u; } ErtsTmrHead; @@ -156,6 +167,7 @@ struct ErtsHLTimer_ { Process *proc; Port *port; Eterm name; + void (*callback)(void *); } receiver; #ifdef ERTS_HLT_HARD_DEBUG @@ -172,19 +184,28 @@ struct ErtsHLTimer_ { Eterm message; ErlHeapFragment *bp; } btm; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT struct { Eterm accessor; ErtsHLTimerTree tree; } abtm; +#endif }; #define ERTS_HL_PTIMER_SIZE offsetof(ErtsHLTimer, btm) +#ifdef ERTS_BTM_ACCESSOR_SUPPORT #define ERTS_BIF_TIMER_SIZE offsetof(ErtsHLTimer, abtm) #define ERTS_ABIF_TIMER_SIZE sizeof(ErtsHLTimer) +#else +#define ERTS_BIF_TIMER_SIZE sizeof(ErtsHLTimer) +#endif typedef struct { ErtsTmrHead head; /* NEED to be first! 
*/ - void *p; + union { + void *p; + void (*callback)(void *); + } u; ErtsTWheelTimer tw_tmr; } ErtsTWTimer; @@ -340,8 +361,8 @@ refn_is_lt(Uint32 *x, Uint32 *y) #define ERTS_RBT_WANT_SMALLEST #define ERTS_RBT_WANT_LOOKUP_INSERT #define ERTS_RBT_WANT_REPLACE +#define ERTS_RBT_WANT_FOREACH #ifdef ERTS_HLT_HARD_DEBUG -# define ERTS_RBT_WANT_FOREACH # define ERTS_RBT_WANT_LOOKUP #endif #define ERTS_RBT_UNDEF @@ -452,8 +473,6 @@ same_time_list_foreach_destroy_yielding(ErtsHLTimer **root, } } -#ifdef ERTS_HLT_HARD_DEBUG - static ERTS_INLINE void same_time_list_foreach(ErtsHLTimer *root, void (*op)(ErtsHLTimer *, void *), @@ -468,6 +487,8 @@ same_time_list_foreach(ErtsHLTimer *root, } } +#ifdef ERTS_HLT_HARD_DEBUG + static ERTS_INLINE ErtsHLTimer * same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x) { @@ -584,6 +605,8 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x) #include "erl_rbtree.h" +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + #define ERTS_RBT_PREFIX abtm #define ERTS_RBT_T ErtsHLTimer #define ERTS_RBT_KEY_T Uint32 * @@ -634,6 +657,8 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x) #include "erl_rbtree.h" +#endif /* ERTS_BTM_ACCESSOR_SUPPORT */ + #ifdef ERTS_SMP static void init_canceled_queue(ErtsHLTCncldTmrQ *cq); #endif @@ -673,7 +698,9 @@ erts_timer_type_size(ErtsAlcType_t type) case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer); case ERTS_ALC_T_HL_PTIMER: return ERTS_HL_PTIMER_SIZE; case ERTS_ALC_T_BIF_TIMER: return ERTS_BIF_TIMER_SIZE; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT case ERTS_ALC_T_ABIF_TIMER: return ERTS_ABIF_TIMER_SIZE; +#endif default: ERTS_INTERNAL_ERROR("Unknown type"); } return 0; @@ -747,9 +774,9 @@ schedule_tw_timer_destroy(ErtsTWTimer *tmr) * dropped at once... */ if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC) - erts_proc_dec_refc((Process *) tmr->p); - else - erts_port_dec_refc((Port *) tmr->p); + erts_proc_dec_refc((Process *) tmr->u.p); + else if (tmr->head.roflgs & ERTS_TMR_ROFLG_PORT) + erts_port_dec_refc((Port *) tmr->u.p); erts_schedule_thr_prgr_later_cleanup_op( scheduled_tw_timer_destroy, @@ -771,7 +798,7 @@ static void tw_proc_timeout(void *vtwtp) { ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp; - Process *proc = (Process *) twtp->p; + Process *proc = (Process *) twtp->u.p; if (proc_timeout_common(proc, vtwtp)) tw_timer_dec_refc(twtp); tw_timer_dec_refc(twtp); @@ -781,7 +808,7 @@ static void tw_port_timeout(void *vtwtp) { ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp; - Port *port = (Port *) twtp->p; + Port *port = (Port *) twtp->u.p; if (port_timeout_common(port, vtwtp)) tw_timer_dec_refc(twtp); tw_timer_dec_refc(twtp); @@ -801,13 +828,26 @@ cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr) erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->tw_tmr); } +static void +tw_callback_timeout(void *vtwtp) +{ + ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp; + void (*callback)(void *) = twtp->u.callback; + void *arg = twtp->head.u.arg; + tw_timer_dec_refc(twtp); + (*callback)(arg); +} + static ErtsTWTimer * create_tw_timer(ErtsSchedulerData *esdp, - void *p, int is_proc, + ErtsTmrType type, void *p, + void (*callback)(void *), void *arg, ErtsMonotonicTime timeout_pos) { ErtsTWTimer *tmr; void (*timeout_func)(void *); + void (*cancel_func)(void *); + erts_aint32_t refc; tmr = tw_timer_alloc(); erts_twheel_init_timer(&tmr->tw_tmr); @@ -815,24 +855,48 @@ create_tw_timer(ErtsSchedulerData *esdp, tmr->head.roflgs = (Uint32) esdp->no; ERTS_HLT_ASSERT((tmr->head.roflgs & ~ERTS_TMR_ROFLG_SID_MASK) == 0); - tmr->p = p; - if (is_proc) { + + switch (type) { + + case 
ERTS_TMR_PROC: + tmr->u.p = p; tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC; timeout_func = tw_proc_timeout; + cancel_func = tw_ptimer_cancel; erts_proc_inc_refc((Process *) p); - } - else { + refc = 2; + break; + + case ERTS_TMR_PORT: + tmr->u.p = p; tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT; timeout_func = tw_port_timeout; + cancel_func = tw_ptimer_cancel; erts_port_inc_refc((Port *) p); + refc = 2; + break; + + case ERTS_TMR_CALLBACK: + tmr->head.u.arg = arg; + tmr->u.callback = callback; + + tmr->head.roflgs |= ERTS_TMR_ROFLG_CALLBACK; + timeout_func = tw_callback_timeout; + cancel_func = NULL; + refc = 1; + break; + + default: + ERTS_INTERNAL_ERROR("Unsupported timer type"); + return NULL; } - erts_smp_atomic32_init_nob(&tmr->head.refc, 2); + erts_smp_atomic32_init_nob(&tmr->head.refc, refc); erts_twheel_set_timer(esdp->timer_wheel, &tmr->tw_tmr, timeout_func, - tw_ptimer_cancel, + cancel_func, tmr, timeout_pos); @@ -852,8 +916,10 @@ hl_timer_destroy(ErtsHLTimer *tmr) else { if (roflgs & ERTS_TMR_ROFLG_PRE_ALC) bif_timer_pre_free(tmr); +#ifdef ERTS_BTM_ACCESSOR_SUPPORT else if (roflgs & ERTS_TMR_ROFLG_ABIF_TMR) erts_free(ERTS_ALC_T_ABIF_TIMER, tmr); +#endif else erts_free(ERTS_ALC_T_BIF_TIMER, tmr); } @@ -948,6 +1014,8 @@ check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv) #endif } +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + static void hlt_delete_abtm(ErtsHLTimer *tmr) { @@ -971,18 +1039,20 @@ hlt_delete_abtm(ErtsHLTimer *tmr) } } +#endif + static ErtsHLTimer * create_hl_timer(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos, - int short_time, int is_bif_tmr, + int short_time, ErtsTmrType type, void *rcvrp, Eterm rcvr, Eterm acsr, - Eterm msg, Uint32 *refn) + Eterm msg, Uint32 *refn, + void (*callback)(void *), void *arg) { ErtsHLTimerService *srv = esdp->timer_service; ErtsHLTimer *tmr, *st_tmr; erts_aint32_t refc; Uint32 roflgs; - int is_abif_tmr = is_bif_tmr && is_value(acsr) && acsr != rcvr; check_canceled_queue(esdp, srv); @@ -990,43 +1060,69 @@ create_hl_timer(ErtsSchedulerData *esdp, roflgs = ((Uint32) esdp->no) | ERTS_TMR_ROFLG_HLT; - if (!is_bif_tmr) + if (type != ERTS_TMR_BIF) { + tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER, ERTS_HL_PTIMER_SIZE); - else if (short_time) { - tmr = bif_timer_pre_alloc(); - if (!tmr) - goto alloc_bif_timer; - roflgs |= ERTS_TMR_ROFLG_PRE_ALC; - } - else { - alloc_bif_timer: - if (is_abif_tmr) - tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER, - ERTS_ABIF_TIMER_SIZE); - else - tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER, - ERTS_BIF_TIMER_SIZE); - } + tmr->timeout = timeout_pos; - tmr->timeout = timeout_pos; + switch (type) { + + case ERTS_TMR_PROC: + ERTS_HLT_ASSERT(is_internal_pid(rcvr)); - if (!is_bif_tmr) { - if (is_internal_pid(rcvr)) { erts_proc_inc_refc((Process *) rcvrp); tmr->receiver.proc = (Process *) rcvrp; roflgs |= ERTS_TMR_ROFLG_PROC; - } - else { - erts_port_inc_refc((Port *) rcvrp); + refc = 2; + break; + + case ERTS_TMR_PORT: ERTS_HLT_ASSERT(is_internal_port(rcvr)); + erts_port_inc_refc((Port *) rcvrp); tmr->receiver.port = (Port *) rcvrp; roflgs |= ERTS_TMR_ROFLG_PORT; + refc = 2; + break; + + case ERTS_TMR_CALLBACK: + roflgs |= ERTS_TMR_ROFLG_CALLBACK; + tmr->receiver.callback = callback; + tmr->head.u.arg = arg; + refc = 1; + break; + + default: + ERTS_INTERNAL_ERROR("Unsupported timer type"); + return NULL; } - refc = 2; + } - else { + else { /* ERTS_TMR_BIF */ Uint hsz; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + int is_abif_tmr = is_value(acsr) && acsr != rcvr; +#endif + + if (short_time) { + tmr = bif_timer_pre_alloc(); + if (!tmr) + goto 
alloc_bif_timer; + roflgs |= ERTS_TMR_ROFLG_PRE_ALC; + } + else { + alloc_bif_timer: +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + if (is_abif_tmr) + tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER, + ERTS_ABIF_TIMER_SIZE); + else +#endif + tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER, + ERTS_BIF_TIMER_SIZE); + } + + tmr->timeout = timeout_pos; roflgs |= ERTS_TMR_ROFLG_BIF_TMR; if (is_internal_pid(rcvr)) { @@ -1057,6 +1153,8 @@ create_hl_timer(ErtsSchedulerData *esdp, tmr->btm.refn[2] = refn[2]; tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE; + +#ifdef ERTS_BTM_ACCESSOR_SUPPORT if (is_abif_tmr) { Process *aproc; roflgs |= ERTS_TMR_ROFLG_ABIF_TMR; @@ -1071,6 +1169,9 @@ create_hl_timer(ErtsSchedulerData *esdp, erts_smp_proc_unlock(aproc, ERTS_PROC_LOCK_BTM); } } +#endif + + btm_rbt_insert(&srv->btm_tree, tmr); } tmr->head.roflgs = roflgs; @@ -1098,9 +1199,6 @@ create_hl_timer(ErtsSchedulerData *esdp, if (st_tmr) same_time_list_insert(&st_tmr->time.tree.same_time, tmr); - if (is_bif_tmr) - btm_rbt_insert(&srv->btm_tree, tmr); - #ifdef ERTS_HLT_HARD_DEBUG tmr->pending_timeout = 0; #endif @@ -1120,8 +1218,10 @@ hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs) Uint32 is_reg_name = (roflgs & ERTS_TMR_ROFLG_REG_NAME); ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR); +#ifdef ERTS_BTM_ACCESSOR_SUPPORT if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR) hlt_delete_abtm(tmr); +#endif if (is_reg_name) { Eterm pid; @@ -1203,10 +1303,13 @@ static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv) hlt_bif_timer_timeout(tmr, roflgs); else if (roflgs & ERTS_TMR_ROFLG_PROC) hlt_proc_timeout(tmr); - else { - ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PORT); + else if (roflgs & ERTS_TMR_ROFLG_PORT) hlt_port_timeout(tmr); + else { + ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK); + (*tmr->receiver.callback)(tmr->head.u.arg); } + } tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE; @@ -1300,8 +1403,10 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr) tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE; } +#ifdef ERTS_BTM_ACCESSOR_SUPPORT if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR) hlt_delete_abtm(tmr); +#endif } if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) { @@ -1670,8 +1775,9 @@ setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos, tmo_msg = wrap ? 
TUPLE3(tmp_hp, am_timeout, ref, msg) : msg; - tmr = create_hl_timer(esdp, timeout_pos, short_time, 1, NULL, - rcvr, acsr, tmo_msg, internal_ref_numbers(ref)); + tmr = create_hl_timer(esdp, timeout_pos, short_time, + ERTS_TMR_BIF, NULL, rcvr, acsr, tmo_msg, + internal_ref_numbers(ref), NULL, NULL); UnUseTmpHeap(4, c_p); @@ -1943,8 +2049,10 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp, */ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM); tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn); +#ifdef ERTS_BTM_ACCESSOR_SUPPORT if (!tmr) tmr = abtm_rbt_lookup(c_p->accessor_bif_timers, trefn); +#endif erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM); if (!tmr) return 0; @@ -2184,11 +2292,13 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info, if (!abs || !bool_arg(tp[2], abs)) return 0; break; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT case am_accessor: if (!accessor || is_not_internal_pid(tp[2])) return 0; *accessor = tp[2]; break; +#endif default: return 0; } @@ -2250,7 +2360,9 @@ typedef struct { ErtsBifTimers *bif_timers; union { proc_btm_rbt_yield_state_t proc_btm_yield_state; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT abtm_rbt_yield_state_t abtm_yield_state; +#endif } u; } ErtsBifTimerYieldState; @@ -2291,6 +2403,8 @@ int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp) return res; } +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + static void detach_bif_timer(ErtsHLTimer *tmr, void *vesdp) { @@ -2335,6 +2449,8 @@ int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp return res; } +#endif /* ERTS_BTM_ACCESSOR_SUPPORT */ + static ERTS_INLINE int parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg, ErtsMonotonicTime *conv_arg, int abs, @@ -2498,6 +2614,80 @@ BIF_RETTYPE read_timer_2(BIF_ALIST_2) return ret; } +static void +start_callback_timer(ErtsSchedulerData *esdp, + int twt, + ErtsMonotonicTime timeout_pos, + void (*callback)(void *), + void *arg) + +{ + if (twt) + create_tw_timer(esdp, ERTS_TMR_CALLBACK, NULL, + callback, arg, timeout_pos); + else + create_hl_timer(esdp, timeout_pos, 0, + ERTS_TMR_CALLBACK, NULL, + NIL, THE_NON_VALUE, NIL, + NULL, callback, arg); +} + +typedef struct { + int twt; + ErtsMonotonicTime timeout_pos; + void (*callback)(void *); + void *arg; +} ErtsStartCallbackTimerRequest; + +static void +scheduled_start_callback_timer(void *vsctr) +{ + ErtsStartCallbackTimerRequest *sctr + = (ErtsStartCallbackTimerRequest *) vsctr; + + start_callback_timer(erts_get_scheduler_data(), + sctr->twt, + sctr->timeout_pos, + sctr->callback, + sctr->arg); + + erts_free(ERTS_ALC_T_TIMER_REQUEST, vsctr); +} + +void +erts_start_timer_callback(ErtsMonotonicTime tmo, + void (*callback)(void *), + void *arg) +{ + ErtsSchedulerData *esdp; + ErtsMonotonicTime timeout_pos; + int twt; + + esdp = erts_get_scheduler_data(); + timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), + tmo); + twt = tmo < ERTS_TIMER_WHEEL_MSEC; + + if (esdp) + start_callback_timer(esdp, + twt, + timeout_pos, + callback, + arg); + else { + ErtsStartCallbackTimerRequest *sctr; + sctr = erts_alloc(ERTS_ALC_T_TIMER_REQUEST, + sizeof(ErtsStartCallbackTimerRequest)); + sctr->twt = twt; + sctr->timeout_pos = timeout_pos; + sctr->callback = callback; + sctr->arg = arg; + erts_schedule_misc_aux_work(1, + scheduled_start_callback_timer, + (void *) sctr); + } +} + /* * Process and Port timer functionality. 
* @@ -2521,11 +2711,13 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo, c_p->flags &= ~F_TIMO; if (tmo < ERTS_TIMER_WHEEL_MSEC) - tmr = (void *) create_tw_timer(esdp, (void *) c_p, 1, timeout_pos); + tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PROC, (void *) c_p, + NULL, NULL, timeout_pos); else - tmr = (void *) create_hl_timer(esdp, timeout_pos, - short_time, 0, (void *) c_p, - c_p->common.id, NIL, NIL, NULL); + tmr = (void *) create_hl_timer(esdp, timeout_pos, short_time, + ERTS_TMR_PROC, (void *) c_p, + c_p->common.id, THE_NON_VALUE, + NIL, NULL, NULL, NULL); erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr); } } @@ -2608,13 +2800,12 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo) timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo); if (tmo < ERTS_TIMER_WHEEL_MSEC) - tmr = (void *) create_tw_timer(esdp, (void *) c_prt, 0, - timeout_pos); + tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PORT, (void *) c_prt, + NULL, NULL, timeout_pos); else - tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, 0, - (void *) c_prt, - c_prt->common.id, NIL, NIL, - NULL); + tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT, + (void *) c_prt, c_prt->common.id, + THE_NON_VALUE, NIL, NULL, NULL, NULL); erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr); } @@ -2759,6 +2950,98 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm, } } +typedef struct { + void (*tclbk)(void *); + void (*func)(void *, + ErtsMonotonicTime, + void *); + void *arg; +} ErtsDebugForeachCallbackTimer; + +static void +debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct) +{ + ErtsDebugForeachCallbackTimer *dfct + = (ErtsDebugForeachCallbackTimer *) vdfct; + + if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK) + && (tmr->receiver.callback && dfct->tclbk)) + (*dfct->func)(dfct->arg, + tmr->timeout, + tmr->head.u.arg); +} + +static void +debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct) +{ + ErtsDebugForeachCallbackTimer *dfct + = (ErtsDebugForeachCallbackTimer *) vdfct; + + if (tmr->time.tree.same_time) + same_time_list_foreach(tmr->time.tree.same_time, + debug_callback_timer_foreach_list, + vdfct); + + if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK) + && (tmr->receiver.callback && dfct->tclbk)) + (*dfct->func)(dfct->arg, + tmr->timeout, + tmr->head.u.arg); +} + +static void +debug_tw_callback_timer(void *vdfct, + ErtsMonotonicTime timeout_pos, + void *vtwtp) +{ + ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp; + ErtsDebugForeachCallbackTimer *dfct + = (ErtsDebugForeachCallbackTimer *) vdfct; + + if (twtp->u.callback == dfct->tclbk) + (*dfct->func)(dfct->arg, + timeout_pos, + twtp->head.u.arg); +} + +void +erts_debug_callback_timer_foreach(void (*tclbk)(void *), + void (*func)(void *, + ErtsMonotonicTime, + void *), + void *arg) +{ + int six; + ErtsDebugForeachCallbackTimer dfct; + + dfct.tclbk = tclbk; + dfct.func = func; + dfct.arg = arg; + + if (!erts_smp_thr_progress_is_blocking()) + ERTS_INTERNAL_ERROR("Not blocking thread progress"); + + for (six = 0; six < erts_no_schedulers; six++) { + ErtsHLTimerService *srv = + erts_aligned_scheduler_data[six].esd.timer_service; + ErtsTimerWheel *twheel = + erts_aligned_scheduler_data[six].esd.timer_wheel; + + erts_twheel_debug_foreach(twheel, + tw_callback_timeout, + debug_tw_callback_timer, + (void *) &dfct); + + if (srv->yield.root) + debug_callback_timer_foreach(srv->yield.root, + (void *) &dfct); + + time_rbt_foreach(srv->btm_tree, + debug_callback_timer_foreach, + (void *) &dfct); + } +} + 
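The hunks above introduce a third timer flavour, ERTS_TMR_CALLBACK, and expose it through erts_start_timer_callback(): emulator-internal code can now have a plain C function invoked after a timeout, with no process or port as receiver, and erts_debug_callback_timer_foreach() lets debug code walk such timers while thread progress is blocked. A minimal caller sketch follows. It assumes the timeout argument is in milliseconds (it is compared against ERTS_TIMER_WHEEL_MSEC to choose between a timer-wheel and a high-level timer), and my_state, my_flush_cb and schedule_flush are illustrative names, not part of the patch.

#include "erl_hl_timer.h"

struct my_state { int dirty; };              /* illustrative state record */

static void my_flush_cb(void *arg)           /* runs later, on a scheduler thread */
{
    struct my_state *st = (struct my_state *) arg;
    st->dirty = 0;
    /* ... deferred work goes here ... */
}

static void schedule_flush(struct my_state *st)
{
    /* Short timeouts become timer-wheel timers, longer ones high-level
     * timers; the caller never needs to distinguish the two cases. */
    erts_start_timer_callback((ErtsMonotonicTime) 100, my_flush_cb, st);
}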
#ifdef ERTS_HLT_HARD_DEBUG typedef struct { diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h index 30889a71da..24c57fc873 100644 --- a/erts/emulator/beam/erl_hl_timer.h +++ b/erts/emulator/beam/erl_hl_timer.h @@ -59,7 +59,9 @@ int erts_cancel_bif_timers(Process *, ErtsBifTimers *, void **); int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **); ErtsHLTimerService *erts_create_timer_service(void); void erts_hl_timer_init(void); - +void erts_start_timer_callback(ErtsMonotonicTime, + void (*)(void *), + void *); #ifdef ERTS_SMP void erts_handle_canceled_timers(void *vesdp, @@ -76,5 +78,10 @@ void erts_debug_bif_timer_foreach(void (*func)(Eterm, ErlHeapFragment *, void *), void *arg); - +void +erts_debug_callback_timer_foreach(void (*tclbk)(void *), + void (*func)(void *, + ErtsMonotonicTime, + void *), + void *arg); #endif /* ERL_HL_TIMER_H__ */ diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 33417833a9..ac1f00d2d8 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -141,7 +141,9 @@ static void erl_init(int ncpu, int port_tab_sz_ignore_files, int legacy_port_tab, int time_correction, - ErtsTimeWarpMode time_warp_mode); + ErtsTimeWarpMode time_warp_mode, + int node_tab_delete_delay, + ErtsDbSpinCount db_spin_count); static erts_atomic_t exiting; @@ -314,7 +316,9 @@ erts_short_init(void) 0, 0, time_correction, - time_warp_mode); + time_warp_mode, + ERTS_NODE_TAB_DELAY_GC_DEFAULT, + ERTS_DB_SPNCNT_NORMAL); erts_initialized = 1; } @@ -326,7 +330,9 @@ erl_init(int ncpu, int port_tab_sz_ignore_files, int legacy_port_tab, int time_correction, - ErtsTimeWarpMode time_warp_mode) + ErtsTimeWarpMode time_warp_mode, + int node_tab_delete_delay, + ErtsDbSpinCount db_spin_count) { init_benchmarking(); @@ -366,8 +372,8 @@ erl_init(int ncpu, erts_ptab_init(); /* Must be after init_emulator() */ erts_init_binary(); /* Must be after init_emulator() */ erts_bp_init(); - init_db(); /* Must be after init_emulator */ - erts_init_node_tables(); + init_db(db_spin_count); /* Must be after init_emulator */ + erts_init_node_tables(node_tab_delete_delay); init_dist(); erl_drv_thr_init(); erts_init_async(); @@ -379,6 +385,7 @@ erl_init(int ncpu, erts_init_bif_re(); erts_init_unicode(); /* after RE to get access to PCRE unicode */ erts_init_external(); + erts_init_map(); erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2); erts_late_init_process(); #if HAVE_ERTS_MSEG @@ -629,6 +636,13 @@ void erts_usage(void) erts_fprintf(stderr, " see error_logger documentation for details\n"); erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n"); erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024); + erts_fprintf(stderr, "-zdntgc time set delayed node table gc in seconds\n"); + erts_fprintf(stderr, " valid values are infinity or intergers in the range [0-%d]\n", + ERTS_NODE_TAB_DELAY_GC_MAX); +#if 0 + erts_fprintf(stderr, "-zebwt val set ets busy wait threshold, valid values are:\n"); + erts_fprintf(stderr, " none|very_short|short|medium|long|very_long|extremely_long\n"); +#endif erts_fprintf(stderr, "\n"); erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n"); erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n"); @@ -1219,6 +1233,8 @@ erl_start(int argc, char **argv) int legacy_port_tab = 0; int time_correction; ErtsTimeWarpMode time_warp_mode; + int node_tab_delete_delay = 
ERTS_NODE_TAB_DELAY_GC_DEFAULT; + ErtsDbSpinCount db_spin_count = ERTS_DB_SPNCNT_NORMAL; set_default_time_adj(&time_correction, &time_warp_mode); @@ -2005,9 +2021,9 @@ erl_start(int argc, char **argv) case 'z': { char *sub_param = argv[i]+2; - int new_limit; if (has_prefix("dbbl", sub_param)) { + int new_limit; arg = get_arg(sub_param+4, argv[i+1], &i); new_limit = atoi(arg); if (new_limit < 1 || INT_MAX/1024 < new_limit) { @@ -2016,6 +2032,46 @@ erl_start(int argc, char **argv) } else { erts_dist_buf_busy_limit = new_limit*1024; } + } + else if (has_prefix("dntgc", sub_param)) { + long secs; + + arg = get_arg(sub_param+5, argv[i+1], &i); + if (sys_strcmp(arg, "infinity") == 0) + secs = ERTS_NODE_TAB_DELAY_GC_INFINITY; + else { + char *endptr; + errno = 0; + secs = strtol(arg, &endptr, 10); + if (errno != 0 || *arg == '\0' || *endptr != '\0' + || secs < 0 || ERTS_NODE_TAB_DELAY_GC_MAX < secs) { + erts_fprintf(stderr, "Invalid delayed node table gc: %s\n", arg); + erts_usage(); + } + } + node_tab_delete_delay = (int) secs; + } + else if (has_prefix("ebwt", sub_param)) { + arg = get_arg(sub_param+4, argv[i+1], &i); + if (sys_strcmp(arg, "none") == 0) + db_spin_count = ERTS_DB_SPNCNT_NONE; + else if (sys_strcmp(arg, "very_short") == 0) + db_spin_count = ERTS_DB_SPNCNT_VERY_LOW; + else if (sys_strcmp(arg, "short") == 0) + db_spin_count = ERTS_DB_SPNCNT_LOW; + else if (sys_strcmp(arg, "medium") == 0) + db_spin_count = ERTS_DB_SPNCNT_NORMAL; + else if (sys_strcmp(arg, "long") == 0) + db_spin_count = ERTS_DB_SPNCNT_HIGH; + else if (sys_strcmp(arg, "very_long") == 0) + db_spin_count = ERTS_DB_SPNCNT_VERY_HIGH; + else if (sys_strcmp(arg, "extremely_long") == 0) + db_spin_count = ERTS_DB_SPNCNT_EXTREMELY_HIGH; + else { + erts_fprintf(stderr, + "Invalid ets busy wait threshold: %s\n", arg); + erts_usage(); + } } else { erts_fprintf(stderr, "bad -z option %s\n", argv[i]); erts_usage(); @@ -2089,7 +2145,9 @@ erl_start(int argc, char **argv) port_tab_sz_ignore_files, legacy_port_tab, time_correction, - time_warp_mode); + time_warp_mode, + node_tab_delete_delay, + db_spin_count); load_preloaded(); erts_end_staging_code_ix(); diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c index c6d8f4df95..c4956d8569 100644 --- a/erts/emulator/beam/erl_lock_count.c +++ b/erts/emulator/beam/erl_lock_count.c @@ -20,7 +20,7 @@ /* * Description: Statistics for locks. 
* - * Author: Bj�rn-Egil Dahlberg + * Author: Björn-Egil Dahlberg * Date: 2008-07-03 */ @@ -49,7 +49,7 @@ const char *str_undefined = "undefined"; static ethr_tsd_key lcnt_thr_data_key; static int lcnt_n_thr; -static erts_lcnt_thread_data_t *lcnt_thread_data[4096]; +static erts_lcnt_thread_data_t *lcnt_thread_data[2048]; /* local functions */ @@ -83,12 +83,12 @@ static ERTS_INLINE int lcnt_log2(Uint64 v) { static char* lcnt_lock_type(Uint16 flag) { switch(flag & ERTS_LCNT_LT_ALL) { - case ERTS_LCNT_LT_SPINLOCK: return "spinlock"; - case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock"; - case ERTS_LCNT_LT_MUTEX: return "mutex"; - case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex"; - case ERTS_LCNT_LT_PROCLOCK: return "proclock"; - default: return ""; + case ERTS_LCNT_LT_SPINLOCK: return "spinlock"; + case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock"; + case ERTS_LCNT_LT_MUTEX: return "mutex"; + case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex"; + case ERTS_LCNT_LT_PROCLOCK: return "proclock"; + default: return ""; } } @@ -116,15 +116,15 @@ static void lcnt_time(erts_lcnt_time_t *time) { static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) { long ds; long dns; - + ds = t1->s - t0->s; dns = t1->ns - t0->ns; - + /* the difference should not be able to get bigger than 1 sec in ns*/ - + if (dns < 0) { - ds -= 1; - dns += 1000000000LL; + ds -= 1; + dns += 1000000000LL; } ASSERT(ds >= 0); @@ -145,7 +145,7 @@ static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) { static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) { erts_lcnt_thread_data_t *eltd; - + eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t)); if (!eltd) { ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); @@ -164,7 +164,6 @@ static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) { return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key); } - /* debug */ #if 0 @@ -183,15 +182,15 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) { type = lcnt_lock_type(lock->flag); r_state = ethr_atomic_read(&lock->r_state); w_state = ethr_atomic_read(&lock->w_state); - + if (lock->flag & flag) { erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n", - action, - lock->name, - r_state, - w_state, - type, - lock->id); + action, + lock->name, + r_state, + w_state, + type, + lock->id); } } #endif @@ -201,18 +200,18 @@ static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char erts_lcnt_lock_stats_t *stats = NULL; if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) { - for (i = 0; i < lock->n_stats; i++) { - if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) { - return &(lock->stats[i]); - } - } - if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) { - stats = &lock->stats[lock->n_stats]; - lock->n_stats++; - stats->file = file; - stats->line = line; - return stats; - } + for (i = 0; i < lock->n_stats; i++) { + if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) { + return &(lock->stats[i]); + } + } + if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) { + stats = &lock->stats[lock->n_stats]; + lock->n_stats++; + stats->file = file; + stats->line = line; + return stats; + } } return &lock->stats[0]; } @@ -222,53 +221,48 @@ static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *tim unsigned long r; if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) { - idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1; + idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1; } 
else { - r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT; - if (r) idx = lcnt_log2(r); - else idx = 0; + r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT; + if (r) idx = lcnt_log2(r); + else idx = 0; } hist->ns[idx]++; } static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, - erts_lcnt_time_t *time_wait) { - + erts_lcnt_time_t *time_wait) { + ethr_atomic_inc(&stats->tries); if (lock_in_conflict) - ethr_atomic_inc(&stats->colls); + ethr_atomic_inc(&stats->colls); if (time_wait) { - lcnt_time_add(&(stats->timer), time_wait); - stats->timer_n++; - lcnt_update_stats_hist(&stats->hist,time_wait); + lcnt_time_add(&(stats->timer), time_wait); + stats->timer_n++; + lcnt_update_stats_hist(&stats->hist,time_wait); } } -/* - * interface - */ +/* interface */ void erts_lcnt_init() { erts_lcnt_thread_data_t *eltd = NULL; - + /* init lock */ if (ethr_mutex_init(&lcnt_data_lock) != 0) abort(); /* init tsd */ lcnt_n_thr = 0; - ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data"); lcnt_lock(); - erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK | ERTS_LCNT_OPT_LOCATION; - + erts_lcnt_rt_options = ERTS_LCNT_OPT_LOCATION | ERTS_LCNT_OPT_PROCLOCK; eltd = lcnt_thread_data_alloc(); - ethr_tsd_set(lcnt_thr_data_key, eltd); - + /* init lcnt structure */ erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t)); if (!erts_lcnt_data) { @@ -293,7 +287,7 @@ void erts_lcnt_late_init() { erts_lcnt_lock_list_t *erts_lcnt_list_init(void) { erts_lcnt_lock_list_t *list; - + list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t)); if (!list) { ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); @@ -307,14 +301,14 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) { /* only do this on the list with the deleted locks! */ void erts_lcnt_list_clear(erts_lcnt_lock_list_t *list) { erts_lcnt_lock_t *lock = NULL, - *next = NULL; + *next = NULL; lock = list->head; - + while(lock != NULL) { - next = lock->next; - free(lock); - lock = next; + next = lock->next; + free(lock); + lock = next; } list->head = NULL; @@ -327,26 +321,25 @@ void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) tail = list->tail; if (tail) { - tail->next = lock; - lock->prev = tail; + tail->next = lock; + lock->prev = tail; } else { - list->head = lock; - lock->prev = NULL; - ASSERT(!lock->next); + list->head = lock; + lock->prev = NULL; + ASSERT(!lock->next); } lock->next = NULL; list->tail = lock; - + list->n++; } void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) { - if (lock->next) lock->next->prev = lock->prev; if (lock->prev) lock->prev->next = lock->next; if (list->head == lock) list->head = lock->next; if (list->tail == lock) list->tail = lock->prev; - + lock->prev = NULL; lock->next = NULL; list->n--; @@ -364,12 +357,9 @@ void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) { void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) { int i; - if (!name) { - lock->flag = 0; - return; - } + if (name == NULL) { ERTS_LCNT_CLEAR_FLAG(lock); return; } lcnt_lock(); - + lock->next = NULL; lock->prev = NULL; lock->flag = flag; @@ -378,46 +368,55 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter ethr_atomic_init(&lock->r_state, 0); ethr_atomic_init(&lock->w_state, 0); - #ifdef DEBUG ethr_atomic_init(&lock->flowstate, 0); #endif - + lock->n_stats = 1; for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) { - lcnt_clear_stats(&lock->stats[i]); + 
lcnt_clear_stats(&lock->stats[i]); } erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock); lcnt_unlock(); } - +/* init empty, instead of zero struct */ +void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock) { + lock->next = NULL; + lock->prev = NULL; + lock->flag = 0; + lock->name = NULL; + lock->id = NIL; + ethr_atomic_init(&lock->r_state, 0); + ethr_atomic_init(&lock->w_state, 0); +#ifdef DEBUG + ethr_atomic_init(&lock->flowstate, 0); +#endif + lock->n_stats = 0; + sys_memzero(lock->stats, sizeof(lock->stats)); +} +/* destroy lock */ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) { - erts_lcnt_lock_t *deleted_lock; - - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; - + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; lcnt_lock(); if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) { - /* copy structure and insert the copy */ - - deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t)); + erts_lcnt_lock_t *deleted_lock; + /* copy structure and insert the copy */ + deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t)); if (!deleted_lock) { ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); } - memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t)); - - deleted_lock->next = NULL; - deleted_lock->prev = NULL; - - erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock); + memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t)); + deleted_lock->next = NULL; + deleted_lock->prev = NULL; + erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock); } /* delete original */ erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock); - lock->flag = 0; - + ERTS_LCNT_CLEAR_FLAG(lock); + lcnt_unlock(); } @@ -426,16 +425,15 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) { void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) { erts_aint_t r_state = 0, w_state = 0; erts_lcnt_thread_data_t *eltd; - + if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; eltd = lcnt_get_thread_data(); - ASSERT(eltd); - + w_state = ethr_atomic_read(&lock->w_state); - + if (option & ERTS_LCNT_LO_WRITE) { r_state = ethr_atomic_read(&lock->r_state); ethr_atomic_inc( &lock->w_state); @@ -443,48 +441,47 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) { if (option & ERTS_LCNT_LO_READ) { ethr_atomic_inc( &lock->r_state); } - + /* we cannot acquire w_lock if either w or r are taken */ /* we cannot acquire r_lock if w_lock is taken */ - + if ((w_state > 0) || (r_state > 0)) { - eltd->lock_in_conflict = 1; - if (eltd->timer_set == 0) { - lcnt_time(&eltd->timer); - } - eltd->timer_set++; + eltd->lock_in_conflict = 1; + if (eltd->timer_set == 0) { + lcnt_time(&eltd->timer); + } + eltd->timer_set++; } else { - eltd->lock_in_conflict = 0; + eltd->lock_in_conflict = 0; } } void erts_lcnt_lock(erts_lcnt_lock_t *lock) { erts_aint_t w_state; erts_lcnt_thread_data_t *eltd; - + if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; w_state = ethr_atomic_read(&lock->w_state); ethr_atomic_inc(&lock->w_state); - eltd = lcnt_get_thread_data(); ASSERT(eltd); if (w_state > 0) { - eltd->lock_in_conflict = 1; - /* only set the timer if nobody else has it - * This should only happen when proc_locks aquires several locks - * 'atomicly'. All other locks will block the thread if w_state > 0 - * i.e. locked. 
- */ - if (eltd->timer_set == 0) { - lcnt_time(&eltd->timer); - } - eltd->timer_set++; + eltd->lock_in_conflict = 1; + /* only set the timer if nobody else has it + * This should only happen when proc_locks aquires several locks + * 'atomicly'. All other locks will block the thread if w_state > 0 + * i.e. locked. + */ + if (eltd->timer_set == 0) { + lcnt_time(&eltd->timer); + } + eltd->timer_set++; } else { - eltd->lock_in_conflict = 0; + eltd->lock_in_conflict = 0; } } @@ -493,16 +490,19 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) { /* should check if this thread was "waiting" */ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; ethr_atomic_dec(&lock->w_state); } -/* erts_lcnt_lock_post - * used when we get a lock (i.e. directly after a lock operation) +/* + * erts_lcnt_lock_post + * + * Used when we get a lock (i.e. directly after a lock operation) * if the timer was set then we had to wait for the lock * lock_post will calculate the wait time. */ + void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) { erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0); } @@ -517,31 +517,31 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line #endif if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; - + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; + #ifdef DEBUG if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) { - flowstate = ethr_atomic_read(&lock->flowstate); - ASSERT(flowstate == 0); - ethr_atomic_inc(&lock->flowstate); + flowstate = ethr_atomic_read(&lock->flowstate); + ASSERT(flowstate == 0); + ethr_atomic_inc(&lock->flowstate); } #endif - + eltd = lcnt_get_thread_data(); - + ASSERT(eltd); /* if lock was in conflict, time it */ stats = lcnt_get_lock_stats(lock, file, line); if (eltd->timer_set) { - lcnt_time(&timer); - - lcnt_time_diff(&time_wait, &timer, &(eltd->timer)); - lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait); - eltd->timer_set--; - ASSERT(eltd->timer_set >= 0); + lcnt_time(&timer); + + lcnt_time_diff(&time_wait, &timer, &(eltd->timer)); + lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait); + eltd->timer_set--; + ASSERT(eltd->timer_set >= 0); } else { - lcnt_update_stats(stats, eltd->lock_in_conflict, NULL); + lcnt_update_stats(stats, eltd->lock_in_conflict, NULL); } } @@ -550,27 +550,28 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) { if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state); if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state); } void erts_lcnt_unlock(erts_lcnt_lock_t *lock) { -#ifdef DEBUG - erts_aint_t w_state; - erts_aint_t flowstate; -#endif if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; #ifdef DEBUG - /* flowstate */ - flowstate = ethr_atomic_read(&lock->flowstate); - ASSERT(flowstate == 1); - ethr_atomic_dec(&lock->flowstate); - - /* write state */ - w_state = ethr_atomic_read(&lock->w_state); - ASSERT(w_state > 0); + { + erts_aint_t w_state; + erts_aint_t flowstate; + + /* flowstate */ + flowstate = 
ethr_atomic_read(&lock->flowstate); + ASSERT(flowstate == 1); + ethr_atomic_dec(&lock->flowstate); + + /* write state */ + w_state = ethr_atomic_read(&lock->w_state); + ASSERT(w_state > 0); + } #endif ethr_atomic_dec(&lock->w_state); } @@ -579,35 +580,34 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) { void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) { if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; /* Determine lock_state via res instead of state */ if (res != EBUSY) { - if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state); - if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state); - lcnt_update_stats(&(lock->stats[0]), 0, NULL); + if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state); + if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state); + lcnt_update_stats(&(lock->stats[0]), 0, NULL); } else { ethr_atomic_inc(&lock->stats[0].tries); ethr_atomic_inc(&lock->stats[0].colls); } } - + void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) { /* Determine lock_state via res instead of state */ -#ifdef DEBUG - erts_aint_t flowstate; -#endif if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; - if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return; if (res != EBUSY) { - #ifdef DEBUG - flowstate = ethr_atomic_read(&lock->flowstate); - ASSERT(flowstate == 0); - ethr_atomic_inc( &lock->flowstate); + { + erts_aint_t flowstate; + flowstate = ethr_atomic_read(&lock->flowstate); + ASSERT(flowstate == 0); + ethr_atomic_inc( &lock->flowstate); + } #endif - ethr_atomic_inc(&lock->w_state); - lcnt_update_stats(&(lock->stats[0]), 0, NULL); + ethr_atomic_inc(&lock->w_state); + lcnt_update_stats(&(lock->stats[0]), 0, NULL); } else { ethr_atomic_inc(&lock->stats[0].tries); ethr_atomic_inc(&lock->stats[0].colls); @@ -662,13 +662,13 @@ void erts_lcnt_clear_counters(void) { lcnt_lock(); list = erts_lcnt_data->current_locks; - + for (lock = list->head; lock != NULL; lock = lock->next) { - for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) { - stats = &lock->stats[i]; - lcnt_clear_stats(stats); - } - lock->n_stats = 1; + for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) { + stats = &lock->stats[i]; + lcnt_clear_stats(stats); + } + lock->n_stats = 1; } /* empty deleted locks in lock list */ @@ -681,14 +681,14 @@ void erts_lcnt_clear_counters(void) { erts_lcnt_data_t *erts_lcnt_get_data(void) { erts_lcnt_time_t timer_stop; - + lcnt_lock(); - + lcnt_time(&timer_stop); lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start); - + lcnt_unlock(); - + return erts_lcnt_data; } diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h index 09fadd7e9e..051f55271d 100644 --- a/erts/emulator/beam/erl_lock_count.h +++ b/erts/emulator/beam/erl_lock_count.h @@ -20,7 +20,7 @@ /* * Description: Statistics for locks. * - * Author: Bj�rn-Egil Dahlberg + * Author: Björn-Egil Dahlberg * Date: 2008-07-03 * Abstract: * Locks statistics internal representation. 
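The probes above all feed lcnt_update_stats_hist(), which buckets each measured wait by the log2 of its right-shifted nanosecond count, with one overflow slot for waits that exceed the histogram range (a full second, or more than ERTS_LCNT_HISTOGRAM_MAX_NS). The sketch below restates that mapping in isolation; wait_to_slot() is an illustrative helper, not part of the patch, and it assumes lcnt_log2() and the ERTS_LCNT_HISTOGRAM_* constants are visible as they are inside erl_lock_count.c.

static unsigned wait_to_slot(erts_lcnt_time_t *w)
{
    unsigned long r;
    if (w->s > 0 || w->ns > ERTS_LCNT_HISTOGRAM_MAX_NS)
        return ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;    /* overflow bucket */
    r = w->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;         /* drop fine resolution */
    return r ? lcnt_log2(r) : 0;                     /* log2 bucketing */
}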
@@ -95,30 +95,35 @@ #define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7) #define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \ - | ERTS_LCNT_LO_WRITE ) + | ERTS_LCNT_LO_WRITE ) #define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \ - | ERTS_LCNT_LT_RWSPINLOCK \ - | ERTS_LCNT_LT_MUTEX \ - | ERTS_LCNT_LT_RWMUTEX \ - | ERTS_LCNT_LT_PROCLOCK ) + | ERTS_LCNT_LT_RWSPINLOCK \ + | ERTS_LCNT_LT_MUTEX \ + | ERTS_LCNT_LT_RWMUTEX \ + | ERTS_LCNT_LT_PROCLOCK ) + +#define ERTS_LCNT_LOCK_TYPE(lock) ((lock)->flag & ERTS_LCNT_LT_ALL) +#define ERTS_LCNT_IS_LOCK_INVALID(lock) (!((lock)->flag & ERTS_LCNT_LT_ALL)) +#define ERTS_LCNT_CLEAR_FLAG(lock) ((lock)->flag = 0) + /* runtime options */ -#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0) -#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1) -#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2) -#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 3) -#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 4) +#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0) +#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1) +#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2) +#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 3) +#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 4) typedef struct { unsigned long s; unsigned long ns; } erts_lcnt_time_t; - + extern erts_lcnt_time_t timer_start; typedef struct { - Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */ + Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */ } erts_lcnt_hist_t; typedef struct erts_lcnt_lock_stats_s { @@ -129,10 +134,10 @@ typedef struct erts_lcnt_lock_stats_s { char *file; /* which file the lock was taken */ unsigned int line; /* line number in file */ - + ethr_atomic_t tries; /* n tries to get lock */ ethr_atomic_t colls; /* n collisions of tries to get lock */ - + unsigned long timer_n; /* #times waited for lock */ erts_lcnt_time_t timer; /* total wait time for lock */ erts_lcnt_hist_t hist; @@ -155,7 +160,7 @@ typedef struct erts_lcnt_lock_s { /* statistics */ unsigned int n_stats; erts_lcnt_lock_stats_t stats[ERTS_LCNT_MAX_LOCK_LOCATIONS]; /* first entry is "undefined"*/ - + /* chains for list handling */ /* data is hold by lcnt_lock */ struct erts_lcnt_lock_s *prev; @@ -167,7 +172,7 @@ typedef struct { erts_lcnt_lock_t *tail; unsigned long n; } erts_lcnt_lock_list_t; - + typedef struct { erts_lcnt_time_t duration; /* time since last clear */ erts_lcnt_lock_list_t *current_locks; @@ -205,6 +210,7 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock); /* lock operations (global) */ void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag); void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id); +void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock); void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock); void erts_lcnt_lock(erts_lcnt_lock_t *lock); @@ -226,7 +232,5 @@ void erts_lcnt_clear_counters(void); char *erts_lcnt_lock_type(Uint16 type); erts_lcnt_data_t *erts_lcnt_get_data(void); -#define ERTS_LCNT_LOCK_TYPE(lockp) ((lockp)->flag & ERTS_LCNT_LT_ALL) - #endif /* ifdef ERTS_ENABLE_LOCK_COUNT */ #endif /* ifndef ERTS_LOCK_COUNT_H__ */ diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c index a1bd39dbc8..95a10daa67 100644 --- a/erts/emulator/beam/erl_map.c +++ b/erts/emulator/beam/erl_map.c @@ -32,6 +32,7 @@ #include "erl_process.h" #include "error.h" #include "bif.h" +#include "erl_binary.h" #include "erl_map.h" @@ -79,8 +80,13 @@ typedef struct { static Eterm 
flatmap_merge(Process *p, Eterm nodeA, Eterm nodeB); -static Eterm map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args); -static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB); +static BIF_RETTYPE map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args); +struct HashmapMergeContext_; +static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_args, + struct HashmapMergeContext_*); +static Export hashmap_merge_trap_export; +static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1); +static Uint hashmap_subtree_size(Eterm node); static Eterm hashmap_to_list(Process *p, Eterm map); static Eterm hashmap_keys(Process *p, Eterm map); static Eterm hashmap_values(Process *p, Eterm map); @@ -95,6 +101,15 @@ static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]); static int hxnodecmp(hxnode_t* a, hxnode_t* b); static int hxnodecmpkey(hxnode_t* a, hxnode_t* b); + +void erts_init_map(void) { + erts_init_trap_export(&hashmap_merge_trap_export, + am_maps, am_merge_trap, 1, + &maps_merge_trap_1); + return; +} + + /* erlang:map_size/1 * the corresponding instruction is implemented in: * beam/erl_bif_guard.c @@ -410,8 +425,9 @@ static Eterm hashmap_from_validated_list(Process *p, Eterm list, Uint size) { } UnUseTmpHeap(2,p); - factory.p = p; + erts_factory_proc_init(&factory, p); res = hashmap_from_unsorted_array(&factory, hxns, size, 0); + erts_factory_close(&factory); erts_free(ERTS_ALC_T_TMP, (void *) hxns); ERTS_VERIFY_UNUSED_TEMP_ALLOC(p); @@ -515,8 +531,9 @@ Eterm erts_hashmap_from_ks_and_vs_extra(Process *p, Eterm *ks, Eterm *vs, Uint n hxns[i].i = i; } - factory.p = p; + erts_factory_proc_init(&factory, p); res = hashmap_from_unsorted_array(&factory, hxns, sz, 0); + erts_factory_close(&factory); erts_free(ERTS_ALC_T_TMP, (void *) hxns); ERTS_VERIFY_UNUSED_TEMP_ALLOC(p); @@ -942,15 +959,15 @@ BIF_RETTYPE maps_merge_2(BIF_ALIST_2) { BIF_RET(flatmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2)); } else if (is_hashmap(BIF_ARG_2)) { /* Will always become a tree */ - BIF_RET(map_merge_mixed(BIF_P, BIF_ARG_1, BIF_ARG_2, 0)); + return map_merge_mixed(BIF_P, BIF_ARG_1, BIF_ARG_2, 0); } BIF_P->fvalue = BIF_ARG_2; } else if (is_hashmap(BIF_ARG_1)) { if (is_hashmap(BIF_ARG_2)) { - BIF_RET(hashmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2)); + return hashmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2, 0, NULL); } else if (is_flatmap(BIF_ARG_2)) { /* Will always become a tree */ - BIF_RET(map_merge_mixed(BIF_P, BIF_ARG_2, BIF_ARG_1, 1)); + return map_merge_mixed(BIF_P, BIF_ARG_2, BIF_ARG_1, 1); } BIF_P->fvalue = BIF_ARG_2; } else { @@ -1063,8 +1080,9 @@ static Eterm flatmap_merge(Process *p, Eterm nodeA, Eterm nodeB) { hxns[i].i = i; } - factory.p = p; + erts_factory_proc_init(&factory, p); res = hashmap_from_unsorted_array(&factory, hxns, n, 0); + erts_factory_close(&factory); erts_free(ERTS_ALC_T_TMP, (void *) hxns); ERTS_VERIFY_UNUSED_TEMP_ALLOC(p); @@ -1107,36 +1125,71 @@ static Eterm map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args) hxns[i].i = i; } - factory.p = p; + erts_factory_proc_init(&factory, p); res = hashmap_from_unsorted_array(&factory, hxns, n, 0); + erts_factory_close(&factory); erts_free(ERTS_ALC_T_TMP, (void *) hxns); ERTS_VERIFY_UNUSED_TEMP_ALLOC(p); - return swap_args ? 
hashmap_merge(p, tree, res) : hashmap_merge(p, res, tree); + return hashmap_merge(p, res, tree, swap_args, NULL); +} + +#define PSTACK_TYPE struct HashmapMergePStackType +struct HashmapMergePStackType { + Eterm nodeA, nodeB; + Eterm *srcA, *srcB; + Uint32 abm, bbm, rbm; /* node bitmaps */ + int mix; /* &1: there are unique A stuff in node + * &2: there are unique B stuff in node */ + int ix; + Eterm array[16]; /* temp node construction area */ +}; + +typedef struct HashmapMergeContext_ { + Uint size; /* total key-value counter */ + unsigned int lvl; + Eterm trap_bin; + ErtsPStack pstack; +#ifdef DEBUG + Eterm dbg_map_A, dbg_map_B; +#endif +} HashmapMergeContext; + +static void hashmap_merge_ctx_destructor(Binary* ctx_bin) +{ + HashmapMergeContext* ctx = (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin); + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor); + + PSTACK_DESTROY_SAVED(&ctx->pstack); +} + +BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1) { + Binary* ctx_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val; + + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor); + + return hashmap_merge(BIF_P, NIL, NIL, 0, + (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin)); } #define HALLOC_EXTRA 200 +#define MAP_MERGE_LOOP_FACTOR 8 -static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB) { +static BIF_RETTYPE hashmap_merge(Process *p, Eterm map_A, Eterm map_B, + int swap_args, HashmapMergeContext* ctx) { #define PSTACK_TYPE struct HashmapMergePStackType - struct HashmapMergePStackType { - Eterm *srcA, *srcB; - Uint32 abm, bbm, rbm; /* node bitmaps */ - int keepA; - int ix; - Eterm array[16]; - }; PSTACK_DECLARE(s, 4); - struct HashmapMergePStackType* sp = PSTACK_PUSH(s); - Eterm *hp, *nhp; + HashmapMergeContext local_ctx; + struct HashmapMergePStackType* sp; + Uint32 hx; + Eterm res = THE_NON_VALUE; Eterm hdrA, hdrB; - Uint32 ahx, bhx; - Uint size; /* total key-value counter */ - int keepA = 0; - unsigned int lvl = 0; + Eterm *hp, *nhp; + Eterm trap_ret; + Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * MAP_MERGE_LOOP_FACTOR); + Sint reds = initial_reds; DeclareTmpHeap(th,2,p); - Eterm res = THE_NON_VALUE; UseTmpHeap(2,p); /* @@ -1144,152 +1197,139 @@ static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB) { * and merge each pair of nodes. */ - { - hashmap_head_t* a = (hashmap_head_t*) hashmap_val(nodeA); - hashmap_head_t* b = (hashmap_head_t*) hashmap_val(nodeB); - size = a->size + b->size; + PSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); + + if (ctx == NULL) { /* first call */ + hashmap_head_t* a = (hashmap_head_t*) hashmap_val(map_A); + hashmap_head_t* b = (hashmap_head_t*) hashmap_val(map_B); + + sp = PSTACK_PUSH(s); + sp->srcA = swap_args ? &map_B : &map_A; + sp->srcB = swap_args ? 
&map_A : &map_B; + sp->mix = 0; + local_ctx.size = a->size + b->size; + local_ctx.lvl = 0; + #ifdef DEBUG + local_ctx.dbg_map_A = map_A; + local_ctx.dbg_map_B = map_B; + local_ctx.trap_bin = THE_NON_VALUE; + #endif + ctx = &local_ctx; + } + else { + PSTACK_RESTORE(s, &ctx->pstack); + sp = PSTACK_TOP(s); + goto resume_from_trap; } recurse: - if (primary_tag(nodeA) == TAG_PRIMARY_BOXED && - primary_tag(nodeB) == TAG_PRIMARY_LIST) { - /* Avoid implementing this combination by switching places */ - Eterm tmp = nodeA; - nodeA = nodeB; - nodeB = tmp; - keepA = !keepA; - } - - switch (primary_tag(nodeA)) { - case TAG_PRIMARY_LIST: { - sp->srcA = list_val(nodeA); - switch (primary_tag(nodeB)) { - case TAG_PRIMARY_LIST: { /* LEAF + LEAF */ - sp->srcB = list_val(nodeB); - - if (EQ(CAR(sp->srcA), CAR(sp->srcB))) { - --size; - res = keepA ? nodeA : nodeB; - } else { - ahx = hashmap_restore_hash(th, lvl, CAR(sp->srcA)); - bhx = hashmap_restore_hash(th, lvl, CAR(sp->srcB)); - sp->abm = 1 << hashmap_index(ahx); - sp->bbm = 1 << hashmap_index(bhx); + sp->nodeA = *sp->srcA; + sp->nodeB = *sp->srcB; - sp->srcA = &nodeA; - sp->srcB = &nodeB; - } - break; - } - case TAG_PRIMARY_BOXED: { /* LEAF + NODE */ - sp->srcB = boxed_val(nodeB); - ASSERT(is_header(*sp->srcB)); - hdrB = *sp->srcB++; - - ahx = hashmap_restore_hash(th, lvl, CAR(sp->srcA)); - sp->abm = 1 << hashmap_index(ahx); - sp->srcA = &nodeA; - switch(hdrB & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: - sp->srcB++; - sp->bbm = 0xffff; - break; + if (sp->nodeA == sp->nodeB) { + res = sp->nodeA; + ctx->size -= is_list(sp->nodeB) ? 1 : hashmap_subtree_size(sp->nodeB); + } + else { + if (is_list(sp->nodeA)) { /* A is LEAF */ + Eterm keyA = CAR(list_val(sp->nodeA)); + + if (is_list(sp->nodeB)) { /* LEAF + LEAF */ + Eterm keyB = CAR(list_val(sp->nodeB)); + + if (EQ(keyA, keyB)) { + --ctx->size; + res = sp->nodeB; + sp->mix = 2; /* We assume values differ. + + Don't spend time comparing big values. + - Might waste some heap space for internal + nodes that could otherwise be reused. 
*/ + goto merge_nodes; + } + } + hx = hashmap_restore_hash(th, ctx->lvl, keyA); + sp->abm = 1 << hashmap_index(hx); + /* keep srcA pointing at the leaf */ + } + else { /* A is NODE */ + sp->srcA = boxed_val(sp->nodeA); + hdrA = *sp->srcA++; + ASSERT(is_header(hdrA)); + switch (hdrA & _HEADER_MAP_SUBTAG_MASK) { + case HAMT_SUBTAG_HEAD_ARRAY: { + sp->srcA++; + sp->abm = 0xffff; + break; + } + case HAMT_SUBTAG_HEAD_BITMAP: sp->srcA++; + case HAMT_SUBTAG_NODE_BITMAP: { + sp->abm = MAP_HEADER_VAL(hdrA); + break; + } + default: + erl_exit(ERTS_ABORT_EXIT, "bad header %ld\r\n", hdrA); + } + } - case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++; - case HAMT_SUBTAG_NODE_BITMAP: - sp->bbm = MAP_HEADER_VAL(hdrB); - break; + if (is_list(sp->nodeB)) { /* B is LEAF */ + Eterm keyB = CAR(list_val(sp->nodeB)); - default: - erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK); - break; - } - break; - } - default: - erl_exit(1, "bad primary tag %ld\r\n", nodeB); - } - break; - } - case TAG_PRIMARY_BOXED: { /* NODE + NODE */ - sp->srcA = boxed_val(nodeA); - hdrA = *sp->srcA++; - ASSERT(is_header(hdrA)); - switch (hdrA & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: { - sp->srcA++; - ASSERT(primary_tag(nodeB) == TAG_PRIMARY_BOXED); - sp->abm = 0xffff; - sp->srcB = boxed_val(nodeB); - hdrB = *sp->srcB++; - ASSERT(is_header(hdrB)); - switch (hdrB & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: - sp->srcB++; - sp->bbm = 0xffff; - break; - case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++; - case HAMT_SUBTAG_NODE_BITMAP: - sp->bbm = MAP_HEADER_VAL(hdrB); - break; - default: - erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK); - } - break; - } - case HAMT_SUBTAG_HEAD_BITMAP: sp->srcA++; - case HAMT_SUBTAG_NODE_BITMAP: { - ASSERT(primary_tag(nodeB) == TAG_PRIMARY_BOXED); - sp->abm = MAP_HEADER_VAL(hdrA); - sp->srcB = boxed_val(nodeB); - hdrB = *sp->srcB++; - ASSERT(is_header(hdrB)); - switch (hdrB & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: + hx = hashmap_restore_hash(th, ctx->lvl, keyB); + sp->bbm = 1 << hashmap_index(hx); + /* keep srcB pointing at the leaf */ + } + else { /* B is NODE */ + sp->srcB = boxed_val(sp->nodeB); + hdrB = *sp->srcB++; + ASSERT(is_header(hdrB)); + switch (hdrB & _HEADER_MAP_SUBTAG_MASK) { + case HAMT_SUBTAG_HEAD_ARRAY: { sp->srcB++; - sp->bbm = 0xffff; - break; - case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++; - case HAMT_SUBTAG_NODE_BITMAP: - sp->bbm = MAP_HEADER_VAL(hdrB); - break; - - default: - erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK); - } - break; - } - default: - erl_exit(1, "bad primary tag %ld\r\n", nodeA); - } - break; - } - default: - erl_exit(1, "bad primary tag %ld\r\n", nodeA); + sp->bbm = 0xffff; + break; + } + case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++; + case HAMT_SUBTAG_NODE_BITMAP: { + sp->bbm = MAP_HEADER_VAL(hdrB); + break; + } + default: + erl_exit(ERTS_ABORT_EXIT, "bad header %ld\r\n", hdrB); + } + } } +merge_nodes: + for (;;) { if (is_value(res)) { /* We have a complete (sub-)tree or leaf */ - if (lvl == 0) + int child_mix; + if (ctx->lvl == 0) break; /* Pop from stack and continue build parent node */ - lvl--; + ctx->lvl--; + child_mix = sp->mix; sp = PSTACK_POP(s); sp->array[sp->ix++] = res; + sp->mix |= child_mix; res = THE_NON_VALUE; if (sp->rbm) { sp->srcA++; sp->srcB++; - keepA = sp->keepA; } } else { /* Start build a node */ sp->ix = 0; sp->rbm = sp->abm | sp->bbm; - ASSERT(!(sp->rbm == 0 && lvl > 0)); + ASSERT(!(sp->rbm == 0 && ctx->lvl > 0)); } + if (--reds <= 0) 
{ + goto trap; + } +resume_from_trap: + while (sp->rbm) { Uint32 next = sp->rbm & (sp->rbm-1); Uint32 bit = sp->rbm ^ next; @@ -1297,43 +1337,123 @@ recurse: if (sp->abm & bit) { if (sp->bbm & bit) { /* Bit clash. Push and resolve by recursive merge */ - if (sp->rbm) { - sp->keepA = keepA; - } - nodeA = *sp->srcA; - nodeB = *sp->srcB; - lvl++; + Eterm* srcA = sp->srcA; + Eterm* srcB = sp->srcB; + ctx->lvl++; sp = PSTACK_PUSH(s); + sp->srcA = srcA; + sp->srcB = srcB; + sp->mix = 0; goto recurse; } else { sp->array[sp->ix++] = *sp->srcA++; + sp->mix |= 1; } } else { ASSERT(sp->bbm & bit); sp->array[sp->ix++] = *sp->srcB++; + sp->mix |= 2; } } - ASSERT(sp->ix == hashmap_bitcount(sp->abm | sp->bbm)); - if (lvl == 0) { - nhp = HAllocX(p, HAMT_HEAD_BITMAP_SZ(sp->ix), HALLOC_EXTRA); - hp = nhp; - *hp++ = (sp->ix == 16 ? MAP_HEADER_HAMT_HEAD_ARRAY - : MAP_HEADER_HAMT_HEAD_BITMAP(sp->abm | sp->bbm)); - *hp++ = size; - } else { - nhp = HAllocX(p, HAMT_NODE_BITMAP_SZ(sp->ix), HALLOC_EXTRA); - hp = nhp; - *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(sp->abm | sp->bbm); - } - memcpy(hp, sp->array, sp->ix * sizeof(Eterm)); - res = make_boxed(nhp); + switch (sp->mix) { + case 0: /* Nodes A and B contain the *EXACT* same sub-trees + => fall through and reuse nodeA */ + + case 1: /* Only unique A stuff => reuse nodeA */ + res = sp->nodeA; + break; + + case 2: /* Only unique B stuff => reuse nodeB */ + res = sp->nodeB; + break; + + case 3: /* We have a mix => must build new node */ + ASSERT(sp->ix == hashmap_bitcount(sp->abm | sp->bbm)); + if (ctx->lvl == 0) { + nhp = HAllocX(p, HAMT_HEAD_BITMAP_SZ(sp->ix), HALLOC_EXTRA); + hp = nhp; + *hp++ = (sp->ix == 16 ? MAP_HEADER_HAMT_HEAD_ARRAY + : MAP_HEADER_HAMT_HEAD_BITMAP(sp->abm | sp->bbm)); + *hp++ = ctx->size; + } else { + nhp = HAllocX(p, HAMT_NODE_BITMAP_SZ(sp->ix), HALLOC_EXTRA); + hp = nhp; + *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(sp->abm | sp->bbm); + } + sys_memcpy(hp, sp->array, sp->ix * sizeof(Eterm)); + res = make_boxed(nhp); + break; + default: + erl_exit(ERTS_ABORT_EXIT, "strange mix %d\r\n", sp->mix); + } + } + + /* Done */ + +#ifdef DEBUG + { + Eterm *head = hashmap_val(res); + Uint size = head[1]; + Uint real_size = hashmap_subtree_size(res); + ASSERT(size == real_size); + } +#endif + + if (ctx != &local_ctx) { + ASSERT(ctx->trap_bin != THE_NON_VALUE); + ASSERT(p->flags & F_DISABLE_GC); + erts_set_gc_state(p, 1); + } + else { + ASSERT(ctx->trap_bin == THE_NON_VALUE); + ASSERT(!(p->flags & F_DISABLE_GC)); } PSTACK_DESTROY(s); UnUseTmpHeap(2,p); + BUMP_REDS(p, (initial_reds - reds) / MAP_MERGE_LOOP_FACTOR); return res; + +trap: /* Yield */ + + if (ctx == &local_ctx) { + Binary* ctx_b = erts_create_magic_binary(sizeof(HashmapMergeContext), + hashmap_merge_ctx_destructor); + ctx = ERTS_MAGIC_BIN_DATA(ctx_b); + sys_memcpy(ctx, &local_ctx, sizeof(HashmapMergeContext)); + hp = HAlloc(p, PROC_BIN_SIZE); + ASSERT(ctx->trap_bin == THE_NON_VALUE); + ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), ctx_b); + + erts_set_gc_state(p, 0); + } + else { + ASSERT(ctx->trap_bin != THE_NON_VALUE); + ASSERT(p->flags & F_DISABLE_GC); + } + + PSTACK_SAVE(s, &ctx->pstack); + + BUMP_ALL_REDS(p); + ERTS_BIF_PREP_TRAP1(trap_ret, &hashmap_merge_trap_export, + p, ctx->trap_bin); + UnUseTmpHeap(2,p); + return trap_ret; } +static Uint hashmap_subtree_size(Eterm node) { + DECLARE_WSTACK(stack); + Uint size = 0; + + hashmap_iterator_init(&stack, node, 0); + while (hashmap_iterator_next(&stack)) { + size++; + } + DESTROY_WSTACK(stack); + return size; +} + + static int hash_cmp(Uint32 
ha, Uint32 hb) { int i; @@ -1756,10 +1876,11 @@ void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) { sz = 16; break; case HAMT_SUBTAG_HEAD_BITMAP: - sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); + case HAMT_SUBTAG_NODE_BITMAP: + sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); break; default: - erl_exit(1, "bad header"); + erl_exit(ERTS_ABORT_EXIT, "bad header"); } WSTACK_PUSH3((*s), (UWord)THE_NON_VALUE, /* end marker */ @@ -1796,7 +1917,7 @@ Eterm* hashmap_iterator_next(ErtsWStack* s) { ASSERT(sz < 17); break; default: - erl_exit(1, "bad header"); + erl_exit(ERTS_ABORT_EXIT, "bad header"); } idx++; @@ -2504,6 +2625,9 @@ int erts_validate_and_sort_flatmap(flatmap_t* mp) return 1; } +#if 0 /* Can't get myself to remove this beautiful piece of code + for probabilistic overestimation of nr of nodes in a hashmap */ + /* Really rough estimate of sqrt(x) * Guaranteed not to be less than sqrt(x) */ @@ -2525,7 +2649,10 @@ static int int_sqrt_ceiling(Uint x) } } -Uint hashmap_over_estimated_heap_size(Uint k) +/* May not be enough if hashing is broken (not uniform) + * or if hell freezes over. + */ +Uint hashmap_overestimated_node_count(Uint k) { /* k is nr of key-value pairs. N(k) is expected nr of nodes in hamt. @@ -2539,12 +2666,9 @@ Uint hashmap_over_estimated_heap_size(Uint k) by 15 std.devs above the average, which gives a probability for overrun less than 1.0e-49 (same magnitude as a git SHA1 collision). */ - Uint max_nodes = 2*k/5 + (15/3)*int_sqrt_ceiling(k); - return (k*2 + /* leaf cons cells */ - k + /* leaf list terms */ - max_nodes*2); /* headers + parent boxed terms */ + return 2*k/5 + 1 + (15/3)*int_sqrt_ceiling(k); } - +#endif BIF_RETTYPE erts_debug_map_info_1(BIF_ALIST_1) { if (is_hashmap(BIF_ARG_1)) { diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h index 2cc6768bfc..b5941c5c9a 100644 --- a/erts/emulator/beam/erl_map.h +++ b/erts/emulator/beam/erl_map.h @@ -91,7 +91,6 @@ Eterm erts_hashmap_insert_up(Eterm *hp, Eterm key, Eterm value, Uint *upsz, struct ErtsEStack_ *sp); int erts_validate_and_sort_flatmap(flatmap_t* map); -Uint hashmap_over_estimated_heap_size(Uint n); void hashmap_iterator_init(struct ErtsWStack_* s, Eterm node, int reverse); Eterm* hashmap_iterator_next(struct ErtsWStack_* s); Eterm* hashmap_iterator_prev(struct ErtsWStack_* s); @@ -191,5 +190,18 @@ typedef struct hashmap_head_s { #define hashmap_index(hash) (((Uint32)hash) & 0xf) +/* hashmap heap size: + [one cons cell + one list term in parent node] per key + [one header + one boxed term in parent node] per inner node + [one header + one size word] for root node +*/ +#define HASHMAP_HEAP_SIZE(KEYS,NODES) ((KEYS)*3 + (NODES)*2) +#ifdef DEBUG +# define HASHMAP_ESTIMATED_NODE_COUNT(KEYS) (KEYS) +#else +# define HASHMAP_ESTIMATED_NODE_COUNT(KEYS) (2*(KEYS)/5) +#endif +#define HASHMAP_ESTIMATED_HEAP_SIZE(KEYS) \ + HASHMAP_HEAP_SIZE(KEYS,HASHMAP_ESTIMATED_NODE_COUNT(KEYS)) #endif diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index ccfc2e6458..f806d2c498 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -93,9 +93,6 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size, #endif ErlHeapFragment* nbp; - /* ToDo: Make use of 'used_size' to avoid realloc - when shrinking just a few words */ - #ifdef DEBUG { Uint off_sz = size < bp->used_size ? 
size : bp->used_size; @@ -110,8 +107,10 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size, } #endif - if (size == bp->used_size) + if (size >= (bp->used_size - bp->used_size / 16)) { + bp->used_size = size; return bp; + } #ifdef HARD_DEBUG dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size); @@ -237,8 +236,7 @@ erts_msg_distext2heap(Process *pp, Eterm msg; Uint tok_sz = 0; Eterm *hp = NULL; - Eterm *hp_end = NULL; - ErlOffHeap *ohp; + ErtsHeapFactory factory; Sint sz; *bpp = NULL; @@ -250,36 +248,26 @@ erts_msg_distext2heap(Process *pp, tok_sz = heap_frag->used_size; sz += tok_sz; } - if (pp) + if (pp) { + ErlOffHeap *ohp; hp = erts_alloc_message_heap(sz, bpp, &ohp, pp, plcksp); + } else { *bpp = new_message_buffer(sz); hp = (*bpp)->mem; - ohp = &(*bpp)->off_heap; } - hp_end = hp + sz; - msg = erts_decode_dist_ext(&hp, ohp, dist_extp); + erts_factory_message_init(&factory, pp, hp, *bpp); + msg = erts_decode_dist_ext(&factory, dist_extp); if (is_non_value(msg)) goto decode_error; if (is_not_nil(*tokenp)) { ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp); - *tokenp = copy_struct(*tokenp, tok_sz, &hp, ohp); + hp = erts_produce_heap(&factory, tok_sz, 0); + *tokenp = copy_struct(*tokenp, tok_sz, &hp, factory.off_heap); erts_cleanup_offheap(&heap_frag->off_heap); } erts_free_dist_ext_copy(dist_extp); - if (hp_end != hp) { - if (!(*bpp)) { - HRelease(pp, hp_end, hp); - } - else { - Uint final_size = hp - &(*bpp)->mem[0]; - Eterm brefs[2] = {msg, *tokenp}; - ASSERT(sz - (hp_end - hp) == final_size); - *bpp = erts_resize_message_buffer(*bpp, final_size, &brefs[0], 2); - msg = brefs[0]; - *tokenp = brefs[1]; - } - } + erts_factory_close(&factory); return msg; decode_error: @@ -288,13 +276,7 @@ erts_msg_distext2heap(Process *pp, erts_cleanup_offheap(&heap_frag->off_heap); } erts_free_dist_ext_copy(dist_extp); - if (*bpp) { - free_message_buffer(*bpp); - *bpp = NULL; - } - else if (hp) { - HRelease(pp, hp_end, hp); - } + *bpp = NULL; return THE_NON_VALUE; } @@ -851,10 +833,11 @@ erts_msg_attached_data_size_aux(ErlMessage *msg) } void -erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *msg) +erts_move_msg_attached_data_to_heap(ErtsHeapFactory* factory, + ErlMessage *msg) { if (is_value(ERL_MESSAGE_TERM(msg))) - erts_move_msg_mbuf_to_heap(hpp, ohp, msg); + erts_move_msg_mbuf_to_heap(&factory->hp, factory->off_heap, msg); else if (msg->data.dist_ext) { ASSERT(msg->data.dist_ext->heap_size >= 0); if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) { @@ -862,12 +845,11 @@ erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *ms heap_frag = erts_dist_ext_trailer(msg->data.dist_ext); ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg), heap_frag->used_size, - hpp, - ohp); + &factory->hp, + factory->off_heap); erts_cleanup_offheap(&heap_frag->off_heap); } - ERL_MESSAGE_TERM(msg) = erts_decode_dist_ext(hpp, - ohp, + ERL_MESSAGE_TERM(msg) = erts_decode_dist_ext(factory, msg->data.dist_ext); erts_free_dist_ext_copy(msg->data.dist_ext); msg->data.dist_ext = NULL; @@ -1134,15 +1116,281 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp, } } +void erts_factory_proc_init(ErtsHeapFactory* factory, + Process* p) +{ + erts_factory_proc_prealloc_init(factory, p, HEAP_LIMIT(p) - HEAP_TOP(p)); +} + +void erts_factory_proc_prealloc_init(ErtsHeapFactory* factory, + Process* p, + Sint size) +{ + factory->mode = FACTORY_HALLOC; + factory->p = p; + factory->hp_start = HAlloc(p, size); + factory->hp = 
factory->hp_start; + factory->hp_end = factory->hp_start + size; + factory->off_heap = &p->off_heap; + factory->off_heap_saved.first = p->off_heap.first; + factory->off_heap_saved.overhead = p->off_heap.overhead; + factory->heap_frags_saved = p->mbuf; + factory->heap_frags = NULL; /* not used */ + factory->alloc_type = 0; /* not used */ +} + +void erts_factory_message_init(ErtsHeapFactory* factory, + Process* rp, + Eterm* hp, + ErlHeapFragment* bp) +{ + if (bp) { + factory->mode = FACTORY_HEAP_FRAGS; + factory->p = NULL; + factory->hp_start = bp->mem; + factory->hp = hp ? hp : bp->mem; + factory->hp_end = bp->mem + bp->alloc_size; + factory->off_heap = &bp->off_heap; + factory->heap_frags = bp; + factory->heap_frags_saved = bp; + factory->alloc_type = ERTS_ALC_T_HEAP_FRAG; + ASSERT(!bp->next); + } + else { + factory->mode = FACTORY_HALLOC; + factory->p = rp; + factory->hp_start = hp; + factory->hp = hp; + factory->hp_end = HEAP_TOP(rp); + factory->off_heap = &rp->off_heap; + factory->heap_frags_saved = rp->mbuf; + factory->heap_frags = NULL; /* not used */ + factory->alloc_type = 0; /* not used */ + } + factory->off_heap_saved.first = factory->off_heap->first; + factory->off_heap_saved.overhead = factory->off_heap->overhead; + + ASSERT(factory->hp >= factory->hp_start && factory->hp <= factory->hp_end); +} + +void erts_factory_static_init(ErtsHeapFactory* factory, + Eterm* hp, + Uint size, + ErlOffHeap* off_heap) +{ + factory->mode = FACTORY_STATIC; + factory->hp_start = hp; + factory->hp = hp; + factory->hp_end = hp + size; + factory->off_heap = off_heap; + factory->off_heap_saved.first = factory->off_heap->first; + factory->off_heap_saved.overhead = factory->off_heap->overhead; +} + +/* When we know the term is an immediate and need no heap. +*/ +void erts_factory_dummy_init(ErtsHeapFactory* factory) +{ + factory->mode = FACTORY_CLOSED; +} + +static void reserve_heap(ErtsHeapFactory*, Uint need, Uint xtra); + Eterm* erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra) { Eterm* res; - if (factory->p) { - res = HAllocX(factory->p, need, xtra); - } else { - res = factory->hp; - factory->hp += need; + + ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED); + if (factory->hp + need > factory->hp_end) { + reserve_heap(factory, need, xtra); } + res = factory->hp; + factory->hp += need; return res; } +Eterm* erts_reserve_heap(ErtsHeapFactory* factory, Uint need) +{ + ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED); + if (factory->hp + need > factory->hp_end) { + reserve_heap(factory, need, 200); + } + return factory->hp; +} + +static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra) +{ + ErlHeapFragment* bp; + + switch (factory->mode) { + case FACTORY_HALLOC: + HRelease(factory->p, factory->hp_end, factory->hp); + factory->hp = HAllocX(factory->p, need, xtra); + factory->hp_end = factory->hp + need; + return; + + case FACTORY_HEAP_FRAGS: + bp = factory->heap_frags; + + if (bp) { + ASSERT(factory->hp > bp->mem); + ASSERT(factory->hp <= factory->hp_end); + ASSERT(factory->hp_end == bp->mem + bp->alloc_size); + + bp->used_size = factory->hp - bp->mem; + } + bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(factory->alloc_type, + ERTS_HEAP_FRAG_SIZE(need+xtra)); + bp->next = factory->heap_frags; + factory->heap_frags = bp; + bp->alloc_size = need + xtra; + bp->used_size = need; + bp->off_heap.first = NULL; + bp->off_heap.overhead = 0; + + factory->hp = bp->mem; + factory->hp_end = bp->mem + bp->alloc_size; + return; + + case FACTORY_STATIC: + case 
FACTORY_CLOSED: + default: + ASSERT(!"Invalid factory mode"); + } +} + +void erts_factory_close(ErtsHeapFactory* factory) +{ + ErlHeapFragment* bp; + + switch (factory->mode) { + case FACTORY_HALLOC: + HRelease(factory->p, factory->hp_end, factory->hp); + break; + + case FACTORY_HEAP_FRAGS: + bp = factory->heap_frags; + + if (bp) { + ASSERT(factory->hp >= bp->mem); + ASSERT(factory->hp <= factory->hp_end); + ASSERT(factory->hp_end == bp->mem + bp->alloc_size); + + bp->used_size = factory->hp - bp->mem; + } + break; + case FACTORY_STATIC: break; + case FACTORY_CLOSED: break; + default: + ASSERT(!"Invalid factory mode"); + } + factory->mode = FACTORY_CLOSED; +} + +void erts_factory_trim_and_close(ErtsHeapFactory* factory, + Eterm *brefs, Uint brefs_size) +{ + if (factory->mode == FACTORY_HEAP_FRAGS) { + ErlHeapFragment* bp = factory->heap_frags; + if (bp->next == NULL) { + Uint used_sz = factory->hp - bp->mem; + ASSERT(used_sz <= bp->alloc_size); + factory->heap_frags = erts_resize_message_buffer(bp, used_sz, + brefs, brefs_size); + factory->mode = FACTORY_CLOSED; + return; + } + /*else we don't trim multi fragmented messages for now */ + } + erts_factory_close(factory); +} + +void erts_factory_undo(ErtsHeapFactory* factory) +{ + ErlHeapFragment* bp; + struct erl_off_heap_header *hdr, **hdr_nextp; + + switch (factory->mode) { + case FACTORY_HALLOC: + case FACTORY_STATIC: + /* Cleanup off-heap + */ + hdr_nextp = NULL; + for (hdr = factory->off_heap->first; + hdr != factory->off_heap_saved.first; + hdr = hdr->next) { + + hdr_nextp = &hdr->next; + } + + if (hdr_nextp != NULL) { + *hdr_nextp = NULL; + erts_cleanup_offheap(factory->off_heap); + factory->off_heap->first = factory->off_heap_saved.first; + factory->off_heap->overhead = factory->off_heap_saved.overhead; + } + + if (factory->mode == FACTORY_HALLOC) { + /* Free heap frags + */ + bp = factory->p->mbuf; + if (bp != factory->heap_frags_saved) { + do { + ErlHeapFragment *next_bp = bp->next; + ASSERT(bp->off_heap.first == NULL); + ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp, + ERTS_HEAP_FRAG_SIZE(bp->alloc_size)); + bp = next_bp; + } while (bp != factory->heap_frags_saved); + + factory->p->mbuf = bp; + } + + /* Rollback heap top + */ + if (factory->heap_frags_saved == NULL) { /* No heap frags when we started */ + ASSERT(factory->hp_start >= HEAP_START(factory->p)); + ASSERT(factory->hp_start <= HEAP_LIMIT(factory->p)); + + HEAP_TOP(factory->p) = factory->hp_start; + } + else { + ASSERT(factory->heap_frags_saved == factory->p->mbuf); + if (factory->hp_start == factory->heap_frags_saved->mem) { + factory->p->mbuf = factory->p->mbuf->next; + ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, factory->heap_frags_saved, + ERTS_HEAP_FRAG_SIZE(factory->heap_frags_saved->alloc_size)); + } + else if (factory->hp_start != factory->hp_end) { + unsigned remains = factory->hp_start - factory->heap_frags_saved->mem; + ASSERT(remains > 0 && remains < factory->heap_frags_saved->used_size); + factory->heap_frags_saved->used_size = remains; + } + } + } + break; + + case FACTORY_HEAP_FRAGS: + bp = factory->heap_frags; + do { + ErlHeapFragment* next_bp = bp->next; + + erts_cleanup_offheap(&bp->off_heap); + ERTS_HEAP_FREE(factory->alloc_type, (void *) bp, + ERTS_HEAP_FRAG_SIZE(bp->size)); + bp = next_bp; + }while (bp != NULL); + break; + + case FACTORY_CLOSED: break; + default: + ASSERT(!"Invalid factory mode"); + } + factory->mode = FACTORY_CLOSED; +#ifdef DEBUG + factory->p = NULL; + factory->hp = NULL; + factory->heap_frags = NULL; +#endif +} + diff --git 
a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 1e1dafee90..705ba5e506 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -51,6 +51,45 @@ typedef struct erl_off_heap { (OHP)->first = NULL; \ (OHP)->overhead = 0; \ } while (0) + +typedef struct { + enum { + FACTORY_CLOSED = 0, + FACTORY_HALLOC, + FACTORY_HEAP_FRAGS, + FACTORY_STATIC + } mode; + Process* p; + Eterm* hp_start; + Eterm* hp; + Eterm* hp_end; + struct erl_heap_fragment* heap_frags; + struct erl_heap_fragment* heap_frags_saved; + ErlOffHeap* off_heap; + ErlOffHeap off_heap_saved; + Uint32 alloc_type; +} ErtsHeapFactory; + +void erts_factory_proc_init(ErtsHeapFactory*, Process*); +void erts_factory_proc_prealloc_init(ErtsHeapFactory*, Process*, Sint size); +void erts_factory_message_init(ErtsHeapFactory*, Process*, Eterm* hp, struct erl_heap_fragment*); +void erts_factory_static_init(ErtsHeapFactory*, Eterm* hp, Uint size, ErlOffHeap*); +void erts_factory_dummy_init(ErtsHeapFactory*); + +Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra); +Eterm* erts_reserve_heap(ErtsHeapFactory*, Uint need); +void erts_factory_close(ErtsHeapFactory*); +void erts_factory_trim_and_close(ErtsHeapFactory*,Eterm *brefs, Uint brefs_size); +void erts_factory_undo(ErtsHeapFactory*); + +#ifdef CHECK_FOR_HOLES +# define ERTS_FACTORY_HOLE_CHECK(f) do { \ + /*if ((f)->p) erts_check_for_holes((f)->p);*/ \ + } while (0) +#else +# define ERTS_FACTORY_HOLE_CHECK(p) +#endif + #include "external.h" #include "erl_process.h" @@ -68,21 +107,6 @@ struct erl_heap_fragment { Eterm mem[1]; /* Data */ }; -typedef struct { - Process* p; - Eterm* hp; -} ErtsHeapFactory; - -Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra); -#ifdef CHECK_FOR_HOLES -# define ERTS_FACTORY_HOLE_CHECK(f) do { \ - if ((f)->p) erts_check_for_holes((f)->p); \ - } while (0) -#else -# define ERTS_FACTORY_HOLE_CHECK(p) -#endif - - typedef struct erl_mesg { struct erl_mesg* next; /* Next message */ union { @@ -139,7 +163,7 @@ typedef struct { *(p)->msg.last = (mp); \ (p)->msg.last = &(mp)->next; \ (p)->msg.len++; \ -} while(0) +} while (0) #ifdef ERTS_SMP @@ -212,17 +236,23 @@ do { \ do { \ if ((M)->data.attached) { \ Uint need__ = erts_msg_attached_data_size((M)); \ + { SWPO ; } \ if ((ST) - (HT) >= need__) { \ - Uint *htop__ = (HT); \ - erts_move_msg_attached_data_to_heap(&htop__, &MSO((P)), (M));\ - ASSERT(htop__ - (HT) <= need__); \ - (HT) = htop__; \ + ErtsHeapFactory factory__; \ + erts_factory_proc_prealloc_init(&factory__, (P), need__); \ + erts_move_msg_attached_data_to_heap(&factory__, (M)); \ + erts_factory_close(&factory__); \ + if ((P)->mbuf != NULL) { \ + /* Heap was exhausted by messages. This is a rare case */ \ + /* that can currently (OTP 18) only happen if hamts are */ \ + /* far exceeding the estimated heap size. Do GC. 
*/ \ + (FC) -= erts_garbage_collect((P), 0, NULL, 0); \ + } \ } \ else { \ - { SWPO ; } \ (FC) -= erts_garbage_collect((P), 0, NULL, 0); \ - { SWPI ; } \ } \ + { SWPI ; } \ ASSERT(!(M)->data.attached); \ } \ } while (0) @@ -266,23 +296,21 @@ void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp); void erts_move_msg_mbuf_to_heap(Eterm**, ErlOffHeap*, ErlMessage *); Uint erts_msg_attached_data_size_aux(ErlMessage *msg); -void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *); - +void erts_move_msg_attached_data_to_heap(ErtsHeapFactory*, ErlMessage *); Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **, Eterm *, ErtsDistExternal *); void erts_cleanup_offheap(ErlOffHeap *offheap); -ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg); +ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment*); ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg) +ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment* bp) { - const ErlHeapFragment *bp; Uint sz = 0; - for (bp = msg->data.heap_frag; bp!=NULL; bp=bp->next) { + for ( ; bp!=NULL; bp=bp->next) { sz += bp->used_size; } return sz; @@ -292,7 +320,7 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg) { ASSERT(msg->data.attached); if (is_value(ERL_MESSAGE_TERM(msg))) - return erts_msg_used_frag_sz(msg); + return erts_used_frag_sz(msg->data.heap_frag); else if (msg->data.dist_ext->heap_size < 0) return erts_msg_attached_data_size_aux(msg); else { diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index 45fc949b81..27f6c6f00d 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -1208,7 +1208,11 @@ typedef struct enif_resource_t struct enif_resource_type_t* type; #ifdef DEBUG erts_refc_t nif_refc; +# ifdef ARCH_32 + byte align__[4]; +# endif #endif + char data[1]; }ErlNifResource; @@ -1384,7 +1388,7 @@ static void rollback_opened_resource_types(void) static void nif_resource_dtor(Binary* bin) { - ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(bin); + ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin); ErlNifResourceType* type = resource->type; ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor); @@ -1405,8 +1409,10 @@ static void nif_resource_dtor(Binary* bin) void* enif_alloc_resource(ErlNifResourceType* type, size_t size) { - Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor); - ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin); + Binary* bin = erts_create_magic_binary_x(SIZEOF_ErlNifResource(size), + &nif_resource_dtor, + 1); /* unaligned */ + ErlNifResource* resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin); ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */ resource->type = type; @@ -1421,7 +1427,7 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t size) void enif_release_resource(void* obj) { ErlNifResource* resource = DATA_TO_RESOURCE(obj); - ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource); + ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor); #ifdef DEBUG @@ -1435,7 +1441,7 @@ void enif_release_resource(void* obj) void enif_keep_resource(void* obj) { ErlNifResource* resource = DATA_TO_RESOURCE(obj); - ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource); + ErtsBinary* bin = 
ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor); #ifdef DEBUG @@ -1447,7 +1453,7 @@ void enif_keep_resource(void* obj) ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj) { ErlNifResource* resource = DATA_TO_RESOURCE(obj); - ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource); + ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource); Eterm* hp = alloc_heap(env,PROC_BIN_SIZE); return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary); } @@ -1476,7 +1482,7 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ return 0; / * Or should we allow "resource binaries" as handles? * / }*/ mbin = pb->val; - resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(mbin); + resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin); if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor || resource->type != type) { return 0; @@ -1488,8 +1494,8 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ size_t enif_sizeof_resource(void* obj) { ErlNifResource* resource = DATA_TO_RESOURCE(obj); - Binary* bin = &ERTS_MAGIC_BIN_FROM_DATA(resource)->binary; - return ERTS_MAGIC_BIN_DATA_SIZE(bin) - offsetof(ErlNifResource,data); + Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary; + return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErlNifResource,data); } @@ -2712,6 +2718,8 @@ erts_unload_nif(struct erl_module_nif* lib) void erl_nif_init() { + ERTS_CT_ASSERT((offsetof(ErlNifResource,data) % 8) == ERTS_MAGIC_BIN_BYTES_TO_ALIGN); + resource_type_list.next = &resource_type_list; resource_type_list.prev = &resource_type_list; resource_type_list.dtor = NULL; diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index 6d827c6bda..0950d7e7ef 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -51,6 +51,9 @@ static Uint dist_entries; static int references_atoms_need_init = 1; +static ErtsMonotonicTime orig_node_tab_delete_delay; +static ErtsMonotonicTime node_tab_delete_delay; + /* -- The distribution table ---------------------------------------------- */ #ifdef DEBUG @@ -290,21 +293,46 @@ DistEntry *erts_find_dist_entry(Eterm sysname) return res; } -void erts_delete_dist_entry(DistEntry *dep) +static void try_delete_dist_entry(void *vdep) +{ + DistEntry *dep = (DistEntry *) vdep; + erts_aint_t refc; + + erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + /* + * Another thread might have looked up this dist entry after + * we decided to delete it (refc became zero). If so, the other + * thread incremented refc twice. Once for the new reference + * and once for this thread. + * + * If refc reach -1, noone has used the entry since we + * set up the timer. Delete the entry. + * + * If refc reach 0, the entry is currently not in use + * but has been used since we set up the timer. Set up a + * new timer. + * + * If refc > 0, the entry is in use. Keep the entry. + */ + refc = erts_refc_dectest(&dep->refc, -1); + if (refc == -1) + (void) hash_erase(&erts_dist_table, (void *) dep); + erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + + if (refc == 0) + erts_schedule_delete_dist_entry(dep); +} + +void erts_schedule_delete_dist_entry(DistEntry *dep) { ASSERT(dep != erts_this_dist_entry); - if(dep != erts_this_dist_entry) { - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); - /* - * Another thread might have looked up this dist entry after - * we decided to delete it (refc became zero). 
If so, the other - * thread incremented refc twice. Once for the new reference - * and once for this thread. Therefore, delete dist entry if - * refc is 0 or -1 after a decrement. - */ - if (erts_refc_dectest(&dep->refc, -1) <= 0) - (void) hash_erase(&erts_dist_table, (void *) dep); - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + if (dep != erts_this_dist_entry) { + if (node_tab_delete_delay == 0) + try_delete_dist_entry((void *) dep); + else if (node_tab_delete_delay > 0) + erts_start_timer_callback(node_tab_delete_delay, + try_delete_dist_entry, + (void *) dep); } } @@ -556,14 +584,14 @@ erts_node_table_size(void) #endif int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); #ifdef DEBUG hash_get_info(&hi, &erts_node_table); ASSERT(node_entries == hi.objs); #endif res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode); if (lock) - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); return res; } @@ -572,10 +600,10 @@ erts_node_table_info(int to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); hash_info(to, to_arg, &erts_node_table); if (lock) - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); } @@ -609,21 +637,46 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation) return res; } -void erts_delete_node(ErlNode *enp) +static void try_delete_node(void *venp) +{ + ErlNode *enp = (ErlNode *) venp; + erts_aint_t refc; + + erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + /* + * Another thread might have looked up this node after we + * decided to delete it (refc became zero). If so, the other + * thread incremented refc twice. Once for the new reference + * and once for this thread. + * + * If refc reach -1, noone has used the entry since we + * set up the timer. Delete the entry. + * + * If refc reach 0, the entry is currently not in use + * but has been used since we set up the timer. Set up a + * new timer. + * + * If refc > 0, the entry is in use. Keep the entry. + */ + refc = erts_refc_dectest(&enp->refc, -1); + if (refc == -1) + (void) hash_erase(&erts_node_table, (void *) enp); + erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + + if (refc == 0) + erts_schedule_delete_node(enp); +} + +void erts_schedule_delete_node(ErlNode *enp) { ASSERT(enp != erts_this_node); - if(enp != erts_this_node) { - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); - /* - * Another thread might have looked up this node after we - * decided to delete it (refc became zero). If so, the other - * thread incremented refc twice. Once for the new reference - * and once for this thread. Therefore, delete node if refc - * is 0 or -1 after a decrement. 
- */ - if (erts_refc_dectest(&enp->refc, -1) <= 0) - (void) hash_erase(&erts_node_table, (void *) enp); - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + if (enp != erts_this_node) { + if (node_tab_delete_delay == 0) + try_delete_node((void *) enp); + else if (node_tab_delete_delay > 0) + erts_start_timer_callback(node_tab_delete_delay, + try_delete_node, + (void *) enp); } } @@ -651,7 +704,7 @@ static void print_node(void *venp, void *vpndp) erts_print(pndp->to, pndp->to_arg, " %d", enp->creation); #ifdef DEBUG erts_print(pndp->to, pndp->to_arg, " (refc=%ld)", - erts_refc_read(&enp->refc, 1)); + erts_refc_read(&enp->refc, 0)); #endif pndp->no_sysname++; } @@ -674,13 +727,13 @@ void erts_print_node_info(int to, pnd.no_total = 0; if (lock) - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); hash_foreach(&erts_node_table, print_node, (void *) &pnd); if (pnd.no_sysname != 0) { erts_print(to, to_arg, "\n"); } if (lock) - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); if(no_sysname) *no_sysname = pnd.no_sysname; @@ -714,11 +767,28 @@ erts_set_this_node(Eterm sysname, Uint creation) } -void erts_init_node_tables(void) +Uint +erts_delayed_node_table_gc(void) +{ + if (node_tab_delete_delay < 0) + return (Uint) ERTS_NODE_TAB_DELAY_GC_INFINITY; + if (node_tab_delete_delay == 0) + return (Uint) 0; + return (Uint) ((node_tab_delete_delay-1)/1000 + 1); +} + +void erts_init_node_tables(int dd_sec) { erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; HashFunctions f; + if (dd_sec == ERTS_NODE_TAB_DELAY_GC_INFINITY) + node_tab_delete_delay = (ErtsMonotonicTime) -1; + else + node_tab_delete_delay = ((ErtsMonotonicTime) dd_sec)*1000; + + orig_node_tab_delete_delay = node_tab_delete_delay; + rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; @@ -847,6 +917,7 @@ static Eterm AM_dist_references; static Eterm AM_node_references; static Eterm AM_system; static Eterm AM_timer; +static Eterm AM_delayed_delete_timer; static void setup_reference_table(void); static Eterm reference_table_term(Uint **hpp, Uint *szp); @@ -881,8 +952,10 @@ typedef struct dist_referrer_ { int heap_ref; int node_ref; int ctrl_ref; + int system_ref; Eterm id; Uint creation; + Uint id_heap[ID_HEAP_SIZE]; } DistReferrer; typedef struct { @@ -931,6 +1004,7 @@ erts_get_node_and_dist_references(struct process *proc) INIT_AM(node_references); INIT_AM(timer); INIT_AM(system); + INIT_AM(delayed_delete_timer); references_atoms_need_init = 0; } @@ -988,17 +1062,25 @@ insert_dist_referrer(ReferredDist *referred_dist, sizeof(DistReferrer)); drp->next = referred_dist->referrers; referred_dist->referrers = drp; - drp->id = id; + if(IS_CONST(id)) + drp->id = id; + else { + Uint *hp = &drp->id_heap[0]; + ASSERT(is_tuple(id)); + drp->id = copy_struct(id, size_object(id), &hp, NULL); + } drp->creation = creation; drp->heap_ref = 0; drp->node_ref = 0; drp->ctrl_ref = 0; + drp->system_ref = 0; } switch (type) { case NODE_REF: drp->node_ref++; break; case CTRL_REF: drp->ctrl_ref++; break; case HEAP_REF: drp->heap_ref++; break; + case SYSTEM_REF: drp->system_ref++; break; default: ASSERT(0); } } @@ -1261,6 +1343,33 @@ insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp) #endif static void +insert_delayed_delete_node(void *state, + ErtsMonotonicTime timeout_pos, + void *vnp) +{ + DeclareTmpHeapNoproc(heap,3); + UseTmpHeapNoproc(3); + insert_node((ErlNode *) vnp, + SYSTEM_REF, + 
TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer)); + UnUseTmpHeapNoproc(3); +} + +static void +insert_delayed_delete_dist_entry(void *state, + ErtsMonotonicTime timeout_pos, + void *vdep) +{ + DeclareTmpHeapNoproc(heap,3); + UseTmpHeapNoproc(3); + insert_dist_entry((DistEntry *) vdep, + SYSTEM_REF, + TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer), + 0); + UnUseTmpHeapNoproc(3); +} + +static void setup_reference_table(void) { ErlHeapFragment *hfp; @@ -1288,6 +1397,13 @@ setup_reference_table(void) /* Go through the hole system, and build a table of all references to ErlNode and DistEntry structures */ + erts_debug_callback_timer_foreach(try_delete_node, + insert_delayed_delete_node, + NULL); + erts_debug_callback_timer_foreach(try_delete_dist_entry, + insert_delayed_delete_dist_entry, + NULL); + UseTmpHeapNoproc(3); insert_node(erts_this_node, SYSTEM_REF, @@ -1601,7 +1717,7 @@ reference_table_term(Uint **hpp, Uint *szp) tup = MK_2TUP(referred_nodes[i].node->sysname, MK_UINT(referred_nodes[i].node->creation)); - tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)), nril); + tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 0)), nril); nl = MK_CONS(tup, nl); } @@ -1624,6 +1740,10 @@ reference_table_term(Uint **hpp, Uint *szp) tup = MK_2TUP(AM_heap, MK_UINT(drp->heap_ref)); drl = MK_CONS(tup, drl); } + if(drp->system_ref) { + tup = MK_2TUP(AM_system, MK_UINT(drp->system_ref)); + drl = MK_CONS(tup, drl); + } if (is_internal_pid(drp->id)) { ASSERT(!drp->node_ref); @@ -1633,6 +1753,14 @@ reference_table_term(Uint **hpp, Uint *szp) ASSERT(drp->ctrl_ref && !drp->node_ref); tup = MK_2TUP(AM_port, drp->id); } + else if (is_tuple(drp->id)) { + Eterm *t; + ASSERT(drp->system_ref && !drp->node_ref + && !drp->ctrl_ref && !drp->heap_ref); + t = tuple_val(drp->id); + ASSERT(2 == arityval(t[0])); + tup = MK_2TUP(t[1], t[2]); + } else { ASSERT(!drp->ctrl_ref && drp->node_ref); ASSERT(is_atom(drp->id)); @@ -1650,7 +1778,7 @@ reference_table_term(Uint **hpp, Uint *szp) /* DistList = [{Dist, Refc, ReferenceIdList}] */ tup = MK_3TUP(referred_dists[i].dist->sysname, - MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)), + MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 0)), dril); dl = MK_CONS(tup, dl); } @@ -1705,3 +1833,15 @@ delete_reference_table(void) } } +void +erts_debug_test_node_tab_delayed_delete(Sint64 millisecs) +{ + erts_smp_thr_progress_block(); + + if (millisecs < 0) + node_tab_delete_delay = orig_node_tab_delete_delay; + else + node_tab_delete_delay = millisecs; + + erts_smp_thr_progress_unblock(); +} diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h index af60071ea5..54c5cd1d11 100644 --- a/erts/emulator/beam/erl_node_tables.h +++ b/erts/emulator/beam/erl_node_tables.h @@ -46,6 +46,10 @@ #define ERTS_PORT_TASK_ONLY_BASIC_TYPES__ #include "erl_port_task.h" #undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__ + +#define ERTS_NODE_TAB_DELAY_GC_DEFAULT (60) +#define ERTS_NODE_TAB_DELAY_GC_MAX (100*1000*1000) +#define ERTS_NODE_TAB_DELAY_GC_INFINITY (ERTS_NODE_TAB_DELAY_GC_MAX+1) #define ERST_INTERNAL_CHANNEL_NO 0 @@ -166,20 +170,21 @@ extern DistEntry *erts_this_dist_entry; extern ErlNode *erts_this_node; extern char *erts_this_node_sysname; /* must match erl_node_tables.c */ +Uint erts_delayed_node_table_gc(void); DistEntry *erts_channel_no_to_dist_entry(Uint); DistEntry *erts_sysname_to_connected_dist_entry(Eterm); DistEntry *erts_find_or_insert_dist_entry(Eterm); DistEntry *erts_find_dist_entry(Eterm); 
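/* Illustrative sketch, not part of the patch: the try_delete_node() and
 * try_delete_dist_entry() callbacks above decrement the refc against -1 so
 * that a single decrement distinguishes three cases. The helper below only
 * models that decision; its name and the erase/re-arm callbacks are
 * hypothetical stand-ins for hash_erase() and erts_start_timer_callback().
 */
static void delayed_delete_decision(erts_refc_t *refc,
                                    void (*erase_entry)(void),
                                    void (*rearm_timer)(void))
{
    erts_aint_t r = erts_refc_dectest(refc, -1);
    if (r == -1)
        erase_entry();      /* untouched since the timer was armed: delete */
    else if (r == 0)
        rearm_timer();      /* used meanwhile but idle again: retry later  */
    /* r > 0: the entry is still in active use; keep it */
}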
-void erts_delete_dist_entry(DistEntry *); +void erts_schedule_delete_dist_entry(DistEntry *); Uint erts_dist_table_size(void); void erts_dist_table_info(int, void *); void erts_set_dist_entry_not_connected(DistEntry *); void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint); ErlNode *erts_find_or_insert_node(Eterm, Uint); -void erts_delete_node(ErlNode *); +void erts_schedule_delete_node(ErlNode *); void erts_set_this_node(Eterm, Uint); Uint erts_node_table_size(void); -void erts_init_node_tables(void); +void erts_init_node_tables(int); void erts_node_table_info(int, void *); void erts_print_node_info(int, void *, Eterm, int*, int*); Eterm erts_get_node_and_dist_references(struct process *); @@ -204,7 +209,7 @@ erts_deref_dist_entry(DistEntry *dep) { ASSERT(dep); if (erts_refc_dectest(&dep->refc, 0) == 0) - erts_delete_dist_entry(dep); + erts_schedule_delete_dist_entry(dep); } ERTS_GLB_INLINE void @@ -212,7 +217,7 @@ erts_deref_node_entry(ErlNode *np) { ASSERT(np); if (erts_refc_dectest(&np->refc, 0) == 0) - erts_delete_node(np); + erts_schedule_delete_node(np); } ERTS_GLB_INLINE void @@ -253,5 +258,6 @@ erts_smp_de_links_unlock(DistEntry *dep) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ +void erts_debug_test_node_tab_delayed_delete(Sint64 millisecs); #endif diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h index 3920fae2d9..66cbb67a0d 100644 --- a/erts/emulator/beam/erl_port.h +++ b/erts/emulator/beam/erl_port.h @@ -266,8 +266,7 @@ erts_prtsd_set(Port *prt, int ix, void *data) #endif -extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */ -extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */ +Eterm erts_request_io_bytes(Process *c_p); /* port status flags */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 4940ffc4a0..88c1b5c121 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -165,6 +165,9 @@ Uint erts_no_dirty_cpu_schedulers; Uint erts_no_dirty_io_schedulers; #endif +static char *erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_NO_FLAGS] = {0}; +int erts_aux_work_no_flags = ERTS_SSI_AUX_WORK_NO_FLAGS; + #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY (4*1024*1024) #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_LAZY (512*1024) #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_MEDIUM (64*1024) @@ -508,6 +511,7 @@ dbg_chk_aux_work_val(erts_aint32_t value) #ifdef ERTS_SSI_AUX_WORK_REAP_PORTS valid |= ERTS_SSI_AUX_WORK_REAP_PORTS; #endif + valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; if (~valid & value) erl_exit(ERTS_ABORT_EXIT, @@ -566,6 +570,41 @@ erts_pre_init_process(void) erts_tsd_key_create(&sched_data_key, "erts_sched_data_key"); #endif + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX] + = "DELAYED_AW_WAKEUP"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_IX] + = "DD"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX] + = "DD_THR_PRGR"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX] + = "FIX_ALLOC_DEALLOC"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX] + = "FIX_ALLOC_LOWER_LIM"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX] + = "THR_PRGR_LATER_OP"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX] + = "CNCLD_TMRS"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX] + = "CNCLD_TMRS_THR_PRGR"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_IX] + = "ASYNC_READY"; + 
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX] + = "ASYNC_READY_CLEAN"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX] + = "MISC_THR_PRGR"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX] + = "MISC"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX] + = "CHECK_CHILDREN"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX] + = "SET_TMO"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX] + = "MSEG_CACHE_CHECK"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX] + = "REAP_PORTS"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX] + = "DEBUG_WAIT_COMPLETED"; + #ifdef ERTS_ENABLE_LOCK_CHECK { int ix; @@ -1193,11 +1232,11 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi, ERTS_DBG_CHK_SSI_AUX_WORK(ssi); old_flgs = erts_atomic32_read_nob(&ssi->aux_work); - if ((old_flgs & flgs) == 0) { + if ((old_flgs & flgs) != flgs) { old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs); - if ((old_flgs & flgs) == 0) { + if ((old_flgs & flgs) != flgs) { #ifdef ERTS_SMP erts_sched_poke(ssi); #else @@ -1217,7 +1256,7 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi, old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs); - if ((old_flgs & flgs) == 0) { + if ((old_flgs & flgs) != flgs) { #ifdef ERTS_SMP erts_sched_poke(ssi); #else @@ -1715,11 +1754,6 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin awdp->dd.thr_prgr = wakeup; haw_thr_prgr_soft_wakeup(awdp, wakeup); } - else if (awdp->dd.completed_callback) { - awdp->dd.completed_callback(awdp->dd.completed_arg); - awdp->dd.completed_callback = NULL; - awdp->dd.completed_arg = NULL; - } return aux_work & ~ERTS_SSI_AUX_WORK_DD; } @@ -1761,11 +1795,6 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i } else { unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR); - if (awdp->dd.completed_callback) { - awdp->dd.completed_callback(awdp->dd.completed_arg); - awdp->dd.completed_callback = NULL; - awdp->dd.completed_arg = NULL; - } } return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR; @@ -1955,78 +1984,142 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *), #endif } -#ifdef ERTS_SMP +static ERTS_INLINE erts_aint32_t +handle_debug_wait_completed(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) +{ + ErtsSchedulerSleepInfo *ssi = awdp->ssi; + erts_aint32_t saved_aux_work, flags; + +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif + + flags = awdp->debug.wait_completed.flags; + + if (aux_work & flags) + return aux_work; + + saved_aux_work = erts_atomic32_read_acqb(&ssi->aux_work); -static erts_atomic32_t completed_dealloc_count; + if (saved_aux_work & flags) + return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; + + awdp->debug.wait_completed.callback(awdp->debug.wait_completed.arg); + + awdp->debug.wait_completed.flags = 0; + awdp->debug.wait_completed.callback = NULL; + awdp->debug.wait_completed.arg = NULL; + + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED); + + return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; +} + +static erts_atomic32_t debug_wait_completed_count; +static int debug_wait_completed_flags; static void -completed_dealloc(void *vproc) +thr_debug_wait_completed(void *vproc) { - if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) { + if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == 0) { erts_resume((Process *) vproc, 
(ErtsProcLocks) 0); erts_proc_dec_refc((Process *) vproc); } } static void -setup_completed_dealloc(void *vproc) +setup_thr_debug_wait_completed(void *vproc) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ErtsAuxWorkData *awdp = (esdp - ? &esdp->aux_work_data - : aux_thread_aux_work_data); - erts_alloc_fix_alloc_shrink(awdp->sched_id, 0); - set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DD); - awdp->dd.completed_callback = completed_dealloc; - awdp->dd.completed_arg = vproc; + ErtsAuxWorkData *awdp; + erts_aint32_t wait_flags, aux_work_flags; +#ifdef ERTS_SMP + awdp = esdp ? &esdp->aux_work_data : aux_thread_aux_work_data; +#else + awdp = &esdp->aux_work_data; +#endif + + wait_flags = 0; + aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; + + if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS) { + erts_alloc_fix_alloc_shrink(awdp->sched_id, 0); + wait_flags |= (ERTS_SSI_AUX_WORK_DD + | ERTS_SSI_AUX_WORK_DD_THR_PRGR + | ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP); +#ifdef ERTS_SMP + aux_work_flags |= ERTS_SSI_AUX_WORK_DD; +#endif + } + + if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) { + wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS + | ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR + | ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP); +#ifdef ERTS_SMP + if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)) + aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS; +#endif + } + + set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags); + + awdp->debug.wait_completed.flags = wait_flags; + awdp->debug.wait_completed.callback = thr_debug_wait_completed; + awdp->debug.wait_completed.arg = vproc; } static void -prep_setup_completed_dealloc(void *vproc) +prep_setup_thr_debug_wait_completed(void *vproc) { - erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers+1); - if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == count) { + erts_aint32_t count = (erts_aint32_t) erts_no_schedulers; +#ifdef ERTS_SMP + count += 1; /* aux thread */ +#endif + if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) { /* scheduler threads */ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers, - setup_completed_dealloc, + setup_thr_debug_wait_completed, vproc); +#ifdef ERTS_SMP /* aux_thread */ erts_schedule_misc_aux_work(0, - setup_completed_dealloc, + setup_thr_debug_wait_completed, vproc); +#endif } } -#endif /* ERTS_SMP */ int -erts_debug_wait_deallocations(Process *c_p) +erts_debug_wait_completed(Process *c_p, int flags) { -#ifndef ERTS_SMP - erts_alloc_fix_alloc_shrink(1, 0); - return 1; -#else /* Only one process at a time can do this */ - erts_aint32_t count = (erts_aint32_t) (2*(erts_no_schedulers+1)); - if (0 == erts_atomic32_cmpxchg_mb(&completed_dealloc_count, + erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers); +#ifdef ERTS_SMP + count += 2; /* aux thread */ +#endif + if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count, count, 0)) { + debug_wait_completed_flags = flags; erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); erts_proc_inc_refc(c_p); /* scheduler threads */ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers, - prep_setup_completed_dealloc, + prep_setup_thr_debug_wait_completed, (void *) c_p); +#ifdef ERTS_SMP /* aux_thread */ erts_schedule_misc_aux_work(0, - prep_setup_completed_dealloc, + prep_setup_thr_debug_wait_completed, (void *) c_p); +#endif return 1; } return 0; -#endif } @@ -2227,6 +2320,14 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) 
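/* Illustrative sketch, not part of the patch: handle_debug_wait_completed()
 * above only fires its callback once none of the awaited aux-work flags are
 * still pending, which is why the hunk below insists on checking
 * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED last. Simplified model (the real
 * code also re-reads ssi->aux_work with an acquire barrier before deciding):
 */
static erts_aint32_t wait_completed_model(erts_aint32_t aux_work,
                                          erts_aint32_t awaited,
                                          void (*done)(void *), void *arg)
{
    if (aux_work & awaited)
        return aux_work;    /* awaited work still pending; try again later */
    done(arg);              /* everything being waited for has drained     */
    return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
}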
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS, handle_reap_ports); + /* + * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED *need* to be + * the last flag checked! + */ + + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED, + handle_debug_wait_completed); + ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); #ifdef ERTS_SMP @@ -5354,8 +5455,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST; awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; - awdp->dd.completed_callback = NULL; - awdp->dd.completed_arg = NULL; awdp->cncld_tmrs.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; awdp->later_op.thr_prgr = ERTS_THR_PRGR_VAL_FIRST; awdp->later_op.size = 0; @@ -5386,6 +5485,9 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->delayed_wakeup.sched2jix[i] = -1; } #endif + awdp->debug.wait_completed.flags = 0; + awdp->debug.wait_completed.callback = NULL; + awdp->debug.wait_completed.arg = NULL; } static void @@ -5440,6 +5542,9 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->thr_id = (Uint32) num; erts_sched_bif_unique_init(esdp); + esdp->io.out = (Uint64) 0; + esdp->io.in = (Uint64) 0; + if (daww_ptr) { init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr); #ifdef ERTS_SMP @@ -5694,11 +5799,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online init_swtreq_alloc(); #endif + erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */ + debug_wait_completed_flags = 0; #ifdef ERTS_SMP - erts_atomic32_init_nob(&completed_dealloc_count, 0); /* debug only */ - aux_thread_aux_work_data = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, sizeof(ErtsAuxWorkData)); @@ -10855,7 +10960,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
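/* Illustrative sketch, not part of the patch: with the flag bits now derived
 * from ErtsSsiAuxWorkFlagIndex (see the erl_process.h hunk below), a set bit
 * at index i maps to erts_aux_work_flag_descr[i], which is what the rewritten
 * erts_print_scheduler_info() loop further down depends on. The helper name
 * below is hypothetical.
 */
static const char *aux_work_flag_name(erts_aint32_t flg_bit)
{
    int i;
    for (i = 0; i < ERTS_SSI_AUX_WORK_NO_FLAGS; i++)
        if (flg_bit == (((erts_aint32_t) 1) << i))
            return erts_aux_work_flag_descr[i] ? erts_aux_work_flag_descr[i]
                                               : "UNKNOWN";
    return "UNKNOWN";
}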
p->msg_inq.len = 0; #endif p->bif_timers = NULL; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT p->accessor_bif_timers = NULL; +#endif p->mbuf = NULL; p->mbuf_sz = 0; p->psd = NULL; @@ -11035,7 +11142,9 @@ void erts_init_empty_process(Process *p) p->msg.save = &p->msg.first; p->msg.len = 0; p->bif_timers = NULL; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT p->accessor_bif_timers = NULL; +#endif p->dictionary = NULL; p->seq_trace_clock = 0; p->seq_trace_lastcnt = 0; @@ -11130,7 +11239,9 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->msg.first == NULL); ASSERT(p->msg.len == 0); ASSERT(p->bif_timers == NULL); +#ifdef ERTS_BTM_ACCESSOR_SUPPORT ASSERT(p->accessor_bif_timers == NULL); +#endif ASSERT(p->dictionary == NULL); ASSERT(p->catches == 0); ASSERT(p->cp == NULL); @@ -12086,6 +12197,7 @@ erts_continue_exit_process(Process *p) p->bif_timers = NULL; } +#ifdef ERTS_BTM_ACCESSOR_SUPPORT if (p->accessor_bif_timers) { if (erts_detach_accessor_bif_timers(p, p->accessor_bif_timers, @@ -12096,6 +12208,7 @@ erts_continue_exit_process(Process *p) ASSERT(erts_proc_read_refc(p) > 0); p->accessor_bif_timers = NULL; } +#endif #ifdef ERTS_SMP if (p->flags & F_HAVE_BLCKD_MSCHED) { @@ -12458,45 +12571,13 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) { flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work); erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: "); - for (i = 0; i < ERTS_SSI_AUX_WORK_MAX && flg; i++) { + for (i = 0; i < ERTS_SSI_AUX_WORK_NO_FLAGS && flg; i++) { erts_aint32_t chk = (1 << i); if (flg & chk) { - switch (chk) { - case ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP: - erts_print(to, to_arg, "DELAYED_AW_WAKEUP"); break; - case ERTS_SSI_AUX_WORK_DD: - erts_print(to, to_arg, "DELAYED_DEALLOC"); break; - case ERTS_SSI_AUX_WORK_DD_THR_PRGR: - erts_print(to, to_arg, "DELAYED_DEALLOC_THR_PRGR"); break; - case ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC: - erts_print(to, to_arg, "FIX_ALLOC_DEALLOC"); break; - case ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM: - erts_print(to, to_arg, "FIX_ALLOC_LOWER_LIM"); break; - case ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP: - erts_print(to, to_arg, "THR_PRGR_LATER_OP"); break; - case ERTS_SSI_AUX_WORK_CNCLD_TMRS: - erts_print(to, to_arg, "CANCELED_TIMERS"); break; - case ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR: - erts_print(to, to_arg, "CANCELED_TIMERS_THR_PRGR"); break; - case ERTS_SSI_AUX_WORK_ASYNC_READY: - erts_print(to, to_arg, "ASYNC_READY"); break; - case ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN: - erts_print(to, to_arg, "ASYNC_READY_CLEAN"); break; - case ERTS_SSI_AUX_WORK_MISC_THR_PRGR: - erts_print(to, to_arg, "MISC_THR_PRGR"); break; - case ERTS_SSI_AUX_WORK_MISC: - erts_print(to, to_arg, "MISC"); break; - case ERTS_SSI_AUX_WORK_CHECK_CHILDREN: - erts_print(to, to_arg, "CHECK_CHILDREN"); break; - case ERTS_SSI_AUX_WORK_SET_TMO: - erts_print(to, to_arg, "SET_TMO"); break; - case ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK: - erts_print(to, to_arg, "MSEG_CACHE_CHECK"); break; - case ERTS_SSI_AUX_WORK_REAP_PORTS: - erts_print(to, to_arg, "REAP_PORTS"); break; - default: - erts_print(to, to_arg, "UNKNOWN(%d)", flg); break; - } + if (erts_aux_work_flag_descr[i]) + erts_print(to, to_arg, "%s", erts_aux_work_flag_descr[i]); + else + erts_print(to, to_arg, "1<<%d", i); if (flg > chk) erts_print(to, to_arg, " | "); flg -= chk; diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index b1c30e7652..a6c3790991 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -268,28 +268,73 @@ typedef enum { | 
ERTS_SSI_FLG_SUSPENDED) /* - * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative - * eachother. Most frequent - lowest bit number. + * Keep ERTS_SSI_AUX_WORK flags ordered in expected frequency + * order relative eachother. Most frequent at lowest at lowest + * index. + * + * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX *need* to be + * highest index... + * + * Remember to update description in erts_pre_init_process() + * when adding new flags... */ -#define ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP (((erts_aint32_t) 1) << 0) -#define ERTS_SSI_AUX_WORK_DD (((erts_aint32_t) 1) << 1) -#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 2) -#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 3) -#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4) -#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 5) -#define ERTS_SSI_AUX_WORK_CNCLD_TMRS (((erts_aint32_t) 1) << 6) -#define ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR (((erts_aint32_t) 1) << 7) -#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 8) -#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 9) -#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 10) -#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 11) -#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 12) -#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 13) -#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 14) -#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 15) - -#define ERTS_SSI_AUX_WORK_MAX 16 +typedef enum { + ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX, + ERTS_SSI_AUX_WORK_DD_IX, + ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX, + ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX, + ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX, + ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX, + ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX, + ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX, + ERTS_SSI_AUX_WORK_ASYNC_READY_IX, + ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX, + ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX, + ERTS_SSI_AUX_WORK_MISC_IX, + ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX, + ERTS_SSI_AUX_WORK_SET_TMO_IX, + ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX, + ERTS_SSI_AUX_WORK_REAP_PORTS_IX, + ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX, /* SHOULD be last flag index */ + + ERTS_SSI_AUX_WORK_NO_FLAGS /* Not a flag index... 
*/ +} ErtsSsiAuxWorkFlagIndex; + +#define ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX) +#define ERTS_SSI_AUX_WORK_DD \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DD_IX) +#define ERTS_SSI_AUX_WORK_DD_THR_PRGR \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX) +#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX) +#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX) +#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX) +#define ERTS_SSI_AUX_WORK_CNCLD_TMRS \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX) +#define ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX) +#define ERTS_SSI_AUX_WORK_ASYNC_READY \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_ASYNC_READY_IX) +#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX) +#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX) +#define ERTS_SSI_AUX_WORK_MISC \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_IX) +#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX) +#define ERTS_SSI_AUX_WORK_SET_TMO \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX) +#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX) +#define ERTS_SSI_AUX_WORK_REAP_PORTS \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_REAP_PORTS_IX) +#define ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX) typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo; @@ -515,8 +560,6 @@ typedef struct { #ifdef ERTS_SMP struct { ErtsThrPrgrVal thr_prgr; - void (*completed_callback)(void *); - void (*completed_arg)(void *); } dd; struct { ErtsThrPrgrVal thr_prgr; @@ -545,6 +588,13 @@ typedef struct { ErtsDelayedAuxWorkWakeupJob *job; } delayed_wakeup; #endif + struct { + struct { + erts_aint32_t flags; + void (*callback)(void *); + void *arg; + } wait_completed; + } debug; } ErtsAuxWorkData; #ifdef ERTS_DIRTY_SCHEDULERS @@ -609,6 +659,11 @@ struct ErtsSchedulerData_ { ErtsSchedAllocData alloc_data; + struct { + Uint64 out; + Uint64 in; + } io; + Uint64 reductions; ErtsSchedWallTime sched_wall_time; ErtsGCInfo gc_info; @@ -925,7 +980,9 @@ struct process { ErlMessageQueue msg; /* Message queue */ ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */ +#ifdef ERTS_BTM_ACCESSOR_SUPPORT ErtsBifTimers *accessor_bif_timers; /* Accessor bif timers */ +#endif ProcDict *dictionary; /* Process dictionary, may be NULL */ @@ -1692,7 +1749,11 @@ Eterm erts_get_reader_groups_map(Process *c_p); Eterm erts_debug_reader_groups_map(Process *c_p, int groups); Uint erts_debug_nbalance(void); -int erts_debug_wait_deallocations(Process *c_p); + +#define ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS (1 << 0) +#define ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS (1 << 1) + +int erts_debug_wait_completed(Process *c_p, int flags); Uint erts_process_memory(Process *c_p); @@ -2001,14 +2062,10 @@ ERTS_GLB_INLINE Eterm erts_get_current_pid(void); ERTS_GLB_INLINE Uint erts_get_scheduler_id(void); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData 
*esdp); -#ifndef ERTS_ENABLE_LOCK_COUNT ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq); -#endif ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq); ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq); -#ifndef ERTS_ENABLE_LOCK_COUNT ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); -#endif ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); @@ -2086,12 +2143,6 @@ erts_smp_runq_lock(ErtsRunQueue *rq) #endif } -#ifdef ERTS_ENABLE_LOCK_COUNT - -#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__) - -#endif - ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq) { @@ -2110,31 +2161,6 @@ erts_smp_runq_unlock(ErtsRunQueue *rq) #endif } -#ifdef ERTS_ENABLE_LOCK_COUNT - -#define erts_smp_xrunq_lock(rq, xrq) erts_smp_xrunq_lock_x((rq), (xrq), __FILE__, __LINE__) - -ERTS_GLB_INLINE void -erts_smp_xrunq_lock_x(ErtsRunQueue *rq, ErtsRunQueue *xrq, char* file, int line) -{ -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx)); - if (xrq != rq) { - if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) { - if (rq < xrq) - erts_smp_mtx_lock_x(&xrq->mtx, file, line); - else { - erts_smp_mtx_unlock(&rq->mtx); - erts_smp_mtx_lock_x(&xrq->mtx, file, line); - erts_smp_mtx_lock_x(&rq->mtx, file, line); - } - } - } -#endif -} - -#else - ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { @@ -2154,8 +2180,6 @@ erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) #endif } -#endif - ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index fff267ff2a..0548c6137c 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -110,6 +110,9 @@ static struct { erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS]; +#ifdef ERTS_ENABLE_LOCK_COUNT +static void lcnt_enable_proc_lock_count(Process *proc, int enable); +#endif void erts_init_proc_lock(int cpus) @@ -1003,7 +1006,9 @@ erts_pid2proc_opt(Process *c_p, void erts_proc_lock_init(Process *p) { +#if ERTS_PROC_LOCK_OWN_IMPL || defined(ERTS_PROC_LOCK_DEBUG) int i; +#endif #if ERTS_PROC_LOCK_OWN_IMPL /* We always start with all locks locked */ #if ERTS_PROC_LOCK_ATOMIC_IMPL @@ -1081,30 +1086,32 @@ erts_proc_lock_fin(Process *p) /* --- Process lock counting ----------------------------------------------- */ #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT) -void erts_lcnt_proc_lock_init(Process *p) { - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { - if (p->common.id != ERTS_INVALID_PID) { - erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id); - erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id); - erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, p->common.id); - erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->common.id); - erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id); - } else { - erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK); - erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK); - erts_lcnt_init_lock(&(p->lock.lcnt_btm), 
"proc_btm", ERTS_LCNT_LT_PROCLOCK); - erts_lcnt_init_lock(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK); - erts_lcnt_init_lock(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK); - } - } else { - sys_memzero(&(p->lock.lcnt_main), sizeof(p->lock.lcnt_main)); - sys_memzero(&(p->lock.lcnt_msgq), sizeof(p->lock.lcnt_msgq)); - sys_memzero(&(p->lock.lcnt_btm), sizeof(p->lock.lcnt_btm)); - sys_memzero(&(p->lock.lcnt_link), sizeof(p->lock.lcnt_link)); - sys_memzero(&(p->lock.lcnt_status), sizeof(p->lock.lcnt_status)); - } + +void erts_lcnt_enable_proc_lock_count(int enable) { + int ix, max = erts_ptab_max(&erts_proc); + Process *proc = NULL; + for (ix = 0; ix < max; ++ix) { + if ((proc = erts_pix2proc(ix)) != NULL) + lcnt_enable_proc_lock_count(proc, enable); + } /* for all processes */ } - + +void erts_lcnt_proc_lock_init(Process *p) { + if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) { + erts_lcnt_init_lock_empty(&(p->lock.lcnt_main)); + erts_lcnt_init_lock_empty(&(p->lock.lcnt_msgq)); + erts_lcnt_init_lock_empty(&(p->lock.lcnt_btm)); + erts_lcnt_init_lock_empty(&(p->lock.lcnt_link)); + erts_lcnt_init_lock_empty(&(p->lock.lcnt_status)); + } else { /* now the common case */ + Eterm pid = (p->common.id != ERTS_INVALID_PID) ? p->common.id : NIL; + erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, pid); + erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, pid); + erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, pid); + erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid); + erts_lcnt_init_lock_x(&(p->lock.lcnt_status),"proc_status",ERTS_LCNT_LT_PROCLOCK, pid); + } /* the lock names should really be aligned to four characters */ +} /* logic reversed */ void erts_lcnt_proc_lock_destroy(Process *p) { erts_lcnt_destroy_lock(&(p->lock.lcnt_main)); @@ -1114,28 +1121,31 @@ void erts_lcnt_proc_lock_destroy(Process *p) { erts_lcnt_destroy_lock(&(p->lock.lcnt_status)); } -void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) { - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { - if (locks & ERTS_PROC_LOCK_MAIN) { - erts_lcnt_lock(&(lock->lcnt_main)); - } - if (locks & ERTS_PROC_LOCK_MSGQ) { - erts_lcnt_lock(&(lock->lcnt_msgq)); - } - if (locks & ERTS_PROC_LOCK_BTM) { - erts_lcnt_lock(&(lock->lcnt_btm)); - } - if (locks & ERTS_PROC_LOCK_LINK) { - erts_lcnt_lock(&(lock->lcnt_link)); - } - if (locks & ERTS_PROC_LOCK_STATUS) { - erts_lcnt_lock(&(lock->lcnt_status)); +static void lcnt_enable_proc_lock_count(Process *proc, int enable) { + if (enable) { + if (!ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) { + erts_lcnt_proc_lock_init(proc); + } } + else { + if (ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) { + erts_lcnt_proc_lock_destroy(proc); + } } } -void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line) { - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { +void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) { + if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return; + if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock(&(lock->lcnt_main)); } + if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock(&(lock->lcnt_msgq)); } + if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock(&(lock->lcnt_btm)); } + if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); } + if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock(&(lock->lcnt_status)); } +} + +void 
erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, + char *file, unsigned int line) { + if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return; if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line); } @@ -1151,90 +1161,34 @@ void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, cha if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line); } - } } void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) { - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { - if (locks & ERTS_PROC_LOCK_MAIN) { - erts_lcnt_lock_unaquire(&(lock->lcnt_main)); - } - if (locks & ERTS_PROC_LOCK_MSGQ) { - erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); - } - if (locks & ERTS_PROC_LOCK_BTM) { - erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); - } - if (locks & ERTS_PROC_LOCK_LINK) { - erts_lcnt_lock_unaquire(&(lock->lcnt_link)); - } - if (locks & ERTS_PROC_LOCK_STATUS) { - erts_lcnt_lock_unaquire(&(lock->lcnt_status)); - } - } + if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return; + if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_unaquire(&(lock->lcnt_main)); } + if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); } + if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); } + if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); } + if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_unaquire(&(lock->lcnt_status)); } } void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) { - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { - if (locks & ERTS_PROC_LOCK_MAIN) { - erts_lcnt_unlock(&(lock->lcnt_main)); - } - if (locks & ERTS_PROC_LOCK_MSGQ) { - erts_lcnt_unlock(&(lock->lcnt_msgq)); - } - if (locks & ERTS_PROC_LOCK_BTM) { - erts_lcnt_unlock(&(lock->lcnt_btm)); - } - if (locks & ERTS_PROC_LOCK_LINK) { - erts_lcnt_unlock(&(lock->lcnt_link)); - } - if (locks & ERTS_PROC_LOCK_STATUS) { - erts_lcnt_unlock(&(lock->lcnt_status)); - } - } + if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return; + if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_unlock(&(lock->lcnt_main)); } + if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_unlock(&(lock->lcnt_msgq)); } + if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_unlock(&(lock->lcnt_btm)); } + if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); } + if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_unlock(&(lock->lcnt_status)); } } void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) { - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { - if (locks & ERTS_PROC_LOCK_MAIN) { - erts_lcnt_trylock(&(lock->lcnt_main), res); - } - if (locks & ERTS_PROC_LOCK_MSGQ) { - erts_lcnt_trylock(&(lock->lcnt_msgq), res); - } - if (locks & ERTS_PROC_LOCK_BTM) { - erts_lcnt_trylock(&(lock->lcnt_btm), res); - } - if (locks & ERTS_PROC_LOCK_LINK) { - erts_lcnt_trylock(&(lock->lcnt_link), res); - } - if (locks & ERTS_PROC_LOCK_STATUS) { - erts_lcnt_trylock(&(lock->lcnt_status), res); - } - } -} - - -void erts_lcnt_enable_proc_lock_count(int enable) -{ - int i, max = erts_ptab_max(&erts_proc); - - for (i = 0; i < max; ++i) { - Process* p = erts_pix2proc(i); - if (p) { - if (enable) { - if (!ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) { - erts_lcnt_proc_lock_init(p); - } - } else { - if (ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) { - erts_lcnt_proc_lock_destroy(p); - } - } - } - } -} - -#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */ 
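
The rewritten lock-count wrappers above all take the same shape: return early when the ERTS_LCNT_OPT_PROCLOCK runtime option is off, then one flat branch per requested lock bit. A minimal standalone sketch of that guard-clause shape, using hypothetical names (OPT_PROCLOCK, LOCK_*, count_lock_ops are illustrations only, not ERTS APIs):

    #include <stdio.h>

    /* hypothetical stand-ins for the runtime option flag and the lock bits */
    #define OPT_PROCLOCK (1u << 0)
    #define LOCK_MAIN    (1u << 0)
    #define LOCK_MSGQ    (1u << 1)
    #define LOCK_STATUS  (1u << 2)

    static unsigned rt_options = OPT_PROCLOCK;

    struct lcnt_counters { unsigned long main_ops, msgq_ops, status_ops; };

    /* guard clause first, then one flat branch per lock bit */
    static void count_lock_ops(struct lcnt_counters *c, unsigned locks)
    {
        if (!(rt_options & OPT_PROCLOCK)) return;   /* counting disabled */
        if (locks & LOCK_MAIN)   c->main_ops++;
        if (locks & LOCK_MSGQ)   c->msgq_ops++;
        if (locks & LOCK_STATUS) c->status_ops++;
    }

    int main(void)
    {
        struct lcnt_counters c = {0, 0, 0};
        count_lock_ops(&c, LOCK_MAIN | LOCK_STATUS);
        rt_options = 0;                   /* counting switched off at runtime */
        count_lock_ops(&c, LOCK_MSGQ);    /* early return, no effect */
        printf("main=%lu msgq=%lu status=%lu\n",
               c.main_ops, c.msgq_ops, c.status_ops);
        return 0;
    }

The same flattening is applied to the lock, unlock, trylock and unacquire hooks that follow.
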
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return; + if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_trylock(&(lock->lcnt_main), res); } + if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_trylock(&(lock->lcnt_msgq), res); } + if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_trylock(&(lock->lcnt_btm), res); } + if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); } + if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_trylock(&(lock->lcnt_status), res); } +} /* reversed logic */ +#endif /* ERTS_ENABLE_LOCK_COUNT */ /* --- Process lock checking ----------------------------------------------- */ diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c index bc04d7b78e..565528193e 100644 --- a/erts/emulator/beam/erl_term.c +++ b/erts/emulator/beam/erl_term.c @@ -128,10 +128,10 @@ FUNTY checked_##FUN(ARGTY x, const char *file, unsigned line) \ return _unchecked_##FUN(x); \ } -ET_DEFINE_CHECKED(Eterm,make_boxed,Eterm*,_is_taggable_pointer); +ET_DEFINE_CHECKED(Eterm,make_boxed,const Eterm*,_is_taggable_pointer); ET_DEFINE_CHECKED(int,is_boxed,Eterm,!is_header); ET_DEFINE_CHECKED(Eterm*,boxed_val,Wterm,_boxed_precond); -ET_DEFINE_CHECKED(Eterm,make_list,Eterm*,_is_taggable_pointer); +ET_DEFINE_CHECKED(Eterm,make_list,const Eterm*,_is_taggable_pointer); ET_DEFINE_CHECKED(int,is_not_list,Eterm,!is_header); ET_DEFINE_CHECKED(Eterm*,list_val,Wterm,_list_precond); ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small); diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h index 602aab46dc..7b15b34da1 100644 --- a/erts/emulator/beam/erl_term.h +++ b/erts/emulator/beam/erl_term.h @@ -198,7 +198,7 @@ struct erl_node_; /* Declared in erl_node_tables.h */ #endif #define _is_aligned(x) (((Uint)(x) & 0x3) == 0) #define _unchecked_make_boxed(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_BOXED) -_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*) +_ET_DECLARE_CHECKED(Eterm,make_boxed,const Eterm*) #define make_boxed(x) _ET_APPLY(make_boxed,(x)) #if 1 #define _is_not_boxed(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_BOXED)) @@ -214,7 +214,7 @@ _ET_DECLARE_CHECKED(Eterm*,boxed_val,Wterm) /* cons cell ("list") access methods */ #define _unchecked_make_list(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_LIST) -_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*) +_ET_DECLARE_CHECKED(Eterm,make_list,const Eterm*) #define make_list(x) _ET_APPLY(make_list,(x)) #if 1 #define _unchecked_is_not_list(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_LIST)) diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h index 4560cd23af..e64b86496a 100644 --- a/erts/emulator/beam/erl_time.h +++ b/erts/emulator/beam/erl_time.h @@ -453,4 +453,12 @@ ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_ #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ +void +erts_twheel_debug_foreach(ErtsTimerWheel *tiw, + void (*tclbk)(void *), + void (*func)(void *, + ErtsMonotonicTime, + void *), + void *arg); + #endif /* timer wheel api */ diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index fe48298155..0a69172980 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -94,9 +94,9 @@ static Uint is_external_string(Eterm obj, int* p_is_string); static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32); static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32); struct B2TContext_t; -static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*, struct B2TContext_t*); +static byte* 
dec_term(ErtsDistExternal*, ErtsHeapFactory*, byte*, Eterm*, struct B2TContext_t*); static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*); -static byte* dec_pid(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*); +static byte* dec_pid(ErtsDistExternal *, ErtsHeapFactory*, byte*, Eterm*); static Sint decoded_size(byte *ep, byte* endp, int internal_tags, struct B2TContext_t*); static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1); @@ -930,8 +930,7 @@ Sint erts_decode_ext_size_ets(byte *ext, Uint size) ** on return hpp is updated to point after allocated data */ Eterm -erts_decode_dist_ext(Eterm** hpp, - ErlOffHeap* off_heap, +erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *edep) { Eterm obj; @@ -951,7 +950,7 @@ erts_decode_dist_ext(Eterm** hpp, goto error; ep++; } - ep = dec_term(edep, hpp, ep, off_heap, &obj, NULL); + ep = dec_term(edep, factory, ep, &obj, NULL); if (!ep) goto error; @@ -960,19 +959,22 @@ erts_decode_dist_ext(Eterm** hpp, return obj; error: + erts_factory_undo(factory); bad_dist_ext(edep); return THE_NON_VALUE; } -Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext) +Eterm erts_decode_ext(ErtsHeapFactory* factory, byte **ext) { Eterm obj; byte *ep = *ext; - if (*ep++ != VERSION_MAGIC) + if (*ep++ != VERSION_MAGIC) { + erts_factory_undo(factory); return THE_NON_VALUE; - ep = dec_term(NULL, hpp, ep, off_heap, &obj, NULL); + } + ep = dec_term(NULL, factory, ep, &obj, NULL); if (!ep) { #ifdef DEBUG bin_write(ERTS_PRINT_STDERR,NULL,*ext,500); @@ -983,10 +985,10 @@ Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext) return obj; } -Eterm erts_decode_ext_ets(Eterm **hpp, ErlOffHeap *off_heap, byte *ext) +Eterm erts_decode_ext_ets(ErtsHeapFactory* factory, byte *ext) { Eterm obj; - ext = dec_term(NULL, hpp, ext, off_heap, &obj, NULL); + ext = dec_term(NULL, factory, ext, &obj, NULL); ASSERT(ext); return obj; } @@ -995,9 +997,8 @@ Eterm erts_decode_ext_ets(Eterm **hpp, ErlOffHeap *off_heap, byte *ext) BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2) { + ErtsHeapFactory factory; Eterm res; - Eterm *hp; - Eterm *hendp; Sint hsz; ErtsDistExternal ede; Eterm *tp; @@ -1044,12 +1045,9 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2) if (hsz < 0) goto badarg; - hp = HAlloc(BIF_P, (Uint) hsz); - hendp = hp + hsz; - - res = erts_decode_dist_ext(&hp, &MSO(BIF_P), &ede); - - HRelease(BIF_P, hendp, hp); + erts_factory_proc_prealloc_init(&factory, BIF_P, hsz); + res = erts_decode_dist_ext(&factory, &ede); + erts_factory_close(&factory); if (is_value(res)) BIF_RET(res); @@ -1177,13 +1175,11 @@ typedef struct { byte* ep; Eterm res; Eterm* next; - Eterm* hp_start; - Eterm* hp; - Eterm* hp_end; + ErtsHeapFactory factory; int remaining_n; char* remaining_bytes; Eterm* maps_list; - struct dec_term_hamt_placeholder* hamt_list; + ErtsPStack hamt_array; } B2TDecodeContext; typedef struct { @@ -1307,10 +1303,12 @@ binary2term_abort(ErtsBinary2TermState *state) } static ERTS_INLINE Eterm -binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *ohp) +binary2term_create(ErtsDistExternal *edep, ErtsBinary2TermState *state, + ErtsHeapFactory* factory) { Eterm res; - if (!dec_term(edep, hpp, state->extp, ohp, &res, NULL)) + + if (!dec_term(edep, factory, state->extp, &res, NULL)) res = THE_NON_VALUE; if (state->exttmp) { state->exttmp = 0; @@ -1343,9 +1341,9 @@ erts_binary2term_abort(ErtsBinary2TermState *state) } Eterm -erts_binary2term_create(ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap 
*ohp) +erts_binary2term_create(ErtsBinary2TermState *state, ErtsHeapFactory* factory) { - return binary2term_create(NULL,state, hpp, ohp); + return binary2term_create(NULL,state, factory); } static void b2t_destroy_context(B2TContext* context) @@ -1354,8 +1352,21 @@ static void b2t_destroy_context(B2TContext* context) ERTS_ALC_T_EXT_TERM_DATA); context->aligned_alloc = NULL; binary2term_abort(&context->b2ts); - if (context->state == B2TUncompressChunk) { + switch (context->state) { + case B2TUncompressChunk: erl_zlib_inflate_finish(&context->u.uc.stream); + break; + case B2TDecode: + case B2TDecodeList: + case B2TDecodeTuple: + case B2TDecodeString: + case B2TDecodeBinary: + if (context->u.dc.hamt_array.pstart) { + erts_free(context->u.dc.hamt_array.alloc_type, + context->u.dc.hamt_array.pstart); + } + break; + default:; } } @@ -1506,11 +1517,9 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar ctx->u.dc.ep = ctx->b2ts.extp; ctx->u.dc.res = (Eterm) (UWord) NULL; ctx->u.dc.next = &ctx->u.dc.res; - ctx->u.dc.hp_start = HAlloc(p, ctx->heap_size); - ctx->u.dc.hp = ctx->u.dc.hp_start; - ctx->u.dc.hp_end = ctx->u.dc.hp_start + ctx->heap_size; + erts_factory_proc_prealloc_init(&ctx->u.dc.factory, p, ctx->heap_size); ctx->u.dc.maps_list = NULL; - ctx->u.dc.hamt_list = NULL; + ctx->u.dc.hamt_array.pstart = NULL; ctx->state = B2TDecode; /*fall through*/ case B2TDecode: @@ -1520,11 +1529,10 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar case B2TDecodeBinary: { ErtsDistExternal fakedep; fakedep.flags = ctx->flags; - dec_term(&fakedep, NULL, NULL, &MSO(p), NULL, ctx); + dec_term(&fakedep, NULL, NULL, NULL, ctx); break; } case B2TDecodeFail: - HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp_start); /*fall through*/ case B2TBadArg: BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION); @@ -1549,11 +1557,11 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar case B2TDone: b2t_destroy_context(ctx); - if (ctx->u.dc.hp > ctx->u.dc.hp_end) { + if (ctx->u.dc.factory.hp > ctx->u.dc.factory.hp_end) { erl_exit(1, ":%s, line %d: heap overrun by %d words(s)\n", - __FILE__, __LINE__, ctx->u.dc.hp - ctx->u.dc.hp_end); + __FILE__, __LINE__, ctx->u.dc.factory.hp - ctx->u.dc.factory.hp_end); } - HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp); + erts_factory_close(&ctx->u.dc.factory); if (!is_first_call) { erts_set_gc_state(p, 1); @@ -2247,7 +2255,7 @@ static ERTS_INLINE ErlNode* dec_get_node(Eterm sysname, Uint creation) } static byte* -dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Eterm* objp) +dec_pid(ErtsDistExternal *edep, ErtsHeapFactory* factory, byte* ep, Eterm* objp) { Eterm sysname; Uint data; @@ -2286,15 +2294,15 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete if(node == erts_this_node) { *objp = make_internal_pid(data); } else { - ExternalThing *etp = (ExternalThing *) *hpp; - *hpp += EXTERNAL_THING_HEAD_SIZE + 1; + ExternalThing *etp = (ExternalThing *) factory->hp; + factory->hp += EXTERNAL_THING_HEAD_SIZE + 1; etp->header = make_external_pid_header(1); - etp->next = off_heap->first; + etp->next = factory->off_heap->first; etp->node = node; etp->data.ui[0] = data; - off_heap->first = (struct erl_off_heap_header*) etp; + factory->off_heap->first = (struct erl_off_heap_header*) etp; *objp = make_external_pid(etp); } return ep; @@ -2905,69 +2913,43 @@ is_external_string(Eterm list, int* p_is_string) return len; } -/* Assumes that the ones 
to undo are preluding the list. */ -static void -undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end) -{ - const Uint area_sz = (end - start) * sizeof(Eterm); - struct erl_off_heap_header* hdr; - struct erl_off_heap_header** hdr_nextp = NULL; - - for (hdr = off_heap->first; ; hdr=hdr->next) { - if (!in_area(hdr, start, area_sz)) { - if (hdr_nextp != NULL) { - *hdr_nextp = NULL; - erts_cleanup_offheap(off_heap); - off_heap->first = hdr; - } - break; - } - hdr_nextp = &hdr->next; - } - - /* Assert that the ones to undo were indeed preluding the list. */ -#ifdef DEBUG - for (hdr = off_heap->first; hdr != NULL; hdr = hdr->next) { - ASSERT(!in_area(hdr, start, area_sz)); - } -#endif /* DEBUG */ -} -struct dec_term_hamt_placeholder +struct dec_term_hamt { - struct dec_term_hamt_placeholder* next; Eterm* objp; /* write result here */ Uint size; /* nr of leafs */ - Eterm leafs[1]; + Eterm* leaf_array; }; -#define DEC_TERM_HAMT_PLACEHOLDER_SIZE \ - (offsetof(struct dec_term_hamt_placeholder, leafs) / sizeof(Eterm)) /* Decode term from external format into *objp. -** On failure return NULL and *hpp will be unchanged. +** On failure calls erts_factory_undo() and returns NULL */ static byte* -dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, - Eterm* objp, B2TContext* ctx) +dec_term(ErtsDistExternal *edep, + ErtsHeapFactory* factory, + byte* ep, + Eterm* objp, + B2TContext* ctx) { - Eterm* hp_saved; +#define PSTACK_TYPE struct dec_term_hamt + PSTACK_DECLARE(hamt_array, 5); int n; ErtsAtomEncoding char_enc; register Eterm* hp; /* Please don't take the address of hp */ Eterm *maps_list; /* for preprocessing of small maps */ - struct dec_term_hamt_placeholder* hamt_list; /* for preprocessing of big maps */ Eterm* next; SWord reds; +#ifdef DEBUG + Eterm* dbg_resultp = ctx ? 
&ctx->u.dc.res : objp; +#endif if (ctx) { - hp_saved = ctx->u.dc.hp_start; reds = ctx->reds; next = ctx->u.dc.next; ep = ctx->u.dc.ep; - hpp = &ctx->u.dc.hp; + factory = &ctx->u.dc.factory; maps_list = ctx->u.dc.maps_list; - hamt_list = ctx->u.dc.hamt_list; if (ctx->state != B2TDecode) { int n_limit = reds; @@ -3012,7 +2994,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, break; case B2TDecodeString: - hp = *hpp; + hp = factory->hp; hp[-1] = make_list(hp); /* overwrite the premature NIL */ while (n-- > 0) { hp[0] = make_small(*ep++); @@ -3020,7 +3002,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, hp += 2; } hp[-1] = NIL; - *hpp = hp; + factory->hp = hp; break; case B2TDecodeBinary: @@ -3042,16 +3024,18 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, return NULL; } } + PSTACK_CHANGE_ALLOCATOR(hamt_array, ERTS_ALC_T_SAVED_ESTACK); + if (ctx->u.dc.hamt_array.pstart) { + PSTACK_RESTORE(hamt_array, &ctx->u.dc.hamt_array); + } } else { - hp_saved = *hpp; reds = ERTS_SWORD_MAX; next = objp; *next = (Eterm) (UWord) NULL; maps_list = NULL; - hamt_list = NULL; } - hp = *hpp; + hp = factory->hp; while (next != NULL) { @@ -3288,9 +3272,9 @@ dec_term_atom_common: break; } case PID_EXT: - *hpp = hp; - ep = dec_pid(edep, hpp, ep, off_heap, objp); - hp = *hpp; + factory->hp = hp; + ep = dec_pid(edep, factory, ep, objp); + hp = factory->hp; if (ep == NULL) { goto error; } @@ -3324,11 +3308,11 @@ dec_term_atom_common: hp += EXTERNAL_THING_HEAD_SIZE + 1; etp->header = make_external_port_header(1); - etp->next = off_heap->first; + etp->next = factory->off_heap->first; etp->node = node; etp->data.ui[0] = num; - off_heap->first = (struct erl_off_heap_header*)etp; + factory->off_heap->first = (struct erl_off_heap_header*)etp; *objp = make_external_port(etp); } @@ -3408,10 +3392,10 @@ dec_term_atom_common: #else etp->header = make_external_ref_header(ref_words); #endif - etp->next = off_heap->first; + etp->next = factory->off_heap->first; etp->node = node; - off_heap->first = (struct erl_off_heap_header*)etp; + factory->off_heap->first = (struct erl_off_heap_header*)etp; *objp = make_external_ref(etp); ref_num = &(etp->data.ui32[0]); } @@ -3451,9 +3435,9 @@ dec_term_atom_common: hp += PROC_BIN_SIZE; pb->thing_word = HEADER_PROC_BIN; pb->size = n; - pb->next = off_heap->first; - off_heap->first = (struct erl_off_heap_header*)pb; - OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm)); + pb->next = factory->off_heap->first; + factory->off_heap->first = (struct erl_off_heap_header*)pb; + OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm)); pb->val = dbin; pb->bytes = (byte*) dbin->orig_bytes; pb->flags = 0; @@ -3503,9 +3487,9 @@ dec_term_atom_common: pb = (ProcBin *) hp; pb->thing_word = HEADER_PROC_BIN; pb->size = n; - pb->next = off_heap->first; - off_heap->first = (struct erl_off_heap_header*)pb; - OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm)); + pb->next = factory->off_heap->first; + factory->off_heap->first = (struct erl_off_heap_header*)pb; + OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm)); pb->val = dbin; pb->bytes = (byte*) dbin->orig_bytes; pb->flags = 0; @@ -3557,9 +3541,9 @@ dec_term_atom_common: if ((ep = dec_atom(edep, ep, &name)) == NULL) { goto error; } - *hpp = hp; - ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL); - hp = *hpp; + factory->hp = hp; + ep = dec_term(edep, factory, ep, &temp, NULL); + hp = factory->hp; if (ep == NULL) { goto error; } @@ -3631,15 +3615,11 @@ 
dec_term_atom_common: } } else { /* Make hamt */ - struct dec_term_hamt_placeholder* holder = - (struct dec_term_hamt_placeholder*) hp; - - holder->next = hamt_list; - hamt_list = holder; - holder->objp = objp; - holder->size = size; + struct dec_term_hamt* hamt = PSTACK_PUSH(hamt_array); - hp += DEC_TERM_HAMT_PLACEHOLDER_SIZE; + hamt->objp = objp; + hamt->size = size; + hamt->leaf_array = hp; for (n = size; n; n--) { CDR(hp) = (Eterm) COMPRESS_POINTER(next); @@ -3681,9 +3661,9 @@ dec_term_atom_common: if ((ep = dec_atom(edep, ep, &module)) == NULL) { goto error; } - *hpp = hp; + factory->hp = hp; /* Index */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { + if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3692,7 +3672,7 @@ dec_term_atom_common: old_index = unsigned_val(temp); /* Uniq */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { + if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3704,8 +3684,8 @@ dec_term_atom_common: * It is safe to link the fun into the fun list only when * no more validity tests can fail. */ - funp->next = off_heap->first; - off_heap->first = (struct erl_off_heap_header*)funp; + funp->next = factory->off_heap->first; + factory->off_heap->first = (struct erl_off_heap_header*)funp; funp->fe = erts_put_fun_entry2(module, old_uniq, old_index, uniq, index, arity); @@ -3716,7 +3696,7 @@ dec_term_atom_common: } funp->native_address = funp->fe->native_address; #endif - hp = *hpp; + hp = factory->hp; /* Environment */ for (i = num_free-1; i >= 0; i--) { @@ -3742,14 +3722,14 @@ dec_term_atom_common: ep += 4; hp += ERL_FUN_SIZE; hp += num_free; - *hpp = hp; + factory->hp = hp; funp->thing_word = HEADER_FUN; funp->num_free = num_free; *objp = make_fun(funp); /* Creator pid */ if (*ep != PID_EXT - || (ep = dec_pid(edep, hpp, ++ep, off_heap, + || (ep = dec_pid(edep, factory, ++ep, &funp->creator))==NULL) { goto error; } @@ -3760,7 +3740,7 @@ dec_term_atom_common: } /* Index */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { + if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3769,7 +3749,7 @@ dec_term_atom_common: old_index = unsigned_val(temp); /* Uniq */ - if ((ep = dec_term(edep, hpp, ep, off_heap, &temp, NULL)) == NULL) { + if ((ep = dec_term(edep, factory, ep, &temp, NULL)) == NULL) { goto error; } if (!is_small(temp)) { @@ -3780,8 +3760,8 @@ dec_term_atom_common: * It is safe to link the fun into the fun list only when * no more validity tests can fail. 
*/ - funp->next = off_heap->first; - off_heap->first = (struct erl_off_heap_header*)funp; + funp->next = factory->off_heap->first; + factory->off_heap->first = (struct erl_off_heap_header*)funp; old_uniq = unsigned_val(temp); funp->fe = erts_put_fun_entry(module, old_uniq, old_index); @@ -3789,7 +3769,7 @@ dec_term_atom_common: #ifdef HIPE funp->native_address = funp->fe->native_address; #endif - hp = *hpp; + hp = factory->hp; /* Environment */ for (i = num_free-1; i >= 0; i--) { @@ -3823,9 +3803,9 @@ dec_term_atom_common: erts_refc_inc(&pb->val->refc, 1); hp += PROC_BIN_SIZE; - pb->next = off_heap->first; - off_heap->first = (struct erl_off_heap_header*)pb; - OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm)); + pb->next = factory->off_heap->first; + factory->off_heap->first = (struct erl_off_heap_header*)pb; + OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm)); pb->flags = 0; *objp = make_binary(pb); break; @@ -3841,9 +3821,9 @@ dec_term_atom_common: erts_refc_inc(&pb->val->refc, 1); hp += PROC_BIN_SIZE; - pb->next = off_heap->first; - off_heap->first = (struct erl_off_heap_header*)pb; - OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm)); + pb->next = factory->off_heap->first; + factory->off_heap->first = (struct erl_off_heap_header*)pb; + OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm)); pb->flags = 0; sub = (ErlSubBin*)hp; @@ -3869,9 +3849,11 @@ dec_term_atom_common: if (next || ctx->state != B2TDecode) { ctx->u.dc.ep = ep; ctx->u.dc.next = next; - ctx->u.dc.hp = hp; + ctx->u.dc.factory.hp = hp; ctx->u.dc.maps_list = maps_list; - ctx->u.dc.hamt_list = hamt_list; + if (!PSTACK_IS_EMPTY(hamt_array)) { + PSTACK_SAVE(hamt_array, &ctx->u.dc.hamt_array); + } ctx->reds = 0; return NULL; } @@ -3894,40 +3876,36 @@ dec_term_atom_common: maps_list = next; } - /* Iterate through all the hamts and build tree nodes. + ASSERT(hp <= factory->hp_end + || (factory->mode == FACTORY_CLOSED && is_immed(*dbg_resultp))); + factory->hp = hp; + /* + * From here on factory may produce (more) heap fragments */ - if (hamt_list) { - ErtsHeapFactory factory; - factory.p = NULL; - factory.hp = hp; - /* We assume heap will suffice (see hashmap_over_estimated_heap_size) */ + if (!PSTACK_IS_EMPTY(hamt_array)) { + do { + struct dec_term_hamt* hamt = PSTACK_TOP(hamt_array); - do { - struct dec_term_hamt_placeholder* hamt = hamt_list; - *hamt->objp = erts_hashmap_from_array(&factory, - hamt->leafs, + *hamt->objp = erts_hashmap_from_array(factory, + hamt->leaf_array, hamt->size, 1); if (is_non_value(*hamt->objp)) - goto error; + goto error_hamt; - hamt_list = hamt->next; - - /* Yes, we waste a couple of heap words per hamt - for the temporary placeholder */ - *(Eterm*)hamt = make_pos_bignum_header(DEC_TERM_HAMT_PLACEHOLDER_SIZE-1); - } while (hamt_list); - - hp = factory.hp; + (void) PSTACK_POP(hamt_array); + } while (!PSTACK_IS_EMPTY(hamt_array)); + PSTACK_DESTROY(hamt_array); } + ASSERT((Eterm*)EXPAND_POINTER(*dbg_resultp) != NULL); + if (ctx) { ctx->state = B2TDone; ctx->reds = reds; } - *hpp = hp; return ep; error: @@ -3935,11 +3913,12 @@ error: * Must unlink all off-heap objects that may have been * linked into the process. 
*/ - if (hp < *hpp) { /* Sometimes we used hp and sometimes *hpp */ - hp = *hpp; /* the largest must be the freshest */ + if (factory->hp < hp) { /* Sometimes we used hp and sometimes factory->hp */ + factory->hp = hp; /* the largest must be the freshest */ } - undo_offheap_in_area(off_heap, hp_saved, hp); - *hpp = hp_saved; +error_hamt: + erts_factory_undo(factory); + PSTACK_DESTROY(hamt_array); if (ctx) { ctx->state = B2TDecodeFail; ctx->reds = reds; @@ -4465,7 +4444,7 @@ init_done: if (n <= MAP_SMALL_MAP_LIMIT) { heap_size += 3 + n + 1 + n; } else { - heap_size += hashmap_over_estimated_heap_size(n); + heap_size += HASHMAP_ESTIMATED_HEAP_SIZE(n); } break; case STRING_EXT: diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h index 50fcfa04d6..508ab8dc17 100644 --- a/erts/emulator/beam/external.h +++ b/erts/emulator/beam/external.h @@ -148,6 +148,7 @@ typedef struct { byte *extp; int exttmp; Uint extsize; + Uint heap_size; } ErtsBinary2TermState; @@ -185,18 +186,18 @@ void erts_destroy_dist_ext_copy(ErtsDistExternal *); int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint, DistEntry *, ErtsAtomCache *); Sint erts_decode_dist_ext_size(ErtsDistExternal *); -Eterm erts_decode_dist_ext(Eterm **, ErlOffHeap *, ErtsDistExternal *); +Eterm erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *); Sint erts_decode_ext_size(byte*, Uint); Sint erts_decode_ext_size_ets(byte*, Uint); -Eterm erts_decode_ext(Eterm **, ErlOffHeap *, byte**); -Eterm erts_decode_ext_ets(Eterm **, ErlOffHeap *, byte*); +Eterm erts_decode_ext(ErtsHeapFactory*, byte**); +Eterm erts_decode_ext_ets(ErtsHeapFactory*, byte*); Eterm erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags); Sint erts_binary2term_prepare(ErtsBinary2TermState *, byte *, Sint); void erts_binary2term_abort(ErtsBinary2TermState *); -Eterm erts_binary2term_create(ErtsBinary2TermState *, Eterm **hpp, ErlOffHeap *); +Eterm erts_binary2term_create(ErtsBinary2TermState *, ErtsHeapFactory*); int erts_debug_max_atom_out_cache_index(void); int erts_debug_atom_to_out_cache_index(Eterm); diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 340c7033ab..14d42599a1 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -230,9 +230,23 @@ typedef struct { ERTS_INTERNAL_BINARY_FIELDS SWord orig_size; void (*destructor)(Binary *); - char magic_bin_data[1]; + union { + struct { + ERTS_BINARY_STRUCT_ALIGNMENT + char data[1]; + } aligned; + struct { + char data[1]; + } unaligned; + } u; } ErtsMagicBinary; +#ifdef ARCH_32 +#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 4 +#else +#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 0 +#endif + typedef union { Binary binary; ErtsMagicBinary magic_binary; @@ -252,15 +266,30 @@ typedef union { #define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \ ((ErtsBinary *) (BP))->magic_binary.destructor #define ERTS_MAGIC_BIN_DATA(BP) \ - ((void *) ((ErtsBinary *) (BP))->magic_binary.magic_bin_data) -#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \ - ((BP)->orig_size - sizeof(void (*)(Binary *))) + ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data) +#define ERTS_MAGIC_DATA_OFFSET \ + (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes)) #define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \ - (sizeof(void (*)(Binary *)) + (Sz)) + (ERTS_MAGIC_DATA_OFFSET + (Sz)) #define ERTS_MAGIC_BIN_SIZE(Sz) \ - (offsetof(ErtsMagicBinary,magic_bin_data) + (Sz)) -#define ERTS_MAGIC_BIN_FROM_DATA(DATA) \ - ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,magic_bin_data))) + 
(offsetof(ErtsMagicBinary,u.aligned.data) + (Sz)) + +/* On 32-bit arch these macro variants will save memory + by not forcing 8-byte alignment for the magic payload. +*/ +#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \ + ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data) +#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \ + (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes)) +#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \ + ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET) +#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \ + (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz)) +#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \ + (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz)) +#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \ + ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data))) + #define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary) #define ErlDrvBinary2Binary(D) ((Binary *) \ @@ -748,6 +777,15 @@ ErtsPStack s = { (byte*)PSTK_DEF_STACK(s), /* pstart */ \ ERTS_ALC_T_ESTACK /* alloc_type */ \ } +#define PSTACK_CHANGE_ALLOCATOR(s,t) \ +do { \ + if (s.pstart != (byte*)PSTK_DEF_STACK(s)) { \ + erl_exit(1, "Internal error - trying to change allocator " \ + "type of active pstack\n"); \ + } \ + s.alloc_type = (t); \ + } while (0) + #define PSTACK_DESTROY(s) \ do { \ if (s.pstart != (byte*)PSTK_DEF_STACK(s)) { \ @@ -757,6 +795,8 @@ do { \ #define PSTACK_IS_EMPTY(s) (s.psp < s.pstart) +#define PSTACK_COUNT(s) (((PSTACK_TYPE*)s.psp + 1) - (PSTACK_TYPE*)s.pstart) + #define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), (PSTACK_TYPE*)(s.psp)) #define PSTACK_PUSH(s) \ @@ -767,6 +807,45 @@ do { \ #define PSTACK_POP(s) ((PSTACK_TYPE*) (s.psp -= sizeof(PSTACK_TYPE))) +/* + * Do not free the stack after this, it may have pointers into what + * was saved in 'dst'. + */ +#define PSTACK_SAVE(s,dst)\ +do {\ + if (s.pstart == (byte*)PSTK_DEF_STACK(s)) {\ + UWord _pbytes = PSTACK_COUNT(s) * sizeof(PSTACK_TYPE);\ + (dst)->pstart = erts_alloc(s.alloc_type,\ + sizeof(PSTK_DEF_STACK(s)));\ + sys_memcpy((dst)->pstart, s.pstart, _pbytes);\ + (dst)->psp = (dst)->pstart + _pbytes - sizeof(PSTACK_TYPE);\ + (dst)->pend = (dst)->pstart + sizeof(PSTK_DEF_STACK(s));\ + (dst)->alloc_type = s.alloc_type;\ + } else\ + *(dst) = s;\ + } while (0) + +/* + * Use on empty stack, only the allocator can be changed before this. + * The src stack is reset to NULL. 
+ */ +#define PSTACK_RESTORE(s, src) \ +do { \ + ASSERT(s.pstart == (byte*)PSTK_DEF_STACK(s)); \ + s = *(src); /* struct copy */ \ + (src)->pstart = NULL; \ + ASSERT(s.psp >= (s.pstart - sizeof(PSTACK_TYPE))); \ + ASSERT(s.psp < s.pend); \ +} while (0) + +#define PSTACK_DESTROY_SAVED(pstack)\ +do {\ + if ((pstack)->pstart) {\ + erts_free((pstack)->alloc_type, (pstack)->pstart);\ + (pstack)->pstart = NULL;\ + }\ +} while(0) + /* binary.c */ @@ -1055,6 +1134,9 @@ Sint erts_binary_set_loop_limit(Sint limit); /* external.c */ void erts_init_external(void); +/* erl_map.c */ +void erts_init_map(void); + /* erl_unicode.c */ void erts_init_unicode(void); Sint erts_unicode_set_loop_limit(Sint limit); diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index ccc7da265e..75d80b1a58 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -65,8 +65,6 @@ static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error o per thread basis (for BC interfaces) */ ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */ -erts_smp_atomic_t erts_bytes_out; /* No bytes sent out of the system */ -erts_smp_atomic_t erts_bytes_in; /* No bytes gotten into the system */ const ErlDrvTermData driver_term_nil = (ErlDrvTermData)NIL; @@ -80,6 +78,9 @@ int erts_port_synchronous_ops = 0; int erts_port_schedule_all_ops = 0; int erts_port_parallelism = 0; +static erts_atomic64_t bytes_in; +static erts_atomic64_t bytes_out; + static void deliver_result(Eterm sender, Eterm pid, Eterm res); static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *); static void terminate_port(Port *p); @@ -1395,31 +1396,22 @@ finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp) static ERTS_INLINE void queue_port_sched_op_reply(Process *rp, ErtsProcLocks *rp_locksp, - Eterm *hp_start, - Eterm *hp, - Uint h_size, - ErlHeapFragment* bp, + ErtsHeapFactory* factory, Uint32 *ref_num, Eterm msg) { - Eterm ref = make_internal_ref(hp); + Eterm* hp = erts_produce_heap(factory, ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE, 0); + Eterm ref; + + ref= make_internal_ref(hp); write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]); hp += REF_THING_SIZE; msg = TUPLE2(hp, ref, msg); - hp += 3; - if (!bp) { - HRelease(rp, hp_start + h_size, hp); - } - else { - Uint used_h_size = hp - hp_start; - ASSERT(h_size >= used_h_size); - if (h_size > used_h_size) - bp = erts_resize_message_buffer(bp, used_h_size, &msg, 1); - } + erts_factory_trim_and_close(factory, &msg, 1); - erts_queue_message(rp, rp_locksp, bp, msg, NIL); + erts_queue_message(rp, rp_locksp, factory->heap_frags, msg, NIL); } static void @@ -1429,9 +1421,10 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg) if (rp) { ErlOffHeap *ohp; ErlHeapFragment* bp; + ErtsHeapFactory factory; Eterm msg_copy; Uint hsz, msg_sz; - Eterm *hp, *hp_start; + Eterm *hp; ErtsProcLocks rp_locks = 0; hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; @@ -1442,22 +1435,22 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg) hsz += msg_sz; } - hp_start = hp = erts_alloc_message_heap(hsz, + hp = erts_alloc_message_heap(hsz, &bp, &ohp, rp, &rp_locks); + erts_factory_message_init(&factory, rp, hp, bp); if (is_immed(msg)) msg_copy = msg; - else + else { msg_copy = copy_struct(msg, msg_sz, &hp, ohp); + factory.hp = hp; + } queue_port_sched_op_reply(rp, &rp_locks, - hp_start, - hp, - hsz, - bp, + &factory, ref_num, msg_copy); @@ -1542,12 +1535,10 @@ erts_schedule_proc2port_signal(Process *c_p, } static ERTS_INLINE void -send_badsig(Port *prt) -{ 
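
The PSTACK_SAVE/PSTACK_RESTORE pair added to global.h above lets a stack that normally lives in a small default buffer on the C stack be handed off to heap storage across a yield and adopted again on resume (used here so binary_to_term can trap while decoding large maps). A simplified sketch of that save/restore idea with a hypothetical toy stack (the real macros additionally track an allocator type and take over already-heap-allocated storage without copying):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define DEF_WORDS 4

    typedef struct {
        int  def_buf[DEF_WORDS];   /* default storage, lives in the C stack frame */
        int *base;                 /* current storage: def_buf or a heap copy */
        int  count;
        int  cap;
    } toy_stack;

    typedef struct { int *base; int count; int cap; } saved_stack;

    static void ts_init(toy_stack *s) { s->base = s->def_buf; s->count = 0; s->cap = DEF_WORDS; }
    static void ts_push(toy_stack *s, int v) { assert(s->count < s->cap); s->base[s->count++] = v; }

    /* Before yielding: the C stack frame is about to disappear, so copy the
     * default buffer to the heap. */
    static void ts_save(const toy_stack *s, saved_stack *dst)
    {
        dst->count = s->count;
        dst->cap   = s->cap;
        dst->base  = malloc(s->cap * sizeof(int));
        memcpy(dst->base, s->base, s->count * sizeof(int));
    }

    /* On resume: adopt the saved storage into a freshly initialized stack. */
    static void ts_restore(toy_stack *s, saved_stack *src)
    {
        assert(s->base == s->def_buf && s->count == 0);
        s->base   = src->base;
        s->count  = src->count;
        s->cap    = src->cap;
        src->base = NULL;
    }

    int main(void)
    {
        saved_stack saved;
        {
            toy_stack s;
            ts_init(&s);
            ts_push(&s, 1);
            ts_push(&s, 2);
            ts_save(&s, &saved);   /* the frame holding 's' may now go away */
        }
        toy_stack resumed;
        ts_init(&resumed);
        ts_restore(&resumed, &saved);
        assert(resumed.count == 2 && resumed.base[1] == 2);
        free(resumed.base);
        return 0;
    }
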
+send_badsig(Port *prt) { ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; Process* rp; Eterm connected = ERTS_PORT_GET_CONNECTED(prt); - ERTS_SMP_CHK_NO_PROC_LOCKS; ERTS_LC_ASSERT(erts_get_scheduler_id()); @@ -1567,15 +1558,13 @@ send_badsig(Port *prt) 0); if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); - } -} + } /* exit sent */ +} /* send_badsig */ static void -badsig_received(int bang_op, - Port *prt, +badsig_received(int bang_op, Port *prt, erts_aint32_t state, - int bad_output_value) -{ + int bad_output_value) { /* * if (bang_op) * we are part of a "Prt ! Something" operation @@ -1591,12 +1580,12 @@ badsig_received(int bang_op, } if (bang_op) send_badsig(prt); - } -} + } /* not invalid */ +} /* behaved accordingly */ static int -port_badsig(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) -{ +port_badsig(Port *prt, erts_aint32_t state, int op, + ErtsProc2PortSigData *sigdp) { if (op == ERTS_PROC2PORT_SIG_EXEC) badsig_received(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP, prt, @@ -1605,16 +1594,14 @@ port_badsig(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); return ERTS_PORT_REDS_BADSIG; -} - - -/* - * bad_port_signal() will +} /* port_badsig */ +/* bad_port_signal() will * - preserve signal order of signals. * - send a 'badsig' exit signal to connected process if 'from' is an * internal pid and the port is alive when the bad signal reaches * it. */ + static ErtsPortOpResult bad_port_signal(Process *c_p, int flags, @@ -1689,6 +1676,7 @@ call_driver_outputv(int bang_op, if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt)) send_badsig(prt); else { + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErlDrvSizeT size = evp->size; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) @@ -1706,7 +1694,10 @@ call_driver_outputv(int bang_op, prt->caller = NIL; prt->bytes_out += size; - erts_smp_atomic_add_nob(&erts_bytes_out, size); + if (esdp) + esdp->io.out += (Uint64) size; + else + erts_atomic64_add_nob(&bytes_out, (erts_aint64_t) size); } } @@ -1786,7 +1777,7 @@ call_driver_output(int bang_op, if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt)) send_badsig(prt); else { - + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); @@ -1802,7 +1793,10 @@ call_driver_output(int bang_op, prt->caller = NIL; prt->bytes_out += size; - erts_smp_atomic_add_nob(&erts_bytes_out, size); + if (esdp) + esdp->io.out += (Uint64) size; + else + erts_atomic64_add_nob(&bytes_out, (erts_aint64_t) size); } } @@ -2749,6 +2743,9 @@ void erts_init_io(int port_tab_size, drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_atomic64_init_nob(&bytes_in, 0); + erts_atomic64_init_nob(&bytes_out, 0); + common_element_size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); common_element_size += ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ)); common_element_size += 10; /* name */ @@ -2790,9 +2787,6 @@ void erts_init_io(int port_tab_size, legacy_port_tab, 1); - erts_smp_atomic_init_nob(&erts_bytes_out, 0); - erts_smp_atomic_init_nob(&erts_bytes_in, 0); - sys_init_io(); erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); @@ -2812,7 +2806,6 @@ void erts_init_io(int port_tab_size, } #if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) - static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable) 
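
call_driver_output/outputv above now charge I/O byte counts to the running scheduler (esdp->io.out) and fall back to a file-local atomic only when no scheduler data is available, so the hot path no longer hits one globally shared atomic. That shape, a cheap thread-local counter plus an atomic fallback, sketched in C11 with hypothetical names (account_out, my_bytes_out, fallback_bytes_out):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* per-thread counter: bumped without synchronization on the hot path */
    static _Thread_local uint64_t my_bytes_out;
    /* fallback for contexts that have no per-thread slot */
    static atomic_uint_fast64_t fallback_bytes_out;

    static int have_thread_slot = 1;   /* pretend: 0 in non-scheduler threads */

    static void account_out(uint64_t size)
    {
        if (have_thread_slot)
            my_bytes_out += size;
        else
            atomic_fetch_add_explicit(&fallback_bytes_out, size,
                                      memory_order_relaxed);
    }

    int main(void)
    {
        account_out(100);
        have_thread_slot = 0;
        account_out(50);
        printf("local=%llu fallback=%llu\n",
               (unsigned long long) my_bytes_out,
               (unsigned long long) atomic_load(&fallback_bytes_out));
        return 0;
    }

A reader asking for the totals then has to collect the per-thread values as well, which is what the gather_io_bytes machinery added later in this file does.
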
{ if (dp->lock) { @@ -2852,25 +2845,26 @@ static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable) } } -void erts_lcnt_enable_io_lock_count(int enable) -{ +void erts_lcnt_enable_io_lock_count(int enable) { erts_driver_t *dp; - int i, max = erts_ptab_max(&erts_port); + int ix, max = erts_ptab_max(&erts_port); + Port *prt; - for (i = 0; i < max; i++) { - Port *prt = erts_pix2port(i); - if (prt) + for (ix = 0; ix < max; ix++) { + if ((prt = erts_pix2port(ix)) != NULL) { lcnt_enable_port_lock_count(prt, enable); - } + } + } /* for all ports */ lcnt_enable_drv_lock_count(&vanilla_driver, enable); lcnt_enable_drv_lock_count(&spawn_driver, enable); lcnt_enable_drv_lock_count(&fd_driver, enable); - for (dp = driver_list; dp; dp = dp->next) + /* enable lock counting in all drivers */ + for (dp = driver_list; dp; dp = dp->next) { lcnt_enable_drv_lock_count(dp, enable); -} -#endif - + } +} /* enable/disable lock counting of ports */ +#endif /* defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) */ /* * Buffering of data when using line oriented I/O on ports */ @@ -3896,12 +3890,13 @@ port_sig_control(Port *prt, if (res == ERTS_PORT_OP_DONE) { Eterm msg; - Eterm *hp, *hp_start; + Eterm *hp; ErlHeapFragment *bp; ErlOffHeap *ohp; + ErtsHeapFactory factory; Process *rp; ErtsProcLocks rp_locks = 0; - Uint hsz; + Uint hsz, rsz; int control_flags; rp = erts_proc_lookup_raw(sigdp->caller); @@ -3910,17 +3905,19 @@ port_sig_control(Port *prt, control_flags = prt->control_flags; - hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; - hsz += port_control_result_size(control_flags, + rsz = port_control_result_size(control_flags, resp_bufp, &resp_size, &resp_buf[0]); + hsz = rsz + ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; - hp_start = hp = erts_alloc_message_heap(hsz, + + hp = erts_alloc_message_heap(hsz, &bp, &ohp, rp, &rp_locks); + erts_factory_message_init(&factory, rp, hp, bp); msg = write_port_control_result(control_flags, resp_bufp, @@ -3929,13 +3926,11 @@ port_sig_control(Port *prt, &hp, bp, ohp); + factory.hp = hp; queue_port_sched_op_reply(rp, &rp_locks, - hp_start, - hp, - hsz, - bp, + &factory, sigdp->ref, msg); @@ -4222,8 +4217,6 @@ port_sig_call(Port *prt, if (res == ERTS_PORT_OP_DONE) { Eterm msg; Eterm *hp; - ErlHeapFragment *bp; - ErlOffHeap *ohp; Process *rp; ErtsProcLocks rp_locks = 0; Sint hsz; @@ -4234,29 +4227,31 @@ port_sig_call(Port *prt, hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size); if (hsz >= 0) { - Eterm *hp_start; + ErlHeapFragment* bp; + ErlOffHeap* ohp; + ErtsHeapFactory factory; byte *endp; hsz += 3; /* ok tuple */ hsz += ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; - hp_start = hp = erts_alloc_message_heap(hsz, - &bp, - &ohp, - rp, - &rp_locks); + hp = erts_alloc_message_heap(hsz, + &bp, + &ohp, + rp, + &rp_locks); endp = (byte *) resp_bufp; - msg = erts_decode_ext(&hp, ohp, &endp); + erts_factory_message_init(&factory, rp, hp, bp); + msg = erts_decode_ext(&factory, &endp); if (is_value(msg)) { + hp = erts_produce_heap(&factory, + 3, + ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE); msg = TUPLE2(hp, am_ok, msg); - hp += 3; queue_port_sched_op_reply(rp, &rp_locks, - hp_start, - hp, - hsz, - bp, + &factory, sigdp->ref, msg); @@ -4264,8 +4259,6 @@ port_sig_call(Port *prt, erts_smp_proc_unlock(rp, rp_locks); goto done; } - if (bp) - free_message_buffer(bp); if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); } @@ -4342,10 +4335,11 @@ erts_port_call(Process* c_p, try_call_res = try_imm_drv_call(&try_call_state); switch (try_call_res) { case ERTS_TRY_IMM_DRV_CALL_OK: { - Eterm *hp, *hp_end; 
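
port_sig_control and port_sig_call above now build their reply terms through an ErtsHeapFactory (erts_factory_message_init, erts_produce_heap, erts_factory_trim_and_close) instead of tracking hp/hp_start/hsz by hand and shrinking with HRelease or erts_resize_message_buffer afterwards. The underlying builder shape, preallocate an over-estimated area, hand out words, give the unused tail back on close, in a standalone toy with hypothetical names (toy_factory and friends are not the ERTS implementation, which also tracks off-heap lists and supports undo):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        uintptr_t *start;   /* preallocated area */
        uintptr_t *hp;      /* next free word */
        uintptr_t *end;     /* one past the area */
    } toy_factory;

    static void factory_init(toy_factory *f, size_t words)
    {
        f->start = malloc(words * sizeof(uintptr_t));
        f->hp    = f->start;
        f->end   = f->start + words;
    }

    /* hand out 'need' words; a real factory would add a new fragment here
     * instead of asserting when the estimate turns out too small */
    static uintptr_t *factory_produce(toy_factory *f, size_t need)
    {
        uintptr_t *res = f->hp;
        assert(f->hp + need <= f->end);
        f->hp += need;
        return res;
    }

    /* give the unused tail back once the term is complete */
    static void factory_close(toy_factory *f)
    {
        size_t used = (size_t)(f->hp - f->start);
        f->start = realloc(f->start, (used ? used : 1) * sizeof(uintptr_t));
        f->hp    = f->start + used;
        f->end   = f->hp;
    }

    int main(void)
    {
        toy_factory f;
        factory_init(&f, 64);                     /* generous size estimate */
        uintptr_t *tup = factory_produce(&f, 3);
        tup[0] = 2; tup[1] = 111; tup[2] = 222;   /* pretend 2-tuple {111,222} */
        factory_close(&f);   /* unused words returned; 'tup' may move, do not reuse it */
        printf("used %zu words\n", (size_t)(f.hp - f.start));
        free(f.start);
        return 0;
    }
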
+ ErtsHeapFactory factory; Sint hsz; unsigned ret_flags = 0U; Eterm term; + Eterm* hp; res = call_driver_call(c_p->common.id, prt, @@ -4365,15 +4359,14 @@ erts_port_call(Process* c_p, if (hsz < 0) return ERTS_PORT_OP_BADARG; hsz += 3; - hp = HAlloc(c_p, hsz); - hp_end = hp + hsz; + erts_factory_proc_prealloc_init(&factory, c_p, hsz); endp = (byte *) resp_bufp; - term = erts_decode_ext(&hp, &MSO(c_p), &endp); + term = erts_decode_ext(&factory, &endp); if (term == THE_NON_VALUE) return ERTS_PORT_OP_BADARG; + hp = erts_produce_heap(&factory,3,0); *retvalp = TUPLE2(hp, am_ok, term); - hp += 3; - HRelease(c_p, hp_end, hp); + erts_factory_close(&factory); if (resp_bufp != &resp_buf[0] && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) driver_free(resp_bufp); @@ -4508,12 +4501,11 @@ port_sig_info(Port *prt, prt, sigdp->u.info.item); if (is_value(value)) { + ErtsHeapFactory factory; + erts_factory_message_init(&factory, NULL, hp, bp); queue_port_sched_op_reply(rp, &rp_locks, - hp_start, - hp, - hsz, - bp, + &factory, sigdp->ref, value); } @@ -4584,6 +4576,102 @@ erts_port_info(Process* c_p, } typedef struct { + Uint sched_id; + Eterm pid; + Uint32 refn[ERTS_REF_NUMBERS]; + erts_smp_atomic32_t refc; +} ErtsIOBytesReq; + +static void +reply_io_bytes(void *vreq) +{ + ErtsIOBytesReq *req = (ErtsIOBytesReq *) vreq; + Process *rp; + + rp = erts_proc_lookup(req->pid); + if (rp) { + ErlOffHeap *ohp = NULL; + ErlHeapFragment *bp = NULL; + ErtsProcLocks rp_locks; + Eterm ref, msg, ein, eout, *hp; + Uint64 in, out; + Uint hsz; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + Uint sched_id = esdp->no; + in = esdp->io.in; + out = esdp->io.out; + if (req->sched_id != sched_id) + rp_locks = 0; + else { + in += (Uint64) erts_atomic64_read_nob(&bytes_in); + out += (Uint64) erts_atomic64_read_nob(&bytes_out); + rp_locks = ERTS_PROC_LOCK_MAIN; + } + + hsz = 5 /* 4-tuple */ + REF_THING_SIZE; + + erts_bld_uint64(NULL, &hsz, in); + erts_bld_uint64(NULL, &hsz, out); + + hp = erts_alloc_message_heap(hsz, &bp, &ohp, rp, &rp_locks); + + ref = make_internal_ref(hp); + write_ref_thing(hp, req->refn[0], req->refn[1], req->refn[2]); + hp += REF_THING_SIZE; + + ein = erts_bld_uint64(&hp, NULL, in); + eout = erts_bld_uint64(&hp, NULL, out); + + msg = TUPLE4(hp, ref, make_small(sched_id), ein, eout); + erts_queue_message(rp, &rp_locks, bp, msg, NIL); + + if (req->sched_id == sched_id) + rp_locks &= ~ERTS_PROC_LOCK_MAIN; + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } + + if (erts_smp_atomic32_dec_read_nob(&req->refc) == 0) + erts_free(ERTS_ALC_T_IOB_REQ, req); +} + +Eterm +erts_request_io_bytes(Process *c_p) +{ + Uint *hp; + Eterm ref; + Uint32 *refn; + ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ, + sizeof(ErtsIOBytesReq)); + + hp = HAlloc(c_p, REF_THING_SIZE); + ref = erts_sched_make_ref_in_buffer(esdp, hp); + refn = internal_ref_numbers(ref); + + req->sched_id = esdp->no; + req->pid = c_p->common.id; + req->refn[0] = refn[0]; + req->refn[1] = refn[1]; + req->refn[2] = refn[2]; + erts_smp_atomic32_init_nob(&req->refc, + (erts_aint32_t) erts_no_schedulers); + +#ifdef ERTS_SMP + if (erts_no_schedulers > 1) + erts_schedule_multi_misc_aux_work(1, + erts_no_schedulers, + reply_io_bytes, + (void *) req); +#endif + + reply_io_bytes((void *) req); + + return ref; +} + + +typedef struct { int to; void *arg; } prt_one_lnk_data; @@ -5106,22 +5194,27 @@ cleanup_b2t_states(struct b2t_states__ *b2tsp) static int driver_deliver_term(Eterm to, ErlDrvTermData* data, int 
len) { +#define HEAP_EXTRA 200 #define ERTS_DDT_FAIL do { res = -1; goto done; } while (0) Uint need = 0; int depth = 0; int res; - Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL; ErlDrvTermData* ptr; ErlDrvTermData* ptr_end; DECLARE_ESTACK(stack); - Eterm mess = NIL; /* keeps compiler happy */ + Eterm mess; Process* rp = NULL; - ErlHeapFragment *bp = NULL; - ErlOffHeap *ohp; + ErtsHeapFactory factory; ErtsProcLocks rp_locks = 0; struct b2t_states__ b2t; - int scheduler = 1; /* Silence erroneous warning... */ + int scheduler; + int is_heap_need_limited = 1; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + ERTS_UNDEF(mess,NIL); + ERTS_UNDEF(scheduler,1); + + factory.mode = FACTORY_CLOSED; init_b2t_states(&b2t); /* @@ -5285,19 +5378,24 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) #ifdef DEBUG b2t.org_ext[b2t.ix] = ext; #endif - hsz = erts_binary2term_prepare(&b2t.state[b2t.ix++], ext, size); + hsz = erts_binary2term_prepare(&b2t.state[b2t.ix], ext, size); if (hsz < 0) ERTS_DDT_FAIL; /* Invalid data */ + b2t.state[b2t.ix++].heap_size = hsz; need += hsz; ptr += 2; depth++; + if (size > MAP_SMALL_MAP_LIMIT*3) { /* may contain big map */ + is_heap_need_limited = 0; + } break; } case ERL_DRV_MAP: { /* int */ ERTS_DDT_CHK_ENOUGH_ARGS(1); if ((int) ptr[0] < 0) ERTS_DDT_FAIL; if (ptr[0] > MAP_SMALL_MAP_LIMIT) { - need += hashmap_over_estimated_heap_size(ptr[0]); + need += HASHMAP_ESTIMATED_HEAP_SIZE(ptr[0]); + is_heap_need_limited = 0; } else { need += MAP_HEADER_FLATMAP_SZ + 1 + 2*ptr[0]; } @@ -5336,8 +5434,17 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) goto done; } - hp_start = hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks); - hp_end = hp + need; + /* Try copy directly to destination heap if we know there are no big maps */ + if (is_heap_need_limited) { + ErlOffHeap *ohp; + ErlHeapFragment* bp; + Eterm* hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks); + erts_factory_message_init(&factory, rp, hp, bp); + } + else { + erts_factory_message_init(&factory, NULL, NULL, + new_message_buffer(need)); + } /* * Interpret the instructions and build the term. 
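
The erts_request_io_bytes/reply_io_bytes pair a little further up allocates one request, schedules reply_io_bytes on every scheduler, and lets whichever replier decrements the reference count to zero free the request. A minimal sketch of that last-reference-frees fan-out, assuming C11 threads are available and using hypothetical names (io_req, reply):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <threads.h>

    typedef struct {
        atomic_int refc;   /* one reference per replier; the requester keeps none */
        /* ...request payload: requester id, reply reference, and so on... */
    } io_req;

    static int reply(void *arg)
    {
        io_req *req = arg;
        /* each replier would send its own counters back as a message here */
        if (atomic_fetch_sub(&req->refc, 1) == 1)  /* previous value 1: we were last */
            free(req);                             /* last replier reclaims the request */
        return 0;
    }

    int main(void)
    {
        enum { N = 4 };
        thrd_t tid[N];
        io_req *req = malloc(sizeof *req);
        atomic_init(&req->refc, N);
        for (int i = 0; i < N; i++)
            thrd_create(&tid[i], reply, req);
        for (int i = 0; i < N; i++)
            thrd_join(tid[i], NULL);
        /* req may already be gone here; results arrive as messages, not via req */
        puts("request reclaimed by the last replier");
        return 0;
    }
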
@@ -5358,13 +5465,15 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) case ERL_DRV_INT: /* signed int argument */ #if HALFWORD_HEAP - mess = erts_bld_sint64(&hp, NULL, (Sint64)ptr[0]); + erts_reserve_heap(&factory, BIG_NEED_SIZE(2)); + mess = erts_bld_sint64(&factory.hp, NULL, (Sint64)ptr[0]); #else + erts_reserve_heap(&factory, BIG_UINT_HEAP_SIZE); if (IS_SSMALL((Sint)ptr[0])) mess = make_small((Sint)ptr[0]); else { - mess = small_to_big((Sint)ptr[0], hp); - hp += BIG_UINT_HEAP_SIZE; + mess = small_to_big((Sint)ptr[0], factory.hp); + factory.hp += BIG_UINT_HEAP_SIZE; } #endif ptr++; @@ -5372,25 +5481,29 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) case ERL_DRV_UINT: /* unsigned int argument */ #if HALFWORD_HEAP - mess = erts_bld_uint64(&hp, NULL, (Uint64)ptr[0]); + erts_reserve_heap(&factory, BIG_NEED_FOR_BITS(64)); + mess = erts_bld_uint64(&factory.hp, NULL, (Uint64)ptr[0]); #else + erts_reserve_heap(&factory, BIG_UINT_HEAP_SIZE); if (IS_USMALL(0, (Uint)ptr[0])) mess = make_small((Uint)ptr[0]); else { - mess = uint_to_big((Uint)ptr[0], hp); - hp += BIG_UINT_HEAP_SIZE; + mess = uint_to_big((Uint)ptr[0], factory.hp); + factory.hp += BIG_UINT_HEAP_SIZE; } #endif ptr++; break; case ERL_DRV_INT64: /* pointer to unsigned 64-bit int argument */ - mess = erts_bld_sint64(&hp, NULL, *((Sint64 *) ptr[0])); + erts_reserve_heap(&factory, BIG_NEED_FOR_BITS(64)); + mess = erts_bld_sint64(&factory.hp, NULL, *((Sint64 *) ptr[0])); ptr++; break; case ERL_DRV_UINT64: /* pointer to unsigned 64-bit int argument */ - mess = erts_bld_uint64(&hp, NULL, *((Uint64 *) ptr[0])); + erts_reserve_heap(&factory, BIG_NEED_FOR_BITS(64)); + mess = erts_bld_uint64(&factory.hp, NULL, *((Uint64 *) ptr[0])); ptr++; break; @@ -5404,11 +5517,14 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) Uint size = ptr[1]; Uint offset = ptr[2]; - erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size); + if (esdp) + esdp->io.in += (Uint64) size; + else + erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) size); if (size <= ERL_ONHEAP_BIN_LIMIT) { - ErlHeapBin* hbp = (ErlHeapBin *) hp; - hp += heap_bin_size(size); + ErlHeapBin* hbp = (ErlHeapBin *) erts_produce_heap(&factory, + heap_bin_size(size), HEAP_EXTRA); hbp->thing_word = header_heap_bin(size); hbp->size = size; if (size > 0) { @@ -5417,18 +5533,18 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) mess = make_binary(hbp); } else { - ProcBin* pb = (ProcBin *) hp; + ProcBin* pb = (ProcBin *) erts_produce_heap(&factory, + PROC_BIN_SIZE, HEAP_EXTRA); driver_binary_inc_refc(b); /* caller will free binary */ pb->thing_word = HEADER_PROC_BIN; pb->size = size; - pb->next = ohp->first; - ohp->first = (struct erl_off_heap_header*)pb; + pb->next = factory.off_heap->first; + factory.off_heap->first = (struct erl_off_heap_header*)pb; pb->val = ErlDrvBinary2Binary(b); pb->bytes = ((byte*) b->orig_bytes) + offset; pb->flags = 0; mess = make_binary(pb); - hp += PROC_BIN_SIZE; - OH_OVERHEAD(ohp, pb->size / sizeof(Eterm)); + OH_OVERHEAD(factory.off_heap, pb->size / sizeof(Eterm)); } ptr += 3; break; @@ -5438,11 +5554,15 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) byte *bufp = (byte *) ptr[0]; Uint size = (Uint) ptr[1]; - erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size); + if (esdp) + esdp->io.in += (Uint64) size; + else + erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) size); if (size <= ERL_ONHEAP_BIN_LIMIT) { - ErlHeapBin* hbp = (ErlHeapBin *) hp; - hp += heap_bin_size(size); + ErlHeapBin* hbp = 
(ErlHeapBin *) erts_produce_heap(&factory, + heap_bin_size(size), + HEAP_EXTRA); hbp->thing_word = header_heap_bin(size); hbp->size = size; if (size > 0) { @@ -5457,16 +5577,16 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) ASSERT(bufp); erts_refc_init(&bp->refc, 1); sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size); - pbp = (ProcBin *) hp; - hp += PROC_BIN_SIZE; + pbp = (ProcBin *) erts_produce_heap(&factory, + PROC_BIN_SIZE, HEAP_EXTRA); pbp->thing_word = HEADER_PROC_BIN; pbp->size = size; - pbp->next = ohp->first; - ohp->first = (struct erl_off_heap_header*)pbp; + pbp->next = factory.off_heap->first; + factory.off_heap->first = (struct erl_off_heap_header*)pbp; pbp->val = bp; pbp->bytes = (byte*) bp->orig_bytes; pbp->flags = 0; - OH_OVERHEAD(ohp, pbp->size / sizeof(Eterm)); + OH_OVERHEAD(factory.off_heap, pbp->size / sizeof(Eterm)); mess = make_binary(pbp); } ptr += 2; @@ -5474,14 +5594,19 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) } case ERL_DRV_STRING: /* char*, length */ - erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) ptr[1]); - mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], NIL); + if (esdp) + esdp->io.in += (Uint64) ptr[1]; + else + erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) ptr[1]); + erts_reserve_heap(&factory, 2*ptr[1]); + mess = buf_to_intlist(&factory.hp, (char*)ptr[0], ptr[1], NIL); ptr += 2; break; case ERL_DRV_STRING_CONS: /* char*, length */ mess = ESTACK_POP(stack); - mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], mess); + erts_reserve_heap(&factory, 2*ptr[1]); + mess = buf_to_intlist(&factory.hp, (char*)ptr[0], ptr[1], mess); ptr += 2; break; @@ -5490,11 +5615,12 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) mess = ESTACK_POP(stack); i--; + erts_reserve_heap(&factory, 2*i); while(i > 0) { Eterm hd = ESTACK_POP(stack); - mess = CONS(hp, hd, mess); - hp += 2; + mess = CONS(factory.hp, hd, mess); + factory.hp += 2; i--; } ptr++; @@ -5503,13 +5629,12 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) case ERL_DRV_TUPLE: { /* int */ int size = (int)ptr[0]; - Eterm* tp = hp; + Eterm* tp = erts_produce_heap(&factory, size+1, HEAP_EXTRA); *tp = make_arityval(size); mess = make_tuple(tp); tp += size; /* point at last element */ - hp = tp+1; /* advance "heap" pointer */ while(size--) { *tp-- = ESTACK_POP(stack); @@ -5525,20 +5650,22 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) case ERL_DRV_FLOAT: { /* double * */ FloatDef f; + Eterm* fp = erts_produce_heap(&factory, FLOAT_SIZE_OBJECT, HEAP_EXTRA); - mess = make_float(hp); + mess = make_float(fp); f.fd = *((double *) ptr[0]); if (!erts_isfinite(f.fd)) ERTS_DDT_FAIL; - PUT_DOUBLE(f, hp); - hp += FLOAT_SIZE_OBJECT; + PUT_DOUBLE(f, fp); ptr++; break; } case ERL_DRV_EXT2TERM: /* char *ext, int size */ ASSERT(b2t.org_ext[b2t.ix] == (byte *) ptr[0]); - mess = erts_binary2term_create(&b2t.state[b2t.ix++], &hp, ohp); + + erts_reserve_heap(&factory, b2t.state[b2t.ix].heap_size); + mess = erts_binary2term_create(&b2t.state[b2t.ix++], &factory); if (mess == THE_NON_VALUE) ERTS_DDT_FAIL; ptr += 2; @@ -5548,41 +5675,32 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) int size = (int)ptr[0]; if (size > MAP_SMALL_MAP_LIMIT) { int ix = 2*size; - ErtsHeapFactory factory; - Eterm* leafs = hp; - - hp += 2*size; - while(ix--) { *--hp = ESTACK_POP(stack); } + Eterm* leafs; - hp += 2*size; - factory.p = NULL; - factory.hp = hp; - /* We assume heap will suffice (see hashmap_over_estimated_heap_size) */ + 
erts_produce_heap(&factory, ix, HEAP_EXTRA); + leafs = factory.hp; + while(ix--) { *--leafs = ESTACK_POP(stack); } mess = erts_hashmap_from_array(&factory, leafs, size, 1); - if (is_non_value(mess)) ERTS_DDT_FAIL; - - hp = factory.hp; } else { - Eterm* tp = hp; Eterm* vp; flatmap_t *mp; + Eterm* tp = erts_produce_heap(&factory, + 2*size + 1 + MAP_HEADER_FLATMAP_SZ, + HEAP_EXTRA); *tp = make_arityval(size); - hp += 1 + size; - mp = (flatmap_t*)hp; + mp = (flatmap_t*) (tp + 1 + size); mp->thing_word = MAP_HEADER_FLATMAP; mp->size = size; mp->keys = make_tuple(tp); mess = make_flatmap(mp); - hp += MAP_HEADER_FLATMAP_SZ + size; - tp += size; /* point at last key */ - vp = hp - 1; /* point at last value */ + vp = factory.hp - 1; /* point at last value */ while(size--) { *vp-- = ESTACK_POP(stack); @@ -5605,25 +5723,16 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) if (res > 0) { mess = ESTACK_POP(stack); /* get resulting value */ - if (bp) - bp = erts_resize_message_buffer(bp, hp - hp_start, &mess, 1); - else { - ASSERT(hp); - HRelease(rp, hp_end, hp); - } + erts_factory_close(&factory); /* send message */ - erts_queue_message(rp, &rp_locks, bp, mess, am_undefined); + erts_queue_message(rp, &rp_locks, factory.heap_frags, mess, am_undefined); } else { if (b2t.ix > b2t.used) b2t.used = b2t.ix; for (b2t.ix = 0; b2t.ix < b2t.used; b2t.ix++) erts_binary2term_abort(&b2t.state[b2t.ix]); - if (bp) - free_message_buffer(bp); - else if (hp) { - HRelease(rp, hp_end, hp); - } + erts_factory_undo(&factory); } if (rp) { if (rp_locks) @@ -5635,6 +5744,7 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) DESTROY_ESTACK(stack); return res; #undef ERTS_DDT_FAIL +#undef HEAP_EXTRA } static ERTS_INLINE int @@ -5763,6 +5873,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, { erts_aint32_t state; Port* prt = erts_drvport2port_state(ix, &state); + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -5773,7 +5884,10 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, return 0; prt->bytes_in += (hlen + len); - erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len)); + if (esdp) + esdp->io.in += (Uint64) (hlen + len); + else + erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len)); if (state & ERTS_PORT_SFLG_DISTRIBUTION) { return erts_net_message(prt, prt->dist_entry, @@ -5798,6 +5912,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, { erts_aint32_t state; Port* prt = erts_drvport2port_state(ix, &state); + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -5809,7 +5924,10 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, return 0; prt->bytes_in += (hlen + len); - erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len)); + if (esdp) + esdp->io.in += (Uint64) (hlen + len); + else + erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len)); if (state & ERTS_PORT_SFLG_DISTRIBUTION) { if (len == 0) return erts_net_message(prt, @@ -5849,6 +5967,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, ErlDrvBinary** binv; Port* prt; erts_aint32_t state; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -5887,7 +6006,10 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, /* XXX handle distribution !!! 
*/ prt->bytes_in += (hlen + size); - erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + size)); + if (esdp) + esdp->io.in += (Uint64) (hlen + size); + else + erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + size)); deliver_vec_message(prt, ERTS_PORT_GET_CONNECTED(prt), hbuf, hlen, binv, iov, n, size); return 0; diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index cd53069872..78eb0bfe2b 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -20,6 +20,22 @@ #ifndef __SYS_H__ #define __SYS_H__ +#ifdef ERTS_INLINE +# ifndef ERTS_CAN_INLINE +# define ERTS_CAN_INLINE 1 +# endif +#else +# if defined(__GNUC__) +# define ERTS_CAN_INLINE 1 +# define ERTS_INLINE __inline__ +# elif defined(__WIN32__) +# define ERTS_CAN_INLINE 1 +# define ERTS_INLINE __inline +# else +# define ERTS_CAN_INLINE 0 +# define ERTS_INLINE +# endif +#endif #if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK) # undef ERTS_CAN_INLINE @@ -94,23 +110,6 @@ typedef int ErtsSysFdType; typedef ERTS_SYS_FD_TYPE ErtsSysFdType; #endif -#ifdef ERTS_INLINE -# ifndef ERTS_CAN_INLINE -# define ERTS_CAN_INLINE 1 -# endif -#else -# if defined(__GNUC__) -# define ERTS_CAN_INLINE 1 -# define ERTS_INLINE __inline__ -# elif defined(__WIN32__) -# define ERTS_CAN_INLINE 1 -# define ERTS_INLINE __inline -# else -# define ERTS_CAN_INLINE 0 -# define ERTS_INLINE -# endif -#endif - #if !defined(__GNUC__) # define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0 #elif !defined(__GNUC_MINOR__) diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c index 8bffdedb2b..fe7c5826f5 100644 --- a/erts/emulator/beam/time.c +++ b/erts/emulator/beam/time.c @@ -529,6 +529,9 @@ erts_create_timer_wheel(ErtsSchedulerData *esdp) tiw->next_timeout_time = mtime + ERTS_MONOTONIC_DAY; tiw->sentinel.next = &tiw->sentinel; tiw->sentinel.prev = &tiw->sentinel; + tiw->sentinel.u.func.timeout = NULL; + tiw->sentinel.u.func.cancel = NULL; + tiw->sentinel.u.func.arg = NULL; return tiw; } @@ -624,6 +627,41 @@ erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p) } } +void +erts_twheel_debug_foreach(ErtsTimerWheel *tiw, + void (*tclbk)(void *), + void (*func)(void *, + ErtsMonotonicTime, + void *), + void *arg) +{ + ErtsTWheelTimer *tmr; + int ix; + + tmr = tiw->sentinel.next; + while (tmr != &tiw->sentinel) { + if (tmr->u.func.timeout == tclbk) + (*func)(arg, tmr->timeout_pos, tmr->u.func.arg); + tmr = tmr->next; + } + + for (tmr = tiw->at_once.head; tmr; tmr = tmr->next) { + if (tmr->u.func.timeout == tclbk) + (*func)(arg, tmr->timeout_pos, tmr->u.func.arg); + } + + for (ix = 0; ix < ERTS_TIW_SIZE; ix++) { + tmr = tiw->w[ix]; + if (tmr) { + do { + if (tmr->u.func.timeout == tclbk) + (*func)(arg, tmr->timeout_pos, tmr->u.func.arg); + tmr = tmr->next; + } while (tmr != tiw->w[ix]); + } + } +} + #ifdef ERTS_TW_DEBUG void erts_p_slpq(void) { diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index e001f31932..119d6c097d 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -835,6 +835,10 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define TCP_ADDF_PENDING_SHUT_RDWR 32 /* Call shutdown(sock, SHUT_RDWR) when queue empties */ #define TCP_ADDF_PENDING_SHUTDOWN \ (TCP_ADDF_PENDING_SHUT_WR | TCP_ADDF_PENDING_SHUT_RDWR) +#define TCP_ADDF_SHOW_ECONNRESET 64 /* Tell user about incoming RST */ +#define TCP_ADDF_DELAYED_ECONNRESET 128 /* An ECONNRESET error occured on send or shutdown */ +#define 
TCP_ADDF_SHUTDOWN_WR_DONE 256 /* A shutdown(sock, SHUT_WR) or SHUT_RDWR was made */ +#define TCP_ADDF_LINGER_ZERO 512 /* Discard driver queue on port close */ /* *_REQ_* replies */ #define INET_REP_ERROR 0 @@ -879,6 +883,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define INET_LOPT_MSGQ_HIWTRMRK 36 /* set local msgq high watermark */ #define INET_LOPT_MSGQ_LOWTRMRK 37 /* set local msgq low watermark */ #define INET_LOPT_NETNS 38 /* Network namespace pathname */ +#define INET_LOPT_TCP_SHOW_ECONNRESET 39 /* tell user about incoming RST */ /* SCTP options: a separate range, from 100: */ #define SCTP_OPT_RTOINFO 100 #define SCTP_OPT_ASSOCINFO 101 @@ -6138,7 +6143,12 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) desc->active_count = 0; if ((desc->stype == SOCK_STREAM) && (desc->active != INET_PASSIVE) && (desc->state == INET_STATE_CLOSED)) { - tcp_closed_message((tcp_descriptor *) desc); + tcp_descriptor *tdesc = (tcp_descriptor *) desc; + if (tdesc->tcp_add_flags & TCP_ADDF_DELAYED_ECONNRESET) { + tdesc->tcp_add_flags &= ~TCP_ADDF_DELAYED_ECONNRESET; + tcp_error_message(tdesc, ECONNRESET); + } + tcp_closed_message(tdesc); if (desc->exitf) { driver_exit(desc->port, 0); return 0; /* Give up on this socket, descriptor lost */ @@ -6255,6 +6265,16 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) continue; #endif + case INET_LOPT_TCP_SHOW_ECONNRESET: + if (desc->sprotocol == IPPROTO_TCP) { + tcp_descriptor* tdesc = (tcp_descriptor*) desc; + if (ival) + tdesc->tcp_add_flags |= TCP_ADDF_SHOW_ECONNRESET; + else + tdesc->tcp_add_flags &= ~TCP_ADDF_SHOW_ECONNRESET; + } + continue; + case INET_OPT_REUSEADDR: #ifdef __WIN32__ continue; /* Bjorn says */ @@ -6299,6 +6319,13 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) arg_sz = sizeof(li_val); DEBUGF(("inet_set_opts(%ld): s=%d, SO_LINGER=%d,%d", (long)desc->port, desc->s, li_val.l_onoff,li_val.l_linger)); + if (desc->sprotocol == IPPROTO_TCP) { + tcp_descriptor* tdesc = (tcp_descriptor*) desc; + if (li_val.l_onoff && li_val.l_linger == 0) + tdesc->tcp_add_flags |= TCP_ADDF_LINGER_ZERO; + else + tdesc->tcp_add_flags &= ~TCP_ADDF_LINGER_ZERO; + } break; case INET_OPT_PRIORITY: @@ -7234,6 +7261,17 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc, continue; #endif + case INET_LOPT_TCP_SHOW_ECONNRESET: + if (desc->sprotocol == IPPROTO_TCP) { + tcp_descriptor* tdesc = (tcp_descriptor*) desc; + *ptr++ = opt; + ival = !!(tdesc->tcp_add_flags & TCP_ADDF_SHOW_ECONNRESET); + put_int32(ival, ptr); + } else { + TRUNCATE_TO(0,ptr); + } + continue; + case INET_OPT_PRIORITY: #ifdef SO_PRIORITY type = SO_PRIORITY; @@ -9077,6 +9115,11 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s, copy_desc->low = desc->low; copy_desc->send_timeout = desc->send_timeout; copy_desc->send_timeout_close = desc->send_timeout_close; + + if (desc->tcp_add_flags & TCP_ADDF_SHOW_ECONNRESET) + copy_desc->tcp_add_flags |= TCP_ADDF_SHOW_ECONNRESET; + else + copy_desc->tcp_add_flags &= ~TCP_ADDF_SHOW_ECONNRESET; /* The new port will be linked and connected to the original caller */ port = driver_create_port(port, owner, "tcp_inet", (ErlDrvData) copy_desc); @@ -9438,7 +9481,11 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd, if (desc->tcp_add_flags & TCP_ADDF_DELAYED_CLOSE_RECV) { desc->tcp_add_flags &= ~(TCP_ADDF_DELAYED_CLOSE_RECV| TCP_ADDF_DELAYED_CLOSE_SEND); - return ctl_reply(INET_REP_ERROR, "closed", 6, rbuf, rsize); + if (desc->tcp_add_flags & 
TCP_ADDF_DELAYED_ECONNRESET) { + desc->tcp_add_flags &= ~TCP_ADDF_DELAYED_ECONNRESET; + return ctl_reply(INET_REP_ERROR, "econnreset", 10, rbuf, rsize); + } else + return ctl_reply(INET_REP_ERROR, "closed", 6, rbuf, rsize); } return ctl_error(ENOTCONN, rbuf, rsize); } @@ -9499,6 +9546,8 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd, return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize); } if (IS_SOCKET_ERROR(sock_shutdown(INETP(desc)->s, how))) { + if (how != TCP_SHUT_RD) + desc->tcp_add_flags |= TCP_ADDF_SHUTDOWN_WR_DONE; return ctl_error(sock_errno(), rbuf, rsize); } else { return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize); @@ -9633,7 +9682,13 @@ static void tcp_inet_commandv(ErlDrvData e, ErlIOVec* ev) if (!IS_CONNECTED(INETP(desc))) { if (desc->tcp_add_flags & TCP_ADDF_DELAYED_CLOSE_SEND) { desc->tcp_add_flags &= ~TCP_ADDF_DELAYED_CLOSE_SEND; - inet_reply_error_am(INETP(desc), am_closed); + if (desc->tcp_add_flags & TCP_ADDF_DELAYED_ECONNRESET) { + /* Don't clear flag. Leave it enabled for the next receive + * operation. + */ + inet_reply_error(INETP(desc), ECONNRESET); + } else + inet_reply_error_am(INETP(desc), am_closed); } else inet_reply_error(INETP(desc), ENOTCONN); @@ -9652,6 +9707,8 @@ static void tcp_inet_flush(ErlDrvData e) /* Discard send queue to avoid hanging port (OTP-7615) */ tcp_clear_output(desc); } + if (desc->tcp_add_flags & TCP_ADDF_LINGER_ZERO) + tcp_clear_output(desc); } static void tcp_inet_process_exit(ErlDrvData e, ErlDrvMonitor *monitorp) @@ -10014,7 +10071,10 @@ static int tcp_recv(tcp_descriptor* desc, int request_len) int err = sock_errno(); if (err == ECONNRESET) { DEBUGF((" => detected close (connreset)\r\n")); - return tcp_recv_closed(desc); + if (desc->tcp_add_flags & TCP_ADDF_SHOW_ECONNRESET) + return tcp_recv_error(desc, err); + else + return tcp_recv_closed(desc); } if (err == ERRNO_BLOCK) { DEBUGF((" => would block\r\n")); @@ -10226,7 +10286,19 @@ static void tcp_inet_event(ErlDrvData e, ErlDrvEvent event) if (netEv.lNetworkEvents & FD_CLOSE) { /* error in err = netEv.iErrorCode[FD_CLOSE_BIT] */ DEBUGF(("Detected close in %s, line %d\r\n", __FILE__, __LINE__)); - tcp_recv_closed(desc); + if (desc->tcp_add_flags & TCP_ADDF_SHOW_ECONNRESET) { + err = netEv.iErrorCode[FD_CLOSE_BIT]; + if (err == ECONNRESET) + tcp_recv_error(desc, err); + else if (err == ECONNABORTED && IS_CONNECTED(INETP(desc))) { + /* translate this error to ECONNRESET */ + tcp_recv_error(desc, ECONNRESET); + } + else + tcp_recv_closed(desc); + } + else + tcp_recv_closed(desc); } DEBUGF(("tcp_inet_event(%ld) }\r\n", (long)desc->inet.port)); return; @@ -10535,6 +10607,9 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event) static int tcp_send_or_shutdown_error(tcp_descriptor* desc, int err) { + int show_econnreset = (err == ECONNRESET + && desc->tcp_add_flags & TCP_ADDF_SHOW_ECONNRESET); + /* * If the port is busy, we must do some clean-up before proceeding. */ @@ -10550,14 +10625,21 @@ static int tcp_send_or_shutdown_error(tcp_descriptor* desc, int err) /* * We used to handle "expected errors" differently from unexpected ones. - * Now we handle all errors in the same way. We just have to distinguish - * between passive and active sockets. + * Now we handle all errors in the same way (unless the show_econnreset + * socket option is enabled). We just have to distinguish between passive + * and active sockets. 
*/ DEBUGF(("driver_failure_eof(%ld) in %s, line %d\r\n", (long)desc->inet.port, __FILE__, __LINE__)); if (desc->inet.active) { - tcp_closed_message(desc); - inet_reply_error_am(INETP(desc), am_closed); + if (show_econnreset) { + tcp_error_message(desc, err); + tcp_closed_message(desc); + inet_reply_error(INETP(desc), err); + } else { + tcp_closed_message(desc); + inet_reply_error_am(INETP(desc), am_closed); + } if (desc->inet.exitf) driver_exit(desc->inet.port, 0); else @@ -10569,7 +10651,10 @@ static int tcp_send_or_shutdown_error(tcp_descriptor* desc, int err) erl_inet_close(INETP(desc)); if (desc->inet.caller) { - inet_reply_error_am(INETP(desc), am_closed); + if (show_econnreset) + inet_reply_error(INETP(desc), err); + else + inet_reply_error_am(INETP(desc), am_closed); } else { /* No blocking send op to reply to right now. @@ -10586,12 +10671,46 @@ static int tcp_send_or_shutdown_error(tcp_descriptor* desc, int err) * in the receive operation. */ desc->tcp_add_flags |= TCP_ADDF_DELAYED_CLOSE_RECV; + + if (show_econnreset) { + /* Return {error, econnreset} instead of {error, closed} + * on send or receive operations. + */ + desc->tcp_add_flags |= TCP_ADDF_DELAYED_ECONNRESET; + } } return -1; } static int tcp_send_error(tcp_descriptor* desc, int err) { + /* EPIPE errors usually occur in one of three ways: + * 1. We write to a socket when we've already shutdown() the write side. On + * Windows the error returned for this is ESHUTDOWN rather than EPIPE. + * 2. The TCP peer sends us an RST through no fault of our own (perhaps + * by aborting the connection using SO_LINGER) and we then attempt + * to write to the socket. On Linux and Windows we would actually + * receive an ECONNRESET error for this, but on the BSDs, Darwin, + * Illumos and presumably Solaris, it's an EPIPE. + * 3. We cause the TCP peer to send us an RST by writing to a socket + * after we receive a FIN from them. Our first write will be + * successful, but if the they have closed the connection (rather + * than just shutting down the write side of it) this will cause their + * OS to send us an RST. Then, when we attempt to write to the socket + * a second time, we will get an EPIPE error. On Windows we get an + * ECONNABORTED. + * + * What we are going to do here is to treat all EPIPE messages that aren't + * of type 1 as ECONNRESET errors. This will allow users who have the + * show_econnreset socket option enabled to receive {error, econnreset} on + * both send and recv operations to indicate that an RST has been received. 
+ */ +#ifdef __WIN_32__ + if (err == ECONNABORTED) + err = ECONNRESET; +#endif + if (err == EPIPE && !(desc->tcp_add_flags & TCP_ADDF_SHUTDOWN_WR_DONE)) + err = ECONNRESET; return tcp_send_or_shutdown_error(desc, err); } @@ -10811,6 +10930,8 @@ static void tcp_shutdown_async(tcp_descriptor* desc) TCP_SHUT_WR : TCP_SHUT_RDWR; if (IS_SOCKET_ERROR(sock_shutdown(INETP(desc)->s, how))) tcp_shutdown_error(desc, sock_errno()); + else + desc->tcp_add_flags |= TCP_ADDF_SHUTDOWN_WR_DONE; } #ifdef __OSE__ @@ -12172,6 +12293,8 @@ static MultiTimerData *add_multi_timer(MultiTimerData **first, ErlDrvPort port, void (*timeout_fun)(ErlDrvData drv_data, ErlDrvTermData caller)) { +#define eq_mega(a, b) ((a)->when.megasecs == (b)->when.megasecs) +#define eq_sec(a, b) ((a)->when.secs == (b)->when.secs) MultiTimerData *mtd, *p, *s; mtd = ALLOC(sizeof(MultiTimerData)); absolute_timeout(timeout, &(mtd->when)); @@ -12183,23 +12306,17 @@ static MultiTimerData *add_multi_timer(MultiTimerData **first, ErlDrvPort port, break; } } - if (!p || p->when.megasecs > mtd->when.megasecs) { - goto found; - } - for (; p!= NULL; s = p, p = p->next) { + for (; p!= NULL && eq_mega(p, mtd); s = p, p = p->next) { if (p->when.secs >= mtd->when.secs) { break; } } - if (!p || p->when.secs > mtd->when.secs) { - goto found; - } - for (; p!= NULL; s = p, p = p->next) { + for (; p!= NULL && eq_mega(p, mtd) && eq_sec(p, mtd); s = p, p = p->next) { if (p->when.microsecs >= mtd->when.microsecs) { break; } } - found: + if (!p) { if (!s) { *first = mtd; @@ -12225,6 +12342,8 @@ static MultiTimerData *add_multi_timer(MultiTimerData **first, ErlDrvPort port, } return mtd; } +#undef eq_mega +#undef eq_sec diff --git a/erts/emulator/sys/unix/erl_child_setup.c b/erts/emulator/sys/unix/erl_child_setup.c index d050748703..e1d2f66b7e 100644 --- a/erts/emulator/sys/unix/erl_child_setup.c +++ b/erts/emulator/sys/unix/erl_child_setup.c @@ -107,14 +107,16 @@ main(int argc, char *argv[]) if (from <= to) { int spfd = system_properties_fd(); for (i = from; i <= to; i++) { - if (i != spfd) + if (i != spfd) { (void) close(i); + } } } -#else - for (i = from; i <= to; i++) +#else /* !__ANDROID__ */ + for (i = from; i <= to; i++) { (void) close(i); -#endif + } +#endif /* HAVE_CLOSEFROM */ if (!(argv[CS_ARGV_WD_IX][0] == '.' && argv[CS_ARGV_WD_IX][1] == '\0') && chdir(argv[CS_ARGV_WD_IX]) < 0) diff --git a/erts/emulator/test/long_timers_test.erl b/erts/emulator/test/long_timers_test.erl index f381332b51..8dd960ffd4 100644 --- a/erts/emulator/test/long_timers_test.erl +++ b/erts/emulator/test/long_timers_test.erl @@ -28,7 +28,7 @@ -define(MAX_TIMEOUT, 60). % Minutes --define(MAX_LATE_MS, 10*1000). % Milliseconds +-define(MAX_LATE_MS, 15*1000). % Milliseconds -define(REG_NAME, '___LONG___TIMERS___TEST___SERVER___'). -define(DRV_NAME, timer_driver). @@ -196,8 +196,8 @@ driver(Timeout) -> end. bif_timer(Timeout) -> - Tmr = erlang:start_timer(Timeout, self(), ok), Start = erlang:monotonic_time(), + Tmr = erlang:start_timer(Timeout, self(), ok), receive {get_result, ?REG_NAME} -> ?REG_NAME ! #timeout_rec{pid = self(), diff --git a/erts/emulator/test/module_info_SUITE.erl b/erts/emulator/test/module_info_SUITE.erl index 1125cf3072..25ba6d1787 100644 --- a/erts/emulator/test/module_info_SUITE.erl +++ b/erts/emulator/test/module_info_SUITE.erl @@ -24,7 +24,7 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2,end_per_testcase/2, - exports/1,functions/1,native/1,info/1]). 
+ exports/1,functions/1,deleted/1,native/1,info/1]). %%-compile(native). @@ -51,9 +51,8 @@ init_per_group(_GroupName, Config) -> end_per_group(_GroupName, Config) -> Config. - modules() -> - [exports, functions, native, info]. + [exports, functions, deleted, native, info]. init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> Dog = ?t:timetrap(?t:minutes(3)), @@ -81,43 +80,66 @@ all_functions() -> %% Test that the list of exported functions from this module is correct. exports(Config) when is_list(Config) -> - ?line All = all_exported(), - ?line All = lists:sort(?MODULE:module_info(exports)), - ?line (catch ?MODULE:foo()), - ?line All = lists:sort(?MODULE:module_info(exports)), + All = all_exported(), + All = lists:sort(?MODULE:module_info(exports)), + (catch ?MODULE:foo()), + All = lists:sort(?MODULE:module_info(exports)), ok. %% Test that the list of exported functions from this module is correct. functions(Config) when is_list(Config) -> - ?line All = all_functions(), - ?line All = lists:sort(?MODULE:module_info(functions)), + All = all_functions(), + All = lists:sort(?MODULE:module_info(functions)), + ok. + +%% Test that deleted modules cause badarg +deleted(Config) when is_list(Config) -> + Data = ?config(data_dir, Config), + File = filename:join(Data, "module_info_test"), + {ok,module_info_test,Code} = compile:file(File, [binary]), + {module,module_info_test} = erlang:load_module(module_info_test, Code), + 17 = module_info_test:f(), + [_|_] = erlang:get_module_info(module_info_test, attributes), + [_|_] = erlang:get_module_info(module_info_test), + + %% first delete it + true = erlang:delete_module(module_info_test), + {'EXIT',{undef, _}} = (catch module_info_test:f()), + {'EXIT',{badarg, _}} = (catch erlang:get_module_info(module_info_test,attributes)), + {'EXIT',{badarg, _}} = (catch erlang:get_module_info(module_info_test)), + + %% then purge it + true = erlang:purge_module(module_info_test), + {'EXIT',{undef, _}} = (catch module_info_test:f()), + {'EXIT',{badarg, _}} = (catch erlang:get_module_info(module_info_test,attributes)), + {'EXIT',{badarg, _}} = (catch erlang:get_module_info(module_info_test)), ok. %% Test that the list of exported functions from this module is correct. %% Verify that module_info(native) works. native(Config) when is_list(Config) -> - ?line All = all_functions(), - ?line case ?MODULE:module_info(native_addresses) of - [] -> - ?line false = ?MODULE:module_info(native), - {comment,"no native functions"}; - L -> - ?line true = ?MODULE:module_info(native), - %% Verify that all functions have unique addresses. - ?line S0 = sofs:set(L, [{name,arity,addr}]), - ?line S1 = sofs:projection({external,fun ?MODULE:native_proj/1}, S0), - ?line S2 = sofs:relation_to_family(S1), - ?line S3 = sofs:family_specification(fun ?MODULE:native_filter/1, S2), - ?line 0 = sofs:no_elements(S3), - ?line S4 = sofs:range(S1), - - %% Verify that the set of function with native addresses - %% is a subset of all functions in the module. - ?line AllSet = sofs:set(All, [{name,arity}]), - ?line true = sofs:is_subset(S4, AllSet), - - {comment,integer_to_list(sofs:no_elements(S0))++" native functions"} - end. + All = all_functions(), + case ?MODULE:module_info(native_addresses) of + [] -> + false = ?MODULE:module_info(native), + {comment,"no native functions"}; + L -> + true = ?MODULE:module_info(native), + %% Verify that all functions have unique addresses. 
+ S0 = sofs:set(L, [{name,arity,addr}]), + S1 = sofs:projection({external,fun ?MODULE:native_proj/1}, S0), + S2 = sofs:relation_to_family(S1), + S3 = sofs:family_specification(fun ?MODULE:native_filter/1, S2), + 0 = sofs:no_elements(S3), + S4 = sofs:range(S1), + + %% Verify that the set of function with native addresses + %% is a subset of all functions in the module. + AllSet = sofs:set(All, [{name,arity}]), + true = sofs:is_subset(S4, AllSet), + + {comment,integer_to_list(sofs:no_elements(S0))++" native functions"} + end. native_proj({Name,Arity,Addr}) -> {Addr,{Name,Arity}}. diff --git a/erts/emulator/test/module_info_SUITE_data/module_info_test.erl b/erts/emulator/test/module_info_SUITE_data/module_info_test.erl new file mode 100644 index 0000000000..f045f38464 --- /dev/null +++ b/erts/emulator/test/module_info_SUITE_data/module_info_test.erl @@ -0,0 +1,24 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2015. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% + +-module(module_info_test). +-export([f/0]). + +f() -> + 17. diff --git a/erts/emulator/test/node_container_SUITE.erl b/erts/emulator/test/node_container_SUITE.erl index 2f505893b4..479e6f200a 100644 --- a/erts/emulator/test/node_container_SUITE.erl +++ b/erts/emulator/test/node_container_SUITE.erl @@ -73,6 +73,8 @@ init_per_suite(Config) -> Config. end_per_suite(_Config) -> + erts_debug:set_internal_state(available_internal_state, true), + erts_debug:set_internal_state(node_tab_delayed_delete, -1), %% restore original value available_internal_state(false). init_per_group(_GroupName, Config) -> @@ -419,6 +421,8 @@ node_table_gc(doc) -> ["Tests that node tables are garbage collected."]; node_table_gc(suite) -> []; node_table_gc(Config) when is_list(Config) -> + erts_debug:set_internal_state(available_internal_state, true), + erts_debug:set_internal_state(node_tab_delayed_delete, 0), ?line PreKnown = nodes(known), ?line ?t:format("PreKnown = ~p~n", [PreKnown]), ?line make_node_garbage(0, 200000, 1000, []), @@ -428,6 +432,7 @@ node_table_gc(Config) when is_list(Config) -> ?line ?t:format("PostAreas = ~p~n", [PostAreas]), ?line true = length(PostKnown) =< length(PreKnown), ?line nc_refc_check(node()), + erts_debug:set_internal_state(node_tab_delayed_delete, -1), %% restore original value ?line ok. 
make_node_garbage(N, L, I, Ps) when N < L -> @@ -579,6 +584,8 @@ node_controller_refc(doc) -> "as they should for entities controlling a connections."]; node_controller_refc(suite) -> []; node_controller_refc(Config) when is_list(Config) -> + erts_debug:set_internal_state(available_internal_state, true), + erts_debug:set_internal_state(node_tab_delayed_delete, 0), ?line NodeFirstName = get_nodefirstname(), ?line ?line {ok, Node} = start_node(NodeFirstName), ?line true = lists:member(Node, nodes()), @@ -606,6 +613,7 @@ node_controller_refc(Config) when is_list(Config) -> ?line false = get_dist_references(Node), ?line false = lists:member(Node, nodes(known)), ?line nc_refc_check(node()), + erts_debug:set_internal_state(node_tab_delayed_delete, -1), %% restore original value ?line ok. %% @@ -686,9 +694,6 @@ timer_refc(doc) -> "as they should for data stored in bif timers."]; timer_refc(suite) -> []; timer_refc(Config) when is_list(Config) -> - {skipped, "Test needs to be UPDATED for new timer implementation"}. - -timer_refc_test(Config) when is_list(Config) -> ?line RNode = {get_nodename(), 1}, ?line RPid = mk_pid(RNode, 4711, 2), ?line RPort = mk_port(RNode, 4711), @@ -992,23 +997,32 @@ check_nd_refc({ThisNodeName, ThisCreation}, NodeRefs, DistRefs, Fail) -> check_refc(ThisNodeName,ThisCreation,Table,EntryList) when is_list(EntryList) -> lists:foreach( fun ({Entry, Refc, ReferrerList}) -> - FoundRefs = + {DelayedDeleteTimer, + FoundRefs} = lists:foldl( - fun ({_Referrer, ReferencesList}, A1) -> - A1 + lists:foldl(fun ({_T,Rs},A2) -> - A2+Rs - end, - 0, - ReferencesList) + fun ({Referrer, ReferencesList}, {DDT, A1}) -> + {case Referrer of + {system,delayed_delete_timer} -> + true; + _ -> + DDT + end, + A1 + lists:foldl(fun ({_T,Rs},A2) -> + A2+Rs + end, + 0, + ReferencesList)} end, - 0, + {false, 0}, ReferrerList), - %% Reference count equals found references ? - case Refc =:= FoundRefs of - true -> + %% Reference count equals found references? 
+ case {Refc, FoundRefs, DelayedDeleteTimer} of + {X, X, _} -> + ok; + {0, 1, true} -> ok; - false -> + _ -> exit({invalid_reference_count, Table, Entry}) end, @@ -1016,7 +1030,8 @@ check_refc(ThisNodeName,ThisCreation,Table,EntryList) when is_list(EntryList) -> case {Entry, Refc} of {ThisNodeName, 0} -> ok; {{ThisNodeName, ThisCreation}, 0} -> ok; - {_, 0} -> exit({not_referred_entry_in_table, Table, Entry}); + {_, 0} when DelayedDeleteTimer == false -> + exit({not_referred_entry_in_table, Table, Entry}); {_, _} -> ok end diff --git a/erts/emulator/test/send_term_SUITE.erl b/erts/emulator/test/send_term_SUITE.erl index 8e1f8df43a..4deae63ce7 100644 --- a/erts/emulator/test/send_term_SUITE.erl +++ b/erts/emulator/test/send_term_SUITE.erl @@ -279,7 +279,10 @@ generate_external_terms_files(BaseDir) -> {4444444444444444,-44444, [[[[[[[[[[[5]]]]]]]]]]], make_ref()}, {444444444444444444444,-44444, {{{{{{{{{{{{6}}}}}}}}}}}}, make_ref()}, {444444444444444,-44444, {{{{{{{{{{{{7}}}}}}}}}}}}, make_ref()}, - {4444444444444444444,-44444, {{{{{{{{{{{{8}}}}}}}}}}}}, make_ref()}], + {4444444444444444444,-44444, {{{{{{{{{{{{8}}}}}}}}}}}}, make_ref()}, + #{}, + #{1 => 11, 2 => 22, 3 => 33}, + maps:from_list([{K,K*11} || K <- lists:seq(1,100)])], ok = file:write_file(filename:join([BaseDir, "send_term_SUITE_data", "ext_terms.bin"]), diff --git a/erts/emulator/test/send_term_SUITE_data/ext_terms.bin b/erts/emulator/test/send_term_SUITE_data/ext_terms.bin Binary files differindex b239284323..5ff0b2ccf1 100644 --- a/erts/emulator/test/send_term_SUITE_data/ext_terms.bin +++ b/erts/emulator/test/send_term_SUITE_data/ext_terms.bin diff --git a/erts/emulator/test/send_term_SUITE_data/ext_terms.h b/erts/emulator/test/send_term_SUITE_data/ext_terms.h index 08134f3b05..5585585ec3 100644 --- a/erts/emulator/test/send_term_SUITE_data/ext_terms.h +++ b/erts/emulator/test/send_term_SUITE_data/ext_terms.h @@ -24,9 +24,9 @@ #ifndef EXT_TERMS_H__ #define EXT_TERMS_H__ static struct { - unsigned char ext[162]; + unsigned char ext[637]; int ext_size; - unsigned char cext[162]; + unsigned char cext[637]; int cext_size; } ext_terms[] = { {{131,104,3,98,0,0,18,103,98,255,255,237,153,108,0,0,0,2,100,0,7,97,110,95,97,116,111,109,107,0,6,97,32,108,105,115,116,106}, @@ -37,26 +37,26 @@ static struct { 46, {131,108,0,0,0,4,110,9,0,0,0,160,222,197,173,201,53,54,110,7,1,199,113,21,183,140,242,3,107,0,6,98,108,117,112,112,33,100,0,5,98,108,105,112,112,106}, 46}, - {{131,104,5,103,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,38,0,0,0,0,3,104,2,114,0,3,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,40,0,0,0,0,0,0,0,0,102,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,1,3,103,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,0,0,0,37,0,0,0,0,3,102,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,0,0,0,1,3,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,59,0,0,0,0,0,0,0,0}, - 162, - {131,80,0,0,0,161,120,156,203,96,77,79,97,224,77,140,207,203,79,73,117,72,207,47,74,74,76,103,96,96,80,3,98,6,230,12,166,34,6,102,116,89,102,160,140,6,3,20,164,97,209,203,200,12,52,145,39,17,85,80,21,108,96,26,166,4,35,51,216,14,20,97,144,21,214,48,43,0,1,209,36,52}, - 82}, - 
{{131,104,5,104,0,106,106,112,0,0,0,79,0,21,87,190,182,1,38,106,214,65,228,1,52,27,227,2,212,0,0,0,1,0,0,0,0,100,0,15,115,101,110,100,95,116,101,114,109,95,83,85,73,84,69,97,1,98,0,184,11,180,103,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,0,0,0,37,0,0,0,0,3,109,0,0,0,31,104,101,106,32,104,111,112,112,32,116,114,97,108,108,97,108,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97}, - 123, - {131,80,0,0,0,122,120,156,203,96,205,96,200,202,42,96,96,96,240,103,16,13,223,183,141,81,45,235,154,227,19,70,19,233,199,76,87,128,130,140,64,204,144,194,192,95,156,154,151,18,95,146,90,148,27,31,28,234,25,226,154,200,152,196,176,131,123,75,122,10,3,79,98,94,126,74,170,67,122,126,81,82,98,58,80,173,42,72,3,115,46,144,144,207,72,205,82,200,200,47,40,80,40,41,74,204,201,73,204,73,68,5,0,18,237,35,68}, - 117}, + {{131,104,5,103,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,41,0,0,0,0,2,104,2,114,0,3,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,2,0,0,0,42,0,0,0,3,0,0,0,0,102,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,2,103,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,40,0,0,0,0,3,102,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,3,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,56,0,0,0,3,0,0,0,0}, + 204, + {131,80,0,0,0,203,120,156,203,96,77,79,97,16,73,140,207,203,79,73,117,72,205,169,48,54,201,200,171,52,50,210,53,76,98,96,96,208,4,98,6,166,12,166,34,6,102,28,138,152,128,10,180,128,152,25,164,50,13,183,73,12,76,64,107,132,179,19,115,114,48,229,52,64,242,204,105,56,229,25,152,193,246,99,147,5,89,107,1,179,30,0,103,37,46,144}, + 96}, + {{131,104,5,104,0,106,106,112,0,0,0,86,0,123,56,104,225,98,55,108,63,185,201,160,64,191,31,210,203,0,0,0,2,0,0,0,0,100,0,15,115,101,110,100,95,116,101,114,109,95,83,85,73,84,69,97,2,98,3,217,195,71,103,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,40,0,0,0,0,3,109,0,0,0,31,104,101,106,32,104,111,112,112,32,116,114,97,108,108,97,108,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97}, + 130, + {131,80,0,0,0,129,120,156,203,96,205,96,200,202,42,96,96,96,8,99,168,182,200,120,152,100,158,99,191,243,228,2,135,253,242,151,78,3,5,153,128,152,33,133,129,191,56,53,47,37,190,36,181,40,55,62,56,212,51,196,53,145,41,137,249,230,97,247,244,20,6,225,236,196,156,156,84,135,212,156,10,99,147,140,188,74,35,35,93,195,36,160,22,13,144,62,230,92,32,33,159,145,154,165,144,145,95,80,160,80,82,4,84,154,152,147,136,10,0,219,221,39,33}, + 123}, {{131,108,0,0,0,4,110,10,0,28,199,113,166,118,185,145,86,105,9,110,5,1,28,103,24,89,10,107,0,2,98,33,100,0,10,98,108,105,112,112,112,112,112,112,112,106}, 46, {131,108,0,0,0,4,110,10,0,28,199,113,166,118,185,145,86,105,9,110,5,1,28,103,24,89,10,107,0,2,98,33,100,0,10,98,108,105,112,112,112,112,112,112,112,106}, 46}, - {{131,104,5,98,0,0,18,103,103,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,38,0,0,0,0,3,104,2,114,0,3,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,40,0,0,0,0,0,0,0,0,102,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,1,3,98,255,255,237,153,108,0,0,0,2,100,0,7,97,110,95,97,116,111,109,107,0,6,97,32,108,105,115,116,106}, - 120, - 
{131,80,0,0,0,119,120,156,203,96,77,98,96,16,74,79,79,97,224,77,140,207,203,79,73,117,72,207,47,74,74,76,103,96,96,80,3,98,6,230,12,166,34,6,102,116,89,102,160,140,6,3,20,164,97,209,203,200,156,244,255,255,219,153,57,64,38,83,10,3,123,98,94,124,98,73,126,110,54,3,91,162,66,78,102,113,73,22,0,167,192,30,158}, - 93}, - {{131,104,4,103,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,38,0,0,0,0,3,104,2,114,0,3,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,40,0,0,0,0,0,0,0,0,102,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,1,3,103,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,0,0,0,0,0,0,0,0,3,102,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,0,0,0,1,3}, - 131, - {131,80,0,0,0,130,120,156,203,96,73,79,97,224,77,140,207,203,79,73,117,72,207,47,74,74,76,103,96,96,80,3,98,6,230,12,166,34,6,102,116,89,102,160,140,6,3,20,164,97,209,203,200,12,52,145,39,17,85,16,12,152,211,48,37,24,153,1,215,214,30,50}, - 72}, + {{131,104,5,98,0,0,18,103,103,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,41,0,0,0,0,2,104,2,114,0,3,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,2,0,0,0,42,0,0,0,3,0,0,0,0,102,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,2,98,255,255,237,153,108,0,0,0,2,100,0,7,97,110,95,97,116,111,109,107,0,6,97,32,108,105,115,116,106}, + 141, + {131,80,0,0,0,140,120,156,203,96,77,98,96,16,74,79,79,97,16,73,140,207,203,79,73,117,72,205,169,48,54,201,200,171,52,50,210,53,4,202,49,104,2,49,3,83,6,83,17,3,51,14,69,76,64,5,90,64,204,12,82,153,134,219,36,6,166,164,255,255,223,206,204,1,177,82,24,216,19,243,226,19,75,242,115,179,25,216,18,21,114,50,139,75,178,0,77,99,35,202}, + 100}, + {{131,104,4,103,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,41,0,0,0,0,2,104,2,114,0,3,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,2,0,0,0,42,0,0,0,3,0,0,0,0,102,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,2,103,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,0,0,0,0,3,102,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,3}, + 166, + {131,80,0,0,0,165,120,156,203,96,73,79,97,16,73,140,207,203,79,73,117,72,205,169,48,54,201,200,171,52,50,210,53,76,98,96,96,208,4,98,6,166,12,166,34,6,102,28,138,152,128,10,180,128,152,25,164,50,13,183,73,12,76,64,107,132,179,19,115,114,176,200,129,0,115,26,110,121,102,0,219,33,38,209}, + 84}, {{131,104,3,98,0,0,18,103,98,255,255,237,153,108,0,0,0,2,100,0,7,97,110,95,97,116,111,109,107,0,6,97,32,108,105,115,116,106}, 38, {131,104,3,98,0,0,18,103,98,255,255,237,153,108,0,0,0,2,100,0,7,97,110,95,97,116,111,109,107,0,6,97,32,108,105,115,116,106}, @@ -65,46 +65,58 @@ static struct { 33, {131,104,3,98,0,0,18,103,98,255,255,237,153,108,0,0,0,2,100,0,4,97,116,111,109,107,0,4,108,105,115,116,106}, 33}, - {{131,104,4,103,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,38,0,0,0,0,3,104,2,114,0,3,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,40,0,0,0,0,0,0,0,0,102,100,0,13,97,95,110,111,100,101,64,103,111,114,98,97,103,0,0,0,1,3,103,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,0,0,0,0,0,0,0,0,3,102,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,0,0,0,1,3}, - 131, - 
{131,80,0,0,0,130,120,156,203,96,73,79,97,224,77,140,207,203,79,73,117,72,207,47,74,74,76,103,96,96,80,3,98,6,230,12,166,34,6,102,116,89,102,160,140,6,3,20,164,97,209,203,200,12,52,145,39,17,85,16,12,152,211,48,37,24,153,1,215,214,30,50}, + {{131,104,4,103,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,41,0,0,0,0,2,104,2,114,0,3,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,2,0,0,0,42,0,0,0,3,0,0,0,0,102,100,0,20,97,95,110,111,100,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,2,103,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,0,0,0,0,3,102,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,0,0,0,0,3}, + 166, + {131,80,0,0,0,165,120,156,203,96,73,79,97,16,73,140,207,203,79,73,117,72,205,169,48,54,201,200,171,52,50,210,53,76,98,96,96,208,4,98,6,166,12,166,34,6,102,28,138,152,128,10,180,128,152,25,164,50,13,183,73,12,76,64,107,132,179,19,115,114,176,200,129,0,115,26,110,121,102,0,219,33,38,209}, + 84}, + {{131,104,4,110,8,0,28,199,17,175,172,214,173,61,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,0,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,57,0,0,0,3,0,0,0,0}, + 81, + {131,80,0,0,0,80,120,156,203,96,201,227,96,144,57,46,184,126,205,181,181,182,73,255,255,7,165,100,48,98,133,12,69,12,204,41,12,194,217,137,57,57,169,14,169,57,21,198,38,25,121,149,70,70,186,134,73,204,12,12,12,150,64,12,162,25,0,231,161,20,138}, + 71}, + {{131,104,4,110,9,0,28,199,241,98,116,219,231,23,24,98,255,255,82,100,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,107,0,1,1,106,106,106,106,106,106,106,106,106,106,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,58,0,0,0,3,0,0,0,0}, + 122, + {131,80,0,0,0,121,120,156,203,96,201,227,100,144,57,254,49,169,228,246,115,113,137,164,255,255,131,82,114,24,24,24,24,73,34,178,25,24,25,179,224,160,136,129,57,133,65,56,59,49,39,39,213,33,53,167,194,216,36,35,175,210,200,72,215,48,137,25,168,214,10,136,65,52,3,0,142,142,25,0}, + 80}, + {{131,104,4,110,8,0,28,199,129,17,222,251,42,6,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,2,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,59,0,0,0,3,0,0,0,0}, + 83, + {131,80,0,0,0,82,120,156,203,96,201,227,96,144,57,222,40,120,239,183,22,91,210,255,255,65,41,25,140,216,97,34,83,17,3,115,10,131,112,118,98,78,78,170,67,106,78,133,177,73,70,94,165,145,145,174,97,18,51,3,3,131,53,16,131,104,6,0,233,167,20,95}, 72}, - {{131,104,4,110,8,0,28,199,17,175,172,214,173,61,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,0,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,60,0,0,0,0,0,0,0,0}, - 74, - {131,80,0,0,0,73,120,156,203,96,201,227,96,144,57,46,184,126,205,181,181,182,73,255,255,7,165,100,48,98,133,12,69,12,204,41,12,60,137,121,249,41,169,14,233,249,69,73,137,233,204,12,12,12,54,12,80,0,0,73,17,18,208}, - 63}, - {{131,104,4,110,9,0,28,199,241,98,116,219,231,23,24,98,255,255,82,100,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,107,0,1,1,106,106,106,106,106,106,106,106,106,106,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,61,0,0,0,0,0,0,0,0}, - 115, - 
{131,80,0,0,0,114,120,156,203,96,201,227,100,144,57,254,49,169,228,246,115,113,137,164,255,255,131,82,114,24,24,24,24,73,34,178,25,24,25,179,224,160,136,129,57,133,129,39,49,47,63,37,213,33,61,191,40,41,49,157,25,168,200,150,1,10,0,208,188,23,70}, + {{131,104,4,110,9,0,28,199,113,221,139,146,14,239,240,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,3,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,60,0,0,0,3,0,0,0,0}, + 84, + {131,80,0,0,0,83,120,156,203,96,201,227,100,144,57,94,120,183,123,18,223,251,15,73,255,255,7,165,100,48,98,135,137,204,69,12,204,41,12,194,217,137,57,57,169,14,169,57,21,198,38,25,121,149,70,70,186,134,73,204,12,12,12,54,64,12,162,25,0,106,11,22,31}, + 73}, + {{131,104,4,110,9,0,28,199,177,214,190,98,202,104,2,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,4,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,61,0,0,0,3,0,0,0,0}, + 84, + {131,80,0,0,0,83,120,156,203,96,201,227,100,144,57,190,241,218,190,164,83,25,76,73,255,255,7,165,100,48,98,135,137,44,69,12,204,41,12,194,217,137,57,57,169,14,169,57,21,198,38,25,121,149,70,70,186,134,73,204,12,12,12,182,64,12,162,25,0,74,151,21,164}, + 73}, + {{131,104,4,110,7,0,28,199,85,220,50,202,15,98,255,255,82,100,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,107,0,1,5,106,106,106,106,106,106,106,106,106,106,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,62,0,0,0,3,0,0,0,0}, + 120, + {131,80,0,0,0,119,120,156,203,96,201,99,103,144,57,30,122,199,232,20,127,210,255,255,65,41,57,12,12,12,140,36,17,217,12,140,172,89,112,80,196,192,156,194,32,156,157,152,147,147,234,144,154,83,97,108,146,145,87,105,100,164,107,152,196,12,84,107,7,196,32,154,1,0,225,225,23,138}, + 78}, + {{131,104,4,110,9,0,28,199,241,98,116,219,231,23,24,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,6,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,63,0,0,0,3,0,0,0,0}, + 84, + {131,80,0,0,0,83,120,156,203,96,201,227,100,144,57,254,49,169,228,246,115,113,137,164,255,255,131,82,50,24,177,195,68,182,34,6,230,20,6,225,236,196,156,156,84,135,212,156,10,99,147,140,188,74,35,35,93,195,36,102,6,6,6,123,32,6,209,12,0,64,205,21,133}, + 73}, + {{131,104,4,110,7,0,28,199,59,73,56,148,1,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,7,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,64,0,0,0,3,0,0,0,0}, + 82, + {131,80,0,0,0,81,120,156,203,96,201,99,103,144,57,110,237,105,49,133,49,233,255,255,160,148,12,70,236,48,145,189,136,129,57,133,65,56,59,49,39,39,213,33,53,167,194,216,36,35,175,210,200,72,215,48,137,153,129,129,193,1,136,65,52,3,0,137,143,19,30}, 71}, - {{131,104,4,110,8,0,28,199,129,17,222,251,42,6,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,2,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,62,0,0,0,0,0,0,0,0}, - 76, - {131,80,0,0,0,75,120,156,203,96,201,227,96,144,57,222,40,120,239,183,22,91,210,255,255,65,41,25,140,216,97,34,83,17,3,115,10,3,79,98,94,126,74,170,67,122,126,81,82,98,58,51,3,3,131,29,3,20,0,0,76,82,18,165}, - 64}, - 
{{131,104,4,110,9,0,28,199,113,221,139,146,14,239,240,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,3,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,63,0,0,0,0,0,0,0,0}, - 77, - {131,80,0,0,0,76,120,156,203,96,201,227,100,144,57,94,120,183,123,18,223,251,15,73,255,255,7,165,100,48,98,135,137,204,69,12,204,41,12,60,137,121,249,41,169,14,233,249,69,73,137,233,204,12,12,12,246,12,80,0,0,192,110,20,101}, - 65}, - {{131,104,4,110,9,0,28,199,177,214,190,98,202,104,2,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,4,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,64,0,0,0,0,0,0,0,0}, - 77, - {131,80,0,0,0,76,120,156,203,96,201,227,100,144,57,190,241,218,190,164,83,25,76,73,255,255,7,165,100,48,98,135,137,44,69,12,204,41,12,60,137,121,249,41,169,14,233,249,69,73,137,233,204,12,12,12,14,12,80,0,0,164,94,19,234}, - 65}, - {{131,104,4,110,7,0,28,199,85,220,50,202,15,98,255,255,82,100,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,108,0,0,0,1,107,0,1,5,106,106,106,106,106,106,106,106,106,106,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,65,0,0,0,0,0,0,0,0}, - 113, - {131,80,0,0,0,112,120,156,203,96,201,99,103,144,57,30,122,199,232,20,127,210,255,255,65,41,57,12,12,12,140,36,17,217,12,140,172,89,112,80,196,192,156,194,192,147,152,151,159,146,234,144,158,95,148,148,152,206,12,84,228,200,0,5,0,46,116,21,208}, - 69}, - {{131,104,4,110,9,0,28,199,241,98,116,219,231,23,24,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,6,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,66,0,0,0,0,0,0,0,0}, - 77, - {131,80,0,0,0,76,120,156,203,96,201,227,100,144,57,254,49,169,228,246,115,113,137,164,255,255,131,82,50,24,177,195,68,182,34,6,230,20,6,158,196,188,252,148,84,135,244,252,162,164,196,116,102,6,6,6,39,6,40,0,0,155,123,19,203}, - 65}, - {{131,104,4,110,7,0,28,199,59,73,56,148,1,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,7,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,67,0,0,0,0,0,0,0,0}, - 75, - {131,80,0,0,0,74,120,156,203,96,201,99,103,144,57,110,237,105,49,133,49,233,255,255,160,148,12,70,236,48,145,189,136,129,57,133,129,39,49,47,63,37,213,33,61,191,40,41,49,157,153,129,129,193,153,1,10,0,245,21,17,100}, - 62}, - {{131,104,4,110,8,0,28,199,17,175,172,214,173,61,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,8,114,0,3,100,0,12,97,110,111,100,101,64,103,111,114,98,97,103,3,0,0,0,68,0,0,0,0,0,0,0,0}, - 76, - {131,80,0,0,0,75,120,156,203,96,201,227,96,144,57,46,184,126,205,181,181,182,73,255,255,7,165,100,48,98,135,137,28,69,12,204,41,12,60,137,121,249,41,169,14,233,249,69,73,137,233,204,12,12,12,46,12,80,0,0,112,226,19,66}, - 64} + {{131,104,4,110,8,0,28,199,17,175,172,214,173,61,98,255,255,82,100,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,97,8,114,0,3,100,0,19,107,97,108,108,101,64,101,108,120,51,52,104,110,121,50,50,45,49,98,3,0,0,0,65,0,0,0,3,0,0,0,0}, + 83, + {131,80,0,0,0,82,120,156,203,96,201,227,96,144,57,46,184,126,205,181,181,182,73,255,255,7,165,100,48,98,135,137,28,69,12,204,41,12,194,217,137,57,57,169,14,169,57,21,198,38,25,121,149,70,70,186,134,73,204,12,12,12,142,64,12,162,25,0,18,103,20,252}, + 72}, + {{131,116,0,0,0,0}, + 6, + {131,116,0,0,0,0}, + 6}, + 
{{131,116,0,0,0,3,97,1,97,11,97,2,97,22,97,3,97,33}, + 18, + {131,116,0,0,0,3,97,1,97,11,97,2,97,22,97,3,97,33}, + 18}, + {{131,116,0,0,0,100,97,48,98,0,0,2,16,97,62,98,0,0,2,170,97,11,97,121,97,39,98,0,0,1,173,97,83,98,0,0,3,145,97,63,98,0,0,2,181,97,34,98,0,0,1,118,97,68,98,0,0,2,236,97,26,98,0,0,1,30,97,78,98,0,0,3,90,97,52,98,0,0,2,60,97,15,97,165,97,64,98,0,0,2,192,97,75,98,0,0,3,57,97,81,98,0,0,3,123,97,71,98,0,0,3,13,97,20,97,220,97,50,98,0,0,2,38,97,17,97,187,97,25,98,0,0,1,19,97,65,98,0,0,2,203,97,98,98,0,0,4,54,97,79,98,0,0,3,101,97,13,97,143,97,44,98,0,0,1,228,97,8,97,88,97,99,98,0,0,4,65,97,36,98,0,0,1,140,97,67,98,0,0,2,225,97,7,97,77,97,66,98,0,0,2,214,97,85,98,0,0,3,167,97,76,98,0,0,3,68,97,1,97,11,97,32,98,0,0,1,96,97,69,98,0,0,2,247,97,37,98,0,0,1,151,97,35,98,0,0,1,129,97,84,98,0,0,3,156,97,3,97,33,97,82,98,0,0,3,134,97,45,98,0,0,1,239,97,55,98,0,0,2,93,97,6,97,66,97,2,97,22,97,94,98,0,0,4,10,97,49,98,0,0,2,27,97,41,98,0,0,1,195,97,91,98,0,0,3,233,97,87,98,0,0,3,189,97,33,98,0,0,1,107,97,42,98,0,0,1,206,97,74,98,0,0,3,46,97,60,98,0,0,2,148,97,43,98,0,0,1,217,97,10,97,110,97,70,98,0,0,3,2,97,9,97,99,97,72,98,0,0,3,24,97,86,98,0,0,3,178,97,19,97,209,97,56,98,0,0,2,104,97,95,98,0,0,4,21,97,57,98,0,0,2,115,97,51,98,0,0,2,49,97,14,97,154,97,5,97,55,97,54,98,0,0,2,82,97,18,97,198,97,61,98,0,0,2,159,97,31,98,0,0,1,85,97,22,97,242,97,29,98,0,0,1,63,97,97,98,0,0,4,43,97,21,97,231,97,89,98,0,0,3,211,97,27,98,0,0,1,41,97,24,98,0,0,1,8,97,47,98,0,0,2,5,97,100,98,0,0,4,76,97,40,98,0,0,1,184,97,96,98,0,0,4,32,97,73,98,0,0,3,35,97,90,98,0,0,3,222,97,30,98,0,0,1,74,97,58,98,0,0,2,126,97,80,98,0,0,3,112,97,88,98,0,0,3,200,97,59,98,0,0,2,137,97,77,98,0,0,3,79,97,23,97,253,97,28,98,0,0,1,52,97,46,98,0,0,1,250,97,92,98,0,0,3,244,97,53,98,0,0,2,71,97,93,98,0,0,3,255,97,16,97,176,97,38,98,0,0,1,162,97,4,97,44,97,12,97,132}, + 637, + {131,80,0,0,2,124,120,156,21,143,123,100,87,113,24,135,207,126,219,218,165,86,171,109,181,182,118,107,181,181,123,171,181,181,218,90,91,91,171,93,90,91,173,221,219,158,93,24,145,40,145,49,70,98,140,68,68,70,196,196,136,137,24,137,136,68,68,70,140,68,68,34,34,35,34,117,158,191,158,247,115,121,223,239,57,55,130,32,152,228,224,120,16,68,146,57,33,150,217,204,45,10,195,49,234,41,23,66,68,223,163,193,224,57,123,53,111,210,172,250,65,134,42,155,115,86,6,169,210,172,99,27,75,156,116,124,69,187,65,45,221,98,134,86,145,68,42,159,56,100,94,192,118,94,176,219,27,41,52,234,188,99,60,68,76,53,93,86,167,72,226,46,165,230,95,137,167,159,9,195,70,246,233,44,112,202,141,47,196,209,73,147,227,71,122,221,122,66,135,104,38,42,252,139,92,171,99,180,152,255,102,191,234,1,249,98,142,139,214,22,137,38,143,30,199,59,148,25,252,164,198,246,8,155,104,34,194,78,46,251,106,34,149,186,153,20,217,121,205,144,27,223,233,19,47,201,211,188,66,177,120,79,155,102,57,117,46,220,167,68,115,157,68,174,114,218,32,66,2,19,156,113,76,231,146,120,70,10,31,56,106,125,154,81,95,75,163,86,117,157,195,162,146,173,60,36,150,26,170,149,61,236,224,13,245,142,143,200,241,122,111,248,149,191,200,114,108,0,15,148,144,198,55,6,188,190,70,166,65,17,233,34,158,10,23,99,153,180,214,193,1,205,85,198,84,185,156,117,33,159,65,241,153,108,179,54,142,185,48,203,121,205,107,244,139,183,28,215,156,167,83,213,197,46,254,178,199,118,21,229,226,15,195,6,27,28,177,214,202,136,234,31,201,172,80,96,254,152,24,74,217,194,237,255,248,120,145,79}, + 418} }; -#define NO_OF_EXT_TERMS 19 +#define NO_OF_EXT_TERMS 22 #endif diff --git a/erts/emulator/test/timer_bif_SUITE.erl b/erts/emulator/test/timer_bif_SUITE.erl index 
d406456f98..f41fc7552e 100644 --- a/erts/emulator/test/timer_bif_SUITE.erl +++ b/erts/emulator/test/timer_bif_SUITE.erl @@ -29,7 +29,8 @@ read_timer_trivial/1, read_timer/1, read_timer_async/1, cleanup/1, evil_timers/1, registered_process/1, same_time_yielding/1, same_time_yielding_with_cancel/1, same_time_yielding_with_cancel_other/1, - same_time_yielding_with_cancel_other_accessor/1, auto_cancel_yielding/1]). +% same_time_yielding_with_cancel_other_accessor/1, + auto_cancel_yielding/1]). -include_lib("test_server/include/test_server.hrl"). @@ -67,7 +68,7 @@ all() -> cleanup, evil_timers, registered_process, same_time_yielding, same_time_yielding_with_cancel, same_time_yielding_with_cancel_other, - same_time_yielding_with_cancel_other_accessor, +% same_time_yielding_with_cancel_other_accessor, auto_cancel_yielding]. groups() -> @@ -532,8 +533,8 @@ same_time_yielding_with_cancel(Config) when is_list(Config) -> same_time_yielding_with_cancel_other(Config) when is_list(Config) -> same_time_yielding_with_cancel_test(true, false). -same_time_yielding_with_cancel_other_accessor(Config) when is_list(Config) -> - same_time_yielding_with_cancel_test(true, true). +%same_time_yielding_with_cancel_other_accessor(Config) when is_list(Config) -> +% same_time_yielding_with_cancel_test(true, true). do_cancel_tmrs(Tmo, Tmrs, Tester) -> BeginCancel = erlang:convert_time_unit(Tmo, @@ -631,7 +632,6 @@ auto_cancel_yielding(Config) when is_list(Config) -> true = mem_larger_than(Mem), exit(P, bang), wait_until(fun () -> process_is_cleaned_up(P) end), - receive after 1000 -> ok end, Mem = mem(), ok. @@ -747,7 +747,7 @@ mem_larger_than(Mem) -> mem() > Mem. mem() -> - erts_debug:set_internal_state(wait, deallocations), + erts_debug:set_internal_state(wait, timer_cancellations), erts_debug:set_internal_state(wait, deallocations), case mem_get() of {-1, -1} -> no_fix_alloc; |
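
The TCP_ADDF_SHOW_ECONNRESET / TCP_ADDF_DELAYED_ECONNRESET handling in the inet_drv.c hunks above is what backs the show_econnreset socket option at the Erlang level: with the option enabled, an incoming RST is reported as econnreset instead of being folded into closed. A minimal sketch of how that looks from gen_tcp (module name, host and port are placeholders, not part of the diff):

-module(show_econnreset_example).
-export([recv_with_reset/1]).

%% Connect in passive mode, enable show_econnreset, and read once.
%% If the peer aborts the connection with an RST, gen_tcp:recv/2 is
%% expected to return {error, econnreset} rather than {error, closed}.
recv_with_reset(Port) ->
    {ok, S} = gen_tcp:connect("localhost", Port,
                              [binary, {active, false}]),
    ok = inet:setopts(S, [{show_econnreset, true}]),
    case gen_tcp:recv(S, 0) of
        {ok, Data} ->
            {ok, Data};
        {error, econnreset} ->
            %% The peer aborted the connection (RST).
            peer_reset;
        {error, closed} ->
            %% An orderly shutdown (FIN) is still reported as closed.
            closed
    end.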
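
TCP_ADDF_LINGER_ZERO, set from the SO_LINGER branch of inet_set_opts and checked in tcp_inet_flush, makes the driver drop its own output queue when a socket with a zero linger time is closed, matching the abort-on-close semantics of SO_LINGER. A sketch of the corresponding Erlang-side usage (placeholder module and socket handling, not part of the diff):

-module(linger_zero_example).
-export([abort_close/1]).

%% Close a connected socket without flushing queued data: with
%% {linger, {true, 0}} the OS discards unsent data and sends an RST,
%% and the driver (per TCP_ADDF_LINGER_ZERO above) drops its queue
%% instead of trying to flush it first.
abort_close(S) ->
    ok = inet:setopts(S, [{linger, {true, 0}}]),
    ok = gen_tcp:close(S).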
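
The driver_output*/driver_deliver_term hunks replace the single global byte counters with per-scheduler counters (esdp->io.in, with an atomic fallback when no scheduler data is available), aggregated on demand (see the new gather_io_bytes atom). The totals remain observable from Erlang through erlang:statistics(io); a small sketch of a delta measurement (module and function names are placeholders, not part of the diff):

-module(io_bytes_example).
-export([io_delta/1]).

%% erlang:statistics(io) returns {{input, BytesIn}, {output, BytesOut}},
%% the number of bytes passed through ports since the node started.
%% Measure how much port I/O a piece of work performs:
io_delta(Fun) ->
    {{input, In0}, {output, Out0}} = erlang:statistics(io),
    Result = Fun(),
    {{input, In1}, {output, Out1}} = erlang:statistics(io),
    {Result, {bytes_in, In1 - In0}, {bytes_out, Out1 - Out0}}.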