Diffstat (limited to 'erts/emulator/beam')
42 files changed, 3779 insertions, 3091 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index cceca66850..45b7540aeb 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -142,6 +142,7 @@ atom bsr
 atom bsr_anycrlf
 atom bsr_unicode
 atom build_type
+atom busy
 atom busy_dist_port
 atom busy_port
 atom call
@@ -252,6 +253,7 @@ atom exception_from
 atom exception_trace
 atom exclusive
 atom exit_status
+atom exited
 atom existing
 atom existing_processes
 atom existing_ports
@@ -358,6 +360,7 @@ atom loaded
 atom load_cancelled
 atom load_failure
 atom local
+atom logger
 atom long_gc
 atom long_schedule
 atom low
@@ -444,6 +447,7 @@ atom no_float
 atom no_integer
 atom no_network
 atom no_start_optimize
+atom not_suspended
 atom not
 atom not_a_list
 atom not_loaded
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 5c76aafae7..a0dbd9ec7b 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -603,8 +603,9 @@ badarg:
 BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
 {
+    erts_aint32_t state;
     Process *rp;
-    int reds = 0;
+    int dirty, busy, reds = 0;
     Eterm res;
 
     if (BIF_P != erts_dirty_process_signal_handler
@@ -618,20 +619,29 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
     if (is_not_atom(BIF_ARG_2))
         BIF_ERROR(BIF_P, BADARG);
 
-    rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
-                                   BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
-    if (rp == ERTS_PROC_LOCK_BUSY)
-        ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
-                        BIF_P, BIF_ARG_1, BIF_ARG_2);
+    if (BIF_ARG_1 == BIF_P->common.id)
+        BIF_RET(am_normal);
+
+    rp = erts_proc_lookup_raw(BIF_ARG_1);
     if (!rp)
-        BIF_RET(am_false);
-
+        BIF_RET(am_false);
+
+    state = erts_atomic32_read_nob(&rp->state);
+    dirty = (state & (ERTS_PSFLG_DIRTY_RUNNING
+                      | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+    if (!dirty)
+        BIF_RET(am_normal);
+
+    busy = erts_proc_trylock(rp, ERTS_PROC_LOCK_MAIN) == EBUSY;
+
+    if (busy)
+        BIF_RET(am_busy);
+
     res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);
 
-    if (BIF_P != rp)
-        erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+    erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
 
-    ASSERT(is_value(res));
+    ASSERT(res == am_true || res == am_false);
 
     BIF_RET2(res, reds);
 }
@@ -1757,11 +1767,11 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
                 release_literal_areas.last = ref;
             }
             erts_mtx_unlock(&release_literal_areas.mtx);
-            erts_queue_message(erts_literal_area_collector,
+            erts_queue_proc_message(BIF_P,
+                                    erts_literal_area_collector,
                                0,
                                erts_alloc_message(0, NULL),
-                               am_copy_literals,
-                               BIF_P->common.id);
+                               am_copy_literals);
         }
 
         return ret;
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 5eb68b817e..b8a8d06315 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1191,7 +1191,7 @@ dirty_send_message(Process *c_p, Eterm to, Eterm tag)
     mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);
 
     msg = TUPLE2(hp, tag, c_p->common.id);
-    erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+    erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
 
     if (rp == real_c_p)
         rp_locks &= ~c_p_locks;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index ee287243a4..ab5920a67e 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1166,6 +1166,9 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
             reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
         }
 
+        if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
+            erts_proc_sig_handle_pending_suspend(c_p);
+
         PROCESS_MAIN_CHK_LOCKS(c_p);
         ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
         ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
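A pattern that recurs in this commit (here and again in dist.c below): erts_queue_message(), which took the sender as a bare pid term, is replaced by erts_queue_proc_message(), which takes the sending Process pointer — presumably so delivery can run through the sender-side signal-queue machinery (note the erl_proc_sig_queue.h include in break.c below). The mechanical rewrite, sketched with hypothetical local names:

```c
/* Illustration only, not part of the patch: old vs. new call shape for
 * queueing 'msg' to 'receiver' on behalf of 'sender'. */
static void queue_example(Process *sender, Process *receiver,
                          ErtsProcLocks rcv_locks,
                          ErtsMessage *mp, Eterm msg)
{
    /* before: erts_queue_message(receiver, rcv_locks, mp, msg,
     *                            sender->common.id); */
    erts_queue_proc_message(sender, receiver, rcv_locks, mp, msg);
}
```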
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 79244b8544..97e1ee1286 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1364,13 +1364,14 @@ BIF_RETTYPE exit_signal_2(BIF_ALIST_2)
 
 /* Handle flags common to both process_flag_2 and process_flag_3. */
-static BIF_RETTYPE process_flag_aux(Process *BIF_P,
-                                    Process *rp,
-                                    Eterm flag,
-                                    Eterm val)
+static Eterm process_flag_aux(Process *c_p, int *redsp, Eterm flag, Eterm val)
 {
    Eterm old_value = NIL;	/* shut up warning about use before set */
    Sint i;
+
+   if (redsp)
+       *redsp = 1;
+
    if (flag == am_save_calls) {
        struct saved_calls *scb;
        if (!is_small(val))
@@ -1390,30 +1391,89 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
       }
 
 #ifdef HIPE
-       if (rp->flags & F_HIPE_MODE) {
-           ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(rp));
-           scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(rp, scb);
+       if (c_p->flags & F_HIPE_MODE) {
+           ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p));
+           scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(c_p, scb);
       }
       else
 #endif
       {
 #ifdef HIPE
-           ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(rp));
+           ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(c_p));
 #endif
-           scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, scb);
-           if (rp == BIF_P && ((scb && i == 0) || (!scb && i != 0))) {
-               /* Adjust fcalls to match save calls setting... */
-               if (i == 0)
-                   BIF_P->fcalls += CONTEXT_REDS; /* disabled it */
-               else
-                   BIF_P->fcalls -= CONTEXT_REDS; /* enabled it */
-
-               /*
-                * Make sure we reschedule immediately so the
-                * change take effect at once.
-                */
-               ERTS_VBUMP_ALL_REDS(BIF_P);
-           }
+           scb = ERTS_PROC_SET_SAVED_CALLS_BUF(c_p, scb);
+
+           if (((scb && i == 0) || (!scb && i != 0))) {
+
+               /*
+                * Make sure we reschedule immediately so the
+                * change take effect at once.
+                */
+               if (!redsp) {
+                   /* Executed via BIF call.. */
+               via_bif:
+
+                   /* Adjust fcalls to match save calls setting... */
+                   if (i == 0)
+                       c_p->fcalls += CONTEXT_REDS; /* disabled it */
+                   else
+                       c_p->fcalls -= CONTEXT_REDS; /* enabled it */
+
+                   ERTS_VBUMP_ALL_REDS(c_p);
+               }
+               else {
+                   erts_aint32_t state;
+                   /*
+                    * Executed via signal handler. Try to figure
+                    * out in what context we are executing...
+                    */
+
+                   state = erts_atomic32_read_nob(&c_p->state);
+                   if (state & (ERTS_PSFLG_RUNNING_SYS
+                                | ERTS_PSFLG_DIRTY_RUNNING_SYS
+                                | ERTS_PSFLG_DIRTY_RUNNING)) {
+                       /*
+                        * We are either processing signals before
+                        * being executed or executing dirty. That
+                        * is, no need to adjust anything...
+                        */
+                       *redsp = 1;
+                   }
+                   else {
+                       ErtsSchedulerData *esdp;
+                       ASSERT(state & ERTS_PSFLG_RUNNING);
+
+                       /*
+                        * F_DELAY_GC is currently only set when
+                        * we handle signals in state running via
+                        * receive helper...
+                        */
+
+                       if (!(c_p->flags & F_DELAY_GC)) {
+                           *redsp = 1;
+                           goto via_bif;
+                       }
+
+                       /*
+                        * Executing via receive helper...
+                        *
+                        * We utilize the virtual reds counter
+                        * in order to get correct calculation
+                        * of reductions consumed when scheduling
+                        * out the process...
+                        */
+
+                       esdp = erts_get_scheduler_data();
+
+                       if (i == 0)
+                           esdp->virtual_reds += CONTEXT_REDS; /* disabled it */
+                       else
+                           esdp->virtual_reds -= CONTEXT_REDS; /* enabled it */
+
+                       *redsp = -1;
+                   }
+               }
+           }
       }
 
       if (!scb)
@@ -1423,11 +1483,12 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
           erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
       }
 
-       BIF_RET(old_value);
+       ASSERT(is_immed(old_value));
+       return old_value;
    }
 
  error:
-    BIF_ERROR(BIF_P, BADARG);
+    return am_badarg;
 }
 
 BIF_RETTYPE process_flag_2(BIF_ALIST_2)
@@ -1596,29 +1657,73 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
        /* Fall through and try process_flag_aux() ... */
    }
 
-   BIF_RET(process_flag_aux(BIF_P, BIF_P, BIF_ARG_1, BIF_ARG_2));
+   old_value = process_flag_aux(BIF_P, NULL, BIF_ARG_1, BIF_ARG_2);
+   if (old_value != am_badarg)
+       BIF_RET(old_value);
 error:
    BIF_ERROR(BIF_P, BADARG);
 }
 
-BIF_RETTYPE process_flag_3(BIF_ALIST_3)
+typedef struct {
+    Eterm flag;
+    Eterm value;
+    ErlOffHeap oh;
+    Eterm heap[1];
+} ErtsProcessFlag3Args;
+
+static Eterm
+exec_process_flag_3(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
 {
-    Process *rp;
-    Eterm res;
+    ErtsProcessFlag3Args *pf3a = arg;
+    Eterm res;
+
+    if (ERTS_PROC_IS_EXITING(c_p))
+        res = am_badarg;
+    else
+        res = process_flag_aux(c_p, redsp, pf3a->flag, pf3a->value);
+    erts_cleanup_offheap(&pf3a->oh);
+    erts_free(ERTS_ALC_T_PF3_ARGS, arg);
+    return res;
+}
+
+
+BIF_RETTYPE erts_internal_process_flag_3(BIF_ALIST_3)
+{
+    Eterm res, *hp;
+    ErlOffHeap *ohp;
+    ErtsProcessFlag3Args *pf3a;
+    Uint flag_sz, value_sz;
+
+    if (BIF_P->common.id == BIF_ARG_1) {
+        res = process_flag_aux(BIF_P, NULL, BIF_ARG_2, BIF_ARG_3);
+        BIF_RET(res);
+    }
+
+    if (is_not_internal_pid(BIF_ARG_1))
+        BIF_RET(am_badarg);
+
+    flag_sz = is_immed(BIF_ARG_2) ? 0 : size_object(BIF_ARG_2);
+    value_sz = is_immed(BIF_ARG_3) ? 0 : size_object(BIF_ARG_3);
+
+    pf3a = erts_alloc(ERTS_ALC_T_PF3_ARGS,
+                      sizeof(ErtsProcessFlag3Args)
+                      + sizeof(Eterm)*(flag_sz+value_sz-1));
+
+    ohp = &pf3a->oh;
+    ERTS_INIT_OFF_HEAP(&pf3a->oh);
 
-    rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
-                                   BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
-    if (rp == ERTS_PROC_LOCK_BUSY)
-        ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P,
-                        BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+    hp = &pf3a->heap[0];
 
-    if (!rp)
-        BIF_ERROR(BIF_P, BADARG);
+    pf3a->flag = copy_struct(BIF_ARG_2, flag_sz, &hp, ohp);
+    pf3a->value = copy_struct(BIF_ARG_3, value_sz, &hp, ohp);
 
-    res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3);
+    res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+                                         !0,
+                                         exec_process_flag_3,
+                                         (void *) pf3a);
 
-    if (rp != BIF_P)
-        erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+    if (is_non_value(res))
+        BIF_RET(am_badarg);
 
     return res;
 }
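process_flag/3 no longer locks the target process from the outside; the request is shipped to the target with erts_proc_sig_send_rpc_request(), which runs a callback in the target's own context and, when a reply is requested, delivers the result to the caller in a message tagged with the returned reference (presumably `{Ref, Result}`). A minimal sketch of the callback contract as it appears in exec_process_flag_3() above — the callback name and body here are hypothetical:

```c
/* Hypothetical callback following the rpc-request contract used above:
 * runs in the context of the target process c_p. 'redsp' reports
 * reductions consumed (it may be NULL when a callback is also invoked
 * directly, as setup_connection_distctrl() is in dist.c below); '*bpp'
 * receives a heap fragment when a non-immediate result must be built. */
static Eterm
my_rpc_callback(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
    (void)arg; (void)bpp;

    if (redsp)
        *redsp = 1;                /* reduction cost of this callback */

    if (ERTS_PROC_IS_EXITING(c_p))
        return am_badarg;          /* target is on its way out */

    /* ... mutate c_p's state; immediates can be returned directly ... */
    return am_ok;
}
```

A non-value result from erts_proc_sig_send_rpc_request() means the signal could not be sent at all (e.g. the target does not exist), which both call sites above map to badarg.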
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index a47339253e..cf9f61c0b8 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -295,6 +295,19 @@ do { \
     (Ret) = THE_NON_VALUE; \
 } while (0)
 
+#define ERTS_BIF_PREP_TRAP4(Ret, Trap, Proc, A0, A1, A2, A3) \
+do { \
+    Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+    (Proc)->arity = 4; \
+    reg[0] = (Eterm) (A0); \
+    reg[1] = (Eterm) (A1); \
+    reg[2] = (Eterm) (A2); \
+    reg[3] = (Eterm) (A3); \
+    (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
+    (Proc)->freason = TRAP; \
+    (Ret) = THE_NON_VALUE; \
+} while (0)
+
 #define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
 do { \
     Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
@@ -343,6 +356,18 @@ do { \
     return THE_NON_VALUE; \
 } while(0)
 
+#define BIF_TRAP4(Trap_, p, A0, A1, A2, A3) do { \
+      Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
+      (p)->arity = 4; \
+      reg[0] = (A0); \
+      reg[1] = (A1); \
+      reg[2] = (A2); \
+      reg[3] = (A3); \
+      (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
+      (p)->freason = TRAP; \
+      return THE_NON_VALUE; \
+ } while(0)
+
 #define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
      (p)->arity = 0; \
      (p)->i = (BeamInstr*) (Code_); \
@@ -401,6 +426,12 @@ do { \
     ERTS_BIF_PREP_TRAP3(RET, (TRP), (P), (A0), (A1), (A2)); \
 } while (0)
 
+#define ERTS_BIF_PREP_YIELD4(RET, TRP, P, A0, A1, A2, A3) \
+do { \
+    ERTS_VBUMP_ALL_REDS((P)); \
+    ERTS_BIF_PREP_TRAP4(RET, (TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
 #define ERTS_BIF_YIELD0(TRP, P) \
 do { \
     ERTS_VBUMP_ALL_REDS((P)); \
@@ -425,6 +456,12 @@ do { \
     BIF_TRAP3((TRP), (P), (A0), (A1), (A2)); \
 } while (0)
 
+#define ERTS_BIF_YIELD4(TRP, P, A0, A1, A2, A3) \
+do { \
+    ERTS_VBUMP_ALL_REDS((P)); \
+    BIF_TRAP4((TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
 #define ERTS_BIF_PREP_EXITED(RET, PROC) \
 do { \
     KILL_CATCHES((PROC)); \
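The new arity-4 trap/yield macros follow the existing arity-0..3 pattern: stash the arguments in x registers, point the instruction pointer at the trap export, and return THE_NON_VALUE. They exist for erts_internal_create_dist_channel_4() in dist.c below. A sketch of the yield idiom with a hypothetical BIF (ERTS_BIF_REDS_LEFT is assumed here as the reduction check; illustration only):

```c
/* Hypothetical arity-4 BIF showing the yield idiom the new macros
 * enable: when out of reductions, consume the remainder and
 * reschedule the call with the same four arguments. */
BIF_RETTYPE my_bif_4(BIF_ALIST_4)
{
    if (ERTS_BIF_REDS_LEFT(BIF_P) == 0) {
        ERTS_BIF_YIELD4(bif_export[BIF_my_bif_4], BIF_P,
                        BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
    }

    /* ... do the actual work ... */
    BIF_RET(am_ok);
}
```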
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 1276048317..7548924178 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -125,7 +125,7 @@ bif erlang:pid_to_list/1
 bif erlang:ports/0
 bif erlang:pre_loaded/0
 bif erlang:process_flag/2
-bif erlang:process_flag/3
+bif erts_internal:process_flag/3
 bif erlang:process_info/1
 bif erlang:process_info/2
 bif erlang:processes/0
@@ -154,7 +154,6 @@ bif erlang:unregister/1
 bif erlang:whereis/1
 bif erlang:spawn_opt/1
 bif erlang:setnode/2
-bif erlang:setnode/3
 bif erlang:dist_get_stat/1
 bif erlang:dist_ctrl_input_handler/2
 bif erlang:dist_ctrl_put_data/2
@@ -191,6 +190,8 @@ bif erts_internal:scheduler_wall_time/1
 bif erts_internal:dirty_process_handle_signals/1
 
+bif erts_internal:create_dist_channel/4
+
 # inet_db support
 bif erlang:port_set_data/2
 bif erlang:port_get_data/1
@@ -204,9 +205,9 @@ bif erlang:seq_trace/2
 bif erlang:seq_trace_info/1
 bif erlang:seq_trace_print/1
 bif erlang:seq_trace_print/2
-bif erlang:suspend_process/2
+bif erts_internal:suspend_process/2
 bif erlang:resume_process/1
-bif erlang:process_display/2
+bif erts_internal:process_display/2
 bif erlang:bump_reductions/1
@@ -691,5 +692,9 @@ bif erts_internal:new_connection/1
 bif erts_internal:abort_connection/2
 bif erts_internal:map_next/3
 bif ets:whereis/1
+bif erts_internal:gather_alloc_histograms/1
+bif erts_internal:gather_carrier_info/1
+ubif erlang:map_get/2
+ubif erlang:is_map_key/2
 bif ets:internal_delete_all/2
 bif ets:internal_select_delete/2
diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab
index 0932b8b985..0f074280db 100644
--- a/erts/emulator/beam/bif_instrs.tab
+++ b/erts/emulator/beam/bif_instrs.tab
@@ -432,9 +432,17 @@ nif_bif.call_nif() {
     live_hf_end = c_p->mbuf;
     ERTS_CHK_MBUF_SZ(c_p);
     erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
+
+    ASSERT((c_p->scheduler_data)->current_nif == NULL);
+    (c_p->scheduler_data)->current_nif = &env;
+
     nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
     if (env.exception_thrown)
         nif_bif_result = THE_NON_VALUE;
+
+    ASSERT((c_p->scheduler_data)->current_nif == &env);
+    (c_p->scheduler_data)->current_nif = NULL;
+
     erts_post_nif(&env);
     ERTS_CHK_MBUF_SZ(c_p);
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index ba8cc5e2ba..9ff52c92b8 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -36,7 +36,6 @@
 #include "hash.h"
 #include "atom.h"
 #include "beam_load.h"
-#include "erl_instrument.h"
 #include "erl_hl_timer.h"
 #include "erl_thr_progress.h"
 #include "erl_proc_sig_queue.h"
@@ -955,20 +954,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
     erts_cbprintf(to, to_arg, "=atoms\n");
     dump_atoms(to, to_arg);
 
-    /* Keep the instrumentation data at the end of the dump */
-    if (erts_instr_memory_map || erts_instr_stat) {
-        erts_cbprintf(to, to_arg, "=instr_data\n");
-
-        if (erts_instr_stat) {
-            erts_cbprintf(to, to_arg, "=memory_status\n");
-            erts_instr_dump_stat_to(to, to_arg, 0);
-        }
-        if (erts_instr_memory_map) {
-            erts_cbprintf(to, to_arg, "=memory_map\n");
-            erts_instr_dump_memory_map_to(to, to_arg);
-        }
-    }
-
     erts_cbprintf(to, to_arg, "=end\n");
     if (fp) {
         fclose(fp);
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index f203d85ca9..70474898b2 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1471,22 +1471,16 @@ int erts_net_message(Port *prt,
             mdp = erts_monitor_create(ERTS_MON_TYPE_DIST_PROC,
                                       ref, watcher, pid, name);
 
-#ifdef DEBUG
-            code =
-#endif
-                erts_monitor_dist_insert(&mdp->origin, dep->mld);
-            ASSERT(code);
+            code = erts_monitor_dist_insert(&mdp->origin, dep->mld);
+            ASSERT(code); (void)code;
 
             if (erts_proc_sig_send_monitor(&mdp->target, pid))
                 break; /* done */
 
             /* Failed to send to local proc; cleanup reply noproc... */
 
-#ifdef DEBUG
-            code =
-#endif
-                erts_monitor_dist_delete(&mdp->origin);
-            ASSERT(code);
+            code = erts_monitor_dist_delete(&mdp->origin);
+            ASSERT(code); (void)code;
             erts_monitor_release_both(mdp);
 
         }
@@ -3144,60 +3138,60 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
     BIF_ERROR(BIF_P, BADARG);
 }
 
-/**********************************************************************
- ** Allocate a dist entry, set node name install the connection handler
- ** setnode_3({name@host, Creation}, Cid, {Type, Version, Initial, IC, OC})
- ** Type = flag field, where the flags are specified in dist.h
- ** Version = distribution version, >= 1
- ** IC = in_cookie (ignored)
- ** OC = out_cookie (ignored)
- **
- ** Note that in distribution protocols above 1, the Initial parameter
- ** is always NIL and the cookies are always the atom '', cookies are not
- ** sent in the distribution messages but are only used in
- ** the handshake.
- **
- ***********************************************************************/
+/*
+ * erts_internal:create_dist_channel/4 is used by
+ * erlang:setnode/3.
+ */
+
+typedef struct {
+    DistEntry *dep;
+    Uint flags;
+    Uint version;
+} ErtsSetupConnDistCtrl;
+
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+                                  Eterm ctrlr, Uint flags,
+                                  Uint version);
 
-BIF_RETTYPE setnode_3(BIF_ALIST_3)
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg,
+                          int *redsp, ErlHeapFragment **bpp);
+
+BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
 {
     BIF_RETTYPE ret;
     Uint flags;
-    unsigned long version;
-    Eterm ic, oc;
-    Eterm *tp;
+    Uint version;
+    Eterm *hp, res_tag = THE_NON_VALUE, res = THE_NON_VALUE;
     DistEntry *dep = NULL;
-    ErtsProcLocks proc_unlock = 0;
-    Process *proc;
+    int de_locked = 0;
     Port *pp = NULL;
-    Eterm notify_proc;
-    erts_aint32_t qflgs;
 
     /*
      * Check and pick out arguments
      */
 
-    if (!is_node_name_atom(BIF_ARG_1) ||
-        !(is_internal_port(BIF_ARG_2)
-          || is_internal_pid(BIF_ARG_2))
-        || (erts_this_node->sysname == am_Noname)) {
-        goto badarg;
-    }
+    /* Node name... */
+    if (!is_node_name_atom(BIF_ARG_1))
+        goto badarg;
 
-    if (!is_tuple(BIF_ARG_3))
-        goto badarg;
-    tp = tuple_val(BIF_ARG_3);
-    if (*tp++ != make_arityval(4))
-        goto badarg;
-    if (!is_small(*tp))
-        goto badarg;
-    flags = unsigned_val(*tp++);
-    if (!is_small(*tp) || (version = unsigned_val(*tp)) == 0)
-        goto badarg;
-    ic = *(++tp);
-    oc = *(++tp);
-    if (!is_atom(ic) || !is_atom(oc))
-        goto badarg;
+    /* Distribution controller... */
+    if (!is_internal_port(BIF_ARG_2) && !is_internal_pid(BIF_ARG_2))
+        goto badarg;
+
+    /* Dist flags... */
+    if (!is_small(BIF_ARG_3))
+        goto badarg;
+    flags = unsigned_val(BIF_ARG_3);
+
+    /* Version... */
+    if (!is_small(BIF_ARG_4))
+        goto badarg;
+    version = unsigned_val(BIF_ARG_4);
+
+    if (version == 0)
+        goto badarg;
 
     if (~flags & DFLAG_DIST_MANDATORY) {
         erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
@@ -3228,74 +3222,79 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
     else if (!dep)
         goto system_limit; /* Should never happen!!! */
 
+    erts_de_rlock(dep);
+    de_locked = -1;
+
+    if (dep->state == ERTS_DE_STATE_EXITING) {
+        /* Suspend on dist entry waiting for the exit to finish */
+        ErtsProcList *plp = erts_proclist_create(BIF_P);
+        plp->next = NULL;
+        erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+        erts_mtx_lock(&dep->qlock);
+        erts_proclist_store_last(&dep->suspended, plp);
+        erts_mtx_unlock(&dep->qlock);
+        goto yield;
+    }
+
+    erts_de_runlock(dep);
+    de_locked = 0;
+
     if (is_internal_pid(BIF_ARG_2)) {
         if (BIF_P->common.id == BIF_ARG_2) {
-            proc_unlock = 0;
-            proc = BIF_P;
-        }
-        else {
-            proc_unlock = ERTS_PROC_LOCK_MAIN;
-            proc = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
-                                             BIF_ARG_2, proc_unlock);
-        }
-        erts_de_rwlock(dep);
-
-        if (!proc)
-            goto badarg;
-        else if (proc == ERTS_PROC_LOCK_BUSY) {
-            proc_unlock = 0;
-            goto yield;
-        }
+            ErtsSetupConnDistCtrl scdc;
 
-        erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS);
-        proc_unlock |= ERTS_PROC_LOCK_STATUS;
+            scdc.dep = dep;
+            scdc.flags = flags;
+            scdc.version = version;
 
-        if (ERTS_PROC_GET_DIST_ENTRY(proc)) {
-            if (dep == ERTS_PROC_GET_DIST_ENTRY(proc)
-                && (proc->flags & F_DISTRIBUTION)
-                && dep->cid == BIF_ARG_2) {
-                ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
-                goto done;
-            }
-            goto badarg;
-        }
+            res = setup_connection_distctrl(BIF_P, &scdc, NULL, NULL);
+            BUMP_REDS(BIF_P, 5);
+            dep = NULL;
 
-        if (dep->state == ERTS_DE_STATE_EXITING) {
-            /* Suspend on dist entry waiting for the exit to finish */
-            ErtsProcList *plp = erts_proclist_create(BIF_P);
-            plp->next = NULL;
-            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
-            erts_mtx_lock(&dep->qlock);
-            erts_proclist_store_last(&dep->suspended, plp);
-            erts_mtx_unlock(&dep->qlock);
-            goto yield;
-        }
-        if (dep->state != ERTS_DE_STATE_PENDING) {
-            if (dep->state == ERTS_DE_STATE_IDLE)
-                erts_set_dist_entry_pending(dep);
-            else
+            if (res == am_badarg)
                 goto badarg;
+
+            ASSERT(is_internal_magic_ref(res));
+            res_tag = am_ok; /* Connection up */
         }
+        else {
+            ErtsSetupConnDistCtrl *scdcp;
 
-        if (is_not_nil(dep->cid))
-            goto badarg;
+            scdcp = erts_alloc(ERTS_ALC_T_SETUP_CONN_ARG,
+                               sizeof(ErtsSetupConnDistCtrl));
 
-        proc->flags |= F_DISTRIBUTION;
-        ERTS_PROC_SET_DIST_ENTRY(proc, dep);
+            scdcp->dep = dep;
+            scdcp->flags = flags;
+            scdcp->version = version;
 
-        proc_unlock &= ~ERTS_PROC_LOCK_STATUS;
-        erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
+            res = erts_proc_sig_send_rpc_request(BIF_P,
+                                                 BIF_ARG_2,
+                                                 !0,
+                                                 setup_connection_distctrl,
+                                                 (void *) scdcp);
+            if (is_non_value(res))
+                goto badarg;
+
+            dep = NULL;
 
-        dep->send = NULL; /* Only for distr ports... */
+            ASSERT(is_internal_ordinary_ref(res));
+            res_tag = am_message; /* Caller need to wait for dhandle in message */
+        }
+        hp = HAlloc(BIF_P, 3);
     }
     else {
+        int new;
 
         pp = erts_id2port_sflgs(BIF_ARG_2,
                                 BIF_P,
                                 ERTS_PROC_LOCK_MAIN,
                                 ERTS_PORT_SFLGS_INVALID_LOOKUP);
         erts_de_rwlock(dep);
+        de_locked = 1;
+
+        if (dep->state == ERTS_DE_STATE_EXITING)
+            goto badarg;
 
         if (!pp || (erts_atomic32_read_nob(&pp->state)
                     & ERTS_PORT_SFLG_EXITING))
@@ -3304,65 +3303,108 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
         if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
             goto badarg;
 
-        if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) {
-            ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
-            goto done; /* Already set */
-        }
+        if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep)
+            new = 0;
+        else {
+            if (dep->state != ERTS_DE_STATE_PENDING) {
+                if (dep->state == ERTS_DE_STATE_IDLE)
+                    erts_set_dist_entry_pending(dep);
+                else
+                    goto badarg;
+            }
 
-        if (dep->state == ERTS_DE_STATE_EXITING) {
-            /* Suspend on dist entry waiting for the exit to finish */
-            ErtsProcList *plp = erts_proclist_create(BIF_P);
-            plp->next = NULL;
-            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
-            erts_mtx_lock(&dep->qlock);
-            erts_proclist_store_last(&dep->suspended, plp);
-            erts_mtx_unlock(&dep->qlock);
-            goto yield;
-        }
-        if (dep->state != ERTS_DE_STATE_PENDING) {
-            if (dep->state == ERTS_DE_STATE_IDLE)
-                erts_set_dist_entry_pending(dep);
-            else
+            if (pp->dist_entry || is_not_nil(dep->cid))
                 goto badarg;
-        }
 
-        if (pp->dist_entry || is_not_nil(dep->cid))
-            goto badarg;
+            erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
 
-        erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+            pp->dist_entry = dep;
 
-        pp->dist_entry = dep;
+            ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
 
-        ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+            dep->send = (pp->drv_ptr->outputv
+                         ? dist_port_commandv
+                         : dist_port_command);
+            ASSERT(dep->send);
 
-        dep->send = (pp->drv_ptr->outputv
-                     ? dist_port_commandv
-                     : dist_port_command);
-        ASSERT(dep->send);
+            /*
+             * Dist-ports do not use the "busy port message queue" functionality, but
+             * instead use "busy dist entry" functionality.
+             */
+            {
+                ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+                erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+            }
 
-        /*
-         * Dist-ports do not use the "busy port message queue" functionality, but
-         * instead use "busy dist entry" functionality.
-         */
-        {
-            ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
-            erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
-        }
+            setup_connection_epiloge_rwunlock(BIF_P, dep, BIF_ARG_2, flags, version);
+            de_locked = 0;
+            new = !0;
+        }
+        hp = HAlloc(BIF_P, 3 + ERTS_MAGIC_REF_THING_SIZE);
+        res = erts_build_dhandle(&hp, &BIF_P->off_heap, dep);
+        res_tag = am_ok; /* Connection up */
+        if (new)
+            dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+    }
+
+    ASSERT(is_value(res) && is_value(res_tag));
+
+    res = TUPLE2(hp, res_tag, res);
+
+    ERTS_BIF_PREP_RET(ret, res);
+
+ done:
+
+    if (dep && dep != erts_this_dist_entry) {
+        if (de_locked) {
+            if (de_locked > 0)
+                erts_de_rwunlock(dep);
+            else
+                erts_de_runlock(dep);
+        }
+        erts_deref_dist_entry(dep);
+    }
 
+    if (pp)
+        erts_port_release(pp);
+
+    return ret;
+
+ yield:
+    ERTS_BIF_PREP_YIELD4(ret,
+                         bif_export[BIF_erts_internal_create_dist_channel_4],
+                         BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+    goto done;
+
+ badarg:
+    ERTS_BIF_PREP_RET(ret, am_badarg);
+    goto done;
+
+ system_limit:
+    ERTS_BIF_PREP_RET(ret, am_system_limit);
+    goto done;
+}
+
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+                                  Eterm ctrlr, Uint flags,
+                                  Uint version)
+{
+    Eterm notify_proc = NIL;
+    erts_aint32_t qflgs;
+
     dep->version = version;
     dep->creation = 0;
 
-#ifdef DEBUG
+    ASSERT(is_internal_port(ctrlr) || is_internal_pid(ctrlr));
     ASSERT(erts_atomic_read_nob(&dep->qsize) == 0
            || (dep->state == ERTS_DE_STATE_PENDING));
-#endif
 
     if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
         create_cache(dep);
 
-    erts_set_dist_entry_connected(dep, BIF_ARG_2, flags);
+    erts_set_dist_entry_connected(dep, ctrlr, flags);
 
     notify_proc = NIL;
     if (erts_atomic_read_nob(&dep->qsize)) {
@@ -3381,50 +3423,100 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
             }
         }
     }
-    erts_de_rwunlock(dep);
-    if (is_internal_pid(notify_proc))
-        notify_dist_data(BIF_P, notify_proc);
 
-    ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
+    erts_de_rwunlock(dep);
 
-    dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+    if (is_internal_pid(notify_proc))
+        notify_dist_data(c_p, notify_proc);
 
     inc_no_nodes();
 
-    send_nodes_mon_msgs(BIF_P,
+    send_nodes_mon_msgs(c_p,
                         am_nodeup,
-                        BIF_ARG_1,
+                        dep->sysname,
                         flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
                         NIL);
- done:
+}
 
-    if (dep && dep != erts_this_dist_entry) {
-        erts_de_rwunlock(dep);
-        erts_deref_dist_entry(dep);
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
+{
+    ErtsSetupConnDistCtrl *scdcp = (ErtsSetupConnDistCtrl *) arg;
+    DistEntry *dep = scdcp->dep;
+    int dep_locked = 0;
+    Eterm *hp;
+    erts_aint32_t state;
+
+    if (redsp)
+        *redsp = 1;
+
+    state = erts_atomic32_read_nob(&c_p->state);
+
+    if (state & ERTS_PSFLG_EXITING)
+        goto badarg;
+
+    erts_de_rwlock(dep);
+    dep_locked = !0;
+
+    if (dep->state == ERTS_DE_STATE_EXITING)
+        goto badarg;
+
+    if (ERTS_PROC_GET_DIST_ENTRY(c_p)) {
+        if (dep == ERTS_PROC_GET_DIST_ENTRY(c_p)
+            && (c_p->flags & F_DISTRIBUTION)
+            && dep->cid == c_p->common.id) {
+            goto connected;
+        }
+        goto badarg;
     }
 
-    if (pp)
-        erts_port_release(pp);
+    if (dep->state != ERTS_DE_STATE_PENDING) {
+        if (dep->state == ERTS_DE_STATE_IDLE)
+            erts_set_dist_entry_pending(dep);
+        else
+            goto badarg;
+    }
 
-    if (proc_unlock)
-        erts_proc_unlock(proc, proc_unlock);
+    if (is_not_nil(dep->cid))
+        goto badarg;
 
-    return ret;
+    c_p->flags |= F_DISTRIBUTION;
+    ERTS_PROC_SET_DIST_ENTRY(c_p, dep);
 
- yield:
-    ERTS_BIF_PREP_YIELD3(ret, bif_export[BIF_setnode_3], BIF_P,
-                         BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
-    goto done;
+    dep->send = NULL; /* Only for distr ports... */
 
- badarg:
-    ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
-    goto done;
+    if (redsp)
+        *redsp = 5;
 
- system_limit:
-    ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
-    goto done;
+    setup_connection_epiloge_rwunlock(c_p, dep, c_p->common.id,
+                                      scdcp->flags, scdcp->version);
+
+connected:
+
+    /* we take over previous inc in refc of dep */
+
+    if (!bpp) /* called directly... */
+        return erts_make_dhandle(c_p, dep);
+
+    erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
+
+    *bpp = new_message_buffer(ERTS_MAGIC_REF_THING_SIZE);
+    hp = (*bpp)->mem;
+    return erts_build_dhandle(&hp, &(*bpp)->off_heap, dep);
+
+badarg:
+
+    if (bpp) /* not called directly */
+        erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
+
+    if (dep_locked)
+        erts_de_rwunlock(dep);
+
+    erts_deref_dist_entry(dep);
+
+    return am_badarg;
 }
 
+
 BIF_RETTYPE erts_internal_get_dflags_0(BIF_ALIST_0)
 {
     return erts_dflags_record;
@@ -3593,7 +3685,7 @@ int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks)
         dhandle = erts_build_dhandle(&hp, ohp, dep);
         msg = TUPLE4(hp, am_auto_connect, dep->sysname, make_small(conn_id),
                      dhandle);
-        erts_queue_message(net_kernel, nk_locks, mp, msg, proc->common.id);
+        erts_queue_proc_message(proc, net_kernel, nk_locks, mp, msg);
         erts_proc_unlock(net_kernel, nk_locks);
     }
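A detail worth noting in setup_connection_distctrl() above: the same callback serves two calling modes. When the caller is itself the distribution controller it is invoked directly (redsp == NULL, bpp == NULL) with the argument struct on the caller's stack, and the dhandle can be built with erts_make_dhandle() on the caller's heap; when delivered as a signal it must free the heap-allocated argument and return any non-immediate result in a fresh heap fragment via *bpp. A condensed sketch of that convention (illustrative only; the work itself is elided):

```c
/* Condensed from setup_connection_distctrl() above; not a new API. */
static Eterm dual_mode_callback(Process *c_p, void *arg,
                                int *redsp, ErlHeapFragment **bpp)
{
    DistEntry *dep = ((ErtsSetupConnDistCtrl *) arg)->dep;
    Eterm *hp;

    if (redsp)
        *redsp = 1;          /* reduction cost; NULL when called directly */

    /* ... the actual connection setup on c_p goes here ... */

    if (!bpp)                /* called directly: caller owns 'arg' */
        return erts_make_dhandle(c_p, dep);

    erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);   /* signal path owns 'arg' */

    *bpp = new_message_buffer(ERTS_MAGIC_REF_THING_SIZE);
    hp = (*bpp)->mem;
    return erts_build_dhandle(&hp, &(*bpp)->off_heap, dep);
}
```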
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 061b9df627..d99d2ea57b 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -38,7 +38,7 @@
 #include "erl_db.h"
 #include "erl_binary.h"
 #include "erl_bits.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
 #include "erl_mseg.h"
 #include "erl_monitor_link.h"
 #include "erl_hl_timer.h"
@@ -202,8 +202,6 @@ typedef struct {
     int top_pad;
     AlcUInit_t alloc_util;
     struct {
-        int stat;
-        int map;
        char *mtrace;
        char *nodename;
     } instr;
@@ -428,6 +426,7 @@ set_default_binary_alloc_opts(struct au_init *ip)
 #endif
     ip->init.util.ts = ERTS_ALC_MTA_BINARY;
     ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+    ip->init.util.atags = 1;
 }
 
 static void
@@ -464,6 +463,7 @@ set_default_driver_alloc_opts(struct au_init *ip)
 #endif
     ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
     ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+    ip->init.util.atags = 1;
 }
 
 static void
@@ -501,6 +501,7 @@ set_default_test_alloc_opts(struct au_init *ip)
     ip->init.util.mmbcs = 0; /* Main carrier size */
     ip->init.util.ts = ERTS_ALC_MTA_TEST;
     ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+    ip->init.util.atags = 1;
 
     /* Use a constant minimal MBC size */
 #if ERTS_SA_MB_CARRIERS
@@ -906,7 +907,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
                     &test_alloc_state);
 
     erts_mtrace_install_wrapper_functions();
-    extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
 
     init_aireq_alloc();
@@ -1411,7 +1411,9 @@ handle_au_arg(struct au_init *auip,
            }
            if (!strategy_support_carrier_migration(auip))
                auip->init.util.acul = 0;
-        }
+        } else if (has_prefix("atags", sub_param)) {
+            auip->init.util.atags = get_bool_value(sub_param + 5, argv, ip);
+        }
        else
            goto bad_switch;
        break;
@@ -1741,24 +1743,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
                break;
            case 'i':
                switch (argv[i][3]) {
-               case 's':
-                   arg = get_value(argv[i]+4, argv, &i);
-                   if (sys_strcmp("true", arg) == 0)
-                       init->instr.stat = 1;
-                   else if (sys_strcmp("false", arg) == 0)
-                       init->instr.stat = 0;
-                   else
-                       bad_value(param, param+3, arg);
-                   break;
-               case 'm':
-                   arg = get_value(argv[i]+4, argv, &i);
-                   if (sys_strcmp("true", arg) == 0)
-                       init->instr.map = 1;
-                   else if (sys_strcmp("false", arg) == 0)
-                       init->instr.map = 0;
-                   else
-                       bad_value(param, param+3, arg);
-                   break;
                case 't':
                    init->instr.mtrace = get_value(argv[i]+4, argv, &i);
                    break;
@@ -1817,9 +1801,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
        case '-':
            if (argv[i][2] == '\0') {
                /* End of system flags reached */
-               if (init->instr.mtrace
-                   /* || init->instr.stat
-                      || init->instr.map */) {
+               if (init->instr.mtrace) {
                    while (i < *argc) {
                        if(sys_strcmp(argv[i], "-sname") == 0
                           || sys_strcmp(argv[i], "-name") == 0) {
@@ -2097,7 +2079,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
  * NOTE! When updating this function, make sure to also update
  *       erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
  */
-#define ERTS_MEM_NEED_ALL_ALCU (!erts_instr_stat && want_tot_or_sys)
+#define ERTS_MEM_NEED_ALL_ALCU (want_tot_or_sys)
     struct {
        int total;
        int processes;
@@ -2108,7 +2090,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
        int binary;
        int code;
        int ets;
-       int maximum;
     } want = {0};
     struct {
        UWord total;
@@ -2120,7 +2101,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
        UWord binary;
        UWord code;
        UWord ets;
-       UWord maximum;
     } size = {0};
     Eterm atoms[sizeof(size)/sizeof(UWord)];
     UWord *uintps[sizeof(size)/sizeof(UWord)];
@@ -2173,12 +2153,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
        want.ets = 1;
        atoms[length] = am_ets;
        uintps[length++] = &size.ets;
-
-       want.maximum = erts_instr_stat;
-       if (want.maximum) {
-           atoms[length] = am_maximum;
-           uintps[length++] = &size.maximum;
-       }
     }
     else {
        DeclareTmpHeapNoproc(tmp_heap,2);
@@ -2260,18 +2234,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
                    uintps[length++] = &size.ets;
                }
                break;
-           case am_maximum:
-               if (erts_instr_stat) {
-                   if (!want.maximum) {
-                       want.maximum = 1;
-                       atoms[length] = am_maximum;
-                       uintps[length++] = &size.maximum;
-                   }
-               } else {
-                   UnUseTmpHeapNoproc(2);
-                   return am_badarg;
-               }
-               break;
            default:
                UnUseTmpHeapNoproc(2);
                return am_badarg;
@@ -2437,14 +2399,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
        size.ets += erts_get_ets_misc_mem_size();
     }
 
-    if (erts_instr_stat && (want_tot_or_sys || want.maximum)) {
-       if (want_tot_or_sys) {
-           size.total = erts_instr_get_total();
-           size.system = size.total - size.processes;
-       }
-       size.maximum = erts_instr_get_max_total();
-    }
-    else if (want_tot_or_sys) {
+    if (want_tot_or_sys) {
        size.system = size.total - size.processes;
     }
@@ -2522,18 +2477,6 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
     i = 0;
 
-    if (erts_instr_stat) {
-       values[i].arity = 2;
-       values[i].name = "total";
-       values[i].ui[0] = erts_instr_get_total();
-       i++;
-
-       values[i].arity = 2;
-       values[i].name = "maximum";
-       values[i].ui[0] = erts_instr_get_max_total();
-       i++;
-    }
-
     values[i].arity = 2;
     values[i].name = "sys_misc";
     values[i].ui[0] = erts_sys_misc_mem_sz();
@@ -2824,10 +2767,7 @@ erts_allocator_info(fmtfn_t to, void *arg)
        erts_alcu_au_info_options(&to, arg, NULL, NULL);
 
        erts_print(to, arg, "=allocator:instr\n");
-       erts_print(to, arg, "option m: %s\n",
-                  erts_instr_memory_map ? "true" : "false");
-       erts_print(to, arg, "option s: %s\n",
-                  erts_instr_stat ? "true" : "false");
+
        erts_print(to, arg, "option t: %s\n",
                   erts_mtrace_enabled ? "true" : "false");
@@ -2933,16 +2873,12 @@ erts_allocator_options(void *proc)
                             NULL, hpp, szp);
 #endif
     {
-       Eterm o[3], v[3];
-       o[0] = am_atom_put("m", 1);
-       v[0] = erts_instr_memory_map ? am_true : am_false;
-       o[1] = am_atom_put("s", 1);
-       v[1] = erts_instr_stat ? am_true : am_false;
-       o[2] = am_atom_put("t", 1);
-       v[2] = erts_mtrace_enabled ? am_true : am_false;
+       Eterm o[1], v[1];
+       o[0] = am_atom_put("t", 1);
+       v[0] = erts_mtrace_enabled ? am_true : am_false;
 
        atoms[length] = am_atom_put("instr", 5);
-       terms[length++] = erts_bld_2tup_list(hpp, szp, 3, o, v);
+       terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
     }
 
     atoms[length] = am_atom_put("lock_physical_memory", 20);
@@ -3458,8 +3394,8 @@ badarg:
 
 /*
  * The allocator wrapper prelocking stuff below is about the locking order.
- * It only affects wrappers (erl_mtrace.c and erl_instrument.c) that keep locks
- * during alloc/realloc/free.
+ * It only affects wrappers (erl_mtrace.c) that keep locks during
+ * alloc/realloc/free.
  *
  * Some query functions in erl_alloc_util.c lock the allocator mutex and then
  * use erts_printf that in turn may call the sys allocator through the wrappers.
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 4a6a19b210..9db600dce0 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -287,6 +287,8 @@ type DIST_DEMONITOR SHORT_LIVED PROCESSES dist_demonitor
 type CML_CLEANUP SHORT_LIVED SYSTEM connection_ml_cleanup
 type ML_YIELD_STATE SHORT_LIVED SYSTEM monitor_link_yield_state
 type ML_DIST STANDARD SYSTEM monitor_link_dist
+type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
+type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
 
 type ENVIRONMENT SYSTEM SYSTEM environment
 
@@ -346,6 +348,7 @@ type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
 type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
 type EXPORT LONG_LIVED CODE export_entry
 type MONITOR FIXED_SIZE PROCESSES monitor
+type MONITOR_SUSPEND STANDARD PROCESSES monitor_suspend
 type LINK FIXED_SIZE PROCESSES link
 type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
 type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
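The new per-instance `atags` sub-parameter is parsed in handle_au_arg() above (the `sub_param + 5` skips past the five characters of "atags"), so it should be controllable like other alloc_util options via the `+M<S>` command-line family — e.g. something like `erl +MBatags false` to turn tagging off for binary_alloc (flag spelling inferred from the parser here, not from documentation). Binary, driver and test allocators default to tags enabled, which is what the new erts_internal:gather_alloc_histograms/1 and erts_internal:gather_carrier_info/1 BIFs added in bif.tab consume; the old `+Mis`/`+Mim` erl_instrument switches are removed in the same stroke.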
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index e148be7af6..fdf355d503 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -48,6 +48,8 @@
 #include "erl_mseg.h"
 #include "erl_threads.h"
 #include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
+#include "erl_nif.h"
 
 #ifdef ERTS_ENABLE_LOCK_COUNT
 #include "erl_lock_count.h"
@@ -139,6 +141,33 @@ MBC after deallocating first block:
     [Carrier_t|pad|Block_t 111| udata... ]
 */
 
+/* Allocation tags ...
+ *
+ * These are added to the footer of every block when enabled. Currently they
+ * consist of the allocation type and an atom identifying the allocating
+ * driver/nif (or 'system' if that can't be determined), but the format is not
+ * supposed to be set in stone.
+ *
+ * The packing scheme requires that the atom values are small enough to fit
+ * into a word with ERTS_ALC_N_BITS to spare. Users must check for overflow
+ * before MAKE_ATAG(). */
+
+typedef UWord alcu_atag_t;
+
+#define MAKE_ATAG(IdAtom, Type) \
+    (ASSERT((Type) >= ERTS_ALC_N_MIN && (Type) <= ERTS_ALC_N_MAX), \
+     ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \
+     (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (Type))
+
+#define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS))
+#define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK)
+
+#define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS)
+
+#define DBG_IS_VALID_ATAG(Allocator, AT) \
+    (ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \
+     ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \
+     (Allocator)->alloc_no == ERTS_ALC_T2A(ERTS_ALC_N2T(ATAG_TYPE(AT))))
+
 /* Blocks ... */
 
@@ -153,10 +182,17 @@ MBC after deallocating first block:
 #endif
 #define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
 
+#define GET_BLK_ATAG(B) \
+    (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])
+#define SET_BLK_ATAG(B, T) \
+    (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))
+
+#define BLK_ATAG_SZ(AP) ((AP)->atags ? sizeof(alcu_atag_t) : 0)
+
 #define UMEMSZ2BLKSZ(AP, SZ) \
-  (ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \
+  (ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ) <= (AP)->min_block_size \
    ? (AP)->min_block_size \
-   : UNIT_CEILING(ABLK_HDR_SZ + (SZ)))
+   : UNIT_CEILING(ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ)))
 
 #define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
 #define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
@@ -688,6 +724,62 @@ static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
 static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp);
 static void dealloc_block(Allctr_t *, void *, ErtsAlcFixList_t *, int);
 
+static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)
+{
+    ErtsSchedulerData *esdp;
+    Eterm id;
+
+    ERTS_CT_ASSERT(_unchecked_atom_val(am_system) <= MAX_ATAG_ATOM_ID);
+    ASSERT(allocator->atags);
+
+    esdp = erts_get_scheduler_data();
+    id = am_system;
+
+    if (esdp) {
+        if (esdp->current_nif) {
+            Module *mod = erts_nif_get_module((esdp->current_nif)->mod_nif);
+
+            /* Mod can be NULL if a resource destructor allocates memory after
+             * the module has been unloaded. */
+            if (mod) {
+                id = make_atom(mod->module);
+            }
+        } else if (esdp->current_port) {
+            Port *p = esdp->current_port;
+            id = (p->drv_ptr)->name_atom;
+        }
+
+        /* We fall back to 'system' if we can't pack the driver/NIF name into
+         * the tag. This may be a bit misleading but we've made no promises
+         * that the information is complete.
+         *
+         * This can only happen on 32-bit emulators when a new driver/NIF has
+         * been loaded *after* 16 million atoms have been used, and supporting
+         * that fringe case is not worth an extra word. 64-bit emulators are
+         * unaffected since the atom cache limits atom indexes to 32 bits. */
+        if(MAX_ATOM_TABLE_SIZE > MAX_ATAG_ATOM_ID) {
+            if (atom_val(id) > MAX_ATAG_ATOM_ID) {
+                id = am_system;
+            }
+        }
+    }
+
+    return MAKE_ATAG(id, type);
+}
+
+static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag)
+{
+    Block_t *block;
+
+    ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+    ASSERT(allocator->atags && p);
+    (void)allocator;
+
+    block = UMEM2BLK(p);
+
+    SET_BLK_ATAG(block, tag);
+}
+
 /* internal data... */
 
 #if 0
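A tag is a single word: the allocation type number lives in the low ERTS_ALC_N_BITS, and the atom index of the allocating NIF/driver (or 'system') sits above it. A hedged round-trip illustration of the packing macros above, assuming we are inside erl_alloc_util.c so they are in scope:

```c
/* Illustration only: pack and unpack an allocation tag. Assumes 'type'
 * is a valid type number and that the atom fits (both are checked by
 * the ASSERTs inside MAKE_ATAG). */
static void atag_round_trip_example(ErtsAlcType_t type)
{
    alcu_atag_t tag = MAKE_ATAG(am_system, type);

    ASSERT(ATAG_ID(tag) == am_system);   /* atom recovered from high bits */
    ASSERT(ATAG_TYPE(tag) == type);      /* type recovered from low bits  */
}
```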
@@ -4242,6 +4334,7 @@ static struct {
     Eterm e;
     Eterm t;
     Eterm ramv;
+    Eterm atags;
 #if HAVE_ERTS_MSEG
     Eterm asbcst;
     Eterm rsbcst;
@@ -4311,7 +4404,7 @@ static struct {
 #endif
 } am;
 
-static Eterm fix_type_atoms[ERTS_ALC_NO_FIXED_SIZES];
+static Eterm alloc_type_atoms[ERTS_ALC_N_MAX + 1];
 
 static ERTS_INLINE void atom_init(Eterm *atom, char *name)
@@ -4342,6 +4435,7 @@ init_atoms(Allctr_t *allctr)
        AM_INIT(e);
        AM_INIT(t);
        AM_INIT(ramv);
+       AM_INIT(atags);
 #if HAVE_ERTS_MSEG
        AM_INIT(asbcst);
        AM_INIT(rsbcst);
@@ -4413,12 +4507,12 @@ init_atoms(Allctr_t *allctr)
        }
 #endif
 
-       for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
-           ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
-           char *name = (char *) ERTS_ALC_N2TD(n);
-           size_t len = sys_strlen(name);
-           fix_type_atoms[ix] = am_atom_put(name, len);
-       }
+       for (ix = ERTS_ALC_N_MIN; ix <= ERTS_ALC_N_MAX; ix++) {
+           const char *name = ERTS_ALC_N2TD(ix);
+           size_t len = sys_strlen(name);
+
+           alloc_type_atoms[ix] = am_atom_put(name, len);
+       }
     }
 
     if (allctr && !allctr->atoms_initialized) {
@@ -4531,6 +4625,7 @@ sz_info_fix(Allctr_t *allctr,
            ErtsAlcFixList_t *fix = &allctr->fix[ix];
            UWord alloced = fix->type_size * fix->u.cpool.allocated;
            UWord used = fix->type_size * fix->u.cpool.used;
+           ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
 
            if (print_to_p) {
                fmtfn_t to = *print_to_p;
@@ -4538,15 +4633,14 @@ sz_info_fix(Allctr_t *allctr,
                erts_print(to,
                           arg,
                           "fix type internal: %s %bpu %bpu\n",
-                          (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
-                                                 + ix),
+                          (char *) ERTS_ALC_N2TD(n),
                           alloced,
                           used);
            }
 
            if (hpp || szp) {
                add_3tup(hpp, szp, &res,
-                        fix_type_atoms[ix],
+                        alloc_type_atoms[n],
                         bld_unstable_uint(hpp, szp, alloced),
                         bld_unstable_uint(hpp, szp, used));
            }
@@ -4559,6 +4653,7 @@ sz_info_fix(Allctr_t *allctr,
            ErtsAlcFixList_t *fix = &allctr->fix[ix];
            UWord alloced = fix->type_size * fix->u.nocpool.allocated;
            UWord used = fix->type_size*fix->u.nocpool.used;
+           ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
 
            if (print_to_p) {
                fmtfn_t to = *print_to_p;
@@ -4566,15 +4661,14 @@ sz_info_fix(Allctr_t *allctr,
                erts_print(to,
                           arg,
                           "fix type: %s %bpu %bpu\n",
-                          (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
-                                                 + ix),
+                          (char *) ERTS_ALC_N2TD(n),
                           alloced,
                           used);
            }
 
            if (hpp || szp) {
                add_3tup(hpp, szp, &res,
-                        fix_type_atoms[ix],
+                        alloc_type_atoms[n],
                         bld_unstable_uint(hpp, szp, alloced),
                         bld_unstable_uint(hpp, szp, used));
            }
@@ -5000,6 +5094,7 @@ info_options(Allctr_t *allctr,
                   "option e: true\n"
                   "option t: %s\n"
                   "option ramv: %s\n"
+                  "option atags: %s\n"
                   "option sbct: %beu\n"
 #if HAVE_ERTS_MSEG
                   "option asbcst: %bpu\n"
                   "option rsbcst: %bpu\n"
@@ -5018,6 +5113,7 @@ info_options(Allctr_t *allctr,
                   "option acul: %bpu\n",
                   topt,
                   allctr->ramv ? "true" : "false",
+                  allctr->atags ? "true" : "false",
                   allctr->sbc_threshold,
 #if HAVE_ERTS_MSEG
                   allctr->mseg_opt.abs_shrink_th,
@@ -5087,6 +5183,7 @@ info_options(Allctr_t *allctr,
                 am_sbct,
                 bld_uint(hpp, szp, allctr->sbc_threshold));
        add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
+       add_2tup(hpp, szp, &res, am.atags, allctr->atags ? am_true : am_false);
        add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false));
        add_2tup(hpp, szp, &res, am.e, am_true);
     }
@@ -5408,9 +5505,8 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
 /* ----------------------------------------------------------------------- */
 
 static ERTS_INLINE void *
-do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
+do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
 {
-    Allctr_t *allctr = (Allctr_t *) extra;
     void *res;
 
     ASSERT(initialized);
@@ -5449,10 +5545,19 @@ do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
 
 void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
 {
+    Allctr_t *allctr = (Allctr_t *) extra;
     void *res;
+
     ASSERT(!"This is not thread safe");
-    res = do_erts_alcu_alloc(type, extra, size);
+
+    res = do_erts_alcu_alloc(type, allctr, size);
+
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+    }
+
     DEBUG_CHECK_ALIGNMENT(res);
+
     return res;
 }
 
@@ -5462,13 +5567,25 @@ void *
 erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
 {
     Allctr_t *allctr = (Allctr_t *) extra;
+    alcu_atag_t tag = 0;
     void *res;
+
+    if (allctr->atags) {
+        tag = determine_alloc_tag(allctr, type);
+    }
+
     erts_mtx_lock(&allctr->mutex);
-    res = do_erts_alcu_alloc(type, extra, size);
-    DEBUG_CHECK_ALIGNMENT(res);
+    res = do_erts_alcu_alloc(type, allctr, size);
+
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, tag);
+    }
 
     erts_mtx_unlock(&allctr->mutex);
+
+    DEBUG_CHECK_ALIGNMENT(res);
+
     return res;
 }
 
@@ -5478,6 +5595,7 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
 {
     ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
     int ix;
+    alcu_atag_t tag = 0;
     Allctr_t *allctr;
     void *res;
 
@@ -5487,11 +5605,19 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
 
     allctr = tspec->allctr[ix];
 
+    if (allctr->atags) {
+        tag = determine_alloc_tag(allctr, type);
+    }
+
     if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);
 
     res = do_erts_alcu_alloc(type, allctr, size);
 
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, tag);
+    }
+
     if (allctr->thread_safe)
        erts_mtx_unlock(&allctr->mutex);
 
@@ -5504,10 +5630,15 @@ void *
 erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
 {
     Allctr_t *pref_allctr;
+    alcu_atag_t tag = 0;
     void *res;
 
     pref_allctr = get_pref_allctr(extra);
 
+    if (pref_allctr->atags) {
+        tag = determine_alloc_tag(pref_allctr, type);
+    }
+
     if (pref_allctr->thread_safe)
        erts_mtx_lock(&pref_allctr->mutex);
 
@@ -5523,12 +5654,15 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
        res = do_erts_alcu_alloc(type, pref_allctr, size);
     }
 
+    if (pref_allctr->atags && res) {
+        set_alloc_tag(pref_allctr, res, tag);
+    }
+
     if (pref_allctr->thread_safe)
        erts_mtx_unlock(&pref_allctr->mutex);
 
     DEBUG_CHECK_ALIGNMENT(res);
 
-
     return res;
 }
 
@@ -5537,10 +5671,9 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
 /* ------------------------------------------------------------------------- */
 
 static ERTS_INLINE void
-do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
+do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
                  Carrier_t **busy_pcrr_pp)
 {
-    Allctr_t *allctr = (Allctr_t *) extra;
     ASSERT(initialized);
 
     ASSERT(allctr);
@@ -5572,7 +5705,8 @@ do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
 
 void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
 {
-    do_erts_alcu_free(type, extra, p, NULL);
+    Allctr_t *allctr = (Allctr_t *) extra;
+    do_erts_alcu_free(type, allctr, p, NULL);
 }
 
@@ -5581,7 +5715,7 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
 {
     Allctr_t *allctr = (Allctr_t *) extra;
     erts_mtx_lock(&allctr->mutex);
-    do_erts_alcu_free(type, extra, p, NULL);
+    do_erts_alcu_free(type, allctr, p, NULL);
     erts_mtx_unlock(&allctr->mutex);
 }
 
@@ -5641,13 +5775,12 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
 
 static ERTS_INLINE void *
 do_erts_alcu_realloc(ErtsAlcType_t type,
-                    void *extra,
+                    Allctr_t *allctr,
                     void *p,
                     Uint size,
                     Uint32 alcu_flgs,
                     Carrier_t **busy_pcrr_pp)
 {
-    Allctr_t *allctr = (Allctr_t *) extra;
     Block_t *blk;
     void *res;
 
@@ -5661,7 +5794,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
     ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
 
     if (!p) {
-       res = do_erts_alcu_alloc(type, extra, size);
+       res = do_erts_alcu_alloc(type, allctr, size);
        INC_CC(allctr->calls.this_realloc);
        DEC_CC(allctr->calls.this_alloc);
        return res;
@@ -5670,7 +5803,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
 #if ALLOC_ZERO_EQ_NULL
     if (!size) {
        ASSERT(p);
-       do_erts_alcu_free(type, extra, p, busy_pcrr_pp);
+       do_erts_alcu_free(type, allctr, p, busy_pcrr_pp);
        INC_CC(allctr->calls.this_realloc);
        DEC_CC(allctr->calls.this_free);
        return NULL;
@@ -5755,19 +5888,29 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
 void *
 erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size)
 {
+    Allctr_t *allctr = (Allctr_t *)extra;
     void *res;
-    res = do_erts_alcu_realloc(type, extra, p, size, 0, NULL);
+
+    res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
+
     DEBUG_CHECK_ALIGNMENT(res);
+
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+    }
+
     return res;
 }
 
 void *
 erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
 {
+    Allctr_t *allctr = (Allctr_t *)extra;
     void *res;
-    res = do_erts_alcu_alloc(type, extra, size);
+
+    res = do_erts_alcu_alloc(type, allctr, size);
     if (!res)
-       res = erts_alcu_realloc(type, extra, p, size);
+        res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
     else {
        Block_t *blk;
        size_t cpy_size;
@@ -5777,23 +5920,42 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
        if (cpy_size > size)
            cpy_size = size;
        sys_memcpy(res, p, cpy_size);
-       do_erts_alcu_free(type, extra, p, NULL);
+       do_erts_alcu_free(type, allctr, p, NULL);
     }
+
     DEBUG_CHECK_ALIGNMENT(res);
+
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+    }
+
     return res;
 }
 
-
 void *
 erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
 {
     Allctr_t *allctr = (Allctr_t *) extra;
+    alcu_atag_t tag = 0;
     void *res;
+
+    if (allctr->atags) {
+        tag = determine_alloc_tag(allctr, type);
+    }
+
     erts_mtx_lock(&allctr->mutex);
-    res = do_erts_alcu_realloc(type, extra, ptr, size, 0, NULL);
+
+    res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
+
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, tag);
+    }
+
     erts_mtx_unlock(&allctr->mutex);
+
     DEBUG_CHECK_ALIGNMENT(res);
+
     return res;
 }
 
@@ -5801,11 +5963,17 @@ void *
 erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
 {
     Allctr_t *allctr = (Allctr_t *) extra;
+    alcu_atag_t tag = 0;
     void *res;
+
+    if (allctr->atags) {
+        tag = determine_alloc_tag(allctr, type);
+    }
+
     erts_mtx_lock(&allctr->mutex);
-    res = do_erts_alcu_alloc(type, extra, size);
+    res = do_erts_alcu_alloc(type, allctr, size);
     if (!res)
-       res = erts_alcu_realloc_ts(type, extra, p, size);
+        res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
     else {
        Block_t *blk;
        size_t cpy_size;
@@ -5815,10 +5983,17 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
        if (cpy_size > size)
            cpy_size = size;
        sys_memcpy(res, p, cpy_size);
-       do_erts_alcu_free(type, extra, p, NULL);
+       do_erts_alcu_free(type, allctr, p, NULL);
     }
+
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, tag);
+    }
+
     erts_mtx_unlock(&allctr->mutex);
+
     DEBUG_CHECK_ALIGNMENT(res);
+
     return res;
 }
 
@@ -5829,6 +6004,7 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
 {
     ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
     int ix;
+    alcu_atag_t tag = 0;
     Allctr_t *allctr;
     void *res;
 
@@ -5838,11 +6014,19 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
 
     allctr = tspec->allctr[ix];
 
+    if (allctr->atags) {
+        tag = determine_alloc_tag(allctr, type);
+    }
+
     if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);
 
     res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
 
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, tag);
+    }
+
     if (allctr->thread_safe)
        erts_mtx_unlock(&allctr->mutex);
 
@@ -5857,6 +6041,7 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
 {
     ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
     int ix;
+    alcu_atag_t tag = 0;
     Allctr_t *allctr;
     void *res;
 
@@ -5866,14 +6051,16 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
 
     allctr = tspec->allctr[ix];
 
+    if (allctr->atags) {
+        tag = determine_alloc_tag(allctr, type);
+    }
+
     if (allctr->thread_safe)
        erts_mtx_lock(&allctr->mutex);
 
     res = do_erts_alcu_alloc(type, allctr, size);
     if (!res) {
-       if (allctr->thread_safe)
-           erts_mtx_unlock(&allctr->mutex);
-       res = erts_alcu_realloc_thr_spec(type, allctr, ptr, size);
+        res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
     }
     else {
        Block_t *blk;
@@ -5885,29 +6072,34 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
            cpy_size = size;
        sys_memcpy(res, ptr, cpy_size);
        do_erts_alcu_free(type, allctr, ptr, NULL);
-       if (allctr->thread_safe)
-           erts_mtx_unlock(&allctr->mutex);
     }
 
+    if (allctr->atags && res) {
+        set_alloc_tag(allctr, res, tag);
+    }
+
+    if (allctr->thread_safe)
+        erts_mtx_unlock(&allctr->mutex);
+
     DEBUG_CHECK_ALIGNMENT(res);
 
     return res;
 }
 
 static ERTS_INLINE void *
-realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
+realloc_thr_pref(ErtsAlcType_t type, Allctr_t *pref_allctr, void *p, Uint size,
                 int force_move)
 {
     void *res;
-    Allctr_t *pref_allctr, *used_allctr;
+    Allctr_t *used_allctr;
     UWord old_user_size;
     Carrier_t *busy_pcrr_p;
+    alcu_atag_t tag = 0;
     int retried;
 
-    if (!p)
-       return erts_alcu_alloc_thr_pref(type, extra, size);
-
-    pref_allctr = get_pref_allctr(extra);
+    if (pref_allctr->atags) {
+        tag = determine_alloc_tag(pref_allctr, type);
+    }
 
     if (pref_allctr->thread_safe)
        erts_mtx_lock(&pref_allctr->mutex);
 
@@ -5936,6 +6128,11 @@ restart:
            retried = 1;
            goto restart;
        }
+
+        if (pref_allctr->atags && res) {
+            set_alloc_tag(pref_allctr, res, tag);
+        }
+
        if (pref_allctr->thread_safe)
            erts_mtx_unlock(&pref_allctr->mutex);
     }
@@ -5944,6 +6141,9 @@ restart:
        if (!res)
            goto unlock_ts_return;
        else {
+            if (pref_allctr->atags) {
+                set_alloc_tag(pref_allctr, res, tag);
+            }
 
            DEBUG_CHECK_ALIGNMENT(res);
 
@@ -5974,20 +6174,34 @@ restart:
        }
     }
 
+    DEBUG_CHECK_ALIGNMENT(res);
+
     return res;
 }
 
 void *
 erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
 {
-    return realloc_thr_pref(type, extra, p, size, 0);
+    if (p) {
+        Allctr_t *pref_allctr = get_pref_allctr(extra);
+
+        return realloc_thr_pref(type, pref_allctr, p, size, 0);
+    }
+
+    return erts_alcu_alloc_thr_pref(type, extra, size);
 }
 
 void *
 erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
                              void *p, Uint size)
 {
-    return realloc_thr_pref(type, extra, p, size, 1);
+    if (p) {
+        Allctr_t *pref_allctr = get_pref_allctr(extra);
+
+        return realloc_thr_pref(type, pref_allctr, p, size, 1);
+    }
+
+    return erts_alcu_alloc_thr_pref(type, extra, size);
 }
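Throughout the thread-safe wrappers above, the tag is computed with determine_alloc_tag() *before* taking the allocator mutex and only stamped onto the block with set_alloc_tag() once the allocation has succeeded, keeping the scheduler-data lookup and atom packing outside the critical section. The recurring shape, condensed into a sketch (illustration, not a new API):

```c
/* Condensed from the wrappers above. */
void *tagged_alloc_example(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
{
    alcu_atag_t tag = 0;
    void *res;

    if (allctr->atags)
        tag = determine_alloc_tag(allctr, type);   /* outside the lock */

    erts_mtx_lock(&allctr->mutex);
    res = do_erts_alcu_alloc(type, allctr, size);
    if (allctr->atags && res)
        set_alloc_tag(allctr, res, tag);           /* under the lock */
    erts_mtx_unlock(&allctr->mutex);

    return res;
}
```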
extra, size); } void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size) { - return realloc_thr_pref(type, extra, p, size, 1); + if (p) { + Allctr_t *pref_allctr = get_pref_allctr(extra); + + return realloc_thr_pref(type, pref_allctr, p, size, 1); + } + + return erts_alcu_alloc_thr_pref(type, extra, size); } @@ -6071,6 +6285,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->t = 0; allctr->ramv = init->ramv; + allctr->atags = init->atags; allctr->main_carrier_size = init->mmbcs; #if HAVE_ERTS_MSEG @@ -6320,6 +6535,1072 @@ erts_alcu_init(AlcUInit_t *init) initialized = 1; } +/* ------------------------------------------------------------------------- */ + +/* Allocation histograms and carrier information is gathered by walking through + * all carriers associated with each allocator instance. This is done as + * aux_yield_work on the scheduler that owns each instance. + * + * Yielding is implemented by temporarily inserting a "dummy carrier" at the + * last position. It's permanently "busy" so it won't get picked up by someone + * else when in the carrier pool, and we never make the employer aware of it + * through callbacks so we can't accidentally allocate on it. + * + * Plain malloc/free is used to guarantee we won't allocate with the allocator + * we're scanning. */ + +/* Yield between carriers once this many blocks have been processed. Note that + * a single carrier scan may exceed this figure. */ +#ifndef DEBUG + #define BLOCKSCAN_REDUCTIONS (8000) +#else + #define BLOCKSCAN_REDUCTIONS (400) +#endif + +/* Abort a single carrier scan after this many blocks to prevent really large + * MBCs from blocking forever. */ +#define BLOCKSCAN_BAILOUT_THRESHOLD (16000) + +typedef struct alcu_blockscan { + /* A per-scheduler list used when multiple scans have been queued. The + * current scanner will always run until completion/abort before moving on + * to the next. */ + struct alcu_blockscan *scanner_queue; + + Allctr_t *allocator; + Process *process; + + int (*current_op)(struct alcu_blockscan *scanner); + int (*next_op)(struct alcu_blockscan *scanner); + int reductions; + + ErtsAlcCPoolData_t *cpool_cursor; + CarrierList_t *current_clist; + Carrier_t *clist_cursor; + Carrier_t dummy_carrier; + + /* Called if the process that started this job dies before we're done. */ + void (*abort)(void *user_data); + + /* Called on each carrier. The callback must return the number of blocks + * scanned to yield properly between carriers. + * + * Note that it's not possible to "yield back" into a carrier. */ + int (*scan)(Allctr_t *, void *user_data, Carrier_t *); + + /* Called when all carriers have been scanned. The callback may return + * non-zero to yield. 
*/ + int (*finish)(void *user_data); + + void *user_data; +} blockscan_t; + +static Carrier_t *blockscan_restore_clist_cursor(blockscan_t *state) +{ + Carrier_t *cursor = state->clist_cursor; + + ASSERT(state->clist_cursor == (state->current_clist)->first || + state->clist_cursor == &state->dummy_carrier); + + if (cursor == &state->dummy_carrier) { + cursor = cursor->next; + + unlink_carrier(state->current_clist, state->clist_cursor); + } + + return cursor; +} + +static void blockscan_save_clist_cursor(blockscan_t *state, Carrier_t *after) +{ + ASSERT(state->clist_cursor == (state->current_clist)->first || + state->clist_cursor == &state->dummy_carrier); + + state->clist_cursor = &state->dummy_carrier; + + (state->clist_cursor)->next = after->next; + (state->clist_cursor)->prev = after; + + relink_carrier(state->current_clist, state->clist_cursor); +} + +static int blockscan_clist_yielding(blockscan_t *state) +{ + Carrier_t *cursor = blockscan_restore_clist_cursor(state); + + if (ERTS_PROC_IS_EXITING(state->process)) { + return 0; + } + + while (cursor) { + /* Skip dummy carriers inserted by another (concurrent) block scan. + * This can happen when scanning thread-safe allocators from multiple + * schedulers. */ + if (CARRIER_SZ(cursor) > 0) { + int blocks_scanned = state->scan(state->allocator, + state->user_data, + cursor); + + state->reductions -= blocks_scanned; + + if (state->reductions <= 0) { + blockscan_save_clist_cursor(state, cursor); + return 1; + } + } + + cursor = cursor->next; + } + + return 0; +} + +static ErtsAlcCPoolData_t *blockscan_restore_cpool_cursor(blockscan_t *state) +{ + ErtsAlcCPoolData_t *cursor; + + cursor = cpool_aint2cpd(cpool_read(&(state->cpool_cursor)->next)); + + if (state->cpool_cursor == &state->dummy_carrier.cpool) { + cpool_delete(state->allocator, state->allocator, &state->dummy_carrier); + } + + return cursor; +} + +static void blockscan_save_cpool_cursor(blockscan_t *state, + ErtsAlcCPoolData_t *after) +{ + ErtsAlcCPoolData_t *dummy_carrier, *prev_carrier, *next_carrier; + + dummy_carrier = &state->dummy_carrier.cpool; + + next_carrier = cpool_aint2cpd(cpool_mod_mark(&after->next)); + prev_carrier = cpool_aint2cpd(cpool_mod_mark(&next_carrier->prev)); + + cpool_init(&dummy_carrier->next, (erts_aint_t)next_carrier); + cpool_init(&dummy_carrier->prev, (erts_aint_t)prev_carrier); + + cpool_set_mod_marked(&prev_carrier->next, + (erts_aint_t)dummy_carrier, + (erts_aint_t)next_carrier); + cpool_set_mod_marked(&next_carrier->prev, + (erts_aint_t)dummy_carrier, + (erts_aint_t)prev_carrier); + + state->cpool_cursor = dummy_carrier; +} + +static int blockscan_cpool_yielding(blockscan_t *state) +{ + ErtsAlcCPoolData_t *sentinel, *cursor; + + sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel; + cursor = blockscan_restore_cpool_cursor(state); + + if (ERTS_PROC_IS_EXITING(state->process)) { + return 0; + } + + while (cursor != sentinel) { + Carrier_t *carrier; + erts_aint_t exp; + + /* When a deallocation happens on a pooled carrier it will be routed to + * its owner, so the only way to be sure that it isn't modified while + * scanning is to skip all carriers that aren't ours. The deallocations + * deferred to us will get handled when we're done. 
*/ + while (cursor->orig_allctr != state->allocator) { + cursor = cpool_aint2cpd(cpool_read(&cursor->next)); + + if (cursor == sentinel) { + return 0; + } + } + + carrier = ErtsContainerStruct(cursor, Carrier_t, cpool); + exp = erts_atomic_read_rb(&carrier->allctr); + + if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { + ASSERT(state->allocator == (Allctr_t*)(exp & ~ERTS_CRR_ALCTR_FLG_MASK)); + ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY)); + + if (erts_atomic_cmpxchg_acqb(&carrier->allctr, + exp | ERTS_CRR_ALCTR_FLG_BUSY, + exp) == exp) { + /* Skip dummy carriers inserted by another (concurrent) block + * scan. This can happen when scanning thread-safe allocators + * from multiple schedulers. */ + if (CARRIER_SZ(carrier) > 0) { + int blocks_scanned = state->scan(state->allocator, + state->user_data, + carrier); + + state->reductions -= blocks_scanned; + + if (state->reductions <= 0) { + blockscan_save_cpool_cursor(state, cursor); + erts_atomic_set_relb(&carrier->allctr, exp); + + return 1; + } + } + + erts_atomic_set_relb(&carrier->allctr, exp); + } + } + + cursor = cpool_aint2cpd(cpool_read(&cursor->next)); + } + + return 0; +} + +static int blockscan_yield_helper(blockscan_t *state, + int (*yielding_op)(blockscan_t*)) +{ + /* Note that we don't check whether to abort here; only yielding_op knows + * whether the carrier is still in the list/pool. */ + + if ((state->allocator)->thread_safe) { + /* Locked scans have to be as short as possible. */ + state->reductions = 1; + + erts_mtx_lock(&(state->allocator)->mutex); + } else { + state->reductions = BLOCKSCAN_REDUCTIONS; + } + + if (yielding_op(state)) { + state->next_op = state->current_op; + } + + if ((state->allocator)->thread_safe) { + erts_mtx_unlock(&(state->allocator)->mutex); + } + + return 1; +} + +/* */ + +static int blockscan_finish(blockscan_t *state) +{ + if (ERTS_PROC_IS_EXITING(state->process)) { + state->abort(state->user_data); + return 0; + } + + state->current_op = blockscan_finish; + + return state->finish(state->user_data); +} + +static int blockscan_sweep_sbcs(blockscan_t *state) +{ + if (state->current_op != blockscan_sweep_sbcs) { + SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_SBC, state->allocator); + state->current_clist = &(state->allocator)->sbc_list; + state->clist_cursor = (state->current_clist)->first; + } + + state->current_op = blockscan_sweep_sbcs; + state->next_op = blockscan_finish; + + return blockscan_yield_helper(state, blockscan_clist_yielding); +} + +static int blockscan_sweep_mbcs(blockscan_t *state) +{ + if (state->current_op != blockscan_sweep_mbcs) { + SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator); + state->current_clist = &(state->allocator)->mbc_list; + state->clist_cursor = (state->current_clist)->first; + } + + state->current_op = blockscan_sweep_mbcs; + state->next_op = blockscan_sweep_sbcs; + + return blockscan_yield_helper(state, blockscan_clist_yielding); +} + +static int blockscan_sweep_cpool(blockscan_t *state) +{ + if (state->current_op != blockscan_sweep_cpool) { + ErtsAlcCPoolData_t *sentinel; + + SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator); + sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel; + state->cpool_cursor = sentinel; + } + + state->current_op = blockscan_sweep_cpool; + state->next_op = blockscan_sweep_mbcs; + + return blockscan_yield_helper(state, blockscan_cpool_yielding); +} + +static int blockscan_get_specific_allocator(int allocator_num, + int sched_id, + Allctr_t **out) +{ + ErtsAllocatorInfo_t *ai; + Allctr_t 
*allocator; + + ASSERT(allocator_num >= ERTS_ALC_A_MIN && + allocator_num <= ERTS_ALC_A_MAX); + ASSERT(sched_id >= 0 && sched_id <= erts_no_schedulers); + + ai = &erts_allctrs_info[allocator_num]; + + if (!ai->enabled || !ai->alloc_util) { + return 0; + } + + if (!ai->thr_spec) { + if (sched_id != 0) { + /* Only thread-specific allocators can be scanned on a specific + * scheduler. */ + return 0; + } + + allocator = (Allctr_t*)ai->extra; + ASSERT(allocator->thread_safe); + } else { + ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t*)ai->extra; + + ASSERT(sched_id < tspec->size); + + allocator = tspec->allctr[sched_id]; + } + + *out = allocator; + + return 1; +} + +static void blockscan_sched_trampoline(void *arg) +{ + ErtsAlcuBlockscanYieldData *yield; + ErtsSchedulerData *esdp; + blockscan_t *scanner; + + esdp = erts_get_scheduler_data(); + scanner = (blockscan_t*)arg; + + yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan); + + ASSERT((yield->last == NULL) == (yield->current == NULL)); + + if (yield->last != NULL) { + blockscan_t *prev_scanner = yield->last; + + ASSERT(prev_scanner->scanner_queue == NULL); + + prev_scanner->scanner_queue = scanner; + } else { + yield->current = scanner; + } + + scanner->scanner_queue = NULL; + yield->last = scanner; + + erts_notify_new_aux_yield_work(esdp); +} + +static void blockscan_dispatch(blockscan_t *scanner, Process *owner, + Allctr_t *allocator, int sched_id) +{ + ASSERT(erts_get_scheduler_id() != 0); + + if (sched_id == 0) { + /* Global instances are always handled on the current scheduler. */ + sched_id = ERTS_ALC_GET_THR_IX(); + ASSERT(allocator->thread_safe); + } + + scanner->allocator = allocator; + scanner->process = owner; + + erts_proc_inc_refc(scanner->process); + + cpool_init_carrier_data(scanner->allocator, &scanner->dummy_carrier); + erts_atomic_init_nob(&(scanner->dummy_carrier).allctr, + (erts_aint_t)allocator | ERTS_CRR_ALCTR_FLG_BUSY); + + if (ERTS_ALC_IS_CPOOL_ENABLED(scanner->allocator)) { + scanner->next_op = blockscan_sweep_cpool; + } else { + scanner->next_op = blockscan_sweep_mbcs; + } + + /* Aux yield jobs can only be set up while running on the scheduler that + * services them, so we move there before continuing. + * + * We can't drive the scan itself through this since the scheduler will + * always finish *all* misc aux work in one go which makes it impossible to + * yield. */ + erts_schedule_misc_aux_work(sched_id, blockscan_sched_trampoline, scanner); +} + +int erts_handle_yielded_alcu_blockscan(ErtsSchedulerData *esdp, + ErtsAlcuBlockscanYieldData *yield) +{ + blockscan_t *scanner = yield->current; + + (void)esdp; + + ASSERT((yield->last == NULL) == (yield->current == NULL)); + + if (scanner) { + if (scanner->next_op(scanner)) { + return 1; + } + + ASSERT(ERTS_PROC_IS_EXITING(scanner->process) || + scanner->current_op == blockscan_finish); + + yield->current = scanner->scanner_queue; + + if (yield->current == NULL) { + ASSERT(scanner == yield->last); + yield->last = NULL; + } + + erts_proc_dec_refc(scanner->process); + + /* Plain free is intentional. 
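The scanner state
+ * was allocated with plain calloc by the erts_alcu_gather_* functions, and
+ * as noted at the top of this section we must never allocate or free
+ * through the allocator being scanned.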
*/ + free(scanner); + + return yield->current != NULL; + } + + return 0; +} + +void erts_alcu_sched_spec_data_init(ErtsSchedulerData *esdp) +{ + ErtsAlcuBlockscanYieldData *yield; + + yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan); + + yield->current = NULL; + yield->last = NULL; +} + +/* ------------------------------------------------------------------------- */ + +static ERTS_INLINE int u64_log2(Uint64 v) +{ + static const int log2_tab64[64] = { + 63, 0, 58, 1, 59, 47, 53, 2, + 60, 39, 48, 27, 54, 33, 42, 3, + 61, 51, 37, 40, 49, 18, 28, 20, + 55, 30, 34, 11, 43, 14, 22, 4, + 62, 57, 46, 52, 38, 26, 32, 41, + 50, 36, 17, 19, 29, 10, 13, 21, + 56, 45, 25, 31, 35, 16, 9, 12, + 44, 24, 15, 8, 23, 7, 6, 5}; + + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + + return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58]; +} + +/* ------------------------------------------------------------------------- */ + +typedef struct hist_tree__ { + struct hist_tree__ *parent; + struct hist_tree__ *left; + struct hist_tree__ *right; + + int is_red; + + alcu_atag_t tag; + UWord histogram[1]; +} hist_tree_t; + +#define ERTS_RBT_PREFIX hist_tree +#define ERTS_RBT_T hist_tree_t +#define ERTS_RBT_KEY_T UWord +#define ERTS_RBT_FLAGS_T int +#define ERTS_RBT_INIT_EMPTY_TNODE(T) ((void)0) +#define ERTS_RBT_IS_RED(T) ((T)->is_red) +#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1) +#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T)) +#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0) +#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red) +#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F) +#define ERTS_RBT_GET_PARENT(T) ((T)->parent) +#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P) +#define ERTS_RBT_GET_RIGHT(T) ((T)->right) +#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R)) +#define ERTS_RBT_GET_LEFT(T) ((T)->left) +#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L)) +#define ERTS_RBT_GET_KEY(T) ((T)->tag) +#define ERTS_RBT_IS_LT(KX, KY) (KX < KY) +#define ERTS_RBT_IS_EQ(KX, KY) (KX == KY) +#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING +#define ERTS_RBT_WANT_FOREACH_DESTROY +#define ERTS_RBT_WANT_INSERT +#define ERTS_RBT_WANT_LOOKUP +#define ERTS_RBT_UNDEF + +#include "erl_rbtree.h" + +typedef struct { + blockscan_t common; + + ErtsIRefStorage iref; + Process *process; + + hist_tree_rbt_yield_state_t hist_tree_yield; + hist_tree_t *hist_tree; + UWord hist_count; + + UWord hist_slot_start; + int hist_slot_count; + + UWord unscanned_size; + + ErtsHeapFactory msg_factory; + int building_result; + Eterm result_list; +} gather_ahist_t; + +static void gather_ahist_update(gather_ahist_t *state, UWord tag, UWord size) +{ + hist_tree_t *hist_node; + UWord size_interval; + int hist_slot; + + hist_node = hist_tree_rbt_lookup(state->hist_tree, tag); + + if (hist_node == NULL) { + /* Plain calloc is intentional. 
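Besides staying off the
+ * allocator under scan, calloc zero-fills the flexible histogram[] tail so
+ * every slot starts at zero. For reference, the slot mapping below with a
+ * hypothetical hist_slot_start of 128 puts a 100-byte block in slot 0
+ * (u64_log2(100/128 + 1) = 0) and a 1000-byte block in slot 3
+ * (u64_log2(1000/128 + 1) = u64_log2(8) = 3), clamping to the last slot.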
*/ + hist_node = (hist_tree_t*)calloc(1, sizeof(hist_tree_t) + + (state->hist_slot_count - 1) * + sizeof(hist_node->histogram[0])); + hist_node->tag = tag; + + hist_tree_rbt_insert(&state->hist_tree, hist_node); + state->hist_count++; + } + + size_interval = (size / state->hist_slot_start); + size_interval = u64_log2(size_interval + 1); + + hist_slot = MIN(size_interval, state->hist_slot_count - 1); + + hist_node->histogram[hist_slot]++; +} + +static int gather_ahist_scan(Allctr_t *allocator, + void *user_data, + Carrier_t *carrier) +{ + gather_ahist_t *state; + int blocks_scanned; + Block_t *block; + + state = (gather_ahist_t*)user_data; + blocks_scanned = 1; + + if (IS_SB_CARRIER(carrier)) { + alcu_atag_t tag; + + block = SBC2BLK(allocator, carrier); + tag = GET_BLK_ATAG(block); + + ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); + + gather_ahist_update(state, tag, SBC_BLK_SZ(block)); + } else { + UWord scanned_bytes = MBC_HEADER_SIZE(allocator); + + ASSERT(IS_MB_CARRIER(carrier)); + + block = MBC_TO_FIRST_BLK(allocator, carrier); + + while (1) { + UWord block_size = MBC_BLK_SZ(block); + + if (IS_ALLOCED_BLK(block)) { + alcu_atag_t tag = GET_BLK_ATAG(block); + + ASSERT(DBG_IS_VALID_ATAG(allocator, tag)); + + gather_ahist_update(state, tag, block_size); + } + + scanned_bytes += block_size; + + if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) { + state->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes; + break; + } else if (IS_LAST_BLK(block)) { + break; + } + + block = NXT_BLK(block); + blocks_scanned++; + } + } + + return blocks_scanned; +} + +static void gather_ahist_append_result(hist_tree_t *node, void *arg) +{ + gather_ahist_t *state = (gather_ahist_t*)arg; + + Eterm histogram_tuple, tag_tuple; + + Eterm *hp; + int ix; + + ASSERT(state->building_result); + + hp = erts_produce_heap(&state->msg_factory, 7 + state->hist_slot_count, 0); + + hp[0] = make_arityval(state->hist_slot_count); + + for (ix = 0; ix < state->hist_slot_count; ix++) { + hp[1 + ix] = make_small(node->histogram[ix]); + } + + histogram_tuple = make_tuple(hp); + hp += 1 + state->hist_slot_count; + + hp[0] = make_arityval(3); + hp[1] = ATAG_ID(node->tag); + hp[2] = alloc_type_atoms[ATAG_TYPE(node->tag)]; + hp[3] = histogram_tuple; + + tag_tuple = make_tuple(hp); + hp += 4; + + state->result_list = CONS(hp, tag_tuple, state->result_list); + + /* Plain free is intentional. */ + free(node); +} + +static void gather_ahist_send(gather_ahist_t *state) +{ + Eterm result_tuple, unscanned_size, task_ref; + + Uint term_size; + Eterm *hp; + + ASSERT((state->result_list == NIL) ^ (state->hist_count > 0)); + ASSERT(state->building_result); + + term_size = 4 + erts_iref_storage_heap_size(&state->iref); + term_size += IS_USMALL(0, state->unscanned_size) ? 
0 : BIG_UINT_HEAP_SIZE; + + hp = erts_produce_heap(&state->msg_factory, term_size, 0); + + task_ref = erts_iref_storage_make_ref(&state->iref, &hp, + &(state->msg_factory.message)->hfrag.off_heap, 0); + + unscanned_size = bld_unstable_uint(&hp, NULL, state->unscanned_size); + + hp[0] = make_arityval(3); + hp[1] = task_ref; + hp[2] = unscanned_size; + hp[3] = state->result_list; + + result_tuple = make_tuple(hp); + + erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1); + + erts_queue_message(state->process, 0, state->msg_factory.message, + result_tuple, am_system); +} + +static int gather_ahist_finish(void *arg) +{ + gather_ahist_t *state = (gather_ahist_t*)arg; + + if (!state->building_result) { + ErtsMessage *message; + Uint minimum_size; + Eterm *hp; + + /* {Ref, unscanned size, [{Tag, {Histogram}} | Rest]} */ + minimum_size = 4 + erts_iref_storage_heap_size(&state->iref) + + state->hist_count * (7 + state->hist_slot_count); + + message = erts_alloc_message(minimum_size, &hp); + erts_factory_selfcontained_message_init(&state->msg_factory, + message, hp); + + ERTS_RBT_YIELD_STAT_INIT(&state->hist_tree_yield); + + state->result_list = NIL; + state->building_result = 1; + } + + if (hist_tree_rbt_foreach_destroy_yielding(&state->hist_tree, + &gather_ahist_append_result, + state, + &state->hist_tree_yield, + BLOCKSCAN_REDUCTIONS)) { + return 1; + } + + gather_ahist_send(state); + + return 0; +} + +static void gather_ahist_destroy_result(hist_tree_t *node, void *arg) +{ + (void)arg; + free(node); +} + +static void gather_ahist_abort(void *arg) +{ + gather_ahist_t *state = (gather_ahist_t*)arg; + + if (state->building_result) { + erts_factory_undo(&state->msg_factory); + } + + hist_tree_rbt_foreach_destroy(&state->hist_tree, + &gather_ahist_destroy_result, + NULL); +} + +int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num, + int sched_id, int hist_width, + UWord hist_start, Eterm ref) +{ + gather_ahist_t *gather_state; + blockscan_t *scanner; + Allctr_t *allocator; + + ASSERT(is_internal_ref(ref)); + + if (!blockscan_get_specific_allocator(allocator_num, + sched_id, + &allocator)) { + return 0; + } else if (!allocator->atags) { + return 0; + } + + ensure_atoms_initialized(allocator); + + /* Plain calloc is intentional. */ + gather_state = (gather_ahist_t*)calloc(1, sizeof(gather_ahist_t)); + scanner = &gather_state->common; + + scanner->abort = gather_ahist_abort; + scanner->scan = gather_ahist_scan; + scanner->finish = gather_ahist_finish; + scanner->user_data = gather_state; + + erts_iref_storage_save(&gather_state->iref, ref); + gather_state->hist_slot_start = hist_start; + gather_state->hist_slot_count = hist_width; + gather_state->process = p; + + blockscan_dispatch(scanner, p, allocator, sched_id); + + return 1; +} + +/* ------------------------------------------------------------------------- */ + +typedef struct chist_node__ { + struct chist_node__ *next; + + UWord carrier_size; + UWord unscanned_size; + UWord allocated_size; + + /* BLOCKSCAN_BAILOUT_THRESHOLD guarantees we won't overflow this or the + * counters in the free block histogram. 
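A single carrier
+ * scan visits at most BLOCKSCAN_BAILOUT_THRESHOLD (16000) blocks, so no
+ * counter can exceed 16000, which comfortably fits in an int.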
*/ + int allocated_count; + int flags; + + int histogram[1]; +} chist_node_t; + +typedef struct { + blockscan_t common; + + ErtsIRefStorage iref; + Process *process; + + Eterm allocator_desc; + + chist_node_t *info_list; + UWord info_count; + + UWord hist_slot_start; + int hist_slot_count; + + ErtsHeapFactory msg_factory; + int building_result; + Eterm result_list; +} gather_cinfo_t; + +static int gather_cinfo_scan(Allctr_t *allocator, + void *user_data, + Carrier_t *carrier) +{ + gather_cinfo_t *state; + chist_node_t *node; + int blocks_scanned; + Block_t *block; + + state = (gather_cinfo_t*)user_data; + node = calloc(1, sizeof(chist_node_t) + + (state->hist_slot_count - 1) * + sizeof(node->histogram[0])); + blocks_scanned = 1; + + /* ERTS_CRR_ALCTR_FLG_BUSY is ignored since we've set it ourselves and it + * would be misleading to include it. */ + node->flags = erts_atomic_read_rb(&carrier->allctr) & + (ERTS_CRR_ALCTR_FLG_MASK & ~ERTS_CRR_ALCTR_FLG_BUSY); + node->carrier_size = CARRIER_SZ(carrier); + + if (IS_SB_CARRIER(carrier)) { + UWord block_size; + + block = SBC2BLK(allocator, carrier); + block_size = SBC_BLK_SZ(block); + + node->allocated_size = block_size; + node->allocated_count = 1; + } else { + UWord scanned_bytes = MBC_HEADER_SIZE(allocator); + + block = MBC_TO_FIRST_BLK(allocator, carrier); + + while (1) { + UWord block_size = MBC_BLK_SZ(block); + + scanned_bytes += block_size; + + if (IS_ALLOCED_BLK(block)) { + node->allocated_size += block_size; + node->allocated_count++; + } else { + UWord size_interval; + int hist_slot; + + size_interval = (block_size / state->hist_slot_start); + size_interval = u64_log2(size_interval + 1); + + hist_slot = MIN(size_interval, state->hist_slot_count - 1); + + node->histogram[hist_slot]++; + } + + if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) { + node->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes; + break; + } else if (IS_LAST_BLK(block)) { + break; + } + + block = NXT_BLK(block); + blocks_scanned++; + } + } + + node->next = state->info_list; + state->info_list = node; + state->info_count++; + + return blocks_scanned; +} + +static void gather_cinfo_append_result(gather_cinfo_t *state, + chist_node_t *info) +{ + Eterm carrier_size, unscanned_size, allocated_size; + Eterm histogram_tuple, carrier_tuple; + + Uint term_size; + Eterm *hp; + int ix; + + ASSERT(state->building_result); + + term_size = 11 + state->hist_slot_count; + term_size += IS_USMALL(0, info->carrier_size) ? 0 : BIG_UINT_HEAP_SIZE; + term_size += IS_USMALL(0, info->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE; + term_size += IS_USMALL(0, info->allocated_size) ? 0 : BIG_UINT_HEAP_SIZE; + + hp = erts_produce_heap(&state->msg_factory, term_size, 0); + + hp[0] = make_arityval(state->hist_slot_count); + + for (ix = 0; ix < state->hist_slot_count; ix++) { + hp[1 + ix] = make_small(info->histogram[ix]); + } + + histogram_tuple = make_tuple(hp); + hp += 1 + state->hist_slot_count; + + carrier_size = bld_unstable_uint(&hp, NULL, info->carrier_size); + unscanned_size = bld_unstable_uint(&hp, NULL, info->unscanned_size); + allocated_size = bld_unstable_uint(&hp, NULL, info->allocated_size); + + hp[0] = make_arityval(7); + hp[1] = state->allocator_desc; + hp[2] = carrier_size; + hp[3] = unscanned_size; + hp[4] = allocated_size; + hp[5] = make_small(info->allocated_count); + hp[6] = (info->flags & ERTS_CRR_ALCTR_FLG_IN_POOL) ? 
am_true : am_false; + hp[7] = histogram_tuple; + + carrier_tuple = make_tuple(hp); + hp += 8; + + state->result_list = CONS(hp, carrier_tuple, state->result_list); + + free(info); +} + +static void gather_cinfo_send(gather_cinfo_t *state) +{ + Eterm result_tuple, task_ref; + + int term_size; + Eterm *hp; + + ASSERT((state->result_list == NIL) ^ (state->info_count > 0)); + ASSERT(state->building_result); + + term_size = 3 + erts_iref_storage_heap_size(&state->iref); + hp = erts_produce_heap(&state->msg_factory, term_size, 0); + + task_ref = erts_iref_storage_make_ref(&state->iref, &hp, + &(state->msg_factory.message)->hfrag.off_heap, 0); + + hp[0] = make_arityval(2); + hp[1] = task_ref; + hp[2] = state->result_list; + + result_tuple = make_tuple(hp); + + erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1); + + erts_queue_message(state->process, 0, state->msg_factory.message, + result_tuple, am_system); +} + +static int gather_cinfo_finish(void *arg) +{ + gather_cinfo_t *state = (gather_cinfo_t*)arg; + int reductions = BLOCKSCAN_REDUCTIONS; + + if (!state->building_result) { + ErtsMessage *message; + Uint minimum_size; + Eterm *hp; + + /* {Ref, [{Carrier size, unscanned size, allocated size, + * allocated block count, {Free block histogram}} | Rest]} */ + minimum_size = 3 + erts_iref_storage_heap_size(&state->iref) + + state->info_count * (11 + state->hist_slot_count); + + message = erts_alloc_message(minimum_size, &hp); + erts_factory_selfcontained_message_init(&state->msg_factory, + message, hp); + + state->result_list = NIL; + state->building_result = 1; + } + + while (state->info_list) { + chist_node_t *current = state->info_list; + state->info_list = current->next; + + gather_cinfo_append_result(state, current); + + if (reductions-- <= 0) { + return 1; + } + } + + gather_cinfo_send(state); + + return 0; +} + +static void gather_cinfo_abort(void *arg) +{ + gather_cinfo_t *state = (gather_cinfo_t*)arg; + + if (state->building_result) { + erts_factory_undo(&state->msg_factory); + } + + while (state->info_list) { + chist_node_t *current = state->info_list; + state->info_list = current->next; + + free(current); + } +} + +int erts_alcu_gather_carrier_info(struct process *p, int allocator_num, + int sched_id, int hist_width, + UWord hist_start, Eterm ref) +{ + gather_cinfo_t *gather_state; + blockscan_t *scanner; + + const char *allocator_desc; + Allctr_t *allocator; + + ASSERT(is_internal_ref(ref)); + + if (!blockscan_get_specific_allocator(allocator_num, + sched_id, + &allocator)) { + return 0; + } + + allocator_desc = ERTS_ALC_A2AD(allocator_num); + + /* Plain calloc is intentional. */ + gather_state = (gather_cinfo_t*)calloc(1, sizeof(gather_cinfo_t)); + scanner = &gather_state->common; + + scanner->abort = gather_cinfo_abort; + scanner->scan = gather_cinfo_scan; + scanner->finish = gather_cinfo_finish; + scanner->user_data = gather_state; + + gather_state->allocator_desc = erts_atom_put((byte *)allocator_desc, + sys_strlen(allocator_desc), + ERTS_ATOM_ENC_LATIN1, 1); + erts_iref_storage_save(&gather_state->iref, ref); + gather_state->hist_slot_start = hist_start * 2; + gather_state->hist_slot_count = hist_width; + gather_state->process = p; + + blockscan_dispatch(scanner, p, allocator, sched_id); + + return 1; +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ * NOTE: erts_alcu_test() is only supposed to be used for testing. 
* @@ -6441,6 +7722,7 @@ erts_alcu_verify_unused_ts(Allctr_t *allctr) erts_mtx_unlock(&allctr->mutex); } + #ifdef DEBUG int is_sbc_blk(Block_t* blk) { diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index 05c8a0db3b..ff4d10b206 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -50,6 +50,7 @@ typedef struct { int tspec; int tpref; int ramv; + int atags; UWord sbct; UWord asbcst; UWord rsbcst; @@ -106,6 +107,7 @@ typedef struct { 0, /* (bool) tspec: thread specific */\ 0, /* (bool) tpref: thread preferred */\ 0, /* (bool) ramv: realloc always moves */\ + 0, /* (bool) atags: tagged allocations */\ 512*1024, /* (bytes) sbct: sbc threshold */\ 2*1024*2024, /* (amount) asbcst: abs sbc shrink threshold */\ 20, /* (%) rsbcst: rel sbc shrink threshold */\ @@ -142,6 +144,7 @@ typedef struct { 0, /* (bool) tspec: thread specific */\ 0, /* (bool) tpref: thread preferred */\ 0, /* (bool) ramv: realloc always moves */\ + 0, /* (bool) atags: tagged allocations */\ 64*1024, /* (bytes) sbct: sbc threshold */\ 2*1024*2024, /* (amount) asbcst: abs sbc shrink threshold */\ 20, /* (%) rsbcst: rel sbc shrink threshold */\ @@ -224,6 +227,36 @@ void erts_lcnt_update_allocator_locks(int enable); int erts_alcu_try_set_dyn_param(Allctr_t*, Eterm param, Uint value); +/* Gathers per-tag allocation histograms from the given allocator number + * (ERTS_ALC_A_*) and scheduler id. An id of 0 means the global instance will + * be used. + * + * The results are sent to `p`, and it returns the number of messages to wait + * for. */ +int erts_alcu_gather_alloc_histograms(struct process *p, int allocator_num, + int sched_id, int hist_width, + UWord hist_start, Eterm ref); + +/* Gathers per-carrier info from the given allocator number (ERTS_ALC_A_*) and + * scheduler id. An id of 0 means the global instance will be used. + * + * The results are sent to `p`, and it returns the number of messages to wait + * for. 
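A hypothetical
+ * caller sketch (illustrative only; the allocator, width and start values
+ * are made up):
+ *
+ *   Eterm ref = erts_make_ref(p);
+ *   int msgs = erts_alcu_gather_carrier_info(p, ERTS_ALC_A_EHEAP,
+ *                                            1,   // scheduler id 1
+ *                                            18,  // histogram width
+ *                                            512, // hist_start
+ *                                            ref);
+ *   // ...then await `msgs` result messages tagged with ref.
+ *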
*/ +int erts_alcu_gather_carrier_info(struct process *p, int allocator_num, + int sched_id, int hist_width, + UWord hist_start, Eterm ref); + +struct alcu_blockscan; + +typedef struct { + struct alcu_blockscan *current; + struct alcu_blockscan *last; +} ErtsAlcuBlockscanYieldData; + +int erts_handle_yielded_alcu_blockscan(struct ErtsSchedulerData_ *esdp, + ErtsAlcuBlockscanYieldData *yield); +void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp); + #endif /* !ERL_ALLOC_UTIL__ */ #if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__) @@ -548,6 +581,7 @@ struct Allctr_t_ { /* Options */ int t; int ramv; + int atags; Uint sbc_threshold; Uint sbc_move_threshold; Uint mbc_move_threshold; @@ -684,6 +718,7 @@ struct Allctr_t_ { #endif }; + int erts_alcu_start(Allctr_t *, AllctrInit_t *); void erts_alcu_stop(Allctr_t *); diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 3c13991fb6..8b2b1a58c7 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -38,7 +38,7 @@ #include "erl_message.h" #include "erl_binary.h" #include "erl_db.h" -#include "erl_instrument.h" +#include "erl_mtrace.h" #include "dist.h" #include "erl_gc.h" #include "erl_cpu_topology.h" @@ -51,6 +51,7 @@ #include "erl_ptab.h" #include "erl_time.h" #include "erl_proc_sig_queue.h" +#include "erl_alloc_util.h" #ifdef HIPE #include "hipe_arch.h" #endif @@ -616,17 +617,13 @@ static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp) } typedef struct { - Process *c_p; - ErtsProcLocks c_p_locks; ErtsMonitorSuspend **smi; Uint smi_i; Uint smi_max; - int sz; + Uint sz; } ErtsSuspendMonitorInfoCollection; -#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do { \ - (SMIC).c_p = (CP); \ - (SMIC).c_p_locks = (CPL); \ +#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do { \ (SMIC).smi = NULL; \ (SMIC).smi_i = (SMIC).smi_max = 0; \ (SMIC).sz = 0; \ @@ -659,34 +656,26 @@ do { \ static void collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp) { - ErtsMonitorSuspend *smon = erts_monitor_suspend(mon); - ErtsSuspendMonitorInfoCollection *smicp = vsmicp; - Process *suspendee = erts_pid2proc(smicp->c_p, - smicp->c_p_locks, - mon->other.item, - 0); - if (suspendee) { /* suspendee is alive */ - Sint a, p; - if (smon->active) { - smon->active += smon->pending; - smon->pending = 0; - } + if (mon->type == ERTS_MON_TYPE_SUSPEND) { + Sint count; + erts_aint_t mstate; + ErtsMonitorSuspend *msp; + ErtsSuspendMonitorInfoCollection *smicp; - ASSERT((smon->active && !smon->pending) - || (smon->pending && !smon->active)); + msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon); + smicp = vsmicp; ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp); - smicp->smi[smicp->smi_i] = smon; + smicp->smi[smicp->smi_i] = msp; smicp->sz += 2 /* cons */ + 4 /* 3-tuple */; - a = (Sint) smon->active; /* quiet compiler warnings */ - p = (Sint) smon->pending; /* on 64-bit machines */ + mstate = erts_atomic_read_nob(&msp->state); - if (!IS_SSMALL(a)) - smicp->sz += BIG_UINT_HEAP_SIZE; - if (!IS_SSMALL(p)) + count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK); + if (!IS_SSMALL(count)) smicp->sz += BIG_UINT_HEAP_SIZE; + smicp->smi_i++; } } @@ -756,7 +745,7 @@ typedef struct { static ErtsProcessInfoArgs pi_args[] = { {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN}, - {am_current_function, 4, 0, ERTS_PROC_LOCK_MAIN}, + {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN}, {am_status, 0, 0, 0}, 
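/* Entries flagged with ERTS_PI_FLAG_FORCE_SIG_SEND are always requested
   via a signal to the target process (see the send_signal path below); the
   entries gaining the flag in this patch (current_function, backtrace,
   current_location, current_stacktrace) all inspect the target's stack. */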
{am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, @@ -778,15 +767,15 @@ static ErtsProcessInfoArgs pi_args[] = { {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN}, {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN}, - {am_backtrace, 0, 0, ERTS_PROC_LOCK_MAIN}, + {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN}, {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0}, {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN}, {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN}, {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN}, - {am_current_location, 0, 0, ERTS_PROC_LOCK_MAIN}, - {am_current_stacktrace, 0, 0, ERTS_PROC_LOCK_MAIN}, + {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, + {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN}, {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN}, {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN}, @@ -1074,8 +1063,10 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2) if (c_p->common.id == pid) { int local_only = c_p->flags & F_LOCAL_SIGS_ONLY; - int sreds = ERTS_BIF_REDS_LEFT(c_p); - int sres; + int sres, sreds, reds_left; + + reds_left = ERTS_BIF_REDS_LEFT(c_p); + sreds = reds_left; if (!local_only) { erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); @@ -1084,15 +1075,19 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2) } sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0); + + BUMP_REDS(c_p, (int) sreds); + reds_left -= sreds; + if (state & ERTS_PSFLG_EXITING) { c_p->flags &= ~F_LOCAL_SIGS_ONLY; goto exited; } - if (!sres) { + if (!sres | (reds_left <= 0)) { /* - * More signals to handle; need to yield and continue. - * Prevent fetching of more signals by setting - * local-sigs-only flag. + * More signals to handle or out of reds; need + * to yield and continue. Prevent fetching of + * more signals by setting local-sigs-only flag. */ c_p->flags |= F_LOCAL_SIGS_ONLY; goto yield; @@ -1165,6 +1160,7 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2) else { if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND) goto send_signal; + state = ERTS_PSFLG_RUNNING; /* fail state... */ rp = erts_try_lock_sig_free_proc(pid, locks, &state); if (!rp) goto undefined; @@ -1626,56 +1622,56 @@ process_info_aux(Process *c_p, case ERTS_PI_IX_SUSPENDING: { ErtsSuspendMonitorInfoCollection smic; int i; - Eterm item; - erts_proc_lock(rp, ERTS_PROC_LOCK_STATUS); + ERTS_INIT_SUSPEND_MONITOR_INFOS(smic); - ERTS_INIT_SUSPEND_MONITOR_INFOS(smic, - c_p, - (c_p == rp - ? ERTS_PROC_LOCK_MAIN - : 0) | ERTS_PROC_LOCK_STATUS); - - erts_monitor_tree_foreach(rp->suspend_monitors, - &collect_one_suspend_monitor, - &smic); + erts_monitor_tree_foreach(ERTS_P_MONITORS(rp), + collect_one_suspend_monitor, + (void *) &smic); reserve_size += smic.sz; res = NIL; for (i = 0; i < smic.smi_i; i++) { - Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */ - Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... 
*/ - Eterm active; - Eterm pending; + ErtsMonitorSuspend *msp; + erts_aint_t mstate; + Sint ci; + Eterm ct, active, pending, item; Uint sz = 4 + 2; - if (!IS_SSMALL(a)) - sz += BIG_UINT_HEAP_SIZE; - if (!IS_SSMALL(p)) - sz += BIG_UINT_HEAP_SIZE; + + msp = smic.smi[i]; + mstate = erts_atomic_read_nob(&msp->state); + + ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK); + if (!IS_SSMALL(ci)) + sz += BIG_UINT_HEAP_SIZE; ERTS_PI_UNRESERVE(reserve_size, sz); hp = erts_produce_heap(hfact, sz, reserve_size); - if (IS_SSMALL(a)) - active = make_small(a); - else { - active = small_to_big(a, hp); - hp += BIG_UINT_HEAP_SIZE; - } - if (IS_SSMALL(p)) - pending = make_small(p); - else { - pending = small_to_big(p, hp); - hp += BIG_UINT_HEAP_SIZE; - } - item = TUPLE3(hp, smic.smi[i]->mon.other.item, active, pending); + if (IS_SSMALL(ci)) + ct = make_small(ci); + else { + ct = small_to_big(ci, hp); + hp += BIG_UINT_HEAP_SIZE; + } + + if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) { + active = ct; + pending = make_small(0); + } + else { + active = make_small(0); + pending = ct; + } + + ASSERT(is_internal_pid(msp->md.origin.other.item)); + + item = TUPLE3(hp, msp->md.origin.other.item, active, pending); hp += 4; res = CONS(hp, item, res); } - erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - *reds += (Uint) smic.smi_i / 4; ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic); @@ -2151,45 +2147,6 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */ return make_small(sizeof(UWord)); } goto badarg; - } else if (sel == am_allocated) { - if (arity == 2) { - Eterm res = THE_NON_VALUE; - char *buf; - Sint len = is_string(*tp); - if (len <= 0) - return res; - buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1); - if (intlist_to_buf(*tp, buf, len) != len) - erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__); - buf[len] = '\0'; - res = erts_instr_dump_memory_map(buf) ? am_true : am_false; - erts_free(ERTS_ALC_T_TMP, (void *) buf); - if (is_non_value(res)) - goto badarg; - return res; - } - else if (arity == 3 && tp[0] == am_status) { - if (is_atom(tp[1])) - return erts_instr_get_stat(BIF_P, tp[1], 1); - else { - Eterm res = THE_NON_VALUE; - char *buf; - Sint len = is_string(tp[1]); - if (len <= 0) - return res; - buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1); - if (intlist_to_buf(tp[1], buf, len) != len) - erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__); - buf[len] = '\0'; - res = erts_instr_dump_stat(buf, 1) ? 
am_true : am_false; - erts_free(ERTS_ALC_T_TMP, (void *) buf); - if (is_non_value(res)) - goto badarg; - return res; - } - } - else - goto badarg; } else if (sel == am_allocator) { switch (arity) { case 2: @@ -2557,8 +2514,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (BIF_ARG_1 == am_allocated_areas) { res = erts_allocated_areas(NULL, NULL, BIF_P); BIF_RET(res); - } else if (BIF_ARG_1 == am_allocated) { - BIF_RET(erts_instr_get_memory_map(BIF_P)); } else if (BIF_ARG_1 == am_hipe_architecture) { #if defined(HIPE) BIF_RET(hipe_arch_name); @@ -2699,9 +2654,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) sizeof(ERLANG_ARCHITECTURE)-1, NIL)); } - else if (BIF_ARG_1 == am_memory_types) { - return erts_instr_get_type_info(BIF_P); - } else if (BIF_ARG_1 == am_os_type) { BIF_RET(os_type_tuple); } @@ -3680,26 +3632,46 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } -BIF_RETTYPE process_display_2(BIF_ALIST_2) +static Eterm +process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp) { - Process *rp; + if (redsp) + *redsp = 1; - if (BIF_ARG_2 != am_backtrace) - BIF_ERROR(BIF_P, BADARG); + if (ERTS_PROC_IS_EXITING(c_p)) + return am_badarg; - rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN, - BIF_ARG_1, ERTS_PROC_LOCKS_ALL); - if(!rp) { - BIF_ERROR(BIF_P, BADARG); - } - if (rp == ERTS_PROC_LOCK_BUSY) - ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P, - BIF_ARG_1, BIF_ARG_2); - erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp); - erts_proc_unlock(rp, (BIF_P == rp - ? ERTS_PROC_LOCKS_ALL_MINOR - : ERTS_PROC_LOCKS_ALL)); - BIF_RET(am_true); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + + return am_true; +} + + +BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2) +{ + Eterm res; + + if (BIF_ARG_2 != am_backtrace) + BIF_RET(am_badarg); + + if (BIF_P->common.id == BIF_ARG_1) { + res = process_display(BIF_P, NULL, NULL, NULL); + BIF_RET(res); + } + + if (is_not_internal_pid(BIF_ARG_1)) + BIF_RET(am_badarg); + + res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1, + !0, + process_display, + NULL); + if (is_non_value(res)) + BIF_RET(am_badarg); + + BIF_RET(res); } /* this is a general call which returns some possibly useful information */ @@ -4640,27 +4612,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(am_true); } } - else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) { - int old_use_opt, use_opt; - switch (BIF_ARG_2) { - case am_true: - use_opt = 1; - break; - case am_false: - use_opt = 0; - break; - default: - BIF_ERROR(BIF_P, BADARG); - } - - erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_thr_progress_block(); - old_use_opt = !erts_disable_proc_not_running_opt; - erts_disable_proc_not_running_opt = !use_opt; - erts_thr_progress_unblock(); - erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_RET(old_use_opt ? am_true : am_false); - } else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) { if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) { int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS; @@ -4744,6 +4695,55 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } +static BIF_RETTYPE +gather_histograms_helper(Process * c_p, Eterm arg_tuple, + int gather(Process *, int, int, int, UWord, Eterm)) +{ + SWord hist_start, hist_width, sched_id; + int msg_count, alloc_num; + Eterm *args; + + /* This is an internal BIF, so the error checking is mostly left to Erlang + * code. 
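The argument is
+ * expected to be a 5-tuple {AllocatorName, SchedulerId, HistWidth,
+ * HistStart, Ref}, e.g., from Erlang (illustrative only):
+ *
+ *   erts_internal:gather_alloc_histograms({binary_alloc, 1, 18, 128, Ref})
+ *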
*/ + + ASSERT(is_tuple_arity(arg_tuple, 5)); + args = tuple_val(arg_tuple); + + for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) { + if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) { + break; + } + } + + if (alloc_num > ERTS_ALC_A_MAX) { + BIF_ERROR(c_p, BADARG); + } + + sched_id = signed_val(args[2]); + hist_width = signed_val(args[3]); + hist_start = signed_val(args[4]); + + if (sched_id < 0 || sched_id > erts_no_schedulers) { + BIF_ERROR(c_p, BADARG); + } + + msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]); + + BIF_RET(make_small(msg_count)); +} + +BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1) +{ + return gather_histograms_helper(BIF_P, BIF_ARG_1, + erts_alcu_gather_alloc_histograms); +} + +BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1) +{ + return gather_histograms_helper(BIF_P, BIF_ARG_1, + erts_alcu_gather_carrier_info); +} + #ifdef ERTS_ENABLE_LOCK_COUNT typedef struct { diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index 1953f79d79..f9d351e69e 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -809,10 +809,129 @@ Eterm trace_info_2(BIF_ALIST_2) BIF_ERROR(p, BADARG); } erts_release_code_write_permission(); + + if (is_internal_ref(res)) + BIF_TRAP1(erts_await_result, BIF_P, res); + BIF_RET(res); } static Eterm +build_trace_flags_term(Eterm **hpp, Uint *szp, Uint trace_flags) +{ + +#define ERTS_TFLAG__(F, FN) \ + if (trace_flags & F) { \ + if (szp) \ + sz += 2; \ + if (hp) { \ + res = CONS(hp, FN, res); \ + hp += 2; \ + } \ + } + + Eterm res; + Uint sz = 0; + Eterm *hp; + + if (hpp) { + hp = *hpp; + res = NIL; + } + else { + hp = NULL; + res = THE_NON_VALUE; + } + + ERTS_TFLAG__(F_NOW_TS, am_timestamp); + ERTS_TFLAG__(F_STRICT_MON_TS, am_strict_monotonic_timestamp); + ERTS_TFLAG__(F_MON_TS, am_monotonic_timestamp); + ERTS_TFLAG__(F_TRACE_SEND, am_send); + ERTS_TFLAG__(F_TRACE_RECEIVE, am_receive); + ERTS_TFLAG__(F_TRACE_SOS, am_set_on_spawn); + ERTS_TFLAG__(F_TRACE_CALLS, am_call); + ERTS_TFLAG__(F_TRACE_PROCS, am_procs); + ERTS_TFLAG__(F_TRACE_SOS1, am_set_on_first_spawn); + ERTS_TFLAG__(F_TRACE_SOL, am_set_on_link); + ERTS_TFLAG__(F_TRACE_SOL1, am_set_on_first_link); + ERTS_TFLAG__(F_TRACE_SCHED, am_running); + ERTS_TFLAG__(F_TRACE_SCHED_EXIT, am_exiting); + ERTS_TFLAG__(F_TRACE_GC, am_garbage_collection); + ERTS_TFLAG__(F_TRACE_ARITY_ONLY, am_arity); + ERTS_TFLAG__(F_TRACE_RETURN_TO, am_return_to); + ERTS_TFLAG__(F_TRACE_SILENT, am_silent); + ERTS_TFLAG__(F_TRACE_SCHED_NO, am_scheduler_id); + ERTS_TFLAG__(F_TRACE_PORTS, am_ports); + ERTS_TFLAG__(F_TRACE_SCHED_PORTS, am_running_ports); + ERTS_TFLAG__(F_TRACE_SCHED_PROCS, am_running_procs); + + if (szp) + *szp += sz; + + if (hpp) + *hpp = hp; + + return res; + +#undef ERTS_TFLAG__ +} + +static Eterm +trace_info_tracee(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp) +{ + ErlHeapFragment *bp; + Eterm *hp, res, key; + Uint sz; + + *redsp = 1; + + if (ERTS_PROC_IS_EXITING(c_p)) + return am_undefined; + + key = (Eterm) arg; + sz = 3; + + if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(c_p))) + erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCK_MAIN, + &c_p->common); + + switch (key) { + case am_tracer: + + erts_build_tracer_to_term(NULL, NULL, &sz, ERTS_TRACER(c_p)); + bp = new_message_buffer(sz); + hp = bp->mem; + res = erts_build_tracer_to_term(&hp, &bp->off_heap, + NULL, ERTS_TRACER(c_p)); + if (res == am_false) + res = NIL; + break; + + case am_flags: + + 
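/* Two-pass pattern: the sizing call just below (szp != NULL, hpp == NULL)
+ * only accumulates the needed heap words into sz; the second call then
+ * builds the actual flag list into the allocated buffer. */
+ 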
build_trace_flags_term(NULL, &sz, ERTS_TRACE_FLAGS(c_p)); + bp = new_message_buffer(sz); + hp = bp->mem; + res = build_trace_flags_term(&hp, NULL, ERTS_TRACE_FLAGS(c_p)); + break; + + default: + + ERTS_INTERNAL_ERROR("Key not supported"); + res = NIL; + bp = NULL; + hp = NULL; + break; + } + + *redsp += 2; + + res = TUPLE2(hp, key, res); + *bpp = bp; + return res; +} + +static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key) { Eterm tracer; @@ -846,24 +965,19 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) erts_port_release(tracee); } else if (is_internal_pid(pid_spec)) { - Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN, - pid_spec, ERTS_PROC_LOCK_MAIN); - - if (tracee == ERTS_PROC_LOCK_BUSY) - ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key); + Eterm ref; - if (!tracee) - return am_undefined; + if (key != am_flags && key != am_tracer) + goto error; - if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee))) - erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN, - &tracee->common); + ref = erts_proc_sig_send_rpc_request(p, pid_spec, !0, + trace_info_tracee, + (void *) key); - tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee)); - trace_flags = ERTS_TRACE_FLAGS(tracee); + if (is_non_value(ref)) + return am_undefined; - if (tracee != p) - erts_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); + return ref; } else if (is_external_pid(pid_spec) && external_pid_dist_entry(pid_spec) == erts_this_dist_entry) { return am_undefined; @@ -873,48 +987,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) } if (key == am_flags) { - int num_flags = 21; /* MAXIMUM number of flags. */ - Uint needed = 3+2*num_flags; - Eterm flag_list = NIL; - Eterm* limit; + Eterm flag_list; + Uint sz = 3; + Eterm *hp; -#define FLAG0(flag_mask,flag) \ - if (trace_flags & (flag_mask)) { flag_list = CONS(hp, flag, flag_list); hp += 2; } else {} + build_trace_flags_term(NULL, &sz, trace_flags); + + hp = HAlloc(p, sz); + + flag_list = build_trace_flags_term(&hp, NULL, trace_flags); -#if defined(DEBUG) - /* - * Check num_flags if this assertion fires. 
- */ -# define FLAG ASSERT(num_flags-- > 0); FLAG0 -#else -# define FLAG FLAG0 -#endif - hp = HAlloc(p, needed); - limit = hp+needed; - FLAG(F_NOW_TS, am_timestamp); - FLAG(F_STRICT_MON_TS, am_strict_monotonic_timestamp); - FLAG(F_MON_TS, am_monotonic_timestamp); - FLAG(F_TRACE_SEND, am_send); - FLAG(F_TRACE_RECEIVE, am_receive); - FLAG(F_TRACE_SOS, am_set_on_spawn); - FLAG(F_TRACE_CALLS, am_call); - FLAG(F_TRACE_PROCS, am_procs); - FLAG(F_TRACE_SOS1, am_set_on_first_spawn); - FLAG(F_TRACE_SOL, am_set_on_link); - FLAG(F_TRACE_SOL1, am_set_on_first_link); - FLAG(F_TRACE_SCHED, am_running); - FLAG(F_TRACE_SCHED_EXIT, am_exiting); - FLAG(F_TRACE_GC, am_garbage_collection); - FLAG(F_TRACE_ARITY_ONLY, am_arity); - FLAG(F_TRACE_RETURN_TO, am_return_to); - FLAG(F_TRACE_SILENT, am_silent); - FLAG(F_TRACE_SCHED_NO, am_scheduler_id); - FLAG(F_TRACE_PORTS, am_ports); - FLAG(F_TRACE_SCHED_PORTS, am_running_ports); - FLAG(F_TRACE_SCHED_PROCS, am_running_procs); -#undef FLAG0 -#undef FLAG - HRelease(p,limit,hp+3); return TUPLE2(hp, key, flag_list); } else if (key == am_tracer) { if (tracer == am_false) diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 468e22ed59..3a29f8cf56 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -3643,7 +3643,7 @@ send_ets_transfer_message(Process *c_p, Process *proc, hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp); sender = c_p->common.id; msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy); - erts_queue_message(proc, *locks, mp, msg, sender); + erts_queue_proc_message(c_p, proc, *locks, mp, msg); } diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index fc23b793c8..37d261d0df 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -638,6 +638,18 @@ static DMCGuardBif guard_tab[] = DBIF_ALL }, { + am_map_get, + &map_get_2, + 2, + DBIF_ALL + }, + { + am_is_map_key, + &is_map_key_2, + 2, + DBIF_ALL + }, + { am_bit_size, &bit_size_1, 1, @@ -5737,5 +5749,3 @@ void db_match_dis(Binary *bp) } #endif /* DMC_DEBUG */ - - diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index b498fd9cf9..a65dbbf42b 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -61,7 +61,7 @@ # define ERTS_GC_ASSERT(B) ((void) 1) #endif -#if defined(DEBUG) && 1 +#if defined(DEBUG) && 0 # define HARDDEBUG 1 #endif @@ -223,23 +223,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq, 5, ERTS_ALC_T_GC_INFO_REQ) -static ERTS_INLINE void -ensure_sigq_roots_available(Process *p) -{ - ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); - switch (p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) { - case F_OFF_HEAP_MSGQ_CHNG: - case 0: - erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ); - erts_proc_sig_fetch(p); - erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ); - break; - default: - break; - } -} - - /* * Initialize GC global data. */ @@ -430,28 +413,25 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end, { int cost; - if (p->flags & F_HIBERNATE_SCHED) { + if (p->flags & (F_HIBERNATE_SCHED|F_HIPE_RECV_LOCKED)) { /* * We just hibernated. We do *not* want to mess * up the hibernation by an ordinary GC... + * + * OR + * + * We left a receive in HiPE with message + * queue lock locked, and we do not want to + * do a GC with message queue locked... */ return result; } -#ifdef HIPE - if (p->hipe_smp.have_receive_locks) { - /* Do not want to GC with message queue locked... 
*/ - return result; - } -#endif - if (!p->mbuf) { /* Must have GC:d in BIF call... invalidate live_hf_end */ live_hf_end = ERTS_INVALID_HFRAG_PTR; } - ensure_sigq_roots_available(p); - if (is_non_value(result)) { if (p->freason == TRAP) { #ifdef HIPE @@ -895,11 +875,8 @@ do_major_collection: int erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls) { - int reds; - int reds_left; - ensure_sigq_roots_available(p); - reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0); - reds_left = ERTS_REDS_LEFT(p, fcalls); + int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0); + int reds_left = ERTS_REDS_LEFT(p, fcalls); if (reds > reds_left) reds = reds_left; ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds); @@ -909,9 +886,7 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca void erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) { - int reds; - ensure_sigq_roots_available(p); - reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0); + int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0); BUMP_REDS(p, reds); ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p) >= erts_proc_sched_data(p)->virtual_reds); @@ -1137,8 +1112,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, * First an ordinary major collection... */ - ensure_sigq_roots_available(p); - p->flags |= F_NEED_FULLSWEEP; if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p))) diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 179822cc0b..57c6c10c7f 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -37,7 +37,7 @@ #include "erl_mseg.h" #include "erl_threads.h" #include "erl_hl_timer.h" -#include "erl_instrument.h" +#include "erl_mtrace.h" #include "erl_printf_term.h" #include "erl_misc_utils.h" #include "packet_parser.h" diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c deleted file mode 100644 index 2f70e7996e..0000000000 --- a/erts/emulator/beam/erl_instrument.c +++ /dev/null @@ -1,1257 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2003-2016. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * %CopyrightEnd% - */ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "global.h" -#include "big.h" -#include "erl_instrument.h" -#include "erl_threads.h" - -typedef union { long l; double d; } Align_t; - -typedef struct { - Uint size; -#ifdef VALGRIND - void* valgrind_leak_suppressor; -#endif - Align_t mem[1]; -} StatBlock_t; - -#define STAT_BLOCK_HEADER_SIZE (sizeof(StatBlock_t) - sizeof(Align_t)) - -typedef struct MapStatBlock_t_ MapStatBlock_t; -struct MapStatBlock_t_ { - Uint size; - ErtsAlcType_t type_no; - Eterm pid; - MapStatBlock_t *prev; - MapStatBlock_t *next; - Align_t mem[1]; -}; - -#define MAP_STAT_BLOCK_HEADER_SIZE (sizeof(MapStatBlock_t) - sizeof(Align_t)) - -typedef struct { - Uint size; - Uint max_size; - Uint max_size_ever; - - Uint blocks; - Uint max_blocks; - Uint max_blocks_ever; -} Stat_t; - -static erts_mtx_t instr_mutex; -static erts_mtx_t instr_x_mutex; - -int erts_instr_memory_map; -int erts_instr_stat; - -static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1]; - -struct stats_ { - Stat_t tot; - Stat_t a[ERTS_ALC_A_MAX+1]; - Stat_t *ap[ERTS_ALC_A_MAX+1]; - Stat_t c[ERTS_ALC_C_MAX+1]; - Stat_t n[ERTS_ALC_N_MAX+1]; -}; - -static struct stats_ *stats; - -static MapStatBlock_t *mem_anchor; - -static Eterm *am_tot; -static Eterm *am_n; -static Eterm *am_a; -static Eterm *am_c; - -static int atoms_initialized; - -static struct { - Eterm total; - Eterm allocators; - Eterm classes; - Eterm types; - Eterm sizes; - Eterm blocks; - Eterm instr_hdr; -#ifdef DEBUG - Eterm end_of_atoms; -#endif -} am; - -static void ERTS_INLINE atom_init(Eterm *atom, const char *name) -{ - *atom = am_atom_put((char *) name, sys_strlen(name)); -} -#define AM_INIT(AM) atom_init(&am.AM, #AM) - -static void -init_atoms(void) -{ -#ifdef DEBUG - Eterm *atom; - for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) { - *atom = THE_NON_VALUE; - } -#endif - - AM_INIT(total); - AM_INIT(allocators); - AM_INIT(classes); - AM_INIT(types); - AM_INIT(sizes); - AM_INIT(blocks); - AM_INIT(instr_hdr); - -#ifdef DEBUG - for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) { - ASSERT(*atom != THE_NON_VALUE); - } -#endif - - atoms_initialized = 1; -} - -#undef AM_INIT - -static void -init_am_tot(void) -{ - am_tot = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO, - sizeof(Eterm)); - atom_init(am_tot, "total"); -} - - -static void -init_am_n(void) -{ - int i; - am_n = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO, - (ERTS_ALC_N_MAX+1)*sizeof(Eterm)); - - for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) { - atom_init(&am_n[i], ERTS_ALC_N2TD(i)); - } - -} - -static void -init_am_c(void) -{ - int i; - am_c = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO, - (ERTS_ALC_C_MAX+1)*sizeof(Eterm)); - - for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) { - atom_init(&am_c[i], ERTS_ALC_C2CD(i)); - } - -} - -static void -init_am_a(void) -{ - int i; - am_a = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO, - (ERTS_ALC_A_MAX+1)*sizeof(Eterm)); - - for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - atom_init(&am_a[i], ERTS_ALC_A2AD(i)); - } - -} - -static ERTS_INLINE void -stat_upd_alloc(ErtsAlcType_t n, Uint size) -{ - ErtsAlcType_t t = ERTS_ALC_N2T(n); - ErtsAlcType_t a = ERTS_ALC_T2A(t); - ErtsAlcType_t c = ERTS_ALC_T2C(t); - - stats->ap[a]->size += size; - if (stats->ap[a]->max_size < stats->ap[a]->size) - stats->ap[a]->max_size = stats->ap[a]->size; - - stats->c[c].size += size; - if (stats->c[c].max_size < stats->c[c].size) - stats->c[c].max_size = stats->c[c].size; - - stats->n[n].size += 
size; - if (stats->n[n].max_size < stats->n[n].size) - stats->n[n].max_size = stats->n[n].size; - - stats->tot.size += size; - if (stats->tot.max_size < stats->tot.size) - stats->tot.max_size = stats->tot.size; - - stats->ap[a]->blocks++; - if (stats->ap[a]->max_blocks < stats->ap[a]->blocks) - stats->ap[a]->max_blocks = stats->ap[a]->blocks; - - stats->c[c].blocks++; - if (stats->c[c].max_blocks < stats->c[c].blocks) - stats->c[c].max_blocks = stats->c[c].blocks; - - stats->n[n].blocks++; - if (stats->n[n].max_blocks < stats->n[n].blocks) - stats->n[n].max_blocks = stats->n[n].blocks; - - stats->tot.blocks++; - if (stats->tot.max_blocks < stats->tot.blocks) - stats->tot.max_blocks = stats->tot.blocks; - -} - - -static ERTS_INLINE void -stat_upd_free(ErtsAlcType_t n, Uint size) -{ - ErtsAlcType_t t = ERTS_ALC_N2T(n); - ErtsAlcType_t a = ERTS_ALC_T2A(t); - ErtsAlcType_t c = ERTS_ALC_T2C(t); - - ASSERT(stats->ap[a]->size >= size); - stats->ap[a]->size -= size; - - ASSERT(stats->c[c].size >= size); - stats->c[c].size -= size; - - ASSERT(stats->n[n].size >= size); - stats->n[n].size -= size; - - ASSERT(stats->tot.size >= size); - stats->tot.size -= size; - - ASSERT(stats->ap[a]->blocks > 0); - stats->ap[a]->blocks--; - - ASSERT(stats->c[c].blocks > 0); - stats->c[c].blocks--; - - ASSERT(stats->n[n].blocks > 0); - stats->n[n].blocks--; - - ASSERT(stats->tot.blocks > 0); - stats->tot.blocks--; - -} - - -static ERTS_INLINE void -stat_upd_realloc(ErtsAlcType_t n, Uint size, Uint old_size) -{ - if (old_size) - stat_upd_free(n, old_size); - stat_upd_alloc(n, size); -} - -/* - * stat instrumentation callback functions - */ - -static void stat_pre_lock(void) -{ - erts_mtx_lock(&instr_mutex); -} - -static void stat_pre_unlock(void) -{ - erts_mtx_unlock(&instr_mutex); -} - -static ErtsAllocatorWrapper_t instr_wrapper; - -static void * -stat_alloc(ErtsAlcType_t n, void *extra, Uint size) -{ - ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; - Uint ssize; - void *res; - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_lock(&instr_mutex); - } - - ssize = size + STAT_BLOCK_HEADER_SIZE; - res = (*real_af->alloc)(n, real_af->extra, ssize); - if (res) { - stat_upd_alloc(n, size); - ((StatBlock_t *) res)->size = size; -#ifdef VALGRIND - /* Suppress "possibly leaks" by storing an actual dummy pointer - to the _start_ of the allocated block.*/ - ((StatBlock_t *) res)->valgrind_leak_suppressor = res; -#endif - res = (void *) ((StatBlock_t *) res)->mem; - } - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_unlock(&instr_mutex); - } - - return res; -} - -static void * -stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size) -{ - ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; - Uint old_size; - Uint ssize; - void *sptr; - void *res; - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_lock(&instr_mutex); - } - - if (ptr) { - sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE); - old_size = ((StatBlock_t *) sptr)->size; - } - else { - sptr = NULL; - old_size = 0; - } - - ssize = size + STAT_BLOCK_HEADER_SIZE; - res = (*real_af->realloc)(n, real_af->extra, sptr, ssize); - if (res) { - stat_upd_realloc(n, size, old_size); - ((StatBlock_t *) res)->size = size; -#ifdef VALGRIND - ((StatBlock_t *) res)->valgrind_leak_suppressor = res; -#endif - res = (void *) ((StatBlock_t *) res)->mem; - } - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_unlock(&instr_mutex); - } - - return res; -} - -static void -stat_free(ErtsAlcType_t n, void 
*extra, void *ptr) -{ - ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; - void *sptr; - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_lock(&instr_mutex); - } - - if (ptr) { - sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE); - stat_upd_free(n, ((StatBlock_t *) sptr)->size); - } - else { - sptr = NULL; - } - - (*real_af->free)(n, real_af->extra, sptr); - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_unlock(&instr_mutex); - } - -} - -/* - * map stat instrumentation callback functions - */ - -static void map_stat_pre_lock(void) -{ - erts_mtx_lock(&instr_x_mutex); - erts_mtx_lock(&instr_mutex); -} - -static void map_stat_pre_unlock(void) -{ - erts_mtx_unlock(&instr_mutex); - erts_mtx_unlock(&instr_x_mutex); -} - -static void * -map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size) -{ - ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; - Uint msize; - void *res; - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_lock(&instr_mutex); - } - - msize = size + MAP_STAT_BLOCK_HEADER_SIZE; - res = (*real_af->alloc)(n, real_af->extra, msize); - if (res) { - MapStatBlock_t *mb = (MapStatBlock_t *) res; - stat_upd_alloc(n, size); - - mb->size = size; - mb->type_no = n; - mb->pid = erts_get_current_pid(); - - mb->prev = NULL; - mb->next = mem_anchor; - if (mem_anchor) - mem_anchor->prev = mb; - mem_anchor = mb; - - res = (void *) mb->mem; - } - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_unlock(&instr_mutex); - } - - return res; -} - -static void * -map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size) -{ - ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; - Uint old_size; - Uint msize; - void *mptr; - void *res; - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_lock(&instr_x_mutex); - erts_mtx_lock(&instr_mutex); - } - - if (ptr) { - mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE); - old_size = ((MapStatBlock_t *) mptr)->size; - } - else { - mptr = NULL; - old_size = 0; - } - - msize = size + MAP_STAT_BLOCK_HEADER_SIZE; - res = (*real_af->realloc)(n, real_af->extra, mptr, msize); - if (res) { - MapStatBlock_t *mb = (MapStatBlock_t *) res; - - mb->size = size; - mb->type_no = n; - mb->pid = erts_get_current_pid(); - - stat_upd_realloc(n, size, old_size); - - if (mptr != res) { - - if (mptr) { - if (mb->prev) - mb->prev->next = mb; - else { - ASSERT(mem_anchor == (MapStatBlock_t *) mptr); - mem_anchor = mb; - } - if (mb->next) - mb->next->prev = mb; - } - else { - mb->prev = NULL; - mb->next = mem_anchor; - if (mem_anchor) - mem_anchor->prev = mb; - mem_anchor = mb; - } - - } - - res = (void *) mb->mem; - } - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_unlock(&instr_mutex); - erts_mtx_unlock(&instr_x_mutex); - } - - return res; -} - -static void -map_stat_free(ErtsAlcType_t n, void *extra, void *ptr) -{ - ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra; - void *mptr; - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_lock(&instr_x_mutex); - erts_mtx_lock(&instr_mutex); - } - - if (ptr) { - MapStatBlock_t *mb; - - mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE); - mb = (MapStatBlock_t *) mptr; - - stat_upd_free(n, mb->size); - - if (mb->prev) - mb->prev->next = mb->next; - else - mem_anchor = mb->next; - if (mb->next) - mb->next->prev = mb->prev; - } - else { - mptr = NULL; - } - - (*real_af->free)(n, real_af->extra, mptr); - - if (!erts_is_allctr_wrapper_prelocked()) { - erts_mtx_unlock(&instr_mutex); 
- erts_mtx_unlock(&instr_x_mutex); - } - -} - -static void dump_memory_map_to_stream(fmtfn_t to, void* to_arg) -{ - ErtsAlcType_t n; - MapStatBlock_t *bp; - int lock = !ERTS_IS_CRASH_DUMPING; - if (lock) { - ASSERT(!erts_is_allctr_wrapper_prelocked()); - erts_mtx_lock(&instr_mutex); - } - - /* Write header */ - - erts_cbprintf(to, to_arg, - "{instr_hdr,\n" - " %lu,\n" - " %lu,\n" - " {", - (unsigned long) ERTS_INSTR_VSN, - (unsigned long) MAP_STAT_BLOCK_HEADER_SIZE); - -#if ERTS_ALC_N_MIN != 1 -#error ERTS_ALC_N_MIN is not 1 -#endif - - for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) { - ErtsAlcType_t t = ERTS_ALC_N2T(n); - ErtsAlcType_t a = ERTS_ALC_T2A(t); - ErtsAlcType_t c = ERTS_ALC_T2C(t); - const char *astr; - - if (erts_allctrs_info[a].enabled) - astr = ERTS_ALC_A2AD(a); - else - astr = ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM); - - erts_cbprintf(to, to_arg, - "%s{%s,%s,%s}%s", - (n == ERTS_ALC_N_MIN) ? "" : " ", - ERTS_ALC_N2TD(n), - astr, - ERTS_ALC_C2CD(c), - (n == ERTS_ALC_N_MAX) ? "" : ",\n"); - } - - erts_cbprintf(to, to_arg, "}}.\n"); - - /* Write memory data */ - for (bp = mem_anchor; bp; bp = bp->next) { - if (is_internal_pid(bp->pid)) - erts_cbprintf(to, to_arg, - "{%lu, %lu, %lu, {%lu,%lu,%lu}}.\n", - (UWord) bp->type_no, - (UWord) bp->mem, - (UWord) bp->size, - (UWord) pid_channel_no(bp->pid), - (UWord) pid_number(bp->pid), - (UWord) pid_serial(bp->pid)); - else - erts_cbprintf(to, to_arg, - "{%lu, %lu, %lu, undefined}.\n", - (UWord) bp->type_no, - (UWord) bp->mem, - (UWord) bp->size); - } - - if (lock) - erts_mtx_unlock(&instr_mutex); -} - -int erts_instr_dump_memory_map_to(fmtfn_t to, void* to_arg) -{ - if (!erts_instr_memory_map) - return 0; - - dump_memory_map_to_stream(to, to_arg); - return 1; -} - -int erts_instr_dump_memory_map(const char *name) -{ - int fd; - - if (!erts_instr_memory_map) - return 0; - - fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, 0640); - if (fd < 0) - return 0; - - dump_memory_map_to_stream(erts_write_fd, (void*)&fd); - - close(fd); - return 1; -} - -Eterm erts_instr_get_memory_map(Process *proc) -{ - MapStatBlock_t *org_mem_anchor; - Eterm hdr_tuple, md_list, res; - Eterm *hp; - Uint hsz; - MapStatBlock_t *bp; -#ifdef DEBUG - Eterm *end_hp; -#endif - - if (!erts_instr_memory_map) - return am_false; - - if (!atoms_initialized) - init_atoms(); - if (!am_n) - init_am_n(); - if (!am_c) - init_am_c(); - if (!am_a) - init_am_a(); - - erts_mtx_lock(&instr_x_mutex); - erts_mtx_lock(&instr_mutex); - - /* Header size */ - hsz = 5 + 1 + (ERTS_ALC_N_MAX+1-ERTS_ALC_N_MIN)*(1 + 4); - - /* Memory data list */ - for (bp = mem_anchor; bp; bp = bp->next) { - if (is_internal_pid(bp->pid)) { -#if (_PID_NUM_SIZE - 1 > MAX_SMALL) - if (internal_pid_number(bp->pid) > MAX_SMALL) - hsz += BIG_UINT_HEAP_SIZE; -#endif -#if (_PID_SER_SIZE - 1 > MAX_SMALL) - if (internal_pid_serial(bp->pid) > MAX_SMALL) - hsz += BIG_UINT_HEAP_SIZE; -#endif - hsz += 4; - } - - if ((UWord) bp->mem > MAX_SMALL) - hsz += BIG_UINT_HEAP_SIZE; - if (bp->size > MAX_SMALL) - hsz += BIG_UINT_HEAP_SIZE; - - hsz += 5 + 2; - } - - hsz += 3; /* Root tuple */ - - org_mem_anchor = mem_anchor; - mem_anchor = NULL; - - erts_mtx_unlock(&instr_mutex); - - hp = HAlloc(proc, hsz); /* May end up calling map_stat_alloc() */ - - erts_mtx_lock(&instr_mutex); - -#ifdef DEBUG - end_hp = hp + hsz; -#endif - - { /* Build header */ - ErtsAlcType_t n; - Eterm type_map; - Uint *hp2 = hp; -#ifdef DEBUG - Uint *hp2_end; -#endif - - hp += (ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN)*4; - -#ifdef DEBUG - hp2_end = hp; -#endif - - 
type_map = make_tuple(hp); - *(hp++) = make_arityval(ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN); - - for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) { - ErtsAlcType_t t = ERTS_ALC_N2T(n); - ErtsAlcType_t a = ERTS_ALC_T2A(t); - ErtsAlcType_t c = ERTS_ALC_T2C(t); - - if (!erts_allctrs_info[a].enabled) - a = ERTS_ALC_A_SYSTEM; - - *(hp++) = TUPLE3(hp2, am_n[n], am_a[a], am_c[c]); - hp2 += 4; - } - - ASSERT(hp2 == hp2_end); - - hdr_tuple = TUPLE4(hp, - am.instr_hdr, - make_small(ERTS_INSTR_VSN), - make_small(MAP_STAT_BLOCK_HEADER_SIZE), - type_map); - - hp += 5; - } - - /* Build memory data list */ - - for (md_list = NIL, bp = org_mem_anchor; bp; bp = bp->next) { - Eterm tuple; - Eterm type; - Eterm ptr; - Eterm size; - Eterm pid; - - if (is_not_internal_pid(bp->pid)) - pid = am_undefined; - else { - Eterm c; - Eterm n; - Eterm s; - -#if (ERST_INTERNAL_CHANNEL_NO > MAX_SMALL) -#error Oversized internal channel number -#endif - c = make_small(ERST_INTERNAL_CHANNEL_NO); - -#if (_PID_NUM_SIZE - 1 > MAX_SMALL) - if (internal_pid_number(bp->pid) > MAX_SMALL) { - n = uint_to_big(internal_pid_number(bp->pid), hp); - hp += BIG_UINT_HEAP_SIZE; - } - else -#endif - n = make_small(internal_pid_number(bp->pid)); - -#if (_PID_SER_SIZE - 1 > MAX_SMALL) - if (internal_pid_serial(bp->pid) > MAX_SMALL) { - s = uint_to_big(internal_pid_serial(bp->pid), hp); - hp += BIG_UINT_HEAP_SIZE; - } - else -#endif - s = make_small(internal_pid_serial(bp->pid)); - pid = TUPLE3(hp, c, n, s); - hp += 4; - } - - -#if ERTS_ALC_N_MAX > MAX_SMALL -#error Oversized memory type number -#endif - type = make_small(bp->type_no); - - if ((UWord) bp->mem > MAX_SMALL) { - ptr = uint_to_big((UWord) bp->mem, hp); - hp += BIG_UINT_HEAP_SIZE; - } - else - ptr = make_small((UWord) bp->mem); - - if (bp->size > MAX_SMALL) { - size = uint_to_big(bp->size, hp); - hp += BIG_UINT_HEAP_SIZE; - } - else - size = make_small(bp->size); - - tuple = TUPLE4(hp, type, ptr, size, pid); - hp += 5; - - md_list = CONS(hp, tuple, md_list); - hp += 2; - } - - res = TUPLE2(hp, hdr_tuple, md_list); - - ASSERT(hp + 3 == end_hp); - - if (mem_anchor) { - for (bp = mem_anchor; bp->next; bp = bp->next) - ; - ASSERT(org_mem_anchor); - org_mem_anchor->prev = bp; - bp->next = org_mem_anchor; - } - else { - mem_anchor = org_mem_anchor; - } - - erts_mtx_unlock(&instr_mutex); - erts_mtx_unlock(&instr_x_mutex); - - return res; -} - -static ERTS_INLINE void -begin_new_max_period(Stat_t *stat, int min, int max) -{ - int i; - for (i = min; i <= max; i++) { - stat[i].max_size = stat[i].size; - stat[i].max_blocks = stat[i].blocks; - } -} - -static ERTS_INLINE void -update_max_ever_values(Stat_t *stat, int min, int max) -{ - int i; - for (i = min; i <= max; i++) { - if (stat[i].max_size_ever < stat[i].max_size) - stat[i].max_size_ever = stat[i].max_size; - if (stat[i].max_blocks_ever < stat[i].max_blocks) - stat[i].max_blocks_ever = stat[i].max_blocks; - } -} - -#define bld_string erts_bld_string -#define bld_tuple erts_bld_tuple -#define bld_tuplev erts_bld_tuplev -#define bld_list erts_bld_list -#define bld_2tup_list erts_bld_2tup_list -#define bld_uint erts_bld_uint - -Eterm -erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period) -{ - int i, len, max, min, allctr; - Eterm *names, *values, res; - Uint arr_size, stat_size, hsz, *hszp, *hp, **hpp; - Stat_t *stat_src, *stat; - - if (!erts_instr_stat) - return am_false; - - if (!atoms_initialized) - init_atoms(); - - if (what == am.total) { - min = 0; - max = 0; - allctr = 0; - stat_size = sizeof(Stat_t); - stat_src = 
&stats->tot; - if (!am_tot) - init_am_tot(); - names = am_tot; - } - else if (what == am.allocators) { - min = ERTS_ALC_A_MIN; - max = ERTS_ALC_A_MAX; - allctr = 1; - stat_size = sizeof(Stat_t)*(ERTS_ALC_A_MAX+1); - stat_src = stats->a; - if (!am_a) - init_am_a(); - names = am_a; - } - else if (what == am.classes) { - min = ERTS_ALC_C_MIN; - max = ERTS_ALC_C_MAX; - allctr = 0; - stat_size = sizeof(Stat_t)*(ERTS_ALC_C_MAX+1); - stat_src = stats->c; - if (!am_c) - init_am_c(); - names = &am_c[ERTS_ALC_C_MIN]; - } - else if (what == am.types) { - min = ERTS_ALC_N_MIN; - max = ERTS_ALC_N_MAX; - allctr = 0; - stat_size = sizeof(Stat_t)*(ERTS_ALC_N_MAX+1); - stat_src = stats->n; - if (!am_n) - init_am_n(); - names = &am_n[ERTS_ALC_N_MIN]; - } - else { - return THE_NON_VALUE; - } - - stat = (Stat_t *) erts_alloc(ERTS_ALC_T_TMP, stat_size); - - arr_size = (max - min + 1)*sizeof(Eterm); - - if (allctr) - names = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size); - - values = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size); - - erts_mtx_lock(&instr_mutex); - - update_max_ever_values(stat_src, min, max); - - sys_memcpy((void *) stat, (void *) stat_src, stat_size); - - if (begin_max_period) - begin_new_max_period(stat_src, min, max); - - erts_mtx_unlock(&instr_mutex); - - hsz = 0; - hszp = &hsz; - hpp = NULL; - - restart_bld: - - len = 0; - for (i = min; i <= max; i++) { - if (!allctr || erts_allctrs_info[i].enabled) { - Eterm s[2]; - - if (allctr) - names[len] = am_a[i]; - - s[0] = bld_tuple(hpp, hszp, 4, - am.sizes, - bld_uint(hpp, hszp, stat[i].size), - bld_uint(hpp, hszp, stat[i].max_size), - bld_uint(hpp, hszp, stat[i].max_size_ever)); - - s[1] = bld_tuple(hpp, hszp, 4, - am.blocks, - bld_uint(hpp, hszp, stat[i].blocks), - bld_uint(hpp, hszp, stat[i].max_blocks), - bld_uint(hpp, hszp, stat[i].max_blocks_ever)); - - values[len] = bld_list(hpp, hszp, 2, s); - - len++; - } - } - - res = bld_2tup_list(hpp, hszp, len, names, values); - - if (!hpp) { - hp = HAlloc(proc, hsz); - hszp = NULL; - hpp = &hp; - goto restart_bld; - } - - erts_free(ERTS_ALC_T_TMP, (void *) stat); - erts_free(ERTS_ALC_T_TMP, (void *) values); - if (allctr) - erts_free(ERTS_ALC_T_TMP, (void *) names); - - return res; -} - -static void -dump_stat_to_stream(fmtfn_t to, void* to_arg, int begin_max_period) -{ - ErtsAlcType_t i, a_max, a_min; - - erts_mtx_lock(&instr_mutex); - - erts_cbprintf(to, to_arg, - "{instr_vsn,%lu}.\n", - (unsigned long) ERTS_INSTR_VSN); - - update_max_ever_values(&stats->tot, 0, 0); - - erts_cbprintf(to, to_arg, - "{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n", - (UWord) stats->tot.size, - (UWord) stats->tot.max_size, - (UWord) stats->tot.max_size_ever, - (UWord) stats->tot.blocks, - (UWord) stats->tot.max_blocks, - (UWord) stats->tot.max_blocks_ever); - - a_max = 0; - a_min = ~0; - for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - if (erts_allctrs_info[i].enabled) { - if (a_min > i) - a_min = i; - if (a_max < i) - a_max = i; - } - } - - ASSERT(ERTS_ALC_A_MIN <= a_min && a_min <= ERTS_ALC_A_MAX); - ASSERT(ERTS_ALC_A_MIN <= a_max && a_max <= ERTS_ALC_A_MAX); - ASSERT(a_min <= a_max); - - update_max_ever_values(stats->a, a_min, a_max); - - for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - if (erts_allctrs_info[i].enabled) { - erts_cbprintf(to, to_arg, - "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s", - i == a_min ? 
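The restart_bld loop in erts_instr_get_stat() is the common erts_bld_* two-pass idiom: the first pass runs with hpp == NULL and only accumulates the required heap size into *hszp; after a single HAlloc, the identical code runs again with hszp == NULL and actually writes the terms. A toy model of the idiom, using a fake one-word term encoding rather than real Eterm layout:

#include <stdlib.h>

typedef unsigned long Term;

/* Builds a 2-tuple of 3 heap words; in the sizing pass it only counts. */
static Term bld_tuple2(Term **hpp, unsigned long *hszp, Term a, Term b)
{
    Term *hp;
    if (hszp)
        *hszp += 3;
    if (!hpp)
        return 0;                 /* return value unused in the sizing pass */
    hp = *hpp;
    hp[0] = 2;                    /* toy arity word */
    hp[1] = a;
    hp[2] = b;
    *hpp += 3;
    return (Term) hp;             /* toy boxed pointer */
}

static Term build_pair(void)
{
    unsigned long hsz = 0, *hszp = &hsz;
    Term *hp = NULL, **hpp = NULL;
    Term res;

restart_bld:
    res = bld_tuple2(hpp, hszp, 17, 42);

    if (!hpp) {                   /* sizing pass done; allocate, then rerun */
        hp = malloc(hsz * sizeof(Term));  /* HAlloc(proc, hsz) in the real code */
        if (!hp)
            abort();
        hszp = NULL;
        hpp = &hp;
        goto restart_bld;
    }
    return res;                   /* points at the malloc'ed toy heap */
}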
"{allocators,\n [" : " ", - ERTS_ALC_A2AD(i), - (UWord) stats->a[i].size, - (UWord) stats->a[i].max_size, - (UWord) stats->a[i].max_size_ever, - (UWord) stats->a[i].blocks, - (UWord) stats->a[i].max_blocks, - (UWord) stats->a[i].max_blocks_ever, - i == a_max ? "]}.\n" : ",\n"); - } - } - - update_max_ever_values(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX); - - for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) { - erts_cbprintf(to, to_arg, - "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s", - i == ERTS_ALC_C_MIN ? "{classes,\n [" : " ", - ERTS_ALC_C2CD(i), - (UWord) stats->c[i].size, - (UWord) stats->c[i].max_size, - (UWord) stats->c[i].max_size_ever, - (UWord) stats->c[i].blocks, - (UWord) stats->c[i].max_blocks, - (UWord) stats->c[i].max_blocks_ever, - i == ERTS_ALC_C_MAX ? "]}.\n" : ",\n" ); - } - - update_max_ever_values(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX); - - for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) { - erts_cbprintf(to, to_arg, - "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s", - i == ERTS_ALC_N_MIN ? "{types,\n [" : " ", - ERTS_ALC_N2TD(i), - (UWord) stats->n[i].size, - (UWord) stats->n[i].max_size, - (UWord) stats->n[i].max_size_ever, - (UWord) stats->n[i].blocks, - (UWord) stats->n[i].max_blocks, - (UWord) stats->n[i].max_blocks_ever, - i == ERTS_ALC_N_MAX ? "]}.\n" : ",\n" ); - } - - if (begin_max_period) { - begin_new_max_period(&stats->tot, 0, 0); - begin_new_max_period(stats->a, a_min, a_max); - begin_new_max_period(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX); - begin_new_max_period(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX); - } - - erts_mtx_unlock(&instr_mutex); - -} - -int erts_instr_dump_stat_to(fmtfn_t to, void* to_arg, int begin_max_period) -{ - if (!erts_instr_stat) - return 0; - - dump_stat_to_stream(to, to_arg, begin_max_period); - return 1; -} - -int erts_instr_dump_stat(const char *name, int begin_max_period) -{ - int fd; - - if (!erts_instr_stat) - return 0; - - fd = open(name, O_WRONLY | O_CREAT | O_TRUNC,0640); - if (fd < 0) - return 0; - - dump_stat_to_stream(erts_write_fd, (void*)&fd, begin_max_period); - - close(fd); - return 1; -} - - -Uint -erts_instr_get_total(void) -{ - return erts_instr_stat ? 
stats->tot.size : 0; -} - -Uint -erts_instr_get_max_total(void) -{ - if (erts_instr_stat) { - update_max_ever_values(&stats->tot, 0, 0); - return stats->tot.max_size_ever; - } - return 0; -} - -Eterm -erts_instr_get_type_info(Process *proc) -{ - Eterm res, *tpls; - Uint hsz, *hszp, *hp, **hpp; - ErtsAlcType_t n; - - if (!am_n) - init_am_n(); - if (!am_a) - init_am_a(); - if (!am_c) - init_am_c(); - - tpls = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, - (ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1) - * sizeof(Eterm)); - hsz = 0; - hszp = &hsz; - hpp = NULL; - - restart_bld: - -#if ERTS_ALC_N_MIN != 1 -#error ERTS_ALC_N_MIN is not 1 -#endif - - for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) { - ErtsAlcType_t t = ERTS_ALC_N2T(n); - ErtsAlcType_t a = ERTS_ALC_T2A(t); - ErtsAlcType_t c = ERTS_ALC_T2C(t); - - if (!erts_allctrs_info[a].enabled) - a = ERTS_ALC_A_SYSTEM; - - tpls[n - ERTS_ALC_N_MIN] - = bld_tuple(hpp, hszp, 3, am_n[n], am_a[a], am_c[c]); - } - - res = bld_tuplev(hpp, hszp, ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1, tpls); - - if (!hpp) { - hp = HAlloc(proc, hsz); - hszp = NULL; - hpp = &hp; - goto restart_bld; - } - - erts_free(ERTS_ALC_T_TMP, tpls); - - return res; -} - -Uint -erts_instr_init(int stat, int map_stat) -{ - Uint extra_sz; - int i; - - am_tot = NULL; - am_n = NULL; - am_c = NULL; - am_a = NULL; - - erts_instr_memory_map = 0; - erts_instr_stat = 0; - atoms_initialized = 0; - - if (!stat && !map_stat) - return 0; - - stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_)); - - erts_mtx_init(&instr_mutex, "instr", NIL, - ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); - - mem_anchor = NULL; - - /* Install instrumentation functions */ - ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs)); - - sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs)); - - sys_memzero((void *) &stats->tot, sizeof(Stat_t)); - sys_memzero((void *) stats->a, sizeof(Stat_t)*(ERTS_ALC_A_MAX+1)); - sys_memzero((void *) stats->c, sizeof(Stat_t)*(ERTS_ALC_C_MAX+1)); - sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1)); - - for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - if (erts_allctrs_info[i].enabled) - stats->ap[i] = &stats->a[i]; - else - stats->ap[i] = &stats->a[ERTS_ALC_A_SYSTEM]; - } - - if (map_stat) { - - erts_mtx_init(&instr_x_mutex, "instr_x", NIL, - ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); - - erts_instr_memory_map = 1; - erts_instr_stat = 1; - for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - erts_allctrs[i].alloc = map_stat_alloc; - erts_allctrs[i].realloc = map_stat_realloc; - erts_allctrs[i].free = map_stat_free; - erts_allctrs[i].extra = (void *) &real_allctrs[i]; - } - instr_wrapper.lock = map_stat_pre_lock; - instr_wrapper.unlock = map_stat_pre_unlock; - extra_sz = MAP_STAT_BLOCK_HEADER_SIZE; - } - else { - erts_instr_stat = 1; - for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) { - erts_allctrs[i].alloc = stat_alloc; - erts_allctrs[i].realloc = stat_realloc; - erts_allctrs[i].free = stat_free; - erts_allctrs[i].extra = (void *) &real_allctrs[i]; - } - instr_wrapper.lock = stat_pre_lock; - instr_wrapper.unlock = stat_pre_unlock; - extra_sz = STAT_BLOCK_HEADER_SIZE; - } - erts_allctr_wrapper_prelock_init(&instr_wrapper); - return extra_sz; -} - diff --git a/erts/emulator/beam/erl_instrument.h b/erts/emulator/beam/erl_instrument.h deleted file mode 100644 index 351172b2fa..0000000000 --- a/erts/emulator/beam/erl_instrument.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright 
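erts_instr_init() above installs the instrumentation by function-table interposition: the live erts_allctrs table is copied into real_allctrs, then each entry is repointed at a wrapper whose extra field leads back to the saved original. A sketch of the pattern with invented names and a trimmed-down table type:

#include <string.h>

typedef struct {
    void *(*alloc)(size_t size, void *extra);
    void *extra;
} AllocFns;

#define N_ALLOCATORS 4
static AllocFns allctrs[N_ALLOCATORS];      /* live dispatch table */
static AllocFns real_allctrs[N_ALLOCATORS]; /* saved originals */

static void *counting_alloc(size_t size, void *extra)
{
    AllocFns *real = (AllocFns *) extra;    /* wrapper finds the original here */
    /* ...update statistics... */
    return real->alloc(size, real->extra);
}

static void install_wrappers(void)
{
    int i;
    memcpy(real_allctrs, allctrs, sizeof(allctrs));
    for (i = 0; i < N_ALLOCATORS; i++) {
        allctrs[i].alloc = counting_alloc;
        allctrs[i].extra = &real_allctrs[i];
    }
}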
Ericsson AB 2003-2016. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * %CopyrightEnd% - */ - -#ifndef ERL_INSTRUMENT_H__ -#define ERL_INSTRUMENT_H__ - -#include "erl_mtrace.h" - -#define ERTS_INSTR_VSN 2 - -extern int erts_instr_memory_map; -extern int erts_instr_stat; - -Uint erts_instr_init(int stat, int map_stat); -int erts_instr_dump_memory_map_to(fmtfn_t to, void* to_arg); -int erts_instr_dump_memory_map(const char *name); -Eterm erts_instr_get_memory_map(Process *process); -int erts_instr_dump_stat_to(fmtfn_t to, void* to_arg, int begin_max_period); -int erts_instr_dump_stat(const char *name, int begin_max_period); -Eterm erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period); -Eterm erts_instr_get_type_info(Process *proc); -Uint erts_instr_get_total(void); -Uint erts_instr_get_max_total(void); - -#endif diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index d66410367b..463ae898a3 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -217,6 +217,14 @@ typedef struct { static lc_matrix_t tot_lc_matrix; +#define ERTS_LC_FB_CHUNK_SIZE 10 + +typedef struct lc_alloc_chunk_t_ lc_alloc_chunk_t; +struct lc_alloc_chunk_t_ { + lc_alloc_chunk_t* next; + lc_free_block_t array[ERTS_LC_FB_CHUNK_SIZE]; +}; + typedef struct lc_thread_t_ lc_thread_t; struct lc_thread_t_ { char *thread_name; @@ -227,6 +235,7 @@ struct lc_thread_t_ { lc_locked_lock_list_t locked; lc_locked_lock_list_t required; lc_free_block_t *free_blocks; + lc_alloc_chunk_t *chunks; lc_matrix_t matrix; }; @@ -235,14 +244,6 @@ static ethr_tsd_key locks_key; static lc_thread_t *lc_threads = NULL; static ethr_spinlock_t lc_threads_lock; - -#ifdef ERTS_LC_STATIC_ALLOC -#define ERTS_LC_FB_CHUNK_SIZE 10000 -#else -#define ERTS_LC_FB_CHUNK_SIZE 10 -#endif - - static ERTS_INLINE void lc_lock_threads(void) { @@ -268,12 +269,16 @@ static ERTS_INLINE void lc_free(lc_thread_t* thr, lc_locked_lock_t *p) static lc_locked_lock_t *lc_core_alloc(lc_thread_t* thr) { int i; - lc_free_block_t *fbs; - fbs = (lc_free_block_t *) malloc(sizeof(lc_free_block_t) - * ERTS_LC_FB_CHUNK_SIZE); - if (!fbs) { + lc_alloc_chunk_t* chunk; + lc_free_block_t* fbs; + chunk = (lc_alloc_chunk_t*) malloc(sizeof(lc_alloc_chunk_t)); + if (!chunk) { ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); } + chunk->next = thr->chunks; + thr->chunks = chunk; + + fbs = chunk->array; for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) { #ifdef DEBUG sys_memset((void *) &fbs[i], 0xdf, sizeof(lc_free_block_t)); @@ -321,6 +326,7 @@ create_thread_data(char *thread_name) thr->locked.last = NULL; thr->prev = NULL; thr->free_blocks = NULL; + thr->chunks = NULL; sys_memzero(&thr->matrix, sizeof(thr->matrix)); lc_lock_threads(); @@ -336,7 +342,7 @@ create_thread_data(char *thread_name) static void collect_matrix(lc_matrix_t*); static void -destroy_locked_locks(lc_thread_t *thr) +destroy_thread_data(lc_thread_t *thr) { ASSERT(thr->thread_name); free((void *) 
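The erl_lock_check.c change above stops lc_core_alloc() from leaking its free-block arrays: each array now lives inside an lc_alloc_chunk_t that is remembered on thr->chunks, so destroy_thread_data() can walk the chunk list and release everything at thread exit. The pattern, sketched with simplified types:

#include <stdlib.h>

#define CHUNK_SIZE 10

typedef struct Block { struct Block *next_free; /* ...payload... */ } Block;

typedef struct Chunk {
    struct Chunk *next;
    Block array[CHUNK_SIZE];
} Chunk;

typedef struct {
    Block *free_blocks;
    Chunk *chunks;
} ThreadData;

static Block *pool_alloc(ThreadData *thr)
{
    Block *b;
    if (!thr->free_blocks) {
        int i;
        Chunk *c = malloc(sizeof(Chunk));
        if (!c)
            abort();                   /* the lock checker also aborts on OOM */
        c->next = thr->chunks;         /* remember the chunk for the final free */
        thr->chunks = c;
        for (i = 0; i < CHUNK_SIZE; i++) {
            c->array[i].next_free = thr->free_blocks;
            thr->free_blocks = &c->array[i];
        }
    }
    b = thr->free_blocks;
    thr->free_blocks = b->next_free;
    return b;
}

static void pool_destroy(ThreadData *thr)
{
    while (thr->chunks) {
        Chunk *c = thr->chunks;
        thr->chunks = c->next;
        free(c);                       /* frees every block in the chunk */
    }
    thr->free_blocks = NULL;
}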
thr->thread_name); @@ -359,8 +365,13 @@ destroy_locked_locks(lc_thread_t *thr) lc_unlock_threads(); - free((void *) thr); + while (thr->chunks) { + lc_alloc_chunk_t* free_me = thr->chunks; + thr->chunks = thr->chunks->next; + free(free_me); + } + free((void *) thr); } static ERTS_INLINE lc_thread_t * @@ -615,7 +626,7 @@ thread_exit_handler(void) print_curr_locks(thr); lc_abort(); } - destroy_locked_locks(thr); + destroy_thread_data(thr); /* erts_tsd_set(locks_key, NULL); */ } } diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c index 4ec6960997..48154b5d0f 100644 --- a/erts/emulator/beam/erl_map.c +++ b/erts/emulator/beam/erl_map.c @@ -43,7 +43,9 @@ * * DONE: * - erlang:is_map/1 + * - erlang:is_map_key/2 * - erlang:map_size/1 + * - erlang:map_get/2 * * - maps:find/2 * - maps:from_list/1 @@ -202,7 +204,7 @@ BIF_RETTYPE maps_find_2(BIF_ALIST_2) { BIF_ERROR(BIF_P, BADMAP); } -/* maps:get/2 +/* maps:get/2 and erlang:map_get/2 * return value if key *matches* a key in the map * exception badkey if none matches */ @@ -223,6 +225,10 @@ BIF_RETTYPE maps_get_2(BIF_ALIST_2) { BIF_ERROR(BIF_P, BADMAP); } +BIF_RETTYPE map_get_2(BIF_ALIST_2) { + BIF_RET(maps_get_2(BIF_CALL_ARGS)); +} + /* maps:from_list/1 * List may be unsorted [{K,V}] */ @@ -914,7 +920,7 @@ static int hxnodecmp(hxnode_t *a, hxnode_t *b) { return -1; } -/* maps:is_key/2 */ +/* maps:is_key/2 and erlang:is_map_key/2 */ BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) { if (is_map(BIF_ARG_2)) { @@ -924,6 +930,10 @@ BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) { BIF_ERROR(BIF_P, BADMAP); } +BIF_RETTYPE is_map_key_2(BIF_ALIST_2) { + BIF_RET(maps_is_key_2(BIF_CALL_ARGS)); +} + /* maps:keys/1 */ BIF_RETTYPE maps_keys_1(BIF_ALIST_1) { @@ -3048,7 +3058,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { Uint path_length = 0; Uint *path_rest = NULL; int i, elems, orig_elems; - Eterm node = map, res, *path_ptr = NULL, *hp; + Eterm node = map, res, *patch_ptr = NULL, *hp; /* A stack WSTACK is used when traversing the hashmap. * It contains: node, idx, sz, ptr @@ -3107,15 +3117,22 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { } if (type == iterator) { - /* iterator uses the format {K, V, {K, V, {K, V, [Path | Map]}}}, - * so each element is 4 words large */ + /* + * Iterator uses the format {K1, V1, {K2, V2, {K3, V3, [Path | Map]}}}, + * so each element is 4 words large. + * To make iteration order independent of input reductions + * the KV-pairs are here built in DESTRUCTIVE non-reverse order. + */ hp = HAlloc(BIF_P, 4 * elems); - res = am_none; } else { - /* list used the format [Path, Map, {K,V}, {K,V} | BIF_ARG_3], - * so each element is 2+3 words large */ + /* + * List used the format [Path, Map, {K3,V3}, {K2,V2}, {K1,V1} | BIF_ARG_3], + * so each element is 2+3 words large. + * To make list order independent of input reductions + * the KV-pairs are here built in FUNCTIONAL reverse order + * as this is how the list as a whole is constructed. 
+ */ hp = HAlloc(BIF_P, (2 + 3) * elems); - res = BIF_ARG_3; } orig_elems = elems; @@ -3139,12 +3156,15 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { if (is_list(ptr[PATH_ELEM(curr_path)])) { Eterm *lst = list_val(ptr[PATH_ELEM(curr_path)]); if (type == iterator) { - res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4; - /* Note where we should patch the Iterator is needed */ - path_ptr = hp-1; + res = make_tuple(hp); + hp[0] = make_arityval(3); + hp[1] = CAR(lst); + hp[2] = CDR(lst); + patch_ptr = &hp[3]; + hp += 4; } else { Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3; - res = CONS(hp, tup, res); hp += 2; + res = CONS(hp, tup, BIF_ARG_3); hp += 2; } elems--; break; @@ -3178,7 +3198,12 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { while (idx < sz && elems != 0 && is_list(ptr[idx])) { Eterm *lst = list_val(ptr[idx]); if (type == iterator) { - res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4; + *patch_ptr = make_tuple(hp); + hp[0] = make_arityval(3); + hp[1] = CAR(lst); + hp[2] = CDR(lst); + patch_ptr = &hp[3]; + hp += 4; } else { Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3; res = CONS(hp, tup, res); hp += 2; @@ -3276,7 +3301,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { if (type == iterator) { hp = HAlloc(BIF_P, 2); - *path_ptr = CONS(hp, path, map); hp += 2; + *patch_ptr = CONS(hp, path, map); hp += 2; } else { hp = HAlloc(BIF_P, 4); res = CONS(hp, map, res); hp += 2; @@ -3284,6 +3309,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { } } else { if (type == iterator) { + *patch_ptr = am_none; HRelease(BIF_P, hp + 4 * elems, hp); } else { HRelease(BIF_P, hp + (2+3) * elems, hp); diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 34bd11d87c..507cc989d2 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -303,8 +303,10 @@ erts_queue_dist_message(Process *rcvr, erts_cleanup_messages(mp); } else { + LINK_MESSAGE(rcvr, mp); - LINK_MESSAGE(rcvr, mp, &mp->next, 1); + if (rcvr_locks & ERTS_PROC_LOCK_MAIN) + erts_proc_sig_fetch(rcvr); if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); @@ -314,9 +316,8 @@ erts_queue_dist_message(Process *rcvr, } /* Add messages last in message queue */ -static Sint +static void queue_messages(Process* receiver, - erts_aint32_t *receiver_state, ErtsProcLocks receiver_locks, ErtsMessage* first, ErtsMessage** last, @@ -325,86 +326,124 @@ queue_messages(Process* receiver, int locked_msgq = 0; erts_aint32_t state; - ASSERT(is_value(ERL_MESSAGE_TERM(first))); - ASSERT(is_value(ERL_MESSAGE_FROM(first))); - ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined || - ERL_MESSAGE_TOKEN(first) == NIL || - is_tuple(ERL_MESSAGE_TOKEN(first))); - -#ifdef ERTS_ENABLE_LOCK_CHECK - ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ || - receiver_locks == erts_proc_lc_my_proc_locks(receiver)); +#ifdef DEBUG + { + ErtsMessage* fmsg = ERTS_SIG_IS_MSG(first) ? 
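The iterator is now chained front-to-back: each step writes its tuple into the hole left by the previous step through patch_ptr, and the trailing [Path | Map] cons (or am_none) is patched in last, which is what makes iteration order independent of how many reductions each call got. The same technique on a plain linked list, with illustrative types:

#include <stdlib.h>

typedef struct Node { int key, val; struct Node *next; } Node;

/* Build the list in input order by keeping the address of the slot that
 * the next node must be written into. */
static Node *build_in_order(const int *keys, const int *vals, int n)
{
    Node *head = NULL;
    Node **patch = &head;
    int i;
    for (i = 0; i < n; i++) {
        Node *nd = malloc(sizeof(Node));
        if (!nd)
            abort();
        nd->key = keys[i];
        nd->val = vals[i];
        nd->next = NULL;        /* hole; patched by the next iteration */
        *patch = nd;
        patch = &nd->next;      /* remember where the successor goes */
    }
    return head;
}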
first : first->next; + ASSERT(fmsg); + ASSERT(is_value(ERL_MESSAGE_TERM(fmsg))); + ASSERT(is_value(ERL_MESSAGE_FROM(fmsg))); + ASSERT(ERL_MESSAGE_TOKEN(fmsg) == am_undefined || + ERL_MESSAGE_TOKEN(fmsg) == NIL || + is_tuple(ERL_MESSAGE_TOKEN(fmsg))); + } #endif - if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) { - if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) { - ErtsProcLocks need_locks; - - if (receiver_state) - state = *receiver_state; - else - state = erts_atomic32_read_nob(&receiver->state); - if (state & ERTS_PSFLG_EXITING) - goto exiting; + ERTS_LC_ASSERT((erts_proc_lc_my_proc_locks(receiver) & ERTS_PROC_LOCK_MSGQ) + == (receiver_locks & ERTS_PROC_LOCK_MSGQ)); - need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ); - if (need_locks) { - erts_proc_unlock(receiver, need_locks); - } - need_locks |= ERTS_PROC_LOCK_MSGQ; - erts_proc_lock(receiver, need_locks); - } + if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) { + erts_proc_lock(receiver, ERTS_PROC_LOCK_MSGQ); locked_msgq = 1; } - state = erts_atomic32_read_nob(&receiver->state); if (state & ERTS_PSFLG_EXITING) { - exiting: /* Drop message if receiver is exiting or has a pending exit... */ if (locked_msgq) - erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); + erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); + if (ERTS_SIG_IS_NON_MSG(first)) { + ErtsSchedulerData* esdp = erts_get_scheduler_data(); + ASSERT(esdp); + ASSERT(!esdp->pending_signal.sig); + esdp->pending_signal.sig = (ErtsSignal*) first; + esdp->pending_signal.to = receiver->common.id; + first = first->next; + } erts_cleanup_messages(first); - return 0; + return; + } + + if (last == &first->next) { + ASSERT(len == 1); + LINK_MESSAGE(receiver, first); + } + else { + erts_enqueue_signals(receiver, first, last, NULL, len, state); } - LINK_MESSAGE(receiver, first, last, len); + if (receiver_locks & ERTS_PROC_LOCK_MAIN) + erts_proc_sig_fetch(receiver); if (locked_msgq) { erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); } - erts_proc_notify_new_message(receiver, receiver_locks); - return 0; + if (last == &first->next) + erts_proc_notify_new_message(receiver, receiver_locks); + else + erts_proc_notify_new_sig(receiver, state, ERTS_PSFLG_ACTIVE); } -static Sint -queue_message(Process* receiver, - erts_aint32_t *receiver_state, - ErtsProcLocks receiver_locks, - ErtsMessage* mp, Eterm msg, Eterm from) +static ERTS_INLINE +ErtsMessage* prepend_pending_sig_maybe(Process* sender, Process* receiver, + ErtsMessage* mp) { - ERL_MESSAGE_TERM(mp) = msg; - ERL_MESSAGE_FROM(mp) = from; - return queue_messages(receiver, receiver_state, receiver_locks, - mp, &mp->next, 1); + ErtsSchedulerData* esdp = sender->scheduler_data; + ErtsSignal* pend_sig; + + if (!esdp || esdp->pending_signal.to != receiver->common.id) + return mp; + + pend_sig = esdp->pending_signal.sig; + + ASSERT(esdp->pending_signal.dbg_from == sender); + esdp->pending_signal.sig = NULL; + esdp->pending_signal.to = THE_NON_VALUE; + pend_sig->common.next = mp; + pend_sig->common.specific.next = NULL; + return (ErtsMessage*) pend_sig; } -Sint +/** + * + * @brief Send one message from *NOT* a local process. 
+ * + */ +void erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks, ErtsMessage* mp, Eterm msg, Eterm from) { - return queue_message(receiver, NULL, receiver_locks, mp, msg, from); + ASSERT(is_not_internal_pid(from)); + ERL_MESSAGE_TERM(mp) = msg; + ERL_MESSAGE_FROM(mp) = from; + queue_messages(receiver, receiver_locks, mp, &mp->next, 1); } +/** + * @brief Send one message from a local process. + */ +void +erts_queue_proc_message(Process* sender, + Process* receiver, ErtsProcLocks receiver_locks, + ErtsMessage* mp, Eterm msg) +{ + ERL_MESSAGE_TERM(mp) = msg; + ERL_MESSAGE_FROM(mp) = sender->common.id; + queue_messages(receiver, receiver_locks, + prepend_pending_sig_maybe(sender, receiver, mp), + &mp->next, 1); +} -Sint -erts_queue_messages(Process* receiver, ErtsProcLocks receiver_locks, - ErtsMessage* first, ErtsMessage** last, Uint len) + +void +erts_queue_proc_messages(Process* sender, + Process* receiver, ErtsProcLocks receiver_locks, + ErtsMessage* first, ErtsMessage** last, Uint len) { - return queue_messages(receiver, NULL, receiver_locks, - first, last, len); + queue_messages(receiver, receiver_locks, + prepend_pending_sig_maybe(sender, receiver, first), + last, len); } void @@ -541,7 +580,7 @@ erts_try_alloc_message_on_heap(Process *pp, * Send a local message when sender & receiver processes are known. */ -Sint +void erts_send_message(Process* sender, Process* receiver, ErtsProcLocks *receiver_locks, @@ -552,7 +591,6 @@ erts_send_message(Process* sender, ErtsMessage* mp; ErlOffHeap *ohp; Eterm token = NIL; - Sint res = 0; #ifdef USE_VM_PROBES DTRACE_CHARBUF(sender_name, 64); DTRACE_CHARBUF(receiver_name, 64); @@ -695,13 +733,8 @@ erts_send_message(Process* sender, #ifdef USE_VM_PROBES ERL_MESSAGE_DT_UTAG(mp) = utag; #endif - res = queue_message(receiver, - &receiver_state, - *receiver_locks, - mp, message, - sender->common.id); - return res; + erts_queue_proc_message(sender, receiver, *receiver_locks, mp, message); } diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index ee87297ba4..d120111634 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -229,7 +229,7 @@ typedef union { typedef struct { /* pointers to next pointers pointing to... */ ErtsMessage **next; /* ... next (non-message) signal */ - ErtsMessage **last; /* ... next (non-message) signal */ + ErtsMessage **last; /* ... 
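prepend_pending_sig_maybe() cooperates with the per-scheduler pending-signal slot (filled by proc_queue_signal() in erl_proc_sig_queue.c further down): if the sending scheduler has a buffered monitor signal for this same receiver, it is flushed in front of the outgoing message so the signal can never be reordered after it. Distilled sketch with simplified types and a single-slot buffer:

typedef struct Msg { struct Msg *next; /* ...payload... */ } Msg;

typedef struct {
    Msg  *pending_sig;  /* at most one buffered signal per scheduler */
    long  pending_to;   /* its destination, or -1 when empty */
} Sched;

/* Returns the chain to enqueue: either just `mp`, or pending signal + mp. */
static Msg *prepend_pending_maybe(Sched *esdp, long to, Msg *mp)
{
    Msg *pend = esdp->pending_sig;
    if (!pend || esdp->pending_to != to)
        return mp;                 /* nothing buffered for this receiver */
    esdp->pending_sig = NULL;
    esdp->pending_to = -1;
    pend->next = mp;               /* signal goes in front of the message */
    return pend;
}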
last (non-message) signal */ } ErtsMsgQNMSigs; /* Size of default message buffer (erl_message.c) */ @@ -296,8 +296,11 @@ typedef struct { typedef struct { ErtsMessage* first; ErtsMessage** last; /* point to the last next pointer */ - Sint len; /* queue length */ + Sint len; /* number of messages in queue */ ErtsMsgQNMSigs nmsigs; +#ifdef ERTS_PROC_SIG_HARD_DEBUG + int may_contain_heap_terms; +#endif } ErtsSignalInQueue; typedef struct erl_trace_message_queue__ { @@ -364,13 +367,14 @@ typedef struct erl_trace_message_queue__ { #endif -/* Add message last_msg in message queue */ -#define LINK_MESSAGE(p, first_msg, last_msg, num_msgs) \ +/* Add one message last in message queue */ +#define LINK_MESSAGE(p, msg) \ do { \ + ASSERT(ERTS_SIG_IS_MSG(msg)); \ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \ - *(p)->sig_inq.last = (first_msg); \ - (p)->sig_inq.last = (last_msg); \ - (p)->sig_inq.len += (num_msgs); \ + *(p)->sig_inq.last = (msg); \ + (p)->sig_inq.last = &(msg)->next; \ + (p)->sig_inq.len++; \ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \ } while(0) @@ -437,11 +441,12 @@ ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint, Eterm *, Uint); void free_message_buffer(ErlHeapFragment *); void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm); -Sint erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm); -Sint erts_queue_messages(Process*, ErtsProcLocks, - ErtsMessage*, ErtsMessage**, Uint); +void erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm); +void erts_queue_proc_message(Process* from,Process* to, ErtsProcLocks,ErtsMessage*, Eterm); +void erts_queue_proc_messages(Process* from, Process* to, ErtsProcLocks, + ErtsMessage*, ErtsMessage**, Uint); void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm); -Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned); +void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned); void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp); Uint erts_msg_attached_data_size_aux(ErtsMessage *msg); diff --git a/erts/emulator/beam/erl_monitor_link.c b/erts/emulator/beam/erl_monitor_link.c index 70f36fb6b7..48d9bd4ca5 100644 --- a/erts/emulator/beam/erl_monitor_link.c +++ b/erts/emulator/beam/erl_monitor_link.c @@ -630,7 +630,9 @@ erts_monitor_tree_lookup_create(ErtsMonitor **root, int *created, Uint16 type, ErtsMonitor *res; ErtsMonitorCreateCtxt cctxt = {type, origin}; - ERTS_ML_ASSERT(type == ERTS_MON_TYPE_NODE || type == ERTS_MON_TYPE_NODES); + ERTS_ML_ASSERT(type == ERTS_MON_TYPE_NODE + || type == ERTS_MON_TYPE_NODES + || type == ERTS_MON_TYPE_SUSPEND); res = (ErtsMonitor *) ml_rbt_lookup_create((ErtsMonLnkNode **) root, target, create_monitor, @@ -760,11 +762,13 @@ erts_monitor_create(Uint16 type, Eterm ref, Eterm orgn, Eterm trgt, Eterm name) switch (type) { case ERTS_MON_TYPE_PROC: case ERTS_MON_TYPE_PORT: - case ERTS_MON_TYPE_TIME_OFFSET: if (is_nil(name)) { ErtsMonitorDataHeap *mdhp; ErtsORefThing *ortp; + case ERTS_MON_TYPE_TIME_OFFSET: + + ERTS_ML_ASSERT(is_nil(name)); ERTS_ML_ASSERT(is_immed(orgn) && is_immed(trgt)); ERTS_ML_ASSERT(is_internal_ordinary_ref(ref)); @@ -860,10 +864,38 @@ erts_monitor_create(Uint16 type, Eterm ref, Eterm orgn, Eterm trgt, Eterm name) mdep->dist = NULL; break; } - case ERTS_MON_TYPE_SUSPEND: - ERTS_INTERNAL_ERROR("Use erts_monitor_suspend_create() instead..."); - mdp = NULL; + case ERTS_MON_TYPE_SUSPEND: { + ErtsMonitorSuspend *msp; + + 
ERTS_ML_ASSERT(is_nil(name)); + ERTS_ML_ASSERT(is_nil(ref)); + ERTS_ML_ASSERT(is_internal_pid(orgn) && is_internal_pid(trgt)); + + msp = erts_alloc(ERTS_ALC_T_MONITOR_SUSPEND, + sizeof(ErtsMonitorSuspend)); + mdp = &msp->md; + ERTS_ML_ASSERT(((void *) mdp) == ((void *) msp)); + + mdp->ref = NIL; + + mdp->origin.other.item = trgt; + mdp->origin.offset = (Uint16) offsetof(ErtsMonitorData, origin); + mdp->origin.key_offset = (Uint16) offsetof(ErtsMonitor, other.item); + ERTS_ML_ASSERT(mdp->origin.key_offset >= mdp->origin.offset); + mdp->origin.flags = (Uint16) ERTS_ML_FLG_EXTENDED; + mdp->origin.type = type; + + mdp->target.other.item = orgn; + mdp->target.offset = (Uint16) offsetof(ErtsMonitorData, target); + mdp->target.key_offset = (Uint16) offsetof(ErtsMonitor, other.item); + mdp->target.flags = ERTS_ML_FLG_TARGET|ERTS_ML_FLG_EXTENDED; + mdp->target.type = type; + + msp->next = NULL; + erts_atomic_init_relb(&msp->state, 0); + break; + } default: ERTS_INTERNAL_ERROR("Invalid monitor type"); mdp = NULL; @@ -887,10 +919,11 @@ erts_monitor_destroy__(ErtsMonitorData *mdp) ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE)); ERTS_ML_ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME) == (mdp->target.flags & ERTS_ML_FLGS_SAME)); - ERTS_ML_ASSERT(mdp->origin.type != ERTS_MON_TYPE_SUSPEND); if (!(mdp->origin.flags & ERTS_ML_FLG_EXTENDED)) erts_free(ERTS_ALC_T_MONITOR, mdp); + else if (mdp->origin.type == ERTS_MON_TYPE_SUSPEND) + erts_free(ERTS_ALC_T_MONITOR_SUSPEND, mdp); else { ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp; ErlOffHeap oh; @@ -927,10 +960,10 @@ erts_monitor_size(ErtsMonitor *mon) Uint size, refc; ErtsMonitorData *mdp = erts_monitor_to_data(mon); - ERTS_ML_ASSERT(mon->type != ERTS_MON_TYPE_SUSPEND); - if (!(mon->flags & ERTS_ML_FLG_EXTENDED)) size = sizeof(ErtsMonitorDataHeap); + else if (mon->type == ERTS_MON_TYPE_SUSPEND) + size = sizeof(ErtsMonitorSuspend); else { ErtsMonitorDataExtended *mdep; Uint hsz = 0; @@ -957,54 +990,6 @@ erts_monitor_size(ErtsMonitor *mon) return size / refc; } - -/* suspend monitors... 
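Both nodes of a monitor record their byte offset back to the enclosing ErtsMonitorData (the offsetof assignments above), so erts_monitor_to_data() can recover the container from either the origin or the target node. This is the classic container_of trick; a sketch:

#include <stddef.h>

typedef struct { unsigned short offset; /* ... */ } MonNode;

typedef struct {
    MonNode origin;
    MonNode target;
} MonData;

static void init(MonData *md)
{
    md->origin.offset = (unsigned short) offsetof(MonData, origin); /* 0 */
    md->target.offset = (unsigned short) offsetof(MonData, target);
}

/* Works when handed either &md->origin or &md->target. */
static MonData *to_data(MonNode *node)
{
    return (MonData *) ((char *) node - node->offset);
}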
*/ - -ErtsMonitorSuspend * -erts_monitor_suspend_create(Eterm pid) -{ - ErtsMonitorSuspend *msp; - - ERTS_ML_ASSERT(is_internal_pid(pid)); - - msp = erts_alloc(ERTS_ALC_T_SUSPEND_MON, - sizeof(ErtsMonitorSuspend)); - msp->mon.offset = (Uint16) offsetof(ErtsMonitorSuspend, mon); - msp->mon.key_offset = (Uint16) offsetof(ErtsMonitor, other.item); - msp->mon.other.item = pid; - msp->mon.flags = 0; - msp->mon.type = ERTS_MON_TYPE_SUSPEND; - msp->pending = 0; - msp->active = 0; - return msp; -} - -static ErtsMonLnkNode * -create_monitor_suspend(Eterm pid, void *unused) -{ - ErtsMonitorSuspend *msp = erts_monitor_suspend_create(pid); - return (ErtsMonLnkNode *) &msp->mon; -} - -ErtsMonitorSuspend * -erts_monitor_suspend_tree_lookup_create(ErtsMonitor **root, int *created, - Eterm pid) -{ - ErtsMonitor *mon; - mon = (ErtsMonitor *) ml_rbt_lookup_create((ErtsMonLnkNode **) root, - pid, create_monitor_suspend, - NULL, - created); - return erts_monitor_suspend(mon); -} - -void -erts_monitor_suspend_destroy(ErtsMonitorSuspend *msp) -{ - ERTS_ML_ASSERT(!(msp->mon.flags & ERTS_ML_FLG_IN_TABLE)); - erts_free(ERTS_ALC_T_SUSPEND_MON, msp); -} - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ * Link Operations * * * diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h index 603aead8cc..9ff8aa509a 100644 --- a/erts/emulator/beam/erl_monitor_link.h +++ b/erts/emulator/beam/erl_monitor_link.h @@ -246,15 +246,28 @@ * * --- ERTS_MON_TYPE_SUSPEND ------------------------------------- * - * Suspend monitor. + * Suspend monitor. A local process (origin) suspends another + * local process (target). * - * Other Item: Suspendee process identifier - * Key: Suspendee process identifier - * - * Valid keys are only ordinary internal references. + * Origin: + * Other Item: Process identifier of suspendee + * (target) + * Key: Process identifier of suspendee + * (target) + * Target: + * Other Item: Process identifier of suspender + * (origin) + * Key: Process identifier of suspender + * (origin) + * Shared: + * Next: Pointer to another suspend monitor + * State: Number of suspends and a flag + * indicating if the suspend is + * active or not. * - * This type of monitor is a bit strange and the whole process - * suspend functionality should be improved... + * Origin part of the monitor is stored in the monitor tree of + * origin process and target part of the monitor is stored in + * monitor list for local targets on the target process. * * * @@ -638,11 +651,15 @@ struct ErtsMonitorDataExtended__ { Eterm heap[1]; /* heap start... 
*/ }; -typedef struct { - ErtsMonitor mon; - int pending; - int active; -} ErtsMonitorSuspend; +typedef struct ErtsMonitorSuspend__ ErtsMonitorSuspend; + +struct ErtsMonitorSuspend__ { + ErtsMonitorData md; /* origin = suspender; target = suspendee */ + ErtsMonitorSuspend *next; + erts_atomic_t state; +}; +#define ERTS_MSUSPEND_STATE_FLG_ACTIVE ((erts_aint_t) (((Uint) 1) << (sizeof(Uint)*8 - 1))) +#define ERTS_MSUSPEND_STATE_COUNTER_MASK (~ERTS_MSUSPEND_STATE_FLG_ACTIVE) /* * --- Monitor tree operations --- @@ -1094,24 +1111,25 @@ int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list, * * @brief Create a monitor * - * Can create all types of monitors exept for suspend monitors + * Can create all types of monitors * * When the funcion is called it is assumed that: * - 'ref' is an internal ordinary reference if type is ERTS_MON_TYPE_PROC, * ERTS_MON_TYPE_PORT, ERTS_MON_TYPE_TIME_OFFSET, or ERTS_MON_TYPE_RESOURCE - * - 'ref' is NIL if type is ERTS_MON_TYPE_NODE or ERTS_MON_TYPE_NODES + * - 'ref' is NIL if type is ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or + * ERTS_MON_TYPE_SUSPEND * - 'ref' is and ordinary internal reference or an external reference if * type is ERTS_MON_TYPE_DIST_PROC * - 'name' is an atom or NIL if type is ERTS_MON_TYPE_PROC, * ERTS_MON_TYPE_PORT, or ERTS_MON_TYPE_DIST_PROC * - 'name is NIL if type is ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_RESOURCE, - * ERTS_MON_TYPE_NODE, or ERTS_MON_TYPE_NODES + * ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND * If the above is not true, bad things will happen. * * @param[in] type ERTS_MON_TYPE_PROC, ERTS_MON_TYPE_PORT, * ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_DIST_PROC, * ERTS_MON_TYPE_RESOURCE, ERTS_MON_TYPE_NODE, - * or ERTS_MON_TYPE_NODES + * ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND * * @param[in] ref A reference or NIL depending on type * @@ -1119,6 +1137,10 @@ int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list, * * @param[in] target The key of the target * + * @param[in] name An atom (the name) or NIL depending on type + * + * @returns A pointer to monitor data structure + * */ ErtsMonitorData *erts_monitor_create(Uint16 type, Eterm ref, Eterm origin, Eterm target, Eterm name); @@ -1347,7 +1369,8 @@ erts_monitor_to_data(ErtsMonitor *mon) ERTS_ML_ASSERT(erts_monitor_origin_offset == (size_t) mdp->origin.offset); ERTS_ML_ASSERT(!!(mdp->target.flags & ERTS_ML_FLG_TARGET)); ERTS_ML_ASSERT(erts_monitor_target_offset == (size_t) mdp->target.offset); - if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES) { + if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES + || mon->type == ERTS_MON_TYPE_SUSPEND) { ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->origin.key_offset); ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->target.key_offset); } diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index 0e0013e8a4..0fbf0eb03a 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -388,12 +388,18 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); + ASSERT(esdp->current_nif == NULL); + esdp->current_nif = &env; + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + ASSERT(esdp->current_nif == &env); + esdp->current_nif = NULL; + 
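The new ErtsMonitorSuspend packs a suspend count and an "active" flag into one word, per the two macros above: the top bit is the flag and the remaining bits are the counter. A plain, non-atomic illustration of the encoding; the real code manipulates the word with erts_atomic operations:

#include <stdio.h>

#define FLG_ACTIVE ((unsigned long) 1 << (sizeof(unsigned long) * 8 - 1))
#define CNT_MASK   (~FLG_ACTIVE)

int main(void)
{
    unsigned long state = 0;

    state = (state + 1) | FLG_ACTIVE;  /* first suspend: count 1, now active */
    state += 1;                        /* nested suspend: count 2 */
    state -= 1;                        /* one resume: count 1, still active */

    printf("active=%d count=%lu\n",
           !!(state & FLG_ACTIVE), state & CNT_MASK);
    return 0;
}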
ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); ASSERT(env.proc->next == c_p); @@ -659,7 +665,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) rp_locks = 0; if (rp->common.id == c_p->common.id) rp_locks = c_p_locks; - erts_queue_messages(rp, rp_locks, first, last, len); + erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len); if (rp->common.id == c_p->common.id) rp_locks &= ~c_p_locks; if (rp_locks) @@ -850,7 +856,10 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, } } - erts_queue_message(rp, rp_locks, mp, msg, from); + if (c_p) + erts_queue_proc_message(c_p, rp, rp_locks, mp, msg); + else + erts_queue_message(rp, rp_locks, mp, msg, from); done: if (c_p == rp) @@ -1246,8 +1255,10 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env, if (is_non_value(*term)) { return 0; } - erts_factory_close(&factory); - cache_env(dst_env); + if (size > 0) { + erts_factory_close(&factory); + cache_env(dst_env); + } ASSERT(bp > data); return bp - data; @@ -4258,6 +4269,10 @@ int erts_nif_get_funcs(struct erl_module_nif* mod, return mod->entry.num_of_funcs; } +Module *erts_nif_get_module(struct erl_module_nif *nif_mod) { + return nif_mod->mod; +} + Eterm erts_nif_call_function(Process *p, Process *tracee, struct erl_module_nif* mod, ErlNifFunc *fun, int argc, Eterm *argv) diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h index 2a98a6f00b..9b52b648e5 100644 --- a/erts/emulator/beam/erl_port.h +++ b/erts/emulator/beam/erl_port.h @@ -485,6 +485,8 @@ ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm); ERTS_GLB_INLINE int erts_is_port_alive(Eterm); ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm); ERTS_GLB_INLINE int erts_port_driver_callback_epilogue(Port *, erts_aint32_t *); +ERTS_GLB_INLINE Port *erts_get_current_port(void); +ERTS_GLB_INLINE Eterm erts_get_current_port_id(void); #define erts_drvport2port(Prt) erts_drvport2port_state((Prt), NULL) @@ -812,6 +814,20 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep) return reds; } +ERTS_GLB_INLINE +Port *erts_get_current_port(void) +{ + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + return esdp ? esdp->current_port : NULL; +} + +ERTS_GLB_INLINE +Eterm erts_get_current_port_id(void) +{ + Port *port = erts_get_current_port(); + return port ? port->common.id : THE_NON_VALUE; +} + #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ void erts_port_resume_procs(Port *); diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c index bcc4fc6d9b..e9b41ad298 100644 --- a/erts/emulator/beam/erl_proc_sig_queue.c +++ b/erts/emulator/beam/erl_proc_sig_queue.c @@ -39,6 +39,7 @@ #include "big.h" #include "erl_gc.h" #include "bif.h" +#include "erl_bif_unique.h" #include "erl_proc_sig_queue.h" #include "dtrace-wrapper.h" @@ -49,7 +50,7 @@ * Note that not all signal are handled using this functionality! 
*/ -#define ERTS_SIG_Q_OP_MAX 11 +#define ERTS_SIG_Q_OP_MAX 13 #define ERTS_SIG_Q_OP_EXIT 0 #define ERTS_SIG_Q_OP_EXIT_LINKED 1 @@ -62,7 +63,9 @@ #define ERTS_SIG_Q_OP_TRACE_CHANGE_STATE 8 #define ERTS_SIG_Q_OP_PERSISTENT_MON_MSG 9 #define ERTS_SIG_Q_OP_IS_ALIVE 10 -#define ERTS_SIG_Q_OP_PROCESS_INFO ERTS_SIG_Q_OP_MAX +#define ERTS_SIG_Q_OP_PROCESS_INFO 11 +#define ERTS_SIG_Q_OP_SYNC_SUSPEND 12 +#define ERTS_SIG_Q_OP_RPC ERTS_SIG_Q_OP_MAX #define ERTS_SIG_Q_TYPE_MAX (ERTS_MON_LNK_TYPE_MAX + 5) @@ -154,6 +157,17 @@ typedef struct { } ErtsIsAliveRequest; typedef struct { + Eterm message; + Eterm requester; + int async; +} ErtsSyncSuspendRequest; + +typedef struct { + ErtsMonitorSuspend *mon; + ErtsMessage *sync; +} ErtsProcSigPendingSuspend; + +typedef struct { ErtsSignalCommon common; Sint refc; Sint delayed_len; @@ -176,6 +190,15 @@ typedef struct { #define ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE ((Sint) -1) #define ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC ((Sint) -2) +typedef struct { + ErtsSignalCommon common; + Eterm requester; + Eterm (*func)(Process *, void *, int *, ErlHeapFragment **); + void *arg; + Eterm ref; + ErtsORefThing oref_thing; +} ErtsProcSigRPC; + static int handle_msg_tracing(Process *c_p, ErtsSigRecvTracing *tracing, ErtsMessage ***next_nm_sig); @@ -308,9 +331,8 @@ destroy_sig_group_leader(ErtsSigGroupLeader *sgl) } static ERTS_INLINE void -sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op, - Process *rp, ErtsMessage **first, - ErtsMessage **last, ErtsMessage ***last_next) +sig_enqueue_trace(Process *c_p, ErtsMessage **sigp, int op, + Process *rp, ErtsMessage ***last_next) { switch (op) { case ERTS_SIG_Q_OP_LINK: @@ -326,12 +348,11 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op, * Prepend a trace-change-state signal before the * link signal... 
*/ - tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_TRACE_CHANGE_STATE, ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO, 0); ti = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigTraceInfo)); - ti->common.next = *last; + ti->common.next = *sigp; ti->common.specific.next = &ti->common.next; ti->common.tag = tag; ti->flags_on = ERTS_TRACE_FLAGS(c_p) & TRACEE_FLAGS; @@ -344,8 +365,9 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op, erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } erts_tracer_update(&ti->tracer, ERTS_TRACER(c_p)); - *first = (ErtsMessage *) ti; - *last_next = &ti->common.next; + *sigp = (ErtsMessage *) ti; + if (!*last_next || *last_next == sigp) + *last_next = &ti->common.next; } break; @@ -354,6 +376,7 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op, case ERTS_SIG_Q_OP_EXIT_LINKED: if (DTRACE_ENABLED(process_exit_signal)) { + ErtsMessage* sig = *sigp; Uint16 type = ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag); Eterm reason, from; @@ -430,9 +453,24 @@ sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig, ErtsMessage *last } } +#ifdef DEBUG +static int dbg_count_nmsigs(ErtsMessage *first) +{ + ErtsMessage *sig; + int cnt = 0; + + for (sig = first; sig; sig = sig->next) { + if (ERTS_SIG_IS_NON_MSG(sig)) + ++cnt; + } + return cnt; +} +#endif + static ERTS_INLINE erts_aint32_t enqueue_signals(Process *rp, ErtsMessage *first, - ErtsMessage *last, ErtsMessage **last_next, + ErtsMessage **last, ErtsMessage **last_next, + Uint num_msgs, erts_aint32_t in_state) { erts_aint32_t state = in_state; @@ -442,13 +480,23 @@ enqueue_signals(Process *rp, ErtsMessage *first, ASSERT(!*this); *this = first; - rp->sig_inq.last = &last->next; + rp->sig_inq.last = last; if (!rp->sig_inq.nmsigs.next) { ASSERT(!rp->sig_inq.nmsigs.last); - rp->sig_inq.nmsigs.next = this; + if (ERTS_SIG_IS_NON_MSG(first)) { + rp->sig_inq.nmsigs.next = this; + } + else if (last_next) { + ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next)); + rp->sig_inq.nmsigs.next = &first->next; + } + else + goto no_nmsig; + state = erts_atomic32_read_bor_nob(&rp->state, ERTS_PSFLG_SIG_IN_Q); + no_nmsig: ASSERT(!(state & ERTS_PSFLG_SIG_IN_Q)); } else { @@ -459,83 +507,168 @@ enqueue_signals(Process *rp, ErtsMessage *first, ASSERT(sig && !sig->common.specific.next); ASSERT(state & ERTS_PSFLG_SIG_IN_Q); - sig->common.specific.next = this; + if (ERTS_SIG_IS_NON_MSG(first)) { + sig->common.specific.next = this; + } + else if (last_next) { + ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next)); + sig->common.specific.next = &first->next; + } } if (last_next) { - ASSERT(first != last); + ASSERT(dbg_count_nmsigs(first) >= 2); rp->sig_inq.nmsigs.last = last_next; } - else { - ASSERT(first == last); + else if (ERTS_SIG_IS_NON_MSG(first)) { + ASSERT(dbg_count_nmsigs(first) == 1); rp->sig_inq.nmsigs.last = this; } + else + ASSERT(dbg_count_nmsigs(first) == 0); + + rp->sig_inq.len += num_msgs; ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(rp); return state; } -static ERTS_INLINE void -ensure_dirty_proc_handled(Eterm pid, - erts_aint32_t state, - erts_aint32_t prio) +erts_aint32_t erts_enqueue_signals(Process *rp, ErtsMessage *first, + ErtsMessage **last, ErtsMessage **last_next, + Uint num_msgs, + erts_aint32_t in_state) { - if (state & (ERTS_PSFLG_DIRTY_RUNNING - | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { - Eterm *hp; - ErtsMessage *mp; - Process *sig_handler; + return enqueue_signals(rp, first, last, last_next, num_msgs, in_state); +} - if (prio < 0) - prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); +void +erts_make_dirty_proc_handled(Eterm 
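Note that sig_inq.nmsigs does not store signal pointers: it stores the addresses of the next-pointers through which the first and last non-message signals were linked, which is what enqueue_signals() is threading above and what lets the fetch side splice non-message signals out without scanning the queue. Minimal model of that bookkeeping:

typedef struct Msg { struct Msg *next; int is_nmsig; } Msg;

typedef struct {
    Msg  *first;
    Msg **last;        /* address of the final next-pointer */
    Msg **nmsig_next;  /* address of the pointer to the first non-message signal */
} InQueue;

static void q_init(InQueue *q)
{
    q->first = 0;
    q->last = &q->first;
    q->nmsig_next = 0;
}

static void q_enqueue(InQueue *q, Msg *m)
{
    Msg **slot = q->last;     /* the next-pointer this element is linked through */
    m->next = 0;
    *slot = m;
    q->last = &m->next;
    if (m->is_nmsig && !q->nmsig_next)
        q->nmsig_next = slot; /* remember *where* it was linked, not what */
}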
pid, + erts_aint32_t state, + erts_aint32_t prio) +{ + Eterm *hp; + ErtsMessage *mp; + Process *sig_handler; - switch (prio) { - case PRIORITY_MAX: - sig_handler = erts_dirty_process_signal_handler_max; - break; - case PRIORITY_HIGH: - sig_handler = erts_dirty_process_signal_handler_high; - break; - default: - sig_handler = erts_dirty_process_signal_handler; - break; - } + ASSERT(state & (ERTS_PSFLG_DIRTY_RUNNING | + ERTS_PSFLG_DIRTY_RUNNING_SYS)); + + if (prio < 0) + prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); - /* Make sure signals are handled... */ - mp = erts_alloc_message(0, &hp); - erts_queue_message(sig_handler, 0, mp, pid, am_system); + switch (prio) { + case PRIORITY_MAX: + sig_handler = erts_dirty_process_signal_handler_max; + break; + case PRIORITY_HIGH: + sig_handler = erts_dirty_process_signal_handler_high; + break; + default: + sig_handler = erts_dirty_process_signal_handler; + break; } + + /* Make sure signals are handled... */ + mp = erts_alloc_message(0, &hp); + erts_queue_message(sig_handler, 0, mp, pid, am_system); } static void check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig); + static int proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op) { int res; Process *rp; - ErtsMessage *first, *last, **last_next; + ErtsMessage *first, *last, **last_next, **sigp; ErtsSchedulerData *esdp = erts_get_scheduler_data(); int is_normal_sched = !!esdp && esdp->type == ERTS_SCHED_NORMAL; erts_aint32_t state; + ErtsSignal *pend_sig; - if (is_normal_sched) - rp = erts_proc_lookup_raw(pid); - else - rp = erts_proc_lookup_raw_inc_refc(pid); + if (is_normal_sched) { + pend_sig = esdp->pending_signal.sig; + if (op == ERTS_SIG_Q_OP_MONITOR + && ((ErtsMonitor*)sig)->type == ERTS_MON_TYPE_PROC) { - if (!rp) - return 0; + if (!pend_sig) { + esdp->pending_signal.sig = sig; + esdp->pending_signal.to = pid; +#ifdef DEBUG + esdp->pending_signal.dbg_from = esdp->current_process; +#endif + return 1; + } + ASSERT(esdp->pending_signal.dbg_from == esdp->current_process); + if (pend_sig != sig) { + /* Switch them and send previously pending signal instead */ + Eterm pend_to = esdp->pending_signal.to; + esdp->pending_signal.sig = sig; + esdp->pending_signal.to = pid; + sig = pend_sig; + pid = pend_to; + } + else { + /* Caller wants to flush pending signal */ + ASSERT(pid == esdp->pending_signal.to); + esdp->pending_signal.sig = NULL; + esdp->pending_signal.to = THE_NON_VALUE; +#ifdef DEBUG + esdp->pending_signal.dbg_from = NULL; +#endif + pend_sig = NULL; + } + rp = erts_proc_lookup_raw(pid); + if (!rp) { + erts_proc_sig_send_monitor_down((ErtsMonitor*)sig, am_noproc); + return 1; + } + } + else if (pend_sig && pid == esdp->pending_signal.to) { + /* Flush pending signal to maintain signal order */ + esdp->pending_signal.sig = NULL; + esdp->pending_signal.to = THE_NON_VALUE; + + rp = erts_proc_lookup_raw(pid); + if (!rp) { + erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc); + return 0; + } + + /* Prepend pending signal */ + pend_sig->common.next = (ErtsMessage*) sig; + pend_sig->common.specific.next = &pend_sig->common.next; + first = (ErtsMessage*) pend_sig; + last = (ErtsMessage*) sig; + sigp = last_next = &pend_sig->common.next; + goto first_last_done; + } + else { + pend_sig = NULL; + rp = erts_proc_lookup_raw(pid); + if (!rp) + return 0; + } + } + else { + rp = erts_proc_lookup_raw_inc_refc(pid); + if (!rp) + return 0; + pend_sig = NULL; + } - sig->common.specific.next = NULL; first = last = (ErtsMessage *) sig; last_next = NULL; + sigp = &first; + 
+first_last_done: + sig->common.specific.next = NULL; /* may add signals before and/or after sig */ - sig_enqueue_trace(c_p, first, op, rp, - &first, &last, &last_next); + sig_enqueue_trace(c_p, sigp, op, rp, &last_next); last->next = NULL; @@ -546,7 +679,7 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op) if (ERTS_PSFLG_FREE & state) res = 0; else { - state = enqueue_signals(rp, first, last, last_next, state); + state = enqueue_signals(rp, first, &last->next, last_next, 0, state); if (ERTS_UNLIKELY(op == ERTS_SIG_Q_OP_PROCESS_INFO)) check_push_msgq_len_offs_marker(rp, sig); res = !0; @@ -554,17 +687,23 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op) erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ); - if (res == 0) + if (res == 0) { + if (pend_sig) { + if (sig == pend_sig) { + /* We did a switch, callers signal is now pending (still ok) */ + ASSERT(esdp->pending_signal.sig); + res = 1; + } + else { + ASSERT(first == (ErtsMessage*)pend_sig); + first = first->next; + } + erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc); + } sig_enqueue_trace_cleanup(first, sig, last); - - if (!(state & (ERTS_PSFLG_EXITING - | ERTS_PSFLG_ACTIVE_SYS - | ERTS_PSFLG_SIG_IN_Q))) { - /* Schedule process... */ - state = erts_proc_sys_schedule(rp, state, 0); } - ensure_dirty_proc_handled(rp->common.id, state, -1); + erts_proc_notify_new_sig(rp, state, 0); if (!is_normal_sched) erts_proc_dec_refc(rp); @@ -572,6 +711,24 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op) return res; } +void erts_proc_sig_send_pending(ErtsSchedulerData* esdp) +{ + ErtsSignal* sig = esdp->pending_signal.sig; + int op; + + ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL); + ASSERT(sig); + ASSERT(is_internal_pid(esdp->pending_signal.to)); + + op = ERTS_SIG_Q_OP_MONITOR; + ASSERT(op == ERTS_PROC_SIG_OP(sig->common.tag)); + + if (!proc_queue_signal(NULL, esdp->pending_signal.to, sig, op)) { + ErtsMonitor* mon = (ErtsMonitor*)sig; + erts_proc_sig_send_monitor_down(mon, am_noproc); + } +} + static int maybe_elevate_sig_handling_prio(Process *c_p, Eterm other) { @@ -602,7 +759,10 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other) if (res) { /* ensure handled if dirty executing... 
*/ state = erts_atomic32_read_nob(&rp->state); - ensure_dirty_proc_handled(other, state, my_prio); + if (state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + erts_make_dirty_proc_handled(other, state, my_prio); + } } } } @@ -612,11 +772,6 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other) void erts_proc_sig_fetch__(Process *proc) { -#ifdef ERTS_PROC_SIG_HARD_DEBUG - ErtsSignalPrivQueues sig_qs = proc->sig_qs; - ErtsSignalInQueue sig_inq = proc->sig_inq; -#endif - ASSERT(proc->sig_inq.first); if (!proc->sig_inq.nmsigs.next) { @@ -634,9 +789,7 @@ erts_proc_sig_fetch__(Process *proc) } } else { -#ifdef DEBUG erts_aint32_t s; -#endif ASSERT(proc->sig_inq.nmsigs.last); if (!proc->sig_qs.nmsigs.last) { ASSERT(!proc->sig_qs.nmsigs.next); @@ -645,16 +798,13 @@ erts_proc_sig_fetch__(Process *proc) else proc->sig_qs.nmsigs.next = proc->sig_inq.nmsigs.next; -#ifdef DEBUG - s = -#endif - erts_atomic32_read_bset_nob(&proc->state, + s = erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_SIG_Q | ERTS_PSFLG_SIG_IN_Q), ERTS_PSFLG_SIG_Q); ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) - == ERTS_PSFLG_SIG_IN_Q); + == ERTS_PSFLG_SIG_IN_Q); (void)s; } else { ErtsSignal *sig; @@ -667,14 +817,11 @@ erts_proc_sig_fetch__(Process *proc) else sig->common.specific.next = proc->sig_inq.nmsigs.next; -#ifdef DEBUG - s = -#endif - erts_atomic32_read_band_nob(&proc->state, + s = erts_atomic32_read_band_nob(&proc->state, ~ERTS_PSFLG_SIG_IN_Q); ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) - == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)); + == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)); (void)s; } if (proc->sig_inq.nmsigs.last == &proc->sig_inq.first) proc->sig_qs.nmsigs.last = proc->sig_qs.cont_last; @@ -1184,6 +1331,8 @@ erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason) /* Pass signal using old monitor structure... */ ErtsSignal *sig; + send_using_monitor_struct: + mon->other.item = reason; /* Pass immed reason via other.item... */ sig = (ErtsSignal *) mon; sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR_DOWN, @@ -1195,6 +1344,18 @@ erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason) ErtsMonitorData *mdp = erts_monitor_to_data(mon); Eterm from_tag, monitored, heap[3]; + if (mon->type == ERTS_MON_TYPE_SUSPEND) { + /* + * Set reason to 'undefined', since exit + * reason is not used for suspend monitors, + * and send using monitor structure. This + * since we don't want to trigger + * unnecessary memory allocation etc... + */ + reason = am_undefined; + goto send_using_monitor_struct; + } + if (!(mon->flags & ERTS_ML_FLG_NAME)) { from_tag = monitored = mdp->origin.other.item; if (is_external_pid(from_tag)) { @@ -1417,8 +1578,7 @@ erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to, Eterm ref) /* It wasn't alive; reply to ourselves... 
*/ mp->next = NULL; mp->data.attached = ERTS_MSG_COMBINED_HFRAG; - erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN, - mp, msg, am_system); + erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN, mp, msg, am_system); } } @@ -1473,7 +1633,173 @@ erts_proc_sig_send_process_info_request(Process *c_p, else erts_free(ERTS_ALC_T_SIG_DATA, pis); return res; -} +} + +void +erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to, Eterm tag, Eterm reply) +{ + ErlHeapFragment *hfrag; + Uint hsz, tag_sz; + Eterm *hp, *start_hp, tag_cpy, msg, default_reply; + ErlOffHeap *ohp; + ErtsMessage *mp; + ErtsSyncSuspendRequest *ssusp; + int async_suspend; + + tag_sz = size_object(tag); + + hsz = 3 + tag_sz + sizeof(ErtsSyncSuspendRequest)/sizeof(Eterm); + + mp = erts_alloc_message(hsz, &hp); + hfrag = &mp->hfrag; + mp->next = NULL; + ohp = &hfrag->off_heap; + start_hp = hp; + + tag_cpy = copy_struct(tag, tag_sz, &hp, ohp); + + async_suspend = is_non_value(reply); + default_reply = async_suspend ? am_suspended : reply; + + msg = TUPLE2(hp, tag_cpy, default_reply); + hp += 3; + + hfrag->used_size = hp - start_hp; + + ssusp = (ErtsSyncSuspendRequest *) (char *) hp; + ssusp->message = msg; + ssusp->requester = c_p->common.id; + ssusp->async = async_suspend; + + ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_SYNC_SUSPEND, + ERTS_SIG_Q_TYPE_UNDEFINED, + 0); + ERL_MESSAGE_TOKEN(mp) = NIL; + ERL_MESSAGE_FROM(mp) = am_system; +#ifdef USE_VM_PROBES + ERL_MESSAGE_DT_UTAG(mp) = NIL; +#endif + + if (proc_queue_signal(c_p, to, (ErtsSignal *) mp, ERTS_SIG_Q_OP_SYNC_SUSPEND)) + (void) maybe_elevate_sig_handling_prio(c_p, to); + else { + Eterm *tp; + /* It wasn't alive; reply to ourselves... */ + mp->next = NULL; + mp->data.attached = ERTS_MSG_COMBINED_HFRAG; + tp = tuple_val(msg); + tp[2] = async_suspend ? am_badarg : am_exited; + erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN, + mp, msg, am_system); + } +} + +Eterm +erts_proc_sig_send_rpc_request(Process *c_p, + Eterm to, + int reply, + Eterm (*func)(Process *, void *, int *, ErlHeapFragment **), + void *arg) +{ + Eterm res; + ErtsProcSigRPC *sig = erts_alloc(ERTS_ALC_T_SIG_DATA, + sizeof(ErtsProcSigRPC)); + sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_RPC, + ERTS_SIG_Q_TYPE_UNDEFINED, + 0); + sig->requester = reply ? c_p->common.id : NIL; + sig->func = func; + sig->arg = arg; + + if (!reply) { + res = am_ok; + sig->ref = am_ok; + } + else { + res = erts_make_ref(c_p); + + sys_memcpy((void *) &sig->oref_thing, + (void *) internal_ref_val(res), + sizeof(ErtsORefThing)); + + sig->ref = make_internal_ref(&sig->oref_thing); + + ERTS_RECV_MARK_SAVE(c_p); + ERTS_RECV_MARK_SET(c_p); + } + + if (proc_queue_signal(c_p, to, (ErtsSignal *) sig, ERTS_SIG_Q_OP_RPC)) + (void) maybe_elevate_sig_handling_prio(c_p, to); + else { + erts_free(ERTS_ALC_T_SIG_DATA, sig); + res = THE_NON_VALUE; + if (reply) + JOIN_MESSAGE(c_p); + } + + return res; +} + +static int +handle_rpc(Process *c_p, ErtsProcSigRPC *rpc, int cnt, int limit, int *yieldp) +{ + Process *rp; + ErlHeapFragment *bp = NULL; + Eterm res; + Uint hsz; + int reds, out_cnt; + + /* + * reds in: + * Reductions left. + * + * reds out: + * Absolute value of reds out equals consumed + * amount of reds. If a negative value, force + * a yield. + */ + + reds = (limit - cnt) / ERTS_SIG_REDS_CNT_FACTOR; + if (reds <= 0) + reds = 1; + + res = (*rpc->func)(c_p, rpc->arg, &reds, &bp); + + if (reds < 0) { + /* Force yield... 
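+           The callback reports consumed reductions through 'reds'
+           and requests a yield by negating the value. An
+           illustrative (hypothetical) callback honouring this
+           contract:
+
+               static Eterm
+               my_rpc_func(Process *rp, void *arg,
+                           int *redsp, ErlHeapFragment **bpp)
+               {
+                   int used = heavy_work(rp, arg);  (hypothetical helper)
+                   *redsp = -used;    (consumed 'used' reductions;
+                                       force 'rp' to yield after)
+                   return am_ok;
+               }
+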
*/ + *yieldp = !0; + reds *= -1; + } + + out_cnt = reds*ERTS_SIG_REDS_CNT_FACTOR; + + hsz = 3 + sizeof(ErtsORefThing)/sizeof(Eterm); + + rp = erts_proc_lookup(rpc->requester); + if (!rp) { + if (bp) + free_message_buffer(bp); + } + else { + Eterm *hp, msg, ref; + ErtsMessage *mp = erts_alloc_message(hsz, &hp); + + sys_memcpy((void *) hp, (void *) &rpc->oref_thing, + sizeof(rpc->oref_thing)); + + ref = make_internal_ref(hp); + hp += sizeof(rpc->oref_thing)/sizeof(Eterm); + msg = TUPLE2(hp, ref, res); + + mp->hfrag.next = bp; + + erts_queue_proc_message(c_p, rp, 0, mp, msg); + } + + erts_free(ERTS_ALC_T_SIG_DATA, rpc); + + return out_cnt; +} static void is_alive_response(Process *c_p, ErtsMessage *mp, int is_alive) @@ -2391,8 +2717,9 @@ destroy_process_info_request(Process *c_p, ErtsProcessInfoSig *pisig) } static int -handle_process_info(Process *c_p, ErtsMessage *sig, - ErtsMessage ***next_nm_sig, int is_alive) +handle_process_info(Process *c_p, ErtsSigRecvTracing *tracing, + ErtsMessage *sig, ErtsMessage ***next_nm_sig, + int is_alive) { ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig; Uint reds = 0; @@ -2416,7 +2743,11 @@ handle_process_info(Process *c_p, ErtsMessage *sig, * Move messages part of message queue into inner * signal queue... */ + ASSERT(tracing); + if (*next_nm_sig != &c_p->sig_qs.cont) { + if (*next_nm_sig == tracing->messages.next) + tracing->messages.next = &c_p->sig_qs.cont; *c_p->sig_qs.last = c_p->sig_qs.cont; c_p->sig_qs.last = *next_nm_sig; @@ -2427,18 +2758,6 @@ handle_process_info(Process *c_p, ErtsMessage *sig, *c_p->sig_qs.last = NULL; } - if (!pisig->common.specific.next) { - /* - * No more signals in middle queue... - * - * Process-info 'status' needs sig-q - * process flag to be updated in order - * to show accurate result... - */ - erts_atomic32_read_band_nob(&c_p->state, - ~ERTS_PSFLG_SIG_Q); - } - #ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN { Sint len; @@ -2452,8 +2771,20 @@ handle_process_info(Process *c_p, ErtsMessage *sig, #endif } } - if (is_alive) + if (is_alive) { + if (!pisig->common.specific.next) { + /* + * No more signals in middle queue... + * + * Process-info 'status' needs sig-q + * process flag to be updated in order + * to show accurate result... + */ + erts_atomic32_read_band_nob(&c_p->state, + ~ERTS_PSFLG_SIG_Q); + } remove_nm_sig(c_p, sig, next_nm_sig); + } rp = erts_proc_lookup(pisig->requester); ASSERT(c_p != rp); @@ -2498,7 +2829,7 @@ handle_process_info(Process *c_p, ErtsMessage *sig, if (is_alive) erts_factory_trim_and_close(&hfact, &msg, 1); - erts_queue_message(rp, locks, mp, msg, c_p->common.id); + erts_queue_proc_message(c_p, rp, locks, mp, msg); if (!is_alive && locks) erts_proc_unlock(rp, locks); @@ -2512,6 +2843,155 @@ handle_process_info(Process *c_p, ErtsMessage *sig, return ((int) reds)*4 + 8; } +static void +handle_suspend(Process *c_p, ErtsMonitor *mon, int *yieldp) +{ + erts_aint32_t state = erts_atomic32_read_nob(&c_p->state); + + ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND); + + if (!(state & ERTS_PSFLG_DIRTY_RUNNING)) { + ErtsMonitorSuspend *msp; + erts_aint_t mstate; + + msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon); + mstate = erts_atomic_read_bor_acqb(&msp->state, + ERTS_MSUSPEND_STATE_FLG_ACTIVE); + ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate; + erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); + *yieldp = !0; + } + else { + /* Executing dirty; delay suspend... 
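+             the suspend cannot take effect while the process is
+             running on a dirty scheduler. Record it in the
+             pending-suspend data (ERTS_PSD_PENDING_SUSPEND) and let
+             erts_proc_sig_handle_pending_suspend() apply it when the
+             process stops executing dirty; sync-suspend requests
+             arriving meanwhile are parked on the same structure (see
+             handle_sync_suspend()).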
*/ + ErtsProcSigPendingSuspend *psusp; + ErtsMonitorSuspend *msp; + + psusp = ERTS_PROC_GET_PENDING_SUSPEND(c_p); + if (!psusp) { + psusp = erts_alloc(ERTS_ALC_T_SIG_DATA, + sizeof(ErtsProcSigPendingSuspend)); + psusp->mon = NULL; + psusp->sync = NULL; + ERTS_PROC_SET_PENDING_SUSPEND(c_p, (void *) psusp); + } + + msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon); + + msp->next = psusp->mon; + psusp->mon = msp; + + erts_atomic32_inc_nob(&msp->md.refc); + } +} + +static void +sync_suspend_reply(Process *c_p, ErtsMessage *mp, erts_aint32_t state) +{ + /* + * Sender prepared the message for us. Just patch + * the result if necessary. The default prepared + * result is 'false'. + */ + Process *rp; + ErtsSyncSuspendRequest *ssusp; + + ssusp = (ErtsSyncSuspendRequest *) (char *) (&mp->hfrag.mem[0] + + mp->hfrag.used_size); + + ASSERT(ERTS_SIG_IS_NON_MSG(mp)); + ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) mp)->common.tag) + == ERTS_SIG_Q_OP_SYNC_SUSPEND); + ASSERT(mp->hfrag.alloc_size > mp->hfrag.used_size); + ASSERT((mp->hfrag.alloc_size - mp->hfrag.used_size)*sizeof(UWord) + >= sizeof(ErtsSyncSuspendRequest)); + ASSERT(is_internal_pid(ssusp->requester)); + ASSERT(ssusp->requester != c_p->common.id); + ASSERT(is_tuple_arity(ssusp->message, 2)); + ASSERT(is_immed(tuple_val(ssusp->message)[2])); + + ERL_MESSAGE_TERM(mp) = ssusp->message; + mp->data.attached = ERTS_MSG_COMBINED_HFRAG; + mp->next = NULL; + + rp = erts_proc_lookup(ssusp->requester); + if (!rp) + erts_cleanup_messages(mp); + else { + if ((state & (ERTS_PSFLG_EXITING + | ERTS_PSFLG_SUSPENDED)) != ERTS_PSFLG_SUSPENDED) { + /* Not suspended -> patch result... */ + if (state & ERTS_PSFLG_EXITING) { + Eterm *tp = tuple_val(ssusp->message); + tp[2] = ssusp->async ? am_exited : am_badarg; + } + else { + Eterm *tp = tuple_val(ssusp->message); + ASSERT(!(state & ERTS_PSFLG_SUSPENDED)); + tp[2] = ssusp->async ? am_not_suspended : am_internal_error; + } + } + erts_queue_proc_message(c_p, rp, 0, mp, ssusp->message); + } +} + +static void +handle_sync_suspend(Process *c_p, ErtsMessage *mp) +{ + ErtsProcSigPendingSuspend *psusp; + + psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p); + if (!psusp) + sync_suspend_reply(c_p, mp, erts_atomic32_read_nob(&c_p->state)); + else { + mp->next = psusp->sync; + psusp->sync = mp; + } +} + +void +erts_proc_sig_handle_pending_suspend(Process *c_p) +{ + ErtsMonitorSuspend *msp; + ErtsMessage *sync; + ErtsProcSigPendingSuspend *psusp; + erts_aint32_t state = erts_atomic32_read_nob(&c_p->state); + + psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p); + + msp = psusp->mon; + + while (msp) { + ErtsMonitorSuspend *next_msp = msp->next; + msp->next = NULL; + if (!(state & ERTS_PSFLG_EXITING) + && erts_monitor_is_in_table(&msp->md.target)) { + erts_aint_t mstate; + + mstate = erts_atomic_read_bor_acqb(&msp->state, + ERTS_MSUSPEND_STATE_FLG_ACTIVE); + ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate; + erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); + } + + erts_monitor_release(&msp->md.target); + + msp = next_msp; + } + + sync = psusp->sync; + + while (sync) { + ErtsMessage *next_sync = sync->next; + sync->next = NULL; + sync_suspend_reply(c_p, sync, state); + sync = next_sync; + } + + erts_free(ERTS_ALC_T_SIG_DATA, psusp); + + ERTS_PROC_SET_PENDING_SUSPEND(c_p, NULL); +} + /* * Called in order to handle incoming signals. 
*/ @@ -2522,7 +3002,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep, { Eterm tag; erts_aint32_t state; - int cnt, limit, abs_lim, msg_tracing; + int yield, cnt, limit, abs_lim, msg_tracing; ErtsMessage *sig, ***next_nm_sig; ErtsSigRecvTracing tracing; @@ -2542,6 +3022,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep, limit = *redsp; *redsp = 0; + yield = 0; if (!c_p->sig_qs.cont) { if (state == -1) @@ -2655,6 +3136,18 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep, cnt += handle_nodedown(c_p, sig, mdp, next_nm_sig); } break; + case ERTS_MON_TYPE_SUSPEND: + tmon = (ErtsMonitor *) sig; + ASSERT(erts_monitor_is_target(tmon)); + ASSERT(!erts_monitor_is_in_table(tmon)); + mdp = erts_monitor_to_data(tmon); + if (erts_monitor_is_in_table(&mdp->origin)) { + erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), + &mdp->origin); + omon = &mdp->origin; + } + remove_nm_sig(c_p, sig, next_nm_sig); + break; default: ERTS_INTERNAL_ERROR("invalid monitor type"); break; @@ -2718,9 +3211,13 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep, if (mon->type == ERTS_MON_TYPE_DIST_PROC) erts_monitor_tree_insert(&ERTS_P_MONITORS(c_p), mon); - else + else { erts_monitor_list_insert(&ERTS_P_LT_MONITORS(c_p), mon); + if (mon->type == ERTS_MON_TYPE_SUSPEND) + handle_suspend(c_p, mon, &yield); + } ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig); + cnt += 2; break; } @@ -2764,9 +3261,16 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep, erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), tmon); else { erts_monitor_list_delete(&ERTS_P_LT_MONITORS(c_p), tmon); - if (type == ERTS_MON_TYPE_RESOURCE) { + switch (type) { + case ERTS_MON_TYPE_RESOURCE: erts_nif_demonitored((ErtsResource *) tmon->other.ptr); cnt++; + break; + case ERTS_MON_TYPE_SUSPEND: + erts_resume(c_p, ERTS_PROC_LOCK_MAIN); + break; + default: + break; } } erts_monitor_release_both(mdp); @@ -2877,7 +3381,22 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep, case ERTS_SIG_Q_OP_PROCESS_INFO: ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig); - handle_process_info(c_p, sig, next_nm_sig, !0); + handle_process_info(c_p, &tracing, sig, next_nm_sig, !0); + ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig); + break; + + case ERTS_SIG_Q_OP_SYNC_SUSPEND: + ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig); + remove_nm_sig(c_p, sig, next_nm_sig); + handle_sync_suspend(c_p, sig); + ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig); + break; + + case ERTS_SIG_Q_OP_RPC: + ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig); + remove_nm_sig(c_p, sig, next_nm_sig); + cnt += handle_rpc(c_p, (ErtsProcSigRPC *) sig, cnt, + limit, &yield); ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig); break; @@ -3038,6 +3557,15 @@ stop: { *redsp = cnt/4 + 1; + if (yield) { + int vreds = max_reds - *redsp; + if (vreds > 0) { + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + esdp->virtual_reds += vreds; + } + *redsp = max_reds; + } + return res; } } @@ -3146,6 +3674,8 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp) case ERTS_MON_TYPE_PROC: case ERTS_MON_TYPE_DIST_PROC: case ERTS_MON_TYPE_NODE: + case ERTS_MON_TYPE_NODES: + case ERTS_MON_TYPE_SUSPEND: erts_monitor_release((ErtsMonitor *) sig); break; default: @@ -3198,9 +3728,20 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp) break; case ERTS_SIG_Q_OP_PROCESS_INFO: - handle_process_info(c_p, sig, next_nm_sig, 0); + handle_process_info(c_p, NULL, sig, 
next_nm_sig, 0); break; + case ERTS_SIG_Q_OP_SYNC_SUSPEND: + handle_sync_suspend(c_p, sig); + break; + + case ERTS_SIG_Q_OP_RPC: { + int yield = 0; + handle_rpc(c_p, (ErtsProcSigRPC *) sig, + cnt, limit, &yield); + break; + } + case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE: destroy_trace_info((ErtsSigTraceInfo *) sig); break; @@ -3336,6 +3877,7 @@ erts_proc_sig_signal_size(ErtsSignal *sig) } break; + case ERTS_SIG_Q_OP_SYNC_SUSPEND: case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG: case ERTS_SIG_Q_OP_IS_ALIVE: size = ((ErtsMessage *) sig)->hfrag.alloc_size; @@ -3391,6 +3933,10 @@ erts_proc_sig_signal_size(ErtsSignal *sig) break; } + case ERTS_SIG_Q_OP_RPC: + size = sizeof(ErtsProcSigRPC); + break; + default: ERTS_INTERNAL_ERROR("Unknown signal"); break; @@ -3467,17 +4013,13 @@ erts_proc_sig_receive_helper(Process *c_p, */ *get_outp = 0; *msgpp = NULL; + return consumed_reds; } erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); - if (left_reds <= 0) { - *get_outp = -1; /* yield */ - *msgpp = NULL; - - ASSERT(consumed_reds >= (fcalls - neg_o_reds)); - return consumed_reds; - } + if (left_reds <= 0) + break; /* Yield */ /* handle newly arrived signals... */ } @@ -3498,19 +4040,27 @@ erts_proc_sig_receive_helper(Process *c_p, max_reds, !0); consumed_reds += reds; left_reds -= reds; - /* we may have exited by an incoming signal... */ - if (state & ERTS_PSFLG_EXITING) { + + /* we may have exited or suspended by an incoming signal... */ + + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_SUSPENDED)) { + if (state & ERTS_PSFLG_SUSPENDED) + break; /* Yield */ + /* * Process need to schedule out in order * to terminate. Prepare this a bit... */ + ASSERT(state & ERTS_PSFLG_EXITING); ASSERT(c_p->flags & F_DELAY_GC); c_p->flags &= ~F_DELAY_GC; c_p->arity = 0; c_p->current = NULL; + *get_outp = 1; *msgpp = NULL; + return consumed_reds; } @@ -3521,17 +4071,20 @@ erts_proc_sig_receive_helper(Process *c_p, return consumed_reds; } - if (left_reds <= 0) { - *get_outp = -1; /* yield */ - *msgpp = NULL; - - ASSERT(consumed_reds >= (fcalls - neg_o_reds)); - return consumed_reds; - } + if (left_reds <= 0) + break; /* yield */ ASSERT(!c_p->sig_qs.cont); /* Go fetch again... */ } + + /* Yield... */ + + *get_outp = -1; + *msgpp = NULL; + + ASSERT(consumed_reds >= (fcalls - neg_o_reds)); + return consumed_reds; } static int @@ -4268,7 +4821,7 @@ erts_proc_sig_hdbg_check_in_queue(Process *p, char *what, char *file, int line) NULL, NULL, ERTS_PSFLG_SIG_IN_Q); - ASSERT(p->sig_inq.len == len); + ASSERT(p->sig_inq.len == len); (void)len; } -#endif +#endif /* ERTS_PROC_SIG_HARD_DEBUG */ diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h index d250ad820f..efa7c08664 100644 --- a/erts/emulator/beam/erl_proc_sig_queue.h +++ b/erts/emulator/beam/erl_proc_sig_queue.h @@ -33,6 +33,11 @@ * - Group leader * - Is process alive * - Process info request + * - Suspend request (monitor of suspend type) + * - Resume request (demonitor of suspend type) + * - Suspend cleanup (monitor down of suspend type) + * - Sync suspend + * - RPC request * - Trace change * * The signal queue consists of three parts: @@ -557,6 +562,102 @@ erts_proc_sig_send_process_info_request(Process *c_p, Uint reserve_size, Eterm ref); +/** + * + * @brief Send a 'sync suspend' signal to a process. + * + * A response message '{Tag, Reply}' is sent to the + * sender when performed where Tag is the term passed + * as 'tag' argument. 
Reply is either 'suspended',
+ * 'not_suspended', or 'exited' if the operation is
+ * asynchronous; otherwise, the 'reply' argument, or
+ * 'badarg' if the process terminated.
+ *
+ * This signal does *not* change the suspend state; it only
+ * reads and replies with the state. This signal is typically
+ * sent after a suspend request (monitor of suspend type)
+ * signal has been sent to the process, in order to get a
+ * response when the suspend monitor has been processed.
+ *
+ * @param[in]     c_p       Pointer to process struct of
+ *                          currently executing process.
+ *
+ * @param[in]     to        Identifier of receiver.
+ *
+ * @param[in]     tag       Tag to use in response
+ *                          message to the sending
+ *                          process (i.e., c_p).
+ *
+ * @param[in]     reply     Reply to send if this
+ *                          is a synchronous operation;
+ *                          otherwise, THE_NON_VALUE.
+ */
+void
+erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to,
+                                Eterm tag, Eterm reply);
+
+/**
+ *
+ * @brief Send an 'rpc' signal to a process.
+ *
+ * The function 'func' will be executed in the
+ * context of the receiving process. A response
+ * message '{Ref, Result}' is sent to the sender
+ * when 'func' has been called. 'Ref' is the reference
+ * returned by this function and 'Result' is the
+ * term returned by 'func'. If the return value of
+ * 'func' is not an immediate term, 'func' has to
+ * allocate a heap fragment where the result is stored
+ * and update the heap fragment pointer pointer
+ * passed as third argument to point to it.
+ *
+ * If this function returns a reference, 'func' will
+ * be called in the context of the receiver. However,
+ * note that this might happen when the receiver is in
+ * an exiting state. The caller of this function
+ * *unconditionally* has to enter a receive that matches
+ * on the returned reference in all clauses as its next
+ * receive; otherwise, bad things will happen!
+ *
+ * If THE_NON_VALUE is returned, the receiver did not
+ * exist. The signal was not sent, and no specific
+ * receive has to be entered by the caller.
+ *
+ * @param[in]     c_p       Pointer to process struct of
+ *                          currently executing process.
+ *
+ * @param[in]     to        Identifier of receiver process.
+ *
+ * @param[in]     reply     Non-zero if a reply is wanted.
+ *
+ * @param[in]     func      Function to execute in the
+ *                          context of the receiver.
+ *                          First argument will be a
+ *                          pointer to the process struct
+ *                          of the receiver process.
+ *                          Second argument will be 'arg'
+ *                          (see below). Third argument
+ *                          will be a pointer to a pointer
+ *                          to a heap fragment for storage
+ *                          of the result returned from 'func'
+ *                          (i.e. an 'out' parameter).
+ *
+ * @param[in]     arg       Void pointer to argument
+ *                          to pass as second argument
+ *                          in the call of 'func'.
+ *
+ * @returns                 If the request was sent,
+ *                          an internal ordinary
+ *                          reference; otherwise,
+ *                          THE_NON_VALUE (non-existing
+ *                          receiver).
+ */
+Eterm
+erts_proc_sig_send_rpc_request(Process *c_p,
+                               Eterm to,
+                               int reply,
+                               Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
+                               void *arg);

 /*
  * End of send operations of currently supported process signals.
@@ -733,6 +834,25 @@
 Sint
 erts_proc_sig_privqs_len(Process *c_p);

+/* Enqueue the signal chain 'first' through '*last' in the signal
+ * in-queue of 'rp' and return the (possibly updated) process state;
+ * see proc_queue_signal() for the argument conventions. */
+erts_aint32_t
+erts_enqueue_signals(Process *rp, ErtsMessage *first,
+                     ErtsMessage **last, ErtsMessage **last_next,
+                     Uint msg_cnt,
+                     erts_aint32_t in_state);
+
+/* Send the monitor signal, if any, buffered in the calling normal
+ * scheduler's 'pending_signal' field. */
+void
+erts_proc_sig_send_pending(ErtsSchedulerData* esdp);
+
+/**
+ * @brief Notify 'rp' that new signals have been enqueued in its
+ *        in-queue: the process is scheduled for signal handling if
+ *        necessary, and a dirty-signal handler is poked if 'rp' is
+ *        executing dirty.
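+ *
+ * A minimal sender-side sketch (illustrative only; assumes 'first'
+ * and 'last' form a prepared signal chain, most locking and error
+ * handling elided):
+ *
+ *     erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+ *     state = erts_atomic32_read_nob(&rp->state);
+ *     state = erts_enqueue_signals(rp, first, &last->next,
+ *                                  NULL, 0, state);
+ *     erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+ *     erts_proc_notify_new_sig(rp, state, 0);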
*/ +ERTS_GLB_INLINE void erts_proc_notify_new_sig(Process* rp, erts_aint32_t state, + erts_aint32_t enable_flag); + +void erts_make_dirty_proc_handled(Eterm pid, erts_aint32_t state, + erts_aint32_t prio); + + typedef struct { Uint size; ErtsMessage *msgp; @@ -801,6 +921,21 @@ void erts_proc_sig_clear_seq_trace_tokens(Process *c_p); /** + * + * @brief Handle pending suspend requests + * + * Should be called by processes when they stop + * execution on a dirty scheduler if they have + * pending suspend requests (i.e. when + * ERTS_PROC_GET_PENDING_SUSPEND(c_p) != NULL). + * + * @param[in] c_p Pointer to executing + * process + */ +void +erts_proc_sig_handle_pending_suspend(Process *c_p); + +/** * @brief Initialize this functionality */ void erts_proc_sig_queue_init(void); @@ -867,6 +1002,24 @@ erts_proc_sig_fetch(Process *proc) return res; } +ERTS_GLB_INLINE void +erts_proc_notify_new_sig(Process* rp, erts_aint32_t state, + erts_aint32_t enable_flag) +{ + if (~(state & (ERTS_PSFLG_EXITING + | ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_SIG_IN_Q)) + | (~state & enable_flag)) { + /* Schedule process... */ + state = erts_proc_sys_schedule(rp, state, enable_flag); + } + + if (state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + erts_make_dirty_proc_handled(rp->common.id, state, -1); + } +} + #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ #endif /* ERTS_PROC_SIG_QUEUE_H__ */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index e8f58a196a..1478b71195 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -36,7 +36,6 @@ #include "erl_db.h" #include "dist.h" #include "beam_catches.h" -#include "erl_instrument.h" #include "erl_threads.h" #include "erl_binary.h" #include "beam_bp.h" @@ -186,8 +185,6 @@ sched_get_busy_wait_params(ErtsSchedulerData *esdp) return &sched_busy_wait_params[esdp->type]; } -int erts_disable_proc_not_running_opt; - static ErtsAuxWorkData *aux_thread_aux_work_data; static ErtsAuxWorkData *poll_thread_aux_work_data; @@ -731,6 +728,11 @@ erts_pre_init_process(void) = ERTS_PSD_DIST_ENTRY_GET_LOCKS; erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks = ERTS_PSD_DIST_ENTRY_SET_LOCKS; + + erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].get_locks + = ERTS_PSD_PENDING_SUSPEND_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].set_locks + = ERTS_PSD_PENDING_SUSPEND_SET_LOCKS; #endif } @@ -745,7 +747,6 @@ void erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab) { - erts_disable_proc_not_running_opt = 0; erts_init_proc_lock(ncpu); init_proclist_alloc(); @@ -2508,6 +2509,8 @@ handle_yield(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) yield |= erts_handle_yielded_ets_all_request(awdp->esdp, &awdp->yield.ets_all); + yield |= erts_handle_yielded_alcu_blockscan(awdp->esdp, + &awdp->yield.alcu_blockscan); /* * Other yielding operations... 
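 */

The header comment above specifies when erts_proc_sig_handle_pending_suspend()
must be called. A minimal sketch of such a call site (illustrative only; the
actual dirty schedule-out code is assumed, not shown in this hunk):

    /* On leaving a dirty scheduler: apply suspends deferred while
     * the process was running dirty. */
    if (ERTS_PROC_GET_PENDING_SUSPEND(c_p))
        erts_proc_sig_handle_pending_suspend(c_p);
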
@@ -5698,6 +5701,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->ssi = ssi; esdp->current_process = NULL; esdp->current_port = NULL; + esdp->current_nif = NULL; esdp->virtual_reds = 0; esdp->cpu_id = -1; @@ -5713,6 +5717,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->io.out = (Uint64) 0; esdp->io.in = (Uint64) 0; + esdp->pending_signal.sig = NULL; + esdp->pending_signal.to = THE_NON_VALUE; +#ifdef DEBUG + esdp->pending_signal.dbg_from = NULL; +#endif + if (daww_ptr) { init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr); *daww_ptr += daww_sz; @@ -6604,13 +6614,13 @@ change_proc_schedule_state(Process *p, if (((n & (ERTS_PSFLG_SUSPENDED | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) - && (!(a & (ERTS_PSFLG_ACTIVE_SYS - | ERTS_PSFLG_RUNNING - | ERTS_PSFLG_RUNNING_SYS - | ERTS_PSFLG_DIRTY_RUNNING - | ERTS_PSFLG_DIRTY_RUNNING_SYS) - && (!(a & ERTS_PSFLG_ACTIVE) - || (a & ERTS_PSFLG_SUSPENDED))))) { + & ((a & (ERTS_PSFLG_SUSPENDED + | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE) + & !(a & (ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS))) { /* We activated a prevously inactive process */ profile_runnable_proc(p, am_active); } @@ -8333,6 +8343,7 @@ sched_thread_func(void *vesdp) ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL); #endif + erts_alcu_sched_spec_data_init(esdp); erts_ets_sched_spec_data_init(esdp); process_main(esdp->x_reg_array, esdp->f_reg_array); @@ -8544,427 +8555,22 @@ erts_start_schedulers(void) } } - - -static void -add_pend_suspend(Process *suspendee, - Eterm originator_pid, - void (*handle_func)(Process *, - ErtsProcLocks, - int, - Eterm)) -{ - ErtsPendingSuspend *psp = erts_alloc(ERTS_ALC_T_PEND_SUSPEND, - sizeof(ErtsPendingSuspend)); - psp->next = NULL; -#ifdef DEBUG -#if defined(ARCH_64) - psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead; -#else - psp->end = (ErtsPendingSuspend *) 0xdeaddead; -#endif -#endif - psp->pid = originator_pid; - psp->handle_func = handle_func; - - if (suspendee->pending_suspenders) - suspendee->pending_suspenders->end->next = psp; - else - suspendee->pending_suspenders = psp; - suspendee->pending_suspenders->end = psp; -} - -static void -handle_pending_suspend(Process *p, ErtsProcLocks p_locks) -{ - ErtsPendingSuspend *psp; - int is_alive = !ERTS_PROC_IS_EXITING(p); - - ERTS_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS); - - /* - * New pending suspenders might appear while we are processing - * (since we may release the status lock on p while processing). 
- */ - while (p->pending_suspenders) { - psp = p->pending_suspenders; - p->pending_suspenders = NULL; - while (psp) { - ErtsPendingSuspend *free_psp; - (*psp->handle_func)(p, p_locks, is_alive, psp->pid); - free_psp = psp; - psp = psp->next; - erts_free(ERTS_ALC_T_PEND_SUSPEND, (void *) free_psp); - } - } - -} - -static ERTS_INLINE void -cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks) -{ - if (is_not_nil(p->suspendee)) { - ErtsMonitor *mon; - Eterm suspendee = p->suspendee; - Process *rp; - if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); - rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS, - suspendee, ERTS_PROC_LOCK_STATUS); - if (rp) { - erts_resume(rp, ERTS_PROC_LOCK_STATUS); - erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - } - if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); - p->suspendee = NIL; - - mon = erts_monitor_tree_lookup(p->suspend_monitors, - suspendee); - if (mon) { - erts_monitor_tree_delete(&p->suspend_monitors, - mon); - erts_monitor_suspend_destroy(erts_monitor_suspend(mon)); - } - } -} - -static void -handle_pend_sync_suspend(Process *suspendee, - ErtsProcLocks suspendee_locks, - int suspendee_alive, - Eterm suspender_pid) -{ - Process *suspender; - - ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); - - suspender = erts_pid2proc(suspendee, - suspendee_locks, - suspender_pid, - ERTS_PROC_LOCK_STATUS); - if (suspender) { - ASSERT(is_nil(suspender->suspendee)); - if (suspendee_alive) { - erts_suspend(suspendee, suspendee_locks, NULL); - suspender->suspendee = suspendee->common.id; - } - /* suspender is suspended waiting for suspendee to suspend; - resume suspender */ - ASSERT(suspendee != suspender); - resume_process(suspender, ERTS_PROC_LOCK_STATUS); - erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); - } -} - -static Process * -pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, - Eterm pid, ErtsProcLocks pid_locks, int suspend) -{ - Process *rp; - int unlock_c_p_status; - - ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); - - ERTS_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); - ERTS_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); - - if (c_p->common.id == pid) - return erts_pid2proc(c_p, c_p_locks, pid, pid_locks); - - if (c_p_locks & ERTS_PROC_LOCK_STATUS) - unlock_c_p_status = 0; - else { - unlock_c_p_status = 1; - erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); - } - - if (c_p->suspendee == pid) { - /* Process previously suspended by c_p (below)... */ - ErtsProcLocks rp_locks = pid_locks|ERTS_PROC_LOCK_STATUS; - rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, rp_locks); - c_p->suspendee = NIL; - ASSERT(c_p->flags & F_P2PNR_RESCHED); - c_p->flags &= ~F_P2PNR_RESCHED; - if (!suspend && rp) - resume_process(rp, rp_locks); - } - else { - rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, - pid, ERTS_PROC_LOCK_STATUS); - - if (!rp) { - c_p->flags &= ~F_P2PNR_RESCHED; - goto done; - } - - ASSERT(!(c_p->flags & F_P2PNR_RESCHED)); - - /* - * Suspend the other process in order to prevent - * it from being selected for normal execution. - * This will however not prevent it from being - * selected for execution of a system task. If - * it is selected for execution of a system task - * we might be blocked for quite a while if the - * try-lock below fails. That is, there is room - * for improvement here... 
- */ - - if (!suspend_process(c_p, rp)) { - /* Other process running */ - - ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING) - & erts_atomic32_read_nob(&rp->state)); - - if (!suspend - && (erts_atomic32_read_nob(&rp->state) - & ERTS_PSFLG_DIRTY_RUNNING)) { - ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { - erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, - pid, pid_locks|ERTS_PROC_LOCK_STATUS); - } - goto done; - } - - running: - - /* - * If we got pending suspenders and suspend ourselves waiting - * to suspend another process we might deadlock. - * In this case we have to yield, be suspended by - * someone else and then do it all over again. - */ - if (!c_p->pending_suspenders) { - /* Mark rp pending for suspend by c_p */ - add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend); - ASSERT(is_nil(c_p->suspendee)); - - /* Suspend c_p; when rp is suspended c_p will be resumed. */ - suspend_process(c_p, c_p); - c_p->flags |= F_P2PNR_RESCHED; - } - /* Yield (caller is assumed to yield immediately in bif). */ - erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - rp = ERTS_PROC_LOCK_BUSY; - } - else { - ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { - if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS) - & erts_atomic32_read_nob(&rp->state)) { - /* Executing system task... */ - resume_process(rp, ERTS_PROC_LOCK_STATUS); - goto running; - } - erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - /* - * If we are unlucky, the process just got selected for - * execution of a system task. In this case we may be - * blocked here for quite a while... Execution of system - * tasks are fortunately quite rare events. We try to - * avoid this by checking if it is in a state executing - * system tasks (above), but it will not prevent all - * scenarios for a long block here... - */ - rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, - pid, pid_locks|ERTS_PROC_LOCK_STATUS); - if (!rp) - goto done; - } - - /* - * The previous suspend has prevented the process - * from being selected for normal execution regardless - * of locks held or not held on it... - */ -#ifdef DEBUG - { - erts_aint32_t state; - state = erts_atomic32_read_nob(&rp->state); - ASSERT(!(state & ERTS_PSFLG_RUNNING)); - } -#endif - - if (!suspend) - resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS); - } - } - - done: - - if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS)) - erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - if (unlock_c_p_status) - erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); - return rp; -} - - -/* - * Like erts_pid2proc() but: - * - * * At least ERTS_PROC_LOCK_MAIN have to be held on c_p. - * * At least ERTS_PROC_LOCK_MAIN have to be taken on pid. - * * It also waits for proc to be in a state != running and garbing. - * * If ERTS_PROC_LOCK_BUSY is returned, the calling process has to - * yield (ERTS_BIF_YIELD[0-3]()). c_p might in this case have been - * suspended. - */ -Process * -erts_pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, - Eterm pid, ErtsProcLocks pid_locks) -{ - return pid2proc_not_running(c_p, c_p_locks, pid, pid_locks, 0); -} - -/* - * erts_pid2proc_nropt() is normally the same as - * erts_pid2proc_not_running(). However it is only - * to be used when 'not running' is a pure optimization, - * not a requirement. 
- */ - -Process * -erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks, - Eterm pid, ErtsProcLocks pid_locks) -{ - if (erts_disable_proc_not_running_opt) - return erts_pid2proc(c_p, c_p_locks, pid, pid_locks); - else - return erts_pid2proc_not_running(c_p, c_p_locks, pid, pid_locks); -} - -static ERTS_INLINE int -do_bif_suspend_process(Process *c_p, - ErtsMonitorSuspend *smon, - Process *suspendee) -{ - ASSERT(suspendee); - ASSERT(!ERTS_PROC_IS_EXITING(suspendee)); - ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS - & erts_proc_lc_my_proc_locks(suspendee)); - if (smon) { - if (!smon->active) { - if (!suspend_process(c_p, suspendee)) - return 0; - } - smon->active += smon->pending; - ASSERT(smon->active); - smon->pending = 0; - return 1; - } - return 0; -} - -static void -handle_pend_bif_sync_suspend(Process *suspendee, - ErtsProcLocks suspendee_locks, - int suspendee_alive, - Eterm suspender_pid) -{ - Process *suspender; - - ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); - - suspender = erts_pid2proc(suspendee, - suspendee_locks, - suspender_pid, - ERTS_PROC_LOCK_STATUS); - if (suspender) { - ErtsMonitorSuspend *smon; - ErtsMonitor *mon; - mon = erts_monitor_tree_lookup(suspender->suspend_monitors, - suspendee->common.id); - smon = erts_monitor_suspend(mon); - - ASSERT(is_nil(suspender->suspendee)); - if (!suspendee_alive) { - if (mon) { - erts_monitor_tree_delete(&suspender->suspend_monitors, - mon); - erts_monitor_suspend_destroy(smon); - } - } - else { -#ifdef DEBUG - int res = -#endif - do_bif_suspend_process(suspendee, smon, suspendee); - ASSERT(!smon || res != 0); - suspender->suspendee = suspendee->common.id; - } - /* suspender is suspended waiting for suspendee to suspend; - resume suspender */ - ASSERT(suspender != suspendee); - resume_process(suspender, ERTS_PROC_LOCK_STATUS); - erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); - } -} - -static void -handle_pend_bif_async_suspend(Process *suspendee, - ErtsProcLocks suspendee_locks, - int suspendee_alive, - Eterm suspender_pid) -{ - - Process *suspender; - - ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); - - suspender = erts_pid2proc(suspendee, - suspendee_locks, - suspender_pid, - ERTS_PROC_LOCK_STATUS); - if (suspender) { - ErtsMonitorSuspend *smon; - ErtsMonitor *mon; - mon = erts_monitor_tree_lookup(suspender->suspend_monitors, - suspendee->common.id); - smon = erts_monitor_suspend(mon); - ASSERT(is_nil(suspender->suspendee)); - if (!suspendee_alive) { - if (mon) { - erts_monitor_tree_delete(&suspender->suspend_monitors, - mon); - erts_monitor_suspend_destroy(smon); - } - } - else { -#ifdef DEBUG - int res = -#endif - do_bif_suspend_process(suspendee, smon, suspendee); - ASSERT(!smon || res != 0); - } - erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); - } -} - - -/* - * The erlang:suspend_process/2 BIF - */ - BIF_RETTYPE -suspend_process_2(BIF_ALIST_2) +erts_internal_suspend_process_2(BIF_ALIST_2) { Eterm res; - Process* suspendee = NULL; - ErtsMonitorSuspend *smon; - ErtsProcLocks xlocks = (ErtsProcLocks) 0; - int created; - - /* Options and default values: */ - int asynchronous = 0; + Eterm reply_tag = THE_NON_VALUE; + Eterm reply_res = THE_NON_VALUE; + int suspend; + int sync = 0; + int async = 0; int unless_suspending = 0; - + erts_aint_t mstate; + ErtsMonitorSuspend *msp; + ErtsMonitorData *mdp; if (BIF_P->common.id == BIF_ARG_1) - goto badarg; /* We are not allowed to suspend ourselves */ + BIF_RET(am_badarg); /* We are not allowed to suspend ourselves */ if (is_not_nil(BIF_ARG_2)) { /* Parse option 
list */ @@ -8978,192 +8584,128 @@ suspend_process_2(BIF_ALIST_2) unless_suspending = 1; break; case am_asynchronous: - asynchronous = 1; + async = 1; break; - default: - goto badarg; + default: { + if (is_tuple_arity(arg, 2)) { + Eterm *tp = tuple_val(arg); + if (tp[1] == am_asynchronous) { + async = 1; + reply_tag = tp[2]; + break; + } + } + BIF_RET(am_badarg); } + } arg = CDR(lp); - } + } if (is_not_nil(arg)) - goto badarg; - } - - xlocks = ERTS_PROC_LOCK_STATUS; - - erts_proc_lock(BIF_P, xlocks); - - suspendee = erts_pid2proc(BIF_P, - ERTS_PROC_LOCK_MAIN|xlocks, - BIF_ARG_1, - ERTS_PROC_LOCK_STATUS); - if (!suspendee) - goto no_suspendee; - - smon = erts_monitor_suspend_tree_lookup_create(&BIF_P->suspend_monitors, - &created, - BIF_ARG_1); - - if (asynchronous) { - /* --- Asynchronous suspend begin ---------------------------------- */ - - ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS - & erts_proc_lc_my_proc_locks(BIF_P)); - ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS - == erts_proc_lc_my_proc_locks(suspendee)); - - if (smon->active) { - smon->active += smon->pending; - smon->pending = 0; - if (unless_suspending) - res = am_false; - else if (smon->active == INT_MAX) - goto system_limit; - else { - smon->active++; - res = am_true; - } - /* done */ - } - else { - /* We havn't got any active suspends on the suspendee */ - if (smon->pending && unless_suspending) - res = am_false; - else { - if (smon->pending == INT_MAX) - goto system_limit; - - smon->pending++; - - if (!do_bif_suspend_process(BIF_P, smon, suspendee)) - add_pend_suspend(suspendee, - BIF_P->common.id, - handle_pend_bif_async_suspend); - - res = am_true; - } - /* done */ - } - /* --- Asynchronous suspend end ------------------------------------ */ - } - else /* if (!asynchronous) */ { - /* --- Synchronous suspend begin ----------------------------------- */ - - ERTS_LC_ASSERT(((ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS) - & erts_proc_lc_my_proc_locks(BIF_P)) - == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS)); - ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS - == erts_proc_lc_my_proc_locks(suspendee)); - - if (BIF_P->suspendee == BIF_ARG_1) { - /* We are back after a yield and the suspendee - has been suspended on behalf of us. */ - ASSERT(smon->active >= 1); - BIF_P->suspendee = NIL; - res = (!unless_suspending || smon->active == 1 - ? am_true - : am_false); - /* done */ - } - else if (smon->active) { - if (unless_suspending) - res = am_false; - else { - smon->active++; - res = am_true; - } - /* done */ - } - else { - /* We haven't got any active suspends on the suspendee */ - - /* - * If we have pending suspenders and suspend ourselves waiting - * to suspend another process, or suspend another process - * we might deadlock. In this case we have to yield, - * be suspended by someone else, and then do it all over again. - */ - if (BIF_P->pending_suspenders) - goto yield; - - if (!unless_suspending && smon->pending == INT_MAX) - goto system_limit; - if (!unless_suspending || smon->pending == 0) - smon->pending++; - - if (do_bif_suspend_process(BIF_P, smon, suspendee)) { - res = (!unless_suspending || smon->active == 1 - ? am_true - : am_false); - /* done */ - } - else { - /* Mark suspendee pending for suspend by BIF_P */ - add_pend_suspend(suspendee, - BIF_P->common.id, - handle_pend_bif_sync_suspend); - - ASSERT(is_nil(BIF_P->suspendee)); - - /* - * Suspend BIF_P; when suspendee is suspended, BIF_P - * will be resumed and this BIF will be called again. - * This time with BIF_P->suspendee == BIF_ARG_1 (see - * above). 
- */ - suspend_process(BIF_P, BIF_P); - goto yield; - } - } - /* --- Synchronous suspend end ------------------------------------- */ + BIF_RET(am_badarg); } -#ifdef DEBUG - { - erts_aint32_t state = erts_atomic32_read_acqb(&suspendee->state); - ASSERT((state & ERTS_PSFLG_SUSPENDED) - || (asynchronous && smon->pending)); - ASSERT((state & ERTS_PSFLG_SUSPENDED) - || !smon->active); + if (!unless_suspending) { + ErtsMonitor *mon; + mon = erts_monitor_tree_lookup_create(&ERTS_P_MONITORS(BIF_P), + &suspend, + ERTS_MON_TYPE_SUSPEND, + BIF_P->common.id, + BIF_ARG_1); + ASSERT(mon->other.item == BIF_ARG_1); + + mdp = erts_monitor_to_data(mon); + msp = (ErtsMonitorSuspend *) mdp; + + mstate = erts_atomic_inc_read_relb(&msp->state); + ASSERT(suspend || (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 1); + sync = !async & !suspend & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE); + suspend = !!suspend; /* ensure 0|1 */ + res = am_true; } -#endif - - erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); - erts_proc_unlock(BIF_P, xlocks); - BIF_RET(res); - - system_limit: - ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT); - goto do_return; - - no_suspendee: { + else { ErtsMonitor *mon; - BIF_P->suspendee = NIL; - mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1); + mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P), + BIF_ARG_1); if (mon) { - erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon); - erts_monitor_suspend_destroy(erts_monitor_suspend(mon)); + ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND); + mdp = erts_monitor_to_data(mon); + msp = (ErtsMonitorSuspend *) mdp; + mstate = erts_atomic_read_nob(&msp->state); + ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 0); + mdp = NULL; + sync = !async & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE); + suspend = 0; + res = am_false; + } + else { + mdp = erts_monitor_create(ERTS_MON_TYPE_SUSPEND, NIL, + BIF_P->common.id, + BIF_ARG_1, NIL); + mon = &mdp->origin; + erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), mon); + msp = (ErtsMonitorSuspend *) mdp; + mstate = erts_atomic_inc_read_relb(&msp->state); + ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); + suspend = !0; + res = am_true; + } + } + + if (suspend) { + erts_aint32_t state; + Process *rp; + int send_sig = 0; + + /* fail state... 
*/ + state = (ERTS_PSFLG_EXITING + | ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS); + + rp = erts_try_lock_sig_free_proc(BIF_ARG_1, + ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS, + &state); + if (!rp) + goto noproc; + if (rp == ERTS_PROC_LOCK_BUSY) + send_sig = !0; + else { + send_sig = !suspend_process(BIF_P, rp); + if (!send_sig) { + erts_monitor_list_insert(&ERTS_P_LT_MONITORS(rp), &mdp->target); + erts_atomic_read_bor_relb(&msp->state, + ERTS_MSUSPEND_STATE_FLG_ACTIVE); + } + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); + } + if (send_sig) { + if (erts_proc_sig_send_monitor(&mdp->target, BIF_ARG_1)) + sync = !async; + else { + noproc: + erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), &mdp->origin); + erts_monitor_release_both(mdp); + if (!async) + res = am_badarg; + } } } - badarg: - ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); - goto do_return; + if (sync) { + ASSERT(is_non_value(reply_tag)); + reply_res = res; + reply_tag = res = erts_make_ref(BIF_P); + ERTS_RECV_MARK_SAVE(BIF_P); + ERTS_RECV_MARK_SET(BIF_P); + } - yield: - ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2], - BIF_P, BIF_ARG_1, BIF_ARG_2); - - do_return: - if (suspendee) - erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); - if (xlocks) - erts_proc_unlock(BIF_P, xlocks); - return res; + if (is_value(reply_tag)) + erts_proc_sig_send_sync_suspend(BIF_P, BIF_ARG_1, reply_tag, reply_res); + BIF_RET(res); } - /* * The erlang:resume_process/1 BIF */ @@ -9172,90 +8714,32 @@ BIF_RETTYPE resume_process_1(BIF_ALIST_1) { ErtsMonitor *mon; - ErtsMonitorSuspend *smon; - Process *suspendee; - int is_active; + ErtsMonitorSuspend *msp; + erts_aint_t mstate; if (BIF_P->common.id == BIF_ARG_1) BIF_ERROR(BIF_P, BADARG); - erts_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS); - mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1); - smon = erts_monitor_suspend(mon); - - if (!smon) { - /* No previous suspend or dead suspendee */ - goto error; - } - else if (smon->pending) { - smon->pending--; - ASSERT(smon->pending >= 0); - if (smon->active) { - smon->active += smon->pending; - smon->pending = 0; - } - is_active = smon->active; - } - else if (smon->active) { - smon->active--; - ASSERT(smon->pending == 0); - is_active = 1; - } - else { + mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P), + BIF_ARG_1); + if (!mon) { /* No previous suspend or dead suspendee */ - goto no_suspendee; + BIF_ERROR(BIF_P, BADARG); } - if (smon->active || smon->pending || !is_active) { - /* Leave the suspendee as it is; just verify that it is still alive */ - suspendee = erts_proc_lookup(BIF_ARG_1); - if (!suspendee) - goto no_suspendee; + ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND); + msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon); - } - else { - /* Resume */ - suspendee = erts_pid2proc(BIF_P, - ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS, - BIF_ARG_1, - ERTS_PROC_LOCK_STATUS); - if (!suspendee) { - mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1); - smon = erts_monitor_suspend(mon); - if (!mon) - goto error; - goto no_suspendee; - } + mstate = erts_atomic_dec_read_relb(&msp->state); - ASSERT(mon == erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1)); + ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) >= 0); - ASSERT(ERTS_PSFLG_SUSPENDED - & erts_atomic32_read_nob(&suspendee->state)); - ASSERT(BIF_P != suspendee); - resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - - erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); - } - - if 
(!smon->active && !smon->pending) { - ASSERT(mon); - erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon); - erts_monitor_suspend_destroy(smon); + if ((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) == 0) { + erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), mon); + erts_proc_sig_send_demonitor(mon); } - erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); - BIF_RET(am_true); - - no_suspendee: - /* cleanup */ - ASSERT(mon); - erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon); - erts_monitor_suspend_destroy(smon); - - error: - erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); - BIF_ERROR(BIF_P, BADARG); } BIF_RETTYPE @@ -9601,6 +9085,17 @@ scheduler_gc_proc(Process *c_p, int reds_left) return reds; } +static void +unlock_lock_rq(int pre_free, void *vrq) +{ + ErtsRunQueue *rq = vrq; + if (pre_free) + erts_runq_unlock(rq); + else + erts_runq_lock(rq); +} + + /* * schedule() is called from BEAM (process_main()) or HiPE * (hipe_mode_switch()) when the current process is to be @@ -9671,8 +9166,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } else { is_normal_sched = !esdp; if (is_normal_sched) { - esdp = p->scheduler_data; + esdp = p->scheduler_data; ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); + + if (esdp->pending_signal.sig) { + ASSERT(esdp->pending_signal.dbg_from == p); + erts_proc_sig_send_pending(esdp); + } } else { ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); @@ -9680,12 +9180,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ASSERT(esdp->current_process == p || esdp->free_process == p); - sched_out_proc: - - ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); reds = actual_reds = calls - esdp->virtual_reds; + internal_sched_out_proc: + + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ASSERT(actual_reds >= 0); if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST) reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST; @@ -9727,11 +9228,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) /* have to re-read state after taking lock */ state = erts_atomic32_read_nob(&p->state); - if (p->pending_suspenders) - handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN - | ERTS_PROC_LOCK_TRACE - | ERTS_PROC_LOCK_STATUS)); - esdp->reductions += reds; { @@ -9768,7 +9264,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } if (dec_refc) - erts_proc_dec_refc(p); + erts_proc_dec_refc_free_func(p, + unlock_lock_rq, + (void *) rq); } ASSERT(!esdp->free_process); @@ -10181,8 +9679,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (is_normal_sched) { if (state & ERTS_PSFLG_RUNNING_SYS) { if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) { - int local_only = !!(p->flags & F_LOCAL_SIGS_ONLY); - if (!local_only || (state & ERTS_PSFLG_SIG_Q)) { + int local_only = (!!(p->flags & F_LOCAL_SIGS_ONLY) + & !(state & ERTS_PSFLG_SUSPENDED)); + if (!local_only | !!(state & ERTS_PSFLG_SIG_Q)) { int sig_reds; /* * If we have dirty work scheduled we allow @@ -10268,7 +9767,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } p->fcalls = reds; - + if (reds != context_reds) { + actual_reds = context_reds - reds - esdp->virtual_reds; + ASSERT(actual_reds >= 0); + esdp->virtual_reds = 0; + p->reds += actual_reds; + ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq, + (int) ERTS_PSFLGS_GET_USR_PRIO(state), + reds, + actual_reds); + } + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); ASSERT(erts_proc_read_refc(p) > 0); @@ -10318,6 +9827,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) #endif return p; + + 
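+
+    /*
+     * Late schedule-out entry: convert the reduction count left in
+     * 'reds' into the number of reductions actually executed,
+     * accounting for accumulated virtual reductions, before joining
+     * the common schedule-out path (internal_sched_out_proc above).
+     */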
sched_out_proc: + actual_reds = context_reds; + actual_reds -= reds; + actual_reds -= esdp->virtual_reds; + reds = actual_reds; + goto internal_sched_out_proc; + } } @@ -10372,7 +9889,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, ASSERT(hp_start + hsz == hp); #endif - erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id); + erts_queue_proc_message(c_p, rp, rp_locks, mp, msg); if (c_p == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -10934,7 +10451,7 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state, msg = copy_struct(operation, osz, &hp, ohp); msg = TUPLE4(hp, st->requester, target, prio, msg); - erts_queue_message(rp, rp_locks, mp, msg, st->requester); + erts_queue_message(rp, rp_locks, mp, msg, am_system); if (rp_locks) erts_proc_unlock(rp, rp_locks); @@ -11830,7 +11347,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #ifdef HIPE hipe_init_process(&p->hipe); - hipe_init_process_smp(&p->hipe_smp); #endif p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz); p->old_hend = p->old_htop = p->old_heap = NULL; @@ -11879,7 +11395,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). ERTS_P_LINKS(p) = NULL; ERTS_P_MONITORS(p) = NULL; ERTS_P_LT_MONITORS(p) = NULL; - p->suspend_monitors = NULL; ASSERT(is_pid(parent->group_leader)); @@ -11909,6 +11424,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->sig_inq.len = 0; p->sig_inq.nmsigs.next = NULL; p->sig_inq.nmsigs.last = NULL; +#ifdef ERTS_PROC_SIG_HARD_DEBUG + p->sig_inq.may_contain_heap_terms = 0; +#endif p->bif_timers = NULL; p->mbuf = NULL; p->msg_frag = NULL; @@ -11933,8 +11451,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
p->trace_msg_q = NULL; p->scheduler_data = NULL; - p->suspendee = NIL; - p->pending_suspenders = NULL; #if !defined(NO_FPE_SIGNALS) || defined(HIPE) p->fp_exception = 0; @@ -12101,7 +11617,6 @@ void erts_init_empty_process(Process *p) ERTS_P_MONITORS(p) = NULL; ERTS_P_LT_MONITORS(p) = NULL; ERTS_P_LINKS(p) = NULL; /* List of links */ - p->suspend_monitors = NULL; p->sig_qs.first = NULL; p->sig_qs.last = &p->sig_qs.first; p->sig_qs.cont = NULL; @@ -12116,6 +11631,9 @@ void erts_init_empty_process(Process *p) p->sig_inq.len = 0; p->sig_inq.nmsigs.next = NULL; p->sig_inq.nmsigs.last = NULL; +#ifdef ERTS_PROC_SIG_HARD_DEBUG + p->sig_inq.may_contain_heap_terms = 0; +#endif p->bif_timers = NULL; p->dictionary = NULL; p->seq_trace_clock = 0; @@ -12149,7 +11667,6 @@ void erts_init_empty_process(Process *p) #ifdef HIPE hipe_init_process(&p->hipe); - hipe_init_process_smp(&p->hipe_smp); #endif INIT_HOLE_CHECK(p); @@ -12162,8 +11679,6 @@ void erts_init_empty_process(Process *p) erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); p->scheduler_data = NULL; - p->suspendee = NIL; - p->pending_suspenders = NULL; erts_proc_lock_init(p); erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0); @@ -12201,7 +11716,6 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(ERTS_P_MONITORS(p) == NULL); ASSERT(ERTS_P_LT_MONITORS(p) == NULL); ASSERT(ERTS_P_LINKS(p) == NULL); - ASSERT(p->suspend_monitors == NULL); ASSERT(p->sig_qs.first == NULL); ASSERT(p->sig_qs.len == 0); ASSERT(p->bif_timers == NULL); @@ -12215,8 +11729,6 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->sig_inq.first == NULL); ASSERT(p->sig_inq.len == 0); - ASSERT(p->suspendee == NIL); - ASSERT(p->pending_suspenders == NULL); /* Thing that erts_cleanup_empty_process() cleans up */ @@ -12322,8 +11834,6 @@ delete_process(Process* p) erts_cleanup_messages(p->sig_qs.cont); p->sig_qs.cont = NULL; - ASSERT(!p->suspend_monitors); - p->fvalue = NIL; } @@ -12403,6 +11913,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt) if (erts_monitor_is_target(mon)) { /* We are being watched... */ switch (mon->type) { + case ERTS_MON_TYPE_SUSPEND: case ERTS_MON_TYPE_PROC: erts_proc_sig_send_monitor_down(mon, reason); mon = NULL; @@ -12474,6 +11985,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt) else { /* Origin monitor */ /* We are watching someone else... 
*/ switch (mon->type) { + case ERTS_MON_TYPE_SUSPEND: case ERTS_MON_TYPE_PROC: erts_proc_sig_send_demonitor(mon); mon = NULL; @@ -12626,21 +12138,6 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt) erts_link_release(lnk); } -static void -resume_suspend_monitor(ErtsMonitor *mon, void *vc_p) -{ - ErtsMonitorSuspend *smon = erts_monitor_suspend(mon); - Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN, - smon->mon.other.item, ERTS_PROC_LOCK_STATUS); - if (suspendee) { - ASSERT(suspendee != vc_p); - if (smon->active) - resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); - } - erts_monitor_suspend_destroy(smon); -} - /* this function fishishes a process and propagates exit messages - called by process_main when a process dies */ void @@ -12672,8 +12169,6 @@ erts_do_exit_process(Process* p, Eterm reason) set_self_exiting(p, reason, NULL, NULL, NULL); - cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL); - if (IS_TRACED(p)) { if (IS_TRACED_FL(p, F_TRACE_CALLS)) erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING); @@ -12806,11 +12301,6 @@ erts_continue_exit_process(Process *p) p->flags &= ~F_USING_DDLL; } - if (p->suspend_monitors) - erts_monitor_tree_foreach_delete(&p->suspend_monitors, - resume_suspend_monitor, - p); - /* * The registered name *should* be the last "erlang resource" to * cleanup. @@ -13018,7 +12508,13 @@ erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks, erts_aint32_t *statep) { Process *rp = erts_proc_lookup_raw(pid); + erts_aint32_t fail_state = ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q; erts_aint32_t state; + ErtsProcLocks tmp_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ; + + tmp_locks |= locks; + if (statep) + fail_state |= *statep; if (!rp) { if (statep) @@ -13035,28 +12531,28 @@ erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks, if (state & ERTS_PSFLG_FREE) return NULL; - if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q)) + if (state & fail_state) return ERTS_PROC_LOCK_BUSY; - if (!locks) - return rp; - - if (erts_proc_trylock(rp, locks) == EBUSY) + if (erts_proc_trylock(rp, tmp_locks) == EBUSY) return ERTS_PROC_LOCK_BUSY; state = erts_atomic32_read_nob(&rp->state); if (statep) *statep = state; - if (state & ERTS_PSFLG_FREE) { - erts_proc_unlock(rp, locks); - return NULL; + if ((state & fail_state) + || rp->sig_inq.first + || rp->sig_qs.cont) { + erts_proc_unlock(rp, tmp_locks); + if (state & ERTS_PSFLG_FREE) + return NULL; + else + return ERTS_PROC_LOCK_BUSY; } - if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q)) { - erts_proc_unlock(rp, locks); - return ERTS_PROC_LOCK_BUSY; - } + if (tmp_locks != locks) + erts_proc_unlock(rp, tmp_locks & ~locks); return rp; } diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index e232776016..a60e117bab 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -593,6 +593,7 @@ typedef struct { ErtsDelayedAuxWorkWakeupJob *job; } delayed_wakeup; struct { + ErtsAlcuBlockscanYieldData alcu_blockscan; ErtsEtsAllYieldData ets_all; /* Other yielding operations... 
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e232776016..a60e117bab 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -593,6 +593,7 @@ typedef struct {
         ErtsDelayedAuxWorkWakeupJob *job;
     } delayed_wakeup;
     struct {
+        ErtsAlcuBlockscanYieldData alcu_blockscan;
         ErtsEtsAllYieldData ets_all;
         /* Other yielding operations... */
     } yield;
@@ -637,6 +638,7 @@ struct ErtsSchedulerData_ {
     ErtsSchedType type;
     Uint no;                    /* Scheduler number for normal schedulers */
     Uint dirty_no;              /* Scheduler number for dirty schedulers */
+    struct enif_environment_t *current_nif;
     Process *dirty_shadow_process;
     Port *current_port;
     ErtsRunQueue *run_queue;
@@ -658,6 +660,13 @@ struct ErtsSchedulerData_ {
         Uint64 out;
         Uint64 in;
     } io;
+    struct {
+        ErtsSignal* sig;
+        Eterm to;
+#ifdef DEBUG
+        Process* dbg_from;
+#endif
+    } pending_signal;

     Uint64 reductions;
     ErtsSchedWallTime sched_wall_time;
@@ -796,14 +805,15 @@ erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
 #define ERTS_PSD_ETS_OWNED_TABLES 6
 #define ERTS_PSD_ETS_FIXED_TABLES 7
 #define ERTS_PSD_DIST_ENTRY 8
-#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 9 /* keep last... */
+#define ERTS_PSD_PENDING_SUSPEND 9
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 10 /* keep last... */

-#define ERTS_PSD_SIZE 10
+#define ERTS_PSD_SIZE 11

 #if !defined(HIPE)
 #  undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
 #  undef ERTS_PSD_SIZE
-#  define ERTS_PSD_SIZE 9
+#  define ERTS_PSD_SIZE 10
 #endif

 typedef struct {
@@ -840,6 +850,9 @@ typedef struct {
 #define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
 #define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN

+#define ERTS_PSD_PENDING_SUSPEND_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_PENDING_SUSPEND_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
 typedef struct {
     ErtsProcLocks get_locks;
     ErtsProcLocks set_locks;
@@ -875,20 +888,6 @@ typedef struct {
 typedef struct ErtsProcSysTask_ ErtsProcSysTask;
 typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;

-
-typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
-struct ErtsPendingSuspend_ {
-    ErtsPendingSuspend *next;
-    ErtsPendingSuspend *end;
-    Eterm pid;
-    void (*handle_func)(Process *suspendee,
-                        ErtsProcLocks suspendee_locks,
-                        int suspendee_alive,
-                        Eterm pid);
-};
-
-
 /* Defines to ease the change of memory architecture */
 #  define HEAP_START(p)     (p)->heap
 #  define HEAP_TOP(p)       (p)->htop
@@ -983,9 +982,6 @@ struct process {

     Process *next;              /* Pointer to next process in run queue */

-    ErtsMonitor *suspend_monitors; /* Processes suspended by this process via
-                                      erlang:suspend_process/1 */
-
     ErtsSignalPrivQueues sig_qs; /* Signal queues */
     ErtsBifTimers *bif_timers;  /* Bif timers aiming at this process */

@@ -1049,12 +1045,7 @@ struct process {
     ErlTraceMessageQueue *trace_msg_q;
     erts_proc_lock_t lock;
     ErtsSchedulerData *scheduler_data;
-    Eterm suspendee;
-    ErtsPendingSuspend *pending_suspenders;
     erts_atomic_t run_queue;
-#ifdef HIPE
-    struct hipe_process_state_smp hipe_smp;
-#endif

 #ifdef CHECK_FOR_HOLES
     Eterm* last_htop;           /* No need to scan the heap below this point. */
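
The new ERTS_PSD_PENDING_SUSPEND slot above is guarded by the main process
lock for both reads and writes, per its *_GET_LOCKS/*_SET_LOCKS macros. An
access sketch for illustration only (the patch's own convenience wrappers,
ERTS_PROC_GET_PENDING_SUSPEND/ERTS_PROC_SET_PENDING_SUSPEND, appear further
down in this file; new_pending is a made-up name):

    /* caller is assumed to hold ERTS_PROC_LOCK_MAIN on p */
    void *pending = erts_psd_get(p, ERTS_PSD_PENDING_SUSPEND);
    if (!pending)
        (void) erts_psd_set(p, ERTS_PSD_PENDING_SUSPEND, new_pending);
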
@@ -1371,7 +1362,7 @@ extern int erts_system_profile_ts_type;
 #define F_DISTRIBUTION       (1 <<  6) /* Process used in distribution */
 #define F_USING_DDLL         (1 <<  7) /* Process has used the DDLL interface */
 #define F_HAVE_BLCKD_MSCHED  (1 <<  8) /* Process has blocked multi-scheduling */
-#define F_P2PNR_RESCHED      (1 <<  9) /* Process has been rescheduled via erts_pid2proc_not_running() */
+#define F_UNUSED             (1 <<  9)
 #define F_FORCE_GC           (1 << 10) /* Force gc at process in-scheduling */
 #define F_DISABLE_GC         (1 << 11) /* Disable GC (see below) */
 #define F_OFF_HEAP_MSGQ      (1 << 12) /* Off heap msg queue */
@@ -1388,10 +1379,12 @@ extern int erts_system_profile_ts_type;
 #define F_DIRTY_MAJOR_GC     (1 << 23) /* Dirty major GC scheduled */
 #define F_DIRTY_MINOR_GC     (1 << 24) /* Dirty minor GC scheduled */
 #define F_HIBERNATED         (1 << 25) /* Hibernated */
-#define F_LOCAL_SIGS_ONLY    (1 << 26)
+#define F_LOCAL_SIGS_ONLY    (1 << 26) /* Handle privq sigs only */
 #define F_TRAP_EXIT          (1 << 27) /* Trapping exit */
-#define F_DEFERRED_SAVED_LAST (1 << 28)
-#define F_DELAYED_PSIGQS_LEN (1 << 29)
+#define F_DEFERRED_SAVED_LAST (1 << 28) /* Deferred sig_qs.saved_last */
+#define F_DELAYED_PSIGQS_LEN (1 << 29) /* Delayed update of sig_qs.len */
+#define F_HIPE_RECV_LOCKED   (1 << 30) /* HiPE message queue locked */
+#define F_HIPE_RECV_YIELD    (1 << 31) /* HiPE receive yield */

 /*
  * F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -2039,6 +2032,11 @@ erts_psd_set(Process *p, int ix, void *data)
 #define ERTS_PROC_SET_DIST_ENTRY(P, DE) \
     ((DistEntry *) erts_psd_set((P), ERTS_PSD_DIST_ENTRY, (void *) (DE)))

+#define ERTS_PROC_GET_PENDING_SUSPEND(P) \
+    ((void *) erts_psd_get((P), ERTS_PSD_PENDING_SUSPEND))
+#define ERTS_PROC_SET_PENDING_SUSPEND(P, PS) \
+    ((void *) erts_psd_set((P), ERTS_PSD_PENDING_SUSPEND, (void *) (PS)))
+
 #ifdef HIPE
 #define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \
   ((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF))
@@ -2603,16 +2601,6 @@
 Process *erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks,
                                      erts_aint32_t *statep);

-Process *erts_pid2proc_not_running(Process *,
-                                   ErtsProcLocks,
-                                   Eterm,
-                                   ErtsProcLocks);
-Process *erts_pid2proc_nropt(Process *c_p,
-                             ErtsProcLocks c_p_locks,
-                             Eterm pid,
-                             ErtsProcLocks pid_locks);
-extern int erts_disable_proc_not_running_opt;
-
 #ifdef DEBUG
 #define ERTS_ASSERT_IS_NOT_EXITING(P) \
     do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0)
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 43f396c547..bd38eca4dc 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -921,6 +921,9 @@ ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks);

 ERTS_GLB_INLINE void erts_proc_inc_refc(Process *);
 ERTS_GLB_INLINE void erts_proc_dec_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+                                                  void (*func)(int, void *),
+                                                  void *arg);
 ERTS_GLB_INLINE void erts_proc_add_refc(Process *, Sint);
 ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *);

@@ -993,6 +996,21 @@ ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p)
     }
 }

+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+                                                  void (*func)(int, void *),
+                                                  void *arg)
+{
+    Sint referred;
+    ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+    referred = erts_ptab_atmc_dec_test_refc(&p->common);
+    if (!referred) {
+        ASSERT(ERTS_PROC_IS_EXITING(p));
+        (*func)(!0, arg);
+        erts_free_proc(p);
+        (*func)(0, arg);
+    }
+}
+
 ERTS_GLB_INLINE void
 erts_proc_add_refc(Process *p, Sint add_refc)
 {
     Sint referred;
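
erts_proc_dec_refc_free_func() above lets the holder of the last reference run
a callback around the actual freeing of the process structure: func is invoked
with a nonzero first argument just before erts_free_proc() and with zero right
after. A hypothetical usage sketch (free_hook and its semantics are made-up
for illustration):

    static void free_hook(int before, void *arg)
    {
        /* before != 0: the struct is about to be freed; 0: it is gone */
    }

    /* drop our reference; the hook only runs if this was the last one */
    erts_proc_dec_refc_free_func(p, free_hook, NULL);
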
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 18483fca35..bddf403b0a 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1184,6 +1184,54 @@ _ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
 #define is_map(x)       (is_boxed((x)) && is_map_header(*boxed_val(x)))
 #define is_not_map(x)   (!is_map(x))

+#define MAP_HEADER(hp, sz, keys)                \
+    ((hp)[0] = MAP_HEADER_FLATMAP,              \
+     (hp)[1] = sz,                              \
+     (hp)[2] = keys)
+
+#define MAP_SZ(sz) (MAP_HEADER_FLATMAP_SZ + 2*sz + 1)
+
+#define MAP0_SZ MAP_SZ(0)
+#define MAP1_SZ MAP_SZ(1)
+#define MAP2_SZ MAP_SZ(2)
+#define MAP3_SZ MAP_SZ(3)
+#define MAP4_SZ MAP_SZ(4)
+#define MAP5_SZ MAP_SZ(5)
+#define MAP0(hp)                                                        \
+    (MAP_HEADER(hp, 0, TUPLE0(hp+MAP_HEADER_FLATMAP_SZ)),               \
+     make_flatmap(hp))
+#define MAP1(hp, k1, v1)                                                \
+    (MAP_HEADER(hp, 1, TUPLE1(hp+1+MAP_HEADER_FLATMAP_SZ, k1)),         \
+     (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1,                                \
+     make_flatmap(hp))
+#define MAP2(hp, k1, v1, k2, v2)                                        \
+    (MAP_HEADER(hp, 2, TUPLE2(hp+2+MAP_HEADER_FLATMAP_SZ, k1, k2)),     \
+     (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2,                                \
+     make_flatmap(hp))
+#define MAP3(hp, k1, v1, k2, v2, k3, v3)                                \
+    (MAP_HEADER(hp, 3, TUPLE3(hp+3+MAP_HEADER_FLATMAP_SZ, k1, k2, k3)), \
+     (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3,                                \
+     make_flatmap(hp))
+#define MAP4(hp, k1, v1, k2, v2, k3, v3, k4, v4)                        \
+    (MAP_HEADER(hp, 4, TUPLE4(hp+4+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4)), \
+     (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4,                                \
+     make_flatmap(hp))
+#define MAP5(hp, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5)                \
+    (MAP_HEADER(hp, 5, TUPLE5(hp+5+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4, k5)), \
+     (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4,                                \
+     (hp)[MAP_HEADER_FLATMAP_SZ+4] = v5,                                \
+     make_flatmap(hp))
+

 /* number tests */

 #define is_integer(x)           (is_small(x) || is_big(x))
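
The MAPn/MAPn_SZ helpers above build small flat maps directly on a heap
segment: MAP_SZ(n) words hold the flatmap header, a keys tuple and the n
values. A usage sketch (hypothetical; assumes hp points at MAP2_SZ free words
of process heap):

    Eterm *hp = HAlloc(p, MAP2_SZ);          /* reserve heap for a 2-key map */
    Eterm map = MAP2(hp, am_error, am_false,
                         am_ok, am_true);    /* #{error => false, ok => true} */

Note that the patch's own call sites supply the keys in term order, which flat
maps require, and only advance hp (by MAPn_SZ) after the map has been built.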
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 1e833539b3..f074dd8bdb 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2044,7 +2044,7 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
 void
 erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
 {
-    enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_error_logger, msg, bp);
+    enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_logger, msg, bp);
 }

 void
@@ -2110,13 +2110,13 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
        erts_thr_progress_unblock();
        break;
     case SYS_MSG_TYPE_ERRLGR: {
-       char *no_elgger = "(no error logger present)";
+       char *no_elgger = "(no logger present)";
        Eterm *tp;
        Eterm tag;
        if (is_not_tuple(smqp->msg)) {
        unexpected_elmsg:
            erts_fprintf(stderr,
-                        "%s unexpected error logger message: %T\n",
+                        "%s unexpected logger message: %T\n",
                         no_elgger, smqp->msg);
        }
@@ -2284,7 +2284,7 @@ sys_msg_dispatcher_func(void *unused)
            }
            break;
        case SYS_MSG_TYPE_ERRLGR:
-           receiver = am_error_logger;
+           receiver = am_logger;
            break;
        default:
            receiver = NIL;
@@ -2313,7 +2313,7 @@ sys_msg_dispatcher_func(void *unused)
                erts_proc_unlock(proc, proc_locks);
            }
        }
-       else if (receiver == am_error_logger) {
+       else if (receiver == am_logger) {
            proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
            if (!proc)
                goto failure;
@@ -2379,7 +2379,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
            to = erts_get_system_profile();
            break;
        case SYS_MSG_TYPE_ERRLGR:
-           to = am_error_logger;
+           to = am_logger;
            break;
        default:
            to = NIL;
@@ -2629,6 +2629,38 @@ erts_tracer_to_term(Process *p, ErtsTracer tracer)
     }
 }

+Eterm
+erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer)
+{
+    Eterm res;
+    Eterm state;
+    Uint sz;
+
+    if (ERTS_TRACER_IS_NIL(tracer))
+        return am_false;
+
+    state = ERTS_TRACER_STATE(tracer);
+    sz = is_immed(state) ? 0 : size_object(state);
+
+    if (szp)
+        *szp += sz;
+
+    if (hpp)
+        res = is_immed(state) ? state : copy_struct(state, sz, hpp, ohp);
+    else
+        res = THE_NON_VALUE;
+
+    if (ERTS_TRACER_MODULE(tracer) != am_erl_tracer) {
+        if (szp)
+            *szp += 3;
+        if (hpp) {
+            res = TUPLE2(*hpp, ERTS_TRACER_MODULE(tracer), res);
+            *hpp += 3;
+        }
+    }
+
+    return res;
+}

 static ERTS_INLINE int
 send_to_tracer_nif_raw(Process *c_p, Process *tracee,
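
The new erts_build_tracer_to_term() follows the usual ERTS size-then-build
convention: call it once with szp to accumulate the required heap size, then
again with hpp to build the term. A sketch of that pattern (illustrative only;
assumes a process heap allocation via HAlloc):

    Uint sz = 0;
    Eterm *hp, res;

    (void) erts_build_tracer_to_term(NULL, NULL, &sz, tracer);   /* pass 1: size */
    hp = HAlloc(p, sz);
    res = erts_build_tracer_to_term(&hp, &MSO(p), NULL, tracer); /* pass 2: build */
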
"driver_lock", + dp->name_atom, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO); } else { erts_lcnt_uninstall(&dp->lock->lcnt); } @@ -7357,7 +7360,11 @@ no_stop_select_callback(ErlDrvEvent event, void* private) static int init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) { + drv->name_atom = erts_atom_put((byte*)de->driver_name, + sys_strlen(de->driver_name), + ERTS_ATOM_ENC_LATIN1, 1); drv->name = de->driver_name; + ASSERT(de->extended_marker == ERL_DRV_EXTENDED_MARKER); ASSERT(de->major_version >= 2); drv->version.major = de->major_version; @@ -7367,13 +7374,10 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) { drv->lock = NULL; } else { - Eterm driver_id = erts_atom_put((byte *) drv->name, - sys_strlen(drv->name), - ERTS_ATOM_ENC_LATIN1, 1); - drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, sizeof(erts_mtx_t)); - erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO); + erts_mtx_init(drv->lock, "driver_lock", drv->name_atom, + ERTS_LOCK_FLAGS_CATEGORY_IO); } drv->entry = de; @@ -7459,18 +7463,12 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, erts_tsd_set(driver_list_lock_status_key, (void *) 1); } - if (taint) { - Eterm name_atom = erts_atom_put((byte*)de->driver_name, - sys_strlen(de->driver_name), - ERTS_ATOM_ENC_LATIN1, 0); - if (is_atom(name_atom)) - erts_add_taint(name_atom); - else - err = 1; - } - if (!err) { err = init_driver(dp, de, handle); + + if (taint) { + erts_add_taint(dp->name_atom); + } } if (err) { diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab index 26bea0efc6..9bf3aefaca 100644 --- a/erts/emulator/beam/msg_instrs.tab +++ b/erts/emulator/beam/msg_instrs.tab @@ -102,6 +102,9 @@ i_loop_rec(Dest) { if (ERTS_UNLIKELY(msgp == NULL)) { int get_out; SWAPOUT; + $SET_CP_I_ABS(I); + c_p->arity = 0; + c_p->current = NULL; FCALLS -= erts_proc_sig_receive_helper(c_p, FCALLS, neg_o_reds, &msgp, &get_out); SWAPIN; diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 188e02eff8..d74052d8b2 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -140,7 +140,7 @@ Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz) { Eterm* p = ptr; - int i; + Uint i; for (i = 0; i < sz; i++) { *p++ = ERTS_HOLE_MARKER; @@ -1924,155 +1924,165 @@ make_internal_hash(Eterm term, Uint32 salt) #undef HCONST #undef MIX +/* error_logger ! + {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }} +*/ static Eterm -do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp, - ErlHeapFragment **bp, Process **p, Uint sz) +do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid, + Eterm **hp, ErlOffHeap **ohp, + ErlHeapFragment **bp, Uint sz) { Uint gl_sz; gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader); - sz = sz + gl_sz; + sz = sz + gl_sz + 6 /*outer 5-tuple*/ + + MAP2_SZ /* error_logger map */; + + *pid = erts_get_current_pid(); + + if (is_nil(gleader) && is_non_value(*pid)) { + sz += MAP2_SZ /* metadata map no gl, no pid */; + } else if (is_nil(gleader) || is_non_value(*pid)) + sz += MAP3_SZ /* metadata map no gl or no pid*/; + else + sz += MAP4_SZ /* metadata map w gl w pid*/; + + *ts = ERTS_MONOTONIC_TO_USEC(erts_get_monotonic_time(NULL)) + ERTS_MONOTONIC_OFFSET_USEC; + erts_bld_sint64(NULL, &sz, *ts); *bp = new_message_buffer(sz); *ohp = &(*bp)->off_heap; *hp = (*bp)->mem; - return (is_nil(gleader) - ? 
@@ -1924,155 +1924,165 @@ make_internal_hash(Eterm term, Uint32 salt)
 #undef HCONST
 #undef MIX

+/* error_logger !
+   {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }}
+*/
 static Eterm
-do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
-                           ErlHeapFragment **bp, Process **p, Uint sz)
+do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
+                           Eterm **hp, ErlOffHeap **ohp,
+                           ErlHeapFragment **bp, Uint sz)
 {
     Uint gl_sz;
     gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
-    sz = sz + gl_sz;
+    sz = sz + gl_sz + 6 /*outer 5-tuple*/
+        + MAP2_SZ /* error_logger map */;
+
+    *pid = erts_get_current_pid();
+
+    if (is_nil(gleader) && is_non_value(*pid)) {
+        sz += MAP2_SZ /* metadata map no gl, no pid */;
+    } else if (is_nil(gleader) || is_non_value(*pid))
+        sz += MAP3_SZ /* metadata map no gl or no pid*/;
+    else
+        sz += MAP4_SZ /* metadata map w gl w pid*/;
+
+    *ts = ERTS_MONOTONIC_TO_USEC(erts_get_monotonic_time(NULL)) + ERTS_MONOTONIC_OFFSET_USEC;
+    erts_bld_sint64(NULL, &sz, *ts);

     *bp = new_message_buffer(sz);
     *ohp = &(*bp)->off_heap;
     *hp = (*bp)->mem;

-    return (is_nil(gleader)
-            ? am_noproc
-            : (IS_CONST(gleader)
-               ? gleader
-               : copy_struct(gleader,gl_sz,hp,*ohp)));
+    return copy_struct(gleader,gl_sz,hp,*ohp);
 }

-static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *bp,
-                                   Process *p, Eterm message)
+static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args,
+                                   ErtsMonotonicTime ts, Eterm pid,
+                                   Eterm *hp, ErlHeapFragment *bp)
 {
-#ifdef HARDDEBUG
-    erts_fprintf(stderr, "%T\n", message);
-#endif
-    {
-        Eterm from = erts_get_current_pid();
-        if (is_not_internal_pid(from))
-            from = NIL;
-        erts_queue_error_logger_message(from, message, bp);
+    Eterm message, md, el_tag = tag;
+    Eterm time = erts_bld_sint64(&hp, NULL, ts);
+
+    /* This mapping is needed for the backwards compatible error_logger */
+    switch (tag) {
+    case am_info: el_tag = am_info_msg; break;
+    case am_warning: el_tag = am_warning_msg; break;
+    default:
+        ASSERT(am_error);
+        break;
     }
+
+    md = MAP2(hp, am_emulator, am_true,
+              am_atom_put("tag", 3), el_tag);
+    hp += MAP2_SZ;
+
+    if (is_nil(gl) && is_non_value(pid)) {
+        /* no gl and no pid, probably from a port */
+        md = MAP2(hp,
+                  am_error_logger, md,
+                  am_time, time);
+        hp += MAP2_SZ;
+        pid = NIL;
+    } else if (is_nil(gl)) {
+        /* no gl */
+        md = MAP3(hp,
+                  am_error_logger, md,
+                  am_pid, pid,
+                  am_time, time);
+        hp += MAP3_SZ;
+    } else if (is_non_value(pid)) {
+        /* no gl */
+        md = MAP3(hp,
+                  am_error_logger, md,
+                  am_atom_put("gl", 2), gl,
+                  am_time, time);
+        hp += MAP3_SZ;
+        pid = NIL;
+    } else {
+        md = MAP4(hp,
+                  am_error_logger, md,
+                  am_atom_put("gl", 2), gl,
+                  am_pid, pid,
+                  am_time, time);
+        hp += MAP4_SZ;
+    }
+
+    message = TUPLE5(hp, am_log, tag, format, args, md);
+    erts_queue_error_logger_message(pid, message, bp);
 }

-/* error_logger !
-   {notify,{info_msg,gleader,{emulator,format,[args]}}} |
-   {notify,{error,gleader,{emulator,format,[args]}}} |
-   {notify,{warning_msg,gleader,{emulator,format,[args}]}} */
-static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
+static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len)
 {
     Uint sz;
-    Eterm gl;
-    Eterm list,args,format,tuple1,tuple2,tuple3;
+    Eterm list, args, format, pid;
+    ErtsMonotonicTime ts;
     Eterm *hp = NULL;
     ErlOffHeap *ohp = NULL;
     ErlHeapFragment *bp = NULL;
-    Process *p = NULL;
-
-    ASSERT(is_atom(tag));
-
-    if (len <= 0) {
-        return -1;
-    }

     sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
-        + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */
        + 8 /* "~s~n" */;

     /* gleader size is accounted and allocated next */
-    gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
-    if(is_nil(gl)) {
-        /* buf *always* points to a null terminated string */
-        erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
-                     tag, buf);
-        return 0;
-    }
+    gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);

     list = buf_to_intlist(&hp, buf, len, NIL);
     args = CONS(hp,list,NIL);
     hp += 2;
     format = buf_to_intlist(&hp, "~s~n", 4, NIL);

-    tuple1 = TUPLE3(hp, am_emulator, format, args);
-    hp += 4;
-    tuple2 = TUPLE3(hp, tag, gl, tuple1);
-    hp += 4;
-    tuple3 = TUPLE2(hp, am_notify, tuple2);
-
-    do_send_logger_message(hp, ohp, bp, p, tuple3);
+    do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
     return 0;
 }

-static int do_send_term_to_logger(Eterm tag, Eterm gleader,
-                                  char *buf, int len, Eterm args)
+static int do_send_term_to_logger(Eterm tag, Eterm gl,
+                                  char *buf, size_t len, Eterm args)
 {
     Uint sz;
-    Eterm gl;
     Uint args_sz;
-    Eterm format,tuple1,tuple2,tuple3;
+    Eterm format, pid;
+    ErtsMonotonicTime ts;
+
     Eterm *hp = NULL;
     ErlOffHeap *ohp = NULL;
     ErlHeapFragment *bp = NULL;
-    Process *p = NULL;

-    ASSERT(is_atom(tag));
+    ASSERT(len > 0);

     args_sz = size_object(args);
-    sz = len * 2 /* format */ + args_sz
-        + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */;
+    sz = len * 2 /* format */ + args_sz;

     /* gleader size is accounted and allocated next */
-    gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
-    if(is_nil(gl)) {
-        /* buf *always* points to a null terminated string */
-        erts_fprintf(stderr, "(no error logger present) %T: \"%s\" %T\n",
-                     tag, buf, args);
-        return 0;
-    }
+    gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);

     format = buf_to_intlist(&hp, buf, len, NIL);
     args = copy_struct(args, args_sz, &hp, ohp);

-    tuple1 = TUPLE3(hp, am_emulator, format, args);
-    hp += 4;
-    tuple2 = TUPLE3(hp, tag, gl, tuple1);
-    hp += 4;
-    tuple3 = TUPLE2(hp, am_notify, tuple2);
-
-    do_send_logger_message(hp, ohp, bp, p, tuple3);
+    do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
     return 0;
 }

 static ERTS_INLINE int
-send_info_to_logger(Eterm gleader, char *buf, int len)
+send_info_to_logger(Eterm gleader, char *buf, size_t len)
 {
-    return do_send_to_logger(am_info_msg, gleader, buf, len);
+    return do_send_to_logger(am_info, gleader, buf, len);
 }

 static ERTS_INLINE int
-send_warning_to_logger(Eterm gleader, char *buf, int len)
+send_warning_to_logger(Eterm gleader, char *buf, size_t len)
 {
-    Eterm tag;
-    switch (erts_error_logger_warnings) {
-    case am_info: tag = am_info_msg; break;
-    case am_warning: tag = am_warning_msg; break;
-    default: tag = am_error; break;
-    }
-    return do_send_to_logger(tag, gleader, buf, len);
+    return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len);
 }

 static ERTS_INLINE int
-send_error_to_logger(Eterm gleader, char *buf, int len)
+send_error_to_logger(Eterm gleader, char *buf, size_t len)
 {
     return do_send_to_logger(am_error, gleader, buf, len);
 }

 static ERTS_INLINE int
-send_error_term_to_logger(Eterm gleader, char *buf, int len, Eterm args)
+send_error_term_to_logger(Eterm gleader, char *buf, size_t len, Eterm args)
 {
     return do_send_term_to_logger(am_error, gleader, buf, len, args);
 }
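
For reference, a log message produced by the rewritten path above now has the
shape sketched below (assembled from the code; field order and variable names
are illustrative):

    {log, Level, Format, Args,
     #{pid => Pid,                %% [] when there is no current process
       gl => GroupLeader,         %% key omitted when the group leader is []
       time => MicroSecTimestamp,
       error_logger => #{emulator => true,
                         tag => error | info_msg | warning_msg}}}

where Level is error, info or warning, and the inner tag map keeps the old
error_logger report names for backwards compatibility.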