-rw-r--r--  erts/emulator/beam/break.c             | 17
-rw-r--r--  erts/emulator/beam/erl_process.c       | 63
-rw-r--r--  erts/emulator/beam/erl_process.h       |  1
-rw-r--r--  lib/observer/src/cdv_sched_cb.erl      | 18
-rw-r--r--  lib/observer/src/crashdump_viewer.erl  | 84
-rw-r--r--  lib/observer/src/crashdump_viewer.hrl  |  3
6 files changed, 153 insertions, 33 deletions
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 127c14e21b..5ace997344 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -911,6 +911,23 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
 	    erts_print_scheduler_info(to, to_arg, ERTS_SCHEDULER_IX(i)),
 	    erts_cbprintf(to, to_arg, "** crashed **\n"));
     }
+#ifdef ERTS_DIRTY_SCHEDULERS
+    for (i = 0; i < erts_no_dirty_cpu_schedulers; i++) {
+        ERTS_SYS_TRY_CATCH(
+            erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_CPU_SCHEDULER_IX(i)),
+            erts_cbprintf(to, to_arg, "** crashed **\n"));
+    }
+    erts_cbprintf(to, to_arg, "=dirty_cpu_run_queue\n");
+    erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_CPU_RUNQ);
+
+    for (i = 0; i < erts_no_dirty_io_schedulers; i++) {
+        ERTS_SYS_TRY_CATCH(
+            erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_IO_SCHEDULER_IX(i)),
+            erts_cbprintf(to, to_arg, "** crashed **\n"));
+    }
+    erts_cbprintf(to, to_arg, "=dirty_io_run_queue\n");
+    erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_IO_RUNQ);
+#endif /* ERTS_DIRTY_SCHEDULERS */
 #endif
 
 #ifdef ERTS_SMP
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 63e9275ac1..5fcb100fae 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -14259,16 +14259,35 @@ stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
     return yreg;
 }
 
+static void print_current_process_info(fmtfn_t, void *to_arg, ErtsSchedulerData*);
+
 /*
  * Print scheduler information
  */
 void
-erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
+erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp)
+{
     int i;
     erts_aint32_t flg;
-    Process *p;
 
-    erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+    switch (esdp->type) {
+    case ERTS_SCHED_NORMAL:
+        erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+        break;
+#ifdef ERTS_DIRTY_SCHEDULERS
+    case ERTS_SCHED_DIRTY_CPU:
+        erts_print(to, to_arg, "=dirty_cpu_scheduler:%u\n",
+                   (esdp->dirty_no + erts_no_schedulers));
+        break;
+    case ERTS_SCHED_DIRTY_IO:
+        erts_print(to, to_arg, "=dirty_io_scheduler:%u\n",
+                   (esdp->dirty_no + erts_no_schedulers + erts_no_dirty_cpu_schedulers));
+        break;
+#endif
+    default:
+        erts_print(to, to_arg, "=unknown_scheduler_type:%u\n", esdp->type);
+        break;
+    }
 
 #ifdef ERTS_SMP
     flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags);
@@ -14316,10 +14335,24 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
     }
     erts_print(to, to_arg, "\n");
 
-    erts_print(to, to_arg, "Current Port: ");
-    if (esdp->current_port)
-        erts_print(to, to_arg, "%T", esdp->current_port->common.id);
-    erts_print(to, to_arg, "\n");
+    if (esdp->type == ERTS_SCHED_NORMAL) {
+        erts_print(to, to_arg, "Current Port: ");
+        if (esdp->current_port)
+            erts_print(to, to_arg, "%T", esdp->current_port->common.id);
+        erts_print(to, to_arg, "\n");
+
+        erts_print_run_queue_info(to, to_arg, esdp->run_queue);
+    }
+
+    /* This *MUST* to be the last information in scheduler block */
+    print_current_process_info(to, to_arg, esdp);
+}
+
+void erts_print_run_queue_info(fmtfn_t to, void *to_arg,
+                               ErtsRunQueue *run_queue)
+{
+    erts_aint32_t flg;
+    int i;
 
     for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) {
         erts_print(to, to_arg, "Run Queue ");
@@ -14341,12 +14374,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
             break;
         }
         erts_print(to, to_arg, "Length: %d\n",
-                   erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len));
+                   erts_smp_atomic32_read_dirty(&run_queue->procs.prio_info[i].len));
     }
     erts_print(to, to_arg, "Run Queue Port Length: %d\n",
-               erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len));
+               erts_smp_atomic32_read_dirty(&run_queue->ports.info.len));
 
-    flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags);
+    flg = erts_smp_atomic32_read_dirty(&run_queue->flags);
     erts_print(to, to_arg, "Run Queue Flags: ");
     for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) {
         erts_aint32_t chk = (1 << i);
@@ -14413,9 +14446,15 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
         }
     }
     erts_print(to, to_arg, "\n");
+}
+
+
+static void print_current_process_info(fmtfn_t to, void *to_arg,
+                                       ErtsSchedulerData* esdp)
+{
+    Process *p = esdp->current_process;
+    erts_aint32_t flg;
 
-    /* This *MUST* to be the last information in scheduler block */
-    p = esdp->current_process;
     erts_print(to, to_arg, "Current Process: ");
     if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) {
         flg = erts_smp_atomic32_read_dirty(&p->state);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 639818c20c..dd89e30f82 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1868,6 +1868,7 @@ void erts_stack_dump(fmtfn_t to, void *to_arg, Process *);
 void erts_limited_stack_trace(fmtfn_t to, void *to_arg, Process *);
 void erts_program_counter_info(fmtfn_t to, void *to_arg, Process *);
 void erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp);
+void erts_print_run_queue_info(fmtfn_t, void *to_arg, ErtsRunQueue*);
 void erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
 void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
diff --git a/lib/observer/src/cdv_sched_cb.erl b/lib/observer/src/cdv_sched_cb.erl
index 192aaf31a7..d2696a276f 100644
--- a/lib/observer/src/cdv_sched_cb.erl
+++ b/lib/observer/src/cdv_sched_cb.erl
@@ -31,7 +31,8 @@
 
 %% Columns
 -define(COL_ID,   0).
--define(COL_PROC, ?COL_ID+1).
+-define(COL_TYPE, ?COL_ID+1).
+-define(COL_PROC, ?COL_TYPE+1).
 -define(COL_PORT, ?COL_PROC+1).
 -define(COL_RQL,  ?COL_PORT+1).
 -define(COL_PQL,  ?COL_RQL+1).
@@ -39,6 +40,7 @@
 %% Callbacks for cdv_virtual_list_wx
 col_to_elem(id) -> col_to_elem(?COL_ID);
 col_to_elem(?COL_ID) -> #sched.name;
+col_to_elem(?COL_TYPE) -> #sched.type;
 col_to_elem(?COL_PROC) -> #sched.process;
 col_to_elem(?COL_PORT) -> #sched.port;
 col_to_elem(?COL_RQL) -> #sched.run_q;
@@ -46,6 +48,7 @@ col_to_elem(?COL_PQL) -> #sched.port_q.
 
 col_spec() ->
     [{"Id", ?wxLIST_FORMAT_RIGHT, 50},
+     {"Type", ?wxLIST_FORMAT_CENTER, 100},
      {"Current Process", ?wxLIST_FORMAT_CENTER, 130},
      {"Current Port", ?wxLIST_FORMAT_CENTER, 130},
      {"Run Queue Length", ?wxLIST_FORMAT_RIGHT, 180},
@@ -73,7 +76,8 @@ detail_pages() ->
     [{"Scheduler Information", fun init_gen_page/2}].
 
 init_gen_page(Parent, Info0) ->
-    Fields = info_fields(),
+    Type = proplists:get_value(type, Info0),
+    Fields = info_fields(Type),
     Details = proplists:get_value(details, Info0),
     Info = if is_map(Details) -> Info0 ++ maps:to_list(Details);
               true -> Info0
@@ -81,15 +85,16 @@ init_gen_page(Parent, Info0) ->
     cdv_info_wx:start_link(Parent,{Fields,Info,[]}).
 
 %%% Internal
-info_fields() ->
+info_fields(Type) ->
     [{"Scheduler Overview",
       [{"Id", id},
+       {"Type", type},
       {"Current Process",process},
       {"Current Port", port},
       {"Sleep Info Flags", sleep_info},
       {"Sleep Aux Work", sleep_aux}
      ]},
-    {"Run Queues",
+    {run_queues_header(Type),
     [{"Flags", runq_flags},
      {"Priority Max Length", runq_max},
      {"Priority High Length", runq_high},
@@ -116,3 +121,8 @@ info_fields() ->
      {" ", {currp_stack, 11}}
     ]}
    ].
+
+run_queues_header(normal) ->
+    "Run Queues";
+run_queues_header(DirtyX) ->
+    "Run Queues (common for all '" ++ atom_to_list(DirtyX) ++ "' schedulers)".
diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl
index e209f8e78b..6b85dc6bc2 100644
--- a/lib/observer/src/crashdump_viewer.erl
+++ b/lib/observer/src/crashdump_viewer.erl
@@ -116,6 +116,10 @@
 -define(allocator,allocator).
 -define(atoms,atoms).
 -define(binary,binary).
+-define(dirty_cpu_scheduler,dirty_cpu_scheduler).
+-define(dirty_cpu_run_queue,dirty_cpu_run_queue).
+-define(dirty_io_scheduler,dirty_io_scheduler).
+-define(dirty_io_run_queue,dirty_io_run_queue).
 -define(ende,ende).
 -define(erl_crash_dump,erl_crash_dump).
 -define(ets,ets).
@@ -2522,22 +2526,49 @@ get_indextableinfo1(Fd,IndexTable) ->
 %%-----------------------------------------------------------------
 %% Page with scheduler table information
 schedulers(File) ->
-    case lookup_index(?scheduler) of
-	[] ->
-	    [];
-	Schedulers ->
-	    Fd = open(File),
-	    R = lists:map(fun({Name,Start}) ->
-				  get_schedulerinfo(Fd,Name,Start)
-			  end,
-			  Schedulers),
-	    close(Fd),
-	    R
-    end.
+    Fd = open(File),
+
+    Schds0 = case lookup_index(?scheduler) of
+                 [] ->
+                     [];
+                 Normals ->
+                     [{Normals, #sched{type=normal}}]
+             end,
+    Schds1 = case lookup_index(?dirty_cpu_scheduler) of
+                 [] ->
+                     Schds0;
+                 DirtyCpus ->
+                     [{DirtyCpus, get_dirty_runqueue(Fd, ?dirty_cpu_run_queue)}
+                      | Schds0]
+             end,
+    Schds2 = case lookup_index(?dirty_io_scheduler) of
+                 [] ->
+                     Schds1;
+                 DirtyIos ->
+                     [{DirtyIos, get_dirty_runqueue(Fd, ?dirty_io_run_queue)}
+                      | Schds1]
+             end,
 
-get_schedulerinfo(Fd,Name,Start) ->
+    R = schedulers1(Fd, Schds2, []),
+    close(Fd),
+    R.
+
+schedulers1(_Fd, [], Acc) ->
+    Acc;
+schedulers1(Fd, [{Scheds,Sched0} | Tail], Acc0) ->
+    Acc1 = lists:foldl(fun({Name,Start}, AccIn) ->
+                               [get_schedulerinfo(Fd,Name,Start,Sched0) | AccIn]
+                       end,
+                       Acc0,
+                       Scheds),
+    schedulers1(Fd, Tail, Acc1).
+
+get_schedulerinfo(Fd,Name,Start,Sched0) ->
     pos_bof(Fd,Start),
-    get_schedulerinfo1(Fd,#sched{name=Name}).
+    get_schedulerinfo1(Fd,Sched0#sched{name=list_to_integer(Name)}).
+
+sched_type(?dirty_cpu_run_queue) -> dirty_cpu;
+sched_type(?dirty_io_run_queue) -> dirty_io.
 
 get_schedulerinfo1(Fd, Sched) ->
     case get_schedulerinfo2(Fd, Sched) of
@@ -2577,14 +2608,31 @@ get_schedulerinfo2(Fd, Sched=#sched{details=Ds}) ->
 	Other ->
 	    case Sched#sched.type of
 		normal ->
-		    get_runqueue_info(Fd, Other, Sched);
+		    get_runqueue_info2(Fd, Other, Sched);
 		_ ->
 		    unexpected(Fd,Other,"dirty scheduler information"),
 		    {done, Sched}
 	    end
     end.
 
-get_runqueue_info(Fd, LineHead, Sched=#sched{details=Ds}) ->
+get_dirty_runqueue(Fd, Tag) ->
+    case lookup_index(Tag) of
+        [{_, Start}] ->
+            pos_bof(Fd,Start),
+            get_runqueue_info1(Fd,#sched{type=sched_type(Tag)});
+        [] ->
+            #sched{}
+    end.
+
+get_runqueue_info1(Fd, Sched) ->
+    case get_runqueue_info2(Fd, line_head(Fd), Sched) of
+        {more, Sched2} ->
+            get_runqueue_info1(Fd, Sched2);
+        {done, Sched2} ->
+            Sched2
+    end.
+
+get_runqueue_info2(Fd, LineHead, Sched=#sched{details=Ds}) ->
     case LineHead of
 	"Run Queue Max Length" ->
 	    RQMax = list_to_integer(bytes(Fd)),
@@ -3044,6 +3092,10 @@ tag_to_atom("allocated_areas") -> ?allocated_areas;
 tag_to_atom("allocator") -> ?allocator;
 tag_to_atom("atoms") -> ?atoms;
 tag_to_atom("binary") -> ?binary;
+tag_to_atom("dirty_cpu_scheduler") -> ?dirty_cpu_scheduler;
+tag_to_atom("dirty_cpu_run_queue") -> ?dirty_cpu_run_queue;
+tag_to_atom("dirty_io_scheduler") -> ?dirty_io_scheduler;
+tag_to_atom("dirty_io_run_queue") -> ?dirty_io_run_queue;
 tag_to_atom("end") -> ?ende;
 tag_to_atom("erl_crash_dump") -> ?erl_crash_dump;
 tag_to_atom("ets") -> ?ets;
diff --git a/lib/observer/src/crashdump_viewer.hrl b/lib/observer/src/crashdump_viewer.hrl
index 25a37656be..7f24fcca9a 100644
--- a/lib/observer/src/crashdump_viewer.hrl
+++ b/lib/observer/src/crashdump_viewer.hrl
@@ -112,10 +112,11 @@
 
 -record(sched,
 	{name,
+	 type,
 	 process,
 	 port,
 	 run_q=0,
-	 port_q=0,
+	 port_q,
 	 details=#{}
 	 }).
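
For reference, a crash dump written with this change gains one block per dirty scheduler plus a single shared run-queue block per dirty scheduler type, after the existing normal-scheduler blocks. A minimal illustrative excerpt follows (values are made up; section names and labels are taken from the format strings above, and the numbering assumes 8 normal and 2 dirty CPU schedulers so dirty scheduler ids start at 9):

    =dirty_cpu_scheduler:9
    Current Process: <0.87.0>
    =dirty_cpu_scheduler:10
    Current Process:
    =dirty_cpu_run_queue
    Run Queue Max Length: 0
    Run Queue Port Length: 0
    Run Queue Flags:
    =dirty_io_scheduler:11
    Current Process:
    =dirty_io_run_queue
    Run Queue Max Length: 0
    Run Queue Port Length: 0
    Run Queue Flags:

crashdump_viewer picks these blocks up through the new dirty_cpu_scheduler, dirty_io_scheduler, dirty_cpu_run_queue and dirty_io_run_queue tags, and the scheduler tab shows the scheduler kind in the new "Type" column.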