author     Patrik Nyblom <[email protected]>   2012-02-07 18:00:35 +0100
committer  Patrik Nyblom <[email protected]>   2012-02-07 18:00:35 +0100
commit     6004ec3e1785913ff0b74054cf614a35c180d54a (patch)
tree       1865eab06f4254649f4d9f3e0fbd693c854df5c0 /erts
parent     8d59a1fc518719c8c445d9d94b23c173c18b4438 (diff)
parent     6992261d258f2fc0b25ddf99ebcbf66ae02f5df8 (diff)
download   otp-6004ec3e1785913ff0b74054cf614a35c180d54a.tar.gz
           otp-6004ec3e1785913ff0b74054cf614a35c180d54a.tar.bz2
           otp-6004ec3e1785913ff0b74054cf614a35c180d54a.zip
Merge branch 'dgud/sched-work-time/OTP-9858' into maint
* dgud/sched-work-time/OTP-9858:
emulator: Document and test scheduler_wall_time
Implement statistics(scheduler_wall_time)
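
As a quick orientation before the diff: the usage pattern documented in the erlang.xml hunk below boils down to enabling the measurement with a system flag and then sampling it with statistics/1. A minimal sketch, assuming the flag starts out disabled; the module and function names here are made up for illustration:

    -module(swt_demo).
    -export([demo/0]).

    %% Minimal usage sketch of the API added by this merge.
    demo() ->
        %% Measurement is off by default; system_flag/2 returns the
        %% previous setting, so 'false' is expected on a fresh node.
        false = erlang:system_flag(scheduler_wall_time, true),
        %% One {SchedulerId, WorkedTime, TotalTime} tuple per scheduler.
        %% The list is unsorted and the time unit is unspecified, so
        %% only ratios between two samples are meaningful.
        [{_Id, _Worked, _Total} | _] = erlang:statistics(scheduler_wall_time),
        %% Turn the measurement off again; statistics/1 then returns
        %% 'undefined'.
        true = erlang:system_flag(scheduler_wall_time, false),
        undefined = erlang:statistics(scheduler_wall_time),
        ok.

Note that erlang:system_flag(scheduler_wall_time, Boolean) returns the previous setting and traps into erlang:await_sched_wall_time_modifications/2, so every scheduler has acknowledged the change before the call returns.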
Diffstat (limited to 'erts')
-rw-r--r--  erts/doc/src/erlang.xml                  |  26
-rw-r--r--  erts/emulator/beam/atom.names            |   3
-rw-r--r--  erts/emulator/beam/bif.c                 |  18
-rw-r--r--  erts/emulator/beam/erl_alloc.types       |   2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c        |  11
-rw-r--r--  erts/emulator/beam/erl_process.c         | 252
-rw-r--r--  erts/emulator/beam/erl_process.h         |  14
-rw-r--r--  erts/emulator/test/statistics_SUITE.erl  |  98
-rw-r--r--  erts/preloaded/ebin/erlang.beam          | bin 41136 -> 42416 bytes
-rw-r--r--  erts/preloaded/src/erlang.erl            |  34
10 files changed, 436 insertions, 22 deletions
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml index a21021d864..fbe7b36163 100644 --- a/erts/doc/src/erlang.xml +++ b/erts/doc/src/erlang.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2011</year> + <year>1996</year><year>2012</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -4933,6 +4933,21 @@ true</pre> threads in the Erlang run-time system and may therefore be greater than the wall-clock time.</p> </item> + <tag><marker id="statistics_scheduler_wall_time"><c>scheduler_wall_time</c></marker></tag> + <item> + <p>Returns + <c>[{Scheduler_Id, Scheduler_Worked_Time, Scheduler_Total_Time}]</c>, time lapses are since the + the system flag <seealso marker="#system_flag_scheduler_wall_time">scheduler_wall_time</seealso> + was set to true. + Returns <c>undefined</c> if the system flag <seealso marker="#system_flag_scheduler_wall_time"> + scheduler_wall_time</seealso> is set to false. + </p> + <p>The list of scheduler information is unsorted and may come in different order + between calls. The time unit is undefined and may be changed and should only be used + to calculate relative utilization. + </p> + </item> + <tag><c>wall_clock</c></tag> <item> <p>Returns @@ -5305,6 +5320,14 @@ true</pre> flags. </p> </item> + <tag><marker id="system_flag_scheduler_wall_time"><c>erlang:system_flag(scheduler_wall_time, Boolean)</c></marker></tag> + <item> + <p>Turns on/off scheduler wall time measurements. </p> + <p>For more information see, + <seealso marker="#statistics_scheduler_wall_time">erlang:statistics(scheduler_wall_time)</seealso>. + </p> + </item> + <tag><marker id="system_flag_schedulers_online"><c>erlang:system_flag(schedulers_online, SchedulersOnline)</c></marker></tag> <item> <p>Sets the amount of schedulers online. Valid range is @@ -5316,6 +5339,7 @@ true</pre> <seealso marker="#system_info_schedulers_online">erlang:system_info(schedulers_online)</seealso>. 
</p> </item> + <tag><c>erlang:system_flag(trace_control_word, TCW)</c></tag> <item> <p>Sets the value of the node's trace control word to diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 71454b3e57..7be40976f6 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -95,6 +95,7 @@ atom atom atom atom_used atom attributes atom await_proc_exit +atom await_sched_wall_time_modifications atom awaiting_load atom awaiting_unload atom backtrace backtrace_depth @@ -239,6 +240,7 @@ atom generational atom get_seq_token atom get_tcw atom getenv +atom gather_sched_wall_time_result atom getting_linked atom getting_unlinked atom global @@ -554,6 +556,7 @@ atom waiting atom wall_clock atom warning atom warning_msg +atom scheduler_wall_time atom wordsize atom write_concurrency atom xor diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 55f4798892..f8305944a4 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -43,6 +43,9 @@ static Export* set_cpu_topology_trap = NULL; static Export* await_proc_exit_trap = NULL; Export* erts_format_cpu_topology_trap = NULL; +static Export *await_sched_wall_time_mod_trap; +static erts_smp_atomic32_t sched_wall_time; + #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) /* @@ -4160,6 +4163,18 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); + } else if (BIF_ARG_1 == am_scheduler_wall_time) { + if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { + erts_aint32_t new = BIF_ARG_2 == am_true ? 1 : 0; + erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time, + new); + Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new); + ASSERT(is_value(ref)); + BIF_TRAP2(await_sched_wall_time_mod_trap, + BIF_P, + ref, + old ? 
am_true : am_false); + } } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) { int what; if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2)) @@ -4457,6 +4472,9 @@ void erts_init_bif(void) am_format_cpu_topology, 1); await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3); + await_sched_wall_time_mod_trap + = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2); + erts_smp_atomic32_init_nob(&sched_wall_time, 0); } #ifdef HARDDEBUG diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 962db8b831..90a6c0cbee 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -367,6 +367,7 @@ type EXPORT LONG_LIVED_LOW CODE export_entry type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh type NLINK_SH STANDARD_LOW PROCESSES nlink_sh type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request +type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request +else # "fullword" @@ -383,6 +384,7 @@ type EXPORT LONG_LIVED CODE export_entry type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh type NLINK_SH FIXED_SIZE PROCESSES nlink_sh type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request +type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request +endif diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 5a806777fe..ebd475f73a 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -57,6 +57,8 @@ static Export* alloc_info_trap = NULL; static Export* alloc_sizes_trap = NULL; +static Export *gather_sched_wall_time_res_trap; + #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) /* Keep erts_system_version as a global variable for easy access from a core */ @@ -3180,7 +3182,12 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1) Eterm res; Eterm* hp; - if (BIF_ARG_1 == am_context_switches) { + if (BIF_ARG_1 == am_scheduler_wall_time) { + res = erts_sched_wall_time_request(BIF_P, 0, 0); + if (is_non_value(res)) + BIF_RET(am_undefined); + BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res); + } else if (BIF_ARG_1 == am_context_switches) { Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P); hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, cs, SMALL_ZERO); @@ -4160,6 +4167,8 @@ erts_bif_info_init(void) alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1); alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1); + gather_sched_wall_time_res_trap + = erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1); process_info_init(); os_info_init(); } diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 5469a59d8c..30c91af630 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -537,6 +537,209 @@ erts_late_init_process(void) } +static void +init_sched_wall_time(ErtsSchedWallTime *swtp) +{ + swtp->enabled = 0; + swtp->start = 0; + swtp->working.total = 0; + swtp->working.start = 0; + swtp->working.currently = 0; +} + +static ERTS_INLINE Uint64 +sched_wall_time_ts(void) +{ +#ifdef HAVE_GETHRTIME + return (Uint64) sys_gethrtime(); +#else + Uint64 res; + SysTimeval tv; + sys_gettimeofday(&tv); + res = (Uint64) tv.tv_sec*1000000; + res += (Uint64) tv.tv_usec; + return res; +#endif +} + +static ERTS_INLINE void +sched_wall_time_change(ErtsSchedulerData *esdp, int working) +{ + if (esdp->sched_wall_time.enabled) { + Uint64 ts = sched_wall_time_ts(); + if (working) { +#ifdef DEBUG + ASSERT(!esdp->sched_wall_time.working.currently); + 
esdp->sched_wall_time.working.currently = 1; +#endif + ts -= esdp->sched_wall_time.start; + esdp->sched_wall_time.working.start = ts; + } + else { +#ifdef DEBUG + ASSERT(esdp->sched_wall_time.working.currently); + esdp->sched_wall_time.working.currently = 0; +#endif + ts -= esdp->sched_wall_time.start; + ts -= esdp->sched_wall_time.working.start; + esdp->sched_wall_time.working.total += ts; + } + } +} + +typedef struct { + int set; + int enable; + Process *proc; + Eterm ref; + Eterm ref_heap[REF_THING_SIZE]; + Uint req_sched; + erts_smp_atomic32_t refc; +} ErtsSchedWallTimeReq; + +#if !HALFWORD_HEAP +ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(swtreq, + ErtsSchedWallTimeReq, + 5, + ERTS_ALC_T_SCHED_WTIME_REQ) +#else +static ERTS_INLINE ErtsSchedWallTimeReq * +swtreq_alloc(void) +{ + return erts_alloc(ERTS_ALC_T_SCHED_WTIME_REQ, + sizeof(ErtsSchedWallTimeReq)); +} + +static ERTS_INLINE void +swtreq_free(ErtsSchedWallTimeReq *ptr) +{ + erts_free(ERTS_ALC_T_SCHED_WTIME_REQ, ptr); +} +#endif + +static void +reply_sched_wall_time(void *vswtrp) +{ + Uint64 working = 0, total = 0; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + ErtsSchedWallTimeReq *swtrp = (ErtsSchedWallTimeReq *) vswtrp; + ErtsProcLocks rp_locks = (swtrp->req_sched == esdp->no + ? ERTS_PROC_LOCK_MAIN + : 0); + Process *rp = swtrp->proc; + Eterm ref_copy = NIL, msg; + Eterm *hp = NULL; + Eterm **hpp; + Uint sz, *szp; + ErlOffHeap *ohp = NULL; + ErlHeapFragment *bp = NULL; + + ASSERT(esdp); + + if (swtrp->set) { + if (!swtrp->enable && esdp->sched_wall_time.enabled) + esdp->sched_wall_time.enabled = 0; + else if (swtrp->enable && !esdp->sched_wall_time.enabled) { + Uint64 ts = sched_wall_time_ts(); + esdp->sched_wall_time.enabled = 1; + esdp->sched_wall_time.start = ts; + esdp->sched_wall_time.working.total = 0; + esdp->sched_wall_time.working.start = 0; + esdp->sched_wall_time.working.currently = 1; + } + } + + if (esdp->sched_wall_time.enabled) { + Uint64 ts = sched_wall_time_ts(); + ASSERT(esdp->sched_wall_time.working.currently); + ts -= esdp->sched_wall_time.start; + total = ts; + ts -= esdp->sched_wall_time.working.start; + working = esdp->sched_wall_time.working.total + ts; + } + + sz = 0; + hpp = NULL; + szp = &sz; + + while (1) { + if (hpp) + ref_copy = STORE_NC(hpp, ohp, swtrp->ref); + else + *szp += REF_THING_SIZE; + + if (swtrp->set) + msg = ref_copy; + else { + msg = (!esdp->sched_wall_time.enabled + ? 
am_notsup + : erts_bld_tuple(hpp, szp, 3, + make_small(esdp->no), + erts_bld_uint64(hpp, szp, working), + erts_bld_uint64(hpp, szp, total))); + + msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg); + } + if (hpp) + break; + + hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks); + szp = NULL; + hpp = &hp; + } + + erts_queue_message(rp, &rp_locks, bp, msg, NIL); + + if (swtrp->req_sched == esdp->no) + rp_locks &= ~ERTS_PROC_LOCK_MAIN; + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + + erts_smp_proc_dec_refc(rp); + + if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0) + swtreq_free(vswtrp); +} + +Eterm +erts_sched_wall_time_request(Process *c_p, int set, int enable) +{ + ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + Eterm ref; + ErtsSchedWallTimeReq *swtrp; + Eterm *hp; + + if (!set && !esdp->sched_wall_time.enabled) + return THE_NON_VALUE; + + swtrp = swtreq_alloc(); + ref = erts_make_ref(c_p); + hp = &swtrp->ref_heap[0]; + + swtrp->set = set; + swtrp->enable = enable; + swtrp->proc = c_p; + swtrp->ref = STORE_NC(&hp, NULL, ref); + swtrp->req_sched = esdp->no; + erts_smp_atomic32_init_nob(&swtrp->refc, + (erts_aint32_t) erts_no_schedulers); + + erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers); + +#ifdef ERTS_SMP + if (erts_no_schedulers > 1) + erts_schedule_multi_misc_aux_work(1, + erts_no_schedulers, + reply_sched_wall_time, + (void *) swtrp); +#endif + + reply_sched_wall_time((void *) swtrp); + + return ref; +} + static ERTS_INLINE ErtsProcList * proclist_create(Process *p) { @@ -1707,6 +1910,7 @@ aux_thread(void *unused) static void scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) { + int working = 1; ErtsSchedulerSleepInfo *ssi = esdp->ssi; int spincount; erts_aint32_t aux_work = 0; @@ -1737,12 +1941,17 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) tse_wait: + if (thr_prgr_active != working) + sched_wall_time_change(esdp, thr_prgr_active); + while (1) { aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work) { - if (!thr_prgr_active) + if (!thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } aux_work = handle_aux_work(&esdp->aux_work_data, aux_work); if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); @@ -1751,8 +1960,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (aux_work) flgs = erts_smp_atomic32_read_acqb(&ssi->flags); else { - if (thr_prgr_active) + if (thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } erts_thr_progress_prepare_wait(esdp); flgs = sched_spin_wait(ssi, spincount); @@ -1789,8 +2000,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (flgs & ~ERTS_SSI_FLG_SUSPENDED) erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); - if (!thr_prgr_active) + if (!thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } erts_smp_runq_lock(rq); sched_active(esdp->no, rq); @@ -1806,14 +2019,21 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sched_waiting_sys(esdp->no, rq); + erts_smp_runq_unlock(rq); + ASSERT(working); + sched_wall_time_change(esdp, working = 0); + spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT; while (spincount-- > 0) { sys_poll_aux_work: + if (working) + sched_wall_time_change(esdp, working = 0); + ASSERT(!erts_port_task_have_outstanding_io_tasks()); erl_sys_schedule(1); 
/* Might give us something to do */ @@ -1828,6 +2048,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work) { + if (!working) + sched_wall_time_change(esdp, working = 1); #ifdef ERTS_SMP if (!thr_prgr_active) erts_thr_progress_active(esdp, thr_prgr_active = 1); @@ -1920,6 +2142,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_smp_runq_unlock(rq); + if (working) + sched_wall_time_change(esdp, working = 0); + #ifdef ERTS_SMP if (thr_prgr_active) erts_thr_progress_active(esdp, thr_prgr_active = 0); @@ -1955,6 +2180,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (flgs & ~ERTS_SSI_FLG_SUSPENDED) erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); #endif + if (!working) + sched_wall_time_change(esdp, working = 1); sched_active_sys(esdp->no, rq); } @@ -3430,9 +3657,14 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) esdp->run_queue->scheduler = esdp; init_aux_work_data(&esdp->aux_work_data, esdp); + init_sched_wall_time(&esdp->sched_wall_time); } init_misc_aux_work(); +#if !HALFWORD_HEAP + init_swtreq_alloc(); +#endif + #ifdef ERTS_SMP @@ -3755,6 +3987,8 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (erts_system_profile_flags.scheduler) profile_scheduler(make_small(esdp->no), am_inactive); + sched_wall_time_change(esdp, 0); + erts_smp_mtx_lock(&schdlr_sspnd.mtx); flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED); @@ -3813,16 +4047,20 @@ suspend_scheduler(ErtsSchedulerData *esdp) aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work) { - if (!thr_prgr_active) + if (!thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } aux_work = handle_aux_work(&esdp->aux_work_data, aux_work); if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); } if (!aux_work) { - if (thr_prgr_active) + if (thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } erts_thr_progress_prepare_wait(esdp); flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT); @@ -3877,8 +4115,10 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (erts_system_profile_flags.scheduler) profile_scheduler(make_small(esdp->no), am_active); - if (!thr_prgr_active) + if (!thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } erts_smp_runq_lock(esdp->run_queue); non_empty_runq(esdp->run_queue); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index a51b380bb0..173b7df69d 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -394,6 +394,16 @@ do { \ } while (0) typedef struct { + int enabled; + Uint64 start; + struct { + Uint64 total; + Uint64 start; + int currently; + } working; +} ErtsSchedWallTime; + +typedef struct { int sched_id; ErtsSchedulerData *esdp; ErtsSchedulerSleepInfo *ssi; @@ -457,6 +467,8 @@ struct ErtsSchedulerData_ { ErtsSchedAllocData alloc_data; + ErtsSchedWallTime sched_wall_time; + #ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC erts_alloc_verify_func_t verify_unused_temp_alloc; Allctr_t *verify_unused_temp_alloc_data; @@ -1064,6 +1076,8 @@ void erts_late_init_process(void); void erts_early_init_scheduling(int); void erts_init_scheduling(int, int); +Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable); + ErtsProcList 
*erts_proclist_create(Process *); void erts_proclist_destroy(ErtsProcList *); int erts_proclist_same(ErtsProcList *, Process *); diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl index 0392312a6f..a93dd309c1 100644 --- a/erts/emulator/test/statistics_SUITE.erl +++ b/erts/emulator/test/statistics_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2011. All Rights Reserved. +%% Copyright Ericsson AB 1997-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -29,6 +29,7 @@ runtime_zero_diff/1, runtime_update/1, runtime_diff/1, run_queue_one/1, + scheduler_wall_time/1, reductions/1, reductions_big/1, garbage_collection/1, io/1, badarg/1]). @@ -51,8 +52,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [{group, wall_clock}, {group, runtime}, reductions, - reductions_big, {group, run_queue}, garbage_collection, - io, badarg]. + reductions_big, {group, run_queue}, scheduler_wall_time, + garbage_collection, io, badarg]. groups() -> [{wall_clock, [], @@ -266,11 +267,10 @@ run_queue_one(Config) when is_list(Config) -> run_queue_one_test(Config) when is_list(Config) -> - ?line Hog = spawn_link(?MODULE, hog, [self()]), + ?line _Hog = spawn_link(?MODULE, hog, [self()]), ?line receive - hog_started -> - Hog ! go - end, + hog_started -> ok + end, ?line receive after 100 -> ok end, % Give hog a head start. ?line case statistics(run_queue) of N when N >= 1 -> ok; @@ -280,18 +280,88 @@ run_queue_one_test(Config) when is_list(Config) -> %% CPU-bound process, going at low priority. It will always be ready %% to run. - + hog(Pid) -> ?line process_flag(priority, low), ?line Pid ! hog_started, - ?line receive - go -> hog_iter(0) + ?line Mon = erlang:monitor(process, Pid), + ?line hog_iter(0, Mon). + +hog_iter(N, Mon) when N > 0 -> + receive + {'DOWN', Mon, _, _, _} -> ok + after 0 -> + ?line hog_iter(N-1, Mon) + end; +hog_iter(0, Mon) -> + ?line hog_iter(10000, Mon). + +%%% Tests of statistics(scheduler_wall_time). + +scheduler_wall_time(doc) -> + "Tests that statistics(scheduler_wall_time) works as intended"; +scheduler_wall_time(Config) when is_list(Config) -> + %% Should return undefined if system_flag is not turned on yet + undefined = statistics(scheduler_wall_time), + %% Turn on statistics + false = erlang:system_flag(scheduler_wall_time, true), + try + Schedulers = erlang:system_info(schedulers_online), + %% Let testserver and everyone else finish their work + timer:sleep(500), + %% Empty load + EmptyLoad = get_load(), + {false, _} = {lists:any(fun(Load) -> Load > 50 end, EmptyLoad),EmptyLoad}, + MeMySelfAndI = self(), + StartHog = fun() -> + Pid = spawn(?MODULE, hog, [self()]), + receive hog_started -> MeMySelfAndI ! 
go end, + Pid + end, + P1 = StartHog(), + %% Max on one, the other schedulers empty (hopefully) + %% Be generous the process can jump between schedulers + %% which is ok and we don't want the test to fail for wrong reasons + _L1 = [S1Load|EmptyScheds1] = get_load(), + {true,_} = {S1Load > 50,S1Load}, + {false,_} = {lists:any(fun(Load) -> Load > 50 end, EmptyScheds1),EmptyScheds1}, + {true,_} = {lists:sum(EmptyScheds1) < 60,EmptyScheds1}, + + %% 50% load + HalfHogs = [StartHog() || _ <- lists:seq(1, (Schedulers-1) div 2)], + HalfLoad = lists:sum(get_load()) div Schedulers, + if Schedulers < 2, HalfLoad > 80 -> ok; %% Ok only one scheduler online and one hog + %% We want roughly 50% load + HalfLoad > 40, HalfLoad < 60 -> ok; + true -> exit({halfload, HalfLoad}) + end, + + %% 100% load + LastHogs = [StartHog() || _ <- lists:seq(1, Schedulers div 2)], + FullScheds = get_load(), + {false,_} = {lists:any(fun(Load) -> Load < 80 end, FullScheds),FullScheds}, + FullLoad = lists:sum(FullScheds) div Schedulers, + if FullLoad > 90 -> ok; + true -> exit({fullload, FullLoad}) + end, + + [exit(Pid, kill) || Pid <- [P1|HalfHogs++LastHogs]], + AfterLoad = get_load(), + {false,_} = {lists:any(fun(Load) -> Load > 5 end, AfterLoad),AfterLoad}, + true = erlang:system_flag(scheduler_wall_time, false) + after + erlang:system_flag(scheduler_wall_time, false) end. -hog_iter(N) when N > 0 -> - ?line hog_iter(N-1); -hog_iter(0) -> - ?line hog_iter(10000). +get_load() -> + Start = erlang:statistics(scheduler_wall_time), + timer:sleep(500), + End = erlang:statistics(scheduler_wall_time), + lists:reverse(lists:sort(load_percentage(lists:sort(Start),lists:sort(End)))). + +load_percentage([{Id, WN, TN}|Ss], [{Id, WP, TP}|Ps]) -> + [100*(WN-WP) div (TN-TP)|load_percentage(Ss, Ps)]; +load_percentage([], []) -> []. garbage_collection(doc) -> diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam Binary files differindex 9e369d5348..b78747bc84 100644 --- a/erts/preloaded/ebin/erlang.beam +++ b/erts/preloaded/ebin/erlang.beam diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl index 4affc9bffe..4cbff3f36e 100644 --- a/erts/preloaded/src/erlang.erl +++ b/erts/preloaded/src/erlang.erl @@ -43,6 +43,9 @@ -export([memory/0, memory/1]). -export([alloc_info/1, alloc_sizes/1]). +-export([gather_sched_wall_time_result/1, + await_sched_wall_time_modifications/2]). + -deprecated([hash/2]). % Get rid of autoimports of spawn to avoid clashes with ourselves. @@ -1202,3 +1205,34 @@ receive_allocator(Ref, N, Acc) -> {Ref, _, InfoList} -> receive_allocator(Ref, N-1, insert_info(InfoList, Acc)) end. + +-spec await_sched_wall_time_modifications(Ref, Result) -> boolean() when + Ref :: reference(), + Result :: boolean(). + +await_sched_wall_time_modifications(Ref, Result) -> + sched_wall_time(Ref, erlang:system_info(schedulers)), + Result. + +-spec gather_sched_wall_time_result(Ref) -> [{pos_integer(), + non_neg_integer(), + non_neg_integer()}] when + Ref :: reference(). + +gather_sched_wall_time_result(Ref) when is_reference(Ref) -> + sched_wall_time(Ref, erlang:system_info(schedulers), []). + +sched_wall_time(_Ref, 0) -> + ok; +sched_wall_time(Ref, N) -> + receive Ref -> sched_wall_time(Ref, N-1) end. 
+
+sched_wall_time(_Ref, 0, Acc) ->
+    Acc;
+sched_wall_time(Ref, N, undefined) ->
+    receive {Ref, _} -> sched_wall_time(Ref, N-1, undefined) end;
+sched_wall_time(Ref, N, Acc) ->
+    receive
+        {Ref, undefined} -> sched_wall_time(Ref, N-1, undefined);
+        {Ref, SWT} -> sched_wall_time(Ref, N-1, [SWT|Acc])
+    end.
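
The statistics_SUITE changes above derive per-scheduler load by diffing two scheduler_wall_time samples (get_load/0 and load_percentage/2). Outside the test suite, the same calculation looks roughly like the sketch below; the module name sched_util is invented, and a real tool would restore the flag to its previous value instead of unconditionally disabling it:

    -module(sched_util).
    -export([utilization/1]).

    %% Per-scheduler busy percentage over an interval of IntervalMs
    %% milliseconds. Mirrors load_percentage/2 in statistics_SUITE:
    %% 100 * (Worked1 - Worked0) div (Total1 - Total0) per scheduler.
    %% Assumes the interval is long enough that the totals differ.
    utilization(IntervalMs) ->
        erlang:system_flag(scheduler_wall_time, true),
        S0 = lists:sort(erlang:statistics(scheduler_wall_time)),
        timer:sleep(IntervalMs),
        S1 = lists:sort(erlang:statistics(scheduler_wall_time)),
        erlang:system_flag(scheduler_wall_time, false),
        [{Id, 100 * (W1 - W0) div (T1 - T0)}
         || {{Id, W0, T0}, {Id, W1, T1}} <- lists:zip(S0, S1)].

On an otherwise idle node, sched_util:utilization(1000) should report values near zero for every scheduler, which is essentially the EmptyLoad check in the test above.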