author     Rickard Green <[email protected]>        2015-12-30 14:22:19 +0100
committer  Rickard Green <[email protected]>        2015-12-30 14:22:19 +0100
commit     3ea54c2dc82a2e067fbd19b201e4eb03a7dbc29d
tree       629e47d7f70ae54fa76f3d0458579897a3f103ba
parent     10bcaeeea71a77a0223f2989e9a99d208eae7057
parent     df18cd270ad682cedc8c21990641b0bdb8788092
Merge branch 'maint'
* maint: Light weight statistics of run queue lengths Conflicts: erts/preloaded/ebin/erlang.beam
-rw-r--r--   erts/doc/src/erlang.xml                  | 111
-rw-r--r--   erts/emulator/beam/atom.names            |   4
-rw-r--r--   erts/emulator/beam/erl_bif_info.c        |  37
-rw-r--r--   erts/emulator/beam/erl_process.c         |  65
-rw-r--r--   erts/emulator/beam/erl_process.h         |  48
-rw-r--r--   erts/emulator/test/statistics_SUITE.erl  |  61
-rw-r--r--   erts/preloaded/ebin/erlang.beam          | bin 102012 -> 102216 bytes
-rw-r--r--   erts/preloaded/src/erlang.erl            |  10
8 files changed, 281 insertions, 55 deletions
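
This merge introduces four new statistics/1 items: active_tasks, run_queue_lengths, total_active_tasks and total_run_queue_lengths. As a rough sketch of the shapes they return, assuming an otherwise idle emulator with four online schedulers built from this branch (the numeric values are illustrative only):

1> statistics(active_tasks).
[1,0,0,0]
2> statistics(total_active_tasks).
1
3> statistics(run_queue_lengths).
[0,0,0,0]
4> statistics(total_run_queue_lengths).
0
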
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 6ed03f3dfc..8ddbd95de5 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -5771,8 +5771,31 @@ true</pre>
<anno>Dest</anno>, <anno>Msg</anno>, [])</c></seealso>.</p>
</desc>
</func>
+
<func>
<name name="statistics" arity="1" clause_i="1"/>
+ <fsummary>Information about active processes and ports.</fsummary>
+ <desc><marker id="statistics_active_tasks"></marker>
+ <p>
+ Returns a list where each element represents the number
+ of active processes and ports on each run queue and its
+ associated scheduler. That is, the number of processes and
+ ports that are ready to run, or are currently running. The
+ element location in the list corresponds to the scheduler
+ and its run queue. The first element corresponds to scheduler
+ number 1 and so on. The information is <em>not</em> gathered
+ atomically. That is, the result is not necessarily a
+ consistent snapshot of the state, but instead quite
+ efficiently gathered. See also,
+ <seealso marker="#statistics_total_active_tasks"><c>statistics(total_active_tasks)</c></seealso>,
+ <seealso marker="#statistics_run_queue_lengths"><c>statistics(run_queue_lengths)</c></seealso>, and
+ <seealso marker="#statistics_total_run_queue_lengths"><c>statistics(total_run_queue_lengths)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="statistics" arity="1" clause_i="2"/>
<fsummary>Information about context switches.</fsummary>
<desc>
<p>Returns the total number of context switches since the
@@ -5781,7 +5804,7 @@ true</pre>
</func>
<func>
- <name name="statistics" arity="1" clause_i="2"/>
+ <name name="statistics" arity="1" clause_i="3"/>
<fsummary>Information about exact reductions.</fsummary>
<desc>
<marker id="statistics_exact_reductions"></marker>
@@ -5795,7 +5818,7 @@ true</pre>
</func>
<func>
- <name name="statistics" arity="1" clause_i="3"/>
+ <name name="statistics" arity="1" clause_i="4"/>
<fsummary>Information about garbage collection.</fsummary>
<desc>
<p>Returns information about garbage collection, for example:</p>
@@ -5807,7 +5830,7 @@ true</pre>
</func>
<func>
- <name name="statistics" arity="1" clause_i="4"/>
+ <name name="statistics" arity="1" clause_i="5"/>
<fsummary>Information about I/O.</fsummary>
<desc>
<p>Returns <c><anno>Input</anno></c>,
@@ -5818,7 +5841,7 @@ true</pre>
</func>
<func>
- <name name="statistics" arity="1" clause_i="5"/>
+ <name name="statistics" arity="1" clause_i="6"/>
<fsummary>Information about reductions.</fsummary>
<desc>
<marker id="statistics_reductions"></marker>
@@ -5836,16 +5859,43 @@ true</pre>
</func>
<func>
- <name name="statistics" arity="1" clause_i="6"/>
- <fsummary>Information about the run-queue.</fsummary>
- <desc>
- <p>Returns the total length of run-queues, that is, the number
- of processes that are ready to run on all available run-queues.</p>
+ <name name="statistics" arity="1" clause_i="7"/>
+ <fsummary>Information about the run-queues.</fsummary>
+ <desc><marker id="statistics_run_queue"></marker>
+ <p>
+ Returns the total length of the run-queues. That is, the number
+ of processes and ports that are ready to run on all available
+ run-queues. The information is gathered atomically. That
+ is, the result is a consistent snapshot of the state, but
+ this operation is much more expensive compared to
+ <seealso marker="#statistics_total_run_queue_lengths"><c>statistics(total_run_queue_lengths)</c></seealso>.
+ This is especially true when a large number of schedulers is used.
+ </p>
</desc>
</func>
<func>
- <name name="statistics" arity="1" clause_i="7"/>
+ <name name="statistics" arity="1" clause_i="8"/>
+ <fsummary>Information about the run-queue lengths.</fsummary>
+ <desc><marker id="statistics_run_queue_lengths"></marker>
+ <p>
+ Returns a list where each element represents the number
+ of processes and ports ready to run for each run queue. The
+ element location in the list corresponds to the run queue
+ of a scheduler. The first element corresponds to the run
+ queue of scheduler number 1 and so on. The information is
+ <em>not</em> gathered atomically. That is, the result is
+ not necessarily a consistent snapshot of the state, but
+ instead quite efficiently gathered. See also,
+ <seealso marker="#statistics_total_run_queue_lengths"><c>statistics(total_run_queue_lengths)</c></seealso>,
+ <seealso marker="#statistics_active_tasks"><c>statistics(active_tasks)</c></seealso>, and
+ <seealso marker="#statistics_total_active_tasks"><c>statistics(total_active_tasks)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="statistics" arity="1" clause_i="9"/>
<fsummary>Information about runtime.</fsummary>
<desc>
<p>Returns information about runtime, in milliseconds.</p>
@@ -5860,7 +5910,7 @@ true</pre>
</func>
<func>
- <name name="statistics" arity="1" clause_i="8"/>
+ <name name="statistics" arity="1" clause_i="10"/>
<fsummary>Information about each schedulers work time.</fsummary>
<desc>
<marker id="statistics_scheduler_wall_time"></marker>
@@ -5931,7 +5981,44 @@ ok
</func>
<func>
- <name name="statistics" arity="1" clause_i="9"/>
+ <name name="statistics" arity="1" clause_i="11"/>
+ <fsummary>Information about active processes and ports.</fsummary>
+ <desc><marker id="statistics_total_active_tasks"></marker>
+ <p>
+ Returns the total number of active processes and ports in
+ the system. That is, the number of processes and ports that
+ are ready to run, or are currently running. The information
+ is <em>not</em> gathered atomically. That is, the result
+ is not necessarily a consistent snapshot of the state, but
+ instead quite efficiently gathered. See also,
+ <seealso marker="#statistics_active_tasks"><c>statistics(active_tasks)</c></seealso>,
+ <seealso marker="#statistics_run_queue_lengths"><c>statistics(run_queue_lengths)</c></seealso>, and
+ <seealso marker="#statistics_total_run_queue_lengths"><c>statistics(total_run_queue_lengths)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="statistics" arity="1" clause_i="12"/>
+ <fsummary>Information about the run-queue lengths.</fsummary>
+ <desc><marker id="statistics_total_run_queue_lengths"></marker>
+ <p>
+ Returns the total length of the run-queues. That is, the number
+ of processes and ports that are ready to run on all available
+ run-queues. The information is <em>not</em> gathered atomically.
+ That is, the result is not necessarily a consistent snapshot of
+ the state, but much more efficiently gathered compared to
+ <seealso marker="#statistics_run_queue"><c>statistics(run_queue)</c></seealso>.
+ See also,
+ <seealso marker="#statistics_run_queue_lengths"><c>statistics(run_queue_lengths)</c></seealso>,
+ <seealso marker="#statistics_total_active_tasks"><c>statistics(total_active_tasks)</c></seealso>, and
+ <seealso marker="#statistics_active_tasks"><c>statistics(active_tasks)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="statistics" arity="1" clause_i="13"/>
<fsummary>Information about wall clock.</fsummary>
<desc>
<p>Returns information about wall clock. <c>wall_clock</c> can
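
The documentation added above draws a trade-off: statistics(run_queue) locks every run queue and returns a consistent snapshot, while statistics(total_run_queue_lengths) only reads the new atomic per-queue counters and is therefore cheap but not necessarily consistent. A minimal sketch of a polling load sampler built on the cheap variant; the module name rq_sample and its API are hypothetical and not part of this patch:

-module(rq_sample).
-export([sample/2]).

%% Collect Count samples of the lock-free (and therefore not
%% necessarily consistent) total run-queue length, IntervalMs
%% apart, oldest sample first.
sample(Count, IntervalMs) when is_integer(Count), Count > 0 ->
    sample(Count, IntervalMs, []).

sample(0, _IntervalMs, Acc) ->
    lists:reverse(Acc);
sample(Count, IntervalMs, Acc) ->
    Len = statistics(total_run_queue_lengths),
    timer:sleep(IntervalMs),
    sample(Count - 1, IntervalMs, [Len | Acc]).

For example, rq_sample:sample(10, 100) gathers ten samples over roughly a second; the same loop using statistics(run_queue) would take all run-queue locks on every sample.
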
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 13c2a0f8f9..ae7f405cce 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -71,6 +71,7 @@ atom absoluteURI
atom ac
atom accessor
atom active
+atom active_tasks
atom all
atom all_but_first
atom all_names
@@ -509,6 +510,7 @@ atom return_from
atom return_to
atom return_trace
atom run_queue
+atom run_queue_lengths
atom runnable
atom runnable_ports
atom runnable_procs
@@ -576,7 +578,9 @@ atom timeout_value
atom Times='*'
atom timestamp
atom total
+atom total_active_tasks
atom total_heap_size
+atom total_run_queue_lengths
atom tpkt
atom trace trace_ts traced
atom trace_control_word
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index f952f937ce..ab3ebd6bac 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3199,6 +3199,39 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
if (is_non_value(res))
BIF_RET(am_undefined);
BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if (BIF_ARG_1 == am_total_active_tasks
+ || BIF_ARG_1 == am_total_run_queue_lengths) {
+ Uint no = erts_run_queues_len(NULL, 0, BIF_ARG_1 == am_total_active_tasks);
+ if (IS_USMALL(0, no))
+ res = make_small(no);
+ else {
+ Eterm *hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ res = uint_to_big(no, hp);
+ }
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_active_tasks
+ || BIF_ARG_1 == am_run_queue_lengths) {
+ Eterm res, *hp, **hpp;
+ Uint sz, *szp;
+ int no_qs = erts_no_run_queues;
+ Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
+ (void) erts_run_queues_len(qszs, 0, BIF_ARG_1 == am_active_tasks);
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+ while (1) {
+ int i;
+ for (i = 0; i < no_qs; i++)
+ qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
+ res = erts_bld_list(hpp, szp, no_qs, &qszs[no_qs]);
+ if (hpp) {
+ erts_free(ERTS_ALC_T_TMP, qszs);
+ BIF_RET(res);
+ }
+ hp = HAlloc(BIF_P, sz);
+ szp = NULL;
+ hpp = &hp;
+ }
} else if (BIF_ARG_1 == am_context_switches) {
Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
hp = HAlloc(BIF_P, 3);
@@ -3247,7 +3280,7 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_run_queue) {
- res = erts_run_queues_len(NULL);
+ res = erts_run_queues_len(NULL, 1, 0);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
UWord w1, w2;
@@ -3267,7 +3300,7 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Uint sz, *szp;
int no_qs = erts_no_run_queues;
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs);
+ (void) erts_run_queues_len(qszs, 0, 0);
sz = 0;
szp = &sz;
hpp = NULL;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 2c62685f8c..9495436ba6 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -3060,7 +3060,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
#ifndef ERTS_SMP
- if (rq->len != 0 || rq->misc.start)
+ if (erts_smp_atomic32_read_dirty(&rq->len) != 0 || rq->misc.start)
goto sys_woken;
#else
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
@@ -3159,7 +3159,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
#ifndef ERTS_SMP
- if (rq->len == 0 && !rq->misc.start)
+ if (erts_smp_atomic32_read_dirty(&rq->len) == 0 && !rq->misc.start)
goto sys_aux_work;
sys_woken:
#else
@@ -4876,7 +4876,7 @@ erts_fprintf(stderr, "--------------------------------\n");
rq->out_of_work_count = 0;
(void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
- rq->max_len = rq->len;
+ rq->max_len = erts_smp_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
rqi = (pix == ERTS_PORT_PRIO_LEVEL
@@ -5034,7 +5034,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- int left_len = rq->len - 1;
+ int left_len = erts_smp_atomic32_read_dirty(&rq->len) - 1;
if (left_len < 1) {
int wo_reduce = wo_reds << wakeup_other.dec_shift;
wo_reduce &= wakeup_other.dec_mask;
@@ -5107,7 +5107,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- erts_aint32_t len = rq->len;
+ erts_aint32_t len = erts_smp_atomic32_read_dirty(&rq->len);
if (len < 2) {
rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds;
if (rq->wakeup_other < 0)
@@ -5203,7 +5203,7 @@ runq_supervisor(void *unused)
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) {
erts_smp_runq_lock(rq);
- if (rq->len != 0)
+ if (erts_smp_atomic32_read_dirty(&rq->len) != 0)
wake_scheduler_on_empty_runq(rq); /* forced wakeup... */
erts_smp_runq_unlock(rq);
}
@@ -5544,7 +5544,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
rq->out_of_work_count = 0;
rq->max_len = 0;
- rq->len = 0;
+ erts_smp_atomic32_set_nob(&rq->len, 0);
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
rq->halt_in_progress = 0;
@@ -7913,6 +7913,9 @@ sched_thread_func(void *vesdp)
erts_sched_init_time_sup(esdp);
+ (void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue,
+ ERTS_RUNQ_FLG_EXEC);
+
#ifdef ERTS_SMP
tse = erts_tse_fetch();
erts_tse_prepare_timed(tse);
@@ -8910,24 +8913,39 @@ resume_process_1(BIF_ALIST_1)
}
Uint
-erts_run_queues_len(Uint *qlen)
+erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
{
int i = 0;
Uint len = 0;
- ERTS_ATOMIC_FOREACH_RUNQ(rq,
- {
- Sint pqlen = 0;
- int pix;
- for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++)
- pqlen += RUNQ_READ_LEN(&rq->procs.prio_info[pix].len);
+ if (atomic_queues_read)
+ ERTS_ATOMIC_FOREACH_RUNQ(rq,
+ {
+ Sint rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len);
+ ASSERT(rq_len >= 0);
+ if (incl_active_sched
+ && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
+ rq_len++;
+ }
+ if (qlen)
+ qlen[i++] = rq_len;
+ len += (Uint) rq_len;
+ }
+ );
+ else {
+ for (i = 0; i < erts_no_run_queues; i++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(i);
+ Sint rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len);
+ ASSERT(rq_len >= 0);
+ if (incl_active_sched
+ && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
+ rq_len++;
+ }
+ if (qlen)
+ qlen[i] = rq_len;
+ len += (Uint) rq_len;
+ }
- if (pqlen < 0)
- pqlen = 0;
- if (qlen)
- qlen[i++] = pqlen;
- len += pqlen;
}
- );
return len;
}
@@ -9356,8 +9374,10 @@ Process *schedule(Process *p, int calls)
if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
suspend_scheduler(esdp);
- flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
}
if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
@@ -9446,7 +9466,10 @@ Process *schedule(Process *p, int calls)
}
#endif
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC);
scheduler_wait(&fcalls, esdp, rq);
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
#ifdef ERTS_SMP
non_empty_runq(rq);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 4bc879eacb..71818b2330 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -170,8 +170,10 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5))
#define ERTS_RUNQ_FLG_PROTECTED \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
+#define ERTS_RUNQ_FLG_EXEC \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 7))
-#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 7)
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 8)
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -215,6 +217,9 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \
((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \
(erts_aint32_t) (FLGS)))
+#define ERTS_RUNQ_FLGS_SET_NOB(RQ, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_bor_nob(&(RQ)->flags, \
+ (erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \
((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
(erts_aint32_t) (MSK), \
@@ -222,6 +227,9 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \
((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \
(erts_aint32_t) ~(FLGS)))
+#define ERTS_RUNQ_FLGS_UNSET_NOB(RQ, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_band_nob(&(RQ)->flags, \
+ (erts_aint32_t) ~(FLGS)))
#define ERTS_RUNQ_FLGS_GET(RQ) \
((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_GET_NOB(RQ) \
@@ -464,7 +472,7 @@ struct ErtsRunQueue_ {
int full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE];
int out_of_work_count;
erts_aint32_t max_len;
- erts_aint32_t len;
+ erts_smp_atomic32_t len;
int wakeup_other;
int wakeup_other_reds;
int halt_in_progress;
@@ -712,7 +720,19 @@ erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_nob(&rqi->len);
+ len = erts_smp_atomic32_read_dirty(&rq->len);
+
+#ifdef ERTS_SMP
+ if (len == 0)
+ erts_non_empty_runq(rq);
+#endif
+ len++;
+ if (rq->max_len < len)
+ rq->max_len = len;
+ ASSERT(len > 0);
+ erts_smp_atomic32_set_nob(&rq->len, len);
+
+ len = erts_smp_atomic32_read_dirty(&rqi->len);
ASSERT(len >= 0);
if (len == 0) {
ASSERT((erts_smp_atomic32_read_nob(&rq->flags)
@@ -725,15 +745,6 @@ erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
rqi->max_len = len;
erts_smp_atomic32_set_relb(&rqi->len, len);
-
-#ifdef ERTS_SMP
- if (rq->len == 0)
- erts_non_empty_runq(rq);
-#endif
- rq->len++;
- if (rq->max_len < rq->len)
- rq->max_len = len;
- ASSERT(rq->len > 0);
}
ERTS_GLB_INLINE void
@@ -743,7 +754,12 @@ erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_nob(&rqi->len);
+ len = erts_smp_atomic32_read_dirty(&rq->len);
+ len--;
+ ASSERT(len >= 0);
+ erts_smp_atomic32_set_nob(&rq->len, len);
+
+ len = erts_smp_atomic32_read_dirty(&rqi->len);
len--;
ASSERT(len >= 0);
if (len == 0) {
@@ -754,8 +770,6 @@ erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
}
erts_smp_atomic32_set_relb(&rqi->len, len);
- rq->len--;
- ASSERT(rq->len >= 0);
}
ERTS_GLB_INLINE void
@@ -765,7 +779,7 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_nob(&rqi->len);
+ len = erts_smp_atomic32_read_dirty(&rqi->len);
ASSERT(rqi->max_len >= len);
rqi->max_len = len;
}
@@ -1684,7 +1698,7 @@ void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int, int, int);
Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
-Uint erts_run_queues_len(Uint *);
+Uint erts_run_queues_len(Uint *, int, int);
void erts_add_to_runq(Process *);
Eterm erts_bound_schedulers_term(Process *c_p);
Eterm erts_get_cpu_topology_term(Process *c_p, Eterm which);
diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl
index 56ecf4195a..53c9ba8715 100644
--- a/erts/emulator/test/statistics_SUITE.erl
+++ b/erts/emulator/test/statistics_SUITE.erl
@@ -32,7 +32,7 @@
run_queue_one/1,
scheduler_wall_time/1,
reductions/1, reductions_big/1, garbage_collection/1, io/1,
- badarg/1]).
+ badarg/1, run_queues_lengths_active_tasks/1]).
%% Internal exports.
@@ -54,7 +54,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[{group, wall_clock}, {group, runtime}, reductions,
reductions_big, {group, run_queue}, scheduler_wall_time,
- garbage_collection, io, badarg].
+ garbage_collection, io, badarg,
+ run_queues_lengths_active_tasks].
groups() ->
[{wall_clock, [],
@@ -409,3 +410,59 @@ badarg(Config) when is_list(Config) ->
?line case catch statistics(bad_atom) of
{'EXIT', {badarg, _}} -> ok
end.
+
+tok_loop() ->
+ tok_loop().
+
+run_queues_lengths_active_tasks(Config) ->
+ TokLoops = lists:map(fun (_) ->
+ spawn_opt(fun () ->
+ tok_loop()
+ end,
+ [link, {priority, low}])
+ end,
+ lists:seq(1,10)),
+
+ TRQLs0 = statistics(total_run_queue_lengths),
+ TATs0 = statistics(total_active_tasks),
+ true = is_integer(TRQLs0),
+ true = is_integer(TATs0),
+ true = TRQLs0 >= 0,
+ true = TATs0 >= 11,
+
+ NoScheds = erlang:system_info(schedulers),
+ RQLs0 = statistics(run_queue_lengths),
+ ATs0 = statistics(active_tasks),
+ NoScheds = length(RQLs0),
+ NoScheds = length(ATs0),
+ true = lists:sum(RQLs0) >= 0,
+ true = lists:sum(ATs0) >= 11,
+
+ SO = erlang:system_flag(schedulers_online, 1),
+
+ TRQLs1 = statistics(total_run_queue_lengths),
+ TATs1 = statistics(total_active_tasks),
+ true = TRQLs1 >= 10,
+ true = TATs1 >= 11,
+ NoScheds = erlang:system_info(schedulers),
+
+ RQLs1 = statistics(run_queue_lengths),
+ ATs1 = statistics(active_tasks),
+ NoScheds = length(RQLs1),
+ NoScheds = length(ATs1),
+ TRQLs2 = lists:sum(RQLs1),
+ TATs2 = lists:sum(ATs1),
+ true = TRQLs2 >= 10,
+ true = TATs2 >= 11,
+ [TRQLs2|_] = RQLs1,
+ [TATs2|_] = ATs1,
+
+ erlang:system_flag(schedulers_online, SO),
+
+ lists:foreach(fun (P) ->
+ unlink(P),
+ exit(P, bang)
+ end,
+ TokLoops),
+
+ ok.
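
The new test case above spawns ten busy low-priority processes and checks that the per-queue lists agree with the totals, in particular that forcing schedulers_online to 1 moves all queued work onto run queue 1. A condensed sketch of the same observation (module and function names are illustrative; the matches mirror the suite's assertions and are timing dependent on a loaded machine):

-module(rq_demo).
-export([check/0]).

busy() -> busy().

check() ->
    Busy = [spawn_opt(fun busy/0, [link, {priority, low}])
            || _ <- lists:seq(1, 10)],
    Old = erlang:system_flag(schedulers_online, 1),
    RQLs = statistics(run_queue_lengths),
    [Q1 | _] = RQLs,
    Q1 = lists:sum(RQLs),               %% all queued work is on queue 1
    true = statistics(total_active_tasks) >= 11,
    erlang:system_flag(schedulers_online, Old),
    [begin unlink(P), exit(P, kill) end || P <- Busy],
    ok.
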
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 77f25653a3..68578c3a49 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 9517acd78e..d9dc9a1976 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -2226,7 +2226,9 @@ setelement(_Index, _Tuple1, _Value) ->
spawn_opt(_Tuple) ->
erlang:nif_error(undefined).
--spec statistics(context_switches) -> {ContextSwitches,0} when
+-spec statistics(active_tasks) -> [ActiveTasks] when
+ ActiveTasks :: non_neg_integer();
+ (context_switches) -> {ContextSwitches,0} when
ContextSwitches :: non_neg_integer();
(exact_reductions) -> {Total_Exact_Reductions,
Exact_Reductions_Since_Last_Call} when
@@ -2243,6 +2245,8 @@ spawn_opt(_Tuple) ->
Total_Reductions :: non_neg_integer(),
Reductions_Since_Last_Call :: non_neg_integer();
(run_queue) -> non_neg_integer();
+ (run_queue_lengths) -> [RunQueueLength] when
+ RunQueueLength :: non_neg_integer();
(runtime) -> {Total_Run_Time, Time_Since_Last_Call} when
Total_Run_Time :: non_neg_integer(),
Time_Since_Last_Call :: non_neg_integer();
@@ -2250,6 +2254,10 @@ spawn_opt(_Tuple) ->
SchedulerId :: pos_integer(),
ActiveTime :: non_neg_integer(),
TotalTime :: non_neg_integer();
+ (total_active_tasks) -> ActiveTasks when
+ ActiveTasks :: non_neg_integer();
+ (total_run_queue_lengths) -> TotalRunQueueLengths when
+ TotalRunQueueLengths :: non_neg_integer();
(wall_clock) -> {Total_Wallclock_Time,
Wallclock_Time_Since_Last_Call} when
Total_Wallclock_Time :: non_neg_integer(),