author     Rickard Green <[email protected]>   2016-01-14 11:44:08 +0100
committer  Rickard Green <[email protected]>   2016-01-14 11:44:08 +0100
commit     45292e9a8a65800f298129fa3a2f661136f02ed5 (patch)
tree       00e661f19fe0aed30885d497933e879751d25a53 /erts/emulator
parent     a00f5b7360b11991bee771638831e21cc4414b9b (diff)
parent     f8da487826f96a7c5baf84074639cae03aee6b0b (diff)
Merge branch 'maint'
* maint: Do not allow aux work on dirty schedulers
Diffstat (limited to 'erts/emulator')
-rw-r--r--   erts/emulator/beam/erl_process.c   75
-rw-r--r--   erts/emulator/beam/erl_process.h    2
2 files changed, 37 insertions, 40 deletions
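
The hunks below all apply the same fix: the ERTS_SCHEDULER_IS_DIRTY() test, previously repeated around the thread-progress activation, the handle_aux_work() call and the leader update, is hoisted into a single guard, so a dirty scheduler never performs aux work at all, and handle_aux_work() now asserts that it is never entered by one. A minimal sketch of that pattern (stand-in names and structures, not the actual emulator code):

#include <assert.h>

struct sched { int dirty; unsigned aux_work; int thr_prgr_active; };

static unsigned handle_aux_work(struct sched *s, unsigned aux_work)
{
    assert(!s->dirty);                 /* mirrors the ASSERT added in the diff */
    (void)aux_work;                    /* ... perform the pending work ...     */
    return 0;                          /* nothing left to do                   */
}

static void wait_iteration(struct sched *s)
{
    unsigned aux_work = s->aux_work;

    /* After the change: one guard covers thread-progress activation, the
     * work itself and the leader update, so dirty schedulers skip it all. */
    if (aux_work && !s->dirty) {
        if (!s->thr_prgr_active)
            s->thr_prgr_active = 1;    /* stands in for erts_thr_progress_active() */
        aux_work = handle_aux_work(s, aux_work);
        if (aux_work) {
            /* erts_thr_progress_update() / leader update would happen here */
        }
    }
}

int main(void)
{
    struct sched normal = { 0, 1u, 0 };
    struct sched dirty  = { 1, 1u, 0 };
    wait_iteration(&normal);           /* handles the work                */
    wait_iteration(&dirty);            /* skipped; the assert never fires */
    return 0;
}

With the guard hoisted, the separate dirty-scheduler tests before erts_thr_progress_update() become redundant, which is what the hunks in scheduler_wait(), suspend_scheduler() and schedule() remove.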
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 9495436ba6..2cdb98b2aa 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2182,6 +2182,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
erts_aint32_t aux_work = orig_aux_work;
erts_aint32_t ignore = 0;
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
#ifdef ERTS_SMP
haw_thr_prgr_current_reset(awdp);
#endif
@@ -2890,14 +2891,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ErtsMonotonicTime current_time;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)
- && erts_thr_progress_update(esdp))
+ if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
}
@@ -3042,19 +3042,16 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#endif
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!working)
- sched_wall_time_change(esdp, working = 1);
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!working)
+ sched_wall_time_change(esdp, working = 1);
#ifdef ERTS_SMP
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
#endif
- }
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
#ifdef ERTS_SMP
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && aux_work &&
- erts_thr_progress_update(esdp))
+ if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
#endif
}
@@ -6772,18 +6769,19 @@ suspend_scheduler(ErtsSchedulerData *esdp)
& ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work|qmask) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
}
- if (aux_work)
- aux_work = handle_aux_work(&esdp->aux_work_data,
- aux_work,
- 1);
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (aux_work && erts_thr_progress_update(esdp)))
- erts_thr_progress_leader_update(esdp);
if (qmask) {
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
@@ -7000,17 +6998,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
& ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work|qmask) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
}
- if (aux_work)
- aux_work = handle_aux_work(&esdp->aux_work_data,
- aux_work,
- 1);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && aux_work &&
- erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
if (qmask) {
erts_smp_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -9392,10 +9391,9 @@ Process *schedule(Process *p, int calls)
suspend_scheduler(esdp);
#endif
- {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
erts_aint32_t aux_work;
- int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0
- : erts_thr_progress_update(esdp);
+ int leader_update = erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work | leader_update) {
erts_smp_runq_unlock(rq);
@@ -9406,8 +9404,7 @@ Process *schedule(Process *p, int calls)
erts_smp_runq_lock(rq);
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || !erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 71818b2330..b50fa5a10f 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -612,7 +612,7 @@ typedef enum {
typedef union {
struct {
ErtsDirtySchedulerType type: 1;
- unsigned num: 31;
+ Uint num: sizeof(Uint)*8 - 1;
} s;
Uint no;
} ErtsDirtySchedId;
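
The erl_process.h hunk widens the num bit-field from a fixed 31 bits to one bit less than a full word, so the two bit-fields together cover as many bits as the Uint no member they are unioned with on both 32- and 64-bit builds (the old unsigned num: 31 covered only 32 of them). A minimal standalone sketch of that layout, with a local Uint stand-in and a hypothetical DirtySchedId name rather than the emulator's own header:

#include <stdio.h>

typedef unsigned long Uint;               /* sketch-only stand-in for the emulator's Uint */

typedef union {
    struct {
        unsigned type: 1;                 /* dirty CPU vs. dirty I/O scheduler */
        Uint num: sizeof(Uint)*8 - 1;     /* scheduler number fills the rest   */
    } s;
    Uint no;                              /* the whole id viewed as one word   */
} DirtySchedId;                           /* hypothetical name for this sketch */

int main(void)
{
    DirtySchedId id;
    id.s.type = 1;
    id.s.num  = 3;
    /* The exact overlap of the bit-fields with `no` is implementation-defined,
     * but the fields now span a full word instead of 32 bits. */
    printf("sizeof(DirtySchedId) = %zu, no = %lu\n",
           sizeof(DirtySchedId), (unsigned long)id.no);
    return 0;
}

As in the original declaration, using a non-int type for a bit-field is implementation-defined in C, so the sketch only demonstrates the width arithmetic, not a portability guarantee.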