author | Lukas Larsson <[email protected]> | 2014-02-24 16:45:04 +0100
committer | Lukas Larsson <[email protected]> | 2014-02-24 16:45:04 +0100
commit | a157ffce8e25012512e0e5d88e05fb135792f161 (patch)
tree | 8c2bf21e8a401f696483d6e7d08ff00dd805c06c /erts/emulator/beam/erl_process.c
parent | 4ce83eb728748787f8a2da6155112c6da42a0eba (diff)
parent | 81abbc48e12317a07a8d2fcc041031c1c1567c8d (diff)
Merge branch 'lukas/ose/master/OTP-11334'
* lukas/ose/master/OTP-11334: (71 commits)
erts: Fix unix efile assert
ose: Use -O2 when building
ose: Expand OSE docs
ose: Add dummy ttsl driver
ose: Cleanup cleanup of mutex selection defines
ose: Polish mmap configure checks
ose: Add ose specific x-compile flags
ose: Updating fd_driver and spawn_driver for OSE
ose: Updating event and signal API for OSE
ose: Cleanup of mutex selection defines
win32: Compile erl_log.exe
ose: Remove uneccesary define
ose: Fix ssl configure test for osx
erts: Fix sys_msg_dispatcher assert
ose: Fix broken doc links
ose: Thread priorities configurable from lmconf
ose: Yielding the cpu is done "the OSE" way
ose: Start using ppdata for tse key
ose: Do not use spinlocks on OSE
ose: Fix support for crypto
...
Conflicts:
lib/crypto/c_src/crypto.c
Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r-- | erts/emulator/beam/erl_process.c | 106 |
1 file changed, 86 insertions, 20 deletions
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 1d0275c524..37e1d07107 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -44,6 +44,7 @@
 #include "dtrace-wrapper.h"
 #include "erl_ptab.h"
+
 #define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
 #define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -53,7 +54,11 @@
 #define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
 
+#ifndef ERTS_SCHED_MIN_SPIN
 #define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+#else
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 1
+#endif
 
 #define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40
 #define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000
@@ -508,7 +513,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
 
 #ifdef ERTS_SMP
 static void handle_pending_exiters(ErtsProcList *);
-
+static void wake_scheduler(ErtsRunQueue *rq);
 #endif
 
 #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
@@ -548,7 +553,7 @@ void
 erts_pre_init_process(void)
 {
 #ifdef USE_THREADS
-    erts_tsd_key_create(&sched_data_key);
+    erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
 #endif
 
 #ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2375,14 +2380,24 @@ try_set_sys_scheduling(void)
 #endif
 
 static ERTS_INLINE int
-prepare_for_sys_schedule(void)
+prepare_for_sys_schedule(ErtsSchedulerData *esdp)
 {
 #ifdef ERTS_SMP
     while (!erts_port_task_have_outstanding_io_tasks()
            && try_set_sys_scheduling()) {
-        if (!erts_port_task_have_outstanding_io_tasks())
-            return 1;
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+        if (esdp->no != 1) {
+            /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+               then we make sure to wake scheduler 1 */
+            ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
+            clear_sys_scheduling();
+            wake_scheduler(rq);
+            return 0;
+        }
+#endif
+        if (!erts_port_task_have_outstanding_io_tasks())
+            return 1;
         clear_sys_scheduling();
     }
     return 0;
 #else
@@ -2688,6 +2703,8 @@ aux_thread(void *unused)
                 erts_thr_progress_active(NULL, thr_prgr_active = 0);
             erts_thr_progress_prepare_wait(NULL);
 
+            ERTS_SCHED_FAIR_YIELD();
+
             flgs = sched_spin_wait(ssi, 0);
 
             if (flgs & ERTS_SSI_FLG_SLEEPING) {
@@ -2755,7 +2772,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
      * be waiting in erl_sys_schedule()
      */
-    if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule()) {
+    if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp)) {
 
         sched_waiting(esdp->no, rq);
@@ -2793,6 +2810,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
                     erts_thr_progress_prepare_wait(esdp);
             }
 
+            ERTS_SCHED_FAIR_YIELD();
+
             flgs = sched_spin_wait(ssi, spincount);
             if (flgs & ERTS_SSI_FLG_SLEEPING) {
                 ASSERT(flgs & ERTS_SSI_FLG_WAITING);
@@ -2848,6 +2867,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
 #ifdef ERTS_DIRTY_SCHEDULERS
         ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
 #endif
+
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+        ASSERT(esdp->no == 1);
+#endif
         sched_waiting_sys(esdp->no, rq);
@@ -2868,7 +2891,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
         sched_wall_time_change(esdp, working = 0);
 
         ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
         erl_sys_schedule(1); /* Might give us something to do */
 
         dt = erts_do_time_read_and_reset();
@@ -2914,7 +2936,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
                  * Got to check that we still got I/O tasks; otherwise
                  * we have to continue checking for I/O...
                  */
-                if (!prepare_for_sys_schedule()) {
+                if (!prepare_for_sys_schedule(esdp)) {
                     spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
                     goto tse_wait;
                 }
@@ -2936,7 +2958,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
             * Got to check that we still got I/O tasks; otherwise
             * we have to wait in erl_sys_schedule() after all...
             */
-            if (!prepare_for_sys_schedule()) {
+            if (!prepare_for_sys_schedule(esdp)) {
                 /*
                  * Not allowed to wait in erl_sys_schedule;
                  * do tse wait instead...
@@ -5044,11 +5066,17 @@ erts_early_init_scheduling(int no_schedulers)
     wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
     wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
 #endif
+#ifndef ERTS_SCHED_MIN_SPIN
     sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
     sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
                            * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
     sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
                                 * ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
+#else
+    sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+    sched_busy_wait.tse = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+    sched_busy_wait.aux_work = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+#endif
 }
 
 int
@@ -7774,16 +7802,23 @@
 void
 erts_start_schedulers(void)
 {
     int res = 0;
-    Uint actual = 0;
+    Uint actual;
     Uint wanted = erts_no_schedulers;
     Uint wanted_no_schedulers = erts_no_schedulers;
     ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
 
     opts.detached = 1;
+#ifdef ETHR_HAVE_THREAD_NAMES
+    opts.name = malloc(80);
+#endif
+
 #ifdef ERTS_SMP
     if (erts_runq_supervision_interval) {
         opts.suggested_stack_size = 16;
+#ifdef ETHR_HAVE_THREAD_NAMES
+        sprintf(opts.name, "runq_supervisor");
+#endif
         erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
         if (0 != ethr_event_init(&runq_supervision_event))
             erl_exit(1, "Failed to create run-queue supervision event\n");
@@ -7805,17 +7840,27 @@ erts_start_schedulers(void)
             res = ENOTSUP;
     }
 
-    while (actual < wanted) {
+    for (actual = 0; actual < wanted; actual++) {
         ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
-        actual++;
-        ASSERT(actual == esdp->no);
-        res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
+
+        ASSERT(actual == esdp->no - 1);
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+        sprintf(opts.name, "scheduler_%d", actual + 1);
+#endif
+
+#ifdef __OSE__
+        /* This should be done in the bind strategy */
+        opts.coreNo = (actual+1) % ose_num_cpus();
+#endif
+
+        res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
+
         if (res != 0) {
-            actual--;
-            break;
+            break;
         }
     }
-
+
     erts_no_schedulers = actual;
 
 #ifdef ERTS_DIRTY_SCHEDULERS
@@ -7824,12 +7869,18 @@ erts_start_schedulers(void)
     {
         int ix;
         for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
             ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+            sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
+#endif
             res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
             if (res != 0)
                 erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
         }
         for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
             ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+            sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
+#endif
             res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
             if (res != 0)
                 erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -7840,6 +7891,14 @@ erts_start_schedulers(void)
 
     ERTS_THR_MEMORY_BARRIER;
 
+#ifdef ETHR_HAVE_THREAD_NAMES
+    sprintf(opts.name, "aux");
+#endif
+
+#ifdef __OSE__
+    opts.coreNo = 0;
+#endif /* __OSE__ */
+
     res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
     if (res != 0)
         erl_exit(1, "Failed to create aux thread\n");
@@ -7859,6 +7918,10 @@ erts_start_schedulers(void)
                       actual, actual == 1 ? " was" : "s were");
         erts_send_error_to_logger_nogl(dsbufp);
     }
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+    free(opts.name);
+#endif
 }
 
 #endif /* ERTS_SMP */
@@ -8996,10 +9059,12 @@ Process *schedule(Process *p, int calls)
             int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0
                 : erts_thr_progress_update(esdp);
             aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
-            if (aux_work | leader_update) {
+            if (aux_work | leader_update | ERTS_SCHED_FAIR) {
                 erts_smp_runq_unlock(rq);
                 if (leader_update)
                     erts_thr_progress_leader_update(esdp);
+                else if (ERTS_SCHED_FAIR)
+                    ERTS_SCHED_FAIR_YIELD();
                 if (aux_work)
                     handle_aux_work(&esdp->aux_work_data, aux_work, 0);
                 erts_smp_runq_lock(rq);
@@ -9074,7 +9139,8 @@ Process *schedule(Process *p, int calls)
             goto check_activities_to_run;
         }
         else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
-                 (fcalls > input_reductions && prepare_for_sys_schedule())) {
+                 (fcalls > input_reductions &&
+                  prepare_for_sys_schedule(esdp))) {
             /*
              * Schedule system-level activities.
             */
@@ -10384,7 +10450,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
      * Check for errors.
      */
-    if (is_not_atom(mod) || is_not_atom(func) || ((arity = list_length(args)) < 0)) {
+    if (is_not_atom(mod) || is_not_atom(func) || ((arity = erts_list_length(args)) < 0)) {
         so->error_code = BADARG;
         goto error;
     }
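The thread-naming changes above all follow the same pattern: one heap buffer (opts.name) is allocated up front, formatted with a role-specific name such as "scheduler_%d" before each ethr_thr_create() call, and freed once every thread has been started. The sketch below is not part of the commit; it is a minimal standalone analog of that pattern using plain POSIX threads, assuming Linux/glibc where pthread_setname_np() is available, and the scheduler_%d naming and 32-byte buffer are illustrative choices, not ERTS code.

```c
/* Illustrative analog only, not ERTS code: name worker threads from one
 * reusable buffer, mirroring the opts.name pattern in the diff above. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *worker(void *arg)
{
    (void) arg;  /* the name is visible in e.g. top -H or /proc/<pid>/task/<tid>/comm */
    return NULL;
}

int main(void)
{
    enum { NWORKERS = 4 };
    pthread_t tid[NWORKERS];
    char *name = malloc(32);        /* one buffer reused for every thread, like opts.name */
    int started, i;

    if (!name)
        return 1;

    for (started = 0; started < NWORKERS; started++) {
        if (pthread_create(&tid[started], NULL, worker, NULL) != 0)
            break;                  /* keep only the threads that were actually created */
        snprintf(name, 32, "scheduler_%d", started + 1);
        pthread_setname_np(tid[started], name);  /* Linux-specific; max 15 chars + NUL */
    }

    for (i = 0; i < started; i++)
        pthread_join(tid[i], NULL);

    free(name);                     /* safe to free once all threads are running */
    return 0;
}
```

Build with `cc -pthread` on Linux. Freeing a single shared buffer after the creation loop only works because the name is copied when it is set (and, in the commit, presumably copied by the ethread layer at create time); the threads themselves never read the buffer afterwards.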