author | Rickard Green <[email protected]> | 2010-06-03 12:43:57 +0000
committer | Erlang/OTP <[email protected]> | 2010-06-03 12:43:57 +0000
commit | 97fa9fe68af5990a8ef7a71ccb7fc235d20a2c04 (patch)
tree | 8fbbe0bb2d4e1357b5dbf5af7e9efa2c401408d3 /erts/emulator/beam/erl_process.c
parent | 8335159b919cc330e1c529464b6bbf89edbbe0a0 (diff)
OTP-8666 Enable automatic binding of schedulers when possible
By default, the runtime system will now bind schedulers to logical processors
using the default_bind bind type if the number of schedulers is at least
equal to the number of logical processors configured, binding of schedulers
is supported, and a CPU topology is available at startup.
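This precondition can be checked from a running node using documented erlang:system_info/1 keys. A minimal sketch follows; the module and function names are hypothetical and not part of this commit, and logical_processors may return the atom unknown on platforms without CPU topology detection.

```erlang
%% Hypothetical helper (not part of this commit): reports the current bind
%% type together with whether the "schedulers >= configured logical
%% processors" precondition for automatic default_bind holds on this node.
-module(bind_check).
-export([auto_bind_possible/0]).

auto_bind_possible() ->
    NCpus   = erlang:system_info(logical_processors), %% integer or 'unknown'
    NScheds = erlang:system_info(schedulers),
    Enough  = is_integer(NCpus) andalso NScheds >= NCpus,
    {erlang:system_info(scheduler_bind_type), Enough}.
```

On a node where the automatic binding took effect, the first element of the result is the default bind order (thread_no_node_processor_spread, per the diff below) rather than unbound.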
NOTE: If the Erlang runtime system is the only operating system process
that binds threads to logical processors, this improves the performance of
the runtime system. However, if other operating system processes (for
example, another Erlang runtime system) also bind threads to logical
processors, there might be a performance penalty instead. If this is the
case, you are advised to unbind the schedulers using the <seealso
marker="erl#+sbt">+sbt u</seealso> command line argument, or by invoking
<seealso
marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type,
unbound)</seealso>.
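As a usage sketch of the run-time unbind mentioned above (assuming an SMP emulator on a platform where binding is supported; the calls shown are the documented erlang:system_flag/2 and erlang:system_info/1):

```erlang
%% Unbind all schedulers at run time; same effect as starting the node
%% with "erl +sbt u". The system_flag/2 call returns the previous setting.
Prev = erlang:system_flag(scheduler_bind_type, unbound),
%% Each element of the bindings tuple should now be the atom 'unbound'.
Bindings = erlang:system_info(scheduler_bindings),
{Prev, Bindings}.
```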
Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r-- | erts/emulator/beam/erl_process.c | 38
1 file changed, 28 insertions, 10 deletions
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index e329bfd2d1..c61fab5d36 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -180,6 +180,7 @@ static ErtsCpuBindData *scheduler2cpu_map;
 erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
 
 typedef enum {
+    ERTS_CPU_BIND_UNDEFINED,
     ERTS_CPU_BIND_SPREAD,
     ERTS_CPU_BIND_PROCESSOR_SPREAD,
     ERTS_CPU_BIND_THREAD_SPREAD,
@@ -190,6 +191,9 @@ typedef enum {
     ERTS_CPU_BIND_NONE
 } ErtsCpuBindOrder;
 
+#define ERTS_CPU_BIND_DEFAULT_BIND \
+    ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD
+
 ErtsCpuBindOrder cpu_bind_order;
 
 static erts_cpu_topology_t *user_cpudata;
@@ -3503,14 +3507,15 @@ erts_init_scheduler_bind_type(char *how)
 
     if (!system_cpudata && !user_cpudata)
 	return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY;
 
-    if (sys_strcmp(how, "s") == 0)
+    if (sys_strcmp(how, "db") == 0)
+	cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+    else if (sys_strcmp(how, "s") == 0)
 	cpu_bind_order = ERTS_CPU_BIND_SPREAD;
     else if (sys_strcmp(how, "ps") == 0)
 	cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
     else if (sys_strcmp(how, "ts") == 0)
 	cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
-    else if (sys_strcmp(how, "db") == 0
-	     || sys_strcmp(how, "tnnps") == 0)
+    else if (sys_strcmp(how, "tnnps") == 0)
 	cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
     else if (sys_strcmp(how, "nnps") == 0)
 	cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4153,14 +4158,15 @@ erts_bind_schedulers(Process *c_p, Eterm how)
 
 	old_cpu_bind_order = cpu_bind_order;
 
-	if (ERTS_IS_ATOM_STR("spread", how))
+	if (ERTS_IS_ATOM_STR("default_bind", how))
+	    cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+	else if (ERTS_IS_ATOM_STR("spread", how))
 	    cpu_bind_order = ERTS_CPU_BIND_SPREAD;
 	else if (ERTS_IS_ATOM_STR("processor_spread", how))
 	    cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
 	else if (ERTS_IS_ATOM_STR("thread_spread", how))
 	    cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
-	else if (ERTS_IS_ATOM_STR("default_bind", how)
-		 || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+	else if (ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
 	    cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
 	else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
 	    cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4206,14 +4212,15 @@ erts_fake_scheduler_bindings(Process *p, Eterm how)
     int cpudata_size;
     Eterm res;
 
-    if (ERTS_IS_ATOM_STR("spread", how))
+    if (ERTS_IS_ATOM_STR("default_bind", how))
+	fake_cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+    else if (ERTS_IS_ATOM_STR("spread", how))
 	fake_cpu_bind_order = ERTS_CPU_BIND_SPREAD;
     else if (ERTS_IS_ATOM_STR("processor_spread", how))
 	fake_cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
     else if (ERTS_IS_ATOM_STR("thread_spread", how))
 	fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
-    else if (ERTS_IS_ATOM_STR("default_bind", how)
-	     || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+    else if (ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
 	fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
     else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
 	fake_cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4438,7 +4445,7 @@ early_cpu_bind_init(void)
 	       (sizeof(erts_cpu_topology_t)
 		* system_cpudata_size));
 
-    cpu_bind_order = ERTS_CPU_BIND_NONE;
+    cpu_bind_order = ERTS_CPU_BIND_UNDEFINED;
 
     if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
 	|| ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
@@ -4464,6 +4471,17 @@ late_cpu_bind_init(void)
 	scheduler2cpu_map[ix].bound_id = -1;
     }
 
+    if (cpu_bind_order == ERTS_CPU_BIND_UNDEFINED) {
+	int ncpus = erts_get_cpu_configured(erts_cpuinfo);
+	if (ncpus < 1 || erts_no_schedulers < ncpus)
+	    cpu_bind_order = ERTS_CPU_BIND_NONE;
+	else
+	    cpu_bind_order = ((system_cpudata || user_cpudata)
+			      && (erts_bind_to_cpu(erts_cpuinfo, -1) != -ENOTSUP)
+			      ? ERTS_CPU_BIND_DEFAULT_BIND
+			      : ERTS_CPU_BIND_NONE);
+    }
+
     if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
 	erts_cpu_topology_t *cpudata;
 	int cpudata_size;
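The retargeted "db" / default_bind branches above also mean the default order can still be requested explicitly rather than relying on the automatic start-up choice. A small usage sketch follows (assuming binding is supported and a CPU topology is known; otherwise the system_flag/2 call fails with badarg, and the returned values depend on the host):

```erlang
%% Explicitly select the default bind order at run time; equivalent to
%% starting the node with "erl +sbt db". In this commit, default_bind maps
%% to ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD.
Prev = erlang:system_flag(scheduler_bind_type, default_bind),
%% Inspect the bind order actually in effect afterwards.
erlang:system_info(scheduler_bind_type).
```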