Diffstat (limited to 'erts')
-rw-r--r--  erts/doc/src/erl.xml                   | 105
-rw-r--r--  erts/doc/src/erlang.xml                | 150
-rw-r--r--  erts/emulator/beam/bif.c               |  16
-rw-r--r--  erts/emulator/beam/erl_bif_info.c      |   3
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c  |  11
-rw-r--r--  erts/emulator/beam/erl_init.c          |   9
-rw-r--r--  erts/emulator/beam/erl_lock_check.c    |   1
-rw-r--r--  erts/emulator/beam/erl_port_task.c     |   2
-rw-r--r--  erts/emulator/beam/erl_process.c       | 272
-rw-r--r--  erts/emulator/beam/erl_process.h       |  29
-rw-r--r--  erts/emulator/beam/global.h            |   2
-rw-r--r--  erts/preloaded/ebin/erlang.beam        | bin 39568 -> 41124 bytes
-rw-r--r--  erts/preloaded/src/erlang.erl          |   2
13 files changed, 233 insertions(+), 369 deletions(-)
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index 53f46d2a2e..9fb718e60f 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -665,41 +665,66 @@
</p>
<taglist>
<tag><c>u</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, unbound)</seealso>.
- </p></item>
+ <item>
+ <p><c>unbound</c> - Schedulers will not be bound to logical
+ processors, i.e., the operating system decides where the
+ scheduler threads execute, and when to migrate them. This is
+ the default.</p>
+ </item>
<tag><c>ns</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, no_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>no_spread</c> - Schedulers with close scheduler
+ identifiers will be bound as close as possible in hardware.</p>
+ </item>
<tag><c>ts</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, thread_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>thread_spread</c> - Thread refers to hardware threads
+          (e.g. Intel's hyper-threads). Schedulers with low scheduler
+          identifiers will be bound to the first hardware thread of
+ each core, then schedulers with higher scheduler identifiers
+ will be bound to the second hardware thread of each core,
+ etc.</p>
+ </item>
<tag><c>ps</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, processor_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>processor_spread</c> - Schedulers will be spread like
+ <c>thread_spread</c>, but also over physical processor chips.</p>
+ </item>
<tag><c>s</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>spread</c> - Schedulers will be spread as much as
+ possible.</p>
+ </item>
<tag><c>nnts</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, no_node_thread_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>no_node_thread_spread</c> - Like <c>thread_spread</c>,
+          but if multiple NUMA (Non-Uniform Memory Access) nodes exist,
+ schedulers will be spread over one NUMA node at a time,
+ i.e., all logical processors of one NUMA node will be bound
+ to schedulers in sequence.</p>
+ </item>
<tag><c>nnps</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, no_node_processor_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>no_node_processor_spread</c> - Like
+          <c>processor_spread</c>, but if multiple NUMA nodes exist,
+ schedulers will be spread over one NUMA node at a time, i.e.,
+ all logical processors of one NUMA node will be bound to
+ schedulers in sequence.</p>
+ </item>
<tag><c>tnnps</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, thread_no_node_processor_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>thread_no_node_processor_spread</c> - A combination of
+          <c>thread_spread</c> and <c>no_node_processor_spread</c>.
+ Schedulers will be spread over hardware threads across NUMA
+ nodes, but schedulers will only be spread over processors
+ internally in one NUMA node at a time.</p>
+ </item>
<tag><c>db</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, default_bind)</seealso>.
- </p></item>
+ <item>
+ <p><c>default_bind</c> - Binds schedulers the default way.
+ Currently the default is <c>thread_no_node_processor_spread</c>
+ (which might change in the future).</p>
+ </item>
</taglist>
<p>Binding of schedulers is currently only supported on newer
Linux, Solaris, FreeBSD, and Windows systems.</p>
@@ -723,8 +748,22 @@
instead. In some cases this performance penalty might be
severe. If this is the case, you are advised to not
bind the schedulers.</p>
- <p>For more information, see
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, SchedulerBindType)</seealso>.
+ <p>How schedulers are bound matters. For example, in
+ situations when there are fewer running processes than
+ schedulers online, the runtime system tries to migrate
+ processes to schedulers with low scheduler identifiers.
+ The more the schedulers are spread over the hardware,
+ the more resources will be available to the runtime
+ system in such situations.
+ </p>
+ <p>
+          <em>NOTE:</em> If a scheduler fails to bind, this
+          will often be silently ignored, since it isn't always
+          possible to verify valid logical processor identifiers. If
+          an error is reported, it will be reported to the
+          <c>error_logger</c>. If you want to verify that the
+          schedulers actually have been bound as requested, call
+ <seealso marker="erlang#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
</p>
</item>
<tag><marker id="+scl"><c>+scl true|false</c></marker></tag>
@@ -755,6 +794,12 @@
<item><c><![CDATA[<IdDefs> = <LogicalIds><ThreadIds><CoreIds><ProcessorIds><NodeIds> | <LogicalIds><ThreadIds><CoreIds><NodeIds><ProcessorIds>]]></c></item>
<item><c><![CDATA[CpuTopology = <IdDefs>:<IdDefs> | <IdDefs>]]></c></item>
</list>
+        <p>Set a user defined CPU topology. The user defined
+ CPU topology will override any automatically detected
+ CPU topology. The CPU topology is used when
+ <seealso marker="#+sbt">binding schedulers to logical
+ processors</seealso>.
+ </p>
<p>Upper-case letters signify real identifiers and lower-case
letters signify fake identifiers only used for description
of the topology. Identifiers passed as real identifiers may
@@ -854,7 +899,7 @@
how the real CPU topology looks like is likely to
decrease the performance of the runtime system.</p>
<p>For more information, see
- <seealso marker="erlang#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>.</p>
+ <seealso marker="erlang#system_info_cpu_topology">erlang:system_info(cpu_topology)</seealso>.</p>
</item>
<tag><marker id="+swt"><c>+swt very_low|low|medium|high|very_high</c></marker></tag>
<item>
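
The rewritten +sbt documentation above stresses that a failed binding is often
silently ignored. Below is a minimal Erlang sketch of the verification step it
recommends, assuming an SMP emulator started as "erl +sbt tnnps"; the exact
tuple contents depend entirely on the host's CPU topology:

    %% erlang:system_info(scheduler_bindings) returns a tuple with one
    %% element per scheduler: the logical processor id the scheduler is
    %% bound to, or the atom 'unbound' if it is not bound.
    check_bindings() ->
        Bindings = tuple_to_list(erlang:system_info(scheduler_bindings)),
        case [S || S <- Bindings, S =:= unbound] of
            []      -> {ok, Bindings};
            Unbound -> {warning, {unbound_schedulers, length(Unbound)}}
        end.
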
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index d685728862..42a4e6a999 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -5069,6 +5069,14 @@ true</pre>
<v>Flag, Value, OldValue -- see below</v>
</type>
<desc>
+ <warning>
+ <p>The
+ <seealso marker="#system_flag_cpu_topology">cpu_topology</seealso>,
+ and
+ <seealso marker="#system_flag_scheduler_bind_type">scheduler_bind_type</seealso>
+ <c>Flag</c>s are <em>deprecated</em> and have been scheduled for
+ removal in erts-5.10/OTP-R16.</p>
+ </warning>
<p>Sets various system properties of the Erlang node. Returns
the old value of the flag.</p>
<taglist>
@@ -5079,6 +5087,12 @@ true</pre>
</item>
<tag><marker id="system_flag_cpu_topology"><c>erlang:system_flag(cpu_topology, CpuTopology)</c></marker></tag>
<item>
+ <p><em>NOTE:</em> This argument is <em>deprecated</em> and
+ scheduled for removal in erts-5.10/OTP-R16. Instead of using
+ this argument you are advised to use the <c>erl</c> command
+ line argument <seealso marker="erts:erl#+sct">+sct</seealso>.
+          When this argument has been removed, the final CPU topology
+          to use will be determined at emulator boot time.</p>
<p>Sets the user defined <c>CpuTopology</c>. The user defined
CPU topology will override any automatically detected
CPU topology. By passing <c>undefined</c> as <c>CpuTopology</c>
@@ -5093,15 +5107,15 @@ true</pre>
to rebind according to the new CPU topology.
</p>
<p>The user defined CPU topology can also be set by passing
- the <seealso marker="erl#+sct">+sct</seealso> command
+ the <seealso marker="erts:erl#+sct">+sct</seealso> command
line argument to <c>erl</c>.
</p>
<p>For information on the <c>CpuTopology</c> type
and more, see the documentation of
<seealso marker="#system_info_cpu_topology">erlang:system_info(cpu_topology)</seealso>,
- the <c>erl</c> <seealso marker="erl#+sct">+sct</seealso>
- emulator flag, and
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>.
+ and the <c>erl</c> <seealso marker="erts:erl#+sct">+sct</seealso>
+ and <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ command line flags.
</p>
</item>
<tag><c>erlang:system_flag(fullsweep_after, Number)</c></tag>
@@ -5177,6 +5191,12 @@ true</pre>
</item>
<tag><marker id="system_flag_scheduler_bind_type"><c>erlang:system_flag(scheduler_bind_type, How)</c></marker></tag>
<item>
+ <p><em>NOTE:</em> This argument is <em>deprecated</em> and
+ scheduled for removal in erts-5.10/OTP-R16. Instead of using
+ this argument you are advised to use the <c>erl</c> command
+ line argument <seealso marker="erts:erl#+sbt">+sbt</seealso>.
+          When this argument has been removed, the final scheduler bind
+          type to use will be determined at emulator boot time.</p>
<p>Controls if and how schedulers are bound to logical
processors.</p>
<p>When <c>erlang:system_flag(scheduler_bind_type, How)</c> is
@@ -5198,7 +5218,8 @@ true</pre>
the CPU topology needs to be known. If the runtime system fails
to automatically detect the CPU topology, it can be defined.
For more information on how to define the CPU topology, see
- <seealso marker="#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>.
+ the <c>erl</c> <seealso marker="erts:erl#+sct">+sct</seealso> command
+ line flag.
</p>
<p>The runtime system will by default <em>not</em> bind schedulers
to logical processors.
@@ -5216,71 +5237,42 @@ true</pre>
currently be one of:</p>
<taglist>
<tag><c>unbound</c></tag>
- <item>
- <p>Schedulers will not be bound to logical processors, i.e.,
- the operating system decides where the scheduler threads
- execute, and when to migrate them. This is the default.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt u</seealso>.
+ </p></item>
<tag><c>no_spread</c></tag>
- <item>
- <p>Schedulers with close scheduler identifiers will be bound
- as close as possible in hardware.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt ns</seealso>.
+ </p></item>
<tag><c>thread_spread</c></tag>
- <item>
- <p>Thread refers to hardware threads (e.g. Intels
- hyper-threads). Schedulers with low scheduler identifiers,
- will be bound to the first hardware thread of each core,
- then schedulers with higher scheduler identifiers will be
- bound to the second hardware thread of each core, etc.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt ts</seealso>.
+ </p></item>
<tag><c>processor_spread</c></tag>
- <item>
- <p>Schedulers will be spread like <c>thread_spread</c>, but
- also over physical processor chips.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt ps</seealso>.
+ </p></item>
<tag><c>spread</c></tag>
- <item>
- <p>Schedulers will be spread as much as possible.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt s</seealso>.
+ </p></item>
<tag><c>no_node_thread_spread</c></tag>
- <item>
- <p>Like <c>thread_spread</c>, but if multiple NUMA
- (Non-Uniform Memory Access) nodes exists,
- schedulers will be spread over one NUMA node at a time,
- i.e., all logical processors of one NUMA node will
- be bound to schedulers in sequence.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt nnts</seealso>.
+ </p></item>
<tag><c>no_node_processor_spread</c></tag>
- <item>
- <p>Like <c>processor_spread</c>, but if multiple NUMA
- nodes exists, schedulers will be spread over one
- NUMA node at a time, i.e., all logical processors of
- one NUMA node will be bound to schedulers in sequence.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt nnps</seealso>.
+ </p></item>
<tag><c>thread_no_node_processor_spread</c></tag>
- <item>
- <p>A combination of <c>thread_spread</c>, and
- <c>no_node_processor_spread</c>. Schedulers will be
- spread over hardware threads across NUMA nodes, but
- schedulers will only be spread over processors internally
- in one NUMA node at a time.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt tnnps</seealso>.
+ </p></item>
<tag><c>default_bind</c></tag>
- <item>
- <p>Binds schedulers the default way. Currently the default
- is <c>thread_no_node_processor_spread</c> (which might change
- in the future).</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt db</seealso>.
+ </p></item>
</taglist>
- <p>How schedulers are bound matters. For example, in
- situations when there are fewer running processes than
- schedulers online, the runtime system tries to migrate
- processes to schedulers with low scheduler identifiers.
- The more the schedulers are spread over the hardware,
- the more resources will be available to the runtime
- system in such situations.
- </p>
<p>The value returned equals <c>How</c> before the
<c>scheduler_bind_type</c> flag was changed.</p>
<p>Failure:</p>
@@ -5299,15 +5291,15 @@ true</pre>
</item>
</taglist>
<p>The scheduler bind type can also be set by passing
- the <seealso marker="erl#+sbt">+sbt</seealso> command
+ the <seealso marker="erts:erl#+sbt">+sbt</seealso> command
line argument to <c>erl</c>.
</p>
<p>For more information, see
<seealso marker="#system_info_scheduler_bind_type">erlang:system_info(scheduler_bind_type)</seealso>,
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>,
- the <c>erl</c> <seealso marker="erl#+sbt">+sbt</seealso>
- emulator flag, and
- <seealso marker="#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>.
+ the <c>erl</c> <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ and <seealso marker="erts:erl#+sct">+sct</seealso> command line
+ flags.
</p>
</item>
<tag><marker id="system_flag_schedulers_online"><c>erlang:system_flag(schedulers_online, SchedulersOnline)</c></marker></tag>
@@ -5508,10 +5500,12 @@ true</pre>
<item>
<p>Returns the <c>CpuTopology</c> which currently is used by the
emulator. The CPU topology is used when binding schedulers
- to logical processors. The CPU topology used is the user defined
- CPU topology if such exist; otherwise, the automatically
- detected CPU topology if such exist. If no CPU topology
- exist <c>undefined</c> is returned.</p>
+ to logical processors. The CPU topology used is the
+ <seealso marker="erlang#system_info_cpu_topology_defined">user
+ defined CPU topology</seealso> if such exists; otherwise, the
+ <seealso marker="erlang#system_info_cpu_topology_detected">automatically
+ detected CPU topology</seealso> if such exists. If no CPU topology
+ exists, <c>undefined</c> is returned.</p>
<p>Types:</p>
<list type="bulleted">
<item><c>CpuTopology = LevelEntryList | undefined</c></item>
@@ -5558,8 +5552,8 @@ true</pre>
<item>
<p>Returns the user defined <c>CpuTopology</c>. For more
information see the documentation of
- <seealso marker="#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>
- and the documentation of the
+ the <c>erl</c> <seealso marker="erts:erl#+sct">+sct</seealso> command
+ line flag, and the documentation of the
<seealso marker="#system_info_cpu_topology">cpu_topology</seealso>
argument.
</p>
@@ -5637,7 +5631,7 @@ true</pre>
<item>
<p>Returns the value of the distribution buffer busy limit
in bytes. This limit can be set on startup by passing the
- <seealso marker="erl#+zdbbl">+zdbbl</seealso> command line
+ <seealso marker="erts:erl#+zdbbl">+zdbbl</seealso> command line
flag to <c>erl</c>.</p>
</item>
<tag><c>fullsweep_after</c></tag>
@@ -5839,14 +5833,13 @@ true</pre>
<p>Returns information on how user has requested
schedulers to be bound or not bound.</p>
<p><em>NOTE:</em> Even though user has requested
- schedulers to be bound via
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>,
- they might have silently failed to bind. In order to
- inspect actual scheduler bindings call
+ schedulers to be bound, they might have silently failed
+ to bind. In order to inspect actual scheduler bindings call
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
</p>
<p>For more information, see
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>, and
+ the <c>erl</c> <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ command line argument, and
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
</p>
</item>
@@ -5869,7 +5862,8 @@ true</pre>
<p>Note that only schedulers online can be bound to logical
processors.</p>
<p>For more information, see
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>,
+ the <c>erl</c> <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ command line argument,
<seealso marker="#system_info_schedulers_online">erlang:system_info(schedulers_online)</seealso>.
</p>
</item>
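
The erlang.xml changes above distinguish the user defined CPU topology (set
with +sct) from the automatically detected one; both can be inspected at
runtime. A hedged sketch follows — the {cpu_topology, defined | detected}
variants come from the system_info documentation, and the sample term shows
only the general shape (actual values are machine-dependent):

    %% On a single dual-core chip, the detected topology might look like
    %% [{processor,[{core,{logical,0}},{core,{logical,1}}]}].
    topology_info() ->
        [{used,     erlang:system_info(cpu_topology)},
         {defined,  erlang:system_info({cpu_topology, defined})},
         {detected, erlang:system_info({cpu_topology, detected})}].
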
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 8ab363a1ec..26f1b4facb 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -870,8 +870,6 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
}
} else if (arg == am_scheduler && is_small(val)) {
Sint scheduler = signed_val(val);
- if (erts_common_run_queue && erts_no_schedulers > 1)
- goto error;
if (scheduler < 0 || erts_no_schedulers < scheduler)
goto error;
so.scheduler = (int) scheduler;
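
The hunk above drops the old common-run-queue restriction on the scheduler
spawn option, leaving only the range check. An informal Erlang sketch of what
the remaining C check accepts (0 means no specific scheduler; this option is
an internal knob, shown here only to illustrate the changed code path):

    spawn_pinned(SchedulerId) ->
        %% Accepted iff 0 =< SchedulerId =< erlang:system_info(schedulers);
        %% anything else hits the retained 'goto error' above (badarg).
        spawn_opt(fun() -> receive stop -> ok end end,
                  [{scheduler, SchedulerId}]).
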
@@ -1535,8 +1533,6 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
ErtsRunQueue *old;
ErtsRunQueue *new;
Sint sched;
- if (erts_common_run_queue && erts_no_schedulers > 1)
- goto error;
if (!is_small(BIF_ARG_2))
goto error;
sched = signed_val(BIF_ARG_2);
@@ -4128,8 +4124,20 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
if (is_value(res))
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
+ erts_send_warning_to_logger_str(
+ BIF_P->group_leader,
+ "A call to erlang:system_flag(cpu_topology, _) was made.\n"
+ "The cpu_topology argument is deprecated and scheduled\n"
+ "for removal in erts-5.10/OTP-R16. For more information\n"
+ "see the erlang:system_flag/2 documentation.\n");
BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
+ erts_send_warning_to_logger_str(
+ BIF_P->group_leader,
+ "A call to erlang:system_flag(scheduler_bind_type, _) was\n"
+ "made. The scheduler_bind_type argument is deprecated and\n"
+ "scheduled for removal in erts-5.10/OTP-R16. For more\n"
+ "information see the erlang:system_flag/2 documentation.\n");
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
}
error:
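
With the warnings added above, the deprecated system_flag/2 arguments keep
working but now also report to the error logger. A hypothetical shell session
(the returned old value is assumed here to be unbound, the default; the
warning text is the string added in the hunk):

    1> erlang:system_flag(scheduler_bind_type, default_bind).
    unbound
    %% Shortly after, the error logger emits a warning report containing:
    %%   A call to erlang:system_flag(scheduler_bind_type, _) was
    %%   made. The scheduler_bind_type argument is deprecated and
    %%   scheduled for removal in erts-5.10/OTP-R16. For more
    %%   information see the erlang:system_flag/2 documentation.
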
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index a79feaebdb..db04fe5a54 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -78,7 +78,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#ifdef ERTS_SMP
" [smp:%beu:%beu]"
#endif
- " [rq:%beu]"
#ifdef USE_THREADS
" [async-threads:%d]"
#endif
@@ -301,7 +300,7 @@ erts_print_system_version(int to, void *arg, Process *c_p)
#endif
return erts_print(to, arg, erts_system_version
#ifdef ERTS_SMP
- , total, online, erts_no_run_queues
+ , total, online
#else
, 1
#endif
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index cc930ba1e3..fe3693d0ca 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -486,10 +486,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
erts_thr_set_main_status(1, (int) esdp->no);
/* Make sure we check if we should bind to a cpu or not... */
- if (esdp->run_queue->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- erts_smp_atomic32_set_nob(&esdp->chk_cpu_bind, 1);
- else
- esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
}
#endif
@@ -502,11 +499,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- erts_smp_atomic32_set_nob(&esdp->chk_cpu_bind, 0);
- else {
- esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
- }
+ esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
#endif
erts_smp_runq_unlock(esdp->run_queue);
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 6c4ba2af68..717315d8bd 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -111,7 +111,6 @@ Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
int erts_compat_rel;
-static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
@@ -254,8 +253,7 @@ erl_init(int ncpu)
erts_init_time();
erts_init_sys_common_misc();
erts_init_process(ncpu);
- erts_init_scheduling(use_multi_run_queue,
- no_schedulers,
+ erts_init_scheduling(no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
erts_alloc_late_init();
@@ -613,7 +611,6 @@ early_init(int *argc, char **argv) /*
size_t envbufsz;
erts_sched_compact_load = 1;
- use_multi_run_queue = 1;
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
display_items = 200;
@@ -1257,12 +1254,8 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (sys_strcmp("mrq", sub_param) == 0)
- use_multi_run_queue = 1;
else if (sys_strcmp("nsp", sub_param) == 0)
erts_use_sender_punish = 0;
- else if (sys_strcmp("srq", sub_param) == 0)
- use_multi_run_queue = 0;
else if (sys_strcmp("wt", sub_param) == 0) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (erts_sched_set_wakeup_limit(arg) != 0) {
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 0dd99b3aa1..09e85893c3 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -173,7 +173,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
- { "run_queue_sleep_list", "address" },
#endif
{ "async_init_mtx", NULL },
#ifdef ERTS_SMP
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 87b8e5131b..2b5e65b11a 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1048,8 +1048,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
- ASSERT(!erts_common_run_queue);
-
if (!*from_locked || !*to_locked) {
if (from_rq < to_rq) {
if (!*to_locked) {
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 6657fe6efa..ec4b1dcd98 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -196,8 +196,6 @@ do { \
erts_sched_stat_t erts_sched_stat;
-ErtsRunQueue *erts_common_run_queue;
-
#ifdef USE_THREADS
static erts_tsd_key_t sched_data_key;
#endif
@@ -1717,21 +1715,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- erts_smp_spin_lock(&rq->sleepers.lock);
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
- erts_smp_spin_unlock(&rq->sleepers.lock);
return;
}
- ssi->prev = NULL;
- ssi->next = rq->sleepers.list;
- if (rq->sleepers.list)
- rq->sleepers.list->prev = ssi;
- rq->sleepers.list = ssi;
- erts_smp_spin_unlock(&rq->sleepers.lock);
-
/*
* If all schedulers are waiting, one of them *should*
* be waiting in erl_sys_schedule()
@@ -1990,10 +1979,10 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq, int one)
+wake_scheduler(ErtsRunQueue *rq, int incq)
{
ErtsSchedulerSleepInfo *ssi;
- ErtsSchedulerSleepList *sl;
+ erts_aint32_t flgs;
/*
* The unlocked run queue is not strictly necessary
@@ -2005,56 +1994,13 @@ wake_scheduler(ErtsRunQueue *rq, int incq, int one)
*/
ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
- sl = &rq->sleepers;
-
- erts_smp_spin_lock(&sl->lock);
- ssi = sl->list;
- if (!ssi)
- erts_smp_spin_unlock(&sl->lock);
- else if (one) {
- erts_aint32_t flgs;
- if (ssi->prev)
- ssi->prev->next = ssi->next;
- else {
- ASSERT(sl->list == ssi);
- sl->list = ssi->next;
- }
- if (ssi->next)
- ssi->next->prev = ssi->prev;
-
- erts_smp_spin_unlock(&sl->lock);
-
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
-
- if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
- non_empty_runq(rq);
- }
- else {
- sl->list = NULL;
- erts_smp_spin_unlock(&sl->lock);
+ ssi = rq->scheduler->ssi;
- ERTS_THR_MEMORY_BARRIER;
- do {
- ErtsSchedulerSleepInfo *wake_ssi = ssi;
- ssi = ssi->next;
- erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
- } while (ssi);
- }
-}
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
-static void
-wake_all_schedulers(void)
-{
- if (erts_common_run_queue)
- wake_scheduler(erts_common_run_queue, 0, 0);
- else {
- int ix;
- for (ix = 0; ix < erts_no_run_queues; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0, 1);
- }
- }
+ if (incq && (flgs & ERTS_SSI_FLG_WAITING))
+ non_empty_runq(rq);
}
#define ERTS_NO_USED_RUNQS_SHIFT 16
@@ -2147,7 +2093,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0, 1);
+ wake_scheduler(wrq, 0);
return 1;
}
return 0;
@@ -2195,7 +2141,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
if (runq)
- wake_scheduler(runq, 1, 1);
+ wake_scheduler(runq, 1);
#endif
}
@@ -2210,19 +2156,12 @@ erts_sched_notify_check_cpu_bind(void)
{
#ifdef ERTS_SMP
int ix;
- if (erts_common_run_queue) {
- for (ix = 0; ix < erts_no_schedulers; ix++)
- erts_smp_atomic32_set_relb(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
- wake_all_schedulers();
- }
- else {
- for (ix = 0; ix < erts_no_run_queues; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- erts_smp_runq_unlock(rq);
- wake_scheduler(rq, 0, 1);
- };
+ for (ix = 0; ix < erts_no_run_queues; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0);
}
#else
erts_sched_check_cpu_bind(erts_get_scheduler_data());
@@ -2491,7 +2430,7 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (notify_to_rq)
smp_notify_inc_runq(rq);
- wake_scheduler(evac_rq, 0, 1);
+ wake_scheduler(evac_rq, 0);
}
static int
@@ -2649,9 +2588,6 @@ static int
try_steal_task(ErtsRunQueue *rq)
{
int res, rq_locked, vix, active_rqs, blnc_rqs;
-
- if (erts_common_run_queue)
- return 0;
/*
* We are not allowed to steal jobs to this run queue
@@ -3328,14 +3264,10 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp)
}
void
-erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
+erts_init_scheduling(int no_schedulers, int no_schedulers_online)
{
int ix, n, no_ssi;
-#ifndef ERTS_SMP
- mrq = 0;
-#endif
-
init_misc_op_list_alloc();
ASSERT(no_schedulers_online <= no_schedulers);
@@ -3344,7 +3276,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
/* Create and initialize run queues */
- n = (int) (mrq ? no_schedulers : 1);
+ n = no_schedulers;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS,
@@ -3369,14 +3301,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
-#ifdef ERTS_SMP
- erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
- rq->sleepers.list = NULL;
-#endif
-
rq->waiting = 0;
rq->woken = 0;
- rq->flags = !mrq ? ERTS_RUNQ_FLG_SHARED_RUNQ : 0;
+ rq->flags = 0;
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
rq->full_reds_history_sum = 0;
for (rix = 0; rix < ERTS_FULL_REDS_HISTORY_SIZE; rix++) {
@@ -3422,8 +3349,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
rq->ports.end = NULL;
}
- erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
-
#ifdef ERTS_SMP
if (erts_no_run_queues != 1) {
@@ -3500,18 +3425,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_init_atom_cache_map(&esdp->atom_cache_map);
- if (erts_common_run_queue) {
- esdp->run_queue = erts_common_run_queue;
- esdp->run_queue->scheduler = NULL;
- }
- else {
- esdp->run_queue = ERTS_RUNQ_IX(ix);
- esdp->run_queue->scheduler = esdp;
- }
+ esdp->run_queue = ERTS_RUNQ_IX(ix);
+ esdp->run_queue->scheduler = esdp;
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&esdp->chk_cpu_bind, 0);
-#endif
init_aux_work_data(&esdp->aux_work_data, esdp);
}
@@ -3534,8 +3450,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
schdlr_sspnd.msb.ongoing = 0;
erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
schdlr_sspnd.msb.procs = NULL;
- init_no_runqs(no_schedulers,
- erts_common_run_queue ? 1 : no_schedulers_online);
+ init_no_runqs(no_schedulers, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
balance_info.forced_check_balance = 0;
@@ -3548,16 +3463,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
balance_info.n = 0;
if (no_schedulers_online < no_schedulers) {
- if (erts_common_run_queue) {
- for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- else {
- for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
- evacuate_run_queue(ERTS_RUNQ_IX(ix),
- ERTS_RUNQ_IX(ix % no_schedulers_online));
- }
+ for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix),
+ ERTS_RUNQ_IX(ix % no_schedulers_online));
}
schdlr_sspnd.wait_curr_online = no_schedulers_online;
@@ -3602,8 +3510,6 @@ ErtsRunQueue *
erts_schedid2runq(Uint id)
{
int ix;
- if (erts_common_run_queue)
- return erts_common_run_queue;
ix = (int) id - 1;
ASSERT(0 <= ix && ix < erts_no_run_queues);
return ERTS_RUNQ_IX(ix);
@@ -3894,9 +3800,11 @@ suspend_scheduler(ErtsSchedulerData *esdp)
wake = 0;
}
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
- break;
+ if (curr_online && !ongoing_multi_scheduling_block()) {
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ }
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
while (1) {
@@ -4088,10 +3996,6 @@ erts_set_schedulers_online(Process *p,
for (ix = online; ix < no; ix++)
erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- else if (erts_common_run_queue) {
- for (ix = online; ix < no; ix++)
- scheduler_ix_resume_wake(ix);
- }
else {
if (plocks) {
have_unlocked_plocks = 1;
@@ -4139,15 +4043,6 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++)
erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- else if (erts_common_run_queue) {
- for (ix = no; ix < online; ix++) {
- ErtsSchedulerSleepInfo *ssi;
- ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_all_schedulers();
- }
else {
if (plocks) {
have_unlocked_plocks = 1;
@@ -4174,7 +4069,7 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
for (ix = no; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0, 1);
+ wake_scheduler(rq, 0);
}
}
}
@@ -4267,33 +4162,26 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 2;
}
- if (erts_common_run_queue) {
- for (ix = 1; ix < online; ix++)
- erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
- wake_all_schedulers();
- }
- else {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_smp_mtx_lock(&balance_info.update_mtx);
- set_no_used_runqs(1);
- for (ix = 0; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
- ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
- erts_smp_runq_unlock(rq);
- }
- /*
- * Evacuate all activities in all other run queues
- * into the first run queue. Note order is important,
- * online run queues has to be evacuated last.
- */
- for (ix = erts_no_run_queues-1; ix >= 1; ix--)
- evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+ set_no_used_runqs(1);
+ for (ix = 0; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+ ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
+ erts_smp_runq_unlock(rq);
}
+ /*
+ * Evacuate all activities in all other run queues
+ * into the first run queue. Note order is important,
+ * online run queues has to be evacuated last.
+ */
+ for (ix = erts_no_run_queues-1; ix >= 1; ix--)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
!= schdlr_sspnd.msb.wait_active) {
@@ -4405,12 +4293,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
- else if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic32_read_band_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
- wake_all_schedulers();
- }
else {
int online = schdlr_sspnd.online;
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -5089,7 +4971,7 @@ suspend_process_2(BIF_ALIST_2)
/* This is really a piece of cake without SMP support... */
if (!smon->active) {
- suspend_process(erts_common_run_queue, suspendee);
+ suspend_process(ERTS_RUNQ_IX(0), suspendee);
smon->active++;
res = am_true;
}
@@ -5659,8 +5541,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
|| from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- ASSERT(!erts_common_run_queue);
/*
* If we have the lock on the run queue to migrate to,
@@ -5811,25 +5691,17 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
int i;
ErtsSchedulerData *esdp;
- if (erts_common_run_queue)
- erts_smp_runq_lock(erts_common_run_queue);
-
for (i = 0; i < erts_no_schedulers; i++) {
esdp = ERTS_SCHEDULER_IX(i);
- if (!erts_common_run_queue)
- erts_smp_runq_lock(esdp->run_queue);
+ erts_smp_runq_lock(esdp->run_queue);
if (esdp->free_process && esdp->free_process->id == rpid) {
res = am_free;
- if (!erts_common_run_queue)
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_runq_unlock(esdp->run_queue);
break;
}
- if (!erts_common_run_queue)
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_runq_unlock(esdp->run_queue);
}
- if (erts_common_run_queue)
- erts_smp_runq_unlock(erts_common_run_queue);
#endif
}
@@ -6140,10 +6012,8 @@ Process *schedule(Process *p, int calls)
#ifdef ERTS_SMP
- if (!(rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && rq->check_balance_reds <= 0) {
+ if (rq->check_balance_reds <= 0)
check_balance(rq);
- }
ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -6153,20 +6023,15 @@ Process *schedule(Process *p, int calls)
continue_check_activities_to_run:
- if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_CHK_CPU_BIND
+ if (rq->flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
- if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED)) {
+ if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
- if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
- || erts_smp_atomic32_read_acqb(&esdp->chk_cpu_bind)) {
+ if (rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
erts_sched_check_cpu_bind(esdp);
- }
}
{
@@ -6208,16 +6073,11 @@ Process *schedule(Process *p, int calls)
empty_runq(rq);
- if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)) {
- if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED);
- non_empty_runq(rq);
- goto continue_check_activities_to_run;
- }
+ if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
+ non_empty_runq(rq);
+ goto continue_check_activities_to_run;
}
else if (!(rq->flags & ERTS_RUNQ_FLG_INACTIVE)) {
/*
@@ -6282,11 +6142,7 @@ Process *schedule(Process *p, int calls)
else if (rq->wakeup_other < wakeup_other_limit)
rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC;
else {
- if (erts_common_run_queue) {
- if (erts_common_run_queue->waiting)
- wake_scheduler(erts_common_run_queue, 0, 1);
- }
- else if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
+ if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
}
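
After this erl_process.c surgery there is exactly one run queue per scheduler,
and taking schedulers offline always goes through run-queue evacuation
(evacuate_run_queue above) rather than the removed common-run-queue suspend
path. A small runtime sketch of the user-visible knob involved, assuming an
SMP emulator:

    %% Halve the online schedulers, then restore them. The run queues of
    %% offline schedulers are evacuated by the code above.
    flex_schedulers() ->
        Total = erlang:system_info(schedulers),
        _Old  = erlang:system_flag(schedulers_online, max(1, Total div 2)),
        timer:sleep(1000),
        erlang:system_flag(schedulers_online, Total).
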
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 69ff423133..a51b380bb0 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -144,12 +144,10 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 1))
#define ERTS_RUNQ_FLG_SUSPENDED \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 2))
-#define ERTS_RUNQ_FLG_SHARED_RUNQ \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
#define ERTS_RUNQ_FLG_CHK_CPU_BIND \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
#define ERTS_RUNQ_FLG_INACTIVE \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5))
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -272,11 +270,6 @@ typedef enum {
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
-typedef struct {
- erts_smp_spinlock_t lock;
- ErtsSchedulerSleepInfo *list;
-} ErtsSchedulerSleepList;
-
struct ErtsSchedulerSleepInfo_ {
#ifdef ERTS_SMP
ErtsSchedulerSleepInfo *next;
@@ -339,10 +332,6 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
-#ifdef ERTS_SMP
- ErtsSchedulerSleepList sleepers;
-#endif
-
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
int woken;
@@ -388,7 +377,6 @@ typedef union {
} ErtsAlignedRunQueue;
extern ErtsAlignedRunQueue *erts_aligned_run_queues;
-extern ErtsRunQueue *erts_common_run_queue;
#define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \
do { \
@@ -469,11 +457,6 @@ struct ErtsSchedulerData_ {
ErtsSchedAllocData alloc_data;
-#ifdef ERTS_SMP
- /* NOTE: These fields are modified under held mutexes by other threads */
- erts_smp_atomic32_t chk_cpu_bind; /* Only used when common run queue */
-#endif
-
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
Allctr_t *verify_unused_temp_alloc_data;
@@ -1079,7 +1062,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int, int);
+void erts_init_scheduling(int, int);
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1464,8 +1447,7 @@ erts_get_runq_proc(Process *p)
ASSERT(p->run_queue);
return p->run_queue;
#else
- ASSERT(erts_common_run_queue);
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
@@ -1478,8 +1460,7 @@ erts_get_runq_current(ErtsSchedulerData *esdp)
esdp = erts_get_scheduler_data();
return esdp->run_queue;
#else
- ASSERT(erts_common_run_queue);
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index b247576f1c..d5b74efd98 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -205,7 +205,7 @@ erts_port_runq(Port *prt)
rq1 = rq2;
}
#else
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index dda24d4405..a21e5c6e06 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index e9a59a7aaf..4affc9bffe 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -1091,8 +1091,6 @@ receive_emd(Ref, EMD, N) ->
receive_emd(Ref) ->
receive_emd(Ref, #memory{}, erlang:system_info(schedulers)).
-aa_mem_data(notsup, _) ->
- notsup;
aa_mem_data(#memory{} = Mem,
[{maximum, Max} | Rest]) ->
aa_mem_data(Mem#memory{maximum = Max},