author     Lukas Larsson <[email protected]>  2017-10-02 10:40:25 +0200
committer  Lukas Larsson <[email protected]>  2017-10-02 10:40:25 +0200
commit     9033a41375f3a31a18eb0cba3ea0dc84efbc0aa0 (patch)
tree       8731693cd5c15ffd8f064a3f304aec49fdcebca8 /erts/emulator/beam
parent     f75fa8129fcd18fc56407778960252e6c10a3a37 (diff)
parent     0c6df1f92be6337efe4d7e3d1964063f9acf770f (diff)
Merge branch 'lukas/erts/poll-thread/OTP-14346'
* lukas/erts/poll-thread/OTP-14346: (25 commits)
  erts: Trigger ready events when erts_io_control fails
  erts: enif_select steal test
  kernel: Rewrite gen_udp_SUITE:read_packet tc
  erts: disable kernel-poll on OS X vsn < 16
  erts: Fix msacc testcase with new poll-thread
  erts: Add testcases to test IOp and IOt options
  erts: get_internal_state(check_io_debug) now prints to error_logger
  erts: Remove eager check io
  erts: Move all I/O polling to a seperate thread
  erts: Fix smp_select testcase to use ERL_DRV_USE
  erts: Fix msacc unmanaged state counter
  erts: Optimize port_task quick allocator
  erts: Add ERTS_THR_PREF_QUICK_ALLOC_IMPL
  erts: Update suspend of scheduler to handle multiple pollsets
  erts: Add multiple poll sets
  erts: Some code cleanup for gdb to work better
  erts: temp_alloc can no longer be disabled
  erts: Refactor check_io to use one static struct
  erts: Replace check_io spinlock with lock-less list insertion
  erts: Add number of enif_select's to check_io_debug
  ...
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/erl_alloc.c                 |   6
-rw-r--r--  erts/emulator/beam/erl_alloc.h                 |  80
-rw-r--r--  erts/emulator/beam/erl_alloc.types             |   2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c              |  15
-rw-r--r--  erts/emulator/beam/erl_driver.h                |  15
-rw-r--r--  erts/emulator/beam/erl_init.c                  |  77
-rw-r--r--  erts/emulator/beam/erl_lock_check.c            |   1
-rw-r--r--  erts/emulator/beam/erl_lock_count.c            |  22
-rw-r--r--  erts/emulator/beam/erl_msacc.h                 |   2
-rw-r--r--  erts/emulator/beam/erl_port_task.c             |  80
-rw-r--r--  erts/emulator/beam/erl_port_task.h             |  26
-rw-r--r--  erts/emulator/beam/erl_process.c               | 948
-rw-r--r--  erts/emulator/beam/erl_process.h               |   8
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c  |  30
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h  |  13
-rw-r--r--  erts/emulator/beam/erl_threads.h               |   1
-rw-r--r--  erts/emulator/beam/erl_trace.c                 |   1
-rw-r--r--  erts/emulator/beam/erl_vm.h                    |   2
-rw-r--r--  erts/emulator/beam/erlang_lttng.h              |  15
-rw-r--r--  erts/emulator/beam/global.h                    |   3
-rw-r--r--  erts/emulator/beam/io.c                        |  28
-rw-r--r--  erts/emulator/beam/safe_hash.c                 |   7
-rw-r--r--  erts/emulator/beam/safe_hash.h                 |   2
-rw-r--r--  erts/emulator/beam/sys.h                       |  10
24 files changed, 552 insertions, 842 deletions
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 845cef24c7..88285d8be6 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -385,6 +385,7 @@ set_default_temp_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
+ ip->disable_allowed = 0;
ip->carrier_migration_allowed = 0;
ip->atype = AFIT;
ip->init.util.name_prefix = "temp_";
@@ -644,8 +645,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= ERTS_MONITOR_SH_SIZE * sizeof(Uint);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NLINK_SH)]
= ERTS_LINK_SH_SIZE * sizeof(Uint);
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_EV_D_STATE)]
- = sizeof(ErtsDrvEventDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
= sizeof(ErtsDrvSelectDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
@@ -1494,8 +1493,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
&init->ll_alloc,
&init->driver_alloc,
&init->fix_alloc,
- &init->sl_alloc,
- &init->temp_alloc
+ &init->sl_alloc
/* test_alloc not affected by +Mea??? or +Mu??? */
};
int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index c661d0b226..117f96a4ad 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -402,6 +402,32 @@ NAME##_free(TYPE *p) \
erts_free(ALCT, (void *) p); \
}
+#define ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \
+ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ)
+
+#define ERTS_THR_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \
+static void \
+init_##NAME##_alloc(int nthreads) \
+{ \
+ init_##NAME##_pre_alloc(nthreads); \
+} \
+static ERTS_INLINE TYPE * \
+NAME##_alloc(void) \
+{ \
+ TYPE *res = NAME##_pre_alloc(); \
+ if (!res) \
+ res = erts_alloc(ALCT, sizeof(TYPE)); \
+ return res; \
+} \
+static ERTS_INLINE void \
+NAME##_free(TYPE *p) \
+{ \
+ if (!NAME##_pre_free(p)) \
+ erts_free(ALCT, (void *) p); \
+}
+
+
#ifdef DEBUG
#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? (SZ)/10 + 10 : 100)
#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T))
@@ -482,7 +508,8 @@ init_##NAME##_alloc(void) \
{ \
sspa_data_##NAME##__ = \
erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
- ERTS_PRE_ALLOC_SIZE((PASZ))); \
+ ERTS_PRE_ALLOC_SIZE((PASZ)), \
+ 0, NULL); \
} \
\
static TYPE * \
@@ -504,6 +531,57 @@ NAME##_free(TYPE *p) \
(char *) p); \
}
+
+#define ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) \
+union erts_sspa_##NAME##__ { \
+ erts_sspa_blk_t next; \
+ TYPE type; \
+}; \
+ \
+static erts_sspa_data_t *sspa_data_##NAME##__; \
+ \
+static void \
+init_##NAME##_alloc(int nthreads) \
+{ \
+ sspa_data_##NAME##__ = \
+ erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
+ ERTS_PRE_ALLOC_SIZE((PASZ)), \
+ nthreads, \
+ #NAME); \
+} \
+ \
+void \
+erts_##NAME##_alloc_init_thread(void) \
+{ \
+ int id = erts_atomic_inc_read_nob(&sspa_data_##NAME##__->id_generator);\
+ if (id > sspa_data_##NAME##__->nthreads) { \
+ erts_exit(ERTS_ABORT_EXIT, \
+ "%s:%d:%s(): Too many threads for '" #NAME "'\n", \
+ __FILE__, __LINE__, __func__); \
+ } \
+ erts_tsd_set(sspa_data_##NAME##__->tsd_key, (void*)(SWord)id); \
+} \
+ \
+static TYPE * \
+NAME##_alloc(void) \
+{ \
+ int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \
+ if (id == 0) \
+ return NULL; \
+ return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \
+ id-1); \
+} \
+ \
+static int \
+NAME##_free(TYPE *p) \
+{ \
+ int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \
+ return erts_sspa_free(sspa_data_##NAME##__, \
+ id - 1, \
+ (char *) p); \
+}
+
+
#ifdef DEBUG
#define ERTS_ALC_DBG_BLK_SZ(PTR) (*(((UWord *) (PTR)) - 2))
#endif /* #ifdef DEBUG */
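The ERTS_THR_PREF_PRE_ALLOC_IMPL block above generates, per named pool, a thread-registration function plus alloc/free that key on a thread-specific-data slot: registered threads get a 1-based id and hit their own pre-allocated freelist, while unregistered threads (id 0) fall through to the general allocator. A minimal standalone sketch of that shape, assuming plain pthreads and malloc in place of the erts_sspa_* primitives (all names here are illustrative, and the real erts_sspa_free also accepts blocks freed by a different thread via a lock-free delivery queue):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct blk { struct blk *next; } blk_t;

    #define NBLKS    128   /* cf. ERTS_PRE_ALLOC_SIZE(PASZ) */
    #define BLK_SIZE 64

    typedef struct {
        blk_t *free_list;
        char mem[NBLKS * BLK_SIZE];
    } pool_t;

    static pthread_key_t pool_key; /* thread -> 1-based id; 0 = unregistered */
    static pool_t *pools;

    /* init_NAME_alloc(nthreads): one pool slot per thread that may register */
    void pool_global_init(int nthreads)
    {
        pools = calloc(nthreads, sizeof(pool_t));
        pthread_key_create(&pool_key, NULL);
    }

    /* erts_NAME_alloc_init_thread(): claim an id and build our freelist */
    void pool_init_thread(int id)
    {
        pool_t *p = &pools[id - 1];
        for (int i = 0; i < NBLKS; i++) {
            blk_t *b = (blk_t *) &p->mem[i * BLK_SIZE];
            b->next = p->free_list;
            p->free_list = b;
        }
        pthread_setspecific(pool_key, (void *) (long) id);
    }

    /* NAME_alloc(): lock-free hit on our own pool, else fall back */
    void *pool_alloc(size_t sz)
    {
        long id = (long) pthread_getspecific(pool_key);
        if (id != 0 && sz <= BLK_SIZE && pools[id - 1].free_list) {
            blk_t *b = pools[id - 1].free_list;
            pools[id - 1].free_list = b->next;
            return b;
        }
        return malloc(sz); /* stands in for erts_alloc(ALCT, ...) */
    }

    /* NAME_pre_free(): 1 if the block came from our own pool, else 0 so
     * the caller can fall back; the real code instead routes blocks owned
     * by another thread through a lock-free queue. */
    int pool_free(void *ptr)
    {
        long id = (long) pthread_getspecific(pool_key);
        if (id != 0) {
            pool_t *p = &pools[id - 1];
            if ((char *) ptr >= p->mem && (char *) ptr < p->mem + sizeof(p->mem)) {
                blk_t *b = ptr;
                b->next = p->free_list;
                p->free_list = b;
                return 1;
            }
        }
        return 0;
    }

The QUICK variant then wraps this pair so a pool miss transparently becomes erts_alloc/erts_free, which is exactly what the new port_task allocator in erl_port_task.c relies on.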
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 11884299e2..bf5487d31e 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -351,11 +351,9 @@ type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request
type TEMP_TERM TEMPORARY SYSTEM temp_term
type DRV_TAB LONG_LIVED SYSTEM drv_tab
type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
-type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
type FD_LIST SHORT_LIVED SYSTEM fd_list
-type ACTIVE_FD_ARR SHORT_LIVED SYSTEM active_fd_array
type POLLSET LONG_LIVED SYSTEM pollset
type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
type POLL_FDS LONG_LIVED SYSTEM poll_fds
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 36939d6acc..17c936f041 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -46,6 +46,7 @@
#include "erl_thr_progress.h"
#include "erl_bif_unique.h"
#include "erl_map.h"
+#include "erl_check_io.h"
#define ERTS_PTAB_WANT_DEBUG_FUNCS__
#include "erl_ptab.h"
#include "erl_time.h"
@@ -97,9 +98,6 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#ifdef HIPE
" [hipe]"
#endif
-#ifdef ERTS_ENABLE_KERNEL_POLL
- " [kernel-poll:%s]"
-#endif
#ifdef ET_DEBUG
#if ET_DEBUG
" [type-assertions]"
@@ -371,9 +369,6 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
, total, online
, dirty_cpu, dirty_cpu_onln, dirty_io
, erts_async_max_threads
-#ifdef ERTS_ENABLE_KERNEL_POLL
- , erts_use_kernel_poll ? "true" : "false"
-#endif
);
}
@@ -2695,7 +2690,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(CONTEXT_REDS));
} else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
#ifdef ERTS_ENABLE_KERNEL_POLL
- BIF_RET(erts_use_kernel_poll ? am_true : am_false);
+ BIF_RET(am_true);
#else
BIF_RET(am_false);
#endif
@@ -2848,7 +2843,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_disabled);
}
else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
- BIF_RET(erts_eager_check_io ? am_true : am_false);
+ BIF_RET(am_true);
}
else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
#ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
@@ -3580,8 +3575,8 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
(Uint) ciodi.no_used_fds),
erts_bld_uint(hpp, szp,
(Uint) ciodi.no_driver_select_structs),
- erts_bld_uint(hpp, szp,
- (Uint) ciodi.no_driver_event_structs));
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_enif_select_structs));
if (hpp)
break;
hp = HAlloc(BIF_P, sz);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 5ad616fec3..d5379a40d5 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -148,14 +148,6 @@ typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */
typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. */
-#if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT)
-struct erl_drv_event_data {
- short events;
- short revents;
-};
-#endif
-typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
-
typedef struct {
unsigned long megasecs;
unsigned long secs;
@@ -270,10 +262,7 @@ typedef struct erl_drv_entry {
unsigned int *flags); /* Works mostly like 'control',
a synchronous
call into the driver. */
- void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
- ErlDrvEventData event_data);
- /* Called when an event selected by
- driver_event() has occurred */
+ void (*unused_event_callback)(void);
int extended_marker; /* ERL_DRV_EXTENDED_MARKER */
int major_version; /* ERL_DRV_EXTENDED_MAJOR_VERSION */
int minor_version; /* ERL_DRV_EXTENDED_MINOR_VERSION */
@@ -340,8 +329,6 @@ EXTERN void erl_drv_busy_msgq_limits(ErlDrvPort port,
ErlDrvSizeT *high);
EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on);
-EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event,
- ErlDrvEventData event_data);
EXTERN int driver_output(ErlDrvPort port, char *buf, ErlDrvSizeT len);
EXTERN int driver_output2(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen,
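With driver_event(), ErlDrvEventData and the event entry point gone, a driver that previously registered custom poll masks has to express the same interest through driver_select() (declared just above), receiving readiness through the ready_input/ready_output entry points instead of a single event callback. A hedged migration sketch; the fd and the helper names are hypothetical:

    #include "erl_driver.h"

    /* Before (removed API):
     *   static struct erl_drv_event_data pd = { POLLIN | POLLOUT, 0 };
     *   driver_event(port, (ErlDrvEvent)(long) fd, &pd);
     * After: */

    static void my_drv_watch(ErlDrvPort port, int fd)
    {
        /* ERL_DRV_USE guarantees stop_select() runs before the fd may be
         * closed, which the rewritten smp_select testcase also relies on. */
        driver_select(port, (ErlDrvEvent)(long) fd,
                      ERL_DRV_READ | ERL_DRV_WRITE | ERL_DRV_USE, 1);
    }

    static void my_drv_unwatch(ErlDrvPort port, int fd)
    {
        driver_select(port, (ErlDrvEvent)(long) fd,
                      ERL_DRV_READ | ERL_DRV_WRITE | ERL_DRV_USE, 0);
    }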
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 8c14c86219..6cef9bd0e3 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -49,6 +49,7 @@
#include "erl_bif_unique.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
+#include "erl_check_io.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -76,8 +77,10 @@ const char etp_otp_release[] = ERLANG_OTP_RELEASE;
const char etp_compile_date[] = ERLANG_COMPILE_DATE;
const char etp_arch[] = ERLANG_ARCHITECTURE;
#ifdef ERTS_ENABLE_KERNEL_POLL
+const int erts_use_kernel_poll = 1;
const int etp_kernel_poll_support = 1;
#else
+const int erts_use_kernel_poll = 0;
const int etp_kernel_poll_support = 0;
#endif
#if defined(ARCH_64)
@@ -203,16 +206,16 @@ int erts_no_line_info = 0; /* -L: Don't load line information */
*/
ErtsModifiedTimings erts_modified_timings[] = {
- /* 0 */ {make_small(0), CONTEXT_REDS, INPUT_REDUCTIONS},
- /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4, 2*INPUT_REDUCTIONS},
- /* 2 */ {make_small(0), CONTEXT_REDS/2, INPUT_REDUCTIONS/2},
- /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8, 3*INPUT_REDUCTIONS},
- /* 4 */ {make_small(0), CONTEXT_REDS/3, 3*INPUT_REDUCTIONS},
- /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11, INPUT_REDUCTIONS/2},
- /* 6 */ {make_small(1), CONTEXT_REDS/4, 2*INPUT_REDUCTIONS},
- /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7, INPUT_REDUCTIONS/3},
- /* 8 */ {make_small(10), CONTEXT_REDS/5, 3*INPUT_REDUCTIONS},
- /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7, INPUT_REDUCTIONS/4}
+ /* 0 */ {make_small(0), CONTEXT_REDS},
+ /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4},
+ /* 2 */ {make_small(0), CONTEXT_REDS/2},
+ /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8},
+ /* 4 */ {make_small(0), CONTEXT_REDS/3},
+ /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11},
+ /* 6 */ {make_small(1), CONTEXT_REDS/4},
+ /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7},
+ /* 8 */ {make_small(10), CONTEXT_REDS/5},
+ /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7}
};
#define ERTS_MODIFIED_TIMING_LEVELS \
@@ -310,8 +313,9 @@ erl_init(int ncpu,
erts_init_sys_common_misc();
erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
- no_schedulers_online
- , no_dirty_cpu_schedulers,
+ no_schedulers_online,
+ erts_no_poll_threads,
+ no_dirty_cpu_schedulers,
no_dirty_cpu_schedulers_online,
no_dirty_io_schedulers
);
@@ -558,9 +562,19 @@ void erts_usage(void)
erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n");
erts_fprintf(stderr, " valid values are: off_heap | on_heap\n");
+ erts_fprintf(stderr, "-IOp number set number of pollsets to be used to poll for I/O,\n");
+ erts_fprintf(stderr, " This value has to be equal or smaller than the\n");
+ erts_fprintf(stderr, " number of poll threads. If the current platform\n");
+ erts_fprintf(stderr, " does not support concurrent update of pollsets\n");
+ erts_fprintf(stderr, " this value is ignored.\n");
+ erts_fprintf(stderr, "-IOt number set number of threads to be used to poll for I/O\n");
+ erts_fprintf(stderr, "-IOPp number set number of pollsets as a percentage of the\n");
+ erts_fprintf(stderr, " number of poll threads.");
+ erts_fprintf(stderr, "-IOPt number set number of threads to be used to poll for I/O\n");
+ erts_fprintf(stderr, " as a percentage of the number of schedulers.");
+
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
- erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
erts_fprintf(stderr, " Note that this flag is deprecated!\n");
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
@@ -727,7 +741,6 @@ early_init(int *argc, char **argv) /*
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
-
erts_save_emu_args(*argc, argv);
erts_sched_compact_load = 1;
@@ -839,6 +852,7 @@ early_init(int *argc, char **argv) /*
}
break;
}
+
case 'S' :
if (argv[i][2] == 'P') {
int ptot, ponln;
@@ -1100,6 +1114,9 @@ early_init(int *argc, char **argv) /*
erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
-M flags. */
/* Require allocators */
+
+ erts_init_check_io(argc, argv);
+
/*
* Thread progress management:
*
@@ -1107,13 +1124,14 @@ early_init(int *argc, char **argv) /*
* ** Scheduler threads (see erl_process.c)
* ** Aux thread (see erl_process.c)
* ** Sys message dispatcher thread (see erl_trace.c)
+ * ** IO Poll threads (see erl_check_io.c)
*
* * Unmanaged threads that need to register:
* ** Async threads (see erl_async.c)
* ** Dirty scheduler threads
*/
erts_thr_progress_init(no_schedulers,
- no_schedulers+2,
+ no_schedulers+2+erts_no_poll_threads,
erts_async_max_threads +
erts_no_dirty_cpu_schedulers +
erts_no_dirty_io_schedulers
@@ -1561,16 +1579,6 @@ erl_start(int argc, char **argv)
have_break_handler = 0;
break;
- case 'K':
- /* If kernel poll support is present,
- erl_sys_args() will remove the K parameter
- and value */
- get_arg(argv[i]+2, argv[i+1], &i);
- erts_fprintf(stderr,
- "kernel-poll not supported; \"K\" parameter ignored\n",
- arg);
- break;
-
case 'n':
arg = get_arg(argv[i]+2, argv[i+1], &i);
switch (arg[0]) {
@@ -1740,22 +1748,9 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (has_prefix("ecio", sub_param)) {
- arg = get_arg(sub_param+4, argv[i+1], &i);
-#ifndef __OSE__
- if (sys_strcmp("true", arg) == 0)
- erts_eager_check_io = 1;
- else
-#endif
- if (sys_strcmp("false", arg) == 0)
- erts_eager_check_io = 0;
- else {
- erts_fprintf(stderr,
- "bad schedule eager check I/O value '%s'\n",
- arg);
- erts_usage();
- }
- }
+ else if (has_prefix("ecio", sub_param)) {
+ /* ignore argument, eager check io no longer used */
+ }
else if (has_prefix("pp", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (sys_strcmp(arg, "true") == 0)
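For reference, the four -IO* switches documented in erts_usage() above are normally reached through the corresponding +IO* emulator flags on the erl command line. A hypothetical invocation, with counts chosen purely for illustration:

    # 4 dedicated poll threads distributing fds over 2 pollsets
    erl +IOt 4 +IOp 2

    # or relative sizing: poll threads at 50% of schedulers,
    # one pollset per poll thread
    erl +IOPt 50 +IOPp 100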
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index f81c90818f..4cdef0200f 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -113,7 +113,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
- { "pollset_rm_list", NULL },
{ "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
{ "schdlr_sspnd", NULL },
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index d2e8f47d59..1ae6076b12 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -554,16 +554,6 @@ erts_lock_flags_t erts_lcnt_get_category_mask() {
return lcnt_category_mask__;
}
-#ifdef ERTS_ENABLE_KERNEL_POLL
-/* erl_poll/erl_check_io only exports one of these variants at a time, and we
- * may need to use either one depending on emulator startup flags. */
-void erts_lcnt_update_pollset_locks_nkp(int);
-void erts_lcnt_update_pollset_locks_kp(int);
-
-void erts_lcnt_update_cio_locks_nkp(int);
-void erts_lcnt_update_cio_locks_kp(int);
-#endif
-
void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
erts_lock_flags_t changed_categories;
@@ -590,19 +580,7 @@ void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
}
if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
-#ifdef ERTS_ENABLE_KERNEL_POLL
- if(erts_use_kernel_poll) {
- erts_lcnt_update_pollset_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
- erts_lcnt_update_cio_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
- } else {
- erts_lcnt_update_pollset_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
- erts_lcnt_update_cio_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
- }
-#else
- erts_lcnt_update_pollset_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
-#endif
-
erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
}
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
index 8349a7e297..2d4637f800 100644
--- a/erts/emulator/beam/erl_msacc.h
+++ b/erts/emulator/beam/erl_msacc.h
@@ -318,8 +318,8 @@ ERTS_GLB_INLINE
void erts_msacc_set_state_um__(ErtsMsAcc *msacc, Uint new_state, int increment) {
if (ERTS_UNLIKELY(msacc->unmanaged)) {
erts_mtx_lock(&msacc->mtx);
- msacc->state = new_state;
if (ERTS_LIKELY(!msacc->perf_counter)) {
+ msacc->state = new_state;
erts_mtx_unlock(&msacc->mtx);
return;
}
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 14977dfa17..a588477320 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -36,6 +36,7 @@
#include "erl_check_io.h"
#include "dtrace-wrapper.h"
#include "lttng-wrapper.h"
+#include "erl_check_io.h"
#include <stdarg.h>
/*
@@ -90,8 +91,6 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
erts_atomic_read_nob(&(PP)->run_queue))); \
} while (0)
-erts_atomic_t erts_port_task_outstanding_io_tasks;
-
#define ERTS_PT_STATE_SCHEDULED 0
#define ERTS_PT_STATE_ABORTED 1
#define ERTS_PT_STATE_EXECUTING 2
@@ -99,7 +98,6 @@ erts_atomic_t erts_port_task_outstanding_io_tasks;
typedef union {
struct { /* I/O tasks */
ErlDrvEvent event;
- ErlDrvEventData event_data;
} io;
struct {
ErtsProc2PortSigCallback callback;
@@ -149,10 +147,10 @@ static void begin_port_cleanup(Port *pp,
ErtsPortTask **execq,
int *processing_busy_q_p);
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task,
- ErtsPortTask,
- 1000,
- ERTS_ALC_T_PORT_TASK)
+ERTS_THR_PREF_QUICK_ALLOC_IMPL(port_task,
+ ErtsPortTask,
+ 1000,
+ ERTS_ALC_T_PORT_TASK)
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table,
ErtsPortTaskBusyCallerTable,
@@ -585,8 +583,9 @@ reset_executed_io_task_handle(ErtsPortTask *ptp)
{
if (ptp->u.alive.handle) {
ASSERT(ptp == handle2task(ptp->u.alive.handle));
- erts_io_notify_port_task_executed(ptp->u.alive.handle);
- reset_port_task_handle(ptp->u.alive.handle);
+ /* The port task handle is reset inside task_executed */
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
}
}
@@ -1308,21 +1307,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
if (old_state != ERTS_PT_STATE_SCHEDULED)
res = - 1; /* Task already aborted, executing, or executed */
else {
-
reset_port_task_handle(pthp);
-
- switch (ptp->type) {
- case ERTS_PORT_TASK_INPUT:
- case ERTS_PORT_TASK_OUTPUT:
- case ERTS_PORT_TASK_EVENT:
- ASSERT(erts_atomic_read_nob(
- &erts_port_task_outstanding_io_tasks) > 0);
- erts_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
- break;
- default:
- break;
- }
-
res = 0;
}
}
@@ -1459,16 +1444,6 @@ erts_port_task_schedule(Eterm id,
va_start(argp, type);
ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
va_end(argp);
- erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
- break;
- }
- case ERTS_PORT_TASK_EVENT: {
- va_list argp;
- va_start(argp, type);
- ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
- ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData);
- va_end(argp);
- erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
break;
}
case ERTS_PORT_TASK_PROC_SIG: {
@@ -1644,13 +1619,12 @@ erts_port_task_free_port(Port *pp)
* scheduling of processes. Run-queue lock should be held by caller.
*/
-int
+void
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
Port *pp;
ErtsPortTask *execq;
int processing_busy_q;
- int res = 0;
int vreds = 0;
int reds = 0;
erts_aint_t io_tasks_executed = 0;
@@ -1665,7 +1639,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
pp = pop_port(runq);
if (!pp) {
- res = 0;
goto done;
}
@@ -1768,17 +1741,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
reset_executed_io_task_handle(ptp);
io_tasks_executed++;
break;
- case ERTS_PORT_TASK_EVENT:
- reds = ERTS_PORT_REDS_EVENT;
- ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
- DTRACE_DRIVER(driver_event, pp);
- LTTNG_DRIVER(driver_event, pp);
- (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
- ptp->u.alive.td.io.event,
- ptp->u.alive.td.io.event_data);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
- break;
case ERTS_PORT_TASK_PROC_SIG: {
ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
reset_handle(ptp);
@@ -1843,14 +1805,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
ERTS_MSACC_POP_STATE_M();
-
- if (io_tasks_executed) {
- ASSERT(erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- >= io_tasks_executed);
- erts_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
- -1*io_tasks_executed);
- }
-
ASSERT(runq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue));
active = finalize_exec(pp, &execq, processing_busy_q);
@@ -1891,15 +1845,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
done:
- res = (erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
runq->scheduler->reductions += reds;
ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
ERTS_PORT_REDUCTIONS_EXECUTED(esdp, runq, reds);
-
- return res;
}
static void
@@ -2031,13 +1981,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
DO_WRITE,
1);
break;
- case ERTS_PORT_TASK_EVENT:
- erts_stale_drv_select(pp->common.id,
- ERTS_Port2ErlDrvPort(pp),
- ptp->u.alive.td.io.event,
- 0,
- 1);
- break;
case ERTS_PORT_TASK_DIST_CMD:
break;
case ERTS_PORT_TASK_PROC_SIG: {
@@ -2151,8 +2094,7 @@ erts_dequeue_port(ErtsRunQueue *rq)
void
erts_port_task_init(void)
{
- erts_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
- (erts_aint_t) 0);
- init_port_task_alloc();
+ init_port_task_alloc(erts_no_schedulers + erts_no_poll_threads
+ + 1); /* aux_thread */
init_busy_caller_table_alloc();
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 561f4ca936..ae78a7d8a3 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -56,17 +56,11 @@ typedef erts_atomic_t ErtsPortTaskHandle;
typedef enum {
ERTS_PORT_TASK_INPUT,
ERTS_PORT_TASK_OUTPUT,
- ERTS_PORT_TASK_EVENT,
ERTS_PORT_TASK_TIMEOUT,
ERTS_PORT_TASK_DIST_CMD,
ERTS_PORT_TASK_PROC_SIG
} ErtsPortTaskType;
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-/* NOTE: Do not access any of the exported variables directly */
-extern erts_atomic_t erts_port_task_outstanding_io_tasks;
-#endif
-
#define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0)
#define ERTS_PTS_FLG_EXEC (((erts_aint32_t) 1) << 1)
#define ERTS_PTS_FLG_HAVE_TASKS (((erts_aint32_t) 1) << 2)
@@ -140,10 +134,6 @@ ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
-#endif
-
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
@@ -221,24 +211,16 @@ erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp)
erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
}
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-
-ERTS_GLB_INLINE int
-erts_port_task_have_outstanding_io_tasks(void)
-{
- return (erts_atomic_read_acqb(&erts_port_task_outstanding_io_tasks)
- != 0);
-}
-
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
-
#endif
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-int erts_port_task_execute(ErtsRunQueue *, Port **);
+void erts_port_task_execute(ErtsRunQueue *, Port **);
void erts_port_task_init(void);
#endif
+/* generated for 'port_task' quick allocator */
+void erts_port_task_pre_alloc_init_thread(void);
+
void erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *);
int erts_port_task_abort(ErtsPortTaskHandle *);
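Taken together, the port_task hunks change the allocator's contract: the pool is sized once for every thread that may create or free port tasks, and each such thread (schedulers, the aux thread, the poll threads) must register itself before first use. A sketch of that contract under those assumptions; the declarations are repeated here only to make the fragment self-contained, and in the real code port_task_alloc/port_task_free are file-local expansions of ERTS_THR_PREF_QUICK_ALLOC_IMPL inside erl_port_task.c:

    typedef unsigned long Uint;              /* stand-in for the ERTS typedef */
    typedef struct ErtsPortTask ErtsPortTask;

    extern Uint erts_no_schedulers;
    extern int  erts_no_poll_threads;

    void erts_port_task_pre_alloc_init_thread(void);
    void init_port_task_alloc(int nthreads);
    ErtsPortTask *port_task_alloc(void);
    void port_task_free(ErtsPortTask *p);

    void boot_example(void)
    {
        /* once, from erts_port_task_init(): one pool slot per consumer */
        init_port_task_alloc(erts_no_schedulers
                             + erts_no_poll_threads
                             + 1 /* aux thread */);
    }

    void consumer_thread_example(void)
    {
        /* once per scheduler/aux/poll thread, before first alloc or free;
         * sched_thread_func, aux_thread and poll_thread all do this */
        erts_port_task_pre_alloc_init_thread();

        ErtsPortTask *ptp = port_task_alloc(); /* pool hit or erts_alloc */
        port_task_free(ptp);                   /* pool return or erts_free */
    }

This is also why erts_port_task_execute() can drop its return value: with polling moved off the schedulers, they no longer need the outstanding-I/O-task count to decide whether to poll themselves.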
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index f3f99f1835..61fdf86a56 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -24,6 +24,8 @@
# include "config.h"
#endif
+#define ERTS_WANT_BREAK_HANDLING
+
#include <stddef.h> /* offsetof() */
#include "sys.h"
#include "erl_vm.h"
@@ -49,6 +51,8 @@
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
#include "erl_nfunc_sched.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
@@ -170,7 +174,6 @@ extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = SPO_ON_HEAP_MSGQ;
-int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1;
int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
@@ -198,7 +201,7 @@ int erts_dio_sched_thread_suggested_stack_size = -1;
ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
-static struct {
+static struct ErtsSchedBusyWait_ {
int aux_work;
int tse;
int sys_schedule;
@@ -207,25 +210,26 @@ static struct {
int erts_disable_proc_not_running_opt;
static ErtsAuxWorkData *aux_thread_aux_work_data;
+static ErtsAuxWorkData *poll_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_NMSB (((erts_aint32_t) 1) << 0)
#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
#define ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN (((erts_aint32_t) 1) << 3)
-typedef struct {
+typedef struct ErtsMultiSchedulingBlock_ {
int ongoing;
ErtsProcList *blckrs;
ErtsProcList *chngq;
} ErtsMultiSchedulingBlock;
-typedef struct {
+typedef struct ErtsSchedTypeCounters_ {
Uint32 normal;
Uint32 dirty_cpu;
Uint32 dirty_io;
} ErtsSchedTypeCounters;
-static struct {
+static struct ErtsSchedSuspend_ {
erts_mtx_t mtx;
ErtsSchedTypeCounters online;
ErtsSchedTypeCounters curr_online;
@@ -364,9 +368,6 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
-static erts_atomic32_t function_calls;
-
-static erts_atomic32_t doing_sys_schedule;
static erts_atomic32_t no_empty_run_queues;
long erts_runq_supervision_interval = 0;
static ethr_event runq_supervision_event;
@@ -434,6 +435,7 @@ typedef union {
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
+static ErtsAlignedSchedulerSleepInfo *aligned_poll_thread_sleep_info;
static Uint last_reductions;
static Uint last_exact_reductions;
@@ -501,9 +503,13 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
+#define ERTS_POLL_THREAD_SLEEP_INFO_IX(IX) \
+ (ASSERT(0 <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_poll_threads)), \
+ &aligned_poll_thread_sleep_info[(IX)].ssi)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT(-1 <= ((int) (IX)) \
- && ((int) (IX)) < ((int) erts_no_schedulers)), \
+ (ASSERT(((int)-1) <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_schedulers)), \
&aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \
(ASSERT(0 <= ((int) (IX)) \
@@ -1135,13 +1141,11 @@ dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
mod++;
erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
-#if 0
if (!working) {
- ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_BUSY_WAIT);
} else {
- ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_OTHER);
}
-#endif
}
@@ -1559,18 +1563,19 @@ erts_psd_set_init(Process *p, int ix, void *data)
void
-erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t flags)
{
switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
case ERTS_SSI_FLG_POLL_SLEEPING:
- erts_sys_schedule_interrupt(1);
+ erts_check_io_interrupt(ssi->psi, 1);
break;
case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING:
/*
* Thread progress blocking while poll sleeping; need
* to signal on both...
*/
- erts_sys_schedule_interrupt(1);
+ erts_check_io_interrupt(ssi->psi, 1);
/* fall through */
case ERTS_SSI_FLG_TSE_SLEEPING:
erts_tse_set(ssi->event);
@@ -2830,36 +2835,6 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
return old;
}
-
-
-static ERTS_INLINE void
-sched_waiting_sys(Uint no, ErtsRunQueue *rq)
-{
- ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- ASSERT(rq->waiting >= 0);
- (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
- rq->waiting++;
- rq->waiting *= -1;
- rq->woken = 0;
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(no), am_inactive);
-}
-
-static ERTS_INLINE void
-sched_active_sys(Uint no, ErtsRunQueue *rq)
-{
- ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
-
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-
- ASSERT(rq->waiting < 0);
- rq->waiting *= -1;
- rq->waiting--;
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(no), am_active);
-}
-
Uint
erts_active_schedulers(void)
{
@@ -2870,49 +2845,6 @@ erts_active_schedulers(void)
return as;
}
-
-static ERTS_INLINE void
-clear_sys_scheduling(void)
-{
- erts_atomic32_set_mb(&doing_sys_schedule, 0);
-}
-
-static ERTS_INLINE int
-try_set_sys_scheduling(void)
-{
- return 0 == erts_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
-}
-
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(int non_blocking)
-{
- if (non_blocking && erts_eager_check_io) {
- return try_set_sys_scheduling();
- }
- else {
- while (!erts_port_task_have_outstanding_io_tasks()
- && try_set_sys_scheduling()) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- clear_sys_scheduling();
- }
- return 0;
- }
-}
-
-
-static ERTS_INLINE void
-sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
-{
- ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
-
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-
- ASSERT(rq->waiting < 0);
- rq->waiting *= -1;
-}
-
static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
@@ -3084,7 +3016,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
erts_tse_reset(ssi->event);
else {
ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING);
- erts_sys_schedule_interrupt(0);
+ erts_check_io_interrupt(ssi->psi, 0);
}
while (1) {
@@ -3152,6 +3084,12 @@ thr_prgr_fin_wait(void *vssi)
static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp);
+void
+erts_aux_thread_poke()
+{
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1));
+}
+
static void *
aux_thread(void *unused)
{
@@ -3160,6 +3098,7 @@ aux_thread(void *unused)
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
+ ERTS_MSACC_DECLARE_CACHE();
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -3168,6 +3107,7 @@ aux_thread(void *unused)
}
#endif
+ erts_port_task_pre_alloc_init_thread();
ssi->event = erts_tse_fetch();
erts_msacc_init_thread("aux", 1, 1);
@@ -3182,9 +3122,14 @@ aux_thread(void *unused)
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+#if ERTS_POLL_USE_FALLBACK
+ ssi->psi = erts_create_pollset_thread(-1);
+#endif
sched_prep_spin_wait(ssi);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
+
while (1) {
erts_aint32_t flgs;
@@ -3193,30 +3138,54 @@ aux_thread(void *unused)
if (!thr_prgr_active)
erts_thr_progress_active(NULL, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
if (aux_work && erts_thr_progress_update(NULL))
erts_thr_progress_leader_update(NULL);
}
if (!aux_work) {
+
+#ifdef ERTS_BREAK_REQUESTED
+ if (ERTS_BREAK_REQUESTED)
+ erts_do_break_handling();
+#endif
+
if (thr_prgr_active)
erts_thr_progress_active(NULL, thr_prgr_active = 0);
- erts_thr_progress_prepare_wait(NULL);
+
+#if ERTS_POLL_USE_FALLBACK
flgs = sched_spin_wait(ssi, 0);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(ssi->psi);
+ }
+ }
+#else
+ erts_thr_progress_prepare_wait(NULL);
+
+ flgs = sched_spin_wait(ssi, 0);
+
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
+ int res;
ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
}
- }
- erts_thr_progress_finalize_wait(NULL);
+ }
+ erts_thr_progress_finalize_wait(NULL);
+#endif
}
flgs = sched_prep_spin_wait(ssi);
@@ -3224,8 +3193,79 @@ aux_thread(void *unused)
return NULL;
}
-static void suspend_scheduler(ErtsSchedulerData *esdp);
+static void *
+poll_thread(void *arg)
+{
+ int id = (int)(UWord)arg;
+ ErtsAuxWorkData *awdp = poll_thread_aux_work_data+id;
+ ErtsSchedulerSleepInfo *ssi = ERTS_POLL_THREAD_SLEEP_INFO_IX(id);
+ erts_aint32_t aux_work;
+ ErtsThrPrgrCallbacks callbacks;
+ int thr_prgr_active = 1;
+ struct erts_poll_thread *psi = erts_create_pollset_thread(id);
+ ERTS_MSACC_DECLARE_CACHE();
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[] = "poll_thread";
+ erts_lc_set_thread_name(buf);
+ }
+#endif
+
+ erts_port_task_pre_alloc_init_thread();
+ ssi->event = erts_tse_fetch();
+
+ erts_msacc_init_thread("poll", id, 0);
+
+ callbacks.arg = (void *) ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = thr_prgr_prep_wait;
+ callbacks.wait = thr_prgr_wait;
+ callbacks.finalize_wait = thr_prgr_fin_wait;
+
+ erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ init_aux_work_data(awdp, NULL, NULL);
+ awdp->ssi = ssi;
+ ssi->psi = psi;
+
+ sched_prep_spin_wait(ssi);
+
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
+ while (1) {
+ erts_aint32_t flgs;
+
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ aux_work = handle_aux_work(awdp, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(NULL))
+ erts_thr_progress_leader_update(NULL);
+ }
+
+ if (!aux_work) {
+ if (thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 0);
+
+ flgs = sched_spin_wait(ssi, 0);
+
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(psi);
+ }
+ }
+ }
+
+ flgs = sched_prep_spin_wait(ssi);
+ }
+ return NULL;
+}
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
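The poll thread's sleep path above is a two-phase handshake: it publishes ERTS_SSI_FLG_POLL_SLEEPING via sched_set_sleeptype() before parking in erts_check_io(), so a waker that observes that flag (erts_sched_finish_poke() earlier in this file) knows it must interrupt the pollset rather than set a thread event. A self-contained sketch of the same shape, assuming POSIX poll(2) and a self-pipe standing in for erts_check_io()/erts_check_io_interrupt(); the real code uses CAS loops on the flag word, while the usleep below merely keeps this toy demonstration race-free:

    #include <poll.h>
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    #define FLG_SLEEPING      (1u << 0)
    #define FLG_POLL_SLEEPING (1u << 1)

    static atomic_uint flags;
    static int wake_pipe[2];

    static void *poller(void *arg)
    {
        struct pollfd pfd = { .fd = wake_pipe[0], .events = POLLIN };
        /* publish the sleep type before blocking (sched_set_sleeptype) */
        atomic_fetch_or(&flags, FLG_SLEEPING | FLG_POLL_SLEEPING);
        poll(&pfd, 1, -1);                   /* erts_check_io() */
        if (pfd.revents & POLLIN) {
            char c;
            read(wake_pipe[0], &c, 1);       /* drain the wakeup byte */
        }
        atomic_store(&flags, 0);
        puts("poller woke up");
        return NULL;
    }

    static void poke(void)                   /* erts_sched_finish_poke() */
    {
        if (atomic_load(&flags) & FLG_POLL_SLEEPING)
            write(wake_pipe[1], "!", 1);     /* erts_check_io_interrupt() */
    }

    int main(void)
    {
        pthread_t tid;
        pipe(wake_pipe);
        pthread_create(&tid, NULL, poller, NULL);
        usleep(100 * 1000);                  /* let the poller block */
        poke();
        pthread_join(tid, NULL);
        return 0;
    }

Built with cc -std=c11 -pthread, the main thread's poke() wakes the blocked poller, mirroring how erts_check_io_interrupt(ssi->psi, 1) wakes a poll-sleeping thread while erts_tse_set() remains the wakeup for TSE sleepers.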
@@ -3260,318 +3300,136 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
dirty_active(esdp, -1);
}
- /*
- * If all schedulers are waiting, one of them *should*
- * be waiting in erl_sys_schedule()
- */
-
- if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(0)) {
-
- sched_waiting(esdp->no, rq);
-
- erts_runq_unlock(rq);
-
- spincount = sched_busy_wait.tse;
+ sched_waiting(esdp->no, rq);
- tse_wait:
-
- if (ERTS_SCHEDULER_IS_DIRTY(esdp))
- dirty_sched_wall_time_change(esdp, working = 0);
- else if (thr_prgr_active != working)
- sched_wall_time_change(esdp, working = thr_prgr_active);
-
- while (1) {
- ErtsMonotonicTime current_time = 0;
+ erts_runq_unlock(rq);
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
- }
+ spincount = sched_busy_wait.tse;
- if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- flgs = erts_atomic32_read_acqb(&ssi->flags);
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
- }
- }
- else {
- ErtsMonotonicTime timeout_time;
- int do_timeout = 0;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- timeout_time = erts_check_next_timeout_time(esdp);
- current_time = erts_get_monotonic_time(esdp);
- do_timeout = (current_time >= timeout_time);
- } else {
- current_time = 0;
- timeout_time = ERTS_MONOTONIC_TIME_MAX;
- }
- if (do_timeout) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- }
- else {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
- }
- erts_thr_progress_prepare_wait(esdp);
- }
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 0);
+ else if (thr_prgr_active != working)
+ sched_wall_time_change(esdp, working = thr_prgr_active);
- flgs = sched_spin_wait(ssi, spincount);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
- erts_get_monotonic_time(esdp);
- do {
- Sint64 timeout;
- if (current_time >= timeout_time)
- break;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
- - current_time
- - 1) + 1;
- } else
- timeout = -1;
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
- res = erts_tse_twait(ssi->event, timeout);
- ERTS_MSACC_POP_STATE_M();
- current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
- erts_get_monotonic_time(esdp);
- } while (res == EINTR);
- }
- }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
- erts_thr_progress_finalize_wait(esdp);
- }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
+ while (1) {
+ ErtsMonotonicTime current_time = 0;
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- break;
- }
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
+ }
- flgs = sched_prep_cont_spin_wait(ssi);
- spincount = sched_busy_wait.aux_work;
+ if (aux_work) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
+ }
+ }
+ else {
+ ErtsMonotonicTime timeout_time;
+ int do_timeout = 0;
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ timeout_time = erts_check_next_timeout_time(esdp);
+ current_time = erts_get_monotonic_time(esdp);
+ do_timeout = (current_time >= timeout_time);
+ } else {
+ current_time = 0;
+ timeout_time = ERTS_MONOTONIC_TIME_MAX;
+ }
+ if (do_timeout) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ }
+ else {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ }
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- break;
- }
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+ erts_get_monotonic_time(esdp);
+ do {
+ Sint64 timeout;
+ if (current_time >= timeout_time)
+ break;
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+ - current_time
+ - 1) + 1;
+ } else
+ timeout = -1;
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
+ res = erts_tse_twait(ssi->event, timeout);
+ ERTS_MSACC_POP_STATE_M();
+ current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+ erts_get_monotonic_time(esdp);
+ } while (res == EINTR);
+ }
+ }
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ erts_thr_progress_finalize_wait(esdp);
+ }
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
- }
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
- if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
- erts_atomic32_read_band_nob(&ssi->flags,
- (ERTS_SSI_FLG_SUSPENDED
- | ERTS_SSI_FLG_MSB_EXEC));
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = sched_busy_wait.aux_work;
- if (ERTS_SCHEDULER_IS_DIRTY(esdp))
- dirty_sched_wall_time_change(esdp, working = 1);
- else if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
}
- erts_runq_lock(rq);
- sched_active(esdp->no, rq);
-
}
- else
- {
-
- erts_atomic32_set_relb(&function_calls, 0);
- *fcalls = 0;
-
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-
- sched_waiting_sys(esdp->no, rq);
-
- erts_runq_unlock(rq);
-
- ASSERT(working);
- sched_wall_time_change(esdp, working = 0);
-
- spincount = sched_busy_wait.sys_schedule;
- if (spincount == 0)
- goto sys_aux_work;
-
- while (spincount-- > 0) {
- ErtsMonotonicTime current_time;
-
- sys_poll_aux_work:
-
- if (working)
- sched_wall_time_change(esdp, working = 0);
-
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
-
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- LTTNG2(scheduler_poll, esdp->no, 1);
- erl_sys_schedule(1); /* Might give us something to do */
-
- ERTS_MSACC_POP_STATE_M();
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
-
- sys_aux_work:
-
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!working)
- sched_wall_time_change(esdp, working = 1);
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
- }
-
- flgs = erts_atomic32_read_acqb(&ssi->flags);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
-
- /*
- * If we got new I/O tasks we aren't allowed to
- * call erl_sys_schedule() until it is handled.
- */
- if (erts_port_task_have_outstanding_io_tasks()) {
- clear_sys_scheduling();
- /*
- * Got to check that we still got I/O tasks; otherwise
- * we have to continue checking for I/O...
- */
- if (!prepare_for_sys_schedule(0)) {
- spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
- goto tse_wait;
- }
- }
- }
-
- erts_runq_lock(rq);
-
- /*
- * If we got new I/O tasks we aren't allowed to
- * sleep in erl_sys_schedule().
- */
- if (erts_port_task_have_outstanding_io_tasks()) {
- clear_sys_scheduling();
-
- /*
- * Got to check that we still got I/O tasks; otherwise
- * we have to wait in erl_sys_schedule() after all...
- */
- if (!prepare_for_sys_schedule(0)) {
- /*
- * Not allowed to wait in erl_sys_schedule;
- * do tse wait instead...
- */
- sched_change_waiting_sys_to_waiting(esdp->no, rq);
- erts_runq_unlock(rq);
- spincount = 0;
- goto tse_wait;
- }
- }
- if (aux_work) {
- erts_runq_unlock(rq);
- goto sys_poll_aux_work;
- }
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
- if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_locked_woken;
- }
- erts_runq_unlock(rq);
- flgs = sched_prep_cont_spin_wait(ssi);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- goto sys_poll_aux_work;
- }
-
- ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
-
- erts_runq_unlock(rq);
-
- if (working)
- sched_wall_time_change(esdp, working = 0);
-
- if (thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
-
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
- LTTNG2(scheduler_poll, esdp->no, 0);
- erl_sys_schedule(0);
+ if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
+ erts_atomic32_read_band_nob(&ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC));
- ERTS_MSACC_POP_STATE_M();
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
-
- flgs = sched_prep_cont_spin_wait(ssi);
- if (flgs & ERTS_SSI_FLG_WAITING)
- goto sys_aux_work;
-
- sys_woken:
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- erts_runq_lock(rq);
- sys_locked_woken:
- if (!thr_prgr_active) {
- erts_runq_unlock(rq);
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- erts_runq_lock(rq);
- }
- clear_sys_scheduling();
- if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
- erts_atomic32_read_band_nob(&ssi->flags,
- (ERTS_SSI_FLG_SUSPENDED
- | ERTS_SSI_FLG_MSB_EXEC));
- if (!working)
- sched_wall_time_change(esdp, working = 1);
- sched_active_sys(esdp->no, rq);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 1);
+ else if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
}
+ erts_runq_lock(rq);
+ sched_active(esdp->no, rq);
+
if (ERTS_SCHEDULER_IS_DIRTY(esdp))
dirty_active(esdp, 1);
@@ -5872,6 +5730,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
}
+ ssi->esdp = esdp;
esdp->ssi = ssi;
esdp->current_process = NULL;
esdp->current_port = NULL;
@@ -5902,8 +5761,8 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
}
void
-erts_init_scheduling(int no_schedulers, int no_schedulers_online
- , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
+erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_threads,
+ int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
int no_dirty_io_schedulers
)
{
@@ -5930,6 +5789,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ASSERT(no_dirty_cpu_schedulers >= 1);
ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online);
ASSERT(no_dirty_cpu_schedulers_online >= 1);
+ ASSERT(erts_no_poll_threads == no_poll_threads);
/* Create and initialize run queues */
@@ -6027,7 +5887,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_no_total_schedulers += no_dirty_io_schedulers;
/* Create and initialize scheduler sleep info */
- no_ssi = n+1;
+ no_ssi = n + 1 /* aux thread */;
aligned_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
@@ -6038,12 +5898,13 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ssi->next = NULL;
ssi->prev = NULL;
#endif
+ ssi->esdp = NULL;
erts_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
- aligned_sched_sleep_info++;
+ aligned_sched_sleep_info += 1 /* aux thread */;
aligned_dirty_cpu_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
@@ -6066,6 +5927,18 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
+ aligned_poll_thread_sleep_info =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_SLP_INFO,
+ no_poll_threads*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_poll_threads; ix++) {
+ ErtsSchedulerSleepInfo *ssi = &aligned_poll_thread_sleep_info[ix].ssi;
+ ssi->esdp = NULL;
+ erts_atomic32_init_nob(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in poll_thread */
+ erts_atomic32_init_nob(&ssi->aux_work, 0);
+ }
+
/* Create and initialize scheduler specific data */
daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob)
@@ -6122,11 +5995,14 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */
debug_wait_completed_flags = 0;
-
aux_thread_aux_work_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
sizeof(ErtsAuxWorkData));
+ poll_thread_aux_work_data =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
+ no_poll_threads * sizeof(ErtsAuxWorkData));
+
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
erts_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL,
@@ -6205,13 +6081,9 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_atomic32_set_nob(&schdlr_sspnd.changing,
set_schdlr_sspnd_change_flags);
- erts_atomic32_init_nob(&doing_sys_schedule, 0);
-
init_misc_aux_work();
- erts_atomic32_init_nob(&function_calls, 0);
-
/* init port tasks */
erts_port_task_init();
@@ -7107,17 +6979,19 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
}
static erts_aint32_t
-sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t sleep_type)
{
erts_aint32_t oflgs;
- erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t nflgs = ((ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)
+ | sleep_type);
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED);
+ ASSERT(sleep_type == ERTS_SSI_FLG_TSE_SLEEPING);
erts_tse_reset(ssi->event);
while (1) {
@@ -7370,17 +7244,6 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
if (exec_type != ERTS_SCHED_NORMAL)
schdlr_sspnd.last_msb_dirty_type = exec_type;
else {
- erts_aint32_t calls;
- /*
- * Going back to normal scheduler after
- * dirty execution; make sure it will check
- * for I/O...
- */
- if (ERTS_USE_MODIFIED_TIMING())
- calls = ERTS_MODIFIED_TIMING_INPUT_REDS + 1;
- else
- calls = INPUT_REDUCTIONS + 1;
- erts_atomic32_set_nob(&function_calls, calls);
if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT)
& ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT)
@@ -7454,6 +7317,34 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
}
+static ERTS_INLINE void
+suspend_normal_scheduler_sleep(ErtsSchedulerData *esdp)
+{
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ erts_aint32_t flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+}
+
+static ERTS_INLINE void
+suspend_dirty_scheduler_sleep(ErtsSchedulerData *esdp)
+{
+ suspend_normal_scheduler_sleep(esdp);
+}
static void
suspend_scheduler(ErtsSchedulerData *esdp)
@@ -7642,14 +7533,15 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_resume_procs(sched_type, &resume);
while (1) {
- ErtsMonotonicTime current_time;
- erts_aint32_t flgs;
-
if (sched_type != ERTS_SCHED_NORMAL)
- aux_work = 0;
- else {
+ suspend_dirty_scheduler_sleep(esdp);
+ else
+ {
+ ErtsMonotonicTime current_time, timeout_time;
int evacuate = no == 1 ? 0 : !ERTS_EMPTY_RUNQ(esdp->run_queue);
+ ASSERT(sched_type == ERTS_SCHED_NORMAL);
+
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work|evacuate) {
@@ -7671,95 +7563,34 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
- }
- if (aux_work) {
- ASSERT(sched_type == ERTS_SCHED_NORMAL);
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
- }
- else {
- ErtsMonotonicTime timeout_time;
- int do_timeout;
+ if (aux_work)
+ timeout_time = erts_next_timeout_time(esdp->next_tmo_ref);
+ else
+ timeout_time = erts_check_next_timeout_time(esdp);
- if (sched_type == ERTS_SCHED_NORMAL) {
- timeout_time = erts_check_next_timeout_time(esdp);
- current_time = erts_get_monotonic_time(esdp);
- do_timeout = (current_time >= timeout_time);
- }
- else {
- timeout_time = ERTS_MONOTONIC_TIME_MAX;
- current_time = 0;
- do_timeout = 0;
- }
+ current_time = erts_get_monotonic_time(esdp);
- if (do_timeout) {
- ASSERT(sched_type == ERTS_SCHED_NORMAL);
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- }
- else {
- if (sched_type == ERTS_SCHED_NORMAL) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
- }
- erts_thr_progress_prepare_wait(esdp);
- }
- flgs = sched_spin_suspended(ssi,
- ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- flgs = sched_set_suspended_sleeptype(ssi);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- int res;
-
- if (sched_type == ERTS_SCHED_NORMAL)
- current_time = erts_get_monotonic_time(esdp);
- else
- current_time = 0;
-
- do {
- Sint64 timeout;
- if (current_time >= timeout_time)
- break;
- if (sched_type != ERTS_SCHED_NORMAL)
- timeout = -1;
- else
- timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
- - current_time
- - 1) + 1;
- res = erts_tse_twait(ssi->event, timeout);
-
- if (sched_type == ERTS_SCHED_NORMAL)
- current_time = erts_get_monotonic_time(esdp);
- else
- current_time = 0;
-
- } while (res == EINTR);
- }
- }
- if (sched_type == ERTS_SCHED_NORMAL)
- erts_thr_progress_finalize_wait(esdp);
- }
+ if (!aux_work && current_time < timeout_time) {
+ /* go to sleep... */
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(NULL);
+ suspend_normal_scheduler_sleep(esdp);
+ erts_thr_progress_finalize_wait(NULL);
+ current_time = erts_get_monotonic_time(esdp);
+ }
- if (current_time >= timeout_time) {
- ASSERT(sched_type == ERTS_SCHED_NORMAL);
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
- }
+ if (current_time >= timeout_time) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
+ }
flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED));
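
    Note: the rewritten wait loop splits cleanly by scheduler type. Dirty schedulers
    just block until poked. Normal schedulers pick a timeout (the cached next-timeout
    when aux work remains, otherwise a freshly computed one), sleep only when there is
    neither aux work nor an already-due timer, and bump the timer wheel after waking
    past the deadline; thread progress is re-activated only when timers actually fire.
    A condensed standalone sketch of one iteration of the normal-scheduler branch;
    the extern helpers are assumed stand-ins for the ERTS calls named in the hunk:

        #include <stdint.h>

        typedef int64_t mtime_t;                 /* cf. ErtsMonotonicTime */

        extern mtime_t now_monotonic(void);      /* cf. erts_get_monotonic_time() */
        extern mtime_t next_timeout(void);       /* cf. erts_check_next_timeout_time() */
        extern int     handle_aux_work(void);    /* nonzero if any work was done */
        extern void    sleep_until_poked(void);  /* cf. suspend_normal_scheduler_sleep() */
        extern void    fire_timers(mtime_t t);   /* cf. erts_bump_timers() */

        static void suspended_iteration(void)
        {
            int aux_work = handle_aux_work();
            mtime_t timeout_time = next_timeout();
            mtime_t current_time = now_monotonic();

            if (!aux_work && current_time < timeout_time) {
                sleep_until_poked();             /* a poke or due timer ends this */
                current_time = now_monotonic();
            }
            if (current_time >= timeout_time)
                fire_timers(current_time);
        }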
@@ -8425,6 +8256,7 @@ sched_thread_func(void *vesdp)
Uint no = esdp->no;
erts_tse_t *tse;
+ erts_port_task_pre_alloc_init_thread();
erts_sched_init_time_sup(esdp);
if (no == 1)
@@ -8578,17 +8410,17 @@ sched_dirty_io_thread_func(void *vesdp)
return NULL;
}
-static ethr_tid aux_tid;
-
void
erts_start_schedulers(void)
{
+ ethr_tid tid;
int res = 0;
Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
char name[16];
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
+ int ix;
opts.detached = 1;
@@ -8635,7 +8467,6 @@ erts_start_schedulers(void)
erts_no_schedulers = actual;
{
- int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
@@ -8658,10 +8489,18 @@ erts_start_schedulers(void)
erts_snprintf(opts.name, 16, "aux");
- res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
+ res = ethr_thr_create(&tid, aux_thread, NULL, &opts);
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread, error = %d\n", res);
+ for (ix = 0; ix < erts_no_poll_threads; ix++) {
+ erts_snprintf(opts.name, 16, "%d_poller", ix);
+
+ res = ethr_thr_create(&tid, poll_thread, (void*)(UWord)ix, &opts);
+ if (res != 0)
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create poll thread\n");
+ }
+
if (actual < 1)
erts_exit(ERTS_ERROR_EXIT,
"Failed to create any scheduler-threads: %s (%d)\n",
@@ -9728,7 +9567,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ErtsRunQueue *rq;
int context_reds;
int fcalls;
- int input_reductions;
int actual_reds;
int reds;
Uint32 flags;
@@ -9748,11 +9586,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (ERTS_USE_MODIFIED_TIMING()) {
context_reds = ERTS_MODIFIED_TIMING_CONTEXT_REDS;
- input_reductions = ERTS_MODIFIED_TIMING_INPUT_REDS;
}
else {
context_reds = CONTEXT_REDS;
- input_reductions = INPUT_REDUCTIONS;
}
ERTS_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())
@@ -9772,7 +9608,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
- fcalls = (int) erts_atomic32_read_acqb(&function_calls);
actual_reds = reds = 0;
erts_runq_lock(rq);
} else {
@@ -9798,7 +9633,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
- fcalls = (int) erts_atomic32_add_read_acqb(&function_calls, reds);
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
@@ -9888,16 +9722,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_CHK_NO_PROC_LOCKS;
- if (is_normal_sched) {
- if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
- (void) erts_get_monotonic_time(esdp);
-
- if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- erts_runq_unlock(rq);
- erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time);
- erts_runq_lock(rq);
- }
- }
}
ERTS_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking());
@@ -9908,6 +9732,16 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ErtsMigrationPath *mp;
if (is_normal_sched) {
+
+ if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
+ (void) erts_get_monotonic_time(esdp);
+
+ if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ erts_runq_unlock(rq);
+ erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time);
+ erts_runq_lock(rq);
+ }
+
if (rq->check_balance_reds <= 0)
check_balance(rq);
@@ -10037,38 +9871,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
goto check_activities_to_run;
}
- else if (is_normal_sched
- && (fcalls > input_reductions
- && prepare_for_sys_schedule(!0))) {
- ErtsMonotonicTime current_time;
- /*
- * Schedule system-level activities.
- */
-
- ERTS_MSACC_PUSH_STATE_CACHED_M();
-
- erts_atomic32_set_relb(&function_calls, 0);
- fcalls = 0;
-
-#if 0 /* Not needed since we wont wait in sys schedule */
- erts_sys_schedule_interrupt(0);
-#endif
- erts_runq_unlock(rq);
-
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
- LTTNG2(scheduler_poll, esdp->no, 1);
-
- erl_sys_schedule(1);
- ERTS_MSACC_POP_STATE_M();
-
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
- erts_bump_timers(esdp->timer_wheel, current_time);
-
- erts_runq_lock(rq);
- clear_sys_scheduling();
- goto continue_check_activities_to_run;
- }
if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
@@ -10082,29 +9884,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
if (flags & PORT_BIT) {
- int have_outstanding_io;
- have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
- if ((!erts_eager_check_io
- && have_outstanding_io
- && fcalls > 2*input_reductions)
- || (flags & ERTS_RUNQ_FLG_HALTING)) {
- /*
- * If we have performed more than 2*INPUT_REDUCTIONS since
- * last call to erl_sys_schedule() and we still haven't
- * handled all I/O tasks we stop running processes and
- * focus completely on ports.
- *
- * One could argue that this is a strange behavior. The
- * reason for doing it this way is that it is similar
- * to the behavior before port tasks were introduced.
- * We don't want to change the behavior too much, at
- * least not at the time of writing. This behavior
- * might change in the future.
- *
- * /rickard
- */
- goto check_activities_to_run;
- }
+ erts_port_task_execute(rq, &esdp->current_port);
+ if (flags & ERTS_RUNQ_FLG_HALTING)
+ goto check_activities_to_run;
}
/*
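
    Note: the two deletions above remove the last in-scheduler I/O polling. A
    scheduler no longer calls erl_sys_schedule() once fcalls exceeds
    INPUT_REDUCTIONS, and erts_port_task_execute() no longer reports outstanding
    I/O, so the "focus completely on ports" fallback is gone; only the halting
    flag still short-circuits back to check_activities_to_run. For reference, the
    removed mechanism was a reduction-budgeted poll of roughly this standalone
    shape (names and the budget constant are stand-ins):

        #include <stdatomic.h>

        #define INPUT_BUDGET 8000              /* cf. 2 * CONTEXT_REDS */

        static atomic_int work_count;

        extern void poll_io_nonblocking(void); /* stand-in for erl_sys_schedule(1) */

        static void maybe_poll(int reds_used)
        {
            if (atomic_fetch_add(&work_count, reds_used) + reds_used > INPUT_BUDGET) {
                atomic_store(&work_count, 0);  /* reset the budget... */
                poll_io_nonblocking();         /* ...and check I/O inline */
            }
        }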
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e63da2d9db..66d7848f89 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -105,7 +105,6 @@ struct saved_calls {
};
extern Export exp_send, exp_receive, exp_timeout;
-extern int erts_eager_check_io;
extern int erts_sched_compact_load;
extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
@@ -364,10 +363,12 @@ typedef struct {
} ErtsSchedulerSleepList;
struct ErtsSchedulerSleepInfo_ {
+ struct ErtsSchedulerData_ *esdp;
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
erts_atomic32_t flags;
erts_tse_t *event;
+ struct erts_poll_thread *psi;
erts_atomic32_t aux_work;
};
@@ -1531,9 +1532,7 @@ extern int erts_system_profile_ts_type;
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int
- , int, int, int
- );
+void erts_init_scheduling(int, int, int, int, int, int);
void erts_execute_dirty_system_task(Process *c_p);
int erts_set_gc_state(Process *c_p, int enable);
Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable,
@@ -2470,6 +2469,7 @@ void erts_notify_inc_runq(ErtsRunQueue *runq);
void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
+void erts_aux_thread_poke(void);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index 6cb7ccab8d..ab204303d7 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -37,7 +37,7 @@
#include "erl_thr_progress.h"
erts_sspa_data_t *
-erts_sspa_create(size_t blk_sz, int pa_size)
+erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
{
erts_sspa_data_t *data;
size_t tot_size;
@@ -48,22 +48,30 @@ erts_sspa_create(size_t blk_sz, int pa_size)
int no_blocks = pa_size;
int no_blocks_per_chunk;
- if (erts_no_schedulers == 1)
+    if (!name) { /* schedulers-only variant */
+ ASSERT(!nthreads);
+ nthreads = erts_no_schedulers;
+ }
+ else {
+ ASSERT(nthreads > 0);
+ }
+
+ if (nthreads == 1)
no_blocks_per_chunk = no_blocks;
else {
int extra = (no_blocks - 1)/4 + 1;
if (extra == 0)
extra = 1;
no_blocks_per_chunk = no_blocks;
- no_blocks_per_chunk += extra*erts_no_schedulers;
- no_blocks_per_chunk /= erts_no_schedulers;
+ no_blocks_per_chunk += extra * nthreads;
+ no_blocks_per_chunk /= nthreads;
}
- no_blocks = no_blocks_per_chunk * erts_no_schedulers;
+ no_blocks = no_blocks_per_chunk * nthreads;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
chunk_mem_size += blk_sz * no_blocks_per_chunk;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
- tot_size += chunk_mem_size*erts_no_schedulers;
+ tot_size += chunk_mem_size * nthreads;
p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
data = (erts_sspa_data_t *) p;
@@ -72,10 +80,16 @@ erts_sspa_create(size_t blk_sz, int pa_size)
data->chunks_mem_size = chunk_mem_size;
data->start = chunk_start;
- data->end = chunk_start + chunk_mem_size*erts_no_schedulers;
+ data->end = chunk_start + chunk_mem_size * nthreads;
+ data->nthreads = nthreads;
+
+ if (name) { /* thread variant */
+ erts_tsd_key_create(&data->tsd_key, (char*)name);
+ erts_atomic_init_nob(&data->id_generator, 0);
+ }
/* Initialize all chunks */
- for (cix = 0; cix < erts_no_schedulers; cix++) {
+ for (cix = 0; cix < nthreads; cix++) {
erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
erts_sspa_blk_t *blk;
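
    Note: erts_sspa_create() now sizes its pool for an arbitrary thread count instead
    of assuming erts_no_schedulers; name == NULL keeps the old scheduler-indexed
    behaviour, while a non-NULL name enables the thread variant. The sizing rounds
    the requested block count up so every thread gets an equal cache-aligned chunk;
    since extra = (no_blocks - 1)/4 + 1 is always at least 1, the extra == 0 guard is
    purely defensive. A standalone reproduction of the arithmetic with a worked
    example:

        /* Not ERTS code: same arithmetic as the hunk above. */
        static int blocks_per_chunk(int no_blocks, int nthreads)
        {
            int extra;
            if (nthreads == 1)
                return no_blocks;
            extra = (no_blocks - 1) / 4 + 1;        /* ~25% padding, at least 1 */
            return (no_blocks + extra * nthreads) / nthreads;
        }
        /* blocks_per_chunk(100, 8) == (100 + 25*8) / 8 == 37, so the pool really
         * holds 8 * 37 == 296 blocks rather than the requested 100. */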
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index 1307e65962..d232db0e69 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -59,6 +59,11 @@ typedef struct {
char *start;
char *end;
int chunks_mem_size;
+ int nthreads;
+
+ /* Used only by thread variant: */
+ erts_tsd_key_t tsd_key;
+ erts_atomic_t id_generator;
} erts_sspa_data_t;
typedef union erts_sspa_blk_t_ erts_sspa_blk_t;
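
    Note: the two new fields matter only for the thread variant. nthreads fixes the
    chunk count at creation time, while tsd_key and id_generator let threads that
    have no scheduler index claim a chunk on first use. A standalone pthread
    illustration of that claim-once pattern (assumed behaviour, not the ERTS
    implementation):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdint.h>

        static pthread_key_t tsd_key;       /* created once at pool init */
        static atomic_int    id_generator;

        static int my_chunk_index(int nthreads)
        {
            void *v = pthread_getspecific(tsd_key);
            if (!v) {
                int id = atomic_fetch_add(&id_generator, 1) % nthreads;
                v = (void *)(intptr_t)(id + 1);   /* +1 so NULL still means unset */
                pthread_setspecific(tsd_key, v);
            }
            return (int)(intptr_t)v - 1;
        }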
@@ -140,7 +145,9 @@ check_local_list(erts_sspa_chunk_header_t *chdr)
#endif
erts_sspa_data_t *erts_sspa_create(size_t blk_sz,
- int pa_size);
+ int pa_size,
+ int nthreads,
+ const char* name);
void erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr,
erts_sspa_blk_t *blk,
int cinit);
@@ -158,7 +165,7 @@ ERTS_GLB_INLINE int erts_sspa_free(erts_sspa_data_t *data, int cix, char *blk);
ERTS_GLB_INLINE erts_sspa_chunk_t *
erts_sspa_cix2chunk(erts_sspa_data_t *data, int cix)
{
- ASSERT(0 <= cix && cix < erts_no_schedulers);
+ ASSERT(0 <= cix && cix < data->nthreads);
return (erts_sspa_chunk_t *) (data->start + cix*data->chunks_mem_size);
}
@@ -171,7 +178,7 @@ erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr)
return -1;
diff = ((char *) ptr) - data->start;
cix = (int) diff / data->chunks_mem_size;
- ASSERT(0 <= cix && cix < erts_no_schedulers);
+ ASSERT(0 <= cix && cix < data->nthreads);
return cix;
}
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index e306df818d..aedceb6fc2 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -454,7 +454,6 @@ ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
-
ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
char *name,
Eterm extra,
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index a07e3642f6..4b996d8fc2 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1449,7 +1449,6 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
case ERTS_PORT_TASK_TIMEOUT: op = am_timeout; break;
case ERTS_PORT_TASK_INPUT: op = am_input; break;
case ERTS_PORT_TASK_OUTPUT: op = am_output; break;
- case ERTS_PORT_TASK_EVENT: op = am_event; break;
case ERTS_PORT_TASK_DIST_CMD: op = am_dist_cmd; break;
default: op = am_undefined; break;
}
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 076767c7cd..295130f60c 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -46,8 +46,6 @@
*/
#define ERTS_X_REGS_ALLOCATED (MAX_REG+3)
-#define INPUT_REDUCTIONS (2 * CONTEXT_REDS)
-
#define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */
#define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */
#define H_DEFAULT_MAX_SIZE 0 /* default max heap size is off */
diff --git a/erts/emulator/beam/erlang_lttng.h b/erts/emulator/beam/erlang_lttng.h
index 4e869671f7..feb05f4f4c 100644
--- a/erts/emulator/beam/erlang_lttng.h
+++ b/erts/emulator/beam/erlang_lttng.h
@@ -159,21 +159,6 @@ TRACEPOINT_EVENT(
TRACEPOINT_EVENT(
org_erlang_otp,
- driver_event,
- TP_ARGS(
- char*, pid,
- char*, port,
- char*, driver
- ),
- TP_FIELDS(
- ctf_string(pid, pid)
- ctf_string(port, port)
- ctf_string(driver, driver)
- )
-)
-
-TRACEPOINT_EVENT(
- org_erlang_otp,
driver_timeout,
TP_ARGS(
char*, pid,
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 2b0ad0b98a..09aeba00fa 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -222,8 +222,6 @@ struct erts_driver_t_ {
char *buf, ErlDrvSizeT len,
char **rbuf, ErlDrvSizeT rlen, /* Might be NULL */
unsigned int *flags);
- void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
- ErlDrvEventData event_data);
void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
void (*ready_output)(ErlDrvData drv_data, ErlDrvEvent event);
void (*timeout)(ErlDrvData drv_data);
@@ -1103,7 +1101,6 @@ void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
typedef struct {
Eterm delay_time;
int context_reds;
- int input_reds;
} ErtsModifiedTimings;
extern Export *erts_delay_trap;
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index bc1b9b6ef4..85013af3ad 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -5390,24 +5390,20 @@ erts_stale_drv_select(Eterm port,
     switch (mode) {
     case ERL_DRV_READ | ERL_DRV_WRITE:
	type = "Input/Output";
-	goto deselect;
+	break;
     case ERL_DRV_WRITE:
	type = "Output";
-	goto deselect;
+	break;
     case ERL_DRV_READ:
	type = "Input";
-    deselect:
-	if (deselect) {
-	    driver_select(drv_port, hndl,
-			  mode | ERL_DRV_USE_NO_CALLBACK,
-			  0);
-	}
-	break;
+	break;
     default:
-	type = "Event";
-	if (deselect)
-	    driver_event(drv_port, hndl, NULL);
-	break;
+	type = "";
+    }
+    if (deselect) {
+	driver_select(drv_port, hndl,
+		      mode | ERL_DRV_USE_NO_CALLBACK,
+		      0);
     }
dsbufp = erts_create_logger_dsbuf();
@@ -7511,14 +7504,6 @@ no_output_callback(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
}
static void
-no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_data)
-{
- Port *prt = get_current_port();
- report_missing_drv_callback(prt, "Event", "event()");
- driver_event(ERTS_Port2ErlDrvPort(prt), event, NULL);
-}
-
-static void
no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event)
{
Port *prt = get_current_port();
@@ -7585,7 +7570,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->outputv = de->outputv;
drv->control = de->control;
drv->call = de->call;
- drv->event = de->event ? de->event : no_event_callback;
drv->ready_input = de->ready_input ? de->ready_input : no_ready_input_callback;
drv->ready_output = de->ready_output ? de->ready_output : no_ready_output_callback;
drv->timeout = de->timeout ? de->timeout : no_timeout_callback;
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index ac9ebd4714..73306030ae 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -260,16 +260,17 @@ void* safe_hash_erase(SafeHash* h, void* tmpl)
}
/*
-** Call 'func(obj,func_arg2)' for all objects in table. NOT SAFE!!!
+** Call 'func(obj,func_arg2,func_arg3)' for all objects in table. NOT SAFE!!!
*/
-void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_arg2)
+void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *, void *),
+ void *func_arg2, void *func_arg3)
{
int i;
for (i = 0; i <= h->size_mask; i++) {
SafeHashBucket* b = h->tab[i];
while (b != NULL) {
- (*func)((void *) b, func_arg2);
+ (*func)((void *) b, func_arg2, func_arg3);
b = b->next;
}
}
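
    Note: safe_hash_for_each() (and its prototype in safe_hash.h below) grows a third
    argument so a caller can thread two pieces of context through one traversal;
    with the old two-argument form any extra state had to be packed inside func_arg2.
    A hypothetical usage sketch:

        /* Count entries, threading an accumulator and an (unused) context. */
        static void count_entry(void *obj, void *acc, void *ctx)
        {
            (void)obj; (void)ctx;
            ++*(int *)acc;
        }

        /* ... int count = 0;
         *     safe_hash_for_each(h, count_entry, &count, NULL); */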
diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h
index 259c58cff9..af97b4cb4d 100644
--- a/erts/emulator/beam/safe_hash.h
+++ b/erts/emulator/beam/safe_hash.h
@@ -95,7 +95,7 @@ void* safe_hash_get(SafeHash*, void*);
void* safe_hash_put(SafeHash*, void*);
void* safe_hash_erase(SafeHash*, void*);
-void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *);
+void safe_hash_for_each(SafeHash*, void (*func)(void *, void *, void *), void *, void *);
#ifdef ERTS_ENABLE_LOCK_COUNT
void erts_lcnt_enable_hash_lock_count(SafeHash*, erts_lock_flags_t, int);
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 68ef0a23f3..9106091ca6 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -575,8 +575,6 @@ __decl_noreturn void __noreturn erts_exit(int n, char*, ...);
erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \
__FILE__, __LINE__, __func__, What)
-Eterm erts_check_io_info(void *p);
-
UWord erts_sys_get_page_size(void);
/* Size of misc memory allocated from system dependent code */
@@ -739,8 +737,6 @@ extern char *erts_sys_ddll_error(int code);
/*
* System interfaces for startup.
*/
-void erts_sys_schedule_interrupt(int set);
-void erts_sys_schedule_interrupt_timed(int, ErtsMonotonicTime);
void erts_sys_main_thread(void);
extern int erts_sys_prepare_crash_dump(int secs);
@@ -794,7 +790,7 @@ void fini_getenv_state(GETENV_STATE *);
typedef struct {
int no_used_fds;
int no_driver_select_structs;
- int no_driver_event_structs;
+ int no_enif_select_structs;
} ErtsCheckIoDebugInfo;
int erts_check_io_debug(ErtsCheckIoDebugInfo *ip);
@@ -996,10 +992,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#ifdef ERTS_ENABLE_KERNEL_POLL
-extern int erts_use_kernel_poll;
-#endif
-
#define sys_memcpy(s1,s2,n) memcpy(s1,s2,n)
#define sys_memmove(s1,s2,n) memmove(s1,s2,n)
#define sys_memcmp(s1,s2,n) memcmp(s1,s2,n)