Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names          |    1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c     |    2
-rw-r--r--  erts/emulator/beam/beam_bp.c           |    3
-rw-r--r--  erts/emulator/beam/beam_emu.c          |   14
-rw-r--r--  erts/emulator/beam/beam_load.c         |    2
-rw-r--r--  erts/emulator/beam/bif.c               |   41
-rw-r--r--  erts/emulator/beam/code_ix.c           |    3
-rw-r--r--  erts/emulator/beam/dist.h              |    1
-rw-r--r--  erts/emulator/beam/erl_alloc.c         |    3
-rw-r--r--  erts/emulator/beam/erl_alloc.h         |    4
-rw-r--r--  erts/emulator/beam/erl_alloc.types     |   15
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c    |    4
-rw-r--r--  erts/emulator/beam/erl_async.c         |   13
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c     |    6
-rw-r--r--  erts/emulator/beam/erl_bif_port.c      |    5
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c  |    2
-rw-r--r--  erts/emulator/beam/erl_db_hash.h       |    5
-rw-r--r--  erts/emulator/beam/erl_db_util.c       |    3
-rw-r--r--  erts/emulator/beam/erl_driver.h        |   11
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h       |    7
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c    |    5
-rw-r--r--  erts/emulator/beam/erl_init.c          |   29
-rw-r--r--  erts/emulator/beam/erl_lock_check.c    |   76
-rw-r--r--  erts/emulator/beam/erl_lock_check.h    |   26
-rw-r--r--  erts/emulator/beam/erl_lock_count.c    |    2
-rw-r--r--  erts/emulator/beam/erl_lock_count.h    |    6
-rw-r--r--  erts/emulator/beam/erl_nif.c           |   26
-rw-r--r--  erts/emulator/beam/erl_nif.h           |    4
-rw-r--r--  erts/emulator/beam/erl_port_task.c     |    3
-rw-r--r--  erts/emulator/beam/erl_port_task.h     |    8
-rw-r--r--  erts/emulator/beam/erl_process.c       | 1469
-rw-r--r--  erts/emulator/beam/erl_process.h       |   63
-rw-r--r--  erts/emulator/beam/erl_process_dict.c  |    2
-rw-r--r--  erts/emulator/beam/erl_process_lock.c  |   64
-rw-r--r--  erts/emulator/beam/erl_process_lock.h  |   25
-rw-r--r--  erts/emulator/beam/erl_ptab.c          |    3
-rw-r--r--  erts/emulator/beam/erl_smp.h           |   85
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c  |    3
-rw-r--r--  erts/emulator/beam/erl_threads.h       |  128
-rw-r--r--  erts/emulator/beam/erl_trace.c         |   10
-rw-r--r--  erts/emulator/beam/erl_utils.h         |    2
-rw-r--r--  erts/emulator/beam/external.c          |    8
-rw-r--r--  erts/emulator/beam/io.c                |   23
-rw-r--r--  erts/emulator/beam/ops.tab             |   10
-rw-r--r--  erts/emulator/beam/sys.h               |    2
-rw-r--r--  erts/emulator/beam/utils.c             |    2
46 files changed, 1811 insertions(+), 418 deletions(-)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 96547ba743..d28e519ae1 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -179,6 +179,7 @@ atom dexit
atom depth
atom dgroup_leader
atom dictionary
+atom dirty_cpu_schedulers_online
atom disable_trace
atom disabled
atom display_items
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 3f92c5b025..df1983a83d 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -201,7 +201,7 @@ finish_loading_1(BIF_ALIST_1)
* to keep the elements in.
*/
- n = list_length(BIF_ARG_1);
+ n = erts_list_length(BIF_ARG_1);
if (n == -1) {
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
goto done;
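
The rename from list_length() to erts_list_length() recurs throughout this diff, and every caller relies on the same contract: a non-negative length for a proper list, a negative value otherwise. A minimal sketch of that contract using the standard ERTS term macros (the function body is illustrative, not the actual implementation):

    /* Return the length of a proper (nil-terminated) list,
     * or -1 if the term is not one. Illustrative sketch only. */
    static Sint sketch_list_length(Eterm list)
    {
        Sint n = 0;
        while (is_list(list)) {          /* cons cell */
            n++;
            list = CDR(list_val(list));  /* advance to the tail */
        }
        return is_nil(list) ? n : -1;    /* proper lists end in [] */
    }
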
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 68907a771a..49a34ab4ad 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -46,7 +46,8 @@
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 6f295530b9..0cec9ea3ec 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -71,7 +71,8 @@ do { \
ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
} while (0)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
+ __FILE__, __LINE__)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
# else
@@ -1190,11 +1191,16 @@ void process_main(void)
* c_p->arg_reg before calling the scheduler.
*/
if (!init_done) {
+ /* This should only be reached during the init phase when only the main
+ * process is running. I.e. there is no race for init_done.
+ */
init_done = 1;
goto init_emulator;
}
+
c_p = NULL;
reds_used = 0;
+
goto do_schedule1;
do_schedule:
@@ -1203,7 +1209,11 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
- if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
+ if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
+#ifdef ERTS_DIRTY_SCHEDULERS
+ && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
+#endif
+ ) {
BeamInstr *inptr = find_function_from_pc(start_time_i);
BeamInstr *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 53df7876d8..e96177cfd9 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -5870,7 +5870,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
Funcs = tp[1];
Patchlist = tp[2];
- if ((n = list_length(Funcs)) < 0) {
+ if ((n = erts_list_length(Funcs)) < 0) {
goto error;
}
if ((bytes = erts_get_aligned_binary_bytes(Beam, &temp_alloc)) == NULL) {
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 9c4801041f..06a1230ca0 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -2675,7 +2675,7 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i > MAX_ATOM_CHARACTERS) {
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
@@ -2953,7 +2953,7 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
char *buf = NULL;
int base;
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i < 0)
BIF_ERROR(BIF_P, BADARG);
@@ -3292,7 +3292,7 @@ BIF_RETTYPE list_to_float_1(BIF_ALIST_1)
Eterm res;
char *buf = NULL;
- i = list_length(BIF_ARG_1);
+ i = erts_list_length(BIF_ARG_1);
if (i < 0)
BIF_ERROR(BIF_P, BADARG);
@@ -3407,7 +3407,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
Eterm* hp;
int len;
- if ((len = list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
+ if ((len = erts_list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -4333,7 +4333,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
switch (erts_set_schedulers_online(BIF_P,
ERTS_PROC_LOCK_MAIN,
signed_val(BIF_ARG_2),
- &old_no)) {
+ &old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , 0
+#endif
+ )) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(old_no));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
@@ -4465,6 +4469,33 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
ref,
old ? am_true : am_false);
}
+#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) {
+ Sint old_no;
+ if (!is_small(BIF_ARG_2))
+ goto error;
+ switch (erts_set_schedulers_online(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ signed_val(BIF_ARG_2),
+ &old_no,
+ 1)) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(make_small(old_no));
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no),
+ am_dirty_cpu_schedulers_online);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
+#endif
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index c66d5a2f05..4344558348 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -58,7 +58,8 @@ void erts_code_ix_init(void)
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_tsd_key_create(&has_code_write_permission);
+ erts_tsd_key_create(&has_code_write_permission,
+ "erts_has_code_write_permission");
#endif
CIX_TRACE("init");
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index ff8f5e106f..0519a9225e 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -44,6 +44,7 @@
/* All flags that should be enabled when term_to_binary/1 is used. */
#define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \
| DFLAG_NEW_FUN_TAGS \
+ | DFLAG_NEW_FLOATS \
| DFLAG_EXTENDED_PIDS_PORTS \
| DFLAG_EXPORT_PTR_TAG \
| DFLAG_BIT_BINARIES)
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 64b3e01ed4..05ac24e04d 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -585,7 +585,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
if (ncpu < 1)
ncpu = 1;
- erts_tsd_key_create(&erts_allctr_prelock_tsd_key);
+ erts_tsd_key_create(&erts_allctr_prelock_tsd_key,
+ "erts_allctr_prelock_tsd_key");
erts_sys_alloc_init();
erts_init_utils_mem();
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index f83f6b39cf..942eaa47d0 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -209,8 +209,8 @@ int erts_is_allctr_wrapper_prelocked(void);
void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
#ifndef ERTS_CACHE_LINE_SIZE
-/* Assume a cache line size of 64 bytes */
-# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
+/* Assumed cache line size */
+# define ERTS_CACHE_LINE_SIZE ((UWord) ASSUMED_CACHE_LINE_SIZE)
# define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1)
#endif
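
With ERTS_CACHE_LINE_SIZE now taken from the configure-time ASSUMED_CACHE_LINE_SIZE instead of a hard-coded 64, ERTS_CACHE_LINE_MASK still supports the usual power-of-two rounding idiom. A small sketch of how allocator code typically rounds a size up to a cache-line boundary (assuming the size is a UWord and the line size is a power of two):

    /* Round sz up to the next cache-line boundary. */
    static UWord cache_line_align(UWord sz)
    {
        return (sz + ERTS_CACHE_LINE_MASK) & ~ERTS_CACHE_LINE_MASK;
    }
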
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index b4e52770e3..17ac6316b7 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -414,6 +414,21 @@ type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+endif
++if ose
+
+type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf
+type FD_TAB LONG_LIVED SYSTEM fd_tab
+type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf
+type FD_SIG_LIST SHORT_LIVED SYSTEM fd_sig_list
+type DRV_EV STANDARD SYSTEM driver_event
+type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
+type ENVIRONMENT TEMPORARY SYSTEM environment
+type PUTENV_STR SYSTEM SYSTEM putenv_string
+type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+
++endif
+
+
+if win32
type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index c6cea0185f..45f0cc4312 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -5561,11 +5561,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
erts_mtx_init_x_opt(&allctr->mutex,
"alcu_allocator",
make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC);
+ ERTS_LCNT_LT_ALLOC,1);
#else
erts_mtx_init_x(&allctr->mutex,
"alcu_allocator",
- make_small(allctr->alloc_no));
+ make_small(allctr->alloc_no),1);
#endif /*ERTS_ENABLE_LOCK_COUNT*/
#ifdef DEBUG
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index f0cec1c53c..b3dc327704 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -166,6 +166,7 @@ async_ready_q(Uint sched_id)
#endif
+
void
erts_init_async(void)
{
@@ -226,11 +227,23 @@ erts_init_async(void)
thr_opts.suggested_stack_size
= erts_async_thread_suggested_stack_size;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ thr_opts.name = malloc(sizeof(char)*(strlen("async_XXXX")+1));
+#endif
+
for (i = 0; i < erts_async_max_threads; i++) {
ErtsAsyncQ *aq = async_q(i);
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(thr_opts.name, "async_%d", i+1);
+#endif
+
erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
}
+#ifdef ETHR_HAVE_THREAD_NAMES
+ free(thr_opts.name);
+#endif
/* Wait for async threads to initialize... */
erts_mtx_lock(&async->init.data.mtx);
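
The naming loop above sizes one heap buffer from the "async_XXXX" template, rewrites it for every thread, and frees it after the loop, which implies the thread library copies the name during creation. A hedged sketch of the same pattern using snprintf, which makes the buffer bound explicit (the helper is illustrative):

    #include <stdio.h>

    /* Illustrative: the buffer is reused, so the thread library must
     * copy the name before the next iteration overwrites it. */
    static void name_async_threads(int nthreads)
    {
        char namebuf[sizeof "async_XXXX"]; /* "async_" + 4 digits + NUL */
        int i;
        for (i = 0; i < nthreads; i++) {
            snprintf(namebuf, sizeof namebuf, "async_%d", i + 1);
            /* ... create thread i, passing namebuf as its name ... */
        }
    }
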
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 1805366cfe..820ed2385d 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -43,7 +43,7 @@ static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
Eterm* hp;
int i;
- if ((i = list_length(A)) < 0) {
+ if ((i = erts_list_length(A)) < 0) {
BIF_ERROR(p, BADARG);
}
if (i == 0) {
@@ -102,10 +102,10 @@ static Eterm subtract(Process* p, Eterm A, Eterm B)
int n;
int m;
- if ((n = list_length(A)) < 0) {
+ if ((n = erts_list_length(A)) < 0) {
BIF_ERROR(p, BADARG);
}
- if ((m = list_length(B)) < 0) {
+ if ((m = erts_list_length(B)) < 0) {
BIF_ERROR(p, BADARG);
}
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index f298422267..77627a6897 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -941,7 +941,8 @@ static char **convert_args(Eterm l)
if (is_not_list(l) && is_not_nil(l)) {
return NULL;
}
- n = list_length(l);
+
+ n = erts_list_length(l);
/* We require at least one element in argv[0] + NULL at end */
pp = erts_alloc(ERTS_ALC_T_TMP, (n + 2) * sizeof(char **));
pp[i++] = erts_default_arg0;
@@ -986,7 +987,7 @@ static byte* convert_environment(Process* p, Eterm env)
byte* bytes;
int encoding = erts_get_native_filename_encoding();
- if ((n = list_length(env)) < 0) {
+ if ((n = erts_list_length(env)) < 0) {
return NULL;
}
heap_size = 2*(5*n+1);
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 88c6c34881..f594cb9392 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1699,7 +1699,7 @@ erts_early_init_cpu_topology(int no_schedulers,
}
max_main_threads = erts_get_cpu_configured(cpuinfo);
- if (max_main_threads > no_schedulers)
+ if (max_main_threads > no_schedulers || max_main_threads < 0)
max_main_threads = no_schedulers;
*max_main_threads_p = max_main_threads;
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index d17bd9f693..908cec11d4 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -33,7 +33,12 @@ typedef struct hash_db_term {
DbTerm dbterm; /* The actual term */
} HashDbTerm;
+#ifdef ERTS_DB_HASH_LOCK_CNT
+#define DB_HASH_LOCK_CNT ERTS_DB_HASH_LOCK_CNT
+#else
#define DB_HASH_LOCK_CNT 64
+#endif
+
typedef struct db_table_hash_fine_locks {
union {
erts_smp_rwmtx_t lck;
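
Making DB_HASH_LOCK_CNT overridable (e.g. by building with -DERTS_DB_HASH_LOCK_CNT=256) lets the number of fine-grained rwlocks per hash table be tuned at compile time. The count is assumed to stay a power of two, since a bucket's lock is typically selected by masking the hash value; a sketch of that selection (the macro name is illustrative):

    /* Map a hash value to one of the DB_HASH_LOCK_CNT lock slots;
     * correct only when DB_HASH_LOCK_CNT is a power of two. */
    #define SKETCH_LOCK_IDX(hval) ((hval) & (DB_HASH_LOCK_CNT - 1))
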
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 3986ccd4d3..3927615e04 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -483,7 +483,8 @@ void
match_pseudo_process_init(void)
{
#ifdef ERTS_SMP
- erts_smp_tsd_key_create(&match_pseudo_process_key);
+ erts_smp_tsd_key_create(&match_pseudo_process_key,
+ "erts_match_pseudo_process_key");
erts_smp_install_exit_handler(destroy_match_pseudo_process);
#else
match_pseudo_process = create_match_pseudo_process();
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index ab9ee63104..5517c26ba4 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -271,7 +271,6 @@ typedef struct ErlDrvCond_ ErlDrvCond;
typedef struct ErlDrvRWLock_ ErlDrvRWLock;
typedef int ErlDrvTSDKey;
-
/*
*
*/
@@ -680,6 +679,16 @@ EXTERN char *driver_dl_error(void);
EXTERN int erl_drv_putenv(char *key, char *value);
EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
+#ifdef __OSE__
+typedef ErlDrvUInt ErlDrvOseEventId;
+EXTERN union SIGNAL *erl_drv_ose_get_signal(ErlDrvEvent ev);
+EXTERN ErlDrvEvent erl_drv_ose_event_alloc(SIGSELECT sig, ErlDrvOseEventId handle,
+ ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig), void *extra);
+EXTERN void erl_drv_ose_event_free(ErlDrvEvent ev);
+EXTERN void erl_drv_ose_event_fetch(ErlDrvEvent ev, SIGSELECT *sig,
+ ErlDrvOseEventId *handle, void **extra);
+#endif
+
#endif /* !ERL_DRIVER_TYPES_ONLY */
#ifdef WIN32_DYNAMIC_ERL_DRIVER
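
The new OSE entry points pair an OS signal number with a driver-chosen id, and the resolve_signal callback maps an incoming signal back to that id so the emulator can route it to the right event. A heavily hedged sketch of how a driver might use erl_drv_ose_event_alloc (the signal number, and the assumption that the id can be recovered from the signal itself, are hypothetical):

    #ifdef __OSE__
    #define MY_SIGNO ((SIGSELECT) 1234)   /* hypothetical signal number */

    static ErlDrvOseEventId my_resolve(union SIGNAL *sig)
    {
        /* recover the id that was passed to erl_drv_ose_event_alloc() */
        return (ErlDrvOseEventId) sig->sig_no;
    }

    static ErlDrvEvent my_open_event(void)
    {
        return erl_drv_ose_event_alloc(MY_SIGNO, (ErlDrvOseEventId) MY_SIGNO,
                                       my_resolve, NULL);
    }
    #endif
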
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index ea013a49a3..3f829ea7ea 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -41,6 +41,13 @@ typedef struct {
int suggested_stack_size;
} ErlDrvThreadOpts;
+#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT)
+typedef enum {
+ ERL_DRV_DIRTY_JOB_CPU_BOUND = 1,
+ ERL_DRV_DIRTY_JOB_IO_BOUND = 2
+} ErlDrvDirtyJobFlags;
+#endif
+
#endif /* __ERL_DRV_NIF_H__ */
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 4f1bba8657..147249f751 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -78,8 +78,6 @@ struct ErlDrvTid_ {
static ethr_tsd_key tid_key;
-static ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
-
#else /* USE_THREADS */
static Uint tsd_len;
static void **tsd;
@@ -123,7 +121,7 @@ void erl_drv_thr_init(void)
{
int i;
#ifdef USE_THREADS
- int res = ethr_tsd_key_create(&tid_key);
+ int res = ethr_tsd_key_create(&tid_key,"erts_tid_key");
if (res == 0)
res = ethr_install_exit_handler(thread_exit_handler);
if (res != 0)
@@ -605,6 +603,7 @@ erl_drv_thread_create(char *name,
struct ErlDrvTid_ *dtid;
ethr_thr_opts ethr_opts;
ethr_thr_opts *use_opts;
+ ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
if (!opts)
use_opts = NULL;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index c17256f466..d54658f1ea 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -178,6 +178,11 @@ int erts_compat_rel;
static int no_schedulers;
static int no_schedulers_online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static int no_dirty_cpu_schedulers;
+static int no_dirty_cpu_schedulers_online;
+static int no_dirty_io_schedulers;
+#endif
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -304,7 +309,13 @@ erl_init(int ncpu,
erts_init_sys_common_misc();
erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
- no_schedulers_online);
+ no_schedulers_online
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , no_dirty_cpu_schedulers,
+ no_dirty_cpu_schedulers_online,
+ no_dirty_io_schedulers
+#endif
+ );
erts_init_cpu_topology(); /* Must be after init_scheduling */
erts_init_gc(); /* Must be after init_scheduling */
erts_alloc_late_init();
@@ -709,7 +720,7 @@ early_init(int *argc, char **argv) /*
#endif
#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
- erts_tsd_key_create(&erts_is_crash_dumping_key);
+ erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key");
#else
erts_writing_erl_crash_dump = 0;
#endif
@@ -780,7 +791,7 @@ early_init(int *argc, char **argv) /*
case 'A': {
/* set number of threads in thread pool */
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (((erts_async_max_threads = atoi(arg)) < 0) ||
+ if (((erts_async_max_threads = atoi(arg)) < ERTS_MIN_NO_OF_ASYNC_THREADS) ||
(erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
erts_fprintf(stderr,
"bad number of async threads %s\n",
@@ -835,7 +846,7 @@ early_init(int *argc, char **argv) /*
else if (argv[i][2] == 'D') {
char *arg;
char *type = argv[i]+3;
- if (strcmp(type, "Pcpu") == 0) {
+ if (strncmp(type, "Pcpu", 4) == 0) {
int ptot, ponln;
arg = get_arg(argv[i]+7, argv[i+1], &i);
switch (sscanf(arg, "%d:%d", &ptot, &ponln)) {
@@ -872,7 +883,7 @@ early_init(int *argc, char **argv) /*
VERBOSE(DEBUG_SYSTEM,
("using %d:%d dirty CPU scheduler percentages\n",
dirty_cpu_scheds_pctg, dirty_cpu_scheds_onln_pctg));
- } else if (strcmp(type, "cpu") == 0) {
+ } else if (strncmp(type, "cpu", 3) == 0) {
int tot, onln;
arg = get_arg(argv[i]+6, argv[i+1], &i);
switch (sscanf(arg, "%d:%d", &tot, &onln)) {
@@ -923,7 +934,7 @@ early_init(int *argc, char **argv) /*
}
VERBOSE(DEBUG_SYSTEM,
("using %d:%d dirty CPU scheduler(s)\n", tot, onln));
- } else if (strcmp(type, "io") == 0) {
+ } else if (strncmp(type, "io", 2) == 0) {
arg = get_arg(argv[i]+5, argv[i+1], &i);
dirty_io_scheds = atoi(arg);
if (dirty_io_scheds < 0 ||
@@ -1055,9 +1066,9 @@ early_init(int *argc, char **argv) /*
erts_no_schedulers = (Uint) no_schedulers;
#endif
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_no_dirty_cpu_schedulers = dirty_cpu_scheds;
- erts_no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
- erts_no_dirty_io_schedulers = dirty_io_scheds;
+ erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
+ no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
+ erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds;
#endif
erts_early_init_scheduling(no_schedulers);
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index a8ff94ac89..7e3a90779d 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -241,6 +241,8 @@ struct erts_lc_locked_lock_t_ {
erts_lc_locked_lock_t *prev;
UWord extra;
Sint16 id;
+ char *file;
+ unsigned int line;
Uint16 flags;
};
@@ -430,47 +432,51 @@ make_my_locked_locks(void)
}
static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags)
+new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
l_lck->next = NULL;
l_lck->prev = NULL;
l_lck->id = lck->id;
l_lck->extra = lck->extra;
+ l_lck->file = file;
+ l_lck->line = line;
l_lck->flags = lck->flags | op_flags;
return l_lck;
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+ char* file, unsigned int line, char *suffix)
{
char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
? erts_lock_order[id].name
: "unknown");
+ erts_fprintf(stderr,"%s'%s:",prefix,lname);
+
if (is_not_immed(extra))
- erts_fprintf(stderr,
- "%s'%s:%p%s'%s%s",
- prefix,
- lname,
- _unchecked_boxed_val(extra),
- lock_type(flags),
- rw_op_str(flags),
- suffix);
+ erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
- erts_fprintf(stderr,
- "%s'%s:%T%s'%s%s",
- prefix,
- lname,
- extra,
- lock_type(flags),
- rw_op_str(flags),
- suffix);
+ erts_fprintf(stderr,"%T",extra);
+ erts_fprintf(stderr,"%s",lock_type(flags));
+
+ if (file)
+ erts_fprintf(stderr,"(%s:%d)",file,line);
+
+ erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+}
+
+static void
+print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+{
+ raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
static void
print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
{
- print_lock2(prefix, lck->id, lck->extra, lck->flags, suffix);
+ raw_print_lock(prefix, lck->id, lck->extra, lck->flags, NULL, 0, suffix);
}
static void
@@ -486,7 +492,8 @@ print_curr_locks(erts_lc_locked_locks_t *l_lcks)
"Currently these locks are locked by the %s thread:\n",
l_lcks->thread_name);
for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
- print_lock2(" ", l_lck->id, l_lck->extra, l_lck->flags, "\n");
+ raw_print_lock(" ", l_lck->id, l_lck->extra, l_lck->flags,
+ l_lck->file, l_lck->line, "\n");
}
}
@@ -1002,7 +1009,8 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1014,7 +1022,7 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
return;
l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags) : NULL;
+ l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1055,13 +1063,14 @@ void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
if (!find_lock(&l_lck, lck))
required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags);
+ l_lck = new_locked_lock(lck, op_flags, file, line);
if (!l_lcks->required.last) {
ASSERT(!l_lcks->required.first);
l_lck->next = l_lck->prev = NULL;
@@ -1129,7 +1138,8 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
lc_free((void *) l_lck);
}
-void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1141,7 +1151,7 @@ void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
return;
l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags);
+ l_lck = new_locked_lock(lck, op_flags, file, line);
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1232,15 +1242,15 @@ erts_lc_trylock_force_busy(erts_lc_lock_t *lck)
}
void
-erts_lc_trylock(int locked, erts_lc_lock_t *lck)
+erts_lc_trylock_x(int locked, erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_trylock_flg(locked, lck, 0);
+ erts_lc_trylock_flg_x(locked, lck, 0, file, line);
}
void
-erts_lc_lock(erts_lc_lock_t *lck)
+erts_lc_lock_x(erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_lock_flg(lck, 0);
+ erts_lc_lock_flg_x(lck, 0, file, line);
}
void
@@ -1254,9 +1264,9 @@ void erts_lc_might_unlock(erts_lc_lock_t *lck)
erts_lc_might_unlock_flg(lck, 0);
}
-void erts_lc_require_lock(erts_lc_lock_t *lck)
+void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line)
{
- erts_lc_require_lock_flg(lck, 0);
+ erts_lc_require_lock_flg(lck, 0, file, line);
}
void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
@@ -1322,7 +1332,7 @@ erts_lc_init(void)
if (ethr_spinlock_init(&free_blocks_lock) != 0)
lc_abort();
- erts_tsd_key_create(&locks_key);
+ erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
void
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 068340abe7..3f7f417e61 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -35,6 +35,11 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifndef ERTS_ENABLE_LOCK_POSITION
+/* Enable in order for _x variants of mtx functions to be used. */
+#define ERTS_ENABLE_LOCK_POSITION 1
+#endif
+
typedef struct {
int inited;
Sint16 id;
@@ -79,13 +84,16 @@ void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
-void erts_lc_trylock(int locked, erts_lc_lock_t *lck);
-void erts_lc_lock(erts_lc_lock_t *lck);
+void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
+ char* file, unsigned int line);
+void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
@@ -96,10 +104,11 @@ int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+ char *file, unsigned int line);
void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_require_lock(erts_lc_lock_t *lck);
+void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
int erts_lc_is_emu_thr(void);
@@ -116,4 +125,9 @@ int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) ((void) 1)
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
+#define erts_lc_lock(lck) erts_lc_lock_x(lck,__FILE__,__LINE__)
+#define erts_lc_trylock(res,lck) erts_lc_trylock_x(res,lck,__FILE__,__LINE__)
+#define erts_lc_lock_flg(lck) erts_lc_lock_flg_x(lck,__FILE__,__LINE__)
+#define erts_lc_trylock_flg(res,lck) erts_lc_trylock_flg_x(res,lck,__FILE__,__LINE__)
+
#endif /* #ifndef ERTS_LOCK_CHECK_H__ */
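
The macros at the end of this header are the heart of the change: every existing erts_lc_lock()/erts_lc_trylock() call site now records its own position without being edited, because the macro expands __FILE__ and __LINE__ at the point of use and the _x function stores them in the locked-lock record. The same technique works for any instrumented API; a generic sketch (my_lock/my_lock_x are illustrative names):

    void my_lock_x(struct my_mutex *m, char *file, unsigned int line);
    /* Callers keep writing my_lock(&m); the position comes for free. */
    #define my_lock(m) my_lock_x((m), __FILE__, __LINE__)
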
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 5f75b0ac0b..6f44bf097b 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -236,7 +236,7 @@ void erts_lcnt_init() {
/* init tsd */
lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key);
+ ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data");
lcnt_lock();
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index a4fc91b510..75f7cd028b 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -61,8 +61,14 @@
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifndef ERTS_ENABLE_LOCK_POSITION
+/* Enable in order for _x variants of mtx functions to be used. */
+#define ERTS_ENABLE_LOCK_POSITION 1
+#endif
+
#include "ethread.h"
+
#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 4126df6f34..40860e141c 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -876,7 +876,7 @@ int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
{
if (is_not_list(term) && is_not_nil(term)) return 0;
- *len = list_length(term);
+ *len = erts_list_length(term);
return 1;
}
@@ -1570,6 +1570,13 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
a = erts_smp_atomic32_read_acqb(&proc->state);
while (1) {
n = state = a;
+ /*
+ * clear any current dirty flags and dirty queue indicators,
+ * in case the application is shifting a job from one type
+ * of dirty scheduler to the other
+ */
+ n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
n |= ERTS_PSFLG_DIRTY_CPU_PROC;
else
@@ -1600,22 +1607,15 @@ enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result,
ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM))
{
#ifdef USE_THREADS
- erts_aint32_t state, n, a;
Process* proc = env->proc;
Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
Export* ep;
- a = erts_smp_atomic32_read_acqb(&proc->state);
- while (1) {
- n = state = a;
- if (!(n & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)))
- break;
- n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state);
- if (a == state)
- break;
- }
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ |ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
+ |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
if (!(ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
alloc_proc_psd(proc, &ep);
ERTS_VBUMP_ALL_REDS(proc);
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 7613446f64..c12ba4d554 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -172,8 +172,8 @@ typedef ErlDrvThreadOpts ErlNifThreadOpts;
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
typedef enum
{
- ERL_NIF_DIRTY_JOB_CPU_BOUND = 1,
- ERL_NIF_DIRTY_JOB_IO_BOUND = 2
+ ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND,
+ ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND
}ErlNifDirtyTaskFlags;
#endif
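
Aliasing ErlNifDirtyTaskFlags to the new driver-level ErlDrvDirtyJobFlags keeps the NIF and driver encodings of "CPU bound" and "I/O bound" identical. As context for the erl_nif.c changes above, a hedged sketch of how a NIF might hand its work to a dirty scheduler under this experimental API (the enif_schedule_dirty_nif signature is inferred from the surrounding diff, and dirty_compute is hypothetical):

    /* Runs on a dirty CPU scheduler; free to take a long time. */
    static ERL_NIF_TERM dirty_compute(ErlNifEnv* env, int argc,
                                      const ERL_NIF_TERM argv[])
    {
        /* ... long-running work ... */
        return enif_make_atom(env, "done");
    }

    /* Entry point called on a normal scheduler: reschedule and return. */
    static ERL_NIF_TERM compute(ErlNifEnv* env, int argc,
                                const ERL_NIF_TERM argv[])
    {
        return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND,
                                       dirty_compute, argc, argv);
    }
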
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index d4108067d0..fb6048b41f 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1681,7 +1681,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
reds = ERTS_PORT_REDS_INPUT;
ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_input, pp);
- /* NOTE some windows drivers use ->ready_input for input and output */
+ /* NOTE some windows/ose drivers use ->ready_input
+ for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
io_tasks_executed++;
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 123253a057..1d30465ec9 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -185,11 +185,13 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
+ erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
- lock_str = NULL;
+ (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
+#else
+ 1
#endif
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
+ );
#endif
}
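
Here erts_mtx_init_x() grows a trailing flag that says whether the mutex takes part in lock counting, replacing the earlier trick of passing a NULL name to opt out. Folding the choice into an argument keeps a single call for both build flavours; the resulting call shape, condensed from the hunk above (the flag meaning is assumed to be "enable lock counting"):

    erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
    #ifdef ERTS_ENABLE_LOCK_COUNT
                    (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
    #else
                    1   /* lock counting not built in; flag is a no-op */
    #endif
                    );
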
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 937881212a..37e1d07107 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -44,6 +44,7 @@
#include "dtrace-wrapper.h"
#include "erl_ptab.h"
+
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -53,7 +54,11 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
+#ifndef ERTS_SCHED_MIN_SPIN
#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+#else
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 1
+#endif
#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40
#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000
@@ -148,7 +153,6 @@ int erts_sched_balance_util = 0;
Uint erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
Uint erts_no_dirty_cpu_schedulers;
-Uint erts_no_dirty_cpu_schedulers_online;
Uint erts_no_dirty_io_schedulers;
#endif
@@ -188,6 +192,13 @@ static ErtsAuxWorkData *aux_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL))
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_cpu_changing, (VAL))
+#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_io_changing, (VAL))
+#endif
+
#else
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
@@ -198,6 +209,23 @@ do { \
ASSERT(old_val__ == (OLD_VAL)); \
} while (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_cpu_changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_io_changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+#endif
+
#endif
@@ -207,11 +235,29 @@ static struct {
int online;
int curr_online;
int wait_curr_online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int dirty_cpu_online;
+ int dirty_cpu_curr_online;
+ int dirty_cpu_wait_curr_online;
+ int dirty_io_online;
+ int dirty_io_curr_online;
+ int dirty_io_wait_curr_online;
+#endif
erts_smp_atomic32_t changing;
erts_smp_atomic32_t active;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_t dirty_cpu_changing;
+ erts_smp_atomic32_t dirty_cpu_active;
+ erts_smp_atomic32_t dirty_io_changing;
+ erts_smp_atomic32_t dirty_io_active;
+#endif
struct {
int ongoing;
long wait_active;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ long dirty_cpu_wait_active;
+ long dirty_io_wait_active;
+#endif
ErtsProcList *procs;
} msb; /* Multi Scheduling Block */
} schdlr_sspnd;
@@ -467,7 +513,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#ifdef ERTS_SMP
static void handle_pending_exiters(ErtsProcList *);
-
+static void wake_scheduler(ErtsRunQueue *rq);
#endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
@@ -507,7 +553,7 @@ void
erts_pre_init_process(void)
{
#ifdef USE_THREADS
- erts_tsd_key_create(&sched_data_key);
+ erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1306,6 +1352,9 @@ static ERTS_INLINE void
haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
{
ErtsThrPrgrVal current = awdp->current_thr_prgr;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (current != ERTS_THR_PRGR_INVALID
&& !erts_thr_progress_equal(current, erts_thr_progress_current())) {
/*
@@ -1322,6 +1371,10 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in
{
int jix, max_jix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+
ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY);
if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions)
@@ -1477,6 +1530,9 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
erts_aint32_t aux_work,
int waiting)
{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
@@ -1561,6 +1617,9 @@ handle_async_ready(ErtsAuxWorkData *awdp,
int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
if (erts_check_async_ready(awdp->async_ready.queue)) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
@@ -1585,6 +1644,9 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
{
void *thr_prgr_p;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
#ifdef ERTS_SMP
if (awdp->async_ready.need_thr_prgr
&& !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
@@ -1622,6 +1684,9 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t res;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
@@ -1655,11 +1720,7 @@ erts_alloc_ensure_handle_delayed_dealloc_call(int ix)
{
#ifdef DEBUG
ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp))
- return;
-#endif
- ASSERT(!esdp || ix == (int) esdp->no);
+ ASSERT(!esdp || (ERTS_SCHEDULER_IS_DIRTY(esdp) || ix == (int) esdp->no));
#endif
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
ERTS_SSI_AUX_WORK_DD);
@@ -1673,6 +1734,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
&need_thr_progress,
@@ -1712,6 +1776,9 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -1759,6 +1826,9 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
int lops;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
if (!erts_thr_progress_has_reached_this(current, lop->later))
@@ -1917,6 +1987,14 @@ erts_smp_notify_check_children_needed(void)
for (i = 0; i < erts_no_schedulers; i++)
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ for (i = 0; i < erts_no_dirty_cpu_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ for (i = 0; i < erts_no_dirty_io_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+#endif
}
static ERTS_INLINE erts_aint32_t
@@ -2302,14 +2380,24 @@ try_set_sys_scheduling(void)
#endif
static ERTS_INLINE int
-prepare_for_sys_schedule(void)
+prepare_for_sys_schedule(ErtsSchedulerData *esdp)
{
#ifdef ERTS_SMP
while (!erts_port_task_have_outstanding_io_tasks()
&& try_set_sys_scheduling()) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ if (esdp->no != 1) {
+ /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+ then we make sure to wake scheduler 1 */
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
clear_sys_scheduling();
+ wake_scheduler(rq);
+ return 0;
+ }
+#endif
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
}
return 0;
#else
@@ -2615,6 +2703,8 @@ aux_thread(void *unused)
erts_thr_progress_active(NULL, thr_prgr_active = 0);
erts_thr_progress_prepare_wait(NULL);
+ ERTS_SCHED_FAIR_YIELD();
+
flgs = sched_spin_wait(ssi, 0);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
@@ -2682,7 +2772,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* be waiting in erl_sys_schedule()
*/
- if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule()) {
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp)) {
sched_waiting(esdp->no, rq);
@@ -2697,15 +2787,15 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
while (1) {
- aux_work = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
- erts_atomic32_read_acqb(&ssi->aux_work);
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
- if (!thr_prgr_active) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- if (aux_work && erts_thr_progress_update(esdp))
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)
+ && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
}
@@ -2720,6 +2810,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_thr_progress_prepare_wait(esdp);
}
+ ERTS_SCHED_FAIR_YIELD();
+
flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
@@ -2775,6 +2867,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
#endif
+
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+ ASSERT(esdp->no == 1);
+#endif
sched_waiting_sys(esdp->no, rq);
@@ -2795,7 +2891,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sched_wall_time_change(esdp, working = 0);
ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
erl_sys_schedule(1); /* Might give us something to do */
dt = erts_do_time_read_and_reset();
@@ -2841,7 +2936,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(esdp)) {
spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
goto tse_wait;
}
@@ -2863,7 +2958,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(esdp)) {
/*
* Not allowed to wait in erl_sys_schedule;
* do tse wait instead...
@@ -2990,7 +3085,7 @@ wake_scheduler(ErtsRunQueue *rq)
#ifdef ERTS_DIRTY_SCHEDULERS
static void
-wake_dirty_scheduler(ErtsRunQueue *rq)
+wake_dirty_schedulers(ErtsRunQueue *rq, int one)
{
ErtsSchedulerSleepInfo *ssi;
ErtsSchedulerSleepList *sl;
@@ -3000,9 +3095,27 @@ wake_dirty_scheduler(ErtsRunQueue *rq)
sl = &rq->sleepers;
erts_smp_spin_lock(&sl->lock);
ssi = sl->list;
- if (!ssi)
+ if (!ssi) {
erts_smp_spin_unlock(&sl->lock);
- else {
+ if (one)
+ wake_scheduler(rq);
+ } else if (one) {
+ erts_aint32_t flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
+ }
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
+
+ erts_smp_spin_unlock(&sl->lock);
+
+ ERTS_THR_MEMORY_BARRIER;
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+ } else {
sl->list = NULL;
erts_smp_spin_unlock(&sl->lock);
@@ -3154,7 +3267,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
if (runq) {
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
- wake_dirty_scheduler(runq);
+ wake_dirty_schedulers(runq, 1);
else
#endif
wake_scheduler(runq);
@@ -3452,6 +3565,7 @@ suspend_run_queue(ErtsRunQueue *rq)
}
static void scheduler_ix_resume_wake(Uint ix);
+static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi);
static ERTS_INLINE void
resume_run_queue(ErtsRunQueue *rq)
@@ -3478,7 +3592,10 @@ resume_run_queue(ErtsRunQueue *rq)
erts_smp_runq_unlock(rq);
- scheduler_ix_resume_wake(rq->ix);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ scheduler_ix_resume_wake(rq->ix);
}
typedef struct {
@@ -3509,20 +3626,28 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio_q;
ErtsRunQueue *to_rq;
ErtsMigrationPaths *mps;
- ErtsMigrationPath *mp;
+ ErtsMigrationPath *mp = NULL;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ {
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
+ }
/* Evacuate scheduled misc ops */
if (rq->misc.start) {
ErtsMiscOpList *start, *end;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
to_rq = mp->misc_evac_runq;
if (!to_rq)
return;
@@ -3551,6 +3676,9 @@ evacuate_run_queue(ErtsRunQueue *rq,
if (rq->ports.start) {
Port *prt;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+#endif
to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
if (!to_rq)
return;
@@ -3586,15 +3714,26 @@ evacuate_run_queue(ErtsRunQueue *rq,
erts_aint32_t state;
Process *proc;
int notify = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int requeue;
+#endif
to_rq = NULL;
- if (!mp->prio[prio_q].runq)
- return;
- if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
- return;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+#endif
+ {
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
+ }
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ requeue = 1;
+#endif
if (ERTS_PSFLG_BOUND & state) {
/* Bound processes get stuck here... */
proc->next = NULL;
@@ -3603,12 +3742,43 @@ evacuate_run_queue(ErtsRunQueue *rq,
else
sbpp->first = proc;
sbpp->last = proc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ requeue = 0;
+#endif
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
+ erts_aint32_t old;
+ old = erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+ /* assert that no other dirty flags are set */
+ ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
+ } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
+ erts_aint32_t old;
+ old = erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_IO_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ /* assert that no other dirty flags are set */
+ ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
+ }
+ if (requeue) {
+#else
else {
+#endif
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
erts_smp_runq_unlock(rq);
- to_rq = mp->prio[prio].runq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ /*
+ * dirty run queues evacuate only to run
+ * queue 0 during multi-scheduling blocking
+ */
+ to_rq = ERTS_RUNQ_IX(0);
+ else
+#endif
+ to_rq = mp->prio[prio].runq;
RUNQ_SET_RQ(&proc->run_queue, to_rq);
erts_smp_runq_lock(to_rq);
@@ -4722,13 +4892,20 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
rq->wakeup_other += (left_len*wo_reds
+ ERTS_WAKEUP_OTHER_FIXED_INC);
if (rq->wakeup_other > wakeup_other.limit) {
- int empty_rqs =
- erts_smp_atomic32_read_acqb(&no_empty_run_queues);
- if (flags & ERTS_RUNQ_FLG_PROTECTED)
- (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- if (empty_rqs != 0)
- wake_scheduler_on_empty_runq(rq);
- rq->wakeup_other = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting)
+ wake_dirty_schedulers(rq, 1);
+ else
+#endif
+ {
+ int empty_rqs =
+ erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ if (empty_rqs != 0)
+ wake_scheduler_on_empty_runq(rq);
+ rq->wakeup_other = 0;
+ }
}
}
rq->wakeup_other_reds = 0;
@@ -4889,11 +5066,17 @@ erts_early_init_scheduling(int no_schedulers)
wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
#endif
+#ifndef ERTS_SCHED_MIN_SPIN
sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
+#else
+ sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+ sched_busy_wait.tse = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+ sched_busy_wait.aux_work = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
+#endif
}
int
@@ -5009,8 +5192,12 @@ erts_sched_set_wake_cleanup_threshold(char *str)
static void
init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
{
- if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp))
+ if (!esdp)
awdp->sched_id = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp);
+#endif
else
awdp->sched_id = (int) esdp->no;
awdp->esdp = esdp;
@@ -5076,11 +5263,11 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
- esdp->dirty_no = (Uint) num;
+ ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num;
}
else {
esdp->no = (Uint) num;
- esdp->dirty_no = 0;
+ ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
}
#else
esdp->no = (Uint) num;
@@ -5111,7 +5298,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
}
void
-erts_init_scheduling(int no_schedulers, int no_schedulers_online)
+erts_init_scheduling(int no_schedulers, int no_schedulers_online
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
+ int no_dirty_io_schedulers
+#endif
+ )
{
int ix, n, no_ssi;
char *daww_ptr;
@@ -5133,6 +5325,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(no_dirty_cpu_schedulers <= no_schedulers);
+ ASSERT(no_dirty_cpu_schedulers >= 1);
+ ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online);
+ ASSERT(no_dirty_cpu_schedulers_online >= 1);
+#endif
/* Create and initialize run queues */
@@ -5169,10 +5367,9 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix))
erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
- rq->sleepers.list = NULL;
- }
+ rq->sleepers.list = NULL;
#endif
#endif
@@ -5236,6 +5433,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
n = (int) no_schedulers;
erts_no_schedulers = n;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers;
+ erts_no_dirty_io_schedulers = no_dirty_io_schedulers;
+#endif
/* Create and initialize scheduler sleep info */
#ifdef ERTS_SMP
@@ -5267,21 +5468,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
aligned_dirty_cpu_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
- erts_no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi;
erts_smp_atomic32_init_nob(&ssi->flags, 0);
- ssi->event = NULL; /* initialized in sched_thread_func */
+ ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
aligned_dirty_io_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
- erts_no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi;
erts_smp_atomic32_init_nob(&ssi->flags, 0);
- ssi->event = NULL; /* initialized in sched_thread_func */
+ ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
#endif
@@ -5314,8 +5515,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
erts_aligned_dirty_cpu_scheduler_data =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_DATA,
- erts_no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_CPU_RUNQ, NULL, 0);
@@ -5323,8 +5524,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
erts_aligned_dirty_io_scheduler_data =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_DATA,
- erts_no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_IO_RUNQ, NULL, 0);
@@ -5354,6 +5555,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
schdlr_sspnd.curr_online = no_schedulers;
schdlr_sspnd.msb.ongoing = 0;
erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_changing, 0);
+ schdlr_sspnd.dirty_cpu_online = no_dirty_cpu_schedulers_online;
+ schdlr_sspnd.dirty_cpu_curr_online = no_dirty_cpu_schedulers;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_active, no_dirty_cpu_schedulers);
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_changing, 0);
+ schdlr_sspnd.dirty_io_online = no_dirty_io_schedulers;
+ schdlr_sspnd.dirty_io_curr_online = no_dirty_io_schedulers;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_active, no_dirty_io_schedulers);
+#endif
schdlr_sspnd.msb.procs = NULL;
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
@@ -5379,6 +5590,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
 	schdlr_sspnd.curr_online *= 2; /* Bootstrapping... */
ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
| ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ schdlr_sspnd.dirty_cpu_wait_curr_online = no_dirty_cpu_schedulers_online;
+ schdlr_sspnd.dirty_cpu_curr_online *= 2;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ }
+
+ schdlr_sspnd.dirty_io_wait_curr_online = no_dirty_io_schedulers;
+ schdlr_sspnd.dirty_io_curr_online *= 2;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+#endif
erts_smp_atomic32_init_nob(&doing_sys_schedule, 0);
@@ -5497,9 +5723,16 @@ free_proxy_proc(Process *proxy)
erts_free(ERTS_ALC_T_PROC, proxy);
}
+#define ERTS_ENQUEUE_NOT 0
+#define ERTS_ENQUEUE_NORMAL_QUEUE 1
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
+#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3
+#endif
static ERTS_INLINE int
-check_enqueue_in_prio_queue(erts_aint32_t *prq_prio_p,
+check_enqueue_in_prio_queue(Process *c_p,
+ erts_aint32_t *prq_prio_p,
erts_aint32_t *newp,
erts_aint32_t actual)
{
@@ -5511,56 +5744,105 @@ check_enqueue_in_prio_queue(erts_aint32_t *prq_prio_p,
*prq_prio_p = aprio;
#ifdef ERTS_DIRTY_SCHEDULERS
- if (!(actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))) {
+ if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
+ /*
+	 * If we have system tasks of a priority higher
+	 * than or equal to the user priority, we enqueue
+	 * on the ordinary run-queue and take care of
+	 * those system tasks first.
+ */
+ if (actual & ERTS_PSFLG_ACTIVE_SYS) {
+ erts_aint32_t uprio, stprio, qmask;
+ uprio = (actual >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
+ if (aprio < uprio)
+ goto enqueue_normal_runq; /* system tasks with higher prio */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ qmask = c_p->sys_task_qs->qmask;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ switch (qmask & -qmask) {
+ case MAX_BIT:
+ stprio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ stprio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ stprio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ stprio = PRIORITY_LOW;
+ break;
+ default:
+ stprio = PRIORITY_LOW+1;
+ break;
+ }
+ if (stprio <= uprio)
+ goto enqueue_normal_runq; /* system tasks with higher prio */
+ }
+
+ /* Enqueue in dirty run queue if not already enqueued */
+ if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
+ return ERTS_ENQUEUE_NOT; /* already in queue */
+ if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) {
+ *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
+ if (actual & ERTS_PSFLG_IN_RUNQ)
+ return -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* use proxy */
+ *newp |= ERTS_PSFLG_IN_RUNQ;
+ return ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
+ }
+ *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
+ if (actual & ERTS_PSFLG_IN_RUNQ)
+ return -ERTS_ENQUEUE_DIRTY_IO_QUEUE; /* use proxy */
+ *newp |= ERTS_PSFLG_IN_RUNQ;
+ return ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+ }
+
+ enqueue_normal_runq:
#endif
- max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
- max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
- max_qbit &= -max_qbit;
- /*
- * max_qbit now either contain bit set for highest prio queue or a bit
- * out of range (which will have a value larger than valid range).
- */
+ max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+ /*
+     * max_qbit now either contains the bit set for the highest prio queue or
+     * a bit out of range (which will have a value larger than the valid range).
+ */
- if (qbit >= max_qbit)
- return 0; /* Already queued in higher or equal prio */
+ if (qbit >= max_qbit)
+ return ERTS_ENQUEUE_NOT; /* Already queued in higher or equal prio */
- /* Need to enqueue (if already enqueued, it is in lower prio) */
- *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
+ /* Need to enqueue (if already enqueued, it is in lower prio) */
+ *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
- if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
- != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
- /*
- * Process struct already enqueued, or actual prio not
- * equal to user prio, i.e., enqueue using proxy.
- */
- return -1;
- }
-#ifdef ERTS_DIRTY_SCHEDULERS
- } else {
- if (actual & ERTS_PSFLG_DIRTY_CPU_PROC)
- *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
- else
- *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
+ if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
+ != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
+ /*
+ * Process struct already enqueued, or actual prio not
+ * equal to user prio, i.e., enqueue using proxy.
+ */
+ return -ERTS_ENQUEUE_NORMAL_QUEUE;
}
-#endif
/*
* Enqueue using process struct.
*/
*newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
*newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
- return 1;
+ return ERTS_ENQUEUE_NORMAL_QUEUE;
}
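
The priority selection above leans twice on a two's-complement identity: x & -x isolates the lowest set bit, and since the max-priority queue occupies the lowest bit, that bit names the highest-priority non-empty queue. A minimal standalone sketch; the bit layout is assumed to mirror MAX_BIT..LOW_BIT, these are not the actual ERTS definitions:

#include <stdio.h>

/* Assumed layout: the lowest bit is the highest priority, as the
 * switch over (qmask & -qmask) above expects. */
#define MAX_BIT    (1u << 0)
#define HIGH_BIT   (1u << 1)
#define NORMAL_BIT (1u << 2)
#define LOW_BIT    (1u << 3)

static unsigned lowest_set_bit(unsigned x)
{
    /* -x = ~x + 1, so every bit above the lowest set bit differs from
     * x while that bit itself matches; the AND keeps exactly it. */
    return x & -x;
}

int main(void)
{
    unsigned qmask = HIGH_BIT | LOW_BIT; /* high- and low-prio tasks queued */
    printf("%u (HIGH_BIT is %u)\n", lowest_set_bit(qmask), HIGH_BIT);
    return 0;
}
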
/*
- * scheduler_out_process() return with c_rq locked.
+ * schedule_out_process() returns with c_rq locked.
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy)
{
erts_aint32_t a, e, n, enq_prio = -1;
- int res = 0;
int enqueue; /* < 0 -> use proxy */
+ Process* sched_p;
+ ErtsRunQueue* runq;
+#ifdef ERTS_SMP
+ int check_emigration_need;
+#endif
a = state;
@@ -5569,20 +5851,20 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
- enqueue = 0;
+ enqueue = ERTS_ENQUEUE_NOT;
n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
if (a & ERTS_PSFLG_ACTIVE_SYS
|| (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
- enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a);
+ enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
- if (!enqueue) {
-
+ switch (enqueue) {
+ case ERTS_ENQUEUE_NOT:
if (erts_system_profile_flags.runnable_procs) {
if (!(a & ERTS_PSFLG_ACTIVE_SYS)
@@ -5595,60 +5877,76 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
if (proxy)
free_proxy_proc(proxy);
- }
- else {
- Process *sched_p;
- ErtsRunQueue *runq;
- ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+ erts_smp_runq_lock(c_rq);
+ return 0;
#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- if (ERTS_PSFLG_DIRTY_CPU_PROC & a)
- runq = ERTS_DIRTY_CPU_RUNQ;
- else if (ERTS_PSFLG_DIRTY_IO_PROC & a)
- runq = ERTS_DIRTY_IO_RUNQ;
- else
+ case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ runq = ERTS_DIRTY_CPU_RUNQ;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY_CPU(runq->scheduler));
+#ifdef ERTS_SMP
+ check_emigration_need = 0;
+#endif
+ break;
+
+ case ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ runq = ERTS_DIRTY_IO_RUNQ;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(runq->scheduler));
+#ifdef ERTS_SMP
+ check_emigration_need = 0;
+#endif
+ break;
#endif
#endif
- runq = erts_get_runq_proc(p);
- if (enqueue < 0)
- sched_p = make_proxy_proc(proxy, p, enq_prio);
- else {
- sched_p = p;
- if (proxy)
- free_proxy_proc(proxy);
- }
+ default:
+ ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
+ || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
+ runq = erts_get_runq_proc(p);
#ifdef ERTS_SMP
- if (!(ERTS_PSFLG_BOUND & n)
-#ifdef ERTS_DIRTY_SCHEDULERS
- && !(n & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
+ check_emigration_need = !(ERTS_PSFLG_BOUND & n);
#endif
- ) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
- if (new_runq) {
- RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
- runq = new_runq;
- }
+ break;
+ }
+
+ ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+
+ if (enqueue < 0)
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ else {
+ sched_p = p;
+ if (proxy)
+ free_proxy_proc(proxy);
+ }
+
+#ifdef ERTS_SMP
+ if (check_emigration_need) {
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
+ if (new_runq) {
+ RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
+ runq = new_runq;
}
+ }
#endif
- ASSERT(runq);
- res = 1;
- erts_smp_runq_lock(runq);
+ ASSERT(runq);
+
+ erts_smp_runq_lock(runq);
- /* Enqueue the process */
- enqueue_process(runq, (int) enq_prio, sched_p);
+ /* Enqueue the process */
+ enqueue_process(runq, (int) enq_prio, sched_p);
- if (runq == c_rq)
- return res;
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(runq);
- }
+ if (runq == c_rq)
+ return 1;
+ erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(runq);
erts_smp_runq_lock(c_rq);
- return res;
+ return 1;
}
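
For reference, a standalone sketch of the result convention the switch above relies on: the magnitude of the ERTS_ENQUEUE_* code selects the run queue, and a negative sign additionally requests enqueueing via a proxy struct. The code values are the ones defined earlier in this diff:

#include <stdio.h>

#define ERTS_ENQUEUE_NOT             0
#define ERTS_ENQUEUE_NORMAL_QUEUE    1
#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
#define ERTS_ENQUEUE_DIRTY_IO_QUEUE  3

int main(void)
{
    int enqueue = -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* as returned above */
    int kind = enqueue < 0 ? -enqueue : enqueue; /* which run queue */
    int use_proxy = enqueue < 0;                 /* enqueue via proxy proc */
    printf("kind=%d proxy=%d\n", kind, use_proxy);
    return 0;
}
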
static ERTS_INLINE void
@@ -5704,7 +6002,7 @@ change_proc_schedule_state(Process *p,
erts_aint32_t e;
n = e = a;
- enqueue = 0;
+ enqueue = ERTS_ENQUEUE_NOT;
if (a & ERTS_PSFLG_FREE)
break; /* We don't want to schedule free processes... */
@@ -5725,13 +6023,13 @@ change_proc_schedule_state(Process *p,
* process may be in a run queue via proxy, need
* further inspection...
*/
- enqueue = check_enqueue_in_prio_queue(enq_prio_p, &n, a);
+ enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a);
}
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
- if (enqueue == 0 && n == a)
+ if (enqueue == ERTS_ENQUEUE_NOT && n == a)
break;
}
@@ -5765,7 +6063,7 @@ schedule_process(Process *p, erts_aint32_t in_state)
ERTS_PSFLG_ACTIVE,
&state,
&enq_prio);
- if (enqueue)
+ if (enqueue != ERTS_ENQUEUE_NOT)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
enq_prio);
@@ -5792,14 +6090,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
if (a & ERTS_PSFLG_FREE)
return; /* We don't want to schedule free processes... */
- enqueue = 0;
+ enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
- enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a);
+ enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
- if (a == n && !enqueue)
+ if (a == n && enqueue == ERTS_ENQUEUE_NOT)
goto cleanup;
}
@@ -5815,7 +6113,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
}
- if (enqueue) {
+ if (enqueue != ERTS_ENQUEUE_NOT) {
Process *sched_p;
if (enqueue > 0)
sched_p = p;
@@ -5938,6 +6236,12 @@ static void
scheduler_ix_resume_wake(Uint ix)
{
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+}
+
+static void
+scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
+{
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING
| ERTS_SSI_FLG_WAITING
@@ -6028,12 +6332,20 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
erts_aint32_t flgs;
erts_aint32_t changing;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ long no = (long) (ERTS_SCHEDULER_IS_DIRTY(esdp)
+ ? ERTS_DIRTY_SCHEDULER_NO(esdp)
+ : esdp->no);
+#else
long no = (long) esdp->no;
+#endif
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
int curr_online = 1;
@@ -6041,21 +6353,305 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_aint32_t aux_work;
int thr_prgr_active = 1;
ErtsStuckBoundProcesses sbp = {NULL, NULL};
+ int* ss_onlinep;
+ int* ss_curr_onlinep;
+ int* ss_wait_curr_onlinep;
+ long* ss_wait_activep;
+ long ss_wait_active_target;
+ erts_smp_atomic32_t* ss_changingp;
+ erts_smp_atomic32_t* ss_activep;
/*
* Schedulers may be suspended in two different ways:
* - A scheduler may be suspended since it is not online.
* All schedulers with scheduler ids greater than
- * schdlr_sspnd.online are suspended.
+     * schdlr_sspnd.online are suspended; the same applies to dirty
+     * schedulers relative to schdlr_sspnd.dirty_cpu_online and
+     * schdlr_sspnd.dirty_io_online.
* - Multi scheduling is blocked. All schedulers except the
- * scheduler with scheduler id 1 are suspended.
+ * scheduler with scheduler id 1 are suspended, and all
+ * dirty CPU and dirty I/O schedulers are suspended.
*
* Regardless of why a scheduler is suspended, it ends up here.
*/
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) || no != 1);
+
#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
+ erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_smp_runq_lock(esdp->run_queue);
+ }
+ if (ongoing_multi_scheduling_block())
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ } else
+#endif
+ evacuate_run_queue(esdp->run_queue, &sbp);
+
+ erts_smp_runq_unlock(esdp->run_queue);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ erts_sched_check_cpu_bind_prep_suspend(esdp);
+
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_inactive);
+
+ sched_wall_time_change(esdp, 0);
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_cpu_active);
+ ASSERT(active_schedulers >= 0);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
+ ss_onlinep = &schdlr_sspnd.dirty_cpu_online;
+ ss_curr_onlinep = &schdlr_sspnd.dirty_cpu_curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.dirty_cpu_wait_curr_online;
+ ss_changingp = &schdlr_sspnd.dirty_cpu_changing;
+ ss_wait_activep = &schdlr_sspnd.msb.dirty_cpu_wait_active;
+ ss_activep = &schdlr_sspnd.dirty_cpu_active;
+ } else {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_io_active);
+ ASSERT(active_schedulers >= 0);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing);
+ ss_onlinep = &schdlr_sspnd.dirty_io_online;
+ ss_curr_onlinep = &schdlr_sspnd.dirty_io_curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.dirty_io_wait_curr_online;
+ ss_changingp = &schdlr_sspnd.dirty_io_changing;
+ ss_wait_activep = &schdlr_sspnd.msb.dirty_io_wait_active;
+ ss_activep = &schdlr_sspnd.dirty_io_active;
+ }
+ ss_wait_active_target = 0;
+ }
+ else
+#endif
+ {
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+ ss_onlinep = &schdlr_sspnd.online;
+ ss_curr_onlinep = &schdlr_sspnd.curr_online;
+ ss_wait_curr_onlinep = &schdlr_sspnd.wait_curr_online;
+ ss_changingp = &schdlr_sspnd.changing;
+ ss_wait_activep = &schdlr_sspnd.msb.wait_active;
+ ss_activep = &schdlr_sspnd.active;
+ ss_wait_active_target = 1;
+ }
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == *ss_wait_activep)
+ wake = 1;
+ if (active_schedulers == ss_wait_active_target) {
+ changing = erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
+ }
+
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > *ss_onlinep && curr_online) {
+ (*ss_curr_onlinep)--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= *ss_onlinep && !curr_online) {
+ (*ss_curr_onlinep)++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && *ss_curr_onlinep == *ss_wait_curr_onlinep)
+ wake = 1;
+ if (*ss_onlinep == *ss_curr_onlinep) {
+ changing = erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
+ }
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
+ }
+
+ if (curr_online && !ongoing_multi_scheduling_block()) {
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ while (1) {
+ erts_aint32_t qmask;
+ erts_aint32_t flgs;
+
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work|qmask) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
+ (aux_work && erts_thr_progress_update(esdp)))
+ erts_thr_progress_leader_update(esdp);
+ if (qmask) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_smp_runq_lock(esdp->run_queue);
+ if (ongoing_multi_scheduling_block())
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ } else
+#endif
+ {
+ erts_smp_runq_lock(esdp->run_queue);
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_smp_runq_unlock(esdp->run_queue);
+ }
+ }
+ }
+
+ if (!aux_work) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
#endif
+ {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ }
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ erts_thr_progress_finalize_wait(esdp);
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+ }
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ }
+
+ active_schedulers = erts_smp_atomic32_inc_read_nob(ss_activep);
+ changing = erts_smp_atomic32_read_nob(ss_changingp);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && *ss_onlinep == active_schedulers) {
+ erts_smp_atomic32_read_band_nob(ss_changingp,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ ASSERT(no <= *ss_onlinep);
+ ASSERT(!ongoing_multi_scheduling_block());
+
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ ASSERT(curr_online);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
+
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ }
+
+ erts_smp_runq_lock(esdp->run_queue);
+ non_empty_runq(esdp->run_queue);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+#endif
+ {
+ schedule_bound_processes(esdp->run_queue, &sbp);
+
+ erts_sched_check_cpu_bind_post_suspend(esdp);
+ }
+}
+
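The dirty-aware suspend path above folds three scheduler kinds into a single wait loop by selecting the relevant counter set through pointers up front. A standalone sketch of that pattern, with hypothetical names standing in for the schdlr_sspnd fields:

#include <stdio.h>

struct counters { int online, curr_online, wait_curr_online; };

int main(void)
{
    struct counters normal    = {4, 4, 4};
    struct counters dirty_cpu = {2, 2, 2};
    int is_dirty_cpu = 1;     /* assumed: scheduler serves a dirty CPU queue */

    /* Select the counter set once... */
    struct counters *ss = is_dirty_cpu ? &dirty_cpu : &normal;

    /* ...so the shared suspend/wait code only dereferences pointers
     * and needs no further scheduler-kind branches. */
    printf("online=%d curr=%d wait=%d\n",
           ss->online, ss->curr_online, ss->wait_curr_online);
    return 0;
}
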
+#else /* !ERTS_DIRTY_SCHEDULERS */
+
+static void
+suspend_scheduler(ErtsSchedulerData *esdp)
+{
+ erts_aint32_t flgs;
+ erts_aint32_t changing;
+ long no = (long) esdp->no;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long active_schedulers;
+ int curr_online = 1;
+ int wake = 0;
+ erts_aint32_t aux_work;
+ int thr_prgr_active = 1;
+ ErtsStuckBoundProcesses sbp = {NULL, NULL};
+
+ /*
+ * Schedulers may be suspended in two different ways:
+ * - A scheduler may be suspended since it is not online.
+ * All schedulers with scheduler ids greater than
+ * schdlr_sspnd.online are suspended.
+ * - Multi scheduling is blocked. All schedulers except the
+ * scheduler with scheduler id 1 are suspended.
+ *
+ * Regardless of why a scheduler is suspended, it ends up here.
+ */
+
ASSERT(no != 1);
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -6219,6 +6815,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_sched_check_cpu_bind_post_suspend(esdp);
}
+#endif
+
ErtsSchedSuspendResult
erts_schedulers_state(Uint *total,
Uint *online,
@@ -6230,40 +6828,315 @@ erts_schedulers_state(Uint *total,
{
int res = ERTS_SCHDLR_SSPND_EINVAL;
erts_aint32_t changing;
- if (total) {
- ASSERT(online);
- ASSERT(active);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
- res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
- else {
- *active = *online = schdlr_sspnd.online;
- if (ongoing_multi_scheduling_block())
- *active = 1;
- res = ERTS_SCHDLR_SSPND_DONE;
- }
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- *total = erts_no_schedulers;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
+#endif
+ if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ else {
+ if (active)
+ *active = schdlr_sspnd.online;
+ if (online)
+ *online = schdlr_sspnd.online;
+ if (ongoing_multi_scheduling_block() && active)
+ *active = 1;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (dirty_cpu_online)
+ *dirty_cpu_online = schdlr_sspnd.dirty_cpu_online;
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
}
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (total)
+ *total = erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
if (dirty_cpu)
*dirty_cpu = erts_no_dirty_cpu_schedulers;
- if (dirty_cpu_online)
- *dirty_cpu_online = erts_no_dirty_cpu_schedulers_online;
if (dirty_io)
*dirty_io = erts_no_dirty_io_schedulers;
-#else
- if (dirty_cpu)
- *dirty_cpu = 0;
- if (dirty_cpu_online)
- *dirty_cpu_online = 0;
- if (dirty_io)
- *dirty_io = 0;
#endif
return res;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+ErtsSchedSuspendResult
+erts_set_schedulers_online(Process *p,
+ ErtsProcLocks plocks,
+ Sint new_no,
+ Sint *old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int dirty_only
+#endif
+ )
+{
+ ErtsSchedulerData *esdp;
+ int ix, res = -1, no, have_unlocked_plocks, end_wait;
+ erts_aint32_t changing = 0;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerSleepInfo* ssi;
+ int dirty_no, change_dirty;
+#endif
+
+ if (new_no < 1)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (dirty_only && erts_no_dirty_cpu_schedulers < new_no)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+#endif
+ else if (erts_no_schedulers < new_no)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+
+ esdp = ERTS_PROC_GET_SCHDATA(p);
+ end_wait = 0;
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ have_unlocked_plocks = 0;
+ no = (int) new_no;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(schdlr_sspnd.dirty_cpu_online <= erts_no_dirty_cpu_schedulers);
+ if (dirty_only) {
+ if (no > schdlr_sspnd.online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ return ERTS_SCHDLR_SSPND_EINVAL;
+ }
+ dirty_no = no;
+ } else {
+ /*
+ * Adjust the number of dirty CPU schedulers online relative to the
+ * adjustment made to the number of normal schedulers online.
+ */
+ int total_pct = erts_no_dirty_cpu_schedulers*100/erts_no_schedulers;
+ int onln_pct = no*total_pct/schdlr_sspnd.online;
+ dirty_no = schdlr_sspnd.dirty_cpu_online*onln_pct/100;
+ if (dirty_no == 0)
+ dirty_no = 1;
+ ASSERT(dirty_no <= erts_no_dirty_cpu_schedulers);
+ }
+#endif
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing);
+#endif
+ if (changing) {
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ }
+ else {
+ int online = *old_no = schdlr_sspnd.online;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ int dirty_online = schdlr_sspnd.dirty_cpu_online;
+
+ if (dirty_only) {
+ *old_no = schdlr_sspnd.dirty_cpu_online;
+ if (dirty_no == schdlr_sspnd.dirty_cpu_online) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ change_dirty = 1;
+ } else {
+#endif
+ if (no == schdlr_sspnd.online) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ dirty_only = 1;
+ if (dirty_no == schdlr_sspnd.dirty_cpu_online)
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else
+ change_dirty = 1;
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else
+ change_dirty = (dirty_no != schdlr_sspnd.dirty_cpu_online);
+ }
+#endif
+ if (res == -1)
+ {
+ int increase = (no > online);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!dirty_only) {
+#endif
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ schdlr_sspnd.online = no;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ } else
+ increase = (dirty_no > dirty_online);
+ if (change_dirty) {
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ schdlr_sspnd.dirty_cpu_online = dirty_no;
+ }
+#endif
+ if (increase) {
+ int ix;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!dirty_only) {
+#endif
+ schdlr_sspnd.wait_curr_online = no;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ change_no_used_runqs(no);
+
+ for (ix = online; ix < no; ix++)
+ resume_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ }
+ if (change_dirty) {
+ schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
+ schdlr_sspnd.dirty_cpu_wait_curr_online);
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = dirty_online; ix < dirty_no; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_sched_poke(ssi);
+ }
+ } else {
+ for (ix = dirty_online; ix < dirty_no; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ }
+ }
+#endif
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else /* if (no < online) */ {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (change_dirty) {
+ schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no;
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online !=
+ schdlr_sspnd.dirty_cpu_wait_curr_online);
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = dirty_no; ix < dirty_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_sched_poke(ssi);
+ }
+ } else {
+ for (ix = dirty_no; ix < dirty_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ }
+ }
+ if (dirty_only) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else
+#endif
+ {
+ if (p->scheduler_data->no <= no) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ schdlr_sspnd.wait_curr_online = no;
+ }
+ else {
+ /*
+ * Yield! Current process needs to migrate
+ * before bif returns.
+ */
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE;
+ schdlr_sspnd.wait_curr_online = no+1;
+ }
+
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = no; ix < online; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+
+ change_no_used_runqs(no);
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
+ }
+ }
+ }
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (change_dirty) {
+ while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ if (!dirty_only)
+#endif
+ {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (plocks && !have_unlocked_plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+ end_wait = 1;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ }
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(schdlr_sspnd.dirty_cpu_online <= schdlr_sspnd.online);
+ if (!dirty_only)
+#endif
+ {
+ if (end_wait) {
+ erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_active(esdp, 1);
+ }
+ if (have_unlocked_plocks)
+ erts_smp_proc_lock(p, plocks);
+ }
+
+ return res;
+}
+
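The proportional adjustment of dirty_cpu_online in the function above can be checked with plain integer arithmetic. A standalone sketch with illustrative counts (not defaults):

#include <stdio.h>

int main(void)
{
    /* Illustrative values only. */
    int no_schedulers           = 16; /* normal schedulers (total) */
    int no_dirty_cpu_schedulers = 8;
    int online                  = 16; /* normal schedulers online */
    int dirty_cpu_online        = 8;
    int no                      = 8;  /* requested normal online */

    /* Same integer arithmetic as in erts_set_schedulers_online(). */
    int total_pct = no_dirty_cpu_schedulers * 100 / no_schedulers; /* 50 */
    int onln_pct  = no * total_pct / online;                       /* 25 */
    int dirty_no  = dirty_cpu_online * onln_pct / 100;             /* 2 */
    if (dirty_no == 0)
        dirty_no = 1;

    printf("new dirty_cpu_online = %d\n", dirty_no);
    return 0;
}
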
+#else /* !ERTS_DIRTY_SCHEDULERS */
+
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
@@ -6352,10 +7225,6 @@ erts_set_schedulers_online(Process *p,
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
wake_scheduler(rq);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- wake_dirty_scheduler(ERTS_DIRTY_CPU_RUNQ);
- wake_dirty_scheduler(ERTS_DIRTY_IO_RUNQ);
-#endif
}
}
@@ -6396,15 +7265,24 @@ erts_set_schedulers_online(Process *p,
return res;
}
+#endif
+
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
- int ix, res, have_unlocked_plocks = 0;
+ int ix, res, have_unlocked_plocks = 0, online;
erts_aint32_t changing;
ErtsProcList *plp;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerSleepInfo* ssi;
+#endif
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing));
+#endif
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
@@ -6414,10 +7292,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
p->flags |= F_HAVE_BLCKD_MSCHED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 0);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) == 0);
+#endif
ASSERT(p->scheduler_data->no == 1);
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- }
- else {
+ } else {
int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
@@ -6429,6 +7310,35 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 1);
+ ASSERT(!(erts_smp_atomic32_read_nob(&ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0)->flags)
+ & ERTS_SSI_FLG_SUSPENDED));
+ schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
+ != schdlr_sspnd.msb.dirty_cpu_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+
+ schdlr_sspnd.msb.dirty_io_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
+ != schdlr_sspnd.msb.dirty_io_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+#endif
ASSERT(p->scheduler_data->no == 1);
}
else {
@@ -6447,6 +7357,37 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ schdlr_sspnd.msb.dirty_cpu_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active)
+ != schdlr_sspnd.msb.dirty_cpu_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ ASSERT(schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_online);
+
+ schdlr_sspnd.msb.dirty_io_wait_active = 0;
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active)
+ != schdlr_sspnd.msb.dirty_io_wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ ASSERT(schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_online);
+#endif
change_no_used_runqs(1);
for (ix = 1; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
@@ -6487,6 +7428,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
& erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
@@ -6527,12 +7469,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
p->flags &= ~F_HAVE_BLCKD_MSCHED;
schdlr_sspnd.msb.ongoing = 0;
if (schdlr_sspnd.online == 1) {
- /* No schedulers to resume */
+ /* No normal schedulers to resume */
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else {
- int online = schdlr_sspnd.online;
+ online = schdlr_sspnd.online;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
@@ -6547,6 +7489,27 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
for (ix = online; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
+ schdlr_sspnd.msb.dirty_cpu_wait_active = schdlr_sspnd.dirty_cpu_online;
+ for (ix = 0; ix < schdlr_sspnd.dirty_cpu_online; ix++) {
+ ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
+ schdlr_sspnd.msb.dirty_io_wait_active = erts_no_dirty_io_schedulers;
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
+ ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ scheduler_ssi_resume_wake(ssi);
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
+ }
+ wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
+#endif
res = ERTS_SCHDLR_SSPND_DONE;
}
}
@@ -6673,7 +7636,11 @@ sched_thread_func(void *vesdp)
erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
~ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (no != 1)
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+#else
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+#endif
}
if (no == 1) {
@@ -6710,7 +7677,9 @@ sched_dirty_cpu_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = esdp->dirty_no;
+ Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
+ ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER;
+ ASSERT(no != 0);
ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
callbacks.wakeup = thr_prgr_wakeup;
@@ -6738,6 +7707,24 @@ sched_dirty_cpu_thread_func(void *vesdp)
#endif
erts_thread_init_float();
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
+
+ if (--schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_wait_curr_online) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (no != 1)
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ }
+
+ if (no == 1) {
+ while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
process_main();
/* No schedulers should *ever* terminate */
erl_exit(ERTS_ABORT_EXIT,
@@ -6751,7 +7738,9 @@ sched_dirty_io_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = esdp->dirty_no;
+ Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
+ ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_IO_SCHEDULER;
+ ASSERT(no != 0);
ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
callbacks.wakeup = thr_prgr_wakeup;
@@ -6779,6 +7768,24 @@ sched_dirty_io_thread_func(void *vesdp)
#endif
erts_thread_init_float();
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
+
+ if (--schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_wait_curr_online) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_io_changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (no != 1)
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ }
+
+ if (no == 1) {
+ while (schdlr_sspnd.dirty_io_curr_online != schdlr_sspnd.dirty_io_wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
process_main();
/* No schedulers should *ever* terminate */
erl_exit(ERTS_ABORT_EXIT,
@@ -6795,16 +7802,23 @@ void
erts_start_schedulers(void)
{
int res = 0;
- Uint actual = 0;
+ Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
opts.detached = 1;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ opts.name = malloc(80);
+#endif
+
#ifdef ERTS_SMP
if (erts_runq_supervision_interval) {
opts.suggested_stack_size = 16;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "runq_supervisor");
+#endif
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
erl_exit(1, "Failed to create run-queue supervision event\n");
@@ -6826,17 +7840,27 @@ erts_start_schedulers(void)
res = ENOTSUP;
}
- while (actual < wanted) {
+ for (actual = 0; actual < wanted; actual++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
- actual++;
- ASSERT(actual == esdp->no);
- res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
+
+ ASSERT(actual == esdp->no - 1);
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "scheduler_%d", actual + 1);
+#endif
+
+#ifdef __OSE__
+ /* This should be done in the bind strategy */
+ opts.coreNo = (actual+1) % ose_num_cpus();
+#endif
+
+ res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
+
if (res != 0) {
- actual--;
- break;
+ break;
}
}
-
+
erts_no_schedulers = actual;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -6845,12 +7869,18 @@ erts_start_schedulers(void)
int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
+#endif
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
+#endif
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -6861,6 +7891,14 @@ erts_start_schedulers(void)
ERTS_THR_MEMORY_BARRIER;
+#ifdef ETHR_HAVE_THREAD_NAMES
+ sprintf(opts.name, "aux");
+#endif
+
+#ifdef __OSE__
+ opts.coreNo = 0;
+#endif /* __OSE__ */
+
res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
if (res != 0)
erl_exit(1, "Failed to create aux thread\n");
@@ -6880,6 +7918,10 @@ erts_start_schedulers(void)
actual, actual == 1 ? " was" : "s were");
erts_send_error_to_logger_nogl(dsbufp);
}
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ free(opts.name);
+#endif
}
#endif /* ERTS_SMP */
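
The thread naming above reuses a single 80-byte heap buffer across every ethr_thr_create() call and frees it once at the end. A standalone sketch of that pattern; snprintf is used here for bounds safety, where the diff itself uses sprintf:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char *name = malloc(80);      /* one buffer reused for every thread */
    int i;

    if (!name)
        return 1;
    for (i = 1; i <= 3; i++) {
        snprintf(name, 80, "scheduler_%d", i);
        /* ethr_thr_create() consumes opts.name here in the real code */
        puts(name);
    }
    free(name);                   /* freed once, after all creates */
    return 0;
}
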
@@ -7995,7 +9037,6 @@ Process *schedule(Process *p, int calls)
|| flags & ERTS_RUNQ_FLG_NONEMPTY);
if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
-
if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
suspend_scheduler(esdp);
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
@@ -8006,21 +9047,31 @@ Process *schedule(Process *p, int calls)
erts_sched_check_cpu_bind(esdp);
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (ERTS_SCHEDULER_IS_DIRTY(esdp)
+ && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED))
+ suspend_scheduler(esdp);
+#endif
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ {
erts_aint32_t aux_work;
- int leader_update = erts_thr_progress_update(esdp);
+ int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0
+ : erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work | leader_update) {
+ if (aux_work | leader_update | ERTS_SCHED_FAIR) {
erts_smp_runq_unlock(rq);
if (leader_update)
erts_thr_progress_leader_update(esdp);
+ else if (ERTS_SCHED_FAIR)
+ ERTS_SCHED_FAIR_YIELD();
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_smp_runq_lock(rq);
}
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || !erts_thr_progress_is_blocking());
}
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -8035,6 +9086,17 @@ Process *schedule(Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) {
+ /*
+ * TODO: if halt in progress, need to put the dirty scheduler
+ * to sleep somewhere around here to prevent it from picking up
+ * new work
+ */
+ }
+ else
+#endif
+
if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
|| (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
/* Prepare for scheduler wait */
@@ -8077,7 +9139,8 @@ Process *schedule(Process *p, int calls)
goto check_activities_to_run;
}
else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (fcalls > input_reductions && prepare_for_sys_schedule())) {
+ (fcalls > input_reductions &&
+ prepare_for_sys_schedule(esdp))) {
/*
* Schedule system-level activities.
*/
@@ -8182,11 +9245,15 @@ Process *schedule(Process *p, int calls)
+ ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
#ifdef ERTS_DIRTY_SCHEDULERS
- /* if a non-dirty scheduler picks up a process marked as already being
- in a dirty run queue, just drop it and go get another process */
- if (state & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) &&
- !ERTS_SCHEDULER_IS_DIRTY(esdp))
- goto pick_next_process;
+ ASSERT((state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) !=
+ (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC));
+ if (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
+ ASSERT((ERTS_SCHEDULER_IS_DIRTY_CPU(esdp) && (state & ERTS_PSFLG_DIRTY_CPU_PROC)) ||
+ (ERTS_SCHEDULER_IS_DIRTY_IO(esdp) && (state & ERTS_PSFLG_DIRTY_IO_PROC)));
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !(state & ERTS_PSFLG_ACTIVE_SYS))
+ goto pick_next_process;
+ state &= ~(ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
+ }
#endif
if (!(state & ERTS_PSFLG_PROXY))
@@ -8322,7 +9389,11 @@ Process *schedule(Process *p, int calls)
if (state & ERTS_PSFLG_RUNNING_SYS) {
reds -= execute_sys_tasks(p, &state, reds);
- if (reds <= 0) {
+ if (reds <= 0
+#ifdef ERTS_DIRTY_SCHEDULERS
+ || (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))
+#endif
+ ) {
p->fcalls = reds;
goto sched_out_proc;
}
@@ -9379,7 +10450,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Check for errors.
*/
- if (is_not_atom(mod) || is_not_atom(func) || ((arity = list_length(args)) < 0)) {
+ if (is_not_atom(mod) || is_not_atom(func) || ((arity = erts_list_length(args)) < 0)) {
so->error_code = BADARG;
goto error;
}
@@ -10087,7 +11158,7 @@ save_pending_exiter(Process *p)
erts_smp_runq_unlock(rq);
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- wake_dirty_scheduler(rq);
+ wake_dirty_schedulers(rq, 0);
else
#endif
wake_scheduler(rq);
@@ -11148,6 +12219,10 @@ void erl_halt(int code)
if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress,
erts_no_schedulers,
-1)) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1;
+ ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1;
+#endif
erts_halt_code = code;
notify_reap_ports_relb();
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index dcb9251d0d..ed6dadbffa 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -109,7 +109,6 @@ extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
#ifdef ERTS_DIRTY_SCHEDULERS
extern Uint erts_no_dirty_cpu_schedulers;
-extern Uint erts_no_dirty_cpu_schedulers_online;
extern Uint erts_no_dirty_io_schedulers;
#endif
extern Uint erts_no_run_queues;
@@ -544,6 +543,21 @@ typedef struct {
#endif
} ErtsAuxWorkData;
+#ifdef ERTS_DIRTY_SCHEDULERS
+typedef enum {
+ ERTS_DIRTY_CPU_SCHEDULER,
+ ERTS_DIRTY_IO_SCHEDULER
+} ErtsDirtySchedulerType;
+
+typedef union {
+ struct {
+ ErtsDirtySchedulerType type: 1;
+ unsigned num: 31;
+ } s;
+ Uint no;
+} ErtsDirtySchedId;
+#endif
+
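The union above packs a 1-bit scheduler type and a 31-bit number into one word, so dirty_no.s.num != 0 doubles as the dirty-scheduler test used by the macros later in this hunk. A standalone sketch of the layout, with plain unsigned bit-fields assumed in place of Uint and ErtsDirtySchedulerType:

#include <stdio.h>

typedef union {
    struct {
        unsigned type: 1;  /* 0 = dirty CPU, 1 = dirty I/O */
        unsigned num: 31;  /* scheduler number; 0 means "not dirty" */
    } s;
    unsigned no;
} DirtySchedId;

int main(void)
{
    DirtySchedId id;
    id.s.type = 1;   /* corresponds to ERTS_DIRTY_IO_SCHEDULER */
    id.s.num  = 3;
    /* num != 0 is what ERTS_SCHEDULER_IS_DIRTY() checks below. */
    printf("num=%u type=%u raw=%u\n", id.s.num, id.s.type, id.no);
    return 0;
}
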
struct ErtsSchedulerData_ {
/*
* Keep X registers first (so we get as many low
@@ -570,7 +584,7 @@ struct ErtsSchedulerData_ {
Process *current_process;
Uint no; /* Scheduler number for normal schedulers */
#ifdef ERTS_DIRTY_SCHEDULERS
- Uint dirty_no; /* Scheduler number for dirty schedulers */
+ ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
#endif
Port *current_port;
ErtsRunQueue *run_queue;
@@ -607,6 +621,13 @@ extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
extern ErtsSchedulerData *erts_scheduler_data;
#endif
+#ifdef ERTS_SCHED_FAIR
+#define ERTS_SCHED_FAIR_YIELD() ETHR_YIELD()
+#else
+#define ERTS_SCHED_FAIR 0
+#define ERTS_SCHED_FAIR_YIELD()
+#endif
+
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
#endif
@@ -1292,6 +1313,8 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
&erts_aligned_run_queues[(IX)].runq)
#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq)
#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq)
+#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1)
+#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2)
#else
#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
#endif
@@ -1305,21 +1328,37 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \
&erts_aligned_dirty_io_scheduler_data[(IX)].esd)
+#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \
+ ((ESDP)->dirty_no.s.num)
+#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \
+ ((ESDP)->dirty_no.s.type)
#ifdef ERTS_SMP
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) \
- ((ESDP)->dirty_no != 0)
+ ((ESDP)->dirty_no.s.num != 0)
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \
+ ((ESDP)->dirty_no.s.type == 0)
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \
+ ((ESDP)->dirty_no.s.type == 1)
#else
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
#endif
#else
#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
+#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
#endif
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int);
+void erts_init_scheduling(int, int
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int, int, int
+#endif
+ );
int erts_set_gc_state(Process *c_p, int enable);
Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
@@ -1526,7 +1565,11 @@ ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
Sint new_no,
- Sint *old_no);
+ Sint *old_no
+#ifdef ERTS_DIRTY_SCHEDULERS
+ , int dirty_only
+#endif
+ );
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int);
int erts_is_multi_scheduling_blocked(void);
@@ -2009,12 +2052,6 @@ erts_get_runq_current(ErtsSchedulerData *esdp)
#endif
}
-#ifdef ERTS_ENABLE_LOCK_COUNT
-
-#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
-
-#else
-
ERTS_GLB_INLINE void
erts_smp_runq_lock(ErtsRunQueue *rq)
{
@@ -2023,6 +2060,10 @@ erts_smp_runq_lock(ErtsRunQueue *rq)
#endif
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__)
+
#endif
ERTS_GLB_INLINE int
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index a611b52af2..23e5bf737f 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -659,7 +659,7 @@ static void shrink(Process *p, Eterm* ret)
} else {
int needed = 4;
if (is_list(hi) && is_list(lo)) {
- needed = 2*list_length(hi);
+ needed = 2*erts_list_length(hi);
}
if (HeapWordsLeft(p) < needed) {
BUMP_REDS(p, erts_garbage_collect(p, needed, ret, 1));
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 2db5df06b4..82cc68222d 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -117,7 +117,7 @@ erts_init_proc_lock(int cpus)
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i));
+ "pix_lock", make_small(i), 1);
#else
erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
#endif
@@ -901,7 +901,7 @@ erts_pid2proc_opt(Process *c_p,
busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
- erts_proc_lc_trylock(proc, need_locks, !busy);
+ erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
if (!busy)
@@ -1001,8 +1001,8 @@ erts_pid2proc_opt(Process *c_p,
void
erts_proc_lock_init(Process *p)
{
-#if ERTS_PROC_LOCK_OWN_IMPL
int i;
+#if ERTS_PROC_LOCK_OWN_IMPL
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_smp_atomic32_init_nob(&p->lock.flags,
@@ -1013,25 +1013,33 @@ erts_proc_lock_init(Process *p)
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
p->lock.queue[i] = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
+ erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1,__FILE__,__LINE__);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ int do_lock_count = 1;
+#else
+ int do_lock_count = 0;
+#endif
+
+ erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
+ erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
+ erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
+ erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
+ do_lock_count);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
@@ -1210,50 +1218,51 @@ void erts_lcnt_enable_proc_lock_count(int enable)
#if ERTS_PROC_LOCK_OWN_IMPL
void
-erts_proc_lc_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_lock(&lck);
+ erts_lc_lock_x(&lck,file,line);
}
}
void
-erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked)
+erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
+ char* file, unsigned int line)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_trylock(locked, &lck);
+ erts_lc_trylock_x(locked, &lck, file, line);
}
}
@@ -1319,7 +1328,8 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
}
void
-erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
+ unsigned int line)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
@@ -1327,29 +1337,29 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_LINK) {
lck.id = lc_id.proc_lock_link;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
- erts_lc_require_lock(&lck);
+ erts_lc_require_lock(&lck, file, line);
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
- erts_lc_require_lock(&p->lock.main.lc);
+ erts_lc_require_lock(&p->lock.main.lc, file, line);
if (locks & ERTS_PROC_LOCK_LINK)
- erts_lc_require_lock(&p->lock.link.lc);
+ erts_lc_require_lock(&p->lock.link.lc, file, line);
if (locks & ERTS_PROC_LOCK_MSGQ)
- erts_lc_require_lock(&p->lock.msgq.lc);
+ erts_lc_require_lock(&p->lock.msgq.lc, file, line);
if (locks & ERTS_PROC_LOCK_STATUS)
- erts_lc_require_lock(&p->lock.status.lc);
+ erts_lc_require_lock(&p->lock.status.lc, file, line);
#endif
}
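
All of the hunks in this file thread the caller's source position through to the lock checker, so a violation can be reported against the line that took the lock rather than the checker internals. A minimal sketch of the idiom, using hypothetical names (my_lock, my_lock_x) rather than ERTS APIs:

/* Sketch only: call-site capture via a macro. my_lock_t, my_lock and
 * my_lock_x are hypothetical names, not ERTS functions. */
#include <stdio.h>

typedef struct { const char *name; } my_lock_t;

static void my_lock_x(my_lock_t *l, char *file, unsigned int line)
{
    /* the checker can now report where the lock was taken */
    printf("%s locked at %s:%u\n", l->name, file, line);
}

/* every existing my_lock(&l) call site now reports its own position */
#define my_lock(L) my_lock_x((L), __FILE__, __LINE__)
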
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 9dd503f3cb..052d992d3f 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -215,7 +215,7 @@ typedef struct erts_proc_lock_t_ {
/* Lock counter implementation */
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__)
#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__)
#endif
@@ -243,8 +243,10 @@ void erts_lcnt_enable_proc_lock_count(int enable);
erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__)
#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
erts_proc_lc_chk_only_proc_main((P))
-void erts_proc_lc_lock(Process *p, ErtsProcLocks locks);
-void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked);
+void erts_proc_lc_lock(Process *p, ErtsProcLocks locks,
+ char *file, unsigned int line);
+void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
+ char *file, unsigned int line);
void erts_proc_lc_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks);
@@ -253,7 +255,8 @@ void erts_proc_lc_chk_only_proc_main(Process *p);
void erts_proc_lc_chk_no_proc_locks(char *file, int line);
ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p);
int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks);
-void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks,
+ char *file, unsigned int line);
void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#else
#define ERTS_SMP_CHK_NO_PROC_LOCKS
@@ -372,7 +375,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *);
ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
erts_pix_lock_t *,
ErtsProcLocks,
@@ -482,7 +485,7 @@ busy_main:
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_proc_lock_x__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks,
@@ -528,7 +531,7 @@ erts_smp_proc_lock__(Process *p,
erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line);
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_lock(p, locks);
+ erts_proc_lc_lock(p, locks, file, line);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
@@ -695,7 +698,7 @@ erts_smp_proc_trylock__(Process *p,
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_trylock(p, locks, res == 0);
+ erts_proc_lc_trylock(p, locks, res == 0, __FILE__, __LINE__);
#endif
#if ERTS_PROC_LOCK_ATOMIC_IMPL
@@ -741,7 +744,7 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
#endif /* ERTS_SMP */
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks);
@@ -756,13 +759,13 @@ ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
#else
erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_smp_proc_lock_x__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
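
The header changes follow a prototype-swap idiom: one function body is exported under a position-taking name when ERTS_ENABLE_LOCK_POSITION is defined, and under the plain name otherwise, with a macro restoring the old spelling for callers. A self-contained sketch with hypothetical names:

/* Sketch: one body, two compile-time signatures (hypothetical names). */
typedef struct { int held; } my_lock_t;

int
#ifdef MY_ENABLE_LOCK_POSITION
my_trylock_x(my_lock_t *l, char *file, unsigned int line)
#else
my_trylock(my_lock_t *l)
#endif
{
#ifdef MY_ENABLE_LOCK_POSITION
    (void)file; (void)line;   /* would be forwarded to the checker */
#endif
    if (l->held)
        return 1;             /* busy */
    l->held = 1;
    return 0;
}

#ifdef MY_ENABLE_LOCK_POSITION
/* callers keep writing my_trylock(&l); the macro adds the position */
#define my_trylock(L) my_trylock_x((L), __FILE__, __LINE__)
#endif
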
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index fa5482b841..eabf016081 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -756,7 +756,8 @@ erts_ptab_delete_element(ErtsPTab *ptab,
pix = erts_ptab_id2pix(ptab, ptab_el->id);
- ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_ptab_rlock(ptab);
maybe_save = ptab->list.data.deleted.end != NULL;
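
The swap above replaces a hard scheduler assertion with a lock-checker assertion that the caller is a managed thread, which compiles away outside lock-check builds. A sketch of that kind of debug-only precondition (MY_LC_ASSERT is a hypothetical stand-in):

/* Sketch: a precondition that exists only in checking builds. */
#include <assert.h>

#ifdef MY_ENABLE_LOCK_CHECK
#define MY_LC_ASSERT(X) assert(X)
#else
#define MY_LC_ASSERT(X) ((void)0)   /* no cost in normal builds */
#endif
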
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index ecb5525022..c38ef47d87 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -26,10 +26,13 @@
#define ERL_SMP_H
#include "erl_threads.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__)
#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__)
#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__)
#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__)
@@ -131,10 +134,11 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx);
#endif
ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx);
@@ -159,16 +163,18 @@ ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
#endif
ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
@@ -179,7 +185,7 @@ ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
@@ -192,7 +198,7 @@ ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
#else
@@ -202,7 +208,8 @@ ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp,
+ char *keyname);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
@@ -835,7 +842,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra);
+ erts_mtx_init_x(mtx, name, extra, 1);
#endif
}
@@ -843,7 +850,7 @@ ERTS_GLB_INLINE void
erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
- erts_mtx_init_locked_x(mtx, name, extra);
+ erts_mtx_init_locked_x(mtx, name, extra, 1);
#endif
}
@@ -872,9 +879,15 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
+#else
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_mtx_trylock_x(mtx, file, line);
+#elif defined(ERTS_SMP)
return erts_mtx_trylock(mtx);
#else
return 0;
@@ -884,13 +897,13 @@ erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
-erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line)
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
#else
erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_mtx_lock_x(mtx, file, line);
#elif defined(ERTS_SMP)
erts_mtx_lock(mtx);
@@ -1020,9 +1033,15 @@ erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_rwmtx_tryrlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
return erts_rwmtx_tryrlock(rwmtx);
#else
return 0;
@@ -1030,13 +1049,13 @@ erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rlock_x(rwmtx, file, line);
#elif defined(ERTS_SMP)
erts_rwmtx_rlock(rwmtx);
@@ -1053,9 +1072,15 @@ erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
+#endif
{
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
+ return erts_rwmtx_tryrwlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
return erts_rwmtx_tryrwlock(rwmtx);
#else
return 0;
@@ -1063,13 +1088,13 @@ erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rwlock_x(rwmtx, file, line);
#elif defined(ERTS_SMP)
erts_rwmtx_rwlock(rwmtx);
@@ -1171,13 +1196,13 @@ erts_smp_spin_unlock(erts_smp_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line)
#else
erts_smp_spin_lock(erts_smp_spinlock_t *lock)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_spin_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_spin_lock(lock);
@@ -1237,13 +1262,13 @@ erts_smp_read_unlock(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
#else
erts_smp_read_lock(erts_smp_rwlock_t *lock)
#endif
{
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP)
erts_read_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_read_lock(lock);
@@ -1263,13 +1288,13 @@ erts_smp_write_unlock(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
#else
erts_smp_write_lock(erts_smp_rwlock_t *lock)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_write_lock_x(lock, file, line);
#elif defined(ERTS_SMP)
erts_write_lock(lock);
@@ -1299,10 +1324,10 @@ erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp)
+erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char *keyname)
{
#ifdef ERTS_SMP
- erts_tsd_key_create(keyp);
+ erts_tsd_key_create(keyp, keyname);
#endif
}
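
Each smp wrapper in this header now dispatches three ways: to the position-aware variant when both ERTS_SMP and ERTS_ENABLE_LOCK_POSITION are set, to the plain variant under SMP alone, and to a no-op otherwise. A compilable sketch of that shape, with hypothetical names:

/* Sketch of the three-way dispatch (hypothetical names). */
typedef struct { int held; } my_mtx_t;

static int my_trylock(my_mtx_t *m) { return m->held ? 1 : (m->held = 1, 0); }
static int my_trylock_x(my_mtx_t *m, char *file, unsigned int line)
{ (void)file; (void)line; return my_trylock(m); }

static int my_smp_trylock(my_mtx_t *m, char *file, unsigned int line)
{
#if defined(MY_SMP) && defined(MY_ENABLE_LOCK_POSITION)
    return my_trylock_x(m, file, line);   /* position-aware path */
#elif defined(MY_SMP)
    (void)file; (void)line;
    return my_trylock(m);                 /* plain SMP path */
#else
    (void)m; (void)file; (void)line;
    return 0;                             /* non-SMP build: always free */
#endif
}
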
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index cf5e3dc012..545a0343d0 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -417,7 +417,8 @@ void
erts_thr_progress_pre_init(void)
{
intrnl = NULL;
- erts_tsd_key_create(&erts_thr_prgr_data_key__);
+ erts_tsd_key_create(&erts_thr_prgr_data_key__,
+ "erts_thr_prgr_data_key");
init_nob(&erts_thr_prgr__.current, ERTS_THR_PRGR_VAL_FIRST);
}
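
Thread-specific-data keys now carry a name. POSIX keys themselves are anonymous, so the name's value is purely diagnostic; a sketch assuming plain pthreads:

/* Sketch, assuming POSIX TSD: log the key's name at creation time,
 * which is the kind of information the hunk above starts passing. */
#include <pthread.h>
#include <stdio.h>

static int tsd_key_create_named(pthread_key_t *key, const char *name)
{
    int res = pthread_key_create(key, NULL); /* no destructor */
    if (res == 0)
        fprintf(stderr, "tsd key \"%s\" created\n", name);
    return res;
}
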
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 759c8f4c33..80026104db 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -281,10 +281,13 @@
#define ERTS_THR_READ_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER ETHR_READ_DEPEND_MEMORY_BARRIER
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
#define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_mtx_trylock(L) erts_mtx_trylock_x(L, __FILE__, __LINE__)
#define erts_spin_lock(L) erts_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_tryrlock(L) erts_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
#define erts_rwmtx_rlock(L) erts_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_tryrwlock(L) erts_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
#define erts_rwmtx_rwlock(L) erts_rwmtx_rwlock_x(L, __FILE__, __LINE__)
#define erts_read_lock(L) erts_read_lock_x(L, __FILE__, __LINE__)
#define erts_write_lock(L) erts_write_lock_x(L, __FILE__, __LINE__)
@@ -461,18 +464,24 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
+ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
+ int enable_lcnt);
+ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra,
+ Uint16 opt, int enable_lcnt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
char *name,
- Eterm extra);
+ Eterm extra,
+ int enable_lcnt);
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
+ unsigned int line);
+ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file,
+ unsigned int line);
#else
+ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_mtx_lock(erts_mtx_t *mtx);
#endif
ERTS_GLB_INLINE void erts_mtx_unlock(erts_mtx_t *mtx);
@@ -496,16 +505,18 @@ ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE int erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
#else
+ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
#endif
ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
@@ -571,7 +582,7 @@ ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
@@ -584,7 +595,7 @@ ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
char *name);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
ERTS_GLB_INLINE void erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
#else
@@ -594,7 +605,7 @@ ERTS_GLB_INLINE void erts_write_lock(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
@@ -1549,7 +1560,7 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1559,13 +1570,17 @@ erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
#endif
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
+erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
+ int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1575,14 +1590,17 @@ erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra);
#endif
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
@@ -1592,7 +1610,10 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ if (enable_lcnt)
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
#endif
ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1670,7 +1691,11 @@ erts_mtx_destroy(erts_mtx_t *mtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line)
+#else
erts_mtx_trylock(erts_mtx_t *mtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1684,8 +1709,12 @@ erts_mtx_trylock(erts_mtx_t *mtx)
res = ethr_mutex_trylock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_x(res == 0, &mtx->lc, file, line);
+#else
erts_lc_trylock(res == 0, &mtx->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
#endif
@@ -1697,7 +1726,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line)
#else
erts_mtx_lock(erts_mtx_t *mtx)
@@ -1705,8 +1734,12 @@ erts_mtx_lock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_x(&mtx->lc, file, line);
+#else
erts_lc_lock(&mtx->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
@@ -1857,7 +1890,10 @@ erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
+ if (name && name[0] == '\0')
+ erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
+ else
+ erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
#endif
#endif
}
@@ -1921,7 +1957,11 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1935,8 +1975,12 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
res = ethr_rwmutex_tryrlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ, file, line);
+#else
erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
@@ -1948,7 +1992,7 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
@@ -1956,8 +2000,12 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ, file, line);
+#else
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
@@ -1984,7 +2032,11 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
+#ifdef ERTS_ENABLE_LOCK_POSITION
+erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
+#endif
{
#ifdef USE_THREADS
int res;
@@ -1998,8 +2050,12 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
res = ethr_rwmutex_tryrwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE, file, line);
+#else
erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2011,7 +2067,7 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
#else
erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
@@ -2019,8 +2075,12 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE, file, line);
+#else
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2426,7 +2486,7 @@ erts_spin_unlock(erts_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line)
#else
erts_spin_lock(erts_spinlock_t *lock)
@@ -2434,8 +2494,12 @@ erts_spin_lock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_x(&lock->lc, file, line);
+#else
erts_lc_lock(&lock->lc);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
@@ -2545,7 +2609,7 @@ erts_read_unlock(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
#else
erts_read_lock(erts_rwlock_t *lock)
@@ -2553,8 +2617,12 @@ erts_read_lock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ, file, line);
+#else
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
@@ -2584,7 +2652,7 @@ erts_write_unlock(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#ifdef ERTS_ENABLE_LOCK_POSITION
erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
#else
erts_write_lock(erts_rwlock_t *lock)
@@ -2592,8 +2660,12 @@ erts_write_lock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_POSITION
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE, file, line);
+#else
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
+#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
@@ -2635,10 +2707,10 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_tsd_key_create(erts_tsd_key_t *keyp)
+erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname)
{
#ifdef USE_THREADS
- int res = ethr_tsd_key_create(keyp);
+ int res = ethr_tsd_key_create(keyp, keyname);
if (res)
erts_thr_fatal_error(res, "create thread specific data key");
#endif
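
The new enable_lcnt argument in the init functions above works by convention: passing NULL as the lock-count name tells the counting layer to skip that lock instance. A sketch of the mapping, with hypothetical names:

/* Sketch (hypothetical names): a NULL name means "do not count". */
typedef struct { const char *counted_as; } my_cnt_t;
typedef struct { my_cnt_t cnt; } my_mtx_t;

static void my_cnt_init(my_cnt_t *c, const char *name)
{
    c->counted_as = name;                /* NULL: skip this lock */
}

static void my_mtx_init(my_mtx_t *m, const char *name, int enable_cnt)
{
    my_cnt_init(&m->cnt, enable_cnt ? name : NULL);
}
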
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ff7fdfcfca..6978a5f11a 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -3309,6 +3309,8 @@ sys_msg_dispatcher_func(void *unused)
if (erts_thr_progress_update(NULL))
erts_thr_progress_leader_update(NULL);
+ ERTS_SCHED_FAIR_YIELD();
+
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
#endif
@@ -3467,12 +3469,20 @@ static void
init_sys_msg_dispatcher(void)
{
erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+#ifdef __OSE__
+ thr_opts.coreNo = 0;
+#endif
thr_opts.detached = 1;
init_smq_element_alloc();
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
+
+#ifdef ETHR_HAVE_THREAD_NAMES
+ thr_opts.name = "sys_msg_dispatcher";
+#endif
+
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
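
The dispatcher thread also gains a name when the threads library supports it. A sketch assuming a Linux/glibc target (pthread_setname_np imposes a 15-character limit there, so the name below is shortened):

/* Sketch, Linux/glibc only: naming a helper thread for debuggers. */
#define _GNU_SOURCE
#include <pthread.h>

static void name_current_thread(void)
{
    pthread_setname_np(pthread_self(), "sys_msg_disp");
}
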
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 43a19e97f1..0807649ea1 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -150,7 +150,7 @@ void erts_silence_warn_unused_result(long unused);
int erts_fit_in_bits_int64(Sint64);
int erts_fit_in_bits_int32(Sint32);
-int list_length(Eterm);
+int erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 9fb2dbd8bf..b8e6b3b072 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -529,7 +529,7 @@ Uint erts_encode_ext_size(Eterm term)
Uint erts_encode_ext_size_2(Eterm term, unsigned dflags)
{
- return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS|dflags)
+ return encode_size_struct2(NULL, term, dflags)
+ 1 /* VERSION_MAGIC */;
}
@@ -1099,10 +1099,10 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
if (tp[1] == am_minor_version && is_small(tp[2])) {
switch (signed_val(tp[2])) {
case 0:
- flags = TERM_TO_BINARY_DFLAGS;
+ flags = TERM_TO_BINARY_DFLAGS & ~DFLAG_NEW_FLOATS;
break;
case 1:
- flags = TERM_TO_BINARY_DFLAGS|DFLAG_NEW_FLOATS;
+ flags = TERM_TO_BINARY_DFLAGS;
break;
default:
goto error;
@@ -1605,9 +1605,9 @@ external_size_2(BIF_ALIST_2)
if (tp[1] == am_minor_version && is_small(tp[2])) {
switch (signed_val(tp[2])) {
case 0:
+ flags &= ~DFLAG_NEW_FLOATS;
break;
case 1:
- flags |= DFLAG_NEW_FLOATS;
break;
default:
goto error;
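
The external.c hunks flip a default: DFLAG_NEW_FLOATS is now part of TERM_TO_BINARY_DFLAGS, so minor_version 0 must mask it out instead of minor_version 1 adding it in. A sketch of the resulting flag selection, with stand-in names:

/* Sketch of the inverted default; F_NEW_FLOATS/F_BASE are stand-ins. */
enum { F_NEW_FLOATS = 1u << 0, F_BASE = F_NEW_FLOATS /* | ... */ };

static unsigned flags_for_minor_version(int v)
{
    switch (v) {
    case 0:  return F_BASE & ~F_NEW_FLOATS; /* old float encoding */
    case 1:  return F_BASE;                 /* now the default set */
    default: return 0;                      /* caller raises badarg */
    }
}
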
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 3b16cdeb4a..cd5060ebb3 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -49,7 +49,9 @@
#include "erl_map.h"
extern ErlDrvEntry fd_driver_entry;
+#ifndef __OSE__
extern ErlDrvEntry vanilla_driver_entry;
+#endif
extern ErlDrvEntry spawn_driver_entry;
extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
@@ -245,11 +247,13 @@ static ERTS_INLINE void port_init_instr(Port *prt
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
char *lock_str = "port_lock";
+ erts_mtx_init_locked_x(prt->lock, lock_str, id,
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
- lock_str = NULL;
+ (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
+#else
+ 0
#endif
- erts_mtx_init_locked_x(prt->lock, lock_str, id);
+ );
}
#endif
erts_port_task_init_sched(&prt->sched, id);
@@ -2743,8 +2747,10 @@ void erts_init_io(int port_tab_size,
&drv_list_rwmtx_opts,
"driver_list");
driver_list = NULL;
- erts_smp_tsd_key_create(&driver_list_lock_status_key);
- erts_smp_tsd_key_create(&driver_list_last_error_key);
+ erts_smp_tsd_key_create(&driver_list_lock_status_key,
+ "erts_driver_list_lock_status_key");
+ erts_smp_tsd_key_create(&driver_list_last_error_key,
+ "erts_driver_list_last_error_key");
erts_ptab_init_table(&erts_port,
ERTS_ALC_T_PORT_TABLE,
@@ -2764,7 +2770,9 @@ void erts_init_io(int port_tab_size,
erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
init_driver(&fd_driver, &fd_driver_entry, NULL);
+#ifndef __OSE__
init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
+#endif
init_driver(&spawn_driver, &spawn_driver_entry, NULL);
erts_init_static_drivers();
for (dp = driver_tab; *dp != NULL; dp++)
@@ -7306,10 +7314,11 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
erts_atom_put((byte *) drv->name,
sys_strlen(drv->name),
ERTS_ATOM_ENC_LATIN1,
- 1)
+ 1),
#else
- NIL
+ NIL,
#endif
+ 1
);
}
#endif
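
port_init_instr now computes the lock-count enable argument inline in the call, under the build option, instead of NULL-ing the lock name beforehand. A sketch of that call shape (all names hypothetical):

/* Sketch: a conditional argument computed inside one call site. */
typedef struct { int opts; } my_rt_t;
#define MY_OPT_PORTLOCK 1
extern void my_mtx_init_locked(void *mtx, char *name, long id, int do_cnt);

static void port_lock_init(void *mtx, long id, my_rt_t *rt)
{
    my_mtx_init_locked(mtx, "port_lock", id,
#ifdef MY_ENABLE_LOCK_COUNT
                       (rt->opts & MY_OPT_PORTLOCK) != 0
#else
                       0
#endif
                       );
#ifndef MY_ENABLE_LOCK_COUNT
    (void)rt;                /* only read when counting is built in */
#endif
}
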
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 8406f5344a..73630fda8e 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1470,11 +1470,15 @@ apply_last I P
#
put_map_assoc F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
-put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
-put_map_assoc F Src Dst Live Size Rest=* => \
+put_map_assoc F Src=s Dst Live Size Rest=* => \
update_map_assoc F Src Dst Live Size Rest
-put_map_exact F Src Dst Live Size Rest=* => \
+put_map_assoc F Src Dst Live Size Rest=* => \
+ move Src x | update_map_assoc F x Dst Live Size Rest
+put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest
+put_map_exact F Src=s Dst Live Size Rest=* => \
update_map_exact F Src Dst Live Size Rest
+put_map_exact F Src Dst Live Size Rest=* => \
+ move Src x | update_map_exact F x Dst Live Size Rest
new_map j d I I
update_map_assoc j s d I I
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 189d9ebac8..e273056a2b 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -38,6 +38,8 @@
#if defined (__WIN32__)
# include "erl_win_sys.h"
+#elif defined (__OSE__)
+# include "erl_ose_sys.h"
#else
# include "erl_unix_sys.h"
#ifndef UNIX
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 7da555b18d..5b43d25e3c 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -258,7 +258,7 @@ erl_grow_wstack(ErtsWStack* s, UWord* default_wstack)
* Returns -1 if not a proper list (i.e. not terminated with NIL)
*/
int
-list_length(Eterm list)
+erts_list_length(Eterm list)
{
int i = 0;