path: root/erts/emulator
author    Rickard Green <[email protected]>    2010-06-17 10:23:50 +0200
committer Rickard Green <[email protected]>    2010-08-10 11:41:14 +0200
commit    300b419486c1ca88e33938f182d5d5a8b90fb73f (patch)
tree      55c37d5fb042bf6b3b5f56d89c9238a7e22f8b29 /erts/emulator
parent    c1e94fa9a6fe4ae717d35dfbd1b628dc2e06d26a (diff)
Rewrite ethread library
Large parts of the ethread library have been rewritten. The ethread library is an Erlang runtime system internal, portable thread library used by the runtime system itself.

The most notable improvement is a reader optimized rwlock implementation which dramatically improves the performance of read-lock/read-unlock operations on multiprocessor systems by avoiding ping-ponging of the rwlock cache lines. The reader optimized rwlock implementation is used by miscellaneous rwlocks in the runtime system that are known to be read-locked frequently, and can be enabled on ETS tables by passing the `{read_concurrency, true}' option upon table creation. See the documentation of `ets:new/2' for more information.

The ethread library can now also use the libatomic_ops library for atomic memory accesses. This makes it possible for the Erlang runtime system to utilize optimized atomic operations on more platforms than before. Use the `--with-libatomic_ops=PATH' configure command line argument to specify where the libatomic_ops installation is located. The libatomic_ops library can be downloaded from: http://www.hpl.hp.com/research/linux/atomic_ops/

The changed API of the ethread library has also caused modifications in the Erlang runtime system. Preparations for the upcoming "delayed deallocation" feature have also been made, since it depends on the ethread library.

Note: When building for x86, the ethread library will now use instructions that first appeared on the Pentium 4 processor. If you want the runtime system to be compatible with older processors (back to the 486), you need to pass the `--enable-ethread-pre-pentium4-compatibility' configure command line argument when configuring the system.
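For reference, a minimal usage sketch of the new ETS option follows; the module, function, table, and key names are illustrative only and are not part of this commit.

%% Minimal sketch: create an ETS table with the new read_concurrency option
%% so frequent concurrent reads use the reader optimized rwlock and avoid
%% ping-ponging of the rwlock cache lines.
-module(read_concurrency_example).
-export([demo/0]).

demo() ->
    Tab = ets:new(example_cache, [set, public, {read_concurrency, true}]),
    true = ets:insert(Tab, {some_key, some_value}),
    [{some_key, some_value}] = ets:lookup(Tab, some_key).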
Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in                      35
-rw-r--r--  erts/emulator/beam/atom.c                      10
-rw-r--r--  erts/emulator/beam/atom.names                   1
-rw-r--r--  erts/emulator/beam/erl_alloc.c                174
-rw-r--r--  erts/emulator/beam/erl_alloc.types              8
-rw-r--r--  erts/emulator/beam/erl_bif_info.c              16
-rw-r--r--  erts/emulator/beam/erl_db.c                   319
-rw-r--r--  erts/emulator/beam/erl_db_hash.c                6
-rw-r--r--  erts/emulator/beam/erl_db_util.h                3
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c            78
-rw-r--r--  erts/emulator/beam/erl_fun.c                    8
-rw-r--r--  erts/emulator/beam/erl_init.c                 116
-rw-r--r--  erts/emulator/beam/erl_lock_check.c           109
-rw-r--r--  erts/emulator/beam/erl_lock_check.h             1
-rw-r--r--  erts/emulator/beam/erl_lock_count.c            18
-rw-r--r--  erts/emulator/beam/erl_mtrace.c                 2
-rw-r--r--  erts/emulator/beam/erl_node_tables.c           31
-rw-r--r--  erts/emulator/beam/erl_port_task.c              7
-rw-r--r--  erts/emulator/beam/erl_process.c             1832
-rw-r--r--  erts/emulator/beam/erl_process.h               89
-rw-r--r--  erts/emulator/beam/erl_process_lock.c         350
-rw-r--r--  erts/emulator/beam/erl_process_lock.h          46
-rw-r--r--  erts/emulator/beam/erl_smp.h                  222
-rw-r--r--  erts/emulator/beam/erl_threads.h              509
-rw-r--r--  erts/emulator/beam/export.c                     8
-rw-r--r--  erts/emulator/beam/register.c                   7
-rw-r--r--  erts/emulator/sys/unix/sys.c                   19
-rw-r--r--  erts/emulator/sys/win32/sys.c                  18
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl        361
29 files changed, 3207 insertions(+), 1196 deletions(-)
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index eca6121a1e..2b9f70b0f4 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -199,6 +199,14 @@ MKDIR = @MKDIR@
USING_MINGW=@MIXED_CYGWIN_MINGW@
+ifeq ($(TARGET),win32)
+LIB_PREFIX=
+LIB_SUFFIX=.lib
+else
+LIB_PREFIX=lib
+LIB_SUFFIX=.a
+endif
+
OMIT_OMIT_FP=no
ifeq (@EMU_LOCK_CHECKING@,yes)
@@ -279,15 +287,12 @@ endif
ifeq ($(TARGET),win32)
LIBS += -L$(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE) -lepcre
-DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/epcre.lib
else
-LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a
-DEPLIBS += \
- $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a \
- $(ERL_TOP)/erts/lib/internal/$(TARGET)/liberts_internal.a
-# rem liberts_internal.a
+LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
endif
+DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
+
ELIB_FLAGS = -DENABLE_ELIB_MALLOC -DELIB_ALLOC_IS_CLIB -DELIB_HEAP_SBRK
PERFCTR_PATH=@PERFCTR_PATH@
@@ -305,7 +310,19 @@ LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
THR_LIB_NAME=@EMU_THR_LIB_NAME@
-THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER),$(ORG_THR_LIBS))
+ifneq ($(strip $(THR_LIB_NAME)),)
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal_r$(TYPEMARKER)$(LIB_SUFFIX) \
+ $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)ethread$(TYPEMARKER)$(LIB_SUFFIX)
+else
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal$(TYPEMARKER)$(LIB_SUFFIX)
+endif
+
+THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER), \
+ $(subst -lerts_internal_r,-lerts_internal_r$(TYPEMARKER),$(ORG_THR_LIBS)))
+
+LIBS += $(THR_LIBS)
+
+ifneq ($(findstring erts_internal_r, $(THR_LIBS)),erts_internal_r)
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
ERTS_INTERNAL_LIB=erts_internal
@@ -317,7 +334,9 @@ ERTS_INTERNAL_LIB=erts_internal
endif
endif
-LIBS += $(THR_LIBS) -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+
+endif # erts_internal_r
LIBS += @LIBRT@
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index e2a79d6e4f..6b3c106a97 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -41,8 +41,7 @@ static erts_smp_rwmtx_t atom_table_lock;
#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
-#define atom_init_lock() erts_smp_rwmtx_init(&atom_table_lock, \
- "atom_tab")
+
#if 0
#define ERTS_ATOM_PUT_OPS_STAT
#endif
@@ -304,12 +303,17 @@ init_atom_table(void)
HashFunctions f;
int i;
Atom a;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_smp_atomic_init(&atom_put_ops, 0);
#endif
- atom_init_lock();
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
f.alloc = (HALLOC_FUN) atom_alloc;
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 28f69b9460..0815cdbc7f 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -432,6 +432,7 @@ atom raw
atom re
atom re_pattern
atom re_run_trap
+atom read_concurrency
atom ready_input
atom ready_output
atom ready_async
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 16ae643ed9..87503af7d5 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -64,6 +64,14 @@
#ifdef DEBUG
static Uint install_debug_functions(void);
+#if 0
+#define HARD_DEBUG
+#ifdef __GNUC__
+#warning "* * * * * * * * * * * * * *"
+#warning "* HARD DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * *"
+#endif
+#endif
#endif
extern void elib_ensure_initialized(void);
@@ -391,6 +399,10 @@ refuse_af_strategy(struct au_init *init)
static void init_thr_ix(int static_ixs);
+#ifdef HARD_DEBUG
+static void hdbg_init(void);
+#endif
+
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
@@ -406,6 +418,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
ERTS_DEFAULT_ALCU_INIT
};
+#ifdef HARD_DEBUG
+ hdbg_init();
+#endif
+
erts_sys_alloc_init();
init_thr_ix(erts_no_schedulers);
erts_init_utils_mem();
@@ -2862,12 +2878,10 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0a:
- if (ethr_mutex_lock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_lock((ethr_mutex *) a1);
break;
case 0xf0b:
- if (ethr_mutex_unlock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_unlock((ethr_mutex *) a1);
break;
case 0xf0c: {
ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
@@ -2883,16 +2897,13 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0e:
- if (ethr_cond_broadcast((ethr_cond *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_cond_broadcast((ethr_cond *) a1);
break;
case 0xf0f: {
int res;
do {
res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
} while (res == EINTR);
- if (res != 0)
- ERTS_ALC_TEST_ABORT;
break;
}
case 0xf10: {
@@ -2939,8 +2950,11 @@ unsigned long erts_alc_test(unsigned long op,
#undef PRINT_OPS
#endif
-
+#ifdef HARD_DEBUG
+#define FENCE_SZ (4*sizeof(UWord))
+#else
#define FENCE_SZ (3*sizeof(UWord))
+#endif
#if defined(ARCH_64)
#define FENCE_PATTERN 0xABCDEF97ABCDEF97
@@ -2962,6 +2976,104 @@ unsigned long erts_alc_test(unsigned long op,
#define GET_TYPE_OF_PATTERN(P) \
(((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
+#ifdef HARD_DEBUG
+
+#define ERL_ALC_HDBG_MAX_MBLK 100000
+#define ERTS_ALC_O_CHECK -1
+
+typedef struct hdbg_mblk_ hdbg_mblk;
+struct hdbg_mblk_ {
+ hdbg_mblk *next;
+ hdbg_mblk *prev;
+ void *p;
+ Uint s;
+ ErtsAlcType_t n;
+};
+
+static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
+
+static hdbg_mblk *free_hdbg_mblks;
+static hdbg_mblk *used_hdbg_mblks;
+static erts_mtx_t hdbg_mblk_mtx;
+
+static void
+hdbg_init(void)
+{
+ int i;
+ for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
+ hdbg_mblks[i].next = &hdbg_mblks[i+1];
+ hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
+ free_hdbg_mblks = &hdbg_mblks[0];
+ used_hdbg_mblks = NULL;
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+}
+
+static void *check_memory_fence(void *ptr,
+ Uint *size,
+ ErtsAlcType_t n,
+ int func);
+void erts_hdbg_chk_blks(void);
+
+void
+erts_hdbg_chk_blks(void)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
+ Uint sz;
+ check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
+ ASSERT(sz == mblk->s);
+ }
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+static hdbg_mblk *
+hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ mblk = free_hdbg_mblks;
+ if (!mblk) {
+ erts_fprintf(stderr,
+ "Ran out of debug blocks; please increase "
+ "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
+ ERL_ALC_HDBG_MAX_MBLK);
+ abort();
+ }
+ free_hdbg_mblks = mblk->next;
+
+ mblk->p = p;
+ mblk->s = s;
+ mblk->n = n;
+
+ mblk->next = used_hdbg_mblks;
+ mblk->prev = NULL;
+ if (used_hdbg_mblks)
+ used_hdbg_mblks->prev = mblk;
+ used_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+ return (void *) mblk;
+}
+
+static void
+hdbg_free(hdbg_mblk *mblk)
+{
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ if (mblk->next)
+ mblk->next->prev = mblk->prev;
+ if (mblk->prev)
+ mblk->prev->next = mblk->next;
+ else
+ used_hdbg_mblks = mblk->next;
+
+ mblk->next = free_hdbg_mblks;
+ free_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+#endif
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
@@ -3007,17 +3119,28 @@ set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
{
UWord *ui_ptr;
UWord pattern;
+#ifdef HARD_DEBUG
+ hdbg_mblk **mblkpp;
+#endif
if (!ptr)
return NULL;
ui_ptr = (UWord *) ptr;
pattern = MK_PATTERN(n);
-
+
+#ifdef HARD_DEBUG
+ mblkpp = (hdbg_mblk **) ui_ptr++;
+#endif
+
*(ui_ptr++) = sz;
*(ui_ptr++) = pattern;
memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
+#ifdef HARD_DEBUG
+ *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
+#endif
+
return (void *) ui_ptr;
}
@@ -3029,6 +3152,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
UWord pre_pattern;
UWord post_pattern;
UWord *ui_ptr;
+#ifdef HARD_DEBUG
+ hdbg_mblk *mblk;
+#endif
if (!ptr)
return NULL;
@@ -3036,6 +3162,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
ui_ptr = (UWord *) ptr;
pre_pattern = *(--ui_ptr);
*size = sz = *(--ui_ptr);
+#ifdef HARD_DEBUG
+ mblk = (hdbg_mblk *) *(--ui_ptr);
+#endif
found_type = GET_TYPE_OF_PATTERN(pre_pattern);
if (pre_pattern != MK_PATTERN(n)) {
@@ -3091,6 +3220,17 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
(unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
}
+#ifdef HARD_DEBUG
+ switch (func) {
+ case ERTS_ALC_O_REALLOC:
+ case ERTS_ALC_O_FREE:
+ hdbg_free(mblk);
+ break;
+ default:
+ break;
+ }
+#endif
+
return (void *) ui_ptr;
}
@@ -3103,6 +3243,10 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
Uint dsize;
void *res;
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dsize = size + FENCE_SZ;
res = (*real_af->alloc)(n, real_af->extra, dsize);
@@ -3132,13 +3276,17 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
dsize = size + FENCE_SZ;
dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
if (old_size > size)
sys_memset((void *) (((char *) ptr) + size),
0xf,
sizeof(Uint) + old_size - size);
res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
-
+
res = set_memory_fence(res, size, n);
#ifdef PRINT_OPS
@@ -3168,6 +3316,10 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
#endif
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
}
static Uint
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 6f88bbe5b8..7df9f19af0 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -212,7 +212,8 @@ type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
# INFO_DSBUF have to use the SYSTEM allocator; otherwise, a deadlock might occur
-type SCHDLR_DATA LONG_LIVED PROCESSES scheduler_data
+type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
+type SCHDLR_SLP_INFO LONG_LIVED SYSTEM scheduler_sleep_info
type RUNQS LONG_LIVED SYSTEM run_queues
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
type DDLL_HANDLE STANDARD SYSTEM ddll_handle
@@ -246,6 +247,7 @@ type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type ZLIB STANDARD SYSTEM zlib
+type RDR_GRPS_MAP LONG_LIVED SYSTEM reader_groups_map
+if smp
type ASYNC SHORT_LIVED SYSTEM async
@@ -269,7 +271,9 @@ type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
+if threads
-type ETHR_INTERNAL SYSTEM SYSTEM ethread_internal
+type ETHR_STD STANDARD SYSTEM ethread_standard
+type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
+type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
+ifnot smp
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 48cda52612..f582f4470f 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2563,6 +2563,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_sched_stat_term(BIF_P, 1));
} else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
BIF_RET(erts_nif_taints(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
+ BIF_RET(erts_get_reader_groups_map(BIF_P));
}
BIF_ERROR(BIF_P, BADARG);
@@ -3426,6 +3428,16 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
return erts_fake_scheduler_bindings(BIF_P, tp[2]);
}
+ else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
+ Sint groups;
+ if (is_not_small(tp[2]))
+ BIF_ERROR(BIF_P, BADARG);
+ groups = signed_val(tp[2]);
+ if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
+ BIF_ERROR(BIF_P, BADARG);
+
+ BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
+ }
break;
}
default:
@@ -3730,8 +3742,8 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
* [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
*/
- ethr_atomic_read(&stats->tries, (long *)&tries);
- ethr_atomic_read(&stats->colls, (long *)&colls);
+ tries = (unsigned long) ethr_atomic_read(&stats->tries);
+ colls = (unsigned long) ethr_atomic_read(&stats->colls);
line = stats->line;
timer_s = stats->timer.s;
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index cbdaa459de..b0369a402b 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -78,11 +78,19 @@ enum DbIterSafety {
** The main meta table, containing all ets tables.
*/
#ifdef ERTS_SMP
-# define META_MAIN_TAB_LOCK_CNT 16
-static union {
- erts_smp_spinlock_t lck;
- byte _cache_line_alignment[64];
-}meta_main_tab_locks[META_MAIN_TAB_LOCK_CNT];
+
+#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
+#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
+#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+
+typedef union {
+ erts_smp_rwmtx_t rwmtx;
+ byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(erts_smp_rwmtx_t))];
+} erts_meta_main_tab_lock_t;
+
+static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+
#endif
static struct {
union {
@@ -104,17 +112,13 @@ static struct {
#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
-static ERTS_INLINE void meta_main_tab_lock(unsigned slot)
+static ERTS_INLINE erts_smp_rwmtx_t *
+get_meta_main_tab_lock(unsigned slot)
{
#ifdef ERTS_SMP
- erts_smp_spin_lock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
-#endif
-}
-
-static ERTS_INLINE void meta_main_tab_unlock(unsigned slot)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_unlock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
+ return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+#else
+ return NULL;
#endif
}
@@ -166,7 +170,8 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3 /* record write access */
+ LCK_WRITE_REC=3, /* record write access */
+ LCK_NONE=4
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -214,17 +219,17 @@ Export ets_select_continue_exp;
*/
static Export ets_delete_continue_exp;
-static ERTS_INLINE DbTable* db_ref(DbTable* tb)
+static ERTS_INLINE DbTable* db_ref(DbTable* tb, db_lock_kind_t kind)
{
- if (tb != NULL) {
+ if (tb != NULL && kind != LCK_READ) {
erts_refc_inc(&tb->common.ref, 2);
}
return tb;
}
-static ERTS_INLINE DbTable* db_unref(DbTable* tb)
+static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
{
- if (!erts_refc_dectest(&tb->common.ref, 0)) {
+ if (kind != LCK_READ && !erts_refc_dectest(&tb->common.ref, 0)) {
#ifdef HARDDEBUG
if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
@@ -256,12 +261,19 @@ static ERTS_INLINE DbTable* db_unref(DbTable* tb)
return tb;
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
+ char *rwname, char* fixname)
{
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ if (use_frequent_read_lock)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+#endif
erts_refc_init(&tb->common.ref, 1);
erts_refc_init(&tb->common.fixref, 0);
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
+ rwname, tb->common.the_name);
erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
@@ -297,7 +309,7 @@ static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
- (void) db_ref(tb);
+ (void) db_ref(tb, kind);
#ifdef ERTS_SMP
db_lock_take_over_ref(tb, kind);
#endif
@@ -331,7 +343,7 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
}
}
#endif
- (void) db_unref(tb); /* May delete table... */
+ (void) db_unref(tb, kind); /* May delete table... */
}
@@ -349,32 +361,49 @@ static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
}
static ERTS_INLINE
-DbTable* db_get_table(Process *p,
- Eterm id,
- int what,
- db_lock_kind_t kind)
+DbTable* db_get_table_aux(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind,
+ int meta_already_locked)
{
DbTable *tb = NULL;
+ erts_smp_rwmtx_t *mtl = NULL;
if (is_small(id)) {
Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- meta_main_tab_lock(slot);
+ if (!meta_already_locked) {
+ mtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rlock(mtl);
+ }
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ else {
+ erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
+ || erts_lc_rwmtx_is_rwlocked(test_mtl));
+ }
+#endif
if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
/* SMP: inc to prevent race, between unlock of meta_main_tab_lock
* and the table locking outside the meta_main_tab_lock
*/
- tb = db_ref(meta_main_tab[slot].u.tb);
+ tb = db_ref(meta_main_tab[slot].u.tb, kind);
}
- meta_main_tab_unlock(slot);
}
else if (is_atom(id)) {
- erts_smp_rwmtx_t* rwlock;
- struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&rwlock);
- erts_smp_rwmtx_rlock(rwlock);
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
+ if (!meta_already_locked)
+ erts_smp_rwmtx_rlock(mtl);
+ else{
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ || erts_lc_rwmtx_is_rwlocked(mtl));
+ mtl = NULL;
+ }
+
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id) {
- tb = db_ref(bucket->pu.tb);
+ tb = db_ref(bucket->pu.tb, kind);
}
}
else { /* multi */
@@ -382,23 +411,33 @@ DbTable* db_get_table(Process *p,
Uint i;
for (i=0; i<cnt; i++) {
if (bucket->pu.mvec[i].u.name_atom == id) {
- tb = db_ref(bucket->pu.mvec[i].pu.tb);
+ tb = db_ref(bucket->pu.mvec[i].pu.tb, kind);
break;
}
}
}
}
- erts_smp_rwmtx_runlock(rwlock);
}
if (tb) {
db_lock_take_over_ref(tb, kind);
- if (tb->common.id == id && ((tb->common.status & what) != 0 ||
- p->id == tb->common.owner)) {
- return tb;
+ if (tb->common.id != id
+ || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+ db_unlock(tb, kind);
+ tb = NULL;
}
- db_unlock(tb, kind);
}
- return NULL;
+ if (mtl)
+ erts_smp_rwmtx_runlock(mtl);
+ return tb;
+}
+
+static ERTS_INLINE
+DbTable* db_get_table(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind)
+{
+ return db_get_table_aux(p, id, what, kind, 0);
}
/* Requires meta_main_tab_locks[slot] locked.
@@ -413,15 +452,15 @@ static ERTS_INLINE void free_slot(int slot)
erts_smp_spin_unlock(&meta_main_tab_main_lock);
}
-static int insert_named_tab(Eterm name_atom, DbTable* tb)
+static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
-
- erts_smp_rwmtx_rwlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -468,17 +507,32 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
-static int remove_named_tab(Eterm name_atom)
+static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
+ Eterm name_atom = tb->common.id;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
- erts_smp_rwmtx_rwlock(rwlock);
+#ifdef ERTS_SMP
+ if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disapearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(rwlock);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
+
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+
if (bucket->pu.tb == NULL) {
goto done;
}
@@ -529,7 +583,8 @@ static int remove_named_tab(Eterm name_atom)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -1133,6 +1188,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
+ erts_smp_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1141,34 +1197,65 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+
+ if (is_not_atom(BIF_ARG_2)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (is_not_atom(BIF_ARG_2)) {
- goto badarg;
+ (void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
+
+ if (is_small(BIF_ARG_1)) {
+ Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
+ lck2 = get_meta_main_tab_lock(slot);
+ }
+ else if (is_atom(BIF_ARG_1)) {
+ (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (lck1 == lck2)
+ lck2 = NULL;
+ else if (lck1 > lck2) {
+ erts_smp_rwmtx_t *tmp = lck1;
+ lck1 = lck2;
+ lck2 = tmp;
+ }
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
}
+ erts_smp_rwmtx_rwlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwlock(lck2);
+
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ if (!tb)
+ goto badarg;
+
if (is_not_atom(tb->common.id)) { /* Not a named table */
tb->common.the_name = BIF_ARG_2;
goto done;
}
- if (!insert_named_tab(BIF_ARG_2,tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
goto badarg;
- }
- if (!remove_named_tab(tb->common.id)) {
+
+ if (!remove_named_tab(tb, 1))
erl_exit(1,"Could not find named tab %s", tb->common.id);
- }
tb->common.id = tb->common.the_name = BIF_ARG_2;
done:
ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_RET(ret);
badarg:
- db_unlock(tb, LCK_WRITE);
+ if (tb)
+ db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_ERROR(BIF_P, BADARG);
}
@@ -1189,10 +1276,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
UWord heir_data;
Uint32 status;
Sint keypos;
- int is_named, is_fine_locked;
+ int is_named, is_fine_locked, frequent_read;
int cret;
DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
+ erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1205,6 +1293,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = 1;
is_named = 0;
is_fine_locked = 0;
+ frequent_read = 0;
heir = am_none;
heir_data = (UWord) am_undefined;
@@ -1238,6 +1327,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_fine_locked = 0;
} else break;
}
+ else if (tp[1] == am_read_concurrency) {
+ if (tp[2] == am_true) {
+ frequent_read = 1;
+ } else if (tp[2] == am_false) {
+ frequent_read = 0;
+ } else break;
+ }
else if (tp[1] == am_heir && tp[2] == am_none) {
heir = am_none;
heir_data = am_undefined;
@@ -1286,6 +1382,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
+#ifdef ERTS_SMP
+ if (frequent_read && !(status & DB_PRIVATE))
+ status |= DB_FREQ_READ;
+#endif
+
/* we create table outside any table lock
* and take the unusal cost of destroy table if it
* fails to find a slot
@@ -1308,7 +1409,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- db_init_lock(tb, "db_tab", "db_tab_fix");
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
+ "db_tab", "db_tab_fix");
tb->common.keypos = keypos;
tb->common.owner = BIF_P->id;
set_heir(BIF_P, tb, heir, heir_data);
@@ -1351,15 +1453,17 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.id = ret;
tb->common.slot = slot; /* store slot for erase */
- meta_main_tab_lock(slot);
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
meta_main_tab[slot].u.tb = tb;
ASSERT(IS_SLOT_ALIVE(slot));
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
- if (is_named && !insert_named_tab(BIF_ARG_1, tb)) {
- meta_main_tab_lock(slot);
+ if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
free_slot(slot);
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
db_lock_take_over_ref(tb,LCK_WRITE);
free_heir_data(tb);
@@ -1499,6 +1603,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
int trap;
DbTable* tb;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1520,13 +1625,23 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disapearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
/* We must keep the slot, to be found by db_proc_dead() if process dies */
MARK_SLOT_DEAD(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
- if (is_atom(tb->common.id)) {
- remove_named_tab(tb->common.id);
- }
+ erts_smp_rwmtx_rwunlock(mmtl);
+ if (is_atom(tb->common.id))
+ remove_named_tab(tb, 0);
if (tb->common.owner != BIF_P->id) {
DeclareTmpHeap(meta_tuple,3,BIF_P);
@@ -1919,14 +2034,15 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0)
previous = NIL;
j = 0;
for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) {
- meta_main_tab_lock(i);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(i)) {
j++;
tb = meta_main_tab[i].u.tb;
previous = CONS(hp, tb->common.id, previous);
hp += 2;
}
- meta_main_tab_unlock(i);
+ erts_smp_rwmtx_runlock(mmtl);
}
HRelease(BIF_P, hendp, hp);
BIF_RET(previous);
@@ -2630,12 +2746,30 @@ void init_db(void)
size_t size;
#ifdef ERTS_SMP
- for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
- erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ meta_main_tab_locks = erts_alloc(ERTS_ALC_T_DB_TABLES,
+ (sizeof(erts_meta_main_tab_lock_t)
+ * (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE+1)));
+
+ if ((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) != 0)
+ meta_main_tab_locks = ((erts_meta_main_tab_lock_t *)
+ ((((Uint) meta_main_tab_locks)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+
+ ASSERT((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) == 0);
+
+ for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
+ erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
+ "meta_main_tab_slot", make_small(i));
}
erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i));
}
#endif
@@ -2895,12 +3029,12 @@ retry:
to_pid, to_locks,
ERTS_P2P_FLG_TRY_LOCK);
if (to_proc == ERTS_PROC_LOCK_BUSY) {
- db_ref(tb); /* while unlocked */
+ db_ref(tb, LCK_NONE); /* while unlocked */
db_unlock(tb,LCK_WRITE);
to_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
to_pid, to_locks);
db_lock(tb,LCK_WRITE);
- tb = db_unref(tb);
+ tb = db_unref(tb, LCK_NONE);
ASSERT(tb != NULL);
if (tb->common.owner != p->id) {
@@ -3008,12 +3142,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (!IS_SLOT_FREE(ix)) {
- tb = db_ref(GET_ANY_SLOT_TAB(ix));
+ tb = db_ref(GET_ANY_SLOT_TAB(ix), LCK_WRITE);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int do_yield;
db_lock_take_over_ref(tb, LCK_WRITE);
@@ -3045,7 +3180,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
tb->common.status |= DB_DELETE;
if (is_atom(tb->common.id))
- remove_named_tab(tb->common.id);
+ remove_named_tab(tb, 0);
free_heir_data(tb);
free_fixations_locked(tb);
@@ -3095,12 +3230,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(ix)) {
- tb = db_ref(meta_main_tab[ix].u.tb);
+ tb = db_ref(meta_main_tab[ix].u.tb, LCK_WRITE_REC);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int reds;
DbFixation** pp;
@@ -3274,6 +3410,7 @@ unlocked:
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
+ db_ref(tb, LCK_WRITE); /* LCK_WRITE need it, but not LCK_READ */
erts_smp_rwmtx_runlock(&tb->common.rwlock);
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
@@ -3386,6 +3523,7 @@ static int free_table_cont(Process *p,
int clean_meta_tab)
{
Eterm result;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
if (!first) {
@@ -3411,9 +3549,20 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disapearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
free_slot(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
if (clean_meta_tab) {
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
@@ -3421,7 +3570,7 @@ static int free_table_cont(Process *p,
make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
}
- db_unref(tb);
+ db_unref(tb, LCK_NONE);
BUMP_REDS(p, 100);
return 0;
}
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 124129a371..95ffc3afcd 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -623,12 +623,16 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init(&tb->is_resizing, 0);
#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
int i;
+ if (tb->common.type & DB_FREQ_READ)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", make_small(i));
}
/* This important property is needed to guarantee that the buckets
* involved in a grow/shrink operation it protected by the same lock:
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 382e5dceb5..672c5f2cd1 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -240,8 +240,9 @@ typedef struct db_table_common {
#define DB_DUPLICATE_BAG (1 << 8)
#define DB_ORDERED_SET (1 << 9)
#define DB_DELETE (1 << 10) /* table is being deleted */
+#define DB_FREQ_READ (1 << 11)
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED)
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index aa37edafd1..d42820ddf3 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -186,10 +186,9 @@ int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_trylock(&dmtx->mtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_mutex_trylock()");
- return res;
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_trylock()");
+ return ethr_mutex_trylock(&dmtx->mtx);
#else
return 0;
#endif
@@ -199,9 +198,9 @@ void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_lock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_lock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_lock()");
+ ethr_mutex_lock(&dmtx->mtx);
#endif
}
@@ -209,9 +208,9 @@ void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_unlock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_unlock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_unlock()");
+ ethr_mutex_unlock(&dmtx->mtx);
#endif
}
@@ -256,9 +255,9 @@ void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_signal(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_signal()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_signal()");
+ ethr_cond_signal(&dcnd->cnd);
#endif
}
@@ -266,9 +265,9 @@ void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_broadcast(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_broadcast()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_broadcast()");
+ ethr_cond_broadcast(&dcnd->cnd);
#endif
}
@@ -277,18 +276,13 @@ void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res;
if (!dcnd || !dmtx) {
- res = EINVAL;
- error:
- fatal_error(res, "erl_drv_cond_wait()");
+ fatal_error(EINVAL, "erl_drv_cond_wait()");
}
while (1) {
- res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
+ int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
if (res == 0)
break;
- if (res != EINTR)
- goto error;
}
#endif
}
@@ -333,10 +327,9 @@ int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
+ return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -346,9 +339,9 @@ void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+ ethr_rwmutex_rlock(&drwlck->rwmtx);
#endif
}
@@ -356,9 +349,9 @@ void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_runlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_runlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
+ ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
}
@@ -366,10 +359,9 @@ int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrwlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
+ return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -379,9 +371,9 @@ void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+ ethr_rwmutex_rwlock(&drwlck->rwmtx);
#endif
}
@@ -389,9 +381,9 @@ void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwunlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwunlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
+ ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
}
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 15d9538301..74e3ca048e 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -37,8 +37,6 @@ static erts_smp_rwmtx_t erts_fun_table_lock;
#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
-#define erts_fun_init_lock() erts_smp_rwmtx_init(&erts_fun_table_lock, \
- "fun_tab")
static HashValue fun_hash(ErlFunEntry* obj);
static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -57,8 +55,12 @@ void
erts_init_fun_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
- erts_fun_init_lock();
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
f.alloc = (HALLOC_FUN) fun_alloc;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 4a4507b212..14bd10b42c 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -78,6 +78,7 @@ static erts_tid_t main_thread;
erts_cpu_info_t *erts_cpuinfo;
+int erts_reader_groups;
int erts_use_sender_punish;
/*
@@ -110,6 +111,7 @@ int erts_compat_rel;
static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
+static int max_reader_groups;
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -505,6 +507,7 @@ void erts_usage(void)
ERTS_MIN_COMPAT_REL, this_rel_num());
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
+ erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
@@ -538,6 +541,50 @@ void erts_usage(void)
erl_exit(-1, "");
}
+#ifdef USE_THREADS
+/*
+ * allocators for thread lib
+ */
+
+static void *ethr_std_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_STD, (Uint) size);
+}
+static void *ethr_std_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_STD, ptr, (Uint) size);
+}
+static void ethr_std_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_STD, ptr);
+}
+static void *ethr_sl_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_SL, (Uint) size);
+}
+static void *ethr_sl_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_SL, ptr, (Uint) size);
+}
+static void ethr_sl_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_SL, ptr);
+}
+static void *ethr_ll_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_LL, (Uint) size);
+}
+static void *ethr_ll_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_LL, ptr, (Uint) size);
+}
+static void ethr_ll_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_LL, ptr);
+}
+
+#endif
+
static void
early_init(int *argc, char **argv) /*
* Only put things here which are
@@ -615,9 +662,15 @@ early_init(int *argc, char **argv) /*
? ncpuavail
: (ncpuonln > 0 ? ncpuonln : no_schedulers));
+#ifdef ERTS_SMP
+ erts_max_main_threads = no_schedulers_online;
+#endif
+
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ max_reader_groups = ERTS_MAX_READER_GROUPS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -627,6 +680,24 @@ early_init(int *argc, char **argv) /*
}
if (argv[i][0] == '-') {
switch (argv[i][1]) {
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ char *arg = get_arg(sub_param+1, argv[i+1], &i);
+ if (sscanf(arg, "%d", &max_reader_groups) != 1) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %s\n", arg);
+ erts_usage();
+ }
+ if (max_reader_groups < 0) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %d\n",
+ max_reader_groups);
+ erts_usage();
+ }
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -699,6 +770,36 @@ early_init(int *argc, char **argv) /*
erts_early_init_scheduling(); /* Require allocators */
erts_init_utils(); /* Require allocators */
+#ifdef USE_THREADS
+ {
+ erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
+ elid.mem.std.alloc = ethr_std_alloc;
+ elid.mem.std.realloc = ethr_std_realloc;
+ elid.mem.std.free = ethr_std_free;
+ elid.mem.sl.alloc = ethr_sl_alloc;
+ elid.mem.sl.realloc = ethr_sl_realloc;
+ elid.mem.sl.free = ethr_sl_free;
+ elid.mem.ll.alloc = ethr_ll_alloc;
+ elid.mem.ll.realloc = ethr_ll_realloc;
+ elid.mem.ll.free = ethr_ll_free;
+
+#ifdef ERTS_SMP
+ elid.main_threads = erts_max_main_threads;
+#else
+ elid.main_threads = 1;
+#endif
+ elid.reader_groups = (elid.main_threads > 1
+ ? elid.main_threads
+ : 0);
+ if (max_reader_groups <= 1)
+ elid.reader_groups = 0;
+ if (elid.reader_groups > max_reader_groups)
+ elid.reader_groups = max_reader_groups;
+ erts_reader_groups = elid.reader_groups;
+
+ erts_thr_late_init(&elid);
+ }
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
#endif
@@ -1193,9 +1294,17 @@ erl_start(int argc, char **argv)
erts_async_thread_suggested_stack_size));
break;
- case 'r':
- erts_ets_realloc_always_moves = 1;
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ get_arg(sub_param+1, argv[i+1], &i);
+ /* already handled */
+ }
+ else {
+ erts_ets_realloc_always_moves = 1;
+ }
break;
+ }
case 'n': /* XXX obsolete */
break;
case 'c':
@@ -1280,6 +1389,7 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
+ erts_thr_set_main_status(1, 1);
set_main_stack_size();
process_main();
#endif
@@ -1353,7 +1463,7 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
+#if defined(USE_THREADS)
exit_async();
#endif
#if HAVE_ERTS_MSEG
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index cee470ae37..99cc80e259 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -96,10 +96,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_status", "pid" },
{ "proc_tab", NULL },
{ "ports_snapshot", NULL },
- { "db_tab", "address" },
- { "db_tab_fix", "address" },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
+ { "db_tab", "address" },
+ { "db_tab_fix", "address" },
{ "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
@@ -119,9 +119,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "child_status", NULL },
#endif
#ifdef __WIN32__
- { "sys_driver_data_lock", NULL },
+ { "sys_driver_data_lock", NULL },
#endif
- { "drv_ev_state_grow", NULL, },
+ { "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
{ "pollset_rm_list", NULL },
@@ -153,6 +153,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "instr", NULL },
{ "fix_alloc", "index" },
{ "alcu_allocator", "index" },
+ { "alcu_delayed_free", "index" },
{ "mseg", NULL },
#ifdef HALFWORD_HEAP
{ "pmmap", NULL },
@@ -177,17 +178,19 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
+ { "run_queue_sleep_list", "address" },
#endif
{ "alloc_thr_ix_lock", NULL },
#ifdef ERTS_SMP
- { "proc_lck_wtr_alloc", NULL },
+ { "proc_lck_qs_alloc", NULL },
#endif
#ifdef __WIN32__
#ifdef DEBUG
{ "save_ops_lock", NULL },
#endif
#endif
- { "mtrace_buf", NULL }
+ { "mtrace_buf", NULL },
+ { "erts_alloc_hard_debug", NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
@@ -199,6 +202,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
& ERTS_LC_FLG_LT_ALL \
& ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+static __decl_noreturn void __noreturn lc_abort(void);
+
static char *
lock_type(Uint16 flags)
{
@@ -222,7 +227,7 @@ rw_op_str(Uint16 flags)
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
erts_fprintf(stderr, "\nInternal error\n");
- abort();
+ lc_abort();
default:
break;
}
@@ -271,28 +276,18 @@ static erts_lc_free_block_t *free_blocks;
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
static ethr_spinlock_t free_blocks_lock;
-#define ERTS_LC_LOCK ethr_spin_lock
-#define ERTS_LC_UNLOCK ethr_spin_unlock
-#else
-static ethr_mutex free_blocks_lock;
-#define ERTS_LC_LOCK ethr_mutex_lock
-#define ERTS_LC_UNLOCK ethr_mutex_unlock
-#endif
static ERTS_INLINE void
lc_lock(void)
{
- if (ERTS_LC_LOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_lock(&free_blocks_lock);
}
static ERTS_INLINE void
lc_unlock(void)
{
- if (ERTS_LC_UNLOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_unlock(&free_blocks_lock);
}
static ERTS_INLINE void lc_free(void *p)
@@ -313,7 +308,7 @@ static void *lc_core_alloc(void)
{
lc_unlock();
erts_fprintf(stderr, "Lock checker out of memory!\n");
- abort();
+ lc_abort();
}
#else
@@ -327,7 +322,7 @@ static void *lc_core_alloc(void)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- abort();
+ lc_abort();
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -367,11 +362,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- abort();
+ lc_abort();
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
l_lcks->tid = erts_thr_self();
l_lcks->required.first = NULL;
@@ -513,7 +508,7 @@ uninitialized_lock(void)
{
erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
static void
@@ -523,7 +518,7 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -533,7 +528,7 @@ unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -541,7 +536,7 @@ unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -550,7 +545,7 @@ lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
print_lock("Lock order violation occured when locking ", lck, "!\n");
print_curr_locks(l_lcks);
print_lock_order();
- abort();
+ lc_abort();
}
static void
@@ -561,7 +556,7 @@ type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
print_lock(op, lck, "!\n");
ASSERT(l_lcks);
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -613,7 +608,7 @@ lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
}
}
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -621,7 +616,7 @@ unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking required ", lck, " lock!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -629,7 +624,7 @@ unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *l
{
print_lock("Unrequire on ", lck, " lock not required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -637,7 +632,7 @@ require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Require on ", lck, " lock already required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -645,7 +640,7 @@ required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Required ", lck, " lock not locked!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
@@ -658,13 +653,23 @@ thread_exit_handler(void)
erts_fprintf(stderr,
"Thread exiting while having locked locks!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
destroy_locked_locks(l_lcks);
/* erts_tsd_set(locks_key, NULL); */
}
}
+static __decl_noreturn void
+lc_abort(void)
+{
+#ifdef __WIN32__
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
void
erts_lc_set_thread_name(char *thread_name)
{
@@ -676,7 +681,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
}
}
@@ -686,7 +691,7 @@ erts_lc_assert_failed(char *file, int line, char *assertion)
erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
file, line, assertion);
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
return 0;
}
@@ -699,7 +704,7 @@ void erts_lc_fail(char *fmt, ...)
va_end(args);
erts_fprintf(stderr, "\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
@@ -719,7 +724,7 @@ erts_lc_get_lock_order_id(char *name)
"(update erl_lock_check.c)\n",
name);
}
- abort();
+ lc_abort();
return (Sint16) -1;
}
@@ -895,6 +900,25 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
}
+void
+erts_lc_check_no_locked_of_type(Uint16 flags)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (l_lcks) {
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
+ if (l_lck->flags & flags) {
+ erts_fprintf(stderr,
+ "Locked lock of type %s found which isn't "
+ "allowed here!\n",
+ lock_type(l_lck->flags));
+ print_curr_locks(l_lcks);
+ lc_abort();
+ }
+ }
+ }
+}
+
int
erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
@@ -1283,13 +1307,8 @@ erts_lc_init(void)
free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
-#ifdef ETHR_HAVE_NATIVE_LOCKS
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- abort();
-#else
- if (ethr_mutex_init(&free_blocks_lock) != 0)
- abort();
-#endif
+ lc_abort();
erts_tsd_key_create(&locks_key);
}
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index d5e2ede9ac..0372e6850d 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -77,6 +77,7 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
+void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 26028aeefc..239773f366 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -166,8 +166,8 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
int i;
type = lcnt_lock_type(lock->flag);
- ethr_atomic_read(&lock->r_state, &r_state);
- ethr_atomic_read(&lock->w_state, &w_state);
+ r_state = ethr_atomic_read(&lock->r_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (lock->flag & flag) {
@@ -394,10 +394,10 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
ASSERT(eltd);
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (option & ERTS_LCNT_LO_WRITE) {
- ethr_atomic_read(&lock->r_state, &r_state);
+ r_state = ethr_atomic_read(&lock->r_state);
ethr_atomic_inc( &lock->w_state);
}
if (option & ERTS_LCNT_LO_READ) {
@@ -423,7 +423,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ethr_atomic_inc( &lock->w_state);
eltd = lcnt_get_thread_data();
@@ -478,7 +478,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
#ifdef DEBUG
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
}
@@ -522,12 +522,12 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
#ifdef DEBUG
/* flowstate */
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
ethr_atomic_dec( &lock->flowstate);
/* write state */
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ASSERT(w_state > 0)
#endif
ethr_atomic_dec(&lock->w_state);
@@ -558,7 +558,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
if (res != EBUSY) {
#ifdef DEBUG
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
#endif
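
The erl_lock_count.c changes above are mechanical: the rewritten ethread library returns the value from ethr_atomic_read() instead of writing it through an output parameter. A before/after sketch of the calling convention (w_state is the local variable already used in this file):

    /* old ethread API: value delivered via an output parameter */
    ethr_atomic_read(&lock->w_state, &w_state);

    /* new ethread API: the read value is the return value */
    w_state = ethr_atomic_read(&lock->w_state);
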
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 9cf55ee319..b1478758a1 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -585,9 +585,7 @@ void erts_mtrace_init(char *receiver, char *nodename)
Uint16 port;
erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_set_forksafe(&mtrace_buf_mutex);
erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
- erts_mtx_set_forksafe(&mtrace_op_mutex);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 5865d33138..a3a1d95cd3 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -80,6 +80,8 @@ dist_table_alloc(void *dep_tmpl)
Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
return dep_tmpl;
@@ -92,7 +94,7 @@ dist_table_alloc(void *dep_tmpl)
dep->prev = NULL;
erts_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_x(&dep->rwmtx, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -580,6 +582,18 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
ErlNode ne;
ne.sysname = sysname;
ne.creation = creation;
+
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ res = hash_get(&erts_node_table, (void *) &ne);
+ if (res && res != erts_this_node) {
+ long refc = erts_refc_inctest(&res->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&res->refc, 1);
+ }
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ if (res)
+ return res;
+
erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
@@ -696,8 +710,12 @@ erts_set_this_node(Eterm sysname, Uint creation)
void erts_init_node_tables(void)
{
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
HashFunctions f;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
f.alloc = (HALLOC_FUN) dist_table_alloc;
@@ -719,9 +737,10 @@ void erts_init_node_tables(void)
erts_this_dist_entry->prev = NULL;
erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
- erts_smp_rwmtx_init_x(&erts_this_dist_entry->rwmtx,
- "dist_entry",
- make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_smp_rwmtx_init_opt_x(&erts_this_dist_entry->rwmtx,
+ &rwmtx_opt,
+ "dist_entry",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
erts_this_dist_entry->sysname = am_Noname;
erts_this_dist_entry->cid = NIL;
erts_this_dist_entry->connection_id = 0;
@@ -772,8 +791,8 @@ void erts_init_node_tables(void)
(void) hash_put(&erts_node_table, (void *) erts_this_node);
- erts_smp_rwmtx_init(&erts_node_table_rwmtx, "node_table");
- erts_smp_rwmtx_init(&erts_dist_table_rwmtx, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
references_atoms_need_init = 1;
}
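
The node and dist tables now create their rwlocks with the ERTS_SMP_RWMTX_TYPE_FREQUENT_READ option (plus ERTS_SMP_RWMTX_LONG_LIVED for the two global table locks), since these locks are read-locked far more often than they are write-locked. erts_find_or_insert_node() also gains a fast path that looks the node up under a read lock, bumping its reference count there, and only takes the write lock to insert on a miss. A rough sketch of that pattern with illustrative names (tab, tab_lock, key and ent are placeholders; the real code additionally handles entries with a pending delete via erts_refc_inctest()):

    erts_smp_rwmtx_rlock(&tab_lock);      /* readers can run concurrently */
    ent = hash_get(&tab, key);
    erts_smp_rwmtx_runlock(&tab_lock);
    if (ent)
        return ent;                       /* common case: no write lock taken */

    erts_smp_rwmtx_rwlock(&tab_lock);     /* rare case: insert under write lock */
    ent = hash_put(&tab, key);
    erts_smp_rwmtx_rwunlock(&tab_lock);
    return ent;
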
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 967a14f0d1..c10724b951 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -625,6 +625,7 @@ erts_port_task_schedule(Eterm id,
if (!enq_port) {
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ erts_smp_runq_unlock(runq);
}
else {
enqueue_port(runq, pp);
@@ -634,9 +635,10 @@ erts_port_task_schedule(Eterm id,
profile_runnable_port(pp, am_active);
}
+ erts_smp_runq_unlock(runq);
+
erts_smp_notify_inc_runq(runq);
}
- erts_smp_runq_unlock(runq);
return 0;
}
@@ -944,8 +946,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
- erts_smp_notify_inc_runq(xrunq);
erts_smp_runq_unlock(xrunq);
+ erts_smp_notify_inc_runq(xrunq);
}
#endif
port_was_enqueued = 1;
@@ -1112,7 +1114,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
dequeue_port(from_rq, prt);
erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
enqueue_port(to_rq, prt);
- erts_smp_notify_inc_runq(to_rq);
return ERTS_MIGRATE_SUCCESS;
}
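
In erl_port_task.c the erts_smp_notify_inc_runq() calls are moved to after the run-queue lock has been released, and the call in erts_port_migrate() is dropped entirely (evacuate_run_queue() in erl_process.c now checks the migrate result and notifies the target queue itself). With the new wake-up scheme a notification may poke a sleeping scheduler, and doing that while still holding the run-queue lock would presumably just make the woken scheduler block on that lock immediately.
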
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 41031f5468..abf4c9d96a 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -46,7 +46,13 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
-#define ERTS_SCHED_SLEEP_SPINCOUNT 10000
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \
+ (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT)
+#define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0
#define ERTS_WAKEUP_OTHER_LIMIT (100*CONTEXT_REDS/2)
#define ERTS_WAKEUP_OTHER_DEC 10
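
The single ERTS_SCHED_SLEEP_SPINCOUNT (previously 10000) is split into two levels: a scheduler spins only 10 times before sleeping in the system poll, but 10*1000 = 10000 times before sleeping on its thread event, yielding the thread every ERTS_SCHED_SPIN_UNTIL_YIELD (100) iterations. A scheduler about to suspend does not spin at all (spincount 0).
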
@@ -106,6 +112,10 @@ Uint erts_no_schedulers;
Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
+#ifdef ERTS_SMP
+Uint erts_max_main_threads;
+#endif
+
int erts_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -116,16 +126,34 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
int erts_disable_proc_not_running_opt;
-#define ERTS_SCHED_CHANGING_ONLINE 1
-#define ERTS_SCHED_CHANGING_MULTI_SCHED 2
+#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((long) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_MSB (((long) 1) << 1)
+#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((long) 1) << 2)
+
+#ifndef DEBUG
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic_set(&schdlr_sspnd.changing, (VAL))
+
+#else
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ long old_val__ = erts_smp_atomic_xchg(&schdlr_sspnd.changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+
+#endif
+
static struct {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- int changing;
int online;
int curr_online;
int wait_curr_online;
+ erts_smp_atomic_t changing;
erts_smp_atomic_t active;
struct {
erts_smp_atomic_t ongoing;
@@ -231,6 +259,17 @@ typedef union {
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+#ifdef ERTS_SMP
+
+typedef union {
+ ErtsSchedulerSleepInfo ssi;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
+} ErtsAlignedSchedulerSleepInfo;
+
+static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
+
+#endif
+
#ifndef BM_COUNTERS
static int processes_busy;
#endif
@@ -288,8 +327,15 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
-#define ERTS_RUNQ_IX(IX) (&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_SCHEDULER_IX(IX) (&erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_RUNQ_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \
+ &erts_aligned_run_queues[(IX)].runq)
+#define ERTS_SCHEDULER_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -353,6 +399,11 @@ static void signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size
#endif
+static int reader_group_lookup(int logical);
+static void create_tmp_cpu_topology_copy(erts_cpu_topology_t **cpudata,
+ int *cpudata_size);
+static void destroy_tmp_cpu_topology_copy(erts_cpu_topology_t *cpudata);
+
static void early_cpu_bind_init(void);
static void late_cpu_bind_init(void);
@@ -582,6 +633,76 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
#ifdef ERTS_SMP
+void
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
+{
+ switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_sys_schedule_interrupt(1);
+ break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_tse_set(ssi->event);
+ break;
+ case 0:
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
+ __FILE__, __LINE__);
+ break;
+ }
+}
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+void
+erts_smp_notify_check_children_needed(void)
+{
+ int i;
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ long aux_work;
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
+ aux_work = erts_smp_atomic_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
+ erts_sched_poke(ssi);
+ }
+}
+#endif
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+blockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
+ aux_work = erts_smp_atomic_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+ erts_check_children();
+ }
+#endif
+ }
+ return aux_work;
+}
+
+#endif
+
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+nonblockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
+
+ }
+}
+#endif
+
static void
prepare_for_block(void *vrq)
{
@@ -634,7 +755,31 @@ erts_active_schedulers(void)
return as;
}
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
#ifdef ERTS_SMP
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ }
+ return 0;
+#else
+ return !erts_port_task_have_outstanding_io_tasks();
+#endif
+}
+
+#ifdef ERTS_SMP
+
+static ERTS_INLINE void
+sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq->waiting < 0);
+ rq->waiting *= -1;
+}
static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
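
prepare_for_sys_schedule() centralizes the check for whether a scheduler may enter erl_sys_schedule(). In the SMP case it claims the global doing_sys_schedule flag with an atomic exchange and then re-checks for outstanding port I/O tasks, releasing the claim again if I/O tasks appeared in between, so at most one scheduler waits in the system poll and never while I/O tasks are queued.
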
@@ -695,101 +840,304 @@ non_empty_runq(ErtsRunQueue *rq)
}
}
-static ERTS_INLINE int
-sched_spin_wake(ErtsRunQueue *rq)
+static long
+sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = 0;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_atomic_inc(&rq->spin_wake);
- return 1;
- }
- return 0;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (!(oflgs & ERTS_SSI_FLG_SUSPENDED));
+ return oflgs;
}
-static ERTS_INLINE int
-sched_spin_wake_all(ErtsRunQueue *rq)
+static long
+sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = ERTS_SSI_FLG_WAITING;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0)
- erts_smp_atomic_add(&rq->spin_wake, val);
- return val;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ } while (oflgs & ERTS_SSI_FLG_WAITING);
+ return oflgs;
+}
+
+static long
+sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ long until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
}
+static long
+sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
+{
+ long oflgs;
+ long nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+
+ if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ }
+}
+
+#define ERTS_SCHED_WAIT_WOKEN(FLGS) \
+ (((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
+ != ERTS_SSI_FLG_WAITING)
+
static void
-sched_sys_wait(Uint no, ErtsRunQueue *rq)
+scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
- long dt;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ int spincount;
+ long flgs;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
+
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ erts_smp_spin_lock(&rq->sleepers.lock);
+ flgs = sched_prep_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+ /* Go suspend instead... */
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+ return;
+ }
+
+ ssi->prev = NULL;
+ ssi->next = rq->sleepers.list;
+ if (rq->sleepers.list)
+ rq->sleepers.list->prev = ssi;
+ rq->sleepers.list = ssi;
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+
+ /*
+ * If all schedulers are waiting, one of them *should*
+ * be waiting in erl_sys_schedule()
+ */
+
+ if (!prepare_for_sys_schedule()) {
+
+ sched_waiting(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ tse_wait:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ tse_blockable_aux_work:
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- sched_waiting_sys(no, rq);
+ while (1) {
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
- erl_sys_schedule(1); /* Might give us something to do */
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto tse_blockable_aux_work;
+ }
+#endif
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_runq_unlock(rq);
}
- }
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
+ erts_smp_runq_lock(rq);
+ sched_active(esdp->no, rq);
+
}
else {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ long dt;
+
+ erts_smp_atomic_set(&function_calls, 0);
+ *fcalls = 0;
+
+ sched_waiting_sys(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT;
+
+ while (spincount-- > 0) {
+
+ sys_poll_aux_work:
+
+ erl_sys_schedule(1); /* Might give us something to do */
+
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+
+ sys_aux_work:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ }
+
+ /*
+ * If we got new I/O tasks we aren't allowed to
+ * call erl_sys_schedule() until it is handled.
+ */
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+ * Got to check that we still got I/O tasks; otherwise
+ * we have to continue checking for I/O...
+ */
+ if (!prepare_for_sys_schedule()) {
+ spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
+ goto tse_wait;
+ }
+ }
+ }
+
+ erts_smp_runq_lock(rq);
+
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
*/
- if (!erts_port_task_have_outstanding_io_tasks()) {
-#endif
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+ * Got to check that we still got I/O tasks; otherwise
+ * we have to wait in erl_sys_schedule() after all...
+ */
+ if (prepare_for_sys_schedule())
+ goto do_sys_schedule;
+
+ /*
+ * Not allowed to wait in erl_sys_schedule;
+ * do tse wait instead...
+ */
+ sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ erts_smp_runq_unlock(rq);
+ spincount = 0;
+ goto tse_wait;
+ }
+ else {
+ do_sys_schedule:
erts_sys_schedule_interrupt(0);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ if (!(flgs & ERTS_SSI_FLG_WAITING))
+ goto sys_locked_woken;
+ erts_smp_runq_unlock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ goto sys_poll_aux_work;
+ }
+
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+
erts_smp_runq_unlock(rq);
erl_sys_schedule(0);
@@ -797,134 +1145,103 @@ sched_sys_wait(Uint no, ErtsRunQueue *rq)
dt = do_time_read_and_reset();
if (dt) bump_timer(dt);
- erts_smp_runq_lock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_WAITING)
+ goto sys_aux_work;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ sys_woken:
+ erts_smp_runq_lock(rq);
+ sys_locked_woken:
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ sched_active_sys(esdp->no, rq);
}
}
-#endif
- sched_active_sys(no, rq);
-}
-
-static void
-sched_cnd_wait(Uint no, ErtsRunQueue *rq)
-{
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#endif
-
- sched_waiting(no, rq);
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
-#else
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_mtx_unlock(&rq->mtx);
-
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_mtx_unlock(&rq->mtx);
- }
- }
-
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val == 0) {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
- }
- else {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- }
-#endif
-
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
- sched_active(no, rq);
}
-static void
-wake_one_scheduler(void)
-{
- ASSERT(erts_common_run_queue);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(erts_common_run_queue));
- if (erts_common_run_queue->waiting) {
- if (!sched_spin_wake(erts_common_run_queue)) {
- if (erts_common_run_queue->waiting == -1) /* One scheduler waiting
- and doing so in
- sys_schedule */
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&erts_common_run_queue->cnd);
- }
+static ERTS_INLINE long
+ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ /* reset all flags but suspended */
+ long oflgs;
+ long nflgs = 0;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return oflgs;
+ nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ xflgs = oflgs;
}
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq)
+wake_scheduler(ErtsRunQueue *rq, int incq, int one)
{
- ASSERT(!erts_common_run_queue);
- ASSERT(-1 <= rq->waiting && rq->waiting <= 1);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- if (rq->waiting && !rq->woken) {
- if (!sched_spin_wake(rq)) {
- if (rq->waiting < 0)
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&rq->cnd);
+ int res;
+ ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepList *sl;
+
+ /*
+ * The unlocked run queue is not strictly necessary
+ * from a thread safety or deadlock prevention
+ * perspective. It will, however, cost us performance
+     * if it is locked during wakeup of another scheduler,
+ * so all code *should* handle this without having
+ * the lock on the run queue.
+ */
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
+
+ sl = &rq->sleepers;
+
+ erts_smp_spin_lock(&sl->lock);
+ ssi = sl->list;
+ if (!ssi)
+ erts_smp_spin_unlock(&sl->lock);
+ else if (one) {
+ long flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
}
- rq->woken = 1;
- if (incq)
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
+
+ res = sl->list != NULL;
+ erts_smp_spin_unlock(&sl->lock);
+
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+
+ if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
non_empty_runq(rq);
}
+ else {
+ sl->list = NULL;
+ erts_smp_spin_unlock(&sl->lock);
+ do {
+ ErtsSchedulerSleepInfo *wake_ssi = ssi;
+ ssi = ssi->next;
+ erts_sched_finish_poke(ssi, ssi_flags_set_wake(wake_ssi));
+ } while (ssi);
+ }
}
static void
wake_all_schedulers(void)
{
- if (erts_common_run_queue) {
- erts_smp_runq_lock(erts_common_run_queue);
- if (erts_common_run_queue->waiting) {
- if (erts_common_run_queue->waiting < 0)
- erts_sys_schedule_interrupt(1);
- sched_spin_wake_all(erts_common_run_queue);
- erts_smp_cnd_broadcast(&erts_common_run_queue->cnd);
- }
- erts_smp_runq_unlock(erts_common_run_queue);
- }
+ if (erts_common_run_queue)
+ wake_scheduler(erts_common_run_queue, 0, 0);
else {
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- wake_scheduler(rq, 0);
- erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
}
}
}
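
Scheduler wake-up no longer uses the per-run-queue condition variable and the spin_waiter/spin_wake counters. Each run queue instead keeps a spinlock-protected list of sleeping schedulers (rq->sleepers). wake_scheduler() unlinks one sleeper, or takes the whole list when waking all, clears its sleep flags with ssi_flags_set_wake() and pokes it through erts_sched_finish_poke(), which sets the thread event or interrupts the system poll depending on how the scheduler went to sleep. As the comment above explains, the run-queue lock is deliberately not held while doing this, and wake_all_schedulers() reduces to calling wake_scheduler() per run queue (or once on the common run queue).
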
@@ -939,14 +1256,14 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
wrq = ERTS_RUNQ_IX(ix);
iflgs = erts_smp_atomic_read(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
- erts_smp_xrunq_lock(crq, wrq);
if (activate) {
if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ erts_smp_xrunq_lock(crq, wrq);
wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
+ erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0);
- erts_smp_xrunq_unlock(crq, wrq);
+ wake_scheduler(wrq, 0, 1);
return 1;
}
return 0;
@@ -992,15 +1309,13 @@ static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- wake_one_scheduler();
- else
- wake_scheduler(runq, 1);
+ if (runq)
+ wake_scheduler(runq, 1, 1);
#endif
}
void
-erts_smp_notify_inc_runq__(ErtsRunQueue *runq)
+erts_smp_notify_inc_runq(ErtsRunQueue *runq)
{
smp_notify_inc_runq(runq);
}
@@ -1146,20 +1461,23 @@ static void
evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
{
Port *prt;
+ int notify_to_rq = 0;
int prio;
int prt_locked = 0;
int rq_locked = 0;
int evac_rq_locked = 1;
+ ErtsMigrateResult mres;
erts_smp_runq_lock(evac_rq);
+ erts_smp_atomic_bor(&evac_rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
-
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1187,9 +1505,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
/* Evacuate scheduled ports */
prt = evac_rq->ports.start;
while (prt) {
- (void) erts_port_migrate(prt, &prt_locked,
+ mres = erts_port_migrate(prt, &prt_locked,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (prt_locked)
erts_smp_port_unlock(prt);
if (!evac_rq_locked) {
@@ -1218,9 +1538,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
goto end_of_proc;
}
- (void) erts_proc_migrate(proc, &proc_locks,
+ mres = erts_proc_migrate(proc, &proc_locks,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (proc_locks)
erts_smp_proc_unlock(proc, proc_locks);
if (!evac_rq_locked) {
@@ -1252,10 +1574,13 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (rq_locked)
erts_smp_runq_unlock(rq);
- if (!evac_rq_locked)
- erts_smp_runq_lock(evac_rq);
- wake_scheduler(evac_rq, 0);
- erts_smp_runq_unlock(evac_rq);
+ if (evac_rq_locked)
+ erts_smp_runq_unlock(evac_rq);
+
+ if (notify_to_rq)
+ smp_notify_inc_runq(rq);
+
+ wake_scheduler(evac_rq, 0, 1);
}
static int
@@ -1483,31 +1808,6 @@ try_steal_task(ErtsRunQueue *rq)
return res;
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
-void
-erts_smp_notify_check_children_needed(void)
-{
- int i;
- for (i = 0; i < erts_no_schedulers; i++) {
- erts_smp_runq_lock(ERTS_SCHEDULER_IX(i)->run_queue);
- ERTS_SCHEDULER_IX(i)->check_children = 1;
- if (!erts_common_run_queue)
- wake_scheduler(ERTS_SCHEDULER_IX(i)->run_queue, 0);
- erts_smp_runq_unlock(ERTS_SCHEDULER_IX(i)->run_queue);
- }
- if (ongoing_multi_scheduling_block()) {
- /* Also blocked schedulers need to check children */
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- for (i = 0; i < erts_no_schedulers; i++)
- ERTS_SCHEDULER_IX(i)->blocked_check_children = 1;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
- if (erts_common_run_queue)
- wake_all_schedulers();
-}
-#endif
-
/* Run queue balancing */
typedef struct {
@@ -2100,6 +2400,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_atomic_init(&no_empty_run_queues, 0);
#endif
+ erts_no_run_queues = n;
+
for (ix = 0; ix < n; ix++) {
int pix, rix;
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
@@ -2114,8 +2416,10 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
- erts_smp_atomic_init(&rq->spin_waiter, 0);
- erts_smp_atomic_init(&rq->spin_wake, 0);
+#ifdef ERTS_SMP
+ erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
+ rq->sleepers.list = NULL;
+#endif
rq->waiting = 0;
rq->woken = 0;
@@ -2166,7 +2470,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
- erts_no_run_queues = n;
#ifdef ERTS_SMP
@@ -2181,9 +2484,34 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#endif
+ n = (int) no_schedulers;
+ erts_no_schedulers = n;
+
+#ifdef ERTS_SMP
+ /* Create and initialize scheduler sleep info */
+
+ aligned_sched_sleep_info = erts_alloc(ERTS_ALC_T_SCHDLR_SLP_INFO,
+ (sizeof(ErtsAlignedSchedulerSleepInfo)
+ *(n+1)));
+ if ((((Uint) aligned_sched_sleep_info) & ERTS_CACHE_LINE_MASK) == 0)
+ aligned_sched_sleep_info = ((ErtsAlignedSchedulerSleepInfo *)
+ ((((Uint) aligned_sched_sleep_info)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ for (ix = 0; ix < n; ix++) {
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+#if 0 /* no need to initialize these... */
+ ssi->next = NULL;
+ ssi->prev = NULL;
+#endif
+ erts_smp_atomic_init(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in sched_thread_func */
+ erts_smp_atomic_init(&ssi->aux_work, 0);
+ }
+#endif
+
/* Create and initialize scheduler specific data */
- n = (int) no_schedulers;
erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
(sizeof(ErtsAlignedSchedulerData)
*(n+1)));
@@ -2200,6 +2528,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
+ esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->free_process = NULL;
#if HALFWORD_HEAP
/* Registers need to be heap allocated (correct memory range) for tracing to work */
@@ -2228,11 +2557,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->check_children = 0;
- esdp->blocked_check_children = 0;
-#endif
- erts_smp_atomic_init(&esdp->suspended, 0);
erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2241,7 +2565,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- schdlr_sspnd.changing = 0;
+ erts_smp_atomic_init(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
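
schdlr_sspnd.changing turns from a plain int holding one state into an atomic bitmask with three bits: ERTS_SCHDLR_SSPND_CHNG_ONLN (the online count is being changed), ERTS_SCHDLR_SSPND_CHNG_MSB (a multi-scheduling block is in progress) and ERTS_SCHDLR_SSPND_CHNG_WAITER (the initiating process is still waiting for the change to complete). In debug builds the ERTS_SCHDLR_SSPND_CHNG_SET() macro uses an atomic exchange so it can assert the expected previous value; in release builds it is a plain atomic set.
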
@@ -2264,7 +2588,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic_set(&(ERTS_SCHEDULER_IX(ix)->suspended), 1);
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2275,7 +2600,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
schdlr_sspnd.wait_curr_online = no_schedulers_online;
schdlr_sspnd.curr_online *= 2; /* Boot strapping... */
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
erts_smp_atomic_init(&doing_sys_schedule, 0);
@@ -2423,13 +2749,115 @@ susp_sched_resume_block(void *unused)
}
static void
+scheduler_ix_resume_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long oflgs;
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, 0, xflgs);
+ if (oflgs == xflgs) {
+ erts_sched_finish_poke(ssi, oflgs);
+ break;
+ }
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+}
+
+static long
+sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = xpct;
+
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+
+ return oflgs;
+}
+
+static long
+sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
+
+static long
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ }
+}
+
+static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
+ long flgs;
+ int changing;
long no = (long) esdp->no;
ErtsRunQueue *rq = esdp->run_queue;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
int curr_online = 1;
int wake = 0;
+ int reset_read_group = 0;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
/*
* Schedulers may be suspended in two different ways:
@@ -2451,113 +2879,151 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (scheduler2cpu_map[esdp->no].bound_id >= 0
&& erts_unbind_from_cpu(erts_cpuinfo) == 0) {
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
+ reset_read_group = 1;
}
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (reset_read_group)
+ erts_smp_rwmtx_set_reader_group(0);
+
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(0, 0);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
- ASSERT(active_schedulers >= 1);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED) {
- if (active_schedulers == schdlr_sspnd.msb.wait_active)
- wake = 1;
- if (active_schedulers == 1)
- schdlr_sspnd.changing = 0;
- }
-
- while (1) {
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children;
- erts_smp_runq_lock(esdp->run_queue);
- check_children = esdp->check_children;
- esdp->check_children = 0;
- erts_smp_runq_unlock(esdp->run_queue);
- if (check_children) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_check_children();
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == schdlr_sspnd.msb.wait_active)
+ wake = 1;
+ if (active_schedulers == 1) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
}
-#endif
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE) {
- int changed = 0;
- if (no > schdlr_sspnd.online && curr_online) {
- schdlr_sspnd.curr_online--;
- curr_online = 0;
- changed = 1;
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > schdlr_sspnd.online && curr_online) {
+ schdlr_sspnd.curr_online--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= schdlr_sspnd.online && !curr_online) {
+ schdlr_sspnd.curr_online++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
+ wake = 1;
+ if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
}
- else if (no <= schdlr_sspnd.online && !curr_online) {
- schdlr_sspnd.curr_online++;
- curr_online = 1;
- changed = 1;
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
}
- if (changed
- && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- wake = 1;
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online)
- schdlr_sspnd.changing = 0;
- }
- if (wake) {
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- wake = 0;
- }
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ|ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ blockable_aux_work:
+ blockable_aux_work(esdp, ssi, aux_work);
+#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (1) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ while (1) {
+ long flgs;
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->blocked_check_children)
- break;
+ flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto blockable_aux_work;
+ }
#endif
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ }
+
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE)
- break;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ }
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+ active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && schdlr_sspnd.online == active_schedulers) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->blocked_check_children = 0;
-#endif
+ ASSERT(no <= schdlr_sspnd.online);
+ ASSERT(!erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
}
- active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED
- && schdlr_sspnd.online == active_schedulers) {
- schdlr_sspnd.changing = 0;
- }
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ ASSERT(curr_online);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_active);
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(1, (int) esdp->no);
+
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
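
suspend_scheduler() is rewritten around the scheduler sleep info: instead of waiting on the schdlr_sspnd condition variable it marks itself SUSPENDED with sched_prep_spin_suspended(), optionally spins, then sleeps on its thread event via sched_set_suspended_sleeptype() and erts_tse_wait(), handling blockable aux work whenever it wakes. While suspended it also drops out of the set of ethread 'main threads' (when its scheduler number is low enough) and, if it was unbound from its CPU, resets its rwlock reader group to 0, restoring both when it resumes. Waiters are now signalled with erts_smp_cnd_signal() rather than broadcast.
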
@@ -2622,8 +3088,10 @@ erts_schedulers_state(Uint *total,
int yield_allowed)
{
int res;
+ long changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (yield_allowed && schdlr_sspnd.changing)
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
*active = *online = schdlr_sspnd.online;
@@ -2643,6 +3111,7 @@ erts_set_schedulers_online(Process *p,
Sint *old_no)
{
int ix, res, no, have_unlocked_plocks;
+ long changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -2652,7 +3121,8 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
else {
@@ -2661,17 +3131,19 @@ erts_set_schedulers_online(Process *p,
res = ERTS_SCHDLR_SSPND_DONE;
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
schdlr_sspnd.online = no;
if (no > online) {
int ix;
schdlr_sspnd.wait_curr_online = no;
- if (ongoing_multi_scheduling_block())
- /* No schedulers to resume */;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
else if (erts_common_run_queue) {
for (ix = online; ix < no; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 0);
+ scheduler_ix_resume_wake(ix);
}
else {
if (plocks) {
@@ -2685,6 +3157,7 @@ erts_set_schedulers_online(Process *p,
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/*
* Spread evacuation paths among all online
@@ -2699,7 +3172,6 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
res = ERTS_SCHDLR_SSPND_DONE;
}
else /* if (no < online) */ {
@@ -2716,12 +3188,17 @@ erts_set_schedulers_online(Process *p,
schdlr_sspnd.wait_curr_online = no+1;
}
- if (ongoing_multi_scheduling_block())
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- else if (erts_common_run_queue) {
+ if (ongoing_multi_scheduling_block()) {
for (ix = no; ix < online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 1);
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else if (erts_common_run_queue) {
+ for (ix = no; ix < online; ix++) {
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic_bor(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
wake_all_schedulers();
}
else {
@@ -2748,7 +3225,10 @@ erts_set_schedulers_online(Process *p,
erts_smp_atomic_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ERTS_FOREACH_OP_RUNQ(rq, wake_scheduler(rq, 0));
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq, 0, 1);
+ }
}
}
@@ -2762,6 +3242,12 @@ erts_set_schedulers_online(Process *p,
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -2776,11 +3262,12 @@ ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
int ix, res, have_unlocked_plocks = 0;
+ long changing;
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
else if (on) { /* ------ BLOCK ------ */
@@ -2794,19 +3281,22 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
else {
+ int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
+ ASSERT(0 == erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
- if (schdlr_sspnd.online == 1) {
+ if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
if (p->scheduler_data->no == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 1;
@@ -2820,17 +3310,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 1);
+ for (ix = 1; ix < online; ix++)
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
erts_smp_mtx_lock(&balance_info.update_mtx);
erts_smp_atomic_set(&balance_info.used_runqs, 1);
- for (ix = 0; ix < schdlr_sspnd.online; ix++) {
+ for (ix = 0; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
erts_smp_runq_unlock(rq);
}
@@ -2855,6 +3347,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -2898,7 +3397,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.msb.procs)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
#ifdef DEBUG
ERTS_FOREACH_RUNQ(rq,
{
@@ -2925,13 +3424,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
- schdlr_sspnd.changing = 0;
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 0);
+ erts_smp_atomic_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
else {
int online = schdlr_sspnd.online;
@@ -2948,6 +3447,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/* Spread evacuation paths among all online run queues */
@@ -2963,7 +3463,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
res = ERTS_SCHDLR_SSPND_DONE;
}
@@ -3035,18 +3534,34 @@ erts_multi_scheduling_blockers(Process *p)
static void *
sched_thread_func(void *vesdp)
{
+#ifdef ERTS_SMP
+ Uint no = ((ErtsSchedulerData *) vesdp)->no;
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
- Uint no = ((ErtsSchedulerData *) vesdp)->no;
erts_snprintf(&buf[0], 31, "scheduler %bpu", no);
erts_lc_set_thread_name(&buf[0]);
}
#endif
- erts_alloc_reg_scheduler_id(((ErtsSchedulerData *) vesdp)->no);
+ erts_alloc_reg_scheduler_id(no);
erts_tsd_set(sched_data_key, vesdp);
#ifdef ERTS_SMP
+
+ if (no <= erts_max_main_threads) {
+ erts_thr_set_main_status(1, (int) no);
+ if (erts_reader_groups) {
+ int rg = (int) no;
+ if (rg > erts_reader_groups)
+ rg = (((int) no) - 1) % erts_reader_groups + 1;
+ erts_smp_rwmtx_set_reader_group(rg);
+ }
+ }
+
erts_proc_lock_prepare_proc_lock_waiter();
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+
+
#endif
erts_register_blockable_thread();
#ifdef HIPE
@@ -3055,30 +3570,30 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE);
+ ASSERT(erts_smp_atomic_read(&schdlr_sspnd.changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
- schdlr_sspnd.curr_online--;
+ if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (((ErtsSchedulerData *) vesdp)->no != 1)
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ }
- if (((ErtsSchedulerData *) vesdp)->no != 1) {
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- schdlr_sspnd.changing = 0;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ if (((ErtsSchedulerData *) vesdp)->no == 1) {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
}
- }
- else if (schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- schdlr_sspnd.changing = 0;
- else {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- ASSERT(!schdlr_sspnd.changing);
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
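
At start-up each scheduler thread now registers itself with the ethread library as a 'main thread' (for scheduler numbers up to erts_max_main_threads) and picks a reader group for the frequent-read rwlocks before it starts scheduling. The assignment wraps when there are more schedulers than groups: rg = ((no - 1) % erts_reader_groups) + 1, so with erts_reader_groups = 4, scheduler 6 ends up in group ((6 - 1) % 4) + 1 = 2. The online-count bootstrap is also reworked: the scheduler whose decrement brings curr_online down to wait_curr_online clears the ONLN change bit and signals scheduler 1, which waits for that and then clears the WAITER bit.
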
@@ -3428,6 +3943,7 @@ processor_order_cmp(const void *vx, const void *vy)
static void
check_cpu_bind(ErtsSchedulerData *esdp)
{
+ int rg = 0;
int res;
int cpu_id;
erts_smp_runq_unlock(esdp->run_queue);
@@ -3459,6 +3975,12 @@ check_cpu_bind(ErtsSchedulerData *esdp)
erts_send_error_to_logger_nogl(dsbufp);
}
}
+ if (erts_reader_groups) {
+ if (esdp->cpu_id >= 0)
+ rg = reader_group_lookup(esdp->cpu_id);
+ else
+ rg = (((int) esdp->no) - 1) % erts_reader_groups + 1;
+ }
erts_smp_runq_lock(esdp->run_queue);
#ifdef ERTS_SMP
if (erts_common_run_queue)
@@ -3469,6 +3991,8 @@ check_cpu_bind(ErtsSchedulerData *esdp)
#endif
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (erts_reader_groups)
+ erts_smp_rwmtx_set_reader_group(rg);
}
static void
@@ -3497,11 +4021,13 @@ signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
wake_all_schedulers();
}
else {
- ERTS_FOREACH_RUNQ(rq,
- {
+ for (s_ix = 0; s_ix < erts_no_run_queues; s_ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(s_ix);
+ erts_smp_runq_lock(rq);
rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- wake_scheduler(rq, 0);
- });
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
+ };
}
#else
check_cpu_bind(erts_get_scheduler_data());
@@ -3541,6 +4067,490 @@ erts_init_scheduler_bind_type(char *how)
return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS;
}
+/*
+ * reader groups map
+ */
+
+typedef struct {
+ int level[ERTS_TOPOLOGY_MAX_DEPTH+1];
+} erts_avail_cput;
+
+typedef struct {
+ int *map;
+ int size;
+ int groups;
+} erts_reader_groups_map_test;
+
+typedef struct {
+ int id;
+ int sub_levels;
+ int reader_groups;
+} erts_rg_count_t;
+
+typedef struct {
+ int logical;
+ int reader_group;
+} erts_reader_groups_map_t;
+
+typedef struct {
+ erts_reader_groups_map_t *map;
+ int map_size;
+ int logical_processors;
+ int groups;
+} erts_make_reader_groups_map_test;
+
+static int reader_groups_available_cpu_check;
+static int reader_groups_logical_processors;
+static int reader_groups_map_size;
+static erts_reader_groups_map_t *reader_groups_map;
+
+#define ERTS_TOPOLOGY_RG ERTS_TOPOLOGY_MAX_DEPTH
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test);
+
+static Eterm
+get_reader_groups_map(Process *c_p,
+ erts_reader_groups_map_t *map,
+ int map_size,
+ int logical_processors)
+{
+#ifdef DEBUG
+ Eterm *endp;
+#endif
+ Eterm res = NIL, tuple;
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(c_p, logical_processors*(2+3));
+#ifdef DEBUG
+ endp = hp + logical_processors*(2+3);
+#endif
+ for (i = map_size - 1; i >= 0; i--) {
+ if (map[i].logical >= 0) {
+ tuple = TUPLE2(hp,
+ make_small(map[i].logical),
+ make_small(map[i].reader_group));
+ hp += 3;
+ res = CONS(hp, tuple, res);
+ hp += 2;
+ }
+ }
+ ASSERT(hp == endp);
+ return res;
+}
+
+Eterm
+erts_debug_reader_groups_map(Process *c_p, int groups)
+{
+ Eterm res;
+ erts_make_reader_groups_map_test test;
+
+ test.groups = groups;
+ make_reader_groups_map(&test);
+ if (!test.map)
+ res = NIL;
+ else {
+ res = get_reader_groups_map(c_p,
+ test.map,
+ test.map_size,
+ test.logical_processors);
+ erts_free(ERTS_ALC_T_TMP, test.map);
+ }
+ return res;
+}
+
+
+Eterm
+erts_get_reader_groups_map(Process *c_p)
+{
+ Eterm res;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ res = get_reader_groups_map(c_p,
+ reader_groups_map,
+ reader_groups_map_size,
+ reader_groups_logical_processors);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return res;
+}
+
+static void
+make_available_cpu_topology(erts_avail_cput *no,
+ erts_avail_cput *avail,
+ erts_cpu_topology_t *cpudata,
+ int *size,
+ int test)
+{
+ int len = *size;
+ erts_cpu_topology_t last;
+ int a, i, j;
+
+ no->level[ERTS_TOPOLOGY_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_CORE] = -1;
+ no->level[ERTS_TOPOLOGY_THREAD] = -1;
+ no->level[ERTS_TOPOLOGY_LOGICAL] = -1;
+
+ last.node = INT_MIN;
+ last.processor = INT_MIN;
+ last.processor_node = INT_MIN;
+ last.core = INT_MIN;
+ last.thread = INT_MIN;
+ last.logical = INT_MIN;
+
+ a = 0;
+
+ for (i = 0; i < len; i++) {
+
+ if (!test && !erts_is_cpu_available(erts_cpuinfo, cpudata[i].logical))
+ continue;
+
+ if (last.node != cpudata[i].node)
+ goto node;
+ if (last.processor != cpudata[i].processor)
+ goto processor;
+ if (last.processor_node != cpudata[i].processor_node)
+ goto processor_node;
+ if (last.core != cpudata[i].core)
+ goto core;
+ ASSERT(last.thread != cpudata[i].thread);
+ goto thread;
+
+ node:
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ processor:
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ processor_node:
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ core:
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ thread:
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ for (j = 0; j < ERTS_TOPOLOGY_LOGICAL; j++)
+ avail[a].level[j] = no->level[j];
+
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL] = cpudata[i].logical;
+ avail[a].level[ERTS_TOPOLOGY_RG] = 0;
+
+ ASSERT(last.logical != cpudata[a].logical);
+
+ last = cpudata[i];
+ a++;
+ }
+
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ *size = a;
+}
+
+static int
+reader_group_lookup(int logical)
+{
+ int start = logical % reader_groups_map_size;
+ int ix = start;
+
+ do {
+ if (reader_groups_map[ix].logical == logical) {
+ ASSERT(reader_groups_map[ix].reader_group > 0);
+ return reader_groups_map[ix].reader_group;
+ }
+ ix++;
+ if (ix == reader_groups_map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Logical cpu id %d not found\n", logical);
+}
+
+static void
+reader_group_insert(erts_reader_groups_map_t *map, int map_size,
+ int logical, int reader_group)
+{
+ int start = logical % map_size;
+ int ix = start;
+
+ do {
+ if (map[ix].logical < 0) {
+ map[ix].logical = logical;
+ map[ix].reader_group = reader_group;
+ return;
+ }
+ ix++;
+ if (ix == map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Reader groups map full\n");
+}
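+
+/*
+ */

reader_group_lookup() and reader_group_insert() form a small open-addressed hash table with linear probing: start at logical % map_size and walk forward, wrapping at the end, until a matching (lookup) or free (insert) slot is found. The map is allocated with 2*avail_sz+1 slots (see make_reader_groups_map() below), so it is never more than half full and probing always terminates. A stripped-down sketch of the same probing scheme, with hypothetical names:

    typedef struct { int key; int val; } slot_t;    /* key < 0 means empty */

    /* Linear-probing insert; assumes the table has at least one free slot,
     * which the 2*n+1 sizing guarantees. */
    static void
    probe_insert(slot_t *tab, int size, int key, int val)
    {
        int ix = key % size;
        while (tab[ix].key >= 0)       /* slot taken, try the next one */
            ix = (ix + 1) % size;      /* wrap around at the end */
        tab[ix].key = key;
        tab[ix].val = val;
    }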
+
+
+static int
+sub_levels(erts_rg_count_t *rgc, int level, int aix, int avail_sz, erts_avail_cput *avail)
+{
+ int sub_level = level+1;
+ int last = -1;
+ rgc->sub_levels = 0;
+
+ do {
+ if (last != avail[aix].level[sub_level]) {
+ rgc->sub_levels++;
+ last = avail[aix].level[sub_level];
+ }
+ aix++;
+ }
+ while (aix < avail_sz && rgc->id == avail[aix].level[level]);
+ rgc->reader_groups = 0;
+ return aix;
+}
+
+static int
+write_reader_groups(int *rgp, erts_rg_count_t *rgcp,
+ int level, int a,
+ int avail_sz, erts_avail_cput *avail)
+{
+ int rg = *rgp;
+ int sub_level = level+1;
+ int sl_per_gr = rgcp->sub_levels / rgcp->reader_groups;
+ int xsl = rgcp->sub_levels % rgcp->reader_groups;
+ int sls = 0;
+ int last = -1;
+ int xsl_rg_lim = (rgcp->reader_groups - xsl) + rg + 1;
+
+    ASSERT(level < 0 || avail[a].level[level] == rgcp->id);
+
+ do {
+ if (last != avail[a].level[sub_level]) {
+ if (!sls) {
+ sls = sl_per_gr;
+ rg++;
+ if (rg >= xsl_rg_lim)
+ sls++;
+ }
+ last = avail[a].level[sub_level];
+ sls--;
+ }
+ avail[a].level[ERTS_TOPOLOGY_RG] = rg;
+ a++;
+ } while (a < avail_sz && (level < 0
+ || avail[a].level[level] == rgcp->id));
+
+ ASSERT(rgcp->reader_groups == rg - *rgp);
+
+ *rgp = rg;
+
+ return a;
+}
+
+static int
+rg_count_sub_levels_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ if (x->sub_levels != y->sub_levels)
+ return y->sub_levels - x->sub_levels;
+ return x->id - y->id;
+}
+
+static int
+rg_count_id_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ return x->id - y->id;
+}
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test)
+{
+ int i, spread_level, avail_sz;
+ erts_avail_cput no, *avail;
+ erts_cpu_topology_t *cpudata;
+ erts_reader_groups_map_t *map;
+ int map_sz;
+ int groups = erts_reader_groups;
+
+ if (test) {
+ test->map = NULL;
+ test->map_size = 0;
+ groups = test->groups;
+ }
+
+ if (!groups)
+ return;
+
+ if (!test) {
+ if (reader_groups_map)
+ erts_free(ERTS_ALC_T_RDR_GRPS_MAP, reader_groups_map);
+
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &avail_sz);
+
+ if (!cpudata)
+ return;
+
+ cpu_bind_order_sort(cpudata,
+ avail_sz,
+ ERTS_CPU_BIND_NO_SPREAD,
+ 1);
+
+ avail = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(erts_avail_cput)*avail_sz);
+
+ make_available_cpu_topology(&no, avail, cpudata,
+ &avail_sz, test != NULL);
+
+ destroy_tmp_cpu_topology_copy(cpudata);
+
+ map_sz = avail_sz*2+1;
+
+ if (test) {
+ map = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ test->map = map;
+ test->map_size = map_sz;
+ test->logical_processors = avail_sz;
+ }
+ else {
+ map = erts_alloc(ERTS_ALC_T_RDR_GRPS_MAP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ reader_groups_map = map;
+ reader_groups_logical_processors = avail_sz;
+ reader_groups_map_size = map_sz;
+
+ }
+
+ for (i = 0; i < map_sz; i++) {
+ map[i].logical = -1;
+ map[i].reader_group = 0;
+ }
+
+ spread_level = ERTS_TOPOLOGY_CORE;
+ for (i = ERTS_TOPOLOGY_NODE; i < ERTS_TOPOLOGY_THREAD; i++) {
+ if (no.level[i] > groups) {
+ spread_level = i;
+ break;
+ }
+ }
+
+ if (no.level[spread_level] <= groups) {
+ int a, rg, last = -1;
+ rg = 0;
+ ASSERT(spread_level == ERTS_TOPOLOGY_CORE);
+ for (a = 0; a < avail_sz; a++) {
+ if (last != avail[a].level[spread_level]) {
+ rg++;
+ last = avail[a].level[spread_level];
+ }
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ rg);
+ }
+ }
+ else { /* groups < no.level[spread_level] */
+ erts_rg_count_t *rg_count;
+ int a, rg, tl, toplevels;
+
+ tl = spread_level-1;
+
+ if (spread_level == ERTS_TOPOLOGY_NODE)
+ toplevels = 1;
+ else
+ toplevels = no.level[tl];
+
+ rg_count = erts_alloc(ERTS_ALC_T_TMP,
+ toplevels*sizeof(erts_rg_count_t));
+
+ if (toplevels == 1) {
+ rg_count[0].id = 0;
+ rg_count[0].sub_levels = no.level[spread_level];
+ rg_count[0].reader_groups = groups;
+ }
+ else {
+ int rgs_per_tl, rgs;
+ rgs = groups;
+ rgs_per_tl = rgs / toplevels;
+
+ a = 0;
+ for (i = 0; i < toplevels; i++) {
+ rg_count[i].id = avail[a].level[tl];
+ a = sub_levels(&rg_count[i], tl, a, avail_sz, avail);
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_sub_levels_compare);
+
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels < rgs_per_tl) {
+ rg_count[i].reader_groups = rg_count[i].sub_levels;
+ rgs -= rg_count[i].sub_levels;
+ }
+ else {
+ rg_count[i].reader_groups = rgs_per_tl;
+ rgs -= rgs_per_tl;
+ }
+ }
+
+ while (rgs > 0) {
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels == rg_count[i].reader_groups)
+ break;
+ else {
+ rg_count[i].reader_groups++;
+ if (--rgs == 0)
+ break;
+ }
+ }
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_id_compare);
+ }
+
+ a = i = rg = 0;
+ while (a < avail_sz) {
+ a = write_reader_groups(&rg, &rg_count[i], tl,
+ a, avail_sz, avail);
+ i++;
+ }
+
+ ASSERT(groups == rg);
+
+ for (a = 0; a < avail_sz; a++)
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ avail[a].level[ERTS_TOPOLOGY_RG]);
+
+ erts_free(ERTS_ALC_T_TMP, rg_count);
+ }
+
+ erts_free(ERTS_ALC_T_TMP, avail);
+}
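
make_reader_groups_map() spreads the configured number of reader groups over the first topology level that has more entries than groups (typically cores). When several top-level entries (nodes or processors) must share the groups, the code sorts the entries by descending core count, gives each one groups/toplevels groups capped at its own core count, and then hands out the remainder one group at a time to entries that can still take more. A compressed sketch of that split, with hypothetical names, without the sorting step, and assuming groups does not exceed the total core count:

    /* out[i] receives the number of reader groups assigned to top-level
     * entry i, which has counts[i] cores under it. */
    static void
    split_groups(int *out, const int *counts, int n, int groups)
    {
        int i, per = groups / n, left = groups;

        for (i = 0; i < n; i++) {
            out[i] = counts[i] < per ? counts[i] : per;   /* cap at core count */
            left -= out[i];
        }
        while (left > 0) {                    /* distribute the remainder */
            for (i = 0; i < n && left > 0; i++) {
                if (out[i] < counts[i]) {
                    out[i]++;
                    left--;
                }
            }
        }
    }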
+
+/*
+ * CPU topology
+ */
+
typedef struct {
int *id;
int used;
@@ -4054,6 +5064,8 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
sizeof(erts_cpu_topology_t)*cpudata_size);
}
+ make_reader_groups_map(NULL);
+
signal_schedulers_bind_change(cpudata, cpudata_size);
done:
@@ -4457,6 +5469,11 @@ early_cpu_bind_init(void)
cpu_bind_order = ERTS_CPU_BIND_UNDEFINED;
+ reader_groups_available_cpu_check = 1;
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+
if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
|| ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
system_cpudata_size)) {
@@ -4492,6 +5509,8 @@ late_cpu_bind_init(void)
: ERTS_CPU_BIND_NONE);
}
+ make_reader_groups_map(NULL);
+
if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
erts_cpu_topology_t *cpudata;
int cpudata_size;
@@ -5357,7 +6376,7 @@ dequeue_process(ErtsRunQueue *runq, Process *p)
}
/* schedule a process */
-static ERTS_INLINE void
+static ERTS_INLINE ErtsRunQueue *
internal_add_to_runq(ErtsRunQueue *runq, Process *p)
{
Uint32 prev_status = p->status;
@@ -5368,12 +6387,12 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
- return;
+ return NULL;
else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
ASSERT(p->status != P_SUSPENDED);
ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- return;
+ return NULL;
}
ASSERT(!p->scheduler_data);
#endif
@@ -5412,20 +6431,23 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
profile_runnable_proc(p, am_active);
}
- smp_notify_inc_runq(add_runq);
-
if (add_runq != runq)
erts_smp_runq_unlock(add_runq);
+
+ return add_runq;
}
void
erts_add_to_runq(Process *p)
{
+ ErtsRunQueue *notify_runq;
ErtsRunQueue *runq = erts_get_runq_proc(p);
erts_smp_runq_lock(runq);
- internal_add_to_runq(runq, p);
+ notify_runq = internal_add_to_runq(runq, p);
erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(notify_runq);
+
}
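
The change to erts_add_to_runq() shows a pattern used throughout the rest of the patch: internal_add_to_runq() no longer notifies while the run-queue lock is held; it returns the queue that needs notification, and the caller invokes smp_notify_inc_runq() after unlocking, so a woken scheduler is not forced to block on the lock its waker still holds. A schematic sketch of that shape, with hypothetical names:

    struct runq;
    struct proc;

    /* Hypothetical stand-ins for the ERTS primitives. */
    void         runq_lock(struct runq *rq);
    void         runq_unlock(struct runq *rq);
    struct runq *add_to_queue(struct runq *rq, struct proc *p); /* NULL: no wakeup */
    void         notify_inc_runq(struct runq *rq);

    /* Manipulate the queue under the lock, but wake the sleeping
     * scheduler only after the lock has been released. */
    static void
    enqueue_and_notify(struct runq *rq, struct proc *p)
    {
        struct runq *notify;

        runq_lock(rq);
        notify = add_to_queue(rq, p);
        runq_unlock(rq);

        if (notify)
            notify_inc_runq(notify);
    }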
/* Possibly remove a scheduled process we need to suspend */
@@ -5564,8 +6586,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
p->run_queue = to_rq;
enqueue_process(to_rq, p);
- smp_notify_inc_runq(to_rq);
-
return ERTS_MIGRATE_SUCCESS;
}
#endif /* ERTS_SMP */
@@ -5762,30 +6782,6 @@ erts_set_process_priority(Process *p, Eterm new_value)
return old_value;
}
-#ifdef ERTS_SMP
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- while (!erts_port_task_have_outstanding_io_tasks()
- && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- return 0;
-}
-
-#else
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- return !erts_port_task_have_outstanding_io_tasks();
-}
-
-#endif
-
/* note that P_RUNNING is only set so that we don't try to remove
** running processes from the schedule queue if they exit - a running
** process not being in the schedule queue!!
@@ -5920,8 +6916,11 @@ Process *schedule(Process *p, int calls)
p->status_flags &= ~ERTS_PROC_SFLG_RUNNING;
if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
+ ErtsRunQueue *notify_runq;
p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
+ if (notify_runq != rq)
+ smp_notify_inc_runq(notify_runq);
}
#endif
@@ -5989,7 +6988,10 @@ Process *schedule(Process *p, int calls)
| ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
@@ -5998,12 +7000,21 @@ Process *schedule(Process *p, int calls)
}
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->check_children) {
- esdp->check_children = 0;
- erts_smp_runq_unlock(rq);
- erts_check_children();
- erts_smp_runq_lock(rq);
+#if defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
+ {
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work) {
+ erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+ erts_smp_runq_lock(rq);
+ }
}
#endif
@@ -6035,7 +7046,10 @@ Process *schedule(Process *p, int calls)
if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
}
@@ -6052,17 +7066,7 @@ Process *schedule(Process *p, int calls)
}
}
- if (prepare_for_sys_schedule()) {
- erts_smp_atomic_set(&function_calls, 0);
- fcalls = 0;
- sched_sys_wait(esdp->no, rq);
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- else {
- /* If all schedulers are waiting, one of them *should*
- be waiting in erl_sys_schedule() */
- sched_cnd_wait(esdp->no, rq);
- }
+ scheduler_wait(&fcalls, esdp, rq);
non_empty_runq(rq);
@@ -6124,7 +7128,7 @@ Process *schedule(Process *p, int calls)
else {
if (erts_common_run_queue) {
if (erts_common_run_queue->waiting)
- wake_one_scheduler();
+ wake_scheduler(erts_common_run_queue, 0, 1);
}
else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
@@ -6439,8 +7443,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
else
rq->misc.start = molp;
rq->misc.end = molp;
- smp_notify_inc_runq(rq);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(rq);
}
static void
@@ -6682,7 +7686,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
- ErtsRunQueue *rq;
+ ErtsRunQueue *rq, *notify_runq;
Process *p;
Sint arity; /* Number of arguments. */
#ifndef HYBRID
@@ -6999,10 +8003,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
p->status = P_WAITING;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(notify_runq);
+
res = p->id;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8f9f7f004e..6d7de237f3 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -89,6 +89,7 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
#ifdef ERTS_SMP
+extern Uint erts_max_main_threads;
#include "erl_bits.h"
#endif
@@ -219,6 +220,51 @@ typedef enum {
ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
} ErtsMigrateResult;
+#define ERTS_SSI_FLG_SLEEPING (((long) 1) << 0)
+#define ERTS_SSI_FLG_POLL_SLEEPING (((long) 1) << 1)
+#define ERTS_SSI_FLG_TSE_SLEEPING (((long) 1) << 2)
+#define ERTS_SSI_FLG_WAITING (((long) 1) << 3)
+#define ERTS_SSI_FLG_SUSPENDED (((long) 1) << 4)
+
+#define ERTS_SSI_FLGS_SLEEP_TYPE \
+ (ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
+
+#define ERTS_SSI_FLGS_SLEEP \
+ (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLGS_SLEEP_TYPE)
+
+#define ERTS_SSI_FLGS_ALL \
+ (ERTS_SSI_FLGS_SLEEP \
+ | ERTS_SSI_FLG_WAITING \
+ | ERTS_SSI_FLG_SUSPENDED)
+
+
+#if !defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ && defined(ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN)
+#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+#endif
+
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((long) 1) << 0)
+
+#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
+ (ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
+#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
+ (0)
+
+typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
+
+typedef struct {
+ erts_smp_spinlock_t lock;
+ ErtsSchedulerSleepInfo *list;
+} ErtsSchedulerSleepList;
+
+struct ErtsSchedulerSleepInfo_ {
+ ErtsSchedulerSleepInfo *next;
+ ErtsSchedulerSleepInfo *prev;
+ erts_smp_atomic_t flags;
+ erts_tse_t *event;
+ erts_smp_atomic_t aux_work;
+};
+
/* times to reschedule low prio process before running */
#define RESCHEDULE_LOW 8
@@ -271,8 +317,9 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- erts_smp_atomic_t spin_waiter;
- erts_smp_atomic_t spin_wake;
+#ifdef ERTS_SMP
+ ErtsSchedulerSleepList sleepers;
+#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
@@ -353,6 +400,7 @@ struct ErtsSchedulerData_ {
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
+ ErtsSchedulerSleepInfo *ssi;
Process *free_process;
#endif
#if !HEAP_ON_C_STACK
@@ -374,11 +422,6 @@ struct ErtsSchedulerData_ {
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children; /* run queue mutex */
- int blocked_check_children; /* schdlr_sspnd mutex */
-#endif
- erts_smp_atomic_t suspended; /* Only used when common run queue */
erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
#endif
};
@@ -1085,6 +1128,9 @@ void erts_handle_pending_exit(Process *, ErtsProcLocks);
void erts_deep_process_dump(int, void *);
+Eterm erts_get_reader_groups_map(Process *c_p);
+Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
+
Sint erts_test_next_pid(int, Uint);
Eterm erts_debug_processes(Process *c_p);
Eterm erts_debug_processes_bif_info(Process *c_p);
@@ -1509,29 +1555,30 @@ extern int erts_disable_proc_not_running_opt;
#define ERTS_MIN_PROCESSES 16
#endif
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-ERTS_GLB_INLINE void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-void erts_smp_notify_inc_runq__(ErtsRunQueue *runq);
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#ifdef ERTS_SMP
+void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, long);
+ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- if (runq->waiting)
- erts_smp_notify_inc_runq__(runq);
-#endif
+ long flags = erts_smp_atomic_read(&ssi->flags);
+ ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
+ || (flags & ERTS_SSI_FLG_WAITING));
+ if (flags & ERTS_SSI_FLG_SLEEPING) {
+ flags = erts_smp_atomic_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ erts_sched_finish_poke(ssi, flags);
+ }
}
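
erts_sched_poke() is the waker's side of the new sleep protocol: it clears the sleep bits with an atomic AND, and only a caller that actually saw a sleep bit set in the returned old value goes on to wake the scheduler, so concurrent pokers do not issue redundant wakeups. A reduced sketch of that check-then-clear step, using GCC-style __atomic builtins as a stand-in for the ethread atomics:

    #define FLG_SLEEPING (1L << 0)

    /* Fetch-and-AND returns the previous value, like erts_smp_atomic_band();
     * only the caller that flipped the bit performs the wakeup. */
    static void
    poke(long *flags, void (*wake)(void))
    {
        long old = __atomic_fetch_and(flags, ~FLG_SLEEPING, __ATOMIC_ACQ_REL);
        if (old & FLG_SLEEPING)
            wake();
    }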
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
-
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* #ifdef ERTS_SMP */
+
#include "erl_process_lock.h"
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 52440fb635..a4d12139e9 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -71,9 +71,12 @@ const Process erts_proc_lock_busy;
#ifdef ERTS_SMP
-/*#define ERTS_PROC_LOCK_SPIN_ON_GATE*/
-#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 16000
+#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000
+#define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32
#define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
+#define ERTS_PROC_LOCK_AUX_SPIN_COUNT 50
+
+#define ERTS_PROC_LOCK_SPIN_UNTIL_YIELD 25
#ifdef ERTS_PROC_LOCK_DEBUG
#define ERTS_PROC_LOCK_HARD_DEBUG
@@ -83,32 +86,19 @@ const Process erts_proc_lock_busy;
static void check_queue(erts_proc_lock_t *lck);
#endif
-
-typedef struct erts_proc_lock_waiter_t_ erts_proc_lock_waiter_t;
-struct erts_proc_lock_waiter_t_ {
- erts_proc_lock_waiter_t *next;
- erts_proc_lock_waiter_t *prev;
- ErtsProcLocks wait_locks;
- erts_smp_gate_t gate;
- erts_proc_lock_queues_t *queues;
-};
+#if SIZEOF_INT < 4
+#error "The size of the 'uflgs' field of the erts_tse_t type is too small"
+#endif
struct erts_proc_lock_queues_t_ {
erts_proc_lock_queues_t *next;
- erts_proc_lock_waiter_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-};
-
-struct erts_proc_lock_thr_spec_data_t_ {
- erts_proc_lock_queues_t *qs;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
};
static erts_proc_lock_queues_t zeroqs = {0};
-static erts_smp_spinlock_t wtr_lock;
-static erts_proc_lock_waiter_t *waiter_free_list;
+static erts_smp_spinlock_t qs_lock;
static erts_proc_lock_queues_t *queue_free_list;
-static erts_tsd_key_t waiter_key;
#ifdef ERTS_ENABLE_LOCK_CHECK
static struct {
@@ -122,35 +112,26 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
static int proc_lock_spin_count;
-static int proc_lock_trans_spin_cost;
+static int aux_thr_proc_lock_spin_count;
-static void cleanup_waiter(void);
+static void cleanup_tse(void);
void
erts_init_proc_lock(void)
{
int i;
int cpus;
- erts_smp_spinlock_init(&wtr_lock, "proc_lck_wtr_alloc");
+ erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_mtx_init_x(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i));
-#else
- erts_smp_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
-#else
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck, "pix_lock", make_small(i));
+ erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
+ "pix_lock", make_small(i));
#else
erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
#endif
-#endif
}
- waiter_free_list = NULL;
queue_free_list = NULL;
- erts_tsd_key_create(&waiter_key);
- erts_thr_install_exit_handler(cleanup_waiter);
+ erts_thr_install_exit_handler(cleanup_tse);
#ifdef ERTS_ENABLE_LOCK_CHECK
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
@@ -158,86 +139,106 @@ erts_init_proc_lock(void)
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
#endif
cpus = erts_get_cpu_configured(erts_cpuinfo);
- if (cpus > 1)
- proc_lock_spin_count = (ERTS_PROC_LOCK_SPIN_COUNT_BASE
- * ((int) erts_no_schedulers));
- else if (cpus == 1)
- proc_lock_spin_count = 0;
- else /* No of cpus unknown. Assume multi proc, but be conservative. */
+ if (cpus > 1) {
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
- if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
- proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
- proc_lock_trans_spin_cost = proc_lock_spin_count/20;
-}
-
-static ERTS_INLINE erts_proc_lock_waiter_t *
-alloc_wtr(void)
-{
- erts_proc_lock_waiter_t *wtr;
- erts_smp_spin_lock(&wtr_lock);
- wtr = waiter_free_list;
- if (wtr) {
- waiter_free_list = wtr->next;
- ERTS_LC_ASSERT(queue_free_list);
- wtr->queues = queue_free_list;
- queue_free_list = wtr->queues->next;
- erts_smp_spin_unlock(&wtr_lock);
+ proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC
+ * ((int) erts_no_schedulers));
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT;
}
- else {
- erts_smp_spin_unlock(&wtr_lock);
- wtr = erts_alloc(ERTS_ALC_T_PROC_LCK_WTR,
- sizeof(erts_proc_lock_waiter_t));
- erts_smp_gate_init(&wtr->gate);
- wtr->wait_locks = (ErtsProcLocks) 0;
- wtr->queues = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
- sizeof(erts_proc_lock_queues_t));
- sys_memcpy((void *) wtr->queues,
- (void *) &zeroqs,
- sizeof(erts_proc_lock_queues_t));
+ else if (cpus == 1) {
+ proc_lock_spin_count = 0;
+ aux_thr_proc_lock_spin_count = 0;
}
- return wtr;
+ else { /* No of cpus unknown. Assume multi proc, but be conservative. */
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE/2;
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT/2;
+ }
+ if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
}
#ifdef ERTS_ENABLE_LOCK_CHECK
static void
-check_unused_waiter(erts_proc_lock_waiter_t *wtr)
+check_unused_tse(erts_tse_t *wtr)
{
int i;
- ERTS_LC_ASSERT(wtr->wait_locks == 0);
+ erts_proc_lock_queues_t *queues = wtr->udata;
+ ERTS_LC_ASSERT(wtr->uflgs == 0);
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- ERTS_LC_ASSERT(!wtr->queues->queue[i]);
+ ERTS_LC_ASSERT(!queues->queue[i]);
}
-#define CHECK_UNUSED_WAITER(W) check_unused_waiter((W))
+#define CHECK_UNUSED_TSE(W) check_unused_tse((W))
#else
-#define CHECK_UNUSED_WAITER(W)
+#define CHECK_UNUSED_TSE(W)
#endif
+static ERTS_INLINE erts_tse_t *
+tse_fetch(erts_pix_lock_t *pix_lock)
+{
+ erts_tse_t *tse = erts_tse_fetch();
+ if (!tse->udata) {
+ erts_proc_lock_queues_t *qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_unlock(pix_lock);
+#endif
+ erts_smp_spin_lock(&qs_lock);
+ qs = queue_free_list;
+ if (qs) {
+ queue_free_list = queue_free_list->next;
+ erts_smp_spin_unlock(&qs_lock);
+ }
+ else {
+ erts_smp_spin_unlock(&qs_lock);
+ qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
+ sizeof(erts_proc_lock_queues_t));
+ sys_memcpy((void *) qs,
+ (void *) &zeroqs,
+ sizeof(erts_proc_lock_queues_t));
+ }
+ tse->udata = qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_lock(pix_lock);
+#endif
+ }
+ tse->uflgs = 0;
+ return tse;
+}
static ERTS_INLINE void
-free_wtr(erts_proc_lock_waiter_t *wtr)
+tse_return(erts_tse_t *tse, int force_free_q)
{
- CHECK_UNUSED_WAITER(wtr);
- erts_smp_spin_lock(&wtr_lock);
- wtr->next = waiter_free_list;
- waiter_free_list = wtr;
- wtr->queues->next = queue_free_list;
- queue_free_list = wtr->queues;
- erts_smp_spin_unlock(&wtr_lock);
+ CHECK_UNUSED_TSE(tse);
+ if (force_free_q || erts_tse_is_tmp(tse)) {
+ erts_proc_lock_queues_t *qs = tse->udata;
+ ASSERT(qs);
+ erts_smp_spin_lock(&qs_lock);
+ qs->next = queue_free_list;
+ queue_free_list = qs;
+ erts_smp_spin_unlock(&qs_lock);
+ tse->udata = NULL;
+ }
+ erts_tse_return(tse);
}
void
erts_proc_lock_prepare_proc_lock_waiter(void)
{
- erts_tsd_set(waiter_key, (void *) alloc_wtr());
+ tse_return(tse_fetch(NULL), 0);
}
static void
-cleanup_waiter(void)
+cleanup_tse(void)
{
- erts_proc_lock_waiter_t *wtr = erts_tsd_get(waiter_key);
- if (wtr)
- free_wtr(wtr);
+ erts_tse_t *tse = erts_tse_fetch();
+ if (tse) {
+ if (tse->udata)
+ tse_return(tse, 1);
+ else
+ erts_tse_return(tse);
+ }
}
@@ -250,7 +251,7 @@ cleanup_waiter(void)
static ERTS_INLINE void
enqueue_waiter(erts_proc_lock_queues_t *qs,
int ix,
- erts_proc_lock_waiter_t *wtr)
+ erts_tse_t *wtr)
{
if (!qs->queue[ix]) {
qs->queue[ix] = wtr;
@@ -266,10 +267,10 @@ enqueue_waiter(erts_proc_lock_queues_t *qs,
}
}
-static erts_proc_lock_waiter_t *
+static erts_tse_t *
dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
{
- erts_proc_lock_waiter_t *wtr = qs->queue[ix];
+ erts_tse_t *wtr = qs->queue[ix];
ERTS_LC_ASSERT(qs->queue[ix]);
if (wtr->next == wtr) {
ERTS_LC_ASSERT(qs->queue[ix]->prev == wtr);
@@ -295,10 +296,10 @@ dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
* lock.
*/
static ERTS_INLINE void
-try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
+try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
{
ErtsProcLocks got_locks = (ErtsProcLocks) 0;
- ErtsProcLocks locks = wtr->wait_locks;
+ ErtsProcLocks locks = wtr->uflgs;
int lock_no;
ERTS_LC_ASSERT(lck->queues);
@@ -334,7 +335,7 @@ try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
}
}
- wtr->wait_locks &= ~got_locks;
+ wtr->uflgs &= ~got_locks;
}
/*
@@ -350,8 +351,8 @@ transfer_locks(Process *p,
int unlock)
{
int transferred = 0;
- erts_proc_lock_waiter_t *wake = NULL;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wake = NULL;
+ erts_tse_t *wtr;
ErtsProcLocks unset_waiter = 0;
ErtsProcLocks tlocks = trnsfr_lcks;
int lock_no;
@@ -377,11 +378,11 @@ transfer_locks(Process *p,
ERTS_LC_ASSERT(wtr);
if (!qs->queue[lock_no])
unset_waiter |= lock;
- ERTS_LC_ASSERT(wtr->wait_locks & lock);
- wtr->wait_locks &= ~lock;
- if (wtr->wait_locks)
+ ERTS_LC_ASSERT(wtr->uflgs & lock);
+ wtr->uflgs &= ~lock;
+ if (wtr->uflgs)
try_aquire(&p->lock, wtr);
- if (!wtr->wait_locks) {
+ if (!wtr->uflgs) {
/*
* The other thread got all locks it needs;
* need to wake it up.
@@ -412,9 +413,10 @@ transfer_locks(Process *p,
erts_pix_unlock(pix_lock);
do {
- erts_proc_lock_waiter_t *tmp = wake;
+ erts_tse_t *tmp = wake;
wake = wake->next;
- erts_smp_gate_let_through(&tmp->gate, 1);
+ erts_atomic_set(&tmp->uaflgs, 0);
+ erts_tse_set(tmp);
} while (wake);
if (!unlock)
@@ -462,26 +464,16 @@ wait_for_locks(Process *p,
ErtsProcLocks olflgs)
{
erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
- int tsd;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
+ erts_proc_lock_queues_t *qs;
/* Acquire a waiter object on which this thread can wait. */
- wtr = erts_tsd_get(waiter_key);
- if (wtr)
- tsd = 1;
- else {
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_unlock(pix_lock);
-#endif
- wtr = alloc_wtr();
- tsd = 0;
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_lock(pix_lock);
-#endif
- }
+ wtr = tse_fetch(pix_lock);
/* Record which locks this waiter needs. */
- wtr->wait_locks = need_locks;
+ wtr->uflgs = need_locks;
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -489,14 +481,16 @@ wait_for_locks(Process *p,
ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
+ qs = wtr->udata;
+ ASSERT(qs);
/* Provide the process with waiter queues, if it doesn't have one. */
if (!p->lock.queues) {
- wtr->queues->next = NULL;
- p->lock.queues = wtr->queues;
+ qs->next = NULL;
+ p->lock.queues = qs;
}
else {
- wtr->queues->next = p->lock.queues->next;
- p->lock.queues->next = wtr->queues;
+ qs->next = p->lock.queues->next;
+ p->lock.queues->next = qs;
}
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
@@ -506,46 +500,59 @@ wait_for_locks(Process *p,
/* Try to aquire locks one at a time in lock order and set wait flag */
try_aquire(&p->lock, wtr);
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
check_queue(&p->lock);
#endif
- if (wtr->wait_locks) { /* We didn't get them all; need to wait... */
- /* Got to wait for locks... */
+ if (wtr->uflgs) {
+ /* We didn't get them all; need to wait... */
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
+ erts_atomic_set(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
- /*
- * Wait for needed locks. When we return all needed locks have
- * have been acquired by other threads and transfered to us.
- */
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- erts_smp_gate_swait(&wtr->gate, proc_lock_spin_count);
-#else
- erts_smp_gate_wait(&wtr->gate);
-#endif
+ while (1) {
+ int res;
+ erts_tse_reset(wtr);
+
+ if (erts_atomic_read(&wtr->uaflgs) == 0)
+ break;
+
+ /*
+	     * Wait for needed locks. When we are woken, all needed locks
+	     * have been acquired by other threads and transferred to us.
+ * However, we need to be prepared for spurious wakeups.
+ */
+ do {
+ res = erts_tse_wait(wtr); /* might return EINTR */
+ } while (res != 0);
+ }
erts_pix_lock(pix_lock);
+
+ ASSERT(wtr->uflgs == 0);
}
/* Recover some queues to store in the waiter. */
ERTS_LC_ASSERT(p->lock.queues);
if (p->lock.queues->next) {
- wtr->queues = p->lock.queues->next;
- p->lock.queues->next = wtr->queues->next;
+ qs = p->lock.queues->next;
+ p->lock.queues->next = qs->next;
}
else {
- wtr->queues = p->lock.queues;
+ qs = p->lock.queues;
p->lock.queues = NULL;
}
+ wtr->udata = qs;
erts_pix_unlock(pix_lock);
ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
- if (tsd)
- CHECK_UNUSED_WAITER(wtr);
- else
- free_wtr(wtr);
+ tse_return(wtr, 0);
}
/*
@@ -563,52 +570,57 @@ erts_proc_lock_failed(Process *p,
ErtsProcLocks locks,
ErtsProcLocks old_lflgs)
{
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- int spin_count = 0;
-#else
- int spin_count = proc_lock_spin_count;
-#endif
-
+ int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ int thr_spin_count;
+ int spin_count;
ErtsProcLocks need_locks = locks;
ErtsProcLocks olflgs = old_lflgs;
- while (need_locks != 0)
- {
- ErtsProcLocks can_grab = in_order_locks(olflgs, need_locks);
+ if (erts_thr_get_main_status())
+ thr_spin_count = proc_lock_spin_count;
+ else
+ thr_spin_count = aux_thr_proc_lock_spin_count;
+
+ spin_count = thr_spin_count;
+
+ while (need_locks != 0) {
+ ErtsProcLocks can_grab;
+
+ can_grab = in_order_locks(olflgs, need_locks);
- if (can_grab == 0)
- {
+ if (can_grab == 0) {
/* Someone already has the lowest-numbered lock we want. */
- if (spin_count-- <= 0)
- {
+ if (spin_count-- <= 0) {
/* Too many retries, give up and sleep for the lock. */
wait_for_locks(p, pixlck, locks, need_locks, olflgs);
return;
}
+ ERTS_SPIN_BODY;
+
+ if (--until_yield == 0) {
+ until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+
olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
}
- else
- {
+ else {
/* Try to grab all of the grabbable locks at once with cmpxchg. */
ErtsProcLocks grabbed = olflgs | can_grab;
ErtsProcLocks nflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, grabbed, olflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);
- if (nflgs == olflgs)
- {
+ if (nflgs == olflgs) {
/* Success! We grabbed the 'can_grab' locks. */
olflgs = grabbed;
need_locks &= ~can_grab;
-#ifndef ERTS_PROC_LOCK_SPIN_ON_GATE
/* Since we made progress, reset the spin count. */
- spin_count = proc_lock_spin_count;
-#endif
+ spin_count = thr_spin_count;
}
- else
- {
+ else {
/* Compare-and-exchange failed, try again. */
olflgs = nflgs;
}
@@ -1407,7 +1419,7 @@ check_queue(erts_proc_lock_t *lck)
wtr = (((ErtsProcLocks) 1) << lock_no) << ERTS_PROC_LOCK_WAITER_SHIFT;
if (lflgs & wtr) {
int n;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
ERTS_LC_ASSERT(lck->queues && lck->queues->queue[lock_no]);
wtr = lck->queues->queue[lock_no];
n = 0;
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index d71e5a0a6e..7cfc9893fa 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -255,11 +255,7 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
typedef struct {
union {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_t mtx;
-#else
erts_smp_spinlock_t spnlck;
-#endif
char buf[64]; /* Try to get locks in different cache lines */
} u;
} erts_pix_lock_t;
@@ -277,9 +273,12 @@ typedef struct {
((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_acqb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_relb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
@@ -289,6 +288,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_band(erts_proc_lock_t *,
ErtsProcLocks);
ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_bor(erts_proc_lock_t *,
ErtsProcLocks);
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *,
+ ErtsProcLocks,
+ ErtsProcLocks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -322,7 +324,9 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
#define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags)
@@ -348,9 +352,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
- erts_pix_lock_t *,
- ErtsProcLocks,
- char *file, unsigned int line);
+ erts_pix_lock_t *,
+ ErtsProcLocks,
+ char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
erts_pix_lock_t *,
@@ -372,30 +376,18 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_lock(&pixlck->u.mtx);
-#else
erts_smp_spin_lock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_unlock(&pixlck->u.mtx);
-#else
erts_smp_spin_unlock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
{
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- return erts_smp_lc_mtx_is_locked(&pixlck->u.mtx);
-#else
return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
-#endif
}
/*
@@ -417,9 +409,9 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
ErtsProcLocks expct_lflgs = 0;
while (1) {
- ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock,
- expct_lflgs | locks,
- expct_lflgs);
+ ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock,
+ expct_lflgs | locks,
+ expct_lflgs);
if (ERTS_LIKELY(lflgs == expct_lflgs)) {
/* We successfully grabbed all locks. */
return 0;
@@ -535,7 +527,7 @@ erts_smp_proc_unlock__(Process *p,
if (want_lflgs != old_lflgs) {
ErtsProcLocks new_lflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, want_lflgs, old_lflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(&p->lock, want_lflgs, old_lflgs);
if (new_lflgs != old_lflgs) {
/* cmpxchg failed, try again. */
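
Splitting ERTS_PROC_LOCK_FLGS_CMPXCHG_ into _ACQB_ and _RELB_ variants ties the memory ordering to the operation: taking locks uses an acquire barrier so accesses inside the critical section cannot be reordered before the lock is held, and dropping locks uses a release barrier so they cannot drift past the unlock. A minimal illustration of the same pairing with C11 atomics (illustrative, not the ethread implementation):

    #include <stdatomic.h>

    /* Acquire on lock, release on unlock. */
    static int
    try_lock(atomic_long *flags, long want)
    {
        long exp = 0;
        return atomic_compare_exchange_strong_explicit(
            flags, &exp, want,
            memory_order_acquire, memory_order_relaxed);
    }

    static void
    unlock(atomic_long *flags)
    {
        atomic_store_explicit(flags, 0, memory_order_release);
    }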
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 03d2a586e3..b41fa70476 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -43,9 +43,17 @@ typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
typedef erts_tid_t erts_smp_tid_t;
typedef erts_mtx_t erts_smp_mtx_t;
typedef erts_cnd_t erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER
+#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED
+#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED
+typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef erts_gate_t erts_smp_gate_t;
typedef ethr_atomic_t erts_smp_atomic_t;
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
@@ -54,15 +62,27 @@ void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#else /* #ifdef ERTS_SMP */
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER 0
+#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
typedef int erts_smp_thr_opts_t;
typedef int erts_smp_thr_init_data_t;
typedef int erts_smp_tid_t;
typedef int erts_smp_mtx_t;
typedef int erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_LONG_LIVED 0
+#define ERTS_SMP_RWMTX_SHORT_LIVED 0
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef int erts_smp_gate_t;
typedef long erts_smp_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
@@ -103,8 +123,6 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
@@ -119,9 +137,17 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
@@ -155,6 +181,16 @@ ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -190,12 +226,6 @@ ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
-ERTS_GLB_INLINE void erts_smp_gate_init(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_destroy(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_close(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_smp_gate_wait(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount);
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
@@ -331,22 +361,6 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_set_forksafe(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_unset_forksafe(mtx);
-#endif
-}
-
ERTS_GLB_INLINE int
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
{
@@ -433,6 +447,25 @@ erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_set_reader_group(int no)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_set_reader_group(no);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
@@ -441,6 +474,16 @@ erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt(rwmtx, opt, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
{
#ifdef ERTS_SMP
@@ -709,6 +752,73 @@ erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
#endif
}
+ERTS_GLB_INLINE long
+erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_dectest_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
ERTS_GLB_INLINE void
erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
{
@@ -919,54 +1029,6 @@ erts_smp_tsd_get(erts_smp_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_gate_init(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_init((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_destroy(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_destroy((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_close(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_close((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no)
-{
-#ifdef ERTS_SMP
- erts_gate_let_through((erts_gate_t *) gp, no);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_wait(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_wait((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount)
-{
-#ifdef ERTS_SMP
- erts_gate_swait((erts_gate_t *) gp, spincount);
-#endif
-}
-
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 35b338c6eb..0b7269262e 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -25,6 +25,11 @@
#ifndef ERL_THREAD_H__
#define ERL_THREAD_H__
+#define ERTS_SPIN_BODY ETHR_SPIN_BODY
+
+#define ERTS_MAX_READER_GROUPS 8
+extern int erts_reader_groups;
+
#include "sys.h"
#ifdef USE_THREADS
@@ -48,6 +53,7 @@
#define ERTS_THR_OPTS_DEFAULT_INITER ETHR_THR_OPTS_DEFAULT_INITER
typedef ethr_thr_opts erts_thr_opts_t;
typedef ethr_init_data erts_thr_init_data_t;
+typedef ethr_late_init_data erts_thr_late_init_data_t;
typedef ethr_tid erts_tid_t;
/* mutex */
@@ -73,8 +79,19 @@ typedef struct {
erts_lcnt_lock_t lcnt;
#endif
} erts_rwmtx_t;
+
+#define ERTS_RWMTX_OPT_DEFAULT_INITER ETHR_RWMUTEX_OPT_DEFAULT_INITER
+#define ERTS_RWMTX_TYPE_NORMAL ETHR_RWMUTEX_TYPE_NORMAL
+#define ERTS_RWMTX_TYPE_FREQUENT_READ ETHR_RWMUTEX_TYPE_FREQUENT_READ
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_RWMTX_LONG_LIVED ETHR_RWMUTEX_LONG_LIVED
+#define ERTS_RWMTX_SHORT_LIVED ETHR_RWMUTEX_SHORT_LIVED
+#define ERTS_RWMTX_UNKNOWN_LIVED ETHR_RWMUTEX_UNKNOWN_LIVED
+typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
+
typedef ethr_tsd_key erts_tsd_key_t;
-typedef ethr_gate erts_gate_t;
+typedef ethr_ts_event erts_tse_t;
typedef ethr_atomic_t erts_atomic_t;
/* spinlock */
@@ -103,25 +120,14 @@ typedef ethr_timeval erts_thr_timeval_t;
__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
/* implemented in erl_init.c */
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#define ERTS_REC_MTX_INITER \
- {ETHR_REC_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1,THE_NON_VALUE,ERTS_LC_FLG_LT_MUTEX)}
-#define ERTS_MTX_INITER \
- {ETHR_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_MUTEX)}
-#else
-#define ERTS_REC_MTX_INITER {ETHR_REC_MUTEX_INITER}
-#define ERTS_MTX_INITER {ETHR_MUTEX_INITER}
-#endif
-#define ERTS_CND_INITER ETHR_COND_INITER
#define ERTS_THR_INIT_DATA_DEF_INITER ETHR_INIT_DATA_DEFAULT_INITER
+#define ERTS_THR_LATE_INIT_DATA_DEF_INITER \
+ ETHR_LATE_INIT_DATA_DEFAULT_INITER
#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-
#else /* #ifdef USE_THREADS */
#define ERTS_THR_MEMORY_BARRIER
@@ -129,12 +135,26 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
#define ERTS_THR_OPTS_DEFAULT_INITER 0
typedef int erts_thr_opts_t;
typedef int erts_thr_init_data_t;
+typedef int erts_thr_late_init_data_t;
typedef int erts_tid_t;
typedef int erts_mtx_t;
typedef int erts_cnd_t;
+#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_RWMTX_TYPE_NORMAL 0
+#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_RWMTX_LONG_LIVED 0
+#define ERTS_RWMTX_SHORT_LIVED 0
+#define ERTS_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_rwmtx_opt_t;
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
-typedef int erts_gate_t;
+typedef int erts_tse_t;
typedef long erts_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
@@ -148,7 +168,6 @@ typedef struct {
long tv_nsec;
} erts_thr_timeval_t;
-#define ERTS_REC_MTX_INITER 0
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
#define ERTS_THR_INIT_DATA_DEF_INITER 0
@@ -158,6 +177,7 @@ typedef struct {
#endif /* #ifdef USE_THREADS */
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
+ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
void *arg, erts_thr_opts_t *opts);
ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res);
@@ -166,9 +186,6 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void erts_rec_mtx_init(erts_mtx_t *mtx);
-#endif
ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
@@ -177,8 +194,6 @@ ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_set_forksafe(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_unset_forksafe(erts_mtx_t *mtx);
ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
@@ -192,9 +207,17 @@ ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
@@ -228,6 +251,20 @@ ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_atomic_read_acqb(erts_atomic_t *var);
+ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_dectest_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
+ char *name,
+ Eterm extra,
+ Uint16 opt);
ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
char *name,
Eterm extra);
@@ -263,12 +300,16 @@ ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
-ERTS_GLB_INLINE void erts_gate_init(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_destroy(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_close(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_let_through(erts_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_gate_wait(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_swait(erts_gate_t *gp, int spincount);
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void);
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount);
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
+ERTS_GLB_INLINE int erts_thr_get_main_status(void);
+ERTS_GLB_INLINE void erts_thr_yield(void);
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
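
The erts_tse_* calls that replace the old gate API follow a reset / re-check / wait discipline that tolerates spurious wakeups, the same shape wait_for_locks() uses above: arm the event, re-check the wake-up condition, and only then wait, looping until the condition really holds. A generic sketch against the API declared above ('done' is hypothetical, standing for whatever the waker establishes before calling erts_tse_set()):

    static void
    tse_style_wait(erts_tse_t *ev, volatile long *done)
    {
        while (1) {
            erts_tse_reset(ev);               /* arm the event */
            if (*done)                        /* condition may already hold */
                break;
            while (erts_tse_wait(ev) != 0)    /* may return EINTR; retry */
                ;
        }
    }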
@@ -290,6 +331,16 @@ erts_thr_init(erts_thr_init_data_t *id)
}
ERTS_GLB_INLINE void
+erts_thr_late_init(erts_thr_late_init_data_t *id)
+{
+#ifdef USE_THREADS
+ int res = ethr_late_init(id);
+ if (res)
+ erts_thr_fatal_error(res, "complete initialization of thread library");
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
@@ -362,20 +413,6 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
#endif
}
-
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void
-erts_rec_mtx_init(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_rec_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize recursive mutex");
-#endif
-}
-#endif
-
-
ERTS_GLB_INLINE void
erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
{
@@ -422,9 +459,7 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -463,9 +498,7 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -492,26 +525,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_mtx_set_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_set_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "set mutex forksafe");
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_unset_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_unset_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "unset mutex forksafe");
-#endif
-}
-
ERTS_GLB_INLINE int
erts_mtx_trylock(erts_mtx_t *mtx)
{
@@ -531,11 +544,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
-#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try lock mutex");
-
+#endif
return res;
#else
return 0;
@@ -551,19 +560,16 @@ erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
#endif
}
@@ -571,16 +577,13 @@ ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&mtx->lcnt);
#endif
- res = ethr_mutex_unlock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "unlock mutex");
+ ethr_mutex_unlock(&mtx->mtx);
#endif
}
@@ -648,9 +651,7 @@ ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_signal(cnd);
- if (res)
- erts_thr_fatal_error(res, "signal on condition variable");
+ ethr_cond_signal(cnd);
#endif
}
@@ -659,19 +660,34 @@ ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_broadcast(cnd);
- if (res)
- erts_thr_fatal_error(res, "broadcast on condition variable");
+ ethr_cond_broadcast(cnd);
#endif
}
/* rwmutex */
ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_rwmtx_set_reader_group(int no)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+#endif
+ res = ethr_rwmutex_set_reader_group(no);
+ if (res != 0)
+ erts_thr_fatal_error(res, "set reader group");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -684,10 +700,20 @@ erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra)
+{
+ erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -700,6 +726,12 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
}
ERTS_GLB_INLINE void
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+{
+ erts_rwmtx_init_opt(rwmtx, NULL, name);
+}
+
+ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
@@ -736,9 +768,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try read lock rwmutex");
return res;
#else
@@ -754,19 +783,16 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_rlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "read lock rwmutex");
#endif
}
@@ -774,16 +800,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_runlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "read unlock rwmutex");
+ ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
}
@@ -808,9 +831,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try write lock rwmutex");
return res;
#else
@@ -826,19 +846,16 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "write lock rwmutex");
#endif
}
@@ -846,16 +863,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "write unlock rwmutex");
+ ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
}
@@ -917,9 +931,7 @@ ERTS_GLB_INLINE void
erts_atomic_init(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_init(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic init");
+ ethr_atomic_init(var, i);
#else
*var = i;
#endif
@@ -929,9 +941,7 @@ ERTS_GLB_INLINE void
erts_atomic_set(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_set(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic set");
+ ethr_atomic_set(var, i);
#else
*var = i;
#endif
@@ -941,11 +951,7 @@ ERTS_GLB_INLINE long
erts_atomic_read(erts_atomic_t *var)
{
#ifdef USE_THREADS
- long i;
- int res = ethr_atomic_read(var, &i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic read");
- return i;
+ return ethr_atomic_read(var);
#else
return *var;
#endif
@@ -955,11 +961,7 @@ ERTS_GLB_INLINE long
erts_atomic_inctest(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_inctest(incp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment and test");
- return test;
+ return ethr_atomic_inc_read(incp);
#else
return ++(*incp);
#endif
@@ -969,11 +971,7 @@ ERTS_GLB_INLINE long
erts_atomic_dectest(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_dectest(decp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement and test");
- return test;
+ return ethr_atomic_dec_read(decp);
#else
return --(*decp);
#endif
@@ -983,9 +981,7 @@ ERTS_GLB_INLINE void
erts_atomic_inc(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_inc(incp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment");
+ ethr_atomic_inc(incp);
#else
++(*incp);
#endif
@@ -995,9 +991,7 @@ ERTS_GLB_INLINE void
erts_atomic_dec(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_dec(decp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement");
+ ethr_atomic_dec(decp);
#else
--(*decp);
#endif
@@ -1007,11 +1001,7 @@ ERTS_GLB_INLINE long
erts_atomic_addtest(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_addtest(addp, i, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition and test");
- return test;
+ return ethr_atomic_add_read(addp, i);
#else
return *addp += i;
#endif
@@ -1021,9 +1011,7 @@ ERTS_GLB_INLINE void
erts_atomic_add(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_add(addp, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition");
+ ethr_atomic_add(addp, i);
#else
*addp += i;
#endif
@@ -1034,9 +1022,7 @@ erts_atomic_xchg(erts_atomic_t *xchgp, long new)
{
long old;
#ifdef USE_THREADS
- int res = ethr_atomic_xchg(xchgp, new, &old);
- if (res)
- erts_thr_fatal_error(res, "perform atomic exchange");
+ return ethr_atomic_xchg(xchgp, new);
#else
old = *xchgp;
*xchgp = new;
@@ -1048,11 +1034,7 @@ ERTS_GLB_INLINE long
erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
{
#ifdef USE_THREADS
- long old;
- int res = ethr_atomic_cmpxchg(xchgp, new, expected, &old);
- if (ERTS_UNLIKELY(res != 0))
- erts_thr_fatal_error(res, "perform atomic exchange");
- return old;
+ return ethr_atomic_cmpxchg(xchgp, new, expected);
#else
long old = *xchgp;
if (old == expected)
@@ -1064,31 +1046,95 @@ erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
ERTS_GLB_INLINE long
erts_atomic_bor(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_or_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise or");
+ return ethr_atomic_read_bor(var, mask);
#else
+ long old;
old = *var;
*var |= mask;
-#endif
return old;
+#endif
}
ERTS_GLB_INLINE long
erts_atomic_band(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_and_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise and");
+ return ethr_atomic_read_band(var, mask);
#else
+ long old;
old = *var;
*var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_read_acqb(erts_atomic_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_set_relb(erts_atomic_t *var, long i)
+{
+#ifdef USE_THREADS
+ ethr_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_dec_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_dectest_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_dec_read_relb(decp);
+#else
+ return --(*decp);
#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
return old;
+#endif
}
/* spinlock */
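
The hunk above drops the error-checking wrappers around the plain atomics and adds barrier-qualified variants (read_acqb, set_relb, dec_relb, dectest_relb, cmpxchg_acqb/relb). A minimal sketch of the publish/consume pattern the acquire/release variants are meant for; the payload, flag and helper names below are hypothetical and not part of the patch:

/* Illustrative sketch only, not part of the patch: a producer stores a
 * value and then raises a flag with release ordering; a consumer checks
 * the flag with acquire ordering before reading the value. */

static long shared_value;               /* hypothetical payload                */
static erts_atomic_t ready;             /* hypothetical flag, initialized to 0 */

static void
publish(long v)                         /* hypothetical helper */
{
    shared_value = v;
    erts_atomic_set_relb(&ready, 1);    /* release: orders the store above     */
}

static int
consume(long *vp)                       /* hypothetical helper */
{
    if (erts_atomic_read_acqb(&ready) == 0)
        return 0;                       /* nothing published yet               */
    *vp = shared_value;                 /* acquire above orders this load      */
    return 1;
}
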
@@ -1112,6 +1158,26 @@ erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
+ Uint16 opt)
+{
+#ifdef USE_THREADS
+ int res = ethr_spinlock_init(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "init spinlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
erts_spinlock_init(erts_spinlock_t *lock, char *name)
{
#ifdef USE_THREADS
@@ -1152,16 +1218,13 @@ ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&lock->lcnt);
#endif
- res = ethr_spin_unlock(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "release spin lock");
+ ethr_spin_unlock(&lock->slck);
#else
(void)lock;
#endif
@@ -1175,19 +1238,16 @@ erts_spin_lock(erts_spinlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
- res = ethr_spin_lock(&lock->slck);
+ ethr_spin_lock(&lock->slck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take spin lock");
#else
(void)lock;
#endif
@@ -1268,16 +1328,13 @@ ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release read lock");
+ ethr_read_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1291,19 +1348,16 @@ erts_read_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_lock(&lock->rwlck);
+ ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take read lock");
#else
(void)lock;
#endif
@@ -1313,16 +1367,13 @@ ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release write lock");
+ ethr_write_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1336,19 +1387,16 @@ erts_write_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_lock(&lock->rwlck);
+ ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take write lock");
#else
(void)lock;
#endif
@@ -1432,66 +1480,95 @@ erts_tsd_get(erts_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_init(erts_gate_t *gp)
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_init((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize gate");
+ return (erts_tse_t *) ethr_get_ts_event();
+#else
+ return (erts_tse_t *) NULL;
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_destroy(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_destroy((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "destroy gate");
+ ethr_leave_ts_event(ep);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_close(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_close((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "close gate");
+ ethr_event_set(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_let_through(erts_gate_t *gp, unsigned no)
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_let_through((ethr_gate *) gp, no);
- if (res != 0)
- erts_thr_fatal_error(res, "let through gate");
+ ethr_event_reset(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_wait(erts_gate_t *gp)
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
+{
+#ifdef USE_THREADS
+ return ethr_event_wait(&((ethr_ts_event *) ep)->event);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
+{
+#ifdef USE_THREADS
+ return ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_wait((ethr_gate *) gp);
+ return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
+{
+#ifdef USE_THREADS
+ int res = ethr_set_main_thr_status(on, no);
if (res != 0)
- erts_thr_fatal_error(res, "wait on gate");
+ erts_thr_fatal_error(res, "set thread main status");
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_swait(erts_gate_t *gp, int spincount)
+ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_swait((ethr_gate *) gp, spincount);
+ int main_status;
+ int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
- erts_thr_fatal_error(res, "swait on gate");
+ erts_thr_fatal_error(res, "get thread main status");
+ return main_status;
+#else
+ return 1;
#endif
}
+ERTS_GLB_INLINE void erts_thr_yield(void)
+{
+#ifdef USE_THREADS
+ int res = ETHR_YIELD();
+ if (res != 0)
+ erts_thr_fatal_error(res, "yield");
+#endif
+}
+
+
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
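
The gate primitives removed above are superseded by per-thread event objects (erts_tse_*). A minimal sketch of how the new calls are intended to be combined, assuming a single waiter; the waiting_ep variable and the work_available()/make_work_visible() helpers are hypothetical, and a real waiter would publish its event object in a thread-safe way:

/* Illustrative sketch only: block on this thread's event object until
 * another thread signals it. Reset before re-checking the condition so
 * a wakeup between the check and the wait is not lost. */

static int work_available(void);        /* hypothetical condition          */
static void make_work_visible(void);    /* hypothetical work producer      */

static erts_tse_t *waiting_ep;          /* hypothetical; needs safe publication */

static void
wait_for_work(void)                     /* hypothetical waiter */
{
    erts_tse_t *ep = erts_tse_fetch();  /* calling thread's event object   */
    waiting_ep = ep;
    while (!work_available()) {
        erts_tse_reset(ep);             /* clear the event                 */
        if (work_available())
            break;                      /* re-check after the reset        */
        erts_tse_wait(ep);              /* sleep until erts_tse_set()      */
    }
    erts_tse_return(ep);
}

static void
wake_worker(void)                       /* hypothetical waker */
{
    make_work_visible();
    erts_tse_set(waiting_ep);           /* wake the blocked waiter         */
}
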
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 66b05c0e9d..5e81a2d624 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -43,8 +43,6 @@ static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table.
#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
-#define export_init_lock() erts_smp_rwmtx_init(&export_table_lock, \
- "export_tab")
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
@@ -111,8 +109,12 @@ void
init_export_table(void)
{
HashFunctions f;
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
- export_init_lock();
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 900ebcbbf7..c9bb7bbe91 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -39,8 +39,6 @@ static Hash process_reg;
static erts_smp_rwmtx_t regtab_rwmtx;
-#define reg_lock_init() erts_smp_rwmtx_init(&regtab_rwmtx, \
- "reg_tab")
#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
@@ -147,8 +145,11 @@ static void reg_free(RegProc *obj)
void init_register_table(void)
{
HashFunctions f;
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- reg_lock_init();
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
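
Both tables above now take the reader-optimized, frequent-read lock variant. How much that helps depends on readers being spread over reader groups via the erts_rwmtx_set_reader_group() call declared in erl_threads.h earlier in this patch; a minimal sketch, where the group numbering and the lookup helper are purely illustrative:

/* Illustrative sketch only: a long-lived thread picks its reader group
 * once, before it starts read-locking reader-optimized rwlocks. */

static void
thread_setup(int my_group)              /* hypothetical per-thread setup  */
{
    erts_rwmtx_set_reader_group(my_group);
}

static void
lookup(erts_rwmtx_t *tab_lock)          /* hypothetical read-side caller  */
{
    erts_rwmtx_rlock(tab_lock);         /* read-lock bookkeeping stays in
                                           this thread's reader group     */
    /* ... read-only table access ... */
    erts_rwmtx_runlock(tab_lock);
}
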
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 737ffd9f94..400ef6c0ce 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -384,18 +384,6 @@ MALLOC_USE_HASH(1);
#endif
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
#ifdef ERTS_THR_HAVE_SIG_FUNCS
/*
@@ -488,9 +476,6 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
@@ -538,13 +523,14 @@ erts_sys_pre_init(void)
#endif
#endif /* USE_THREADS */
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
}
void
erl_sys_init(void)
{
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
+ {
int res;
char bindir[MAXPATHLEN];
size_t bindirsz = sizeof(bindir);
@@ -574,6 +560,7 @@ erl_sys_init(void)
bindir,
DIR_SEPARATOR_CHAR,
CHILD_SETUP_PROG_NAME);
+ }
#endif
#ifdef USE_SETLINEBUF
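
With the allocator callbacks gone from erts_thr_init_data_t, thread library setup is split in two: erts_thr_init() still runs early in erts_sys_pre_init() as above, and erts_thr_late_init() (added to erl_threads.h earlier in this patch) completes the initialization once the runtime is further along. A minimal sketch of the second step; the initializer macro name and the call site are assumptions, not taken from this diff:

/* Illustrative sketch only: second-phase initialization of the thread
 * library. ERTS_THR_LATE_INIT_DATA_DEF_INITER is an assumed name. */

static void
late_thread_setup(void)                 /* hypothetical call site */
{
    erts_thr_late_init_data_t lid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
    erts_thr_late_init(&lid);           /* fatal error on failure */
}
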
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index bd02cf85eb..54f71b202d 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -2974,19 +2974,6 @@ check_supported_os_version(void)
}
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
-
#ifdef ERTS_ENABLE_LOCK_COUNT
static void
thr_create_prepare_child(void *vtcdp)
@@ -3005,14 +2992,9 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
-
#ifdef ERTS_ENABLE_LOCK_COUNT
eid.thread_create_child_func = thr_create_prepare_child;
#endif
-
erts_thr_init(&eid);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init();
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index ab727b7cb1..4f4458802c 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -33,7 +33,7 @@
-include("test_server.hrl").
%-compile(export_all).
--export([all/1, init_per_testcase/2, fin_per_testcase/2]).
+-export([all/1, init_per_testcase/2, fin_per_testcase/2, end_per_suite/1]).
-export([equal/1,
few_low/1,
@@ -49,7 +49,8 @@
cpu_topology/1,
sct_cmd/1,
sbt_cmd/1,
- scheduler_suspend/1]).
+ scheduler_suspend/1,
+ reader_groups/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(10)).
@@ -67,7 +68,8 @@ all(suite) ->
equal_with_high_max,
bound_process,
scheduler_bind,
- scheduler_suspend].
+ scheduler_suspend,
+ reader_groups].
init_per_testcase(Case, Config) when is_list(Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
@@ -81,6 +83,10 @@ fin_per_testcase(_Case, Config) when is_list(Config) ->
?t:timetrap_cancel(Dog),
ok.
+end_per_suite(Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
+ Config.
+
-define(ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED, (2000*2000)).
-define(DEFAULT_TEST_REDS_PER_SCHED, 200000000).
@@ -965,12 +971,361 @@ sst3_loop(S, N) ->
erlang:system_flag(schedulers_online, 1),
erlang:system_flag(schedulers_online, S),
sst3_loop(S, N-1).
+
+reader_groups(Config) when is_list(Config) ->
+ %% White box testing. These results are correct, but other results
+ %% could be too...
+
+ %% The actual tilepro64 topology
+ CPUT0 = [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}],
+
+ ?line [{0,1},{1,1},{2,1},{3,3},{4,3},{5,3},{6,3},{7,3},{8,1},{9,1},{10,1},
+ {11,1},{12,3},{13,3},{14,4},{15,4},{16,2},{17,2},{18,2},{19,2},
+ {20,4},{21,4},{22,4},{23,4},{24,2},{25,2},{26,7},{27,2},{28,4},
+ {29,2},{30,4},{31,5},{32,7},{33,7},{34,7},{35,7},{36,5},{37,5},
+ {38,5},{39,7},{40,7},{41,8},{42,8},{43,8},{44,5},{45,5},{46,5},
+ {47,6},{48,8},{49,8},{50,8},{51,6},{52,6},{53,6},{54,6},{55,6},
+ {58,8},{60,6},{61,6}]
+ = reader_groups_map(CPUT0, 8),
+
+ CPUT1 = [n([p([c([t(l(0)),t(l(1)),t(l(2)),t(l(3))]),
+ c([t(l(4)),t(l(5)),t(l(6)),t(l(7))]),
+ c([t(l(8)),t(l(9)),t(l(10)),t(l(11))]),
+ c([t(l(12)),t(l(13)),t(l(14)),t(l(15))])]),
+ p([c([t(l(16)),t(l(17)),t(l(18)),t(l(19))]),
+ c([t(l(20)),t(l(21)),t(l(22)),t(l(23))]),
+ c([t(l(24)),t(l(25)),t(l(26)),t(l(27))]),
+ c([t(l(28)),t(l(29)),t(l(30)),t(l(31))])])]),
+ n([p([c([t(l(32)),t(l(33)),t(l(34)),t(l(35))]),
+ c([t(l(36)),t(l(37)),t(l(38)),t(l(39))]),
+ c([t(l(40)),t(l(41)),t(l(42)),t(l(43))]),
+ c([t(l(44)),t(l(45)),t(l(46)),t(l(47))])]),
+ p([c([t(l(48)),t(l(49)),t(l(50)),t(l(51))]),
+ c([t(l(52)),t(l(53)),t(l(54)),t(l(55))]),
+ c([t(l(56)),t(l(57)),t(l(58)),t(l(59))]),
+ c([t(l(60)),t(l(61)),t(l(62)),t(l(63))])])]),
+ n([p([c([t(l(64)),t(l(65)),t(l(66)),t(l(67))]),
+ c([t(l(68)),t(l(69)),t(l(70)),t(l(71))]),
+ c([t(l(72)),t(l(73)),t(l(74)),t(l(75))]),
+ c([t(l(76)),t(l(77)),t(l(78)),t(l(79))])]),
+ p([c([t(l(80)),t(l(81)),t(l(82)),t(l(83))]),
+ c([t(l(84)),t(l(85)),t(l(86)),t(l(87))]),
+ c([t(l(88)),t(l(89)),t(l(90)),t(l(91))]),
+ c([t(l(92)),t(l(93)),t(l(94)),t(l(95))])])]),
+ n([p([c([t(l(96)),t(l(97)),t(l(98)),t(l(99))]),
+ c([t(l(100)),t(l(101)),t(l(102)),t(l(103))]),
+ c([t(l(104)),t(l(105)),t(l(106)),t(l(107))]),
+ c([t(l(108)),t(l(109)),t(l(110)),t(l(111))])]),
+ p([c([t(l(112)),t(l(113)),t(l(114)),t(l(115))]),
+ c([t(l(116)),t(l(117)),t(l(118)),t(l(119))]),
+ c([t(l(120)),t(l(121)),t(l(122)),t(l(123))]),
+ c([t(l(124)),t(l(125)),t(l(126)),t(l(127))])])])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},
+ {10,3},{11,3},{12,4},{13,4},{14,4},{15,4},{16,5},{17,5},{18,5},
+ {19,5},{20,6},{21,6},{22,6},{23,6},{24,7},{25,7},{26,7},{27,7},
+ {28,8},{29,8},{30,8},{31,8},{32,9},{33,9},{34,9},{35,9},{36,10},
+ {37,10},{38,10},{39,10},{40,11},{41,11},{42,11},{43,11},{44,12},
+ {45,12},{46,12},{47,12},{48,13},{49,13},{50,13},{51,13},{52,14},
+ {53,14},{54,14},{55,14},{56,15},{57,15},{58,15},{59,15},{60,16},
+ {61,16},{62,16},{63,16},{64,17},{65,17},{66,17},{67,17},{68,18},
+ {69,18},{70,18},{71,18},{72,19},{73,19},{74,19},{75,19},{76,20},
+ {77,20},{78,20},{79,20},{80,21},{81,21},{82,21},{83,21},{84,22},
+ {85,22},{86,22},{87,22},{88,23},{89,23},{90,23},{91,23},{92,24},
+ {93,24},{94,24},{95,24},{96,25},{97,25},{98,25},{99,25},{100,26},
+ {101,26},{102,26},{103,26},{104,27},{105,27},{106,27},{107,27},
+ {108,28},{109,28},{110,28},{111,28},{112,29},{113,29},{114,29},
+ {115,29},{116,30},{117,30},{118,30},{119,30},{120,31},{121,31},
+ {122,31},{123,31},{124,32},{125,32},{126,32},{127,32}]
+ = reader_groups_map(CPUT1, 128),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,1},{17,1},{18,1},{19,1},
+ {20,1},{21,1},{22,1},{23,1},{24,1},{25,1},{26,1},{27,1},{28,1},
+ {29,1},{30,1},{31,1},{32,1},{33,1},{34,1},{35,1},{36,1},{37,1},
+ {38,1},{39,1},{40,1},{41,1},{42,1},{43,1},{44,1},{45,1},{46,1},
+ {47,1},{48,1},{49,1},{50,1},{51,1},{52,1},{53,1},{54,1},{55,1},
+ {56,1},{57,1},{58,1},{59,1},{60,1},{61,1},{62,1},{63,1},{64,2},
+ {65,2},{66,2},{67,2},{68,2},{69,2},{70,2},{71,2},{72,2},{73,2},
+ {74,2},{75,2},{76,2},{77,2},{78,2},{79,2},{80,2},{81,2},{82,2},
+ {83,2},{84,2},{85,2},{86,2},{87,2},{88,2},{89,2},{90,2},{91,2},
+ {92,2},{93,2},{94,2},{95,2},{96,2},{97,2},{98,2},{99,2},{100,2},
+ {101,2},{102,2},{103,2},{104,2},{105,2},{106,2},{107,2},{108,2},
+ {109,2},{110,2},{111,2},{112,2},{113,2},{114,2},{115,2},{116,2},
+ {117,2},{118,2},{119,2},{120,2},{121,2},{122,2},{123,2},{124,2},
+ {125,2},{126,2},{127,2}]
+ = reader_groups_map(CPUT1, 2),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},{10,3},
+ {11,3},{12,3},{13,3},{14,3},{15,3},{16,4},{17,4},{18,4},{19,4},
+ {20,4},{21,4},{22,4},{23,4},{24,5},{25,5},{26,5},{27,5},{28,5},
+ {29,5},{30,5},{31,5},{32,6},{33,6},{34,6},{35,6},{36,6},{37,6},
+ {38,6},{39,6},{40,7},{41,7},{42,7},{43,7},{44,7},{45,7},{46,7},
+ {47,7},{48,8},{49,8},{50,8},{51,8},{52,8},{53,8},{54,8},{55,8},
+ {56,9},{57,9},{58,9},{59,9},{60,9},{61,9},{62,9},{63,9},{64,10},
+ {65,10},{66,10},{67,10},{68,10},{69,10},{70,10},{71,10},{72,11},
+ {73,11},{74,11},{75,11},{76,11},{77,11},{78,11},{79,11},{80,12},
+ {81,12},{82,12},{83,12},{84,12},{85,12},{86,12},{87,12},{88,13},
+ {89,13},{90,13},{91,13},{92,13},{93,13},{94,13},{95,13},{96,14},
+ {97,14},{98,14},{99,14},{100,14},{101,14},{102,14},{103,14},
+ {104,15},{105,15},{106,15},{107,15},{108,15},{109,15},{110,15},
+ {111,15},{112,16},{113,16},{114,16},{115,16},{116,16},{117,16},
+ {118,16},{119,16},{120,17},{121,17},{122,17},{123,17},{124,17},
+ {125,17},{126,17},{127,17}]
+ = reader_groups_map(CPUT1, 17),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,2},{17,2},{18,2},{19,2},
+ {20,2},{21,2},{22,2},{23,2},{24,2},{25,2},{26,2},{27,2},{28,2},
+ {29,2},{30,2},{31,2},{32,3},{33,3},{34,3},{35,3},{36,3},{37,3},
+ {38,3},{39,3},{40,3},{41,3},{42,3},{43,3},{44,3},{45,3},{46,3},
+ {47,3},{48,4},{49,4},{50,4},{51,4},{52,4},{53,4},{54,4},{55,4},
+ {56,4},{57,4},{58,4},{59,4},{60,4},{61,4},{62,4},{63,4},{64,5},
+ {65,5},{66,5},{67,5},{68,5},{69,5},{70,5},{71,5},{72,5},{73,5},
+ {74,5},{75,5},{76,5},{77,5},{78,5},{79,5},{80,6},{81,6},{82,6},
+ {83,6},{84,6},{85,6},{86,6},{87,6},{88,6},{89,6},{90,6},{91,6},
+ {92,6},{93,6},{94,6},{95,6},{96,7},{97,7},{98,7},{99,7},{100,7},
+ {101,7},{102,7},{103,7},{104,7},{105,7},{106,7},{107,7},{108,7},
+ {109,7},{110,7},{111,7},{112,7},{113,7},{114,7},{115,7},{116,7},
+ {117,7},{118,7},{119,7},{120,7},{121,7},{122,7},{123,7},{124,7},
+ {125,7},{126,7},{127,7}]
+ = reader_groups_map(CPUT1, 7),
+
+ ?line CPUT2 = [p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))]),
+ p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},{6,2},{7,2},{8,2},{9,2},
+ {10,3},
+ {11,4},{12,4},{13,4},
+ {14,5},{15,5}] = reader_groups_map(CPUT2, 5),
+
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,5},{13,5},
+ {14,6},{15,6}] = reader_groups_map(CPUT2, 6),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,7}] = reader_groups_map(CPUT2, 7),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,8}] = reader_groups_map(CPUT2, 8),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,7},
+ {14,8},{15,9}] = reader_groups_map(CPUT2, 9),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,8},
+ {14,9},{15,10}] = reader_groups_map(CPUT2, 10),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,4},
+ {5,5},{6,5},{7,5},{8,5},{9,5},
+ {10,6},
+ {11,7},{12,8},{13,9},
+ {14,10},{15,11}] = reader_groups_map(CPUT2, 11),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,5},
+ {5,6},{6,6},{7,6},{8,6},{9,6},
+ {10,7},
+ {11,8},{12,9},{13,10},
+ {14,11},{15,12}] = reader_groups_map(CPUT2, 100),
+
+ CPUT3 = [p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))]),
+ p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))])],
+
+ ?line [{0,5},{1,5},{2,6},{3,6},{4,6},
+ {5,1},{6,1},{7,1},{8,1},{9,1},
+ {10,2},{11,3},{12,3},{13,3},
+ {14,4},{15,4}] = reader_groups_map(CPUT3, 6),
+
+ CPUT4 = [p([t(l(0)),t(l(1)),t(l(2)),t(l(3)),t(l(4))]),
+ p([t(l(5))]),
+ p([c(l(6)),c(l(7)),c(l(8))]),
+ p([c(l(9)),c(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13)),c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,3},{8,3},
+ {9,4},{10,4},
+ {11,5},{12,5},{13,6},{14,6},{15,6}] = reader_groups_map(CPUT4, 6),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,4},{8,4},
+ {9,5},{10,5},
+ {11,6},{12,6},{13,7},{14,7},{15,7}] = reader_groups_map(CPUT4, 7),
+
+ ?line [{0,1},{65535,2}] = reader_groups_map([c(l(0)),c(l(65535))], 10),
+
+ ?line ok.
+reader_groups_map(CPUT, Groups) ->
+ Old = erlang:system_info({cpu_topology, defined}),
+ erlang:system_flag(cpu_topology, CPUT),
+ enable_internal_state(),
+ Res = erts_debug:get_internal_state({reader_groups_map, Groups}),
+ erlang:system_flag(cpu_topology, Old),
+ lists:sort(Res).
+
%%
%% Utils
%%
+tilera_cpu_topology() ->
+ [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}].
+
+l(Id) ->
+ {logical, Id}.
+
+t(X) ->
+ {thread, X}.
+
+c(X) ->
+ {core, X}.
+
+p(X) ->
+ {processor, X}.
+
+n(X) ->
+ {node, X}.
+
mcall(Node, Funs) ->
Parent = self(),
Refs = lists:map(fun (Fun) ->