Diffstat (limited to 'erts')
-rw-r--r--  erts/aclocal.m4                                   | 215
-rw-r--r--  erts/configure.in                                 | 61
-rw-r--r--  erts/doc/src/erl.xml                              | 18
-rw-r--r--  erts/emulator/Makefile.in                         | 35
-rw-r--r--  erts/emulator/beam/atom.c                         | 10
-rw-r--r--  erts/emulator/beam/atom.names                     | 1
-rw-r--r--  erts/emulator/beam/erl_alloc.c                    | 174
-rw-r--r--  erts/emulator/beam/erl_alloc.types                | 8
-rw-r--r--  erts/emulator/beam/erl_bif_info.c                 | 16
-rw-r--r--  erts/emulator/beam/erl_db.c                       | 319
-rw-r--r--  erts/emulator/beam/erl_db_hash.c                  | 6
-rw-r--r--  erts/emulator/beam/erl_db_util.h                  | 3
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c               | 78
-rw-r--r--  erts/emulator/beam/erl_fun.c                      | 8
-rw-r--r--  erts/emulator/beam/erl_init.c                     | 116
-rw-r--r--  erts/emulator/beam/erl_lock_check.c               | 109
-rw-r--r--  erts/emulator/beam/erl_lock_check.h               | 1
-rw-r--r--  erts/emulator/beam/erl_lock_count.c               | 18
-rw-r--r--  erts/emulator/beam/erl_mtrace.c                   | 2
-rw-r--r--  erts/emulator/beam/erl_node_tables.c              | 31
-rw-r--r--  erts/emulator/beam/erl_port_task.c                | 7
-rw-r--r--  erts/emulator/beam/erl_process.c                  | 1832
-rw-r--r--  erts/emulator/beam/erl_process.h                  | 89
-rw-r--r--  erts/emulator/beam/erl_process_lock.c             | 350
-rw-r--r--  erts/emulator/beam/erl_process_lock.h             | 46
-rw-r--r--  erts/emulator/beam/erl_smp.h                      | 222
-rw-r--r--  erts/emulator/beam/erl_threads.h                  | 509
-rw-r--r--  erts/emulator/beam/export.c                       | 8
-rw-r--r--  erts/emulator/beam/external.c                     | 2
-rw-r--r--  erts/emulator/beam/external.h                     | 2
-rw-r--r--  erts/emulator/beam/register.c                     | 7
-rw-r--r--  erts/emulator/beam/sys.h                          | 9
-rw-r--r--  erts/emulator/sys/unix/sys.c                      | 19
-rw-r--r--  erts/emulator/sys/win32/sys.c                     | 18
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl            | 361
-rw-r--r--  erts/etc/common/erlexec.c                         | 30
-rw-r--r--  erts/include/internal/erl_misc_utils.h            | 4
-rw-r--r--  erts/include/internal/ethr_internal.h             | 67
-rw-r--r--  erts/include/internal/ethr_mutex.h                | 510
-rw-r--r--  erts/include/internal/ethr_optimized_fallbacks.h  | 185
-rw-r--r--  erts/include/internal/ethread.h                   | 1653
-rw-r--r--  erts/include/internal/ethread_header_config.h.in  | 57
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h           | 19
-rw-r--r--  erts/include/internal/i386/atomic.h               | 25
-rw-r--r--  erts/include/internal/i386/ethread.h              | 3
-rw-r--r--  erts/include/internal/i386/rwlock.h               | 12
-rw-r--r--  erts/include/internal/i386/spinlock.h             | 14
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h | 292
-rw-r--r--  erts/include/internal/libatomic_ops/ethread.h     | 30
-rw-r--r--  erts/include/internal/ppc32/atomic.h              | 22
-rw-r--r--  erts/include/internal/ppc32/ethread.h             | 3
-rw-r--r--  erts/include/internal/ppc32/rwlock.h              | 12
-rw-r--r--  erts/include/internal/ppc32/spinlock.h            | 12
-rw-r--r--  erts/include/internal/pthread/ethr_event.h        | 151
-rw-r--r--  erts/include/internal/sparc32/atomic.h            | 39
-rw-r--r--  erts/include/internal/sparc32/ethread.h           | 3
-rw-r--r--  erts/include/internal/sparc32/rwlock.h            | 12
-rw-r--r--  erts/include/internal/sparc32/spinlock.h          | 12
-rw-r--r--  erts/include/internal/tile/atomic.h               | 67
-rw-r--r--  erts/include/internal/win/ethr_atomic.h           | 244
-rw-r--r--  erts/include/internal/win/ethr_event.h            | 62
-rw-r--r--  erts/include/internal/win/ethread.h               | 31
-rw-r--r--  erts/lib_src/Makefile.in                          | 24
-rw-r--r--  erts/lib_src/common/erl_misc_utils.c              | 179
-rw-r--r--  erts/lib_src/common/ethr_aux.c                    | 762
-rw-r--r--  erts/lib_src/common/ethr_cbf.c                    | 36
-rw-r--r--  erts/lib_src/common/ethr_mutex.c                  | 2758
-rw-r--r--  erts/lib_src/common/ethread.c                     | 3369
-rw-r--r--  erts/lib_src/pthread/ethr_event.c                 | 219
-rw-r--r--  erts/lib_src/pthread/ethread.c                    | 477
-rw-r--r--  erts/lib_src/win/ethr_event.c                     | 120
-rw-r--r--  erts/lib_src/win/ethread.c                        | 625
-rw-r--r--  erts/test/ethread_SUITE.erl                       | 54
-rw-r--r--  erts/test/ethread_SUITE_data/ethread_tests.c      | 1198
74 files changed, 11251 insertions, 6851 deletions
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 61244c7cd3..0fa3fec244 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -653,6 +653,19 @@ fi
])
+AC_DEFUN(ERL_INTERNAL_LIBS,
+[
+
+ERTS_INTERNAL_X_LIBS=
+
+AC_CHECK_LIB(kstat, kstat_open,
+[AC_DEFINE(HAVE_KSTAT, 1, [Define if you have kstat])
+ERTS_INTERNAL_X_LIBS="$ERTS_INTERNAL_X_LIBS -lkstat"])
+
+AC_SUBST(ERTS_INTERNAL_X_LIBS)
+
+])
+
dnl ----------------------------------------------------------------------
dnl
dnl ERL_FIND_ETHR_LIB
@@ -676,10 +689,13 @@ AC_DEFUN(ERL_FIND_ETHR_LIB,
[
LM_CHECK_THR_LIB
+ERL_INTERNAL_LIBS
+ethr_have_native_atomics=no
+ethr_have_native_spinlock=no
ETHR_THR_LIB_BASE="$THR_LIB_NAME"
ETHR_DEFS="$THR_DEFS"
-ETHR_X_LIBS="$THR_LIBS"
+ETHR_X_LIBS="$THR_LIBS $ERTS_INTERNAL_X_LIBS"
ETHR_LIBS=
ETHR_LIB_NAME=
@@ -691,6 +707,7 @@ ethr_lib_name=ethread
case "$THR_LIB_NAME" in
win32_threads)
+ ETHR_THR_LIB_BASE_DIR=win
# * _WIN32_WINNT >= 0x0400 is needed for
# TryEnterCriticalSection
# * _WIN32_WINNT >= 0x0403 is needed for
@@ -716,10 +733,13 @@ case "$THR_LIB_NAME" in
if test $found_win32_winnt = no; then
AC_MSG_ERROR([-D_WIN32_WINNT missing in CPPFLAGS])
fi
+ ethr_have_native_atomics=yes
+ ethr_have_native_spinlock=yes
AC_DEFINE(ETHR_WIN32_THREADS, 1, [Define if you have win32 threads])
;;
pthread)
+ ETHR_THR_LIB_BASE_DIR=pthread
AC_DEFINE(ETHR_PTHREADS, 1, [Define if you have pthreads])
case $host_os in
openbsd*)
@@ -775,9 +795,7 @@ case "$THR_LIB_NAME" in
if test $usable_sigaltstack = no; then
ETHR_DEFS="$ETHR_DEFS -DETHR_UNUSABLE_SIGALTSTACK"
fi
-
- AC_DEFINE(ETHR_INIT_MUTEX_IN_CHILD_AT_FORK, 1, \
-[Define if mutexes should be reinitialized (instead of unlocked) in child at fork.]) ;;
+ ;;
*) ;;
esac
@@ -808,6 +826,10 @@ case "$THR_LIB_NAME" in
[Define if you need the <nptl/pthread.h> header file.])
fi
+ AC_CHECK_HEADER(sched.h, \
+ AC_DEFINE(ETHR_HAVE_SCHED_H, 1, \
+[Define if you have the <sched.h> header file.]))
+
AC_CHECK_HEADER(sys/time.h, \
AC_DEFINE(ETHR_HAVE_SYS_TIME_H, 1, \
[Define if you have the <sys/time.h> header file.]))
@@ -823,26 +845,63 @@ case "$THR_LIB_NAME" in
dnl Check for functions
dnl
- AC_CHECK_FUNC(pthread_atfork, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_ATFORK, 1, \
-[Define if you have the pthread_atfork function.]))
- AC_CHECK_FUNC(pthread_mutexattr_settype, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE, 1, \
-[Define if you have the pthread_mutexattr_settype function.]))
- AC_CHECK_FUNC(pthread_mutexattr_setkind_np, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP, 1, \
-[Define if you have the pthread_mutexattr_setkind_np function.]))
AC_CHECK_FUNC(pthread_spin_lock, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_SPIN_LOCK, 1, \
-[Define if you have the pthread_spin_lock function.]))
+ [ethr_have_native_spinlock=yes \
+ AC_DEFINE(ETHR_HAVE_PTHREAD_SPIN_LOCK, 1, \
+[Define if you have the pthread_spin_lock function.])])
+
+ have_sched_yield=no
+ have_librt_sched_yield=no
+ AC_CHECK_FUNC(sched_yield, [have_sched_yield=yes])
+ if test $have_sched_yield = no; then
+ AC_CHECK_LIB(rt, sched_yield,
+ [have_librt_sched_yield=yes
+ ETHR_X_LIBS="$ETHR_X_LIBS -lrt"])
+ fi
+ if test $have_sched_yield = yes || test $have_librt_sched_yield = yes; then
+ AC_DEFINE(ETHR_HAVE_SCHED_YIELD, 1, [Define if you have the sched_yield() function.])
+ AC_MSG_CHECKING([whether sched_yield() returns an int])
+ sched_yield_ret_int=no
+ AC_TRY_COMPILE([
+ #ifdef ETHR_HAVE_SCHED_H
+ #include <sched.h>
+ #endif
+ ],
+ [int sched_yield();],
+ [sched_yield_ret_int=yes])
+ AC_MSG_RESULT([$sched_yield_ret_int])
+ if test $sched_yield_ret_int = yes; then
+ AC_DEFINE(ETHR_SCHED_YIELD_RET_INT, 1, [Define if sched_yield() returns an int.])
+ fi
+ fi
+
+ have_pthread_yield=no
+ AC_CHECK_FUNC(pthread_yield, [have_pthread_yield=yes])
+ if test $have_pthread_yield = yes; then
+ AC_DEFINE(ETHR_HAVE_PTHREAD_YIELD, 1, [Define if you have the pthread_yield() function.])
+ AC_MSG_CHECKING([whether pthread_yield() returns an int])
+ pthread_yield_ret_int=no
+ AC_TRY_COMPILE([
+ #if defined(ETHR_NEED_NPTL_PTHREAD_H)
+ #include <nptl/pthread.h>
+ #elif defined(ETHR_HAVE_MIT_PTHREAD_H)
+ #include <pthread/mit/pthread.h>
+ #elif defined(ETHR_HAVE_PTHREAD_H)
+ #include <pthread.h>
+ #endif
+ ],
+ [int pthread_yield();],
+ [pthread_yield_ret_int=yes])
+ AC_MSG_RESULT([$pthread_yield_ret_int])
+ if test $pthread_yield_ret_int = yes; then
+ AC_DEFINE(ETHR_PTHREAD_YIELD_RET_INT, 1, [Define if pthread_yield() returns an int.])
+ fi
+ fi
have_pthread_rwlock_init=no
AC_CHECK_FUNC(pthread_rwlock_init, [have_pthread_rwlock_init=yes])
if test $have_pthread_rwlock_init = yes; then
- AC_DEFINE(ETHR_HAVE_PTHREAD_RWLOCK_INIT, 1, \
-[Define if you have a pthread_rwlock implementation that can be used.])
-
ethr_have_pthread_rwlockattr_setkind_np=no
AC_CHECK_FUNC(pthread_rwlockattr_setkind_np,
[ethr_have_pthread_rwlockattr_setkind_np=yes])
@@ -876,12 +935,42 @@ case "$THR_LIB_NAME" in
fi
fi
+ if test "$force_pthread_rwlocks" = "yes"; then
+ AC_DEFINE(ETHR_FORCE_PTHREAD_RWLOCK, 1, \
+[Define if you want to force usage of pthread rwlocks])
+
+ if test $have_pthread_rwlock_init = yes; then
+ AC_MSG_WARN([Forced usage of pthread rwlocks. Note that this implementation may suffer from starvation issues.])
+ else
+ AC_MSG_ERROR([User forced usage of pthread rwlock, but no such implementation was found])
+ fi
+ fi
AC_CHECK_FUNC(pthread_attr_setguardsize, \
AC_DEFINE(ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE, 1, \
[Define if you have the pthread_attr_setguardsize function.]))
+ linux_futex=no
+ AC_MSG_CHECKING([for Linux futexes])
+ AC_TRY_LINK([
+ #include <sys/syscall.h>
+ #include <unistd.h>
+ #include <linux/futex.h>
+ #include <sys/time.h>
+ ],
+ [
+ int i = 1;
+ syscall(__NR_futex, (void *) &i, FUTEX_WAKE, 1,
+ (void*)0,(void*)0, 0);
+ syscall(__NR_futex, (void *) &i, FUTEX_WAIT, 0,
+ (void*)0,(void*)0, 0);
+ return 0;
+ ],
+ linux_futex=yes)
+ AC_MSG_RESULT([$linux_futex])
+ test $linux_futex = yes && AC_DEFINE(ETHR_HAVE_LINUX_FUTEX, 1, [Define if you have a linux futex implementation.])
+
AC_MSG_CHECKING([for GCC atomic operations])
ethr_have_gcc_atomic_ops=no
AC_TRY_LINK([],
@@ -899,10 +988,69 @@ case "$THR_LIB_NAME" in
AC_MSG_RESULT([$ethr_have_gcc_atomic_ops])
test $ethr_have_gcc_atomic_ops = yes && AC_DEFINE(ETHR_HAVE_GCC_ATOMIC_OPS, 1, [Define if you have gcc atomic operations])
+ case "$host_cpu" in
+ sun4u | sparc64 | sun4v)
+ ethr_have_native_atomics=yes;;
+ i86pc | i386 | i486 | i586 | i686 | x86_64 | amd64)
+ ethr_have_native_atomics=yes;;
+ macppc | ppc | "Power Macintosh")
+ ethr_have_native_atomics=yes;;
+ tile)
+ ethr_have_native_atomics=yes;;
+ *)
+ ;;
+ esac
+
+ AC_MSG_CHECKING([for a usable libatomic_ops implementation])
+ case "x$with_libatomic_ops" in
+ xno | xyes | x)
+ libatomic_ops_include=
+ ;;
+ *)
+ if test -d "${with_libatomic_ops}/include"; then
+ libatomic_ops_include="-I$with_libatomic_ops/include"
+ CPPFLAGS="$CPPFLAGS $libatomic_ops_include"
+ else
+ AC_MSG_ERROR([libatomic_ops include directory $with_libatomic_ops/include not found])
+ fi;;
+ esac
+ ethr_have_libatomic_ops=no
+ AC_TRY_LINK([#include "atomic_ops.h"],
+ [
+ volatile AO_t x;
+ AO_t y;
+ int z;
+
+ AO_nop_full();
+ AO_store(&x, (AO_t) 0);
+ z = AO_load(&x);
+ z = AO_compare_and_swap(&x, (AO_t) 0, (AO_t) 1);
+ ],
+ [ethr_have_native_atomics=yes
+ ethr_have_libatomic_ops=yes])
+ AC_MSG_RESULT([$ethr_have_libatomic_ops])
+ if test $ethr_have_libatomic_ops = yes; then
+ AC_CHECK_SIZEOF(AO_t, ,
+ [
+ #include <stdio.h>
+ #include "atomic_ops.h"
+ ])
+ AC_DEFINE_UNQUOTED(ETHR_SIZEOF_AO_T, $ac_cv_sizeof_AO_t, [Define to the size of AO_t if libatomic_ops is used])
+
+ AC_DEFINE(ETHR_HAVE_LIBATOMIC_OPS, 1, [Define if you have libatomic_ops atomic operations])
+ if test "x$with_libatomic_ops" != "xno" && test "x$with_libatomic_ops" != "x"; then
+ AC_DEFINE(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS, 1, [Define if you prefer libatomic_ops native ethread implementations])
+ fi
+ ETHR_DEFS="$ETHR_DEFS $libatomic_ops_include"
+ elif test "x$with_libatomic_ops" != "xno" && test "x$with_libatomic_ops" != "x"; then
+ AC_MSG_ERROR([No usable libatomic_ops implementation found])
+ fi
+
dnl Restore LIBS
LIBS=$saved_libs
dnl restore CPPFLAGS
CPPFLAGS=$saved_cppflags
+
;;
*)
;;
@@ -918,16 +1066,23 @@ fi
if test "x$ETHR_THR_LIB_BASE" != "x"; then
ETHR_DEFS="-DUSE_THREADS $ETHR_DEFS"
- ETHR_LIBS="-l$ethr_lib_name $ETHR_X_LIBS"
+ ETHR_LIBS="-l$ethr_lib_name -lerts_internal_r $ETHR_X_LIBS"
ETHR_LIB_NAME=$ethr_lib_name
fi
AC_CHECK_SIZEOF(void *)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF_PTR, $ac_cv_sizeof_void_p, [Define to the size of pointers])
-if test "X$disable_native_ethr_impls" = "Xyes"; then
- AC_DEFINE(ETHR_DISABLE_NATIVE_IMPLS, 1, [Define if you want to disable native ethread implementations])
-fi
+AC_ARG_ENABLE(native-ethr-impls,
+ AS_HELP_STRING([--disable-native-ethr-impls],
+ [disable native ethread implementations]),
+[ case "$enableval" in
+ no) disable_native_ethr_impls=yes ;;
+ *) disable_native_ethr_impls=no ;;
+ esac ], disable_native_ethr_impls=no)
+
+test "X$disable_native_ethr_impls" = "Xyes" &&
+ AC_DEFINE(ETHR_DISABLE_NATIVE_IMPLS, 1, [Define if you want to disable native ethread implementations])
AC_ARG_ENABLE(prefer-gcc-native-ethr-impls,
AS_HELP_STRING([--enable-prefer-gcc-native-ethr-impls],
@@ -940,6 +1095,21 @@ AC_ARG_ENABLE(prefer-gcc-native-ethr-impls,
test $enable_prefer_gcc_native_ethr_impls = yes &&
AC_DEFINE(ETHR_PREFER_GCC_NATIVE_IMPLS, 1, [Define if you prefer gcc native ethread implementations])
+AC_ARG_ENABLE(ethread-pre-pentium4-compatibility,
+ AS_HELP_STRING([--enable-ethread-pre-pentium4-compatibility],
+ [enable compatibility with x86 processors before pentium 4 (back to 486) in the ethread library]),
+[ case "$enableval" in
+ yes) enable_ethread_pre_pentium4_compatibility=yes ;;
+    *) enable_ethread_pre_pentium4_compatibility=no ;;
+  esac ], enable_ethread_pre_pentium4_compatibility=no)
+
+test $enable_ethread_pre_pentium4_compatibility = yes &&
+    AC_DEFINE(ETHR_PRE_PENTIUM4_COMPAT, 1, [Define if you want compatibility with x86 processors before pentium4.])
+
+AC_ARG_WITH(libatomic_ops,
+ AS_HELP_STRING([--with-libatomic_ops=PATH],
+ [use libatomic_ops with the ethread library]))
+
AC_DEFINE(ETHR_HAVE_ETHREAD_DEFINES, 1, \
[Define if you have all ethread defines])
@@ -948,6 +1118,7 @@ AC_SUBST(ETHR_LIBS)
AC_SUBST(ETHR_LIB_NAME)
AC_SUBST(ETHR_DEFS)
AC_SUBST(ETHR_THR_LIB_BASE)
+AC_SUBST(ETHR_THR_LIB_BASE_DIR)
])
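The Linux futex probe added above only verifies that FUTEX_WAIT/FUTEX_WAKE can be reached through syscall(). As a rough sketch of what ETHR_HAVE_LINUX_FUTEX enables, a futex-backed event can block and wake roughly as follows. This is hypothetical code, not the actual ethr_event implementation; it assumes Linux and the gcc builtins probed for above:

    /* Hedged sketch: a futex-backed "event" in the spirit of what the
     * ETHR_HAVE_LINUX_FUTEX probe above detects. Illustrative only. */
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void event_wait(volatile int *state)
    {
        /* Sleep in the kernel for as long as *state still reads 0. */
        while (*state == 0)
            syscall(__NR_futex, state, FUTEX_WAIT, 0, (void*)0, (void*)0, 0);
    }

    static void event_set(volatile int *state)
    {
        __sync_synchronize();   /* full barrier before publishing, using
                                   the gcc atomics also probed for above */
        *state = 1;
        /* Wake one waiter blocked in FUTEX_WAIT. */
        syscall(__NR_futex, state, FUTEX_WAKE, 1, (void*)0, (void*)0, 0);
    }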
diff --git a/erts/configure.in b/erts/configure.in
index 63bf548c89..157163fe0b 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -61,6 +61,9 @@ if test x"${ERL_TOP}/erts" != x"$srcdir"; then
fi
erl_top=${ERL_TOP}
+# Remove old configuration information
+/bin/rm -f "$ERL_TOP/erts/CONF_INFO"
+
# echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# echo X
# echo "X srcdir = $srcdir"
@@ -277,20 +280,6 @@ AC_ARG_ENABLE(clock-gettime,
*) clock_gettime_correction=yes ;;
esac ], clock_gettime_correction=unknown)
-AC_ARG_ENABLE(native-ethr-impls,
-[ --enable-native-ethr-impls enable native ethread implementations
- --disable-native-ethr-impls disable native ethread implementations],
-[ case "$enableval" in
- no) disable_native_ethr_impls=yes ;;
- *) disable_native_ethr_impls=no ;;
- esac ], disable_native_ethr_impls=no)
-
-dnl Defined in libraries/megaco/configure.in but we need it here
-dnl also in order to show it to the "top user"
-
-AC_ARG_ENABLE(megaco_flex_scanner_lineno,
-[ --disable-megaco-flex-scanner-lineno disable megaco flex scanner lineno])
-
dnl Magic test for clearcase.
OTP_RELEASE=
if test "${ERLANG_COMMERCIAL_BUILD}" != ""; then
@@ -1049,6 +1038,42 @@ if test $ERTS_BUILD_SMP_EMU = yes; then
fi
AC_DEFINE(ERTS_HAVE_SMP_EMU, 1, [Define if the smp emulator is built])
+
+ case "$ethr_have_native_atomics-$ethr_have_native_spinlock" in
+ yes-*)
+ ;;
+
+ no-yes)
+
+ test -f "$ERL_TOP/erts/CONF_INFO" ||
+ echo "" > "$ERL_TOP/erts/CONF_INFO"
+ cat >> $ERL_TOP/erts/CONF_INFO <<EOF
+
+	No native atomic implementation is available.
+	Fallbacks implemented using spinlocks will be
+	used. Note that the performance of the SMP
+	runtime system will suffer due to this.
+
+EOF
+ ;;
+
+ no-no)
+
+ test -f "$ERL_TOP/erts/CONF_INFO" ||
+ echo "" > "$ERL_TOP/erts/CONF_INFO"
+ cat >> "$ERL_TOP/erts/CONF_INFO" <<EOF
+
+	Neither a native atomic implementation nor a
+	native spinlock implementation is available.
+	Fallbacks implemented using mutexes will be
+	used. Note that the performance of the SMP
+	runtime system will suffer greatly due to this.
+
+EOF
+ ;;
+
+ esac
+
enable_threads=force
fi
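Both CONF_INFO notes above concern the same fallback layer: when no native atomics exist, every "atomic" operation is emulated under a lock. A hypothetical sketch of such a fallback (illustrative only; the real ethread fallback tables differ in detail):

    /* Hedged sketch of a lock-based atomic fallback. Every operation
     * takes the lock, which is why CONF_INFO warns that SMP performance
     * suffers; in the "no-no" case the spinlock becomes a mutex. */
    #include <pthread.h>

    typedef struct {
        pthread_spinlock_t lck;
        long val;
    } fallback_atomic_t;

    static long fallback_atomic_inc_read(fallback_atomic_t *a)
    {
        long res;
        pthread_spin_lock(&a->lck);
        res = ++a->val;
        pthread_spin_unlock(&a->lck);
        return res;
    }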
@@ -1221,13 +1246,7 @@ fi
AC_SUBST(EMU_LOCK_CHECKING)
-ERTS_INTERNAL_X_LIBS=
-
-AC_CHECK_LIB(kstat, kstat_open,
-[AC_DEFINE(HAVE_KSTAT, 1, [Define if you have kstat])
-ERTS_INTERNAL_X_LIBS="$ERTS_INTERNAL_X_LIBS -lkstat"])
-
-AC_SUBST(ERTS_INTERNAL_X_LIBS)
+ERL_INTERNAL_LIBS
dnl THR_LIBS and THR_DEFS are only used by odbc
THR_LIBS=$ETHR_X_LIBS
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index df80142ce1..0e26d62548 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -603,6 +603,24 @@
<item>
<p>Force ets memory block to be moved on realloc.</p>
</item>
+ <tag><marker id="+rg"><c><![CDATA[+rg ReaderGroupsLimit]]></c></marker></tag>
+ <item>
+ <p>Limits the number of reader groups used by read/write locks
+ optimized for read operations in the Erlang runtime system. By
+ default the reader groups limit is 8.</p>
+ <p>When the number of schedulers is less than or equal to the reader
+ groups limit, each scheduler has its own reader group. When the
+ number of schedulers is larger than the reader groups limit,
+ schedulers share reader groups. Shared reader groups degrade
+ read lock and read unlock performance, while a large number of
+ reader groups degrades write lock performance, so the limit is a
+ tradeoff between performance for read operations and performance
+ for write operations. Each reader group currently consumes 64 bytes
+ in each read/write lock. Also note that a runtime system using
+ shared reader groups benefits from <seealso marker="#+sbt">binding
+ schedulers to logical processors</seealso>, since the reader groups
+ are better distributed among schedulers.</p>
+ </item>
<tag><marker id="+S"><c><![CDATA[+S Schedulers:SchedulerOnline]]></c></marker></tag>
<item>
<p>Sets the amount of scheduler threads to create and scheduler
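The reader-group design documented above can be pictured with a small sketch. Each group owns a cache-line-padded read counter, so read lock/unlock touch only one line, while a write lock must visit every group; this is exactly the read/write tradeoff the +rg text describes. All names below are hypothetical, not the actual erts_smp_rwmtx implementation in ethr_mutex.c:

    /* Hedged sketch of a reader-group rwlock. Readers touch only their
     * own group's cache line; a writer must scan all groups. */
    #define RG_CACHE_LINE 64
    #define RG_GROUPS 8                     /* the default +rg limit */

    typedef struct {
        volatile long readers;
        char pad[RG_CACHE_LINE - sizeof(long)]; /* 64 bytes per group,
                                                   as documented above */
    } rg_group_t;

    typedef struct {
        rg_group_t grp[RG_GROUPS];
        volatile int writer;
    } rg_rwlock_t;

    static void rg_read_lock(rg_rwlock_t *l, int scheduler_ix)
    {
        rg_group_t *g = &l->grp[scheduler_ix % RG_GROUPS];
        for (;;) {
            (void) __sync_add_and_fetch(&g->readers, 1);
            if (!l->writer)
                return;                 /* fast path: uncontended line */
            (void) __sync_sub_and_fetch(&g->readers, 1);
            while (l->writer)
                ;                       /* spin while a writer is active */
        }
    }

    static void rg_read_unlock(rg_rwlock_t *l, int scheduler_ix)
    {
        (void) __sync_sub_and_fetch(&l->grp[scheduler_ix % RG_GROUPS].readers, 1);
    }

    static void rg_write_lock(rg_rwlock_t *l)
    {
        int i;
        while (!__sync_bool_compare_and_swap(&l->writer, 0, 1))
            ;
        for (i = 0; i < RG_GROUPS; i++)  /* cost grows with the group
                                            count: the write-side tradeoff */
            while (l->grp[i].readers)
                ;
    }

    static void rg_write_unlock(rg_rwlock_t *l)
    {
        __sync_synchronize();
        l->writer = 0;
    }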
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index eca6121a1e..2b9f70b0f4 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -199,6 +199,14 @@ MKDIR = @MKDIR@
USING_MINGW=@MIXED_CYGWIN_MINGW@
+ifeq ($(TARGET),win32)
+LIB_PREFIX=
+LIB_SUFFIX=.lib
+else
+LIB_PREFIX=lib
+LIB_SUFFIX=.a
+endif
+
OMIT_OMIT_FP=no
ifeq (@EMU_LOCK_CHECKING@,yes)
@@ -279,15 +287,12 @@ endif
ifeq ($(TARGET),win32)
LIBS += -L$(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE) -lepcre
-DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/epcre.lib
else
-LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a
-DEPLIBS += \
- $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a \
- $(ERL_TOP)/erts/lib/internal/$(TARGET)/liberts_internal.a
-# rem liberts_internal.a
+LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
endif
+DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
+
ELIB_FLAGS = -DENABLE_ELIB_MALLOC -DELIB_ALLOC_IS_CLIB -DELIB_HEAP_SBRK
PERFCTR_PATH=@PERFCTR_PATH@
@@ -305,7 +310,19 @@ LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
THR_LIB_NAME=@EMU_THR_LIB_NAME@
-THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER),$(ORG_THR_LIBS))
+ifneq ($(strip $(THR_LIB_NAME)),)
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal_r$(TYPEMARKER)$(LIB_SUFFIX) \
+ $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)ethread$(TYPEMARKER)$(LIB_SUFFIX)
+else
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal$(TYPEMARKER)$(LIB_SUFFIX)
+endif
+
+THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER), \
+ $(subst -lerts_internal_r,-lerts_internal_r$(TYPEMARKER),$(ORG_THR_LIBS)))
+
+LIBS += $(THR_LIBS)
+
+ifneq ($(findstring erts_internal_r, $(THR_LIBS)),erts_internal_r)
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
ERTS_INTERNAL_LIB=erts_internal
@@ -317,7 +334,9 @@ ERTS_INTERNAL_LIB=erts_internal
endif
endif
-LIBS += $(THR_LIBS) -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+
+endif # erts_internal_r
LIBS += @LIBRT@
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index e2a79d6e4f..6b3c106a97 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -41,8 +41,7 @@ static erts_smp_rwmtx_t atom_table_lock;
#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
-#define atom_init_lock() erts_smp_rwmtx_init(&atom_table_lock, \
- "atom_tab")
+
#if 0
#define ERTS_ATOM_PUT_OPS_STAT
#endif
@@ -304,12 +303,17 @@ init_atom_table(void)
HashFunctions f;
int i;
Atom a;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_smp_atomic_init(&atom_put_ops, 0);
#endif
- atom_init_lock();
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
f.alloc = (HALLOC_FUN) atom_alloc;
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 28f69b9460..0815cdbc7f 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -432,6 +432,7 @@ atom raw
atom re
atom re_pattern
atom re_run_trap
+atom read_concurrency
atom ready_input
atom ready_output
atom ready_async
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 16ae643ed9..87503af7d5 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -64,6 +64,14 @@
#ifdef DEBUG
static Uint install_debug_functions(void);
+#if 0
+#define HARD_DEBUG
+#ifdef __GNUC__
+#warning "* * * * * * * * * * * * * *"
+#warning "* HARD DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * *"
+#endif
+#endif
#endif
extern void elib_ensure_initialized(void);
@@ -391,6 +399,10 @@ refuse_af_strategy(struct au_init *init)
static void init_thr_ix(int static_ixs);
+#ifdef HARD_DEBUG
+static void hdbg_init(void);
+#endif
+
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
@@ -406,6 +418,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
ERTS_DEFAULT_ALCU_INIT
};
+#ifdef HARD_DEBUG
+ hdbg_init();
+#endif
+
erts_sys_alloc_init();
init_thr_ix(erts_no_schedulers);
erts_init_utils_mem();
@@ -2862,12 +2878,10 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0a:
- if (ethr_mutex_lock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_lock((ethr_mutex *) a1);
break;
case 0xf0b:
- if (ethr_mutex_unlock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_unlock((ethr_mutex *) a1);
break;
case 0xf0c: {
ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
@@ -2883,16 +2897,13 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0e:
- if (ethr_cond_broadcast((ethr_cond *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_cond_broadcast((ethr_cond *) a1);
break;
case 0xf0f: {
int res;
do {
res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
} while (res == EINTR);
- if (res != 0)
- ERTS_ALC_TEST_ABORT;
break;
}
case 0xf10: {
@@ -2939,8 +2950,11 @@ unsigned long erts_alc_test(unsigned long op,
#undef PRINT_OPS
#endif
-
+#ifdef HARD_DEBUG
+#define FENCE_SZ (4*sizeof(UWord))
+#else
#define FENCE_SZ (3*sizeof(UWord))
+#endif
#if defined(ARCH_64)
#define FENCE_PATTERN 0xABCDEF97ABCDEF97
@@ -2962,6 +2976,104 @@ unsigned long erts_alc_test(unsigned long op,
#define GET_TYPE_OF_PATTERN(P) \
(((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
+#ifdef HARD_DEBUG
+
+#define ERL_ALC_HDBG_MAX_MBLK 100000
+#define ERTS_ALC_O_CHECK -1
+
+typedef struct hdbg_mblk_ hdbg_mblk;
+struct hdbg_mblk_ {
+ hdbg_mblk *next;
+ hdbg_mblk *prev;
+ void *p;
+ Uint s;
+ ErtsAlcType_t n;
+};
+
+static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
+
+static hdbg_mblk *free_hdbg_mblks;
+static hdbg_mblk *used_hdbg_mblks;
+static erts_mtx_t hdbg_mblk_mtx;
+
+static void
+hdbg_init(void)
+{
+ int i;
+ for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
+ hdbg_mblks[i].next = &hdbg_mblks[i+1];
+ hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
+ free_hdbg_mblks = &hdbg_mblks[0];
+ used_hdbg_mblks = NULL;
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+}
+
+static void *check_memory_fence(void *ptr,
+ Uint *size,
+ ErtsAlcType_t n,
+ int func);
+void erts_hdbg_chk_blks(void);
+
+void
+erts_hdbg_chk_blks(void)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
+ Uint sz;
+ check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
+ ASSERT(sz == mblk->s);
+ }
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+static hdbg_mblk *
+hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ mblk = free_hdbg_mblks;
+ if (!mblk) {
+ erts_fprintf(stderr,
+ "Ran out of debug blocks; please increase "
+ "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
+ ERL_ALC_HDBG_MAX_MBLK);
+ abort();
+ }
+ free_hdbg_mblks = mblk->next;
+
+ mblk->p = p;
+ mblk->s = s;
+ mblk->n = n;
+
+ mblk->next = used_hdbg_mblks;
+ mblk->prev = NULL;
+ if (used_hdbg_mblks)
+ used_hdbg_mblks->prev = mblk;
+ used_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+ return (void *) mblk;
+}
+
+static void
+hdbg_free(hdbg_mblk *mblk)
+{
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ if (mblk->next)
+ mblk->next->prev = mblk->prev;
+ if (mblk->prev)
+ mblk->prev->next = mblk->next;
+ else
+ used_hdbg_mblks = mblk->next;
+
+ mblk->next = free_hdbg_mblks;
+ free_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+#endif
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
@@ -3007,17 +3119,28 @@ set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
{
UWord *ui_ptr;
UWord pattern;
+#ifdef HARD_DEBUG
+ hdbg_mblk **mblkpp;
+#endif
if (!ptr)
return NULL;
ui_ptr = (UWord *) ptr;
pattern = MK_PATTERN(n);
-
+
+#ifdef HARD_DEBUG
+ mblkpp = (hdbg_mblk **) ui_ptr++;
+#endif
+
*(ui_ptr++) = sz;
*(ui_ptr++) = pattern;
memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
+#ifdef HARD_DEBUG
+ *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
+#endif
+
return (void *) ui_ptr;
}
@@ -3029,6 +3152,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
UWord pre_pattern;
UWord post_pattern;
UWord *ui_ptr;
+#ifdef HARD_DEBUG
+ hdbg_mblk *mblk;
+#endif
if (!ptr)
return NULL;
@@ -3036,6 +3162,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
ui_ptr = (UWord *) ptr;
pre_pattern = *(--ui_ptr);
*size = sz = *(--ui_ptr);
+#ifdef HARD_DEBUG
+ mblk = (hdbg_mblk *) *(--ui_ptr);
+#endif
found_type = GET_TYPE_OF_PATTERN(pre_pattern);
if (pre_pattern != MK_PATTERN(n)) {
@@ -3091,6 +3220,17 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
(unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
}
+#ifdef HARD_DEBUG
+ switch (func) {
+ case ERTS_ALC_O_REALLOC:
+ case ERTS_ALC_O_FREE:
+ hdbg_free(mblk);
+ break;
+ default:
+ break;
+ }
+#endif
+
return (void *) ui_ptr;
}
@@ -3103,6 +3243,10 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
Uint dsize;
void *res;
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dsize = size + FENCE_SZ;
res = (*real_af->alloc)(n, real_af->extra, dsize);
@@ -3132,13 +3276,17 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
dsize = size + FENCE_SZ;
dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
if (old_size > size)
sys_memset((void *) (((char *) ptr) + size),
0xf,
sizeof(Uint) + old_size - size);
res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
-
+
res = set_memory_fence(res, size, n);
#ifdef PRINT_OPS
@@ -3168,6 +3316,10 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
#endif
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
}
static Uint
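The fence arithmetic in set_memory_fence()/check_memory_fence() above implies the following block layout (a sketch; the word names are descriptive, not identifiers from the source):

    /* Layout implied by the fence code above. FENCE_SZ is 3 UWords, or
     * 4 when HARD_DEBUG prepends the hdbg_mblk tracking pointer:
     *
     *   [hdbg_mblk *][ size ][ pre-pattern ][ user data, sz bytes ][ post-pattern ]
     *    HARD_DEBUG                                                 (unaligned copy)
     *
     * check_memory_fence() walks backwards from the user pointer:
     * pre-pattern first, then size, then (under HARD_DEBUG) the
     * tracking block released by hdbg_free() on free/realloc and
     * verified by erts_hdbg_chk_blks(). Sketch only. */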
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 6f88bbe5b8..7df9f19af0 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -212,7 +212,8 @@ type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
# INFO_DSBUF have to use the SYSTEM allocator; otherwise, a deadlock might occur
-type SCHDLR_DATA LONG_LIVED PROCESSES scheduler_data
+type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
+type SCHDLR_SLP_INFO LONG_LIVED SYSTEM scheduler_sleep_info
type RUNQS LONG_LIVED SYSTEM run_queues
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
type DDLL_HANDLE STANDARD SYSTEM ddll_handle
@@ -246,6 +247,7 @@ type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type ZLIB STANDARD SYSTEM zlib
+type RDR_GRPS_MAP LONG_LIVED SYSTEM reader_groups_map
+if smp
type ASYNC SHORT_LIVED SYSTEM async
@@ -269,7 +271,9 @@ type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
+if threads
-type ETHR_INTERNAL SYSTEM SYSTEM ethread_internal
+type ETHR_STD STANDARD SYSTEM ethread_standard
+type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
+type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
+ifnot smp
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index ca1ab44012..5128776c47 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2565,6 +2565,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_sched_stat_term(BIF_P, 1));
} else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
BIF_RET(erts_nif_taints(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
+ BIF_RET(erts_get_reader_groups_map(BIF_P));
}
BIF_ERROR(BIF_P, BADARG);
@@ -3428,6 +3430,16 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
return erts_fake_scheduler_bindings(BIF_P, tp[2]);
}
+ else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
+ Sint groups;
+ if (is_not_small(tp[2]))
+ BIF_ERROR(BIF_P, BADARG);
+ groups = signed_val(tp[2]);
+ if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
+ BIF_ERROR(BIF_P, BADARG);
+
+ BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
+ }
break;
}
default:
@@ -3732,8 +3744,8 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
* [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
*/
- ethr_atomic_read(&stats->tries, (long *)&tries);
- ethr_atomic_read(&stats->colls, (long *)&colls);
+ tries = (unsigned long) ethr_atomic_read(&stats->tries);
+ colls = (unsigned long) ethr_atomic_read(&stats->colls);
line = stats->line;
timer_s = stats->timer.s;
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index cbdaa459de..b0369a402b 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -78,11 +78,19 @@ enum DbIterSafety {
** The main meta table, containing all ets tables.
*/
#ifdef ERTS_SMP
-# define META_MAIN_TAB_LOCK_CNT 16
-static union {
- erts_smp_spinlock_t lck;
- byte _cache_line_alignment[64];
-}meta_main_tab_locks[META_MAIN_TAB_LOCK_CNT];
+
+#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
+#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
+#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+
+typedef union {
+ erts_smp_rwmtx_t rwmtx;
+ byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(erts_smp_rwmtx_t))];
+} erts_meta_main_tab_lock_t;
+
+static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+
#endif
static struct {
union {
@@ -104,17 +112,13 @@ static struct {
#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
-static ERTS_INLINE void meta_main_tab_lock(unsigned slot)
+static ERTS_INLINE erts_smp_rwmtx_t *
+get_meta_main_tab_lock(unsigned slot)
{
#ifdef ERTS_SMP
- erts_smp_spin_lock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
-#endif
-}
-
-static ERTS_INLINE void meta_main_tab_unlock(unsigned slot)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_unlock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
+ return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+#else
+ return NULL;
#endif
}
@@ -166,7 +170,8 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3 /* record write access */
+ LCK_WRITE_REC=3, /* record write access */
+ LCK_NONE=4
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -214,17 +219,17 @@ Export ets_select_continue_exp;
*/
static Export ets_delete_continue_exp;
-static ERTS_INLINE DbTable* db_ref(DbTable* tb)
+static ERTS_INLINE DbTable* db_ref(DbTable* tb, db_lock_kind_t kind)
{
- if (tb != NULL) {
+ if (tb != NULL && kind != LCK_READ) {
erts_refc_inc(&tb->common.ref, 2);
}
return tb;
}
-static ERTS_INLINE DbTable* db_unref(DbTable* tb)
+static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
{
- if (!erts_refc_dectest(&tb->common.ref, 0)) {
+ if (kind != LCK_READ && !erts_refc_dectest(&tb->common.ref, 0)) {
#ifdef HARDDEBUG
if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
@@ -256,12 +261,19 @@ static ERTS_INLINE DbTable* db_unref(DbTable* tb)
return tb;
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
+ char *rwname, char* fixname)
{
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ if (use_frequent_read_lock)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+#endif
erts_refc_init(&tb->common.ref, 1);
erts_refc_init(&tb->common.fixref, 0);
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
+ rwname, tb->common.the_name);
erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
@@ -297,7 +309,7 @@ static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
- (void) db_ref(tb);
+ (void) db_ref(tb, kind);
#ifdef ERTS_SMP
db_lock_take_over_ref(tb, kind);
#endif
@@ -331,7 +343,7 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
}
}
#endif
- (void) db_unref(tb); /* May delete table... */
+ (void) db_unref(tb, kind); /* May delete table... */
}
@@ -349,32 +361,49 @@ static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
}
static ERTS_INLINE
-DbTable* db_get_table(Process *p,
- Eterm id,
- int what,
- db_lock_kind_t kind)
+DbTable* db_get_table_aux(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind,
+ int meta_already_locked)
{
DbTable *tb = NULL;
+ erts_smp_rwmtx_t *mtl = NULL;
if (is_small(id)) {
Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- meta_main_tab_lock(slot);
+ if (!meta_already_locked) {
+ mtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rlock(mtl);
+ }
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ else {
+ erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
+ || erts_lc_rwmtx_is_rwlocked(test_mtl));
+ }
+#endif
if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
/* SMP: inc to prevent race between unlock of meta_main_tab_lock
* and the table locking outside the meta_main_tab_lock
*/
- tb = db_ref(meta_main_tab[slot].u.tb);
+ tb = db_ref(meta_main_tab[slot].u.tb, kind);
}
- meta_main_tab_unlock(slot);
}
else if (is_atom(id)) {
- erts_smp_rwmtx_t* rwlock;
- struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&rwlock);
- erts_smp_rwmtx_rlock(rwlock);
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
+ if (!meta_already_locked)
+ erts_smp_rwmtx_rlock(mtl);
+ else{
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ || erts_lc_rwmtx_is_rwlocked(mtl));
+ mtl = NULL;
+ }
+
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id) {
- tb = db_ref(bucket->pu.tb);
+ tb = db_ref(bucket->pu.tb, kind);
}
}
else { /* multi */
@@ -382,23 +411,33 @@ DbTable* db_get_table(Process *p,
Uint i;
for (i=0; i<cnt; i++) {
if (bucket->pu.mvec[i].u.name_atom == id) {
- tb = db_ref(bucket->pu.mvec[i].pu.tb);
+ tb = db_ref(bucket->pu.mvec[i].pu.tb, kind);
break;
}
}
}
}
- erts_smp_rwmtx_runlock(rwlock);
}
if (tb) {
db_lock_take_over_ref(tb, kind);
- if (tb->common.id == id && ((tb->common.status & what) != 0 ||
- p->id == tb->common.owner)) {
- return tb;
+ if (tb->common.id != id
+ || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+ db_unlock(tb, kind);
+ tb = NULL;
}
- db_unlock(tb, kind);
}
- return NULL;
+ if (mtl)
+ erts_smp_rwmtx_runlock(mtl);
+ return tb;
+}
+
+static ERTS_INLINE
+DbTable* db_get_table(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind)
+{
+ return db_get_table_aux(p, id, what, kind, 0);
}
/* Requires meta_main_tab_locks[slot] locked.
@@ -413,15 +452,15 @@ static ERTS_INLINE void free_slot(int slot)
erts_smp_spin_unlock(&meta_main_tab_main_lock);
}
-static int insert_named_tab(Eterm name_atom, DbTable* tb)
+static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
-
- erts_smp_rwmtx_rwlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -468,17 +507,32 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
-static int remove_named_tab(Eterm name_atom)
+static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
+ Eterm name_atom = tb->common.id;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
- erts_smp_rwmtx_rwlock(rwlock);
+#ifdef ERTS_SMP
+ if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(rwlock);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
+
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+
if (bucket->pu.tb == NULL) {
goto done;
}
@@ -529,7 +583,8 @@ static int remove_named_tab(Eterm name_atom)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -1133,6 +1188,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
+ erts_smp_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1141,34 +1197,65 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+
+ if (is_not_atom(BIF_ARG_2)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (is_not_atom(BIF_ARG_2)) {
- goto badarg;
+ (void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
+
+ if (is_small(BIF_ARG_1)) {
+ Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
+ lck2 = get_meta_main_tab_lock(slot);
+ }
+ else if (is_atom(BIF_ARG_1)) {
+ (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (lck1 == lck2)
+ lck2 = NULL;
+ else if (lck1 > lck2) {
+ erts_smp_rwmtx_t *tmp = lck1;
+ lck1 = lck2;
+ lck2 = tmp;
+ }
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
}
+ erts_smp_rwmtx_rwlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwlock(lck2);
+
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ if (!tb)
+ goto badarg;
+
if (is_not_atom(tb->common.id)) { /* Not a named table */
tb->common.the_name = BIF_ARG_2;
goto done;
}
- if (!insert_named_tab(BIF_ARG_2,tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
goto badarg;
- }
- if (!remove_named_tab(tb->common.id)) {
+
+ if (!remove_named_tab(tb, 1))
erl_exit(1,"Could not find named tab %s", tb->common.id);
- }
tb->common.id = tb->common.the_name = BIF_ARG_2;
done:
ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_RET(ret);
badarg:
- db_unlock(tb, LCK_WRITE);
+ if (tb)
+ db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_ERROR(BIF_P, BADARG);
}
@@ -1189,10 +1276,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
UWord heir_data;
Uint32 status;
Sint keypos;
- int is_named, is_fine_locked;
+ int is_named, is_fine_locked, frequent_read;
int cret;
DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
+ erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1205,6 +1293,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = 1;
is_named = 0;
is_fine_locked = 0;
+ frequent_read = 0;
heir = am_none;
heir_data = (UWord) am_undefined;
@@ -1238,6 +1327,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_fine_locked = 0;
} else break;
}
+ else if (tp[1] == am_read_concurrency) {
+ if (tp[2] == am_true) {
+ frequent_read = 1;
+ } else if (tp[2] == am_false) {
+ frequent_read = 0;
+ } else break;
+ }
else if (tp[1] == am_heir && tp[2] == am_none) {
heir = am_none;
heir_data = am_undefined;
@@ -1286,6 +1382,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
+#ifdef ERTS_SMP
+ if (frequent_read && !(status & DB_PRIVATE))
+ status |= DB_FREQ_READ;
+#endif
+
/* we create table outside any table lock
* and take the unusual cost of destroying the table if it
* fails to find a slot
@@ -1308,7 +1409,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- db_init_lock(tb, "db_tab", "db_tab_fix");
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
+ "db_tab", "db_tab_fix");
tb->common.keypos = keypos;
tb->common.owner = BIF_P->id;
set_heir(BIF_P, tb, heir, heir_data);
@@ -1351,15 +1453,17 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.id = ret;
tb->common.slot = slot; /* store slot for erase */
- meta_main_tab_lock(slot);
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
meta_main_tab[slot].u.tb = tb;
ASSERT(IS_SLOT_ALIVE(slot));
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
- if (is_named && !insert_named_tab(BIF_ARG_1, tb)) {
- meta_main_tab_lock(slot);
+ if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
free_slot(slot);
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
db_lock_take_over_ref(tb,LCK_WRITE);
free_heir_data(tb);
@@ -1499,6 +1603,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
int trap;
DbTable* tb;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1520,13 +1625,23 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
/* We must keep the slot, to be found by db_proc_dead() if process dies */
MARK_SLOT_DEAD(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
- if (is_atom(tb->common.id)) {
- remove_named_tab(tb->common.id);
- }
+ erts_smp_rwmtx_rwunlock(mmtl);
+ if (is_atom(tb->common.id))
+ remove_named_tab(tb, 0);
if (tb->common.owner != BIF_P->id) {
DeclareTmpHeap(meta_tuple,3,BIF_P);
@@ -1919,14 +2034,15 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0)
previous = NIL;
j = 0;
for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) {
- meta_main_tab_lock(i);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(i)) {
j++;
tb = meta_main_tab[i].u.tb;
previous = CONS(hp, tb->common.id, previous);
hp += 2;
}
- meta_main_tab_unlock(i);
+ erts_smp_rwmtx_runlock(mmtl);
}
HRelease(BIF_P, hendp, hp);
BIF_RET(previous);
@@ -2630,12 +2746,30 @@ void init_db(void)
size_t size;
#ifdef ERTS_SMP
- for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
- erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ meta_main_tab_locks = erts_alloc(ERTS_ALC_T_DB_TABLES,
+ (sizeof(erts_meta_main_tab_lock_t)
+ * (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE+1)));
+
+ if ((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) != 0)
+ meta_main_tab_locks = ((erts_meta_main_tab_lock_t *)
+ ((((Uint) meta_main_tab_locks)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+
+ ASSERT((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) == 0);
+
+ for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
+ erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
+ "meta_main_tab_slot", make_small(i));
}
erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i));
}
#endif
@@ -2895,12 +3029,12 @@ retry:
to_pid, to_locks,
ERTS_P2P_FLG_TRY_LOCK);
if (to_proc == ERTS_PROC_LOCK_BUSY) {
- db_ref(tb); /* while unlocked */
+ db_ref(tb, LCK_NONE); /* while unlocked */
db_unlock(tb,LCK_WRITE);
to_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
to_pid, to_locks);
db_lock(tb,LCK_WRITE);
- tb = db_unref(tb);
+ tb = db_unref(tb, LCK_NONE);
ASSERT(tb != NULL);
if (tb->common.owner != p->id) {
@@ -3008,12 +3142,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (!IS_SLOT_FREE(ix)) {
- tb = db_ref(GET_ANY_SLOT_TAB(ix));
+ tb = db_ref(GET_ANY_SLOT_TAB(ix), LCK_WRITE);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int do_yield;
db_lock_take_over_ref(tb, LCK_WRITE);
@@ -3045,7 +3180,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
tb->common.status |= DB_DELETE;
if (is_atom(tb->common.id))
- remove_named_tab(tb->common.id);
+ remove_named_tab(tb, 0);
free_heir_data(tb);
free_fixations_locked(tb);
@@ -3095,12 +3230,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(ix)) {
- tb = db_ref(meta_main_tab[ix].u.tb);
+ tb = db_ref(meta_main_tab[ix].u.tb, LCK_WRITE_REC);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int reds;
DbFixation** pp;
@@ -3274,6 +3410,7 @@ unlocked:
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
+ db_ref(tb, LCK_WRITE); /* LCK_WRITE need it, but not LCK_READ */
erts_smp_rwmtx_runlock(&tb->common.rwlock);
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
@@ -3386,6 +3523,7 @@ static int free_table_cont(Process *p,
int clean_meta_tab)
{
Eterm result;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
if (!first) {
@@ -3411,9 +3549,20 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
free_slot(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
if (clean_meta_tab) {
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
@@ -3421,7 +3570,7 @@ static int free_table_cont(Process *p,
make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
}
- db_unref(tb);
+ db_unref(tb, LCK_NONE);
BUMP_REDS(p, 100);
return 0;
}
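A condensed, hypothetical view of what the db_ref()/db_get_table_aux() changes above amount to: the meta-table lock is now held across acquisition of the table's own lock, so plain readers can skip the atomic reference count entirely. The helper names below are invented for the sketch, not functions from erl_db.c:

    /* Hedged sketch of the refc elision introduced above. */
    static DbTable *get_table_sketch(Process *p, Eterm id, db_lock_kind_t kind)
    {
        erts_smp_rwmtx_t *meta = meta_lock_for(id);    /* hypothetical */
        DbTable *tb;
        erts_smp_rwmtx_rlock(meta);
        tb = lookup_slot(id);                          /* hypothetical */
        if (tb && kind != LCK_READ)
            erts_refc_inc(&tb->common.ref, 2);  /* writers still pin the table */
        if (tb)
            db_lock_take_over_ref(tb, kind);    /* table lock taken ... */
        erts_smp_rwmtx_runlock(meta);           /* ... before meta is released,
                                                   so LCK_READ needs no refc */
        return tb;
    }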
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 96008ccef0..08117bb6e5 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -623,12 +623,16 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init(&tb->is_resizing, 0);
#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
int i;
+ if (tb->common.type & DB_FREQ_READ)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", make_small(i));
}
/* This important property is needed to guarantee that the buckets
* involved in a grow/shrink operation are protected by the same lock:
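The comment above refers to the fine-grained lock vector initialized in this hunk. In sketch form, the mapping from hash value to lock looks roughly like this (a hedged sketch; the real accessor in erl_db_hash.c may differ in detail):

    /* Bucket-to-lock mapping for the fine-grained hash locks above.
     * DB_HASH_LOCK_CNT is a power of two, so buckets that merge or
     * split during grow/shrink share the same lock. Sketch only. */
    static erts_smp_rwmtx_t *bucket_lock_sketch(DbTableHash *tb, HashValue hval)
    {
        return &tb->locks->lck_vec[hval & (DB_HASH_LOCK_CNT - 1)].lck;
    }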
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 382e5dceb5..672c5f2cd1 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -240,8 +240,9 @@ typedef struct db_table_common {
#define DB_DUPLICATE_BAG (1 << 8)
#define DB_ORDERED_SET (1 << 9)
#define DB_DELETE (1 << 10) /* table is being deleted */
+#define DB_FREQ_READ (1 << 11)
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED)
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index aa37edafd1..d42820ddf3 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -186,10 +186,9 @@ int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_trylock(&dmtx->mtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_mutex_trylock()");
- return res;
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_trylock()");
+ return ethr_mutex_trylock(&dmtx->mtx);
#else
return 0;
#endif
@@ -199,9 +198,9 @@ void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_lock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_lock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_lock()");
+ ethr_mutex_lock(&dmtx->mtx);
#endif
}
@@ -209,9 +208,9 @@ void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_unlock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_unlock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_unlock()");
+ ethr_mutex_unlock(&dmtx->mtx);
#endif
}
@@ -256,9 +255,9 @@ void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_signal(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_signal()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_signal()");
+ ethr_cond_signal(&dcnd->cnd);
#endif
}
@@ -266,9 +265,9 @@ void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_broadcast(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_broadcast()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_broadcast()");
+ ethr_cond_broadcast(&dcnd->cnd);
#endif
}
@@ -277,18 +276,13 @@ void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res;
if (!dcnd || !dmtx) {
- res = EINVAL;
- error:
- fatal_error(res, "erl_drv_cond_wait()");
+ fatal_error(EINVAL, "erl_drv_cond_wait()");
}
while (1) {
- res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
+ int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
if (res == 0)
break;
- if (res != EINTR)
- goto error;
}
#endif
}
@@ -333,10 +327,9 @@ int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
+ return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -346,9 +339,9 @@ void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+ ethr_rwmutex_rlock(&drwlck->rwmtx);
#endif
}
@@ -356,9 +349,9 @@ void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_runlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_runlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
+ ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
}
@@ -366,10 +359,9 @@ int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrwlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
+ return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -379,9 +371,9 @@ void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+ ethr_rwmutex_rwlock(&drwlck->rwmtx);
#endif
}
@@ -389,9 +381,9 @@ void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwunlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwunlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
+ ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
}
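The erl_drv_* changes above move error handling into ethread itself: the underlying ethr_mutex_lock()/ethr_mutex_unlock() no longer hand error codes back, so the driver wrappers only validate the handle and abort on NULL. Correctly written driver code is unaffected; a hedged usage sketch (q_mtx is a hypothetical driver-global lock created with erl_drv_mutex_create()):

    #include "erl_driver.h"

    static ErlDrvMutex *q_mtx;   /* hypothetical driver-global lock */

    static void enqueue(void *item)
    {
        erl_drv_mutex_lock(q_mtx);    /* no soft error to check anymore */
        /* ... mutate the driver's shared state using item ... */
        (void) item;
        erl_drv_mutex_unlock(q_mtx);
    }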
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 1f74393a96..5dce5ad262 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -37,8 +37,6 @@ static erts_smp_rwmtx_t erts_fun_table_lock;
#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
-#define erts_fun_init_lock() erts_smp_rwmtx_init(&erts_fun_table_lock, \
- "fun_tab")
static HashValue fun_hash(ErlFunEntry* obj);
static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -57,8 +55,12 @@ void
erts_init_fun_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
- erts_fun_init_lock();
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
f.alloc = (HALLOC_FUN) fun_alloc;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 4a4507b212..14bd10b42c 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -78,6 +78,7 @@ static erts_tid_t main_thread;
erts_cpu_info_t *erts_cpuinfo;
+int erts_reader_groups;
int erts_use_sender_punish;
/*
@@ -110,6 +111,7 @@ int erts_compat_rel;
static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
+static int max_reader_groups;
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -505,6 +507,7 @@ void erts_usage(void)
ERTS_MIN_COMPAT_REL, this_rel_num());
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
+ erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
@@ -538,6 +541,50 @@ void erts_usage(void)
erl_exit(-1, "");
}
+#ifdef USE_THREADS
+/*
+ * allocators for thread lib
+ */
+
+static void *ethr_std_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_STD, (Uint) size);
+}
+static void *ethr_std_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_STD, ptr, (Uint) size);
+}
+static void ethr_std_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_STD, ptr);
+}
+static void *ethr_sl_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_SL, (Uint) size);
+}
+static void *ethr_sl_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_SL, ptr, (Uint) size);
+}
+static void ethr_sl_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_SL, ptr);
+}
+static void *ethr_ll_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_LL, (Uint) size);
+}
+static void *ethr_ll_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_LL, ptr, (Uint) size);
+}
+static void ethr_ll_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_LL, ptr);
+}
+
+#endif
+
static void
early_init(int *argc, char **argv) /*
* Only put things here which are
@@ -615,9 +662,15 @@ early_init(int *argc, char **argv) /*
? ncpuavail
: (ncpuonln > 0 ? ncpuonln : no_schedulers));
+#ifdef ERTS_SMP
+ erts_max_main_threads = no_schedulers_online;
+#endif
+
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ max_reader_groups = ERTS_MAX_READER_GROUPS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -627,6 +680,24 @@ early_init(int *argc, char **argv) /*
}
if (argv[i][0] == '-') {
switch (argv[i][1]) {
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ char *arg = get_arg(sub_param+1, argv[i+1], &i);
+ if (sscanf(arg, "%d", &max_reader_groups) != 1) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %s\n", arg);
+ erts_usage();
+ }
+ if (max_reader_groups < 0) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %d\n",
+ max_reader_groups);
+ erts_usage();
+ }
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -699,6 +770,36 @@ early_init(int *argc, char **argv) /*
erts_early_init_scheduling(); /* Require allocators */
erts_init_utils(); /* Require allocators */
+#ifdef USE_THREADS
+ {
+ erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
+ elid.mem.std.alloc = ethr_std_alloc;
+ elid.mem.std.realloc = ethr_std_realloc;
+ elid.mem.std.free = ethr_std_free;
+ elid.mem.sl.alloc = ethr_sl_alloc;
+ elid.mem.sl.realloc = ethr_sl_realloc;
+ elid.mem.sl.free = ethr_sl_free;
+ elid.mem.ll.alloc = ethr_ll_alloc;
+ elid.mem.ll.realloc = ethr_ll_realloc;
+ elid.mem.ll.free = ethr_ll_free;
+
+#ifdef ERTS_SMP
+ elid.main_threads = erts_max_main_threads;
+#else
+ elid.main_threads = 1;
+#endif
+ elid.reader_groups = (elid.main_threads > 1
+ ? elid.main_threads
+ : 0);
+ if (max_reader_groups <= 1)
+ elid.reader_groups = 0;
+ if (elid.reader_groups > max_reader_groups)
+ elid.reader_groups = max_reader_groups;
+ erts_reader_groups = elid.reader_groups;
+
+ erts_thr_late_init(&elid);
+ }
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
#endif
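
The reader-groups setup above wants one group per main thread and then clamps against the -rg limit; a limit of 0 or 1 disables grouping entirely. A self-contained sketch that mirrors the same arithmetic (not the ERTS code itself):

    #include <stdio.h>

    /* Mirrors the clamping above: one reader group per main thread,
     * bounded by the -rg limit; a limit <= 1 disables grouping. */
    static int reader_groups(int main_threads, int max_reader_groups)
    {
        int rg = main_threads > 1 ? main_threads : 0;
        if (max_reader_groups <= 1)
            rg = 0;
        if (rg > max_reader_groups)
            rg = max_reader_groups;
        return rg;
    }

    int main(void)
    {
        printf("%d\n", reader_groups(8, 4));  /* 8 schedulers, -rg 4 -> 4 */
        printf("%d\n", reader_groups(1, 64)); /* single main thread  -> 0 */
        return 0;
    }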
@@ -1193,9 +1294,17 @@ erl_start(int argc, char **argv)
erts_async_thread_suggested_stack_size));
break;
- case 'r':
- erts_ets_realloc_always_moves = 1;
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ get_arg(sub_param+1, argv[i+1], &i);
+ /* already handled */
+ }
+ else {
+ erts_ets_realloc_always_moves = 1;
+ }
break;
+ }
case 'n': /* XXX obsolete */
break;
case 'c':
@@ -1280,6 +1389,7 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
+ erts_thr_set_main_status(1, 1);
set_main_stack_size();
process_main();
#endif
@@ -1353,7 +1463,7 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
+#if defined(USE_THREADS)
exit_async();
#endif
#if HAVE_ERTS_MSEG
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index cee470ae37..99cc80e259 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -96,10 +96,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_status", "pid" },
{ "proc_tab", NULL },
{ "ports_snapshot", NULL },
- { "db_tab", "address" },
- { "db_tab_fix", "address" },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
+ { "db_tab", "address" },
+ { "db_tab_fix", "address" },
{ "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
@@ -119,9 +119,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "child_status", NULL },
#endif
#ifdef __WIN32__
- { "sys_driver_data_lock", NULL },
+ { "sys_driver_data_lock", NULL },
#endif
- { "drv_ev_state_grow", NULL, },
+ { "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
{ "pollset_rm_list", NULL },
@@ -153,6 +153,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "instr", NULL },
{ "fix_alloc", "index" },
{ "alcu_allocator", "index" },
+ { "alcu_delayed_free", "index" },
{ "mseg", NULL },
#ifdef HALFWORD_HEAP
{ "pmmap", NULL },
@@ -177,17 +178,19 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
+ { "run_queue_sleep_list", "address" },
#endif
{ "alloc_thr_ix_lock", NULL },
#ifdef ERTS_SMP
- { "proc_lck_wtr_alloc", NULL },
+ { "proc_lck_qs_alloc", NULL },
#endif
#ifdef __WIN32__
#ifdef DEBUG
{ "save_ops_lock", NULL },
#endif
#endif
- { "mtrace_buf", NULL }
+ { "mtrace_buf", NULL },
+ { "erts_alloc_hard_debug", NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
@@ -199,6 +202,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
& ERTS_LC_FLG_LT_ALL \
& ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+static __decl_noreturn void __noreturn lc_abort(void);
+
static char *
lock_type(Uint16 flags)
{
@@ -222,7 +227,7 @@ rw_op_str(Uint16 flags)
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
erts_fprintf(stderr, "\nInternal error\n");
- abort();
+ lc_abort();
default:
break;
}
@@ -271,28 +276,18 @@ static erts_lc_free_block_t *free_blocks;
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
static ethr_spinlock_t free_blocks_lock;
-#define ERTS_LC_LOCK ethr_spin_lock
-#define ERTS_LC_UNLOCK ethr_spin_unlock
-#else
-static ethr_mutex free_blocks_lock;
-#define ERTS_LC_LOCK ethr_mutex_lock
-#define ERTS_LC_UNLOCK ethr_mutex_unlock
-#endif
static ERTS_INLINE void
lc_lock(void)
{
- if (ERTS_LC_LOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_lock(&free_blocks_lock);
}
static ERTS_INLINE void
lc_unlock(void)
{
- if (ERTS_LC_UNLOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_unlock(&free_blocks_lock);
}
static ERTS_INLINE void lc_free(void *p)
@@ -313,7 +308,7 @@ static void *lc_core_alloc(void)
{
lc_unlock();
erts_fprintf(stderr, "Lock checker out of memory!\n");
- abort();
+ lc_abort();
}
#else
@@ -327,7 +322,7 @@ static void *lc_core_alloc(void)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- abort();
+ lc_abort();
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -367,11 +362,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- abort();
+ lc_abort();
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
l_lcks->tid = erts_thr_self();
l_lcks->required.first = NULL;
@@ -513,7 +508,7 @@ uninitialized_lock(void)
{
erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
static void
@@ -523,7 +518,7 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -533,7 +528,7 @@ unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -541,7 +536,7 @@ unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -550,7 +545,7 @@ lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
print_lock("Lock order violation occured when locking ", lck, "!\n");
print_curr_locks(l_lcks);
print_lock_order();
- abort();
+ lc_abort();
}
static void
@@ -561,7 +556,7 @@ type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
print_lock(op, lck, "!\n");
ASSERT(l_lcks);
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -613,7 +608,7 @@ lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
}
}
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -621,7 +616,7 @@ unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking required ", lck, " lock!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -629,7 +624,7 @@ unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *l
{
print_lock("Unrequire on ", lck, " lock not required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -637,7 +632,7 @@ require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Require on ", lck, " lock already required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -645,7 +640,7 @@ required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Required ", lck, " lock not locked!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
@@ -658,13 +653,23 @@ thread_exit_handler(void)
erts_fprintf(stderr,
"Thread exiting while having locked locks!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
destroy_locked_locks(l_lcks);
/* erts_tsd_set(locks_key, NULL); */
}
}
+static __decl_noreturn void
+lc_abort(void)
+{
+#ifdef __WIN32__
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
void
erts_lc_set_thread_name(char *thread_name)
{
@@ -676,7 +681,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
}
}
@@ -686,7 +691,7 @@ erts_lc_assert_failed(char *file, int line, char *assertion)
erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
file, line, assertion);
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
return 0;
}
@@ -699,7 +704,7 @@ void erts_lc_fail(char *fmt, ...)
va_end(args);
erts_fprintf(stderr, "\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
@@ -719,7 +724,7 @@ erts_lc_get_lock_order_id(char *name)
"(update erl_lock_check.c)\n",
name);
}
- abort();
+ lc_abort();
return (Sint16) -1;
}
@@ -895,6 +900,25 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
}
+void
+erts_lc_check_no_locked_of_type(Uint16 flags)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (l_lcks) {
+	erts_lc_locked_lock_t *l_lck;
+ for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
+ if (l_lck->flags & flags) {
+ erts_fprintf(stderr,
+ "Locked lock of type %s found which isn't "
+ "allowed here!\n",
+ lock_type(l_lck->flags));
+ print_curr_locks(l_lcks);
+ lc_abort();
+ }
+ }
+ }
+}
+
int
erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
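
The new erts_lc_check_no_locked_of_type() gives lock-check builds a way to assert that the calling thread holds no lock of a given class. A hypothetical call site (a sketch, not from the patch):

    /* E.g. before parking a scheduler thread: holding a spinlock
     * across a sleep would leave peers busy-waiting indefinitely. */
    #ifdef ERTS_ENABLE_LOCK_CHECK
        erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_SPINLOCK);
    #endif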
@@ -1283,13 +1307,8 @@ erts_lc_init(void)
free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
-#ifdef ETHR_HAVE_NATIVE_LOCKS
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- abort();
-#else
- if (ethr_mutex_init(&free_blocks_lock) != 0)
- abort();
-#endif
+ lc_abort();
erts_tsd_key_create(&locks_key);
}
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index d5e2ede9ac..0372e6850d 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -77,6 +77,7 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
+void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 26028aeefc..239773f366 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -166,8 +166,8 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
int i;
type = lcnt_lock_type(lock->flag);
- ethr_atomic_read(&lock->r_state, &r_state);
- ethr_atomic_read(&lock->w_state, &w_state);
+ r_state = ethr_atomic_read(&lock->r_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (lock->flag & flag) {
@@ -394,10 +394,10 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
ASSERT(eltd);
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (option & ERTS_LCNT_LO_WRITE) {
- ethr_atomic_read(&lock->r_state, &r_state);
+ r_state = ethr_atomic_read(&lock->r_state);
ethr_atomic_inc( &lock->w_state);
}
if (option & ERTS_LCNT_LO_READ) {
@@ -423,7 +423,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ethr_atomic_inc( &lock->w_state);
eltd = lcnt_get_thread_data();
@@ -478,7 +478,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
#ifdef DEBUG
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
}
@@ -522,12 +522,12 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
#ifdef DEBUG
/* flowstate */
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
ethr_atomic_dec( &lock->flowstate);
/* write state */
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
    ASSERT(w_state > 0);
#endif
ethr_atomic_dec(&lock->w_state);
@@ -558,7 +558,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
if (res != EBUSY) {
#ifdef DEBUG
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
#endif
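
Every erl_lock_count.c change in this file tracks one ethread API change: ethr_atomic_read() no longer returns an error code and reports the value through an out-parameter, it simply returns the value, since atomic reads cannot fail. Side by side:

    long w_state;

    /* Old interface: value via out-parameter, int error code returned. */
    ethr_atomic_read(&lock->w_state, &w_state);

    /* New interface: plain value return. */
    w_state = ethr_atomic_read(&lock->w_state);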
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 9cf55ee319..b1478758a1 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -585,9 +585,7 @@ void erts_mtrace_init(char *receiver, char *nodename)
Uint16 port;
erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_set_forksafe(&mtrace_buf_mutex);
erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
- erts_mtx_set_forksafe(&mtrace_op_mutex);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 7482da251e..e430b4ad77 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -80,6 +80,8 @@ dist_table_alloc(void *dep_tmpl)
Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
return dep_tmpl;
@@ -92,7 +94,7 @@ dist_table_alloc(void *dep_tmpl)
dep->prev = NULL;
erts_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_x(&dep->rwmtx, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -580,6 +582,18 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
ErlNode ne;
ne.sysname = sysname;
ne.creation = creation;
+
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ res = hash_get(&erts_node_table, (void *) &ne);
+ if (res && res != erts_this_node) {
+ long refc = erts_refc_inctest(&res->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&res->refc, 1);
+ }
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ if (res)
+ return res;
+
erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
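
erts_find_or_insert_node() now serves the common case, an already-known node, under the read lock and only falls back to the write path on a miss; the erts_refc_inctest() guard re-increments entries whose refcount reveals a new or pending-delete state (< 2). A self-contained analogue of the lookup shape using POSIX rw-locks (a sketch with hypothetical table_get/table_put helpers, not the ERTS API):

    #include <pthread.h>

    typedef struct { pthread_rwlock_t lock; /* hash table ... */ } table_t;

    void *table_get(table_t *t, const void *key);  /* hypothetical lookup */
    void *table_put(table_t *t, const void *key);  /* hypothetical insert */

    void *find_or_insert(table_t *t, const void *key)
    {
        void *res;

        pthread_rwlock_rdlock(&t->lock);   /* common case: hit, readers only */
        res = table_get(t, key);
        pthread_rwlock_unlock(&t->lock);
        if (res)
            return res;

        pthread_rwlock_wrlock(&t->lock);   /* miss: insert under write lock */
        res = table_put(t, key);           /* must tolerate a racing insert */
        pthread_rwlock_unlock(&t->lock);
        return res;
    }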
@@ -696,8 +710,12 @@ erts_set_this_node(Eterm sysname, Uint creation)
void erts_init_node_tables(void)
{
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
HashFunctions f;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
f.alloc = (HALLOC_FUN) dist_table_alloc;
@@ -719,9 +737,10 @@ void erts_init_node_tables(void)
erts_this_dist_entry->prev = NULL;
erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
- erts_smp_rwmtx_init_x(&erts_this_dist_entry->rwmtx,
- "dist_entry",
- make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_smp_rwmtx_init_opt_x(&erts_this_dist_entry->rwmtx,
+ &rwmtx_opt,
+ "dist_entry",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
erts_this_dist_entry->sysname = am_Noname;
erts_this_dist_entry->cid = NIL;
erts_this_dist_entry->connection_id = 0;
@@ -772,8 +791,8 @@ void erts_init_node_tables(void)
(void) hash_put(&erts_node_table, (void *) erts_this_node);
- erts_smp_rwmtx_init(&erts_node_table_rwmtx, "node_table");
- erts_smp_rwmtx_init(&erts_dist_table_rwmtx, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
references_atoms_need_init = 1;
}
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 967a14f0d1..c10724b951 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -625,6 +625,7 @@ erts_port_task_schedule(Eterm id,
if (!enq_port) {
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ erts_smp_runq_unlock(runq);
}
else {
enqueue_port(runq, pp);
@@ -634,9 +635,10 @@ erts_port_task_schedule(Eterm id,
profile_runnable_port(pp, am_active);
}
+ erts_smp_runq_unlock(runq);
+
erts_smp_notify_inc_runq(runq);
}
- erts_smp_runq_unlock(runq);
return 0;
}
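
This hunk (and the xrunq hunk that follows) reorders wakeup against unlock: the run queue is released before erts_smp_notify_inc_runq() is called, so a scheduler woken by the notification does not immediately stall on the lock its waker still holds. The classic condition-variable analogue of the same reordering (a sketch, not the ERTS code):

    #include <pthread.h>

    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  qcond = PTHREAD_COND_INITIALIZER;
    static int qlen;

    void produce(void)
    {
        pthread_mutex_lock(&qlock);
        qlen++;                        /* "enqueue" work */
        pthread_mutex_unlock(&qlock);  /* unlock first ... */
        pthread_cond_signal(&qcond);   /* ... then notify */
    }

    void consume(void)
    {
        pthread_mutex_lock(&qlock);
        while (qlen == 0)                      /* predicate re-check makes the */
            pthread_cond_wait(&qcond, &qlock); /* unlock-then-signal order safe */
        qlen--;                        /* "dequeue" */
        pthread_mutex_unlock(&qlock);
    }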
@@ -944,8 +946,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
- erts_smp_notify_inc_runq(xrunq);
erts_smp_runq_unlock(xrunq);
+ erts_smp_notify_inc_runq(xrunq);
}
#endif
port_was_enqueued = 1;
@@ -1112,7 +1114,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
dequeue_port(from_rq, prt);
erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
enqueue_port(to_rq, prt);
- erts_smp_notify_inc_runq(to_rq);
return ERTS_MIGRATE_SUCCESS;
}
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 8c88353593..a644520442 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -46,7 +46,13 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
-#define ERTS_SCHED_SLEEP_SPINCOUNT 10000
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \
+ (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT)
+#define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0
#define ERTS_WAKEUP_OTHER_LIMIT (100*CONTEXT_REDS/2)
#define ERTS_WAKEUP_OTHER_DEC 10
@@ -106,6 +112,10 @@ Uint erts_no_schedulers;
Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
+#ifdef ERTS_SMP
+Uint erts_max_main_threads;
+#endif
+
int erts_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -116,16 +126,34 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
int erts_disable_proc_not_running_opt;
-#define ERTS_SCHED_CHANGING_ONLINE 1
-#define ERTS_SCHED_CHANGING_MULTI_SCHED 2
+#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((long) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_MSB (((long) 1) << 1)
+#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((long) 1) << 2)
+
+#ifndef DEBUG
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic_set(&schdlr_sspnd.changing, (VAL))
+
+#else
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ long old_val__ = erts_smp_atomic_xchg(&schdlr_sspnd.changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+
+#endif
+
static struct {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- int changing;
int online;
int curr_online;
int wait_curr_online;
+ erts_smp_atomic_t changing;
erts_smp_atomic_t active;
struct {
erts_smp_atomic_t ongoing;
@@ -231,6 +259,17 @@ typedef union {
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+#ifdef ERTS_SMP
+
+typedef union {
+ ErtsSchedulerSleepInfo ssi;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
+} ErtsAlignedSchedulerSleepInfo;
+
+static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
+
+#endif
+
#ifndef BM_COUNTERS
static int processes_busy;
#endif
@@ -288,8 +327,15 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
-#define ERTS_RUNQ_IX(IX) (&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_SCHEDULER_IX(IX) (&erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_RUNQ_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \
+ &erts_aligned_run_queues[(IX)].runq)
+#define ERTS_SCHEDULER_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -353,6 +399,11 @@ static void signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size
#endif
+static int reader_group_lookup(int logical);
+static void create_tmp_cpu_topology_copy(erts_cpu_topology_t **cpudata,
+ int *cpudata_size);
+static void destroy_tmp_cpu_topology_copy(erts_cpu_topology_t *cpudata);
+
static void early_cpu_bind_init(void);
static void late_cpu_bind_init(void);
@@ -582,6 +633,76 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
#ifdef ERTS_SMP
+void
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
+{
+ switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_sys_schedule_interrupt(1);
+ break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_tse_set(ssi->event);
+ break;
+ case 0:
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
+ __FILE__, __LINE__);
+ break;
+ }
+}
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+void
+erts_smp_notify_check_children_needed(void)
+{
+ int i;
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ long aux_work;
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
+ aux_work = erts_smp_atomic_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
+ erts_sched_poke(ssi);
+ }
+}
+#endif
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+blockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
+ aux_work = erts_smp_atomic_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+ erts_check_children();
+ }
+#endif
+ }
+ return aux_work;
+}
+
+#endif
+
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+nonblockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
+
+    }
+    return aux_work;
+}
+#endif
+
static void
prepare_for_block(void *vrq)
{
@@ -634,7 +755,31 @@ erts_active_schedulers(void)
return as;
}
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
#ifdef ERTS_SMP
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ }
+ return 0;
+#else
+ return !erts_port_task_have_outstanding_io_tasks();
+#endif
+}
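+
+/*
+ * prepare_for_sys_schedule() claims the single poller slot with an
+ * atomic exchange and then re-checks that no I/O tasks arrived in the
+ * window before the claim, backing out if they did. A self-contained
+ * C11 analogue of the claim/re-check shape (sketch, not the ERTS code;
+ * have_outstanding_io_tasks() is a hypothetical stand-in):
+ *
+ *     #include <stdatomic.h>
+ *     #include <stdbool.h>
+ *
+ *     static atomic_int doing_sys_schedule;
+ *
+ *     bool have_outstanding_io_tasks(void);      // hypothetical
+ *
+ *     bool try_claim_sys_schedule(void)
+ *     {
+ *         while (!have_outstanding_io_tasks()
+ *                && !atomic_exchange(&doing_sys_schedule, 1)) {
+ *             if (!have_outstanding_io_tasks())
+ *                 return true;                   // claimed, still needed
+ *             atomic_store(&doing_sys_schedule, 0); // raced: back out, retry
+ *         }
+ *         return false;
+ *     }
+ */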
+
+#ifdef ERTS_SMP
+
+static ERTS_INLINE void
+sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq->waiting < 0);
+ rq->waiting *= -1;
+}
static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
@@ -695,101 +840,304 @@ non_empty_runq(ErtsRunQueue *rq)
}
}
-static ERTS_INLINE int
-sched_spin_wake(ErtsRunQueue *rq)
+static long
+sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = 0;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_atomic_inc(&rq->spin_wake);
- return 1;
- }
- return 0;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (!(oflgs & ERTS_SSI_FLG_SUSPENDED));
+ return oflgs;
}
-static ERTS_INLINE int
-sched_spin_wake_all(ErtsRunQueue *rq)
+static long
+sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = ERTS_SSI_FLG_WAITING;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0)
- erts_smp_atomic_add(&rq->spin_wake, val);
- return val;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ } while (oflgs & ERTS_SSI_FLG_WAITING);
+ return oflgs;
+}
+
+static long
+sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ long until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
}
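
sched_spin_wait() (like sched_spin_suspended() further down) spins on the flag word for a bounded count but yields every ERTS_SCHED_SPIN_UNTIL_YIELD iterations, so on oversubscribed cores a spinning scheduler does not starve the thread that would wake it. A self-contained analogue (a sketch, not the ERTS code):

    #include <sched.h>
    #include <stdatomic.h>

    #define SPIN_UNTIL_YIELD 100   /* mirrors ERTS_SCHED_SPIN_UNTIL_YIELD */

    /* Spin on a flag word for a bounded count, yielding periodically
     * so co-scheduled threads (e.g. the prospective waker) can run. */
    long spin_wait(atomic_long *flags, int spincount, long sleep_bits)
    {
        int until_yield = SPIN_UNTIL_YIELD;
        long f;
        do {
            f = atomic_load(flags);
            if ((f & sleep_bits) != sleep_bits)
                break;              /* a waker cleared the sleep bits */
            if (--until_yield == 0) {
                until_yield = SPIN_UNTIL_YIELD;
                sched_yield();
            }
        } while (--spincount > 0);
        return f;
    }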
+static long
+sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
+{
+ long oflgs;
+ long nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+
+ if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ }
+}
+
+#define ERTS_SCHED_WAIT_WOKEN(FLGS) \
+ (((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
+ != ERTS_SSI_FLG_WAITING)
+
static void
-sched_sys_wait(Uint no, ErtsRunQueue *rq)
+scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
- long dt;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ int spincount;
+ long flgs;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
+
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ erts_smp_spin_lock(&rq->sleepers.lock);
+ flgs = sched_prep_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+ /* Go suspend instead... */
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+ return;
+ }
+
+ ssi->prev = NULL;
+ ssi->next = rq->sleepers.list;
+ if (rq->sleepers.list)
+ rq->sleepers.list->prev = ssi;
+ rq->sleepers.list = ssi;
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+
+ /*
+ * If all schedulers are waiting, one of them *should*
+ * be waiting in erl_sys_schedule()
+ */
+
+ if (!prepare_for_sys_schedule()) {
+
+ sched_waiting(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ tse_wait:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ tse_blockable_aux_work:
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- sched_waiting_sys(no, rq);
+ while (1) {
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
- erl_sys_schedule(1); /* Might give us something to do */
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto tse_blockable_aux_work;
+ }
+#endif
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_runq_unlock(rq);
}
- }
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
+ erts_smp_runq_lock(rq);
+ sched_active(esdp->no, rq);
+
}
else {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ long dt;
+
+ erts_smp_atomic_set(&function_calls, 0);
+ *fcalls = 0;
+
+ sched_waiting_sys(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT;
+
+ while (spincount-- > 0) {
+
+ sys_poll_aux_work:
+
+ erl_sys_schedule(1); /* Might give us something to do */
+
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+
+ sys_aux_work:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ }
+
+ /*
+ * If we got new I/O tasks we aren't allowed to
+	     * call erl_sys_schedule() until they have been handled.
+ */
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+		     * Have to check that we still have I/O tasks; otherwise
+ * we have to continue checking for I/O...
+ */
+ if (!prepare_for_sys_schedule()) {
+ spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
+ goto tse_wait;
+ }
+ }
+ }
+
+ erts_smp_runq_lock(rq);
+
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
*/
- if (!erts_port_task_have_outstanding_io_tasks()) {
-#endif
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+	     * Have to check that we still have I/O tasks; otherwise
+ * we have to wait in erl_sys_schedule() after all...
+ */
+ if (prepare_for_sys_schedule())
+ goto do_sys_schedule;
+
+ /*
+ * Not allowed to wait in erl_sys_schedule;
+ * do tse wait instead...
+ */
+ sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ erts_smp_runq_unlock(rq);
+ spincount = 0;
+ goto tse_wait;
+ }
+ else {
+ do_sys_schedule:
erts_sys_schedule_interrupt(0);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ if (!(flgs & ERTS_SSI_FLG_WAITING))
+ goto sys_locked_woken;
+ erts_smp_runq_unlock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ goto sys_poll_aux_work;
+ }
+
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+
erts_smp_runq_unlock(rq);
erl_sys_schedule(0);
@@ -797,134 +1145,103 @@ sched_sys_wait(Uint no, ErtsRunQueue *rq)
dt = do_time_read_and_reset();
if (dt) bump_timer(dt);
- erts_smp_runq_lock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_WAITING)
+ goto sys_aux_work;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ sys_woken:
+ erts_smp_runq_lock(rq);
+ sys_locked_woken:
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ sched_active_sys(esdp->no, rq);
}
}
-#endif
- sched_active_sys(no, rq);
-}
-
-static void
-sched_cnd_wait(Uint no, ErtsRunQueue *rq)
-{
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#endif
-
- sched_waiting(no, rq);
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
-#else
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_mtx_unlock(&rq->mtx);
-
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_mtx_unlock(&rq->mtx);
- }
- }
-
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val == 0) {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
- }
- else {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- }
-#endif
-
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
- sched_active(no, rq);
}
-static void
-wake_one_scheduler(void)
-{
- ASSERT(erts_common_run_queue);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(erts_common_run_queue));
- if (erts_common_run_queue->waiting) {
- if (!sched_spin_wake(erts_common_run_queue)) {
- if (erts_common_run_queue->waiting == -1) /* One scheduler waiting
- and doing so in
- sys_schedule */
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&erts_common_run_queue->cnd);
- }
+static ERTS_INLINE long
+ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ /* reset all flags but suspended */
+ long oflgs;
+ long nflgs = 0;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return oflgs;
+ nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ xflgs = oflgs;
}
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq)
+wake_scheduler(ErtsRunQueue *rq, int incq, int one)
{
- ASSERT(!erts_common_run_queue);
- ASSERT(-1 <= rq->waiting && rq->waiting <= 1);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- if (rq->waiting && !rq->woken) {
- if (!sched_spin_wake(rq)) {
- if (rq->waiting < 0)
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&rq->cnd);
+ int res;
+ ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepList *sl;
+
+ /*
+ * The unlocked run queue is not strictly necessary
+ * from a thread safety or deadlock prevention
+ * perspective. It will, however, cost us performance
+     * if it is locked during wakeup of another scheduler, so
+ * so all code *should* handle this without having
+ * the lock on the run queue.
+ */
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
+
+ sl = &rq->sleepers;
+
+ erts_smp_spin_lock(&sl->lock);
+ ssi = sl->list;
+ if (!ssi)
+ erts_smp_spin_unlock(&sl->lock);
+ else if (one) {
+ long flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
}
- rq->woken = 1;
- if (incq)
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
+
+ res = sl->list != NULL;
+ erts_smp_spin_unlock(&sl->lock);
+
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+
+ if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
non_empty_runq(rq);
}
+ else {
+ sl->list = NULL;
+ erts_smp_spin_unlock(&sl->lock);
+ do {
+ ErtsSchedulerSleepInfo *wake_ssi = ssi;
+ ssi = ssi->next;
+	    erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
+ } while (ssi);
+ }
}
static void
wake_all_schedulers(void)
{
- if (erts_common_run_queue) {
- erts_smp_runq_lock(erts_common_run_queue);
- if (erts_common_run_queue->waiting) {
- if (erts_common_run_queue->waiting < 0)
- erts_sys_schedule_interrupt(1);
- sched_spin_wake_all(erts_common_run_queue);
- erts_smp_cnd_broadcast(&erts_common_run_queue->cnd);
- }
- erts_smp_runq_unlock(erts_common_run_queue);
- }
+ if (erts_common_run_queue)
+ wake_scheduler(erts_common_run_queue, 0, 0);
else {
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- wake_scheduler(rq, 0);
- erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
}
}
}
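
wake_scheduler() now drives wakeups from the per-run-queue sleepers list under its own spinlock: waking one scheduler unlinks the head of the list, while wake-all detaches the whole list, and every sleeper is poked outside the lock. A self-contained analogue of the list handling (a sketch with a hypothetical poke() primitive, not the ERTS code):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct sleeper { struct sleeper *prev, *next; } sleeper_t;
    typedef struct { pthread_spinlock_t lock; sleeper_t *list; } sleep_list_t;

    void poke(sleeper_t *s);       /* hypothetical wakeup primitive */

    void wake(sleep_list_t *sl, int one)
    {
        sleeper_t *s;
        pthread_spin_lock(&sl->lock);
        s = sl->list;
        if (!s) {
            pthread_spin_unlock(&sl->lock);
            return;
        }
        if (one) {                 /* unlink the head only */
            sl->list = s->next;
            if (s->next)
                s->next->prev = NULL;
            pthread_spin_unlock(&sl->lock);
            poke(s);
        } else {                   /* detach the whole list */
            sl->list = NULL;
            pthread_spin_unlock(&sl->lock);
            while (s) {
                sleeper_t *w = s;
                s = s->next;       /* save next before poking */
                poke(w);           /* poke the unlinked sleeper */
            }
        }
    }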
@@ -939,14 +1256,14 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
wrq = ERTS_RUNQ_IX(ix);
iflgs = erts_smp_atomic_read(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
- erts_smp_xrunq_lock(crq, wrq);
if (activate) {
if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ erts_smp_xrunq_lock(crq, wrq);
wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
+ erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0);
- erts_smp_xrunq_unlock(crq, wrq);
+ wake_scheduler(wrq, 0, 1);
return 1;
}
return 0;
@@ -992,15 +1309,13 @@ static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- wake_one_scheduler();
- else
- wake_scheduler(runq, 1);
+ if (runq)
+ wake_scheduler(runq, 1, 1);
#endif
}
void
-erts_smp_notify_inc_runq__(ErtsRunQueue *runq)
+erts_smp_notify_inc_runq(ErtsRunQueue *runq)
{
smp_notify_inc_runq(runq);
}
@@ -1146,20 +1461,23 @@ static void
evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
{
Port *prt;
+ int notify_to_rq = 0;
int prio;
int prt_locked = 0;
int rq_locked = 0;
int evac_rq_locked = 1;
+ ErtsMigrateResult mres;
erts_smp_runq_lock(evac_rq);
+ erts_smp_atomic_bor(&evac_rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
-
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1187,9 +1505,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
/* Evacuate scheduled ports */
prt = evac_rq->ports.start;
while (prt) {
- (void) erts_port_migrate(prt, &prt_locked,
+ mres = erts_port_migrate(prt, &prt_locked,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (prt_locked)
erts_smp_port_unlock(prt);
if (!evac_rq_locked) {
@@ -1218,9 +1538,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
goto end_of_proc;
}
- (void) erts_proc_migrate(proc, &proc_locks,
+ mres = erts_proc_migrate(proc, &proc_locks,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (proc_locks)
erts_smp_proc_unlock(proc, proc_locks);
if (!evac_rq_locked) {
@@ -1252,10 +1574,13 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (rq_locked)
erts_smp_runq_unlock(rq);
- if (!evac_rq_locked)
- erts_smp_runq_lock(evac_rq);
- wake_scheduler(evac_rq, 0);
- erts_smp_runq_unlock(evac_rq);
+ if (evac_rq_locked)
+ erts_smp_runq_unlock(evac_rq);
+
+ if (notify_to_rq)
+ smp_notify_inc_runq(rq);
+
+ wake_scheduler(evac_rq, 0, 1);
}
static int
@@ -1483,31 +1808,6 @@ try_steal_task(ErtsRunQueue *rq)
return res;
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
-void
-erts_smp_notify_check_children_needed(void)
-{
- int i;
- for (i = 0; i < erts_no_schedulers; i++) {
- erts_smp_runq_lock(ERTS_SCHEDULER_IX(i)->run_queue);
- ERTS_SCHEDULER_IX(i)->check_children = 1;
- if (!erts_common_run_queue)
- wake_scheduler(ERTS_SCHEDULER_IX(i)->run_queue, 0);
- erts_smp_runq_unlock(ERTS_SCHEDULER_IX(i)->run_queue);
- }
- if (ongoing_multi_scheduling_block()) {
- /* Also blocked schedulers need to check children */
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- for (i = 0; i < erts_no_schedulers; i++)
- ERTS_SCHEDULER_IX(i)->blocked_check_children = 1;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
- if (erts_common_run_queue)
- wake_all_schedulers();
-}
-#endif
-
/* Run queue balancing */
typedef struct {
@@ -2100,6 +2400,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_atomic_init(&no_empty_run_queues, 0);
#endif
+ erts_no_run_queues = n;
+
for (ix = 0; ix < n; ix++) {
int pix, rix;
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
@@ -2114,8 +2416,10 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
- erts_smp_atomic_init(&rq->spin_waiter, 0);
- erts_smp_atomic_init(&rq->spin_wake, 0);
+#ifdef ERTS_SMP
+ erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
+ rq->sleepers.list = NULL;
+#endif
rq->waiting = 0;
rq->woken = 0;
@@ -2166,7 +2470,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
- erts_no_run_queues = n;
#ifdef ERTS_SMP
@@ -2181,9 +2484,34 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#endif
+ n = (int) no_schedulers;
+ erts_no_schedulers = n;
+
+#ifdef ERTS_SMP
+ /* Create and initialize scheduler sleep info */
+
+ aligned_sched_sleep_info = erts_alloc(ERTS_ALC_T_SCHDLR_SLP_INFO,
+ (sizeof(ErtsAlignedSchedulerSleepInfo)
+ *(n+1)));
+    if ((((Uint) aligned_sched_sleep_info) & ERTS_CACHE_LINE_MASK) != 0)
+ aligned_sched_sleep_info = ((ErtsAlignedSchedulerSleepInfo *)
+ ((((Uint) aligned_sched_sleep_info)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ for (ix = 0; ix < n; ix++) {
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+#if 0 /* no need to initialize these... */
+ ssi->next = NULL;
+ ssi->prev = NULL;
+#endif
+ erts_smp_atomic_init(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in sched_thread_func */
+ erts_smp_atomic_init(&ssi->aux_work, 0);
+ }
+#endif
+
/* Create and initialize scheduler specific data */
- n = (int) no_schedulers;
erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
(sizeof(ErtsAlignedSchedulerData)
*(n+1)));
@@ -2200,6 +2528,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
+ esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->free_process = NULL;
#if HALFWORD_HEAP
/* Registers need to be heap allocated (correct memory range) for tracing to work */
@@ -2228,11 +2557,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->check_children = 0;
- esdp->blocked_check_children = 0;
-#endif
- erts_smp_atomic_init(&esdp->suspended, 0);
erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2241,7 +2565,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- schdlr_sspnd.changing = 0;
+ erts_smp_atomic_init(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
@@ -2264,7 +2588,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic_set(&(ERTS_SCHEDULER_IX(ix)->suspended), 1);
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2275,7 +2600,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
schdlr_sspnd.wait_curr_online = no_schedulers_online;
schdlr_sspnd.curr_online *= 2; /* Boot strapping... */
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
erts_smp_atomic_init(&doing_sys_schedule, 0);
@@ -2423,13 +2749,115 @@ susp_sched_resume_block(void *unused)
}
static void
+scheduler_ix_resume_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long oflgs;
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, 0, xflgs);
+ if (oflgs == xflgs) {
+ erts_sched_finish_poke(ssi, oflgs);
+ break;
+ }
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+}
+
+static long
+sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = xpct;
+
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+
+ return oflgs;
+}
+
+static long
+sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
+
+static long
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ }
+}
+
+static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
+ long flgs;
+ int changing;
long no = (long) esdp->no;
ErtsRunQueue *rq = esdp->run_queue;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
int curr_online = 1;
int wake = 0;
+ int reset_read_group = 0;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
/*
* Schedulers may be suspended in two different ways:
@@ -2451,113 +2879,151 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (scheduler2cpu_map[esdp->no].bound_id >= 0
&& erts_unbind_from_cpu(erts_cpuinfo) == 0) {
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
+ reset_read_group = 1;
}
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (reset_read_group)
+ erts_smp_rwmtx_set_reader_group(0);
+
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(0, 0);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
- ASSERT(active_schedulers >= 1);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED) {
- if (active_schedulers == schdlr_sspnd.msb.wait_active)
- wake = 1;
- if (active_schedulers == 1)
- schdlr_sspnd.changing = 0;
- }
-
- while (1) {
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children;
- erts_smp_runq_lock(esdp->run_queue);
- check_children = esdp->check_children;
- esdp->check_children = 0;
- erts_smp_runq_unlock(esdp->run_queue);
- if (check_children) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_check_children();
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == schdlr_sspnd.msb.wait_active)
+ wake = 1;
+ if (active_schedulers == 1) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
}
-#endif
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE) {
- int changed = 0;
- if (no > schdlr_sspnd.online && curr_online) {
- schdlr_sspnd.curr_online--;
- curr_online = 0;
- changed = 1;
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > schdlr_sspnd.online && curr_online) {
+ schdlr_sspnd.curr_online--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= schdlr_sspnd.online && !curr_online) {
+ schdlr_sspnd.curr_online++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
+ wake = 1;
+ if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
}
- else if (no <= schdlr_sspnd.online && !curr_online) {
- schdlr_sspnd.curr_online++;
- curr_online = 1;
- changed = 1;
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
}
- if (changed
- && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- wake = 1;
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online)
- schdlr_sspnd.changing = 0;
- }
- if (wake) {
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- wake = 0;
- }
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ|ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ blockable_aux_work:
+ blockable_aux_work(esdp, ssi, aux_work);
+#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (1) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ while (1) {
+ long flgs;
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->blocked_check_children)
- break;
+ flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto blockable_aux_work;
+ }
#endif
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ }
+
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE)
- break;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ }
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+ active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && schdlr_sspnd.online == active_schedulers) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->blocked_check_children = 0;
-#endif
+ ASSERT(no <= schdlr_sspnd.online);
+ ASSERT(!erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
}
- active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED
- && schdlr_sspnd.online == active_schedulers) {
- schdlr_sspnd.changing = 0;
- }
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ ASSERT(curr_online);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_active);
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(1, (int) esdp->no);
+
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
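The suspend loop above swaps the old condition-variable broadcast for a per-scheduler sleep-info word: a suspended scheduler publishes SLEEPING|WAITING, spins for a bounded count, upgrades the sleep type to TSE_SLEEPING, and only then blocks on its thread event; a waker clears the sleep bits and pays for a signal only when the sleeper actually committed to blocking. A minimal standalone sketch of that handshake, with C11 atomics and a pthread condition variable standing in for the ERTS tse primitives (flag names kept, everything else illustrative):

    #include <stdatomic.h>
    #include <pthread.h>

    #define FLG_SLEEPING     (1u << 0)
    #define FLG_TSE_SLEEPING (1u << 2)
    #define FLG_WAITING      (1u << 3)

    typedef struct {
        atomic_uint flags;
        pthread_mutex_t mtx;
        pthread_cond_t cnd;
        int signaled;
    } sleep_info;                 /* stand-in for ErtsSchedulerSleepInfo */

    static void sleeper_wait(sleep_info *si)
    {
        unsigned exp = FLG_SLEEPING | FLG_WAITING;
        atomic_fetch_or(&si->flags, exp);
        for (int i = 0; i < 1000; i++)           /* spin phase */
            if (!(atomic_load(&si->flags) & FLG_SLEEPING))
                return;                          /* woken while spinning */
        /* Commit to blocking: upgrade the sleep type, then wait. */
        if (atomic_compare_exchange_strong(&si->flags, &exp,
                                           exp | FLG_TSE_SLEEPING)) {
            pthread_mutex_lock(&si->mtx);
            while (!si->signaled)                /* spurious-wakeup safe */
                pthread_cond_wait(&si->cnd, &si->mtx);
            si->signaled = 0;
            pthread_mutex_unlock(&si->mtx);
        }
    }

    static void waker_poke(sleep_info *si)
    {
        unsigned old = atomic_fetch_and(&si->flags,
                                        ~(FLG_SLEEPING | FLG_TSE_SLEEPING));
        if (old & FLG_TSE_SLEEPING) {            /* sleeper really blocked */
            pthread_mutex_lock(&si->mtx);
            si->signaled = 1;
            pthread_cond_signal(&si->cnd);
            pthread_mutex_unlock(&si->mtx);
        }
    }

The TSE_SLEEPING upgrade is what lets the waking side skip the mutex and signal entirely when the target is still in its spin phase.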
@@ -2622,8 +3088,10 @@ erts_schedulers_state(Uint *total,
int yield_allowed)
{
int res;
+ long changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (yield_allowed && schdlr_sspnd.changing)
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
*active = *online = schdlr_sspnd.online;
@@ -2643,6 +3111,7 @@ erts_set_schedulers_online(Process *p,
Sint *old_no)
{
int ix, res, no, have_unlocked_plocks;
+ long changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -2652,7 +3121,8 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
else {
@@ -2661,17 +3131,19 @@ erts_set_schedulers_online(Process *p,
res = ERTS_SCHDLR_SSPND_DONE;
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
schdlr_sspnd.online = no;
if (no > online) {
int ix;
schdlr_sspnd.wait_curr_online = no;
- if (ongoing_multi_scheduling_block())
- /* No schedulers to resume */;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
else if (erts_common_run_queue) {
for (ix = online; ix < no; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 0);
+ scheduler_ix_resume_wake(ix);
}
else {
if (plocks) {
@@ -2685,6 +3157,7 @@ erts_set_schedulers_online(Process *p,
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/*
* Spread evacuation paths among all online
@@ -2699,7 +3172,6 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
res = ERTS_SCHDLR_SSPND_DONE;
}
else /* if (no < online) */ {
@@ -2716,12 +3188,17 @@ erts_set_schedulers_online(Process *p,
schdlr_sspnd.wait_curr_online = no+1;
}
- if (ongoing_multi_scheduling_block())
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- else if (erts_common_run_queue) {
+ if (ongoing_multi_scheduling_block()) {
for (ix = no; ix < online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 1);
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else if (erts_common_run_queue) {
+ for (ix = no; ix < online; ix++) {
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic_bor(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
wake_all_schedulers();
}
else {
@@ -2748,7 +3225,10 @@ erts_set_schedulers_online(Process *p,
erts_smp_atomic_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ERTS_FOREACH_OP_RUNQ(rq, wake_scheduler(rq, 0));
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq, 0, 1);
+ }
}
}
@@ -2762,6 +3242,12 @@ erts_set_schedulers_online(Process *p,
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -2776,11 +3262,12 @@ ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
int ix, res, have_unlocked_plocks = 0;
+ long changing;
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
else if (on) { /* ------ BLOCK ------ */
@@ -2794,19 +3281,22 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
else {
+ int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
+ ASSERT(0 == erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
- if (schdlr_sspnd.online == 1) {
+ if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
if (p->scheduler_data->no == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 1;
@@ -2820,17 +3310,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 1);
+ for (ix = 1; ix < online; ix++)
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
erts_smp_mtx_lock(&balance_info.update_mtx);
erts_smp_atomic_set(&balance_info.used_runqs, 1);
- for (ix = 0; ix < schdlr_sspnd.online; ix++) {
+ for (ix = 0; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
erts_smp_runq_unlock(rq);
}
@@ -2855,6 +3347,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -2898,7 +3397,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.msb.procs)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
#ifdef DEBUG
ERTS_FOREACH_RUNQ(rq,
{
@@ -2925,13 +3424,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
- schdlr_sspnd.changing = 0;
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 0);
+ erts_smp_atomic_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
else {
int online = schdlr_sspnd.online;
@@ -2948,6 +3447,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/* Spread evacuation paths among all online run queues */
@@ -2963,7 +3463,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
res = ERTS_SCHDLR_SSPND_DONE;
}
@@ -3035,18 +3534,34 @@ erts_multi_scheduling_blockers(Process *p)
static void *
sched_thread_func(void *vesdp)
{
+#ifdef ERTS_SMP
+ Uint no = ((ErtsSchedulerData *) vesdp)->no;
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
- Uint no = ((ErtsSchedulerData *) vesdp)->no;
erts_snprintf(&buf[0], 31, "scheduler %bpu", no);
erts_lc_set_thread_name(&buf[0]);
}
#endif
- erts_alloc_reg_scheduler_id(((ErtsSchedulerData *) vesdp)->no);
+ erts_alloc_reg_scheduler_id(no);
erts_tsd_set(sched_data_key, vesdp);
#ifdef ERTS_SMP
+
+ if (no <= erts_max_main_threads) {
+ erts_thr_set_main_status(1, (int) no);
+ if (erts_reader_groups) {
+ int rg = (int) no;
+ if (rg > erts_reader_groups)
+ rg = (((int) no) - 1) % erts_reader_groups + 1;
+ erts_smp_rwmtx_set_reader_group(rg);
+ }
+ }
+
erts_proc_lock_prepare_proc_lock_waiter();
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+
+
#endif
erts_register_blockable_thread();
#ifdef HIPE
@@ -3055,30 +3570,30 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE);
+ ASSERT(erts_smp_atomic_read(&schdlr_sspnd.changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
- schdlr_sspnd.curr_online--;
+ if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (((ErtsSchedulerData *) vesdp)->no != 1)
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ }
- if (((ErtsSchedulerData *) vesdp)->no != 1) {
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- schdlr_sspnd.changing = 0;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ if (((ErtsSchedulerData *) vesdp)->no == 1) {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
}
- }
- else if (schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- schdlr_sspnd.changing = 0;
- else {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- ASSERT(!schdlr_sspnd.changing);
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -3428,6 +3943,7 @@ processor_order_cmp(const void *vx, const void *vy)
static void
check_cpu_bind(ErtsSchedulerData *esdp)
{
+ int rg = 0;
int res;
int cpu_id;
erts_smp_runq_unlock(esdp->run_queue);
@@ -3459,6 +3975,12 @@ check_cpu_bind(ErtsSchedulerData *esdp)
erts_send_error_to_logger_nogl(dsbufp);
}
}
+ if (erts_reader_groups) {
+ if (esdp->cpu_id >= 0)
+ rg = reader_group_lookup(esdp->cpu_id);
+ else
+ rg = (((int) esdp->no) - 1) % erts_reader_groups + 1;
+ }
erts_smp_runq_lock(esdp->run_queue);
#ifdef ERTS_SMP
if (erts_common_run_queue)
@@ -3469,6 +3991,8 @@ check_cpu_bind(ErtsSchedulerData *esdp)
#endif
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (erts_reader_groups)
+ erts_smp_rwmtx_set_reader_group(rg);
}
static void
@@ -3497,11 +4021,13 @@ signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
wake_all_schedulers();
}
else {
- ERTS_FOREACH_RUNQ(rq,
- {
+ for (s_ix = 0; s_ix < erts_no_run_queues; s_ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(s_ix);
+ erts_smp_runq_lock(rq);
rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- wake_scheduler(rq, 0);
- });
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
+ }
}
#else
check_cpu_bind(erts_get_scheduler_data());
@@ -3541,6 +4067,490 @@ erts_init_scheduler_bind_type(char *how)
return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS;
}
+/*
+ * reader groups map
+ */
+
+typedef struct {
+ int level[ERTS_TOPOLOGY_MAX_DEPTH+1];
+} erts_avail_cput;
+
+typedef struct {
+ int *map;
+ int size;
+ int groups;
+} erts_reader_groups_map_test;
+
+typedef struct {
+ int id;
+ int sub_levels;
+ int reader_groups;
+} erts_rg_count_t;
+
+typedef struct {
+ int logical;
+ int reader_group;
+} erts_reader_groups_map_t;
+
+typedef struct {
+ erts_reader_groups_map_t *map;
+ int map_size;
+ int logical_processors;
+ int groups;
+} erts_make_reader_groups_map_test;
+
+static int reader_groups_available_cpu_check;
+static int reader_groups_logical_processors;
+static int reader_groups_map_size;
+static erts_reader_groups_map_t *reader_groups_map;
+
+#define ERTS_TOPOLOGY_RG ERTS_TOPOLOGY_MAX_DEPTH
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test);
+
+static Eterm
+get_reader_groups_map(Process *c_p,
+ erts_reader_groups_map_t *map,
+ int map_size,
+ int logical_processors)
+{
+#ifdef DEBUG
+ Eterm *endp;
+#endif
+ Eterm res = NIL, tuple;
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(c_p, logical_processors*(2+3));
+#ifdef DEBUG
+ endp = hp + logical_processors*(2+3);
+#endif
+ for (i = map_size - 1; i >= 0; i--) {
+ if (map[i].logical >= 0) {
+ tuple = TUPLE2(hp,
+ make_small(map[i].logical),
+ make_small(map[i].reader_group));
+ hp += 3;
+ res = CONS(hp, tuple, res);
+ hp += 2;
+ }
+ }
+ ASSERT(hp == endp);
+ return res;
+}
+
+Eterm
+erts_debug_reader_groups_map(Process *c_p, int groups)
+{
+ Eterm res;
+ erts_make_reader_groups_map_test test;
+
+ test.groups = groups;
+ make_reader_groups_map(&test);
+ if (!test.map)
+ res = NIL;
+ else {
+ res = get_reader_groups_map(c_p,
+ test.map,
+ test.map_size,
+ test.logical_processors);
+ erts_free(ERTS_ALC_T_TMP, test.map);
+ }
+ return res;
+}
+
+
+Eterm
+erts_get_reader_groups_map(Process *c_p)
+{
+ Eterm res;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ res = get_reader_groups_map(c_p,
+ reader_groups_map,
+ reader_groups_map_size,
+ reader_groups_logical_processors);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return res;
+}
+
+static void
+make_available_cpu_topology(erts_avail_cput *no,
+ erts_avail_cput *avail,
+ erts_cpu_topology_t *cpudata,
+ int *size,
+ int test)
+{
+ int len = *size;
+ erts_cpu_topology_t last;
+ int a, i, j;
+
+ no->level[ERTS_TOPOLOGY_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_CORE] = -1;
+ no->level[ERTS_TOPOLOGY_THREAD] = -1;
+ no->level[ERTS_TOPOLOGY_LOGICAL] = -1;
+
+ last.node = INT_MIN;
+ last.processor = INT_MIN;
+ last.processor_node = INT_MIN;
+ last.core = INT_MIN;
+ last.thread = INT_MIN;
+ last.logical = INT_MIN;
+
+ a = 0;
+
+ for (i = 0; i < len; i++) {
+
+ if (!test && !erts_is_cpu_available(erts_cpuinfo, cpudata[i].logical))
+ continue;
+
+ if (last.node != cpudata[i].node)
+ goto node;
+ if (last.processor != cpudata[i].processor)
+ goto processor;
+ if (last.processor_node != cpudata[i].processor_node)
+ goto processor_node;
+ if (last.core != cpudata[i].core)
+ goto core;
+ ASSERT(last.thread != cpudata[i].thread);
+ goto thread;
+
+ node:
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ processor:
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ processor_node:
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ core:
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ thread:
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ for (j = 0; j < ERTS_TOPOLOGY_LOGICAL; j++)
+ avail[a].level[j] = no->level[j];
+
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL] = cpudata[i].logical;
+ avail[a].level[ERTS_TOPOLOGY_RG] = 0;
+
+ ASSERT(last.logical != cpudata[i].logical);
+
+ last = cpudata[i];
+ a++;
+ }
+
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ *size = a;
+}
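make_available_cpu_topology() counts the distinct instances at every topology level with a fall-through ladder: a change at some level starts a new instance at every deeper level too, so a single goto into the ladder bumps all the counters below it. The idiom in miniature (a hypothetical three-level row type, not the ERTS one):

    #include <limits.h>

    struct row { int node, core, thread; };

    static void count_levels(const struct row *r, int n,
                             int *nodes, int *cores, int *threads)
    {
        struct row last = { INT_MIN, INT_MIN, INT_MIN };
        *nodes = *cores = *threads = 0;
        for (int i = 0; i < n; i++) {
            if (last.node != r[i].node)
                goto node;                /* new node: new core + thread */
            if (last.core != r[i].core)
                goto core;                /* new core: new thread too */
            goto thread;
        node:   (*nodes)++;
        core:   (*cores)++;
        thread: (*threads)++;
            last = r[i];
        }
    }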
+
+static int
+reader_group_lookup(int logical)
+{
+ int start = logical % reader_groups_map_size;
+ int ix = start;
+
+ do {
+ if (reader_groups_map[ix].logical == logical) {
+ ASSERT(reader_groups_map[ix].reader_group > 0);
+ return reader_groups_map[ix].reader_group;
+ }
+ ix++;
+ if (ix == reader_groups_map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Logical cpu id %d not found\n", logical);
+}
+
+static void
+reader_group_insert(erts_reader_groups_map_t *map, int map_size,
+ int logical, int reader_group)
+{
+ int start = logical % map_size;
+ int ix = start;
+
+ do {
+ if (map[ix].logical < 0) {
+ map[ix].logical = logical;
+ map[ix].reader_group = reader_group;
+ return;
+ }
+ ix++;
+ if (ix == map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Reader groups map full\n");
+}
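reader_group_lookup() and reader_group_insert() form a small open-addressed hash table keyed on the logical cpu id, probing linearly on collision; the table is sized to avail_sz*2+1 slots further down, so it stays under half full and every probe terminates. A self-contained toy run of the same scheme (sizes and ids invented for illustration):

    #include <stdio.h>

    typedef struct { int logical; int reader_group; } rg_slot;

    int main(void)
    {
        enum { N = 3, SZ = 2 * N + 1 };      /* 3 cpus -> 7 slots */
        rg_slot map[SZ];
        int cpu[N] = { 0, 7, 14 };           /* all hash to slot 0 */
        for (int i = 0; i < SZ; i++) {
            map[i].logical = -1;             /* -1 marks a free slot */
            map[i].reader_group = 0;
        }
        for (int i = 0; i < N; i++) {
            int ix = cpu[i] % SZ;
            while (map[ix].logical >= 0)     /* linear probe on collision */
                ix = (ix + 1) % SZ;
            map[ix].logical = cpu[i];
            map[ix].reader_group = i + 1;
        }
        for (int i = 0; i < SZ; i++)
            printf("slot %d: cpu %2d group %d\n",
                   i, map[i].logical, map[i].reader_group);
        return 0;
    }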
+
+
+static int
+sub_levels(erts_rg_count_t *rgc, int level, int aix, int avail_sz, erts_avail_cput *avail)
+{
+ int sub_level = level+1;
+ int last = -1;
+ rgc->sub_levels = 0;
+
+ do {
+ if (last != avail[aix].level[sub_level]) {
+ rgc->sub_levels++;
+ last = avail[aix].level[sub_level];
+ }
+ aix++;
+ }
+ while (aix < avail_sz && rgc->id == avail[aix].level[level]);
+ rgc->reader_groups = 0;
+ return aix;
+}
+
+static int
+write_reader_groups(int *rgp, erts_rg_count_t *rgcp,
+ int level, int a,
+ int avail_sz, erts_avail_cput *avail)
+{
+ int rg = *rgp;
+ int sub_level = level+1;
+ int sl_per_gr = rgcp->sub_levels / rgcp->reader_groups;
+ int xsl = rgcp->sub_levels % rgcp->reader_groups;
+ int sls = 0;
+ int last = -1;
+ int xsl_rg_lim = (rgcp->reader_groups - xsl) + rg + 1;
+
+ ASSERT(level < 0 || avail[a].level[level] == rgcp->id);
+
+ do {
+ if (last != avail[a].level[sub_level]) {
+ if (!sls) {
+ sls = sl_per_gr;
+ rg++;
+ if (rg >= xsl_rg_lim)
+ sls++;
+ }
+ last = avail[a].level[sub_level];
+ sls--;
+ }
+ avail[a].level[ERTS_TOPOLOGY_RG] = rg;
+ a++;
+ } while (a < avail_sz && (level < 0
+ || avail[a].level[level] == rgcp->id));
+
+ ASSERT(rgcp->reader_groups == rg - *rgp);
+
+ *rgp = rg;
+
+ return a;
+}
+
+static int
+rg_count_sub_levels_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ if (x->sub_levels != y->sub_levels)
+ return y->sub_levels - x->sub_levels;
+ return x->id - y->id;
+}
+
+static int
+rg_count_id_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ return x->id - y->id;
+}
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test)
+{
+ int i, spread_level, avail_sz;
+ erts_avail_cput no, *avail;
+ erts_cpu_topology_t *cpudata;
+ erts_reader_groups_map_t *map;
+ int map_sz;
+ int groups = erts_reader_groups;
+
+ if (test) {
+ test->map = NULL;
+ test->map_size = 0;
+ groups = test->groups;
+ }
+
+ if (!groups)
+ return;
+
+ if (!test) {
+ if (reader_groups_map)
+ erts_free(ERTS_ALC_T_RDR_GRPS_MAP, reader_groups_map);
+
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &avail_sz);
+
+ if (!cpudata)
+ return;
+
+ cpu_bind_order_sort(cpudata,
+ avail_sz,
+ ERTS_CPU_BIND_NO_SPREAD,
+ 1);
+
+ avail = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(erts_avail_cput)*avail_sz);
+
+ make_available_cpu_topology(&no, avail, cpudata,
+ &avail_sz, test != NULL);
+
+ destroy_tmp_cpu_topology_copy(cpudata);
+
+ map_sz = avail_sz*2+1;
+
+ if (test) {
+ map = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ test->map = map;
+ test->map_size = map_sz;
+ test->logical_processors = avail_sz;
+ }
+ else {
+ map = erts_alloc(ERTS_ALC_T_RDR_GRPS_MAP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ reader_groups_map = map;
+ reader_groups_logical_processors = avail_sz;
+ reader_groups_map_size = map_sz;
+
+ }
+
+ for (i = 0; i < map_sz; i++) {
+ map[i].logical = -1;
+ map[i].reader_group = 0;
+ }
+
+ spread_level = ERTS_TOPOLOGY_CORE;
+ for (i = ERTS_TOPOLOGY_NODE; i < ERTS_TOPOLOGY_THREAD; i++) {
+ if (no.level[i] > groups) {
+ spread_level = i;
+ break;
+ }
+ }
+
+ if (no.level[spread_level] <= groups) {
+ int a, rg, last = -1;
+ rg = 0;
+ ASSERT(spread_level == ERTS_TOPOLOGY_CORE);
+ for (a = 0; a < avail_sz; a++) {
+ if (last != avail[a].level[spread_level]) {
+ rg++;
+ last = avail[a].level[spread_level];
+ }
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ rg);
+ }
+ }
+ else { /* groups < no.level[spread_level] */
+ erts_rg_count_t *rg_count;
+ int a, rg, tl, toplevels;
+
+ tl = spread_level-1;
+
+ if (spread_level == ERTS_TOPOLOGY_NODE)
+ toplevels = 1;
+ else
+ toplevels = no.level[tl];
+
+ rg_count = erts_alloc(ERTS_ALC_T_TMP,
+ toplevels*sizeof(erts_rg_count_t));
+
+ if (toplevels == 1) {
+ rg_count[0].id = 0;
+ rg_count[0].sub_levels = no.level[spread_level];
+ rg_count[0].reader_groups = groups;
+ }
+ else {
+ int rgs_per_tl, rgs;
+ rgs = groups;
+ rgs_per_tl = rgs / toplevels;
+
+ a = 0;
+ for (i = 0; i < toplevels; i++) {
+ rg_count[i].id = avail[a].level[tl];
+ a = sub_levels(&rg_count[i], tl, a, avail_sz, avail);
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_sub_levels_compare);
+
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels < rgs_per_tl) {
+ rg_count[i].reader_groups = rg_count[i].sub_levels;
+ rgs -= rg_count[i].sub_levels;
+ }
+ else {
+ rg_count[i].reader_groups = rgs_per_tl;
+ rgs -= rgs_per_tl;
+ }
+ }
+
+ while (rgs > 0) {
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels == rg_count[i].reader_groups)
+ break;
+ else {
+ rg_count[i].reader_groups++;
+ if (--rgs == 0)
+ break;
+ }
+ }
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_id_compare);
+ }
+
+ a = i = rg = 0;
+ while (a < avail_sz) {
+ a = write_reader_groups(&rg, &rg_count[i], tl,
+ a, avail_sz, avail);
+ i++;
+ }
+
+ ASSERT(groups == rg);
+
+ for (a = 0; a < avail_sz; a++)
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ avail[a].level[ERTS_TOPOLOGY_RG]);
+
+ erts_free(ERTS_ALC_T_TMP, rg_count);
+ }
+
+ erts_free(ERTS_ALC_T_TMP, avail);
+}
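When there are fewer reader groups than sub-levels, write_reader_groups() hands each group floor(S/G) sub-levels and the remainder S mod G goes to the last groups, which is what the xsl_rg_lim test implements. The arithmetic in isolation (values illustrative):

    #include <stdio.h>

    int main(void)
    {
        int S = 7, G = 3;                    /* 7 cores into 3 groups */
        int per = S / G, extra = S % G;      /* 2 per group, 1 left over */
        for (int g = 0; g < G; g++)
            printf("reader group %d: %d cores\n",
                   g + 1, per + (g >= G - extra ? 1 : 0));
        return 0;                            /* prints 2, 2, 3 */
    }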
+
+/*
+ * CPU topology
+ */
+
typedef struct {
int *id;
int used;
@@ -4054,6 +5064,8 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
sizeof(erts_cpu_topology_t)*cpudata_size);
}
+ make_reader_groups_map(NULL);
+
signal_schedulers_bind_change(cpudata, cpudata_size);
done:
@@ -4457,6 +5469,11 @@ early_cpu_bind_init(void)
cpu_bind_order = ERTS_CPU_BIND_UNDEFINED;
+ reader_groups_available_cpu_check = 1;
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+
if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
|| ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
system_cpudata_size)) {
@@ -4492,6 +5509,8 @@ late_cpu_bind_init(void)
: ERTS_CPU_BIND_NONE);
}
+ make_reader_groups_map(NULL);
+
if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
erts_cpu_topology_t *cpudata;
int cpudata_size;
@@ -5357,7 +6376,7 @@ dequeue_process(ErtsRunQueue *runq, Process *p)
}
/* schedule a process */
-static ERTS_INLINE void
+static ERTS_INLINE ErtsRunQueue *
internal_add_to_runq(ErtsRunQueue *runq, Process *p)
{
Uint32 prev_status = p->status;
@@ -5368,12 +6387,12 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
- return;
+ return NULL;
else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
ASSERT(p->status != P_SUSPENDED);
ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- return;
+ return NULL;
}
ASSERT(!p->scheduler_data);
#endif
@@ -5412,20 +6431,23 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
profile_runnable_proc(p, am_active);
}
- smp_notify_inc_runq(add_runq);
-
if (add_runq != runq)
erts_smp_runq_unlock(add_runq);
+
+ return add_runq;
}
void
erts_add_to_runq(Process *p)
{
+ ErtsRunQueue *notify_runq;
ErtsRunQueue *runq = erts_get_runq_proc(p);
erts_smp_runq_lock(runq);
- internal_add_to_runq(runq, p);
+ notify_runq = internal_add_to_runq(runq, p);
erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(notify_runq);
+
}
/* Possibly remove a scheduled process we need to suspend */
@@ -5564,8 +6586,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
p->run_queue = to_rq;
enqueue_process(to_rq, p);
- smp_notify_inc_runq(to_rq);
-
return ERTS_MIGRATE_SUCCESS;
}
#endif /* ERTS_SMP */
@@ -5762,30 +6782,6 @@ erts_set_process_priority(Process *p, Eterm new_value)
return old_value;
}
-#ifdef ERTS_SMP
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- while (!erts_port_task_have_outstanding_io_tasks()
- && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- return 0;
-}
-
-#else
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- return !erts_port_task_have_outstanding_io_tasks();
-}
-
-#endif
-
/* note that P_RUNNING is only set so that we don't try to remove
** running processes from the schedule queue if they exit - a running
** process not being in the schedule queue!!
@@ -5920,8 +6916,11 @@ Process *schedule(Process *p, int calls)
p->status_flags &= ~ERTS_PROC_SFLG_RUNNING;
if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
+ ErtsRunQueue *notify_runq;
p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
+ if (notify_runq != rq)
+ smp_notify_inc_runq(notify_runq);
}
#endif
@@ -5989,7 +6988,10 @@ Process *schedule(Process *p, int calls)
| ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
@@ -5998,12 +7000,21 @@ Process *schedule(Process *p, int calls)
}
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->check_children) {
- esdp->check_children = 0;
- erts_smp_runq_unlock(rq);
- erts_check_children();
- erts_smp_runq_lock(rq);
+#if defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
+ {
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work) {
+ erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+ erts_smp_runq_lock(rq);
+ }
}
#endif
@@ -6035,7 +7046,10 @@ Process *schedule(Process *p, int calls)
if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
}
@@ -6052,17 +7066,7 @@ Process *schedule(Process *p, int calls)
}
}
- if (prepare_for_sys_schedule()) {
- erts_smp_atomic_set(&function_calls, 0);
- fcalls = 0;
- sched_sys_wait(esdp->no, rq);
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- else {
- /* If all schedulers are waiting, one of them *should*
- be waiting in erl_sys_schedule() */
- sched_cnd_wait(esdp->no, rq);
- }
+ scheduler_wait(&fcalls, esdp, rq);
non_empty_runq(rq);
@@ -6124,7 +7128,7 @@ Process *schedule(Process *p, int calls)
else {
if (erts_common_run_queue) {
if (erts_common_run_queue->waiting)
- wake_one_scheduler();
+ wake_scheduler(erts_common_run_queue, 0, 1);
}
else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
@@ -6439,8 +7443,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
else
rq->misc.start = molp;
rq->misc.end = molp;
- smp_notify_inc_runq(rq);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(rq);
}
static void
@@ -6682,7 +7686,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
- ErtsRunQueue *rq;
+ ErtsRunQueue *rq, *notify_runq;
Process *p;
Sint arity; /* Number of arguments. */
#ifndef HYBRID
@@ -6995,10 +7999,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
p->status = P_WAITING;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(notify_runq);
+
res = p->id;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 6ce74a917f..c3fef6d38e 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -89,6 +89,7 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
#ifdef ERTS_SMP
+extern Uint erts_max_main_threads;
#include "erl_bits.h"
#endif
@@ -219,6 +220,51 @@ typedef enum {
ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
} ErtsMigrateResult;
+#define ERTS_SSI_FLG_SLEEPING (((long) 1) << 0)
+#define ERTS_SSI_FLG_POLL_SLEEPING (((long) 1) << 1)
+#define ERTS_SSI_FLG_TSE_SLEEPING (((long) 1) << 2)
+#define ERTS_SSI_FLG_WAITING (((long) 1) << 3)
+#define ERTS_SSI_FLG_SUSPENDED (((long) 1) << 4)
+
+#define ERTS_SSI_FLGS_SLEEP_TYPE \
+ (ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
+
+#define ERTS_SSI_FLGS_SLEEP \
+ (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLGS_SLEEP_TYPE)
+
+#define ERTS_SSI_FLGS_ALL \
+ (ERTS_SSI_FLGS_SLEEP \
+ | ERTS_SSI_FLG_WAITING \
+ | ERTS_SSI_FLG_SUSPENDED)
+
+
+#if !defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ && defined(ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN)
+#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+#endif
+
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((long) 1) << 0)
+
+#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
+ (ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
+#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
+ (0)
+
+typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
+
+typedef struct {
+ erts_smp_spinlock_t lock;
+ ErtsSchedulerSleepInfo *list;
+} ErtsSchedulerSleepList;
+
+struct ErtsSchedulerSleepInfo_ {
+ ErtsSchedulerSleepInfo *next;
+ ErtsSchedulerSleepInfo *prev;
+ erts_smp_atomic_t flags;
+ erts_tse_t *event;
+ erts_smp_atomic_t aux_work;
+};
+
/* times to reschedule low prio process before running */
#define RESCHEDULE_LOW 8
@@ -271,8 +317,9 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- erts_smp_atomic_t spin_waiter;
- erts_smp_atomic_t spin_wake;
+#ifdef ERTS_SMP
+ ErtsSchedulerSleepList sleepers;
+#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
@@ -353,6 +400,7 @@ struct ErtsSchedulerData_ {
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
+ ErtsSchedulerSleepInfo *ssi;
Process *free_process;
#endif
#if !HEAP_ON_C_STACK
@@ -374,11 +422,6 @@ struct ErtsSchedulerData_ {
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children; /* run queue mutex */
- int blocked_check_children; /* schdlr_sspnd mutex */
-#endif
- erts_smp_atomic_t suspended; /* Only used when common run queue */
erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
#endif
};
@@ -1085,6 +1128,9 @@ void erts_handle_pending_exit(Process *, ErtsProcLocks);
void erts_deep_process_dump(int, void *);
+Eterm erts_get_reader_groups_map(Process *c_p);
+Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
+
Sint erts_test_next_pid(int, Uint);
Eterm erts_debug_processes(Process *c_p);
Eterm erts_debug_processes_bif_info(Process *c_p);
@@ -1509,29 +1555,30 @@ extern int erts_disable_proc_not_running_opt;
#define ERTS_MIN_PROCESSES 16
#endif
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-ERTS_GLB_INLINE void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-void erts_smp_notify_inc_runq__(ErtsRunQueue *runq);
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#ifdef ERTS_SMP
+void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, long);
+ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- if (runq->waiting)
- erts_smp_notify_inc_runq__(runq);
-#endif
+ long flags = erts_smp_atomic_read(&ssi->flags);
+ ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
+ || (flags & ERTS_SSI_FLG_WAITING));
+ if (flags & ERTS_SSI_FLG_SLEEPING) {
+ flags = erts_smp_atomic_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ erts_sched_finish_poke(ssi, flags);
+ }
}
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
-
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* #ifdef ERTS_SMP */
+
#include "erl_process_lock.h"
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 52440fb635..a4d12139e9 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -71,9 +71,12 @@ const Process erts_proc_lock_busy;
#ifdef ERTS_SMP
-/*#define ERTS_PROC_LOCK_SPIN_ON_GATE*/
-#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 16000
+#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000
+#define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32
#define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
+#define ERTS_PROC_LOCK_AUX_SPIN_COUNT 50
+
+#define ERTS_PROC_LOCK_SPIN_UNTIL_YIELD 25
#ifdef ERTS_PROC_LOCK_DEBUG
#define ERTS_PROC_LOCK_HARD_DEBUG
@@ -83,32 +86,19 @@ const Process erts_proc_lock_busy;
static void check_queue(erts_proc_lock_t *lck);
#endif
-
-typedef struct erts_proc_lock_waiter_t_ erts_proc_lock_waiter_t;
-struct erts_proc_lock_waiter_t_ {
- erts_proc_lock_waiter_t *next;
- erts_proc_lock_waiter_t *prev;
- ErtsProcLocks wait_locks;
- erts_smp_gate_t gate;
- erts_proc_lock_queues_t *queues;
-};
+#if SIZEOF_INT < 4
+#error "The size of the 'uflgs' field of the erts_tse_t type is too small"
+#endif
struct erts_proc_lock_queues_t_ {
erts_proc_lock_queues_t *next;
- erts_proc_lock_waiter_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-};
-
-struct erts_proc_lock_thr_spec_data_t_ {
- erts_proc_lock_queues_t *qs;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
};
static erts_proc_lock_queues_t zeroqs = {0};
-static erts_smp_spinlock_t wtr_lock;
-static erts_proc_lock_waiter_t *waiter_free_list;
+static erts_smp_spinlock_t qs_lock;
static erts_proc_lock_queues_t *queue_free_list;
-static erts_tsd_key_t waiter_key;
#ifdef ERTS_ENABLE_LOCK_CHECK
static struct {
@@ -122,35 +112,26 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
static int proc_lock_spin_count;
-static int proc_lock_trans_spin_cost;
+static int aux_thr_proc_lock_spin_count;
-static void cleanup_waiter(void);
+static void cleanup_tse(void);
void
erts_init_proc_lock(void)
{
int i;
int cpus;
- erts_smp_spinlock_init(&wtr_lock, "proc_lck_wtr_alloc");
+ erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_mtx_init_x(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i));
-#else
- erts_smp_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
-#else
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck, "pix_lock", make_small(i));
+ erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
+ "pix_lock", make_small(i));
#else
erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
#endif
-#endif
}
- waiter_free_list = NULL;
queue_free_list = NULL;
- erts_tsd_key_create(&waiter_key);
- erts_thr_install_exit_handler(cleanup_waiter);
+ erts_thr_install_exit_handler(cleanup_tse);
#ifdef ERTS_ENABLE_LOCK_CHECK
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
@@ -158,86 +139,106 @@ erts_init_proc_lock(void)
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
#endif
cpus = erts_get_cpu_configured(erts_cpuinfo);
- if (cpus > 1)
- proc_lock_spin_count = (ERTS_PROC_LOCK_SPIN_COUNT_BASE
- * ((int) erts_no_schedulers));
- else if (cpus == 1)
- proc_lock_spin_count = 0;
- else /* No of cpus unknown. Assume multi proc, but be conservative. */
+ if (cpus > 1) {
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
- if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
- proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
- proc_lock_trans_spin_cost = proc_lock_spin_count/20;
-}
-
-static ERTS_INLINE erts_proc_lock_waiter_t *
-alloc_wtr(void)
-{
- erts_proc_lock_waiter_t *wtr;
- erts_smp_spin_lock(&wtr_lock);
- wtr = waiter_free_list;
- if (wtr) {
- waiter_free_list = wtr->next;
- ERTS_LC_ASSERT(queue_free_list);
- wtr->queues = queue_free_list;
- queue_free_list = wtr->queues->next;
- erts_smp_spin_unlock(&wtr_lock);
+ proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC
+ * ((int) erts_no_schedulers));
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT;
}
- else {
- erts_smp_spin_unlock(&wtr_lock);
- wtr = erts_alloc(ERTS_ALC_T_PROC_LCK_WTR,
- sizeof(erts_proc_lock_waiter_t));
- erts_smp_gate_init(&wtr->gate);
- wtr->wait_locks = (ErtsProcLocks) 0;
- wtr->queues = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
- sizeof(erts_proc_lock_queues_t));
- sys_memcpy((void *) wtr->queues,
- (void *) &zeroqs,
- sizeof(erts_proc_lock_queues_t));
+ else if (cpus == 1) {
+ proc_lock_spin_count = 0;
+ aux_thr_proc_lock_spin_count = 0;
}
- return wtr;
+ else { /* No of cpus unknown. Assume multi proc, but be conservative. */
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE/2;
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT/2;
+ }
+ if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
}
#ifdef ERTS_ENABLE_LOCK_CHECK
static void
-check_unused_waiter(erts_proc_lock_waiter_t *wtr)
+check_unused_tse(erts_tse_t *wtr)
{
int i;
- ERTS_LC_ASSERT(wtr->wait_locks == 0);
+ erts_proc_lock_queues_t *queues = wtr->udata;
+ ERTS_LC_ASSERT(wtr->uflgs == 0);
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- ERTS_LC_ASSERT(!wtr->queues->queue[i]);
+ ERTS_LC_ASSERT(!queues->queue[i]);
}
-#define CHECK_UNUSED_WAITER(W) check_unused_waiter((W))
+#define CHECK_UNUSED_TSE(W) check_unused_tse((W))
#else
-#define CHECK_UNUSED_WAITER(W)
+#define CHECK_UNUSED_TSE(W)
#endif
+static ERTS_INLINE erts_tse_t *
+tse_fetch(erts_pix_lock_t *pix_lock)
+{
+ erts_tse_t *tse = erts_tse_fetch();
+ if (!tse->udata) {
+ erts_proc_lock_queues_t *qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_unlock(pix_lock);
+#endif
+ erts_smp_spin_lock(&qs_lock);
+ qs = queue_free_list;
+ if (qs) {
+ queue_free_list = queue_free_list->next;
+ erts_smp_spin_unlock(&qs_lock);
+ }
+ else {
+ erts_smp_spin_unlock(&qs_lock);
+ qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
+ sizeof(erts_proc_lock_queues_t));
+ sys_memcpy((void *) qs,
+ (void *) &zeroqs,
+ sizeof(erts_proc_lock_queues_t));
+ }
+ tse->udata = qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_lock(pix_lock);
+#endif
+ }
+ tse->uflgs = 0;
+ return tse;
+}
static ERTS_INLINE void
-free_wtr(erts_proc_lock_waiter_t *wtr)
+tse_return(erts_tse_t *tse, int force_free_q)
{
- CHECK_UNUSED_WAITER(wtr);
- erts_smp_spin_lock(&wtr_lock);
- wtr->next = waiter_free_list;
- waiter_free_list = wtr;
- wtr->queues->next = queue_free_list;
- queue_free_list = wtr->queues;
- erts_smp_spin_unlock(&wtr_lock);
+ CHECK_UNUSED_TSE(tse);
+ if (force_free_q || erts_tse_is_tmp(tse)) {
+ erts_proc_lock_queues_t *qs = tse->udata;
+ ASSERT(qs);
+ erts_smp_spin_lock(&qs_lock);
+ qs->next = queue_free_list;
+ queue_free_list = qs;
+ erts_smp_spin_unlock(&qs_lock);
+ tse->udata = NULL;
+ }
+ erts_tse_return(tse);
}
void
erts_proc_lock_prepare_proc_lock_waiter(void)
{
- erts_tsd_set(waiter_key, (void *) alloc_wtr());
+ tse_return(tse_fetch(NULL), 0);
}
static void
-cleanup_waiter(void)
+cleanup_tse(void)
{
- erts_proc_lock_waiter_t *wtr = erts_tsd_get(waiter_key);
- if (wtr)
- free_wtr(wtr);
+ erts_tse_t *tse = erts_tse_fetch();
+ if (tse) {
+ if (tse->udata)
+ tse_return(tse, 1);
+ else
+ erts_tse_return(tse);
+ }
}
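tse_fetch() and tse_return() above keep one queue struct cached in each thread-event object and recycle them through a global spinlock-protected free list, so the hot lock/unlock path avoids the allocator. The free-list half reduced to a sketch (pthread spinlock and calloc standing in for erts_smp_spin_* and the ERTS allocator):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct qs { struct qs *next; /* per-lock wait queues... */ } qs_t;

    static pthread_spinlock_t qs_lock;   /* pthread_spin_init() at startup */
    static qs_t *queue_free_list;

    static qs_t *qs_fetch(void)
    {
        qs_t *q;
        pthread_spin_lock(&qs_lock);
        q = queue_free_list;
        if (q)
            queue_free_list = q->next;
        pthread_spin_unlock(&qs_lock);
        if (!q)
            q = calloc(1, sizeof(qs_t)); /* cache miss: allocate zeroed */
        return q;
    }

    static void qs_return(qs_t *q)
    {
        pthread_spin_lock(&qs_lock);
        q->next = queue_free_list;
        queue_free_list = q;
        pthread_spin_unlock(&qs_lock);
    }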
@@ -250,7 +251,7 @@ cleanup_waiter(void)
static ERTS_INLINE void
enqueue_waiter(erts_proc_lock_queues_t *qs,
int ix,
- erts_proc_lock_waiter_t *wtr)
+ erts_tse_t *wtr)
{
if (!qs->queue[ix]) {
qs->queue[ix] = wtr;
@@ -266,10 +267,10 @@ enqueue_waiter(erts_proc_lock_queues_t *qs,
}
}
-static erts_proc_lock_waiter_t *
+static erts_tse_t *
dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
{
- erts_proc_lock_waiter_t *wtr = qs->queue[ix];
+ erts_tse_t *wtr = qs->queue[ix];
ERTS_LC_ASSERT(qs->queue[ix]);
if (wtr->next == wtr) {
ERTS_LC_ASSERT(qs->queue[ix]->prev == wtr);
@@ -295,10 +296,10 @@ dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
* lock.
*/
static ERTS_INLINE void
-try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
+try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
{
ErtsProcLocks got_locks = (ErtsProcLocks) 0;
- ErtsProcLocks locks = wtr->wait_locks;
+ ErtsProcLocks locks = wtr->uflgs;
int lock_no;
ERTS_LC_ASSERT(lck->queues);
@@ -334,7 +335,7 @@ try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
}
}
- wtr->wait_locks &= ~got_locks;
+ wtr->uflgs &= ~got_locks;
}
/*
@@ -350,8 +351,8 @@ transfer_locks(Process *p,
int unlock)
{
int transferred = 0;
- erts_proc_lock_waiter_t *wake = NULL;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wake = NULL;
+ erts_tse_t *wtr;
ErtsProcLocks unset_waiter = 0;
ErtsProcLocks tlocks = trnsfr_lcks;
int lock_no;
@@ -377,11 +378,11 @@ transfer_locks(Process *p,
ERTS_LC_ASSERT(wtr);
if (!qs->queue[lock_no])
unset_waiter |= lock;
- ERTS_LC_ASSERT(wtr->wait_locks & lock);
- wtr->wait_locks &= ~lock;
- if (wtr->wait_locks)
+ ERTS_LC_ASSERT(wtr->uflgs & lock);
+ wtr->uflgs &= ~lock;
+ if (wtr->uflgs)
try_aquire(&p->lock, wtr);
- if (!wtr->wait_locks) {
+ if (!wtr->uflgs) {
/*
* The other thread got all locks it needs;
* need to wake it up.
@@ -412,9 +413,10 @@ transfer_locks(Process *p,
erts_pix_unlock(pix_lock);
do {
- erts_proc_lock_waiter_t *tmp = wake;
+ erts_tse_t *tmp = wake;
wake = wake->next;
- erts_smp_gate_let_through(&tmp->gate, 1);
+ erts_atomic_set(&tmp->uaflgs, 0);
+ erts_tse_set(tmp);
} while (wake);
if (!unlock)
@@ -462,26 +464,16 @@ wait_for_locks(Process *p,
ErtsProcLocks olflgs)
{
erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
- int tsd;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
+ erts_proc_lock_queues_t *qs;
/* Acquire a waiter object on which this thread can wait. */
- wtr = erts_tsd_get(waiter_key);
- if (wtr)
- tsd = 1;
- else {
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_unlock(pix_lock);
-#endif
- wtr = alloc_wtr();
- tsd = 0;
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_lock(pix_lock);
-#endif
- }
+ wtr = tse_fetch(pix_lock);
/* Record which locks this waiter needs. */
- wtr->wait_locks = need_locks;
+ wtr->uflgs = need_locks;
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -489,14 +481,16 @@ wait_for_locks(Process *p,
ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
+ qs = wtr->udata;
+ ASSERT(qs);
/* Provide the process with waiter queues, if it doesn't have one. */
if (!p->lock.queues) {
- wtr->queues->next = NULL;
- p->lock.queues = wtr->queues;
+ qs->next = NULL;
+ p->lock.queues = qs;
}
else {
- wtr->queues->next = p->lock.queues->next;
- p->lock.queues->next = wtr->queues;
+ qs->next = p->lock.queues->next;
+ p->lock.queues->next = qs;
}
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
@@ -506,46 +500,59 @@ wait_for_locks(Process *p,
/* Try to aquire locks one at a time in lock order and set wait flag */
try_aquire(&p->lock, wtr);
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
check_queue(&p->lock);
#endif
- if (wtr->wait_locks) { /* We didn't get them all; need to wait... */
- /* Got to wait for locks... */
+ if (wtr->uflgs) {
+ /* We didn't get them all; need to wait... */
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
+ erts_atomic_set(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
- /*
- * Wait for needed locks. When we return all needed locks have
- * have been acquired by other threads and transfered to us.
- */
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- erts_smp_gate_swait(&wtr->gate, proc_lock_spin_count);
-#else
- erts_smp_gate_wait(&wtr->gate);
-#endif
+ while (1) {
+ int res;
+ erts_tse_reset(wtr);
+
+ if (erts_atomic_read(&wtr->uaflgs) == 0)
+ break;
+
+ /*
+ * Wait for needed locks. When we are woken, all needed locks
+ * have been acquired by other threads and transferred to us.
+ * However, we need to be prepared for spurious wakeups.
+ */
+ do {
+ res = erts_tse_wait(wtr); /* might return EINTR */
+ } while (res != 0);
+ }
erts_pix_lock(pix_lock);
+
+ ASSERT(wtr->uflgs == 0);
}
/* Recover some queues to store in the waiter. */
ERTS_LC_ASSERT(p->lock.queues);
if (p->lock.queues->next) {
- wtr->queues = p->lock.queues->next;
- p->lock.queues->next = wtr->queues->next;
+ qs = p->lock.queues->next;
+ p->lock.queues->next = qs->next;
}
else {
- wtr->queues = p->lock.queues;
+ qs = p->lock.queues;
p->lock.queues = NULL;
}
+ wtr->udata = qs;
erts_pix_unlock(pix_lock);
ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
- if (tsd)
- CHECK_UNUSED_WAITER(wtr);
- else
- free_wtr(wtr);
+ tse_return(wtr, 0);
}
/*
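The wait loop added to wait_for_locks() follows a strict reset, re-check, block order: reset the event, re-read uaflgs, and sleep only if the flag is still set, so a wakeup racing in between can never be lost, while EINTR and early returns simply re-enter the loop. Both sides of that handshake as a sketch (the event type here is an assumed semaphore-based stand-in for erts_tse_*, with the same reset/wait/set contract):

    #include <stdatomic.h>
    #include <semaphore.h>
    #include <errno.h>

    /* Crude event built on a POSIX semaphore, only to keep the sketch
     * self-contained; erts_tse_* is the real (and cheaper) primitive. */
    typedef struct { sem_t sem; } event; /* sem_init(&ev->sem, 0, 0) first */

    static void ev_reset(event *ev) { while (sem_trywait(&ev->sem) == 0); }
    static int  ev_wait(event *ev)  { return sem_wait(&ev->sem) ? errno : 0; }
    static void ev_set(event *ev)   { sem_post(&ev->sem); }

    static void waiter_side(atomic_int *uaflgs, event *ev)
    {
        atomic_store(uaflgs, 1);         /* armed: locks not yet ours */
        for (;;) {
            ev_reset(ev);                /* reset *before* the re-check */
            if (atomic_load(uaflgs) == 0)
                break;                   /* wake raced in; nothing lost */
            int res;
            do {
                res = ev_wait(ev);       /* EINTR: just wait again */
            } while (res == EINTR);
        }
    }

    static void waker_side(atomic_int *uaflgs, event *ev)
    {
        atomic_store(uaflgs, 0);         /* publish: locks transferred */
        ev_set(ev);                      /* then wake the sleeper */
    }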
@@ -563,52 +570,57 @@ erts_proc_lock_failed(Process *p,
ErtsProcLocks locks,
ErtsProcLocks old_lflgs)
{
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- int spin_count = 0;
-#else
- int spin_count = proc_lock_spin_count;
-#endif
-
+ int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ int thr_spin_count;
+ int spin_count;
ErtsProcLocks need_locks = locks;
ErtsProcLocks olflgs = old_lflgs;
- while (need_locks != 0)
- {
- ErtsProcLocks can_grab = in_order_locks(olflgs, need_locks);
+ if (erts_thr_get_main_status())
+ thr_spin_count = proc_lock_spin_count;
+ else
+ thr_spin_count = aux_thr_proc_lock_spin_count;
+
+ spin_count = thr_spin_count;
+
+ while (need_locks != 0) {
+ ErtsProcLocks can_grab;
+
+ can_grab = in_order_locks(olflgs, need_locks);
- if (can_grab == 0)
- {
+ if (can_grab == 0) {
/* Someone already has the lowest-numbered lock we want. */
- if (spin_count-- <= 0)
- {
+ if (spin_count-- <= 0) {
/* Too many retries, give up and sleep for the lock. */
wait_for_locks(p, pixlck, locks, need_locks, olflgs);
return;
}
+ ERTS_SPIN_BODY;
+
+ if (--until_yield == 0) {
+ until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+
olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
}
- else
- {
+ else {
/* Try to grab all of the grabbable locks at once with cmpxchg. */
ErtsProcLocks grabbed = olflgs | can_grab;
ErtsProcLocks nflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, grabbed, olflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);
- if (nflgs == olflgs)
- {
+ if (nflgs == olflgs) {
/* Success! We grabbed the 'can_grab' locks. */
olflgs = grabbed;
need_locks &= ~can_grab;
-#ifndef ERTS_PROC_LOCK_SPIN_ON_GATE
/* Since we made progress, reset the spin count. */
- spin_count = proc_lock_spin_count;
-#endif
+ spin_count = thr_spin_count;
}
- else
- {
+ else {
/* Compare-and-exchange failed, try again. */
olflgs = nflgs;
}
@@ -1407,7 +1419,7 @@ check_queue(erts_proc_lock_t *lck)
wtr = (((ErtsProcLocks) 1) << lock_no) << ERTS_PROC_LOCK_WAITER_SHIFT;
if (lflgs & wtr) {
int n;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
ERTS_LC_ASSERT(lck->queues && lck->queues->queue[lock_no]);
wtr = lck->queues->queue[lock_no];
n = 0;
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index d71e5a0a6e..7cfc9893fa 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -255,11 +255,7 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
typedef struct {
union {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_t mtx;
-#else
erts_smp_spinlock_t spnlck;
-#endif
char buf[64]; /* Try to get locks in different cache lines */
} u;
} erts_pix_lock_t;
@@ -277,9 +273,12 @@ typedef struct {
((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_acqb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_relb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
@@ -289,6 +288,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_band(erts_proc_lock_t *,
ErtsProcLocks);
ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_bor(erts_proc_lock_t *,
ErtsProcLocks);
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *,
+ ErtsProcLocks,
+ ErtsProcLocks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -322,7 +324,9 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
#define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags)
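The _ACQB_ and _RELB_ variants introduced here encode memory ordering rather than new logic: a successful acquire cmpxchg keeps the critical section from being reordered above the lock grab, and a release cmpxchg keeps it from sinking below the unlock. Roughly, in C11 terms (a sketch, not the ethread implementation):

    #include <stdatomic.h>

    static int lock_try(atomic_long *flags, long locks)
    {
        long exp = 0;                    /* expect all lock bits clear */
        return atomic_compare_exchange_strong_explicit(
            flags, &exp, locks,
            memory_order_acquire,        /* nothing floats above the grab */
            memory_order_relaxed);
    }

    static void unlock(atomic_long *flags, long locks)
    {
        long old = atomic_load_explicit(flags, memory_order_relaxed);
        while (!atomic_compare_exchange_weak_explicit(
                   flags, &old, old & ~locks,
                   memory_order_release, /* nothing sinks below the drop */
                   memory_order_relaxed))
            ;                            /* 'old' reloaded on failure */
    }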
@@ -348,9 +352,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
- erts_pix_lock_t *,
- ErtsProcLocks,
- char *file, unsigned int line);
+ erts_pix_lock_t *,
+ ErtsProcLocks,
+ char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
erts_pix_lock_t *,
@@ -372,30 +376,18 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_lock(&pixlck->u.mtx);
-#else
erts_smp_spin_lock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_unlock(&pixlck->u.mtx);
-#else
erts_smp_spin_unlock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
{
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- return erts_smp_lc_mtx_is_locked(&pixlck->u.mtx);
-#else
return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
-#endif
}
/*
@@ -417,9 +409,9 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
ErtsProcLocks expct_lflgs = 0;
while (1) {
- ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock,
- expct_lflgs | locks,
- expct_lflgs);
+ ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock,
+ expct_lflgs | locks,
+ expct_lflgs);
if (ERTS_LIKELY(lflgs == expct_lflgs)) {
/* We successfully grabbed all locks. */
return 0;
@@ -535,7 +527,7 @@ erts_smp_proc_unlock__(Process *p,
if (want_lflgs != old_lflgs) {
ErtsProcLocks new_lflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, want_lflgs, old_lflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(&p->lock, want_lflgs, old_lflgs);
if (new_lflgs != old_lflgs) {
/* cmpxchg failed, try again. */
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 03d2a586e3..b41fa70476 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -43,9 +43,17 @@ typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
typedef erts_tid_t erts_smp_tid_t;
typedef erts_mtx_t erts_smp_mtx_t;
typedef erts_cnd_t erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER
+#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED
+#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED
+typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef erts_gate_t erts_smp_gate_t;
typedef ethr_atomic_t erts_smp_atomic_t;
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
@@ -54,15 +62,27 @@ void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#else /* #ifdef ERTS_SMP */
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER 0
+#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
typedef int erts_smp_thr_opts_t;
typedef int erts_smp_thr_init_data_t;
typedef int erts_smp_tid_t;
typedef int erts_smp_mtx_t;
typedef int erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_LONG_LIVED 0
+#define ERTS_SMP_RWMTX_SHORT_LIVED 0
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef int erts_smp_gate_t;
typedef long erts_smp_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
@@ -103,8 +123,6 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
@@ -119,9 +137,17 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
@@ -155,6 +181,16 @@ ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -190,12 +226,6 @@ ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
-ERTS_GLB_INLINE void erts_smp_gate_init(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_destroy(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_close(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_smp_gate_wait(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount);
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
@@ -331,22 +361,6 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_set_forksafe(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_unset_forksafe(mtx);
-#endif
-}
-
ERTS_GLB_INLINE int
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
{
@@ -433,6 +447,25 @@ erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_set_reader_group(int no)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_set_reader_group(no);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
@@ -441,6 +474,16 @@ erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt(rwmtx, opt, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
{
#ifdef ERTS_SMP
@@ -709,6 +752,73 @@ erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
#endif
}
+ERTS_GLB_INLINE long
+erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_dectest_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
ERTS_GLB_INLINE void
erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
{
@@ -919,54 +1029,6 @@ erts_smp_tsd_get(erts_smp_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_gate_init(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_init((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_destroy(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_destroy((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_close(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_close((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no)
-{
-#ifdef ERTS_SMP
- erts_gate_let_through((erts_gate_t *) gp, no);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_wait(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_wait((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount)
-{
-#ifdef ERTS_SMP
- erts_gate_swait((erts_gate_t *) gp, spincount);
-#endif
-}
-
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
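Editor's note: the acquire/release-flavored atomics added above are what the ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_ change in the process-lock hunk at the top of this patch relies on. A minimal sketch of the intended retry pattern follows; WANTED_FLG and the function name are illustrative, not names from this patch:

#define WANTED_FLG ((long) 1) /* illustrative flag bit */

static void clear_wanted_flg(erts_smp_atomic_t *lock_word)
{
    long old_flgs = erts_smp_atomic_read_acqb(lock_word);
    while (1) {
        long new_flgs = old_flgs & ~WANTED_FLG;
        /* release barrier: writes made while the flag was held become
           visible to other threads no later than the flag change */
        long act_flgs = erts_smp_atomic_cmpxchg_relb(lock_word,
                                                     new_flgs, old_flgs);
        if (act_flgs == old_flgs)
            break;               /* cmpxchg succeeded */
        old_flgs = act_flgs;     /* cmpxchg failed, try again */
    }
}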
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 35b338c6eb..0b7269262e 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -25,6 +25,11 @@
#ifndef ERL_THREAD_H__
#define ERL_THREAD_H__
+#define ERTS_SPIN_BODY ETHR_SPIN_BODY
+
+#define ERTS_MAX_READER_GROUPS 8
+extern int erts_reader_groups;
+
#include "sys.h"
#ifdef USE_THREADS
@@ -48,6 +53,7 @@
#define ERTS_THR_OPTS_DEFAULT_INITER ETHR_THR_OPTS_DEFAULT_INITER
typedef ethr_thr_opts erts_thr_opts_t;
typedef ethr_init_data erts_thr_init_data_t;
+typedef ethr_late_init_data erts_thr_late_init_data_t;
typedef ethr_tid erts_tid_t;
/* mutex */
@@ -73,8 +79,19 @@ typedef struct {
erts_lcnt_lock_t lcnt;
#endif
} erts_rwmtx_t;
+
+#define ERTS_RWMTX_OPT_DEFAULT_INITER ETHR_RWMUTEX_OPT_DEFAULT_INITER
+#define ERTS_RWMTX_TYPE_NORMAL ETHR_RWMUTEX_TYPE_NORMAL
+#define ERTS_RWMTX_TYPE_FREQUENT_READ ETHR_RWMUTEX_TYPE_FREQUENT_READ
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_RWMTX_LONG_LIVED ETHR_RWMUTEX_LONG_LIVED
+#define ERTS_RWMTX_SHORT_LIVED ETHR_RWMUTEX_SHORT_LIVED
+#define ERTS_RWMTX_UNKNOWN_LIVED ETHR_RWMUTEX_UNKNOWN_LIVED
+typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
+
typedef ethr_tsd_key erts_tsd_key_t;
-typedef ethr_gate erts_gate_t;
+typedef ethr_ts_event erts_tse_t;
typedef ethr_atomic_t erts_atomic_t;
/* spinlock */
@@ -103,25 +120,14 @@ typedef ethr_timeval erts_thr_timeval_t;
__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
/* implemented in erl_init.c */
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#define ERTS_REC_MTX_INITER \
- {ETHR_REC_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1,THE_NON_VALUE,ERTS_LC_FLG_LT_MUTEX)}
-#define ERTS_MTX_INITER \
- {ETHR_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_MUTEX)}
-#else
-#define ERTS_REC_MTX_INITER {ETHR_REC_MUTEX_INITER}
-#define ERTS_MTX_INITER {ETHR_MUTEX_INITER}
-#endif
-#define ERTS_CND_INITER ETHR_COND_INITER
#define ERTS_THR_INIT_DATA_DEF_INITER ETHR_INIT_DATA_DEFAULT_INITER
+#define ERTS_THR_LATE_INIT_DATA_DEF_INITER \
+ ETHR_LATE_INIT_DATA_DEFAULT_INITER
#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-
#else /* #ifdef USE_THREADS */
#define ERTS_THR_MEMORY_BARRIER
@@ -129,12 +135,26 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
#define ERTS_THR_OPTS_DEFAULT_INITER 0
typedef int erts_thr_opts_t;
typedef int erts_thr_init_data_t;
+typedef int erts_thr_late_init_data_t;
typedef int erts_tid_t;
typedef int erts_mtx_t;
typedef int erts_cnd_t;
+#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_RWMTX_TYPE_NORMAL 0
+#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_RWMTX_LONG_LIVED 0
+#define ERTS_RWMTX_SHORT_LIVED 0
+#define ERTS_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_rwmtx_opt_t;
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
-typedef int erts_gate_t;
+typedef int erts_tse_t;
typedef long erts_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
@@ -148,7 +168,6 @@ typedef struct {
long tv_nsec;
} erts_thr_timeval_t;
-#define ERTS_REC_MTX_INITER 0
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
#define ERTS_THR_INIT_DATA_DEF_INITER 0
@@ -158,6 +177,7 @@ typedef struct {
#endif /* #ifdef USE_THREADS */
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
+ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
void *arg, erts_thr_opts_t *opts);
ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res);
@@ -166,9 +186,6 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void erts_rec_mtx_init(erts_mtx_t *mtx);
-#endif
ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
@@ -177,8 +194,6 @@ ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_set_forksafe(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_unset_forksafe(erts_mtx_t *mtx);
ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
@@ -192,9 +207,17 @@ ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
@@ -228,6 +251,20 @@ ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_atomic_read_acqb(erts_atomic_t *var);
+ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_dectest_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
+ char *name,
+ Eterm extra,
+ Uint16 opt);
ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
char *name,
Eterm extra);
@@ -263,12 +300,16 @@ ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
-ERTS_GLB_INLINE void erts_gate_init(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_destroy(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_close(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_let_through(erts_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_gate_wait(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_swait(erts_gate_t *gp, int spincount);
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void);
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount);
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
+ERTS_GLB_INLINE int erts_thr_get_main_status(void);
+ERTS_GLB_INLINE void erts_thr_yield(void);
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
@@ -290,6 +331,16 @@ erts_thr_init(erts_thr_init_data_t *id)
}
ERTS_GLB_INLINE void
+erts_thr_late_init(erts_thr_late_init_data_t *id)
+{
+#ifdef USE_THREADS
+ int res = ethr_late_init(id);
+ if (res)
+ erts_thr_fatal_error(res, "complete initialization of thread library");
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
@@ -362,20 +413,6 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
#endif
}
-
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void
-erts_rec_mtx_init(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_rec_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize recursive mutex");
-#endif
-}
-#endif
-
-
ERTS_GLB_INLINE void
erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
{
@@ -422,9 +459,7 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -463,9 +498,7 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -492,26 +525,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_mtx_set_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_set_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "set mutex forksafe");
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_unset_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_unset_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "unset mutex forksafe");
-#endif
-}
-
ERTS_GLB_INLINE int
erts_mtx_trylock(erts_mtx_t *mtx)
{
@@ -531,11 +544,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
-#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try lock mutex");
-
+#endif
return res;
#else
return 0;
@@ -551,19 +560,16 @@ erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
#endif
}
@@ -571,16 +577,13 @@ ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&mtx->lcnt);
#endif
- res = ethr_mutex_unlock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "unlock mutex");
+ ethr_mutex_unlock(&mtx->mtx);
#endif
}
@@ -648,9 +651,7 @@ ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_signal(cnd);
- if (res)
- erts_thr_fatal_error(res, "signal on condition variable");
+ ethr_cond_signal(cnd);
#endif
}
@@ -659,19 +660,34 @@ ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_broadcast(cnd);
- if (res)
- erts_thr_fatal_error(res, "broadcast on condition variable");
+ ethr_cond_broadcast(cnd);
#endif
}
/* rwmutex */
ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_rwmtx_set_reader_group(int no)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+#endif
+ res = ethr_rwmutex_set_reader_group(no);
+ if (res != 0)
+ erts_thr_fatal_error(res, "set reader group");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -684,10 +700,20 @@ erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra)
+{
+ erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -700,6 +726,12 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
}
ERTS_GLB_INLINE void
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+{
+ erts_rwmtx_init_opt(rwmtx, NULL, name);
+}
+
+ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
@@ -736,9 +768,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try read lock rwmutex");
return res;
#else
@@ -754,19 +783,16 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_rlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "read lock rwmutex");
#endif
}
@@ -774,16 +800,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_runlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "read unlock rwmutex");
+ ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
}
@@ -808,9 +831,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try write lock rwmutex");
return res;
#else
@@ -826,19 +846,16 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "write lock rwmutex");
#endif
}
@@ -846,16 +863,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "write unlock rwmutex");
+ ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
}
@@ -917,9 +931,7 @@ ERTS_GLB_INLINE void
erts_atomic_init(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_init(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic init");
+ ethr_atomic_init(var, i);
#else
*var = i;
#endif
@@ -929,9 +941,7 @@ ERTS_GLB_INLINE void
erts_atomic_set(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_set(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic set");
+ ethr_atomic_set(var, i);
#else
*var = i;
#endif
@@ -941,11 +951,7 @@ ERTS_GLB_INLINE long
erts_atomic_read(erts_atomic_t *var)
{
#ifdef USE_THREADS
- long i;
- int res = ethr_atomic_read(var, &i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic read");
- return i;
+ return ethr_atomic_read(var);
#else
return *var;
#endif
@@ -955,11 +961,7 @@ ERTS_GLB_INLINE long
erts_atomic_inctest(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_inctest(incp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment and test");
- return test;
+ return ethr_atomic_inc_read(incp);
#else
return ++(*incp);
#endif
@@ -969,11 +971,7 @@ ERTS_GLB_INLINE long
erts_atomic_dectest(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_dectest(decp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement and test");
- return test;
+ return ethr_atomic_dec_read(decp);
#else
return --(*decp);
#endif
@@ -983,9 +981,7 @@ ERTS_GLB_INLINE void
erts_atomic_inc(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_inc(incp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment");
+ ethr_atomic_inc(incp);
#else
++(*incp);
#endif
@@ -995,9 +991,7 @@ ERTS_GLB_INLINE void
erts_atomic_dec(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_dec(decp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement");
+ ethr_atomic_dec(decp);
#else
--(*decp);
#endif
@@ -1007,11 +1001,7 @@ ERTS_GLB_INLINE long
erts_atomic_addtest(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_addtest(addp, i, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition and test");
- return test;
+ return ethr_atomic_add_read(addp, i);
#else
return *addp += i;
#endif
@@ -1021,9 +1011,7 @@ ERTS_GLB_INLINE void
erts_atomic_add(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_add(addp, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition");
+ ethr_atomic_add(addp, i);
#else
*addp += i;
#endif
@@ -1034,9 +1022,7 @@ erts_atomic_xchg(erts_atomic_t *xchgp, long new)
{
long old;
#ifdef USE_THREADS
- int res = ethr_atomic_xchg(xchgp, new, &old);
- if (res)
- erts_thr_fatal_error(res, "perform atomic exchange");
+ return ethr_atomic_xchg(xchgp, new);
#else
old = *xchgp;
*xchgp = new;
@@ -1048,11 +1034,7 @@ ERTS_GLB_INLINE long
erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
{
#ifdef USE_THREADS
- long old;
- int res = ethr_atomic_cmpxchg(xchgp, new, expected, &old);
- if (ERTS_UNLIKELY(res != 0))
- erts_thr_fatal_error(res, "perform atomic exchange");
- return old;
+ return ethr_atomic_cmpxchg(xchgp, new, expected);
#else
long old = *xchgp;
if (old == expected)
@@ -1064,31 +1046,95 @@ erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
ERTS_GLB_INLINE long
erts_atomic_bor(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_or_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise or");
+ return ethr_atomic_read_bor(var, mask);
#else
+ long old;
old = *var;
*var |= mask;
-#endif
return old;
+#endif
}
ERTS_GLB_INLINE long
erts_atomic_band(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_and_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise and");
+ return ethr_atomic_read_band(var, mask);
#else
+ long old;
old = *var;
*var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_read_acqb(erts_atomic_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_set_relb(erts_atomic_t *var, long i)
+{
+#ifdef USE_THREADS
+ ethr_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_dec_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_dectest_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_dec_read_relb(decp);
+#else
+ return --(*decp);
#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
return old;
+#endif
}
/* spinlock */
@@ -1112,6 +1158,26 @@ erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
+ Uint16 opt)
+{
+#ifdef USE_THREADS
+ int res = ethr_spinlock_init(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "init spinlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
erts_spinlock_init(erts_spinlock_t *lock, char *name)
{
#ifdef USE_THREADS
@@ -1152,16 +1218,13 @@ ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&lock->lcnt);
#endif
- res = ethr_spin_unlock(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "release spin lock");
+ ethr_spin_unlock(&lock->slck);
#else
(void)lock;
#endif
@@ -1175,19 +1238,16 @@ erts_spin_lock(erts_spinlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
- res = ethr_spin_lock(&lock->slck);
+ ethr_spin_lock(&lock->slck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take spin lock");
#else
(void)lock;
#endif
@@ -1268,16 +1328,13 @@ ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release read lock");
+ ethr_read_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1291,19 +1348,16 @@ erts_read_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_lock(&lock->rwlck);
+ ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take read lock");
#else
(void)lock;
#endif
@@ -1313,16 +1367,13 @@ ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release write lock");
+ ethr_write_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1336,19 +1387,16 @@ erts_write_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_lock(&lock->rwlck);
+ ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take write lock");
#else
(void)lock;
#endif
@@ -1432,66 +1480,95 @@ erts_tsd_get(erts_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_init(erts_gate_t *gp)
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_init((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize gate");
+ return (erts_tse_t *) ethr_get_ts_event();
+#else
+ return (erts_tse_t *) NULL;
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_destroy(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_destroy((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "destroy gate");
+ ethr_leave_ts_event(ep);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_close(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_close((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "close gate");
+ ethr_event_set(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_let_through(erts_gate_t *gp, unsigned no)
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_let_through((ethr_gate *) gp, no);
- if (res != 0)
- erts_thr_fatal_error(res, "let through gate");
+ ethr_event_reset(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_wait(erts_gate_t *gp)
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
+{
+#ifdef USE_THREADS
+ return ethr_event_wait(&((ethr_ts_event *) ep)->event);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
+{
+#ifdef USE_THREADS
+ return ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_wait((ethr_gate *) gp);
+ return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
+{
+#ifdef USE_THREADS
+ int res = ethr_set_main_thr_status(on, no);
if (res != 0)
- erts_thr_fatal_error(res, "wait on gate");
+ erts_thr_fatal_error(res, "set thread main status");
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_swait(erts_gate_t *gp, int spincount)
+ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_swait((ethr_gate *) gp, spincount);
+ int main_status;
+ int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
- erts_thr_fatal_error(res, "swait on gate");
+ erts_thr_fatal_error(res, "get thread main status");
+ return main_status;
+#else
+ return 1;
#endif
}
+ERTS_GLB_INLINE void erts_thr_yield(void)
+{
+#ifdef USE_THREADS
+ int res = ETHR_YIELD();
+ if (res != 0)
+ erts_thr_fatal_error(res, "yield");
+#endif
+}
+
+
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
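Editor's note: the gate API removed above is replaced by per-thread events (erts_tse_t). A rough sketch of the wait/wake protocol these wrappers support; the `ready` flag and the way the waker obtains the waiter's event pointer are illustrative assumptions, not code from this patch:

static erts_atomic_t ready;     /* stands in for the awaited condition */

static void waiter(void)
{
    erts_tse_t *ep = erts_tse_fetch();          /* this thread's event */
    while (!erts_atomic_read_acqb(&ready)) {
        erts_tse_reset(ep);                     /* arm the event */
        if (erts_atomic_read_acqb(&ready))
            break;     /* re-check after reset to avoid a lost wakeup */
        (void) erts_tse_wait(ep);  /* park until erts_tse_set() is called */
    }
    erts_tse_return(ep);           /* hand the event back to the library */
}

static void waker(erts_tse_t *waiters_ep)
{
    erts_atomic_set_relb(&ready, 1);
    erts_tse_set(waiters_ep);                   /* wake the parked waiter */
}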
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 66b05c0e9d..5e81a2d624 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -43,8 +43,6 @@ static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table.
#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
-#define export_init_lock() erts_smp_rwmtx_init(&export_table_lock, \
- "export_tab")
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
@@ -111,8 +109,12 @@ void
init_export_table(void)
{
HashFunctions f;
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
- export_init_lock();
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 310a83200b..d7c8aa84e9 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -502,7 +502,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
ASSERT(edep->ext_endp >= edep->extp);
ext_sz = edep->ext_endp - edep->extp;
- align_sz = ERTS_WORD_ALIGN_PAD_SZ(dist_ext_sz + ext_sz);
+ align_sz = ERTS_EXTRA_DATA_ALIGN_SZ(dist_ext_sz + ext_sz);
new_edep = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA,
dist_ext_sz + ext_sz + align_sz + xsize);
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index db86b4d796..cee48bbeb0 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -211,7 +211,7 @@ ERTS_GLB_INLINE void *
erts_dist_ext_trailer(ErtsDistExternal *edep)
{
void *res = (void *) (edep->ext_endp
- + ERTS_WORD_ALIGN_PAD_SZ(edep->ext_endp));
+ + ERTS_EXTRA_DATA_ALIGN_SZ(edep->ext_endp));
ASSERT((((UWord) res) % sizeof(Uint)) == 0);
return res;
}
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 900ebcbbf7..c9bb7bbe91 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -39,8 +39,6 @@ static Hash process_reg;
static erts_smp_rwmtx_t regtab_rwmtx;
-#define reg_lock_init() erts_smp_rwmtx_init(&regtab_rwmtx, \
- "reg_tab")
#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
@@ -147,8 +145,11 @@ static void reg_free(RegProc *obj)
void init_register_table(void)
{
HashFunctions f;
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- reg_lock_init();
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index ca87d3d70f..eac38674e7 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -359,15 +359,8 @@ typedef unsigned char byte;
#error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found
#endif
-#if defined(ARCH_64)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
+# define ERTS_EXTRA_DATA_ALIGN_SZ(X) \
(((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
-#elif defined(ARCH_32)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
- (((size_t) 4) - (((size_t) (X)) & ((size_t) 3)))
-#else
-#error "Not supported..."
-#endif
#include "erl_lock_check.h"
#include "erl_smp.h"
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 737ffd9f94..400ef6c0ce 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -384,18 +384,6 @@ MALLOC_USE_HASH(1);
#endif
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
#ifdef ERTS_THR_HAVE_SIG_FUNCS
/*
@@ -488,9 +476,6 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
@@ -538,13 +523,14 @@ erts_sys_pre_init(void)
#endif
#endif /* USE_THREADS */
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
}
void
erl_sys_init(void)
{
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
+ {
int res;
char bindir[MAXPATHLEN];
size_t bindirsz = sizeof(bindir);
@@ -574,6 +560,7 @@ erl_sys_init(void)
bindir,
DIR_SEPARATOR_CHAR,
CHILD_SETUP_PROG_NAME);
+ }
#endif
#ifdef USE_SETLINEBUF
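Editor's note: the allocator callbacks deleted from erts_sys_pre_init() above have not simply disappeared; erts_thr_init() now runs with static resources only, and the thread library is completed later via erts_thr_late_init() (declared in erl_threads.h earlier in this patch). A sketch of the split; the assumption that the late-init data carries the allocator hooks is the editor's, and the field names are not shown in this patch:

static void example_two_phase_thr_init(void)    /* illustrative only */
{
    erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
    eid.thread_create_child_func = thr_create_prepare_child;
    erts_thr_init(&eid);      /* very early; no erts allocators exist yet */

    /* ... erts_alloc and friends are set up between the two phases ... */

    erts_thr_late_init_data_t lid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
    /* allocator hooks would be filled in on lid here (field names are
       an assumption; this hunk does not show them) */
    erts_thr_late_init(&lid);    /* completes thread library init */
}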
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index bd02cf85eb..54f71b202d 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -2974,19 +2974,6 @@ check_supported_os_version(void)
}
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
-
#ifdef ERTS_ENABLE_LOCK_COUNT
static void
thr_create_prepare_child(void *vtcdp)
@@ -3005,14 +2992,9 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
-
#ifdef ERTS_ENABLE_LOCK_COUNT
eid.thread_create_child_func = thr_create_prepare_child;
#endif
-
erts_thr_init(&eid);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init();
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index ab727b7cb1..4f4458802c 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -33,7 +33,7 @@
-include("test_server.hrl").
%-compile(export_all).
--export([all/1, init_per_testcase/2, fin_per_testcase/2]).
+-export([all/1, init_per_testcase/2, fin_per_testcase/2, end_per_suite/1]).
-export([equal/1,
few_low/1,
@@ -49,7 +49,8 @@
cpu_topology/1,
sct_cmd/1,
sbt_cmd/1,
- scheduler_suspend/1]).
+ scheduler_suspend/1,
+ reader_groups/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(10)).
@@ -67,7 +68,8 @@ all(suite) ->
equal_with_high_max,
bound_process,
scheduler_bind,
- scheduler_suspend].
+ scheduler_suspend,
+ reader_groups].
init_per_testcase(Case, Config) when is_list(Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
@@ -81,6 +83,10 @@ fin_per_testcase(_Case, Config) when is_list(Config) ->
?t:timetrap_cancel(Dog),
ok.
+end_per_suite(Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
+ Config.
+
-define(ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED, (2000*2000)).
-define(DEFAULT_TEST_REDS_PER_SCHED, 200000000).
@@ -965,12 +971,361 @@ sst3_loop(S, N) ->
erlang:system_flag(schedulers_online, 1),
erlang:system_flag(schedulers_online, S),
sst3_loop(S, N-1).
+
+reader_groups(Config) when is_list(Config) ->
+ %% White box testing. These results are correct, but other results
+ %% could be too...
+
+ %% The actual tilepro64 topology
+    CPUT0 = tilera_cpu_topology(),
+
+ ?line [{0,1},{1,1},{2,1},{3,3},{4,3},{5,3},{6,3},{7,3},{8,1},{9,1},{10,1},
+ {11,1},{12,3},{13,3},{14,4},{15,4},{16,2},{17,2},{18,2},{19,2},
+ {20,4},{21,4},{22,4},{23,4},{24,2},{25,2},{26,7},{27,2},{28,4},
+ {29,2},{30,4},{31,5},{32,7},{33,7},{34,7},{35,7},{36,5},{37,5},
+ {38,5},{39,7},{40,7},{41,8},{42,8},{43,8},{44,5},{45,5},{46,5},
+ {47,6},{48,8},{49,8},{50,8},{51,6},{52,6},{53,6},{54,6},{55,6},
+ {58,8},{60,6},{61,6}]
+ = reader_groups_map(CPUT0, 8),
+
+ CPUT1 = [n([p([c([t(l(0)),t(l(1)),t(l(2)),t(l(3))]),
+ c([t(l(4)),t(l(5)),t(l(6)),t(l(7))]),
+ c([t(l(8)),t(l(9)),t(l(10)),t(l(11))]),
+ c([t(l(12)),t(l(13)),t(l(14)),t(l(15))])]),
+ p([c([t(l(16)),t(l(17)),t(l(18)),t(l(19))]),
+ c([t(l(20)),t(l(21)),t(l(22)),t(l(23))]),
+ c([t(l(24)),t(l(25)),t(l(26)),t(l(27))]),
+ c([t(l(28)),t(l(29)),t(l(30)),t(l(31))])])]),
+ n([p([c([t(l(32)),t(l(33)),t(l(34)),t(l(35))]),
+ c([t(l(36)),t(l(37)),t(l(38)),t(l(39))]),
+ c([t(l(40)),t(l(41)),t(l(42)),t(l(43))]),
+ c([t(l(44)),t(l(45)),t(l(46)),t(l(47))])]),
+ p([c([t(l(48)),t(l(49)),t(l(50)),t(l(51))]),
+ c([t(l(52)),t(l(53)),t(l(54)),t(l(55))]),
+ c([t(l(56)),t(l(57)),t(l(58)),t(l(59))]),
+ c([t(l(60)),t(l(61)),t(l(62)),t(l(63))])])]),
+ n([p([c([t(l(64)),t(l(65)),t(l(66)),t(l(67))]),
+ c([t(l(68)),t(l(69)),t(l(70)),t(l(71))]),
+ c([t(l(72)),t(l(73)),t(l(74)),t(l(75))]),
+ c([t(l(76)),t(l(77)),t(l(78)),t(l(79))])]),
+ p([c([t(l(80)),t(l(81)),t(l(82)),t(l(83))]),
+ c([t(l(84)),t(l(85)),t(l(86)),t(l(87))]),
+ c([t(l(88)),t(l(89)),t(l(90)),t(l(91))]),
+ c([t(l(92)),t(l(93)),t(l(94)),t(l(95))])])]),
+ n([p([c([t(l(96)),t(l(97)),t(l(98)),t(l(99))]),
+ c([t(l(100)),t(l(101)),t(l(102)),t(l(103))]),
+ c([t(l(104)),t(l(105)),t(l(106)),t(l(107))]),
+ c([t(l(108)),t(l(109)),t(l(110)),t(l(111))])]),
+ p([c([t(l(112)),t(l(113)),t(l(114)),t(l(115))]),
+ c([t(l(116)),t(l(117)),t(l(118)),t(l(119))]),
+ c([t(l(120)),t(l(121)),t(l(122)),t(l(123))]),
+ c([t(l(124)),t(l(125)),t(l(126)),t(l(127))])])])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},
+ {10,3},{11,3},{12,4},{13,4},{14,4},{15,4},{16,5},{17,5},{18,5},
+ {19,5},{20,6},{21,6},{22,6},{23,6},{24,7},{25,7},{26,7},{27,7},
+ {28,8},{29,8},{30,8},{31,8},{32,9},{33,9},{34,9},{35,9},{36,10},
+ {37,10},{38,10},{39,10},{40,11},{41,11},{42,11},{43,11},{44,12},
+ {45,12},{46,12},{47,12},{48,13},{49,13},{50,13},{51,13},{52,14},
+ {53,14},{54,14},{55,14},{56,15},{57,15},{58,15},{59,15},{60,16},
+ {61,16},{62,16},{63,16},{64,17},{65,17},{66,17},{67,17},{68,18},
+ {69,18},{70,18},{71,18},{72,19},{73,19},{74,19},{75,19},{76,20},
+ {77,20},{78,20},{79,20},{80,21},{81,21},{82,21},{83,21},{84,22},
+ {85,22},{86,22},{87,22},{88,23},{89,23},{90,23},{91,23},{92,24},
+ {93,24},{94,24},{95,24},{96,25},{97,25},{98,25},{99,25},{100,26},
+ {101,26},{102,26},{103,26},{104,27},{105,27},{106,27},{107,27},
+ {108,28},{109,28},{110,28},{111,28},{112,29},{113,29},{114,29},
+ {115,29},{116,30},{117,30},{118,30},{119,30},{120,31},{121,31},
+ {122,31},{123,31},{124,32},{125,32},{126,32},{127,32}]
+ = reader_groups_map(CPUT1, 128),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,1},{17,1},{18,1},{19,1},
+ {20,1},{21,1},{22,1},{23,1},{24,1},{25,1},{26,1},{27,1},{28,1},
+ {29,1},{30,1},{31,1},{32,1},{33,1},{34,1},{35,1},{36,1},{37,1},
+ {38,1},{39,1},{40,1},{41,1},{42,1},{43,1},{44,1},{45,1},{46,1},
+ {47,1},{48,1},{49,1},{50,1},{51,1},{52,1},{53,1},{54,1},{55,1},
+ {56,1},{57,1},{58,1},{59,1},{60,1},{61,1},{62,1},{63,1},{64,2},
+ {65,2},{66,2},{67,2},{68,2},{69,2},{70,2},{71,2},{72,2},{73,2},
+ {74,2},{75,2},{76,2},{77,2},{78,2},{79,2},{80,2},{81,2},{82,2},
+ {83,2},{84,2},{85,2},{86,2},{87,2},{88,2},{89,2},{90,2},{91,2},
+ {92,2},{93,2},{94,2},{95,2},{96,2},{97,2},{98,2},{99,2},{100,2},
+ {101,2},{102,2},{103,2},{104,2},{105,2},{106,2},{107,2},{108,2},
+ {109,2},{110,2},{111,2},{112,2},{113,2},{114,2},{115,2},{116,2},
+ {117,2},{118,2},{119,2},{120,2},{121,2},{122,2},{123,2},{124,2},
+ {125,2},{126,2},{127,2}]
+ = reader_groups_map(CPUT1, 2),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},{10,3},
+ {11,3},{12,3},{13,3},{14,3},{15,3},{16,4},{17,4},{18,4},{19,4},
+ {20,4},{21,4},{22,4},{23,4},{24,5},{25,5},{26,5},{27,5},{28,5},
+ {29,5},{30,5},{31,5},{32,6},{33,6},{34,6},{35,6},{36,6},{37,6},
+ {38,6},{39,6},{40,7},{41,7},{42,7},{43,7},{44,7},{45,7},{46,7},
+ {47,7},{48,8},{49,8},{50,8},{51,8},{52,8},{53,8},{54,8},{55,8},
+ {56,9},{57,9},{58,9},{59,9},{60,9},{61,9},{62,9},{63,9},{64,10},
+ {65,10},{66,10},{67,10},{68,10},{69,10},{70,10},{71,10},{72,11},
+ {73,11},{74,11},{75,11},{76,11},{77,11},{78,11},{79,11},{80,12},
+ {81,12},{82,12},{83,12},{84,12},{85,12},{86,12},{87,12},{88,13},
+ {89,13},{90,13},{91,13},{92,13},{93,13},{94,13},{95,13},{96,14},
+ {97,14},{98,14},{99,14},{100,14},{101,14},{102,14},{103,14},
+ {104,15},{105,15},{106,15},{107,15},{108,15},{109,15},{110,15},
+ {111,15},{112,16},{113,16},{114,16},{115,16},{116,16},{117,16},
+ {118,16},{119,16},{120,17},{121,17},{122,17},{123,17},{124,17},
+ {125,17},{126,17},{127,17}]
+ = reader_groups_map(CPUT1, 17),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,2},{17,2},{18,2},{19,2},
+ {20,2},{21,2},{22,2},{23,2},{24,2},{25,2},{26,2},{27,2},{28,2},
+ {29,2},{30,2},{31,2},{32,3},{33,3},{34,3},{35,3},{36,3},{37,3},
+ {38,3},{39,3},{40,3},{41,3},{42,3},{43,3},{44,3},{45,3},{46,3},
+ {47,3},{48,4},{49,4},{50,4},{51,4},{52,4},{53,4},{54,4},{55,4},
+ {56,4},{57,4},{58,4},{59,4},{60,4},{61,4},{62,4},{63,4},{64,5},
+ {65,5},{66,5},{67,5},{68,5},{69,5},{70,5},{71,5},{72,5},{73,5},
+ {74,5},{75,5},{76,5},{77,5},{78,5},{79,5},{80,6},{81,6},{82,6},
+ {83,6},{84,6},{85,6},{86,6},{87,6},{88,6},{89,6},{90,6},{91,6},
+ {92,6},{93,6},{94,6},{95,6},{96,7},{97,7},{98,7},{99,7},{100,7},
+ {101,7},{102,7},{103,7},{104,7},{105,7},{106,7},{107,7},{108,7},
+ {109,7},{110,7},{111,7},{112,7},{113,7},{114,7},{115,7},{116,7},
+ {117,7},{118,7},{119,7},{120,7},{121,7},{122,7},{123,7},{124,7},
+ {125,7},{126,7},{127,7}]
+ = reader_groups_map(CPUT1, 7),
+
+ ?line CPUT2 = [p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))]),
+ p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},{6,2},{7,2},{8,2},{9,2},
+ {10,3},
+ {11,4},{12,4},{13,4},
+ {14,5},{15,5}] = reader_groups_map(CPUT2, 5),
+
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,5},{13,5},
+ {14,6},{15,6}] = reader_groups_map(CPUT2, 6),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,7}] = reader_groups_map(CPUT2, 7),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,8}] = reader_groups_map(CPUT2, 8),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,7},
+ {14,8},{15,9}] = reader_groups_map(CPUT2, 9),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,8},
+ {14,9},{15,10}] = reader_groups_map(CPUT2, 10),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,4},
+ {5,5},{6,5},{7,5},{8,5},{9,5},
+ {10,6},
+ {11,7},{12,8},{13,9},
+ {14,10},{15,11}] = reader_groups_map(CPUT2, 11),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,5},
+ {5,6},{6,6},{7,6},{8,6},{9,6},
+ {10,7},
+ {11,8},{12,9},{13,10},
+ {14,11},{15,12}] = reader_groups_map(CPUT2, 100),
+
+ CPUT3 = [p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))]),
+ p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))])],
+
+ ?line [{0,5},{1,5},{2,6},{3,6},{4,6},
+ {5,1},{6,1},{7,1},{8,1},{9,1},
+ {10,2},{11,3},{12,3},{13,3},
+ {14,4},{15,4}] = reader_groups_map(CPUT3, 6),
+
+ CPUT4 = [p([t(l(0)),t(l(1)),t(l(2)),t(l(3)),t(l(4))]),
+ p([t(l(5))]),
+ p([c(l(6)),c(l(7)),c(l(8))]),
+ p([c(l(9)),c(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13)),c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,3},{8,3},
+ {9,4},{10,4},
+ {11,5},{12,5},{13,6},{14,6},{15,6}] = reader_groups_map(CPUT4, 6),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,4},{8,4},
+ {9,5},{10,5},
+ {11,6},{12,6},{13,7},{14,7},{15,7}] = reader_groups_map(CPUT4, 7),
+
+ ?line [{0,1},{65535,2}] = reader_groups_map([c(l(0)),c(l(65535))], 10),
+
+ ?line ok.
+reader_groups_map(CPUT, Groups) ->
+ Old = erlang:system_info({cpu_topology, defined}),
+ erlang:system_flag(cpu_topology, CPUT),
+ enable_internal_state(),
+ Res = erts_debug:get_internal_state({reader_groups_map, Groups}),
+ erlang:system_flag(cpu_topology, Old),
+ lists:sort(Res).
+
%%
%% Utils
%%
+tilera_cpu_topology() ->
+ [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}].
+
+l(Id) ->
+ {logical, Id}.
+
+t(X) ->
+ {thread, X}.
+
+c(X) ->
+ {core, X}.
+
+p(X) ->
+ {processor, X}.
+
+n(X) ->
+ {node, X}.
+
mcall(Node, Funs) ->
Parent = self(),
Refs = lists:map(fun (Fun) ->
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index f79f5cc978..76ce0a7e3c 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -131,6 +131,12 @@ static char *plush_val_switches[] = {
NULL
};
+/* +r arguments with values */
+static char *plusr_val_switches[] = {
+ "g",
+ NULL
+};
+
/*
* Define sleep(seconds) in terms of Sleep() on Windows.
@@ -872,6 +878,21 @@ int main(int argc, char **argv)
i++;
}
break;
+ case 'r':
+ if (!is_one_of_strings(&argv[i][2],
+ plusr_val_switches))
+ goto the_default;
+ else {
+ if (i+1 >= argc
+ || argv[i+1][0] == '-'
+ || argv[i+1][0] == '+')
+ usage(argv[i]);
+ argv[i][0] = '-';
+ add_Eargs(argv[i]);
+ add_Eargs(argv[i+1]);
+ i++;
+ }
+ break;
case 's':
if (!is_one_of_strings(&argv[i][2],
pluss_val_switches))
@@ -1069,11 +1090,12 @@ usage_aux(void)
"[-hybrid] "
#endif
"[-make] [-man [manopts] MANPAGE] [-x] [-emu_args] "
- "[-args_file FILENAME] "
- "[+A THREADS] [+a SIZE] [+B[c|d|i]] [+c] [+h HEAP_SIZE_OPTION] [+K BOOLEAN] "
+ "[-args_file FILENAME] [+A THREADS] [+a SIZE] [+B[c|d|i]] [+c] "
+ "[+h HEAP_SIZE_OPTION] [+K BOOLEAN] "
"[+l] [+M<SUBSWITCH> <ARGUMENT>] [+P MAX_PROCS] [+R COMPAT_REL] "
- "[+r] [+s SCHEDULER_OPTION] [+S NO_SCHEDULERS:NO_SCHEDULERS_ONLINE] [+T LEVEL] [+V] [+v] [+W<i|w>] "
- "[args ...]\n");
+ "[+r] [+rg READER_GROUPS_LIMIT] [+s SCHEDULER_OPTION] "
+ "[+S NO_SCHEDULERS:NO_SCHEDULERS_ONLINE] [+T LEVEL] [+V] [+v] "
+ "[+W<i|w>] [args ...]\n");
exit(1);
}
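Editor's note: with the new plusr_val_switches table and the 'r' case above, a switch such as `+rg 4` is now accepted on the erl command line and forwarded to the emulator as `-rg 4`, capping the number of reader groups per the updated usage string. A `+rg` followed by nothing, or by an argument starting with `-` or `+`, falls into the usage() error path.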
diff --git a/erts/include/internal/erl_misc_utils.h b/erts/include/internal/erl_misc_utils.h
index 82e9ba3798..6b875ff824 100644
--- a/erts/include/internal/erl_misc_utils.h
+++ b/erts/include/internal/erl_misc_utils.h
@@ -50,4 +50,8 @@ int erts_unbind_from_cpu_str(char *str);
int erts_milli_sleep(long);
+#ifdef __WIN32__
+int erts_get_last_win_errno(void);
+#endif
+
#endif /* #ifndef ERL_MISC_UTILS_H_ */
diff --git a/erts/include/internal/ethr_internal.h b/erts/include/internal/ethr_internal.h
new file mode 100644
index 0000000000..e9c3daf783
--- /dev/null
+++ b/erts/include/internal/ethr_internal.h
@@ -0,0 +1,67 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Internal ethread exports
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_INTERNAL_H__
+#define ETHR_INTERNAL_H__
+
+#include "erl_misc_utils.h"
+
+extern ethr_memory_allocators ethr_mem__;
+extern erts_cpu_info_t *ethr_cpu_info__;
+extern size_t ethr_pagesize__;
+extern size_t ethr_min_stack_size__; /* kilo words */
+extern size_t ethr_max_stack_size__; /* kilo words */
+extern int ethr_not_completely_inited__;
+extern int ethr_not_inited__;
+
+extern void *(*ethr_thr_prepare_func__)(void);
+extern void (*ethr_thr_parent_func__)(void *);
+extern void (*ethr_thr_child_func__)(void *);
+
+#define ETHR_PAGE_ALIGN(SZ) \
+ (((((size_t) (SZ)) - 1)/ethr_pagesize__ + 1)*ethr_pagesize__)
+#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
+#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
+
+#undef ETHR_STACK_GUARD_SIZE
+#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
+# define ETHR_STACK_GUARD_SIZE (ethr_pagesize__)
+#endif
+
+/* implemented in lib_src/<thr-lib>/ethread.c */
+int ethr_set_tse__(ethr_ts_event *tsep);
+ethr_ts_event *ethr_get_tse__(void);
+ETHR_PROTO_NORETURN__ ethr_abort__(void);
+#ifdef ETHR_WIN32_THREADS
+int ethr_win_get_errno__(void);
+#endif
+
+/* implemented in lib_src/common/ethread_aux.c */
+int ethr_init_common__(ethr_init_data *id);
+int ethr_late_init_common__(ethr_late_init_data *lid);
+void ethr_run_exit_handlers__(void);
+void ethr_ts_event_destructor__(void *vtsep);
+
+
+#endif
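A standalone sketch of the round-up arithmetic behind ETHR_PAGE_ALIGN, ETHR_B2KW and ETHR_KW2B above. The 4096-byte page and the 8-byte pointers are example values only; the real code reads ethr_pagesize__ at runtime:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGESIZE ((size_t) 4096)                /* example value */
    #define PAGE_ALIGN(SZ) (((((size_t) (SZ)) - 1)/PAGESIZE + 1)*PAGESIZE)
    #define B2KW(B)  ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
    #define KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)

    int main(void)
    {
        printf("%zu\n", PAGE_ALIGN(5000)); /* 8192: rounded up to 2 pages */
        printf("%zu\n", B2KW(10000));      /* 2: kilo-words, rounded up */
        printf("%zu\n", KW2B(2));          /* 16384 with 8-byte pointers */
        return 0;
    }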
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
new file mode 100644
index 0000000000..4ce3e75c78
--- /dev/null
+++ b/erts/include/internal/ethr_mutex.h
@@ -0,0 +1,510 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_MUTEX_H__
+#define ETHR_MUTEX_H__
+
+#define ETHR_RWMUTEX_INITIALIZED 0x99999999
+#define ETHR_MUTEX_INITIALIZED 0x77777777
+#define ETHR_COND_INITIALIZED 0x55555555
+
+#if 0
+# define ETHR_MTX_HARD_DEBUG
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+# ifdef __GNUC__
+# warning ETHR_MTX_HARD_DEBUG
+# endif
+/*# define ETHR_MTX_HARD_DEBUG_LFS*/
+/*# define ETHR_MTX_HARD_DEBUG_FENCE*/
+/*# define ETHR_MTX_HARD_DEBUG_Q*/
+# define ETHR_MTX_HARD_DEBUG_WSQ
+
+# if !defined(ETHR_MTX_HARD_DEBUG_WSQ) && defined(ETHR_MTX_HARD_DEBUG_Q)
+# define ETHR_MTX_HARD_DEBUG_WSQ
+# endif
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#if 0
+# define ETHR_MTX_Q_LOCK_SPINLOCK__
+# define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
+#elif defined(ETHR_PTHREADS)
+# define ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__
+# define ETHR_MTX_QLOCK_TYPE__ pthread_mutex_t
+#elif defined(ETHR_WIN32_THREADS)
+# define ETHR_MTX_Q_LOCK_CRITICAL_SECTION__
+# define ETHR_MTX_QLOCK_TYPE__ CRITICAL_SECTION
+#else
+# error Need a qlock implementation
+#endif
+
+#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+
+/* frequent read kind */
+#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
+#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_FLG__ - 1)
+#define ETHR_RWMTX_R_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+/* normal kind */
+#define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+#define ETHR_RWMTX_WAIT_FLGS__ \
+ (ETHR_RWMTX_W_WAIT_FLG__|ETHR_RWMTX_R_WAIT_FLG__)
+
+#define ETHR_CND_WAIT_FLG__ ETHR_RWMTX_R_WAIT_FLG__
+
+struct ethr_mutex_base_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long pre_fence;
+#endif
+ ethr_atomic_t flgs;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ int ws;
+#endif
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+ ethr_atomic_t hdbg_lfs;
+#endif
+};
+
+#endif
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_mutex_opt;
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_cond_opt;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ struct ethr_mutex_base_ mtxb;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ struct {
+ long pre_fence;
+ } mtxb; /* mtxb allows us to use the same macros as for mutex and rwmutex... */
+#endif
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread */
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ pthread_mutex_t pt_mtx;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ pthread_cond_t pt_cnd;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread */
+
+int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
+int ethr_mutex_init(ethr_mutex *);
+int ethr_mutex_destroy(ethr_mutex *);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+int ethr_mutex_trylock(ethr_mutex *);
+void ethr_mutex_lock(ethr_mutex *);
+void ethr_mutex_unlock(ethr_mutex *);
+#endif
+int ethr_cond_init_opt(ethr_cond *, ethr_cond_opt *);
+int ethr_cond_init(ethr_cond *);
+int ethr_cond_destroy(ethr_cond *);
+void ethr_cond_signal(ethr_cond *);
+void ethr_cond_broadcast(ethr_cond *);
+int ethr_cond_wait(ethr_cond *, ethr_mutex *);
+
+typedef enum {
+ ETHR_RWMUTEX_TYPE_NORMAL,
+ ETHR_RWMUTEX_TYPE_FREQUENT_READ,
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+} ethr_rwmutex_type;
+
+typedef enum {
+ ETHR_RWMUTEX_LONG_LIVED,
+ ETHR_RWMUTEX_SHORT_LIVED,
+ ETHR_RWMUTEX_UNKNOWN_LIVED
+} ethr_rwmutex_lived;
+
+typedef struct {
+ ethr_rwmutex_type type;
+ ethr_rwmutex_lived lived;
+ int main_spincount;
+ int aux_spincount;
+} ethr_rwmutex_opt;
+
+#define ETHR_RWMUTEX_OPT_DEFAULT_INITER \
+ {ETHR_RWMUTEX_TYPE_NORMAL, ETHR_RWMUTEX_UNKNOWN_LIVED, -1, -1}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+typedef union {
+ struct {
+ ethr_atomic_t readers;
+ int waiting_readers;
+ int byte_offset;
+ ethr_rwmutex_lived lived;
+ } data;
+ char align__[ETHR_CACHE_LINE_SIZE];
+} ethr_rwmtx_readers_array__;
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ struct ethr_mutex_base_ mtxb;
+ ethr_rwmutex_type type;
+ ethr_ts_event *rq_end;
+ union {
+ ethr_rwmtx_readers_array__ *ra;
+ int rs;
+ } tdata;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread_rwlock */
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ pthread_rwlock_t pt_rwlock;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread_rwlock */
+
+int ethr_rwmutex_set_reader_group(int);
+int ethr_rwmutex_init_opt(ethr_rwmutex *, ethr_rwmutex_opt *);
+int ethr_rwmutex_init(ethr_rwmutex *);
+int ethr_rwmutex_destroy(ethr_rwmutex *);
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) \
+ || !defined(ETHR_TRY_INLINE_FUNCS) \
+ || defined(ETHR_MUTEX_IMPL__)
+int ethr_rwmutex_tryrlock(ethr_rwmutex *);
+void ethr_rwmutex_rlock(ethr_rwmutex *);
+void ethr_rwmutex_runlock(ethr_rwmutex *);
+int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwunlock(ethr_rwmutex *);
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+#define ETHR_MTX_HARD_ASSERT(A) \
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
+#else
+#define ETHR_MTX_HARD_ASSERT(A) ((void) 1)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
+do { \
+ ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ > 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ >= 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == -1); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeadbeefdeadbeefL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeaddeaddeadL
+#else
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeaddeadL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeadL
+#endif
+
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ETHR_MTX_HARD_ASSERT((X)->mtxb.pre_fence == ETHR_MTX_HARD_DEBUG_PRE_FENCE);\
+ ETHR_MTX_HARD_ASSERT((X)->post_fence == ETHR_MTX_HARD_DEBUG_POST_FENCE); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X) \
+do { \
+ (X)->mtxb.pre_fence = ETHR_MTX_HARD_DEBUG_PRE_FENCE; \
+ (X)->post_fence = ETHR_MTX_HARD_DEBUG_POST_FENCE; \
+} while (0)
+#else
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
+#endif
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_MTX_DEFAULT_AUX_SPINCOUNT 50
+
+#define ETHR_CND_DEFAULT_MAIN_SPINCOUNT 0
+#define ETHR_CND_DEFAULT_AUX_SPINCOUNT 0
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+void ethr_mutex_lock_wait__(ethr_mutex *, long);
+void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ long act;
+ int res;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ res = (act == 0) ? 0 : EBUSY;
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ ethr_mutex_lock_wait__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_COMPILER_BARRIER;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
+
+ act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ ethr_mutex_unlock_wake__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
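To summarize the protocol the three inline functions above implement (an annotation on this patch, not text from it), the uncontended paths never touch qlck:

    /*
     * Fast paths of the own mutex implementation, as read from the
     * code above:
     *
     *   lock:   cmpxchg_acqb(flgs, W_FLG, 0)
     *           old value 0  -> lock acquired, no queue lock taken
     *           otherwise    -> ethr_mutex_lock_wait__() queues the
     *                           caller under qlck
     *   unlock: cmpxchg_relb(flgs, 0, W_FLG)
     *           old value exactly W_FLG -> no waiters, done
     *           otherwise (wait flag set) -> ethr_mutex_unlock_wake__()
     *                           wakes a queued thread
     */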
+
+#else /* pthread_mutex */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ int res;
+ res = pthread_mutex_trylock(&mtx->pt_mtx);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_lock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_unlock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_mutex */
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT 50
+
+#else /* pthread_rwlock */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_rwlock */
+
+int ethr_mutex_lib_init(int);
+int ethr_mutex_lib_late_init(int, int);
+
+#endif /* #ifndef ETHR_MUTEX_H__ */
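A minimal usage sketch of the mutex/condition API declared in this header. It assumes the header is reachable via "ethread.h" and that initialization succeeded; the producer/consumer functions are illustrative only:

    #include "ethread.h"   /* assumed to pull in ethr_mutex.h */

    static ethr_mutex mtx;
    static ethr_cond cnd;
    static int ready = 0;

    static int setup(void)
    {
        int res = ethr_mutex_init(&mtx);
        return res != 0 ? res : ethr_cond_init(&cnd);
    }

    static void producer(void)
    {
        ethr_mutex_lock(&mtx);         /* void return; aborts on error */
        ready = 1;
        ethr_cond_signal(&cnd);
        ethr_mutex_unlock(&mtx);
    }

    static void consumer(void)
    {
        ethr_mutex_lock(&mtx);
        while (!ready)                 /* re-check: waits may wake early */
            (void) ethr_cond_wait(&cnd, &mtx);
        ethr_mutex_unlock(&mtx);
    }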
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
new file mode 100644
index 0000000000..2f9f987d0b
--- /dev/null
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -0,0 +1,185 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: "Optimized" fallbacks used when native ops are missing
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
+#define ETHR_OPTIMIZED_FALLBACKS_H__
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+/* --- Optimized spinlocks using pthread spinlocks -------------------------- */
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef pthread_spinlock_t ethr_opt_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE int
+ethr_opt_spinlock_init(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_init((pthread_spinlock_t *) lock, 0);
+}
+
+static ETHR_INLINE int
+ethr_opt_spinlock_destroy(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_destroy((pthread_spinlock_t *) lock);
+}
+
+
+static ETHR_INLINE int
+ethr_opt_spin_unlock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_unlock((pthread_spinlock_t *) lock);
+}
+
+static ETHR_INLINE int
+ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_lock((pthread_spinlock_t *) lock);
+}
+
+#endif
+
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native spinlocks using native atomics -------------------------------- */
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ (long) 1, (long) 0) != 0) {
+ ETHR_SPIN_BODY;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native rwspinlocks using native atomics ------------------------------ */
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_rwlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+#ifdef DEBUG
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
+#endif
+ ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp+1, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
+ == ETHR_WLOCK_FLAG__);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp|ETHR_WLOCK_FLAG__, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = act & ~ETHR_WLOCK_FLAG__;
+ }
+ act |= ETHR_WLOCK_FLAG__;
+ /* Wait for readers to leave */
+ while (act != ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+#endif
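A single-threaded simulation of the rwspinlock state word used above: the reader count lives in the low bits and a writer sets bit 30. This only illustrates the encoding; the real code performs these steps with cmpxchg_acqb/read_acqb as shown:

    #include <stdio.h>

    #define WLOCK_FLAG (((long) 1) << 30)  /* mirrors ETHR_WLOCK_FLAG__ */

    int main(void)
    {
        long lock = 0;        /* free */
        lock += 1;            /* reader 1 acquires: lock == 1 */
        lock += 1;            /* reader 2 acquires: lock == 2 */
        lock |= WLOCK_FLAG;   /* writer claims the write bit, then must
                               * wait until the reader count drains */
        lock -= 1;            /* reader 1 releases */
        lock -= 1;            /* reader 2 releases */
        printf("writer may enter: %d\n", lock == WLOCK_FLAG); /* 1 */
        lock = 0;             /* write unlock: word reset to free */
        return 0;
    }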
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 4e7a38cd5c..4a205699bd 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -33,20 +33,9 @@
#include <stdlib.h>
#include "erl_errno.h"
-/*
- * Extra memory barrier requirements:
- * - ethr_atomic_or_old() needs to enforce a memory barrier sufficient
- * for a lock operation.
- * - ethr_atomic_and_old() needs to enforce a memory barrier sufficient
- * for an unlock operation.
- * - ethr_atomic_cmpxchg() needs to enforce a memory barrier sufficient
- * for a lock and unlock operation.
- */
-
-
-#undef ETHR_USE_RWMTX_FALLBACK
#undef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-#undef ETHR_HAVE_OPTIMIZED_LOCKS
+#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
+#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
typedef struct {
long tv_sec;
@@ -54,6 +43,10 @@ typedef struct {
} ethr_timeval;
#if defined(DEBUG)
+# define ETHR_DEBUG
+#endif
+
+#if defined(ETHR_DEBUG)
# undef ETHR_XCHK
# define ETHR_XCHK 1
#else
@@ -68,47 +61,57 @@ typedef struct {
#elif defined(__WIN32__)
# define ETHR_INLINE __forceinline
#endif
-#if defined(DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
+#if defined(ETHR_DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
|| (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC))
# undef ETHR_INLINE
# define ETHR_INLINE
# undef ETHR_TRY_INLINE_FUNCS
#endif
-#ifdef ETHR_FORCE_INLINE_FUNCS
-# define ETHR_TRY_INLINE_FUNCS
-#endif
-#if !defined(ETHR_DISABLE_NATIVE_IMPLS) \
- && (defined(PURIFY) || defined(VALGRIND) || defined(ERTS_MIXED_CYGWIN_VC))
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && (defined(PURIFY)||defined(VALGRIND))
# define ETHR_DISABLE_NATIVE_IMPLS
#endif
-#define ETHR_RWMUTEX_INITIALIZED 0x99999999
-#define ETHR_MUTEX_INITIALIZED 0x77777777
-#define ETHR_COND_INITIALIZED 0x55555555
+/* Assume 64-byte cache line size */
+#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
-#define ETHR_CACHE_LINE_SIZE 64
+#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / ETHR_CACHE_LINE_SIZE) + 1) * ETHR_CACHE_LINE_SIZE)
-#ifdef ETHR_INLINE_FUNC_NAME_
-# define ETHR_CUSTOM_INLINE_FUNC_NAME_
-#else
+#ifndef ETHR_INLINE_FUNC_NAME_
# define ETHR_INLINE_FUNC_NAME_(X) X
#endif
-#define ETHR_COMPILER_BARRIER ethr_compiler_barrier()
-#ifdef __GNUC__
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER __asm__ __volatile__("":::"memory")
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
#endif
-#ifdef DEBUG
+int ethr_assert_failed(const char *file, int line, const char *func, char *a);
+#ifdef ETHR_DEBUG
#define ETHR_ASSERT(A) \
- ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
-int ethr_assert_failed(char *f, int l, char *a);
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, __func__, #A)))
#else
#define ETHR_ASSERT(A) ((void) 1)
#endif
+#if defined(__GNUC__)
+# define ETHR_PROTO_NORETURN__ void __attribute__((noreturn))
+# define ETHR_IMPL_NORETURN__ void
+#elif defined(__WIN32__) && defined(_MSC_VER)
+# define ETHR_PROTO_NORETURN__ __declspec(noreturn) void
+# define ETHR_IMPL_NORETURN__ __declspec(noreturn) void
+#else
+# define ETHR_PROTO_NORETURN__ void
+# define ETHR_IMPL_NORETURN__ void
+#endif
+
#if defined(ETHR_PTHREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The pthread implementation *
@@ -118,7 +121,9 @@ int ethr_assert_failed(char *f, int l, char *a);
#error "_GNU_SOURCE not defined. Please, compile all files with -D_GNU_SOURCE."
#endif
-#if defined(ETHR_HAVE_MIT_PTHREAD_H)
+#if defined(ETHR_NEED_NPTL_PTHREAD_H)
+#include <nptl/pthread.h>
+#elif defined(ETHR_HAVE_MIT_PTHREAD_H)
#include <pthread/mit/pthread.h>
#elif defined(ETHR_HAVE_PTHREAD_H)
#include <pthread.h>
@@ -128,130 +133,23 @@ int ethr_assert_failed(char *f, int l, char *a);
typedef pthread_t ethr_tid;
-typedef struct ethr_mutex_ ethr_mutex;
-struct ethr_mutex_ {
- pthread_mutex_t pt_mtx;
- int is_rec_mtx;
- ethr_mutex *prev;
- ethr_mutex *next;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-
-typedef struct ethr_cond_ ethr_cond;
-struct ethr_cond_ {
- pthread_cond_t pt_cnd;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
+typedef pthread_key_t ethr_tsd_key;
-#ifndef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-#define ETHR_USE_RWMTX_FALLBACK
-#else
-typedef struct ethr_rwmutex_ ethr_rwmutex;
-struct ethr_rwmutex_ {
- pthread_rwlock_t pt_rwlock;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-#endif
+#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-/* Static initializers */
-#if ETHR_XCHK
-#define ETHR_MUTEX_XCHK_INITER , ETHR_MUTEX_INITIALIZED
-#define ETHR_COND_XCHK_INITER , ETHR_COND_INITIALIZED
-#else
-#define ETHR_MUTEX_XCHK_INITER
-#define ETHR_COND_XCHK_INITER
+#if defined(PURIFY) || defined(VALGRIND)
+# define ETHR_FORCE_PTHREAD_RWLOCK
+# define ETHR_FORCE_PTHREAD_MUTEX
#endif
-#define ETHR_MUTEX_INITER {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-#define ETHR_COND_INITER {PTHREAD_COND_INITIALIZER ETHR_COND_XCHK_INITER}
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE) \
- || defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-# define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
-# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
-# define ETHR_REC_MUTEX_INITER \
- {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, 1, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-# endif
-#else
-# undef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+#if !defined(ETHR_FORCE_PTHREAD_RWLOCK)
+# define ETHR_USE_OWN_RWMTX_IMPL__
#endif
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-# define ETHR_NO_FORKSAFETY 1
+#if !defined(ETHR_FORCE_PTHREAD_MUTEX) && 0
+# define ETHR_USE_OWN_MTX_IMPL__
#endif
-typedef pthread_key_t ethr_tsd_key;
-
-#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- return pthread_mutex_trylock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- return pthread_mutex_lock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- return pthread_mutex_unlock(&mtx->pt_mtx);
-}
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-#endif /* ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
-
#elif defined(ETHR_WIN32_THREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The native win32 threads implementation *
@@ -273,409 +171,22 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
# undef WIN32_LEAN_AND_MEAN
#endif
-/* Types */
-typedef long ethr_tid; /* thread id type */
-typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
-#if ETHR_XCHK
- int is_rec_mtx;
-#endif
-} ethr_mutex;
-
-typedef struct cnd_wait_event__ cnd_wait_event_;
+struct ethr_join_data_;
+/* Types */
typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
- cnd_wait_event_ *queue;
- cnd_wait_event_ *queue_end;
-} ethr_cond;
-
-#define ETHR_USE_RWMTX_FALLBACK
-
-/* Static initializers */
-
-#define ETHR_MUTEX_INITER {0}
-#define ETHR_COND_INITER {0}
-
-#define ETHR_REC_MUTEX_INITER ETHR_MUTEX_INITER
-
-#define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
+ long id;
+ struct ethr_join_data_ *jdata;
+} ethr_tid; /* thread id type */
typedef DWORD ethr_tsd_key;
#undef ETHR_HAVE_ETHR_SIG_FUNCS
-#ifdef ETHR_TRY_INLINE_FUNCS
-int ethr_fake_static_mutex_init(ethr_mutex *mtx);
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return TryEnterCriticalSection(&mtx->cs) ? 0 : EBUSY;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&mtx->cs);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- LeaveCriticalSection(&mtx->cs);
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#ifdef ERTS_MIXED_CYGWIN_VC
-
-/* atomics */
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
-#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-#endif
-
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
-
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile LONG x___ = 0; \
- (void) _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
-
-typedef struct {
- volatile LONG value;
-} ethr_atomic_t;
-
-typedef struct {
- volatile LONG locked;
-} ethr_spinlock_t;
-
-typedef struct {
- volatile LONG counter;
-} ethr_rwlock_t;
-#define ETHR_WLOCK_FLAG__ (((LONG) 1) << 30)
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- *i = var->value;
-#else
- *i = InterlockedExchangeAdd(&var->value, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = InterlockedExchangeAdd(&var->value, (LONG) i);
- *testp += i;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- (void) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- (void) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedAnd() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedAnd(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedOr() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedOr(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedCompareExchange() provides a full
- * memory barrier.
- */
- *old = _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) expected);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = (long) InterlockedExchange(&var->value, (LONG) new);
- return 0;
-}
-
-/*
- * According to msdn InterlockedExchange() provides a full
- * memory barrier.
- */
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->locked = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->locked, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
-{
- return 0;
-}
-
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedExchange(&lock->locked, (LONG) 0);
-#ifdef DEBUG
- ETHR_ASSERT(old == 1);
-#endif
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
-{
- LONG old;
- do {
- old = InterlockedExchange(&lock->locked, (LONG) 1);
- } while (old != (LONG) 0);
- ETHR_COMPILER_BARRIER;
- return 0;
-}
+#define ETHR_USE_OWN_RWMTX_IMPL__
+#define ETHR_USE_OWN_MTX_IMPL__
-/*
- * According to msdn InterlockedIncrement, InterlockedDecrement,
- * and InterlockedExchangeAdd(), _InterlockedAnd, and _InterlockedOr
- * provides full memory barriers.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->counter = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->counter, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedDecrement(&lock->counter);
- ETHR_ASSERT(old != 0);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- while (1) {
- LONG old = InterlockedIncrement(&lock->counter);
- if ((old & ETHR_WLOCK_FLAG__) == 0)
- break; /* Got read lock */
- /* Restore and wait for writers to unlock */
- old = InterlockedDecrement(&lock->counter);
- while (old & ETHR_WLOCK_FLAG__) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- }
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- _InterlockedAnd(&lock->counter, ~ETHR_WLOCK_FLAG__);
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- LONG old;
- do {
- old = _InterlockedOr(&lock->counter, ETHR_WLOCK_FLAG__);
- } while (old & ETHR_WLOCK_FLAG__);
- /* We got the write part of the lock; wait for readers to unlock */
- while ((old & ~ETHR_WLOCK_FLAG__) != 0) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* #ifdef ERTS_MIXED_CYGWIN_VC */
+#define ETHR_YIELD() (Sleep(0), 0)
#else /* No supported thread lib found */
@@ -687,6 +198,12 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
#endif
+#ifdef SIZEOF_LONG
+#if SIZEOF_LONG < ETHR_SIZEOF_PTR
+#error size of long currently needs to be at least the same as size of void *
+#endif
+#endif
+
/* __builtin_expect() is needed by both native atomics code
* and the fallback code */
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
@@ -698,6 +215,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# if defined(__GNUC__)
# if defined(ETHR_PREFER_GCC_NATIVE_IMPLS)
# include "gcc/ethread.h"
+# elif defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
+# include "libatomic_ops/ethread.h"
# endif
# ifndef ETHR_HAVE_NATIVE_ATOMICS
# if ETHR_SIZEOF_PTR == 4
@@ -718,115 +237,141 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# endif
# endif
# include "gcc/ethread.h"
+# include "libatomic_ops/ethread.h"
# endif
+# elif defined(ETHR_WIN32_THREADS)
+# include "win/ethread.h"
# endif
-#endif /* !defined(ETHR_DISABLE_NATIVE_IMPLS) */
-
-#ifdef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-# undef ETHR_HAVE_NATIVE_ATOMICS
-#endif
-#ifdef ETHR_HAVE_OPTIMIZED_LOCKS
-# undef ETHR_HAVE_NATIVE_LOCKS
-#endif
+#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
+#if defined(__GNUC__)
+# ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+# endif
+# ifndef ETHR_SPIN_BODY
+# if defined(__i386__) || defined(__x86_64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
+# elif defined(__ia64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("hint @pause" : : : "memory")
+# elif defined(__sparc__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("membar #LoadLoad")
+# else
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+# endif
+# endif
+#elif defined(ETHR_WIN32_THREADS)
+# ifndef ETHR_COMPILER_BARRIER
+# include <intrin.h>
+# pragma intrinsic(_ReadWriteBarrier)
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+# ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
+# endif
#endif
-typedef struct {
- unsigned open;
- ethr_mutex mtx;
- ethr_cond cnd;
-} ethr_gate;
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Map ethread native atomics to ethread API atomics.
+ * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
+ * i.e. when our lock-based atomic fallback is used, a noop is sufficient.
*/
-typedef ethr_native_atomic_t ethr_atomic_t;
+#define ETHR_MEMORY_BARRIER do { } while (0)
+#define ETHR_WRITE_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER do { } while (0)
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-/*
- * Map ethread native spinlocks to ethread API spinlocks.
- */
-typedef ethr_native_spinlock_t ethr_spinlock_t;
-/*
- * Map ethread native rwlocks to ethread API rwlocks.
- */
-typedef ethr_native_rwlock_t ethr_rwlock_t;
+#ifndef ETHR_WRITE_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER_IS_FULL
#endif
-
-#ifdef ETHR_USE_RWMTX_FALLBACK
-typedef struct {
- ethr_mutex mtx;
- ethr_cond rcnd;
- ethr_cond wcnd;
- unsigned readers;
- unsigned waiting_readers;
- unsigned waiting_writers;
-#if ETHR_XCHK
- int initialized;
+#ifndef ETHR_READ_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER_IS_FULL
#endif
-} ethr_rwmutex;
+#ifndef ETHR_READ_DEPEND_MEMORY_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER_IS_COMPILER_BARRIER
#endif
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-typedef long ethr_atomic_t;
-#endif
+#define ETHR_FATAL_ERROR__(ERR) \
+ ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err);
-#if defined(ETHR_WIN32_THREADS)
-typedef struct {
- CRITICAL_SECTION cs;
-} ethr_spinlock_t;
-typedef struct {
- CRITICAL_SECTION cs;
- unsigned counter;
-} ethr_rwlock_t;
+void ethr_compiler_barrier_fallback(void);
+#ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+#endif
-int ethr_do_spinlock_init(ethr_spinlock_t *lock);
-int ethr_do_rwlock_init(ethr_rwlock_t *lock);
+#ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+#endif
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+#ifndef ETHR_YIELD
+# if defined(ETHR_HAVE_SCHED_YIELD)
+# ifdef ETHR_HAVE_SCHED_H
+# include <sched.h>
+# endif
+# include <errno.h>
+# if defined(ETHR_SCHED_YIELD_RET_INT)
+# define ETHR_YIELD() (sched_yield() < 0 ? errno : 0)
+# else
+# define ETHR_YIELD() (sched_yield(), 0)
+# endif
+# elif defined(ETHR_HAVE_PTHREAD_YIELD)
+# if defined(ETHR_PTHREAD_YIELD_RET_INT)
+# define ETHR_YIELD() pthread_yield()
+# else
+# define ETHR_YIELD() (pthread_yield(), 0)
+# endif
+# else
+# define ETHR_YIELD() (ethr_compiler_barrier(), 0)
+# endif
+#endif
+
+#include "ethr_optimized_fallbacks.h"
-#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-typedef struct {
- pthread_spinlock_t spnlck;
-} ethr_spinlock_t;
typedef struct {
- pthread_spinlock_t spnlck;
- unsigned counter;
-} ethr_rwlock_t;
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+ void *(*thread_create_prepare_func)(void);
+ void (*thread_create_parent_func)(void *);
+ void (*thread_create_child_func)(void *);
+} ethr_init_data;
-#else /* ethr mutex/rwmutex */
+#define ETHR_INIT_DATA_DEFAULT_INITER {NULL, NULL, NULL}
typedef struct {
- ethr_mutex mtx;
-} ethr_spinlock_t;
+ void *(*alloc)(size_t);
+ void *(*realloc)(void *, size_t);
+ void (*free)(void *);
+} ethr_memory_allocator;
+
+#define ETHR_MEM_ALLOC_DEF_INITER__ {NULL, NULL, NULL}
typedef struct {
- ethr_rwmutex rwmtx;
-} ethr_rwlock_t;
+ ethr_memory_allocator std;
+ ethr_memory_allocator sl;
+ ethr_memory_allocator ll;
+} ethr_memory_allocators;
-#endif /* end mutex/rwmutex */
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
+#define ETHR_MEM_ALLOCS_DEF_INITER__ \
+ {ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__}
typedef struct {
- void *(*alloc)(size_t);
- void *(*realloc)(void *, size_t);
- void (*free)(void *);
- void *(*thread_create_prepare_func)(void);
- void (*thread_create_parent_func)(void *);
- void (*thread_create_child_func)(void *);
-} ethr_init_data;
+ ethr_memory_allocators mem;
+ int reader_groups;
+ int main_threads;
+} ethr_late_init_data;
-#define ETHR_INIT_DATA_DEFAULT_INITER {malloc, realloc, free, NULL, NULL, NULL}
+#define ETHR_LATE_INIT_DATA_DEFAULT_INITER \
+ {ETHR_MEM_ALLOCS_DEF_INITER__, 0, 0}
typedef struct {
int detached; /* boolean (default false) */
@@ -835,18 +380,15 @@ typedef struct {
#define ETHR_THR_OPTS_DEFAULT_INITER {0, -1}
-#if defined(ETHR_CUSTOM_INLINE_FUNC_NAME_) || !defined(ETHR_TRY_INLINE_FUNCS)
-# define ETHR_NEED_MTX_PROTOTYPES__
-# define ETHR_NEED_RWMTX_PROTOTYPES__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
# define ETHR_NEED_SPINLOCK_PROTOTYPES__
+# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
# define ETHR_NEED_ATOMIC_PROTOTYPES__
#endif
-#if !defined(ETHR_NEED_RWMTX_PROTOTYPES__) && defined(ETHR_USE_RWMTX_FALLBACK)
-# define ETHR_NEED_RWMTX_PROTOTYPES__
-#endif
-
int ethr_init(ethr_init_data *);
+int ethr_late_init(ethr_late_init_data *);
int ethr_install_exit_handler(void (*funcp)(void));
int ethr_thr_create(ethr_tid *, void * (*)(void *), void *, ethr_thr_opts *);
int ethr_thr_join(ethr_tid, void **);
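A minimal create/join sketch against the prototypes above; the thread body, the string argument, and the use of the default initializers are illustrative, and ethr_late_init() (also declared above) is omitted:

    #include <stdio.h>
    #include "ethread.h"

    static void *thr_func(void *arg)
    {
        return arg;                    /* echoed back through join */
    }

    int main(void)
    {
        ethr_init_data id = ETHR_INIT_DATA_DEFAULT_INITER;
        ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
        ethr_tid tid;
        void *res;

        if (ethr_init(&id) != 0
            || ethr_thr_create(&tid, thr_func, (void *) "done", &opts) != 0
            || ethr_thr_join(tid, &res) != 0)
            return 1;
        printf("%s\n", (char *) res);  /* prints "done" */
        return 0;
    }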
@@ -854,65 +396,6 @@ int ethr_thr_detach(ethr_tid);
void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_mutex_init(ethr_mutex *);
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-int ethr_rec_mutex_init(ethr_mutex *);
-#endif
-int ethr_mutex_destroy(ethr_mutex *);
-int ethr_mutex_set_forksafe(ethr_mutex *);
-int ethr_mutex_unset_forksafe(ethr_mutex *);
-#ifdef ETHR_NEED_MTX_PROTOTYPES__
-int ethr_mutex_trylock(ethr_mutex *);
-int ethr_mutex_lock(ethr_mutex *);
-int ethr_mutex_unlock(ethr_mutex *);
-#endif
-int ethr_cond_init(ethr_cond *);
-int ethr_cond_destroy(ethr_cond *);
-int ethr_cond_signal(ethr_cond *);
-int ethr_cond_broadcast(ethr_cond *);
-int ethr_cond_wait(ethr_cond *, ethr_mutex *);
-int ethr_cond_timedwait(ethr_cond *, ethr_mutex *, ethr_timeval *);
-
-int ethr_rwmutex_init(ethr_rwmutex *);
-int ethr_rwmutex_destroy(ethr_rwmutex *);
-#ifdef ETHR_NEED_RWMTX_PROTOTYPES__
-int ethr_rwmutex_tryrlock(ethr_rwmutex *);
-int ethr_rwmutex_rlock(ethr_rwmutex *);
-int ethr_rwmutex_runlock(ethr_rwmutex *);
-int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwunlock(ethr_rwmutex *);
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-int ethr_atomic_init(ethr_atomic_t *, long);
-int ethr_atomic_set(ethr_atomic_t *, long);
-int ethr_atomic_read(ethr_atomic_t *, long *);
-int ethr_atomic_inctest(ethr_atomic_t *, long *);
-int ethr_atomic_dectest(ethr_atomic_t *, long *);
-int ethr_atomic_inc(ethr_atomic_t *);
-int ethr_atomic_dec(ethr_atomic_t *);
-int ethr_atomic_addtest(ethr_atomic_t *, long, long *);
-int ethr_atomic_add(ethr_atomic_t *, long);
-int ethr_atomic_and_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_or_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_xchg(ethr_atomic_t *, long, long *);
-int ethr_atomic_cmpxchg(ethr_atomic_t *, long, long, long *);
-#endif
-
-#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
-int ethr_spinlock_init(ethr_spinlock_t *);
-int ethr_spinlock_destroy(ethr_spinlock_t *);
-int ethr_spin_unlock(ethr_spinlock_t *);
-int ethr_spin_lock(ethr_spinlock_t *);
-
-int ethr_rwlock_init(ethr_rwlock_t *);
-int ethr_rwlock_destroy(ethr_rwlock_t *);
-int ethr_read_unlock(ethr_rwlock_t *);
-int ethr_read_lock(ethr_rwlock_t *);
-int ethr_write_unlock(ethr_rwlock_t *);
-int ethr_write_lock(ethr_rwlock_t *);
-#endif
int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
@@ -920,13 +403,6 @@ int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
void *ethr_tsd_get(ethr_tsd_key);
-int ethr_gate_init(ethr_gate *);
-int ethr_gate_destroy(ethr_gate *);
-int ethr_gate_close(ethr_gate *);
-int ethr_gate_let_through(ethr_gate *, unsigned);
-int ethr_gate_wait(ethr_gate *);
-int ethr_gate_swait(ethr_gate *, int);
-
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#include <signal.h>
int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset);
@@ -935,534 +411,579 @@ int ethr_sigwait(const sigset_t *set, int *sig);
void ethr_compiler_barrier(void);
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_init(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_set(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
- *i = ethr_native_atomic_read(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ethr_native_atomic_add(var, incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = ethr_native_atomic_add_return(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- ethr_native_atomic_inc(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- ethr_native_atomic_dec(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_inc_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_dec_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_and_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_or_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = ethr_native_atomic_xchg(var, new);
- return 0;
-}
-
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_cmpxchg(var, new, expected);
- return 0;
-}
+#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
+typedef ethr_native_spinlock_t ethr_spinlock_t;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+typedef ethr_opt_spinlock_t ethr_spinlock_t;
+#elif defined(__WIN32__)
+typedef CRITICAL_SECTION ethr_spinlock_t;
+#else
+typedef pthread_mutex_t ethr_spinlock_t;
+#endif
-#endif /* ETHR_HAVE_NATIVE_ATOMICS */
+#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
+int ethr_spinlock_init(ethr_spinlock_t *);
+int ethr_spinlock_destroy(ethr_spinlock_t *);
+void ethr_spin_unlock(ethr_spinlock_t *);
+void ethr_spin_lock(ethr_spinlock_t *);
+#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spinlock_init(lock);
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_init((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ if (!InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION *) lock, INT_MAX))
+ return ethr_win_get_errno__();
+ return 0;
+#else
+ return pthread_mutex_init((pthread_mutex_t *) lock, NULL);
+#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_destroy((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ DeleteCriticalSection((CRITICAL_SECTION *) lock);
+ return 0;
+#else
+ return pthread_mutex_destroy((pthread_mutex_t *) lock);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_unlock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_unlock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ LeaveCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_unlock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
- ethr_native_rwlock_init(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_lock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_lock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ EnterCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_lock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-#endif /* ETHR_HAVE_NATIVE_LOCKS */
-
#endif /* ETHR_TRY_INLINE_FUNCS */
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Fallbacks for atomics used in absence of optimized implementation.
+ * Map ethread native atomics to ethread API atomics.
*/
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+typedef ethr_native_atomic_t ethr_atomic_t;
+#else
+typedef long ethr_atomic_t;
+#endif
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+void ethr_atomic_init(ethr_atomic_t *, long);
+void ethr_atomic_set(ethr_atomic_t *, long);
+long ethr_atomic_read(ethr_atomic_t *);
+long ethr_atomic_inc_read(ethr_atomic_t *);
+long ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+long ethr_atomic_add_read(ethr_atomic_t *, long);
+void ethr_atomic_add(ethr_atomic_t *, long);
+long ethr_atomic_read_band(ethr_atomic_t *, long);
+long ethr_atomic_read_bor(ethr_atomic_t *, long);
+long ethr_atomic_xchg(ethr_atomic_t *, long);
+long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
+long ethr_atomic_read_acqb(ethr_atomic_t *);
+long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, long);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+long ethr_atomic_dec_read_relb(ethr_atomic_t *);
+long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
+long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * ETHR_MEMORY_BARRIER orders between locked and atomic accesses only,
- * i.e. when this atomic fallback is used a noop is sufficient.
+ * Fallbacks for atomics used in absence of a native implementation.
*/
-#define ETHR_MEMORY_BARRIER
#define ETHR_ATOMIC_ADDR_BITS 10
#define ETHR_ATOMIC_ADDR_SHIFT 6
typedef struct {
union {
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- pthread_spinlock_t spnlck;
-#else
- ethr_mutex mtx;
-#endif
+ ethr_spinlock_t lck;
char buf[ETHR_CACHE_LINE_SIZE];
} u;
} ethr_atomic_protection_t;
extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-
#define ETHR_ATOMIC_PTR2LCK__(PTR) \
(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.spnlck)
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
do { \
- pthread_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = pthread_spin_lock(slp__); \
- if (res__ != 0) \
- return res__; \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
{ EXPS; } \
- return pthread_spin_unlock(slp__); \
+ ethr_spin_unlock(slp__); \
} while (0)
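The hashing in ETHR_ATOMIC_PTR2LCK__ above, distilled into a standalone function; the constants mirror ETHR_ATOMIC_ADDR_SHIFT (6, the cache-line shift) and ETHR_ATOMIC_ADDR_BITS (10, giving 1024 lock slots):

    #include <stdio.h>

    #define ADDR_BITS  10
    #define ADDR_SHIFT 6

    /* All fallback atomics whose addresses share one 64-byte line map
     * to the same slot of ethr_atomic_protection__[1 << ADDR_BITS]. */
    static unsigned lock_index(const void *p)
    {
        return (unsigned) ((((unsigned long) p) >> ADDR_SHIFT)
                           & ((1 << ADDR_BITS) - 1));
    }

    int main(void)
    {
        long a[2];
        /* adjacent longs usually share a cache line, and hence
         * usually serialize on the same spinlock */
        printf("%u %u\n", lock_index(&a[0]), lock_index(&a[1]));
        return 0;
    }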
-#else /* ethread mutex */
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.mtx)
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_mutex *mtxp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(mtxp__); \
- if (res__ != 0) \
- return res__; \
- { EXPS; } \
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(mtxp__); \
-} while (0)
-
-#endif /* end ethread mutex */
-
-#ifdef ETHR_TRY_INLINE_FUNCS
+#endif
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_init(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *i = (long) *var);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *incp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *testp = (long) ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_add(var, incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_add_return(var, i);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *decp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, *testp = (long) --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_inc(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *incp,
- long i,
- long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *incp += i; *testp = *incp);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *incp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *decp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var &= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_and_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var |= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_or_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
+ long new)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var = new);
-}
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_xchg(var, new);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
long new,
- long expected,
- long *old)
+ long exp)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(
- var,
- long old_val = *var;
- *old = old_val;
- if (__builtin_expect(old_val == expected, 1))
- *var = new;
- );
- return 0;
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS */
-
/*
- * Fallbacks for spin locks, and rw spin locks used in absence of
- * optimized implementation.
+ * Important memory barrier requirements.
+ *
+ * Each of the following atomic operations *must* supply a memory
+ * barrier of at least the type indicated by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
*/
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-#ifdef ETHR_TRY_INLINE_FUNCS
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read_acqb(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return_acqb(var);
#else
- return ethr_mutex_init(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set_relb(var, val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec_relb(var);
#else
- return ethr_mutex_destroy(&lock->mtx);
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
#endif
}
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return_relb(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ long new,
+ long exp)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ long new,
+ long exp)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_lock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_relb(var, new, exp);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
}
-#ifdef ETHR_USE_RWMTX_FALLBACK
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) X
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
+
+#if defined(ETHR_WIN32_THREADS)
+# include "win/ethr_event.h"
#else
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) ETHR_INLINE_FUNC_NAME_(X)
+# include "pthread/ethr_event.h"
+#endif
+
+int ethr_set_main_thr_status(int, int);
+int ethr_get_main_thr_status(int *);
+
+struct ethr_ts_event_ {
+ ethr_ts_event *next;
+ ethr_ts_event *prev;
+ ethr_event event;
+ void *udata;
+ ethr_atomic_t uaflgs;
+ unsigned uflgs;
+ unsigned iflgs; /* for ethr lib only */
+ short rgix; /* for ethr lib only */
+ short mtix; /* for ethr lib only */
+};
+
+#define ETHR_TS_EV_ETHREAD (((unsigned) 1) << 0)
+#define ETHR_TS_EV_INITED (((unsigned) 1) << 1)
+#define ETHR_TS_EV_TMP (((unsigned) 1) << 2)
+#define ETHR_TS_EV_MAIN_THR (((unsigned) 1) << 3)
+
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp);
+int ethr_free_ts_event__(ethr_ts_event *tsep);
+int ethr_make_ts_event__(ethr_ts_event **tsepp);
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+ethr_ts_event *ethr_get_ts_event(void);
+void ethr_leave_ts_event(ethr_ts_event *);
+#endif
+
+#if defined(ETHR_PTHREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern pthread_key_t ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = pthread_getspecific(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_make_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+ /* Nothing to do; the pthread implementation keeps the event in TSD */
+}
+
#endif
+#elif defined(ETHR_WIN32_THREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern DWORD ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = TlsGetValue(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_get_tmp_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+ if (tsep->iflgs & ETHR_TS_EV_TMP) {
+ int res = ethr_free_ts_event__(tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+#endif
+
+#endif
+
+#include "ethr_mutex.h" /* Need atomic declarations and tse */
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+typedef ethr_native_rwlock_t ethr_rwlock_t;
+#else
+typedef ethr_rwmutex ethr_rwlock_t;
+#endif
+
+#ifdef ETHR_NEED_RWSPINLOCK_PROTOTYPES__
+int ethr_rwlock_init(ethr_rwlock_t *);
+int ethr_rwlock_destroy(ethr_rwlock_t *);
+void ethr_read_unlock(ethr_rwlock_t *);
+void ethr_read_lock(ethr_rwlock_t *);
+void ethr_write_unlock(ethr_rwlock_t *);
+void ethr_write_lock(ethr_rwlock_t *);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_rwlock_init(lock);
+ return 0;
#else
- return ethr_rwmutex_init(&lock->rwmtx);
+ return ethr_rwmutex_init_opt((ethr_rwmutex *) lock, NULL);
#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ return 0;
#else
- return ethr_rwmutex_destroy(&lock->rwmtx);
+ return ethr_rwmutex_destroy((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter--;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_runlock)(&lock->rwmtx);
+ ethr_rwmutex_runlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int locked = 0;
- do {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- if ((lock->counter & ETHR_RWLOCK_WRITERS) == 0) {
- lock->counter++;
- locked = 1;
- }
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- } while (!locked);
- return 0;
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rlock)(&lock->rwmtx);
+ ethr_rwmutex_rlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwunlock)(&lock->rwmtx);
+ ethr_rwmutex_rwunlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- while (1) {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter |= ETHR_RWLOCK_WRITERS;
- if (lock->counter == ETHR_RWLOCK_WRITERS)
- return 0;
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- }
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwlock)(&lock->rwmtx);
+ ethr_rwmutex_rwlock((ethr_rwmutex *) lock);
#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#if defined(ETHR_HAVE_OPTIMIZED_LOCKS) || defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-# define ETHR_HAVE_OPTIMIZED_SPINLOCK
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* #ifndef ETHREAD_H__ */
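The fallback above protects each atomic variable with one spinlock out of a
fixed table, selected by hashing the variable's address. Below is a minimal
stand-alone sketch of the same technique, using pthread spinlocks for
concreteness (the ethread code uses its own ethr_spinlock_t, and the table
would be initialized once at library start-up, which is omitted here):

    #include <pthread.h>

    #define ADDR_BITS  10                     /* 1024 lock slots */
    #define ADDR_SHIFT 6                      /* drop cache-line-offset bits */

    /* one slot per hash bucket; pthread_spin_init() each entry at start-up */
    static pthread_spinlock_t prot[1 << ADDR_BITS];

    static pthread_spinlock_t *ptr2lck(void *p)
    {
        unsigned long u = (unsigned long) p;
        return &prot[(u >> ADDR_SHIFT) & ((1 << ADDR_BITS) - 1)];
    }

    long locked_add_read(long *var, long incr)
    {
        pthread_spinlock_t *l = ptr2lck(var);
        long res;
        pthread_spin_lock(l);
        res = (*var += incr);                 /* plain operation under the lock */
        pthread_spin_unlock(l);
        return res;
    }

Two variables that hash to the same slot simply share a lock; that costs
throughput, never correctness.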
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index c9fd87c2f6..5debb44756 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -29,26 +29,54 @@
/* Define if you have pthreads */
#undef ETHR_PTHREADS
+/* Define if you need the <nptl/pthread.h> header file. */
+#undef ETHR_NEED_NPTL_PTHREAD_H
+
/* Define if you have the <pthread.h> header file. */
#undef ETHR_HAVE_PTHREAD_H
/* Define if the pthread.h header file is in pthread/mit directory. */
#undef ETHR_HAVE_MIT_PTHREAD_H
-/* Define if you have the pthread_mutexattr_settype function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE
+/* Define if you have the pthread_spin_lock function. */
+#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
-/* Define if you have the pthread_mutexattr_setkind_np function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP
+/* Define if you want to force usage of pthread rwlocks */
+#undef ETHR_FORCE_PTHREAD_RWLOCK
-/* Define if you have the pthread_atfork function. */
-#undef ETHR_HAVE_PTHREAD_ATFORK
+/* Define if you have the pthread_rwlockattr_setkind_np() function. */
+#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
-/* Define if you have the pthread_spin_lock function. */
-#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
+/* Define if you have the PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock
+ attribute. */
+#undef ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
-/* Define if you have a pthread_rwlock implementation that can be used */
-#undef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+/* Define if you have a linux futex implementation. */
+#undef ETHR_HAVE_LINUX_FUTEX
+
+/* Define if you have gcc atomic operations */
+#undef ETHR_HAVE_GCC_ATOMIC_OPS
+
+/* Define if you prefer gcc native ethread implementations */
+#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+
+/* Define if you have the <sched.h> header file. */
+#undef ETHR_HAVE_SCHED_H
+
+/* Define if you have the sched_yield() function. */
+#undef ETHR_HAVE_SCHED_YIELD
+
+/* Define if you have the pthread_yield() function. */
+#undef ETHR_HAVE_PTHREAD_YIELD
+
+/* Define if pthread_yield() returns an int. */
+#undef ETHR_PTHREAD_YIELD_RET_INT
+
+/* Define if sched_yield() returns an int. */
+#undef ETHR_SCHED_YIELD_RET_INT
+
+/* Define if you want compatibility with x86 processors before the Pentium 4. */
+#undef ETHR_PRE_PENTIUM4_COMPAT
/* Define if you have the pthread_rwlockattr_setkind_np() function. */
#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
@@ -63,6 +91,15 @@
/* Define if you prefer gcc native ethread implementations */
#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+/* Define if you have libatomic_ops atomic operations */
+#undef ETHR_HAVE_LIBATOMIC_OPS
+
+/* Define if you prefer libatomic_ops native ethread implementations */
+#undef ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS
+
+/* Define to the size of AO_t if libatomic_ops is used */
+#undef ETHR_SIZEOF_AO_T
+
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index 775030c8d5..5fe6e23477 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -55,6 +55,7 @@ do { \
volatile long x___ = 0; \
(void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
} while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
@@ -157,6 +158,24 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
return act;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ return __sync_add_and_fetch(&var->counter, (long) 0);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif
#endif
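The gcc backend above builds everything on the __sync builtins, which gcc
documents as full memory barriers; that is why an acquire-ordered read can be
expressed as add-and-fetch of zero. A short stand-alone sketch of the two key
builtins (plain C, gcc >= 4.1 assumed; function names are illustrative):

    /* Acquire-ordered read expressed as "add zero and fetch";
     * the __sync builtins act as full barriers. */
    static long read_acqb(long *p)
    {
        return __sync_add_and_fetch(p, 0L);
    }

    /* Compare-and-swap returning the previous value, the shape the
     * ethread API expects (note the oldval-before-newval argument order). */
    static long cmpxchg(long *p, long new_val, long expected)
    {
        return __sync_val_compare_and_swap(p, expected, new_val);
    }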
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 90b4c5f773..f28258059f 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -32,8 +32,11 @@ typedef struct {
volatile long counter;
} ethr_native_atomic_t;
-#ifdef __x86_64__
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
#else
#define ETHR_MEMORY_BARRIER \
do { \
@@ -42,7 +45,9 @@ do { \
} while (0)
#endif
-#ifdef ETHR_TRY_INLINE_FUNCS
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#ifdef __x86_64__
#define LONG_SUFFIX "q"
@@ -158,6 +163,22 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return tmp;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+#define ethr_native_atomic_read_acqb ethr_native_atomic_read
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+#define ethr_native_atomic_set_relb ethr_native_atomic_set
+#else
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#endif
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#undef LONG_SUFFIX
#endif /* ETHR_TRY_INLINE_FUNCS */
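The new ETHR_PRE_PENTIUM4_COMPAT switch decides whether mfence/sfence/lfence
may be used on x86. Older CPUs lack those instructions, so a locked
read-modify-write has to stand in for a full fence; a sketch of that classic
idiom (inline gcc assembly, illustrative only):

    /* Full memory barrier without mfence, usable on CPUs older than the
     * Pentium 4: a locked read-modify-write of a dummy stack variable. */
    static inline void full_barrier_pre_p4(void)
    {
        volatile int dummy = 0;
        __asm__ __volatile__("lock; addl $0, %0"
                             : "+m" (dummy)
                             :
                             : "memory");
    }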
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index fad8b108fa..ed43e77279 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_I386_ETHREAD_H */
diff --git a/erts/include/internal/i386/rwlock.h b/erts/include/internal/i386/rwlock.h
index c009be8ef1..be47f459ce 100644
--- a/erts/include/internal/i386/rwlock.h
+++ b/erts/include/internal/i386/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#define ETHR_RWLOCK_OFFSET (1<<24)
diff --git a/erts/include/internal/i386/spinlock.h b/erts/include/internal/i386/spinlock.h
index 2b4832e26a..0325324895 100644
--- a/erts/include/internal/i386/spinlock.h
+++ b/erts/include/internal/i386/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
@@ -46,7 +46,7 @@ ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
* On i386 this needs to be a locked operation
* to avoid Pentium Pro errata 66 and 92.
*/
-#if defined(__x86_64__)
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
__asm__ __volatile__("" : : : "memory");
*(unsigned char*)&lock->lock = 0;
#else
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
new file mode 100644
index 0000000000..a6eb43a0bd
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -0,0 +1,292 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_ATOMIC_H__
+#define ETHR_LIBATOMIC_OPS_ATOMIC_H__
+
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if (defined(__i386__) && !defined(ETHR_PRE_PENTIUM4_COMPAT)) \
+ || defined(__x86_64__)
+#define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#include "atomic_ops.h"
+
+/*
+ * libatomic_ops can be downloaded from:
+ * http://www.hpl.hp.com/research/linux/atomic_ops/
+ *
+ * These operations need to be defined by libatomic_ops;
+ * otherwise, we won't compile:
+ * - AO_nop_full()
+ * - AO_load()
+ * - AO_store()
+ * - AO_compare_and_swap()
+ *
+ * The `AO_t' type also has to be at least as large as
+ * the `void *' and `long' types.
+ */
+
+#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
+#error The AO_t type is too small
+#endif
+
+typedef struct {
+ volatile AO_t counter;
+} ethr_native_atomic_t;
+
+#define ETHR_MEMORY_BARRIER AO_nop_full()
+#ifdef AO_HAVE_nop_write
+# define ETHR_WRITE_MEMORY_BARRIER AO_nop_write()
+#else
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_HAVE_nop_read
+# define ETHR_READ_MEMORY_BARRIER AO_nop_read()
+#else
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_NO_DD_ORDERING
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#else
+# define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("":::"memory")
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+{
+ AO_store(&var->counter, (AO_t) value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+{
+ ethr_native_atomic_set(var, value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return (long) AO_load(&var->counter);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+#ifdef AO_HAVE_fetch_and_add
+ return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+#else
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp + (AO_t) incr;
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) new;
+ }
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) ethr_native_atomic_add_return(var, incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1
+ return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+#else
+ return ethr_native_atomic_add_return(var, 1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_inc_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1
+ return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+#else
+ return ethr_native_atomic_add_return(var, -1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp & ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp | ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+{
+ long act;
+ do {
+ if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
+ return (long) exp;
+ }
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_load_acquire
+ return (long) AO_load_acquire(&var->counter);
+#else
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1_acquire
+ return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+#else
+ long res = ethr_native_atomic_add_return(var, 1);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+{
+#ifdef AO_HAVE_store_release
+ AO_store_release(&var->counter, (AO_t) value);
+#else
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1_release
+ return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return_relb(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_acquire
+ long act;
+ do {
+ if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ AO_nop_full();
+ return act;
+#else
+ long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ETHR_MEMORY_BARRIER;
+ return act;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_release
+ long act;
+ do {
+ if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#endif
+}
+
+
+#endif
+
+#endif
+
+#endif
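A typical consumer of the *_relb operations defined above is a reference
count: the final decrement must be release-ordered so that earlier writes to
the object are visible before it can be reclaimed. A hedged usage sketch
against the ethr_native_* operations in this file (object_t and free_object()
are hypothetical):

    typedef struct {
        ethr_native_atomic_t refc;   /* reference count */
        /* ... payload ... */
    } object_t;

    void object_release(object_t *obj)
    {
        /* Release-ordered decrement: this thread's writes to the object
         * become visible before the count can be observed as zero. */
        if (ethr_native_atomic_dec_return_relb(&obj->refc) == 0) {
            /* Last reference. A real implementation would also want
             * acquire ordering here before touching the object;
             * free_object() is a hypothetical reclamation helper. */
            free_object(obj);
        }
    }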
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
new file mode 100644
index 0000000000..ee73ba73bc
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_LIBATOMIC_OPS_H__
+#define ETHREAD_LIBATOMIC_OPS_H__
+
+#include "ethr_atomic.h"
+
+#endif
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index 105d874995..f21f7c9588 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -34,7 +34,7 @@ typedef struct {
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
@@ -205,6 +205,26 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
return old;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_PPC_ATOMIC_H */
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
index d2a72c3dc1..12efc1b653 100644
--- a/erts/include/internal/ppc32/ethread.h
+++ b/erts/include/internal/ppc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
index 9bdab12826..19ec26ab68 100644
--- a/erts/include/internal/ppc32/rwlock.h
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
index 034c20c143..c8460a3e8a 100644
--- a/erts/include/internal/ppc32/spinlock.h
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
new file mode 100644
index 0000000000..104ec287e0
--- /dev/null
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -0,0 +1,151 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#if defined(ETHR_HAVE_LINUX_FUTEX) && defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Linux futex implementation of ethread events ------------------------- */
+#define ETHR_LINUX_FUTEX_IMPL__
+
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/time.h>
+
+/*
+ * Note: Linux futexes operate on 32-bit integers, but
+ * ethr_native_atomic_t is 64 bits wide on 64-bit
+ * platforms. This has to be taken into account.
+ * Therefore, every byte of each value used below is the
+ * same, so the 32 bits the futex inspects match the full word.
+ */
+
+#if ETHR_SIZEOF_PTR == 8
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
+#define ETHR_EVENT_OFF__ 0x7777777777777777L
+#define ETHR_EVENT_ON__ 0L
+
+#elif ETHR_SIZEOF_PTR == 4
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
+#define ETHR_EVENT_OFF__ 0x77777777L
+#define ETHR_EVENT_ON__ 0L
+
+#else
+
+#error Unexpected pointer size
+
+#endif
+
+#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE_PRIVATE
+#else
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE
+#endif
+
+typedef struct {
+ ethr_atomic_t futex;
+} ethr_event;
+
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, (void *) (FTX), (OP), (int) (VAL), NULL, NULL, 0)\
+ ? errno : 0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->futex, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+typedef struct {
+ ethr_atomic_t state;
+ pthread_mutex_t mtx;
+ pthread_cond_t cnd;
+} ethr_event;
+
+#define ETHR_EVENT_OFF_WAITER__ -1L
+#define ETHR_EVENT_OFF__ 1L
+#define ETHR_EVENT_ON__ 0L
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_cond_signal(&e->cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_mutex_unlock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
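Only the set/reset half of the futex protocol is inlined in this header; the
blocking half lives in erts/lib_src/pthread/ethr_event.c. A minimal sketch of
what a waiter has to do under the state protocol above (illustrative, not the
actual ethr_event_wait; error handling reduced to the bare minimum):

    static int event_wait_sketch(ethr_event *e)
    {
        while (1) {
            long st = ethr_atomic_read(&e->futex);
            if (st == ETHR_EVENT_ON__)
                return 0;                        /* event already set */
            if (st == ETHR_EVENT_OFF__) {
                /* announce that a waiter exists before sleeping */
                st = ethr_atomic_cmpxchg(&e->futex,
                                         ETHR_EVENT_OFF_WAITER__,
                                         ETHR_EVENT_OFF__);
                if (st != ETHR_EVENT_OFF__)
                    continue;                    /* raced with ethr_event_set() */
            }
            /* the kernel sleeps us only if the word still reads OFF_WAITER */
            (void) ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__,
                                ETHR_EVENT_OFF_WAITER__);
        }
    }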
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 8fde449a52..2a995d4465 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -32,7 +32,7 @@ typedef struct {
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
: : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#if defined(__arch64__)
#define CASX "casx"
@@ -172,6 +172,43 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
return new;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_set(var, i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ return ethr_native_atomic_dec_return(var);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHR_SPARC32_ATOMIC_H */
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index 1d55399640..dca113b4d6 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_SPARC32_ETHREAD_H */
diff --git a/erts/include/internal/sparc32/rwlock.h b/erts/include/internal/sparc32/rwlock.h
index 12448e0b06..465ec96866 100644
--- a/erts/include/internal/sparc32/rwlock.h
+++ b/erts/include/internal/sparc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/sparc32/spinlock.h b/erts/include/internal/sparc32/spinlock.h
index b4fe48b714..493d514210 100644
--- a/erts/include/internal/sparc32/spinlock.h
+++ b/erts/include/internal/sparc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile unsigned char lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 5e4c7ac9fe..69569d82d1 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -34,7 +34,7 @@ typedef struct {
#define ETHR_MEMORY_BARRIER __insn_mf()
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
@@ -45,7 +45,6 @@ ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
static ETHR_INLINE void
ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
{
- __insn_mf();
atomic_exchange_acq(&var->counter, i);
}
@@ -58,28 +57,24 @@ ethr_native_atomic_read(ethr_native_atomic_t *var)
static ETHR_INLINE void
ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
ethr_native_atomic_inc(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_increment(&var->counter);
}
static ETHR_INLINE void
ethr_native_atomic_dec(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_decrement(&var->counter);
}
static ETHR_INLINE long
ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
@@ -98,33 +93,81 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
static ETHR_INLINE long
ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_and_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
{
- __insn_mf();
return atomic_or_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
{
- __insn_mf();
return atomic_exchange_acq(&var->counter, val);
}
static ETHR_INLINE long
ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_inc_return(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long val)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, val);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_TILE_ATOMIC_H */
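A concrete reason the _acqb/_relb variants exist across all of these backends
is lock implementation: taking a lock needs acquire ordering on success, and
releasing it needs release ordering on the final store. A hedged sketch built
only on the native operations above (function names are illustrative):

    /* A test-and-set spinlock on the native ops: 0 = free, 1 = held. */
    static void spin_lock_sketch(ethr_native_atomic_t *lock_word)
    {
        /* acquire ordering: critical-section accesses cannot float above */
        while (ethr_native_atomic_cmpxchg_acqb(lock_word, 1, 0) != 0)
            ;   /* spin until we swap 0 -> 1 */
    }

    static void spin_unlock_sketch(ethr_native_atomic_t *lock_word)
    {
        /* release ordering: critical-section writes complete before the
         * lock word is seen as free again */
        ethr_native_atomic_set_relb(lock_word, 0);
    }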
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
new file mode 100644
index 0000000000..500459dd6c
--- /dev/null
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -0,0 +1,244 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_WIN_ATOMIC_H__
+#define ETHR_WIN_ATOMIC_H__
+
+#ifdef _MSC_VER
+# if _MSC_VER < 1300
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Don't trust really old compilers */
+# else
+# if defined(_M_IX86)
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else /* I.e. IA64 */
+# if _MSC_VER >= 1400
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# endif
+# endif
+# endif
+# if _MSC_VER >= 1400
+# include <intrin.h>
+# undef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+#endif
+
+/*
+ * No configure test checking for the _Interlocked*_{acq,rel} and
+ * Interlocked*{Acquire,Release} functions has been written yet.
+ *
+ * Note that these are pure optimizations for the Itanium
+ * processor.
+ */
+
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+#pragma intrinsic(_InterlockedCompareExchange_acq)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+#pragma intrinsic(_InterlockedCompareExchange_rel)
+#endif
+
+
+typedef struct {
+ volatile LONG value;
+} ethr_native_atomic_t;
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile LONG x___ = 0; \
+ _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
+} while (0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+{
+ var->value = (LONG) i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ var->value = (LONG) i;
+#else
+ (void) InterlockedExchange(&var->value, (LONG) i);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ return var->value;
+#else
+ return InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+{
+ LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
+ return tmp + i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedAnd(&var->value, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedOr(&var->value, mask);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+{
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ return (long) InterlockedExchange(&var->value, (LONG) new);
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
+ return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#else
+ return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
+ return (long) InterlockedIncrementAcquire(&var->value);
+#else
+ return (long) InterlockedIncrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ (void) InterlockedExchange(&var->value, (LONG) i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ (void) InterlockedDecrementRelease(&var->value);
+#else
+ (void) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ return (long) InterlockedDecrementRelease(&var->value);
+#else
+ return (long) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+ return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+ return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+#endif
+
+#endif
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
new file mode 100644
index 0000000000..af57c20f91
--- /dev/null
+++ b/erts/include/internal/win/ethr_event.h
@@ -0,0 +1,62 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
+#define ETHR_EVENT_OFF__ ((LONG) 1)
+#define ETHR_EVENT_ON__ ((LONG) 0)
+
+typedef struct {
+ volatile LONG state;
+ HANDLE handle;
+} ethr_event;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important */
+ LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ if (state == ETHR_EVENT_OFF_WAITER__) {
+ if (!SetEvent(e->handle))
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important */
+ InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+}
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
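
The event above is a three-state flag: ETHR_EVENT_ON__ (set), ETHR_EVENT_OFF__ (clear, nobody sleeping) and ETHR_EVENT_OFF_WAITER__ (clear, a waiter may be blocked on the kernel handle). ethr_event_set() therefore only pays for the SetEvent() call when the previous state records a waiter. The waiter side lives in erts/lib_src/win/ethr_event.c; a hedged sketch of what such a wait loop could look like (illustrative fragment, not the actual implementation):

    /* Announce ourselves with OFF -> OFF_WAITER before blocking on the
     * kernel event; re-check the state after every wakeup. */
    for (;;) {
        LONG state = InterlockedCompareExchange(&e->state,
                                                ETHR_EVENT_OFF_WAITER__,
                                                ETHR_EVENT_OFF__);
        if (state == ETHR_EVENT_ON__)
            break;                          /* already set; don't block */
        WaitForSingleObject(e->handle, INFINITE);
        if (InterlockedExchangeAdd(&e->state, (LONG) 0) == ETHR_EVENT_ON__)
            break;                          /* set while we were blocked */
    }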
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
new file mode 100644
index 0000000000..b52710f6a3
--- /dev/null
+++ b/erts/include/internal/win/ethread.h
@@ -0,0 +1,31 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomic and spinlock ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_WIN_H__
+#define ETHREAD_WIN_H__
+
+#include "ethr_atomic.h"
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#endif
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index e7caac8072..0d3181cace 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -280,8 +280,13 @@ endif
#
# ethread library
#
+ETHR_THR_LIB_BASE_DIR=@ETHR_THR_LIB_BASE_DIR@
ifneq ($(strip $(ETHR_LIB_NAME)),)
-ETHREAD_LIB_SRC=common/ethread.c
+ETHREAD_LIB_SRC=common/ethr_aux.c \
+ common/ethr_mutex.c \
+ common/ethr_cbf.c \
+ $(ETHR_THR_LIB_BASE_DIR)/ethread.c \
+ $(ETHR_THR_LIB_BASE_DIR)/ethr_event.c
ETHREAD_LIB_NAME=ethread$(TYPE_SUFFIX)
ifeq ($(USING_VC),yes)
@@ -379,13 +384,13 @@ $(ERTS_LIB): $(ERTS_LIB_OBJS)
$(r_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
-$(r_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(r_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
$(OBJ_DIR)/%.o: common/%.c
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
-$(OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
# Win32 specific
@@ -393,25 +398,25 @@ $(OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
$(MD_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
-$(MD_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MD_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
$(MDd_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
-$(MDd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MDd_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
$(MT_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
-$(MT_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MT_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
$(MTd_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
-$(MTd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MTd_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
#
@@ -438,6 +443,8 @@ RELEASE_LIBS=$(ERTS_LIBS)
INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/README \
$(ERTS_INCL_INT)/ethread.h \
+ $(ERTS_INCL_INT)/ethr_mutex.h \
+ $(ERTS_INCL_INT)/ethr_optimized_fallbacks.h \
$(ERTS_INCL_INT)/$(TARGET)/ethread.mk \
$(ERTS_INCL_INT)/$(TARGET)/erts_internal.mk \
$(ERTS_INCL_INT)/$(TARGET)/ethread_header_config.h \
@@ -447,7 +454,8 @@ INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/erl_misc_utils.h \
$(ERTS_INCL_INT)/erl_errno.h
-INTERNAL_X_RELEASE_INCLUDE_DIRS= i386 x86_64 ppc32 sparc32 sparc64 tile gcc
+INTERNAL_X_RELEASE_INCLUDE_DIRS= \
+ i386 x86_64 ppc32 sparc32 sparc64 tile gcc pthread win libatomic_ops
INTERNAL_RELEASE_LIBS= \
../lib/internal/README \
diff --git a/erts/lib_src/common/erl_misc_utils.c b/erts/lib_src/common/erl_misc_utils.c
index f70db86960..d2ef7140a5 100644
--- a/erts/lib_src/common/erl_misc_utils.c
+++ b/erts/lib_src/common/erl_misc_utils.c
@@ -59,8 +59,25 @@
# endif
#endif
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(HAVE_SCHED_xETAFFINITY)
# include <sched.h>
+# define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
+# define ERTS_MU_GET_PROC_AFFINITY__(CPUINFOP) \
+ (sched_getaffinity((CPUINFOP)->pid, \
+ sizeof(cpu_set_t), \
+ &(CPUINFOP)->cpuset) != 0 ? -errno : 0)
+# define ERTS_MU_SET_THR_AFFINITY__(SETP) \
+ (sched_setaffinity(0, sizeof(cpu_set_t), (SETP)) != 0 ? -errno : 0)
+#elif defined(__WIN32__)
+# define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
+# define cpu_set_t DWORD
+# define CPU_SETSIZE (sizeof(DWORD)*8)
+# define CPU_ZERO(SETP) (*(SETP) = (DWORD) 0)
+# define CPU_SET(CPU, SETP) (*(SETP) |= (((DWORD) 1) << (CPU)))
+# define CPU_CLR(CPU, SETP) (*(SETP) &= ~(((DWORD) 1) << (CPU)))
+# define CPU_ISSET(CPU, SETP) ((*(SETP) & (((DWORD) 1) << (CPU))) != (DWORD) 0)
+# define ERTS_MU_GET_PROC_AFFINITY__ get_proc_affinity
+# define ERTS_MU_SET_THR_AFFINITY__ set_thr_affinity
#endif
#ifdef HAVE_PSET_INFO
# include <sys/pset.h>
@@ -105,25 +122,58 @@ struct erts_cpu_info_t_ {
int available;
int topology_size;
erts_cpu_topology_t *topology;
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
char *affinity_str;
char affinity_str_buf[CPU_SETSIZE/4+2];
cpu_set_t cpuset;
+#if defined(HAVE_SCHED_xETAFFINITY)
pid_t pid;
+#endif
#elif defined(HAVE_PSET_INFO)
processorid_t *cpuids;
#endif
};
+#if defined(__WIN32__)
+
+static __forceinline int
+get_proc_affinity(erts_cpu_info_t *cpuinfo)
+{
+ DWORD pamask, samask;
+ if (GetProcessAffinityMask(GetCurrentProcess(), &pamask, &samask)) {
+ cpuinfo->cpuset = (cpu_set_t) pamask;
+ return 0;
+ }
+ else {
+ cpuinfo->cpuset = (cpu_set_t) 0;
+ return -erts_get_last_win_errno();
+ }
+}
+
+static __forceinline int
+set_thr_affinity(cpu_set_t *set)
+{
+ if (*set == (cpu_set_t) 0)
+ return -ENOTSUP;
+ if (SetThreadAffinityMask(GetCurrentThread(), *set) == 0)
+ return -erts_get_last_win_errno();
+ else
+ return 0;
+}
+
+#endif
+
erts_cpu_info_t *
erts_cpu_info_create(void)
{
erts_cpu_info_t *cpuinfo = malloc(sizeof(erts_cpu_info_t));
if (!cpuinfo)
return NULL;
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
cpuinfo->affinity_str = NULL;
+#if defined(HAVE_SCHED_xETAFFINITY)
cpuinfo->pid = getpid();
+#endif
#elif defined(HAVE_PSET_INFO)
cpuinfo->cpuids = NULL;
#endif
@@ -162,10 +212,13 @@ erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
#ifdef __WIN32__
{
+ int i;
SYSTEM_INFO sys_info;
GetSystemInfo(&sys_info);
cpuinfo->configured = (int) sys_info.dwNumberOfProcessors;
-
+ for (i = 0; i < sizeof(DWORD)*8; i++)
+ if (sys_info.dwActiveProcessorMask & (((DWORD) 1) << i))
+ cpuinfo->online++;
}
#elif !defined(NO_SYSCONF) && (defined(_SC_NPROCESSORS_CONF) \
|| defined(_SC_NPROCESSORS_ONLN))
@@ -205,8 +258,8 @@ erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
if (cpuinfo->online > cpuinfo->configured)
cpuinfo->online = cpuinfo->configured;
-#ifdef HAVE_SCHED_xETAFFINITY
- if (sched_getaffinity(cpuinfo->pid, sizeof(cpu_set_t), &cpuinfo->cpuset) == 0) {
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ if (ERTS_MU_GET_PROC_AFFINITY__(cpuinfo) == 0) {
int i, c, cn, si;
c = cn = 0;
si = sizeof(cpuinfo->affinity_str_buf) - 1;
@@ -289,7 +342,7 @@ erts_get_cpu_available(erts_cpu_info_t *cpuinfo)
char *
erts_get_unbind_from_cpu_str(erts_cpu_info_t *cpuinfo)
{
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
if (!cpuinfo)
return "false";
return cpuinfo->affinity_str;
@@ -303,7 +356,7 @@ erts_get_available_cpu(erts_cpu_info_t *cpuinfo, int no)
{
if (!cpuinfo || no < 1 || cpuinfo->available < no)
return -EINVAL;
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
{
cpu_set_t *allowed = &cpuinfo->cpuset;
int ix, n;
@@ -335,8 +388,8 @@ int
erts_is_cpu_available(erts_cpu_info_t *cpuinfo, int id)
{
if (cpuinfo && 0 <= id) {
-#ifdef HAVE_SCHED_xETAFFINITY
- if (id <= CPU_SETSIZE)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ if (id < CPU_SETSIZE)
return CPU_ISSET(id, &cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
int no;
@@ -388,7 +441,7 @@ erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
*/
if (!cpuinfo)
return -EINVAL;
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
{
cpu_set_t bind_set;
if (cpu < 0)
@@ -398,9 +451,7 @@ erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
CPU_ZERO(&bind_set);
CPU_SET(cpu, &bind_set);
- if (sched_setaffinity(0, sizeof(cpu_set_t), &bind_set) != 0)
- return -errno;
- return 0;
+ return ERTS_MU_SET_THR_AFFINITY__(&bind_set);
}
#elif defined(HAVE_PROCESSOR_BIND)
if (cpu < 0)
@@ -418,10 +469,8 @@ erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
{
if (!cpuinfo)
return -EINVAL;
-#if defined(HAVE_SCHED_xETAFFINITY)
- if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuinfo->cpuset) != 0)
- return -errno;
- return 0;
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ return ERTS_MU_SET_THR_AFFINITY__(&cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
return -errno;
@@ -434,7 +483,7 @@ erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
int
erts_unbind_from_cpu_str(char *str)
{
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
char *c = str;
int cpus = 0;
int shft = 0;
@@ -486,9 +535,7 @@ erts_unbind_from_cpu_str(char *str)
if (!cpus)
return -EINVAL;
- if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0)
- return -errno;
- return 0;
+ return ERTS_MU_SET_THR_AFFINITY__(&cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
return -errno;
@@ -965,3 +1012,91 @@ read_topology(erts_cpu_info_t *cpuinfo)
}
#endif
+
+#if defined(__WIN32__)
+
+int
+erts_get_last_win_errno(void)
+{
+ switch (GetLastError()) {
+ case ERROR_INVALID_FUNCTION: return EINVAL; /* 1 */
+ case ERROR_FILE_NOT_FOUND: return ENOENT; /* 2 */
+ case ERROR_PATH_NOT_FOUND: return ENOENT; /* 3 */
+ case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; /* 4 */
+ case ERROR_ACCESS_DENIED: return EACCES; /* 5 */
+ case ERROR_INVALID_HANDLE: return EBADF; /* 6 */
+ case ERROR_ARENA_TRASHED: return ENOMEM; /* 7 */
+ case ERROR_NOT_ENOUGH_MEMORY: return ENOMEM; /* 8 */
+ case ERROR_INVALID_BLOCK: return ENOMEM; /* 9 */
+ case ERROR_BAD_ENVIRONMENT: return E2BIG; /* 10 */
+ case ERROR_BAD_FORMAT: return ENOEXEC; /* 11 */
+ case ERROR_INVALID_ACCESS: return EINVAL; /* 12 */
+ case ERROR_INVALID_DATA: return EINVAL; /* 13 */
+ case ERROR_OUTOFMEMORY: return ENOMEM; /* 14 */
+ case ERROR_INVALID_DRIVE: return ENOENT; /* 15 */
+ case ERROR_CURRENT_DIRECTORY: return EACCES; /* 16 */
+ case ERROR_NOT_SAME_DEVICE: return EXDEV; /* 17 */
+ case ERROR_NO_MORE_FILES: return ENOENT; /* 18 */
+ case ERROR_WRITE_PROTECT: return EACCES; /* 19 */
+ case ERROR_BAD_UNIT: return EACCES; /* 20 */
+ case ERROR_NOT_READY: return EACCES; /* 21 */
+ case ERROR_BAD_COMMAND: return EACCES; /* 22 */
+ case ERROR_CRC: return EACCES; /* 23 */
+ case ERROR_BAD_LENGTH: return EACCES; /* 24 */
+ case ERROR_SEEK: return EACCES; /* 25 */
+ case ERROR_NOT_DOS_DISK: return EACCES; /* 26 */
+ case ERROR_SECTOR_NOT_FOUND: return EACCES; /* 27 */
+ case ERROR_OUT_OF_PAPER: return EACCES; /* 28 */
+ case ERROR_WRITE_FAULT: return EACCES; /* 29 */
+ case ERROR_READ_FAULT: return EACCES; /* 30 */
+ case ERROR_GEN_FAILURE: return EACCES; /* 31 */
+ case ERROR_SHARING_VIOLATION: return EACCES; /* 32 */
+ case ERROR_LOCK_VIOLATION: return EACCES; /* 33 */
+ case ERROR_WRONG_DISK: return EACCES; /* 34 */
+ case ERROR_SHARING_BUFFER_EXCEEDED: return EACCES; /* 36 */
+ case ERROR_BAD_NETPATH: return ENOENT; /* 53 */
+ case ERROR_NETWORK_ACCESS_DENIED: return EACCES; /* 65 */
+ case ERROR_BAD_NET_NAME: return ENOENT; /* 67 */
+ case ERROR_FILE_EXISTS: return EEXIST; /* 80 */
+ case ERROR_CANNOT_MAKE: return EACCES; /* 82 */
+ case ERROR_FAIL_I24: return EACCES; /* 83 */
+ case ERROR_INVALID_PARAMETER: return EINVAL; /* 87 */
+ case ERROR_NO_PROC_SLOTS: return EAGAIN; /* 89 */
+ case ERROR_DRIVE_LOCKED: return EACCES; /* 108 */
+ case ERROR_BROKEN_PIPE: return EPIPE; /* 109 */
+ case ERROR_DISK_FULL: return ENOSPC; /* 112 */
+ case ERROR_INVALID_TARGET_HANDLE: return EBADF; /* 114 */
+ case ERROR_WAIT_NO_CHILDREN: return ECHILD; /* 128 */
+ case ERROR_CHILD_NOT_COMPLETE: return ECHILD; /* 129 */
+ case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; /* 130 */
+ case ERROR_NEGATIVE_SEEK: return EINVAL; /* 131 */
+ case ERROR_SEEK_ON_DEVICE: return EACCES; /* 132 */
+ case ERROR_DIR_NOT_EMPTY: return ENOTEMPTY;/* 145 */
+ case ERROR_NOT_LOCKED: return EACCES; /* 158 */
+ case ERROR_BAD_PATHNAME: return ENOENT; /* 161 */
+ case ERROR_MAX_THRDS_REACHED: return EAGAIN; /* 164 */
+ case ERROR_LOCK_FAILED: return EACCES; /* 167 */
+ case ERROR_ALREADY_EXISTS: return EEXIST; /* 183 */
+ case ERROR_INVALID_STARTING_CODESEG: return ENOEXEC; /* 188 */
+ case ERROR_INVALID_STACKSEG: return ENOEXEC; /* 189 */
+ case ERROR_INVALID_MODULETYPE: return ENOEXEC; /* 190 */
+ case ERROR_INVALID_EXE_SIGNATURE: return ENOEXEC; /* 191 */
+ case ERROR_EXE_MARKED_INVALID: return ENOEXEC; /* 192 */
+ case ERROR_BAD_EXE_FORMAT: return ENOEXEC; /* 193 */
+ case ERROR_ITERATED_DATA_EXCEEDS_64k: return ENOEXEC; /* 194 */
+ case ERROR_INVALID_MINALLOCSIZE: return ENOEXEC; /* 195 */
+ case ERROR_DYNLINK_FROM_INVALID_RING: return ENOEXEC; /* 196 */
+ case ERROR_IOPL_NOT_ENABLED: return ENOEXEC; /* 197 */
+ case ERROR_INVALID_SEGDPL: return ENOEXEC; /* 198 */
+ case ERROR_AUTODATASEG_EXCEEDS_64k: return ENOEXEC; /* 199 */
+ case ERROR_RING2SEG_MUST_BE_MOVABLE: return ENOEXEC; /* 200 */
+ case ERROR_RELOC_CHAIN_XEEDS_SEGLIM: return ENOEXEC; /* 201 */
+ case ERROR_INFLOOP_IN_RELOC_CHAIN: return ENOEXEC; /* 202 */
+ case ERROR_FILENAME_EXCED_RANGE: return ENOENT; /* 206 */
+ case ERROR_NESTING_NOT_ALLOWED: return EAGAIN; /* 215 */
+ case ERROR_NOT_ENOUGH_QUOTA: return ENOMEM; /* 1816 */
+ default: return EINVAL;
+ }
+}
+
+#endif
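
On Windows the patch emulates the POSIX cpu_set_t interface with a plain DWORD bit mask, which caps the emulation at CPU_SETSIZE = sizeof(DWORD)*8 = 32 logical processors. A stand-alone sketch of the same macro technique, runnable anywhere (the sk_/SK_ names are invented for the example):

    #include <stdio.h>

    typedef unsigned long sk_cpu_set_t;            /* stands in for DWORD */
    #define SK_ZERO(SETP)       (*(SETP) = 0UL)
    #define SK_SET(CPU, SETP)   (*(SETP) |= (1UL << (CPU)))
    #define SK_ISSET(CPU, SETP) ((*(SETP) & (1UL << (CPU))) != 0UL)

    int main(void)
    {
        sk_cpu_set_t set;
        SK_ZERO(&set);
        SK_SET(0, &set);
        SK_SET(2, &set);
        /* prints "0 1": cpu 1 is not in the mask, cpu 2 is */
        printf("%d %d\n", SK_ISSET(1, &set) ? 1 : 0, SK_ISSET(2, &set) ? 1 : 0);
        return 0;
    }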
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
new file mode 100644
index 0000000000..4db4cffd3a
--- /dev/null
+++ b/erts/lib_src/common/ethr_aux.c
@@ -0,0 +1,762 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A Thread library for use in the ERTS and other OTP
+ * applications.
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_AUX_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+#include <string.h>
+#include <limits.h>
+
+#ifndef __WIN32__
+#include <unistd.h>
+#endif
+
+#define ERTS_TS_EV_ALLOC_DEFAULT_POOL_SIZE 100
+#define ERTS_TS_EV_ALLOC_POOL_SIZE 25
+
+erts_cpu_info_t *ethr_cpu_info__;
+
+int ethr_not_completely_inited__ = 1;
+int ethr_not_inited__ = 1;
+
+ethr_memory_allocators ethr_mem__ = ETHR_MEM_ALLOCS_DEF_INITER__;
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+#endif
+
+void *(*ethr_thr_prepare_func__)(void) = NULL;
+void (*ethr_thr_parent_func__)(void *) = NULL;
+void (*ethr_thr_child_func__)(void *) = NULL;
+
+typedef struct ethr_xhndl_list_ ethr_xhndl_list;
+struct ethr_xhndl_list_ {
+ ethr_xhndl_list *next;
+ void (*funcp)(void);
+};
+
+size_t ethr_pagesize__;
+size_t ethr_min_stack_size__; /* kilo words */
+size_t ethr_max_stack_size__; /* kilo words */
+
+ethr_rwmutex xhndl_rwmtx;
+ethr_xhndl_list *xhndl_list;
+
+static int main_threads;
+
+static int init_ts_event_alloc(void);
+
+int
+ethr_init_common__(ethr_init_data *id)
+{
+ int res;
+ if (id) {
+ ethr_thr_prepare_func__ = id->thread_create_prepare_func;
+ ethr_thr_parent_func__ = id->thread_create_parent_func;
+ ethr_thr_child_func__ = id->thread_create_child_func;
+ }
+
+ ethr_cpu_info__ = erts_cpu_info_create();
+ if (!ethr_cpu_info__)
+ return ENOMEM;
+
+#ifdef _SC_PAGESIZE
+ ethr_pagesize__ = (size_t) sysconf(_SC_PAGESIZE);
+#elif defined(HAVE_GETPAGESIZE)
+ ethr_pagesize__ = (size_t) getpagesize();
+#else
+ ethr_pagesize__ = (size_t) 4*1024; /* Guess 4 KB */
+#endif
+
+ /* User needs at least 4 KB */
+ ethr_min_stack_size__ = 4*1024;
+#if SIZEOF_VOID_P == 8
+ /* Double that on 64-bit archs */
+ ethr_min_stack_size__ *= 2;
+#endif
+    /* On some systems, up to about 4 KB is used by the system itself */
+ ethr_min_stack_size__ += 4*1024;
+ /* There should be room for signal handlers */
+#ifdef SIGSTKSZ
+ ethr_min_stack_size__ += SIGSTKSZ;
+#else
+ ethr_min_stack_size__ += ethr_pagesize__;
+#endif
+ /* The system may think that we need more stack */
+#if defined(PTHREAD_STACK_MIN)
+ if (ethr_min_stack_size__ < PTHREAD_STACK_MIN)
+ ethr_min_stack_size__ = PTHREAD_STACK_MIN;
+#elif defined(_SC_THREAD_STACK_MIN)
+ {
+ size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
+ if (ethr_min_stack_size__ < thr_min_stk_sz)
+ ethr_min_stack_size__ = thr_min_stk_sz;
+ }
+#endif
+    /* On some platforms, the guard area is included in the stack size
+       passed when creating threads */
+#ifdef ETHR_STACK_GUARD_SIZE
+ ethr_min_stack_size__ += ETHR_STACK_GUARD_SIZE;
+#endif
+ ethr_min_stack_size__ = ETHR_PAGE_ALIGN(ethr_min_stack_size__);
+
+ ethr_min_stack_size__ = ETHR_B2KW(ethr_min_stack_size__);
+
+ ethr_max_stack_size__ = 32*1024*1024;
+#if SIZEOF_VOID_P == 8
+ ethr_max_stack_size__ *= 2;
+#endif
+ ethr_max_stack_size__ = ETHR_B2KW(ethr_max_stack_size__);
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ {
+ int i;
+ for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
+ res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
+ if (res != 0)
+ return res;
+ }
+ }
+#endif
+
+ res = ethr_mutex_lib_init(erts_get_cpu_configured(ethr_cpu_info__));
+ if (res != 0)
+ return res;
+
+ xhndl_list = NULL;
+
+ return 0;
+}
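
The minimum stack size computed above accumulates several conservative terms (user allowance, system overhead, signal-handler space, the platform minimum, the guard area) and then performs two closing steps: round up to a whole page and convert bytes to kilowords. A sketch of those two steps, assuming the usual meanings of ETHR_PAGE_ALIGN and ETHR_B2KW (the helper names and definitions here are illustrative, not quotes from ethread.h):

    #include <stddef.h>

    /* round sz up to the next multiple of pagesize */
    static size_t page_align_sketch(size_t sz, size_t pagesize)
    {
        return ((sz - 1) / pagesize + 1) * pagesize;
    }

    /* bytes -> kilo words, i.e. units of 1024 pointer-sized words */
    static size_t b2kw_sketch(size_t bytes)
    {
        return bytes / (sizeof(void *) * 1024);
    }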
+
+int
+ethr_late_init_common__(ethr_late_init_data *lid)
+{
+ ethr_ts_event *tsep = NULL;
+ int reader_groups;
+ int res;
+ int i;
+ ethr_memory_allocator *m[] = {&ethr_mem__.std,
+ &ethr_mem__.sl,
+ &ethr_mem__.ll};
+ if (lid)
+ ethr_mem__ = lid->mem;
+ if (!ethr_mem__.std.alloc
+ || !ethr_mem__.std.realloc
+ || !ethr_mem__.std.free) {
+ ethr_mem__.std.alloc = malloc;
+ ethr_mem__.std.realloc = realloc;
+ ethr_mem__.std.free = free;
+ }
+ for (i = 0; i < sizeof(m)/sizeof(m[0]); i++) {
+ if (!m[i]->alloc || !m[i]->realloc || !m[i]->free) {
+ m[i]->alloc = ethr_mem__.std.alloc;
+ m[i]->realloc = ethr_mem__.std.realloc;
+ m[i]->free = ethr_mem__.std.free;
+ }
+ }
+ res = init_ts_event_alloc();
+ if (res != 0)
+ return res;
+ res = ethr_make_ts_event__(&tsep);
+ if (res == 0)
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ if (!lid) {
+ main_threads = 0;
+ reader_groups = 0;
+ }
+ else {
+ if (lid->main_threads < 0 || USHRT_MAX < lid->main_threads)
+	    return EINVAL;
+ main_threads = lid->main_threads;
+ reader_groups = lid->reader_groups;
+ }
+ res = ethr_mutex_lib_late_init(reader_groups, main_threads);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0; /* Need it for
+ rwmutex_init */
+ res = ethr_rwmutex_init(&xhndl_rwmtx);
+ ethr_not_completely_inited__ = 1;
+ if (res != 0)
+ return res;
+ return 0;
+}
+
+int
+ethr_install_exit_handler(void (*funcp)(void))
+{
+ ethr_xhndl_list *xhp;
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (!funcp)
+ return EINVAL;
+
+ xhp = (ethr_xhndl_list *) ethr_mem__.std.alloc(sizeof(ethr_xhndl_list));
+ if (!xhp)
+ return ENOMEM;
+
+ ethr_rwmutex_rwlock(&xhndl_rwmtx);
+
+ xhp->funcp = funcp;
+ xhp->next = xhndl_list;
+ xhndl_list = xhp;
+
+ ethr_rwmutex_rwunlock(&xhndl_rwmtx);
+
+ return 0;
+}
+
+void
+ethr_run_exit_handlers__(void)
+{
+ ethr_xhndl_list *xhp;
+
+ ethr_rwmutex_rlock(&xhndl_rwmtx);
+
+ xhp = xhndl_list;
+
+ ethr_rwmutex_runlock(&xhndl_rwmtx);
+
+ for (; xhp; xhp = xhp->next)
+ (*xhp->funcp)();
+}
+
+/*
+ * Thread specific event alloc, etc.
+ *
+ * Note that we don't know when it is safe to destroy an event, but
+ * we know when it is safe to reuse it. ts_event_free() therefore
+ * never destroys an event (but makes freed events available for
+ * reuse).
+ *
+ * We could easily keep track of the usage of events, and by this
+ * make it possible to destroy events. We would however suffer a
+ * performance penalty for this and save very little memory.
+ */
+
+typedef union {
+ ethr_ts_event ts_ev;
+ char align[ETHR_CACHE_LINE_ALIGN_SIZE(sizeof(ethr_ts_event))];
+} ethr_aligned_ts_event;
+
+static ethr_spinlock_t ts_ev_alloc_lock;
+static ethr_ts_event *free_ts_ev;
+
+#if SIZEOF_VOID_P == SIZEOF_INT
+typedef unsigned int EthrPtrSzUInt;
+#elif SIZEOF_VOID_P == SIZEOF_LONG
+typedef unsigned long EthrPtrSzUInt;
+#else
+#error No pointer sized integer type
+#endif
+
+static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
+{
+ int i;
+ ethr_aligned_ts_event *atsev;
+ atsev = ethr_mem__.std.alloc(sizeof(ethr_aligned_ts_event) * size
+ + ETHR_CACHE_LINE_SIZE);
+ if (!atsev)
+ return NULL;
+    if ((((EthrPtrSzUInt) atsev) & ETHR_CACHE_LINE_MASK) != 0)
+ atsev = ((ethr_aligned_ts_event *)
+ ((((EthrPtrSzUInt) atsev) & ~ETHR_CACHE_LINE_MASK)
+ + ETHR_CACHE_LINE_SIZE));
+ for (i = 1; i < size; i++) {
+ atsev[i-1].ts_ev.next = &atsev[i].ts_ev;
+ ethr_atomic_init(&atsev[i-1].ts_ev.uaflgs, 0);
+ atsev[i-1].ts_ev.iflgs = 0;
+ }
+ ethr_atomic_init(&atsev[size-1].ts_ev.uaflgs, 0);
+ atsev[size-1].ts_ev.iflgs = 0;
+ atsev[size-1].ts_ev.next = NULL;
+ if (endpp)
+ *endpp = &atsev[size-1].ts_ev;
+ return &atsev[0].ts_ev;
+}
+
+static int init_ts_event_alloc(void)
+{
+ free_ts_ev = ts_event_pool(ERTS_TS_EV_ALLOC_DEFAULT_POOL_SIZE,
+ NULL);
+ if (!free_ts_ev)
+ return ENOMEM;
+ return ethr_spinlock_init(&ts_ev_alloc_lock);
+}
+
+static ethr_ts_event *ts_event_alloc(void)
+{
+ ethr_ts_event *ts_ev;
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ if (free_ts_ev) {
+ ts_ev = free_ts_ev;
+ free_ts_ev = ts_ev->next;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+ }
+ else {
+ ethr_ts_event *ts_ev_pool_end;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+
+ ts_ev = ts_event_pool(ERTS_TS_EV_ALLOC_POOL_SIZE, &ts_ev_pool_end);
+ if (!ts_ev)
+ return NULL;
+
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ ts_ev_pool_end->next = free_ts_ev;
+ free_ts_ev = ts_ev->next;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+ }
+ return ts_ev;
+}
+
+static void ts_event_free(ethr_ts_event *ts_ev)
+{
+ ETHR_ASSERT(!ts_ev->udata);
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ ts_ev->next = free_ts_ev;
+ free_ts_ev = ts_ev;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+}
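
As the comment above the pool code explains, events are recycled through a spinlock-protected free list and never destroyed. The list discipline itself is plain LIFO push/pop; a minimal self-contained sketch of the same idea (locking elided, names invented):

    typedef struct node_ { struct node_ *next; } node_t;
    static node_t *free_list = NULL;

    static void push_free(node_t *n)        /* ts_event_free analogue */
    {
        n->next = free_list;
        free_list = n;
    }

    static node_t *pop_free(void)           /* ts_event_alloc fast path */
    {
        node_t *n = free_list;
        if (n)
            free_list = n->next;
        return n;
    }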
+
+int ethr_make_ts_event__(ethr_ts_event **tsepp)
+{
+ int res;
+ ethr_ts_event *tsep = *tsepp;
+
+ if (!tsep) {
+ tsep = ts_event_alloc();
+ if (!tsep)
+ return ENOMEM;
+ }
+
+ if ((tsep->iflgs & ETHR_TS_EV_INITED) == 0) {
+ res = ethr_event_init(&tsep->event);
+ if (res != 0) {
+ ts_event_free(tsep);
+ return res;
+ }
+ }
+
+ tsep->iflgs = ETHR_TS_EV_INITED;
+ tsep->udata = NULL;
+ tsep->rgix = 0;
+ tsep->mtix = 0;
+
+ res = ethr_set_tse__(tsep);
+ if (res != 0 && tsepp && *tsepp) {
+ ts_event_free(tsep);
+ return res;
+ }
+
+ if (tsepp)
+ *tsepp = tsep;
+
+ return 0;
+}
+
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp)
+{
+ int res;
+ ethr_ts_event *tsep = *tsepp;
+
+ if (!tsep) {
+ tsep = ts_event_alloc();
+ if (!tsep)
+ return ENOMEM;
+ }
+
+ if ((tsep->iflgs & ETHR_TS_EV_INITED) == 0) {
+ res = ethr_event_init(&tsep->event);
+ if (res != 0) {
+ ts_event_free(tsep);
+ return res;
+ }
+ }
+
+ tsep->iflgs = ETHR_TS_EV_INITED|ETHR_TS_EV_TMP;
+ tsep->udata = NULL;
+
+ if (tsepp)
+ *tsepp = tsep;
+
+ return 0;
+}
+
+int ethr_free_ts_event__(ethr_ts_event *tsep)
+{
+ ts_event_free(tsep);
+ return 0;
+}
+
+void ethr_ts_event_destructor__(void *vtsep)
+{
+ if (vtsep) {
+ ethr_ts_event *tsep = (ethr_ts_event *) vtsep;
+ ts_event_free(tsep);
+ ethr_set_tse__(NULL);
+ }
+}
+
+int ethr_set_main_thr_status(int on, int no)
+{
+ ethr_ts_event *tsep = ethr_get_tse__();
+ if (!tsep)
+ return EINVAL;
+ if (on) {
+ if (no < 1 || main_threads < no)
+ return EINVAL;
+ tsep->mtix = (unsigned short) no;
+ tsep->iflgs |= ETHR_TS_EV_MAIN_THR;
+ }
+ else {
+ tsep->iflgs &= ~ETHR_TS_EV_MAIN_THR;
+ tsep->mtix = (unsigned short) 0;
+ }
+ return 0;
+}
+
+int ethr_get_main_thr_status(int *on)
+{
+ ethr_ts_event *tsep = ethr_get_tse__();
+ if (!tsep)
+ *on = 0;
+ else {
+ if (tsep->iflgs & ETHR_TS_EV_MAIN_THR)
+ *on = 1;
+ else
+ *on = 0;
+ }
+ return 0;
+}
+
+
+/* Atomics */
+
+void
+ethr_atomic_init(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(var);
+ ethr_atomic_init__(var, i);
+}
+
+void
+ethr_atomic_set(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set__(var, i);
+}
+
+long
+ethr_atomic_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read__(var);
+}
+
+
+long
+ethr_atomic_add_read(ethr_atomic_t *var, long incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_add_read__(var, incr);
+}
+
+long
+ethr_atomic_inc_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read__(var);
+}
+
+long
+ethr_atomic_dec_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read__(var);
+}
+
+void
+ethr_atomic_add(ethr_atomic_t *var, long incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_add__(var, incr);
+}
+
+void
+ethr_atomic_inc(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_inc__(var);
+}
+
+void
+ethr_atomic_dec(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec__(var);
+}
+
+long
+ethr_atomic_read_band(ethr_atomic_t *var, long mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_band__(var, mask);
+}
+
+long
+ethr_atomic_read_bor(ethr_atomic_t *var, long mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_bor__(var, mask);
+}
+
+long
+ethr_atomic_xchg(ethr_atomic_t *var, long new)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_xchg__(var, new);
+}
+
+long
+ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg__(var, new, expected);
+}
+
+long
+ethr_atomic_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_acqb__(var);
+}
+
+long
+ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read_acqb__(var);
+}
+
+void
+ethr_atomic_set_relb(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set_relb__(var, i);
+}
+
+void
+ethr_atomic_dec_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec_relb__(var);
+}
+
+long
+ethr_atomic_dec_read_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read_relb__(var);
+}
+
+long
+ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, long new, long exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_acqb__(var, new, exp);
+}
+
+long
+ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, long new, long exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_relb__(var, new, exp);
+}
+
+
+/* Spinlocks and rwspinlocks */
+
+int
+ethr_spinlock_init(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_init__(lock);
+}
+
+int
+ethr_spinlock_destroy(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_destroy__(lock);
+}
+
+void
+ethr_spin_unlock(ethr_spinlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_spin_unlock__(lock);
+}
+
+void
+ethr_spin_lock(ethr_spinlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_spin_lock__(lock);
+}
+
+int
+ethr_rwlock_init(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_init__(lock);
+}
+
+int
+ethr_rwlock_destroy(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_destroy__(lock);
+}
+
+void
+ethr_read_unlock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_read_unlock__(lock);
+}
+
+void
+ethr_read_lock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_read_lock__(lock);
+}
+
+void
+ethr_write_unlock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_write_unlock__(lock);
+}
+
+void
+ethr_write_lock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_write_lock__(lock);
+}
+
+ETHR_IMPL_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err)
+{
+ char *errstr;
+ if (err == ENOTSUP)
+ errstr = "Operation not supported";
+ else {
+ errstr = strerror(err);
+ if (!errstr)
+ errstr = "Unknown error";
+ }
+ fprintf(stderr, "%s:%d: Fatal error in %s(): %s (%d)\n",
+ file, line, func, errstr, err);
+ ethr_abort__();
+}
+
+int ethr_assert_failed(const char *file, int line, const char *func, char *a)
+{
+ fprintf(stderr, "%s:%d: %s(): Assertion failed: %s\n", file, line, func, a);
+ ethr_abort__();
+ return 0;
+}
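
When no native atomics exist (ETHR_HAVE_OPTIMIZED_ATOMIC_OPS unset), the library falls back on the ethr_atomic_protection__ table initialized in ethr_init_common__() above: 2^ETHR_ATOMIC_ADDR_BITS spinlocks, one of which is picked by hashing the address of the atomic variable. A sketch of that address-striping idea (the SK_ names, shift and bit count are illustrative, not the library's actual mapping):

    #define SK_ADDR_BITS 10
    #define SK_ADDR2IX(P) \
        ((((unsigned long) (void *) (P)) >> 6) & ((1UL << SK_ADDR_BITS) - 1))

    /* a fallback add-and-read: take the stripe lock guarding this address */
    static long fallback_add_read_sketch(long *var, long incr)
    {
        long res;
        ethr_spin_lock(&ethr_atomic_protection__[SK_ADDR2IX(var)].u.lck);
        res = (*var += incr);
        ethr_spin_unlock(&ethr_atomic_protection__[SK_ADDR2IX(var)].u.lck);
        return res;
    }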
diff --git a/erts/lib_src/common/ethr_cbf.c b/erts/lib_src/common/ethr_cbf.c
new file mode 100644
index 0000000000..04feceec89
--- /dev/null
+++ b/erts/lib_src/common/ethr_cbf.c
@@ -0,0 +1,36 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * We keep this function alone in a separate file so that the
+ * compiler won't optimize it away.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "ethread.h"
+
+void
+ethr_compiler_barrier_fallback(void)
+{
+
+}
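
Keeping ethr_compiler_barrier_fallback() in a translation unit of its own is the whole point: a call to a function whose body the compiler cannot see forces it to assume that any memory may be read or written across the call, which is exactly what a compiler barrier requires (it restrains the compiler only, not the CPU). A sketch of how such a fallback is typically consumed (the macro name is invented):

    extern void ethr_compiler_barrier_fallback(void);
    #define COMPILER_BARRIER() ethr_compiler_barrier_fallback()

    /* the compiler may not sink the store to data below the barrier */
    data = 42;
    COMPILER_BARRIER();
    ready = 1;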
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
new file mode 100644
index 0000000000..f918bba81d
--- /dev/null
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -0,0 +1,2758 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_MUTEX_IMPL__
+
+#include <limits.h>
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#define ETHR_SPIN_WITH_WAITERS 1
+
+#define ETHR_MTX_MAX_FLGS_SPIN 1000
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+static int default_rwmtx_main_spincount;
+static int default_rwmtx_aux_spincount;
+#endif
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+static int default_mtx_main_spincount;
+static int default_mtx_aux_spincount;
+static int default_cnd_main_spincount;
+static int default_cnd_aux_spincount;
+#endif
+
+static int no_spin;
+
+#ifndef ETHR_USE_OWN_RWMTX_IMPL__
+static pthread_rwlockattr_t write_pref_attr_data;
+static pthread_rwlockattr_t *write_pref_attr;
+#endif
+
+#if defined(ETHR_MTX_Q_LOCK_SPINLOCK__)
+# define ETHR_MTX_QLOCK_INIT ethr_spinlock_init
+# define ETHR_MTX_QLOCK_DESTROY ethr_spinlock_destroy
+# define ETHR_MTX_Q_LOCK ethr_spin_lock
+# define ETHR_MTX_Q_UNLOCK ethr_spin_unlock
+#elif defined(ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__)
+# define ETHR_MTX_QLOCK_INIT(QL) pthread_mutex_init((QL), NULL)
+# define ETHR_MTX_QLOCK_DESTROY pthread_mutex_destroy
+# define ETHR_MTX_Q_LOCK(L) \
+do { \
+ int res__ = pthread_mutex_lock(L); \
+ if (res__ != 0) \
+ ETHR_FATAL_ERROR__(res__); \
+} while (0)
+# define ETHR_MTX_Q_UNLOCK(L) \
+do { \
+ int res__ = pthread_mutex_unlock(L); \
+ if (res__ != 0) \
+ ETHR_FATAL_ERROR__(res__); \
+} while (0)
+#elif defined(ETHR_MTX_Q_LOCK_CRITICAL_SECTION__)
+# define ETHR_MTX_QLOCK_INIT(QL) (InitializeCriticalSection((QL)), 0)
+# define ETHR_MTX_QLOCK_DESTROY(QL) (DeleteCriticalSection((QL)), 0)
+# define ETHR_MTX_Q_LOCK(QL) EnterCriticalSection((QL))
+# define ETHR_MTX_Q_UNLOCK(QL) LeaveCriticalSection((QL))
+#endif
+
+int
+ethr_mutex_lib_init(int cpu_conf)
+{
+ int res = 0;
+
+ no_spin = cpu_conf == 1;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+ default_mtx_main_spincount = ETHR_MTX_DEFAULT_MAIN_SPINCOUNT;
+ default_mtx_aux_spincount = ETHR_MTX_DEFAULT_AUX_SPINCOUNT;
+ default_cnd_main_spincount = ETHR_CND_DEFAULT_MAIN_SPINCOUNT;
+ default_cnd_aux_spincount = ETHR_CND_DEFAULT_AUX_SPINCOUNT;
+#endif
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+ default_rwmtx_main_spincount = ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT;
+ default_rwmtx_aux_spincount = ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT;
+
+#else
+
+#if defined(ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP) \
+ && defined(ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
+ res = pthread_rwlockattr_init(&write_pref_attr_data);
+ if (res != 0)
+ return res;
+ res = pthread_rwlockattr_setkind_np(
+ &write_pref_attr_data,
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+ write_pref_attr = &write_pref_attr_data;
+#else
+ write_pref_attr = NULL;
+#endif
+
+#endif
+
+ return res;
+}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#ifdef ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS
+#if 0 /*
+ * When inc and dec are real atomic instructions as on x86, the
+       * ETHR_RLOCK_WITH_INC_DEC implementation performs better with
+       * lots of read locks compared to the cmpxchg based implementation.
+       * It, however, performs worse with lots of mixed reads and writes.
+       * It could be used for rwlocks that are known to be mostly read
+       * locked, but the readers array based implementation outperforms it
+ * by far. Therefore, it has been disabled, and will probably be
+ * removed some time in the future.
+ */
+# define ETHR_RLOCK_WITH_INC_DEC
+#endif
+#endif
+
+static int reader_groups_array_size = 0;
+static int main_threads_array_size = 0;
+
+#endif
+
+int
+ethr_mutex_lib_late_init(int no_reader_groups, int no_main_threads)
+{
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+ reader_groups_array_size = (no_reader_groups <= 1
+ ? 1
+ : no_reader_groups + 1);
+ main_threads_array_size = (no_main_threads <= 1
+ ? 1
+ : no_main_threads + 1);
+#endif
+ return 0;
+}
+
+int
+ethr_rwmutex_set_reader_group(int ix)
+{
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+ ethr_ts_event *tse;
+
+ if (ix < 0 || reader_groups_array_size <= ix)
+ return EINVAL;
+
+ tse = ethr_get_ts_event();
+
+ if ((tse->iflgs & ETHR_TS_EV_ETHREAD) == 0) {
+ ethr_leave_ts_event(tse);
+ return EINVAL;
+ }
+
+ tse->rgix = ix;
+
+ ethr_leave_ts_event(tse);
+#endif
+ return 0;
+}
+
+#if defined(ETHR_MTX_HARD_DEBUG_Q) || defined(ETHR_MTX_HARD_DEBUG_WSQ)
+static void hard_debug_chk_q__(struct ethr_mutex_base_ *, int);
+#define ETHR_RWMTX_HARD_DEBUG_CHK_Q(RWMTX) hard_debug_chk_q__(&(RWMTX)->mtxb,1)
+#define ETHR_MTX_HARD_DEBUG_CHK_Q(MTX) hard_debug_chk_q__(&(MTX)->mtxb, 0)
+#else
+#define ETHR_RWMTX_HARD_DEBUG_CHK_Q(RWMTX)
+#define ETHR_MTX_HARD_DEBUG_CHK_Q(MTX)
+#endif
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+static void
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx,
+ int have_w,
+ long initial);
+static int
+rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
+ long initial,
+ ethr_ts_event *tse,
+ int start_next_ix,
+ int check_before_try,
+ int try_write_lock);
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+/* -- Utilities operating both on ordinary mutexes and read write mutexes -- */
+
+static ETHR_INLINE void
+rwmutex_freqread_wtng_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix = (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ
+ ? tse->rgix
+ : tse->mtix);
+ rwmtx->tdata.ra[ix].data.waiting_readers++;
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_add(ethr_rwmutex *rwmtx,
+ ethr_rwmutex_type type,
+ int ix,
+ int inc)
+{
+ if (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ || ix == 0)
+ ethr_atomic_add(&rwmtx->tdata.ra[ix].data.readers, inc);
+ else {
+ ETHR_ASSERT(type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ETHR_ASSERT(inc == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ }
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_inc:
+ ethr_atomic_inc(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_inc;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ }
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_dec(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec:
+ ethr_atomic_dec(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_dec_read(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec_read:
+ return ethr_atomic_dec_read(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec_read;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ return (long) 0;
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_dec_read_relb(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec_read:
+ return ethr_atomic_dec_read_relb(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec_read;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set_relb(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ return (long) 0;
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
+{
+ long res = ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers);
+#ifdef ETHR_DEBUG
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ ETHR_ASSERT(res >= 0);
+ break;
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+ ETHR_ASSERT(res == 0 || res == 1);
+ break;
+ default:
+ ETHR_ASSERT(0);
+ break;
+ }
+#endif
+ return res;
+}
+
+
+static ETHR_INLINE void
+enqueue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (!*queue) {
+ *queue = tse_start;
+ tse_start->prev = tse_end;
+ tse_end->next = tse_start;
+ }
+ else {
+ tse_end->next = *queue;
+ tse_start->prev = (*queue)->prev;
+ (*queue)->prev->next = tse_start;
+ (*queue)->prev = tse_end;
+ }
+}
+
+static ETHR_INLINE void
+insert(ethr_ts_event *tse_pred, ethr_ts_event *tse)
+{
+ tse->next = tse_pred->next;
+ tse->prev = tse_pred;
+ tse_pred->next->prev = tse;
+ tse_pred->next = tse;
+}
+
+static ETHR_INLINE void
+dequeue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (tse_start->prev == tse_end) {
+ ETHR_ASSERT(*queue == tse_start && tse_end->next == tse_start);
+ *queue = NULL;
+ }
+ else {
+ if (*queue == tse_start)
+ *queue = tse_end->next;
+ tse_end->next->prev = tse_start->prev;
+ tse_start->prev->next = tse_end->next;
+ }
+}
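
enqueue(), insert() and dequeue() above maintain a circular doubly linked list in which *queue points at the head and (*queue)->prev is the tail, so both ends are reachable in O(1) and whole segments [tse_start, tse_end] can be spliced in or out in one operation. A stand-alone sketch of the same shape for single nodes (types and names invented for the example):

    typedef struct cnode_ { struct cnode_ *next, *prev; } cnode_t;

    static void cq_enqueue(cnode_t **q, cnode_t *x)
    {
        if (!*q) {
            *q = x;
            x->next = x->prev = x;          /* one-element ring */
        }
        else {
            x->next = *q;                   /* become the new tail */
            x->prev = (*q)->prev;
            (*q)->prev->next = x;
            (*q)->prev = x;
        }
    }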
+
+static void
+event_wait(struct ethr_mutex_base_ *mtxb,
+ ethr_ts_event *tse,
+ int spincount,
+ long type,
+ int is_rwmtx,
+ int is_freq_read)
+{
+ int locked = 0;
+ long act;
+ int need_try_complete_runlock = 0;
+
+ /* Need to enqueue and wait... */
+
+ tse->uflgs = type;
+ ethr_atomic_set(&tse->uaflgs, type);
+
+ ETHR_MTX_Q_LOCK(&mtxb->qlck);
+ locked = 1;
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ act = ethr_atomic_read(&mtxb->flgs);
+
+ if (act & type) {
+
+ /* Wait bit already there; enqueue... */
+
+ ETHR_ASSERT(mtxb->q);
+ if (type == ETHR_RWMTX_W_WAIT_FLG__) {
+ enqueue(&mtxb->q, tse, tse);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws++;
+#endif
+ }
+ else {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_ASSERT(is_rwmtx);
+ ETHR_ASSERT(rwmtx->rq_end);
+ insert(rwmtx->rq_end, tse);
+ rwmtx->rq_end = tse;
+ if (is_freq_read)
+ rwmutex_freqread_wtng_rdrs_inc(rwmtx, tse);
+ else
+ rwmtx->tdata.rs++;
+ }
+ }
+ else {
+
+ /* Set wait bit */
+
+ while (1) {
+ long new, exp = act;
+ int freqread_tryrlock = 0;
+ need_try_complete_runlock = 0;
+
+ if (type == ETHR_RWMTX_W_WAIT_FLG__) {
+ if (is_freq_read && act == ETHR_RWMTX_R_FLG__)
+ need_try_complete_runlock = 1;
+ if (act != 0)
+ new = act | ETHR_RWMTX_W_WAIT_FLG__;
+ else
+ new = ETHR_RWMTX_W_FLG__; /* Try to get it */
+ }
+ else {
+ ETHR_ASSERT(is_rwmtx);
+
+ if (!is_freq_read) {
+ if (act & (ETHR_RWMTX_W_FLG__| ETHR_RWMTX_W_WAIT_FLG__))
+ new = act | ETHR_RWMTX_R_WAIT_FLG__;
+ else
+ new = act + 1; /* Try to get it */
+ }
+ else {
+ if (act & ~ETHR_RWMTX_R_FLG__)
+ new = act | ETHR_RWMTX_R_WAIT_FLG__;
+ else { /* Try to get it */
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+ ETHR_MEMORY_BARRIER;
+ new = act | ETHR_RWMTX_R_FLG__;
+ freqread_tryrlock = 1;
+ }
+ }
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs, new, exp);
+ if (exp == act) {
+ if (new & type) {
+ act = new;
+ break;
+ }
+ else {
+ /* Got it */
+ goto done;
+ }
+ }
+
+ if (freqread_tryrlock) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+
+	    /* We didn't set ETHR_RWMTX_R_FLG__; however, someone
+	       else might have */
+ if (act == ETHR_RWMTX_R_FLG__)
+ goto done; /* Got it by help from someone else */
+
+ ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
+ /*
+ * We know that no waiter flags have been set, i.e.,
+ * we cannot get into a situation where we need to wake
+ * someone up here. Just restore the readers counter
+ * and do it over again...
+ */
+ rwmutex_freqread_rdrs_dec(rwmtx, tse);
+ }
+ }
+
+ /* Enqueue */
+
+ if (type == ETHR_RWMTX_R_WAIT_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_ASSERT(is_rwmtx);
+ ETHR_ASSERT(!rwmtx->rq_end);
+ rwmtx->rq_end = tse;
+ if (is_freq_read)
+ rwmutex_freqread_wtng_rdrs_inc(rwmtx, tse);
+ else
+ rwmtx->tdata.rs++;
+ }
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ else {
+ mtxb->ws++;
+ }
+#endif
+
+ enqueue(&mtxb->q, tse, tse);
+ }
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ /* Wait */
+ locked = 0;
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+
+ if (need_try_complete_runlock) {
+ ETHR_ASSERT(((ethr_rwmutex *) mtxb)->type
+ != ETHR_RWMUTEX_TYPE_NORMAL);
+ /*
+ * We were the only one in queue when we enqueued, and it
+	 * was seemingly read locked. We need to try to complete a
+	 * runlock; otherwise, we might hang forever. If the
+	 * runlock can be completed, we will be dequeued and
+ * woken by ourselves.
+ */
+ rwmutex_try_complete_runlock((ethr_rwmutex *) mtxb,
+ act, tse, 0, 1, 0);
+ }
+
+ while (1) {
+ ethr_event_reset(&tse->event);
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ goto done; /* Got it */
+
+ ETHR_ASSERT(act == type);
+ ethr_event_swait(&tse->event, spincount);
+ /* swait result: 0 || EINTR */
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ goto done; /* Got it */
+ }
+
+ done:
+ if (locked)
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+}
+
+static void
+wake_writer(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+ ethr_ts_event *tse;
+
+ tse = mtxb->q;
+ ETHR_ASSERT(tse);
+ dequeue(&mtxb->q, tse, tse);
+
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_RWMTX_W_WAIT_FLG__);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws--;
+#endif
+#if defined(ETHR_MTX_HARD_DEBUG_Q) || defined(ETHR_MTX_HARD_DEBUG_WSQ)
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+
+ ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_event_set(&tse->event);
+}
+
+static ETHR_INLINE int
+initial_spincount(struct ethr_mutex_base_ *mtxb)
+{
+ return (mtxb->aux_scnt < ETHR_MTX_MAX_FLGS_SPIN
+ ? mtxb->aux_scnt
+ : ETHR_MTX_MAX_FLGS_SPIN);
+}
+
+static ETHR_INLINE int
+update_spincount(struct ethr_mutex_base_ *mtxb,
+ ethr_ts_event *tse,
+ int *start_scnt,
+ int *scnt)
+{
+ int sscnt = *start_scnt;
+ if (sscnt < 0) {
+ *scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? mtxb->main_scnt
+ : mtxb->aux_scnt);
+ *scnt -= ETHR_MTX_MAX_FLGS_SPIN;
+ }
+ else {
+ *scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? mtxb->main_scnt
+ : mtxb->aux_scnt);
+ *scnt -= sscnt;
+ if (*scnt > 0 && sscnt < ETHR_MTX_MAX_FLGS_SPIN) {
+ *scnt = ETHR_MTX_MAX_FLGS_SPIN - sscnt;
+ *start_scnt = -1;
+ return 0;
+ }
+ }
+ return 1;
+}
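
initial_spincount() and update_spincount() split the spin budget into two phases: every thread first spins at most ETHR_MTX_MAX_FLGS_SPIN times directly on the flag word; if the lock is still busy, the remainder of the per-thread budget (main_scnt for main threads, aux_scnt for the rest) is spent inside ethr_event_swait(). In miniature (illustrative fragment):

    int budget = is_main_thread ? mtxb->main_scnt : mtxb->aux_scnt;
    int phase1 = budget < ETHR_MTX_MAX_FLGS_SPIN
               ? budget : ETHR_MTX_MAX_FLGS_SPIN;  /* spin on mtxb->flgs */
    int phase2 = budget - phase1;                  /* spin inside swait  */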
+
+int check_readers_array(ethr_rwmutex *rwmtx,
+ int start_rix,
+ int length,
+ int pre_check);
+
+static ETHR_INLINE void
+write_lock_wait(struct ethr_mutex_base_ *mtxb,
+ long initial,
+ int is_rwmtx,
+ int is_freq_read)
+{
+ long act = initial;
+ int scnt, start_scnt;
+ ethr_ts_event *tse = NULL;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ int res;
+ int freq_read_size = -1;
+ int freq_read_start_ix = -1;
+
+ ETHR_ASSERT(!is_freq_read || is_rwmtx);
+
+ start_scnt = scnt = initial_spincount(mtxb);
+
+ /*
+ * Spin trying to write lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+ long exp;
+
+ while (act != 0) {
+
+ if (is_freq_read && act == ETHR_RWMTX_R_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ if (!tse)
+ tse = ethr_get_ts_event();
+ if (freq_read_size < 0) {
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ freq_read_size = reader_groups_array_size;
+ freq_read_start_ix = tse->rgix;
+ }
+ else {
+ freq_read_size = main_threads_array_size;
+ freq_read_start_ix = tse->mtix;
+ }
+ }
+ res = check_readers_array(rwmtx,
+ freq_read_start_ix,
+ freq_read_size,
+ 1);
+ scnt--;
+ if (res == 0) {
+ act = ethr_atomic_read(&mtxb->flgs);
+ if (act & ETHR_RWMTX_R_MASK__) {
+ res = rwmutex_try_complete_runlock(rwmtx, act,
+ tse, 0, 0,
+ 1);
+ if (res != EBUSY)
+ goto done; /* Got it */
+ }
+ if (scnt <= 0)
+ goto chk_spin;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ continue;
+ }
+ }
+
+ if (scnt <= 0) {
+ chk_spin:
+ scnt = 0;
+
+ if (!tse)
+ tse = ethr_get_ts_event();
+ if (update_spincount(mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(mtxb, tse, scnt, ETHR_RWMTX_W_WAIT_FLG__,
+ is_rwmtx, is_freq_read);
+ goto done;
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&mtxb->flgs);
+ scnt--;
+ }
+ ETHR_ASSERT(scnt >= 0);
+
+ exp = act;
+
+ act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs,
+ ETHR_RWMTX_W_FLG__,
+ exp);
+ if (act == 0)
+ goto done; /* Got it */
+ }
+
+ done:
+ if (tse)
+ ethr_leave_ts_event(tse);
+}
+
+static int
+mtxb_init(struct ethr_mutex_base_ *mtxb,
+ int def_main_scnt,
+ int main_scnt,
+ int def_aux_scnt,
+ int aux_scnt)
+{
+ ETHR_MTX_HARD_DEBUG_LFS_INIT(mtxb);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws = 0;
+#endif
+ if (no_spin) {
+ mtxb->main_scnt = 0;
+ mtxb->aux_scnt = 0;
+ }
+ else {
+ if (main_scnt > SHRT_MAX)
+ mtxb->main_scnt = SHRT_MAX;
+ else if (main_scnt < 0)
+ mtxb->main_scnt = def_main_scnt;
+ else
+ mtxb->main_scnt = (short) main_scnt;
+ if (aux_scnt > SHRT_MAX)
+ mtxb->aux_scnt = SHRT_MAX;
+ else if (aux_scnt < 0)
+ mtxb->aux_scnt = def_aux_scnt;
+ else
+ mtxb->aux_scnt = (short) aux_scnt;
+ if (mtxb->main_scnt < mtxb->aux_scnt)
+ mtxb->main_scnt = mtxb->aux_scnt;
+ }
+ mtxb->q = NULL;
+ ethr_atomic_init(&mtxb->flgs, 0);
+ return ETHR_MTX_QLOCK_INIT(&mtxb->qlck);
+}
+
+static int
+mtxb_destroy(struct ethr_mutex_base_ *mtxb)
+{
+ long act;
+ ETHR_MTX_Q_LOCK(&mtxb->qlck);
+ act = ethr_atomic_read(&mtxb->flgs);
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+ if (act != 0)
+ return EINVAL;
+ return ETHR_MTX_QLOCK_DESTROY(&mtxb->qlck);
+}
+
+
+#endif /* ETHR_USE_OWN_RWMTX_IMPL__ || ETHR_USE_OWN_MTX_IMPL__ */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Mutex and condition variable implementation *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+/* -- Mutex ---------------------------------------------------------------- */
+
+int
+ethr_mutex_init_opt(ethr_mutex *mtx, ethr_mutex_opt *opt)
+{
+ int res;
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(mtx);
+ res = mtxb_init(&mtx->mtxb,
+ default_mtx_main_spincount,
+ opt ? opt->main_spincount : -1,
+ default_mtx_aux_spincount,
+ opt ? opt->aux_spincount : -1);
+#if ETHR_XCHK
+ if (res != 0)
+ mtx->initialized = 0;
+#endif
+ return res;
+}
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+ return ethr_mutex_init_opt(mtx, NULL);
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = 0;
+#endif
+ return mtxb_destroy(&mtx->mtxb);
+}
+
+void
+ethr_mutex_lock_wait__(ethr_mutex *mtx, long initial)
+{
+ write_lock_wait(&mtx->mtxb, initial, 0, 0);
+}
+
+void
+ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
+{
+ ethr_ts_event *tse;
+
+ ETHR_MTX_Q_LOCK(&mtx->mtxb.qlck);
+ tse = mtx->mtxb.q;
+
+ ETHR_ASSERT(tse);
+ ETHR_ASSERT(ethr_atomic_read(&mtx->mtxb.flgs)
+ == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
+ ETHR_ASSERT(initial & ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+
+ /*
+ * If we have multiple waiters, there is no need to modify
+ * mtxb->flgs; otherwise, we need to clear the write wait bit...
+ */
+ if (tse->next == mtx->mtxb.q)
+ ethr_atomic_set(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__);
+
+ wake_writer(&mtx->mtxb, 0);
+}
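
ethr_mutex_lock_wait__() and ethr_mutex_unlock_wake__() are only the slow paths; the corresponding fast paths are inline functions in the ethr_mutex.h header and operate directly on mtxb.flgs. A hedged sketch of what the lock fast path amounts to (not a quote from the header):

    static void lock_fast_path_sketch(ethr_mutex *mtx)
    {
        long act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs,
                                            ETHR_RWMTX_W_FLG__,
                                            0);
        if (act != 0)
            ethr_mutex_lock_wait__(mtx, act);   /* contended: spin/queue */
    }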
+
+/* -- Condition variables -------------------------------------------------- */
+
+static void
+enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
+{
+ long act;
+
+ /*
+ * `ethr_cond_signal()' and `ethr_cond_broadcast()' end up here. If `mtx'
+     * is not currently locked by the current thread, we almost certainly
+     * have a hard-to-debug race condition. There might, however, be some
+     * (strange) use for it. POSIX also allows a call to `pthread_cond_signal'
+     * or `pthread_cond_broadcast' even though the associated mutex isn't
+ * locked by the caller. Therefore, we also allow this kind of strange
+ * usage, but optimize for the case where the mutex is locked by the
+ * calling thread.
+ */
+
+ ETHR_MTX_Q_LOCK(&mtx->mtxb.qlck);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ {
+ int dbg_nws__ = 0;
+ ethr_ts_event *dbg_tse__;
+ for (dbg_tse__ = tse_start;
+ dbg_tse__ != tse_end;
+ dbg_tse__ = dbg_tse__->next)
+ dbg_nws__++;
+ mtx->mtxb.ws += dbg_nws__ + 1;
+ }
+#endif
+
+ act = ethr_atomic_read(&mtx->mtxb.flgs);
+ ETHR_ASSERT(act == 0
+ || act == ETHR_RWMTX_W_FLG__
+ || act == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
+ if (act & ETHR_RWMTX_W_FLG__) {
+ /* The normal sane case */
+ if (!(act & ETHR_RWMTX_W_WAIT_FLG__)) {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs,
+ (ETHR_RWMTX_W_FLG__
+ | ETHR_RWMTX_W_WAIT_FLG__),
+ ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__) {
+ /*
+		 * Sigh... this wasn't so sane after all, since the mutex was
+		 * obviously not locked by the current thread...
+ */
+ ETHR_ASSERT(act == 0);
+ goto mtx_unlocked;
+ }
+ }
+
+#ifdef ETHR_DEBUG
+ if (act & ETHR_RWMTX_W_WAIT_FLG__)
+ ETHR_ASSERT(mtx->mtxb.q);
+ else
+ ETHR_ASSERT(!mtx->mtxb.q);
+#endif
+
+ enqueue(&mtx->mtxb.q, tse_start, tse_end);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ }
+ else {
+ int multi;
+ mtx_unlocked:
+ /* Sigh... mutex isn't locked... */
+
+ multi = tse_start != tse_end;
+
+ while (1) {
+ long new, exp = act;
+
+ if (multi || (act & ETHR_RWMTX_W_FLG__))
+ new = ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__;
+ else
+ new = ETHR_RWMTX_W_FLG__;
+
+ act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ if (act & ETHR_RWMTX_W_FLG__) {
+ enqueue(&mtx->mtxb.q, tse_start, tse_end);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ }
+ else {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ /*
+ * Acquired the mutex on behalf of the
+ * first thread in the queue; wake
+ * it and enqueue the rest...
+ */
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtx->mtxb.ws--;
+#endif
+ if (multi) {
+ enqueue(&mtx->mtxb.q, tse_start->next, tse_end);
+ ETHR_ASSERT(mtx->mtxb.q);
+ }
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ ethr_atomic_set(&tse_start->uaflgs, 0);
+ ethr_event_set(&tse_start->event);
+ }
+ break;
+ }
+ }
+ }
+}
+
+int
+ethr_cond_init_opt(ethr_cond *cnd, ethr_cond_opt *opt)
+{
+ int res;
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(cnd);
+ cnd->q = NULL;
+ if (no_spin) {
+ cnd->main_scnt = 0;
+ cnd->aux_scnt = 0;
+ }
+ else {
+ if (!opt || opt->main_spincount < 0)
+ cnd->main_scnt = default_cnd_main_spincount;
+ else if (opt->main_spincount > SHRT_MAX)
+ cnd->main_scnt = SHRT_MAX;
+ else
+ cnd->main_scnt = (short) opt->main_spincount;
+ if (!opt || opt->aux_spincount < 0)
+ cnd->aux_scnt = default_cnd_aux_spincount;
+ else if (opt->aux_spincount > SHRT_MAX)
+ cnd->aux_scnt = SHRT_MAX;
+ else
+ cnd->aux_scnt = (short) opt->aux_spincount;
+ if (cnd->main_scnt < cnd->aux_scnt)
+ cnd->main_scnt = cnd->aux_scnt;
+ }
+ ETHR_MTX_QLOCK_INIT(&cnd->qlck);
+ return 0;
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+ return ethr_cond_init_opt(cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = 0;
+#endif
+ return ETHR_MTX_QLOCK_DESTROY(&cnd->qlck);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ ethr_ts_event *tse;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+    ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ tse = cnd->q;
+
+ if (!tse) {
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+ }
+ else {
+ ethr_mutex *mtx = (ethr_mutex *) tse->udata;
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_CND_WAIT_FLG__);
+
+ ethr_atomic_set(&tse->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+
+ dequeue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ tse->next = tse->prev = NULL;
+
+ enqueue_mtx(mtx, tse, tse);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ }
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ int got_all;
+ ethr_ts_event *tse;
+ ETHR_ASSERT(!ethr_not_inited__);
+    ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ do {
+ got_all = 1;
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ tse = cnd->q;
+
+ if (!tse) {
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+ }
+ else {
+ ethr_mutex *mtx = (ethr_mutex *) tse->udata;
+ ethr_ts_event *tse_tmp, *tse_end;
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ tse_end = cnd->q->prev;
+
+ tse_tmp = tse;
+
+ do {
+
+ if (mtx == (ethr_mutex *) tse_tmp->udata) {
+ /* The normal case */
+
+ ETHR_ASSERT(tse_tmp->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse_tmp->uaflgs)
+ == ETHR_CND_WAIT_FLG__);
+
+ ethr_atomic_set(&tse_tmp->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+ }
+ else {
+ /* Should be very unusual */
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ tse_end = tse_tmp->prev;
+ got_all = 0;
+ break;
+ }
+
+ tse_tmp = tse_tmp->next;
+
+ } while (tse_tmp != cnd->q);
+
+ dequeue(&cnd->q, tse, tse_end);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ enqueue_mtx(mtx, tse, tse_end);
+ }
+
+ } while (!got_all);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ int woken;
+ int scnt;
+ void *udata = NULL;
+ ethr_ts_event *tse;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+    ETHR_ASSERT(cnd);
+    ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+    ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ tse = ethr_get_ts_event();
+
+ scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? cnd->main_scnt
+ : cnd->aux_scnt);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ udata = tse->udata; /* Got to restore udata before returning */
+ tse->udata = (void *) mtx;
+
+ tse->uflgs = ETHR_RWMTX_W_WAIT_FLG__; /* Prep for mutex lock op */
+ ethr_atomic_set(&tse->uaflgs, ETHR_CND_WAIT_FLG__);
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ enqueue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ ethr_mutex_unlock(mtx);
+
+ /* Wait */
+ woken = 0;
+ while (1) {
+ long act;
+
+ ethr_event_reset(&tse->event);
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ break; /* Mtx locked */
+
+ /* First time, got EINTR, or spurious wakeup... */
+
+ ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
+ || act == ETHR_RWMTX_W_WAIT_FLG__);
+
+ if (woken) {
+	    /*
+	     * If act == ETHR_RWMTX_W_WAIT_FLG__, we have already been enqueued
+	     * on the mutex; continue waiting until locked...
+	     */
+ if (act == ETHR_CND_WAIT_FLG__) {
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+ act = ethr_atomic_read(&tse->uaflgs);
+ ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
+ || act == ETHR_RWMTX_W_WAIT_FLG__);
+		/*
+		 * If act == ETHR_RWMTX_W_WAIT_FLG__, we have already been
+		 * enqueued on the mutex; continue waiting until locked...
+		 */
+ if (act == ETHR_CND_WAIT_FLG__)
+ dequeue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ if (act == ETHR_CND_WAIT_FLG__) {
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ethr_mutex_lock(mtx);
+ return EINTR;
+ }
+ }
+ ETHR_ASSERT(act == ETHR_RWMTX_W_WAIT_FLG__);
+ }
+ ethr_event_swait(&tse->event, scnt);
+ /* swait result: 0 || EINTR */
+ woken = 1;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+ return 0;
+}
+
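+/*
+ * Editor's illustration (not part of the library): as the loop above
+ * shows, ethr_cond_wait() returns 0 with the mutex locked, or EINTR if
+ * the wait was interrupted before the waiter was moved to the mutex
+ * queue (the mutex is locked again before returning in that case too).
+ * Callers are therefore expected to re-test their predicate. A minimal
+ * sketch, assuming a caller-owned flag protected by the mutex; all
+ * names in the sketch are hypothetical:
+ */
+#if 0
+static ethr_mutex example_mtx;
+static ethr_cond example_cnd;
+static int example_ready;	/* predicate; protected by example_mtx */
+
+static void
+example_wait(void)
+{
+    ethr_mutex_lock(&example_mtx);
+    while (!example_ready) {
+	/* Returns 0 or EINTR; re-testing the predicate handles both */
+	(void) ethr_cond_wait(&example_cnd, &example_mtx);
+    }
+    ethr_mutex_unlock(&example_mtx);
+}
+#endif
+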
+#else
+/* -- pthread mutex and condition variables -------------------------------- */
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ return pthread_mutex_init(&mtx->pt_mtx, NULL);
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+#if ETHR_XCHK
+ mtx->initialized = 0;
+#endif
+ return pthread_mutex_destroy(&mtx->pt_mtx);
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+ return pthread_cond_init(&cnd->pt_cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = 0;
+#endif
+ return pthread_cond_destroy(&cnd->pt_cnd);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ res = pthread_cond_signal(&cnd->pt_cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ res = pthread_cond_broadcast(&cnd->pt_cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ res = pthread_cond_wait(&cnd->pt_cnd, &mtx->pt_mtx);
+ if (res != 0 && res != EINTR)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+#endif /* pthread_mutex */
+
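+/*
+ * Editor's illustration (not part of the library): the signalling side
+ * of the wait pattern sketched after ethr_cond_wait() above, reusing
+ * the same hypothetical names. Signalling while holding the mutex is
+ * fine with both implementations; with the own implementation the
+ * waiter is handed directly to the mutex queue by enqueue_mtx(), so it
+ * is woken only once the mutex can be handed over to it.
+ */
+#if 0
+static void
+example_signal(void)
+{
+    ethr_mutex_lock(&example_mtx);
+    example_ready = 1;		    /* update the predicate under the mutex */
+    ethr_cond_signal(&example_cnd); /* move one waiter to the mutex queue */
+    ethr_mutex_unlock(&example_mtx);
+}
+#endif
+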
+/* -- Exported symbols of inline functions --------------------------------- */
+
+int
+ethr_mutex_trylock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ return ethr_mutex_trylock__(mtx);
+}
+
+void
+ethr_mutex_lock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ ethr_mutex_lock__(mtx);
+}
+
+void
+ethr_mutex_unlock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ ethr_mutex_unlock__(mtx);
+}
+
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ *                             Read/Write Mutex                              *
+\*                                                                           */
+
+static void
+wake_readers(ethr_rwmutex *rwmtx, int rs)
+{
+ ethr_ts_event *tse;
+#ifdef ETHR_DEBUG
+ int drs = 0;
+#endif
+
+ tse = rwmtx->mtxb.q;
+ ETHR_ASSERT(tse);
+ ETHR_ASSERT(rwmtx->rq_end);
+ dequeue(&rwmtx->mtxb.q, tse, rwmtx->rq_end);
+ rwmtx->rq_end->next = NULL;
+ rwmtx->rq_end = NULL;
+
+ ETHR_ASSERT(!rwmtx->mtxb.q
+ || (ethr_atomic_read(&rwmtx->mtxb.q->uaflgs)
+ == ETHR_RWMTX_W_WAIT_FLG__));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+ ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
+
+ while (tse) {
+ ethr_ts_event *tse_next;
+
+#ifdef ETHR_DEBUG
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs)
+ == ETHR_RWMTX_R_WAIT_FLG__);
+ drs++;
+#endif
+
+ tse_next = tse->next; /* we aren't allowed to read tse->next
+ after we have reset uaflgs */
+
+ ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_event_set(&tse->event);
+ tse = tse_next;
+ }
+
+ ETHR_ASSERT(rs == drs);
+}
+
+static ETHR_INLINE int
+is_w_waiter(ethr_ts_event *tse)
+{
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__
+ || tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ return tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__;
+}
+
+static ETHR_INLINE int
+multiple_w_waiters(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(rwmtx->mtxb.q);
+ ETHR_ASSERT(rwmtx->mtxb.q->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+
+ if (!rwmtx->rq_end)
+ return rwmtx->mtxb.q->next != rwmtx->mtxb.q;
+ else {
+ ETHR_ASSERT(rwmtx->mtxb.q->next != rwmtx->mtxb.q);
+ if (rwmtx->mtxb.q->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__)
+ return 1;
+ ETHR_ASSERT(rwmtx->rq_end->next == rwmtx->mtxb.q
+ || rwmtx->rq_end->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ return rwmtx->rq_end->next != rwmtx->mtxb.q;
+ }
+}
+
+int check_readers_array(ethr_rwmutex *rwmtx,
+ int start_rix,
+ int length,
+ int pre_check)
+{
+ int ix = start_rix;
+
+#ifndef ETHR_READ_MEMORY_BARRIER_IS_FULL
+ if (pre_check)
+ ETHR_READ_MEMORY_BARRIER;
+ else
+#endif
+ ETHR_MEMORY_BARRIER;
+
+ do {
+ long act = rwmutex_freqread_rdrs_read(rwmtx, ix);
+ if (act != 0)
+ return EBUSY;
+ ix++;
+ if (ix == length)
+ ix = 0;
+ } while (ix != start_rix);
+
+ return 0;
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
+ ethr_ts_event *tse)
+{
+ long act;
+ /*
+ * Restore failed increment
+ */
+ act = rwmutex_freqread_rdrs_dec_read(rwmtx, tse);
+
+ ETHR_WRITE_MEMORY_BARRIER;
+
+ if (act == 0) {
+
+#ifndef ETHR_WRITE_MEMORY_BARRIER_IS_FULL
+ ETHR_READ_MEMORY_BARRIER;
+#endif
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if ((act & ETHR_RWMTX_W_FLG__) == 0
+ && act & (ETHR_RWMTX_WAIT_FLGS__|ETHR_RWMTX_R_PEND_UNLCK_MASK__)) {
+ /*
+ * We either got waiters, or someone else trying
+ * to read unlock which we might have to help.
+ */
+ rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 1, 0);
+ }
+ }
+}
+
+static int
+rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
+ long initial,
+ ethr_ts_event *tse,
+ int start_next_ix,
+ int check_before_try,
+ int try_write_lock)
+{
+ ethr_ts_event *tse_tmp;
+ long act = initial;
+ int six, res, length;
+
+ tse_tmp = tse;
+ if (!tse_tmp)
+ tse_tmp = ethr_get_ts_event();
+
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0)
+ goto check_waiters;
+
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ length = reader_groups_array_size;
+ six = tse_tmp->rgix;
+ }
+ else {
+ length = main_threads_array_size;
+ six = tse_tmp->mtix;
+ }
+ if (start_next_ix) {
+ six++;
+ if (six >= length)
+ six = 0;
+ }
+
+ if (!tse)
+ ethr_leave_ts_event(tse_tmp);
+
+ if (check_before_try) {
+ res = check_readers_array(rwmtx, six, length, 1);
+ if (res == EBUSY)
+ return try_write_lock ? EBUSY : 0;
+ }
+
+ while (1) {
+ long exp = act;
+ long new = act+1;
+
+ ETHR_ASSERT((act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ < ETHR_RWMTX_R_PEND_UNLCK_MASK__);
+
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ act = new;
+ break;
+ }
+ if (!try_write_lock) {
+ if (act == ETHR_RWMTX_W_FLG__ || act == 0)
+ return 0;
+ if ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ if ((act & ETHR_RWMTX_R_FLG__) == 0)
+ return 0;
+ }
+ else if ((act & ETHR_RWMTX_R_FLG__) == 0) {
+ if (act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ return 0;
+ goto check_waiters;
+ }
+ }
+ else {
+ if (act == 0)
+ goto tryrwlock;
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_WAIT_FLGS__))
+ return EBUSY;
+ }
+ }
+
+ res = check_readers_array(rwmtx, six, length, 0);
+ if (res == EBUSY) {
+ act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ if (act & ETHR_RWMTX_R_MASK__)
+ return try_write_lock ? EBUSY : 0;
+ }
+ else {
+ while (1) {
+ long exp = act;
+ long new = act;
+ new &= ~ETHR_RWMTX_R_FLG__;
+ new--;
+
+ ETHR_ASSERT(act & ETHR_RWMTX_R_PEND_UNLCK_MASK__);
+
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ if (new & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ return try_write_lock ? EBUSY : 0;
+ act = new;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Read unlock completed, but we have to check if
+ * threads have to be woken (or if we should try
+ * to write lock it).
+ */
+
+ if (act & ETHR_RWMTX_W_FLG__)
+ return try_write_lock ? EBUSY : 0;
+
+ if (act & ETHR_RWMTX_WAIT_FLGS__) {
+ check_waiters:
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ return try_write_lock ? EBUSY : 0;
+ }
+
+ if (!try_write_lock)
+ return 0;
+
+ tryrwlock:
+ /* Try to write lock it */
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__,
+ 0);
+ return act == 0 ? 0 : EBUSY;
+}
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+
+static ETHR_INLINE void
+rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ /*
+ * Restore failed increment
+ */
+ act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ }
+}
+
+#endif
+
+static void
+rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
+ long initial)
+{
+ long act = initial, exp;
+ int scnt, start_scnt;
+ ethr_ts_event *tse = NULL;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ start_scnt = scnt = initial_spincount(&rwmtx->mtxb);
+
+ /*
+ * Spin trying to read lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ rwmutex_incdec_restore_failed_tryrlock(rwmtx);
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+#endif
+
+ while (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ if (scnt == 0) {
+ tse = ethr_get_ts_event();
+ if (update_spincount(&rwmtx->mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(&rwmtx->mtxb, tse, scnt,
+ ETHR_RWMTX_R_WAIT_FLG__, 1, 0);
+ goto done;
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ scnt--;
+ }
+ exp = act;
+ ETHR_ASSERT(scnt >= 0);
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_inc_read(&rwmtx->mtxb.flgs);
+ if ((act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) == 0)
+ goto done; /* Got it */
+#else
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp)
+ goto done; /* Got it */
+#endif
+ }
+
+ done:
+ if (tse)
+ ethr_leave_ts_event(tse);
+}
+
+static void
+rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
+ ethr_ts_event *tse,
+ long initial)
+{
+ long act = initial;
+ int scnt, start_scnt;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ start_scnt = scnt = initial_spincount(&rwmtx->mtxb);
+
+ /*
+ * Spin trying to read lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+
+ rwmutex_freqread_restore_failed_tryrlock(rwmtx, tse);
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ while (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ if (scnt == 0) {
+ if (update_spincount(&rwmtx->mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(&rwmtx->mtxb, tse, scnt,
+ ETHR_RWMTX_R_WAIT_FLG__, 1, 1);
+ return; /* Got it */
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ scnt--;
+ }
+
+ ETHR_ASSERT(scnt >= 0);
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if (act == ETHR_RWMTX_R_FLG__)
+ return; /* Got it */
+
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__))
+ break; /* Busy (need to restore inc) */
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ return; /* Got it */
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ return; /* Got it */
+ }
+ }
+}
+
+static void
+rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+{
+ write_lock_wait(&rwmtx->mtxb, initial, 1, 0);
+}
+
+static void
+rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+{
+ write_lock_wait(&rwmtx->mtxb, initial, 1, 1);
+}
+
+static ETHR_INLINE void
+rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, int act_initial)
+{
+ long act, act_mask;
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
+ /* r pend unlock mask may vary and must be retained */
+ act_mask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ }
+ else {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ /* rs mask may vary and must be retained */
+ act_mask = ETHR_RWMTX_RS_MASK__;
+#else
+ /* rs mask always zero */
+ ETHR_ASSERT((act_initial & ETHR_RWMTX_RS_MASK__) == 0);
+ ethr_atomic_set(&rwmtx->mtxb.flgs, new_initial);
+ return;
+#endif
+ }
+
+ act = act_initial;
+ while (1) {
+ long exp = act;
+ long new = new_initial + (act & act_mask);
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ }
+}
+
+#ifdef ETHR_DEBUG
+
+static void
+dbg_unlock_wake(ethr_rwmutex *rwmtx,
+ int have_w,
+ ethr_ts_event *tse)
+{
+ long exp, act, imask;
+
+ exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
+
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
+ imask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ else {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ imask = ETHR_RWMTX_RS_MASK__;
+#else
+ imask = 0;
+#endif
+ }
+
+ ETHR_ASSERT(tse);
+
+ if (is_w_waiter(tse)) {
+
+ exp |= ETHR_RWMTX_W_WAIT_FLG__;
+ if (rwmtx->rq_end) {
+ exp |= ETHR_RWMTX_R_WAIT_FLG__;
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((exp & ~imask) == (act & ~imask));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+
+ }
+ else {
+
+ exp |= ETHR_RWMTX_R_WAIT_FLG__;
+ if (rwmtx->rq_end->next != rwmtx->mtxb.q)
+ exp |= ETHR_RWMTX_W_WAIT_FLG__;
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((exp & ~imask) == (act & ~imask));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+
+ }
+}
+
+#endif
+
+static void
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial)
+{
+ long new, act = initial;
+ ethr_ts_event *tse;
+
+ if ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ if (!have_w)
+ return;
+ else {
+ while ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ long exp = act;
+ new = exp & ~ETHR_RWMTX_W_FLG__;
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ return;
+ }
+ }
+ }
+
+ ETHR_MTX_Q_LOCK(&rwmtx->mtxb.qlck);
+ tse = rwmtx->mtxb.q;
+
+ if (!have_w) {
+ if (!tse) {
+#ifdef ETHR_DEBUG
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
+#endif
+ goto already_served;
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act & ~ETHR_RWMTX_WAIT_FLGS__) {
+ already_served:
+ ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
+ return;
+ }
+ }
+
+#ifdef ETHR_DEBUG
+ dbg_unlock_wake(rwmtx, have_w, tse);
+#endif
+
+ if (is_w_waiter(tse)) {
+
+ if (!have_w) {
+ act = ethr_atomic_read_bor(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__);
+ ETHR_ASSERT((act & ~ETHR_RWMTX_WAIT_FLGS__) == 0);
+ ETHR_ASSERT(act & ETHR_RWMTX_W_WAIT_FLG__);
+ act |= ETHR_RWMTX_W_FLG__;
+ }
+
+ /*
+ * If we have multiple write waiters, there
+ * is no need to modify mtxb->flgs; otherwise,
+ * we need to clear the write wait bit...
+ */
+ if (!multiple_w_waiters(rwmtx)) {
+ new = ETHR_RWMTX_W_FLG__;
+ if (tse->next != rwmtx->mtxb.q) {
+ ETHR_ASSERT(tse->next->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ new |= ETHR_RWMTX_R_WAIT_FLG__;
+ }
+
+ rwlock_wake_set_flags(rwmtx, new, act);
+ }
+
+ wake_writer(&rwmtx->mtxb, 1);
+ }
+ else {
+ int rs;
+
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL) {
+ rs = rwmtx->tdata.rs;
+ new = (long) rs;
+ rwmtx->tdata.rs = 0;
+ }
+ else {
+ ethr_rwmutex_type type = rwmtx->type;
+ int length = (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ
+ ? reader_groups_array_size
+ : main_threads_array_size);
+ int ix;
+
+ rs = 0;
+ for (ix = 0; ix < length; ix++) {
+ int wrs = rwmtx->tdata.ra[ix].data.waiting_readers;
+ rwmtx->tdata.ra[ix].data.waiting_readers = 0;
+ ETHR_ASSERT(wrs >= 0);
+ if (wrs) {
+ rs += wrs;
+ rwmutex_freqread_rdrs_add(rwmtx, type, ix, wrs);
+ }
+ }
+ new = ETHR_RWMTX_R_FLG__;
+ }
+
+ if (rwmtx->rq_end->next != rwmtx->mtxb.q)
+ new |= ETHR_RWMTX_W_WAIT_FLG__;
+
+ rwlock_wake_set_flags(rwmtx, new, act);
+ wake_readers(rwmtx, rs);
+ }
+}
+
+static ethr_rwmtx_readers_array__ *
+alloc_readers_array(int length, ethr_rwmutex_lived lived)
+{
+ ethr_rwmtx_readers_array__ *ra;
+ size_t sz;
+ void *mem;
+
+    /* Allocate one extra element so that the array base can be
+       advanced to a cache line boundary (see below) */
+    sz = sizeof(ethr_rwmtx_readers_array__) * (length + 1);
+
+ switch (lived) {
+ case ETHR_RWMUTEX_LONG_LIVED:
+ mem = ethr_mem__.ll.alloc(sz);
+ break;
+ case ETHR_RWMUTEX_SHORT_LIVED:
+ mem = ethr_mem__.sl.alloc(sz);
+ break;
+ default:
+ mem = ethr_mem__.std.alloc(sz);
+ break;
+ }
+ if (!mem)
+ return NULL;
+
+ if ((((unsigned long) mem) & ETHR_CACHE_LINE_MASK) == 0) {
+ ra = (ethr_rwmtx_readers_array__ *) mem;
+ ra->data.byte_offset = 0;
+ }
+ else {
+ ra = ((ethr_rwmtx_readers_array__ *)
+ ((((unsigned long) mem) & ~ETHR_CACHE_LINE_MASK)
+ + ETHR_CACHE_LINE_SIZE));
+ ra->data.byte_offset = (int) ((unsigned long) ra
+ - (unsigned long) mem);
+ }
+ ra->data.lived = lived;
+ return ra;
+}
+
+static void
+free_readers_array(ethr_rwmtx_readers_array__ *ra)
+{
+ void *ptr = (void *) (((char *) ra) - ra->data.byte_offset);
+ switch (ra->data.lived) {
+ case ETHR_RWMUTEX_LONG_LIVED:
+ ethr_mem__.ll.free(ptr);
+ break;
+ case ETHR_RWMUTEX_SHORT_LIVED:
+ ethr_mem__.sl.free(ptr);
+ break;
+ default:
+ ethr_mem__.std.free(ptr);
+ break;
+ }
+}
+
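+/*
+ * Editor's illustration (not part of the library): the alignment scheme
+ * used by alloc_readers_array()/free_readers_array() in miniature. One
+ * extra element is allocated so that the array base can be advanced to
+ * the next cache-line boundary, and the displacement is stored in the
+ * block itself so that the original pointer can be recovered on free.
+ * A standalone sketch assuming a 64-byte cache line and elements at
+ * least one cache line large (as the real, padded elements are); all
+ * names are hypothetical:
+ */
+#if 0
+#include <stdlib.h>
+
+#define EX_CL_SZ   ((size_t) 64)
+#define EX_CL_MASK (EX_CL_SZ - 1)
+
+typedef struct {
+    int byte_offset;		/* displacement from the malloc()ed base */
+    char pad[EX_CL_SZ];		/* >= one cache line per element */
+} ex_elem_t;
+
+static ex_elem_t *
+ex_alloc_aligned(int length)
+{
+    void *mem = malloc(sizeof(ex_elem_t) * (length + 1)); /* +1: slack */
+    ex_elem_t *p;
+    if (!mem)
+	return NULL;
+    if ((((size_t) mem) & EX_CL_MASK) == 0)
+	p = (ex_elem_t *) mem;		/* already aligned */
+    else				/* advance to the next boundary */
+	p = (ex_elem_t *) ((((size_t) mem) & ~EX_CL_MASK) + EX_CL_SZ);
+    p->byte_offset = (int) ((char *) p - (char *) mem);
+    return p;
+}
+
+static void
+ex_free_aligned(ex_elem_t *p)
+{
+    free(((char *) p) - p->byte_offset); /* recover the original pointer */
+}
+#endif
+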
+int
+ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
+{
+ int res;
+ ethr_rwmtx_readers_array__ *ra = NULL;
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(rwmtx);
+ rwmtx->rq_end = NULL;
+ rwmtx->type = opt ? opt->type : ETHR_RWMUTEX_TYPE_NORMAL;
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ if (main_threads_array_size <= reader_groups_array_size) {
+ /* No point using reader groups... */
+ rwmtx->type = ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ;
+ }
+ /* Fall through */
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ int length;
+
+ length = (rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+ ? main_threads_array_size
+ : reader_groups_array_size);
+
+ if (length == 1) {
+ /* No point using a frequent reader type... */
+ rwmtx->type = ETHR_RWMUTEX_TYPE_NORMAL;
+ }
+ else {
+ int ix;
+ ra = alloc_readers_array(length,
+ (opt
+ ? opt->lived
+ : ETHR_RWMUTEX_UNKNOWN_LIVED));
+ if (!ra) {
+ res = ENOMEM;
+ goto error;
+ }
+
+ rwmtx->tdata.ra = ra;
+
+ for (ix = 0; ix < length; ix++) {
+ ethr_atomic_init(&rwmtx->tdata.ra[ix].data.readers, 0);
+ rwmtx->tdata.ra[ix].data.waiting_readers = 0;
+ }
+ break;
+ }
+    }
+	/* Fall through when length == 1 (a normal rwmutex is used instead) */
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ rwmtx->tdata.rs = 0;
+ break;
+ default:
+ res = EINVAL;
+ goto error;
+ }
+ res = mtxb_init(&rwmtx->mtxb,
+ default_rwmtx_main_spincount,
+ opt ? opt->main_spincount : -1,
+ default_rwmtx_aux_spincount,
+ opt ? opt->aux_spincount : -1);
+ if (res == 0)
+ return 0;
+
+ error:
+
+ if (ra)
+ free_readers_array(ra);
+
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return res;
+}
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+ return ethr_rwmutex_init_opt(rwmtx, NULL);
+}
+
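+/*
+ * Editor's illustration (not part of the library): initializing a
+ * reader-optimized rwmutex via ethr_rwmutex_init_opt(). The option
+ * fields set below are the ones consulted above; negative spincounts
+ * select the library defaults. The surrounding function is
+ * hypothetical, and the sketch assumes ethr_rwmutex_opt has no other
+ * mandatory fields.
+ */
+#if 0
+static int
+example_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+    ethr_rwmutex_opt opt;
+    opt.type = ETHR_RWMUTEX_TYPE_FREQUENT_READ;
+    opt.lived = ETHR_RWMUTEX_LONG_LIVED;
+    opt.main_spincount = -1;	/* -1: use default_rwmtx_main_spincount */
+    opt.aux_spincount = -1;	/* -1: use default_rwmtx_aux_spincount */
+    return ethr_rwmutex_init_opt(rwmtx, &opt); /* 0, ENOMEM, or EINVAL */
+}
+#endif
+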
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
+ long act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act == ETHR_RWMTX_R_FLG__)
+ rwmutex_try_complete_runlock(rwmtx, act, NULL, 0, 0, 0);
+ }
+ res = mtxb_destroy(&rwmtx->mtxb);
+ if (res != 0)
+ return res;
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
+ free_readers_array(rwmtx->tdata.ra);
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return 0;
+}
+
+#define ETHR_MAX_TRYRLOCK_TRIES 5
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+ int res = 0;
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL: {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
+ res = EBUSY;
+ else {
+ act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ rwmutex_incdec_restore_failed_tryrlock(rwmtx);
+ res = EBUSY;
+ }
+ }
+#else
+ long exp = 0;
+ int tries = 0;
+
+ while (1) {
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp) {
+ res = 0;
+ break;
+ }
+ if (tries > ETHR_MAX_TRYRLOCK_TRIES
+ || (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))) {
+ res = EBUSY;
+ break;
+ }
+ tries++;
+ exp = act;
+ }
+#endif
+ break;
+ }
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+
+ if (act != ETHR_RWMTX_R_FLG__) {
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ rwmutex_freqread_restore_failed_tryrlock(rwmtx, tse);
+ res = EBUSY;
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ break;
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ }
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(&rwmtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ return res;
+}
+
+void
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL: {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
+ rwmutex_normal_rlock_wait(rwmtx, act);
+#else
+ long exp = 0;
+
+ while (1) {
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp) {
+ break;
+ }
+
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ rwmutex_normal_rlock_wait(rwmtx, act);
+ break;
+ }
+ exp = act;
+ }
+#endif
+ break;
+ }
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+
+ if (act != ETHR_RWMTX_R_FLG__) {
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ rwmutex_freqread_rlock_wait(rwmtx, tse, act);
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ break;
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ }
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK(&rwmtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
+void
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(&rwmtx->mtxb);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_dec_read_relb(&rwmtx->mtxb.flgs);
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ }
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ act = rwmutex_freqread_rdrs_dec_read_relb(rwmtx, tse);
+
+ ETHR_ASSERT(act >= 0);
+
+ ETHR_WRITE_MEMORY_BARRIER;
+
+ if (act == 0) {
+
+#ifndef ETHR_WRITE_MEMORY_BARRIER_IS_FULL
+ ETHR_READ_MEMORY_BARRIER;
+#endif
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if ((act & ETHR_RWMTX_W_FLG__) == 0
+ && (act & (ETHR_RWMTX_WAIT_FLGS__
+ | ETHR_RWMTX_R_PEND_UNLCK_MASK__))) {
+ rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 0, 0);
+ }
+
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+ int res = 0;
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ res = EBUSY;
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+
+ res = 0;
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ do {
+
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_WAIT_FLGS__)) {
+ res = EBUSY;
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_MASK__) {
+ res = rwmutex_try_complete_runlock(rwmtx, act, NULL,
+ 0, 1, 1);
+ break;
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+
+ } while (act != 0);
+
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&rwmtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ return res;
+}
+
+void
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ rwmutex_normal_rwlock_wait(rwmtx, act);
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ do {
+
+ if (act != 0) {
+ rwmutex_freqread_rwlock_wait(rwmtx, act);
+ break;
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+
+ } while (act != 0);
+
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&rwmtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+}
+
+void
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&rwmtx->mtxb);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs,
+ 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ rwmutex_unlock_wake(rwmtx, 1, act);
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+ act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs, 0,
+ ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ rwmutex_unlock_wake(rwmtx, 1, act);
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
+#else
+/* -- pthread read/write mutex --------------------------------------------- */
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (!rwmtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ return pthread_rwlock_init(&rwmtx->pt_rwlock, write_pref_attr);
+}
+
+int
+ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
+{
+ return ethr_rwmutex_init(rwmtx);
+}
+
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = pthread_rwlock_destroy(&rwmtx->pt_rwlock);
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return res;
+}
+
+/* -- Exported symbols of inline functions --------------------------------- */
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_tryrlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_rlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_runlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_tryrwlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+    ethr_rwmutex_rwlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_rwunlock__(rwmtx);
+}
+
+#endif /* pthread */
+
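+/*
+ * Editor's illustration (not part of the library): the lock pairing
+ * expected by both rwmutex implementations above. Readers use
+ * rlock/runlock and writers rwlock/rwunlock; the try variants return 0
+ * on success and EBUSY otherwise. Names in the sketch are hypothetical.
+ */
+#if 0
+static void
+example_rwmutex_use(ethr_rwmutex *rwmtx)
+{
+    ethr_rwmutex_rlock(rwmtx);		/* shared; many readers at once */
+    /* ... read shared data ... */
+    ethr_rwmutex_runlock(rwmtx);
+
+    if (ethr_rwmutex_tryrwlock(rwmtx) == 0) { /* exclusive, non-blocking */
+	/* ... modify shared data ... */
+	ethr_rwmutex_rwunlock(rwmtx);
+    }
+}
+#endif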
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+static void
+hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+    long flgs = ethr_atomic_read(&mtxb->flgs);
+
+ ETHR_MTX_HARD_ASSERT(!(flgs & ETHR_RWMTX_R_WAIT_FLG__) || is_rwmtx);
+
+ if (!(flgs & ETHR_RWMTX_WAIT_FLGS__)) {
+ ETHR_MTX_HARD_ASSERT(!mtxb->q);
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rq_end);
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rs);
+ }
+ }
+ else {
+ ethr_ts_event *tse;
+ int ws = 0, rs = 0, rsf = 0, ref = 0;
+
+ ETHR_MTX_HARD_ASSERT(mtxb->q);
+
+ tse = mtxb->q;
+
+ do {
+ long type;
+
+ ETHR_MTX_HARD_ASSERT(tse->next->prev == tse);
+ ETHR_MTX_HARD_ASSERT(tse->prev->next == tse);
+
+ type = ethr_atomic_read(&tse->uaflgs);
+ ETHR_MTX_HARD_ASSERT(type == tse->uflgs);
+ switch (type) {
+ case ETHR_RWMTX_W_WAIT_FLG__:
+ ws++;
+ break;
+ case ETHR_RWMTX_R_WAIT_FLG__: {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(is_rwmtx);
+ if (!rsf)
+ rsf = 1;
+ ETHR_MTX_HARD_ASSERT(!ref);
+ if (rwmtx->rq_end == tse) {
+ ETHR_MTX_HARD_ASSERT(
+ tse->next == rwmtx->mtxb.q
+ || tse->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ref = 1;
+ }
+ rs++;
+ break;
+ }
+ default:
+ ETHR_MTX_HARD_ASSERT(! "invalid wait type found");
+ }
+
+ tse = tse->next;
+ } while (tse != mtxb->q);
+
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(rs == rwmtx->rs);
+ }
+
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ ETHR_MTX_HARD_ASSERT(ws == mtxb->ws);
+#endif
+
+ if (flgs & ETHR_RWMTX_W_WAIT_FLG__)
+ ETHR_MTX_HARD_ASSERT(ws);
+ else
+ ETHR_MTX_HARD_ASSERT(!ws);
+
+ if (flgs & ETHR_RWMTX_R_WAIT_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(is_rwmtx);
+ ETHR_MTX_HARD_ASSERT(rwmtx->rq_end);
+ ETHR_MTX_HARD_ASSERT(rsf);
+ ETHR_MTX_HARD_ASSERT(ref);
+ ETHR_MTX_HARD_ASSERT(rs);
+ }
+ else {
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rq_end);
+ }
+ ETHR_MTX_HARD_ASSERT(!rsf);
+ ETHR_MTX_HARD_ASSERT(!ref);
+ ETHR_MTX_HARD_ASSERT(!rs);
+ }
+ }
+}
+
+#elif defined(ETHR_MTX_HARD_DEBUG_WSQ)
+
+static void
+hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+ int ws = 0;
+ int rs = 0;
+
+ if (mtxb->q) {
+ ethr_ts_event *tse = mtxb->q;
+ do {
+ switch (tse->uflgs) {
+ case ETHR_RWMTX_W_WAIT_FLG__:
+ ws++;
+ break;
+ case ETHR_RWMTX_R_WAIT_FLG__:
+ rs++;
+ break;
+ default:
+ ETHR_MTX_HARD_ASSERT(0);
+ break;
+ }
+ tse = tse->next;
+ } while (tse != mtxb->q);
+ }
+
+ ETHR_MTX_HARD_ASSERT(mtxb->ws == ws);
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(rwmtx->rs == rs);
+ }
+}
+
+#endif
+
+#endif
diff --git a/erts/lib_src/common/ethread.c b/erts/lib_src/common/ethread.c
deleted file mode 100644
index 9c88233934..0000000000
--- a/erts/lib_src/common/ethread.c
+++ /dev/null
@@ -1,3369 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2010. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Description: A Thread library for use in the ERTS and other OTP
- * applications.
- * Author: Rickard Green
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#undef ETHR_STACK_GUARD_SIZE
-
-#if defined(ETHR_PTHREADS)
-
-#ifdef ETHR_TIME_WITH_SYS_TIME
-# include <time.h>
-# include <sys/time.h>
-#else
-# ifdef ETHR_HAVE_SYS_TIME_H
-# include <sys/time.h>
-# else
-# include <time.h>
-# endif
-#endif
-#include <sys/types.h>
-#include <unistd.h>
-#include <signal.h>
-
-#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
-# define ETHR_STACK_GUARD_SIZE (pagesize)
-#endif
-
-#elif defined(ETHR_WIN32_THREADS)
-
-#undef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <process.h>
-#include <winerror.h>
-
-#else
-#error "Missing thread implementation"
-#endif
-
-#include <limits.h>
-
-#define ETHR_FORCE_INLINE_FUNCS
-#define ETHR_INLINE_FUNC_NAME_(X) X ## __
-#include "ethread.h"
-
-#ifndef ETHR_HAVE_ETHREAD_DEFINES
-#error Missing configure defines
-#endif
-
-/*
- * ----------------------------------------------------------------------------
- * Common stuff
- * ----------------------------------------------------------------------------
- */
-
-#define ETHR_MAX_THREADS 2048 /* Has to be a power of 2 */
-
-static int ethr_not_inited = 1;
-
-#define ASSERT(A) ETHR_ASSERT((A))
-
-static void *(*allocp)(size_t) = malloc;
-static void *(*reallocp)(void *, size_t) = realloc;
-static void (*freep)(void *) = free;
-
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-#endif
-
-void *(*thread_create_prepare_func)(void) = NULL;
-void (*thread_create_parent_func)(void *) = NULL;
-void (*thread_create_child_func)(void *) = NULL;
-
-typedef struct ethr_xhndl_list_ ethr_xhndl_list;
-struct ethr_xhndl_list_ {
- ethr_xhndl_list *next;
- void (*funcp)(void);
-};
-
-static size_t pagesize;
-#define ETHR_PAGE_ALIGN(SZ) (((((size_t) (SZ)) - 1)/pagesize + 1)*pagesize)
-static size_t min_stack_size; /* kilo words */
-static size_t max_stack_size; /* kilo words */
-#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
-#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
-
-ethr_mutex xhndl_mtx;
-ethr_xhndl_list *xhndl_list;
-
-static int
-init_common(ethr_init_data *id)
-{
- int res;
- if (id) {
- allocp = id->alloc;
- reallocp = id->realloc;
- freep = id->free;
- thread_create_prepare_func = id->thread_create_prepare_func;
- thread_create_parent_func = id->thread_create_parent_func;
- thread_create_child_func = id->thread_create_child_func;
- }
- if (!allocp || !reallocp || !freep)
- return EINVAL;
-
-#ifdef _SC_PAGESIZE
- pagesize = (size_t) sysconf(_SC_PAGESIZE);
-#elif defined(HAVE_GETPAGESIZE)
- pagesize = (size_t) getpagesize();
-#else
- pagesize = (size_t) 4*1024; /* Guess 4 KB */
-#endif
-
- /* User needs at least 4 KB */
- min_stack_size = 4*1024;
-#if SIZEOF_VOID_P == 8
- /* Double that on 64-bit archs */
- min_stack_size *= 2;
-#endif
-    /* On some systems the system itself uses up to about 4 KB of it */
- min_stack_size += 4*1024;
- /* There should be room for signal handlers */
-#ifdef SIGSTKSZ
- min_stack_size += SIGSTKSZ;
-#else
- min_stack_size += pagesize;
-#endif
- /* The system may think that we need more stack */
-#if defined(PTHREAD_STACK_MIN)
- if (min_stack_size < PTHREAD_STACK_MIN)
- min_stack_size = PTHREAD_STACK_MIN;
-#elif defined(_SC_THREAD_STACK_MIN)
- {
- size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
- if (min_stack_size < thr_min_stk_sz)
- min_stack_size = thr_min_stk_sz;
- }
-#endif
- /* The guard is at least on some platforms included in the stack size
- passed when creating threads */
-#ifdef ETHR_STACK_GUARD_SIZE
- min_stack_size += ETHR_STACK_GUARD_SIZE;
-#endif
- min_stack_size = ETHR_PAGE_ALIGN(min_stack_size);
-
- min_stack_size = ETHR_B2KW(min_stack_size);
-
- max_stack_size = 32*1024*1024;
-#if SIZEOF_VOID_P == 8
- max_stack_size *= 2;
-#endif
- max_stack_size = ETHR_B2KW(max_stack_size);
-
- xhndl_list = NULL;
-
- res = ethr_mutex_init(&xhndl_mtx);
- if (res != 0)
- return res;
-
- res = ethr_mutex_set_forksafe(&xhndl_mtx);
- if (res != 0 && res != ENOTSUP)
- return res;
-
- return 0;
-}
-
-int
-ethr_install_exit_handler(void (*funcp)(void))
-{
- ethr_xhndl_list *xhp;
- int res;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- if (!funcp)
- return EINVAL;
-
- xhp = (ethr_xhndl_list *) (*allocp)(sizeof(ethr_xhndl_list));
- if (!xhp)
- return ENOMEM;
-
- res = ethr_mutex_lock__(&xhndl_mtx);
- if (res != 0) {
- (*freep)((void *) xhp);
- return res;
- }
-
- xhp->funcp = funcp;
- xhp->next = xhndl_list;
- xhndl_list = xhp;
-
- res = ethr_mutex_unlock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- return res;
-}
-
-static void
-run_exit_handlers(void)
-{
- int res;
- ethr_xhndl_list *xhp;
-
- res = ethr_mutex_lock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- xhp = xhndl_list;
-
- res = ethr_mutex_unlock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- for (; xhp; xhp = xhp->next)
- (*xhp->funcp)();
-}
-
-#if defined(ETHR_PTHREADS)
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- *                           pthread implementation                          *
-\*                                                                           */
-
-typedef struct {
- pthread_mutex_t mtx;
- pthread_cond_t cnd;
- int initialized;
- void *(*thr_func)(void *);
- void *arg;
- void *prep_func_res;
-} thr_wrap_data_;
-
-static int no_ethreads;
-static ethr_mutex no_ethrs_mtx;
-
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-#define ETHR_HAVE_PTHREAD_ATFORK 0
-#endif
-
-#if !ETHR_HAVE_PTHREAD_ATFORK
-#warning "Cannot enforce fork-safety"
-#endif
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-static pthread_rwlockattr_t write_pref_attr_data;
-static pthread_rwlockattr_t *write_pref_attr;
-#endif
-
-/*
- * ----------------------------------------------------------------------------
- * Static functions
- * ----------------------------------------------------------------------------
- */
-
-/*
- * Functions with the safe_ prefix abort on failure. To be used when
- * we cannot recover from a failure.
- */
-
-static ETHR_INLINE void
-safe_mutex_lock(pthread_mutex_t *mtxp)
-{
- int res = pthread_mutex_lock(mtxp);
- if (res != 0)
- abort();
-}
-
-static ETHR_INLINE void
-safe_mutex_unlock(pthread_mutex_t *mtxp)
-{
- int res = pthread_mutex_unlock(mtxp);
- if (res != 0)
- abort();
-}
-
-static ETHR_INLINE void
-safe_cond_signal(pthread_cond_t *cndp)
-{
- int res = pthread_cond_signal(cndp);
- if (res != 0)
- abort();
-}
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-static volatile int rec_mtx_attr_need_init = 1;
-static pthread_mutexattr_t rec_mtx_attr;
-
-static int init_rec_mtx_attr(void);
-
-#endif
-
-#if ETHR_HAVE_PTHREAD_ATFORK
-
-static ethr_mutex forksafe_mtx = ETHR_MUTEX_INITER;
-
-static void lock_mutexes(void)
-{
- ethr_mutex *m = &forksafe_mtx;
- do {
-
- safe_mutex_lock(&m->pt_mtx);
-
- m = m->next;
-
- } while (m != &forksafe_mtx);
-}
-
-static void unlock_mutexes(void)
-{
- ethr_mutex *m = forksafe_mtx.prev;
- do {
-
- safe_mutex_unlock(&m->pt_mtx);
-
- m = m->prev;
-
- } while (m->next != &forksafe_mtx);
-}
-
-#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
-
-static void reinit_mutexes(void)
-{
- ethr_mutex *m = forksafe_mtx.prev;
- do {
- pthread_mutexattr_t *attrp = NULL;
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
- if (m->is_rec_mtx) {
- if (rec_mtx_attr_need_init) {
- int res = init_rec_mtx_attr();
- if (res != 0)
- abort();
- }
- attrp = &rec_mtx_attr;
- }
-#endif
- if (pthread_mutex_init(&m->pt_mtx, attrp) != 0)
- abort();
-
- m = m->prev;
-
- } while (m->next != &forksafe_mtx);
-}
-
-#endif
-
-static int
-init_forksafe(void)
-{
- static int init_done = 0;
- int res = 0;
-
- if (init_done)
- return res;
-
- forksafe_mtx.prev = &forksafe_mtx;
- forksafe_mtx.next = &forksafe_mtx;
-
- res = pthread_atfork(lock_mutexes,
- unlock_mutexes,
-#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
- reinit_mutexes
-#else
- unlock_mutexes
-#endif
- );
-
- init_done = 1;
- return res;
-}
-
-#endif
-
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE)
-
-#define SET_REC_MUTEX_ATTR(AP) \
- pthread_mutexattr_settype((AP), PTHREAD_MUTEX_RECURSIVE);
-
-#elif defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-
-#define SET_REC_MUTEX_ATTR(AP) \
- pthread_mutexattr_setkind_np((AP), PTHREAD_MUTEX_RECURSIVE_NP);
-
-#else
-
-#error "Don't know how to set recursive mutex attributes"
-
-#endif
-
-static int
-init_rec_mtx_attr(void)
-{
- int res, mres;
- static pthread_mutex_t attrinit_mtx = PTHREAD_MUTEX_INITIALIZER;
-
- mres = pthread_mutex_lock(&attrinit_mtx);
- if (mres != 0)
- return mres;
- /* Got here under race conditions; check again ... */
- if (!rec_mtx_attr_need_init)
- res = 0;
- else {
- res = pthread_mutexattr_init(&rec_mtx_attr);
- if (res == 0) {
- res = SET_REC_MUTEX_ATTR(&rec_mtx_attr);
- if (res == 0)
- rec_mtx_attr_need_init = 0;
- else
- (void) pthread_mutexattr_destroy(&rec_mtx_attr);
- }
- }
-
- mres = pthread_mutex_unlock(&attrinit_mtx);
- if (mres != 0)
- return mres;
- return res;
-}
-
-#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-
-static ETHR_INLINE void thr_exit_cleanup(void)
-{
- run_exit_handlers();
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- ASSERT(no_ethreads > 0);
- no_ethreads--;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
-}
-
-static void *thr_wrapper(void *vtwd)
-{
- void *res;
- thr_wrap_data_ *twd = (thr_wrap_data_ *) vtwd;
- void *(*thr_func)(void *) = twd->thr_func;
- void *arg = twd->arg;
-
- safe_mutex_lock(&twd->mtx);
-
- if (thread_create_child_func)
- (*thread_create_child_func)(twd->prep_func_res);
-
- twd->initialized = 1;
-
- safe_cond_signal(&twd->cnd);
- safe_mutex_unlock(&twd->mtx);
-
- res = (*thr_func)(arg);
- thr_exit_cleanup();
- return res;
-}
-
-
-/*
- * ----------------------------------------------------------------------------
- * Exported functions
- * ----------------------------------------------------------------------------
- */
-
-int
-ethr_init(ethr_init_data *id)
-{
- int res;
-
- if (!ethr_not_inited)
- return EINVAL;
-
- ethr_not_inited = 0;
-
- res = init_common(id);
- if (res != 0)
- goto error;
-
-#if ETHR_HAVE_PTHREAD_ATFORK
- init_forksafe();
-#endif
-
- no_ethreads = 1;
- res = ethr_mutex_init(&no_ethrs_mtx);
- if (res != 0)
- goto error;
- res = ethr_mutex_set_forksafe(&no_ethrs_mtx);
- if (res != 0 && res != ENOTSUP)
- goto error;
-
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
- {
- int i;
- for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
-#ifdef ETHR_HAVE_PTHREAD_SPIN_LOCK
- res = pthread_spin_init(&ethr_atomic_protection__[i].u.spnlck, 0);
-#else
- res = ethr_mutex_init(&ethr_atomic_protection__[i].u.mtx);
-#endif
- if (res != 0)
- goto error;
- }
- }
-#endif
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-#if defined(ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP) \
- && defined(ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
- res = pthread_rwlockattr_init(&write_pref_attr_data);
- if (res != 0)
- goto error;
- res = pthread_rwlockattr_setkind_np(
- &write_pref_attr_data,
- PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
- if (res != 0)
- goto error;
- write_pref_attr = &write_pref_attr_data;
-#else
- write_pref_attr = NULL;
-#endif
-#endif
-
-
- return 0;
-
- error:
- ethr_not_inited = 1;
- return res;
-
-}
-
-int
-ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
- ethr_thr_opts *opts)
-{
- thr_wrap_data_ twd;
- pthread_attr_t attr;
- int res, dres;
- int use_stack_size = (opts && opts->suggested_stack_size >= 0
- ? opts->suggested_stack_size
- : -1 /* Use system default */);
-
-#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
- if (use_stack_size < 0)
- use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
-#endif
-
- twd.initialized = 0;
- twd.thr_func = func;
- twd.arg = arg;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!tid || !func) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
-    /* Call prepare func if it exists */
- if (thread_create_prepare_func)
- twd.prep_func_res = (*thread_create_prepare_func)();
- else
- twd.prep_func_res = NULL;
-
-    /* Set some thread attributes */
- res = pthread_attr_init(&attr);
- if (res != 0)
- goto cleanup_parent_func;
- res = pthread_mutex_init(&twd.mtx, NULL);
- if (res != 0)
- goto cleanup_attr_destroy;
- res = pthread_cond_init(&twd.cnd, NULL);
- if (res != 0)
- goto cleanup_mutex_destroy;
-
- /* Schedule child thread in system scope (if possible) ... */
- res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
- if (res != 0 && res != ENOTSUP)
- goto cleanup_cond_destroy;
-
- if (use_stack_size >= 0) {
- size_t suggested_stack_size = (size_t) use_stack_size;
- size_t stack_size;
-#ifdef DEBUG
-	suggested_stack_size /= 2; /* Make sure we have some margin */
-#endif
-#ifdef ETHR_STACK_GUARD_SIZE
- /* The guard is at least on some platforms included in the stack size
- passed when creating threads */
- suggested_stack_size += ETHR_B2KW(ETHR_STACK_GUARD_SIZE);
-#endif
- if (suggested_stack_size < min_stack_size)
- stack_size = ETHR_KW2B(min_stack_size);
- else if (suggested_stack_size > max_stack_size)
- stack_size = ETHR_KW2B(max_stack_size);
- else
- stack_size = ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
- (void) pthread_attr_setstacksize(&attr, stack_size);
- }
-
-#ifdef ETHR_STACK_GUARD_SIZE
- (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
-#endif
-
- /* Detached or joinable... */
- res = pthread_attr_setdetachstate(&attr,
- (opts && opts->detached
- ? PTHREAD_CREATE_DETACHED
- : PTHREAD_CREATE_JOINABLE));
- if (res != 0)
- goto cleanup_cond_destroy;
-
- res = pthread_mutex_lock(&twd.mtx);
-
- if (res != 0)
- goto cleanup_cond_destroy;
-
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- if (no_ethreads < ETHR_MAX_THREADS) {
- no_ethreads++;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- }
- else {
- res = EAGAIN;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- goto cleanup_mutex_unlock;
- }
-
- res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void *) &twd);
-
- if (res != 0) {
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- ASSERT(no_ethreads > 0);
- no_ethreads--;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- }
- else {
-
- /* Wait for child to initialize... */
- while (!twd.initialized) {
- res = pthread_cond_wait(&twd.cnd, &twd.mtx);
- if (res != 0 && res != EINTR)
- break;
- }
-
- }
-
- /* Cleanup... */
- cleanup_mutex_unlock:
- dres = pthread_mutex_unlock(&twd.mtx);
- if (res == 0)
- res = dres;
- cleanup_cond_destroy:
- dres = pthread_cond_destroy(&twd.cnd);
- if (res == 0)
- res = dres;
- cleanup_mutex_destroy:
- dres = pthread_mutex_destroy(&twd.mtx);
- if (res == 0)
- res = dres;
- cleanup_attr_destroy:
- dres = pthread_attr_destroy(&attr);
- if (res == 0)
- res = dres;
- cleanup_parent_func:
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- return res;
-}
-
-int
-ethr_thr_join(ethr_tid tid, void **res)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_join((pthread_t) tid, res);
-}
-
-int
-ethr_thr_detach(ethr_tid tid)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_detach((pthread_t) tid);
-}
-
-void
-ethr_thr_exit(void *res)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return;
- }
-#endif
- thr_exit_cleanup();
- pthread_exit(res);
-}
-
-ethr_tid
-ethr_self(void)
-{
- return (ethr_tid) pthread_self();
-}
-
-int
-ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
-{
- return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
-}
-
-
-/*
- * Mutex functions
- */
-
-
-int
-ethr_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#endif
- mtx->prev = NULL;
- mtx->next = NULL;
- mtx->is_rec_mtx = 0;
- return pthread_mutex_init(&mtx->pt_mtx, NULL);
-}
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-int
-ethr_rec_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#endif
- if (rec_mtx_attr_need_init)
- init_rec_mtx_attr();
-
- mtx->prev = NULL;
- mtx->next = NULL;
- mtx->is_rec_mtx = 1;
- return pthread_mutex_init(&mtx->pt_mtx, &rec_mtx_attr);
-}
-
-#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-
-int
-ethr_mutex_destroy(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (mtx->next) {
- ASSERT(mtx->prev);
- ethr_mutex_unset_forksafe(mtx);
- }
-#if ETHR_XCHK
- mtx->initialized = 0;
-#endif
- return pthread_mutex_destroy(&mtx->pt_mtx);
-}
-
-int ethr_mutex_set_forksafe(ethr_mutex *mtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-#if ETHR_HAVE_PTHREAD_ATFORK
- res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
- if (res != 0)
- return res;
- if (!forksafe_mtx.next) {
- ASSERT(!forksafe_mtx.prev);
- init_forksafe();
- }
- if (mtx->next) {
- /* forksafe already set for this mutex */
- ASSERT(mtx->prev);
- }
- else {
- mtx->next = forksafe_mtx.next;
- mtx->prev = &forksafe_mtx;
- forksafe_mtx.next->prev = mtx;
- forksafe_mtx.next = mtx;
- }
-
- res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
-
-#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
- res = ENOTSUP;
-#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
- return res;
-}
-
-int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-#if ETHR_HAVE_PTHREAD_ATFORK
- res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
- if (res != 0)
- return res;
- if (!forksafe_mtx.next) {
- ASSERT(!forksafe_mtx.prev);
- init_forksafe();
- }
- if (!mtx->next) {
- /* forksafe already unset for this mutex */
- ASSERT(!mtx->prev);
- }
- else {
- mtx->prev->next = mtx->next;
- mtx->next->prev = mtx->prev;
- mtx->next = NULL;
- mtx->prev = NULL;
- }
- res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
-
-#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
- res = ENOTSUP;
-#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
- return res;
-}
-
-int
-ethr_mutex_trylock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_trylock__(mtx);
-}
-
-int
-ethr_mutex_lock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_lock__(mtx);
-}
-
-int
-ethr_mutex_unlock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_unlock__(mtx);
-}
-
-/*
- * Condition variable functions
- */
-
-int
-ethr_cond_init(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd) {
- ASSERT(0);
- return EINVAL;
- }
- cnd->initialized = ETHR_COND_INITIALIZED;
-#endif
- return pthread_cond_init(&cnd->pt_cnd, NULL);
-}
-
-int
-ethr_cond_destroy(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
- cnd->initialized = 0;
-#endif
- return pthread_cond_destroy(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_signal(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_signal(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_broadcast(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_broadcast(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || cnd->initialized != ETHR_COND_INITIALIZED
- || !mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_wait(&cnd->pt_cnd, &mtx->pt_mtx);
-}
-
-int
-ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
-{
- struct timespec to;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || cnd->initialized != ETHR_COND_INITIALIZED
- || !mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED
- || !timeout) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- to.tv_sec = timeout->tv_sec;
- to.tv_nsec = timeout->tv_nsec;
-
- return pthread_cond_timedwait(&cnd->pt_cnd, &mtx->pt_mtx, &to);
-}
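
Note that the timeout is an absolute point in time; it is converted verbatim to a struct timespec for pthread_cond_timedwait(). A caller therefore builds the deadline from ethr_time_now(); a minimal sketch (wait_at_most_ms is a hypothetical helper, not part of the API):

    #include "ethread.h"

    static int
    wait_at_most_ms(ethr_cond *cnd, ethr_mutex *mtx, long ms)
    {
        ethr_timeval tv;
        int res = ethr_time_now(&tv);           /* now */
        if (res != 0)
            return res;
        tv.tv_sec += ms / 1000;                 /* add the interval */
        tv.tv_nsec += (ms % 1000) * 1000000;
        if (tv.tv_nsec >= 1000000000) {         /* normalize nanoseconds */
            tv.tv_sec++;
            tv.tv_nsec -= 1000000000;
        }
        return ethr_cond_timedwait(cnd, mtx, &tv);  /* 0 or ETIMEDOUT */
    }
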
-
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-int
-ethr_rwmutex_init(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx) {
- ASSERT(0);
- return EINVAL;
- }
- rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
-#endif
- return pthread_rwlock_init(&rwmtx->pt_rwlock, write_pref_attr);
-}
-
-int
-ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = pthread_rwlock_destroy(&rwmtx->pt_rwlock);
-#if ETHR_XCHK
- rwmtx->initialized = 0;
-#endif
- return res;
-}
-
-int
-ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_tryrlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_runlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_tryrwlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rwlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rwunlock__(rwmtx);
-}
-
-#endif /* #ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-/*
- * Current time
- */
-
-int
-ethr_time_now(ethr_timeval *time)
-{
- int res;
- struct timeval tv;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- res = gettimeofday(&tv, NULL);
- time->tv_sec = (long) tv.tv_sec;
- time->tv_nsec = ((long) tv.tv_usec)*1000;
- return res;
-}
-
-/*
- * Thread specific data
- */
-
-int
-ethr_tsd_key_create(ethr_tsd_key *keyp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!keyp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_key_create((pthread_key_t *) keyp, NULL);
-}
-
-int
-ethr_tsd_key_delete(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_key_delete((pthread_key_t) key);
-}
-
-int
-ethr_tsd_set(ethr_tsd_key key, void *value)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_setspecific((pthread_key_t) key, value);
-}
-
-void *
-ethr_tsd_get(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return NULL;
- }
-#endif
- return pthread_getspecific((pthread_key_t) key);
-}
-
-/*
- * Signal functions
- */
-
-#if ETHR_HAVE_ETHR_SIG_FUNCS
-
-int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!set && !oset) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_sigmask(how, set, oset);
-}
-
-int ethr_sigwait(const sigset_t *set, int *sig)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!set || !sig) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (sigwait(set, sig) < 0)
- return errno;
- return 0;
-}
-
-#endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
-
-#elif defined(ETHR_WIN32_THREADS)
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Native win32 threads implementation                                       *
-\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#define INVALID_TID -1
-
-/* The spin count values are more or less taken out of the blue */
-#define ETHR_MUTEX_SPIN_COUNT 5000
-#define ETHR_COND_SPIN_COUNT 1000
-
-ethr_tid serial_shift; /* Bits to shift serial when constructing a tid */
-ethr_tid last_serial; /* Last thread table serial used */
-ethr_tid last_ix; /* Last thread table index used */
-ethr_tid thr_ix_mask; /* Mask used to mask out thread table index from a tid */
-
-/* Event used for condition variables. One per thread. */
-/*typedef struct cnd_wait_event__ cnd_wait_event_;*/
-struct cnd_wait_event__ {
- HANDLE handle;
- cnd_wait_event_ *prev;
- cnd_wait_event_ *next;
- int in_queue;
-};
-
-/* Thread specific data. Stored in the thread table */
-typedef struct {
- ethr_tid thr_id;
- HANDLE thr_handle;
- ethr_tid joiner;
- void *result;
- cnd_wait_event_ wait_event;
-} thr_data_;
-
-/* Argument passed to thr_wrapper() */
-typedef struct {
- void * (*func)(void *);
- void * arg;
- thr_data_ *ptd;
- thr_data_ *td;
- int res;
- void *prep_func_res;
-} thr_wrap_data_;
-
-
-static CRITICAL_SECTION thr_table_cs; /* Critical section used to protect
- the thread table from concurrent
- accesses. */
-static CRITICAL_SECTION fake_static_init_cs; /* Critical section used to protect
-                                                initialization of 'statically
- initialized' mutexes */
-static thr_data_ * thr_table[ETHR_MAX_THREADS]; /* The thread table */
-
-static DWORD tls_own_thr_data;
-
-static thr_data_ main_thr_data;
-
-#define THR_IX(TID) ((TID) & thr_ix_mask)
-#define OWN_THR_DATA ((thr_data_ *) TlsGetValue(tls_own_thr_data))
-
-/*
- * ----------------------------------------------------------------------------
- * Static functions
- * ----------------------------------------------------------------------------
- */
-
-static int
-get_errno(void)
-{
- switch (GetLastError()) {
- case ERROR_INVALID_FUNCTION: return EINVAL; /* 1 */
- case ERROR_FILE_NOT_FOUND: return ENOENT; /* 2 */
- case ERROR_PATH_NOT_FOUND: return ENOENT; /* 3 */
- case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; /* 4 */
- case ERROR_ACCESS_DENIED: return EACCES; /* 5 */
- case ERROR_INVALID_HANDLE: return EBADF; /* 6 */
- case ERROR_ARENA_TRASHED: return ENOMEM; /* 7 */
- case ERROR_NOT_ENOUGH_MEMORY: return ENOMEM; /* 8 */
- case ERROR_INVALID_BLOCK: return ENOMEM; /* 9 */
- case ERROR_BAD_ENVIRONMENT: return E2BIG; /* 10 */
- case ERROR_BAD_FORMAT: return ENOEXEC; /* 11 */
- case ERROR_INVALID_ACCESS: return EINVAL; /* 12 */
- case ERROR_INVALID_DATA: return EINVAL; /* 13 */
- case ERROR_OUTOFMEMORY: return ENOMEM; /* 14 */
- case ERROR_INVALID_DRIVE: return ENOENT; /* 15 */
- case ERROR_CURRENT_DIRECTORY: return EACCES; /* 16 */
- case ERROR_NOT_SAME_DEVICE: return EXDEV; /* 17 */
- case ERROR_NO_MORE_FILES: return ENOENT; /* 18 */
- case ERROR_WRITE_PROTECT: return EACCES; /* 19 */
- case ERROR_BAD_UNIT: return EACCES; /* 20 */
- case ERROR_NOT_READY: return EACCES; /* 21 */
- case ERROR_BAD_COMMAND: return EACCES; /* 22 */
- case ERROR_CRC: return EACCES; /* 23 */
- case ERROR_BAD_LENGTH: return EACCES; /* 24 */
- case ERROR_SEEK: return EACCES; /* 25 */
- case ERROR_NOT_DOS_DISK: return EACCES; /* 26 */
- case ERROR_SECTOR_NOT_FOUND: return EACCES; /* 27 */
- case ERROR_OUT_OF_PAPER: return EACCES; /* 28 */
- case ERROR_WRITE_FAULT: return EACCES; /* 29 */
- case ERROR_READ_FAULT: return EACCES; /* 30 */
- case ERROR_GEN_FAILURE: return EACCES; /* 31 */
- case ERROR_SHARING_VIOLATION: return EACCES; /* 32 */
- case ERROR_LOCK_VIOLATION: return EACCES; /* 33 */
- case ERROR_WRONG_DISK: return EACCES; /* 34 */
- case ERROR_SHARING_BUFFER_EXCEEDED: return EACCES; /* 36 */
- case ERROR_BAD_NETPATH: return ENOENT; /* 53 */
- case ERROR_NETWORK_ACCESS_DENIED: return EACCES; /* 65 */
- case ERROR_BAD_NET_NAME: return ENOENT; /* 67 */
- case ERROR_FILE_EXISTS: return EEXIST; /* 80 */
- case ERROR_CANNOT_MAKE: return EACCES; /* 82 */
- case ERROR_FAIL_I24: return EACCES; /* 83 */
- case ERROR_INVALID_PARAMETER: return EINVAL; /* 87 */
- case ERROR_NO_PROC_SLOTS: return EAGAIN; /* 89 */
- case ERROR_DRIVE_LOCKED: return EACCES; /* 108 */
- case ERROR_BROKEN_PIPE: return EPIPE; /* 109 */
- case ERROR_DISK_FULL: return ENOSPC; /* 112 */
- case ERROR_INVALID_TARGET_HANDLE: return EBADF; /* 114 */
- case ERROR_WAIT_NO_CHILDREN: return ECHILD; /* 128 */
- case ERROR_CHILD_NOT_COMPLETE: return ECHILD; /* 129 */
- case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; /* 130 */
- case ERROR_NEGATIVE_SEEK: return EINVAL; /* 131 */
- case ERROR_SEEK_ON_DEVICE: return EACCES; /* 132 */
- case ERROR_DIR_NOT_EMPTY: return ENOTEMPTY;/* 145 */
- case ERROR_NOT_LOCKED: return EACCES; /* 158 */
- case ERROR_BAD_PATHNAME: return ENOENT; /* 161 */
- case ERROR_MAX_THRDS_REACHED: return EAGAIN; /* 164 */
- case ERROR_LOCK_FAILED: return EACCES; /* 167 */
- case ERROR_ALREADY_EXISTS: return EEXIST; /* 183 */
- case ERROR_INVALID_STARTING_CODESEG: return ENOEXEC; /* 188 */
- case ERROR_INVALID_STACKSEG: return ENOEXEC; /* 189 */
- case ERROR_INVALID_MODULETYPE: return ENOEXEC; /* 190 */
- case ERROR_INVALID_EXE_SIGNATURE: return ENOEXEC; /* 191 */
- case ERROR_EXE_MARKED_INVALID: return ENOEXEC; /* 192 */
- case ERROR_BAD_EXE_FORMAT: return ENOEXEC; /* 193 */
- case ERROR_ITERATED_DATA_EXCEEDS_64k: return ENOEXEC; /* 194 */
- case ERROR_INVALID_MINALLOCSIZE: return ENOEXEC; /* 195 */
- case ERROR_DYNLINK_FROM_INVALID_RING: return ENOEXEC; /* 196 */
- case ERROR_IOPL_NOT_ENABLED: return ENOEXEC; /* 197 */
- case ERROR_INVALID_SEGDPL: return ENOEXEC; /* 198 */
- case ERROR_AUTODATASEG_EXCEEDS_64k: return ENOEXEC; /* 199 */
- case ERROR_RING2SEG_MUST_BE_MOVABLE: return ENOEXEC; /* 200 */
- case ERROR_RELOC_CHAIN_XEEDS_SEGLIM: return ENOEXEC; /* 201 */
- case ERROR_INFLOOP_IN_RELOC_CHAIN: return ENOEXEC; /* 202 */
- case ERROR_FILENAME_EXCED_RANGE: return ENOENT; /* 206 */
- case ERROR_NESTING_NOT_ALLOWED: return EAGAIN; /* 215 */
- case ERROR_NOT_ENOUGH_QUOTA: return ENOMEM; /* 1816 */
- default: return EINVAL;
- }
-}
-
-static ETHR_INLINE thr_data_ *
-tid2thr(ethr_tid tid)
-{
- ethr_tid ix;
- thr_data_ *td;
-
- if (tid < 0)
- return NULL;
- ix = THR_IX(tid);
- if (ix >= ETHR_MAX_THREADS)
- return NULL;
- td = thr_table[ix];
- if (!td)
- return NULL;
- if (td->thr_id != tid)
- return NULL;
- return td;
-}
-
-static ETHR_INLINE void
-new_tid(ethr_tid *new_tid, ethr_tid *new_serial, ethr_tid *new_ix)
-{
- ethr_tid tmp_serial = last_serial;
- ethr_tid tmp_ix = last_ix + 1;
- ethr_tid start_ix = tmp_ix;
-
-
- do {
- if (tmp_ix >= ETHR_MAX_THREADS) {
- tmp_serial++;
- if ((tmp_serial << serial_shift) < 0)
- tmp_serial = 0;
- tmp_ix = 0;
- }
- if (!thr_table[tmp_ix]) {
- *new_tid = (tmp_serial << serial_shift) | tmp_ix;
- *new_serial = tmp_serial;
- *new_ix = tmp_ix;
- return;
- }
- tmp_ix++;
- } while (tmp_ix != start_ix);
-
- *new_tid = INVALID_TID;
- *new_serial = INVALID_TID;
- *new_ix = INVALID_TID;
-
-}
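
A tid thus packs a table-wrap serial into the high bits and a thread-table index into the low bits, so a stale tid referring to a reused slot compares unequal in tid2thr(). Worked example, assuming ETHR_MAX_THREADS is 1024 (the real value is set at build time):

    #include <assert.h>

    static void tid_packing_example(void)
    {
        long serial_shift = 10;                       /* log2(1024) */
        long thr_ix_mask = (1L << serial_shift) - 1;  /* 0x3ff */
        long serial = 5, ix = 17;
        long tid = (serial << serial_shift) | ix;     /* 5137 */
        assert((tid & thr_ix_mask) == ix);            /* THR_IX(tid) */
        assert((tid >> serial_shift) == serial);
    }
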
-
-
-static void thr_exit_cleanup(thr_data_ *td, void *res)
-{
-
- ASSERT(td == OWN_THR_DATA);
-
- run_exit_handlers();
-
- EnterCriticalSection(&thr_table_cs);
- CloseHandle(td->wait_event.handle);
- if (td->thr_handle == INVALID_HANDLE_VALUE) {
- /* We are detached; cleanup thread table */
- ASSERT(td->joiner == INVALID_TID);
- ASSERT(td == thr_table[THR_IX(td->thr_id)]);
- thr_table[THR_IX(td->thr_id)] = NULL;
- if (td != &main_thr_data)
- (*freep)((void *) td);
- }
- else {
- /* Save result and let joining thread cleanup */
- td->result = res;
- }
- LeaveCriticalSection(&thr_table_cs);
-}
-
-static unsigned __stdcall thr_wrapper(LPVOID args)
-{
- void *(*func)(void*) = ((thr_wrap_data_ *) args)->func;
- void *arg = ((thr_wrap_data_ *) args)->arg;
- thr_data_ *td = ((thr_wrap_data_ *) args)->td;
-
- td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (td->wait_event.handle == INVALID_HANDLE_VALUE
- || !TlsSetValue(tls_own_thr_data, (LPVOID) td)) {
- ((thr_wrap_data_ *) args)->res = get_errno();
- if (td->wait_event.handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->wait_event.handle);
- SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
- _endthreadex((unsigned) 0);
- ASSERT(0);
- }
-
- td->wait_event.prev = NULL;
- td->wait_event.next = NULL;
- td->wait_event.in_queue = 0;
-
- if (thread_create_child_func)
- (*thread_create_child_func)(((thr_wrap_data_ *) args)->prep_func_res);
-
- ASSERT(td == OWN_THR_DATA);
-
- ((thr_wrap_data_ *) args)->res = 0;
- SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
-
- thr_exit_cleanup(td, (*func)(arg));
- return 0;
-}
-
-int
-ethr_fake_static_mutex_init(ethr_mutex *mtx)
-{
- EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
-    /* We may have raced another initializer to get here; re-check
-       while holding the critical section... */
-    if (!mtx->initialized) {
-	if (!InitializeCriticalSectionAndSpinCount(&mtx->cs,
-						   ETHR_MUTEX_SPIN_COUNT)) {
-	    int res = get_errno();
-	    /* don't leave fake_static_init_cs locked on failure */
-	    LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
-	    return res;
-	}
-	mtx->initialized = ETHR_MUTEX_INITIALIZED;
-    }
- LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- return 0;
-}
-
-static int
-fake_static_cond_init(ethr_cond *cnd)
-{
- EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
-    /* We may have raced another initializer to get here; re-check
-       while holding the critical section... */
-    if (!cnd->initialized) {
-	if (!InitializeCriticalSectionAndSpinCount(&cnd->cs,
-						   ETHR_COND_SPIN_COUNT)) {
-	    int res = get_errno();
-	    /* don't leave fake_static_init_cs locked on failure */
-	    LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
-	    return res;
-	}
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- cnd->initialized = ETHR_COND_INITIALIZED;
- }
- LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- return 0;
-}
-
-#ifdef __GNUC__
-#define LL_LITERAL(X) X##LL
-#else
-#define LL_LITERAL(X) X##i64
-#endif
-
-#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
-
-static ETHR_INLINE void
-get_curr_time(long *sec, long *nsec)
-{
- SYSTEMTIME t;
- FILETIME ft;
- LONGLONG lft;
-
- GetSystemTime(&t);
- SystemTimeToFileTime(&t, &ft);
- memcpy(&lft, &ft, sizeof(lft));
- *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
- *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
-}
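
EPOCH_JULIAN_DIFF is the count of seconds between the FILETIME epoch (1601-01-01) and the Unix epoch (1970-01-01): the 369 intervening years contain 89 leap days (92 multiples of four, minus the non-leap centuries 1700, 1800 and 1900). A quick sanity check:

    #include <assert.h>

    static void check_epoch_julian_diff(void)
    {
        long long days = 369LL * 365 + 89;          /* 134774 days */
        assert(days * 86400 == 11644473600LL);      /* EPOCH_JULIAN_DIFF */
    }
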
-
-static cnd_wait_event_ *cwe_freelist;
-static CRITICAL_SECTION cwe_cs;
-
-static int
-alloc_cwe(cnd_wait_event_ **cwe_res)
-{
- cnd_wait_event_ *cwe;
- EnterCriticalSection(&cwe_cs);
- cwe = cwe_freelist;
- if (cwe) {
- cwe_freelist = cwe->next;
- LeaveCriticalSection(&cwe_cs);
- }
- else {
- LeaveCriticalSection(&cwe_cs);
- cwe = (*allocp)(sizeof(cnd_wait_event_));
- if (!cwe)
- return ENOMEM;
- cwe->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (cwe->handle == INVALID_HANDLE_VALUE) {
- int res = get_errno();
- (*freep)(cwe);
- return res;
- }
- }
- *cwe_res = cwe;
- return 0;
-}
-
-static void
-free_cwe(cnd_wait_event_ *cwe)
-{
- EnterCriticalSection(&cwe_cs);
- cwe->next = cwe_freelist;
- cwe_freelist = cwe;
- LeaveCriticalSection(&cwe_cs);
-}
-
-static ETHR_INLINE int
-condwait(ethr_cond *cnd,
- ethr_mutex *mtx,
- int with_timeout,
- ethr_timeval *timeout)
-{
- int res;
- thr_data_ *td;
- cnd_wait_event_ *cwe;
- DWORD code;
-    long time; /* time until timeout in milliseconds */
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-
- if (!mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED
- || !cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
- || (with_timeout && !timeout)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- td = OWN_THR_DATA;
- if (td)
- cwe = &td->wait_event;
- else { /* A non-ethread thread */
- res = alloc_cwe(&cwe);
- if (res != 0)
- return res;
- }
-
- if (!cnd->initialized)
- fake_static_cond_init(cnd);
- EnterCriticalSection(&cnd->cs);
-
- ASSERT(!cwe->in_queue);
- if (cnd->queue_end) {
- ASSERT(cnd->queue);
- cwe->prev = cnd->queue_end;
- cwe->next = NULL;
- cnd->queue_end->next = cwe;
- cnd->queue_end = cwe;
- }
- else {
- ASSERT(!cnd->queue);
- cwe->prev = NULL;
- cwe->next = NULL;
- cnd->queue = cwe;
- cnd->queue_end = cwe;
- }
- cwe->in_queue = 1;
-
- LeaveCriticalSection(&cnd->cs);
-
- LeaveCriticalSection(&mtx->cs);
-
- if (!with_timeout)
- time = INFINITE;
- else {
- long sec, nsec;
- ASSERT(timeout);
- get_curr_time(&sec, &nsec);
- time = (timeout->tv_sec - sec)*1000;
- time += (timeout->tv_nsec - nsec + 500)/1000000;
- if (time < 0)
- time = 0;
- }
-
- /* wait for event to signal */
- code = WaitForSingleObject(cwe->handle, time);
-
- EnterCriticalSection(&mtx->cs);
-
- if (code == WAIT_OBJECT_0) {
- /* We were woken by a signal or a broadcast ... */
- res = 0;
-
- /* ... no need to remove event from wait queue since this was
- taken care of by the signal or broadcast */
-#ifdef DEBUG
- EnterCriticalSection(&cnd->cs);
- ASSERT(!cwe->in_queue);
- LeaveCriticalSection(&cnd->cs);
-#endif
-
- }
- else {
- /* We timed out... */
- res = ETIMEDOUT;
-
- /* ... probably have to remove event from wait queue ... */
- EnterCriticalSection(&cnd->cs);
-
- if (cwe->in_queue) { /* ... but we must check that we are in queue
- since a signal or broadcast after timeout
- may have removed us from the queue */
- if (cwe->prev) {
- cwe->prev->next = cwe->next;
- }
- else {
- ASSERT(cnd->queue == cwe);
- cnd->queue = cwe->next;
- }
-
- if (cwe->next) {
- cwe->next->prev = cwe->prev;
- }
- else {
- ASSERT(cnd->queue_end == cwe);
- cnd->queue_end = cwe->prev;
- }
- cwe->in_queue = 0;
- }
-
- LeaveCriticalSection(&cnd->cs);
-
- }
-
- if (!td)
- free_cwe(cwe);
-
- return res;
-
-}
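
condwait() has the usual enqueue/release/wait/reacquire shape, so callers use the standard predicate loop: a waiter can be woken yet find its condition still false, and must re-test it. A minimal sketch, assuming mtx, cnd and ready were set up with the init functions in this file:

    #include "ethread.h"

    static ethr_mutex mtx;   /* ethr_mutex_init() elsewhere */
    static ethr_cond cnd;    /* ethr_cond_init() elsewhere */
    static int ready = 0;

    static void consumer(void)
    {
        ethr_mutex_lock(&mtx);
        while (!ready)                    /* always re-check the predicate */
            ethr_cond_wait(&cnd, &mtx);
        ethr_mutex_unlock(&mtx);
    }

    static void producer(void)
    {
        ethr_mutex_lock(&mtx);
        ready = 1;
        ethr_mutex_unlock(&mtx);
        ethr_cond_signal(&cnd);           /* wake one waiter */
    }
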
-
-
-/*
- * ----------------------------------------------------------------------------
- * Exported functions
- * ----------------------------------------------------------------------------
- */
-
-int
-ethr_init(ethr_init_data *id)
-{
-#ifdef _WIN32_WINNT
- DWORD major = (_WIN32_WINNT >> 8) & 0xff;
- DWORD minor = _WIN32_WINNT & 0xff;
- OSVERSIONINFO os_version;
-#endif
- int err = 0;
- thr_data_ *td = &main_thr_data;
- unsigned long i;
-
- if (!ethr_not_inited)
- return EINVAL;
-
-#ifdef _WIN32_WINNT
- os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
- GetVersionEx(&os_version);
- if (os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
- || os_version.dwMajorVersion < major
- || (os_version.dwMajorVersion == major
- && os_version.dwMinorVersion < minor))
- return ENOTSUP;
-#endif
-
- ASSERT(ETHR_MAX_THREADS > 0);
- for (i = ETHR_MAX_THREADS - 1, serial_shift = 0;
- i;
- serial_shift++, i >>= 1);
- thr_ix_mask = ~(~((ethr_tid) 0) << serial_shift);
-
- tls_own_thr_data = TlsAlloc();
- if (tls_own_thr_data == TLS_OUT_OF_INDEXES)
- goto error;
-
- last_serial = 0;
- last_ix = 0;
-
- td->thr_id = 0;
- td->thr_handle = GetCurrentThread();
- td->joiner = INVALID_TID;
- td->result = NULL;
- td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (td->wait_event.handle == INVALID_HANDLE_VALUE)
- goto error;
- td->wait_event.prev = NULL;
- td->wait_event.next = NULL;
- td->wait_event.in_queue = 0;
- thr_table[0] = td;
-
- if (!TlsSetValue(tls_own_thr_data, (LPVOID) td))
- goto error;
-
- ASSERT(td == OWN_THR_DATA);
-
-
- cwe_freelist = NULL;
- if (!InitializeCriticalSectionAndSpinCount(&cwe_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
-
- for (i = 1; i < ETHR_MAX_THREADS; i++)
- thr_table[i] = NULL;
-
- if (!InitializeCriticalSectionAndSpinCount(&thr_table_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
- if (!InitializeCriticalSectionAndSpinCount(&fake_static_init_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
- ethr_not_inited = 0;
-
- err = init_common(id);
- if (err)
- goto error;
-
- return 0;
-
- error:
- ethr_not_inited = 1;
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
- if (td->thr_handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->thr_handle);
- if (td->wait_event.handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->wait_event.handle);
- return err;
-}
-
-/*
- * Thread functions.
- */
-
-int
-ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
- ethr_thr_opts *opts)
-{
- int err = 0;
- thr_wrap_data_ twd;
- thr_data_ *my_td, *child_td = NULL;
- ethr_tid child_tid, child_serial, child_ix;
- DWORD code;
- unsigned ID;
- unsigned stack_size = 0; /* 0 = system default */
- int use_stack_size = (opts && opts->suggested_stack_size >= 0
- ? opts->suggested_stack_size
- : -1 /* Use system default */);
-
-#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
- if (use_stack_size < 0)
- use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
-#endif
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!tid || !func) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- my_td = OWN_THR_DATA;
- if (!my_td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- if (use_stack_size >= 0) {
- size_t suggested_stack_size = (size_t) use_stack_size;
-#ifdef DEBUG
-    suggested_stack_size /= 2; /* Make sure we have some margin */
-#endif
- if (suggested_stack_size < min_stack_size)
- stack_size = (unsigned) ETHR_KW2B(min_stack_size);
- else if (suggested_stack_size > max_stack_size)
- stack_size = (unsigned) ETHR_KW2B(max_stack_size);
- else
- stack_size =
- (unsigned) ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
- }
-
- EnterCriticalSection(&thr_table_cs);
-
-    /* Call prepare func if it exists */
- if (thread_create_prepare_func)
- twd.prep_func_res = (*thread_create_prepare_func)();
- else
- twd.prep_func_res = NULL;
-
- /* Find a new thread id to use */
- new_tid(&child_tid, &child_serial, &child_ix);
- if (child_tid == INVALID_TID) {
- err = EAGAIN;
- goto error;
- }
-
- ASSERT(child_ix == THR_IX(child_tid));
-
- *tid = child_tid;
-
- ASSERT(!thr_table[child_ix]);
-
- /* Alloc thread data */
- thr_table[child_ix] = child_td = (thr_data_ *) (*allocp)(sizeof(thr_data_));
- if (!child_td) {
- err = ENOMEM;
- goto error;
- }
-
- /* Init thread data */
-
- child_td->thr_id = child_tid;
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- child_td->joiner = INVALID_TID;
- child_td->result = NULL;
- /* 'child_td->wait_event' is initialized by child thread */
-
-
- /* Init thread wrapper data */
-
- twd.func = func;
- twd.arg = arg;
- twd.ptd = my_td;
- twd.td = child_td;
- twd.res = 0;
-
- ASSERT(!my_td->wait_event.in_queue);
-
- /* spawn the thr_wrapper function */
- child_td->thr_handle = (HANDLE) _beginthreadex(NULL,
- stack_size,
- thr_wrapper,
- (LPVOID) &twd,
- 0,
- &ID);
- if (child_td->thr_handle == (HANDLE) 0) {
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- goto error;
- }
-
- ASSERT(child_td->thr_handle != INVALID_HANDLE_VALUE);
-
- /* Wait for child to finish initialization */
- code = WaitForSingleObject(my_td->wait_event.handle, INFINITE);
- if (twd.res || code != WAIT_OBJECT_0) {
- err = twd.res;
- goto error;
- }
-
- if (opts && opts->detached) {
- CloseHandle(child_td->thr_handle);
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- }
-
- last_serial = child_serial;
- last_ix = child_ix;
-
- ASSERT(thr_table[child_ix] == child_td);
-
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- LeaveCriticalSection(&thr_table_cs);
-
- return 0;
-
- error:
-
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
-
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- if (child_ix != INVALID_TID) {
-
- if (child_td) {
- ASSERT(thr_table[child_ix] == child_td);
-
- if (child_td->thr_handle != INVALID_HANDLE_VALUE) {
- WaitForSingleObject(child_td->thr_handle, INFINITE);
- CloseHandle(child_td->thr_handle);
- }
-
- (*freep)((void *) child_td);
- thr_table[child_ix] = NULL;
- }
- }
-
- *tid = INVALID_TID;
-
- LeaveCriticalSection(&thr_table_cs);
- return err;
-}
-
-int ethr_thr_join(ethr_tid tid, void **res)
-{
- int err = 0;
- DWORD code;
- thr_data_ *td;
- thr_data_ *my_td;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- my_td = OWN_THR_DATA;
-
- if (!my_td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- EnterCriticalSection(&thr_table_cs);
-
- td = tid2thr(tid);
- if (!td)
- err = ESRCH;
- else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
- || td->joiner != INVALID_TID) /* i.e. someone else is joining */
- err = EINVAL;
- else if (my_td == td)
- err = EDEADLK;
- else
- td->joiner = my_td->thr_id;
-
- LeaveCriticalSection(&thr_table_cs);
-
- if (err)
- goto error;
-
- /* Wait for thread to terminate */
- code = WaitForSingleObject(td->thr_handle, INFINITE);
- if (code != WAIT_OBJECT_0)
- goto error;
-
- EnterCriticalSection(&thr_table_cs);
-
- ASSERT(td == tid2thr(tid));
- ASSERT(td->thr_handle != INVALID_HANDLE_VALUE);
- ASSERT(td->joiner == my_td->thr_id);
-
- if (res)
- *res = td->result;
-
- CloseHandle(td->thr_handle);
- ASSERT(td == thr_table[THR_IX(td->thr_id)]);
- thr_table[THR_IX(td->thr_id)] = NULL;
- if (td != &main_thr_data)
- (*freep)((void *) td);
-
- LeaveCriticalSection(&thr_table_cs);
-
- return 0;
-
- error:
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
- return err;
-}
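
Create/join round-trip for reference; a minimal sketch that ignores the options struct (passing NULL yields a joinable thread with the default stack):

    #include "ethread.h"

    static void *worker(void *arg)
    {
        return arg;                        /* handed back to the joiner */
    }

    static int spawn_and_join(void)
    {
        ethr_tid tid;
        void *res;
        int err = ethr_thr_create(&tid, worker, (void *) 0x2a, NULL);
        if (err != 0)
            return err;
        return ethr_thr_join(tid, &res);   /* res == (void *) 0x2a */
    }
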
-
-
-int
-ethr_thr_detach(ethr_tid tid)
-{
- int res;
- DWORD code;
- thr_data_ *td;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- if (!OWN_THR_DATA) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- EnterCriticalSection(&thr_table_cs);
-
- td = tid2thr(tid);
-    if (!td)
-	res = ESRCH;
-    else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
-	     || td->joiner != INVALID_TID) /* i.e. someone is joining */
-	res = EINVAL;
- else {
- res = 0;
- CloseHandle(td->thr_handle);
- td->thr_handle = INVALID_HANDLE_VALUE;
- }
-
- LeaveCriticalSection(&thr_table_cs);
-
- return res;
-}
-
-
-void
-ethr_thr_exit(void *res)
-{
- thr_data_ *td;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return;
- }
-#endif
- td = OWN_THR_DATA;
- if (!td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return;
- }
- thr_exit_cleanup(td, res);
- _endthreadex((unsigned) 0);
-}
-
-ethr_tid
-ethr_self(void)
-{
- thr_data_ *td;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return INVALID_TID;
- }
-#endif
-    /* It is okay for non-ethreads (i.e. native win32 threads) to call
-       ethr_self(); they will, however, get INVALID_TID back. */
- td = OWN_THR_DATA;
- if (!td)
- return INVALID_TID;
- return td->thr_id;
-}
-
-int
-ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
-{
- /* INVALID_TID does not equal any tid, not even the INVALID_TID */
- return tid1 == tid2 && tid1 != INVALID_TID;
-}
-
-/*
- * Mutex functions.
- */
-
-int
-ethr_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!InitializeCriticalSectionAndSpinCount(&mtx->cs, ETHR_MUTEX_SPIN_COUNT))
- return get_errno();
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#if ETHR_XCHK
- mtx->is_rec_mtx = 0;
-#endif
- return 0;
-}
-
-int
-ethr_rec_mutex_init(ethr_mutex *mtx)
-{
- int res;
- res = ethr_mutex_init(mtx);
-#if ETHR_XCHK
- mtx->is_rec_mtx = 1;
-#endif
- return res;
-}
-
-int
-ethr_mutex_destroy(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- DeleteCriticalSection(&mtx->cs);
- mtx->initialized = 0;
- return 0;
-}
-
-int ethr_mutex_set_forksafe(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return 0; /* No fork() */
-}
-
-int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return 0; /* No fork() */
-}
-
-int
-ethr_mutex_trylock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx
- || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return ethr_mutex_trylock__(mtx);
-}
-
-int
-ethr_mutex_lock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx
- || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_lock__(mtx);
-}
-
-int
-ethr_mutex_unlock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_unlock__(mtx);
-}
-
-/*
- * Condition variable functions.
- */
-
-int
-ethr_cond_init(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!InitializeCriticalSectionAndSpinCount(&cnd->cs, ETHR_COND_SPIN_COUNT))
- return get_errno();
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- cnd->initialized = ETHR_COND_INITIALIZED;
- return 0;
-}
-
-int
-ethr_cond_destroy(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
- || cnd->queue) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- DeleteCriticalSection(&cnd->cs);
- cnd->initialized = 0;
- return 0;
-}
-
-int
-ethr_cond_signal(ethr_cond *cnd)
-{
- cnd_wait_event_ *cwe;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!cnd->initialized) {
- int res = fake_static_cond_init(cnd);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&cnd->cs);
- cwe = cnd->queue;
- if (cwe) {
- ASSERT(cwe->in_queue);
- SetEvent(cnd->queue->handle);
- if (cwe->next)
- cwe->next->prev = NULL;
- else {
- ASSERT(cnd->queue_end == cnd->queue);
- cnd->queue_end = NULL;
- }
- cnd->queue = cwe->next;
- cwe->in_queue = 0;
- }
- LeaveCriticalSection(&cnd->cs);
- return 0;
-}
-
-int
-ethr_cond_broadcast(ethr_cond *cnd)
-{
- cnd_wait_event_ *cwe;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!cnd->initialized) {
- int res = fake_static_cond_init(cnd);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&cnd->cs);
- for (cwe = cnd->queue; cwe; cwe = cwe->next) {
- ASSERT(cwe->in_queue);
- SetEvent(cwe->handle);
- cwe->in_queue = 0;
- }
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- LeaveCriticalSection(&cnd->cs);
- return 0;
-
-}
-
-int
-ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
-{
- return condwait(cnd, mtx, 0, NULL);
-}
-
-int
-ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
-{
- return condwait(cnd, mtx, 1, timeout);
-}
-
-int
-ethr_time_now(ethr_timeval *time)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- get_curr_time(&time->tv_sec, &time->tv_nsec);
- return 0;
-}
-
-/*
- * Thread specific data
- */
-
-int
-ethr_tsd_key_create(ethr_tsd_key *keyp)
-{
- DWORD key;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!keyp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- key = TlsAlloc();
- if (key == TLS_OUT_OF_INDEXES)
- return get_errno();
- *keyp = (ethr_tsd_key) key;
- return 0;
-}
-
-int
-ethr_tsd_key_delete(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- if (!TlsFree((DWORD) key))
- return get_errno();
- return 0;
-}
-
-int
-ethr_tsd_set(ethr_tsd_key key, void *value)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- if (!TlsSetValue((DWORD) key, (LPVOID) value))
- return get_errno();
- return 0;
-}
-
-void *
-ethr_tsd_get(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return NULL;
- }
-#endif
- return (void *) TlsGetValue((DWORD) key);
-}
-
-/* Misc */
-
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-
-int
-ethr_do_spinlock_init(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
- return 0;
- else
- return get_errno();
-}
-
-int
-ethr_do_rwlock_init(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- lock->counter = 0;
- if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
- return 0;
- else
- return get_errno();
-}
-
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#else
-#error "Missing thread implementation"
-#endif
-
-/* Atomics */
-
-int
-ethr_atomic_init(ethr_atomic_t *var, long i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_init__(var, i);
-}
-
-int
-ethr_atomic_set(ethr_atomic_t *var, long i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_set__(var, i);
-}
-
-int
-ethr_atomic_read(ethr_atomic_t *var, long *i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !i) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_read__(var, i);
-}
-
-
-int
-ethr_atomic_addtest(ethr_atomic_t *var, long incr, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_addtest__(var, incr, testp);
-}
-
-int
-ethr_atomic_inctest(ethr_atomic_t *incp, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!incp || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_inctest__(incp, testp);
-}
-
-int
-ethr_atomic_dectest(ethr_atomic_t *decp, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!decp || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_dectest__(decp, testp);
-}
-
-int
-ethr_atomic_add(ethr_atomic_t *var, long incr)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_add__(var, incr);
-}
-
-int
-ethr_atomic_inc(ethr_atomic_t *incp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!incp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_inc__(incp);
-}
-
-int
-ethr_atomic_dec(ethr_atomic_t *decp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!decp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_dec__(decp);
-}
-
-int
-ethr_atomic_and_old(ethr_atomic_t *var, long mask, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_and_old__(var, mask, old);
-}
-
-int
-ethr_atomic_or_old(ethr_atomic_t *var, long mask, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_or_old__(var, mask, old);
-}
-
-int
-ethr_atomic_xchg(ethr_atomic_t *var, long new, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_xchg__(var, new, old);
-}
-
-int
-ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_cmpxchg__(var, new, expected, old);
-}
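
The cmpxchg wrapper returns the previous value through *old, so a caller detects success by comparing *old with the expected value and retries otherwise. A sketch of the usual retry loop against the signatures above (bounded_inc is a hypothetical helper):

    #include <errno.h>
    #include "ethread.h"

    static int bounded_inc(ethr_atomic_t *var, long limit)
    {
        long old, seen;
        int res = ethr_atomic_read(var, &old);
        if (res != 0)
            return res;
        for (;;) {
            if (old >= limit)
                return EAGAIN;                        /* treat as "full" */
            res = ethr_atomic_cmpxchg(var, old + 1, old, &seen);
            if (res != 0)
                return res;
            if (seen == old)
                return 0;                             /* exchange succeeded */
            old = seen;                               /* lost the race; retry */
        }
    }
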
-
-/* Spinlocks and rwspinlocks */
-
-int
-ethr_spinlock_init(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spinlock_init__(lock);
-}
-
-int
-ethr_spinlock_destroy(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spinlock_destroy__(lock);
-}
-
-
-int
-ethr_spin_unlock(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spin_unlock__(lock);
-}
-
-int
-ethr_spin_lock(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spin_lock__(lock);
-}
-
-int
-ethr_rwlock_init(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwlock_init__(lock);
-}
-
-int
-ethr_rwlock_destroy(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwlock_destroy__(lock);
-}
-
-int
-ethr_read_unlock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_read_unlock__(lock);
-}
-
-int
-ethr_read_lock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_read_lock__(lock);
-}
-
-int
-ethr_write_unlock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_write_unlock__(lock);
-}
-
-int
-ethr_write_lock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_write_lock__(lock);
-}
-
-
-int
-ethr_gate_init(ethr_gate *gp)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_init(&gp->mtx);
- if (res != 0)
- return res;
- res = ethr_cond_init(&gp->cnd);
- if (res != 0) {
- ethr_mutex_destroy(&gp->mtx);
- return res;
- }
- gp->open = 0;
- return 0;
-}
-
-int
-ethr_gate_destroy(ethr_gate *gp)
-{
- int res, dres;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_destroy(&gp->mtx);
- dres = ethr_cond_destroy(&gp->cnd);
- if (res == 0)
- res = dres;
- gp->open = 0;
- return res;
-}
-
-int
-ethr_gate_close(ethr_gate *gp)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- gp->open = 0;
- res = ethr_mutex_unlock__(&gp->mtx);
- return res;
-}
-
-int
-ethr_gate_let_through(ethr_gate *gp, unsigned no)
-{
- int res, ures;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- gp->open += no;
- res = (gp->open == 1
- ? ethr_cond_signal(&gp->cnd)
- : ethr_cond_broadcast(&gp->cnd));
- ures = ethr_mutex_unlock__(&gp->mtx);
-    if (res == 0)
- res = ures;
- return res;
-}
-
-int
-ethr_gate_swait(ethr_gate *gp, int spincount)
-{
- int res, ures, n;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- n = spincount;
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- while (n >= 0 && !gp->open) {
- res = ethr_mutex_unlock__(&gp->mtx);
- if (res != 0)
- return res;
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- n--;
- }
- while (!gp->open) {
- res = ethr_cond_wait(&gp->cnd, &gp->mtx);
- if (res != 0 && res != EINTR)
- goto done;
- }
- gp->open--;
- done:
- ures = ethr_mutex_unlock__(&gp->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-
-int
-ethr_gate_wait(ethr_gate *gp)
-{
- return ethr_gate_swait(gp, 0);
-}
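
A gate is thus a counted barrier: closed by default, with each passing waiter consuming one unit of the open count (the gp->open-- in ethr_gate_swait()). Typical use, sketched with hypothetical names:

    #include "ethread.h"

    static ethr_gate gate;                 /* ethr_gate_init() elsewhere */

    static void worker_loop(void)
    {
        for (;;) {
            ethr_gate_wait(&gate);         /* block until let through */
            /* ... handle one unit of work ... */
        }
    }

    static void post_work(unsigned n)
    {
        ethr_gate_let_through(&gate, n);   /* release up to n waiters */
    }
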
-
-
-/* rwmutex fallback */
-#ifdef ETHR_USE_RWMTX_FALLBACK
-
-int
-ethr_rwmutex_init(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_init(&rwmtx->mtx);
- if (res != 0)
- return res;
-    res = ethr_cond_init(&rwmtx->rcnd);
- if (res != 0)
- goto error_cleanup1;
- res = ethr_cond_init(&rwmtx->wcnd);
- if (res != 0)
- goto error_cleanup2;
- rwmtx->readers = 0;
- rwmtx->waiting_readers = 0;
- rwmtx->waiting_writers = 0;
-#if ETHR_XCHK
- rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
-#endif
- return 0;
- error_cleanup2:
- ethr_cond_destroy(&rwmtx->rcnd);
- error_cleanup1:
- ethr_mutex_destroy(&rwmtx->mtx);
- return res;
-}
-
-int
-ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
-{
- int res, pres;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
- rwmtx->initialized = 0;
-#endif
- res = ethr_mutex_destroy(&rwmtx->mtx);
- pres = ethr_cond_destroy(&rwmtx->rcnd);
- if (res == 0)
- res = pres;
- pres = ethr_cond_destroy(&rwmtx->wcnd);
- if (res == 0)
- res = pres;
- return res;
-}
-
-int
-ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_trylock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (rwmtx->waiting_writers) {
- res = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- return EBUSY;
- return res;
- }
- rwmtx->readers++;
- return ethr_mutex_unlock__(&rwmtx->mtx);
-}
-
-int
-ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- while (rwmtx->waiting_writers) {
- rwmtx->waiting_readers++;
- res = ethr_cond_wait(&rwmtx->rcnd, &rwmtx->mtx);
- rwmtx->waiting_readers--;
- if (res != 0 && res != EINTR) {
- (void) ethr_mutex_unlock__(&rwmtx->mtx);
- return res;
- }
- }
- rwmtx->readers++;
- return ethr_mutex_unlock__(&rwmtx->mtx);
-}
-
-int
-ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
-{
- int res, ures;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- rwmtx->readers--;
- if (!rwmtx->readers && rwmtx->waiting_writers)
- res = ethr_cond_signal(&rwmtx->wcnd);
- ures = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-int
-ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_trylock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (!rwmtx->readers && !rwmtx->waiting_writers)
- return 0;
- else {
- res = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- return EBUSY;
- return res;
- }
-}
-
-int
-ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (!rwmtx->readers && !rwmtx->waiting_writers)
- return 0;
-
- while (rwmtx->readers) {
- rwmtx->waiting_writers++;
- res = ethr_cond_wait(&rwmtx->wcnd, &rwmtx->mtx);
- rwmtx->waiting_writers--;
- if (res != 0 && res != EINTR) {
- (void) ethr_rwmutex_rwunlock(rwmtx);
- return res;
- }
- }
- return 0;
-}
-
-int
-ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
-{
- int res, ures;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = 0;
- if (rwmtx->waiting_writers)
- res = ethr_cond_signal(&rwmtx->wcnd);
- else if (rwmtx->waiting_readers)
- res = ethr_cond_broadcast(&rwmtx->rcnd);
- ures = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-#endif /* #ifdef ETHR_USE_RWMTX_FALLBACK */
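
Note the locking discipline of the fallback: a write lock simply keeps rwmtx->mtx locked for its whole duration, and ethr_rwmutex_rlock() blocks while any writer is waiting, so the fallback is writer-preferring. Usage is symmetric lock/unlock pairs; a brief sketch (table_generation is a hypothetical piece of shared data):

    #include "ethread.h"

    static ethr_rwmutex rwmtx;             /* ethr_rwmutex_init() elsewhere */
    static int table_generation;

    static int read_generation(int *out)
    {
        int res = ethr_rwmutex_rlock(&rwmtx);   /* shared with other readers */
        if (res != 0)
            return res;
        *out = table_generation;
        return ethr_rwmutex_runlock(&rwmtx);
    }

    static int bump_generation(void)
    {
        int res = ethr_rwmutex_rwlock(&rwmtx);  /* exclusive */
        if (res != 0)
            return res;
        table_generation++;
        return ethr_rwmutex_rwunlock(&rwmtx);
    }
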
-
-void
-ethr_compiler_barrier(void)
-{
-
-}
-
-#ifdef DEBUG
-
-#include <stdio.h>
-int ethr_assert_failed(char *f, int l, char *a)
-{
- fprintf(stderr, "%s:%d: Assertion failed: %s\n", f, l, a);
- abort();
- return 0;
-}
-
-#endif
-
-
diff --git a/erts/lib_src/pthread/ethr_event.c b/erts/lib_src/pthread/ethr_event.c
new file mode 100644
index 0000000000..6731c0eb46
--- /dev/null
+++ b/erts/lib_src/pthread/ethr_event.c
@@ -0,0 +1,219 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_EVENT_IMPL__
+
+#include "ethread.h"
+
+#if defined(ETHR_LINUX_FUTEX_IMPL__)
+/* --- Linux futex implementation of ethread events ------------------------- */
+
+#include <sched.h>
+#include <errno.h>
+
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
+
+int
+ethr_event_init(ethr_event *e)
+{
+ ethr_atomic_init(&e->futex, ETHR_EVENT_OFF__);
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ return 0;
+}
+
+static ETHR_INLINE int
+wait__(ethr_event *e, int spincount)
+{
+ unsigned sc = spincount;
+ int res;
+ long val;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ while (1) {
+ while (1) {
+ val = ethr_atomic_read(&e->futex);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (val != ETHR_EVENT_OFF_WAITER__) {
+ val = ethr_atomic_cmpxchg(&e->futex,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(val == ETHR_EVENT_OFF__);
+ }
+
+ res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__, ETHR_EVENT_OFF_WAITER__);
+ if (res == EINTR)
+ break;
+ if (res != 0 && res != EWOULDBLOCK)
+ ETHR_FATAL_ERROR__(res);
+ }
+
+ return res;
+}
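
wait__() publishes the waiter by moving the futex word from OFF to OFF_WAITER before sleeping, so the set side only needs a FUTEX_WAKE when it actually observes a waiter. The real set operation is an inline in ethr_event.h and is not part of this diff; a sketch under that assumption, using the value-returning atomics this file relies on:

    static void event_set_sketch(ethr_event *e)
    {
        long old = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
        if (old == ETHR_EVENT_OFF_WAITER__)            /* a waiter may sleep */
            (void) ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
    }
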
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+/* wait__() in this branch also yields periodically; the corresponding
+   define above is only visible in the futex branch. */
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
+
+int
+ethr_event_init(ethr_event *e)
+{
+ int res;
+ ethr_atomic_init(&e->state, ETHR_EVENT_OFF__);
+ res = pthread_mutex_init(&e->mtx, NULL);
+ if (res != 0)
+ return res;
+ res = pthread_cond_init(&e->cnd, NULL);
+ if (res != 0) {
+ pthread_mutex_destroy(&e->mtx);
+ return res;
+ }
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ int res;
+ res = pthread_mutex_destroy(&e->mtx);
+ if (res != 0)
+ return res;
+ res = pthread_cond_destroy(&e->cnd);
+ if (res != 0)
+ return res;
+ return 0;
+}
+
+static ETHR_INLINE int
+wait__(ethr_event *e, int spincount)
+{
+ int sc = spincount;
+ long val;
+ int res, ulres;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ while (1) {
+ val = ethr_atomic_read(&e->state);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (val != ETHR_EVENT_OFF_WAITER__) {
+ val = ethr_atomic_cmpxchg(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(val == ETHR_EVENT_OFF__);
+ }
+
+ ETHR_ASSERT(val == ETHR_EVENT_OFF_WAITER__
+ || val == ETHR_EVENT_OFF__);
+
+ res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+
+ while (1) {
+
+ val = ethr_atomic_read(&e->state);
+ if (val == ETHR_EVENT_ON__)
+ break;
+
+ res = pthread_cond_wait(&e->cnd, &e->mtx);
+ if (res == EINTR)
+ break;
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+
+ ulres = pthread_mutex_unlock(&e->mtx);
+ if (ulres != 0)
+ ETHR_FATAL_ERROR__(ulres);
+
+ return res; /* 0 || EINTR */
+}
+
+#else
+#error No ethread event implementation
+#endif
+
+void
+ethr_event_reset(ethr_event *e)
+{
+ ethr_event_reset__(e);
+}
+
+void
+ethr_event_set(ethr_event *e)
+{
+ ethr_event_set__(e);
+}
+
+int
+ethr_event_wait(ethr_event *e)
+{
+ return wait__(e, 0);
+}
+
+int
+ethr_event_swait(ethr_event *e, int spincount)
+{
+ return wait__(e, spincount);
+}
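
Both back ends implement the same single-waiter protocol: reset the event, re-check your wake-up condition, then wait; re-checking after the reset is what closes the race with a concurrent set. A usage sketch, assuming the value-returning atomic API used elsewhere in this file (flag is hypothetical):

    #include "ethread.h"

    static ethr_event ev;                  /* ethr_event_init() elsewhere */
    static ethr_atomic_t flag;             /* ethr_atomic_init(&flag, 0) */

    static void waiter(void)
    {
        while (!ethr_atomic_read(&flag)) {
            ethr_event_reset(&ev);         /* arm the event... */
            if (ethr_atomic_read(&flag))   /* ...then re-check the condition */
                break;
            ethr_event_wait(&ev);          /* 0 or EINTR; loop re-checks */
        }
    }

    static void signaller(void)
    {
        ethr_atomic_set(&flag, 1);         /* publish first... */
        ethr_event_set(&ev);               /* ...then wake the waiter */
    }
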
diff --git a/erts/lib_src/pthread/ethread.c b/erts/lib_src/pthread/ethread.c
new file mode 100644
index 0000000000..ea1d9d43f0
--- /dev/null
+++ b/erts/lib_src/pthread/ethread.c
@@ -0,0 +1,477 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Pthread implementation of the ethread library
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_CHILD_WAIT_SPIN_COUNT 4000
+
+#include <stdio.h>
+#ifdef ETHR_TIME_WITH_SYS_TIME
+# include <time.h>
+# include <sys/time.h>
+#else
+# ifdef ETHR_HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+#endif
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <limits.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHREAD_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+#error Missing configure defines
+#endif
+
+pthread_key_t ethr_ts_event_key__;
+static int child_wait_spin_count;
+
+/*
+ * --------------------------------------------------------------------------
+ * Static functions
+ * --------------------------------------------------------------------------
+ */
+
+static void thr_exit_cleanup(void)
+{
+ ethr_run_exit_handlers__();
+}
+
+
+/* Argument passed to thr_wrapper() */
+typedef struct {
+ ethr_atomic_t result;
+ ethr_ts_event *tse;
+ void *(*thr_func)(void *);
+ void *arg;
+ void *prep_func_res;
+} ethr_thr_wrap_data__;
+
+static void *thr_wrapper(void *vtwd)
+{
+ long result;
+ void *res;
+ ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
+ void *(*thr_func)(void *) = twd->thr_func;
+ void *arg = twd->arg;
+ ethr_ts_event *tsep = NULL;
+
+ result = (long) ethr_make_ts_event__(&tsep);
+
+ if (result == 0) {
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ if (ethr_thr_child_func__)
+ ethr_thr_child_func__(twd->prep_func_res);
+ }
+
+ tsep = twd->tse; /* We aren't allowed to follow twd after
+ result has been set! */
+
+ ethr_atomic_set(&twd->result, result);
+
+ ethr_event_set(&tsep->event);
+
+ res = result == 0 ? (*thr_func)(arg) : NULL;
+
+ thr_exit_cleanup();
+ return res;
+}
+
+/* internal exports */
+
+int ethr_set_tse__(ethr_ts_event *tsep)
+{
+ return pthread_setspecific(ethr_ts_event_key__, (void *) tsep);
+}
+
+ethr_ts_event *ethr_get_tse__(void)
+{
+ return pthread_getspecific(ethr_ts_event_key__);
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Exported functions
+ * --------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+ int res;
+
+ if (!ethr_not_inited__)
+ return EINVAL;
+
+ ethr_not_inited__ = 0;
+
+ res = ethr_init_common__(id);
+ if (res != 0)
+ goto error;
+
+ child_wait_spin_count = ETHR_CHILD_WAIT_SPIN_COUNT;
+ if (erts_get_cpu_configured(ethr_cpu_info__) == 1)
+ child_wait_spin_count = 0;
+
+    res = pthread_key_create(&ethr_ts_event_key__, ethr_ts_event_destructor__);
+    if (res != 0)
+	goto error;
+
+    return 0;
+ error:
+ ethr_not_inited__ = 1;
+ return res;
+
+}
+
+int
+ethr_late_init(ethr_late_init_data *id)
+{
+ int res = ethr_late_init_common__(id);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0;
+ return res;
+}
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ ethr_thr_wrap_data__ twd;
+ pthread_attr_t attr;
+ int res, dres;
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ ethr_atomic_init(&twd.result, -1);
+ twd.tse = ethr_get_ts_event();
+ twd.thr_func = func;
+ twd.arg = arg;
+
+ res = pthread_attr_init(&attr);
+ if (res != 0)
+ return res;
+
+ /* Error cleanup needed after this point */
+
+ /* Schedule child thread in system scope (if possible) ... */
+ res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
+ if (res != 0 && res != ENOTSUP)
+ goto error;
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+ size_t stack_size;
+#ifdef ETHR_DEBUG
+	suggested_stack_size /= 2; /* Make sure we have some margin */
+#endif
+#ifdef ETHR_STACK_GUARD_SIZE
+ /* The guard is at least on some platforms included in the stack size
+ passed when creating threads */
+ suggested_stack_size += ETHR_B2KW(ETHR_STACK_GUARD_SIZE);
+#endif
+ if (suggested_stack_size < ethr_min_stack_size__)
+ stack_size = ETHR_KW2B(ethr_min_stack_size__);
+ else if (suggested_stack_size > ethr_max_stack_size__)
+ stack_size = ETHR_KW2B(ethr_max_stack_size__);
+ else
+ stack_size = ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ (void) pthread_attr_setstacksize(&attr, stack_size);
+ }
+
+#ifdef ETHR_STACK_GUARD_SIZE
+ (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
+#endif
+
+ /* Detached or joinable... */
+ res = pthread_attr_setdetachstate(&attr,
+ (opts && opts->detached
+ ? PTHREAD_CREATE_DETACHED
+ : PTHREAD_CREATE_JOINABLE));
+ if (res != 0)
+ goto error;
+
+ /* Call prepare func if it exists */
+ if (ethr_thr_prepare_func__)
+ twd.prep_func_res = ethr_thr_prepare_func__();
+ else
+ twd.prep_func_res = NULL;
+
+ res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void*) &twd);
+
+ if (res == 0) {
+ int spin_count = child_wait_spin_count;
+
+ /* Wait for child to initialize... */
+ while (1) {
+ long result;
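+ /* Reset before re-reading the result; a set() that races with
+ the read then leaves the event set, so swait() returns at once
+ and no wakeup can be lost */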
+ ethr_event_reset(&twd.tse->event);
+
+ result = ethr_atomic_read(&twd.result);
+ if (result == 0)
+ break;
+
+ if (result > 0) {
+ res = (int) result;
+ goto error;
+ }
+
+ res = ethr_event_swait(&twd.tse->event, spin_count);
+ if (res != 0 && res != EINTR)
+ goto error;
+ spin_count = 0;
+ }
+ }
+
+ /* Cleanup... */
+
+ error:
+ dres = pthread_attr_destroy(&attr);
+ if (res == 0)
+ res = dres;
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+ return res;
+}
+
+int
+ethr_thr_join(ethr_tid tid, void **res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_join((pthread_t) tid, res);
+}
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_detach((pthread_t) tid);
+}
+
+void
+ethr_thr_exit(void *res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return;
+ }
+#endif
+ thr_exit_cleanup();
+ pthread_exit(res);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ return (ethr_tid) pthread_self();
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+ return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
+}
+
+
+/*
+ * Thread specific events
+ */
+
+ethr_ts_event *
+ethr_get_ts_event(void)
+{
+ return ethr_get_ts_event__();
+}
+
+void
+ethr_leave_ts_event(ethr_ts_event *tsep)
+{
+ ethr_leave_ts_event__(tsep);
+}
+
+/*
+ * Current time
+ */
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+ int res;
+ struct timeval tv;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ res = gettimeofday(&tv, NULL);
+ time->tv_sec = (long) tv.tv_sec;
+ time->tv_nsec = ((long) tv.tv_usec)*1000;
+ return res;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_key_create((pthread_key_t *) keyp, NULL);
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_key_delete((pthread_key_t) key);
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_setspecific((pthread_key_t) key, value);
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return NULL;
+ }
+#endif
+ return pthread_getspecific((pthread_key_t) key);
+}
+
+/*
+ * Signal functions
+ */
+
+#if ETHR_HAVE_ETHR_SIG_FUNCS
+
+int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!set && !oset) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_sigmask(how, set, oset);
+}
+
+int ethr_sigwait(const sigset_t *set, int *sig)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!set || !sig) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (sigwait(set, sig) < 0)
+ return errno;
+ return 0;
+}
+
+#endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
+
+ETHR_IMPL_NORETURN__
+ethr_abort__(void)
+{
+ abort();
+}
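
The creation handshake above is worth a note: the child publishes its init status into twd.result and then fires the creator's thread-specific event, while the creator resets the event, re-reads the result, and only then blocks. A minimal stand-alone sketch of the same pattern, using POSIX threads and C11 atomics rather than ethread's own primitives (the handshake_* names are illustrative, not part of the library):

/* Sketch of the creator/child handshake used by ethr_thr_create().
 * The mutex/condvar pair stands in for the creator's thread-specific
 * event; result must be initialized to -1 (pending) before use. */
#include <pthread.h>
#include <stdatomic.h>

typedef struct {
    atomic_long result;      /* -1 = pending, 0 = ok, >0 = error code */
    pthread_mutex_t mtx;
    pthread_cond_t cnd;
} handshake_t;

static void handshake_child_done(handshake_t *h, long res)
{
    atomic_store(&h->result, res);   /* publish first... */
    pthread_mutex_lock(&h->mtx);     /* ...then wake the creator */
    pthread_cond_signal(&h->cnd);
    pthread_mutex_unlock(&h->mtx);
}

static long handshake_parent_wait(handshake_t *h, int spin_count)
{
    long res;
    /* Spin a bounded number of times before blocking; the child is
       usually quick to initialize. */
    while ((res = atomic_load(&h->result)) == -1 && spin_count-- > 0)
        ;
    pthread_mutex_lock(&h->mtx);
    while ((res = atomic_load(&h->result)) == -1)
        pthread_cond_wait(&h->cnd, &h->mtx);
    pthread_mutex_unlock(&h->mtx);
    return res;
}
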
diff --git a/erts/lib_src/win/ethr_event.c b/erts/lib_src/win/ethr_event.c
new file mode 100644
index 0000000000..ddb4780ff1
--- /dev/null
+++ b/erts/lib_src/win/ethr_event.c
@@ -0,0 +1,120 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_EVENT_IMPL__
+
+#include "ethread.h"
+
+/* --- Windows implementation of thread events ------------------------------ */
+
+int
+ethr_event_init(ethr_event *e)
+{
+ e->state = ETHR_EVENT_OFF__;
+ e->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (e->handle == NULL) /* CreateEvent() returns NULL on failure */
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ BOOL res = CloseHandle(e->handle);
+ return res == 0 ? ethr_win_get_errno__() : 0;
+}
+
+void
+ethr_event_set(ethr_event *e)
+{
+ ethr_event_set__(e);
+}
+
+void
+ethr_event_reset(ethr_event *e)
+{
+ ethr_event_reset__(e);
+}
+
+static ETHR_INLINE int
+wait(ethr_event *e, int spincount)
+{
+ LONG state;
+ DWORD code;
+ int sc, res, until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ sc = spincount;
+
+ while (1) {
+ long on;
+ while (1) {
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ state = e->state;
+#else
+ state = InterlockedExchangeAdd(&e->state, (LONG) 0);
+#endif
+ if (state == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
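+ /* Announce a blocked waiter (OFF -> OFF_WAITER) so that a setter
+ knows it must signal the kernel event object */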
+ if (state != ETHR_EVENT_OFF_WAITER__) {
+ state = _InterlockedCompareExchange(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+ if (state == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(state == ETHR_EVENT_OFF__);
+ }
+
+ code = WaitForSingleObject(e->handle, INFINITE);
+ if (code != WAIT_OBJECT_0)
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+}
+
+int
+ethr_event_wait(ethr_event *e)
+{
+ return wait(e, 0);
+}
+
+int
+ethr_event_swait(ethr_event *e, int spincount)
+{
+ return wait(e, spincount);
+}
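
The wait loop above relies on a three-state protocol (ON, OFF, OFF_WAITER): a waiter spins on the state word, then CASes OFF to OFF_WAITER before blocking, so a setter only has to touch the kernel object when a waiter has announced itself. A portable sketch of the same state machine in C11 atomics (the kernel_block/kernel_wake primitives are placeholders for WaitForSingleObject/SetEvent):

#include <stdatomic.h>

enum { EV_ON = 1, EV_OFF = 0, EV_OFF_WAITER = -1 };

/* Hypothetical stand-ins for the kernel-level block/wake calls. */
void kernel_block(void);
void kernel_wake(void);

void event_set(atomic_int *state)
{
    /* Publish ON; only pay for a kernel wakeup if a waiter is blocked. */
    if (atomic_exchange(state, EV_ON) == EV_OFF_WAITER)
        kernel_wake();
}

void event_wait(atomic_int *state, int spin)
{
    for (;;) {
        int s = atomic_load(state);
        if (s == EV_ON)
            return;
        if (spin-- > 0)
            continue;                 /* still spinning */
        if (s != EV_OFF_WAITER) {
            /* Announce ourselves; a concurrent set may win the race. */
            int expected = EV_OFF;
            if (!atomic_compare_exchange_strong(state, &expected,
                                                EV_OFF_WAITER)
                && expected == EV_ON)
                return;
        }
        kernel_block();               /* re-check the state when woken */
    }
}
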
diff --git a/erts/lib_src/win/ethread.c b/erts/lib_src/win/ethread.c
new file mode 100644
index 0000000000..69523edf94
--- /dev/null
+++ b/erts/lib_src/win/ethread.c
@@ -0,0 +1,625 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Windows native threads implementation of the ethread library
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_CHILD_WAIT_SPIN_COUNT 4000
+
+#undef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+#include <winerror.h>
+#include <stdio.h>
+#include <limits.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHREAD_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+#error Missing configure defines
+#endif
+
+/* Argument passed to thr_wrapper() */
+typedef struct {
+ ethr_tid *tid;
+ ethr_atomic_t result;
+ ethr_ts_event *tse;
+ void *(*thr_func)(void *);
+ void *arg;
+ void *prep_func_res;
+} ethr_thr_wrap_data__;
+
+#define ETHR_INVALID_TID_ID -1
+
+struct ethr_join_data_ {
+ HANDLE handle;
+ void *res;
+};
+
+static ethr_atomic_t thread_id_counter;
+static DWORD own_tid_key;
+static ethr_tid main_thr_tid;
+static int child_wait_spin_count;
+
+DWORD ethr_ts_event_key__;
+
+#define ETHR_GET_OWN_TID__ ((ethr_tid *) TlsGetValue(own_tid_key))
+
+/*
+ * --------------------------------------------------------------------------
+ * Static functions
+ * --------------------------------------------------------------------------
+ */
+
+static void thr_exit_cleanup(ethr_tid *tid, void *res)
+{
+ ETHR_ASSERT(tid == ETHR_GET_OWN_TID__);
+
+ if (tid->jdata)
+ tid->jdata->res = res;
+
+ ethr_run_exit_handlers__();
+ ethr_ts_event_destructor__((void *) ethr_get_tse__());
+}
+
+static unsigned __stdcall thr_wrapper(LPVOID vtwd)
+{
+ ethr_tid my_tid;
+ long result;
+ void *res;
+ ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
+ void *(*thr_func)(void *) = twd->thr_func;
+ void *arg = twd->arg;
+ ethr_ts_event *tsep = NULL;
+
+ result = (long) ethr_make_ts_event__(&tsep);
+
+ if (result == 0) {
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ my_tid = *twd->tid;
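+ /* my_tid lives in this wrapper frame, which outlives thr_func(),
+ so publishing its address via TLS is safe */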
+ if (!TlsSetValue(own_tid_key, (LPVOID) &my_tid)) {
+ result = (long) ethr_win_get_errno__();
+ ethr_free_ts_event__(tsep);
+ }
+ else {
+ if (ethr_thr_child_func__)
+ ethr_thr_child_func__(twd->prep_func_res);
+ }
+ }
+
+ tsep = twd->tse; /* We must not dereference twd after the
+ result has been set! */
+
+ ethr_atomic_set(&twd->result, result);
+
+ ethr_event_set(&tsep->event);
+
+ res = result == 0 ? (*thr_func)(arg) : NULL;
+
+ thr_exit_cleanup(&my_tid, res);
+ return 0;
+}
+
+#ifdef __GNUC__
+#define LL_LITERAL(X) X##LL
+#else
+#define LL_LITERAL(X) X##i64
+#endif
+
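+/* Seconds between the FILETIME epoch (1601-01-01) and the Unix
+ epoch (1970-01-01) */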
+#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
+
+static ETHR_INLINE void
+get_curr_time(long *sec, long *nsec)
+{
+ SYSTEMTIME t;
+ FILETIME ft;
+ LONGLONG lft;
+
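+ /* A FILETIME counts 100-nanosecond intervals since 1601-01-01;
+ convert to Unix-epoch seconds and nanoseconds */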
+ GetSystemTime(&t);
+ SystemTimeToFileTime(&t, &ft);
+ memcpy(&lft, &ft, sizeof(lft));
+ *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
+ *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
+}
+
+/* internal exports */
+
+int
+ethr_win_get_errno__(void)
+{
+ return erts_get_last_win_errno();
+}
+
+int ethr_set_tse__(ethr_ts_event *tsep)
+{
+ return (TlsSetValue(ethr_ts_event_key__, (LPVOID) tsep)
+ ? 0
+ : ethr_win_get_errno__());
+}
+
+ethr_ts_event *ethr_get_tse__(void)
+{
+ return (ethr_ts_event *) TlsGetValue(ethr_ts_event_key__);
+}
+
+ETHR_IMPL_NORETURN__
+ethr_abort__(void)
+{
+#if 1
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Exported functions
+ * ----------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+#ifdef _WIN32_WINNT
+ DWORD major = (_WIN32_WINNT >> 8) & 0xff;
+ DWORD minor = _WIN32_WINNT & 0xff;
+ OSVERSIONINFO os_version;
+#endif
+ int err = 0;
+ unsigned long i;
+
+ if (!ethr_not_inited__)
+ return EINVAL;
+
+#ifdef _WIN32_WINNT
+ os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&os_version);
+ if (os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
+ || os_version.dwMajorVersion < major
+ || (os_version.dwMajorVersion == major
+ && os_version.dwMinorVersion < minor))
+ return ENOTSUP;
+#endif
+ err = ethr_init_common__(id);
+ if (err)
+ goto error;
+
+ own_tid_key = TlsAlloc();
+ if (own_tid_key == TLS_OUT_OF_INDEXES)
+ goto error;
+
+ ethr_atomic_init(&thread_id_counter, 0);
+
+ main_thr_tid.id = 0;
+ main_thr_tid.jdata = NULL;
+
+ if (!TlsSetValue(own_tid_key, (LPVOID) &main_thr_tid))
+ goto error;
+
+ ETHR_ASSERT(&main_thr_tid == ETHR_GET_OWN_TID__);
+
+ ethr_ts_event_key__ = TlsAlloc();
+ if (ethr_ts_event_key__ == TLS_OUT_OF_INDEXES)
+ goto error;
+
+ child_wait_spin_count = ETHR_CHILD_WAIT_SPIN_COUNT;
+ if (erts_get_cpu_configured(ethr_cpu_info__) == 1)
+ child_wait_spin_count = 0;
+
+ ethr_not_inited__ = 0;
+
+ return 0;
+
+ error:
+ ethr_not_inited__ = 1;
+ if (err == 0)
+ err = ethr_win_get_errno__();
+ ETHR_ASSERT(err != 0);
+ return err;
+}
+
+int
+ethr_late_init(ethr_late_init_data *id)
+{
+ int res = ethr_late_init_common__(id);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0;
+ return res;
+}
+
+
+/*
+ * Thread functions.
+ */
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ HANDLE handle = INVALID_HANDLE_VALUE;
+ int err = 0;
+ ethr_thr_wrap_data__ twd;
+ unsigned ID;
+ unsigned stack_size = 0; /* 0 = system default */
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ do {
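+ /* Allocate a thread id; skip the reserved invalid id on wrap-around */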
+ tid->id = ethr_atomic_inc_read(&thread_id_counter);
+ } while (tid->id == ETHR_INVALID_TID_ID);
+
+ if (opts && opts->detached)
+ tid->jdata = NULL;
+ else {
+ tid->jdata = ethr_mem__.std.alloc(sizeof(struct ethr_join_data_));
+ if (!tid->jdata)
+ return ENOMEM;
+ tid->jdata->handle = INVALID_HANDLE_VALUE;
+ tid->jdata->res = NULL;
+ }
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+#ifdef ETHR_DEBUG
+ suggested_stack_size /= 2; /* Make sure we get a margin */
+#endif
+ if (suggested_stack_size < ethr_min_stack_size__)
+ stack_size = (unsigned) ETHR_KW2B(ethr_min_stack_size__);
+ else if (suggested_stack_size > ethr_max_stack_size__)
+ stack_size = (unsigned) ETHR_KW2B(ethr_max_stack_size__);
+ else
+ stack_size = (unsigned)
+ ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ }
+
+ ethr_atomic_init(&twd.result, -1);
+
+ twd.tid = tid;
+ twd.thr_func = func;
+ twd.arg = arg;
+ twd.tse = ethr_get_ts_event();
+
+ /* Call prepare func if it exists */
+ if (ethr_thr_prepare_func__)
+ twd.prep_func_res = ethr_thr_prepare_func__();
+ else
+ twd.prep_func_res = NULL;
+
+ /* spawn the thr_wrapper function */
+ handle = (HANDLE) _beginthreadex(NULL, stack_size, thr_wrapper,
+ (LPVOID) &twd, 0, &ID);
+ if (handle == (HANDLE) 0) {
+ handle = INVALID_HANDLE_VALUE;
+ goto error;
+ }
+ else {
+ int spin_count = child_wait_spin_count;
+
+ ETHR_ASSERT(handle != INVALID_HANDLE_VALUE);
+
+ if (!tid->jdata)
+ CloseHandle(handle);
+ else
+ tid->jdata->handle = handle;
+
+ /* Wait for child to initialize... */
+ while (1) {
+ long result;
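+ /* Reset before re-reading the result so a racing set() is not lost */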
+ ethr_event_reset(&twd.tse->event);
+
+ result = ethr_atomic_read(&twd.result);
+ if (result == 0)
+ break;
+
+ if (result > 0) {
+ err = (int) result;
+ goto error;
+ }
+
+ err = ethr_event_swait(&twd.tse->event, spin_count);
+ if (err && err != EINTR)
+ goto error;
+ spin_count = 0;
+ }
+ }
+
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+
+ if (twd.tse)
+ ethr_leave_ts_event(twd.tse);
+
+ return 0;
+
+ error:
+
+ if (err == 0)
+ err = ethr_win_get_errno__();
+ ETHR_ASSERT(err != 0);
+
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+
+ if (handle != INVALID_HANDLE_VALUE) {
+ WaitForSingleObject(handle, INFINITE);
+ CloseHandle(handle);
+ }
+
+ if (tid->jdata) {
+ ethr_mem__.std.free(tid->jdata);
+ tid->jdata = NULL;
+ }
+
+ tid->id = ETHR_INVALID_TID_ID;
+
+ if (twd.tse)
+ ethr_leave_ts_event(twd.tse);
+
+ return err;
+}
+
+int ethr_thr_join(ethr_tid tid, void **res)
+{
+ DWORD code;
+
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (tid.id == ETHR_INVALID_TID_ID || !tid.jdata)
+ return EINVAL;
+
+ /* Wait for thread to terminate */
+ code = WaitForSingleObject(tid.jdata->handle, INFINITE);
+ if (code != WAIT_OBJECT_0)
+ return ethr_win_get_errno__();
+
+ CloseHandle(tid.jdata->handle);
+ tid.jdata->handle = INVALID_HANDLE_VALUE;
+
+ if (res)
+ *res = tid.jdata->res;
+
+ /*
+ * The user must not try to join or detach again, or
+ * bad things will happen... (user's responsibility)
+ */
+
+ ethr_mem__.std.free(tid.jdata);
+
+ return 0;
+}
+
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (tid.id == ETHR_INVALID_TID_ID || !tid.jdata)
+ return EINVAL;
+
+ CloseHandle(tid.jdata->handle);
+ tid.jdata->handle = INVALID_HANDLE_VALUE;
+
+ /*
+ * The user must not try to join or detach again, or
+ * bad things will happen... (user's responsibility)
+ */
+
+ ethr_mem__.std.free(tid.jdata);
+
+ return 0;
+}
+
+
+void
+ethr_thr_exit(void *res)
+{
+ ethr_tid *tid;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return;
+ }
+#endif
+ tid = ETHR_GET_OWN_TID__;
+ if (!tid) {
+ ETHR_ASSERT(0);
+ _endthreadex((unsigned) 0);
+ }
+ thr_exit_cleanup(tid, res);
+ _endthreadex((unsigned) 0);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ ethr_tid *tid;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ethr_tid dummy_tid = {ETHR_INVALID_TID_ID, NULL};
+ ETHR_ASSERT(0);
+ return dummy_tid;
+ }
+#endif
+ /* It is okay for non-ethreads (i.e. native win32 threads) to call
+ ethr_self(); they will, however, get an invalid tid back. */
+ tid = ETHR_GET_OWN_TID__;
+ if (!tid) {
+ ethr_tid dummy_tid = {ETHR_INVALID_TID_ID, NULL};
+ return dummy_tid;
+ }
+ return *tid;
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+ /* An invalid tid does not equal any tid, not even an invalid tid */
+ return tid1.id == tid2.id && tid1.id != ETHR_INVALID_TID_ID;
+}
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ get_curr_time(&time->tv_sec, &time->tv_nsec);
+ return 0;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+ DWORD key;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ key = TlsAlloc();
+ if (key == TLS_OUT_OF_INDEXES)
+ return ethr_win_get_errno__();
+ *keyp = (ethr_tsd_key) key;
+ return 0;
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsFree((DWORD) key))
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsSetValue((DWORD) key, (LPVOID) value))
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return NULL;
+ }
+#endif
+ return (void *) TlsGetValue((DWORD) key);
+}
+
+
+/*
+ * Thread specific events
+ */
+
+ethr_ts_event *
+ethr_get_ts_event(void)
+{
+ return ethr_get_ts_event__();
+}
+
+void
+ethr_leave_ts_event(ethr_ts_event *tsep)
+{
+ ethr_leave_ts_event__(tsep);
+}
+
+ethr_ts_event *
+ethr_create_ts_event__(void)
+{
+ ethr_ts_event *tsep;
+ ethr_make_ts_event__(&tsep);
+ return tsep;
+}
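
Both back-ends expose the same creation/join API; on Windows the join handle and result travel in the heap-allocated jdata that ethr_thr_join() frees. A minimal usage sketch (the worker is illustrative; it assumes ethr_init() has already run, as it does at emulator start-up):

#include "ethread.h"

/* Illustrative worker; returns its argument as the thread result. */
static void *worker(void *arg)
{
    return arg;
}

int spawn_and_join_example(void)
{
    ethr_tid tid;
    ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
    void *res;
    int err;

    opts.detached = 0;   /* joinable: the win back-end allocates jdata */
    err = ethr_thr_create(&tid, worker, (void *) "hello", &opts);
    if (err != 0)
        return err;

    /* Blocks until worker returns; frees the join data on success. */
    err = ethr_thr_join(tid, &res);
    return err;
}
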
diff --git a/erts/test/ethread_SUITE.erl b/erts/test/ethread_SUITE.erl
index bbc79e9381..0cc315e9be 100644
--- a/erts/test/ethread_SUITE.erl
+++ b/erts/test/ethread_SUITE.erl
@@ -37,21 +37,16 @@
equal_tids/1,
mutex/1,
try_lock_mutex/1,
- recursive_mutex/1,
time_now/1,
cond_wait/1,
- cond_timedwait/1,
broadcast/1,
detached_thread/1,
max_threads/1,
- forksafety/1,
- vfork/1,
tsd/1,
spinlock/1,
rwspinlock/1,
rwmutex/1,
- atomic/1,
- gate/1]).
+ atomic/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -60,21 +55,16 @@ tests() ->
equal_tids,
mutex,
try_lock_mutex,
- recursive_mutex,
time_now,
cond_wait,
- cond_timedwait,
broadcast,
detached_thread,
max_threads,
- forksafety,
- vfork,
tsd,
spinlock,
rwspinlock,
rwmutex,
- atomic,
- gate].
+ atomic].
all(doc) -> [];
all(suite) -> tests().
@@ -114,13 +104,6 @@ try_lock_mutex(suite) ->
try_lock_mutex(Config) ->
run_case(Config, "try_lock_mutex", "").
-recursive_mutex(doc) ->
- ["Tests recursive mutexes."];
-recursive_mutex(suite) ->
- [];
-recursive_mutex(Config) ->
- run_case(Config, "recursive_mutex", "").
-
time_now(doc) ->
["Tests ethr_time_now by comparing time values with Erlang."];
time_now(suite) ->
@@ -169,13 +152,6 @@ cond_wait(suite) ->
cond_wait(Config) ->
run_case(Config, "cond_wait", "").
-cond_timedwait(doc) ->
- ["Tests ethr_cond_timedwait with ethr_cond_signal and ethr_cond_broadcast."];
-cond_timedwait(suite) ->
- [];
-cond_timedwait(Config) ->
- run_case(Config, "cond_timedwait", "").
-
broadcast(doc) ->
["Tests that a ethr_cond_broadcast really wakes up all waiting threads"];
broadcast(suite) ->
@@ -197,25 +173,6 @@ max_threads(suite) ->
max_threads(Config) ->
run_case(Config, "max_threads", "").
-forksafety(doc) ->
- ["Tests forksafety."];
-forksafety(suite) ->
- [];
-forksafety(Config) ->
- run_case(Config, "forksafety", "").
-
-vfork(doc) ->
- ["Tests vfork with threads."];
-vfork(suite) ->
- case ?t:os_type() of
- {unix, osf1} ->
- {skip, "vfork() known to hang multi-threaded applications on osf1"};
- _ ->
- []
- end;
-vfork(Config) ->
- run_case(Config, "vfork", "").
-
tsd(doc) ->
["Tests thread specific data."];
tsd(suite) ->
@@ -251,13 +208,6 @@ atomic(suite) ->
atomic(Config) ->
run_case(Config, "atomic", "").
-gate(doc) ->
- ["Tests gates."];
-gate(suite) ->
- [];
-gate(Config) ->
- run_case(Config, "gate", "").
-
%%
%%
%% Auxiliary functions
diff --git a/erts/test/ethread_SUITE_data/ethread_tests.c b/erts/test/ethread_SUITE_data/ethread_tests.c
index f779f13c51..7fc71d8047 100644
--- a/erts/test/ethread_SUITE_data/ethread_tests.c
+++ b/erts/test/ethread_SUITE_data/ethread_tests.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -90,10 +90,12 @@ static void print_line(char *frmt,...)
print_eol();
}
+#if 0 /* Currently not used; silence annoying warning... */
static void print(char *frmt,...)
{
PRINT_VA_LIST(frmt);
}
+#endif
static void fail(char *frmt,...)
{
@@ -197,8 +199,8 @@ create_join_thread_test(void)
}
for (i = 1; i <= CJTT_NO_THREADS; i++) {
- int *tres;
- res = ethr_thr_join(cjtt_tids[i], (void **) &tres);
+ void *tres;
+ res = ethr_thr_join(cjtt_tids[i], &tres);
ASSERT(res == 0);
ASSERT(tres == &cjtt_res[i]);
ASSERT(cjtt_res[i] == i);
@@ -216,8 +218,8 @@ create_join_thread_test(void)
#define ETT_THREADS 100000
static ethr_tid ett_tids[3];
-static ethr_mutex ett_mutex = ETHR_MUTEX_INITER;
-static ethr_cond ett_cond = ETHR_COND_INITER;
+static ethr_mutex ett_mutex;
+static ethr_cond ett_cond;
static int ett_terminate;
static void *
@@ -234,14 +236,12 @@ static void *
ett_thread2(void *unused)
{
int res;
- res = ethr_mutex_lock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&ett_mutex);
while (!ett_terminate) {
res = ethr_cond_wait(&ett_cond, &ett_mutex);
ASSERT(res == 0);
}
- res = ethr_mutex_unlock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&ett_mutex);
return NULL;
}
@@ -250,6 +250,10 @@ equal_tids_test(void)
{
int res, i;
+ res = ethr_mutex_init(&ett_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&ett_cond);
+ ASSERT(res == 0);
ett_tids[0] = ethr_self();
res = ethr_thr_create(&ett_tids[1], ett_thread, (void *) &ett_tids[1], NULL);
@@ -298,13 +302,10 @@ equal_tids_test(void)
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&ett_mutex);
ett_terminate = 1;
- res = ethr_cond_signal(&ett_cond);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_cond_signal(&ett_cond);
+ ethr_mutex_unlock(&ett_mutex);
res = ethr_thr_join(ett_tids[1], NULL);
ASSERT(res == 0);
@@ -321,17 +322,14 @@ equal_tids_test(void)
* Tests mutexes.
*/
-static ethr_mutex mt_mutex = ETHR_MUTEX_INITER;
+static ethr_mutex mt_mutex;
static int mt_data;
void *
mt_thread(void *unused)
{
- int res;
-
print_line("Aux thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Aux thread locked mutex");
ASSERT(mt_data == 0);
@@ -345,8 +343,7 @@ mt_thread(void *unused)
ASSERT(mt_data == 1);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Aux thread unlocked mutex");
return NULL;
@@ -356,18 +353,18 @@ mt_thread(void *unused)
static void
mutex_test(void)
{
- int do_restart = 1;
int res;
ethr_tid tid;
- print_line("Running test with statically initialized mutex");
+ print_line("Trying to initialize mutex");
+ res = ethr_mutex_init(&mt_mutex);
+ ASSERT(res == 0);
+ print_line("Initialized mutex");
- restart:
mt_data = 0;
print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Main thread locked mutex");
ASSERT(mt_data == 0);
@@ -383,8 +380,7 @@ mutex_test(void)
ASSERT(mt_data == 0);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Main thread unlocked mutex");
print_line("Main thread goes to sleep for 1 second");
@@ -392,8 +388,7 @@ mutex_test(void)
print_line("Main thread woke up");
print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Main thread locked mutex");
ASSERT(mt_data == 1);
@@ -404,8 +399,7 @@ mutex_test(void)
ASSERT(mt_data == 1);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Main thread unlocked mutex");
res = ethr_thr_join(tid, NULL);
@@ -416,20 +410,6 @@ mutex_test(void)
ASSERT(res == 0);
print_line("Main thread destroyed mutex");
- if (do_restart) {
- do_restart = 0;
-
- print_line("Running test with dynamically initialized mutex");
-
- print_line("Trying to initialize mutex");
- res = ethr_mutex_init(&mt_mutex);
- ASSERT(res == 0);
- print_line("Initialized mutex");
-
- goto restart;
-
- }
-
}
/*
@@ -438,9 +418,9 @@ mutex_test(void)
* Tests try lock mutex operation.
*/
-static ethr_mutex tlmt_mtx1 = ETHR_MUTEX_INITER;
-static ethr_mutex tlmt_mtx2 = ETHR_MUTEX_INITER;
-static ethr_cond tlmt_cnd2 = ETHR_COND_INITER;
+static ethr_mutex tlmt_mtx1;
+static ethr_mutex tlmt_mtx2;
+static ethr_cond tlmt_cnd2;
static int tlmt_mtx1_locked;
static int tlmt_mtx1_do_unlock;
@@ -450,32 +430,24 @@ tlmt_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&tlmt_mtx1);
- ASSERT(res == 0);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx1);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_locked = 1;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
while (!tlmt_mtx1_do_unlock) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx1);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
+ ethr_mutex_unlock(&tlmt_mtx1);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_locked = 0;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
+ ethr_mutex_unlock(&tlmt_mtx2);
return NULL;
}
@@ -486,48 +458,49 @@ try_lock_mutex_test(void)
int i, res;
ethr_tid tid;
+ res = ethr_mutex_init(&tlmt_mtx1);
+ ASSERT(res == 0);
+ res = ethr_mutex_init(&tlmt_mtx2);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&tlmt_cnd2);
+ ASSERT(res == 0);
+
tlmt_mtx1_locked = 0;
tlmt_mtx1_do_unlock = 0;
res = ethr_thr_create(&tid, tlmt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
while (!tlmt_mtx1_locked) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
for (i = 0; i < 10; i++) {
res = ethr_mutex_trylock(&tlmt_mtx1);
ASSERT(res == EBUSY);
}
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_do_unlock = 1;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
while (tlmt_mtx1_locked) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
res = ethr_mutex_trylock(&tlmt_mtx1);
ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx1);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx1);
res = ethr_thr_join(tid, NULL);
ASSERT(res == 0);
@@ -541,159 +514,6 @@ try_lock_mutex_test(void)
}
/*
- * The recursive mutex test case.
- *
- * Tests recursive mutexes.
- */
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-static ethr_mutex rmt_mutex
-#ifdef ETHR_REC_MUTEX_INITER
- = ETHR_REC_MUTEX_INITER
-#endif
- ;
-static int rmt_data;
-
-void *
-rmt_thread(void *unused)
-{
- int res;
-
- print_line("Aux thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Aux thread locked mutex");
-
- ASSERT(rmt_data == 0);
-
- rmt_data = 1;
- print_line("Aux thread wrote");
-
- print_line("Aux thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Aux thread woke up");
-
- ASSERT(rmt_data == 1);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Aux thread unlocked mutex");
-
- return NULL;
-}
-
-#endif
-
-static void
-recursive_mutex_test(void)
-{
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
- int do_restart = 1;
- int res;
- ethr_tid tid;
-
-#ifdef ETHR_REC_MUTEX_INITER
- print_line("Running test with statically initialized mutex");
-#else
- goto dynamic_init;
-#endif
-
- restart:
- rmt_data = 0;
-
- print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex");
-
- print_line("Main thread tries to lock mutex again");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex again");
-
- ASSERT(rmt_data == 0);
-
- print_line("Main thread about to create aux thread");
- res = ethr_thr_create(&tid, rmt_thread, NULL, NULL);
- ASSERT(res == 0);
- print_line("Main thread created aux thread");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 0);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 0);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex again");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex");
-
- ASSERT(rmt_data == 1);
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 1);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex");
-
- res = ethr_thr_join(tid, NULL);
- ASSERT(res == 0);
- print_line("Main thread joined aux thread");
-
- res = ethr_mutex_destroy(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread destroyed mutex");
-
- if (do_restart) {
-#ifndef ETHR_REC_MUTEX_INITER
- dynamic_init:
-#endif
- do_restart = 0;
-
- print_line("Running test with dynamically initialized mutex");
-
- print_line("Trying to initialize mutex");
- res = ethr_rec_mutex_init(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Initialized mutex");
-
- goto restart;
- }
-
-#ifndef ETHR_REC_MUTEX_INITER
- succeed("Static initializer for recursive mutexes not supported");
-#endif
-
-#else /* #ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT */
- skip("Recursive mutexes not supported");
-#endif /* #ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-}
-
-/*
* The time now test.
*
* Tests ethr_time_now by comparing time values with Erlang.
@@ -763,106 +583,82 @@ time_now_test(void)
*/
-static ethr_mutex cwt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond cwt_cond = ETHR_COND_INITER;
+static ethr_mutex cwt_mutex;
+static ethr_cond cwt_cond;
static int cwt_counter;
void *
-cwt_thread(void *is_timedwait_test_ptr)
+cwt_thread(void *unused)
{
- int use_timedwait = *((int *) is_timedwait_test_ptr);
int res;
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
-
- if (use_timedwait) {
- ethr_timeval tv;
- res = ethr_time_now(&tv);
- ASSERT(res == 0);
- tv.tv_sec += 3600; /* Make sure we won't time out */
+ ethr_mutex_lock(&cwt_mutex);
- do {
- res = ethr_cond_timedwait(&cwt_cond, &cwt_mutex, &tv);
- } while (res == EINTR);
- ASSERT(res == 0);
- }
- else {
- do {
- res = ethr_cond_wait(&cwt_cond, &cwt_mutex);
- } while (res == EINTR);
- ASSERT(res == 0);
- }
+ do {
+ res = ethr_cond_wait(&cwt_cond, &cwt_mutex);
+ } while (res == EINTR);
+ ASSERT(res == 0);
cwt_counter++;
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
return NULL;
}
static void
-cond_wait_test(int is_timedwait_test)
+cond_wait_test(void)
{
- int do_restart = !is_timedwait_test;
ethr_tid tid1, tid2;
int res;
- if (!is_timedwait_test)
- print_line("Running test with statically initialized mutex and cond");
+ res = ethr_mutex_init(&cwt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&cwt_cond);
+ ASSERT(res == 0);
- restart:
/* Wake with signal */
cwt_counter = 0;
- res = ethr_thr_create(&tid1, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid1, cwt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_thr_create(&tid2, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid2, cwt_thread, NULL, NULL);
ASSERT(res == 0);
do_sleep(1); /* Make sure threads waits on cond var */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
- res = ethr_cond_signal(&cwt_cond); /* Wake one thread */
- ASSERT(res == 0);
+ ethr_cond_signal(&cwt_cond); /* Wake one thread */
do_sleep(1); /* Make sure awakened thread waits on mutex */
ASSERT(cwt_counter == 0);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened thread proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 1);
- res = ethr_cond_signal(&cwt_cond); /* Wake the other thread */
- ASSERT(res == 0);
+ ethr_cond_signal(&cwt_cond); /* Wake the other thread */
do_sleep(1); /* Make sure awakened thread waits on mutex */
ASSERT(cwt_counter == 1);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened thread proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 2);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
res = ethr_thr_join(tid1, NULL);
ASSERT(res == 0);
@@ -875,35 +671,30 @@ cond_wait_test(int is_timedwait_test)
cwt_counter = 0;
- res = ethr_thr_create(&tid1, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid1, cwt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_thr_create(&tid2, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid2, cwt_thread, NULL, NULL);
ASSERT(res == 0);
do_sleep(1); /* Make sure threads waits on cond var */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
- res = ethr_cond_broadcast(&cwt_cond); /* Wake the threads */
- ASSERT(res == 0);
+ ethr_cond_broadcast(&cwt_cond); /* Wake the threads */
do_sleep(1); /* Make sure awakened threads wait on mutex */
ASSERT(cwt_counter == 0);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened threads proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 2);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
res = ethr_thr_join(tid1, NULL);
ASSERT(res == 0);
@@ -916,113 +707,6 @@ cond_wait_test(int is_timedwait_test)
res = ethr_cond_destroy(&cwt_cond);
ASSERT(res == 0);
- if (do_restart) {
- do_restart = 0;
- res = ethr_mutex_init(&cwt_mutex);
- ASSERT(res == 0);
- res = ethr_cond_init(&cwt_cond);
- ASSERT(res == 0);
- print_line("Running test with dynamically initialized mutex and cond");
- goto restart;
- }
-}
-
-/*
- * The cond timedwait test case.
- *
- * Tests ethr_cond_timedwait with ethr_cond_signal and ethr_cond_broadcast.
- */
-
-#define CTWT_MAX_TIME_DIFF 100000
-
-static long
-ctwt_check_timeout(long to)
-{
- int res;
- ethr_timeval tva, tvb;
- long diff, abs_diff;
-
- res = ethr_time_now(&tva);
- ASSERT(res == 0);
-
- tva.tv_sec += to / 1000;
- tva.tv_nsec += (to % 1000) * 1000000;
- if (tva.tv_nsec >= 1000000000) {
- tva.tv_sec++;
- tva.tv_nsec -= 1000000000;
- ASSERT(tva.tv_nsec < 1000000000);
- }
-
- do {
- res = ethr_cond_timedwait(&cwt_cond, &cwt_mutex, &tva);
- } while (res == EINTR);
- ASSERT(res == ETIMEDOUT);
-
- res = ethr_time_now(&tvb);
- ASSERT(res == 0);
-
- diff = (tvb.tv_sec - tva.tv_sec) * 1000000;
- diff += (tvb.tv_nsec - tva.tv_nsec)/1000;
-
- print("Timeout=%ld; ", to);
- print("tva.tv_sec=%ld tva.tv_nsec=%ld; ", tva.tv_sec, tva.tv_nsec);
- print("tvb.tv_sec=%ld tvb.tv_nsec=%ld; ", tvb.tv_sec, tvb.tv_nsec);
- print_line("diff (tvb - tva) = %ld us", diff);
-
- abs_diff = (long) abs((int) diff);
-
- ASSERT(CTWT_MAX_TIME_DIFF >= abs_diff);
- return abs_diff;
-}
-
-static void
-cond_timedwait_test(void)
-{
- int do_restart = 1;
- long abs_diff, max_abs_diff = 0;
- int res;
-
-#define CTWT_UPD_MAX_DIFF if (abs_diff > max_abs_diff) max_abs_diff = abs_diff;
-
- print_line("Running test with statically initialized mutex and cond");
-
- print_line("CTWT_MAX_TIME_DIFF=%d", CTWT_MAX_TIME_DIFF);
-
- restart:
-
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
-
- abs_diff = ctwt_check_timeout(300);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(700);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(1000);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(2300);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(5100);
- CTWT_UPD_MAX_DIFF;
-
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
-
- cond_wait_test(1);
-
- if (do_restart) {
- do_restart = 0;
- res = ethr_mutex_init(&cwt_mutex);
- ASSERT(res == 0);
- res = ethr_cond_init(&cwt_cond);
- ASSERT(res == 0);
- print_line("Running test with dynamically initialized mutex and cond");
- goto restart;
- }
-
- print_line("Max absolute diff = %d us", max_abs_diff);
- succeed("Max absolute diff = %d us", max_abs_diff);
-
-#undef CTWT_UPD_MAX_DIFF
}
/*
@@ -1037,9 +721,9 @@ cond_timedwait_test(void)
static int bct_woken = 0;
static int bct_waiting = 0;
static int bct_done = 0;
-static ethr_mutex bct_mutex = ETHR_MUTEX_INITER;
-static ethr_cond bct_cond = ETHR_COND_INITER;
-static ethr_cond bct_cntrl_cond = ETHR_COND_INITER;
+static ethr_mutex bct_mutex;
+static ethr_cond bct_cond;
+static ethr_cond bct_cntrl_cond;
static void *
@@ -1047,30 +731,24 @@ bct_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&bct_mutex);
while (!bct_done) {
bct_waiting++;
- if (bct_waiting == BCT_THREADS) {
- res = ethr_cond_signal(&bct_cntrl_cond);
- ASSERT(res == 0);
- }
+ if (bct_waiting == BCT_THREADS)
+ ethr_cond_signal(&bct_cntrl_cond);
do {
res = ethr_cond_wait(&bct_cond, &bct_mutex);
} while (res == EINTR);
ASSERT(res == 0);
bct_woken++;
- if (bct_woken == BCT_THREADS) {
- res = ethr_cond_signal(&bct_cntrl_cond);
- ASSERT(res == 0);
- }
+ if (bct_woken == BCT_THREADS)
+ ethr_cond_signal(&bct_cntrl_cond);
}
- res = ethr_mutex_unlock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&bct_mutex);
return NULL;
}
@@ -1081,14 +759,20 @@ broadcast_test(void)
int res, i;
ethr_tid tid[BCT_THREADS];
+ res = ethr_mutex_init(&bct_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&bct_cntrl_cond);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&bct_cond);
+ ASSERT(res == 0);
+
for (i = 0; i < BCT_THREADS; i++) {
res = ethr_thr_create(&tid[i], bct_thread, NULL, NULL);
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&bct_mutex);
for (i = 0; i < BCT_NO_OF_WAITS; i++) {
@@ -1101,8 +785,7 @@ broadcast_test(void)
bct_woken = 0;
/* Wake all threads */
- res = ethr_cond_broadcast(&bct_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&bct_cond);
while (bct_woken != BCT_THREADS) {
res = ethr_cond_wait(&bct_cntrl_cond, &bct_mutex);
@@ -1114,13 +797,11 @@ broadcast_test(void)
bct_done = 1;
/* Wake all threads */
- res = ethr_cond_broadcast(&bct_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&bct_cond);
- res = ethr_mutex_unlock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&bct_mutex);
- for (i = 0; i < BCT_THREADS - 1; i++) {
+ for (i = 0; i < BCT_THREADS; i++) {
res = ethr_thr_join(tid[i], NULL);
ASSERT(res == 0);
}
@@ -1143,26 +824,22 @@ broadcast_test(void)
#define DT_THREADS (50*1024)
#define DT_BATCH_SIZE 64
-static ethr_mutex dt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond dt_cond = ETHR_COND_INITER;
+static ethr_mutex dt_mutex;
+static ethr_cond dt_cond;
static int dt_count;
static int dt_limit;
static void *
dt_thread(void *unused)
{
- int res;
-
- res = ethr_mutex_lock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&dt_mutex);
dt_count++;
if (dt_count >= dt_limit)
ethr_cond_signal(&dt_cond);
- res = ethr_mutex_unlock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&dt_mutex);
return NULL;
}
@@ -1174,6 +851,11 @@ detached_thread_test(void)
ethr_tid tid[DT_BATCH_SIZE];
int i, j, res;
+ res = ethr_mutex_init(&dt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&dt_cond);
+ ASSERT(res == 0);
+
thr_opts.detached = 1;
dt_count = 0;
dt_limit = 0;
@@ -1187,14 +869,12 @@ detached_thread_test(void)
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&dt_mutex);
while (dt_count < dt_limit) {
res = ethr_cond_wait(&dt_cond, &dt_mutex);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&dt_mutex);
print_line("dt_count = %d", dt_count);
}
@@ -1211,8 +891,8 @@ detached_thread_test(void)
#define MTT_TIMES 10
static int mtt_terminate;
-static ethr_mutex mtt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond mtt_cond = ETHR_COND_INITER;
+static ethr_mutex mtt_mutex;
+static ethr_cond mtt_cond;
static char mtt_string[22*MTT_TIMES]; /* 22 is enough for ", %d" */
@@ -1220,16 +900,14 @@ void *mtt_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mtt_mutex);
while (!mtt_terminate) {
res = ethr_cond_wait(&mtt_cond, &mtt_mutex);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mtt_mutex);
return NULL;
}
@@ -1265,16 +943,13 @@ mtt_create_join_threads(void)
print_line("%d = ethr_thr_create()", res);
print_line("Number of created threads: %d", no_threads);
- res = ethr_mutex_lock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mtt_mutex);
mtt_terminate = 1;
- res = ethr_cond_broadcast(&mtt_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&mtt_cond);
- res = ethr_mutex_unlock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mtt_mutex);
while (ix) {
res = ethr_thr_join(tids[--ix], NULL);
@@ -1292,9 +967,14 @@ mtt_create_join_threads(void)
static void
max_threads_test(void)
{
- int no_threads[MTT_TIMES], i, up, down, eq;
+ int no_threads[MTT_TIMES], i, up, down, eq, res;
char *str;
+ res = ethr_mutex_init(&mtt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&mtt_cond);
+ ASSERT(res == 0);
+
for (i = 0; i < MTT_TIMES; i++) {
no_threads[i] = mtt_create_join_threads();
}
@@ -1326,272 +1006,6 @@ max_threads_test(void)
}
-
-/*
- * The forksafety test case.
- *
- * Tests forksafety.
- */
-#ifdef __WIN32__
-#define NO_FORK_PRESENT
-#endif
-
-#ifndef NO_FORK_PRESENT
-
-static ethr_mutex ft_test_inner_mutex = ETHR_MUTEX_INITER;
-static ethr_mutex ft_test_outer_mutex = ETHR_MUTEX_INITER;
-static ethr_mutex ft_go_mutex = ETHR_MUTEX_INITER;
-static ethr_cond ft_go_cond = ETHR_COND_INITER;
-static int ft_go;
-static int ft_have_forked;
-
-static void *
-ft_thread(void *unused)
-{
- int res;
-
- res = ethr_mutex_lock(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
- res = ethr_mutex_lock(&ft_go_mutex);
- ASSERT(res == 0);
-
- ft_go = 1;
- res = ethr_cond_signal(&ft_go_cond);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&ft_go_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(!ft_have_forked);
-
- res = ethr_mutex_lock(&ft_test_inner_mutex);
- ASSERT(res == 0);
-
- res = ethr_mutex_unlock(&ft_test_inner_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(!ft_have_forked);
-
- res = ethr_mutex_unlock(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(ft_have_forked);
-
-
- return NULL;
-}
-
-#endif /* #ifndef NO_FORK_PRESENT */
-
-static void
-forksafety_test(void)
-{
-#ifdef NO_FORK_PRESENT
- skip("No fork() present; nothing to test");
-#elif defined(DEBUG)
- skip("Doesn't work in debug build");
-#else
- char snd_msg[] = "ok, bye!";
- char rec_msg[sizeof(snd_msg)*2];
- ethr_tid tid;
- int res;
- int fds[2];
-
-
- res = ethr_mutex_set_forksafe(&ft_test_inner_mutex);
- if (res == ENOTSUP) {
- skip("Forksafety not supported on this platform!");
- }
- ASSERT(res == 0);
- res = ethr_mutex_set_forksafe(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
-
- res = pipe(fds);
- ASSERT(res == 0);
-
- ft_go = 0;
- ft_have_forked = 0;
-
- res = ethr_mutex_lock(&ft_go_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_create(&tid, ft_thread, NULL, NULL);
- ASSERT(res == 0);
-
- do {
- res = ethr_cond_wait(&ft_go_cond, &ft_go_mutex);
- } while (res == EINTR || !ft_go);
- ASSERT(res == 0);
-
- res = ethr_mutex_unlock(&ft_go_mutex);
- ASSERT(res == 0);
-
- res = fork();
- ft_have_forked = 1;
- if (res == 0) {
- close(fds[0]);
- res = ethr_mutex_lock(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_lock(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_unlock(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_unlock(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
-
- res = ethr_mutex_destroy(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_destroy(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
-
- res = (int) write(fds[1], (void *) snd_msg, sizeof(snd_msg));
- if (res != sizeof(snd_msg))
- _exit(1);
- close(fds[1]);
- _exit(0);
- }
- ASSERT(res > 0);
- close(fds[1]);
-
- res = (int) read(fds[0], (void *) rec_msg, sizeof(rec_msg));
- ASSERT(res == (int) sizeof(snd_msg));
- ASSERT(strcmp(snd_msg, rec_msg) == 0);
-
- close(fds[0]);
-#endif
-}
-
-
-/*
- * The vfork test case.
- *
- * Tests vfork with threads.
- */
-
-#ifdef __WIN32__
-#define NO_VFORK_PRESENT
-#endif
-
-#ifndef NO_VFORK_PRESENT
-
-#undef vfork
-
-static ethr_mutex vt_mutex = ETHR_MUTEX_INITER;
-
-static void *
-vt_thread(void *vprog)
-{
- char *prog = (char *) vprog;
- int res;
- char snd_msg[] = "ok, bye!";
- char rec_msg[sizeof(snd_msg)*2];
- int fds[2];
- char closefd[20];
- char writefd[20];
-
- res = pipe(fds);
- ASSERT(res == 0);
-
- res = sprintf(closefd, "%d", fds[0]);
- ASSERT(res <= 20);
- res = sprintf(writefd, "%d", fds[1]);
- ASSERT(res <= 20);
-
- print("parent: About to vfork and execute ");
- print("execlp(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", NULL)",
- prog, prog, "vfork", "exec", snd_msg, closefd, writefd);
- print_line(" in child");
- res = vfork();
- if (res == 0) {
- execlp(prog, prog, "vfork", "exec", snd_msg, closefd, writefd, NULL);
- _exit(1);
- }
- ASSERT(res > 0);
-
- print_line("parent: I'm back");
-
- close(fds[1]);
-
- res = (int) read(fds[0], (void *) rec_msg, sizeof(rec_msg));
- print_line("parent: %d = read()", res);
- print_line("parent: rec_msg=\"%s\"", rec_msg);
- ASSERT(res == (int) sizeof(snd_msg));
- ASSERT(strcmp(snd_msg, rec_msg) == 0);
-
- close(fds[0]);
-
- return NULL;
-}
-
-#endif /* #ifndef NO_VFORK_PRESENT */
-
-static void
-vfork_test(int argc, char *argv[])
-{
-#ifdef NO_VFORK_PRESENT
- skip("No vfork() present; nothing to test");
-#else
- int res;
- ethr_tid tid;
-
- if (argc == 6 && strcmp("exec", argv[2]) == 0) {
- /* We are child after vfork() and execlp() ... */
-
- char *snd_msg;
- int closefd;
- int writefd;
-
- snd_msg = argv[3];
- closefd = atoi(argv[4]);
- writefd = atoi(argv[5]);
-
- print_line("child: snd_msg=\"%s\"; closefd=%d writefd=%d",
- snd_msg, closefd, writefd);
-
- close(closefd);
-
- res = (int) write(writefd, (void *) snd_msg, strlen(snd_msg)+1);
- print_line("child: %d = write()", res);
- if (res != strlen(snd_msg)+1)
- exit(1);
- close(writefd);
- print_line("child: bye");
- exit(0);
- }
- ASSERT(argc == 2);
-
- res = ethr_mutex_set_forksafe(&vt_mutex);
- ASSERT(res == 0 || res == ENOTSUP);
- res = ethr_mutex_lock(&vt_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_create(&tid, vt_thread, (void *) argv[0], NULL);
- ASSERT(res == 0);
-
- do_sleep(1);
-
- res = ethr_mutex_unlock(&vt_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_join(tid, NULL);
- ASSERT(res == 0);
-
- res = ethr_mutex_destroy(&vt_mutex);
- ASSERT(res == 0);
-#endif
-}
-
-
/*
* The tsd test case.
*
@@ -1651,11 +1065,8 @@ static int st_data;
void *
st_thread(void *unused)
{
- int res;
-
print_line("Aux thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Aux thread locked spinlock");
ASSERT(st_data == 0);
@@ -1669,8 +1080,7 @@ st_thread(void *unused)
ASSERT(st_data == 1);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Aux thread unlocked spinlock");
return NULL;
@@ -1691,8 +1101,7 @@ spinlock_test(void)
st_data = 0;
print_line("Main thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Main thread locked spinlock");
ASSERT(st_data == 0);
@@ -1708,8 +1117,7 @@ spinlock_test(void)
ASSERT(st_data == 0);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Main thread unlocked spinlock");
print_line("Main thread goes to sleep for 1 second");
@@ -1717,8 +1125,7 @@ spinlock_test(void)
print_line("Main thread woke up");
print_line("Main thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Main thread locked spinlock");
ASSERT(st_data == 1);
@@ -1729,8 +1136,7 @@ spinlock_test(void)
ASSERT(st_data == 1);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Main thread unlocked spinlock");
res = ethr_thr_join(tid, NULL);
@@ -1757,23 +1163,19 @@ void *
rwst_thread(void *unused)
{
int data;
- int res;
print_line("Aux thread tries to read lock rwspinlock");
- res = ethr_read_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_lock(&rwst_rwspinlock);
print_line("Aux thread read locked rwspinlock");
ASSERT(rwst_data == 4711);
print_line("Aux thread tries to read unlock rwspinlock");
- res = ethr_read_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_unlock(&rwst_rwspinlock);
print_line("Aux thread read unlocked rwspinlock");
print_line("Aux thread tries to write lock rwspinlock");
- res = ethr_write_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_lock(&rwst_rwspinlock);
print_line("Aux thread write locked rwspinlock");
data = ++rwst_data;
@@ -1787,8 +1189,7 @@ rwst_thread(void *unused)
++rwst_data;
print_line("Aux thread tries to write unlock rwspinlock");
- res = ethr_write_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_unlock(&rwst_rwspinlock);
print_line("Aux thread write unlocked rwspinlock");
return NULL;
@@ -1810,8 +1211,7 @@ rwspinlock_test(void)
rwst_data = 4711;
print_line("Main thread tries to read lock rwspinlock");
- res = ethr_read_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_lock(&rwst_rwspinlock);
print_line("Main thread read locked rwspinlock");
ASSERT(rwst_data == 4711);
@@ -1828,13 +1228,11 @@ rwspinlock_test(void)
ASSERT(rwst_data == 4711);
print_line("Main thread tries to read unlock rwspinlock");
- res = ethr_read_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_unlock(&rwst_rwspinlock);
print_line("Main thread read unlocked rwspinlock");
print_line("Main thread tries to write lock rwspinlock");
- res = ethr_write_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_lock(&rwst_rwspinlock);
print_line("Main thread write locked rwspinlock");
data = ++rwst_data;
@@ -1847,8 +1245,7 @@ rwspinlock_test(void)
++rwst_data;
print_line("Main thread tries to write unlock rwspinlock");
- res = ethr_write_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_unlock(&rwst_rwspinlock);
print_line("Main thread write unlocked rwspinlock");
res = ethr_thr_join(tid, NULL);
@@ -1875,23 +1272,19 @@ void *
rwmt_thread(void *unused)
{
int data;
- int res;
print_line("Aux thread tries to read lock rwmutex");
- res = ethr_rwmutex_rlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rlock(&rwmt_rwmutex);
print_line("Aux thread read locked rwmutex");
ASSERT(rwmt_data == 4711);
print_line("Aux thread tries to read unlock rwmutex");
- res = ethr_rwmutex_runlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_runlock(&rwmt_rwmutex);
print_line("Aux thread read unlocked rwmutex");
print_line("Aux thread tries to write lock rwmutex");
- res = ethr_rwmutex_rwlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwlock(&rwmt_rwmutex);
print_line("Aux thread write locked rwmutex");
data = ++rwmt_data;
@@ -1905,8 +1298,7 @@ rwmt_thread(void *unused)
++rwmt_data;
print_line("Aux thread tries to write unlock rwmutex");
- res = ethr_rwmutex_rwunlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwunlock(&rwmt_rwmutex);
print_line("Aux thread write unlocked rwmutex");
return NULL;
@@ -1928,8 +1320,7 @@ rwmutex_test(void)
rwmt_data = 4711;
print_line("Main thread tries to read lock rwmutex");
- res = ethr_rwmutex_rlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rlock(&rwmt_rwmutex);
print_line("Main thread read locked rwmutex");
ASSERT(rwmt_data == 4711);
@@ -1946,13 +1337,11 @@ rwmutex_test(void)
ASSERT(rwmt_data == 4711);
print_line("Main thread tries to read unlock rwmutex");
- res = ethr_rwmutex_runlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_runlock(&rwmt_rwmutex);
print_line("Main thread read unlocked rwmutex");
print_line("Main thread tries to write lock rwmutex");
- res = ethr_rwmutex_rwlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwlock(&rwmt_rwmutex);
print_line("Main thread write locked rwmutex");
data = ++rwmt_data;
@@ -1965,8 +1354,7 @@ rwmutex_test(void)
++rwmt_data;
print_line("Main thread tries to write unlock rwmutex");
- res = ethr_rwmutex_rwunlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwunlock(&rwmt_rwmutex);
print_line("Main thread write unlocked rwmutex");
res = ethr_thr_join(tid, NULL);
@@ -1998,65 +1386,53 @@ static ethr_atomic_t at_data;
void *
at_thread(void *unused)
{
- int res, i;
+ int i;
long val, go;
- res = ethr_atomic_inctest(&at_ready, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_inc_read(&at_ready);
ASSERT(val > 0);
ASSERT(val <= AT_THREADS);
do {
- res = ethr_atomic_read(&at_go, &go);
- ASSERT(res == 0);
+ go = ethr_atomic_read(&at_go);
} while (!go);
for (i = 0; i < AT_ITER; i++) {
- res = ethr_atomic_or_old(&at_data, at_set_val, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read_bor(&at_data, at_set_val);
ASSERT(val >= (i == 0 ? 0 : at_set_val) + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_and_old(&at_data, ~at_rm_val, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read_band(&at_data, ~at_rm_val);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_inctest(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_inc_read(&at_data);
ASSERT(val > at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_dectest(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_dec_read(&at_data);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_inc(&at_data);
- ASSERT(res == 0);
+ ethr_atomic_inc(&at_data);
- res = ethr_atomic_dec(&at_data);
- ASSERT(res == 0);
+ ethr_atomic_dec(&at_data);
- res = ethr_atomic_addtest(&at_data, (long) 4711, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_add_read(&at_data, (long) 4711);
ASSERT(val >= at_set_val + (long) 2*4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_add(&at_data, (long) -4711);
- ASSERT(res == 0);
+ ethr_atomic_add(&at_data, (long) -4711);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
}
- res = ethr_atomic_inc(&at_done);
- ASSERT(res == 0);
+ ethr_atomic_inc(&at_done);
return NULL;
}
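The atomic hunks additionally rename the operations that used to return a value through an output parameter: ethr_atomic_inctest becomes ethr_atomic_inc_read, dectest becomes dec_read, addtest becomes add_read, or_old becomes read_bor, and_old becomes read_band, while plain read/inc/dec/add lose their error return. A small sketch of the new style; c and v are illustrative names, and judging by the assertions in at_thread above, read_bor/read_band return the previous value while inc_read/add_read return the updated one:

    ethr_atomic_t c;
    long v;

    ethr_atomic_init(&c, 4711);        /* was: res = ethr_atomic_init(&c, 4711) */
    ethr_atomic_inc(&c);               /* no result to check anymore */
    v = ethr_atomic_inc_read(&c);      /* was: ethr_atomic_inctest(&c, &v); new value */
    v = ethr_atomic_read_bor(&c, 8);   /* was: ethr_atomic_or_old(&c, 8, &v); old value */
    v = ethr_atomic_read(&c);          /* was: ethr_atomic_read(&c, &v) */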
@@ -2069,14 +1445,13 @@ atomic_test(void)
ethr_tid tid[AT_THREADS];
ethr_thr_opts thr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
- if (sizeof(long) > 4) {
+#if ETHR_SIZEOF_PTR > 4
at_rm_val = ((long) 1) << 57;
at_set_val = ((long) 1) << 60;
- }
- else {
+#else
at_rm_val = ((long) 1) << 27;
at_set_val = ((long) 1) << 30;
- }
+#endif
at_max_val = at_set_val + at_rm_val + ((long) AT_THREADS + 1) * 4711;
data_init = at_rm_val + (long) 4711;
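The word-size branch above also moves from run time (sizeof(long) > 4) to compile time via the configure-detected ETHR_SIZEOF_PTR macro, so only the branch matching the build is compiled at all. The same idiom in isolation; TEST_BIT is an illustrative name, not one from the suite:

    #if ETHR_SIZEOF_PTR > 4
    /* 64-bit build: exercise bits above the 32-bit range */
    #  define TEST_BIT (((long) 1) << 60)
    #else
    /* 32-bit build */
    #  define TEST_BIT (((long) 1) << 30)
    #endif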
@@ -2085,22 +1460,15 @@ atomic_test(void)
thr_opts.detached = 1;
print_line("Initializing");
- res = ethr_atomic_init(&at_ready, 0);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_go, 0);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_done, data_init);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_data, data_init);
- ASSERT(res == 0);
+ ethr_atomic_init(&at_ready, 0);
+ ethr_atomic_init(&at_go, 0);
+ ethr_atomic_init(&at_done, data_init);
+ ethr_atomic_init(&at_data, data_init);
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val == data_init);
- res = ethr_atomic_set(&at_done, 0);
- ASSERT(res == 0);
- res = ethr_atomic_read(&at_done, &val);
- ASSERT(res == 0);
+ ethr_atomic_set(&at_done, 0);
+ val = ethr_atomic_read(&at_done);
ASSERT(val == 0);
print_line("Creating threads");
@@ -2111,31 +1479,27 @@ atomic_test(void)
print_line("Waiting for threads to ready up");
do {
- res = ethr_atomic_read(&at_ready, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_ready);
ASSERT(val >= 0);
ASSERT(val <= AT_THREADS);
} while (val != AT_THREADS);
print_line("Letting threads loose");
- res = ethr_atomic_xchg(&at_go, 17, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_xchg(&at_go, 17);
ASSERT(val == 0);
- res = ethr_atomic_read(&at_go, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_go);
ASSERT(val == 17);
print_line("Waiting for threads to finish");
do {
- res = ethr_atomic_read(&at_done, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_done);
ASSERT(val >= 0);
ASSERT(val <= AT_THREADS);
} while (val != AT_THREADS);
print_line("Checking result");
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val == data_final);
print_line("Result ok");
@@ -2143,190 +1507,6 @@ atomic_test(void)
}
-/*
- * The gate test case.
- *
- * Tests gates.
- */
-
-#define GT_THREADS 10
-
-static ethr_atomic_t gt_wait1;
-static ethr_atomic_t gt_wait2;
-static ethr_atomic_t gt_done;
-
-static ethr_gate gt_gate1;
-static ethr_gate gt_gate2;
-
-void *
-gt_thread(void *thr_no)
-{
- int no = (int)(long) thr_no;
- int swait = no % 2 == 0;
- int res;
- long done;
-
-
- do {
-
- res = ethr_atomic_inc(&gt_wait1);
- ASSERT(res == 0);
-
- if (swait)
- res = ethr_gate_swait(&gt_gate1, INT_MAX);
- else
- res = ethr_gate_wait(&gt_gate1);
- ASSERT(res == 0);
-
- res = ethr_atomic_dec(&gt_wait1);
- ASSERT(res == 0);
-
- res = ethr_atomic_inc(&gt_wait2);
- ASSERT(res == 0);
-
- if (swait)
- res = ethr_gate_swait(&gt_gate2, INT_MAX);
- else
- res = ethr_gate_wait(&gt_gate2);
- ASSERT(res == 0);
-
- res = ethr_atomic_dec(&gt_wait2);
- ASSERT(res == 0);
-
- res = ethr_atomic_read(&gt_done, &done);
- ASSERT(res == 0);
- } while (!done);
- return NULL;
-}
-
-
-static void
-gate_test(void)
-{
- long val;
- int res, i;
- ethr_tid tid[GT_THREADS];
-
- print_line("Initializing");
- res = ethr_atomic_init(&gt_wait1, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_atomic_init(&gt_wait2, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_atomic_init(&gt_done, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_init(&gt_gate1);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_init(&gt_gate2);
- ASSERT_EQ(res, 0, "%d");
-
- print_line("Creating threads");
- for (i = 0; i < GT_THREADS; i++) {
- res = ethr_thr_create(&tid[i], gt_thread, (void *) i, NULL);
- ASSERT_EQ(res, 0, "%d");
- }
-
- print_line("Waiting for threads to ready up");
- do {
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT(0 <= val && val <= GT_THREADS);
- } while (val != GT_THREADS);
-
- print_line("Testing");
-
- res = ethr_gate_let_through(&gt_gate1, 8);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 8)),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 8, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 8, "%ld");
-
- res = ethr_gate_let_through(&gt_gate2, 4);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 4)),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 4, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 4, "%ld");
-
- res = ethr_gate_let_through(&gt_gate1, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == GT_THREADS)),
- 60);
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS, "%ld");
-
- res = ethr_gate_let_through(&gt_gate2, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 4)),
- 60);
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 4, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 4, "%ld");
-
- res = ethr_atomic_set(&gt_done, 1);
- ASSERT_EQ(res, 0, "%d");
-
- res = ethr_gate_let_through(&gt_gate2, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_let_through(&gt_gate1, GT_THREADS - 4);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM(((res = ethr_atomic_read(&gt_wait1, &val)) != 0
- || (val == 0
- && ((res = ethr_atomic_read(&gt_wait2, &val)) != 0
- || val == 0))),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- print_line("Joining threads");
- for (i = 0; i < GT_THREADS; i++) {
- res = ethr_thr_join(tid[i], NULL);
- ASSERT_EQ(res, 0, "%d");
- }
-
- res = ethr_gate_destroy(&gt_gate1);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_destroy(&gt_gate2);
- ASSERT_EQ(res, 0, "%d");
-
-}
-
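The gate primitive and its test case are removed outright; the dispatcher in main() below drops the "gate" branch to match. For a comparable park-until-released rendezvous under the rewritten library, an event object could serve as a replacement. A heavily hedged sketch, assuming an ethr_event type with init/reset/wait/set operations; this is only an illustration of a possible substitute, not something this commit adds to the suite:

    static ethr_event release;   /* assumed: ethr_event_init(&release) once at startup */

    static void waiter(void)
    {
        ethr_event_reset(&release);   /* arm the event before waiting */
        ethr_event_wait(&release);    /* block until the event is set */
    }

    static void releaser(void)
    {
        ethr_event_set(&release);     /* wake waiters; stays set until reset */
    }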
#endif /* #ifndef ETHR_NO_THREAD_LIB */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
@@ -2342,14 +1522,12 @@ main(int argc, char *argv[])
#ifndef ETHR_NO_THREAD_LIB
{
char *testcase;
- int res;
send_my_pid();
testcase = argv[1];
- res = ethr_init(NULL);
- if (res != 0)
+ if (ethr_init(NULL) != 0 || ethr_late_init(NULL) != 0)
fail("Failed to initialize the ethread library");
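Initialization is now two-phase: ethr_init() must run first, followed by ethr_late_init(), and the test bails out if either returns non-zero. The same pattern spelled out step by step; the error strings are illustrative, and the comments on what each phase covers are an assumption rather than something stated in the commit:

    if (ethr_init(NULL) != 0)            /* phase 1: core library setup */
        fail("ethr_init failed");
    if (ethr_late_init(NULL) != 0)       /* phase 2: setup deferred until the
                                            caller's configuration is known */
        fail("ethr_late_init failed");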
if (strcmp(testcase, "create_join_thread") == 0)
@@ -2360,24 +1538,16 @@ main(int argc, char *argv[])
mutex_test();
else if (strcmp(testcase, "try_lock_mutex") == 0)
try_lock_mutex_test();
- else if (strcmp(testcase, "recursive_mutex") == 0)
- recursive_mutex_test();
else if (strcmp(testcase, "time_now") == 0)
time_now_test();
else if (strcmp(testcase, "cond_wait") == 0)
- cond_wait_test(0);
- else if (strcmp(testcase, "cond_timedwait") == 0)
- cond_timedwait_test();
+ cond_wait_test();
else if (strcmp(testcase, "broadcast") == 0)
broadcast_test();
else if (strcmp(testcase, "detached_thread") == 0)
detached_thread_test();
else if (strcmp(testcase, "max_threads") == 0)
max_threads_test();
- else if (strcmp(testcase, "forksafety") == 0)
- forksafety_test();
- else if (strcmp(testcase, "vfork") == 0)
- vfork_test(argc, argv);
else if (strcmp(testcase, "tsd") == 0)
tsd_test();
else if (strcmp(testcase, "spinlock") == 0)
@@ -2388,8 +1558,6 @@ main(int argc, char *argv[])
rwmutex_test();
else if (strcmp(testcase, "atomic") == 0)
atomic_test();
- else if (strcmp(testcase, "gate") == 0)
- gate_test();
else
skip("Test case \"%s\" not implemented yet", testcase);