Diffstat (limited to 'erts')
-rw-r--r--  erts/aclocal.m4  219
-rw-r--r--  erts/configure.in  97
-rw-r--r--  erts/doc/src/erl.xml  22
-rw-r--r--  erts/doc/src/erlang.xml  144
-rw-r--r--  erts/emulator/Makefile.in  65
-rw-r--r--  erts/emulator/beam/atom.c  10
-rw-r--r--  erts/emulator/beam/atom.names  1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c  47
-rw-r--r--  erts/emulator/beam/beam_bp.h  2
-rw-r--r--  erts/emulator/beam/beam_emu.c  22
-rw-r--r--  erts/emulator/beam/bif.c  4
-rw-r--r--  erts/emulator/beam/big.c  8
-rw-r--r--  erts/emulator/beam/binary.c  18
-rw-r--r--  erts/emulator/beam/break.c  32
-rw-r--r--  erts/emulator/beam/copy.c  98
-rw-r--r--  erts/emulator/beam/decl.h  55
-rw-r--r--  erts/emulator/beam/dist.c  26
-rw-r--r--  erts/emulator/beam/elib_malloc.c  2334
-rw-r--r--  erts/emulator/beam/elib_stat.h  45
-rw-r--r--  erts/emulator/beam/erl_alloc.c  210
-rw-r--r--  erts/emulator/beam/erl_alloc.types  8
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c  4
-rw-r--r--  erts/emulator/beam/erl_bif_info.c  177
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c  2
-rw-r--r--  erts/emulator/beam/erl_bits.c  8
-rw-r--r--  erts/emulator/beam/erl_db.c  319
-rw-r--r--  erts/emulator/beam/erl_db_hash.c  12
-rw-r--r--  erts/emulator/beam/erl_db_tree.c  19
-rw-r--r--  erts/emulator/beam/erl_db_util.c  13
-rw-r--r--  erts/emulator/beam/erl_db_util.h  3
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c  78
-rw-r--r--  erts/emulator/beam/erl_fun.c  22
-rw-r--r--  erts/emulator/beam/erl_fun.h  4
-rw-r--r--  erts/emulator/beam/erl_gc.c  454
-rw-r--r--  erts/emulator/beam/erl_init.c  116
-rw-r--r--  erts/emulator/beam/erl_lock_check.c  109
-rw-r--r--  erts/emulator/beam/erl_lock_check.h  1
-rw-r--r--  erts/emulator/beam/erl_lock_count.c  18
-rw-r--r--  erts/emulator/beam/erl_message.c  149
-rw-r--r--  erts/emulator/beam/erl_message.h  21
-rw-r--r--  erts/emulator/beam/erl_mtrace.c  2
-rw-r--r--  erts/emulator/beam/erl_nif.c  10
-rw-r--r--  erts/emulator/beam/erl_node_tables.c  101
-rw-r--r--  erts/emulator/beam/erl_obsolete.c  186
-rw-r--r--  erts/emulator/beam/erl_port_task.c  7
-rw-r--r--  erts/emulator/beam/erl_process.c  1907
-rw-r--r--  erts/emulator/beam/erl_process.h  92
-rw-r--r--  erts/emulator/beam/erl_process_lock.c  350
-rw-r--r--  erts/emulator/beam/erl_process_lock.h  46
-rw-r--r--  erts/emulator/beam/erl_smp.h  222
-rw-r--r--  erts/emulator/beam/erl_term.h  15
-rw-r--r--  erts/emulator/beam/erl_threads.h  509
-rw-r--r--  erts/emulator/beam/export.c  8
-rw-r--r--  erts/emulator/beam/external.c  150
-rw-r--r--  erts/emulator/beam/external.h  2
-rw-r--r--  erts/emulator/beam/global.h  30
-rw-r--r--  erts/emulator/beam/io.c  20
-rw-r--r--  erts/emulator/beam/packet_parser.c  2
-rw-r--r--  erts/emulator/beam/register.c  7
-rw-r--r--  erts/emulator/beam/sys.h  59
-rw-r--r--  erts/emulator/beam/utils.c  36
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c  2
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c  10
-rw-r--r--  erts/emulator/drivers/unix/mem_drv.c  145
-rw-r--r--  erts/emulator/drivers/unix/unix_efile.c  6
-rw-r--r--  erts/emulator/drivers/win32/mem_drv.c  141
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c  5
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c  4
-rw-r--r--  erts/emulator/obsolete/driver.h  263
-rw-r--r--  erts/emulator/sys/common/erl_poll.c  39
-rw-r--r--  erts/emulator/sys/unix/sys.c  26
-rw-r--r--  erts/emulator/sys/unix/sys_float.c  4
-rw-r--r--  erts/emulator/sys/win32/sys.c  34
-rw-r--r--  erts/emulator/test/Makefile  1
-rw-r--r--  erts/emulator/test/decode_packet_SUITE.erl  4
-rw-r--r--  erts/emulator/test/hash_SUITE.erl  8
-rw-r--r--  erts/emulator/test/nif_SUITE.erl  168
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c  114
-rw-r--r--  erts/emulator/test/obsolete_SUITE.erl  123
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/Makefile.src  33
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/erl_threads.c  302
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/testcase_driver.c  262
-rw-r--r--  erts/emulator/test/obsolete_SUITE_data/testcase_driver.h  57
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl  361
-rw-r--r--  erts/emulator/test/send_term_SUITE.erl  67
-rw-r--r--  erts/emulator/test/send_term_SUITE_data/send_term_drv.c  218
-rw-r--r--  erts/epmd/src/epmd.c  10
-rw-r--r--  erts/epmd/src/epmd_cli.c  36
-rw-r--r--  erts/epmd/src/epmd_srv.c  6
-rw-r--r--  erts/etc/common/Makefile.in  2
-rw-r--r--  erts/etc/common/erlexec.c  30
-rw-r--r--  erts/include/internal/erl_misc_utils.h  9
-rw-r--r--  erts/include/internal/ethr_internal.h  67
-rw-r--r--  erts/include/internal/ethr_mutex.h  510
-rw-r--r--  erts/include/internal/ethr_optimized_fallbacks.h  185
-rw-r--r--  erts/include/internal/ethread.h  1653
-rw-r--r--  erts/include/internal/ethread_header_config.h.in  57
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h  19
-rw-r--r--  erts/include/internal/i386/atomic.h  25
-rw-r--r--  erts/include/internal/i386/ethread.h  3
-rw-r--r--  erts/include/internal/i386/rwlock.h  12
-rw-r--r--  erts/include/internal/i386/spinlock.h  14
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h  292
-rw-r--r--  erts/include/internal/libatomic_ops/ethread.h  30
-rw-r--r--  erts/include/internal/ppc32/atomic.h  22
-rw-r--r--  erts/include/internal/ppc32/ethread.h  3
-rw-r--r--  erts/include/internal/ppc32/rwlock.h  12
-rw-r--r--  erts/include/internal/ppc32/spinlock.h  12
-rw-r--r--  erts/include/internal/pthread/ethr_event.h  151
-rw-r--r--  erts/include/internal/sparc32/atomic.h  39
-rw-r--r--  erts/include/internal/sparc32/ethread.h  3
-rw-r--r--  erts/include/internal/sparc32/rwlock.h  12
-rw-r--r--  erts/include/internal/sparc32/spinlock.h  12
-rw-r--r--  erts/include/internal/tile/atomic.h  67
-rw-r--r--  erts/include/internal/win/ethr_atomic.h  244
-rw-r--r--  erts/include/internal/win/ethr_event.h  62
-rw-r--r--  erts/include/internal/win/ethread.h  31
-rw-r--r--  erts/lib_src/Makefile.in  24
-rw-r--r--  erts/lib_src/common/erl_misc_utils.c  754
-rw-r--r--  erts/lib_src/common/ethr_aux.c  762
-rw-r--r--  erts/lib_src/common/ethr_cbf.c  36
-rw-r--r--  erts/lib_src/common/ethr_mutex.c  2758
-rw-r--r--  erts/lib_src/common/ethread.c  3369
-rw-r--r--  erts/lib_src/pthread/ethr_event.c  219
-rw-r--r--  erts/lib_src/pthread/ethread.c  477
-rw-r--r--  erts/lib_src/win/ethr_event.c  120
-rw-r--r--  erts/lib_src/win/ethread.c  625
-rw-r--r--  erts/test/ethread_SUITE.erl  54
-rw-r--r--  erts/test/ethread_SUITE_data/ethread_tests.c  1198
-rw-r--r--  erts/vsn.mk  4
130 files changed, 12928 insertions, 12256 deletions
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 61244c7cd3..3b1edd7605 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -521,11 +521,13 @@ if test "X$host_os" = "Xwin32"; then
THR_DEFS="-DWIN32_THREADS"
THR_LIBS=
THR_LIB_NAME=win32_threads
+ THR_LIB_TYPE=win32_threads
else
AC_MSG_RESULT(no)
THR_DEFS=
THR_LIBS=
THR_LIB_NAME=
+ THR_LIB_TYPE=posix_unknown
dnl Try to find POSIX threads
@@ -586,6 +588,7 @@ dnl On ofs1 the '-pthread' switch should be used
AC_MSG_WARN([result yes guessed because of cross compilation])
fi
if test $nptl = yes; then
+ THR_LIB_TYPE=posix_nptl
need_nptl_incldir=no
AC_CHECK_HEADER(nptl/pthread.h,
[need_nptl_incldir=yes
@@ -653,6 +656,19 @@ fi
])
+AC_DEFUN(ERL_INTERNAL_LIBS,
+[
+
+ERTS_INTERNAL_X_LIBS=
+
+AC_CHECK_LIB(kstat, kstat_open,
+[AC_DEFINE(HAVE_KSTAT, 1, [Define if you have kstat])
+ERTS_INTERNAL_X_LIBS="$ERTS_INTERNAL_X_LIBS -lkstat"])
+
+AC_SUBST(ERTS_INTERNAL_X_LIBS)
+
+])
+
dnl ----------------------------------------------------------------------
dnl
dnl ERL_FIND_ETHR_LIB
@@ -676,10 +692,14 @@ AC_DEFUN(ERL_FIND_ETHR_LIB,
[
LM_CHECK_THR_LIB
+ERL_INTERNAL_LIBS
+ethr_have_native_atomics=no
+ethr_have_native_spinlock=no
ETHR_THR_LIB_BASE="$THR_LIB_NAME"
+ETHR_THR_LIB_BASE_TYPE="$THR_LIB_TYPE"
ETHR_DEFS="$THR_DEFS"
-ETHR_X_LIBS="$THR_LIBS"
+ETHR_X_LIBS="$THR_LIBS $ERTS_INTERNAL_X_LIBS"
ETHR_LIBS=
ETHR_LIB_NAME=
@@ -691,6 +711,7 @@ ethr_lib_name=ethread
case "$THR_LIB_NAME" in
win32_threads)
+ ETHR_THR_LIB_BASE_DIR=win
# * _WIN32_WINNT >= 0x0400 is needed for
# TryEnterCriticalSection
# * _WIN32_WINNT >= 0x0403 is needed for
@@ -716,10 +737,13 @@ case "$THR_LIB_NAME" in
if test $found_win32_winnt = no; then
AC_MSG_ERROR([-D_WIN32_WINNT missing in CPPFLAGS])
fi
+ ethr_have_native_atomics=yes
+ ethr_have_native_spinlock=yes
AC_DEFINE(ETHR_WIN32_THREADS, 1, [Define if you have win32 threads])
;;
pthread)
+ ETHR_THR_LIB_BASE_DIR=pthread
AC_DEFINE(ETHR_PTHREADS, 1, [Define if you have pthreads])
case $host_os in
openbsd*)
@@ -775,9 +799,7 @@ case "$THR_LIB_NAME" in
if test $usable_sigaltstack = no; then
ETHR_DEFS="$ETHR_DEFS -DETHR_UNUSABLE_SIGALTSTACK"
fi
-
- AC_DEFINE(ETHR_INIT_MUTEX_IN_CHILD_AT_FORK, 1, \
-[Define if mutexes should be reinitialized (instead of unlocked) in child at fork.]) ;;
+ ;;
*) ;;
esac
@@ -808,6 +830,10 @@ case "$THR_LIB_NAME" in
[Define if you need the <nptl/pthread.h> header file.])
fi
+ AC_CHECK_HEADER(sched.h, \
+ AC_DEFINE(ETHR_HAVE_SCHED_H, 1, \
+[Define if you have the <sched.h> header file.]))
+
AC_CHECK_HEADER(sys/time.h, \
AC_DEFINE(ETHR_HAVE_SYS_TIME_H, 1, \
[Define if you have the <sys/time.h> header file.]))
@@ -823,26 +849,63 @@ case "$THR_LIB_NAME" in
dnl Check for functions
dnl
- AC_CHECK_FUNC(pthread_atfork, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_ATFORK, 1, \
-[Define if you have the pthread_atfork function.]))
- AC_CHECK_FUNC(pthread_mutexattr_settype, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE, 1, \
-[Define if you have the pthread_mutexattr_settype function.]))
- AC_CHECK_FUNC(pthread_mutexattr_setkind_np, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP, 1, \
-[Define if you have the pthread_mutexattr_setkind_np function.]))
AC_CHECK_FUNC(pthread_spin_lock, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_SPIN_LOCK, 1, \
-[Define if you have the pthread_spin_lock function.]))
+ [ethr_have_native_spinlock=yes \
+ AC_DEFINE(ETHR_HAVE_PTHREAD_SPIN_LOCK, 1, \
+[Define if you have the pthread_spin_lock function.])])
+
+ have_sched_yield=no
+ have_librt_sched_yield=no
+ AC_CHECK_FUNC(sched_yield, [have_sched_yield=yes])
+ if test $have_sched_yield = no; then
+ AC_CHECK_LIB(rt, sched_yield,
+ [have_librt_sched_yield=yes
+ ETHR_X_LIBS="$ETHR_X_LIBS -lrt"])
+ fi
+ if test $have_sched_yield = yes || test $have_librt_sched_yield = yes; then
+ AC_DEFINE(ETHR_HAVE_SCHED_YIELD, 1, [Define if you have the sched_yield() function.])
+ AC_MSG_CHECKING([whether sched_yield() returns an int])
+ sched_yield_ret_int=no
+ AC_TRY_COMPILE([
+ #ifdef ETHR_HAVE_SCHED_H
+ #include <sched.h>
+ #endif
+ ],
+ [int sched_yield();],
+ [sched_yield_ret_int=yes])
+ AC_MSG_RESULT([$sched_yield_ret_int])
+ if test $sched_yield_ret_int = yes; then
+ AC_DEFINE(ETHR_SCHED_YIELD_RET_INT, 1, [Define if sched_yield() returns an int.])
+ fi
+ fi
+
+ have_pthread_yield=no
+ AC_CHECK_FUNC(pthread_yield, [have_pthread_yield=yes])
+ if test $have_pthread_yield = yes; then
+ AC_DEFINE(ETHR_HAVE_PTHREAD_YIELD, 1, [Define if you have the pthread_yield() function.])
+ AC_MSG_CHECKING([whether pthread_yield() returns an int])
+ pthread_yield_ret_int=no
+ AC_TRY_COMPILE([
+ #if defined(ETHR_NEED_NPTL_PTHREAD_H)
+ #include <nptl/pthread.h>
+ #elif defined(ETHR_HAVE_MIT_PTHREAD_H)
+ #include <pthread/mit/pthread.h>
+ #elif defined(ETHR_HAVE_PTHREAD_H)
+ #include <pthread.h>
+ #endif
+ ],
+ [int pthread_yield();],
+ [pthread_yield_ret_int=yes])
+ AC_MSG_RESULT([$pthread_yield_ret_int])
+ if test $pthread_yield_ret_int = yes; then
+ AC_DEFINE(ETHR_PTHREAD_YIELD_RET_INT, 1, [Define if pthread_yield() returns an int.])
+ fi
+ fi
have_pthread_rwlock_init=no
AC_CHECK_FUNC(pthread_rwlock_init, [have_pthread_rwlock_init=yes])
if test $have_pthread_rwlock_init = yes; then
- AC_DEFINE(ETHR_HAVE_PTHREAD_RWLOCK_INIT, 1, \
-[Define if you have a pthread_rwlock implementation that can be used.])
-
ethr_have_pthread_rwlockattr_setkind_np=no
AC_CHECK_FUNC(pthread_rwlockattr_setkind_np,
[ethr_have_pthread_rwlockattr_setkind_np=yes])
@@ -876,12 +939,42 @@ case "$THR_LIB_NAME" in
fi
fi
+ if test "$force_pthread_rwlocks" = "yes"; then
+ AC_DEFINE(ETHR_FORCE_PTHREAD_RWLOCK, 1, \
+[Define if you want to force usage of pthread rwlocks])
+
+ if test $have_pthread_rwlock_init = yes; then
+ AC_MSG_WARN([Forced usage of pthread rwlocks. Note that this implementation may suffer from starvation issues.])
+ else
+ AC_MSG_ERROR([User forced usage of pthread rwlock, but no such implementation was found])
+ fi
+ fi
AC_CHECK_FUNC(pthread_attr_setguardsize, \
AC_DEFINE(ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE, 1, \
[Define if you have the pthread_attr_setguardsize function.]))
+ linux_futex=no
+ AC_MSG_CHECKING([for Linux futexes])
+ AC_TRY_LINK([
+ #include <sys/syscall.h>
+ #include <unistd.h>
+ #include <linux/futex.h>
+ #include <sys/time.h>
+ ],
+ [
+ int i = 1;
+ syscall(__NR_futex, (void *) &i, FUTEX_WAKE, 1,
+ (void*)0,(void*)0, 0);
+ syscall(__NR_futex, (void *) &i, FUTEX_WAIT, 0,
+ (void*)0,(void*)0, 0);
+ return 0;
+ ],
+ linux_futex=yes)
+ AC_MSG_RESULT([$linux_futex])
+ test $linux_futex = yes && AC_DEFINE(ETHR_HAVE_LINUX_FUTEX, 1, [Define if you have a linux futex implementation.])
+
AC_MSG_CHECKING([for GCC atomic operations])
ethr_have_gcc_atomic_ops=no
AC_TRY_LINK([],
@@ -899,10 +992,69 @@ case "$THR_LIB_NAME" in
AC_MSG_RESULT([$ethr_have_gcc_atomic_ops])
test $ethr_have_gcc_atomic_ops = yes && AC_DEFINE(ETHR_HAVE_GCC_ATOMIC_OPS, 1, [Define if you have gcc atomic operations])
+ case "$host_cpu" in
+ sun4u | sparc64 | sun4v)
+ ethr_have_native_atomics=yes;;
+ i86pc | i386 | i486 | i586 | i686 | x86_64 | amd64)
+ ethr_have_native_atomics=yes;;
+ macppc | ppc | "Power Macintosh")
+ ethr_have_native_atomics=yes;;
+ tile)
+ ethr_have_native_atomics=yes;;
+ *)
+ ;;
+ esac
+
+ AC_MSG_CHECKING([for a usable libatomic_ops implementation])
+ case "x$with_libatomic_ops" in
+ xno | xyes | x)
+ libatomic_ops_include=
+ ;;
+ *)
+ if test -d "${with_libatomic_ops}/include"; then
+ libatomic_ops_include="-I$with_libatomic_ops/include"
+ CPPFLAGS="$CPPFLAGS $libatomic_ops_include"
+ else
+ AC_MSG_ERROR([libatomic_ops include directory $with_libatomic_ops/include not found])
+ fi;;
+ esac
+ ethr_have_libatomic_ops=no
+ AC_TRY_LINK([#include "atomic_ops.h"],
+ [
+ volatile AO_t x;
+ AO_t y;
+ int z;
+
+ AO_nop_full();
+ AO_store(&x, (AO_t) 0);
+ z = AO_load(&x);
+ z = AO_compare_and_swap(&x, (AO_t) 0, (AO_t) 1);
+ ],
+ [ethr_have_native_atomics=yes
+ ethr_have_libatomic_ops=yes])
+ AC_MSG_RESULT([$ethr_have_libatomic_ops])
+ if test $ethr_have_libatomic_ops = yes; then
+ AC_CHECK_SIZEOF(AO_t, ,
+ [
+ #include <stdio.h>
+ #include "atomic_ops.h"
+ ])
+ AC_DEFINE_UNQUOTED(ETHR_SIZEOF_AO_T, $ac_cv_sizeof_AO_t, [Define to the size of AO_t if libatomic_ops is used])
+
+ AC_DEFINE(ETHR_HAVE_LIBATOMIC_OPS, 1, [Define if you have libatomic_ops atomic operations])
+ if test "x$with_libatomic_ops" != "xno" && test "x$with_libatomic_ops" != "x"; then
+ AC_DEFINE(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS, 1, [Define if you prefer libatomic_ops native ethread implementations])
+ fi
+ ETHR_DEFS="$ETHR_DEFS $libatomic_ops_include"
+ elif test "x$with_libatomic_ops" != "xno" && test "x$with_libatomic_ops" != "x"; then
+ AC_MSG_ERROR([No usable libatomic_ops implementation found])
+ fi
+
dnl Restore LIBS
LIBS=$saved_libs
dnl restore CPPFLAGS
CPPFLAGS=$saved_cppflags
+
;;
*)
;;
@@ -918,16 +1070,23 @@ fi
if test "x$ETHR_THR_LIB_BASE" != "x"; then
ETHR_DEFS="-DUSE_THREADS $ETHR_DEFS"
- ETHR_LIBS="-l$ethr_lib_name $ETHR_X_LIBS"
+ ETHR_LIBS="-l$ethr_lib_name -lerts_internal_r $ETHR_X_LIBS"
ETHR_LIB_NAME=$ethr_lib_name
fi
AC_CHECK_SIZEOF(void *)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF_PTR, $ac_cv_sizeof_void_p, [Define to the size of pointers])
-if test "X$disable_native_ethr_impls" = "Xyes"; then
- AC_DEFINE(ETHR_DISABLE_NATIVE_IMPLS, 1, [Define if you want to disable native ethread implementations])
-fi
+AC_ARG_ENABLE(native-ethr-impls,
+ AS_HELP_STRING([--disable-native-ethr-impls],
+ [disable native ethread implementations]),
+[ case "$enableval" in
+ no) disable_native_ethr_impls=yes ;;
+ *) disable_native_ethr_impls=no ;;
+ esac ], disable_native_ethr_impls=no)
+
+test "X$disable_native_ethr_impls" = "Xyes" &&
+ AC_DEFINE(ETHR_DISABLE_NATIVE_IMPLS, 1, [Define if you want to disable native ethread implementations])
AC_ARG_ENABLE(prefer-gcc-native-ethr-impls,
AS_HELP_STRING([--enable-prefer-gcc-native-ethr-impls],
@@ -940,6 +1099,21 @@ AC_ARG_ENABLE(prefer-gcc-native-ethr-impls,
test $enable_prefer_gcc_native_ethr_impls = yes &&
AC_DEFINE(ETHR_PREFER_GCC_NATIVE_IMPLS, 1, [Define if you prefer gcc native ethread implementations])
+AC_ARG_ENABLE(ethread-pre-pentium4-compatibility,
+ AS_HELP_STRING([--enable-ethread-pre-pentium4-compatibility],
+ [enable compatibility with x86 processors before pentium 4 (back to 486) in the ethread library]),
+[ case "$enableval" in
+ yes) enable_ethread_pre_pentium4_compatibility=yes ;;
+ *) enable_ethread_pre_pentium4_compatibility=no ;;
+ esac ], enable_ethread_pre_pentium4_compatibility=no)
+
+test $enable_ethread_pre_pentium4_compatibility = yes &&
+ AC_DEFINE(ETHR_PRE_PENTIUM4_COMPAT, 1, [Define if you want compatibility with x86 processors before pentium4.])
+
+AC_ARG_WITH(libatomic_ops,
+ AS_HELP_STRING([--with-libatomic_ops=PATH],
+ [use libatomic_ops with the ethread library]))
+
AC_DEFINE(ETHR_HAVE_ETHREAD_DEFINES, 1, \
[Define if you have all ethread defines])
@@ -948,6 +1122,7 @@ AC_SUBST(ETHR_LIBS)
AC_SUBST(ETHR_LIB_NAME)
AC_SUBST(ETHR_DEFS)
AC_SUBST(ETHR_THR_LIB_BASE)
+AC_SUBST(ETHR_THR_LIB_BASE_DIR)
])
diff --git a/erts/configure.in b/erts/configure.in
index 63bf548c89..d5dccdbb83 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -61,6 +61,9 @@ if test x"${ERL_TOP}/erts" != x"$srcdir"; then
fi
erl_top=${ERL_TOP}
+# Remove old configuration information
+/bin/rm -f "$ERL_TOP/erts/CONF_INFO"
+
# echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# echo X
# echo "X srcdir = $srcdir"
@@ -87,6 +90,13 @@ else
host_os=$host
fi
+if test "$cross_compiling" = "yes"; then
+ CROSS_COMPILING=yes
+else
+ CROSS_COMPILING=no
+fi
+AC_SUBST(CROSS_COMPILING)
+
ERL_XCOMP_SYSROOT_INIT
AC_ISC_POSIX
@@ -198,9 +208,6 @@ AC_ARG_ENABLE(native-libs,
AC_ARG_ENABLE(tsp,
[ --enable-tsp compile tsp app])
-AC_ARG_ENABLE(elib-malloc,
-[ --enable-elib-malloc use elib_malloc instead of normal malloc])
-
AC_ARG_ENABLE(fp-exceptions,
[ --enable-fp-exceptions Use hardware floating point exceptions (default if hipe enabled)],
[ case "$enableval" in
@@ -277,20 +284,6 @@ AC_ARG_ENABLE(clock-gettime,
*) clock_gettime_correction=yes ;;
esac ], clock_gettime_correction=unknown)
-AC_ARG_ENABLE(native-ethr-impls,
-[ --enable-native-ethr-impls enable native ethread implementations
- --disable-native-ethr-impls disable native ethread implementations],
-[ case "$enableval" in
- no) disable_native_ethr_impls=yes ;;
- *) disable_native_ethr_impls=no ;;
- esac ], disable_native_ethr_impls=no)
-
-dnl Defined in libraries/megaco/configure.in but we need it here
-dnl also in order to show it to the "top user"
-
-AC_ARG_ENABLE(megaco_flex_scanner_lineno,
-[ --disable-megaco-flex-scanner-lineno disable megaco flex scanner lineno])
-
dnl Magic test for clearcase.
OTP_RELEASE=
if test "${ERLANG_COMMERCIAL_BUILD}" != ""; then
@@ -1049,6 +1042,42 @@ if test $ERTS_BUILD_SMP_EMU = yes; then
fi
AC_DEFINE(ERTS_HAVE_SMP_EMU, 1, [Define if the smp emulator is built])
+
+ case "$ethr_have_native_atomics-$ethr_have_native_spinlock" in
+ yes-*)
+ ;;
+
+ no-yes)
+
+ test -f "$ERL_TOP/erts/CONF_INFO" ||
+ echo "" > "$ERL_TOP/erts/CONF_INFO"
+ cat >> $ERL_TOP/erts/CONF_INFO <<EOF
+
+ No native atomic implementation available.
+ Fallbacks implemented using spinlocks will be
+ used. Note that the performance of the SMP
+ runtime system will suffer due to this.
+
+EOF
+ ;;
+
+ no-no)
+
+ test -f "$ERL_TOP/erts/CONF_INFO" ||
+ echo "" > "$ERL_TOP/erts/CONF_INFO"
+ cat >> "$ERL_TOP/erts/CONF_INFO" <<EOF
+
+ Neither a native atomic implementation nor a native
+ spinlock implementation is available. Fallbacks
+ implemented using mutexes will be used. Note that
+ the performance of the SMP runtime system will
+ suffer considerably due to this.
+
+EOF
+ ;;
+
+ esac
+
enable_threads=force
fi
@@ -1149,17 +1178,15 @@ else
enable_child_waiter_thread=yes
;;
linux*)
- AC_DEFINE(USE_RECURSIVE_MALLOC_MUTEX,[1],
- [Define if malloc should use a recursive mutex])
AC_MSG_CHECKING([whether dlopen() needs to be called before first call to dlerror()])
- if test "x$ETHR_THR_LIB_BASE_NAME" != "xnptl"; then
+ if test "x$ETHR_THR_LIB_BASE_TYPE" != "xposix_nptl"; then
AC_DEFINE(ERTS_NEED_DLOPEN_BEFORE_DLERROR,[1],
[Define if dlopen() needs to be called before first call to dlerror()])
AC_MSG_RESULT(yes)
else
AC_MSG_RESULT(no)
fi
- if test "x$ETHR_THR_LIB_BASE_NAME" != "xnptl"; then
+ if test "x$ETHR_THR_LIB_BASE_TYPE" != "xposix_nptl"; then
# Child waiter thread cannot be enabled
disable_child_waiter_thread=yes
enable_child_waiter_thread=no
@@ -1221,13 +1248,7 @@ fi
AC_SUBST(EMU_LOCK_CHECKING)
-ERTS_INTERNAL_X_LIBS=
-
-AC_CHECK_LIB(kstat, kstat_open,
-[AC_DEFINE(HAVE_KSTAT, 1, [Define if you have kstat])
-ERTS_INTERNAL_X_LIBS="$ERTS_INTERNAL_X_LIBS -lkstat"])
-
-AC_SUBST(ERTS_INTERNAL_X_LIBS)
+ERL_INTERNAL_LIBS
dnl THR_LIBS and THR_DEFS are only used by odbc
THR_LIBS=$ETHR_X_LIBS
@@ -2980,17 +3001,6 @@ if test "x$HIPE_ENABLED" = "xyes" ; then
fi
#
-# Check if we should use elib_malloc.
-#
-
-if test X${enable_elib_malloc} = Xyes; then
- AC_DEFINE(ENABLE_ELIB_MALLOC,[],[Define to enable use of elib_malloc (a malloc() replacement)])
- AC_DEFINE(ELIB_HEAP_SBRK,[],[Elib sbrk])
- AC_DEFINE(ELIB_ALLOC_IS_CLIB,[],[Use elib malloc as clib])
- AC_DEFINE(ELIB_SORTED_BLOCKS,[],[Define to enable the use of sorted blocks when using elib_malloc])
-fi
-
-#
# Check for working poll().
#
AC_MSG_CHECKING([for working poll()])
@@ -3602,10 +3612,10 @@ dnl Should one use EXEEXT or ac_exeext?
if test -f "$erl_xcomp_sysroot$SSL_BINDIR/openssl$EXEEXT"; then
if test "$cross_compiling" = "yes"; then
dnl Cannot test it; hope it is working...
- OPENSSL_CMD="$SSL_BINDIR/openssl"
+ OPENSSL_CMD="$erl_xcomp_sysroot$SSL_BINDIR/openssl$EXEEXT"
else
- if "$SSL_BINDIR/openssl" version > /dev/null 2>&1; then
- OPENSSL_CMD="$SSL_BINDIR/openssl"
+ if "$SSL_BINDIR/openssl$EXEEXT" version > /dev/null 2>&1; then
+ OPENSSL_CMD="$SSL_BINDIR/openssl$EXEEXT"
else
is_real_ssl=no
fi
@@ -3614,6 +3624,9 @@ dnl Should one use EXEEXT or ac_exeext?
is_real_ssl=no
fi
if test "x$is_real_ssl" = "xyes" ; then
+ if test "$MIXED_CYGWIN" = "yes"; then
+ OPENSSL_CMD=`cygpath -s -m "$OPENSSL_CMD"` 2> /dev/null
+ fi
SSL_INCLUDE="-I$dir/include"
old_CPPFLAGS=$CPPFLAGS
CPPFLAGS=$SSL_INCLUDE
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index df80142ce1..f477280a6f 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -603,6 +603,24 @@
<item>
<p>Force ets memory block to be moved on realloc.</p>
</item>
+ <tag><marker id="+rg"><c><![CDATA[+rg ReaderGroupsLimit]]></c></marker></tag>
+ <item>
+ <p>Limits the number of reader groups used by read/write locks
+ optimized for read operations in the Erlang runtime system. By
+ default the reader groups limit equals 8.</p>
+ <p>When the number of schedulers is less than or equal to the reader
+ groups limit, each scheduler has its own reader group. When the
+ number of schedulers is larger than the reader groups limit,
+ schedulers share reader groups. Shared reader groups degrade
+ read lock and read unlock performance, while a large number of
+ reader groups degrades write lock performance, so the limit is a
+ tradeoff between performance for read operations and performance
+ for write operations. Each reader group currently consumes 64 bytes
+ in each read/write lock. Also note that a runtime system using
+ shared reader groups benefits from <seealso marker="#+sbt">binding
+ schedulers to logical processors</seealso>, since the reader groups
+ are distributed better between schedulers.</p>
+ </item>
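A minimal sketch of exercising the new limit from Erlang (the value 16 is illustrative, not taken from this patch):

    %% Start the runtime system with a raised reader groups limit:
    %%   erl +rg 16
    %% Schedulers share reader groups once they outnumber the limit,
    %% so compare the scheduler count against the value you passed.
    Schedulers = erlang:system_info(schedulers),
    io:format("~p schedulers; reader groups are shared if > 16~n",
              [Schedulers]).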
<tag><marker id="+S"><c><![CDATA[+S Schedulers:SchedulerOnline]]></c></marker></tag>
<item>
<p>Sets the amount of scheduler threads to create and scheduler
@@ -667,8 +685,8 @@
<seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, default_bind)</seealso>.
</p></item>
</taglist>
- <p>Binding of schedulers are currently only supported on newer
- Linux and Solaris systems.</p>
+ <p>Binding of schedulers is currently only supported on newer
+ Linux, Solaris, and Windows systems.</p>
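A hedged sketch of verifying the resulting bindings (db, i.e. default_bind, is one of the documented bind types):

    %% Start the node with bound schedulers:
    %%   erl +sbt db
    %% Then ask which logical processors the schedulers were actually
    %% bound to; unbound schedulers show up as the atom 'unbound'.
    Bindings = erlang:system_info(scheduler_bindings),
    io:format("scheduler bindings: ~p~n", [Bindings]).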
<p>If no CPU topology is available when the <c>+sbt</c> flag
is processed and <c>BindType</c> is any other type than
<c>u</c>, the runtime system will fail to start. CPU
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 77a628e82b..5d8a96a910 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -499,7 +499,7 @@ iolist() = [char() | binary() | iolist()]
<name>erlang:cancel_timer(TimerRef) -> Time | false</name>
<fsummary>Cancel a timer</fsummary>
<type>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
<v>Time = int()</v>
</type>
<desc>
@@ -763,7 +763,7 @@ false</pre>
<name>erlang:demonitor(MonitorRef) -> true</name>
<fsummary>Stop monitoring</fsummary>
<type>
- <v>MonitorRef = ref()</v>
+ <v>MonitorRef = reference()</v>
</type>
<desc>
<p>If <c>MonitorRef</c> is a reference which the calling process
@@ -803,7 +803,7 @@ false</pre>
<name>erlang:demonitor(MonitorRef, OptionList) -> true|false</name>
<fsummary>Stop monitoring</fsummary>
<type>
- <v>MonitorRef = ref()</v>
+ <v>MonitorRef = reference()</v>
<v>OptionList = [Option]</v>
<v>Option = flush</v>
<v>Option = info</v>
@@ -2160,7 +2160,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>make_ref() -> ref()</name>
+ <name>make_ref() -> reference()</name>
<fsummary>Return an almost unique reference</fsummary>
<desc>
<p>Returns an almost unique reference.</p>
@@ -2690,7 +2690,7 @@ os_prompt%</pre>
<name>node(Arg) -> Node</name>
<fsummary>At which node is a pid, port or reference located</fsummary>
<type>
- <v>Arg = pid() | port() | ref()</v>
+ <v>Arg = pid() | port() | reference()</v>
<v>Node = node()</v>
</type>
<desc>
@@ -2767,8 +2767,10 @@ os_prompt%</pre>
Otherwise, some other point in time is chosen. It is also
guaranteed that subsequent calls to this BIF returns
continuously increasing values. Hence, the return value from
- <c>now()</c> can be used to generate unique time-stamps. It
- can only be used to check the local time of day if
+ <c>now()</c> can be used to generate unique time-stamps,
+ and if it is called in a tight loop on a fast machine,
+ the time of the node can become skewed.</p>
+ <p>It can only be used to check the local time of day if
the time-zone info of the underlying operating system is
properly configured.</p>
</desc>
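A short sketch of both properties described above, using only documented calls:

    %% Two consecutive calls are guaranteed to differ, so now/0
    %% values can serve as unique time-stamps within a node.
    T0 = erlang:now(),
    T1 = erlang:now(),                  %% strictly greater than T0
    true = (T1 > T0),
    %% timer:now_diff/2 gives their distance in microseconds.
    io:format("~p us apart~n", [timer:now_diff(T1, T0)]).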
@@ -4096,7 +4098,7 @@ os_prompt%</pre>
<name>erlang:read_timer(TimerRef) -> int() | false</name>
<fsummary>Number of milliseconds remaining for a timer</fsummary>
<type>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
</type>
<desc>
<p><c>TimerRef</c> is a timer reference returned by
@@ -4119,7 +4121,7 @@ os_prompt%</pre>
<name>erlang:ref_to_list(Ref) -> string()</name>
<fsummary>Text representation of a reference</fsummary>
<type>
- <v>Ref = ref()</v>
+ <v>Ref = reference()</v>
</type>
<desc>
<p>Returns a string which corresponds to the text
@@ -4298,7 +4300,7 @@ true</pre>
<v>Dest = pid() | RegName </v>
<v>&nbsp;LocalPid = pid() (of a process, alive or dead, on the local node)</v>
<v>Msg = term()</v>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
</type>
<desc>
<p>Starts a timer which will send the message <c>Msg</c>
@@ -4827,7 +4829,7 @@ true</pre>
<v>&nbsp;LocalPid = pid() (of a process, alive or dead, on the local node)</v>
<v>&nbsp;RegName = atom()</v>
<v>Msg = term()</v>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
</type>
<desc>
<p>Starts a timer which will send the message
@@ -5173,9 +5175,9 @@ true</pre>
schedulers actually have bound as requested, call
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
</p>
- <p>Schedulers can currently only be bound on newer Linux
- and Solaris systems, but more systems will be supported
- in the future.
+ <p>Schedulers can currently only be bound on newer Linux,
+ Solaris, and Windows systems, but more systems will be
+ supported in the future.
</p>
<p>In order for the runtime system to be able to bind schedulers,
the CPU topology needs to be known. If the runtime system fails
@@ -5362,7 +5364,7 @@ true</pre>
<p>Returns <c>{Allocator, Version, Features, Settings}.</c></p>
<p>Types:</p>
<list type="bulleted">
- <item><c>Allocator = undefined | elib_malloc | glibc</c></item>
+ <item><c>Allocator = undefined | glibc</c></item>
<item><c>Version = [int()]</c></item>
<item><c>Features = [atom()]</c></item>
<item><c>Settings = [{Subsystem, [{Parameter, Value}]}]</c></item>
@@ -5377,7 +5379,7 @@ true</pre>
implementation used. If <c>Allocator</c> equals
<c>undefined</c>, the <c>malloc()</c> implementation
used could not be identified. Currently
- <c>elib_malloc</c> and <c>glibc</c> can be identified.</p>
+ <c>glibc</c> can be identified.</p>
</item>
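A sketch of reading the tuple at runtime; after this change Allocator can no longer be elib_malloc:

    {Allocator, Version, _Features, _Settings} =
        erlang:system_info(allocator),
    %% Allocator is e.g. glibc, or undefined when the malloc()
    %% implementation could not be identified.
    io:format("malloc implementation: ~p ~p~n", [Allocator, Version]).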
<item>
<p><c>Version</c> is a list of integers (but not a
@@ -5531,7 +5533,7 @@ true</pre>
<c>CpuTopology</c> type to change.
</p>
</item>
- <tag><c>{cpu_topology, defined}</c></tag>
+ <tag><marker id="system_info_cpu_topology_defined"><c>{cpu_topology, defined}</c></marker></tag>
<item>
<p>Returns the user defined <c>CpuTopology</c>. For more
information see the documentation of
@@ -5541,12 +5543,14 @@ true</pre>
argument.
</p>
</item>
- <tag><c>{cpu_topology, detected}</c></tag>
+ <tag><marker id="system_info_cpu_topology_detected"><c>{cpu_topology, detected}</c></marker></tag>
<item>
<p>Returns the automatically detected <c>CpuTopology</c>. The
emulator currently only detects the CPU topology on some newer
- linux and solaris systems. For more information see the
- documentation of the
+ Linux, Solaris, and Windows systems. On Windows systems with
+ more than 32 logical processors, the CPU topology is not detected.
+ </p>
+ <p>For more information see the documentation of the
<seealso marker="#system_info_cpu_topology">cpu_topology</seealso>
argument.
</p>
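Both flavours can be queried directly; a minimal sketch (either call may yield undefined when nothing was defined or detected):

    Detected = erlang:system_info({cpu_topology, detected}),
    Defined = erlang:system_info({cpu_topology, defined}),
    io:format("detected: ~p~ndefined: ~p~n", [Detected, Defined]).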
@@ -5604,52 +5608,9 @@ true</pre>
</item>
<tag><c>elib_malloc</c></tag>
<item>
- <p>If the emulator uses the <c>elib_malloc</c> memory
- allocator, a list of two-element tuples containing status
- information is returned; otherwise, <c>false</c> is
- returned. The list currently contains the following
- two-element tuples (all sizes are presented in bytes):</p>
- <taglist>
- <tag><c>{heap_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the current heap size.</p>
- </item>
- <tag><c>{max_alloced_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the maximum amount of memory
- allocated on the heap since the emulator started.</p>
- </item>
- <tag><c>{alloced_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the current amount of memory
- allocated on the heap.</p>
- </item>
- <tag><c>{free_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the current amount of free
- memory on the heap.</p>
- </item>
- <tag><c>{no_alloced_blocks, No}</c></tag>
- <item>
- <p>Where <c>No</c> is the current number of allocated
- blocks on the heap.</p>
- </item>
- <tag><c>{no_free_blocks, No}</c></tag>
- <item>
- <p>Where <c>No</c> is the current number of free blocks
- on the heap.</p>
- </item>
- <tag><c>{smallest_alloced_block, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the size of the smallest
- allocated block on the heap.</p>
- </item>
- <tag><c>{largest_free_block, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the size of the largest free
- block on the heap.</p>
- </item>
- </taglist>
+ <p>This option will be removed in a future release.
+ The return value will always be <c>false</c> since
+ the elib_malloc allocator has been removed.</p>
</item>
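The call thus degenerates to a compatibility stub:

    %% Always false now that the elib_malloc allocator is gone.
    false = erlang:system_info(elib_malloc).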
<tag><c>fullsweep_after</c></tag>
<item>
@@ -5725,11 +5686,34 @@ true</pre>
information see the <seealso marker="erts:crash_dump">"How to interpret the Erlang crash dumps"</seealso> chapter
in the ERTS User's Guide.</p>
</item>
- <tag><c>logical_processors</c></tag>
+ <tag><marker id="logical_processors"><c>logical_processors</c></marker></tag>
<item>
- <p>Returns the number of logical processors detected on the
- system as an integer or the atom <c>unknown</c> if the
- emulator wasn't able to detect any.
+ <p>Returns the detected number of logical processors configured
+ on the system. The return value is either an integer, or
+ the atom <c>unknown</c> if the emulator wasn't able to
+ detect the number of logical processors configured.
+ </p>
+ </item>
+ <tag><marker id="logical_processors_available"><c>logical_processors_available</c></marker></tag>
+ <item>
+ <p>Returns the detected number of logical processors available to
+ the Erlang runtime system. The return value is either an
+ integer, or the atom <c>unknown</c> if the emulator wasn't
+ able to detect the number of logical processors available. The number
+ of logical processors available is less than or equal to
+ the number of <seealso marker="#logical_processors_online">logical
+ processors online</seealso>.
+ </p>
+ </item>
+ <tag><marker id="logical_processors_online"><c>logical_processors_online</c></marker></tag>
+ <item>
+ <p>Returns the detected number of logical processors online on
+ the system. The return value is either an integer,
+ or the atom <c>unknown</c> if the emulator wasn't able to
+ detect the number of logical processors online. The number of logical
+ processors online is less than or equal to the number of
+ <seealso marker="#logical_processors">logical processors
+ configured</seealso>.
</p>
</item>
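A sketch querying all three counters; each may also return the atom unknown:

    %% Invariant per the documentation above:
    %%   available =< online =< configured
    Configured = erlang:system_info(logical_processors),
    Online = erlang:system_info(logical_processors_online),
    Available = erlang:system_info(logical_processors_available),
    io:format("configured ~p, online ~p, available ~p~n",
              [Configured, Online, Available]).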
<tag><c>machine</c></tag>
@@ -5934,6 +5918,26 @@ true</pre>
<c>get_tcw</c> in "Match Specifications in Erlang",
<seealso marker="erts:match_spec#get_tcw">ERTS User's Guide</seealso>.</p>
</item>
+ <tag><marker id="update_cpu_info"><c>update_cpu_info</c></marker></tag>
+ <item>
+ <p>The runtime system rereads the CPU information available and
+ updates its internally stored information about the
+ <seealso marker="#system_info_cpu_topology_detected">detected CPU
+ topology</seealso> and the number of logical processors
+ <seealso marker="#logical_processors">configured</seealso>,
+ <seealso marker="#logical_processors_online">online</seealso>, and
+ <seealso marker="#logical_processors_available">available</seealso>.
+ If the CPU information has changed since the last time it was read,
+ the atom <c>changed</c> is returned; otherwise, the atom
+ <c>unchanged</c> is returned. If the CPU information has changed
+ you probably want to
+ <seealso marker="#system_flag_schedulers_online">adjust the amount
+ of schedulers online</seealso>. You typically want to have as
+ many schedulers online as
+ <seealso marker="#logical_processors_available">logical processors
+ available</seealso>.
+ </p>
+ </item>
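The adjustment described above might look like this sketch (falling back to a single scheduler when the count is unknown is an editorial assumption):

    case erlang:system_info(update_cpu_info) of
        changed ->
            %% CPU resources changed; match the online schedulers to
            %% what is now available.
            N = case erlang:system_info(logical_processors_available) of
                    unknown -> 1;
                    Avail -> Avail
                end,
            erlang:system_flag(schedulers_online, N);
        unchanged ->
            ok
    end.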
<tag><marker id="system_info_version"><c>version</c></marker></tag>
<item>
<p>Returns a string containing the version number of the
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index eca6121a1e..ed0d1b3fa6 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -199,6 +199,14 @@ MKDIR = @MKDIR@
USING_MINGW=@MIXED_CYGWIN_MINGW@
+ifeq ($(TARGET),win32)
+LIB_PREFIX=
+LIB_SUFFIX=.lib
+else
+LIB_PREFIX=lib
+LIB_SUFFIX=.a
+endif
+
OMIT_OMIT_FP=no
ifeq (@EMU_LOCK_CHECKING@,yes)
@@ -279,16 +287,11 @@ endif
ifeq ($(TARGET),win32)
LIBS += -L$(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE) -lepcre
-DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/epcre.lib
else
-LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a
-DEPLIBS += \
- $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a \
- $(ERL_TOP)/erts/lib/internal/$(TARGET)/liberts_internal.a
-# rem liberts_internal.a
+LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
endif
-ELIB_FLAGS = -DENABLE_ELIB_MALLOC -DELIB_ALLOC_IS_CLIB -DELIB_HEAP_SBRK
+DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
PERFCTR_PATH=@PERFCTR_PATH@
USE_PERFCTR=@USE_PERFCTR@
@@ -305,7 +308,19 @@ LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
THR_LIB_NAME=@EMU_THR_LIB_NAME@
-THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER),$(ORG_THR_LIBS))
+ifneq ($(strip $(THR_LIB_NAME)),)
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal_r$(TYPEMARKER)$(LIB_SUFFIX) \
+ $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)ethread$(TYPEMARKER)$(LIB_SUFFIX)
+else
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal$(TYPEMARKER)$(LIB_SUFFIX)
+endif
+
+THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER), \
+ $(subst -lerts_internal_r,-lerts_internal_r$(TYPEMARKER),$(ORG_THR_LIBS)))
+
+LIBS += $(THR_LIBS)
+
+ifneq ($(findstring erts_internal_r, $(THR_LIBS)),erts_internal_r)
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
ERTS_INTERNAL_LIB=erts_internal
@@ -317,7 +332,9 @@ ERTS_INTERNAL_LIB=erts_internal
endif
endif
-LIBS += $(THR_LIBS) -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+
+endif # erts_internal_r
LIBS += @LIBRT@
@@ -441,8 +458,6 @@ release_spec: all
ifeq ($(ERLANG_OSTYPE), unix)
$(INSTALL_PROGRAM) $(BINDIR)/$(CS_EXECUTABLE) $(RELSYSDIR)/bin
endif
- $(INSTALL_DIR) $(RELEASE_PATH)/usr/include/obsolete
- $(INSTALL_DATA) obsolete/driver.h $(RELEASE_PATH)/usr/include/obsolete
endif
endif
@@ -636,9 +651,6 @@ $(BINDIR)/$(CS_EXECUTABLE): $(CS_SRC)
$(CS_PURIFY) $(CC) $(CS_LDFLAGS) -o $(BINDIR)/$(CS_EXECUTABLE) \
$(CS_CFLAGS) $(COMMON_INCLUDES) $(CS_SRC) $(CS_LIBS)
-$(OBJDIR)/%.elib.o: beam/%.c
- $(CC) $(ELIB_FLAGS) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/%.kp.o: sys/common/%.c
$(CC) -DERTS_KERNEL_POLL_VERSION $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
@@ -647,9 +659,6 @@ $(OBJDIR)/%.nkp.o: sys/common/%.c
ifeq ($(GCC),yes)
-$(OBJDIR)/erl_obsolete.o: beam/erl_obsolete.c
- $(CC) $(subst -Wstrict-prototypes, , $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS))) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/erl_goodfit_alloc.o: beam/erl_goodfit_alloc.c
$(CC) $(subst -O2, $(GEN_OPT_FLGS) $(UNROLL_FLG), $(CFLAGS)) $(INCLUDES) -c $< -o $@
endif
@@ -724,7 +733,7 @@ RUN_OBJS = \
$(OBJDIR)/erl_fun.o $(OBJDIR)/erl_bif_port.o \
$(OBJDIR)/erl_term.o $(OBJDIR)/erl_node_tables.o \
$(OBJDIR)/erl_monitors.o $(OBJDIR)/erl_process_dump.o \
- $(OBJDIR)/erl_obsolete.o $(OBJDIR)/erl_bif_timer.o \
+ $(OBJDIR)/erl_bif_timer.o \
$(OBJDIR)/erl_drv_thread.o $(OBJDIR)/erl_bif_chksum.o \
$(OBJDIR)/erl_bif_re.o $(OBJDIR)/erl_unicode.o \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
@@ -748,15 +757,13 @@ OS_OBJS = \
$(OBJDIR)/sys_time.o \
$(OBJDIR)/sys_interrupt.o \
$(OBJDIR)/sys_env.o \
- $(OBJDIR)/dosmap.o \
- $(OBJDIR)/elib_malloc.o
+ $(OBJDIR)/dosmap.o
else
OS_OBJS = \
$(OBJDIR)/sys.o \
$(OBJDIR)/driver_tab.o \
$(OBJDIR)/unix_efile.o \
$(OBJDIR)/gzio.o \
- $(OBJDIR)/elib_malloc.o \
$(OBJDIR)/elib_memmove.o
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
@@ -820,16 +827,6 @@ BASE_OBJS = $(RUN_OBJS) $(EMU_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
OBJS = $(BASE_OBJS) $(DRV_OBJS)
-ELIB_C_FILES = beam/elib_malloc.c \
- beam/elib_memmove.c \
- beam/erl_bif_info.c \
- beam/utils.c \
- beam/erl_alloc.c
-
-MOD_OBJS_ELIB = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(ELIB_C_FILES)))
-OBJS_ELIB = $(patsubst %.o,%.elib.o,$(MOD_OBJS_ELIB)) \
- $(filter-out $(MOD_OBJS_ELIB),$(OBJS))
-
########################################
# HiPE section
@@ -921,10 +918,6 @@ $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
$(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \
$(HIPEBEAMLDFLAGS) $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) $(LIBS)
-$(BINDIR)/$(EMULATOR_EXECUTABLE_ELIB): $(INIT_OBJS) $(OBJS_ELIB) $(DEPLIBS)
- $(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE_ELIB) \
- $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS_ELIB) $(LIBS)
-
endif
#
@@ -1019,7 +1012,7 @@ depend:
$(DEP_CC) $(DEP_FLAGS) $(TARGET_SRC) \
| $(SED_DEPEND) >> $(TARGET)/depend.mk
ifneq ($(TARGET),win32)
- $(DEP_CC) $(DEP_FLAGS) $(ELIB_FLAGS) $(ELIB_C_FILES) \
+ $(DEP_CC) $(DEP_FLAGS) $(ELIB_C_FILES) \
| $(SED_ELIB_DEPEND) >> $(TARGET)/depend.mk
endif
ifdef HIPE_ENABLED
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index e2a79d6e4f..6b3c106a97 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -41,8 +41,7 @@ static erts_smp_rwmtx_t atom_table_lock;
#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
-#define atom_init_lock() erts_smp_rwmtx_init(&atom_table_lock, \
- "atom_tab")
+
#if 0
#define ERTS_ATOM_PUT_OPS_STAT
#endif
@@ -304,12 +303,17 @@ init_atom_table(void)
HashFunctions f;
int i;
Atom a;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_smp_atomic_init(&atom_put_ops, 0);
#endif
- atom_init_lock();
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
f.alloc = (HALLOC_FUN) atom_alloc;
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 28f69b9460..0815cdbc7f 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -432,6 +432,7 @@ atom raw
atom re
atom re_pattern
atom re_run_trap
+atom read_concurrency
atom ready_input
atom ready_output
atom ready_async
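The new atom presumably backs a table option; a hypothetical usage sketch (the link to ets is inferred from the accompanying erl_db.c changes, not stated in this hunk):

    %% Optimize the table's rwlock for concurrent readers.
    Tab = ets:new(counters, [set, public, {read_concurrency, true}]),
    true = ets:insert(Tab, {hits, 0}).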
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 596ad9a010..4fc271d41c 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -402,7 +402,7 @@ check_process_code(Process* rp, Module* modp)
BeamInstr* end;
Eterm* sp;
#ifndef HYBRID /* FIND ME! */
- ErlFunThing* funp;
+ struct erl_off_heap_header* oh;
int done_gc = 0;
#endif
@@ -470,27 +470,30 @@ check_process_code(Process* rp, Module* modp)
#ifndef HYBRID /* FIND ME! */
rescan:
- for (funp = MSO(rp).funs; funp; funp = funp->next) {
- BeamInstr* fun_code;
-
- fun_code = funp->fe->address;
-
- if (INSIDE((BeamInstr *) funp->fe->address)) {
- if (done_gc) {
- return am_true;
- } else {
- /*
- * Try to get rid of this fun by garbage collecting.
- * Clear both fvalue and ftrace to make sure they
- * don't hold any funs.
- */
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- done_gc = 1;
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- goto rescan;
+ for (oh = MSO(rp).first; oh; oh = oh->next) {
+ if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
+ ErlFunThing* funp = (ErlFunThing*) oh;
+ BeamInstr* fun_code;
+
+ fun_code = funp->fe->address;
+
+ if (INSIDE((BeamInstr *) funp->fe->address)) {
+ if (done_gc) {
+ return am_true;
+ } else {
+ /*
+ * Try to get rid of this fun by garbage collecting.
+ * Clear both fvalue and ftrace to make sure they
+ * don't hold any funs.
+ */
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ done_gc = 1;
+ FLAGS(rp) |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ goto rescan;
+ }
}
}
}
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index b5d5b3c203..ebc171078d 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -123,7 +123,7 @@ typedef struct {
Uint ms;
Uint s;
Uint us;
- Uint *pc;
+ BeamInstr *pc;
} process_breakpoint_time_t; /* used within psd */
extern erts_smp_spinlock_t erts_bp_lock;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index c0680086aa..260f349563 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -3385,8 +3385,8 @@ apply_bif_or_nif_epilogue:
HTOP += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = num_bytes;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -3486,8 +3486,8 @@ apply_bif_or_nif_epilogue:
HTOP += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = tmp_arg1;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -4463,7 +4463,7 @@ apply_bif_or_nif_epilogue:
E -= 2;
E[0] = make_cp(I);
E[1] = make_cp(c_p->cp); /* original return address */
- c_p->cp = (BeamInstr *) make_cp(beam_return_time_trace);
+ c_p->cp = beam_return_time_trace;
}
}
@@ -4493,20 +4493,20 @@ apply_bif_or_nif_epilogue:
BeamInstr real_I;
Uint32 flags;
Eterm tracer_pid;
- BeamInstr *cpp;
+ Uint* cpp;
int return_to_trace = 0, need = 0;
flags = 0;
SWAPOUT;
reg[0] = r(0);
if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = (BeamInstr*)&E[2];
+ cpp = &E[2];
} else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
- cpp = (BeamInstr*)&E[0];
+ cpp = &E[0];
} else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
return_to_trace = !0;
- cpp = (BeamInstr*)&E[0];
+ cpp = &E[0];
} else {
cpp = NULL;
}
@@ -6323,8 +6323,8 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
erts_refc_inc(&fe->refc, 2);
funp->thing_word = HEADER_FUN;
#ifndef HYBRID /* FIND ME! */
- funp->next = MSO(p).funs;
- MSO(p).funs = funp;
+ funp->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*) funp;
#endif
funp->fe = fe;
funp->num_free = num_free;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 506bf383ca..6e9755ad48 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3611,11 +3611,11 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
etp->header = make_external_pid_header(1);
- etp->next = MSO(BIF_P).externals;
+ etp->next = MSO(BIF_P).first;
etp->node = enp;
etp->data.ui[0] = make_pid_data(c, b);
- MSO(BIF_P).externals = etp;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
erts_deref_dist_entry(dep);
BIF_RET(make_external_pid(etp));
}
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 90d3a0304a..2d250f32cf 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1509,14 +1509,14 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
*hp = make_pos_bignum_header(2);
BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
- *hpp += 2;
+ *hpp += 3;
}
else
#endif
{
*hp = make_pos_bignum_header(1);
BIG_DIGIT(hp, 0) = (Uint) x;
- *hpp += 1;
+ *hpp += 2;
}
return make_big(hp);
}
@@ -1539,7 +1539,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_pos_bignum_header(2);
BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
- *hpp += 2;
+ *hpp += 3;
}
else
#endif
@@ -1549,7 +1549,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
else
*hp = make_pos_bignum_header(1);
BIG_DIGIT(hp, 0) = (Uint) x;
- *hpp += 1;
+ *hpp += 2;
}
return make_big(hp);
}
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index c68392fad4..3fd714f9c2 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -88,8 +88,8 @@ new_binary(Process *p, byte *buf, int len)
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -127,8 +127,8 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -487,16 +487,6 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-void
-erts_cleanup_mso(ProcBin* pb)
-{
- while (pb != NULL) {
- ProcBin* next = pb->next;
- if (erts_refc_dectest(&pb->val->refc, 0) == 0)
- erts_bin_free(pb->val);
- pb = next;
- }
-}
/*
* Local functions.
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 857cb177c8..f339e19761 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -611,29 +611,29 @@ static void
bin_check(void)
{
Process *rp;
- ProcBin *bp;
- int i, printed;
+ struct erl_off_heap_header* hdr;
+ int i, printed = 0;
for (i=0; i < erts_max_processes; i++) {
if ((rp = process_tab[i]) == NULL)
continue;
- if (!(bp = rp->off_heap.mso))
- continue;
- printed = 0;
- while (bp) {
- if (printed == 0) {
- erts_printf("Process %T holding binary data \n", rp->id);
- printed = 1;
+ for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) {
+ if (hdr->thing_word == HEADER_PROC_BIN) {
+ ProcBin *bp = (ProcBin*) hdr;
+ if (!printed) {
+ erts_printf("Process %T holding binary data \n", rp->id);
+ printed = 1;
+ }
+ erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
+ (unsigned long)bp->val,
+ (long)bp->val->orig_size,
+ erts_smp_atomic_read(&bp->val->refc));
}
- erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
- (unsigned long)bp->val,
- (long)bp->val->orig_size,
- erts_smp_atomic_read(&bp->val->refc));
-
- bp = bp->next;
}
- if (printed == 1)
+ if (printed) {
erts_printf("--------------------------------------\n");
+ printed = 0;
+ }
}
/* db_bin_check() has to be rewritten for the AVL trees... */
/*db_bin_check();*/
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 521a1b1788..57be0169ba 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -314,9 +314,9 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*argp = make_binary(hbot);
pb = (ProcBin*) hbot;
erts_refc_inc(&pb->val->refc, 2);
- pb->next = off_heap->mso;
+ pb->next = off_heap->first;
pb->flags = 0;
- off_heap->mso = pb;
+ off_heap->first = (struct erl_off_heap_header*) pb;
off_heap->overhead += pb->size / sizeof(Eterm);
}
break;
@@ -363,9 +363,9 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
to->val = from->val;
erts_refc_inc(&to->val->refc, 2);
to->bytes = from->bytes + offset;
- to->next = off_heap->mso;
+ to->next = off_heap->first;
to->flags = 0;
- off_heap->mso = to;
+ off_heap->first = (struct erl_off_heap_header*) to;
off_heap->overhead += to->size / sizeof(Eterm);
}
*argp = make_binary(hbot);
@@ -396,8 +396,8 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
#ifndef HYBRID /* FIND ME! */
funp = (ErlFunThing *) tp;
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*) funp;
erts_refc_inc(&funp->fe->refc, 2);
#endif
*argp = make_fun(tp);
@@ -416,8 +416,8 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*htop++ = *objp++;
}
- etp->next = off_heap->externals;
- off_heap->externals = etp;
+ etp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)etp;
erts_refc_inc(&etp->node->refc, 2);
*argp = make_external(tp);
@@ -650,8 +650,8 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
*hp++ = *objp++;
}
erts_refc_inc(&pb->val->refc, 2);
- pb->next = erts_global_offheap.mso;
- erts_global_offheap.mso = pb;
+ pb->next = erts_global_offheap.first;
+ erts_global_offheap.first = pb;
erts_global_offheap.overhead += pb->size / sizeof(Eterm);
continue;
}
@@ -672,9 +672,9 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
while (i--) {
*hp++ = *objp++;
}
-#ifndef HYBRID // FIND ME!
- funp->next = erts_global_offheap.funs;
- erts_global_offheap.funs = funp;
+#ifndef HYBRID /* FIND ME! */
+ funp->next = erts_global_offheap.first;
+ erts_global_offheap.first = funp;
erts_refc_inc(&funp->fe->refc, 2);
#endif
for (i = k; i < j; i++) {
@@ -718,8 +718,8 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
*hp++ = *objp++;
}
- etp->next = erts_global_offheap.externals;
- erts_global_offheap.externals = etp;
+ etp->next = erts_global_offheap.first;
+ erts_global_offheap.first = etp;
erts_refc_inc(&etp->node->refc, 2);
continue;
}
@@ -775,8 +775,8 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
to_bin->size = real_size;
to_bin->val = from_bin->val;
to_bin->bytes = from_bin->bytes + sub_offset;
- to_bin->next = erts_global_offheap.mso;
- erts_global_offheap.mso = to_bin;
+ to_bin->next = erts_global_offheap.first;
+ erts_global_offheap.first = to_bin;
erts_global_offheap.overhead += to_bin->size / sizeof(Eterm);
res_binary=make_binary(to_bin);
hp += PROC_BIN_SIZE;
@@ -910,57 +910,43 @@ copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
break;
case REFC_BINARY_SUBTAG:
{
- ProcBin* pb = (ProcBin *) (hp-1);
- int tari = thing_arityval(val);
-
- sz -= tari;
- while (tari--) {
- *hp++ = *tp++;
- }
+ ProcBin* pb = (ProcBin *) (tp-1);
erts_refc_inc(&pb->val->refc, 2);
- pb->next = off_heap->mso;
- off_heap->mso = pb;
off_heap->overhead += pb->size / sizeof(Eterm);
}
- break;
+ goto off_heap_common;
+
case FUN_SUBTAG:
{
-#ifndef HYBRID /* FIND ME! */
- ErlFunThing* funp = (ErlFunThing *) (hp-1);
-#endif
- int tari = thing_arityval(val);
-
- sz -= tari;
- while (tari--) {
- *hp++ = *tp++;
- }
-#ifndef HYBRID /* FIND ME! */
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ ErlFunThing* funp = (ErlFunThing *) (tp-1);
erts_refc_inc(&funp->fe->refc, 2);
-#endif
}
- break;
+ goto off_heap_common;
+
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
{
- ExternalThing* etp = (ExternalThing *) (hp-1);
+ ExternalThing* etp = (ExternalThing *) (tp-1);
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ off_heap_common:
+ {
+ struct erl_off_heap_header* ohh = (struct erl_off_heap_header*)(hp-1);
int tari = thing_arityval(val);
-
+
sz -= tari;
while (tari--) {
*hp++ = *tp++;
}
- etp->next = off_heap->externals;
- off_heap->externals = etp;
- erts_refc_inc(&etp->node->refc, 2);
+ ohh->next = off_heap->first;
+ off_heap->first = ohh;
}
break;
default:
{
int tari = header_arity(val);
-
+
sz -= tari;
while (tari--) {
*hp++ = *tp++;
@@ -1029,12 +1015,6 @@ void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
static void
move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
{
- union {
- Uint *up;
- ProcBin *pbp;
- ErlFunThing *efp;
- ExternalThing *etp;
- } ohe;
Eterm* ptr = src;
Eterm* end = ptr + src_sz;
Eterm dummy_ref;
@@ -1046,23 +1026,17 @@ move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
val = *ptr;
ASSERT(val != ERTS_HOLE_MARKER);
if (is_header(val)) {
+ struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
ASSERT(ptr + header_arity(val) < end);
- ohe.up = hp;
MOVE_BOXED(ptr, val, hp, &dummy_ref);
switch (val & _HEADER_SUBTAG_MASK) {
case REFC_BINARY_SUBTAG:
- ohe.pbp->next = off_heap->mso;
- off_heap->mso = ohe.pbp;
- break;
case FUN_SUBTAG:
- ohe.efp->next = off_heap->funs;
- off_heap->funs = ohe.efp;
- break;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
- ohe.etp->next = off_heap->externals;
- off_heap->externals = ohe.etp;
+ hdr->next = off_heap->first;
+ off_heap->first = hdr;
break;
}
}
diff --git a/erts/emulator/beam/decl.h b/erts/emulator/beam/decl.h
deleted file mode 100644
index da1be29d53..0000000000
--- a/erts/emulator/beam/decl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifndef __DECL_H__
-#define __DECL_H__
-
-#if defined(__STDC__) || defined(_MSC_VER)
-#define EXTERN_FUNCTION(t, f, x) extern t f x
-#define FUNCTION(t, f, x) t f x
-#define _DOTS_ ...
-#define _VOID_ void
-#elif defined(__cplusplus)
-#define EXTERN_FUNCTION(f, x) extern "C" { f x }
-#define FUNCTION(t, f, x) t f x
-#define _DOTS_ ...
-#define _VOID_ void
-#else
-#define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
-#define FUNCTION(t, f, x) t f (/*x*/)
-#define _DOTS_
-#define _VOID_
-#endif
-
-/*
-** Example of declarations
-**
-** EXTERN_FUNCTION(void, foo, (int, int, char));
-** FUNCTION(void, bar, (int, char));
-**
-** struct funcs {
-** FUNCTION(int*, (*f1), (int, int));
-** FUNCTION(void, (*f2), (int, char));
-** FUNCTION(void, (*f3), (_VOID_));
-** FUNCTION(int, (*f4), (char*, _DOTS_));
-** };
-**
-*/
-
-#endif
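
decl.h existed only to hide pre-ANSI compilers behind macros; with C89 assumed everywhere, prototypes can be written directly, which is why the header can simply go. A sketch of what the macro bought (and lost) on the two kinds of compiler, based on the #defines and the example block in the deleted file:

    /* Under __STDC__ the example from the deleted header:
     *     EXTERN_FUNCTION(void, foo, (int, int, char));
     * expanded to a checked ANSI prototype: */
    extern void foo(int, int, char);

    /* On a K&R compiler the parameter list was commented away,
     * so the same line degraded to an unchecked declaration:
     *     extern void foo();
     */
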
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index d465017949..16b6aeac3f 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -922,12 +922,8 @@ int erts_net_message(Port *prt,
UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
/* Thanks to Luke Gorrie */
- off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- off_heap.funs = NULL;
-#endif
+ off_heap.first = NULL;
off_heap.overhead = 0;
- off_heap.externals = NULL;
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -1434,16 +1430,8 @@ int erts_net_message(Port *prt,
}
}
- if (off_heap.mso) {
- erts_cleanup_mso(off_heap.mso);
- }
- if (off_heap.externals) {
- erts_cleanup_externals(off_heap.externals);
- }
+ erts_cleanup_offheap(&off_heap);
#ifndef HYBRID /* FIND ME! */
- if (off_heap.funs) {
- erts_cleanup_funs(off_heap.funs);
- }
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
@@ -1453,16 +1441,8 @@ int erts_net_message(Port *prt,
return 0;
data_error:
- if (off_heap.mso) {
- erts_cleanup_mso(off_heap.mso);
- }
- if (off_heap.externals) {
- erts_cleanup_externals(off_heap.externals);
- }
+ erts_cleanup_offheap(&off_heap);
#ifndef HYBRID /* FIND ME! */
- if (off_heap.funs) {
- erts_cleanup_funs(off_heap.funs);
- }
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
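
Both error paths now make one cleanup call instead of three, which is only possible because of the unified list introduced above: a single walk can dispatch on each header's subtag. A hedged sketch of such a pass follows; erts_cleanup_offheap itself is not shown in this diff, and the subtag names below are stand-ins:

    /* cleanup_sketch.c -- one walk over the unified off-heap list */
    struct oh_hdr { unsigned long subtag; struct oh_hdr *next; };

    enum { SK_BINARY, SK_FUN, SK_EXTERNAL };   /* stand-in subtags */

    static void cleanup_offheap_sketch(struct oh_hdr *first)
    {
        struct oh_hdr *h, *next;
        for (h = first; h != 0; h = next) {
            next = h->next;                    /* h may be freed below */
            switch (h->subtag) {
            case SK_BINARY: /* drop binary refc, free if it hits 0   */ break;
            case SK_FUN:    /* drop fun entry refc                   */ break;
            default:        /* external pid/port/ref: drop node refc */ break;
            }
        }
    }

    int main(void)
    {
        struct oh_hdr a = { SK_FUN, 0 }, b = { SK_BINARY, &a };
        cleanup_offheap_sketch(&b);            /* walks b, then a */
        return 0;
    }
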
diff --git a/erts/emulator/beam/elib_malloc.c b/erts/emulator/beam/elib_malloc.c
deleted file mode 100644
index b18c48d8d6..0000000000
--- a/erts/emulator/beam/elib_malloc.c
+++ /dev/null
@@ -1,2334 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
-** Description: Faster malloc().
-*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-
-#ifdef ENABLE_ELIB_MALLOC
-
-#undef THREAD_SAFE_ELIB_MALLOC
-#ifdef USE_THREADS
-#define THREAD_SAFE_ELIB_MALLOC 1
-#else
-#define THREAD_SAFE_ELIB_MALLOC 0
-#endif
-
-#include "erl_driver.h"
-#include "erl_threads.h"
-#include "elib_stat.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-/* To avoid clobbering of names because of reclaim on VxWorks,
-   we undefine all possible malloc, calloc etc. */
-#undef malloc
-#undef calloc
-#undef free
-#undef realloc
-
-#define ELIB_INLINE /* inline all possible functions */
-
-#ifndef ELIB_ALIGN
-#define ELIB_ALIGN sizeof(double)
-#endif
-
-#ifndef ELIB_HEAP_SIZE
-#define ELIB_HEAP_SIZE (64*1024) /* Default 64K */
-#endif
-
-#ifndef ELIB_HEAP_INCREAMENT
-#define ELIB_HEAP_INCREAMENT (32*1024) /* Default 32K */
-#endif
-
-#ifndef ELIB_FAILURE
-#define ELIB_FAILURE abort()
-#endif
-
-#undef ASSERT
-#ifdef DEBUG
-#define ASSERT(B) \
- ((void) ((B) ? 1 : (fprintf(stderr, "%s:%d: Assertion failed: %s\n", \
- __FILE__, __LINE__, #B), abort(), 0)))
-#else
-#define ASSERT(B) ((void) 1)
-#endif
-
-#ifndef USE_RECURSIVE_MALLOC_MUTEX
-#define USE_RECURSIVE_MALLOC_MUTEX 0
-#endif
-
-#if USE_RECURSIVE_MALLOC_MUTEX
-static erts_mtx_t malloc_mutex = ERTS_REC_MTX_INITER;
-#else /* #if USE_RECURSIVE_MALLOC_MUTEX */
-static erts_mtx_t malloc_mutex = ERTS_MTX_INITER;
-#if THREAD_SAFE_ELIB_MALLOC
-static erts_cnd_t malloc_cond = ERTS_CND_INITER;
-#endif
-#endif /* #if USE_RECURSIVE_MALLOC_MUTEX */
-
-typedef unsigned long EWord; /* Assume 32-bit in this implementation */
-typedef unsigned short EHalfWord; /* Assume 16-bit in this implementation */
-typedef unsigned char EByte; /* Assume 8-bit byte */
-
-
-#define elib_printf fprintf
-#define elib_putc fputc
-
-
-#if defined(__STDC__) || defined(__WIN32__)
-#define CONCAT(x,y) x##y
-#else
-#define CONCAT(x,y) x/**/y
-#endif
-
-
-#ifdef ELIB_DEBUG
-#define ELIB_PREFIX(fun, args) CONCAT(elib__,fun) args
-#else
-#define ELIB_PREFIX(fun, args) CONCAT(elib_,fun) args
-#endif
-
-#if defined(__STDC__)
-void *ELIB_PREFIX(malloc, (size_t));
-void *ELIB_PREFIX(calloc, (size_t, size_t));
-void ELIB_PREFIX(cfree, (EWord *));
-void ELIB_PREFIX(free, (EWord *));
-void *ELIB_PREFIX(realloc, (EWord *, size_t));
-void* ELIB_PREFIX(memresize, (EWord *, int));
-void* ELIB_PREFIX(memalign, (int, int));
-void* ELIB_PREFIX(valloc, (int));
-void* ELIB_PREFIX(pvalloc, (int));
-int ELIB_PREFIX(memsize, (EWord *));
-/* Extern interfaces used by VxWorks */
-size_t elib_sizeof(void *);
-void elib_init(EWord *, EWord);
-void elib_force_init(EWord *, EWord);
-#endif
-
-#if defined(__STDC__)
-/* define prototypes for missing */
-void* memalign(size_t a, size_t s);
-void* pvalloc(size_t nb);
-void* memresize(void *p, int nb);
-int memsize(void *p);
-#endif
-
-/* bytes to pages */
-#define PAGES(x) (((x)+page_size-1) / page_size)
-#define PAGE_ALIGN(p) ((char*)((((EWord)(p))+page_size-1)&~(page_size-1)))
-
-/* bytes to words */
-#define WORDS(x) (((x)+sizeof(EWord)-1) / sizeof(EWord))
-
-/* Align an address */
-#define ALIGN(p) ((EWord*)((((EWord)(p)+ELIB_ALIGN-1)&~(ELIB_ALIGN-1))))
-
-/* Calculate the size needed to keep alignment */
-
-#define ALIGN_BSZ(nb) ((nb+sizeof(EWord)+ELIB_ALIGN-1) & ~(ELIB_ALIGN-1))
-
-#define ALIGN_WSZ(nb) WORDS(ALIGN_BSZ(nb))
-
-#define ALIGN_SIZE(nb) (ALIGN_WSZ(nb) - 1)
-
-
-/* PARAMETERS */
-
-#if defined(ELIB_HEAP_SBRK)
-
-#undef PAGE_SIZE
-
-/* Get the system page size (NEED MORE DEFINES HERE) */
-#ifdef _SC_PAGESIZE
-#define PAGE_SIZE sysconf(_SC_PAGESIZE)
-#elif defined(_MSC_VER)
-# ifdef _M_ALPHA
-# define PAGE_SIZE 0x2000
-# else
-# define PAGE_SIZE 0x1000
-# endif
-#else
-#define PAGE_SIZE getpagesize()
-#endif
-
-#define ELIB_EXPAND(need) expand_sbrk(need)
-static FUNCTION(int, expand_sbrk, (EWord));
-
-#elif defined(ELIB_HEAP_FIXED)
-
-#define PAGE_SIZE 1024
-#define ELIB_EXPAND(need) -1
-static EWord fix_heap[WORDS(ELIB_HEAP_SIZE)];
-
-#elif defined(ELIB_HEAP_USER)
-
-#define PAGE_SIZE 1024
-#define ELIB_EXPAND(need) -1
-
-#else
-
-#error "ELIB HEAP TYPE NOT SET"
-
-#endif
-
-
-#define STAT_ALLOCED_BLOCK(SZ) \
-do { \
- tot_allocated += (SZ); \
- if (max_allocated < tot_allocated) \
- max_allocated = tot_allocated; \
-} while (0)
-
-#define STAT_FREED_BLOCK(SZ) \
-do { \
- tot_allocated -= (SZ); \
-} while (0)
-
-static int max_allocated = 0;
-static int tot_allocated = 0;
-static EWord* eheap; /* Align heap start */
-static EWord* eheap_top; /* Point to end of heap */
-EWord page_size = 0; /* Set by elib_init */
-
-#if defined(ELIB_DEBUG) || defined(DEBUG)
-#define ALIGN_CHECK(a, p) \
- do { \
- if ((EWord)(p) & (a-1)) { \
- elib_printf(stderr, \
- "RUNTIME ERROR: bad alignment (0x%lx:%d:%d)\n", \
- (unsigned long) (p), (int) a, __LINE__); \
- ELIB_FAILURE; \
- } \
- } while(0)
-#define ELIB_ALIGN_CHECK(p) ALIGN_CHECK(ELIB_ALIGN, p)
-#else
-#define ALIGN_CHECK(a, p)
-#define ELIB_ALIGN_CHECK(p)
-#endif
-
-#define DYNAMIC 32
-
-/*
-** Free block layout
-** 1 1 30
-** +--------------------------+
-** |F|P| Size |
-** +--------------------------+
-**
-** Where F is the free bit
-** P is the free above bit
-** Size is measured in words and does not include the hdr word
-**
-** If block is on the free list the size is also stored last in the block.
-**
-*/
-typedef struct _free_block FreeBlock;
-struct _free_block {
- EWord hdr;
- Uint flags;
- FreeBlock* parent;
- FreeBlock* left;
- FreeBlock* right;
- EWord v[1];
-};
-
-typedef struct _allocated_block {
- EWord hdr;
- EWord v[5];
-} AllocatedBlock;
-
-
-/*
- * Interface to tree routines.
- */
-typedef Uint Block_t;
-
-static Block_t* get_free_block(Uint);
-static void link_free_block(Block_t *);
-static void unlink_free_block(Block_t *del);
-
-#define FREE_BIT 0x80000000
-#define FREE_ABOVE_BIT 0x40000000
-#define SIZE_MASK 0x3fffffff /* 2^30 words = 2^32 bytes */
-
-/* Work on both FreeBlock and AllocatedBlock */
-#define SIZEOF(p) ((p)->hdr & SIZE_MASK)
-#define IS_FREE(p) (((p)->hdr & FREE_BIT) != 0)
-#define IS_FREE_ABOVE(p) (((p)->hdr & FREE_ABOVE_BIT) != 0)
-
-/* Given that we have a free block above find its size */
-#define SIZEOF_ABOVE(p) *(((EWord*) (p)) - 1)
-
-#define MIN_BLOCK_SIZE (sizeof(FreeBlock)/sizeof(EWord))
-#define MIN_WORD_SIZE (MIN_BLOCK_SIZE-1)
-#define MIN_BYTE_SIZE (sizeof(FreeBlock)-sizeof(EWord))
-
-#define MIN_ALIGN_SIZE ALIGN_SIZE(MIN_BYTE_SIZE)
-
-
-static AllocatedBlock* heap_head = 0;
-static AllocatedBlock* heap_tail = 0;
-static EWord eheap_size = 0;
-
-static int heap_locked;
-
-static int elib_need_init = 1;
-#if THREAD_SAFE_ELIB_MALLOC
-static int elib_is_initing = 0;
-#endif
-
-typedef FreeBlock RBTree_t;
-
-static RBTree_t* root = NULL;
-
-
-static FUNCTION(void, deallocate, (AllocatedBlock*, int));
-
-/*
- * Unlink a free block
- */
-
-#define mark_allocated(p, szp) do { \
- (p)->hdr = ((p)->hdr & FREE_ABOVE_BIT) | (szp); \
- (p)->v[szp] &= ~FREE_ABOVE_BIT; \
- } while(0)
-
-#define mark_free(p, szp) do { \
- (p)->hdr = FREE_BIT | (szp); \
- ((FreeBlock *)p)->v[szp-sizeof(FreeBlock)/sizeof(EWord)+1] = (szp); \
- } while(0)
-
-#if 0
-/* Help macros to log2 */
-#define LOG_1(x) (((x) > 1) ? 1 : 0)
-#define LOG_2(x) (((x) > 3) ? 2+LOG_1((x) >> 2) : LOG_1(x))
-#define LOG_4(x) (((x) > 15) ? 4+LOG_2((x) >> 4) : LOG_2(x))
-#define LOG_8(x) (((x) > 255) ? 8+LOG_4((x)>>8) : LOG_4(x))
-#define LOG_16(x) (((x) > 65535) ? 16+LOG_8((x)>>16) : LOG_8(x))
-
-#define log2(x) LOG_16(x)
-#endif
-
-/*
- * Split a block to be allocated.
- * Mark block as ALLOCATED and clear
- * FREE_ABOVE_BIT on next block
- *
- * nw is SIZE aligned and szp is SIZE aligned + 1
- */
-static void
-split_block(FreeBlock* p, EWord nw, EWord szp)
-{
- EWord szq;
- FreeBlock* q;
-
- szq = szp - nw;
- /* Preserve FREE_ABOVE bit in p->hdr !!! */
-
- if (szq >= MIN_ALIGN_SIZE+1) {
- szq--;
- p->hdr = (p->hdr & FREE_ABOVE_BIT) | nw;
-
- q = (FreeBlock*) (((EWord*) p) + nw + 1);
- mark_free(q, szq);
- link_free_block((Block_t *) q);
-
- q = (FreeBlock*) (((EWord*) q) + szq + 1);
- q->hdr |= FREE_ABOVE_BIT;
- }
- else {
- mark_allocated((AllocatedBlock*)p, szp);
- }
-}
-
-/*
- * Find a free block
- */
-static FreeBlock*
-alloc_block(EWord nw)
-{
- for (;;) {
- FreeBlock* p = (FreeBlock *) get_free_block(nw);
-
- if (p != NULL) {
- return p;
- } else if (ELIB_EXPAND(nw+MIN_WORD_SIZE)) {
- return 0;
- }
- }
-}
-
-
-size_t elib_sizeof(void *p)
-{
- AllocatedBlock* pp;
-
- if (p != 0) {
- pp = (AllocatedBlock*) (((char *)p)-1);
- return SIZEOF(pp);
- }
- return 0;
-}
-
-static void locked_elib_init(EWord*, EWord);
-static void init_elib_malloc(EWord*, EWord);
-
-/*
-** Initialize the elib
-** The addr and sz are only used when compiled with EXPAND_ADDR
-*/
-/* Not static, this is used by VxWorks */
-void elib_init(EWord* addr, EWord sz)
-{
- if (!elib_need_init)
- return;
- erts_mtx_lock(&malloc_mutex);
- locked_elib_init(addr, sz);
- erts_mtx_unlock(&malloc_mutex);
-}
-
-static void locked_elib_init(EWord* addr, EWord sz)
-{
- if (!elib_need_init)
- return;
-
-#if THREAD_SAFE_ELIB_MALLOC
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- {
- static erts_tid_t initer_tid;
-
- if(elib_is_initing) {
-
- if(erts_equal_tids(initer_tid, erts_thr_self()))
- return;
-
- /* Wait until initializing thread is done with initialization */
-
- while(elib_need_init)
- erts_cnd_wait(&malloc_cond, &malloc_mutex);
-
- return;
- }
- else {
- initer_tid = erts_thr_self();
- elib_is_initing = 1;
- }
- }
-#else
- if(elib_is_initing)
- return;
- elib_is_initing = 1;
-#endif
-
-#endif /* #if THREAD_SAFE_ELIB_MALLOC */
-
- /* Do the actual initialization of the malloc implementation */
- init_elib_malloc(addr, sz);
-
-#if THREAD_SAFE_ELIB_MALLOC
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- erts_mtx_unlock(&malloc_mutex);
-#endif
-
- /* Recursive calls to malloc are allowed here... */
- erts_mtx_set_forksafe(&malloc_mutex);
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- erts_mtx_lock(&malloc_mutex);
- elib_is_initing = 0;
-#endif
-
-#endif /* #if THREAD_SAFE_ELIB_MALLOC */
-
- elib_need_init = 0;
-
-#if THREAD_SAFE_ELIB_MALLOC && !USE_RECURSIVE_MALLOC_MUTEX
- erts_cnd_broadcast(&malloc_cond);
-#endif
-
-}
-
-static void init_elib_malloc(EWord* addr, EWord sz)
-{
- int i;
- FreeBlock* freep;
- EWord tmp_sz;
-#ifdef ELIB_HEAP_SBRK
- char* top;
- EWord n;
-#endif
-
- max_allocated = 0;
- tot_allocated = 0;
- root = NULL;
-
- /* Get the page size (may involve system call!!!) */
- page_size = PAGE_SIZE;
-
-#if defined(ELIB_HEAP_SBRK)
- sz = PAGES(ELIB_HEAP_SIZE)*page_size;
-
- if ((top = (char*) sbrk(0)) == (char*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(0)");
- ELIB_FAILURE;
- }
- n = PAGE_ALIGN(top) - top;
- if ((top = (char*) sbrk(n)) == (char*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(n)");
- ELIB_FAILURE;
- }
- if ((eheap = (EWord*) sbrk(sz)) == (EWord*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(SIZE)");
- ELIB_FAILURE;
- }
- sz = WORDS(ELIB_HEAP_SIZE);
-#elif defined(ELIB_HEAP_FIXED)
- eheap = fix_heap;
- sz = WORDS(ELIB_HEAP_SIZE);
-#elif defined(ELIB_HEAP_USER)
- eheap = addr;
- sz = WORDS(sz);
-#else
- return -1;
-#endif
- eheap_size = 0;
-
- /* Make sure that the first word of the heap_head is aligned */
- addr = ALIGN(eheap+1);
- sz -= ((addr - 1) - eheap); /* Subtract unusable size */
- eheap_top = eheap = addr - 1; /* Set new aligned heap start */
-
- eheap_top[sz-1] = 0; /* Heap stop mark */
-
- addr = eheap;
- heap_head = (AllocatedBlock*) addr;
- heap_head->hdr = MIN_ALIGN_SIZE;
- for (i = 0; i < MIN_ALIGN_SIZE; i++)
- heap_head->v[i] = 0;
-
- addr += (MIN_ALIGN_SIZE+1);
- freep = (FreeBlock*) addr;
- tmp_sz = sz - (((MIN_ALIGN_SIZE+1) + MIN_BLOCK_SIZE) + 1 + 1);
- mark_free(freep, tmp_sz);
- link_free_block((Block_t *) freep);
-
- /* No need to align heap tail */
- heap_tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
- heap_tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
- heap_tail->v[0] = 0;
- heap_tail->v[1] = 0;
- heap_tail->v[2] = 0;
-
- eheap_top += sz;
- eheap_size += sz;
-
- heap_locked = 0;
-}
-
-#ifdef ELIB_HEAP_USER
-void elib_force_init(EWord* addr, EWord sz)
-{
- elib_need_init = 1;
- elib_init(addr,sz);
-}
-#endif
-
-#ifdef ELIB_HEAP_SBRK
-
-/*
-** sz: the need in words (should include head and tail words)
-*/
-static int expand_sbrk(EWord sz)
-{
- EWord* p;
- EWord bytes = sz * sizeof(EWord);
- EWord size;
- AllocatedBlock* tail;
-
- if (bytes < ELIB_HEAP_SIZE)
- size = PAGES(ELIB_HEAP_INCREAMENT)*page_size;
- else
- size = PAGES(bytes)*page_size;
-
- if ((p = (EWord*) sbrk(size)) == ((EWord*) -1))
- return -1;
-
- if (p != eheap_top) {
- elib_printf(stderr, "panic: sbrk moved\n");
- ELIB_FAILURE;
- }
-
- sz = WORDS(size);
-
- /* Set new end-of-heap marker and a new heap tail */
- eheap_top[sz-1] = 0;
-
- tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
- tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
- tail->v[0] = 0;
- tail->v[1] = 0;
- tail->v[2] = 0;
-
- /* Patch old tail with new appended size */
- heap_tail->hdr = (heap_tail->hdr & FREE_ABOVE_BIT) |
- (MIN_WORD_SIZE+1+(sz-MIN_BLOCK_SIZE-1));
- deallocate(heap_tail, 0);
-
- heap_tail = tail;
-
- eheap_size += sz;
- eheap_top += sz;
-
- return 0;
-}
-
-#endif /* ELIB_HEAP_SBRK */
-
-
-/*
-** Scan heap and check for corrupted heap
-*/
-int elib_check_heap(void)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- if (heap_locked) {
- elib_printf(stderr, "heap is locked no info avaiable\n");
- return 0;
- }
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- if (p->v[sz-1] != sz) {
- elib_printf(stderr, "panic: heap corrupted\r\n");
- ELIB_FAILURE;
- }
- p = (AllocatedBlock*) (p->v + sz);
- if (!IS_FREE_ABOVE(p)) {
- elib_printf(stderr, "panic: heap corrupted\r\n");
- ELIB_FAILURE;
- }
- }
- else
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 1;
-}
-
-/*
-** Load the byte vector pointed to by v of length vsz
-** with a heap image
-** The scale is defined by vsz and the current heap size
-** free = 0, full = 255
-**
-**
-*/
-int elib_heap_map(EByte* v, int vsz)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
- int gsz = eheap_size / vsz; /* The granularity used */
- int fsz = 0;
- int usz = 0;
-
- if (gsz == 0)
- return -1; /* resolution too fine */
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- fsz += sz;
- if ((fsz + usz) > gsz) {
- *v++ = (255*usz)/gsz;
- fsz -= (gsz - usz);
- usz = 0;
- while(fsz >= gsz) {
- *v++ = 0;
- fsz -= gsz;
- }
- }
- }
- else {
- usz += sz;
- if ((fsz + usz) > gsz) {
- *v++ = 255 - (255*fsz)/gsz;
- usz -= (gsz - fsz);
- fsz = 0;
- while(usz >= gsz) {
- *v++ = 255;
- usz -= gsz;
- }
- }
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Generate a histogram of free/allocated blocks
-** Count granularity of 10 gives
-** (0-10],(10-100],(100-1000],(1000-10000] ...
-** (0-2], (2-4], (4-8], (8-16], ....
-*/
-static int i_logb(EWord size, int base)
-{
- int lg = 0;
- while(size >= base) {
- size /= base;
- lg++;
- }
- return lg;
-}
-
-int elib_histo(EWord* vf, EWord* va, int vsz, int base)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
- int i;
- int linear;
-
- if ((vsz <= 1) || (vf == 0 && va == 0))
- return -1;
-
- if (base < 0) {
- linear = 1;
- base = -base;
- }
- else
- linear = 0;
-
- if (base <= 1)
- return -1;
-
- if (vf != 0) {
- for (i = 0; i < vsz; i++)
- vf[i] = 0;
- }
- if (va != 0) {
- for (i = 0; i < vsz; i++)
- va[i] = 0;
- }
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- if (vf != 0) {
- int val;
- if (linear)
- val = sz / base;
- else
- val = i_logb(sz, base);
- if (val >= vsz)
- vf[vsz-1]++;
- else
- vf[val]++;
- }
- }
- else {
- if (va != 0) {
- int val;
- if (linear)
- val = sz / base;
- else
- val = i_logb(sz, base);
- if (val >= vsz)
- va[vsz-1]++;
- else
- va[val]++;
- }
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Fill the info structure with actual values
-** Total
-** Allocated
-** Free
-** Max free
-*/
-void elib_stat(struct elib_stat* info)
-{
- EWord blks = 0;
- EWord sz_free = 0;
- EWord sz_alloc = 0;
- EWord sz_max_free = 0;
- EWord sz_min_used = 0x7fffffff;
- EWord sz;
- EWord num_free = 0;
- AllocatedBlock* p = heap_head;
-
- info->mem_total = eheap_size;
-
- p = (AllocatedBlock*) (p->v + SIZEOF(p));
-
- while((sz = SIZEOF(p)) != 0) {
- blks++;
- if (IS_FREE(p)) {
- if (sz > sz_max_free)
- sz_max_free = sz;
- sz_free += sz;
- ++num_free;
- }
- else {
- if (sz < sz_min_used)
- sz_min_used = sz;
- sz_alloc += sz;
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- info->mem_blocks = blks;
- info->free_blocks = num_free;
- info->mem_alloc = sz_alloc;
- info->mem_free = sz_free;
- info->min_used = sz_min_used;
- info->max_free = sz_max_free;
- info->mem_max_alloc = max_allocated;
- ASSERT(sz_alloc == tot_allocated);
-}
-
-/*
-** Dump the heap
-*/
-void elib_heap_dump(char* label)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- elib_printf(stderr, "HEAP DUMP (%s)\n", label);
- if (!elib_check_heap())
- return;
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- elib_printf(stderr, "%p: FREE, size = %d\n", p, (int) sz);
- }
- else {
- elib_printf(stderr, "%p: USED, size = %d %s\n", p, (int) sz,
- IS_FREE_ABOVE(p)?"(FREE ABOVE)":"");
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
-}
-
-/*
-** Scan heaps and count:
-** free_size, allocated_size, max_free_block
-*/
-void elib_statistics(void* to)
-{
- struct elib_stat info;
- EWord frag;
-
- if (!elib_check_heap())
- return;
-
- elib_stat(&info);
-
- frag = 1000 - ((1000 * info.max_free) / info.mem_free);
-
- elib_printf(to, "Heap Statistics: total(%d), blocks(%d), frag(%d.%d%%)\n",
- info.mem_total, info.mem_blocks,
- (int) frag/10, (int) frag % 10);
-
- elib_printf(to, " allocated(%d), free(%d), "
- "free_blocks(%d)\n",
- info.mem_alloc, info.mem_free,info.free_blocks);
- elib_printf(to, " max_free(%d), min_used(%d)\n",
- info.max_free, info.min_used);
-}
-
-/*
-** Allocate at least nb bytes with alignment a
-** Algorithm:
-** 1) Try to locate a block that matches exactly, by direct index.
-** 2) Try using a fixed block of greater size.
-** 3) Try to locate a block by searching lists where block sizes
-**    X vary within 2^i < X <= 2^(i+1).
-**
-** Reset memory to zero if clear is true
-*/
-static AllocatedBlock* allocate(EWord nb, EWord a, int clear)
-{
- FreeBlock* p;
- EWord nw;
-
- if (a == ELIB_ALIGN) {
- /*
- * Common case: Called by malloc(), realloc(), calloc().
- */
- nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
-
- if ((p = alloc_block(nw)) == 0)
- return NULL;
- } else {
- /*
- * Special case: Called by memalign().
- */
- EWord asz, szp, szq, tmpsz;
- FreeBlock *q;
-
- if ((p = alloc_block((1+MIN_ALIGN_SIZE)*sizeof(EWord)+a-1+nb)) == 0)
- return NULL;
-
- asz = a - ((EWord) ((AllocatedBlock *)p)->v) % a;
-
- if (asz != a) {
- /* Enforce the alignment requirement by cutting off a free
- block at the beginning of the block. */
-
- if (asz < (1+MIN_ALIGN_SIZE)*sizeof(EWord) && !IS_FREE_ABOVE(p)) {
- /* Not enough room to cut off a free block;
- increase align size */
- asz += (((1+MIN_ALIGN_SIZE)*sizeof(EWord) + a - 1)/a)*a;
- }
-
- szq = ALIGN_SIZE(asz - sizeof(EWord));
- szp = SIZEOF(p) - szq - 1;
-
- q = p;
- p = (FreeBlock*) (((EWord*) q) + szq + 1);
- p->hdr = FREE_ABOVE_BIT | FREE_BIT | szp;
-
- if (IS_FREE_ABOVE(q)) { /* This should not be possible I think,
- but just in case... */
- tmpsz = SIZEOF_ABOVE(q) + 1;
- szq += tmpsz;
- q = (FreeBlock*) (((EWord*) q) - tmpsz);
- unlink_free_block((Block_t *) q);
- q->hdr = (q->hdr & FREE_ABOVE_BIT) | FREE_BIT | szq;
- }
- mark_free(q, szq);
- link_free_block((Block_t *) q);
-
- } /* else already had the correct alignment */
-
- nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
- }
-
- split_block(p, nw, SIZEOF(p));
-
- STAT_ALLOCED_BLOCK(SIZEOF(p));
-
- if (clear) {
- EWord* pp = ((AllocatedBlock*)p)->v;
-
- while(nw--)
- *pp++ = 0;
- }
-
- return (AllocatedBlock*) p;
-}
-
-
-/*
-** Deallocate memory pointed to by p
-** 1. Merge with the block above if it is free
-** 2. Merge with the block below if it is free
-** Link the block to the correct free list
-**
-** p points to the block header!
-**
-*/
-static void deallocate(AllocatedBlock* p, int stat_count)
-{
- FreeBlock* q;
- EWord szq;
- EWord szp;
-
- szp = SIZEOF(p);
-
- if (stat_count)
- STAT_FREED_BLOCK(SIZEOF(p));
-
- if (IS_FREE_ABOVE(p)) {
- szq = SIZEOF_ABOVE(p);
- q = (FreeBlock*) ( ((EWord*) p) - szq - 1);
- unlink_free_block((Block_t *) q);
-
- p = (AllocatedBlock*) q;
- szp += (szq + 1);
- }
- q = (FreeBlock*) (p->v + szp);
- if (IS_FREE(q)) {
- szq = SIZEOF(q);
- unlink_free_block((Block_t *) q);
- szp += (szq + 1);
- }
- else
- q->hdr |= FREE_ABOVE_BIT;
-
- /* The block above p can NEVER be free !!! */
- p->hdr = FREE_BIT | szp;
- p->v[szp-1] = szp;
-
- link_free_block((Block_t *) p);
-}
-
-/*
-** Reallocate memory
-** If preserve is true then data is moved if necessary
-*/
-static AllocatedBlock* reallocate(AllocatedBlock* p, EWord nb, int preserve)
-{
- EWord szp;
- EWord szq;
- EWord sz;
- EWord nw;
- FreeBlock* q;
-
- if (nb < MIN_BYTE_SIZE)
- nw = MIN_ALIGN_SIZE;
- else
- nw = ALIGN_SIZE(nb);
-
- sz = szp = SIZEOF(p);
-
- STAT_FREED_BLOCK(szp);
-
- /* Merge with block below */
- q = (FreeBlock*) (p->v + szp);
- if (IS_FREE(q)) {
- szq = SIZEOF(q);
- unlink_free_block((Block_t *) q);
- szp += (szq + 1);
- }
-
- if (nw <= szp) {
- split_block((FreeBlock *) p, nw, szp);
- STAT_ALLOCED_BLOCK(SIZEOF(p));
- return p;
- }
- else {
- EWord* dp = p->v;
- AllocatedBlock* npp;
-
- if (IS_FREE_ABOVE(p)) {
- szq = SIZEOF_ABOVE(p);
- if (szq + szp + 1 >= nw) {
- q = (FreeBlock*) (((EWord*) p) - szq - 1);
- unlink_free_block((Block_t * )q);
- szp += (szq + 1);
- p = (AllocatedBlock*) q;
-
- if (preserve) {
- EWord* pp = p->v;
- while(sz--)
- *pp++ = *dp++;
- }
- split_block((FreeBlock *) p, nw, szp);
- STAT_ALLOCED_BLOCK(SIZEOF(p));
- return p;
- }
- }
-
- /*
- * Update p so that allocate() and deallocate() work.
- * (Note that allocate() may call expand_sbrk(), which
- * in turn calls deallocate().)
- */
-
- p->hdr = (p->hdr & FREE_ABOVE_BIT) | szp;
- p->v[szp] &= ~FREE_ABOVE_BIT;
-
- npp = allocate(nb, ELIB_ALIGN, 0);
- if(npp == NULL)
- return NULL;
- if (preserve) {
- EWord* pp = npp->v;
- while(sz--)
- *pp++ = *dp++;
- }
- deallocate(p, 0);
- return npp;
- }
-}
-
-/*
-** What malloc() and friends should do (and return) when the heap is
-** exhausted. [sverkerw]
-*/
-static void* heap_exhausted(void)
-{
- /* Choose behaviour */
-#if 0
- /* Crash-and-burn --- leave a usable corpse (hopefully) */
- abort();
-#endif
- /* The usual ANSI-compliant behaviour */
- return NULL;
-}
-
-/*
-** Allocate size bytes of memory
-*/
-void* ELIB_PREFIX(malloc, (size_t nb))
-{
- void *res;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (nb == 0)
- res = NULL;
- else if ((p = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-
-void* ELIB_PREFIX(calloc, (size_t nelem, size_t size))
-{
- void *res;
- int nb;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if ((nb = nelem * size) == 0)
- res = NULL;
- else if ((p = allocate(nb, ELIB_ALIGN, 1)) != 0) {
- ELIB_ALIGN_CHECK(p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-/*
-** Free memory allocated by malloc
-*/
-
-void ELIB_PREFIX(free, (EWord* p))
-{
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0)
- deallocate((AllocatedBlock*)(p-1), 1);
-
- erts_mtx_unlock(&malloc_mutex);
-}
-
-void ELIB_PREFIX(cfree, (EWord* p))
-{
- ELIB_PREFIX(free, (p));
-}
-
-
-/*
-** Realloc the memory allocated in p to nb bytes
-**
-*/
-
-void* ELIB_PREFIX(realloc, (EWord* p, size_t nb))
-{
- void *res = NULL;
- AllocatedBlock* pp;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0) {
- pp = (AllocatedBlock*) (p-1);
- if (nb > 0) {
- if ((pp = reallocate(pp, nb, 1)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- }
- else
- deallocate(pp, 1);
- }
- else if (nb > 0) {
- if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- else
- res = heap_exhausted();
- }
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-/*
-** Resize the memory area pointed to by p to nb bytes
-*/
-void* ELIB_PREFIX(memresize, (EWord* p, int nb))
-{
- void *res = NULL;
- AllocatedBlock* pp;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0) {
- pp = (AllocatedBlock*) (p-1);
- if (nb > 0) {
- if ((pp = reallocate(pp, nb, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- }
- else
- deallocate(pp, 1);
- }
- else if (nb > 0) {
- if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- else
- res = heap_exhausted();
- }
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-
-/* Create aligned memory; a must be a power of 2 !!! */
-
-void* ELIB_PREFIX(memalign, (int a, int nb))
-{
- void *res;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (nb == 0 || a <= 0)
- res = NULL;
- else if ((p = allocate(nb, a, 0)) != 0) {
- ALIGN_CHECK(a, p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-void* ELIB_PREFIX(valloc, (int nb))
-{
- return ELIB_PREFIX(memalign, (page_size, nb));
-}
-
-
-void* ELIB_PREFIX(pvalloc, (int nb))
-{
- return ELIB_PREFIX(memalign, (page_size, PAGES(nb)*page_size));
-}
-/* Return memory size for pointer p in bytes */
-
-int ELIB_PREFIX(memsize, (p))
-EWord* p;
-{
- return SIZEOF((AllocatedBlock*)(p-1))*4;
-}
-
-
-/*
-** --------------------------------------------------------------------------
-** DEBUG LIBRARY
-** --------------------------------------------------------------------------
-*/
-
-#ifdef ELIB_DEBUG
-
-#define IN_HEAP(p) (((p) >= (char*) eheap) && (p) < (char*) eheap_top)
-/*
-** ptr_to_block: return the pointer to heap block pointed into by ptr
-** Returns 0 if not pointing into a block
-*/
-
-static EWord* ptr_to_block(char* ptr)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- while((sz = SIZEOF(p)) != 0) {
- if ((ptr >= (char*) p->v) && (ptr < (char*)(p->v+sz)))
- return p->v;
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Validate a pointer
-** returns:
-** 0 - if points into an allocated block
-** 1 - if points into the heap but not into any block
-** -1 - if points outside the heap
-**
-*/
-static int check_pointer(char* ptr)
-{
- if (IN_HEAP(ptr)) {
- if (ptr_to_block(ptr) == 0)
- return 1;
- return 0;
- }
- return -1;
-}
-
-/*
-** Validate a memory area
-** returns:
-** 0 - if area is included in a block
-** -1 - if area overlaps a heap block
-** 1 - if area is outside heap
-*/
-static int check_area(char* ptr, int n)
-{
- if (IN_HEAP(ptr)) {
- if (IN_HEAP(ptr+n-1)) {
- EWord* p1 = ptr_to_block(ptr);
- EWord* p2 = ptr_to_block(ptr+n-1);
-
- if (p1 == p2)
- return (p1 == 0) ? -1 : 0;
- return -1;
- }
- }
- else if (IN_HEAP(ptr+n-1))
- return -1;
- return 1;
-}
-
-/*
-** Check if a block write will overwrite heap block
-*/
-static void check_write(char* ptr, int n, char* file, int line, char* fun)
-{
- if (check_area(ptr, n) == -1) {
- elib_printf(stderr, "RUNTIME ERROR: %s heap overwrite\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-}
-
-/*
-** Check if a pointer is an allocated object
-*/
-static void check_allocated_block(char* ptr, char* file, int line, char* fun)
-{
- EWord* q;
-
- if (!IN_HEAP(ptr) || ((q=ptr_to_block(ptr)) == 0) || (ptr != (char*) q)) {
- elib_printf(stderr, "RUNTIME ERROR: %s non heap pointer\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-
- if (IS_FREE((AllocatedBlock*)(q-1))) {
- elib_printf(stderr, "RUNTIME ERROR: %s free pointer\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-
-}
-
-/*
-** --------------------------------------------------------------------------
-** DEBUG VERSIONS (COMPILED WITH THE ELIB.H)
-** --------------------------------------------------------------------------
-*/
-
-void* elib_dbg_malloc(int n, char* file, int line)
-{
- return elib__malloc(n);
-}
-
-void* elib_dbg_calloc(int n, int s, char* file, int line)
-{
- return elib__calloc(n, s);
-}
-
-void* elib_dbg_realloc(EWord* p, int n, char* file, int line)
-{
- if (p == 0)
- return elib__malloc(n);
- check_allocated_block(p, file, line, "elib_realloc");
- return elib__realloc(p, n);
-}
-
-void elib_dbg_free(EWord* p, char* file, int line)
-{
- if (p == 0)
- return;
- check_allocated_block(p, file, line, "elib_free");
- elib__free(p);
-}
-
-void elib_dbg_cfree(EWord* p, char* file, int line)
-{
- if (p == 0)
- return;
- check_allocated_block(p, file, line, "elib_free");
- elib__cfree(p);
-}
-
-void* elib_dbg_memalign(int a, int n, char* file, int line)
-{
- return elib__memalign(a, n);
-}
-
-void* elib_dbg_valloc(int n, char* file, int line)
-{
- return elib__valloc(n);
-}
-
-void* elib_dbg_pvalloc(int n, char* file, int line)
-{
- return elib__pvalloc(n);
-}
-
-void* elib_dbg_memresize(EWord* p, int n, char* file, int line)
-{
- if (p == 0)
- return elib__malloc(n);
- check_allocated_block(p, file, line, "elib_memresize");
- return elib__memresize(p, n);
-}
-
-int elib_dbg_memsize(void* p, char* file, int line)
-{
- check_allocated_block(p, file, line, "elib_memsize");
- return elib__memsize(p);
-}
-
-/*
-** --------------------------------------------------------------------------
-** LINK TIME FUNCTIONS (NOT COMPILED CALLS)
-** --------------------------------------------------------------------------
-*/
-
-void* elib_malloc(int n)
-{
- return elib_dbg_malloc(n, "", -1);
-}
-
-void* elib_calloc(int n, int s)
-{
- return elib_dbg_calloc(n, s, "", -1);
-}
-
-void* elib_realloc(EWord* p, int n)
-{
- return elib_dbg_realloc(p, n, "", -1);
-}
-
-void elib_free(EWord* p)
-{
- elib_dbg_free(p, "", -1);
-}
-
-void elib_cfree(EWord* p)
-{
- elib_dbg_cfree(p, "", -1);
-}
-
-void* elib_memalign(int a, int n)
-{
- return elib_dbg_memalign(a, n, "", -1);
-}
-
-void* elib_valloc(int n)
-{
- return elib_dbg_valloc(n, "", -1);
-}
-
-void* elib_pvalloc(int n)
-{
- return elib_dbg_pvalloc(n, "", -1);
-}
-
-void* elib_memresize(EWord* p, int n)
-{
- return elib_dbg_memresize(p, n, "", -1);
-}
-
-
-int elib_memsize(EWord* p)
-{
- return elib_dbg_memsize(p, "", -1);
-}
-
-#endif /* ELIB_DEBUG */
-
-/*
-** --------------------------------------------------------------------------
-** Map c library functions to elib
-** --------------------------------------------------------------------------
-*/
-
-#if defined(ELIB_ALLOC_IS_CLIB)
-void* malloc(size_t nb)
-{
- return elib_malloc(nb);
-}
-
-void* calloc(size_t nelem, size_t size)
-{
- return elib_calloc(nelem, size);
-}
-
-
-void free(void *p)
-{
- elib_free(p);
-}
-
-void cfree(void *p)
-{
- elib_cfree(p);
-}
-
-void* realloc(void* p, size_t nb)
-{
- return elib_realloc(p, nb);
-}
-
-
-void* memalign(size_t a, size_t s)
-{
- return elib_memalign(a, s);
-}
-
-void* valloc(size_t nb)
-{
- return elib_valloc(nb);
-}
-
-void* pvalloc(size_t nb)
-{
- return elib_pvalloc(nb);
-}
-
-#if 0
-void* memresize(void* p, int nb)
-{
- return elib_memresize(p, nb);
-}
-
-int memsize(void* p)
-{
- return elib_memsize(p);
-}
-#endif
-#endif /* ELIB_ALLOC_IS_CLIB */
-
-#endif /* ENABLE_ELIB_MALLOC */
-
-void elib_ensure_initialized(void)
-{
-#ifdef ENABLE_ELIB_MALLOC
-#ifndef ELIB_DONT_INITIALIZE
- elib_init(NULL, 0);
-#endif
-#endif
-}
-
-#ifdef ENABLE_ELIB_MALLOC
-/**
- ** A slightly modified version of the "address order best fit" algorithm
- ** used in erl_bestfit_alloc.c. Comments refer to that implementation.
- **/
-
-/*
- * Description: A combined "address order best fit"/"best fit" allocator
- * based on a Red-Black (binary search) Tree. The search,
- * insert, and delete operations are all O(log n) operations
- * on a Red-Black Tree. In the "address order best fit" case
- * n equals number of free blocks, and in the "best fit" case
- * n equals number of distinct sizes of free blocks. Red-Black
- * Trees are described in "Introduction to Algorithms", by
- * Thomas H. Cormen, Charles E. Leiserson, and
- * Ronald L. Rivest.
- *
- * This module is a callback-module for erl_alloc_util.c
- *
- * Author: Rickard Green
- */
-
-#ifdef DEBUG
-#if 0
-#define HARD_DEBUG
-#endif
-#else
-#undef HARD_DEBUG
-#endif
-
-#define SZ_MASK SIZE_MASK
-#define FLG_MASK (~(SZ_MASK))
-
-#define BLK_SZ(B) (*((Block_t *) (B)) & SZ_MASK)
-
-#define TREE_NODE_FLG (((Uint) 1) << 0)
-#define RED_FLG (((Uint) 1) << 1)
-#ifdef HARD_DEBUG
-# define LEFT_VISITED_FLG (((Uint) 1) << 2)
-# define RIGHT_VISITED_FLG (((Uint) 1) << 3)
-#endif
-
-#define IS_TREE_NODE(N) (((RBTree_t *) (N))->flags & TREE_NODE_FLG)
-#define IS_LIST_ELEM(N) (!IS_TREE_NODE(((RBTree_t *) (N))))
-
-#define SET_TREE_NODE(N) (((RBTree_t *) (N))->flags |= TREE_NODE_FLG)
-#define SET_LIST_ELEM(N) (((RBTree_t *) (N))->flags &= ~TREE_NODE_FLG)
-
-#define IS_RED(N) (((RBTree_t *) (N)) \
- && ((RBTree_t *) (N))->flags & RED_FLG)
-#define IS_BLACK(N) (!IS_RED(((RBTree_t *) (N))))
-
-#define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG)
-#define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG)
-
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
-#if 1
-#define RBT_ASSERT ASSERT
-#else
-#define RBT_ASSERT(x)
-#endif
-
-
-#ifdef HARD_DEBUG
-static RBTree_t * check_tree(Uint);
-#endif
-
-#ifdef ERTS_INLINE
-# ifndef ERTS_CAN_INLINE
-# define ERTS_CAN_INLINE 1
-# endif
-#else
-# if defined(__GNUC__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline__
-# elif defined(__WIN32__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline
-# else
-# define ERTS_CAN_INLINE 0
-# define ERTS_INLINE
-# endif
-#endif
-
-/* Types... */
-#if 0
-typedef struct RBTree_t_ RBTree_t;
-
-struct RBTree_t_ {
- Block_t hdr;
- Uint flags;
- RBTree_t *parent;
- RBTree_t *left;
- RBTree_t *right;
-};
-#endif
-
-#if 0
-typedef struct {
- RBTree_t t;
- RBTree_t *next;
-} RBTreeList_t;
-
-#define LIST_NEXT(N) (((RBTreeList_t *) (N))->next)
-#define LIST_PREV(N) (((RBTreeList_t *) (N))->t.parent)
-#endif
-
-#ifdef DEBUG
-
-/* Destroy all tree fields */
-#define DESTROY_TREE_NODE(N) \
- sys_memset((void *) (((Block_t *) (N)) + 1), \
- 0xff, \
- (sizeof(RBTree_t) - sizeof(Block_t)))
-
-/* Destroy all tree and list fields */
-#define DESTROY_LIST_ELEM(N) \
- sys_memset((void *) (((Block_t *) (N)) + 1), \
- 0xff, \
- (sizeof(RBTreeList_t) - sizeof(Block_t)))
-
-#else
-
-#define DESTROY_TREE_NODE(N)
-#define DESTROY_LIST_ELEM(N)
-
-#endif
-
-
-/*
- * Red-Black Tree operations needed
- */
-
-static ERTS_INLINE void
-left_rotate(RBTree_t **root, RBTree_t *x)
-{
- RBTree_t *y = x->right;
- x->right = y->left;
- if (y->left)
- y->left->parent = x;
- y->parent = x->parent;
- if (!y->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->left)
- x->parent->left = y;
- else {
- RBT_ASSERT(x == x->parent->right);
- x->parent->right = y;
- }
- y->left = x;
- x->parent = y;
-}
-
-static ERTS_INLINE void
-right_rotate(RBTree_t **root, RBTree_t *x)
-{
- RBTree_t *y = x->left;
- x->left = y->right;
- if (y->right)
- y->right->parent = x;
- y->parent = x->parent;
- if (!y->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->right)
- x->parent->right = y;
- else {
- RBT_ASSERT(x == x->parent->left);
- x->parent->left = y;
- }
- y->right = x;
- x->parent = y;
-}
-
-
-/*
- * Replace node x with node y
- * NOTE: block header of y is not changed
- */
-static ERTS_INLINE void
-replace(RBTree_t **root, RBTree_t *x, RBTree_t *y)
-{
-
- if (!x->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->left)
- x->parent->left = y;
- else {
- RBT_ASSERT(x == x->parent->right);
- x->parent->right = y;
- }
- if (x->left) {
- RBT_ASSERT(x->left->parent == x);
- x->left->parent = y;
- }
- if (x->right) {
- RBT_ASSERT(x->right->parent == x);
- x->right->parent = y;
- }
-
- y->flags = x->flags;
- y->parent = x->parent;
- y->right = x->right;
- y->left = x->left;
-
- DESTROY_TREE_NODE(x);
-
-}
-
-static void
-tree_insert_fixup(RBTree_t *blk)
-{
- RBTree_t *x = blk, *y;
-
- /*
- * Rearrange the tree so that it satisfies the Red-Black Tree properties
- */
-
- RBT_ASSERT(x != root && IS_RED(x->parent));
- do {
-
- /*
- * x and its parent are both red. Move the red pair up the tree
- * until we get to the root or until we can separate them.
- */
-
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(x->parent->parent);
-
- if (x->parent == x->parent->parent->left) {
- y = x->parent->parent->right;
- if (IS_RED(y)) {
- SET_BLACK(y);
- x = x->parent;
- SET_BLACK(x);
- x = x->parent;
- SET_RED(x);
- }
- else {
-
- if (x == x->parent->right) {
- x = x->parent;
- left_rotate(&root, x);
- }
-
- RBT_ASSERT(x == x->parent->parent->left->left);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(IS_BLACK(y));
-
- SET_BLACK(x->parent);
- SET_RED(x->parent->parent);
- right_rotate(&root, x->parent->parent);
-
- RBT_ASSERT(x == x->parent->left);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent->right));
- RBT_ASSERT(IS_BLACK(x->parent));
- break;
- }
- }
- else {
- RBT_ASSERT(x->parent == x->parent->parent->right);
- y = x->parent->parent->left;
- if (IS_RED(y)) {
- SET_BLACK(y);
- x = x->parent;
- SET_BLACK(x);
- x = x->parent;
- SET_RED(x);
- }
- else {
-
- if (x == x->parent->left) {
- x = x->parent;
- right_rotate(&root, x);
- }
-
- RBT_ASSERT(x == x->parent->parent->right->right);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(IS_BLACK(y));
-
- SET_BLACK(x->parent);
- SET_RED(x->parent->parent);
- left_rotate(&root, x->parent->parent);
-
- RBT_ASSERT(x == x->parent->right);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent->left));
- RBT_ASSERT(IS_BLACK(x->parent));
- break;
- }
- }
- } while (x != root && IS_RED(x->parent));
-
- SET_BLACK(root);
-}
-
-static void
-unlink_free_block(Block_t *del)
-{
- Uint spliced_is_black;
- RBTree_t *x, *y, *z = (RBTree_t *) del;
- RBTree_t null_x; /* null_x is used to get the fixup started when we
- splice out a node without children. */
-
- null_x.parent = NULL;
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-
- /* Remove node from tree... */
-
- /* Find node to splice out */
- if (!z->left || !z->right)
- y = z;
- else
- /* Set y to z's successor */
- for(y = z->right; y->left; y = y->left);
- /* splice out y */
- x = y->left ? y->left : y->right;
- spliced_is_black = IS_BLACK(y);
- if (x) {
- x->parent = y->parent;
- }
- else if (!x && spliced_is_black) {
- x = &null_x;
- x->flags = 0;
- SET_BLACK(x);
- x->right = x->left = NULL;
- x->parent = y->parent;
- y->left = x;
- }
-
- if (!y->parent) {
- RBT_ASSERT(root == y);
- root = x;
- }
- else if (y == y->parent->left)
- y->parent->left = x;
- else {
- RBT_ASSERT(y == y->parent->right);
- y->parent->right = x;
- }
- if (y != z) {
- /* We spliced out the successor of z; replace z by the successor */
- replace(&root, z, y);
- }
-
- if (spliced_is_black) {
- /* We removed a black node which makes the resulting tree
- violate the Red-Black Tree properties. Fixup tree... */
-
- while (IS_BLACK(x) && x->parent) {
-
- /*
- * x has an "extra black" which we move up the tree
- * until we reach the root or until we can get rid of it.
- *
- * y is the sibling of x
- */
-
- if (x == x->parent->left) {
- y = x->parent->right;
- RBT_ASSERT(y);
- if (IS_RED(y)) {
- RBT_ASSERT(y->right);
- RBT_ASSERT(y->left);
- SET_BLACK(y);
- RBT_ASSERT(IS_BLACK(x->parent));
- SET_RED(x->parent);
- left_rotate(&root, x->parent);
- y = x->parent->right;
- }
- RBT_ASSERT(y);
- RBT_ASSERT(IS_BLACK(y));
- if (IS_BLACK(y->left) && IS_BLACK(y->right)) {
- SET_RED(y);
- x = x->parent;
- }
- else {
- if (IS_BLACK(y->right)) {
- SET_BLACK(y->left);
- SET_RED(y);
- right_rotate(&root, y);
- y = x->parent->right;
- }
- RBT_ASSERT(y);
- if (IS_RED(x->parent)) {
-
- SET_BLACK(x->parent);
- SET_RED(y);
- }
- RBT_ASSERT(y->right);
- SET_BLACK(y->right);
- left_rotate(&root, x->parent);
- x = root;
- break;
- }
- }
- else {
- RBT_ASSERT(x == x->parent->right);
- y = x->parent->left;
- RBT_ASSERT(y);
- if (IS_RED(y)) {
- RBT_ASSERT(y->right);
- RBT_ASSERT(y->left);
- SET_BLACK(y);
- RBT_ASSERT(IS_BLACK(x->parent));
- SET_RED(x->parent);
- right_rotate(&root, x->parent);
- y = x->parent->left;
- }
- RBT_ASSERT(y);
- RBT_ASSERT(IS_BLACK(y));
- if (IS_BLACK(y->right) && IS_BLACK(y->left)) {
- SET_RED(y);
- x = x->parent;
- }
- else {
- if (IS_BLACK(y->left)) {
- SET_BLACK(y->right);
- SET_RED(y);
- left_rotate(&root, y);
- y = x->parent->left;
- }
- RBT_ASSERT(y);
- if (IS_RED(x->parent)) {
- SET_BLACK(x->parent);
- SET_RED(y);
- }
- RBT_ASSERT(y->left);
- SET_BLACK(y->left);
- right_rotate(&root, x->parent);
- x = root;
- break;
- }
- }
- }
- SET_BLACK(x);
-
- if (null_x.parent) {
- if (null_x.parent->left == &null_x)
- null_x.parent->left = NULL;
- else {
- RBT_ASSERT(null_x.parent->right == &null_x);
- null_x.parent->right = NULL;
- }
- RBT_ASSERT(!null_x.left);
- RBT_ASSERT(!null_x.right);
- }
- else if (root == &null_x) {
- root = NULL;
- RBT_ASSERT(!null_x.left);
- RBT_ASSERT(!null_x.right);
- }
- }
-
-
- DESTROY_TREE_NODE(del);
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-
-}
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * "Address order best fit" specific callbacks. *
-\* */
-
-static void
-link_free_block(Block_t *block)
-{
- RBTree_t *blk = (RBTree_t *) block;
- Uint blk_sz = BLK_SZ(blk);
-
- blk->flags = 0;
- blk->left = NULL;
- blk->right = NULL;
-
- if (!root) {
- blk->parent = NULL;
- SET_BLACK(blk);
- root = blk;
- } else {
- RBTree_t *x = root;
- while (1) {
- Uint size;
-
- size = BLK_SZ(x);
-
- if (blk_sz < size || (blk_sz == size && blk < x)) {
- if (!x->left) {
- blk->parent = x;
- x->left = blk;
- break;
- }
- x = x->left;
- }
- else {
- if (!x->right) {
- blk->parent = x;
- x->right = blk;
- break;
- }
- x = x->right;
- }
-
- }
-
- /* Insert block into size tree */
- RBT_ASSERT(blk->parent);
-
- SET_RED(blk);
- if (IS_RED(blk->parent)) {
- tree_insert_fixup(blk);
- }
- }
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-}
-
-
-static Block_t *
-get_free_block(Uint size)
-{
- RBTree_t *x = root;
- RBTree_t *blk = NULL;
- Uint blk_sz;
-
- while (x) {
- blk_sz = BLK_SZ(x);
- if (blk_sz < size) {
- x = x->right;
- }
- else {
- blk = x;
- x = x->left;
- }
- }
-
- if (!blk)
- return NULL;
-
-#ifdef HARD_DEBUG
- ASSERT(blk == check_tree(size));
-#endif
-
- unlink_free_block((Block_t *) blk);
-
- return (Block_t *) blk;
-}
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Debug functions *
-\* */
-
-
-#ifdef HARD_DEBUG
-
-#define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG)
-#define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG)
-
-#define SET_LEFT_VISITED(FB) ((FB)->flags |= LEFT_VISITED_FLG)
-#define SET_RIGHT_VISITED(FB) ((FB)->flags |= RIGHT_VISITED_FLG)
-
-#define UNSET_LEFT_VISITED(FB) ((FB)->flags &= ~LEFT_VISITED_FLG)
-#define UNSET_RIGHT_VISITED(FB) ((FB)->flags &= ~RIGHT_VISITED_FLG)
-
-
-#if 0
-# define PRINT_TREE
-#else
-# undef PRINT_TREE
-#endif
-
-#ifdef PRINT_TREE
-static void print_tree(void);
-#endif
-
-/*
- * Checks that the order between parent and children is correct,
- * and that the Red-Black Tree properties are satisfied. If size > 0,
- * check_tree() returns a node that satisfies "best fit" or
- * "address order best fit", respectively.
- *
- * The Red-Black Tree properties are:
- * 1. Every node is either red or black.
- * 2. Every leaf (NIL) is black.
- * 3. If a node is red, then both its children are black.
- * 4. Every simple path from a node to a descendant leaf
- * contains the same number of black nodes.
- */
-
-static RBTree_t *
-check_tree(Uint size)
-{
- RBTree_t *res = NULL;
- Sint blacks;
- Sint curr_blacks;
- RBTree_t *x;
-
-#ifdef PRINT_TREE
- print_tree();
-#endif
-
- if (!root)
- return res;
-
- x = root;
- ASSERT(IS_BLACK(x));
- ASSERT(!x->parent);
- curr_blacks = 1;
- blacks = -1;
-
- while (x) {
- if (!IS_LEFT_VISITED(x)) {
- SET_LEFT_VISITED(x);
- if (x->left) {
- x = x->left;
- if (IS_BLACK(x))
- curr_blacks++;
- continue;
- }
- else {
- if (blacks < 0)
- blacks = curr_blacks;
- ASSERT(blacks == curr_blacks);
- }
- }
-
- if (!IS_RIGHT_VISITED(x)) {
- SET_RIGHT_VISITED(x);
- if (x->right) {
- x = x->right;
- if (IS_BLACK(x))
- curr_blacks++;
- continue;
- }
- else {
- if (blacks < 0)
- blacks = curr_blacks;
- ASSERT(blacks == curr_blacks);
- }
- }
-
-
- if (IS_RED(x)) {
- ASSERT(IS_BLACK(x->right));
- ASSERT(IS_BLACK(x->left));
- }
-
- ASSERT(x->parent || x == root);
-
- if (x->left) {
- ASSERT(x->left->parent == x);
- ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
- || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
- }
-
- if (x->right) {
- ASSERT(x->right->parent == x);
- ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
- || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
- }
-
- if (size && BLK_SZ(x) >= size) {
- if (!res
- || BLK_SZ(x) < BLK_SZ(res)
- || (BLK_SZ(x) == BLK_SZ(res) && x < res))
- res = x;
- }
-
- UNSET_LEFT_VISITED(x);
- UNSET_RIGHT_VISITED(x);
- if (IS_BLACK(x))
- curr_blacks--;
- x = x->parent;
-
- }
-
- ASSERT(curr_blacks == 0);
-
- UNSET_LEFT_VISITED(root);
- UNSET_RIGHT_VISITED(root);
-
- return res;
-
-}
-
-
-#ifdef PRINT_TREE
-#define INDENT_STEP 2
-
-#include <stdio.h>
-
-static void
-print_tree_aux(RBTree_t *x, int indent)
-{
- int i;
-
- if (!x) {
- for (i = 0; i < indent; i++) {
- putc(' ', stderr);
- }
- fprintf(stderr, "BLACK: nil\r\n");
- }
- else {
- print_tree_aux(x->right, indent + INDENT_STEP);
- for (i = 0; i < indent; i++) {
- putc(' ', stderr);
- }
- fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n",
- IS_BLACK(x) ? "BLACK" : "RED",
- BLK_SZ(x),
- (Uint) x);
- print_tree_aux(x->left, indent + INDENT_STEP);
- }
-}
-
-
-static void
-print_tree(void)
-{
- fprintf(stderr, " --- Size-Adress tree begin ---\r\n");
- print_tree_aux(root, 0);
- fprintf(stderr, " --- Size-Adress tree end ---\r\n");
-}
-
-#endif
-
-#endif
-
-#endif /* ENABLE_ELIB_MALLOC */
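
For the record, the block header encoding that the removed allocator was built around packs everything into one word: the free bit, the free-above bit, and a 30-bit size in words, exactly as the FREE_BIT/FREE_ABOVE_BIT/SIZE_MASK definitions above state. A compilable restatement of that encoding:

    /* header_sketch.c -- elib_malloc's one-word boundary tag */
    #include <assert.h>

    #define FREE_BIT       0x80000000UL
    #define FREE_ABOVE_BIT 0x40000000UL
    #define SIZE_MASK      0x3fffffffUL  /* size in words, hdr excluded */

    int main(void)
    {
        unsigned long hdr = FREE_BIT | 100UL;  /* free block, 100 words */
        assert((hdr & SIZE_MASK) == 100UL);    /* SIZEOF(p)             */
        assert(hdr & FREE_BIT);                /* IS_FREE(p)            */
        assert(!(hdr & FREE_ABOVE_BIT));       /* block above is in use */
        return 0;
    }
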
diff --git a/erts/emulator/beam/elib_stat.h b/erts/emulator/beam/elib_stat.h
deleted file mode 100644
index d8c7f31737..0000000000
--- a/erts/emulator/beam/elib_stat.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
-** Interface to elib statistics
-**
-*/
-#ifndef __ELIB_STAT_H__
-#define __ELIB_STAT_H__
-
-struct elib_stat {
- int mem_total; /* Number of heap words */
- int mem_blocks; /* Number of block */
- int mem_alloc; /* Number of words in use */
- int mem_free; /* Number of words free */
- int min_used; /* Size of the smallest block used */
- int max_free; /* Size of the largest free block */
- int free_blocks; /* Number of fragments in free list */
- int mem_max_alloc;/* Max number of words in use */
-};
-
-EXTERN_FUNCTION(void, elib_statistics, (void*));
-EXTERN_FUNCTION(int, elib_check_heap, (_VOID_));
-EXTERN_FUNCTION(void, elib_heap_dump, (char*));
-EXTERN_FUNCTION(void, elib_stat, (struct elib_stat*));
-EXTERN_FUNCTION(int, elib_heap_map, (unsigned char*, int));
-EXTERN_FUNCTION(int, elib_histo, (unsigned long*, unsigned long*, int, int));
-
-#endif
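
The only derived figure this statistics interface fed in the previous file is the fragmentation ratio computed by elib_statistics, expressed in tenths of a percent. A small worked restatement of that arithmetic:

    /* frag_sketch.c -- the fragmentation formula from elib_statistics */
    #include <assert.h>

    int main(void)
    {
        unsigned long mem_free = 1000, max_free = 400;
        /* 1000 - 1000*max_free/mem_free: 0 means one big free block,
         * values near 1000 mean the free space is badly fragmented. */
        unsigned long frag = 1000 - (1000 * max_free) / mem_free;
        assert(frag == 600);               /* i.e. 60.0% */
        return 0;
    }
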
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 16ae643ed9..07b4167b27 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -38,9 +38,6 @@
#include "erl_bits.h"
#include "erl_instrument.h"
#include "erl_mseg.h"
-#ifdef ELIB_ALLOC_IS_CLIB
-#include "erl_version.h"
-#endif
#include "erl_monitors.h"
#include "erl_bif_timer.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
@@ -64,8 +61,15 @@
#ifdef DEBUG
static Uint install_debug_functions(void);
+#if 0
+#define HARD_DEBUG
+#ifdef __GNUC__
+#warning "* * * * * * * * * * * * * *"
+#warning "* HARD DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * *"
+#endif
+#endif
#endif
-extern void elib_ensure_initialized(void);
ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
@@ -391,6 +395,10 @@ refuse_af_strategy(struct au_init *init)
static void init_thr_ix(int static_ixs);
+#ifdef HARD_DEBUG
+static void hdbg_init(void);
+#endif
+
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
@@ -406,6 +414,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
ERTS_DEFAULT_ALCU_INIT
};
+#ifdef HARD_DEBUG
+ hdbg_init();
+#endif
+
erts_sys_alloc_init();
init_thr_ix(erts_no_schedulers);
erts_init_utils_mem();
@@ -2305,13 +2317,8 @@ erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
l = 0;
as[l] = am_atom_put("e", 1);
ts[l++] = am_true;
-#ifdef ELIB_ALLOC_IS_CLIB
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("elib", 4);
-#else
as[l] = am_atom_put("m", 1);
ts[l++] = am_atom_put("libc", 4);
-#endif
if(sas.trim_threshold >= 0) {
as[l] = am_atom_put("tt", 2);
ts[l++] = erts_bld_uint(hpp, szp,
@@ -2465,11 +2472,7 @@ erts_allocator_info(int to, void *arg)
case ERTS_ALC_A_SYSTEM: {
SysAllocStat sas;
erts_print(to, arg, "option e: true\n");
-#ifdef ELIB_ALLOC_IS_CLIB
- erts_print(to, arg, "option m: elib\n");
-#else
erts_print(to, arg, "option m: libc\n");
-#endif
sys_alloc_stat(&sas);
if(sas.trim_threshold >= 0)
erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
@@ -2573,13 +2576,8 @@ erts_allocator_options(void *proc)
switch (a) {
case ERTS_ALC_A_SYSTEM:
-#ifdef ELIB_ALLOC_IS_CLIB
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("elib", 4);
-#else
as[l] = am_atom_put("m", 1);
ts[l++] = am_atom_put("libc", 4);
-#endif
if(sas.trim_threshold >= 0) {
as[l] = am_atom_put("tt", 2);
ts[l++] = erts_bld_uint(hpp, szp,
@@ -2650,23 +2648,7 @@ erts_allocator_options(void *proc)
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
-#if defined(ELIB_ALLOC_IS_CLIB)
- {
- Eterm version;
- int i;
- int ver[5];
- i = sscanf(ERLANG_VERSION,
- "%d.%d.%d.%d.%d",
- &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]);
-
- version = NIL;
- for(i--; i >= 0; i--)
- version = erts_bld_cons(hpp, szp, make_small(ver[i]), version);
-
- res = erts_bld_tuple(hpp, szp, 4,
- am_elib_malloc, version, features, settings);
- }
-#elif defined(__GLIBC__)
+#if defined(__GLIBC__)
{
Eterm AM_glibc = am_atom_put("glibc", 5);
Eterm version;
@@ -2862,12 +2844,10 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0a:
- if (ethr_mutex_lock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_lock((ethr_mutex *) a1);
break;
case 0xf0b:
- if (ethr_mutex_unlock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_unlock((ethr_mutex *) a1);
break;
case 0xf0c: {
ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
@@ -2883,16 +2863,13 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0e:
- if (ethr_cond_broadcast((ethr_cond *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_cond_broadcast((ethr_cond *) a1);
break;
case 0xf0f: {
int res;
do {
res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
} while (res == EINTR);
- if (res != 0)
- ERTS_ALC_TEST_ABORT;
break;
}
case 0xf10: {
@@ -2939,8 +2916,11 @@ unsigned long erts_alc_test(unsigned long op,
#undef PRINT_OPS
#endif
-
+#ifdef HARD_DEBUG
+#define FENCE_SZ (4*sizeof(UWord))
+#else
#define FENCE_SZ (3*sizeof(UWord))
+#endif
#if defined(ARCH_64)
#define FENCE_PATTERN 0xABCDEF97ABCDEF97
@@ -2962,6 +2942,104 @@ unsigned long erts_alc_test(unsigned long op,
#define GET_TYPE_OF_PATTERN(P) \
(((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
+#ifdef HARD_DEBUG
+
+#define ERL_ALC_HDBG_MAX_MBLK 100000
+#define ERTS_ALC_O_CHECK -1
+
+typedef struct hdbg_mblk_ hdbg_mblk;
+struct hdbg_mblk_ {
+ hdbg_mblk *next;
+ hdbg_mblk *prev;
+ void *p;
+ Uint s;
+ ErtsAlcType_t n;
+};
+
+static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
+
+static hdbg_mblk *free_hdbg_mblks;
+static hdbg_mblk *used_hdbg_mblks;
+static erts_mtx_t hdbg_mblk_mtx;
+
+static void
+hdbg_init(void)
+{
+ int i;
+ for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
+ hdbg_mblks[i].next = &hdbg_mblks[i+1];
+ hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
+ free_hdbg_mblks = &hdbg_mblks[0];
+ used_hdbg_mblks = NULL;
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+}
+
+static void *check_memory_fence(void *ptr,
+ Uint *size,
+ ErtsAlcType_t n,
+ int func);
+void erts_hdbg_chk_blks(void);
+
+void
+erts_hdbg_chk_blks(void)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
+ Uint sz;
+ check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
+ ASSERT(sz == mblk->s);
+ }
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+static hdbg_mblk *
+hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ mblk = free_hdbg_mblks;
+ if (!mblk) {
+ erts_fprintf(stderr,
+ "Ran out of debug blocks; please increase "
+ "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
+ ERL_ALC_HDBG_MAX_MBLK);
+ abort();
+ }
+ free_hdbg_mblks = mblk->next;
+
+ mblk->p = p;
+ mblk->s = s;
+ mblk->n = n;
+
+ mblk->next = used_hdbg_mblks;
+ mblk->prev = NULL;
+ if (used_hdbg_mblks)
+ used_hdbg_mblks->prev = mblk;
+ used_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+ return (void *) mblk;
+}
+
+static void
+hdbg_free(hdbg_mblk *mblk)
+{
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ if (mblk->next)
+ mblk->next->prev = mblk->prev;
+ if (mblk->prev)
+ mblk->prev->next = mblk->next;
+ else
+ used_hdbg_mblks = mblk->next;
+
+ mblk->next = free_hdbg_mblks;
+ free_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+#endif
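
Taken together, the HARD_DEBUG bookkeeping is a fixed pool: hdbg_alloc() pops a node from the free list and pushes it onto a doubly linked used list, hdbg_free() unlinks it again, and erts_hdbg_chk_blks() walks the used list revalidating every live fence. A stand-alone sketch of the same pool discipline, with illustrative names (not part of the patch):

    #include <assert.h>
    #include <stddef.h>

    #define POOL_SZ 8

    typedef struct node_ node;
    struct node_ { node *next; node *prev; void *p; };

    static node pool[POOL_SZ];
    static node *free_list, *used_list;

    static void pool_init(void)
    {
        int i;
        for (i = 0; i < POOL_SZ - 1; i++)
            pool[i].next = &pool[i + 1];
        pool[POOL_SZ - 1].next = NULL;   /* last free node terminates */
        free_list = &pool[0];
        used_list = NULL;
    }

    static node *track(void *p)          /* cf. hdbg_alloc() */
    {
        node *n = free_list;
        assert(n != NULL);               /* pool exhausted otherwise */
        free_list = n->next;
        n->p = p;
        n->prev = NULL;                  /* push on used-list head */
        n->next = used_list;
        if (used_list)
            used_list->prev = n;
        used_list = n;
        return n;
    }

    static void untrack(node *n)         /* cf. hdbg_free() */
    {
        if (n->next)
            n->next->prev = n->prev;
        if (n->prev)
            n->prev->next = n->next;
        else
            used_list = n->next;         /* n was the list head */
        n->next = free_list;             /* back onto the free stack */
        free_list = n;
    }

The real code additionally serializes all three operations with hdbg_mblk_mtx, since allocations happen on any scheduler thread.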
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
@@ -3007,17 +3085,28 @@ set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
{
UWord *ui_ptr;
UWord pattern;
+#ifdef HARD_DEBUG
+ hdbg_mblk **mblkpp;
+#endif
if (!ptr)
return NULL;
ui_ptr = (UWord *) ptr;
pattern = MK_PATTERN(n);
-
+
+#ifdef HARD_DEBUG
+ mblkpp = (hdbg_mblk **) ui_ptr++;
+#endif
+
*(ui_ptr++) = sz;
*(ui_ptr++) = pattern;
memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
+#ifdef HARD_DEBUG
+ *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
+#endif
+
return (void *) ui_ptr;
}
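
The block layout set_memory_fence() produces, and check_memory_fence() later unwinds word by word, is then (schematic; the extra HARD_DEBUG word is what bumps FENCE_SZ from 3 to 4 words above):

    /*
     *  HARD_DEBUG only
     *  v
     *  [hdbg_mblk *][size][pre-pattern] <-- sz bytes of user data --> [post-pattern]
     *                                   ^
     *                                   pointer handed to the caller
     *
     * The post-pattern is written with memcpy() because
     * (char *) ui_ptr + sz need not be word aligned.
     */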
@@ -3029,6 +3118,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
UWord pre_pattern;
UWord post_pattern;
UWord *ui_ptr;
+#ifdef HARD_DEBUG
+ hdbg_mblk *mblk;
+#endif
if (!ptr)
return NULL;
@@ -3036,6 +3128,9 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
ui_ptr = (UWord *) ptr;
pre_pattern = *(--ui_ptr);
*size = sz = *(--ui_ptr);
+#ifdef HARD_DEBUG
+ mblk = (hdbg_mblk *) *(--ui_ptr);
+#endif
found_type = GET_TYPE_OF_PATTERN(pre_pattern);
if (pre_pattern != MK_PATTERN(n)) {
@@ -3091,6 +3186,17 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
(unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
}
+#ifdef HARD_DEBUG
+ switch (func) {
+ case ERTS_ALC_O_REALLOC:
+ case ERTS_ALC_O_FREE:
+ hdbg_free(mblk);
+ break;
+ default:
+ break;
+ }
+#endif
+
return (void *) ui_ptr;
}
@@ -3103,6 +3209,10 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
Uint dsize;
void *res;
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dsize = size + FENCE_SZ;
res = (*real_af->alloc)(n, real_af->extra, dsize);
@@ -3132,13 +3242,17 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
dsize = size + FENCE_SZ;
dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
if (old_size > size)
sys_memset((void *) (((char *) ptr) + size),
0xf,
sizeof(Uint) + old_size - size);
res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
-
+
res = set_memory_fence(res, size, n);
#ifdef PRINT_OPS
@@ -3168,6 +3282,10 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
#endif
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
}
static Uint
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 6f88bbe5b8..7df9f19af0 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -212,7 +212,8 @@ type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
# INFO_DSBUF has to use the SYSTEM allocator; otherwise, a deadlock might occur
-type SCHDLR_DATA LONG_LIVED PROCESSES scheduler_data
+type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
+type SCHDLR_SLP_INFO LONG_LIVED SYSTEM scheduler_sleep_info
type RUNQS LONG_LIVED SYSTEM run_queues
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
type DDLL_HANDLE STANDARD SYSTEM ddll_handle
@@ -246,6 +247,7 @@ type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type ZLIB STANDARD SYSTEM zlib
+type RDR_GRPS_MAP LONG_LIVED SYSTEM reader_groups_map
+if smp
type ASYNC SHORT_LIVED SYSTEM async
@@ -269,7 +271,9 @@ type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
+if threads
-type ETHR_INTERNAL SYSTEM SYSTEM ethread_internal
+type ETHR_STD STANDARD SYSTEM ethread_standard
+type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
+type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
+ifnot smp
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 82f1e06e8e..1d8fd11b7b 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -2511,8 +2511,8 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
pb = (ProcBin *) HAlloc(BIF_P, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = target_size;
- pb->next = MSO(BIF_P).mso;
- MSO(BIF_P).mso = pb;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
pb->val = save;
pb->bytes = t;
pb->flags = 0;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 48cda52612..dace5b9297 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -38,9 +38,6 @@
#include "erl_instrument.h"
#include "dist.h"
#include "erl_gc.h"
-#ifdef ELIB_ALLOC_IS_CLIB
-#include "elib_stat.h"
-#endif
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -122,22 +119,26 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#endif
static Eterm
-bld_bin_list(Uint **hpp, Uint *szp, ProcBin* pb)
+bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
+ struct erl_off_heap_header* ohh;
Eterm res = NIL;
Eterm tuple;
- for (; pb; pb = pb->next) {
- Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
- Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
-
- if (szp)
- *szp += 4+2;
- if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
- tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
- res = CONS(*hpp + 4, tuple, res);
- *hpp += 4+2;
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (ohh->thing_word == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin*) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
+ Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
}
}
return res;
@@ -176,10 +177,10 @@ static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
Eterm tup;
Eterm r = (IS_CONST(mon->ref)
? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->ref));
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
Eterm p = (IS_CONST(mon->pid)
? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->pid));
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
pmlc->hp += 6;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
@@ -240,7 +241,7 @@ static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
Eterm old_res, targets = NIL;
Eterm p = (IS_CONST(lnk->pid)
? lnk->pid
- : STORE_NC(&(pllc->hp), &MSO(pllc->p).externals, lnk->pid));
+ : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
if (lnk->type == LINK_NODE) {
targets = make_small(ERTS_LINK_REFC(lnk));
} else if (ERTS_LINK_ROOT(lnk) != NULL) {
@@ -1225,7 +1226,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1258,9 +1259,7 @@ process_info_aux(Process *BIF_P,
else {
/* Monitor by pid. Build {process, Pid} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp,
- &MSO(BIF_P).externals,
- mic.mi[i].entity);
+ Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
t = TUPLE2(hp, am_process, pid);
hp += 3;
res = CONS(hp, t, res);
@@ -1282,7 +1281,7 @@ process_info_aux(Process *BIF_P,
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1491,7 +1490,7 @@ process_info_aux(Process *BIF_P,
case am_group_leader: {
int sz = NC_HEAP_SIZE(rp->group_leader);
hp = HAlloc(BIF_P, 3 + sz);
- res = STORE_NC(&hp, &MSO(BIF_P).externals, rp->group_leader);
+ res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
break;
}
@@ -1516,9 +1515,9 @@ process_info_aux(Process *BIF_P,
case am_binary: {
Uint sz = 3;
- (void) bld_bin_list(NULL, &sz, MSO(rp).mso);
+ (void) bld_bin_list(NULL, &sz, &MSO(rp));
hp = HAlloc(BIF_P, sz);
- res = bld_bin_list(&hp, NULL, MSO(rp).mso);
+ res = bld_bin_list(&hp, NULL, &MSO(rp));
break;
}
@@ -2124,86 +2123,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
}
else if (BIF_ARG_1 == am_elib_malloc) {
-#ifdef ELIB_ALLOC_IS_CLIB
- struct elib_stat stat;
- DECL_AM(heap_size);
- DECL_AM(max_alloced_size);
- DECL_AM(alloced_size);
- DECL_AM(free_size);
- DECL_AM(no_alloced_blocks);
- DECL_AM(no_free_blocks);
- DECL_AM(smallest_alloced_block);
- DECL_AM(largest_free_block);
- Eterm atoms[8];
- Eterm ints[8];
- Uint **hpp;
- Uint sz;
- Uint *szp;
- int length;
-#ifdef DEBUG
- Uint *endp;
-#endif
-
- elib_stat(&stat);
-
- /* First find out the heap size needed ... */
- hpp = NULL;
- szp = &sz;
- sz = 0;
-
- build_elib_malloc_term:
- length = 0;
- atoms[length] = AM_heap_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_total*sizeof(Uint));
- atoms[length] = AM_max_alloced_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_max_alloc*sizeof(Uint));
- atoms[length] = AM_alloced_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_alloc*sizeof(Uint));
- atoms[length] = AM_free_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_free*sizeof(Uint));
- atoms[length] = AM_no_alloced_blocks;
- ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.mem_blocks);
- atoms[length] = AM_no_free_blocks;
- ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.free_blocks);
- atoms[length] = AM_smallest_alloced_block;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.min_used*sizeof(Uint));
- atoms[length] = AM_largest_free_block;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.max_free*sizeof(Uint));
-
-
-
- ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
- ASSERT(length <= sizeof(ints)/sizeof(Eterm));
-
- res = erts_bld_2tup_list(hpp, szp, length, atoms, ints);
-
- if (szp) {
- /* ... and then build the term */
- hp = HAlloc(BIF_P, sz);
-#ifdef DEBUG
- endp = hp + sz;
-#endif
-
- szp = NULL;
- hpp = &hp;
- goto build_elib_malloc_term;
- }
-
-#ifdef DEBUG
- ASSERT(endp == hp);
-#endif
-
-#else /* #ifdef ELIB_ALLOC_IS_CLIB */
- res = am_false;
-#endif /* #ifdef ELIB_ALLOC_IS_CLIB */
-
- BIF_RET(res);
+ /* To be removed in R15 */
+ BIF_RET(am_false);
}
else if (BIF_ARG_1 == am_os_version) {
int major, minor, build;
@@ -2314,6 +2235,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
res = erts_get_cpu_topology_term(BIF_P, am_used);
BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
+ } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
+ if (erts_update_cpu_info()) {
+ ERTS_DECL_AM(changed);
+ BIF_RET(AM_changed);
+ }
+ else {
+ ERTS_DECL_AM(unchanged);
+ BIF_RET(AM_unchanged);
+ }
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
} else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
register unsigned high asm("%l0");
@@ -2385,7 +2315,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
/* Arguments that are unusual follow ... */
else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
- int no = erts_get_cpu_configured(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_configured(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2394,7 +2327,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
}
else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
- int no = erts_get_cpu_online(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_online(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2403,7 +2339,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
}
else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
- int no = erts_get_cpu_available(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_available(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2563,6 +2502,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_sched_stat_term(BIF_P, 1));
} else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
BIF_RET(erts_nif_taints(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
+ BIF_RET(erts_get_reader_groups_map(BIF_P));
}
BIF_ERROR(BIF_P, BADARG);
@@ -2678,7 +2619,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -2698,7 +2639,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
t = TUPLE2(hp, am_process, item);
hp += 3;
res = CONS(hp, t, res);
@@ -3426,6 +3367,16 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
return erts_fake_scheduler_bindings(BIF_P, tp[2]);
}
+ else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
+ Sint groups;
+ if (is_not_small(tp[2]))
+ BIF_ERROR(BIF_P, BADARG);
+ groups = signed_val(tp[2]);
+ if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
+ BIF_ERROR(BIF_P, BADARG);
+
+ BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
+ }
break;
}
default:
@@ -3730,8 +3681,8 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
* [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
*/
- ethr_atomic_read(&stats->tries, (long *)&tries);
- ethr_atomic_read(&stats->colls, (long *)&colls);
+ tries = (unsigned long) ethr_atomic_read(&stats->tries);
+ colls = (unsigned long) ethr_atomic_read(&stats->colls);
line = stats->line;
timer_s = stats->timer.s;
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 443cac9033..0509e51a6f 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -2161,7 +2161,7 @@ trace_delivered_1(BIF_ALIST_1)
#ifdef ERTS_SMP
bp = new_message_buffer(REF_THING_SIZE + 4);
hp = &bp->mem[0];
- msg_ref = STORE_NC(&hp, &bp->off_heap.externals, ref);
+ msg_ref = STORE_NC(&hp, &bp->off_heap, ref);
#else
hp = HAlloc(BIF_P, 4);
msg_ref = ref;
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index defe18c92b..96faa673f6 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1335,8 +1335,8 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = used_size_in_bytes;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
@@ -1506,8 +1506,8 @@ erts_bs_init_writable(Process* p, Eterm sz)
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = 0;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index cbdaa459de..b0369a402b 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -78,11 +78,19 @@ enum DbIterSafety {
** The main meta table, containing all ets tables.
*/
#ifdef ERTS_SMP
-# define META_MAIN_TAB_LOCK_CNT 16
-static union {
- erts_smp_spinlock_t lck;
- byte _cache_line_alignment[64];
-}meta_main_tab_locks[META_MAIN_TAB_LOCK_CNT];
+
+#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
+#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
+#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+
+typedef union {
+ erts_smp_rwmtx_t rwmtx;
+ byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(erts_smp_rwmtx_t))];
+} erts_meta_main_tab_lock_t;
+
+static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+
#endif
static struct {
union {
@@ -104,17 +112,13 @@ static struct {
#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
-static ERTS_INLINE void meta_main_tab_lock(unsigned slot)
+static ERTS_INLINE erts_smp_rwmtx_t *
+get_meta_main_tab_lock(unsigned slot)
{
#ifdef ERTS_SMP
- erts_smp_spin_lock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
-#endif
-}
-
-static ERTS_INLINE void meta_main_tab_unlock(unsigned slot)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_unlock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
+ return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+#else
+ return NULL;
#endif
}
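
Since the lock table size is a power of two, picking a lock is a single mask of the slot number; with the 8-bit table above, slots 5, 261 and 517 all map to the same rwlock:

    /* slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK, i.e. slot % 256:
     *   5   & 0xff == 5
     *   261 & 0xff == 5    (261 == 5 + 256)
     *   517 & 0xff == 5    (517 == 5 + 512)
     * Unrelated tables usually land on different locks, trading a
     * bounded amount of false contention for a fixed-size table. */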
@@ -166,7 +170,8 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3 /* record write access */
+ LCK_WRITE_REC=3, /* record write access */
+ LCK_NONE=4
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -214,17 +219,17 @@ Export ets_select_continue_exp;
*/
static Export ets_delete_continue_exp;
-static ERTS_INLINE DbTable* db_ref(DbTable* tb)
+static ERTS_INLINE DbTable* db_ref(DbTable* tb, db_lock_kind_t kind)
{
- if (tb != NULL) {
+ if (tb != NULL && kind != LCK_READ) {
erts_refc_inc(&tb->common.ref, 2);
}
return tb;
}
-static ERTS_INLINE DbTable* db_unref(DbTable* tb)
+static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
{
- if (!erts_refc_dectest(&tb->common.ref, 0)) {
+ if (kind != LCK_READ && !erts_refc_dectest(&tb->common.ref, 0)) {
#ifdef HARDDEBUG
if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
@@ -256,12 +261,19 @@ static ERTS_INLINE DbTable* db_unref(DbTable* tb)
return tb;
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
+ char *rwname, char* fixname)
{
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ if (use_frequent_read_lock)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+#endif
erts_refc_init(&tb->common.ref, 1);
erts_refc_init(&tb->common.fixref, 0);
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
+ rwname, tb->common.the_name);
erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
@@ -297,7 +309,7 @@ static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
- (void) db_ref(tb);
+ (void) db_ref(tb, kind);
#ifdef ERTS_SMP
db_lock_take_over_ref(tb, kind);
#endif
@@ -331,7 +343,7 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
}
}
#endif
- (void) db_unref(tb); /* May delete table... */
+ (void) db_unref(tb, kind); /* May delete table... */
}
@@ -349,32 +361,49 @@ static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
}
static ERTS_INLINE
-DbTable* db_get_table(Process *p,
- Eterm id,
- int what,
- db_lock_kind_t kind)
+DbTable* db_get_table_aux(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind,
+ int meta_already_locked)
{
DbTable *tb = NULL;
+ erts_smp_rwmtx_t *mtl = NULL;
if (is_small(id)) {
Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- meta_main_tab_lock(slot);
+ if (!meta_already_locked) {
+ mtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rlock(mtl);
+ }
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ else {
+ erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
+ || erts_lc_rwmtx_is_rwlocked(test_mtl));
+ }
+#endif
if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
 /* SMP: inc to prevent race between unlock of meta_main_tab_lock
* and the table locking outside the meta_main_tab_lock
*/
- tb = db_ref(meta_main_tab[slot].u.tb);
+ tb = db_ref(meta_main_tab[slot].u.tb, kind);
}
- meta_main_tab_unlock(slot);
}
else if (is_atom(id)) {
- erts_smp_rwmtx_t* rwlock;
- struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&rwlock);
- erts_smp_rwmtx_rlock(rwlock);
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
+ if (!meta_already_locked)
+ erts_smp_rwmtx_rlock(mtl);
+ else {
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ || erts_lc_rwmtx_is_rwlocked(mtl));
+ mtl = NULL;
+ }
+
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id) {
- tb = db_ref(bucket->pu.tb);
+ tb = db_ref(bucket->pu.tb, kind);
}
}
else { /* multi */
@@ -382,23 +411,33 @@ DbTable* db_get_table(Process *p,
Uint i;
for (i=0; i<cnt; i++) {
if (bucket->pu.mvec[i].u.name_atom == id) {
- tb = db_ref(bucket->pu.mvec[i].pu.tb);
+ tb = db_ref(bucket->pu.mvec[i].pu.tb, kind);
break;
}
}
}
}
- erts_smp_rwmtx_runlock(rwlock);
}
if (tb) {
db_lock_take_over_ref(tb, kind);
- if (tb->common.id == id && ((tb->common.status & what) != 0 ||
- p->id == tb->common.owner)) {
- return tb;
+ if (tb->common.id != id
+ || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+ db_unlock(tb, kind);
+ tb = NULL;
}
- db_unlock(tb, kind);
}
- return NULL;
+ if (mtl)
+ erts_smp_rwmtx_runlock(mtl);
+ return tb;
+}
+
+static ERTS_INLINE
+DbTable* db_get_table(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind)
+{
+ return db_get_table_aux(p, id, what, kind, 0);
}
/* Requires meta_main_tab_locks[slot] locked.
@@ -413,15 +452,15 @@ static ERTS_INLINE void free_slot(int slot)
erts_smp_spin_unlock(&meta_main_tab_main_lock);
}
-static int insert_named_tab(Eterm name_atom, DbTable* tb)
+static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
-
- erts_smp_rwmtx_rwlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -468,17 +507,32 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
-static int remove_named_tab(Eterm name_atom)
+static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
+ Eterm name_atom = tb->common.id;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
- erts_smp_rwmtx_rwlock(rwlock);
+#ifdef ERTS_SMP
+ if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disapearing.
+ * prevent the table from disappearing.
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(rwlock);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
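
The EBUSY branch is a lock-order back-off: the caller already holds the table lock, but the name-bucket lock ranks above it, so on contention both are dropped and re-taken in rank order while the raised refc keeps the table alive. A stand-alone sketch of the pattern, assuming a rank order A-before-B and hypothetical lock helpers:

    /* Take A (ranked first) while already holding B, without ever
     * blocking on A in the wrong order. The object guarded by B must
     * be pinned (here: the table refc) across the window where B is
     * released. */
    static void lock_a_while_holding_b(rwlock_t *a, rwlock_t *b)
    {
        if (try_wrlock(a) == EBUSY) {
            wrunlock(b);   /* back off ...                           */
            wrlock(a);     /* ... take A first, as the order demands */
            wrlock(b);     /* ... then retake B                      */
        }
    }

The same sequence recurs below in ets_delete_1() and free_table_cont() for the meta main-table locks.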
+
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+
if (bucket->pu.tb == NULL) {
goto done;
}
@@ -529,7 +583,8 @@ static int remove_named_tab(Eterm name_atom)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -1133,6 +1188,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
+ erts_smp_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1141,34 +1197,65 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+
+ if (is_not_atom(BIF_ARG_2)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (is_not_atom(BIF_ARG_2)) {
- goto badarg;
+ (void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
+
+ if (is_small(BIF_ARG_1)) {
+ Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
+ lck2 = get_meta_main_tab_lock(slot);
+ }
+ else if (is_atom(BIF_ARG_1)) {
+ (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (lck1 == lck2)
+ lck2 = NULL;
+ else if (lck1 > lck2) {
+ erts_smp_rwmtx_t *tmp = lck1;
+ lck1 = lck2;
+ lck2 = tmp;
+ }
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
}
+ erts_smp_rwmtx_rwlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwlock(lck2);
+
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ if (!tb)
+ goto badarg;
+
if (is_not_atom(tb->common.id)) { /* Not a named table */
tb->common.the_name = BIF_ARG_2;
goto done;
}
- if (!insert_named_tab(BIF_ARG_2,tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
goto badarg;
- }
- if (!remove_named_tab(tb->common.id)) {
+
+ if (!remove_named_tab(tb, 1))
erl_exit(1,"Could not find named tab %s", tb->common.id);
- }
tb->common.id = tb->common.the_name = BIF_ARG_2;
done:
ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_RET(ret);
badarg:
- db_unlock(tb, LCK_WRITE);
+ if (tb)
+ db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_ERROR(BIF_P, BADARG);
}
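
Worth noting: when BIF_ARG_1 and BIF_ARG_2 hash to two different name buckets, the code above sorts the two write locks by address before taking them, the standard way to let concurrent renames cross each other without deadlocking. A minimal sketch of the idiom (function name illustrative):

    /* Lock two rwmutexes deadlock-free: always lowest address first;
     * identical pointers collapse to a single lock. */
    static void lock_pair(erts_smp_rwmtx_t *a, erts_smp_rwmtx_t *b)
    {
        if (a == b) {
            erts_smp_rwmtx_rwlock(a);
            return;
        }
        if (a > b) {                     /* normalize the order */
            erts_smp_rwmtx_t *tmp = a; a = b; b = tmp;
        }
        erts_smp_rwmtx_rwlock(a);
        erts_smp_rwmtx_rwlock(b);
    }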
@@ -1189,10 +1276,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
UWord heir_data;
Uint32 status;
Sint keypos;
- int is_named, is_fine_locked;
+ int is_named, is_fine_locked, frequent_read;
int cret;
DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
+ erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1205,6 +1293,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = 1;
is_named = 0;
is_fine_locked = 0;
+ frequent_read = 0;
heir = am_none;
heir_data = (UWord) am_undefined;
@@ -1238,6 +1327,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_fine_locked = 0;
} else break;
}
+ else if (tp[1] == am_read_concurrency) {
+ if (tp[2] == am_true) {
+ frequent_read = 1;
+ } else if (tp[2] == am_false) {
+ frequent_read = 0;
+ } else break;
+ }
else if (tp[1] == am_heir && tp[2] == am_none) {
heir = am_none;
heir_data = am_undefined;
@@ -1286,6 +1382,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
+#ifdef ERTS_SMP
+ if (frequent_read && !(status & DB_PRIVATE))
+ status |= DB_FREQ_READ;
+#endif
+
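For context, the flag originates from the new ets:new/2 option parsed above; a usage sketch from the Erlang side (hedged, inferred from this parser rather than from documentation):

    /*   T = ets:new(tab, [set, public, {read_concurrency, true}])
     * sets DB_FREQ_READ unless the table is private, which in turn
     * selects the frequent-read rwmutex type for the table lock in
     * db_init_lock() below and, for fine-locked hash tables, for the
     * bucket locks in db_create_hash(). */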
/* we create table outside any table lock
 * and take the unusual cost of destroying the table if it
* fails to find a slot
@@ -1308,7 +1409,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- db_init_lock(tb, "db_tab", "db_tab_fix");
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
+ "db_tab", "db_tab_fix");
tb->common.keypos = keypos;
tb->common.owner = BIF_P->id;
set_heir(BIF_P, tb, heir, heir_data);
@@ -1351,15 +1453,17 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.id = ret;
tb->common.slot = slot; /* store slot for erase */
- meta_main_tab_lock(slot);
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
meta_main_tab[slot].u.tb = tb;
ASSERT(IS_SLOT_ALIVE(slot));
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
- if (is_named && !insert_named_tab(BIF_ARG_1, tb)) {
- meta_main_tab_lock(slot);
+ if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
free_slot(slot);
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
db_lock_take_over_ref(tb,LCK_WRITE);
free_heir_data(tb);
@@ -1499,6 +1603,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
int trap;
DbTable* tb;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1520,13 +1625,23 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
/* We must keep the slot, to be found by db_proc_dead() if process dies */
MARK_SLOT_DEAD(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
- if (is_atom(tb->common.id)) {
- remove_named_tab(tb->common.id);
- }
+ erts_smp_rwmtx_rwunlock(mmtl);
+ if (is_atom(tb->common.id))
+ remove_named_tab(tb, 0);
if (tb->common.owner != BIF_P->id) {
DeclareTmpHeap(meta_tuple,3,BIF_P);
@@ -1919,14 +2034,15 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0)
previous = NIL;
j = 0;
for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) {
- meta_main_tab_lock(i);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(i)) {
j++;
tb = meta_main_tab[i].u.tb;
previous = CONS(hp, tb->common.id, previous);
hp += 2;
}
- meta_main_tab_unlock(i);
+ erts_smp_rwmtx_runlock(mmtl);
}
HRelease(BIF_P, hendp, hp);
BIF_RET(previous);
@@ -2630,12 +2746,30 @@ void init_db(void)
size_t size;
#ifdef ERTS_SMP
- for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
- erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ meta_main_tab_locks = erts_alloc(ERTS_ALC_T_DB_TABLES,
+ (sizeof(erts_meta_main_tab_lock_t)
+ * (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE+1)));
+
+ if ((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) != 0)
+ meta_main_tab_locks = ((erts_meta_main_tab_lock_t *)
+ ((((Uint) meta_main_tab_locks)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+
+ ASSERT((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) == 0);
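
The alloc-one-extra-then-round-up dance above is plain pointer arithmetic; a worked example assuming 64-byte cache lines (ERTS_CACHE_LINE_MASK == 0x3f):

    /*   raw  = 0x50000028                        erts_alloc() result
     *   base = (raw & ~0x3f) + 0x40 = 0x50000040  line aligned
     * The one spare erts_meta_main_tab_lock_t in the allocation pays
     * for the at most (line size - 1) bytes skipped, and the union's
     * cache_line_align__ padding keeps any two locks on distinct
     * lines, avoiding false sharing between slots. */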
+
+ for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
+ erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
+ "meta_main_tab_slot", make_small(i));
}
erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i));
}
#endif
@@ -2895,12 +3029,12 @@ retry:
to_pid, to_locks,
ERTS_P2P_FLG_TRY_LOCK);
if (to_proc == ERTS_PROC_LOCK_BUSY) {
- db_ref(tb); /* while unlocked */
+ db_ref(tb, LCK_NONE); /* while unlocked */
db_unlock(tb,LCK_WRITE);
to_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
to_pid, to_locks);
db_lock(tb,LCK_WRITE);
- tb = db_unref(tb);
+ tb = db_unref(tb, LCK_NONE);
ASSERT(tb != NULL);
if (tb->common.owner != p->id) {
@@ -3008,12 +3142,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (!IS_SLOT_FREE(ix)) {
- tb = db_ref(GET_ANY_SLOT_TAB(ix));
+ tb = db_ref(GET_ANY_SLOT_TAB(ix), LCK_WRITE);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int do_yield;
db_lock_take_over_ref(tb, LCK_WRITE);
@@ -3045,7 +3180,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
tb->common.status |= DB_DELETE;
if (is_atom(tb->common.id))
- remove_named_tab(tb->common.id);
+ remove_named_tab(tb, 0);
free_heir_data(tb);
free_fixations_locked(tb);
@@ -3095,12 +3230,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(ix)) {
- tb = db_ref(meta_main_tab[ix].u.tb);
+ tb = db_ref(meta_main_tab[ix].u.tb, LCK_WRITE_REC);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int reds;
DbFixation** pp;
@@ -3274,6 +3410,7 @@ unlocked:
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
+ db_ref(tb, LCK_WRITE); /* LCK_WRITE needs it, but not LCK_READ */
erts_smp_rwmtx_runlock(&tb->common.rwlock);
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
@@ -3386,6 +3523,7 @@ static int free_table_cont(Process *p,
int clean_meta_tab)
{
Eterm result;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
if (!first) {
@@ -3411,9 +3549,20 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
free_slot(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
if (clean_meta_tab) {
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
@@ -3421,7 +3570,7 @@ static int free_table_cont(Process *p,
make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
}
- db_unref(tb);
+ db_unref(tb, LCK_NONE);
BUMP_REDS(p, 100);
return 0;
}
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 124129a371..08117bb6e5 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -623,12 +623,16 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init(&tb->is_resizing, 0);
#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
int i;
+ if (tb->common.type & DB_FREQ_READ)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", make_small(i));
}
/* This important property is needed to guarantee that the buckets
 * involved in a grow/shrink operation is protected by the same lock:
@@ -2751,11 +2755,7 @@ static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
newDbTerm = &newp->dbterm;
newDbTerm->size = handle->new_size;
- newDbTerm->off_heap.mso = NULL;
- newDbTerm->off_heap.externals = NULL;
- #ifndef HYBRID /* FIND ME! */
- newDbTerm->off_heap.funs = NULL;
- #endif
+ newDbTerm->off_heap.first = NULL;
newDbTerm->off_heap.overhead = 0;
/* make a flat copy */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index b6b3cabafe..da2696163a 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -1081,11 +1081,12 @@ static int db_select_continue_tree(Process *p,
static int db_select_tree(Process *p, DbTable *tbl,
Eterm pattern, int reverse, Eterm *ret)
{
+ /* Strategy: Traverse backwards to build resulting list from tail to head */
DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1293,7 +1294,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
DbTreeStack* stack;
struct select_count_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1395,7 +1396,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1636,7 +1637,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
DbTableTree *tb = &tbl->tree;
struct select_delete_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -2588,11 +2589,7 @@ static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
newDbTerm = &newp->dbterm;
newDbTerm->size = handle->new_size;
- newDbTerm->off_heap.mso = NULL;
- newDbTerm->off_heap.externals = NULL;
- #ifndef HYBRID /* FIND ME! */
- newDbTerm->off_heap.funs = NULL;
- #endif
+ newDbTerm->off_heap.first = NULL;
newDbTerm->off_heap.overhead = 0;
/* make a flat copy */
@@ -2628,7 +2625,7 @@ static void traverse_backwards(DbTableTree *tb,
{
TreeDbTerm *this, *next;
- if (lastkey == NIL) {
+ if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
if (( this = tb->root ) == NULL) {
return;
@@ -2666,7 +2663,7 @@ static void traverse_forward(DbTableTree *tb,
{
TreeDbTerm *this, *next;
- if (lastkey == NIL) {
+ if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
if (( this = tb->root ) == NULL) {
return;
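
The switch from NIL to THE_NON_VALUE as the "no previous key" sentinel in the traversals above closes a real hole: NIL is the empty list, and the empty list is a perfectly legal ordered_set key, so a traversal whose last key happened to be [] would previously restart from the root. A hedged illustration:

    /* NIL can occur as a key, e.g. from Erlang:
     *     ets:insert(T, {[], data})
     * THE_NON_VALUE, by definition, is never a valid term, which makes
     * it a safe out-of-band marker for "start from the end". */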
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 93a47eb76f..48e0080525 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -363,12 +363,7 @@ static ErtsMatchPseudoProcess *match_pseudo_process;
static ERTS_INLINE void
cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
{
- if (mpsp->process.mbuf
- || mpsp->process.off_heap.mso
-#ifndef HYBRID /* FIND ME! */
- || mpsp->process.off_heap.funs
-#endif
- || mpsp->process.off_heap.externals) {
+ if (mpsp->process.mbuf || mpsp->process.off_heap.first) {
erts_cleanup_empty_process(&mpsp->process);
}
#ifdef DEBUG
@@ -2495,11 +2490,7 @@ void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
p = (DbTerm*) ((void *)(((char *) structp) + offset));
}
p->size = size;
- p->off_heap.mso = NULL;
- p->off_heap.externals = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
top = DBTERM_BUF(p);
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 382e5dceb5..672c5f2cd1 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -240,8 +240,9 @@ typedef struct db_table_common {
#define DB_DUPLICATE_BAG (1 << 8)
#define DB_ORDERED_SET (1 << 9)
#define DB_DELETE (1 << 10) /* table is being deleted */
+#define DB_FREQ_READ (1 << 11)
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED)
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index aa37edafd1..d42820ddf3 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -186,10 +186,9 @@ int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_trylock(&dmtx->mtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_mutex_trylock()");
- return res;
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_trylock()");
+ return ethr_mutex_trylock(&dmtx->mtx);
#else
return 0;
#endif
@@ -199,9 +198,9 @@ void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_lock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_lock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_lock()");
+ ethr_mutex_lock(&dmtx->mtx);
#endif
}
@@ -209,9 +208,9 @@ void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_unlock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_unlock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_unlock()");
+ ethr_mutex_unlock(&dmtx->mtx);
#endif
}
@@ -256,9 +255,9 @@ void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_signal(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_signal()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_signal()");
+ ethr_cond_signal(&dcnd->cnd);
#endif
}
@@ -266,9 +265,9 @@ void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_broadcast(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_broadcast()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_broadcast()");
+ ethr_cond_broadcast(&dcnd->cnd);
#endif
}
@@ -277,18 +276,13 @@ void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res;
if (!dcnd || !dmtx) {
- res = EINVAL;
- error:
- fatal_error(res, "erl_drv_cond_wait()");
+ fatal_error(EINVAL, "erl_drv_cond_wait()");
}
while (1) {
- res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
+ int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
if (res == 0)
break;
- if (res != EINTR)
- goto error;
}
#endif
}
@@ -333,10 +327,9 @@ int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
+ return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -346,9 +339,9 @@ void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+ ethr_rwmutex_rlock(&drwlck->rwmtx);
#endif
}
@@ -356,9 +349,9 @@ void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_runlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_runlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
+ ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
}
@@ -366,10 +359,9 @@ int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrwlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
+ return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -379,9 +371,9 @@ void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+ ethr_rwmutex_rwlock(&drwlck->rwmtx);
#endif
}
@@ -389,9 +381,9 @@ void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwunlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwunlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
+ ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
}
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 15d9538301..5dce5ad262 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -37,8 +37,6 @@ static erts_smp_rwmtx_t erts_fun_table_lock;
#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
-#define erts_fun_init_lock() erts_smp_rwmtx_init(&erts_fun_table_lock, \
- "fun_tab")
static HashValue fun_hash(ErlFunEntry* obj);
static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -57,8 +55,12 @@ void
erts_init_fun_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
- erts_fun_init_lock();
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
f.alloc = (HALLOC_FUN) fun_alloc;
@@ -192,20 +194,6 @@ erts_erase_fun_entry(ErlFunEntry* fe)
erts_fun_write_unlock();
}
-#ifndef HYBRID /* FIND ME! */
-void
-erts_cleanup_funs(ErlFunThing* funp)
-{
- while (funp) {
- ErlFunEntry* fe = funp->fe;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
- erts_erase_fun_entry(fe);
- }
- funp = funp->next;
- }
-}
-#endif
-
void
erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end)
{
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 944d4b3df5..2f165afa06 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -53,10 +53,10 @@ typedef struct erl_fun_entry {
typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
+ ErlFunEntry* fe; /* Pointer to fun entry. */
#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* next; /* Next fun in mso list. */
+ struct erl_off_heap_header* next;
#endif
- ErlFunEntry* fe; /* Pointer to fun entry. */
#ifdef HIPE
UWord* native_address; /* Native code for the fun. */
#endif
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index a19e090f1e..adc50675bf 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -110,9 +110,7 @@ static Uint adjust_after_fullsweep(Process *p, int size_before,
int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
-static void sweep_proc_bins(Process *p, int fullsweep);
-static void sweep_proc_funs(Process *p, int fullsweep);
-static void sweep_proc_externals(Process *p, int fullsweep);
+static void sweep_off_heap(Process *p, int fullsweep);
static void offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
@@ -145,6 +143,16 @@ erts_init_gc(void)
{
int i = 0;
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
+ ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
+ ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
+ ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
+ ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
+ ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
+ ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
+
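These asserts pin down the contract sweep_off_heap() depends on: ProcBin, ErlFunThing and ExternalThing all start with the same three fields and can be traversed through one pointer type. Schematically (the actual declaration ships with the ErlOffHeap changes, presumably in erl_message.h, and is not visible in this hunk):

    struct erl_off_heap_header {
        Eterm thing_word;                 /* tagged header, or forwarding
                                           * pointer once the object moved */
        Uint size;
        struct erl_off_heap_header *next; /* the single unified chain */
    };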
erts_smp_spinlock_init(&info_lck, "gc_info");
garbage_cols = 0;
reclaimed = 0;
@@ -286,25 +294,14 @@ erts_offset_heap_ptr(Eterm* hp, Uint sz, Sint offs,
offset_heap_ptr(hp, sz, offs, (char *) low, ((char *)high)-((char *)low));
}
+
#define ptr_within(ptr, low, high) ((ptr) < (high) && (ptr) >= (low))
void
erts_offset_off_heap(ErlOffHeap *ohp, Sint offs, Eterm* low, Eterm* high)
{
- if (ohp->mso && ptr_within((Eterm *)ohp->mso, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->mso;
- *uptr += offs;
- }
-
-#ifndef HYBRID /* FIND ME! */
- if (ohp->funs && ptr_within((Eterm *)ohp->funs, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->funs;
- *uptr += offs;
- }
-#endif
-
- if (ohp->externals && ptr_within((Eterm *)ohp->externals, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->externals;
+ if (ohp->first && ptr_within((Eterm *)ohp->first, low, high)) {
+ Eterm** uptr = (Eterm**) (void *) &ohp->first;
*uptr += offs;
}
}
@@ -504,14 +501,8 @@ erts_garbage_collect_hibernate(Process* p)
cleanup_rootset(&rootset);
- if (MSO(p).mso) {
- sweep_proc_bins(p, 1);
- }
- if (MSO(p).funs) {
- sweep_proc_funs(p, 1);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 1);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 1);
}
/*
@@ -1041,15 +1032,8 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
OLD_HTOP(p) = old_htop;
HIGH_WATER(p) = (HEAP_START(p) != HIGH_WATER(p)) ? n_heap : n_htop;
- if (MSO(p).mso) {
- sweep_proc_bins(p, 0);
- }
-
- if (MSO(p).funs) {
- sweep_proc_funs(p, 0);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 0);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 0);
}
#ifdef HARDDEBUG
@@ -1271,17 +1255,11 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
}
}
- if (MSO(p).mso) {
- sweep_proc_bins(p, 1);
- }
- if (MSO(p).funs) {
- sweep_proc_funs(p, 1);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 1);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 1);
}
- if (OLD_HEAP(p) != NULL) {
+ if (OLD_HEAP(p) != NULL) {
ERTS_HEAP_FREE(ERTS_ALC_T_OLD_HEAP,
OLD_HEAP(p),
(OLD_HEND(p) - OLD_HEAP(p)) * sizeof(Eterm));
@@ -1305,6 +1283,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
HIGH_WATER(p) = HEAP_TOP(p);
ErtsGcQuickSanityCheck(p);
+
/*
* Copy newly received message onto the end of the new heap.
*/
@@ -2044,108 +2023,24 @@ next_vheap_size(Process* p, Uint vheap, Uint vheap_sz) {
return vheap_sz < p->min_vheap_size ? p->min_vheap_size : vheap_sz;
}
-static void
-sweep_proc_externals(Process *p, int fullsweep)
-{
- ExternalThing** prev;
- ExternalThing* ptr;
- char* oh = 0;
- Uint oh_size = 0;
-
- if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
- }
-
- prev = &MSO(p).externals;
- ptr = MSO(p).externals;
-
- while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
- ExternalThing* ro = external_thing_ptr(*ppt);
-
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- prev = &ptr->next;
- ptr = ptr->next;
- } else { /* Object has not been moved - deref it */
- erts_deref_node_entry(ptr->node);
- *prev = ptr = ptr->next;
- }
- }
- ASSERT(*prev == NULL);
-}
-
-static void
-sweep_proc_funs(Process *p, int fullsweep)
-{
- ErlFunThing** prev;
- ErlFunThing* ptr;
- char* oh = 0;
- Uint oh_size = 0;
-
- if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
- }
-
- prev = &MSO(p).funs;
- ptr = MSO(p).funs;
-
- while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
- ErlFunThing* ro = (ErlFunThing *) fun_val(*ppt);
-
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- prev = &ptr->next;
- ptr = ptr->next;
- } else { /* Object has not been moved - deref it */
- ErlFunEntry* fe = ptr->fe;
-
- *prev = ptr = ptr->next;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
- erts_erase_fun_entry(fe);
- }
- }
- }
- ASSERT(*prev == NULL);
-}
-
struct shrink_cand_data {
- ProcBin* new_candidates;
- ProcBin* new_candidates_end;
- ProcBin* old_candidates;
+ struct erl_off_heap_header* new_candidates;
+ struct erl_off_heap_header* new_candidates_end;
+ struct erl_off_heap_header* old_candidates;
Uint no_of_candidates;
Uint no_of_active;
};
static ERTS_INLINE void
link_live_proc_bin(struct shrink_cand_data *shrink,
- ProcBin ***prevppp,
- ProcBin **pbpp,
+ struct erl_off_heap_header*** prevppp,
+ struct erl_off_heap_header** currpp,
int new_heap)
{
- ProcBin *pbp = *pbpp;
-
- *pbpp = pbp->next;
+ ProcBin *pbp = (ProcBin*) *currpp;
+ ASSERT(**prevppp == *currpp);
+ *currpp = pbp->next;
if (pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE)) {
ASSERT(((pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
== (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
@@ -2162,15 +2057,16 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
/* Our allocators are 8 byte aligned, i.e., shrinking with
less than 8 bytes will have no real effect */
if (unused >= 8) { /* A shrink candidate; save in candidate list */
+ **prevppp = pbp->next;
if (new_heap) {
if (!shrink->new_candidates)
- shrink->new_candidates_end = pbp;
+ shrink->new_candidates_end = (struct erl_off_heap_header*)pbp;
pbp->next = shrink->new_candidates;
- shrink->new_candidates = pbp;
+ shrink->new_candidates = (struct erl_off_heap_header*)pbp;
}
else {
pbp->next = shrink->old_candidates;
- shrink->old_candidates = pbp;
+ shrink->old_candidates = (struct erl_off_heap_header*)pbp;
}
shrink->no_of_candidates++;
return;
@@ -2178,67 +2074,101 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
}
}
- /* Not a shrink candidate; keep in original mso list */
- **prevppp = pbp;
+ /* Not a shrink candidate; keep in original mso list */
*prevppp = &pbp->next;
-
}
static void
-sweep_proc_bins(Process *p, int fullsweep)
+sweep_off_heap(Process *p, int fullsweep)
{
struct shrink_cand_data shrink = {0};
- ProcBin** prev;
- ProcBin* ptr;
- Binary* bptr;
- char* oh = NULL;
- Uint oh_size = 0;
+ struct erl_off_heap_header* ptr;
+ struct erl_off_heap_header** prev;
+ char* oheap = NULL;
+ Uint oheap_sz = 0;
Uint bin_vheap = 0;
+#ifdef DEBUG
+ int seen_mature = 0;
+#endif
if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
+ oheap = (char *) OLD_HEAP(p);
+ oheap_sz = (char *) OLD_HEND(p) - oheap;
}
BIN_OLD_VHEAP(p) = 0;
- prev = &MSO(p).mso;
- ptr = MSO(p).mso;
+ prev = &MSO(p).first;
+ ptr = MSO(p).first;
- /*
- * Note: In R7 we no longer force a fullsweep when we find binaries
- * on the old heap. The reason is that with the introduction of the
- * bit syntax we can expect binaries to be used a lot more. Note that
- * in earlier releases a brand new binary (or any other term) could
- * be put on the old heap during a gen-gc fullsweep, but this is
- * no longer the case in R7.
+ /* First part of the list will reside on the (old) new-heap.
+ * Keep if moved, otherwise deref.
*/
while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED_BOXED(*ppt)) { /* Object is alive */
- bin_vheap += ptr->size / sizeof(Eterm);
- ptr = (ProcBin*) binary_val(*ppt);
- link_live_proc_bin(&shrink,
- &prev,
- &ptr,
- !in_area(ptr, oh, oh_size));
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
- link_live_proc_bin(&shrink, &prev, &ptr, 0);
- } else { /* Object has not been moved - deref it */
-
- *prev = ptr->next;
- bptr = ptr->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0)
- erts_bin_free(bptr);
- ptr = *prev;
- }
+ if (IS_MOVED_BOXED(ptr->thing_word)) {
+ ASSERT(!in_area(ptr, oheap, oheap_sz));
+ *prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
+ ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ int to_new_heap = !in_area(ptr, oheap, oheap_sz);
+ ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
+ if (to_new_heap) {
+ bin_vheap += ptr->size / sizeof(Eterm);
+ } else {
+ BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
+ }
+ link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
+ }
+ else {
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
+ }
+ else if (!in_area(ptr, oheap, oheap_sz)) {
+ /* garbage */
+ switch (thing_subtag(ptr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ {
+ Binary* bptr = ((ProcBin*)ptr)->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ break;
+ }
+ case FUN_SUBTAG:
+ {
+ ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe;
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ erts_erase_fun_entry(fe);
+ }
+ break;
+ }
+ default:
+ ASSERT(is_external_header(ptr->thing_word));
+ erts_deref_node_entry(((ExternalThing*)ptr)->node);
+ }
+ *prev = ptr = ptr->next;
+ }
+ else break; /* and let old-heap loop continue */
+ }
+
+ /* The rest of the list resides on old-heap, and we just did a
+ * generational collection - keep objects in list.
+ */
+ while (ptr) {
+ ASSERT(in_area(ptr, oheap, oheap_sz));
+ ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
+ link_live_proc_bin(&shrink, &prev, &ptr, 0);
+ }
+ else {
+ ASSERT(is_fun_header(ptr->thing_word) ||
+ is_external_header(ptr->thing_word));
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
}
if (BIN_OLD_VHEAP(p) >= BIN_OLD_VHEAP_SZ(p)) {
@@ -2254,7 +2184,8 @@ sweep_proc_bins(Process *p, int fullsweep)
*/
if (shrink.no_of_candidates) {
- ProcBin *candlist[] = {shrink.new_candidates, shrink.old_candidates};
+ ProcBin *candlist[] = { (ProcBin*)shrink.new_candidates,
+ (ProcBin*)shrink.old_candidates };
Uint leave_unused = 0;
int i;
@@ -2266,21 +2197,21 @@ sweep_proc_bins(Process *p, int fullsweep)
}
for (i = 0; i < sizeof(candlist)/sizeof(candlist[0]); i++) {
-
- for (ptr = candlist[i]; ptr; ptr = ptr->next) {
- Uint new_size = ptr->size;
+ ProcBin* pb;
+ for (pb = candlist[i]; pb; pb = (ProcBin*)pb->next) {
+ Uint new_size = pb->size;
if (leave_unused) {
new_size += (new_size * 100) / leave_unused;
/* Our allocators are 8 byte aligned, i.e., shrinking by
less than 8 bytes will have no real effect */
- if (new_size + 8 >= ptr->val->orig_size)
+ if (new_size + 8 >= pb->val->orig_size)
continue;
}
- ptr->val = erts_bin_realloc(ptr->val, new_size);
- ptr->val->orig_size = new_size;
- ptr->bytes = (byte *) ptr->val->orig_bytes;
+ pb->val = erts_bin_realloc(pb->val, new_size);
+ pb->val->orig_size = new_size;
+ pb->bytes = (byte *) pb->val->orig_bytes;
}
}
@@ -2289,21 +2220,20 @@ sweep_proc_bins(Process *p, int fullsweep)
* We now potentially have the mso list divided into three lists:
* - shrink candidates on new heap (inactive writable with unused data)
* - shrink candidates on old heap (inactive writable with unused data)
- * - other binaries (read only + active writable ...)
+ * - other binaries (read only + active writable ...) + funs and externals
*
* Put them back together: new candidates -> other -> old candidates
* This order will ensure that the list only refers from new
* generation to old and never from old to new *which is important*.
*/
if (shrink.new_candidates) {
- if (prev == &MSO(p).mso) /* empty other binaries list */
+ if (prev == &MSO(p).first) /* empty other binaries list */
prev = &shrink.new_candidates_end->next;
else
- shrink.new_candidates_end->next = MSO(p).mso;
- MSO(p).mso = shrink.new_candidates;
+ shrink.new_candidates_end->next = MSO(p).first;
+ MSO(p).first = shrink.new_candidates;
}
}
-
*prev = shrink.old_candidates;
}
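
A minimal, self-contained model of the unlink-or-advance loop that both sweep
phases above use: refc binaries, funs and externals now start with the same
three fields (thing_word, size, next), so one cursor of type
struct erl_off_heap_header** can keep or unlink elements regardless of kind.
Illustrative standard C, not ERTS code:

    #include <stdio.h>

    struct oh_hdr { unsigned thing_word; unsigned size; struct oh_hdr *next; };

    /* Unlink every element whose thing_word is 0 ("dead"); keep the rest.
     * `prev` always points at the link that will hold the next live node. */
    static void sweep(struct oh_hdr **prev)
    {
        struct oh_hdr *p = *prev;
        while (p) {
            if (p->thing_word == 0) {
                *prev = p = p->next;    /* dead: unlink, cursor stays put */
            } else {
                prev = &p->next;        /* alive: advance the cursor */
                p = p->next;
            }
        }
    }

    int main(void)
    {
        struct oh_hdr c = {3, 1, NULL}, b = {0, 1, &c}, a = {1, 1, &b};
        struct oh_hdr *first = &a;
        sweep(&first);
        for (struct oh_hdr *p = first; p; p = p->next)
            printf("%u ", p->thing_word);   /* prints: 1 3 */
        return 0;
    }
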
@@ -2334,15 +2264,17 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
tari = thing_arityval(val);
switch (thing_subtag(val)) {
case REFC_BINARY_SUBTAG:
+ case FUN_SUBTAG:
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
{
- ProcBin* pb = (ProcBin*) hp;
- Eterm** uptr = (Eterm **) (void *) &pb->next;
+ struct erl_off_heap_header* oh = (struct erl_off_heap_header*) hp;
- if (*uptr && in_area((Eterm *)pb->next, area, area_size)) {
+ if (in_area(oh->next, area, area_size)) {
+ Eterm** uptr = (Eterm **) (void *) &oh->next;
*uptr += offs; /* Patch the mso chain */
}
- sz -= tari;
- hp += tari + 1;
}
break;
case BIN_MATCHSTATE_SUBTAG:
@@ -2353,40 +2285,11 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
mb->orig = offset_ptr(mb->orig, offs);
mb->base = binary_bytes(mb->orig);
}
- sz -= tari;
- hp += tari + 1;
}
break;
- case FUN_SUBTAG:
- {
- ErlFunThing* funp = (ErlFunThing *) hp;
- Eterm** uptr = (Eterm **) (void *) &funp->next;
-
- if (*uptr && in_area((Eterm *)funp->next, area, area_size)) {
- *uptr += offs;
- }
- sz -= tari;
- hp += tari + 1;
- }
- break;
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG:
- {
- ExternalThing* etp = (ExternalThing *) hp;
- Eterm** uptr = (Eterm **) (void *) &etp->next;
-
- if (*uptr && in_area((Eterm *)etp->next, area, area_size)) {
- *uptr += offs;
- }
- sz -= tari;
- hp += tari + 1;
- }
- break;
- default:
- sz -= tari;
- hp += tari + 1;
}
+ sz -= tari;
+ hp += tari + 1;
break;
}
default:
@@ -2423,18 +2326,8 @@ offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
static void
offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
{
- if (MSO(p).mso && in_area((Eterm *)MSO(p).mso, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).mso;
- *uptr += offs;
- }
-
- if (MSO(p).funs && in_area((Eterm *)MSO(p).funs, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).funs;
- *uptr += offs;
- }
-
- if (MSO(p).externals && in_area((Eterm *)MSO(p).externals, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).externals;
+ if (MSO(p).first && in_area((Eterm *)MSO(p).first, area, area_size)) {
+ Eterm** uptr = (Eterm**) (void *) &MSO(p).first;
*uptr += offs;
}
}
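
The consolidated subtag cases above all rest on one idea: when a heap area is
moved, a stored pointer into it stays valid if adjusted by the move distance,
which is exactly the in_area()-plus-offset step. A toy model in standard C
(illustrative, not ERTS code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned long buf[16] = {10, 11, 12, 13};  /* old area: buf[0..7] */
        unsigned long *heap = buf, *new_heap = buf + 8;
        unsigned long *p = &heap[2];        /* stored pointer into old area */
        long offs = new_heap - heap;        /* move distance in words */

        memcpy(new_heap, heap, 8 * sizeof *buf);  /* "collect": move the heap */
        if (p >= heap && p < heap + 8)      /* the in_area() test */
            p += offs;                      /* patch, like *uptr += offs */
        printf("%lu\n", *p);                /* 12, now read from the new area */
        return 0;
    }
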
@@ -2555,8 +2448,8 @@ do { \
__FILE__, __LINE__, #EXP); \
} while (0)
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
-# define ERTS_EXTERNAL_VISITED_BIT ((Eterm) 1 << 31)
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
+# define ERTS_OFFHEAP_VISITED_BIT ((Eterm) 1 << 31)
#endif
@@ -2566,62 +2459,45 @@ erts_check_off_heap2(Process *p, Eterm *htop)
Eterm *oheap = (Eterm *) OLD_HEAP(p);
Eterm *ohtop = (Eterm *) OLD_HTOP(p);
int old;
- ProcBin *pb;
- ErlFunThing *eft;
- ExternalThing *et;
+ union erl_off_heap_ptr u;
old = 0;
- for (pb = MSO(p).mso; pb; pb = pb->next) {
- Eterm *ptr = (Eterm *) pb;
- long refc = erts_refc_read(&pb->val->refc, 1);
+ for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next) {
+ long refc;
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ refc = erts_refc_read(&u.pb->val->refc, 1);
+ break;
+ case FUN_SUBTAG:
+ refc = erts_refc_read(&u.fun->fe->refc, 1);
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ refc = erts_refc_read(&u.ext->node->refc, 1);
+ break;
+ default:
+ ASSERT(!!"erts_check_off_heap2: Invalid thing_word");
+ }
ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
+ ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_OFFHEAP_VISITED_BIT));
+ u.hdr->thing_word |= ERTS_OFFHEAP_VISITED_BIT;
+#endif
if (old) {
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
+ ERTS_CHK_OFFHEAP_ASSERT(oheap <= u.ep && u.ep < ohtop);
}
- else if (oheap <= ptr && ptr < ohtop)
+ else if (oheap <= u.ep && u.ep < ohtop)
old = 1;
else {
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
+ ERTS_CHK_OFFHEAP_ASSERT(within2(u.ep, p, htop));
}
}
- old = 0;
- for (eft = MSO(p).funs; eft; eft = eft->next) {
- Eterm *ptr = (Eterm *) eft;
- long refc = erts_refc_read(&eft->fe->refc, 1);
- ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
- if (old)
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
- else if (oheap <= ptr && ptr < ohtop)
- old = 1;
- else
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
- }
-
- old = 0;
- for (et = MSO(p).externals; et; et = et->next) {
- Eterm *ptr = (Eterm *) et;
- long refc = erts_refc_read(&et->node->refc, 1);
- ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- ERTS_CHK_OFFHEAP_ASSERT(!(et->header & ERTS_EXTERNAL_VISITED_BIT));
-#endif
- if (old)
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
- else if (oheap <= ptr && ptr < ohtop)
- old = 1;
- else
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- et->header |= ERTS_EXTERNAL_VISITED_BIT;
-#endif
- }
-
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
- for (et = MSO(p).externals; et; et = et->next)
- et->header &= ~ERTS_EXTERNAL_VISITED_BIT;
+ for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next)
+ u.hdr->thing_word &= ~ERTS_OFFHEAP_VISITED_BIT;
#endif
-
}
void
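
The union erl_off_heap_ptr used in erts_check_off_heap2() above (and in later
hunks of this diff) is never defined here; from the member accesses (u.hdr,
u.pb, u.fun, u.ext, u.ep) it presumably looks roughly like the following.
This is an inference from usage, not a quote from the ERTS headers, and it
depends on the ERTS types it names:

    union erl_off_heap_ptr {
        struct erl_off_heap_header *hdr;  /* common header view           */
        ProcBin *pb;                      /* refc binary view             */
        struct erl_fun_thing *fun;        /* fun view                     */
        struct external_thing_ *ext;      /* external pid/port/ref view   */
        Eterm *ep;                        /* raw heap pointer view        */
    };
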
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 4a4507b212..14bd10b42c 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -78,6 +78,7 @@ static erts_tid_t main_thread;
erts_cpu_info_t *erts_cpuinfo;
+int erts_reader_groups;
int erts_use_sender_punish;
/*
@@ -110,6 +111,7 @@ int erts_compat_rel;
static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
+static int max_reader_groups;
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -505,6 +507,7 @@ void erts_usage(void)
ERTS_MIN_COMPAT_REL, this_rel_num());
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
+ erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
@@ -538,6 +541,50 @@ void erts_usage(void)
erl_exit(-1, "");
}
+#ifdef USE_THREADS
+/*
+ * allocators for thread lib
+ */
+
+static void *ethr_std_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_STD, (Uint) size);
+}
+static void *ethr_std_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_STD, ptr, (Uint) size);
+}
+static void ethr_std_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_STD, ptr);
+}
+static void *ethr_sl_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_SL, (Uint) size);
+}
+static void *ethr_sl_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_SL, ptr, (Uint) size);
+}
+static void ethr_sl_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_SL, ptr);
+}
+static void *ethr_ll_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_LL, (Uint) size);
+}
+static void *ethr_ll_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_LL, ptr, (Uint) size);
+}
+static void ethr_ll_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_LL, ptr);
+}
+
+#endif
+
static void
early_init(int *argc, char **argv) /*
* Only put things here which are
@@ -615,9 +662,15 @@ early_init(int *argc, char **argv) /*
? ncpuavail
: (ncpuonln > 0 ? ncpuonln : no_schedulers));
+#ifdef ERTS_SMP
+ erts_max_main_threads = no_schedulers_online;
+#endif
+
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ max_reader_groups = ERTS_MAX_READER_GROUPS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -627,6 +680,24 @@ early_init(int *argc, char **argv) /*
}
if (argv[i][0] == '-') {
switch (argv[i][1]) {
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ char *arg = get_arg(sub_param+1, argv[i+1], &i);
+ if (sscanf(arg, "%d", &max_reader_groups) != 1) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %s\n", arg);
+ erts_usage();
+ }
+ if (max_reader_groups < 0) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %d\n",
+ max_reader_groups);
+ erts_usage();
+ }
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
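
For reference, emulator flags are normally given to the erl front end with a
+ prefix, so the limit parsed here would typically be set with something like
the following (hypothetical invocation; only the flag name comes from the
usage text above):

    erl +rg 16
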
@@ -699,6 +770,36 @@ early_init(int *argc, char **argv) /*
erts_early_init_scheduling(); /* Require allocators */
erts_init_utils(); /* Require allocators */
+#ifdef USE_THREADS
+ {
+ erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
+ elid.mem.std.alloc = ethr_std_alloc;
+ elid.mem.std.realloc = ethr_std_realloc;
+ elid.mem.std.free = ethr_std_free;
+ elid.mem.sl.alloc = ethr_sl_alloc;
+ elid.mem.sl.realloc = ethr_sl_realloc;
+ elid.mem.sl.free = ethr_sl_free;
+ elid.mem.ll.alloc = ethr_ll_alloc;
+ elid.mem.ll.realloc = ethr_ll_realloc;
+ elid.mem.ll.free = ethr_ll_free;
+
+#ifdef ERTS_SMP
+ elid.main_threads = erts_max_main_threads;
+#else
+ elid.main_threads = 1;
+#endif
+ elid.reader_groups = (elid.main_threads > 1
+ ? elid.main_threads
+ : 0);
+ if (max_reader_groups <= 1)
+ elid.reader_groups = 0;
+ if (elid.reader_groups > max_reader_groups)
+ elid.reader_groups = max_reader_groups;
+ erts_reader_groups = elid.reader_groups;
+
+ erts_thr_late_init(&elid);
+ }
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
#endif
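
Working the clamping above through some numbers (using 64 for
ERTS_MAX_READER_GROUPS purely for illustration; its actual value is not shown
in this diff): with 8 main threads, reader_groups starts at 8 and passes both
tests unchanged; with 128 main threads it is clamped down to the 64 limit;
and with a single main thread, or with -rg 1, it collapses to 0, disabling
reader groups altogether.
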
@@ -1193,9 +1294,17 @@ erl_start(int argc, char **argv)
erts_async_thread_suggested_stack_size));
break;
- case 'r':
- erts_ets_realloc_always_moves = 1;
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ get_arg(sub_param+1, argv[i+1], &i);
+ /* already handled */
+ }
+ else {
+ erts_ets_realloc_always_moves = 1;
+ }
break;
+ }
case 'n': /* XXX obsolete */
break;
case 'c':
@@ -1280,6 +1389,7 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
+ erts_thr_set_main_status(1, 1);
set_main_stack_size();
process_main();
#endif
@@ -1353,7 +1463,7 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
+#if defined(USE_THREADS)
exit_async();
#endif
#if HAVE_ERTS_MSEG
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index cee470ae37..99cc80e259 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -96,10 +96,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_status", "pid" },
{ "proc_tab", NULL },
{ "ports_snapshot", NULL },
- { "db_tab", "address" },
- { "db_tab_fix", "address" },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
+ { "db_tab", "address" },
+ { "db_tab_fix", "address" },
{ "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
@@ -119,9 +119,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "child_status", NULL },
#endif
#ifdef __WIN32__
- { "sys_driver_data_lock", NULL },
+ { "sys_driver_data_lock", NULL },
#endif
- { "drv_ev_state_grow", NULL, },
+ { "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
{ "pollset_rm_list", NULL },
@@ -153,6 +153,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "instr", NULL },
{ "fix_alloc", "index" },
{ "alcu_allocator", "index" },
+ { "alcu_delayed_free", "index" },
{ "mseg", NULL },
#ifdef HALFWORD_HEAP
{ "pmmap", NULL },
@@ -177,17 +178,19 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
+ { "run_queue_sleep_list", "address" },
#endif
{ "alloc_thr_ix_lock", NULL },
#ifdef ERTS_SMP
- { "proc_lck_wtr_alloc", NULL },
+ { "proc_lck_qs_alloc", NULL },
#endif
#ifdef __WIN32__
#ifdef DEBUG
{ "save_ops_lock", NULL },
#endif
#endif
- { "mtrace_buf", NULL }
+ { "mtrace_buf", NULL },
+ { "erts_alloc_hard_debug", NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
@@ -199,6 +202,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
& ERTS_LC_FLG_LT_ALL \
& ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+static __decl_noreturn void __noreturn lc_abort(void);
+
static char *
lock_type(Uint16 flags)
{
@@ -222,7 +227,7 @@ rw_op_str(Uint16 flags)
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
erts_fprintf(stderr, "\nInternal error\n");
- abort();
+ lc_abort();
default:
break;
}
@@ -271,28 +276,18 @@ static erts_lc_free_block_t *free_blocks;
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
static ethr_spinlock_t free_blocks_lock;
-#define ERTS_LC_LOCK ethr_spin_lock
-#define ERTS_LC_UNLOCK ethr_spin_unlock
-#else
-static ethr_mutex free_blocks_lock;
-#define ERTS_LC_LOCK ethr_mutex_lock
-#define ERTS_LC_UNLOCK ethr_mutex_unlock
-#endif
static ERTS_INLINE void
lc_lock(void)
{
- if (ERTS_LC_LOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_lock(&free_blocks_lock);
}
static ERTS_INLINE void
lc_unlock(void)
{
- if (ERTS_LC_UNLOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_unlock(&free_blocks_lock);
}
static ERTS_INLINE void lc_free(void *p)
@@ -313,7 +308,7 @@ static void *lc_core_alloc(void)
{
lc_unlock();
erts_fprintf(stderr, "Lock checker out of memory!\n");
- abort();
+ lc_abort();
}
#else
@@ -327,7 +322,7 @@ static void *lc_core_alloc(void)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- abort();
+ lc_abort();
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -367,11 +362,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- abort();
+ lc_abort();
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
l_lcks->tid = erts_thr_self();
l_lcks->required.first = NULL;
@@ -513,7 +508,7 @@ uninitialized_lock(void)
{
erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
static void
@@ -523,7 +518,7 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -533,7 +528,7 @@ unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -541,7 +536,7 @@ unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -550,7 +545,7 @@ lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
print_lock("Lock order violation occured when locking ", lck, "!\n");
print_curr_locks(l_lcks);
print_lock_order();
- abort();
+ lc_abort();
}
static void
@@ -561,7 +556,7 @@ type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
print_lock(op, lck, "!\n");
ASSERT(l_lcks);
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -613,7 +608,7 @@ lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
}
}
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -621,7 +616,7 @@ unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking required ", lck, " lock!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -629,7 +624,7 @@ unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *l
{
print_lock("Unrequire on ", lck, " lock not required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -637,7 +632,7 @@ require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Require on ", lck, " lock already required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -645,7 +640,7 @@ required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Required ", lck, " lock not locked!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
@@ -658,13 +653,23 @@ thread_exit_handler(void)
erts_fprintf(stderr,
"Thread exiting while having locked locks!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
destroy_locked_locks(l_lcks);
/* erts_tsd_set(locks_key, NULL); */
}
}
+static __decl_noreturn void
+lc_abort(void)
+{
+#ifdef __WIN32__
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
void
erts_lc_set_thread_name(char *thread_name)
{
@@ -676,7 +681,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
}
}
@@ -686,7 +691,7 @@ erts_lc_assert_failed(char *file, int line, char *assertion)
erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
file, line, assertion);
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
return 0;
}
@@ -699,7 +704,7 @@ void erts_lc_fail(char *fmt, ...)
va_end(args);
erts_fprintf(stderr, "\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
@@ -719,7 +724,7 @@ erts_lc_get_lock_order_id(char *name)
"(update erl_lock_check.c)\n",
name);
}
- abort();
+ lc_abort();
return (Sint16) -1;
}
@@ -895,6 +900,25 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
}
+void
+erts_lc_check_no_locked_of_type(Uint16 flags)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (l_lcks) {
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
+ if (l_lck->flags & flags) {
+ erts_fprintf(stderr,
+ "Locked lock of type %s found which isn't "
+ "allowed here!\n",
+ lock_type(l_lck->flags));
+ print_curr_locks(l_lcks);
+ lc_abort();
+ }
+ }
+ }
+}
+
int
erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
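
No caller of erts_lc_check_no_locked_of_type() appears in this diff. A
plausible, purely hypothetical use is to assert, just before code that may
sleep, that the calling thread holds no lock of a forbidden type:

    /* Hypothetical call site; ERTS_LC_FLG_LT_SPINLOCK is the flag used
     * in the lock-order machinery earlier in this file. */
    #ifdef ERTS_ENABLE_LOCK_CHECK
        erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_SPINLOCK);
    #endif
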
@@ -1283,13 +1307,8 @@ erts_lc_init(void)
free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
-#ifdef ETHR_HAVE_NATIVE_LOCKS
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- abort();
-#else
- if (ethr_mutex_init(&free_blocks_lock) != 0)
- abort();
-#endif
+ lc_abort();
erts_tsd_key_create(&locks_key);
}
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index d5e2ede9ac..0372e6850d 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -77,6 +77,7 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
+void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 26028aeefc..239773f366 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -166,8 +166,8 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
int i;
type = lcnt_lock_type(lock->flag);
- ethr_atomic_read(&lock->r_state, &r_state);
- ethr_atomic_read(&lock->w_state, &w_state);
+ r_state = ethr_atomic_read(&lock->r_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (lock->flag & flag) {
@@ -394,10 +394,10 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
ASSERT(eltd);
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (option & ERTS_LCNT_LO_WRITE) {
- ethr_atomic_read(&lock->r_state, &r_state);
+ r_state = ethr_atomic_read(&lock->r_state);
ethr_atomic_inc( &lock->w_state);
}
if (option & ERTS_LCNT_LO_READ) {
@@ -423,7 +423,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ethr_atomic_inc( &lock->w_state);
eltd = lcnt_get_thread_data();
@@ -478,7 +478,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
#ifdef DEBUG
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
}
@@ -522,12 +522,12 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
#ifdef DEBUG
/* flowstate */
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
ethr_atomic_dec( &lock->flowstate);
/* write state */
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ASSERT(w_state > 0);
#endif
ethr_atomic_dec(&lock->w_state);
@@ -558,7 +558,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
if (res != EBUSY) {
#ifdef DEBUG
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
#endif
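
Every change in this file is the same mechanical update: the
ethr_atomic_read() calling convention evidently moved from filling an
out-parameter to returning the value. Schematically (signatures inferred from
the call sites in this diff, not quoted from the ethread headers):

    /* old: void ethr_atomic_read(ethr_atomic_t *var, long *val); */
    ethr_atomic_read(&lock->w_state, &w_state);

    /* new: long ethr_atomic_read(ethr_atomic_t *var); */
    w_state = ethr_atomic_read(&lock->w_state);
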
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index b63f3df7df..1f61dca230 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -30,6 +30,7 @@
#include "erl_message.h"
#include "erl_process.h"
#include "erl_nmgc.h"
+#include "erl_binary.h"
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
ErlMessage,
@@ -164,16 +165,25 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
void
erts_cleanup_offheap(ErlOffHeap *offheap)
{
- if (offheap->mso) {
- erts_cleanup_mso(offheap->mso);
- }
-#ifndef HYBRID /* FIND ME! */
- if (offheap->funs) {
- erts_cleanup_funs(offheap->funs);
- }
-#endif
- if (offheap->externals) {
- erts_cleanup_externals(offheap->externals);
+ union erl_off_heap_ptr u;
+
+ for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
+ erts_bin_free(u.pb->val);
+ }
+ break;
+ case FUN_SUBTAG:
+ if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+ erts_erase_fun_entry(u.fun->fe);
+ }
+ break;
+ default:
+ ASSERT(is_external_header(u.hdr->thing_word));
+ erts_deref_node_entry(u.ext->node);
+ break;
+ }
}
}
@@ -201,41 +211,17 @@ link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
MBUF_SIZE(proc) += bp->used_size;
FLAGS(proc) |= F_FORCE_GC;
- /* Move any binaries into the process */
- if (bp->off_heap.mso != NULL) {
- ProcBin** next_p = &bp->off_heap.mso;
+ /* Move any off_heap's into the process */
+ if (bp->off_heap.first != NULL) {
+ struct erl_off_heap_header** next_p = &bp->off_heap.first;
while (*next_p != NULL) {
next_p = &((*next_p)->next);
}
- *next_p = MSO(proc).mso;
- MSO(proc).mso = bp->off_heap.mso;
- bp->off_heap.mso = NULL;
+ *next_p = MSO(proc).first;
+ MSO(proc).first = bp->off_heap.first;
+ bp->off_heap.first = NULL;
MSO(proc).overhead += bp->off_heap.overhead;
}
-
- /* Move any funs into the process */
-#ifndef HYBRID
- if (bp->off_heap.funs != NULL) {
- ErlFunThing** next_p = &bp->off_heap.funs;
- while (*next_p != NULL) {
- next_p = &((*next_p)->next);
- }
- *next_p = MSO(proc).funs;
- MSO(proc).funs = bp->off_heap.funs;
- bp->off_heap.funs = NULL;
- }
-#endif
-
- /* Move any external things into the process */
- if (bp->off_heap.externals != NULL) {
- ExternalThing** next_p = &bp->off_heap.externals;
- while (*next_p != NULL) {
- next_p = &((*next_p)->next);
- }
- *next_p = MSO(proc).externals;
- MSO(proc).externals = bp->off_heap.externals;
- bp->off_heap.externals = NULL;
- }
}
}
@@ -506,19 +492,7 @@ erts_link_mbuf_to_proc(struct process *proc, ErlHeapFragment *bp)
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
- /* Unions for typecasts avoids warnings about type-punned pointers and aliasing */
- union {
- Uint** upp;
- ProcBin **pbpp;
- ErlFunThing **efpp;
- ExternalThing **etpp;
- } oh_list_pp, oh_el_next_pp;
- union {
- Uint *up;
- ProcBin *pbp;
- ErlFunThing *efp;
- ExternalThing *etp;
- } oh_el_p;
+ struct erl_off_heap_header* oh;
Eterm term, token, *fhp, *hp;
Sint offs;
Uint sz;
@@ -571,9 +545,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
hp = *hpp;
offs = hp - fhp;
- oh_list_pp.upp = NULL;
- oh_el_next_pp.upp = NULL; /* Shut up compiler warning */
- oh_el_p.up = NULL; /* Shut up compiler warning */
+ oh = NULL;
while (sz--) {
Uint cpy_sz;
Eterm val = *fhp++;
@@ -593,25 +565,11 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
case ARITYVAL_SUBTAG:
break;
case REFC_BINARY_SUBTAG:
- oh_list_pp.pbpp = &off_heap->mso;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.pbpp = &(oh_el_p.pbp)->next;
- cpy_sz = thing_arityval(val);
- goto cpy_words;
case FUN_SUBTAG:
-#ifndef HYBRID
- oh_list_pp.efpp = &off_heap->funs;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.efpp = &(oh_el_p.efp)->next;
-#endif
- cpy_sz = thing_arityval(val);
- goto cpy_words;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
- oh_list_pp.etpp = &off_heap->externals;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.etpp = &(oh_el_p.etp)->next;
+ oh = (struct erl_off_heap_header*) (hp-1);
cpy_sz = thing_arityval(val);
goto cpy_words;
default:
@@ -641,44 +599,13 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
case 1: *hp++ = *fhp++;
default: break;
}
- if (oh_list_pp.upp) {
-#ifdef HARD_DEBUG
- Uint *dbg_old_oh_list_p = *oh_list_pp.upp;
-#endif
+ if (oh) {
/* Add to offheap list */
- *oh_el_next_pp.upp = *oh_list_pp.upp;
- *oh_list_pp.upp = oh_el_p.up;
- ASSERT(*hpp <= oh_el_p.up);
- ASSERT(hp > oh_el_p.up);
-#ifdef HARD_DEBUG
- switch (val & _HEADER_SUBTAG_MASK) {
- case REFC_BINARY_SUBTAG:
- ASSERT(off_heap->mso == *oh_list_pp.pbpp);
- ASSERT(off_heap->mso->next
- == (ProcBin *) dbg_old_oh_list_p);
- break;
-#ifndef HYBRID
- case FUN_SUBTAG:
- ASSERT(off_heap->funs == *oh_list_pp.efpp);
- ASSERT(off_heap->funs->next
- == (ErlFunThing *) dbg_old_oh_list_p);
- break;
-#endif
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG:
- ASSERT(off_heap->externals
- == *oh_list_pp.etpp);
- ASSERT(off_heap->externals->next
- == (ExternalThing *) dbg_old_oh_list_p);
- break;
- default:
- ASSERT(0);
- }
-#endif
- oh_list_pp.upp = NULL;
-
-
+ oh->next = off_heap->first;
+ off_heap->first = oh;
+ ASSERT(*hpp <= (Eterm*)oh);
+ ASSERT(hp > (Eterm*)oh);
+ oh = NULL;
}
break;
}
@@ -765,11 +692,7 @@ copy_done:
#endif
- bp->off_heap.mso = NULL;
-#ifndef HYBRID
- bp->off_heap.funs = NULL;
-#endif
- bp->off_heap.externals = NULL;
+ bp->off_heap.first = NULL;
free_message_buffer(bp);
msg->data.heap_frag = NULL;
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index f478572ac2..55ea92860a 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -28,13 +28,18 @@ struct external_thing_;
* but is stored outside of any heap.
*/
-typedef struct erl_off_heap {
- struct proc_bin* mso; /* List of associated binaries. */
-#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* funs; /* List of funs. */
+struct erl_off_heap_header {
+ Eterm thing_word;
+ Uint size;
+#if HALFWORD_HEAP
+ void* dummy_ptr_padding__;
#endif
- struct external_thing_* externals; /* List of external things. */
- int overhead; /* Administrative overhead (used to force GC). */
+ struct erl_off_heap_header* next;
+};
+
+typedef struct erl_off_heap {
+ struct erl_off_heap_header* first;
+ int overhead; /* Administrative overhead (used to force GC). */
} ErlOffHeap;
#include "external.h"
@@ -201,9 +206,7 @@ do { \
(HEAP_FRAG_P)->next = NULL; \
(HEAP_FRAG_P)->alloc_size = (DATA_WORDS); \
(HEAP_FRAG_P)->used_size = (DATA_WORDS); \
- (HEAP_FRAG_P)->off_heap.mso = NULL; \
- (HEAP_FRAG_P)->off_heap.funs = NULL; \
- (HEAP_FRAG_P)->off_heap.externals = NULL; \
+ (HEAP_FRAG_P)->off_heap.first = NULL; \
(HEAP_FRAG_P)->off_heap.overhead = 0; \
} while (0)
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 9cf55ee319..b1478758a1 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -585,9 +585,7 @@ void erts_mtrace_init(char *receiver, char *nodename)
Uint16 port;
erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_set_forksafe(&mtrace_buf_mutex);
erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
- erts_mtx_set_forksafe(&mtrace_op_mutex);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 3d63fa1caf..e95d9c4f75 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -258,9 +258,7 @@ void enif_free_env(ErlNifEnv* env)
static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
{
- oh->mso = NULL;
- oh->externals = NULL;
- oh->funs = NULL;
+ oh->first = NULL;
oh->overhead = 0;
}
@@ -364,7 +362,7 @@ ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* oh)
{
- return oh->mso != NULL || oh->funs != NULL || oh->externals != NULL;
+ return oh->first != NULL;
}
#endif
@@ -627,8 +625,8 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = bptr->orig_size;
- pb->next = MSO(env->proc).mso;
- MSO(env->proc).mso = pb;
+ pb->next = MSO(env->proc).first;
+ MSO(env->proc).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 5865d33138..e430b4ad77 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -80,6 +80,8 @@ dist_table_alloc(void *dep_tmpl)
Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
return dep_tmpl;
@@ -92,7 +94,7 @@ dist_table_alloc(void *dep_tmpl)
dep->prev = NULL;
erts_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_x(&dep->rwmtx, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -580,6 +582,18 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
ErlNode ne;
ne.sysname = sysname;
ne.creation = creation;
+
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ res = hash_get(&erts_node_table, (void *) &ne);
+ if (res && res != erts_this_node) {
+ long refc = erts_refc_inctest(&res->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&res->refc, 1);
+ }
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ if (res)
+ return res;
+
erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
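
The block added above is a classic optimistic lookup: try under the shared
read lock first, and take the exclusive write lock only on a miss (hash_put()
then has to cope with a concurrent insert, and the refc re-bump handles
entries pending deletion). A self-contained sketch of the pattern with POSIX
rwlocks, not the ERTS implementation:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy "node table": the point is the locking, not the table. */
    static pthread_rwlock_t tab_lock = PTHREAD_RWLOCK_INITIALIZER;
    static const char *tab[8];

    static const char *lookup(const char *key)
    {
        for (int i = 0; i < 8; i++)
            if (tab[i] && strcmp(tab[i], key) == 0)
                return tab[i];
        return NULL;
    }

    static const char *find_or_insert(const char *key)
    {
        const char *res;

        pthread_rwlock_rdlock(&tab_lock);   /* common case: shared lock */
        res = lookup(key);
        pthread_rwlock_unlock(&tab_lock);
        if (res)
            return res;

        pthread_rwlock_wrlock(&tab_lock);   /* rare case: exclusive lock */
        res = lookup(key);                  /* re-check: raced insert? */
        if (!res)
            for (int i = 0; i < 8; i++)
                if (!tab[i]) { res = tab[i] = key; break; }
        pthread_rwlock_unlock(&tab_lock);
        return res;
    }

    int main(void)
    {
        printf("%s\n", find_or_insert("node_a"));  /* write path: inserts */
        printf("%s\n", find_or_insert("node_a"));  /* read path: hits */
        return 0;
    }
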
@@ -696,8 +710,12 @@ erts_set_this_node(Eterm sysname, Uint creation)
void erts_init_node_tables(void)
{
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
HashFunctions f;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
f.alloc = (HALLOC_FUN) dist_table_alloc;
@@ -719,9 +737,10 @@ void erts_init_node_tables(void)
erts_this_dist_entry->prev = NULL;
erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
- erts_smp_rwmtx_init_x(&erts_this_dist_entry->rwmtx,
- "dist_entry",
- make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_smp_rwmtx_init_opt_x(&erts_this_dist_entry->rwmtx,
+ &rwmtx_opt,
+ "dist_entry",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
erts_this_dist_entry->sysname = am_Noname;
erts_this_dist_entry->cid = NIL;
erts_this_dist_entry->connection_id = 0;
@@ -772,8 +791,8 @@ void erts_init_node_tables(void)
(void) hash_put(&erts_node_table, (void *) erts_this_node);
- erts_smp_rwmtx_init(&erts_node_table_rwmtx, "node_table");
- erts_smp_rwmtx_init(&erts_dist_table_rwmtx, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
references_atoms_need_init = 1;
}
@@ -1087,30 +1106,24 @@ insert_offheap2(ErlOffHeap *oh, void *arg)
static void
insert_offheap(ErlOffHeap *oh, int type, Eterm id)
{
- if(oh->externals) {
- ExternalThing *etp = oh->externals;
- while (etp) {
- insert_node(etp->node, type, id);
- etp = etp->next;
- }
- }
+ union erl_off_heap_ptr u;
+ struct insert_offheap2_arg a;
+ a.type = BIN_REF;
- if(oh->mso) {
- ProcBin *pb;
- struct insert_offheap2_arg a;
- a.type = BIN_REF;
- for(pb = oh->mso; pb; pb = pb->next) {
- if(IsMatchProgBinary(pb->val)) {
+ for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ if(IsMatchProgBinary(u.pb->val)) {
InsertedBin *ib;
int insert_bin = 1;
for (ib = inserted_bins; ib; ib = ib->next)
- if(ib->bin_val == pb->val) {
+ if(ib->bin_val == u.pb->val) {
insert_bin = 0;
break;
}
if (insert_bin) {
#if HALFWORD_HEAP
- UWord val = (UWord) pb->val;
+ UWord val = (UWord) u.pb->val;
DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE*2); /* extra place allocated */
#else
DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE);
@@ -1124,13 +1137,13 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
a.id = erts_bld_uword(&hp, NULL, (UWord) val);
#else
UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
- a.id = erts_bld_uint(&hp, NULL, (Uint) pb->val);
+ a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
#endif
- erts_match_prog_foreach_offheap(pb->val,
+ erts_match_prog_foreach_offheap(u.pb->val,
insert_offheap2,
(void *) &a);
nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
- nib->bin_val = pb->val;
+ nib->bin_val = u.pb->val;
nib->next = inserted_bins;
inserted_bins = nib;
#if HALFWORD_HEAP
@@ -1139,15 +1152,16 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
#endif
}
- }
+ }
+ break;
+ case FUN_SUBTAG:
+ break; /* No need to visit funs: they carry no node references */
+ default:
+ ASSERT(is_external_header(u.hdr->thing_word));
+ insert_node(u.ext->node, type, id);
+ break;
}
}
-
-#if 0
- if(oh->funs) {
- /* No need to */
- }
-#endif
}
static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
@@ -1289,6 +1303,7 @@ setup_reference_table(void)
for (i = 0; i < erts_max_processes; i++)
if (process_tab[i]) {
ErlMessage *msg;
+
/* Insert Heap */
insert_offheap(&(process_tab[i]->off_heap),
HEAP_REF,
@@ -1375,21 +1390,22 @@ setup_reference_table(void)
{ /* Add binaries stored elsewhere ... */
ErlOffHeap oh;
- ProcBin pb[2] = {{0},{0}};
- ProcBin *mso = NULL;
+ ProcBin pb[2];
int i = 0;
Binary *default_match_spec;
Binary *default_meta_match_spec;
- /* Only the ProcBin members val and next will be inspected
+ oh.first = NULL;
+ /* Only the ProcBin members thing_word, val and next will be inspected
(by insert_offheap()) */
#undef ADD_BINARY
-#define ADD_BINARY(Bin) \
- if ((Bin)) { \
- pb[i].val = (Bin); \
- pb[i].next = mso; \
- mso = &pb[i]; \
- i++; \
+#define ADD_BINARY(Bin) \
+ if ((Bin)) { \
+ pb[i].thing_word = REFC_BINARY_SUBTAG; \
+ pb[i].val = (Bin); \
+ pb[i].next = oh.first; \
+ oh.first = (struct erl_off_heap_header*) &pb[i]; \
+ i++; \
}
erts_get_default_trace_pattern(NULL,
@@ -1401,11 +1417,6 @@ setup_reference_table(void)
ADD_BINARY(default_match_spec);
ADD_BINARY(default_meta_match_spec);
- oh.mso = mso;
- oh.externals = NULL;
-#ifndef HYBRID /* FIND ME! */
- oh.funs = NULL;
-#endif
insert_offheap(&oh, BIN_REF, AM_match_spec);
#undef ADD_BINARY
}
diff --git a/erts/emulator/beam/erl_obsolete.c b/erts/emulator/beam/erl_obsolete.c
deleted file mode 100644
index 9c5a7c7ff9..0000000000
--- a/erts/emulator/beam/erl_obsolete.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_driver.h"
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * *
- * ------------------------- OBSOLETE! DO NOT USE! ------------------------- *
- * *
-\* */
-
-/* cut from ../obsolete/driver.h (since it doesn't mix well with other
- * headers from the emulator).
- */
-#ifdef __WIN32__
-#ifdef CONST
-# undef CONST
-#endif
-#endif
-
-#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
-# define _USING_PROTOTYPES_ 1
-# define _ANSI_ARGS_(x) x
-# define CONST const
-#else
-# define _ANSI_ARGS_(x) ()
-# define CONST
-#endif
-
-typedef void* erl_mutex_t;
-typedef void* erl_cond_t;
-typedef void* erl_thread_t;
-
-EXTERN erl_mutex_t erts_mutex_create _ANSI_ARGS_((void));
-EXTERN int erts_mutex_destroy _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_lock _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_unlock _ANSI_ARGS_((erl_mutex_t));
-
-EXTERN erl_cond_t erts_cond_create _ANSI_ARGS_((void));
-EXTERN int erts_cond_destroy _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_signal _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_broadcast _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_wait _ANSI_ARGS_((erl_cond_t, erl_mutex_t));
-EXTERN int erts_cond_timedwait _ANSI_ARGS_((erl_cond_t, erl_mutex_t, long));
-
-EXTERN int erts_thread_create _ANSI_ARGS_((erl_thread_t*,
- void* (*func)(void*),
- void* arg,
- int detached));
-EXTERN erl_thread_t erts_thread_self _ANSI_ARGS_((void));
-EXTERN void erts_thread_exit _ANSI_ARGS_((void*));
-EXTERN int erts_thread_join _ANSI_ARGS_((erl_thread_t, void**));
-EXTERN int erts_thread_kill _ANSI_ARGS_((erl_thread_t));
-
-/*
- * These functions implement the thread interface in ../obsolete/driver.h.
- * Do *not* use this interface! Within the emulator, use the erl_threads.h,
- * erl_smp.h, or ethread.h interface. From a driver use the thread interface
- * in erl_driver.h.
- */
-
-erl_mutex_t
-erts_mutex_create(void)
-{
- return (erl_mutex_t) erl_drv_mutex_create(NULL);
-}
-
-int
-erts_mutex_destroy(erl_mutex_t mtx)
-{
- erl_drv_mutex_destroy((ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_mutex_lock(erl_mutex_t mtx)
-{
- erl_drv_mutex_lock((ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_mutex_unlock(erl_mutex_t mtx)
-{
- erl_drv_mutex_unlock((ErlDrvMutex *) mtx);
- return 0;
-}
-
-erl_cond_t
-erts_cond_create(void)
-{
- return (erl_cond_t) erl_drv_cond_create(NULL);
-}
-
-int
-erts_cond_destroy(erl_cond_t cnd)
-{
- erl_drv_cond_destroy((ErlDrvCond *) cnd);
- return 0;
-}
-
-
-int
-erts_cond_signal(erl_cond_t cnd)
-{
- erl_drv_cond_signal((ErlDrvCond *) cnd);
- return 0;
-}
-
-int
-erts_cond_broadcast(erl_cond_t cnd)
-{
- erl_drv_cond_broadcast((ErlDrvCond *) cnd);
- return 0;
-}
-
-
-int
-erts_cond_wait(erl_cond_t cnd, erl_mutex_t mtx)
-{
- erl_drv_cond_wait((ErlDrvCond *) cnd, (ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_cond_timedwait(erl_cond_t cnd, erl_mutex_t mtx, long ms)
-{
- return ENOTSUP;
-}
-
-int
-erts_thread_create(erl_thread_t *tid,
- void* (*func)(void*),
- void* arg,
- int detached)
-{
- if (detached)
- return ENOTSUP;
- return erl_drv_thread_create(NULL, (ErlDrvTid *) tid, func, arg, NULL);
-}
-
-erl_thread_t
-erts_thread_self(void)
-{
- return (erl_thread_t) erl_drv_thread_self();
-}
-
-void
-erts_thread_exit(void *res)
-{
- erl_drv_thread_exit(res);
-}
-
-int
-erts_thread_join(erl_thread_t tid, void **respp)
-{
- return erl_drv_thread_join((ErlDrvTid) tid, respp);
-}
-
-int
-erts_thread_kill(erl_thread_t tid)
-{
- return ENOTSUP;
-}
-
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 967a14f0d1..c10724b951 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -625,6 +625,7 @@ erts_port_task_schedule(Eterm id,
if (!enq_port) {
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ erts_smp_runq_unlock(runq);
}
else {
enqueue_port(runq, pp);
@@ -634,9 +635,10 @@ erts_port_task_schedule(Eterm id,
profile_runnable_port(pp, am_active);
}
+ erts_smp_runq_unlock(runq);
+
erts_smp_notify_inc_runq(runq);
}
- erts_smp_runq_unlock(runq);
return 0;
}
@@ -944,8 +946,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
- erts_smp_notify_inc_runq(xrunq);
erts_smp_runq_unlock(xrunq);
+ erts_smp_notify_inc_runq(xrunq);
}
#endif
port_was_enqueued = 1;
@@ -1112,7 +1114,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
dequeue_port(from_rq, prt);
erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
enqueue_port(to_rq, prt);
- erts_smp_notify_inc_runq(to_rq);
return ERTS_MIGRATE_SUCCESS;
}
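
The hunks above reorder wake-ups so the run-queue lock is released before
erts_smp_notify_inc_runq() is called (and erts_port_migrate() stops notifying
altogether), so a woken scheduler cannot immediately block on the lock its
waker still holds. Schematically, using the calls visible in the hunks:

    /* before: wake while still holding the lock */
    enqueue_port(runq, pp);
    erts_smp_notify_inc_runq(runq);
    erts_smp_runq_unlock(runq);

    /* after: release the lock first, then wake */
    enqueue_port(runq, pp);
    erts_smp_runq_unlock(runq);
    erts_smp_notify_inc_runq(runq);
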
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 41031f5468..0e78727316 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -46,7 +46,13 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
-#define ERTS_SCHED_SLEEP_SPINCOUNT 10000
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \
+ (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT)
+#define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0
#define ERTS_WAKEUP_OTHER_LIMIT (100*CONTEXT_REDS/2)
#define ERTS_WAKEUP_OTHER_DEC 10
@@ -106,6 +112,10 @@ Uint erts_no_schedulers;
Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
+#ifdef ERTS_SMP
+Uint erts_max_main_threads;
+#endif
+
int erts_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -116,16 +126,34 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
int erts_disable_proc_not_running_opt;
-#define ERTS_SCHED_CHANGING_ONLINE 1
-#define ERTS_SCHED_CHANGING_MULTI_SCHED 2
+#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((long) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_MSB (((long) 1) << 1)
+#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((long) 1) << 2)
+
+#ifndef DEBUG
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic_set(&schdlr_sspnd.changing, (VAL))
+
+#else
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ long old_val__ = erts_smp_atomic_xchg(&schdlr_sspnd.changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+
+#endif
+
static struct {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- int changing;
int online;
int curr_online;
int wait_curr_online;
+ erts_smp_atomic_t changing;
erts_smp_atomic_t active;
struct {
erts_smp_atomic_t ongoing;
@@ -231,6 +259,17 @@ typedef union {
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+#ifdef ERTS_SMP
+
+typedef union {
+ ErtsSchedulerSleepInfo ssi;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
+} ErtsAlignedSchedulerSleepInfo;
+
+static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
+
+#endif
+
#ifndef BM_COUNTERS
static int processes_busy;
#endif
@@ -288,8 +327,15 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
-#define ERTS_RUNQ_IX(IX) (&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_SCHEDULER_IX(IX) (&erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_RUNQ_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \
+ &erts_aligned_run_queues[(IX)].runq)
+#define ERTS_SCHEDULER_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -353,6 +399,11 @@ static void signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size
#endif
+static int reader_group_lookup(int logical);
+static void create_tmp_cpu_topology_copy(erts_cpu_topology_t **cpudata,
+ int *cpudata_size);
+static void destroy_tmp_cpu_topology_copy(erts_cpu_topology_t *cpudata);
+
static void early_cpu_bind_init(void);
static void late_cpu_bind_init(void);
@@ -582,6 +633,76 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
#ifdef ERTS_SMP
+void
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
+{
+ switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_sys_schedule_interrupt(1);
+ break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_tse_set(ssi->event);
+ break;
+ case 0:
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
+ __FILE__, __LINE__);
+ break;
+ }
+}
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+void
+erts_smp_notify_check_children_needed(void)
+{
+ int i;
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ long aux_work;
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
+ aux_work = erts_smp_atomic_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
+ erts_sched_poke(ssi);
+ }
+}
+#endif
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+blockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
+ aux_work = erts_smp_atomic_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+ erts_check_children();
+ }
+#endif
+ }
+ return aux_work;
+}
+
+#endif
+
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+nonblockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
+ /* no non-blockable aux work types are implemented yet */
+ }
+ return aux_work;
+}
+#endif
+
static void
prepare_for_block(void *vrq)
{
@@ -634,9 +755,33 @@ erts_active_schedulers(void)
return as;
}
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
+#ifdef ERTS_SMP
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ }
+ return 0;
+#else
+ return !erts_port_task_have_outstanding_io_tasks();
+#endif
+}
+
#ifdef ERTS_SMP
static ERTS_INLINE void
+sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq->waiting < 0);
+ rq->waiting *= -1;
+}
+
+static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
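
prepare_for_sys_schedule() above guards the single OS poll slot with an
atomic exchange, and re-checks for outstanding I/O after claiming the slot to
close the window in which a task arrives between the first check and the
exchange. A self-contained C11 model of the same guard (all names here are
made up for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int doing_sys_schedule;
    static atomic_int outstanding_io;

    static bool try_claim_poller(void)
    {
        while (!atomic_load(&outstanding_io)
               && !atomic_exchange(&doing_sys_schedule, 1)) {
            if (!atomic_load(&outstanding_io))
                return true;                      /* claimed, still idle */
            atomic_store(&doing_sys_schedule, 0); /* pending: back off */
        }
        return false;
    }

    int main(void) { return try_claim_poller() ? 0 : 1; }
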
@@ -676,7 +821,11 @@ empty_runq(ErtsRunQueue *rq)
if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
long empty = erts_smp_atomic_read(&no_empty_run_queues);
- ASSERT(0 <= empty && empty < erts_no_run_queues);
+ /*
+ * For a short period of time no_empty_run_queues may have
+ * been increased twice for a specific run queue.
+ */
+ ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
erts_smp_atomic_inc(&no_empty_run_queues);
}
@@ -689,107 +838,314 @@ non_empty_runq(ErtsRunQueue *rq)
if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
long empty = erts_smp_atomic_read(&no_empty_run_queues);
- ASSERT(0 < empty && empty <= erts_no_run_queues);
+ /*
+ * For a short period of time no_empty_run_queues may have
+ * been increased twice for a specific run queue.
+ */
+ ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
erts_smp_atomic_dec(&no_empty_run_queues);
}
}
-static ERTS_INLINE int
-sched_spin_wake(ErtsRunQueue *rq)
+static long
+sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = 0;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_atomic_inc(&rq->spin_wake);
- return 1;
- }
- return 0;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (!(oflgs & ERTS_SSI_FLG_SUSPENDED));
+ return oflgs;
}
-static ERTS_INLINE int
-sched_spin_wake_all(ErtsRunQueue *rq)
+static long
+sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = ERTS_SSI_FLG_WAITING;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0)
- erts_smp_atomic_add(&rq->spin_wake, val);
- return val;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ } while (oflgs & ERTS_SSI_FLG_WAITING);
+ return oflgs;
+}
+
+static long
+sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ long until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
+
+static long
+sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
+{
+ long oflgs;
+ long nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+
+ if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ }
}
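
sched_prep_spin_wait(), sched_prep_cont_spin_wait() and sched_set_sleeptype()
above all share one shape: a cmpxchg loop that retries from the freshly
observed flag word until the transition succeeds or a disqualifying flag
shows up. A self-contained C11 skeleton of that loop (flag values are
illustrative, not the ERTS_SSI_* constants):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { WAITING = 1, SLEEPING = 2, SUSPENDED = 4 };

    /* Try to install WAITING|SLEEPING; give up if SUSPENDED appears.
     * Returns the flag word that ended the loop, like the ERTS helpers. */
    static long prep_wait(atomic_long *flags)
    {
        long xflgs = 0;                  /* value we expect to find */
        long nflgs = WAITING | SLEEPING; /* value we want to install */
        long oflgs;
        do {
            oflgs = xflgs;
            if (atomic_compare_exchange_strong(flags, &oflgs, nflgs))
                return nflgs;            /* transition succeeded */
            xflgs = oflgs;               /* retry from observed value */
        } while (!(oflgs & SUSPENDED));
        return oflgs;                    /* disqualified: suspended */
    }

    int main(void)
    {
        atomic_long flags = 0;
        printf("%ld\n", prep_wait(&flags));   /* prints 3 */
        return 0;
    }
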
+#define ERTS_SCHED_WAIT_WOKEN(FLGS) \
+ (((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
+ != ERTS_SSI_FLG_WAITING)
+
static void
-sched_sys_wait(Uint no, ErtsRunQueue *rq)
+scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
- long dt;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ int spincount;
+ long flgs;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
+
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ erts_smp_spin_lock(&rq->sleepers.lock);
+ flgs = sched_prep_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+ /* Go suspend instead... */
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+ return;
+ }
+
+ ssi->prev = NULL;
+ ssi->next = rq->sleepers.list;
+ if (rq->sleepers.list)
+ rq->sleepers.list->prev = ssi;
+ rq->sleepers.list = ssi;
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+
+ /*
+ * If all schedulers are waiting, one of them *should*
+ * be waiting in erl_sys_schedule()
+ */
+
+ if (!prepare_for_sys_schedule()) {
+
+ sched_waiting(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ tse_wait:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ tse_blockable_aux_work:
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+
+ while (1) {
- sched_waiting_sys(no, rq);
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_runq_unlock(rq);
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
- erl_sys_schedule(1); /* Might give us something to do */
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto tse_blockable_aux_work;
+ }
+#endif
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_runq_unlock(rq);
}
- }
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
+ erts_smp_runq_lock(rq);
+ sched_active(esdp->no, rq);
+
}
else {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ long dt;
+
+ erts_smp_atomic_set(&function_calls, 0);
+ *fcalls = 0;
+
+ sched_waiting_sys(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT;
+
+ while (spincount-- > 0) {
+
+ sys_poll_aux_work:
+
+ erl_sys_schedule(1); /* Might give us something to do */
+
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+
+ sys_aux_work:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ }
+
+ /*
+ * If we got new I/O tasks we aren't allowed to
+ * call erl_sys_schedule() until they have been handled.
+ */
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+ * Have to check that we still have I/O tasks; otherwise
+ * we have to continue checking for I/O...
+ */
+ if (!prepare_for_sys_schedule()) {
+ spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
+ goto tse_wait;
+ }
+ }
+ }
+
+ erts_smp_runq_lock(rq);
+
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
*/
- if (!erts_port_task_have_outstanding_io_tasks()) {
-#endif
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+ * Have to check that we still have I/O tasks; otherwise
+ * we have to wait in erl_sys_schedule() after all...
+ */
+ if (prepare_for_sys_schedule())
+ goto do_sys_schedule;
+
+ /*
+ * Not allowed to wait in erl_sys_schedule;
+ * do tse wait instead...
+ */
+ sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ erts_smp_runq_unlock(rq);
+ spincount = 0;
+ goto tse_wait;
+ }
+ else {
+ do_sys_schedule:
erts_sys_schedule_interrupt(0);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ if (!(flgs & ERTS_SSI_FLG_WAITING))
+ goto sys_locked_woken;
+ erts_smp_runq_unlock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ goto sys_poll_aux_work;
+ }
+
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+
erts_smp_runq_unlock(rq);
erl_sys_schedule(0);
@@ -797,134 +1153,103 @@ sched_sys_wait(Uint no, ErtsRunQueue *rq)
dt = do_time_read_and_reset();
if (dt) bump_timer(dt);
- erts_smp_runq_lock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_WAITING)
+ goto sys_aux_work;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ sys_woken:
+ erts_smp_runq_lock(rq);
+ sys_locked_woken:
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ sched_active_sys(esdp->no, rq);
}
}
-#endif
-
- sched_active_sys(no, rq);
-}
-static void
-sched_cnd_wait(Uint no, ErtsRunQueue *rq)
-{
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#endif
-
- sched_waiting(no, rq);
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
-#else
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_mtx_unlock(&rq->mtx);
-
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_mtx_unlock(&rq->mtx);
- }
- }
-
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val == 0) {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
- }
- else {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- }
-#endif
-
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
- sched_active(no, rq);
}
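scheduler_wait() above chooses between two sleep modes: a scheduler that manages to claim the polling role sleeps inside erl_sys_schedule(), all others sleep on their per-scheduler thread event. The claim goes through prepare_for_sys_schedule(), whose pre-move definition is still visible in removed form near the end of this diff: grab doing_sys_schedule with an atomic exchange, then re-check that polling is still permissible. A stand-alone sketch with illustrative names; have_outstanding_io_tasks() is a stand-in predicate:

#include <stdatomic.h>
#include <stdbool.h>

atomic_int doing_sys_schedule;

static bool have_outstanding_io_tasks(void) { return false; } /* stand-in */

bool
try_become_polling_scheduler(void)
{
    while (!have_outstanding_io_tasks()
           && atomic_exchange(&doing_sys_schedule, 1) == 0) {
        if (!have_outstanding_io_tasks())
            return true;                      /* role claimed, still valid */
        atomic_store(&doing_sys_schedule, 0); /* I/O arrived; back off */
    }
    return false;  /* someone else polls, or there is I/O to handle first */
}

The double check matters: outstanding I/O tasks may appear between the first test and the exchange, and a scheduler must never sit in erl_sys_schedule() while such tasks are pending.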
-static void
-wake_one_scheduler(void)
-{
- ASSERT(erts_common_run_queue);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(erts_common_run_queue));
- if (erts_common_run_queue->waiting) {
- if (!sched_spin_wake(erts_common_run_queue)) {
- if (erts_common_run_queue->waiting == -1) /* One scheduler waiting
- and doing so in
- sys_schedule */
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&erts_common_run_queue->cnd);
- }
+static ERTS_INLINE long
+ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ /* reset all flags but suspended */
+ long oflgs;
+ long nflgs = 0;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return oflgs;
+ nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ xflgs = oflgs;
}
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq)
+wake_scheduler(ErtsRunQueue *rq, int incq, int one)
{
- ASSERT(!erts_common_run_queue);
- ASSERT(-1 <= rq->waiting && rq->waiting <= 1);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- if (rq->waiting && !rq->woken) {
- if (!sched_spin_wake(rq)) {
- if (rq->waiting < 0)
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&rq->cnd);
+ int res;
+ ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepList *sl;
+
+ /*
+ * Keeping the run queue unlocked here is not strictly
+ * necessary for thread safety or deadlock prevention.
+ * It will, however, cost us performance if it is locked
+ * during wakeup of another scheduler, so all callers
+ * *should* invoke this without holding the run queue
+ * lock.
+ */
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
+
+ sl = &rq->sleepers;
+
+ erts_smp_spin_lock(&sl->lock);
+ ssi = sl->list;
+ if (!ssi)
+ erts_smp_spin_unlock(&sl->lock);
+ else if (one) {
+ long flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
}
- rq->woken = 1;
- if (incq)
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
+
+ res = sl->list != NULL;
+ erts_smp_spin_unlock(&sl->lock);
+
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+
+ if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
non_empty_runq(rq);
}
+ else {
+ sl->list = NULL;
+ erts_smp_spin_unlock(&sl->lock);
+ do {
+ ErtsSchedulerSleepInfo *wake_ssi = ssi;
+ ssi = ssi->next;
+ /* poke the detached entry; ssi has already advanced (and may be NULL) */
+ erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
+ } while (ssi);
+ }
}
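Note the two wakeup shapes above: waking one scheduler unlinks the head of rq->sleepers under the spinlock, while waking all detaches the entire list in O(1) and pokes the detached entries only after the lock is dropped, keeping the critical section minimal. A stand-alone model of the list handling (the lock itself is elided; names are illustrative):

#include <stdio.h>
#include <stddef.h>

typedef struct node { struct node *next, *prev; int id; } node;

static node *
detach_all(node **list)       /* wake-all: O(1) while the lock is held */
{
    node *snapshot = *list;
    *list = NULL;
    return snapshot;          /* walk and poke after the lock is dropped */
}

static node *
unlink_head(node **list)      /* wake-one */
{
    node *n = *list;
    if (n) {
        *list = n->next;
        if (n->next)
            n->next->prev = NULL;
    }
    return n;
}

int main(void)
{
    node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
    node *list = &a;
    a.next = &b; b.prev = &a;

    node *one = unlink_head(&list);           /* wake-one: pops scheduler 1 */
    printf("poke scheduler %d\n", one->id);

    for (node *n = detach_all(&list); n; n = n->next)
        printf("poke scheduler %d\n", n->id); /* wake-all, outside the lock */
    return 0;
}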
static void
wake_all_schedulers(void)
{
- if (erts_common_run_queue) {
- erts_smp_runq_lock(erts_common_run_queue);
- if (erts_common_run_queue->waiting) {
- if (erts_common_run_queue->waiting < 0)
- erts_sys_schedule_interrupt(1);
- sched_spin_wake_all(erts_common_run_queue);
- erts_smp_cnd_broadcast(&erts_common_run_queue->cnd);
- }
- erts_smp_runq_unlock(erts_common_run_queue);
- }
+ if (erts_common_run_queue)
+ wake_scheduler(erts_common_run_queue, 0, 0);
else {
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- wake_scheduler(rq, 0);
- erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
}
}
}
@@ -939,14 +1264,14 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
wrq = ERTS_RUNQ_IX(ix);
iflgs = erts_smp_atomic_read(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
- erts_smp_xrunq_lock(crq, wrq);
if (activate) {
if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ erts_smp_xrunq_lock(crq, wrq);
wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
+ erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0);
- erts_smp_xrunq_unlock(crq, wrq);
+ wake_scheduler(wrq, 0, 1);
return 1;
}
return 0;
@@ -992,15 +1317,13 @@ static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- wake_one_scheduler();
- else
- wake_scheduler(runq, 1);
+ if (runq)
+ wake_scheduler(runq, 1, 1);
#endif
}
void
-erts_smp_notify_inc_runq__(ErtsRunQueue *runq)
+erts_smp_notify_inc_runq(ErtsRunQueue *runq)
{
smp_notify_inc_runq(runq);
}
@@ -1146,20 +1469,23 @@ static void
evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
{
Port *prt;
+ int notify_to_rq = 0;
int prio;
int prt_locked = 0;
int rq_locked = 0;
int evac_rq_locked = 1;
+ ErtsMigrateResult mres;
erts_smp_runq_lock(evac_rq);
+ erts_smp_atomic_bor(&evac_rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
-
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1187,9 +1513,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
/* Evacuate scheduled ports */
prt = evac_rq->ports.start;
while (prt) {
- (void) erts_port_migrate(prt, &prt_locked,
+ mres = erts_port_migrate(prt, &prt_locked,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (prt_locked)
erts_smp_port_unlock(prt);
if (!evac_rq_locked) {
@@ -1218,9 +1546,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
goto end_of_proc;
}
- (void) erts_proc_migrate(proc, &proc_locks,
+ mres = erts_proc_migrate(proc, &proc_locks,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (proc_locks)
erts_smp_proc_unlock(proc, proc_locks);
if (!evac_rq_locked) {
@@ -1252,10 +1582,13 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (rq_locked)
erts_smp_runq_unlock(rq);
- if (!evac_rq_locked)
- erts_smp_runq_lock(evac_rq);
- wake_scheduler(evac_rq, 0);
- erts_smp_runq_unlock(evac_rq);
+ if (evac_rq_locked)
+ erts_smp_runq_unlock(evac_rq);
+
+ if (notify_to_rq)
+ smp_notify_inc_runq(rq);
+
+ wake_scheduler(evac_rq, 0, 1);
}
static int
@@ -1483,31 +1816,6 @@ try_steal_task(ErtsRunQueue *rq)
return res;
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
-void
-erts_smp_notify_check_children_needed(void)
-{
- int i;
- for (i = 0; i < erts_no_schedulers; i++) {
- erts_smp_runq_lock(ERTS_SCHEDULER_IX(i)->run_queue);
- ERTS_SCHEDULER_IX(i)->check_children = 1;
- if (!erts_common_run_queue)
- wake_scheduler(ERTS_SCHEDULER_IX(i)->run_queue, 0);
- erts_smp_runq_unlock(ERTS_SCHEDULER_IX(i)->run_queue);
- }
- if (ongoing_multi_scheduling_block()) {
- /* Also blocked schedulers need to check children */
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- for (i = 0; i < erts_no_schedulers; i++)
- ERTS_SCHEDULER_IX(i)->blocked_check_children = 1;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
- if (erts_common_run_queue)
- wake_all_schedulers();
-}
-#endif
-
/* Run queue balancing */
typedef struct {
@@ -2100,6 +2408,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_atomic_init(&no_empty_run_queues, 0);
#endif
+ erts_no_run_queues = n;
+
for (ix = 0; ix < n; ix++) {
int pix, rix;
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
@@ -2114,8 +2424,10 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
- erts_smp_atomic_init(&rq->spin_waiter, 0);
- erts_smp_atomic_init(&rq->spin_wake, 0);
+#ifdef ERTS_SMP
+ erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
+ rq->sleepers.list = NULL;
+#endif
rq->waiting = 0;
rq->woken = 0;
@@ -2166,7 +2478,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
- erts_no_run_queues = n;
#ifdef ERTS_SMP
@@ -2181,9 +2492,34 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#endif
+ n = (int) no_schedulers;
+ erts_no_schedulers = n;
+
+#ifdef ERTS_SMP
+ /* Create and initialize scheduler sleep info */
+
+ aligned_sched_sleep_info = erts_alloc(ERTS_ALC_T_SCHDLR_SLP_INFO,
+ (sizeof(ErtsAlignedSchedulerSleepInfo)
+ *(n+1)));
+ if ((((Uint) aligned_sched_sleep_info) & ERTS_CACHE_LINE_MASK) != 0)
+ aligned_sched_sleep_info = ((ErtsAlignedSchedulerSleepInfo *)
+ ((((Uint) aligned_sched_sleep_info)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ for (ix = 0; ix < n; ix++) {
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+#if 0 /* no need to initialize these... */
+ ssi->next = NULL;
+ ssi->prev = NULL;
+#endif
+ erts_smp_atomic_init(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in sched_thread_func */
+ erts_smp_atomic_init(&ssi->aux_work, 0);
+ }
+#endif
+
/* Create and initialize scheduler specific data */
- n = (int) no_schedulers;
erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
(sizeof(ErtsAlignedSchedulerData)
*(n+1)));
@@ -2200,6 +2536,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
+ esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->free_process = NULL;
#if HALFWORD_HEAP
/* Registers need to be heap allocated (correct memory range) for tracing to work */
@@ -2228,11 +2565,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->check_children = 0;
- esdp->blocked_check_children = 0;
-#endif
- erts_smp_atomic_init(&esdp->suspended, 0);
erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2241,7 +2573,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- schdlr_sspnd.changing = 0;
+ erts_smp_atomic_init(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
@@ -2264,7 +2596,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic_set(&(ERTS_SCHEDULER_IX(ix)->suspended), 1);
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2275,7 +2608,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
schdlr_sspnd.wait_curr_online = no_schedulers_online;
schdlr_sspnd.curr_online *= 2; /* Bootstrapping... */
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
erts_smp_atomic_init(&doing_sys_schedule, 0);
@@ -2423,13 +2757,115 @@ susp_sched_resume_block(void *unused)
}
static void
+scheduler_ix_resume_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long oflgs;
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, 0, xflgs);
+ if (oflgs == xflgs) {
+ erts_sched_finish_poke(ssi, oflgs);
+ break;
+ }
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+}
+
+static long
+sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = xpct;
+
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+
+ return oflgs;
+}
+
+static long
+sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
+
+static long
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ }
+}
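As in sched_set_sleeptype() earlier, erts_tse_reset() is called before the sleeping state is published with the cmpxchg: any waker that observes the TSE_SLEEPING bit afterwards will set the event, so a wakeup arriving between the flag transition and erts_tse_wait() is latched rather than lost. A stand-alone model of such a latch-style event, using pthreads as a stand-in for the erts_tse_* API (an assumption; the real implementation differs):

#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t cnd;
    int signalled;
} tse;

static tse ev = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

static void tse_reset(tse *e)
{
    pthread_mutex_lock(&e->mtx);
    e->signalled = 0;
    pthread_mutex_unlock(&e->mtx);
}

static void tse_set(tse *e)
{
    pthread_mutex_lock(&e->mtx);
    e->signalled = 1;            /* latched: a later wait returns at once */
    pthread_cond_signal(&e->cnd);
    pthread_mutex_unlock(&e->mtx);
}

static void tse_wait(tse *e)
{
    pthread_mutex_lock(&e->mtx);
    while (!e->signalled)
        pthread_cond_wait(&e->cnd, &e->mtx);
    pthread_mutex_unlock(&e->mtx);
}

static void *waker(void *arg)
{
    (void) arg;
    tse_set(&ev);                /* waker saw the sleeping bit; poke */
    return NULL;
}

int main(void)
{
    pthread_t t;
    tse_reset(&ev);              /* 1: reset before publishing the state */
    /* 2: publish TSE_SLEEPING via cmpxchg (elided in this model) */
    pthread_create(&t, NULL, waker, NULL);
    tse_wait(&ev);               /* 3: a set() racing in between is latched */
    pthread_join(t, NULL);
    puts("woken");
    return 0;
}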
+
+static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
+ long flgs;
+ int changing;
long no = (long) esdp->no;
ErtsRunQueue *rq = esdp->run_queue;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
int curr_online = 1;
int wake = 0;
+ int reset_read_group = 0;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
/*
* Schedulers may be suspended in two different ways:
@@ -2451,113 +2887,151 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (scheduler2cpu_map[esdp->no].bound_id >= 0
&& erts_unbind_from_cpu(erts_cpuinfo) == 0) {
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
+ reset_read_group = 1;
}
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (reset_read_group)
+ erts_smp_rwmtx_set_reader_group(0);
+
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(0, 0);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
- ASSERT(active_schedulers >= 1);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED) {
- if (active_schedulers == schdlr_sspnd.msb.wait_active)
- wake = 1;
- if (active_schedulers == 1)
- schdlr_sspnd.changing = 0;
- }
-
- while (1) {
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children;
- erts_smp_runq_lock(esdp->run_queue);
- check_children = esdp->check_children;
- esdp->check_children = 0;
- erts_smp_runq_unlock(esdp->run_queue);
- if (check_children) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_check_children();
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == schdlr_sspnd.msb.wait_active)
+ wake = 1;
+ if (active_schedulers == 1) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
}
-#endif
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE) {
- int changed = 0;
- if (no > schdlr_sspnd.online && curr_online) {
- schdlr_sspnd.curr_online--;
- curr_online = 0;
- changed = 1;
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > schdlr_sspnd.online && curr_online) {
+ schdlr_sspnd.curr_online--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= schdlr_sspnd.online && !curr_online) {
+ schdlr_sspnd.curr_online++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
+ wake = 1;
+ if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
}
- else if (no <= schdlr_sspnd.online && !curr_online) {
- schdlr_sspnd.curr_online++;
- curr_online = 1;
- changed = 1;
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
}
- if (changed
- && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- wake = 1;
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online)
- schdlr_sspnd.changing = 0;
- }
- if (wake) {
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- wake = 0;
- }
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ|ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ blockable_aux_work:
+ blockable_aux_work(esdp, ssi, aux_work);
+#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (1) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ while (1) {
+ long flgs;
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->blocked_check_children)
- break;
+ flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto blockable_aux_work;
+ }
#endif
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ }
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE)
- break;
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->blocked_check_children = 0;
-#endif
+ active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && schdlr_sspnd.online == active_schedulers) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- }
+ ASSERT(no <= schdlr_sspnd.online);
+ ASSERT(!erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
- active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED
- && schdlr_sspnd.online == active_schedulers) {
- schdlr_sspnd.changing = 0;
}
+
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ ASSERT(curr_online);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_active);
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(1, (int) esdp->no);
+
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
@@ -2622,8 +3096,10 @@ erts_schedulers_state(Uint *total,
int yield_allowed)
{
int res;
+ long changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (yield_allowed && schdlr_sspnd.changing)
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
*active = *online = schdlr_sspnd.online;
@@ -2643,6 +3119,7 @@ erts_set_schedulers_online(Process *p,
Sint *old_no)
{
int ix, res, no, have_unlocked_plocks;
+ long changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -2652,7 +3129,8 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
else {
@@ -2661,17 +3139,19 @@ erts_set_schedulers_online(Process *p,
res = ERTS_SCHDLR_SSPND_DONE;
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
schdlr_sspnd.online = no;
if (no > online) {
int ix;
schdlr_sspnd.wait_curr_online = no;
- if (ongoing_multi_scheduling_block())
- /* No schedulers to resume */;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
else if (erts_common_run_queue) {
for (ix = online; ix < no; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 0);
+ scheduler_ix_resume_wake(ix);
}
else {
if (plocks) {
@@ -2685,6 +3165,7 @@ erts_set_schedulers_online(Process *p,
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/*
* Spread evacuation paths among all online
@@ -2699,7 +3180,6 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
res = ERTS_SCHDLR_SSPND_DONE;
}
else /* if (no < online) */ {
@@ -2716,12 +3196,17 @@ erts_set_schedulers_online(Process *p,
schdlr_sspnd.wait_curr_online = no+1;
}
- if (ongoing_multi_scheduling_block())
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- else if (erts_common_run_queue) {
+ if (ongoing_multi_scheduling_block()) {
for (ix = no; ix < online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 1);
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else if (erts_common_run_queue) {
+ for (ix = no; ix < online; ix++) {
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic_bor(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
wake_all_schedulers();
}
else {
@@ -2748,7 +3233,10 @@ erts_set_schedulers_online(Process *p,
erts_smp_atomic_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ERTS_FOREACH_OP_RUNQ(rq, wake_scheduler(rq, 0));
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq, 0, 1);
+ }
}
}
@@ -2762,6 +3250,12 @@ erts_set_schedulers_online(Process *p,
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -2776,11 +3270,12 @@ ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
int ix, res, have_unlocked_plocks = 0;
+ long changing;
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
else if (on) { /* ------ BLOCK ------ */
@@ -2794,19 +3289,22 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
else {
+ int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
+ ASSERT(0 == erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
- if (schdlr_sspnd.online == 1) {
+ if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
if (p->scheduler_data->no == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 1;
@@ -2820,17 +3318,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 1);
+ for (ix = 1; ix < online; ix++)
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
erts_smp_mtx_lock(&balance_info.update_mtx);
erts_smp_atomic_set(&balance_info.used_runqs, 1);
- for (ix = 0; ix < schdlr_sspnd.online; ix++) {
+ for (ix = 0; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
erts_smp_runq_unlock(rq);
}
@@ -2855,6 +3355,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -2898,7 +3405,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.msb.procs)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
#ifdef DEBUG
ERTS_FOREACH_RUNQ(rq,
{
@@ -2925,13 +3432,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
- schdlr_sspnd.changing = 0;
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 0);
+ erts_smp_atomic_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
else {
int online = schdlr_sspnd.online;
@@ -2948,6 +3455,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/* Spread evacuation paths among all online run queues */
@@ -2963,7 +3471,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
res = ERTS_SCHDLR_SSPND_DONE;
}
@@ -3035,18 +3542,34 @@ erts_multi_scheduling_blockers(Process *p)
static void *
sched_thread_func(void *vesdp)
{
+#ifdef ERTS_SMP
+ Uint no = ((ErtsSchedulerData *) vesdp)->no;
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
- Uint no = ((ErtsSchedulerData *) vesdp)->no;
erts_snprintf(&buf[0], 31, "scheduler %bpu", no);
erts_lc_set_thread_name(&buf[0]);
}
#endif
- erts_alloc_reg_scheduler_id(((ErtsSchedulerData *) vesdp)->no);
+ erts_alloc_reg_scheduler_id(no);
erts_tsd_set(sched_data_key, vesdp);
#ifdef ERTS_SMP
+
+ if (no <= erts_max_main_threads) {
+ erts_thr_set_main_status(1, (int) no);
+ if (erts_reader_groups) {
+ int rg = (int) no;
+ if (rg > erts_reader_groups)
+ rg = (((int) no) - 1) % erts_reader_groups + 1;
+ erts_smp_rwmtx_set_reader_group(rg);
+ }
+ }
+
erts_proc_lock_prepare_proc_lock_waiter();
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+
+
#endif
erts_register_blockable_thread();
#ifdef HIPE
@@ -3055,30 +3578,30 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE);
+ ASSERT(erts_smp_atomic_read(&schdlr_sspnd.changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
- schdlr_sspnd.curr_online--;
+ if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (((ErtsSchedulerData *) vesdp)->no != 1)
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ }
- if (((ErtsSchedulerData *) vesdp)->no != 1) {
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- schdlr_sspnd.changing = 0;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ if (((ErtsSchedulerData *) vesdp)->no == 1) {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
}
- }
- else if (schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- schdlr_sspnd.changing = 0;
- else {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- ASSERT(!schdlr_sspnd.changing);
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -3428,6 +3951,7 @@ processor_order_cmp(const void *vx, const void *vy)
static void
check_cpu_bind(ErtsSchedulerData *esdp)
{
+ int rg = 0;
int res;
int cpu_id;
erts_smp_runq_unlock(esdp->run_queue);
@@ -3446,7 +3970,7 @@ check_cpu_bind(ErtsSchedulerData *esdp)
goto unbind;
}
}
- else if (cpu_id < 0 && scheduler2cpu_map[esdp->no].bound_id >= 0) {
+ else if (cpu_id < 0) /* && scheduler2cpu_map[esdp->no].bound_id >= 0) */ {
unbind:
/* Get rid of old binding */
res = erts_unbind_from_cpu(erts_cpuinfo);
@@ -3459,6 +3983,12 @@ check_cpu_bind(ErtsSchedulerData *esdp)
erts_send_error_to_logger_nogl(dsbufp);
}
}
+ if (erts_reader_groups) {
+ if (esdp->cpu_id >= 0)
+ rg = reader_group_lookup(esdp->cpu_id);
+ else
+ rg = (((int) esdp->no) - 1) % erts_reader_groups + 1;
+ }
erts_smp_runq_lock(esdp->run_queue);
#ifdef ERTS_SMP
if (erts_common_run_queue)
@@ -3469,6 +3999,8 @@ check_cpu_bind(ErtsSchedulerData *esdp)
#endif
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (erts_reader_groups)
+ erts_smp_rwmtx_set_reader_group(rg);
}
static void
@@ -3497,11 +4029,13 @@ signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
wake_all_schedulers();
}
else {
- ERTS_FOREACH_RUNQ(rq,
- {
+ for (s_ix = 0; s_ix < erts_no_run_queues; s_ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(s_ix);
+ erts_smp_runq_lock(rq);
rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- wake_scheduler(rq, 0);
- });
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
+ }
}
#else
check_cpu_bind(erts_get_scheduler_data());
@@ -3541,6 +4075,490 @@ erts_init_scheduler_bind_type(char *how)
return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS;
}
+/*
+ * reader groups map
+ */
+
+typedef struct {
+ int level[ERTS_TOPOLOGY_MAX_DEPTH+1];
+} erts_avail_cput;
+
+typedef struct {
+ int *map;
+ int size;
+ int groups;
+} erts_reader_groups_map_test;
+
+typedef struct {
+ int id;
+ int sub_levels;
+ int reader_groups;
+} erts_rg_count_t;
+
+typedef struct {
+ int logical;
+ int reader_group;
+} erts_reader_groups_map_t;
+
+typedef struct {
+ erts_reader_groups_map_t *map;
+ int map_size;
+ int logical_processors;
+ int groups;
+} erts_make_reader_groups_map_test;
+
+static int reader_groups_available_cpu_check;
+static int reader_groups_logical_processors;
+static int reader_groups_map_size;
+static erts_reader_groups_map_t *reader_groups_map;
+
+#define ERTS_TOPOLOGY_RG ERTS_TOPOLOGY_MAX_DEPTH
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test);
+
+static Eterm
+get_reader_groups_map(Process *c_p,
+ erts_reader_groups_map_t *map,
+ int map_size,
+ int logical_processors)
+{
+#ifdef DEBUG
+ Eterm *endp;
+#endif
+ Eterm res = NIL, tuple;
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(c_p, logical_processors*(2+3));
+#ifdef DEBUG
+ endp = hp + logical_processors*(2+3);
+#endif
+ for (i = map_size - 1; i >= 0; i--) {
+ if (map[i].logical >= 0) {
+ tuple = TUPLE2(hp,
+ make_small(map[i].logical),
+ make_small(map[i].reader_group));
+ hp += 3;
+ res = CONS(hp, tuple, res);
+ hp += 2;
+ }
+ }
+ ASSERT(hp == endp);
+ return res;
+}
+
+Eterm
+erts_debug_reader_groups_map(Process *c_p, int groups)
+{
+ Eterm res;
+ erts_make_reader_groups_map_test test;
+
+ test.groups = groups;
+ make_reader_groups_map(&test);
+ if (!test.map)
+ res = NIL;
+ else {
+ res = get_reader_groups_map(c_p,
+ test.map,
+ test.map_size,
+ test.logical_processors);
+ erts_free(ERTS_ALC_T_TMP, test.map);
+ }
+ return res;
+}
+
+
+Eterm
+erts_get_reader_groups_map(Process *c_p)
+{
+ Eterm res;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ res = get_reader_groups_map(c_p,
+ reader_groups_map,
+ reader_groups_map_size,
+ reader_groups_logical_processors);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return res;
+}
+
+static void
+make_available_cpu_topology(erts_avail_cput *no,
+ erts_avail_cput *avail,
+ erts_cpu_topology_t *cpudata,
+ int *size,
+ int test)
+{
+ int len = *size;
+ erts_cpu_topology_t last;
+ int a, i, j;
+
+ no->level[ERTS_TOPOLOGY_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_CORE] = -1;
+ no->level[ERTS_TOPOLOGY_THREAD] = -1;
+ no->level[ERTS_TOPOLOGY_LOGICAL] = -1;
+
+ last.node = INT_MIN;
+ last.processor = INT_MIN;
+ last.processor_node = INT_MIN;
+ last.core = INT_MIN;
+ last.thread = INT_MIN;
+ last.logical = INT_MIN;
+
+ a = 0;
+
+ for (i = 0; i < len; i++) {
+
+ if (!test && !erts_is_cpu_available(erts_cpuinfo, cpudata[i].logical))
+ continue;
+
+ if (last.node != cpudata[i].node)
+ goto node;
+ if (last.processor != cpudata[i].processor)
+ goto processor;
+ if (last.processor_node != cpudata[i].processor_node)
+ goto processor_node;
+ if (last.core != cpudata[i].core)
+ goto core;
+ ASSERT(last.thread != cpudata[i].thread);
+ goto thread;
+
+ node:
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ processor:
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ processor_node:
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ core:
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ thread:
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ for (j = 0; j < ERTS_TOPOLOGY_LOGICAL; j++)
+ avail[a].level[j] = no->level[j];
+
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL] = cpudata[i].logical;
+ avail[a].level[ERTS_TOPOLOGY_RG] = 0;
+
+ ASSERT(last.logical != cpudata[i].logical);
+
+ last = cpudata[i];
+ a++;
+ }
+
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ *size = a;
+}
+
+static int
+reader_group_lookup(int logical)
+{
+ int start = logical % reader_groups_map_size;
+ int ix = start;
+
+ do {
+ if (reader_groups_map[ix].logical == logical) {
+ ASSERT(reader_groups_map[ix].reader_group > 0);
+ return reader_groups_map[ix].reader_group;
+ }
+ ix++;
+ if (ix == reader_groups_map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Logical cpu id %d not found\n", logical);
+}
+
+static void
+reader_group_insert(erts_reader_groups_map_t *map, int map_size,
+ int logical, int reader_group)
+{
+ int start = logical % map_size;
+ int ix = start;
+
+ do {
+ if (map[ix].logical < 0) {
+ map[ix].logical = logical;
+ map[ix].reader_group = reader_group;
+ return;
+ }
+ ix++;
+ if (ix == map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Reader groups map full\n");
+}
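reader_group_lookup() and reader_group_insert() above form a small open-addressed hash table with linear probing, keyed on the logical cpu id modulo the table size. make_reader_groups_map() below sizes the map as avail_sz*2+1, so the table is always less than half full, probe sequences stay short, and lookup of a present key always terminates. A stand-alone model (sizes and ids are illustrative):

#include <stdio.h>

#define MAP_SZ 11 /* e.g. 5 cpus -> 5*2+1 slots */

typedef struct { int logical; int reader_group; } entry;

static entry map[MAP_SZ];

static void
insert(int logical, int group)
{
    int ix = logical % MAP_SZ;
    while (map[ix].logical >= 0)       /* probe to the first free slot */
        ix = (ix + 1) % MAP_SZ;
    map[ix].logical = logical;
    map[ix].reader_group = group;
}

static int
lookup(int logical)
{
    int ix = logical % MAP_SZ;
    while (map[ix].logical != logical) /* terminates: table never fills up */
        ix = (ix + 1) % MAP_SZ;
    return map[ix].reader_group;
}

int main(void)
{
    int i;
    for (i = 0; i < MAP_SZ; i++)
        map[i].logical = -1;           /* -1 marks a free slot, as above */
    insert(0, 1);
    insert(11, 1);                     /* collides with 0; probes to slot 1 */
    insert(3, 2);
    printf("cpu 11 -> group %d\n", lookup(11));
    return 0;
}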
+
+
+static int
+sub_levels(erts_rg_count_t *rgc, int level, int aix, int avail_sz, erts_avail_cput *avail)
+{
+ int sub_level = level+1;
+ int last = -1;
+ rgc->sub_levels = 0;
+
+ do {
+ if (last != avail[aix].level[sub_level]) {
+ rgc->sub_levels++;
+ last = avail[aix].level[sub_level];
+ }
+ aix++;
+ }
+ while (aix < avail_sz && rgc->id == avail[aix].level[level]);
+ rgc->reader_groups = 0;
+ return aix;
+}
+
+static int
+write_reader_groups(int *rgp, erts_rg_count_t *rgcp,
+ int level, int a,
+ int avail_sz, erts_avail_cput *avail)
+{
+ int rg = *rgp;
+ int sub_level = level+1;
+ int sl_per_gr = rgcp->sub_levels / rgcp->reader_groups;
+ int xsl = rgcp->sub_levels % rgcp->reader_groups;
+ int sls = 0;
+ int last = -1;
+ int xsl_rg_lim = (rgcp->reader_groups - xsl) + rg + 1;
+
+ ASSERT(level < 0 || avail[a].level[level] == rgcp->id);
+
+ do {
+ if (last != avail[a].level[sub_level]) {
+ if (!sls) {
+ sls = sl_per_gr;
+ rg++;
+ if (rg >= xsl_rg_lim)
+ sls++;
+ }
+ last = avail[a].level[sub_level];
+ sls--;
+ }
+ avail[a].level[ERTS_TOPOLOGY_RG] = rg;
+ a++;
+ } while (a < avail_sz && (level < 0
+ || avail[a].level[level] == rgcp->id));
+
+ ASSERT(rgcp->reader_groups == rg - *rgp);
+
+ *rgp = rg;
+
+ return a;
+}
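The split arithmetic in write_reader_groups() gives every group S/G sub-levels, where S = rgcp->sub_levels and G = rgcp->reader_groups, and once the running group number reaches xsl_rg_lim the remaining S%G groups take one extra, so the shares always sum to S. For example, 10 cores over 4 reader groups yields group sizes 2, 2, 3, 3. A minimal model of the distribution:

#include <stdio.h>

int main(void)
{
    int S = 10, G = 4;             /* e.g. 10 cores, 4 reader groups */
    int per = S / G, extra = S % G;
    int g;
    for (g = 0; g < G; g++)        /* the last `extra' groups get one more */
        printf("group %d: %d cores\n", g + 1, per + (g >= G - extra));
    return 0;
}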
+
+static int
+rg_count_sub_levels_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ if (x->sub_levels != y->sub_levels)
+ return y->sub_levels - x->sub_levels;
+ return x->id - y->id;
+}
+
+static int
+rg_count_id_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ return x->id - y->id;
+}
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test)
+{
+ int i, spread_level, avail_sz;
+ erts_avail_cput no, *avail;
+ erts_cpu_topology_t *cpudata;
+ erts_reader_groups_map_t *map;
+ int map_sz;
+ int groups = erts_reader_groups;
+
+ if (test) {
+ test->map = NULL;
+ test->map_size = 0;
+ groups = test->groups;
+ }
+
+ if (!groups)
+ return;
+
+ if (!test) {
+ if (reader_groups_map)
+ erts_free(ERTS_ALC_T_RDR_GRPS_MAP, reader_groups_map);
+
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &avail_sz);
+
+ if (!cpudata)
+ return;
+
+ cpu_bind_order_sort(cpudata,
+ avail_sz,
+ ERTS_CPU_BIND_NO_SPREAD,
+ 1);
+
+ avail = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(erts_avail_cput)*avail_sz);
+
+ make_available_cpu_topology(&no, avail, cpudata,
+ &avail_sz, test != NULL);
+
+ destroy_tmp_cpu_topology_copy(cpudata);
+
+ map_sz = avail_sz*2+1;
+
+ if (test) {
+ map = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ test->map = map;
+ test->map_size = map_sz;
+ test->logical_processors = avail_sz;
+ }
+ else {
+ map = erts_alloc(ERTS_ALC_T_RDR_GRPS_MAP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ reader_groups_map = map;
+ reader_groups_logical_processors = avail_sz;
+ reader_groups_map_size = map_sz;
+
+ }
+
+ for (i = 0; i < map_sz; i++) {
+ map[i].logical = -1;
+ map[i].reader_group = 0;
+ }
+
+ spread_level = ERTS_TOPOLOGY_CORE;
+ for (i = ERTS_TOPOLOGY_NODE; i < ERTS_TOPOLOGY_THREAD; i++) {
+ if (no.level[i] > groups) {
+ spread_level = i;
+ break;
+ }
+ }
+
+ if (no.level[spread_level] <= groups) {
+ int a, rg, last = -1;
+ rg = 0;
+ ASSERT(spread_level == ERTS_TOPOLOGY_CORE);
+ for (a = 0; a < avail_sz; a++) {
+ if (last != avail[a].level[spread_level]) {
+ rg++;
+ last = avail[a].level[spread_level];
+ }
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ rg);
+ }
+ }
+ else { /* groups < no.level[spread_level] */
+ erts_rg_count_t *rg_count;
+ int a, rg, tl, toplevels;
+
+ tl = spread_level-1;
+
+ if (spread_level == ERTS_TOPOLOGY_NODE)
+ toplevels = 1;
+ else
+ toplevels = no.level[tl];
+
+ rg_count = erts_alloc(ERTS_ALC_T_TMP,
+ toplevels*sizeof(erts_rg_count_t));
+
+ if (toplevels == 1) {
+ rg_count[0].id = 0;
+ rg_count[0].sub_levels = no.level[spread_level];
+ rg_count[0].reader_groups = groups;
+ }
+ else {
+ int rgs_per_tl, rgs;
+ rgs = groups;
+ rgs_per_tl = rgs / toplevels;
+
+ a = 0;
+ for (i = 0; i < toplevels; i++) {
+ rg_count[i].id = avail[a].level[tl];
+ a = sub_levels(&rg_count[i], tl, a, avail_sz, avail);
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_sub_levels_compare);
+
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels < rgs_per_tl) {
+ rg_count[i].reader_groups = rg_count[i].sub_levels;
+ rgs -= rg_count[i].sub_levels;
+ }
+ else {
+ rg_count[i].reader_groups = rgs_per_tl;
+ rgs -= rgs_per_tl;
+ }
+ }
+
+ while (rgs > 0) {
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels == rg_count[i].reader_groups)
+ break;
+ else {
+ rg_count[i].reader_groups++;
+ if (--rgs == 0)
+ break;
+ }
+ }
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_id_compare);
+ }
+
+ a = i = rg = 0;
+ while (a < avail_sz) {
+ a = write_reader_groups(&rg, &rg_count[i], tl,
+ a, avail_sz, avail);
+ i++;
+ }
+
+ ASSERT(groups == rg);
+
+ for (a = 0; a < avail_sz; a++)
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ avail[a].level[ERTS_TOPOLOGY_RG]);
+
+ erts_free(ERTS_ALC_T_TMP, rg_count);
+ }
+
+ erts_free(ERTS_ALC_T_TMP, avail);
+}
+
+/*
+ * CPU topology
+ */
+
typedef struct {
int *id;
int used;
@@ -4054,6 +5072,8 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
sizeof(erts_cpu_topology_t)*cpudata_size);
}
+ make_reader_groups_map(NULL);
+
signal_schedulers_bind_change(cpudata, cpudata_size);
done:
@@ -4457,6 +5477,11 @@ early_cpu_bind_init(void)
cpu_bind_order = ERTS_CPU_BIND_UNDEFINED;
+ reader_groups_available_cpu_check = 1;
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+
if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
|| ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
system_cpudata_size)) {
@@ -4492,6 +5517,8 @@ late_cpu_bind_init(void)
: ERTS_CPU_BIND_NONE);
}
+ make_reader_groups_map(NULL);
+
if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
erts_cpu_topology_t *cpudata;
int cpudata_size;
@@ -4502,6 +5529,39 @@ late_cpu_bind_init(void)
}
}
+int
+erts_update_cpu_info(void)
+{
+ int changed;
+ erts_smp_rwmtx_rwlock(&erts_cpu_bind_rwmtx);
+ changed = erts_cpu_info_update(erts_cpuinfo);
+ if (changed) {
+ erts_cpu_topology_t *cpudata;
+ int cpudata_size;
+ erts_free(ERTS_ALC_T_CPUDATA, system_cpudata);
+
+ system_cpudata_size = erts_get_cpu_topology_size(erts_cpuinfo);
+ system_cpudata = erts_alloc(ERTS_ALC_T_CPUDATA,
+ (sizeof(erts_cpu_topology_t)
+ * system_cpudata_size));
+
+ if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
+ || ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
+ system_cpudata_size)) {
+ erts_free(ERTS_ALC_T_CPUDATA, system_cpudata);
+ system_cpudata = NULL;
+ system_cpudata_size = 0;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
+ ASSERT(cpudata);
+ signal_schedulers_bind_change(cpudata, cpudata_size);
+ destroy_tmp_cpu_topology_copy(cpudata);
+ }
+ erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ return changed;
+}
+
#ifdef ERTS_SMP
static void
@@ -5357,7 +6417,7 @@ dequeue_process(ErtsRunQueue *runq, Process *p)
}
/* schedule a process */
-static ERTS_INLINE void
+static ERTS_INLINE ErtsRunQueue *
internal_add_to_runq(ErtsRunQueue *runq, Process *p)
{
Uint32 prev_status = p->status;
@@ -5368,12 +6428,12 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
- return;
+ return NULL;
else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
ASSERT(p->status != P_SUSPENDED);
ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- return;
+ return NULL;
}
ASSERT(!p->scheduler_data);
#endif
@@ -5412,20 +6472,23 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
profile_runnable_proc(p, am_active);
}
- smp_notify_inc_runq(add_runq);
-
if (add_runq != runq)
erts_smp_runq_unlock(add_runq);
+
+ return add_runq;
}
void
erts_add_to_runq(Process *p)
{
+ ErtsRunQueue *notify_runq;
ErtsRunQueue *runq = erts_get_runq_proc(p);
erts_smp_runq_lock(runq);
- internal_add_to_runq(runq, p);
+ notify_runq = internal_add_to_runq(runq, p);
erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(notify_runq);
+
}
/* Possibly remove a scheduled process we need to suspend */
@@ -5564,8 +6627,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
p->run_queue = to_rq;
enqueue_process(to_rq, p);
- smp_notify_inc_runq(to_rq);
-
return ERTS_MIGRATE_SUCCESS;
}
#endif /* ERTS_SMP */
@@ -5762,30 +6823,6 @@ erts_set_process_priority(Process *p, Eterm new_value)
return old_value;
}
-#ifdef ERTS_SMP
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- while (!erts_port_task_have_outstanding_io_tasks()
- && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- return 0;
-}
-
-#else
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- return !erts_port_task_have_outstanding_io_tasks();
-}
-
-#endif
-
/* note that P_RUNNING is only set so that we don't try to remove
** running processes from the schedule queue if they exit - a running
** process not being in the schedule queue!!
@@ -5920,8 +6957,11 @@ Process *schedule(Process *p, int calls)
p->status_flags &= ~ERTS_PROC_SFLG_RUNNING;
if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
+ ErtsRunQueue *notify_runq;
p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
+ if (notify_runq != rq)
+ smp_notify_inc_runq(notify_runq);
}
#endif
@@ -5989,7 +7029,10 @@ Process *schedule(Process *p, int calls)
| ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
@@ -5998,12 +7041,21 @@ Process *schedule(Process *p, int calls)
}
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->check_children) {
- esdp->check_children = 0;
- erts_smp_runq_unlock(rq);
- erts_check_children();
- erts_smp_runq_lock(rq);
+#if defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
+ {
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work) {
+ erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+ erts_smp_runq_lock(rq);
+ }
}
#endif
@@ -6035,7 +7087,10 @@ Process *schedule(Process *p, int calls)
if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
}
@@ -6052,17 +7107,7 @@ Process *schedule(Process *p, int calls)
}
}
- if (prepare_for_sys_schedule()) {
- erts_smp_atomic_set(&function_calls, 0);
- fcalls = 0;
- sched_sys_wait(esdp->no, rq);
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- else {
- /* If all schedulers are waiting, one of them *should*
- be waiting in erl_sys_schedule() */
- sched_cnd_wait(esdp->no, rq);
- }
+ scheduler_wait(&fcalls, esdp, rq);
non_empty_runq(rq);
@@ -6124,7 +7169,7 @@ Process *schedule(Process *p, int calls)
else {
if (erts_common_run_queue) {
if (erts_common_run_queue->waiting)
- wake_one_scheduler();
+ wake_scheduler(erts_common_run_queue, 0, 1);
}
else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
@@ -6439,8 +7484,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
else
rq->misc.start = molp;
rq->misc.end = molp;
- smp_notify_inc_runq(rq);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(rq);
}
static void
@@ -6682,7 +7727,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
- ErtsRunQueue *rq;
+ ErtsRunQueue *rq, *notify_runq;
Process *p;
Sint arity; /* Number of arguments. */
#ifndef HYBRID
@@ -6761,11 +7806,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/*
* Must initialize binary lists here before copying binaries to process.
*/
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
heap_need +=
@@ -6855,7 +7896,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->group_leader =
IS_CONST(parent->group_leader)
? parent->group_leader
- : STORE_NC(&p->htop, &p->off_heap.externals, parent->group_leader);
+ : STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
erts_get_default_tracing(&p->trace_flags, &p->tracer_proc);
@@ -6999,10 +8040,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
p->status = P_WAITING;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(notify_runq);
+
res = p->id;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
@@ -7056,11 +8099,7 @@ void erts_init_empty_process(Process *p)
memset(&(p->u.tm), 0, sizeof(ErlTimer));
#endif
p->next = NULL;
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
p->reg = NULL;
p->heap_sz = 0;
@@ -7207,11 +8246,7 @@ erts_debug_verify_clean_empty_process(Process* p)
/* Thing that erts_cleanup_empty_process() cleans up */
- ASSERT(p->off_heap.mso == NULL);
-#ifndef HYBRID /* FIND ME! */
- ASSERT(p->off_heap.funs == NULL);
-#endif
- ASSERT(p->off_heap.externals == NULL);
+ ASSERT(p->off_heap.first == NULL);
ASSERT(p->off_heap.overhead == 0);
ASSERT(p->mbuf == NULL);
@@ -7225,11 +8260,7 @@ erts_cleanup_empty_process(Process* p)
/* We only check fields that are known to be used... */
erts_cleanup_offheap(&p->off_heap);
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
if (p->mbuf != NULL) {
@@ -7266,7 +8297,7 @@ delete_process(Process* p)
* The off-heap (mso) list should not be used anymore, but if it is, make sure that
* we'll notice.
*/
- p->off_heap.mso = (void *) 0x8DEFFACD;
+ p->off_heap.first = (void *) 0x8DEFFACD;
if (p->arg_reg != p->def_arg_reg) {
erts_free(ERTS_ALC_T_ARG_REG, p->arg_reg);
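
[Editor's note: this commit repeatedly replaces the three per-process off-heap chains (mso binaries, funs, externals) with a single list rooted at off_heap.first. The sketch below shows how such a unified chain can work when every off-heap object carries a 'next' field at one shared offset; the types are illustrative, not the exact ERTS declarations.]

    #include <stddef.h>

    typedef unsigned long Eterm_;

    /* Assumed common prefix: the middle fields differ per object kind,
     * but 'next' must sit at the same offset in each so one pointer can
     * chain them all; the thing_word tag tells a sweeper what it found. */
    struct off_heap_header {
        Eterm_ thing_word;
        struct off_heap_header *next;
    };

    struct off_heap {
        struct off_heap_header *first;
        unsigned long overhead;
    };

    /* Walk the single chain; a real sweeper would switch on the tag. */
    static unsigned count_off_heap(struct off_heap *oh)
    {
        unsigned n = 0;
        for (struct off_heap_header *p = oh->first; p; p = p->next)
            n++;
        return n;
    }
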
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8f9f7f004e..0cc4a715ad 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -89,6 +89,7 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
#ifdef ERTS_SMP
+extern Uint erts_max_main_threads;
#include "erl_bits.h"
#endif
@@ -219,6 +220,51 @@ typedef enum {
ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
} ErtsMigrateResult;
+#define ERTS_SSI_FLG_SLEEPING (((long) 1) << 0)
+#define ERTS_SSI_FLG_POLL_SLEEPING (((long) 1) << 1)
+#define ERTS_SSI_FLG_TSE_SLEEPING (((long) 1) << 2)
+#define ERTS_SSI_FLG_WAITING (((long) 1) << 3)
+#define ERTS_SSI_FLG_SUSPENDED (((long) 1) << 4)
+
+#define ERTS_SSI_FLGS_SLEEP_TYPE \
+ (ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
+
+#define ERTS_SSI_FLGS_SLEEP \
+ (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLGS_SLEEP_TYPE)
+
+#define ERTS_SSI_FLGS_ALL \
+ (ERTS_SSI_FLGS_SLEEP \
+ | ERTS_SSI_FLG_WAITING \
+ | ERTS_SSI_FLG_SUSPENDED)
+
+
+#if !defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ && defined(ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN)
+#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+#endif
+
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((long) 1) << 0)
+
+#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
+ (ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
+#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
+ (0)
+
+typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
+
+typedef struct {
+ erts_smp_spinlock_t lock;
+ ErtsSchedulerSleepInfo *list;
+} ErtsSchedulerSleepList;
+
+struct ErtsSchedulerSleepInfo_ {
+ ErtsSchedulerSleepInfo *next;
+ ErtsSchedulerSleepInfo *prev;
+ erts_smp_atomic_t flags;
+ erts_tse_t *event;
+ erts_smp_atomic_t aux_work;
+};
+
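
[Editor's note: the new ErtsSchedulerSleepInfo keeps the whole sleep state in one atomic flags word, so both going to sleep and being poked are flag transitions. A hedged C11 rendering of those transitions; it mirrors the macros above, not the actual scheduler code.]

    #include <stdatomic.h>

    #define SSI_FLG_SLEEPING       (1L << 0)
    #define SSI_FLG_POLL_SLEEPING  (1L << 1)
    #define SSI_FLG_TSE_SLEEPING   (1L << 2)
    #define SSI_FLG_WAITING        (1L << 3)
    #define SSI_FLGS_SLEEP \
        (SSI_FLG_SLEEPING | SSI_FLG_POLL_SLEEPING | SSI_FLG_TSE_SLEEPING)

    /* Scheduler side: announce that we are about to sleep on the event. */
    static void announce_tse_sleep(atomic_long *flags)
    {
        atomic_fetch_or(flags, SSI_FLG_SLEEPING
                               | SSI_FLG_TSE_SLEEPING
                               | SSI_FLG_WAITING);
    }

    /* Waker side: clear the sleep bits; if any were set, the sleeper's
     * event must be fired (compare erts_sched_poke() later in the patch). */
    static int poke(atomic_long *flags)
    {
        long old = atomic_fetch_and(flags, ~SSI_FLGS_SLEEP);
        return (old & SSI_FLG_SLEEPING) != 0;   /* 1 => wake the event */
    }
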
/* times to reschedule low prio process before running */
#define RESCHEDULE_LOW 8
@@ -271,8 +317,9 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- erts_smp_atomic_t spin_waiter;
- erts_smp_atomic_t spin_wake;
+#ifdef ERTS_SMP
+ ErtsSchedulerSleepList sleepers;
+#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
@@ -353,6 +400,7 @@ struct ErtsSchedulerData_ {
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
+ ErtsSchedulerSleepInfo *ssi;
Process *free_process;
#endif
#if !HEAP_ON_C_STACK
@@ -374,11 +422,6 @@ struct ErtsSchedulerData_ {
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children; /* run queue mutex */
- int blocked_check_children; /* schdlr_sspnd mutex */
-#endif
- erts_smp_atomic_t suspended; /* Only used when common run queue */
erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
#endif
};
@@ -828,7 +871,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_INSLPQUEUE (1 << 1) /* Set if in timer queue */
#define F_TIMO (1 << 2) /* Set if timeout */
#define F_HEAP_GROW (1 << 3)
-#define F_NEED_FULLSWEEP (1 << 4) /* If process has old binaries & funs. */
+#define F_NEED_FULLSWEEP (1 << 4)
#define F_USING_DB (1 << 5) /* If have created tables */
#define F_DISTRIBUTION (1 << 6) /* Process used in distribution */
#define F_USING_DDLL (1 << 7) /* Process has used the DDLL interface */
@@ -982,6 +1025,7 @@ int erts_init_scheduler_bind_type(char *how);
#define ERTS_INIT_CPU_TOPOLOGY_MISSING 9
int erts_init_cpu_topology(char *topology_str);
+int erts_update_cpu_info(void);
void erts_pre_init_process(void);
void erts_late_init_process(void);
@@ -1085,6 +1129,9 @@ void erts_handle_pending_exit(Process *, ErtsProcLocks);
void erts_deep_process_dump(int, void *);
+Eterm erts_get_reader_groups_map(Process *c_p);
+Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
+
Sint erts_test_next_pid(int, Uint);
Eterm erts_debug_processes(Process *c_p);
Eterm erts_debug_processes_bif_info(Process *c_p);
@@ -1509,29 +1556,30 @@ extern int erts_disable_proc_not_running_opt;
#define ERTS_MIN_PROCESSES 16
#endif
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-ERTS_GLB_INLINE void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-void erts_smp_notify_inc_runq__(ErtsRunQueue *runq);
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#ifdef ERTS_SMP
+void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, long);
+ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- if (runq->waiting)
- erts_smp_notify_inc_runq__(runq);
-#endif
+ long flags = erts_smp_atomic_read(&ssi->flags);
+ ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
+ || (flags & ERTS_SSI_FLG_WAITING));
+ if (flags & ERTS_SSI_FLG_SLEEPING) {
+ flags = erts_smp_atomic_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ erts_sched_finish_poke(ssi, flags);
+ }
}
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
-
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* #ifdef ERTS_SMP */
+
#include "erl_process_lock.h"
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 52440fb635..a4d12139e9 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -71,9 +71,12 @@ const Process erts_proc_lock_busy;
#ifdef ERTS_SMP
-/*#define ERTS_PROC_LOCK_SPIN_ON_GATE*/
-#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 16000
+#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000
+#define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32
#define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
+#define ERTS_PROC_LOCK_AUX_SPIN_COUNT 50
+
+#define ERTS_PROC_LOCK_SPIN_UNTIL_YIELD 25
#ifdef ERTS_PROC_LOCK_DEBUG
#define ERTS_PROC_LOCK_HARD_DEBUG
@@ -83,32 +86,19 @@ const Process erts_proc_lock_busy;
static void check_queue(erts_proc_lock_t *lck);
#endif
-
-typedef struct erts_proc_lock_waiter_t_ erts_proc_lock_waiter_t;
-struct erts_proc_lock_waiter_t_ {
- erts_proc_lock_waiter_t *next;
- erts_proc_lock_waiter_t *prev;
- ErtsProcLocks wait_locks;
- erts_smp_gate_t gate;
- erts_proc_lock_queues_t *queues;
-};
+#if SIZEOF_INT < 4
+#error "The size of the 'uflgs' field of the erts_tse_t type is too small"
+#endif
struct erts_proc_lock_queues_t_ {
erts_proc_lock_queues_t *next;
- erts_proc_lock_waiter_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-};
-
-struct erts_proc_lock_thr_spec_data_t_ {
- erts_proc_lock_queues_t *qs;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
};
static erts_proc_lock_queues_t zeroqs = {0};
-static erts_smp_spinlock_t wtr_lock;
-static erts_proc_lock_waiter_t *waiter_free_list;
+static erts_smp_spinlock_t qs_lock;
static erts_proc_lock_queues_t *queue_free_list;
-static erts_tsd_key_t waiter_key;
#ifdef ERTS_ENABLE_LOCK_CHECK
static struct {
@@ -122,35 +112,26 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
static int proc_lock_spin_count;
-static int proc_lock_trans_spin_cost;
+static int aux_thr_proc_lock_spin_count;
-static void cleanup_waiter(void);
+static void cleanup_tse(void);
void
erts_init_proc_lock(void)
{
int i;
int cpus;
- erts_smp_spinlock_init(&wtr_lock, "proc_lck_wtr_alloc");
+ erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_mtx_init_x(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i));
-#else
- erts_smp_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
-#else
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck, "pix_lock", make_small(i));
+ erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
+ "pix_lock", make_small(i));
#else
erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
#endif
-#endif
}
- waiter_free_list = NULL;
queue_free_list = NULL;
- erts_tsd_key_create(&waiter_key);
- erts_thr_install_exit_handler(cleanup_waiter);
+ erts_thr_install_exit_handler(cleanup_tse);
#ifdef ERTS_ENABLE_LOCK_CHECK
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
@@ -158,86 +139,106 @@ erts_init_proc_lock(void)
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
#endif
cpus = erts_get_cpu_configured(erts_cpuinfo);
- if (cpus > 1)
- proc_lock_spin_count = (ERTS_PROC_LOCK_SPIN_COUNT_BASE
- * ((int) erts_no_schedulers));
- else if (cpus == 1)
- proc_lock_spin_count = 0;
- else /* No of cpus unknown. Assume multi proc, but be conservative. */
+ if (cpus > 1) {
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
- if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
- proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
- proc_lock_trans_spin_cost = proc_lock_spin_count/20;
-}
-
-static ERTS_INLINE erts_proc_lock_waiter_t *
-alloc_wtr(void)
-{
- erts_proc_lock_waiter_t *wtr;
- erts_smp_spin_lock(&wtr_lock);
- wtr = waiter_free_list;
- if (wtr) {
- waiter_free_list = wtr->next;
- ERTS_LC_ASSERT(queue_free_list);
- wtr->queues = queue_free_list;
- queue_free_list = wtr->queues->next;
- erts_smp_spin_unlock(&wtr_lock);
+ proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC
+ * ((int) erts_no_schedulers));
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT;
}
- else {
- erts_smp_spin_unlock(&wtr_lock);
- wtr = erts_alloc(ERTS_ALC_T_PROC_LCK_WTR,
- sizeof(erts_proc_lock_waiter_t));
- erts_smp_gate_init(&wtr->gate);
- wtr->wait_locks = (ErtsProcLocks) 0;
- wtr->queues = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
- sizeof(erts_proc_lock_queues_t));
- sys_memcpy((void *) wtr->queues,
- (void *) &zeroqs,
- sizeof(erts_proc_lock_queues_t));
+ else if (cpus == 1) {
+ proc_lock_spin_count = 0;
+ aux_thr_proc_lock_spin_count = 0;
}
- return wtr;
+ else { /* Number of CPUs unknown. Assume a multiprocessor, but be conservative. */
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE/2;
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT/2;
+ }
+ if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
}
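
[Editor's note: the rewritten initialization picks separate spin budgets for scheduler and auxiliary threads and scales the scheduler budget with the number of schedulers. The same selection logic restated as stand-alone C; constants as in the hunk, and treating cpus < 1 as "unknown" is an assumption about erts_get_cpu_configured().]

    #define SPIN_COUNT_MAX        2000
    #define SPIN_COUNT_SCHED_INC  32
    #define SPIN_COUNT_BASE       1000
    #define AUX_SPIN_COUNT        50

    static void pick_spin_counts(int cpus, int no_schedulers,
                                 int *main_spin, int *aux_spin)
    {
        if (cpus > 1) {
            *main_spin = SPIN_COUNT_BASE + SPIN_COUNT_SCHED_INC * no_schedulers;
            *aux_spin  = AUX_SPIN_COUNT;
        } else if (cpus == 1) {
            *main_spin = 0;              /* spinning is pointless on 1 CPU */
            *aux_spin  = 0;
        } else {                         /* unknown: assume SMP, conservatively */
            *main_spin = SPIN_COUNT_BASE / 2;
            *aux_spin  = AUX_SPIN_COUNT / 2;
        }
        if (*main_spin > SPIN_COUNT_MAX)
            *main_spin = SPIN_COUNT_MAX;
    }
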
#ifdef ERTS_ENABLE_LOCK_CHECK
static void
-check_unused_waiter(erts_proc_lock_waiter_t *wtr)
+check_unused_tse(erts_tse_t *wtr)
{
int i;
- ERTS_LC_ASSERT(wtr->wait_locks == 0);
+ erts_proc_lock_queues_t *queues = wtr->udata;
+ ERTS_LC_ASSERT(wtr->uflgs == 0);
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- ERTS_LC_ASSERT(!wtr->queues->queue[i]);
+ ERTS_LC_ASSERT(!queues->queue[i]);
}
-#define CHECK_UNUSED_WAITER(W) check_unused_waiter((W))
+#define CHECK_UNUSED_TSE(W) check_unused_tse((W))
#else
-#define CHECK_UNUSED_WAITER(W)
+#define CHECK_UNUSED_TSE(W)
#endif
+static ERTS_INLINE erts_tse_t *
+tse_fetch(erts_pix_lock_t *pix_lock)
+{
+ erts_tse_t *tse = erts_tse_fetch();
+ if (!tse->udata) {
+ erts_proc_lock_queues_t *qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_unlock(pix_lock);
+#endif
+ erts_smp_spin_lock(&qs_lock);
+ qs = queue_free_list;
+ if (qs) {
+ queue_free_list = queue_free_list->next;
+ erts_smp_spin_unlock(&qs_lock);
+ }
+ else {
+ erts_smp_spin_unlock(&qs_lock);
+ qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
+ sizeof(erts_proc_lock_queues_t));
+ sys_memcpy((void *) qs,
+ (void *) &zeroqs,
+ sizeof(erts_proc_lock_queues_t));
+ }
+ tse->udata = qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_lock(pix_lock);
+#endif
+ }
+ tse->uflgs = 0;
+ return tse;
+}
static ERTS_INLINE void
-free_wtr(erts_proc_lock_waiter_t *wtr)
+tse_return(erts_tse_t *tse, int force_free_q)
{
- CHECK_UNUSED_WAITER(wtr);
- erts_smp_spin_lock(&wtr_lock);
- wtr->next = waiter_free_list;
- waiter_free_list = wtr;
- wtr->queues->next = queue_free_list;
- queue_free_list = wtr->queues;
- erts_smp_spin_unlock(&wtr_lock);
+ CHECK_UNUSED_TSE(tse);
+ if (force_free_q || erts_tse_is_tmp(tse)) {
+ erts_proc_lock_queues_t *qs = tse->udata;
+ ASSERT(qs);
+ erts_smp_spin_lock(&qs_lock);
+ qs->next = queue_free_list;
+ queue_free_list = qs;
+ erts_smp_spin_unlock(&qs_lock);
+ tse->udata = NULL;
+ }
+ erts_tse_return(tse);
}
void
erts_proc_lock_prepare_proc_lock_waiter(void)
{
- erts_tsd_set(waiter_key, (void *) alloc_wtr());
+ tse_return(tse_fetch(NULL), 0);
}
static void
-cleanup_waiter(void)
+cleanup_tse(void)
{
- erts_proc_lock_waiter_t *wtr = erts_tsd_get(waiter_key);
- if (wtr)
- free_wtr(wtr);
+ erts_tse_t *tse = erts_tse_fetch();
+ if (tse) {
+ if (tse->udata)
+ tse_return(tse, 1);
+ else
+ erts_tse_return(tse);
+ }
}
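
[Editor's note: the waiter-allocation scheme changes from a dedicated erts_proc_lock_waiter_t pool to borrowing the calling thread's event object (erts_tse_t) and caching the lock-queue block in its udata field. A short usage sketch of that discipline, in terms of the tse_fetch()/tse_return() helpers defined in this hunk; it is not stand-alone code.]

    /* Usage sketch only: tse_fetch()/tse_return() are the static helpers
     * added above, so this fragment depends on this file. */
    static void with_waiter_sketch(void)
    {
        erts_tse_t *tse = tse_fetch(NULL);  /* NULL: no pix lock held yet */
        /* ... enqueue tse on the per-lock wait queues and block on it ... */
        tse_return(tse, 0);                 /* 0: a permanent thread keeps
                                               its cached queue block */
    }
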
@@ -250,7 +251,7 @@ cleanup_waiter(void)
static ERTS_INLINE void
enqueue_waiter(erts_proc_lock_queues_t *qs,
int ix,
- erts_proc_lock_waiter_t *wtr)
+ erts_tse_t *wtr)
{
if (!qs->queue[ix]) {
qs->queue[ix] = wtr;
@@ -266,10 +267,10 @@ enqueue_waiter(erts_proc_lock_queues_t *qs,
}
}
-static erts_proc_lock_waiter_t *
+static erts_tse_t *
dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
{
- erts_proc_lock_waiter_t *wtr = qs->queue[ix];
+ erts_tse_t *wtr = qs->queue[ix];
ERTS_LC_ASSERT(qs->queue[ix]);
if (wtr->next == wtr) {
ERTS_LC_ASSERT(qs->queue[ix]->prev == wtr);
@@ -295,10 +296,10 @@ dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
* lock.
*/
static ERTS_INLINE void
-try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
+try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
{
ErtsProcLocks got_locks = (ErtsProcLocks) 0;
- ErtsProcLocks locks = wtr->wait_locks;
+ ErtsProcLocks locks = wtr->uflgs;
int lock_no;
ERTS_LC_ASSERT(lck->queues);
@@ -334,7 +335,7 @@ try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
}
}
- wtr->wait_locks &= ~got_locks;
+ wtr->uflgs &= ~got_locks;
}
/*
@@ -350,8 +351,8 @@ transfer_locks(Process *p,
int unlock)
{
int transferred = 0;
- erts_proc_lock_waiter_t *wake = NULL;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wake = NULL;
+ erts_tse_t *wtr;
ErtsProcLocks unset_waiter = 0;
ErtsProcLocks tlocks = trnsfr_lcks;
int lock_no;
@@ -377,11 +378,11 @@ transfer_locks(Process *p,
ERTS_LC_ASSERT(wtr);
if (!qs->queue[lock_no])
unset_waiter |= lock;
- ERTS_LC_ASSERT(wtr->wait_locks & lock);
- wtr->wait_locks &= ~lock;
- if (wtr->wait_locks)
+ ERTS_LC_ASSERT(wtr->uflgs & lock);
+ wtr->uflgs &= ~lock;
+ if (wtr->uflgs)
try_aquire(&p->lock, wtr);
- if (!wtr->wait_locks) {
+ if (!wtr->uflgs) {
/*
* The other thread got all locks it needs;
* need to wake it up.
@@ -412,9 +413,10 @@ transfer_locks(Process *p,
erts_pix_unlock(pix_lock);
do {
- erts_proc_lock_waiter_t *tmp = wake;
+ erts_tse_t *tmp = wake;
wake = wake->next;
- erts_smp_gate_let_through(&tmp->gate, 1);
+ erts_atomic_set(&tmp->uaflgs, 0);
+ erts_tse_set(tmp);
} while (wake);
if (!unlock)
@@ -462,26 +464,16 @@ wait_for_locks(Process *p,
ErtsProcLocks olflgs)
{
erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
- int tsd;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
+ erts_proc_lock_queues_t *qs;
/* Acquire a waiter object on which this thread can wait. */
- wtr = erts_tsd_get(waiter_key);
- if (wtr)
- tsd = 1;
- else {
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_unlock(pix_lock);
-#endif
- wtr = alloc_wtr();
- tsd = 0;
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_lock(pix_lock);
-#endif
- }
+ wtr = tse_fetch(pix_lock);
/* Record which locks this waiter needs. */
- wtr->wait_locks = need_locks;
+ wtr->uflgs = need_locks;
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -489,14 +481,16 @@ wait_for_locks(Process *p,
ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
+ qs = wtr->udata;
+ ASSERT(qs);
/* Provide the process with waiter queues, if it doesn't have one. */
if (!p->lock.queues) {
- wtr->queues->next = NULL;
- p->lock.queues = wtr->queues;
+ qs->next = NULL;
+ p->lock.queues = qs;
}
else {
- wtr->queues->next = p->lock.queues->next;
- p->lock.queues->next = wtr->queues;
+ qs->next = p->lock.queues->next;
+ p->lock.queues->next = qs;
}
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
@@ -506,46 +500,59 @@ wait_for_locks(Process *p,
/* Try to acquire locks one at a time in lock order and set wait flag */
try_aquire(&p->lock, wtr);
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
check_queue(&p->lock);
#endif
- if (wtr->wait_locks) { /* We didn't get them all; need to wait... */
- /* Got to wait for locks... */
+ if (wtr->uflgs) {
+ /* We didn't get them all; need to wait... */
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
+ erts_atomic_set(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
- /*
- * Wait for needed locks. When we return all needed locks have
- * have been acquired by other threads and transfered to us.
- */
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- erts_smp_gate_swait(&wtr->gate, proc_lock_spin_count);
-#else
- erts_smp_gate_wait(&wtr->gate);
-#endif
+ while (1) {
+ int res;
+ erts_tse_reset(wtr);
+
+ if (erts_atomic_read(&wtr->uaflgs) == 0)
+ break;
+
+ /*
+ * Wait for needed locks. When we are woken, all needed locks
+ * have been acquired by other threads and transferred to us.
+ * However, we need to be prepared for spurious wakeups.
+ */
+ do {
+ res = erts_tse_wait(wtr); /* might return EINTR */
+ } while (res != 0);
+ }
erts_pix_lock(pix_lock);
+
+ ASSERT(wtr->uflgs == 0);
}
/* Recover some queues to store in the waiter. */
ERTS_LC_ASSERT(p->lock.queues);
if (p->lock.queues->next) {
- wtr->queues = p->lock.queues->next;
- p->lock.queues->next = wtr->queues->next;
+ qs = p->lock.queues->next;
+ p->lock.queues->next = qs->next;
}
else {
- wtr->queues = p->lock.queues;
+ qs = p->lock.queues;
p->lock.queues = NULL;
}
+ wtr->udata = qs;
erts_pix_unlock(pix_lock);
ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
- if (tsd)
- CHECK_UNUSED_WAITER(wtr);
- else
- free_wtr(wtr);
+ tse_return(wtr, 0);
}
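
[Editor's note: the gate wait is replaced by a reset/recheck/wait loop on the event, gated by the uaflgs atomic, which has to tolerate both spurious wakeups and EINTR. The same shape in self-contained form; ev_reset/ev_wait/ev_set are stand-ins for erts_tse_reset/erts_tse_wait/erts_tse_set.]

    #include <stdatomic.h>

    extern void ev_reset(void);  /* stand-in for erts_tse_reset() */
    extern int  ev_wait(void);   /* stand-in for erts_tse_wait(); may return EINTR */
    extern void ev_set(void);    /* stand-in for erts_tse_set()   */

    static atomic_int waiting;   /* 1 while the grant is still pending */

    /* Waiter side: reset the event, recheck the flag, then sleep; looping
     * makes both EINTR and spurious wakeups harmless. */
    void wait_until_granted(void)
    {
        atomic_store(&waiting, 1);
        for (;;) {
            ev_reset();
            if (atomic_load(&waiting) == 0)
                break;                    /* granted before we slept */
            while (ev_wait() != 0)
                ;                         /* retry on EINTR */
        }
    }

    /* Waker side: clear the flag *before* firing the event, exactly as
     * transfer_locks() does above. */
    void grant(void)
    {
        atomic_store(&waiting, 0);
        ev_set();
    }
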
/*
@@ -563,52 +570,57 @@ erts_proc_lock_failed(Process *p,
ErtsProcLocks locks,
ErtsProcLocks old_lflgs)
{
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- int spin_count = 0;
-#else
- int spin_count = proc_lock_spin_count;
-#endif
-
+ int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ int thr_spin_count;
+ int spin_count;
ErtsProcLocks need_locks = locks;
ErtsProcLocks olflgs = old_lflgs;
- while (need_locks != 0)
- {
- ErtsProcLocks can_grab = in_order_locks(olflgs, need_locks);
+ if (erts_thr_get_main_status())
+ thr_spin_count = proc_lock_spin_count;
+ else
+ thr_spin_count = aux_thr_proc_lock_spin_count;
+
+ spin_count = thr_spin_count;
+
+ while (need_locks != 0) {
+ ErtsProcLocks can_grab;
+
+ can_grab = in_order_locks(olflgs, need_locks);
- if (can_grab == 0)
- {
+ if (can_grab == 0) {
/* Someone already has the lowest-numbered lock we want. */
- if (spin_count-- <= 0)
- {
+ if (spin_count-- <= 0) {
/* Too many retries, give up and sleep for the lock. */
wait_for_locks(p, pixlck, locks, need_locks, olflgs);
return;
}
+ ERTS_SPIN_BODY;
+
+ if (--until_yield == 0) {
+ until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+
olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
}
- else
- {
+ else {
/* Try to grab all of the grabbable locks at once with cmpxchg. */
ErtsProcLocks grabbed = olflgs | can_grab;
ErtsProcLocks nflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, grabbed, olflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);
- if (nflgs == olflgs)
- {
+ if (nflgs == olflgs) {
/* Success! We grabbed the 'can_grab' locks. */
olflgs = grabbed;
need_locks &= ~can_grab;
-#ifndef ERTS_PROC_LOCK_SPIN_ON_GATE
/* Since we made progress, reset the spin count. */
- spin_count = proc_lock_spin_count;
-#endif
+ spin_count = thr_spin_count;
}
- else
- {
+ else {
/* Compare-and-exchange failed, try again. */
olflgs = nflgs;
}
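
[Editor's note: erts_proc_lock_failed() now spins a bounded number of times, executing ERTS_SPIN_BODY each iteration and yielding every ERTS_PROC_LOCK_SPIN_UNTIL_YIELD iterations before finally sleeping. A condensed, self-contained rendering of that control flow, with a plain atomic CAS standing in for the lock-flag cmpxchg.]

    #include <stdatomic.h>
    #include <sched.h>     /* sched_yield() stands in for erts_thr_yield() */

    #define SPIN_UNTIL_YIELD 25

    extern void sleep_for_lock(void);   /* stand-in for wait_for_locks() */

    void lock_with_bounded_spin(atomic_int *lock, int spin_budget)
    {
        int until_yield = SPIN_UNTIL_YIELD;
        int expected = 0;
        while (!atomic_compare_exchange_weak_explicit(lock, &expected, 1,
                                                      memory_order_acquire,
                                                      memory_order_relaxed)) {
            expected = 0;
            if (spin_budget-- <= 0) {   /* budget spent: block instead */
                sleep_for_lock();
                return;
            }
            if (--until_yield == 0) {   /* be polite on oversubscribed hosts */
                until_yield = SPIN_UNTIL_YIELD;
                sched_yield();
            }
        }
    }
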
@@ -1407,7 +1419,7 @@ check_queue(erts_proc_lock_t *lck)
wtr = (((ErtsProcLocks) 1) << lock_no) << ERTS_PROC_LOCK_WAITER_SHIFT;
if (lflgs & wtr) {
int n;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
ERTS_LC_ASSERT(lck->queues && lck->queues->queue[lock_no]);
wtr = lck->queues->queue[lock_no];
n = 0;
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index d71e5a0a6e..7cfc9893fa 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -255,11 +255,7 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
typedef struct {
union {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_t mtx;
-#else
erts_smp_spinlock_t spnlck;
-#endif
char buf[64]; /* Try to get locks in different cache lines */
} u;
} erts_pix_lock_t;
@@ -277,9 +273,12 @@ typedef struct {
((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_acqb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_relb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
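
[Editor's note: splitting ERTS_PROC_LOCK_FLGS_CMPXCHG_ into _ACQB_ and _RELB_ variants lets the lock path pay only an acquire barrier and the unlock path only a release barrier, instead of full fences. The ordering contract expressed in C11 atomics; an illustration, not the ethread implementation.]

    #include <stdatomic.h>

    /* Lock: acquire ordering keeps critical-section accesses from
     * floating above the successful CAS. */
    static inline long cmpxchg_acqb(atomic_long *p, long newv, long exp)
    {
        atomic_compare_exchange_strong_explicit(
            p, &exp, newv, memory_order_acquire, memory_order_acquire);
        return exp;   /* like the erts macro: returns the value seen */
    }

    /* Unlock: release ordering keeps critical-section accesses from
     * leaking below the flag update that publishes the lock as free. */
    static inline long cmpxchg_relb(atomic_long *p, long newv, long exp)
    {
        atomic_compare_exchange_strong_explicit(
            p, &exp, newv, memory_order_release, memory_order_relaxed);
        return exp;
    }
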
@@ -289,6 +288,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_band(erts_proc_lock_t *,
ErtsProcLocks);
ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_bor(erts_proc_lock_t *,
ErtsProcLocks);
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *,
+ ErtsProcLocks,
+ ErtsProcLocks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -322,7 +324,9 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
#define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags)
@@ -348,9 +352,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
- erts_pix_lock_t *,
- ErtsProcLocks,
- char *file, unsigned int line);
+ erts_pix_lock_t *,
+ ErtsProcLocks,
+ char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
erts_pix_lock_t *,
@@ -372,30 +376,18 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_lock(&pixlck->u.mtx);
-#else
erts_smp_spin_lock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_unlock(&pixlck->u.mtx);
-#else
erts_smp_spin_unlock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
{
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- return erts_smp_lc_mtx_is_locked(&pixlck->u.mtx);
-#else
return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
-#endif
}
/*
@@ -417,9 +409,9 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
ErtsProcLocks expct_lflgs = 0;
while (1) {
- ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock,
- expct_lflgs | locks,
- expct_lflgs);
+ ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock,
+ expct_lflgs | locks,
+ expct_lflgs);
if (ERTS_LIKELY(lflgs == expct_lflgs)) {
/* We successfully grabbed all locks. */
return 0;
@@ -535,7 +527,7 @@ erts_smp_proc_unlock__(Process *p,
if (want_lflgs != old_lflgs) {
ErtsProcLocks new_lflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, want_lflgs, old_lflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(&p->lock, want_lflgs, old_lflgs);
if (new_lflgs != old_lflgs) {
/* cmpxchg failed, try again. */
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 03d2a586e3..b41fa70476 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -43,9 +43,17 @@ typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
typedef erts_tid_t erts_smp_tid_t;
typedef erts_mtx_t erts_smp_mtx_t;
typedef erts_cnd_t erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER
+#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED
+#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED
+typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef erts_gate_t erts_smp_gate_t;
typedef ethr_atomic_t erts_smp_atomic_t;
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
@@ -54,15 +62,27 @@ void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#else /* #ifdef ERTS_SMP */
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER 0
+#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
typedef int erts_smp_thr_opts_t;
typedef int erts_smp_thr_init_data_t;
typedef int erts_smp_tid_t;
typedef int erts_smp_mtx_t;
typedef int erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_LONG_LIVED 0
+#define ERTS_SMP_RWMTX_SHORT_LIVED 0
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef int erts_smp_gate_t;
typedef long erts_smp_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
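
[Editor's note: the SMP wrapper layer now forwards the new ethread rwmutex options (reader-group-friendly types plus an expected-lifetime hint) and supplies dummies for non-SMP builds. A hedged usage sketch with the wrapper API declared just below; it assumes the real option struct exposes the type/lived fields mirrored by the non-SMP dummy above.]

    /* Sketch: create a mostly-read lock using the option struct;
     * FREQUENT_READ trades write-side cost for reader scalability. */
    static erts_smp_rwmtx_t table_lock;

    static void init_table_lock(void)
    {
        erts_smp_rwmtx_opt_t opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
        opt.type  = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;  /* assumed field */
        opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;          /* assumed field */
        erts_smp_rwmtx_init_opt(&table_lock, &opt, "table_lock");
    }
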
@@ -103,8 +123,6 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
@@ -119,9 +137,17 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
@@ -155,6 +181,16 @@ ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -190,12 +226,6 @@ ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
-ERTS_GLB_INLINE void erts_smp_gate_init(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_destroy(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_close(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_smp_gate_wait(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount);
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
@@ -331,22 +361,6 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_set_forksafe(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_unset_forksafe(mtx);
-#endif
-}
-
ERTS_GLB_INLINE int
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
{
@@ -433,6 +447,25 @@ erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_set_reader_group(int no)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_set_reader_group(no);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
@@ -441,6 +474,16 @@ erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt(rwmtx, opt, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
{
#ifdef ERTS_SMP
@@ -709,6 +752,73 @@ erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
#endif
}
+ERTS_GLB_INLINE long
+erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_dectest_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
ERTS_GLB_INLINE void
erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
{
@@ -919,54 +1029,6 @@ erts_smp_tsd_get(erts_smp_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_gate_init(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_init((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_destroy(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_destroy((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_close(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_close((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no)
-{
-#ifdef ERTS_SMP
- erts_gate_let_through((erts_gate_t *) gp, no);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_wait(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_wait((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount)
-{
-#ifdef ERTS_SMP
- erts_gate_swait((erts_gate_t *) gp, spincount);
-#endif
-}
-
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 3a8c30fe6a..b8e4473141 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -821,10 +821,10 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |A A A A A A A A A A A A A A A A A A A A A A A A A A|t t t t|0 0| Thing
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N| Next
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E| ErlNode
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N| Next
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X| Data 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* . . .
@@ -835,7 +835,7 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
* t : External pid thing tag (1100)
* t : External port thing tag (1101)
* t : External ref thing tag (1110)
- * N : Next (external thing) pointer
+ * N : Next (off_heap) pointer
* E : ErlNode pointer
* X : Type specific data
*
@@ -852,13 +852,16 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
/* XXX:PaN - this structure is not perfect for halfword heap, it takes
a lot of memory due to padding, and the array will not begin at the end of the
structure, as otherwise expected. Be sure to access data.ui32 array and not try
- to do pointer manipulation on an Eterm * to reach the actual data... */
+ to do pointer manipulation on an Eterm * to reach the actual data...
+ XXX:Sverk - The problem is made worse by the "one off-heap list" change, since
+ 'next' must align with 'next' in ProcBin, erl_fun_thing and erl_off_heap_header.
+*/
typedef struct external_thing_ {
/* ----+ */
Eterm header; /* | */
- struct external_thing_ *next; /* > External thing head */
- struct erl_node_ *node; /* | */
+ struct erl_node_* node; /* > External thing head */
+ struct erl_off_heap_header* next; /* | */
/* ----+ */
union {
Uint32 ui32[1];
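
[Editor's note: the field swap in external_thing_ (node before next) exists so that 'next' lands at the offset shared with ProcBin and erl_fun_thing, as the XXX:Sverk note says. A compile-time check of that invariant over illustrative layouts; the middle fields of the real structs differ.]

    #include <stddef.h>

    typedef unsigned long Eterm_;
    struct erl_node_;

    /* Illustrative layouts only: what matters is that 'next' sits at one
     * common offset so the shared off_heap.first chain can traverse all
     * object kinds. */
    struct oh_header { Eterm_ thing_word; void *pad;              struct oh_header *next; };
    struct ext_thing { Eterm_ header;     struct erl_node_ *node; struct oh_header *next; };

    /* Negative array size on failure (works in C89 and later). */
    typedef char next_offsets_must_match
        [offsetof(struct ext_thing, next) == offsetof(struct oh_header, next) ? 1 : -1];
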
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 35b338c6eb..0b7269262e 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -25,6 +25,11 @@
#ifndef ERL_THREAD_H__
#define ERL_THREAD_H__
+#define ERTS_SPIN_BODY ETHR_SPIN_BODY
+
+#define ERTS_MAX_READER_GROUPS 8
+extern int erts_reader_groups;
+
#include "sys.h"
#ifdef USE_THREADS
@@ -48,6 +53,7 @@
#define ERTS_THR_OPTS_DEFAULT_INITER ETHR_THR_OPTS_DEFAULT_INITER
typedef ethr_thr_opts erts_thr_opts_t;
typedef ethr_init_data erts_thr_init_data_t;
+typedef ethr_late_init_data erts_thr_late_init_data_t;
typedef ethr_tid erts_tid_t;
/* mutex */
@@ -73,8 +79,19 @@ typedef struct {
erts_lcnt_lock_t lcnt;
#endif
} erts_rwmtx_t;
+
+#define ERTS_RWMTX_OPT_DEFAULT_INITER ETHR_RWMUTEX_OPT_DEFAULT_INITER
+#define ERTS_RWMTX_TYPE_NORMAL ETHR_RWMUTEX_TYPE_NORMAL
+#define ERTS_RWMTX_TYPE_FREQUENT_READ ETHR_RWMUTEX_TYPE_FREQUENT_READ
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_RWMTX_LONG_LIVED ETHR_RWMUTEX_LONG_LIVED
+#define ERTS_RWMTX_SHORT_LIVED ETHR_RWMUTEX_SHORT_LIVED
+#define ERTS_RWMTX_UNKNOWN_LIVED ETHR_RWMUTEX_UNKNOWN_LIVED
+typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
+
typedef ethr_tsd_key erts_tsd_key_t;
-typedef ethr_gate erts_gate_t;
+typedef ethr_ts_event erts_tse_t;
typedef ethr_atomic_t erts_atomic_t;
/* spinlock */
@@ -103,25 +120,14 @@ typedef ethr_timeval erts_thr_timeval_t;
__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
/* implemented in erl_init.c */
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#define ERTS_REC_MTX_INITER \
- {ETHR_REC_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1,THE_NON_VALUE,ERTS_LC_FLG_LT_MUTEX)}
-#define ERTS_MTX_INITER \
- {ETHR_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_MUTEX)}
-#else
-#define ERTS_REC_MTX_INITER {ETHR_REC_MUTEX_INITER}
-#define ERTS_MTX_INITER {ETHR_MUTEX_INITER}
-#endif
-#define ERTS_CND_INITER ETHR_COND_INITER
#define ERTS_THR_INIT_DATA_DEF_INITER ETHR_INIT_DATA_DEFAULT_INITER
+#define ERTS_THR_LATE_INIT_DATA_DEF_INITER \
+ ETHR_LATE_INIT_DATA_DEFAULT_INITER
#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-
#else /* #ifdef USE_THREADS */
#define ERTS_THR_MEMORY_BARRIER
@@ -129,12 +135,26 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
#define ERTS_THR_OPTS_DEFAULT_INITER 0
typedef int erts_thr_opts_t;
typedef int erts_thr_init_data_t;
+typedef int erts_thr_late_init_data_t;
typedef int erts_tid_t;
typedef int erts_mtx_t;
typedef int erts_cnd_t;
+#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_RWMTX_TYPE_NORMAL 0
+#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_RWMTX_LONG_LIVED 0
+#define ERTS_RWMTX_SHORT_LIVED 0
+#define ERTS_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_rwmtx_opt_t;
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
-typedef int erts_gate_t;
+typedef int erts_tse_t;
typedef long erts_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
@@ -148,7 +168,6 @@ typedef struct {
long tv_nsec;
} erts_thr_timeval_t;
-#define ERTS_REC_MTX_INITER 0
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
#define ERTS_THR_INIT_DATA_DEF_INITER 0
@@ -158,6 +177,7 @@ typedef struct {
#endif /* #ifdef USE_THREADS */
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
+ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
void *arg, erts_thr_opts_t *opts);
ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res);
@@ -166,9 +186,6 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void erts_rec_mtx_init(erts_mtx_t *mtx);
-#endif
ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
@@ -177,8 +194,6 @@ ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_set_forksafe(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_unset_forksafe(erts_mtx_t *mtx);
ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
@@ -192,9 +207,17 @@ ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
@@ -228,6 +251,20 @@ ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_atomic_read_acqb(erts_atomic_t *var);
+ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_dectest_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
+ char *name,
+ Eterm extra,
+ Uint16 opt);
ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
char *name,
Eterm extra);
@@ -263,12 +300,16 @@ ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
-ERTS_GLB_INLINE void erts_gate_init(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_destroy(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_close(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_let_through(erts_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_gate_wait(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_swait(erts_gate_t *gp, int spincount);
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void);
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount);
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
+ERTS_GLB_INLINE int erts_thr_get_main_status(void);
+ERTS_GLB_INLINE void erts_thr_yield(void);
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
@@ -290,6 +331,16 @@ erts_thr_init(erts_thr_init_data_t *id)
}
ERTS_GLB_INLINE void
+erts_thr_late_init(erts_thr_late_init_data_t *id)
+{
+#ifdef USE_THREADS
+ int res = ethr_late_init(id);
+ if (res)
+ erts_thr_fatal_error(res, "complete initialization of thread library");
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
@@ -362,20 +413,6 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
#endif
}
-
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void
-erts_rec_mtx_init(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_rec_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize recursive mutex");
-#endif
-}
-#endif
-
-
ERTS_GLB_INLINE void
erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
{
@@ -422,9 +459,7 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -463,9 +498,7 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -492,26 +525,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_mtx_set_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_set_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "set mutex forksafe");
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_unset_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_unset_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "unset mutex forksafe");
-#endif
-}
-
ERTS_GLB_INLINE int
erts_mtx_trylock(erts_mtx_t *mtx)
{
@@ -531,11 +544,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
-#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try lock mutex");
-
+#endif
return res;
#else
return 0;
@@ -551,19 +560,16 @@ erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
#endif
}
@@ -571,16 +577,13 @@ ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&mtx->lcnt);
#endif
- res = ethr_mutex_unlock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "unlock mutex");
+ ethr_mutex_unlock(&mtx->mtx);
#endif
}
@@ -648,9 +651,7 @@ ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_signal(cnd);
- if (res)
- erts_thr_fatal_error(res, "signal on condition variable");
+ ethr_cond_signal(cnd);
#endif
}
@@ -659,19 +660,34 @@ ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_broadcast(cnd);
- if (res)
- erts_thr_fatal_error(res, "broadcast on condition variable");
+ ethr_cond_broadcast(cnd);
#endif
}
/* rwmutex */
ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_rwmtx_set_reader_group(int no)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+#endif
+ res = ethr_rwmutex_set_reader_group(no);
+ if (res != 0)
+ erts_thr_fatal_error(res, "set reader group");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -684,10 +700,20 @@ erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra)
+{
+ erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -700,6 +726,12 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
}
ERTS_GLB_INLINE void
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+{
+ erts_rwmtx_init_opt(rwmtx, NULL, name);
+}
+
+ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
@@ -736,9 +768,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try read lock rwmutex");
return res;
#else
@@ -754,19 +783,16 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_rlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "read lock rwmutex");
#endif
}
@@ -774,16 +800,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_runlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "read unlock rwmutex");
+ ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
}
@@ -808,9 +831,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try write lock rwmutex");
return res;
#else
@@ -826,19 +846,16 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "write lock rwmutex");
#endif
}
@@ -846,16 +863,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "write unlock rwmutex");
+ ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
}
@@ -917,9 +931,7 @@ ERTS_GLB_INLINE void
erts_atomic_init(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_init(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic init");
+ ethr_atomic_init(var, i);
#else
*var = i;
#endif
@@ -929,9 +941,7 @@ ERTS_GLB_INLINE void
erts_atomic_set(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_set(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic set");
+ ethr_atomic_set(var, i);
#else
*var = i;
#endif
@@ -941,11 +951,7 @@ ERTS_GLB_INLINE long
erts_atomic_read(erts_atomic_t *var)
{
#ifdef USE_THREADS
- long i;
- int res = ethr_atomic_read(var, &i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic read");
- return i;
+ return ethr_atomic_read(var);
#else
return *var;
#endif
@@ -955,11 +961,7 @@ ERTS_GLB_INLINE long
erts_atomic_inctest(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_inctest(incp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment and test");
- return test;
+ return ethr_atomic_inc_read(incp);
#else
return ++(*incp);
#endif
@@ -969,11 +971,7 @@ ERTS_GLB_INLINE long
erts_atomic_dectest(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_dectest(decp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement and test");
- return test;
+ return ethr_atomic_dec_read(decp);
#else
return --(*decp);
#endif
@@ -983,9 +981,7 @@ ERTS_GLB_INLINE void
erts_atomic_inc(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_inc(incp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment");
+ ethr_atomic_inc(incp);
#else
++(*incp);
#endif
@@ -995,9 +991,7 @@ ERTS_GLB_INLINE void
erts_atomic_dec(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_dec(decp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement");
+ ethr_atomic_dec(decp);
#else
--(*decp);
#endif
@@ -1007,11 +1001,7 @@ ERTS_GLB_INLINE long
erts_atomic_addtest(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_addtest(addp, i, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition and test");
- return test;
+ return ethr_atomic_add_read(addp, i);
#else
return *addp += i;
#endif
@@ -1021,9 +1011,7 @@ ERTS_GLB_INLINE void
erts_atomic_add(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_add(addp, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition");
+ ethr_atomic_add(addp, i);
#else
*addp += i;
#endif
@@ -1034,9 +1022,7 @@ erts_atomic_xchg(erts_atomic_t *xchgp, long new)
{
long old;
#ifdef USE_THREADS
- int res = ethr_atomic_xchg(xchgp, new, &old);
- if (res)
- erts_thr_fatal_error(res, "perform atomic exchange");
+ return ethr_atomic_xchg(xchgp, new);
#else
old = *xchgp;
*xchgp = new;
@@ -1048,11 +1034,7 @@ ERTS_GLB_INLINE long
erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
{
#ifdef USE_THREADS
- long old;
- int res = ethr_atomic_cmpxchg(xchgp, new, expected, &old);
- if (ERTS_UNLIKELY(res != 0))
- erts_thr_fatal_error(res, "perform atomic exchange");
- return old;
+ return ethr_atomic_cmpxchg(xchgp, new, expected);
#else
long old = *xchgp;
if (old == expected)
@@ -1064,31 +1046,95 @@ erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
ERTS_GLB_INLINE long
erts_atomic_bor(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_or_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise or");
+ return ethr_atomic_read_bor(var, mask);
#else
+ long old;
old = *var;
*var |= mask;
-#endif
return old;
+#endif
}
ERTS_GLB_INLINE long
erts_atomic_band(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_and_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise and");
+ return ethr_atomic_read_band(var, mask);
#else
+ long old;
old = *var;
*var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_read_acqb(erts_atomic_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_set_relb(erts_atomic_t *var, long i)
+{
+#ifdef USE_THREADS
+ ethr_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_dec_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_dectest_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_dec_read_relb(decp);
+#else
+ return --(*decp);
#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
return old;
+#endif
}
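
The block above tracks a larger rework of the ethread atomics: the primitives can no longer fail, so the wrappers stop routing an error code through erts_thr_fatal_error() and return values directly (note the renamed ethr_atomic_inc_read/dec_read/add_read/read_bor/read_band), and the new _acqb/_relb variants add acquire/release memory barriers. A minimal sketch of the value-returning convention, using GCC __atomic builtins as illustrative stand-ins for the ethread calls (the my_atomic_* names are invented, not the real API):

    #include <stdio.h>

    typedef long my_atomic_t;

    /* Value-returning calling convention, as in the reworked wrappers. */
    static long my_atomic_inc_read(my_atomic_t *p)
    {
        return __atomic_add_fetch(p, 1, __ATOMIC_SEQ_CST);
    }

    static long my_atomic_cmpxchg(my_atomic_t *p, long new_val, long expected)
    {
        __atomic_compare_exchange_n(p, &expected, new_val, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;        /* always the old value, success or not */
    }

    int main(void)
    {
        my_atomic_t refc = 0;
        printf("%ld\n", my_atomic_inc_read(&refc));      /* 1            */
        printf("%ld\n", my_atomic_cmpxchg(&refc, 5, 1)); /* 1 (old value) */
        printf("%ld\n", my_atomic_inc_read(&refc));      /* 6            */
        return 0;
    }
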
/* spinlock */
@@ -1112,6 +1158,26 @@ erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
+ Uint16 opt)
+{
+#ifdef USE_THREADS
+ int res = ethr_spinlock_init(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "init spinlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
erts_spinlock_init(erts_spinlock_t *lock, char *name)
{
#ifdef USE_THREADS
@@ -1152,16 +1218,13 @@ ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&lock->lcnt);
#endif
- res = ethr_spin_unlock(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "release spin lock");
+ ethr_spin_unlock(&lock->slck);
#else
(void)lock;
#endif
@@ -1175,19 +1238,16 @@ erts_spin_lock(erts_spinlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
- res = ethr_spin_lock(&lock->slck);
+ ethr_spin_lock(&lock->slck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take spin lock");
#else
(void)lock;
#endif
@@ -1268,16 +1328,13 @@ ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release read lock");
+ ethr_read_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1291,19 +1348,16 @@ erts_read_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_lock(&lock->rwlck);
+ ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take read lock");
#else
(void)lock;
#endif
@@ -1313,16 +1367,13 @@ ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release write lock");
+ ethr_write_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1336,19 +1387,16 @@ erts_write_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_lock(&lock->rwlck);
+ ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take write lock");
#else
(void)lock;
#endif
@@ -1432,66 +1480,95 @@ erts_tsd_get(erts_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_init(erts_gate_t *gp)
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_init((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize gate");
+ return (erts_tse_t *) ethr_get_ts_event();
+#else
+ return (erts_tse_t *) NULL;
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_destroy(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_destroy((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "destroy gate");
+ ethr_leave_ts_event(ep);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_close(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_close((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "close gate");
+ ethr_event_set(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_let_through(erts_gate_t *gp, unsigned no)
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_let_through((ethr_gate *) gp, no);
- if (res != 0)
- erts_thr_fatal_error(res, "let through gate");
+ ethr_event_reset(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_wait(erts_gate_t *gp)
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
+{
+#ifdef USE_THREADS
+ return ethr_event_wait(&((ethr_ts_event *) ep)->event);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
+{
+#ifdef USE_THREADS
+ return ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_wait((ethr_gate *) gp);
+ return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
+{
+#ifdef USE_THREADS
+ int res = ethr_set_main_thr_status(on, no);
if (res != 0)
- erts_thr_fatal_error(res, "wait on gate");
+ erts_thr_fatal_error(res, "set thread main status");
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_swait(erts_gate_t *gp, int spincount)
+ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_swait((ethr_gate *) gp, spincount);
+ int main_status;
+ int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
- erts_thr_fatal_error(res, "swait on gate");
+ erts_thr_fatal_error(res, "get thread main status");
+ return main_status;
+#else
+ return 1;
#endif
}
+ERTS_GLB_INLINE void erts_thr_yield(void)
+{
+#ifdef USE_THREADS
+ int res = ETHR_YIELD();
+ if (res != 0)
+ erts_thr_fatal_error(res, "yield");
+#endif
+}
+
+
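The gate primitive (erts_gate_*) is gone; its users move to per-thread events fetched with erts_tse_fetch() and driven by reset/set/wait. The intended protocol: the waiter resets the event, re-checks its wakeup condition, and only then blocks, while the waker updates the condition before setting the event. A standalone sketch of that protocol, assuming a condition-variable-backed event (the event_* helpers below are illustrative, not the ethread implementation; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t mtx;
        pthread_cond_t cnd;
        int signalled;
    } event_t;

    static event_t ev = { PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, 0 };
    static int work_ready = 0;

    static void event_reset(event_t *e)  /* cf. erts_tse_reset() */
    {
        pthread_mutex_lock(&e->mtx);
        e->signalled = 0;
        pthread_mutex_unlock(&e->mtx);
    }

    static void event_set(event_t *e)    /* cf. erts_tse_set() */
    {
        pthread_mutex_lock(&e->mtx);
        e->signalled = 1;
        pthread_cond_signal(&e->cnd);
        pthread_mutex_unlock(&e->mtx);
    }

    static void event_wait(event_t *e)   /* cf. erts_tse_wait()/swait() */
    {
        pthread_mutex_lock(&e->mtx);
        while (!e->signalled)
            pthread_cond_wait(&e->cnd, &e->mtx);
        pthread_mutex_unlock(&e->mtx);
    }

    static void *waker(void *arg)
    {
        (void)arg;
        __atomic_store_n(&work_ready, 1, __ATOMIC_RELEASE);
        event_set(&ev);         /* condition first, then the event */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, waker, NULL);
        event_reset(&ev);       /* reset, re-check, only then block */
        while (!__atomic_load_n(&work_ready, __ATOMIC_ACQUIRE)) {
            event_wait(&ev);
            event_reset(&ev);
        }
        puts("woken");
        pthread_join(t, NULL);
        return 0;
    }
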
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 66b05c0e9d..5e81a2d624 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -43,8 +43,6 @@ static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table.
#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
-#define export_init_lock() erts_smp_rwmtx_init(&export_table_lock, \
- "export_tab")
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
@@ -111,8 +109,12 @@ void
init_export_table(void)
{
HashFunctions f;
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
- export_init_lock();
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
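
The export table lock is now created with erts_smp_rwmtx_init_opt(), requesting a frequent-read, long-lived rwlock instead of the default (the register table in register.c below gets the same treatment). The idiom: start from the default option initializer, override the fields you care about, pass the struct to init. A standalone miniature of that option-struct idiom (all types and macros below are illustrative placeholders, not the erts_smp API):

    #include <stdio.h>

    typedef enum { RWMTX_TYPE_NORMAL, RWMTX_TYPE_FREQUENT_READ } rwmtx_type_t;
    typedef enum { RWMTX_SHORT_LIVED, RWMTX_LONG_LIVED } rwmtx_lived_t;

    typedef struct {
        rwmtx_type_t type;    /* reader-counter layout to use */
        rwmtx_lived_t lived;  /* lifetime hint, affects spinning */
    } rwmtx_opt_t;

    #define RWMTX_OPT_DEFAULT_INITER { RWMTX_TYPE_NORMAL, RWMTX_SHORT_LIVED }

    static void rwmtx_init_opt(const rwmtx_opt_t *opt, const char *name)
    {
        printf("init %s: type=%d lived=%d\n", name, opt->type, opt->lived);
    }

    int main(void)
    {
        rwmtx_opt_t opt = RWMTX_OPT_DEFAULT_INITER;
        opt.type = RWMTX_TYPE_FREQUENT_READ; /* mostly read, rarely written */
        opt.lived = RWMTX_LONG_LIVED;        /* lives as long as the VM */
        rwmtx_init_opt(&opt, "export_tab");
        return 0;
    }
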
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index f41b61d73d..d7c8aa84e9 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -65,11 +65,9 @@
# endif
#endif
-/*
- * For backward compatibility reasons, only encode integers that
- * fit in 28 bits (signed) using INTEGER_EXT.
+/* Does Sint fit in Sint32?
*/
-#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
+#define IS_SSMALL32(x) (((Uint) (((x) >> (32-1)) + 1)) < 2)
/*
* Valid creations for nodes are 1, 2, or 3. 0 can also be sent
@@ -504,7 +502,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
ASSERT(edep->ext_endp >= edep->extp);
ext_sz = edep->ext_endp - edep->extp;
- align_sz = ERTS_WORD_ALIGN_PAD_SZ(dist_ext_sz + ext_sz);
+ align_sz = ERTS_EXTRA_DATA_ALIGN_SZ(dist_ext_sz + ext_sz);
new_edep = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA,
dist_ext_sz + ext_sz + align_sz + xsize);
@@ -1470,11 +1468,11 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
*hpp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_pid_header(1);
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
etp->data.ui[0] = data;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*) etp;
*objp = make_external_pid(etp);
}
return ep;
@@ -1571,13 +1569,15 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
case SMALL_DEF:
{
+	    /* Since R14B, INTEGER_EXT is no longer restricted to 28 bits;
+	     * the old limit existed only for backward compatibility. */
Sint val = signed_val(obj);
if ((Uint)val < 256) {
*ep++ = SMALL_INTEGER_EXT;
put_int8(val, ep);
ep++;
- } else if (sizeof(Sint) == 4 || IS_SSMALL28(val)) {
+ } else if (sizeof(Sint) == 4 || IS_SSMALL32(val)) {
*ep++ = INTEGER_EXT;
put_int32(val, ep);
ep += 4;
@@ -1599,18 +1599,32 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
break;
case BIG_DEF:
- if ((n = big_bytes(obj)) < 256) {
- *ep++ = SMALL_BIG_EXT;
- put_int8(n, ep);
- ep += 1;
- }
- else {
- *ep++ = LARGE_BIG_EXT;
- put_int32(n, ep);
- ep += 4;
+ {
+ int sign = big_sign(obj);
+ n = big_bytes(obj);
+ if (sizeof(Sint)==4 && n<=4) {
+ Uint dig = big_digit(obj,0);
+ Sint val = sign ? -dig : dig;
+ if ((val<0) == sign) {
+ *ep++ = INTEGER_EXT;
+ put_int32(val, ep);
+ ep += 4;
+ break;
+ }
+ }
+ if (n < 256) {
+ *ep++ = SMALL_BIG_EXT;
+ put_int8(n, ep);
+ ep += 1;
+ }
+ else {
+ *ep++ = LARGE_BIG_EXT;
+ put_int32(n, ep);
+ ep += 4;
+ }
+ *ep++ = sign;
+ ep = big_to_bytes(obj, ep);
}
- *ep++ = big_sign(obj);
- ep = big_to_bytes(obj, ep);
break;
case PID_DEF:
@@ -1896,69 +1910,30 @@ is_external_string(Eterm list, int* p_is_string)
return len;
}
-/* Assumes that the ones to undo are preluding the lists. */
+/* Assumes that the entries to undo are at the head of the list. */
static void
undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end)
{
const Uint area_sz = (end - start) * sizeof(Eterm);
- struct proc_bin* mso;
- struct proc_bin** mso_nextp = NULL;
-#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* funs;
- struct erl_fun_thing** funs_nextp = NULL;
-#endif
- struct external_thing_* ext;
- struct external_thing_** ext_nextp = NULL;
-
- for (mso = off_heap->mso; ; mso=mso->next) {
- if (!in_area(mso, start, area_sz)) {
- if (mso_nextp != NULL) {
- *mso_nextp = NULL;
- erts_cleanup_mso(off_heap->mso);
- off_heap->mso = mso;
+ struct erl_off_heap_header* hdr;
+ struct erl_off_heap_header** hdr_nextp = NULL;
+
+ for (hdr = off_heap->first; ; hdr=hdr->next) {
+ if (!in_area(hdr, start, area_sz)) {
+ if (hdr_nextp != NULL) {
+ *hdr_nextp = NULL;
+ erts_cleanup_offheap(off_heap);
+ off_heap->first = hdr;
}
break;
}
- mso_nextp = &mso->next;
+ hdr_nextp = &hdr->next;
}
-#ifndef HYBRID /* FIND ME! */
- for (funs = off_heap->funs; ; funs=funs->next) {
- if (!in_area(funs, start, area_sz)) {
- if (funs_nextp != NULL) {
- *funs_nextp = NULL;
- erts_cleanup_funs(off_heap->funs);
- off_heap->funs = funs;
- }
- break;
- }
- funs_nextp = &funs->next;
- }
-#endif
- for (ext = off_heap->externals; ; ext=ext->next) {
- if (!in_area(ext, start, area_sz)) {
- if (ext_nextp != NULL) {
- *ext_nextp = NULL;
- erts_cleanup_externals(off_heap->externals);
- off_heap->externals = ext;
- }
- break;
- }
- ext_nextp = &ext->next;
- }
-
- /* Assert that the ones to undo were indeed preluding the lists. */
+    /* Assert that the entries to undo were indeed at the head of the list. */
#ifdef DEBUG
- for (mso = off_heap->mso; mso != NULL; mso=mso->next) {
- ASSERT(!in_area(mso, start, area_sz));
- }
-# ifndef HYBRID /* FIND ME! */
- for (funs = off_heap->funs; funs != NULL; funs=funs->next) {
- ASSERT(!in_area(funs, start, area_sz));
- }
-# endif
- for (ext = off_heap->externals; ext != NULL; ext=ext->next) {
- ASSERT(!in_area(ext, start, area_sz));
+ for (hdr = off_heap->first; hdr != NULL; hdr = hdr->next) {
+ ASSERT(!in_area(hdr, start, area_sz));
}
#endif /* DEBUG */
}
@@ -2202,11 +2177,11 @@ dec_term_atom_common:
hp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_port_header(1);
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
etp->data.ui[0] = num;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_port(etp);
}
@@ -2284,10 +2259,10 @@ dec_term_atom_common:
#else
etp->header = make_external_ref_header(ref_words);
#endif
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_ref(etp);
ref_num = &(etp->data.ui32[0]);
}
@@ -2330,8 +2305,8 @@ dec_term_atom_common:
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->mso;
- off_heap->mso = pb;
+ pb->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)pb;
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -2367,8 +2342,8 @@ dec_term_atom_common:
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->mso;
- off_heap->mso = pb;
+ pb->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)pb;
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -2488,8 +2463,8 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)funp;
#endif
funp->fe = erts_put_fun_entry2(module, old_uniq, old_index,
@@ -2566,8 +2541,8 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)funp;
#endif
old_uniq = unsigned_val(temp);
@@ -2687,7 +2662,7 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
if ((Uint)val < 256)
result += 1 + 1; /* SMALL_INTEGER_EXT */
- else if (sizeof(Sint) == 4 || IS_SSMALL28(val))
+ else if (sizeof(Sint) == 4 || IS_SSMALL32(val))
result += 1 + 4; /* INTEGER_EXT */
else {
DeclareTmpHeapNoproc(tmp_big,2);
@@ -2699,7 +2674,10 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
}
break;
case BIG_DEF:
- if ((i = big_bytes(obj)) < 256)
+ i = big_bytes(obj);
+ if (sizeof(Sint)==4 && i <= 4 && (big_digit(obj,0)-big_sign(obj)) < (1<<31))
+ result += 1 + 4; /* INTEGER_EXT */
+ else if (i < 256)
result += 1 + 1 + 1 + i; /* tag,size,sign,digits */
else
result += 1 + 4 + 1 + i; /* tag,size,sign,digits */
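
Net effect of the IS_SSMALL32/BIG_DEF changes: any value representable in a signed 32-bit word is now emitted as INTEGER_EXT, including small bignums on 32-bit emulators, where a term can be a bignum even though its value fits in 32 bits; only genuinely larger values fall back to SMALL_BIG_EXT/LARGE_BIG_EXT. A hedged sketch of the tag selection for a 64-bit signed value (tag numbers are from the external term format: SMALL_INTEGER_EXT = 97, INTEGER_EXT = 98; the helper names are invented):

    #include <stdint.h>
    #include <stdio.h>

    static int fits_in_sint32(int64_t x)
    {
        /* Same trick as IS_SSMALL32: shift out the low 31 bits; a value
         * in range leaves 0 or -1, so adding 1 yields 0 or 1. */
        return (uint64_t)((x >> 31) + 1) < 2;
    }

    static size_t enc_int(unsigned char *ep, int64_t val)
    {
        if (val >= 0 && val < 256) {
            ep[0] = 97;                    /* SMALL_INTEGER_EXT */
            ep[1] = (unsigned char)val;
            return 2;
        } else if (fits_in_sint32(val)) {
            ep[0] = 98;                    /* INTEGER_EXT, big-endian */
            ep[1] = (val >> 24) & 0xff;
            ep[2] = (val >> 16) & 0xff;
            ep[3] = (val >> 8) & 0xff;
            ep[4] = val & 0xff;
            return 5;
        }
        return 0;   /* bignum encodings (tags 110/111) handled elsewhere */
    }

    int main(void)
    {
        unsigned char buf[8];
        /* Needs 31 bits: INTEGER_EXT now, SMALL_BIG_EXT before the change */
        size_t n = enc_int(buf, 2000000000);
        for (size_t i = 0; i < n; i++)
            printf("%u ", (unsigned)buf[i]);
        putchar('\n');                     /* 98 119 53 148 0 */
        return 0;
    }
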
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index db86b4d796..cee48bbeb0 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -211,7 +211,7 @@ ERTS_GLB_INLINE void *
erts_dist_ext_trailer(ErtsDistExternal *edep)
{
void *res = (void *) (edep->ext_endp
- + ERTS_WORD_ALIGN_PAD_SZ(edep->ext_endp));
+ + ERTS_EXTRA_DATA_ALIGN_SZ(edep->ext_endp));
ASSERT((((UWord) res) % sizeof(Uint)) == 0);
return res;
}
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index b4a7a22082..280421952e 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -464,7 +464,10 @@ typedef union {
typedef struct proc_bin {
Eterm thing_word; /* Subtag REFC_BINARY_SUBTAG. */
Uint size; /* Binary size in bytes. */
- struct proc_bin *next; /* Pointer to next ProcBin. */
+#if HALFWORD_HEAP
+ void* dummy_ptr_padding__;
+#endif
+ struct erl_off_heap_header *next;
Binary *val; /* Pointer to Binary structure. */
byte *bytes; /* Pointer to the actual data bytes. */
Uint flags; /* Flag word. */
@@ -494,8 +497,8 @@ erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
pb->thing_word = HEADER_PROC_BIN;
pb->size = 0;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*) pb;
pb->val = mbp;
pb->bytes = (byte *) mbp->orig_bytes;
pb->flags = 0;
@@ -512,6 +515,15 @@ erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
&& (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
&& (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
+
+union erl_off_heap_ptr {
+ struct erl_off_heap_header* hdr;
+ ProcBin *pb;
+ struct erl_fun_thing* fun;
+ struct external_thing_* ext;
+ Eterm* ep;
+};
+
/* arrays that get malloced at startup */
extern Port* erts_port;
extern erts_smp_atomic_t erts_ports_alive;
@@ -823,7 +835,6 @@ Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap);
Eterm erts_new_mso_binary(Process*, byte*, int);
Eterm new_binary(Process*, byte*, int);
Eterm erts_realloc_binary(Eterm bin, size_t size);
-void erts_cleanup_mso(ProcBin* pb);
/* erl_bif_info.c */
@@ -1015,7 +1026,7 @@ void print_pass_through(int, byte*, int);
/* beam_emu.c */
int catchlevel(Process*);
-void init_emulator(_VOID_);
+void init_emulator(void);
void process_main(void);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
@@ -1499,7 +1510,6 @@ void p_slpq(_VOID_);
void erts_silence_warn_unused_result(long unused);
void erts_cleanup_offheap(ErlOffHeap *offheap);
-void erts_cleanup_externals(ExternalThing *);
Uint erts_fit_in_bits(Uint);
int list_length(Eterm);
@@ -1532,7 +1542,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
Eterm atoms[], Uint uints1[], Uint uints2[]);
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
-Eterm store_external_or_ref_(Uint **, ExternalThing **, Eterm);
+Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
#define NC_HEAP_SIZE(NC) \
(ASSERT_EXPR(is_node_container((NC))), \
@@ -1676,7 +1686,7 @@ int io_list_to_buf(Eterm, char*, int);
int io_list_to_buf2(Eterm, char*, int);
int io_list_len(Eterm);
int is_string(Eterm);
-void erl_at_exit(FUNCTION(void,(*),(void*)), void*);
+void erl_at_exit(void (*) (void*), void*);
Eterm collect_memory(Process *);
void dump_memory_to_fd(int);
int dump_memory_data(const char *);
@@ -1957,5 +1967,5 @@ erts_alloc_message_heap(Uint size,
# define UnUseTmpHeap(Size,Proc) /* Nothing */
# define UseTmpHeapNoproc(Size) /* Nothing */
# define UnUseTmpHeapNoproc(Size) /* Nothing */
-#endif
-#endif
+#endif /* HEAP_ON_C_STACK */
+#endif /* !__GLOBAL_H__ */
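
This is the heart of the commit's off-heap rework: the three per-type lists (mso, funs, externals) collapse into a single first chain of struct erl_off_heap_header, with the thing word discriminating node types and union erl_off_heap_ptr easing the casts. That is why every pb->next = ohp->mso site above and below becomes pb->next = ohp->first, and why erts_cleanup_mso()/erts_cleanup_externals() give way to one erts_cleanup_offheap(). A miniature of the unified list with a single cleanup walk (tags and types invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    enum { REFC_BINARY = 1, FUN_THING = 2, EXTERNAL_THING = 3 };

    struct off_heap_header {
        unsigned thing_word;             /* discriminates the node type */
        struct off_heap_header *next;
    };

    struct off_heap { struct off_heap_header *first; };

    static void link_node(struct off_heap *oh, unsigned tag)
    {
        struct off_heap_header *hdr = malloc(sizeof *hdr);
        hdr->thing_word = tag;
        hdr->next = oh->first;           /* same push-front as the diff */
        oh->first = hdr;
    }

    static void cleanup_offheap(struct off_heap *oh)
    {
        struct off_heap_header *hdr = oh->first;
        while (hdr) {                    /* one walk handles every type */
            struct off_heap_header *next = hdr->next;
            switch (hdr->thing_word) {
            case REFC_BINARY:    puts("deref binary"); break;
            case FUN_THING:      puts("deref fun");    break;
            case EXTERNAL_THING: puts("deref node");   break;
            }
            free(hdr);
            hdr = next;
        }
        oh->first = NULL;
    }

    int main(void)
    {
        struct off_heap oh = { NULL };
        link_node(&oh, REFC_BINARY);
        link_node(&oh, EXTERNAL_THING);
        cleanup_offheap(&oh);
        return 0;
    }
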
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 68625801cf..b840f65cdd 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1575,8 +1575,8 @@ static void deliver_read_message(Port* prt, Eterm to,
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -1725,8 +1725,8 @@ deliver_vec_message(Port* prt, /* Port */
}
pb->thing_word = HEADER_PROC_BIN;
pb->size = iov->iov_len;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(b);
pb->bytes = base;
pb->flags = 0;
@@ -2259,8 +2259,8 @@ erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
ProcBin* pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = dbin->orig_size;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(dbin);
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -3033,8 +3033,8 @@ driver_deliver_term(ErlDrvPort port,
driver_binary_inc_refc(b); /* caller will free binary */
pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(b);
pb->bytes = ((byte*) b->orig_bytes) + offset;
pb->flags = 0;
@@ -3072,8 +3072,8 @@ driver_deliver_term(ErlDrvPort port,
hp += PROC_BIN_SIZE;
pbp->thing_word = HEADER_PROC_BIN;
pbp->size = size;
- pbp->next = ohp->mso;
- ohp->mso = pbp;
+ pbp->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pbp;
pbp->val = bp;
pbp->bytes = (byte*) bp->orig_bytes;
pbp->flags = 0;
diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c
index 8c8029d450..5bcd567b5f 100644
--- a/erts/emulator/beam/packet_parser.c
+++ b/erts/emulator/beam/packet_parser.c
@@ -679,7 +679,7 @@ int packet_parse_http(const char* buf, int len, int* statep,
while (n && SP(ptr)) {
ptr++; n--;
}
- if (ptr==p0) return -1;
+ if (ptr==p0 && n>0) return -1;
/* NOTE: the syntax allows empty reason phrases */
(*statep) = !0;
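
The extra n>0 term fixes the rejection of an HTTP status line with an empty reason phrase (e.g. "HTTP/1.1 200"): failing to skip any spaces is only an error if bytes actually remain after the status code. A small standalone rendering of the fixed check (the function name is invented for illustration):

    #include <stdio.h>

    /* After the status code: skip SP, then fail only on trailing junk. */
    static int parse_after_status_code(const char *ptr, int n)
    {
        const char *p0 = ptr;
        while (n && *ptr == ' ') { ptr++; n--; }
        if (ptr == p0 && n > 0)
            return -1;   /* non-space right after the code: malformed */
        return 0;        /* spaces skipped, or empty reason phrase */
    }

    int main(void)
    {
        printf("%d\n", parse_after_status_code(" OK", 3)); /*  0 */
        printf("%d\n", parse_after_status_code("", 0));    /*  0 */
        printf("%d\n", parse_after_status_code("X", 1));   /* -1 */
        return 0;
    }
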
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 900ebcbbf7..c9bb7bbe91 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -39,8 +39,6 @@ static Hash process_reg;
static erts_smp_rwmtx_t regtab_rwmtx;
-#define reg_lock_init() erts_smp_rwmtx_init(&regtab_rwmtx, \
- "reg_tab")
#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
@@ -147,8 +145,11 @@ static void reg_free(RegProc *obj)
void init_register_table(void)
{
HashFunctions f;
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- reg_lock_init();
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index ca87d3d70f..0031568af6 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -25,14 +25,6 @@
# define NO_FPE_SIGNALS
#endif
-/* Never use elib-malloc when purify-memory-tracing */
-#if defined(PURIFY)
-#undef ENABLE_ELIB_MALLOC
-#undef ELIB_HEAP_SBRK
-#undef ELIB_ALLOC_IS_CLIB
-#endif
-
-
/* xxxP __VXWORKS__ */
#ifdef VXWORKS
#include <vxWorks.h>
@@ -171,23 +163,6 @@ void erl_assert_error(char* expr, char* file, int line);
#include <stdarg.h>
-#if defined(__STDC__) || defined(_MSC_VER)
-# define EXTERN_FUNCTION(t, f, x) extern t f x
-# define FUNCTION(t, f, x) t f x
-# define _DOTS_ ...
-# define _VOID_ void
-#elif defined(__cplusplus)
-# define EXTERN_FUNCTION(f, x) extern "C" { f x }
-# define FUNCTION(t, f, x) t f x
-# define _DOTS_ ...
-# define _VOID_ void
-#else
-# define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
-# define FUNCTION(t, f, x) t f (/*x*/)
-# define _DOTS_
-# define _VOID_
-#endif
-
/* This isn't sys-dependent, but putting it here benefits sys.c and drivers
- allow use of 'const' regardless of compiler */
@@ -197,7 +172,7 @@ void erl_assert_error(char* expr, char* file, int line);
#ifdef VXWORKS
/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */
-EXTERN_FUNCTION(int, real_printf, (const char *fmt, ...));
+int real_printf(const char *fmt, ...);
# define printf real_printf
#endif
@@ -359,15 +334,8 @@ typedef unsigned char byte;
#error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found
#endif
-#if defined(ARCH_64)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
+# define ERTS_EXTRA_DATA_ALIGN_SZ(X) \
(((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
-#elif defined(ARCH_32)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
- (((size_t) 4) - (((size_t) (X)) & ((size_t) 3)))
-#else
-#error "Not supported..."
-#endif
#include "erl_lock_check.h"
#include "erl_smp.h"
@@ -667,12 +635,12 @@ extern void erl_sys_args(int *argc, char **argv);
extern void erl_sys_schedule(int);
void sys_tty_reset(int);
-EXTERN_FUNCTION(int, sys_max_files, (_VOID_));
+int sys_max_files(void);
void sys_init_io(void);
Preload* sys_preloaded(void);
-EXTERN_FUNCTION(unsigned char*, sys_preload_begin, (Preload*));
-EXTERN_FUNCTION(void, sys_preload_end, (Preload*));
-EXTERN_FUNCTION(int, sys_get_key, (int));
+unsigned char* sys_preload_begin(Preload*);
+void sys_preload_end(Preload*);
+int sys_get_key(int);
void elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
unsigned long *ms_user_diff, unsigned long *ms_sys_diff);
void wall_clock_elapsed_time_both(unsigned long *ms_total,
@@ -689,7 +657,7 @@ int local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst);
void get_now(Uint*, Uint*, Uint*);
void get_sys_now(Uint*, Uint*, Uint*);
-EXTERN_FUNCTION(void, set_break_quit, (void (*)(void), void (*)(void)));
+void set_break_quit(void (*)(void), void (*)(void));
void os_flavor(char*, unsigned);
void os_version(int*, int*, int*);
@@ -729,7 +697,7 @@ int erts_write_env(char *key, char *value);
#define ERTS_DEFAULT_MMAP_THRESHOLD (128 * 1024)
#define ERTS_DEFAULT_MMAP_MAX 64
-EXTERN_FUNCTION(int, sys_alloc_opt, (int, int));
+int sys_alloc_opt(int, int);
typedef struct {
Sint trim_threshold;
@@ -738,7 +706,7 @@ typedef struct {
Sint mmap_max;
} SysAllocStat;
-EXTERN_FUNCTION(void, sys_alloc_stat, (SysAllocStat *));
+void sys_alloc_stat(SysAllocStat *);
/* Block the whole system... */
@@ -1127,13 +1095,10 @@ erts_refc_read(erts_refc_t *refcp, long min_val)
extern int erts_use_kernel_poll;
#endif
-void elib_ensure_initialized(void);
-
-
#if defined(VXWORKS)
/* NOTE! sys_calloc2 does not exist on other
platforms than VxWorks and OSE */
-EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
+void* sys_calloc2(Uint, Uint);
#endif /* VXWORKS || OSE */
@@ -1225,8 +1190,8 @@ EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
*/
#ifdef DEBUG
-EXTERN_FUNCTION(void, erl_debug, (char* format, ...));
-EXTERN_FUNCTION(void, erl_bin_write, (unsigned char *, int, int));
+void erl_debug(char* format, ...);
+void erl_bin_write(unsigned char *, int, int);
# define DEBUGF(x) erl_debug x
#else
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index da6f9ed12f..ab5e8b5d4a 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -48,11 +48,11 @@
#undef M_MMAP_THRESHOLD
#undef M_MMAP_MAX
-#if !defined(ELIB_ALLOC_IS_CLIB) && defined(__GLIBC__) && defined(HAVE_MALLOC_H)
+#if defined(__GLIBC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#endif
-#if defined(ELIB_ALLOC_IS_CLIB) || !defined(HAVE_MALLOPT)
+#if !defined(HAVE_MALLOPT)
#undef HAVE_MALLOPT
#define HAVE_MALLOPT 0
#endif
@@ -162,13 +162,8 @@ erts_heap_alloc(Process* p, Uint need)
bp->alloc_size = n;
bp->used_size = n;
MBUF_SIZE(p) += n;
- bp->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- bp->off_heap.funs = NULL;
-#endif
- bp->off_heap.externals = NULL;
+ bp->off_heap.first = NULL;
bp->off_heap.overhead = 0;
-
return bp->mem;
}
@@ -409,7 +404,7 @@ erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
}
else {
if (szp)
- *szp = ERTS_UINT64_HEAP_SIZE(ui64);
+ *szp += ERTS_UINT64_HEAP_SIZE(ui64);
if (hpp)
res = erts_uint64_to_big(ui64, hpp);
}
@@ -426,7 +421,7 @@ erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
}
else {
if (szp)
- *szp = ERTS_SINT64_HEAP_SIZE(si64);
+ *szp += ERTS_SINT64_HEAP_SIZE(si64);
if (hpp)
res = erts_sint64_to_big(si64, hpp);
}
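
Both hunks fix the same slip: the erts_bld_* helpers follow a two-pass protocol where the first pass accumulates the required heap size into *szp and the second pass builds the term via *hpp, so assigning with '=' instead of '+=' silently discarded the size of everything built earlier in the same sequence. A toy version of the two-pass idiom (a simplified stand-in, not the real API):

    #include <stdio.h>

    typedef unsigned long Term;

    static void bld_uint(Term **hpp, unsigned *szp, unsigned long v)
    {
        unsigned need = 1;      /* pretend every value takes one word */
        if (szp)
            *szp += need;       /* accumulate; a plain '=' here loses
                                 * everything built before this call */
        if (hpp) {
            *(*hpp) = v;
            *hpp += need;
        }
    }

    int main(void)
    {
        unsigned sz = 0;
        bld_uint(NULL, &sz, 17);          /* pass 1: size only */
        bld_uint(NULL, &sz, 42);
        printf("need %u words\n", sz);    /* 2, not 1 */

        Term heap[2], *hp = heap;
        bld_uint(&hp, NULL, 17);          /* pass 2: build */
        bld_uint(&hp, NULL, 42);
        printf("%lu %lu\n", heap[0], heap[1]);
        return 0;
    }
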
@@ -2729,21 +2724,8 @@ not_equal:
}
-void
-erts_cleanup_externals(ExternalThing *etp)
-{
- ExternalThing *tetp;
-
- tetp = etp;
-
- while(tetp) {
- erts_deref_node_entry(tetp->node);
- tetp = tetp->next;
- }
-}
-
Eterm
-store_external_or_ref_(Uint **hpp, ExternalThing **etpp, Eterm ns)
+store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
{
Uint i;
Uint size;
@@ -2762,8 +2744,8 @@ store_external_or_ref_(Uint **hpp, ExternalThing **etpp, Eterm ns)
erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
- ((ExternalThing *) to_hp)->next = *etpp;
- *etpp = (ExternalThing *) to_hp;
+ ((struct erl_off_heap_header*) to_hp)->next = oh->first;
+ oh->first = (struct erl_off_heap_header*) to_hp;
return make_external(to_hp);
}
@@ -2792,7 +2774,7 @@ store_external_or_ref_in_proc_(Process *proc, Eterm ns)
sz = NC_HEAP_SIZE(ns);
ASSERT(sz > 0);
hp = HAlloc(proc, sz);
- return store_external_or_ref_(&hp, &MSO(proc).externals, ns);
+ return store_external_or_ref_(&hp, &MSO(proc), ns);
}
void bin_write(int to, void *to_arg, byte* buf, int sz)
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 60ae4cb108..c450f10f48 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -104,7 +104,7 @@
#include <ctype.h>
#include <sys/types.h>
-extern void erl_exit(int n, char *fmt, _DOTS_);
+void erl_exit(int n, char *fmt, ...);
static ErlDrvSysInfo sys_info;
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 87691fc1bc..059288d1cb 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -1045,7 +1045,7 @@ struct erl_drv_entry inet_driver_entry =
};
/* XXX: is this a driver interface function ??? */
-extern void erl_exit(int n, char*, _DOTS_);
+void erl_exit(int n, char*, ...);
/*
* Malloc wrapper,
@@ -9333,11 +9333,13 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
if (err != ERRNO_BLOCK) {
if (!desc->active) {
#ifdef HAVE_SCTP
- if (short_recv)
+ if (short_recv) {
async_error_am(desc, am_short_recv);
- else
-#else
+ } else {
async_error(desc, err);
+ }
+#else
+ async_error(desc, err);
#endif
driver_cancel_timer(desc->port);
sock_select(desc,FD_READ,0);
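
The restructured #ifdef fixes a dangling-else hazard: in the old shape the else sat immediately before #else, so in the HAVE_SCTP build it captured the first statement after #endif (the driver_cancel_timer() call) as its body, and the non-SCTP error call vanished entirely. With braces and a duplicated call in the #else arm, both configurations read unambiguously. A compact illustration of the fixed shape (the feature macro is invented):

    #include <stdio.h>
    #define HAVE_FEATURE 1

    int main(void)
    {
        int special = 0;
        /* Braces keep the else attached to its if, even with a second
         * copy of the generic call living in the #else arm. */
    #ifdef HAVE_FEATURE
        if (special) {
            puts("special error");
        } else {
            puts("generic error");
        }
    #else
        puts("generic error");
    #endif
        puts("cleanup");  /* in the buggy shape this became the else body */
        return 0;
    }
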
diff --git a/erts/emulator/drivers/unix/mem_drv.c b/erts/emulator/drivers/unix/mem_drv.c
deleted file mode 100644
index 1417ca1121..0000000000
--- a/erts/emulator/drivers/unix/mem_drv.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/* Purpose: Access to elib memory statistics */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_driver.h"
-#include "elib_stat.h"
-
-#define MAP_BUF_SIZE 1000 /* Max map size */
-#define HISTO_BUF_SIZE 100 /* Max histogram buckets */
-
-static ErlDrvData mem_start(ErlDrvPort);
-static int mem_init(void);
-static void mem_stop(ErlDrvData);
-static void mem_command(ErlDrvData, char*, int);
-
-const struct driver_entry mem_driver_entry = {
- mem_init,
- mem_start,
- mem_stop,
- mem_command,
- NULL,
- NULL,
- "mem_drv"
-};
-
-static int mem_init(void)
-{
- return 0;
-}
-
-static ErlDrvData mem_start(ErlDrvPort port, char* buf)
-{
- return (ErlDrvData)port;
-}
-
-static void mem_stop(ErlDrvData port)
-{
-}
-
-void putint32(p, v)
-byte* p; int v;
-{
- p[0] = (v >> 24) & 0xff;
- p[1] = (v >> 16) & 0xff;
- p[2] = (v >> 8) & 0xff;
- p[3] = (v) & 0xff;
-}
-
-int getint16(p)
-byte* p;
-{
- return (p[0] << 8) | p[1];
-}
-
-/*
-** Command:
-** m L1 L0 -> a heap map of length L1*256 + L0 is returned
-** s -> X3 X2 X1 X0 Y3 Y2 Y1 Y0 Z3 Z2 Z1 Z0
-** X == Total heap size bytes
-** Y == Total free bytes
-** Z == Size of largest free block in bytes
-**
-** h L1 L0 B0 -> Generate a logarithm historgram base B with L buckets
-** l L1 L0 S0 -> Generate a linear histogram with step S with L buckets
-*/
-unsigned char outbuf[HISTO_BUF_SIZE*2*4];
-
-static void mem_command(ErlDrvData port, char* buf, int count)
-{
- if ((count == 1) && buf[0] == 's') {
- struct elib_stat info;
- char v[3*4];
-
- elib_stat(&info);
-
- putint32(v, info.mem_total*4);
- putint32(v+4, info.mem_free*4);
- putint32(v+8, info.max_free*4);
- driver_output((ErlDrvPort)port, v, 12);
- return;
- }
- else if ((count == 3) && buf[0] == 'm') {
- char w[MAP_BUF_SIZE];
- int n = getint16(buf+1);
-
- if (n > MAP_BUF_SIZE)
- n = MAP_BUF_SIZE;
- elib_heap_map(w, n);
- driver_output((ErlDrvPort)port, w, n);
- return;
- }
- else if ((count == 4) && (buf[0] == 'h' || buf[0] == 'l')) {
- unsigned long vf[HISTO_BUF_SIZE];
- unsigned long va[HISTO_BUF_SIZE];
- int n = getint16(buf+1);
- int base = (unsigned char) buf[3];
-
- if (n >= HISTO_BUF_SIZE)
- n = HISTO_BUF_SIZE;
- if (buf[0] == 'l')
- base = -base;
- if (elib_histo(vf, va, n, base) < 0) {
- driver_failure((ErlDrvPort)port, -1);
- return;
- }
- else {
- char* p = outbuf;
- int i;
-
- for (i = 0; i < n; i++) {
- putint32(p, vf[i]);
- p += 4;
- }
- for (i = 0; i < n; i++) {
- putint32(p, va[i]);
- p += 4;
- }
- driver_output((ErlDrvPort)port, outbuf, n*8);
- }
- return;
- }
- driver_failure((ErlDrvPort)port, -1);
-}
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index 0052ac0739..b19f632f52 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -98,7 +98,7 @@ extern STATUS copy(char *, char *);
#define EF_SAFE_REALLOC(P, S) ef_safe_realloc((P), (S))
#define EF_FREE(P) do { if((P)) driver_free((P)); } while(0)
-extern void erl_exit(int n, char *fmt, _DOTS_);
+void erl_exit(int n, char *fmt, ...);
static void *ef_safe_alloc(Uint s)
{
@@ -127,7 +127,7 @@ static void *ef_safe_realloc(void *op, Uint s)
(s[0] == '.' && (s[1] == '\0' || (s[1] == '.' && s[2] == '\0')))
#ifdef VXWORKS
-static FUNCTION(int, vxworks_to_posix, (int vx_errno));
+static int vxworks_to_posix(int vx_errno);
#endif
/*
@@ -146,7 +146,7 @@ static FUNCTION(int, vxworks_to_posix, (int vx_errno));
#define CHECK_PATHLEN(X,Y) /* Nothing */
#endif
-static FUNCTION(int, check_error, (int result, Efile_error* errInfo));
+static int check_error(int result, Efile_error* errInfo);
static int
check_error(int result, Efile_error *errInfo)
diff --git a/erts/emulator/drivers/win32/mem_drv.c b/erts/emulator/drivers/win32/mem_drv.c
deleted file mode 100644
index fa7c46eca8..0000000000
--- a/erts/emulator/drivers/win32/mem_drv.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/* Purpose: Access to elib memory statistics */
-
-#include "sys.h"
-#include "erl_driver.h"
-#include "elib_stat.h"
-
-#define MAP_BUF_SIZE 1000 /* Max map size */
-#define HISTO_BUF_SIZE 100 /* Max histogram buckets */
-
-static ErlDrvData mem_start(ErlDrvPort, char*);
-static int mem_init(void);
-static void mem_stop(ErlDrvData);
-static void mem_command(ErlDrvData);
-
-ErlDrvEntry mem_driver_entry = {
- mem_init,
- mem_start,
- mem_stop,
- mem_command,
- NULL,
- NULL,
- "mem_drv"
-};
-
-static int mem_init(void)
-{
- return 0;
-}
-
-static ErlDrvData mem_start(ErlDrvPort port, char* buf)
-{
- return (ErlDrvData)port;
-}
-
-static void mem_stop(ErlDrvData port)
-{
-}
-
-void putint32(p, v)
-byte* p; int v;
-{
- p[0] = (v >> 24) & 0xff;
- p[1] = (v >> 16) & 0xff;
- p[2] = (v >> 8) & 0xff;
- p[3] = (v) & 0xff;
-}
-
-int getint16(p)
-byte* p;
-{
- return (p[0] << 8) | p[1];
-}
-
-/*
-** Command:
-** m L1 L0 -> a heap map of length L1*256 + L0 is returned
-** s -> X3 X2 X1 X0 Y3 Y2 Y1 Y0 Z3 Z2 Z1 Z0
-** X == Total heap size bytes
-** Y == Total free bytes
-** Z == Size of largest free block in bytes
-**
-** h L1 L0 B0 -> Generate a logarithm histogram base B with L buckets
-** l L1 L0 S0 -> Generate a linear histogram with step S with L buckets
-*/
-unsigned char outbuf[HISTO_BUF_SIZE*2*4];
-
-static void mem_command(ErlDrvData port, char* buf, int count)
-{
- if ((count == 1) && buf[0] == 's') {
- struct elib_stat info;
- char v[3*4];
-
- elib_stat(&info);
-
- putint32(v, info.mem_total*4);
- putint32(v+4, info.mem_free*4);
- putint32(v+8, info.max_free*4);
- driver_output((ErlDrvPort)port, v, 12);
- return;
- }
- else if ((count == 3) && buf[0] == 'm') {
- char w[MAP_BUF_SIZE];
- int n = getint16(buf+1);
-
- if (n > MAP_BUF_SIZE)
- n = MAP_BUF_SIZE;
- elib_heap_map(w, n);
- driver_output((ErlDrvPort)port, w, n);
- return;
- }
- else if ((count == 4) && (buf[0] == 'h' || buf[0] == 'l')) {
- unsigned long vf[HISTO_BUF_SIZE];
- unsigned long va[HISTO_BUF_SIZE];
- int n = getint16(buf+1);
- int base = (unsigned char) buf[3];
-
- if (n >= HISTO_BUF_SIZE)
- n = HISTO_BUF_SIZE;
- if (buf[0] == 'l')
- base = -base;
- if (elib_histo(vf, va, n, base) < 0) {
- driver_failure((ErlDrvPort)port, -1);
- return;
- }
- else {
- char* p = outbuf;
- int i;
-
- for (i = 0; i < n; i++) {
- putint32(p, vf[i]);
- p += 4;
- }
- for (i = 0; i < n; i++) {
- putint32(p, va[i]);
- p += 4;
- }
- driver_output((ErlDrvPort)port, outbuf, n*8);
- }
- return;
- }
- driver_failure((ErlDrvPort)port, -1);
-}
-
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index b0abfd2310..2a877d8ace 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -440,9 +440,12 @@ BIF_RETTYPE hipe_bifs_alloc_data_2(BIF_ALIST_2)
align != sizeof(long) && align != sizeof(double)))
BIF_ERROR(BIF_P, BADARG);
nrbytes = unsigned_val(BIF_ARG_2);
+ if (nrbytes == 0)
+ BIF_RET(make_small(0));
block = erts_alloc(ERTS_ALC_T_HIPE, nrbytes);
if ((unsigned long)block & (align-1))
- fprintf(stderr, "Yikes! erts_alloc() returned misaligned address %p\r\n", block);
+ fprintf(stderr, "%s: erts_alloc(%lu) returned %p which is not %lu-byte aligned\r\n",
+ __FUNCTION__, (unsigned long)nrbytes, block, (unsigned long)align);
BIF_RET(address_to_term(block, BIF_P));
}
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index a77aec7919..900dfc5a8a 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -261,7 +261,7 @@ static const struct literal {
/* Field offsets in a process struct */
{ "P_HP", offsetof(struct process, htop) },
{ "P_HP_LIMIT", offsetof(struct process, stop) },
- { "P_OFF_HEAP_MSO", offsetof(struct process, off_heap.mso) },
+ { "P_OFF_HEAP_FIRST", offsetof(struct process, off_heap.first) },
{ "P_MBUF", offsetof(struct process, mbuf) },
{ "P_ID", offsetof(struct process, id) },
{ "P_FLAGS", offsetof(struct process, flags) },
@@ -456,7 +456,7 @@ static const struct rts_param {
} rts_params[] = {
{ 1, "P_OFF_HEAP_FUNS",
#if !defined(HYBRID)
- 1, offsetof(struct process, off_heap.funs)
+ 1, offsetof(struct process, off_heap.first)
#endif
},
diff --git a/erts/emulator/obsolete/driver.h b/erts/emulator/obsolete/driver.h
deleted file mode 100644
index 708fe68e1a..0000000000
--- a/erts/emulator/obsolete/driver.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/*
- * OLD, OBSOLETE include file for erlang driver writers.
- * New drivers should use erl_driver.h instead.
- */
-
-#ifndef __DRIVER_H__
-#define __DRIVER_H__
-
-#include <stdlib.h>
-#include "driver_int.h"
-
-#undef _ANSI_ARGS_
-#undef CONST
-
-#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
-# define _USING_PROTOTYPES_ 1
-# define _ANSI_ARGS_(x) x
-# define CONST const
-#else
-# define _ANSI_ARGS_(x) ()
-# define CONST
-#endif
-
-#ifdef __cplusplus
-# define EXTERN extern "C"
-#else
-# define EXTERN extern
-#endif
-
-/* Values for mode arg to driver_select() */
-
-#define DO_READ (1 << 0)
-#define DO_WRITE (1 << 1)
-
-/* Flags for set_port_control_flags() */
-#define PORT_CONTROL_FLAG_BINARY 1
-#define PORT_CONTROL_FLAG_HEAVY 2
-
-/* This macro is used to name a dynamic driver's init function in */
-/* a way that doesn't lead to conflicts. This is crucial when using */
-/* operating systems that has one namespace for all symbols */
-/* (e.g. VxWorks). Example: if you have an dynamic driver C source */
-/* file named echo_drv.c, you use the macro like this: */
-/* int DRIVER_INIT(echo_drv)(void *handle) */
-#if defined(VXWORKS)
-# define DRIVER_INIT(DRIVER_NAME) DRIVER_NAME ## _init
-#elif defined(__WIN32__)
-# define DRIVER_INIT(DRIVER_NAME) __declspec(dllexport) driver_init
-#else
-# define DRIVER_INIT(DRIVER_NAME) driver_init
-#endif
-
-typedef int (*F_PTR)(); /* a function pointer */
-typedef long (*L_PTR)(); /* pointer to a function returning long */
-
-extern int null_func();
-
-/* This structure MUST match Binary in global.h exactly!!! */
-typedef struct driver_binary {
- int orig_size; /* total length of binary */
- char orig_bytes[1]; /* the data (char instead of byte!) */
-} DriverBinary;
-
-typedef struct {
- int vsize; /* length of vectors */
- int size; /* total size in bytes */
- SysIOVec* iov;
- DriverBinary** binv;
-} ErlIOVec;
-
-/*
- * OLD, OBSOLETE driver entry structure.
- */
-
-typedef struct driver_entry {
- F_PTR init; /* called at system start up (no args) */
- L_PTR start; /* called when some one does an open_port
- args: port, command (nul-terminated),
- additional/alternate args for fd/vanilla/spawn driver.
- return value -1 means failure, other
- is saved and passed to the other funcs */
- F_PTR stop; /* called when port is closed, and when the
- emulator is halted - arg: start_return */
- F_PTR output; /* called when we have output from erlang to the port
- args: start_return, buf, buflen */
- F_PTR ready_input; /* called when we have input from one of the driver's
- file descriptors - args: start_return, fd */
- F_PTR ready_output; /* called when output is possible to one of the driver's
- file descriptors - args: start_return, fd */
- char *driver_name; /* name supplied as {driver,Name,Args} to open_port */
-
- F_PTR finish; /* called before unloading (DYNAMIC DRIVERS ONLY) */
- void *handle; /* file handle (DYNAMIC DRIVERS ONLY) */
- F_PTR control; /* "ioctl" for drivers (invoked by port_command/3) */
- F_PTR timeout; /* Reserved */
- F_PTR outputv; /* Reserved */
- F_PTR ready_async; /* Completion routine for driver_async */
- F_PTR padding1[3]; /* pad to match size of modern driver struct */
- int padding2[4]; /* more pad */
- F_PTR padding3[3]; /* even more padding */
-} DriverEntry;
-
-
-/* These are the kernel functions available for driver writers */
-
-EXTERN int driver_select _ANSI_ARGS_((int,int,int,int));
-
-EXTERN int driver_output _ANSI_ARGS_((int, char*, int));
-EXTERN int driver_output2 _ANSI_ARGS_((int, char*, int, char*, int));
-EXTERN int driver_output_binary _ANSI_ARGS_((int, char*, int,
- DriverBinary*, int, int));
-EXTERN int driver_outputv _ANSI_ARGS_((int, char*,int,ErlIOVec*,int));
-
-EXTERN int driver_vec_to_buf _ANSI_ARGS_((ErlIOVec*, char*, int));
-
-EXTERN int driver_set_timer _ANSI_ARGS_((int, unsigned long));
-EXTERN int driver_cancel_timer _ANSI_ARGS_((int));
-
-/*
- * The following functions are used to initiate a close of a port
- * from a driver.
- */
-EXTERN int driver_failure_eof _ANSI_ARGS_((int));
-EXTERN int driver_failure_atom _ANSI_ARGS_((int, char *));
-EXTERN int driver_failure_posix _ANSI_ARGS_((int, int));
-EXTERN int driver_failure _ANSI_ARGS_((int, int));
-EXTERN int driver_exit _ANSI_ARGS_ ((int, int));
-
-EXTERN char* erl_errno_id _ANSI_ARGS_((int error));
-EXTERN void set_busy_port _ANSI_ARGS_((int, int));
-EXTERN void add_driver_entry _ANSI_ARGS_((DriverEntry *));
-EXTERN int remove_driver_entry _ANSI_ARGS_((DriverEntry *));
-EXTERN void set_port_control_flags _ANSI_ARGS_((int, int));
-
-/* Binary interface */
-/* NOTE: DO NOT overwrite a binary with new data (if the data is delivered);
-** since the binary is a shared object it MUST be written once.
-*/
-
-EXTERN DriverBinary* driver_alloc_binary _ANSI_ARGS_((int));
-EXTERN DriverBinary* driver_realloc_binary _ANSI_ARGS_((DriverBinary*, int));
-EXTERN void driver_free_binary _ANSI_ARGS_((DriverBinary*));
-
-
-/* Queue interface */
-EXTERN int driver_enqv _ANSI_ARGS_((int, ErlIOVec*, int));
-EXTERN int driver_pushqv _ANSI_ARGS_((int, ErlIOVec*, int));
-EXTERN int driver_deq _ANSI_ARGS_((int, int));
-EXTERN SysIOVec* driver_peekq _ANSI_ARGS_((int, int*));
-EXTERN int driver_sizeq _ANSI_ARGS_((int));
-EXTERN int driver_enq_bin _ANSI_ARGS_((int, DriverBinary*, int, int));
-EXTERN int driver_enq _ANSI_ARGS_((int, char*, int));
-EXTERN int driver_pushq_bin _ANSI_ARGS_((int, DriverBinary*, int, int));
-EXTERN int driver_pushq _ANSI_ARGS_((int, char*, int));
-
-/* Memory management */
-EXTERN void *driver_alloc _ANSI_ARGS_((size_t));
-EXTERN void *driver_realloc _ANSI_ARGS_((void*, size_t));
-EXTERN void driver_free _ANSI_ARGS_((void*));
-
-/* Shared / dynamic link libraries */
-EXTERN void *driver_dl_open _ANSI_ARGS_((char *));
-EXTERN void *driver_dl_sym _ANSI_ARGS_((void *, char *));
-EXTERN int driver_dl_close _ANSI_ARGS_((void *));
-EXTERN char *driver_dl_error _ANSI_ARGS_((void));
-
-/* Async IO functions */
-EXTERN long driver_async _ANSI_ARGS_((int,
- unsigned int*,
- void (*)(void*),
- void *,
- void (*)(void*)));
-EXTERN int driver_async_cancel _ANSI_ARGS_((long));
-
-EXTERN int driver_lock_driver _ANSI_ARGS_((int));
-
-/* Threads */
-typedef void* erl_mutex_t;
-typedef void* erl_cond_t;
-typedef void* erl_thread_t;
-
-EXTERN erl_mutex_t erts_mutex_create _ANSI_ARGS_((void));
-EXTERN int erts_mutex_destroy _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_lock _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_unlock _ANSI_ARGS_((erl_mutex_t));
-
-EXTERN erl_cond_t erts_cond_create _ANSI_ARGS_((void));
-EXTERN int erts_cond_destroy _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_signal _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_broadcast _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_wait _ANSI_ARGS_((erl_cond_t, erl_mutex_t));
-EXTERN int erts_cond_timedwait _ANSI_ARGS_((erl_cond_t, erl_mutex_t, long));
-
-EXTERN int erts_thread_create _ANSI_ARGS_((erl_thread_t*,
- void* (*func)(void*),
- void* arg,
- int detached));
-EXTERN erl_thread_t erts_thread_self _ANSI_ARGS_((void));
-EXTERN void erts_thread_exit _ANSI_ARGS_((void*));
-EXTERN int erts_thread_join _ANSI_ARGS_((erl_thread_t, void**));
-EXTERN int erts_thread_kill _ANSI_ARGS_((erl_thread_t));
-
-
-typedef unsigned long DriverTermData;
-
-#define TERM_DATA(x) ((DriverTermData) (x))
-
-/* Possible types to send from driver Argument type */
-#define ERL_DRV_NIL ((DriverTermData) 1) /* None */
-#define ERL_DRV_ATOM ((DriverTermData) 2) /* driver_mk_atom(string) */
-#define ERL_DRV_INT ((DriverTermData) 3) /* int */
-#define ERL_DRV_PORT ((DriverTermData) 4) /* driver_mk_port(ix) */
-#define ERL_DRV_BINARY ((DriverTermData) 5) /* ErlDriverBinary*, int */
-#define ERL_DRV_STRING ((DriverTermData) 6) /* char*, int */
-#define ERL_DRV_TUPLE ((DriverTermData) 7) /* int */
-#define ERL_DRV_LIST ((DriverTermData) 8) /* int */
-#define ERL_DRV_STRING_CONS ((DriverTermData) 9) /* char*, int */
-#define ERL_DRV_PID ((DriverTermData) 10) /* driver_connected,... */
-
-/* DriverTermData is the type to use for casts when building
- * terms that should be sent to connected process,
- * for instance a tuple on the form {tcp, Port, [Tag|Binary]}
- *
- * DriverTermData spec[] = {
- * ERL_DRV_ATOM, driver_mk_atom("tcp"),
- * ERL_DRV_PORT, driver_mk_port(drv->ix),
- * ERL_DRV_INT, REPLY_TAG,
- * ERL_DRV_BIN, 50, TERM_DATA(buffer),
- * ERL_DRV_LIST, 2,
- * ERL_DRV_TUPLE, 3,
- * }
- *
- */
-
-EXTERN DriverTermData driver_mk_atom _ANSI_ARGS_ ((char*));
-EXTERN DriverTermData driver_mk_port _ANSI_ARGS_ ((int));
-EXTERN DriverTermData driver_connected _ANSI_ARGS_((int));
-EXTERN DriverTermData driver_caller _ANSI_ARGS_((int));
-
-EXTERN int driver_output_term _ANSI_ARGS_((int, DriverTermData *, int));
-EXTERN int driver_send_term _ANSI_ARGS_((int, DriverTermData, DriverTermData *, int));
-
-#endif
-
-
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 09fb6337f7..c17806d96c 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -286,7 +286,7 @@ struct ErtsPollSet_ {
ErtsPollSet next;
int internal_fd_limit;
ErtsFdStatus *fds_status;
- int no_of_user_fds;
+ erts_smp_atomic_t no_of_user_fds;
int fds_status_len;
#if ERTS_POLL_USE_KERNEL_POLL
int kp_fd;
@@ -852,7 +852,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
ASSERT(ps->fds_status[fd].used_events);
ps->fds_status[fd].used_events = 0;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
update_fallback_pollset(ps, fd);
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
break;
@@ -902,11 +902,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
if (!events) {
buf[buf_len].events = POLLREMOVE;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
buf[buf_len].events = events;
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
else {
if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
@@ -996,12 +996,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
}
if (used_events) {
if (!events) {
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
}
else {
if (events)
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
@@ -1075,7 +1075,7 @@ update_pollset(ErtsPollSet ps, int fd)
epe.data.fd = epe_templ.data.fd;
res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
} while (res != 0 && errno == EINTR);
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
ps->fds_status[fd].used_events = 0;
}
@@ -1083,11 +1083,11 @@ update_pollset(ErtsPollSet ps, int fd)
/* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9
need a non-NULL event pointer even though it is ignored... */
op = EPOLL_CTL_DEL;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
op = EPOLL_CTL_ADD;
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
else {
op = EPOLL_CTL_MOD;
@@ -1137,7 +1137,7 @@ update_pollset(ErtsPollSet ps, int fd)
/* Fall through ... */
case EPOLL_CTL_ADD: {
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
#if ERTS_POLL_USE_CONCURRENT_UPDATE
if (!*update_fallback) {
*update_fallback = 1;
@@ -1225,7 +1225,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
#if ERTS_POLL_USE_FALLBACK
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
#endif
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
last_pix = --ps->no_poll_fds;
if (pix != last_pix) {
/* Move last pix to this pix */
@@ -1252,7 +1252,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
|| fd == ps->kp_fd);
#endif
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
ps->fds_status[fd].pix = pix = ps->no_poll_fds++;
if (pix >= ps->poll_fds_len)
grow_poll_fds(ps, pix);
@@ -1303,7 +1303,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
if (!ps->fds_status[fd].used_events) {
ASSERT(events);
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds++;
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
@@ -1311,7 +1311,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
}
else if (!events) {
ASSERT(ps->fds_status[fd].used_events);
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
ps->fds_status[fd].events = events;
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds--;
@@ -1912,7 +1912,8 @@ static ERTS_INLINE int
check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
{
ASSERT(!*ps_locked);
- if (ps->no_of_user_fds == 0 && tv->tv_usec == 0 && tv->tv_sec == 0) {
+ if (erts_smp_atomic_read(&ps->no_of_user_fds) == 0
+ && tv->tv_usec == 0 && tv->tv_sec == 0) {
/* Nothing to poll and zero timeout; done... */
return 0;
}
@@ -1950,7 +1951,7 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* the maximum number of file descriptors in the poll set.
*/
struct dvpoll poll_res;
- int nfds = ps->no_of_user_fds;
+ int nfds = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
#ifdef ERTS_SMP
nfds++; /* Wakeup pipe */
#endif
@@ -2228,7 +2229,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = 0;
ps->fds_status = NULL;
ps->fds_status_len = 0;
- ps->no_of_user_fds = 0;
+ erts_smp_atomic_init(&ps->no_of_user_fds, 0);
#if ERTS_POLL_USE_KERNEL_POLL
ps->kp_fd = -1;
#if ERTS_POLL_USE_EPOLL
@@ -2326,7 +2327,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#if ERTS_POLL_USE_FALLBACK
ps->fallback_used = 0;
#endif
- ps->no_of_user_fds = 0; /* Don't count wakeup pipe and fallback fd */
+ erts_smp_atomic_set(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
erts_smp_spin_lock(&pollsets_lock);
ps->next = pollsets;
@@ -2472,7 +2473,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->memory_size = size;
- pip->poll_set_size = ps->no_of_user_fds;
+ pip->poll_set_size = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
#ifdef ERTS_SMP
pip->poll_set_size++; /* Wakeup pipe */
#endif
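
no_of_user_fds becomes an atomic so that check_fd_events() can consult it on its fast path without taking the pollset lock; the writers still update it while holding the lock. A sketch of that pattern (GCC __atomic builtins used as illustrative stand-ins for erts_smp_atomic_*; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static long no_of_user_fds;   /* written under the lock, read lock-free */
    static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;

    static void add_fd(void)
    {
        pthread_mutex_lock(&set_lock);
        /* ... update fd status tables ... */
        __atomic_fetch_add(&no_of_user_fds, 1, __ATOMIC_SEQ_CST);
        pthread_mutex_unlock(&set_lock);
    }

    static int nothing_to_poll(void)      /* lock-free fast path */
    {
        return __atomic_load_n(&no_of_user_fds, __ATOMIC_SEQ_CST) == 0;
    }

    int main(void)
    {
        add_fd();
        printf("%d\n", nothing_to_poll());  /* 0 */
        return 0;
    }
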
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 737ffd9f94..af4ab693dc 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -123,8 +123,6 @@ static ErtsSysReportExit *report_exit_transit_list;
extern int check_async_ready(void);
extern int driver_interrupt(int, int);
-/*EXTERN_FUNCTION(void, increment_time, (int));*/
-/*EXTERN_FUNCTION(int, next_time, (_VOID_));*/
extern void do_break(void);
extern void erl_sys_args(int*, char**);
@@ -221,10 +219,10 @@ static struct fd_data {
} *fd_data; /* indexed by fd */
/* static FUNCTION(int, write_fill, (int, char*, int)); unused? */
-static FUNCTION(void, note_child_death, (int, int));
+static void note_child_death(int, int);
#if CHLDWTHR
-static FUNCTION(void *, child_waiter, (void *));
+static void* child_waiter(void *);
#endif
/********************* General functions ****************************/
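[Note: the prototype rewrites above (and the matching ones in the win32 sys.c further down) retire the pre-ANSI FUNCTION() wrapper in favor of plain prototypes. For context, a sketch of what such a compatibility macro is assumed to have looked like; the exact historical definition may differ:

    /* Pre-ANSI compilers could not parse prototyped declarations, so one
     * macro kept a single source line working on both kinds of compiler: */
    #ifdef __STDC__
    #define FUNCTION(rettype, name, args) rettype name args
    #else
    #define FUNCTION(rettype, name, args) rettype name ()
    #endif

    /* FUNCTION(void, note_child_death, (int, int)) then expands to the
     * ANSI prototype void note_child_death(int, int), which is exactly
     * what the patch now writes out directly. */]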
@@ -384,18 +382,6 @@ MALLOC_USE_HASH(1);
#endif
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
#ifdef ERTS_THR_HAVE_SIG_FUNCS
/*
@@ -488,9 +474,6 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
@@ -538,13 +521,14 @@ erts_sys_pre_init(void)
#endif
#endif /* USE_THREADS */
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
}
void
erl_sys_init(void)
{
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
+ {
int res;
char bindir[MAXPATHLEN];
size_t bindirsz = sizeof(bindir);
@@ -574,6 +558,7 @@ erl_sys_init(void)
bindir,
DIR_SEPARATOR_CHAR,
CHILD_SETUP_PROG_NAME);
+ }
#endif
#ifdef USE_SETLINEBUF
@@ -2575,7 +2560,6 @@ extern Preload pre_loaded[];
void erts_sys_alloc_init(void)
{
- elib_ensure_initialized();
}
void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index c59c99f65e..d6d06d0036 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -810,7 +810,9 @@ int
matherr(struct exception *exc)
{
#if !defined(NO_FPE_SIGNALS)
- set_current_fp_exception((unsigned long)__builtin_return_address(0));
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+ if (fpexnp != NULL)
+ *fpexnp = (unsigned long)__builtin_return_address(0);
#endif
return 1;
}
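[Note: matherr() now records the faulting return address through a per-thread slot obtained from erts_get_current_fp_exception(), with a NULL guard for threads that have no slot. A minimal sketch of that thread-local-slot pattern; the names and the __thread storage class are illustrative assumptions, not the actual ERTS implementation:

    #include <stdio.h>

    static __thread unsigned long fp_exception;    /* one slot per thread */
    static __thread int fp_initialized;

    /* Returns the calling thread's slot, or NULL if the thread was never
     * set up for floating point exception handling. */
    static volatile unsigned long *get_current_fp_exception(void)
    {
        return fp_initialized ? &fp_exception : NULL;
    }

    static int record_fp_exception(unsigned long ret_addr)
    {
        volatile unsigned long *fpexnp = get_current_fp_exception();
        if (fpexnp != NULL)                /* same guard as matherr() above */
            *fpexnp = ret_addr;
        return 1;
    }

    int main(void)
    {
        fp_initialized = 1;
        record_fp_exception(0x1234UL);
        printf("recorded: %#lx\n", fp_exception);
        return 0;
    }]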
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index bd02cf85eb..15d4cd7361 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -37,7 +37,7 @@
void erts_sys_init_float(void);
void erl_start(int, char**);
-void erl_exit(int n, char*, _DOTS_);
+void erl_exit(int n, char*, ...);
void erl_error(char*, va_list);
void erl_crash_dump(char*, int, char*, ...);
@@ -67,10 +67,10 @@ static void async_read_file(struct async_io* aio, LPVOID buf, DWORD numToRead);
static int async_write_file(struct async_io* aio, LPVOID buf, DWORD numToWrite);
static int get_overlapped_result(struct async_io* aio,
LPDWORD pBytesRead, BOOL wait);
-static FUNCTION(BOOL, CreateChildProcess, (char *, HANDLE, HANDLE,
- HANDLE, LPHANDLE, BOOL,
- LPVOID, LPTSTR, unsigned,
- char **, int *));
+static BOOL CreateChildProcess(char *, HANDLE, HANDLE,
+ HANDLE, LPHANDLE, BOOL,
+ LPVOID, LPTSTR, unsigned,
+ char **, int *);
static int create_pipe(LPHANDLE, LPHANDLE, BOOL, BOOL);
static int ApplicationType(const char* originalName, char fullPath[MAX_PATH],
BOOL search_in_path, BOOL handle_quotes,
@@ -93,7 +93,7 @@ static erts_smp_mtx_t sys_driver_data_lock;
#define APPL_WIN3X 2
#define APPL_WIN32 3
-static FUNCTION(int, driver_write, (long, HANDLE, byte*, int));
+static int driver_write(long, HANDLE, byte*, int);
static void common_stop(int);
static int create_file_thread(struct async_io* aio, int mode);
#ifdef ERTS_SMP
@@ -2627,7 +2627,6 @@ erts_sys_main_thread(void)
void erts_sys_alloc_init(void)
{
- elib_ensure_initialized();
}
void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
@@ -2974,19 +2973,6 @@ check_supported_os_version(void)
}
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
-
#ifdef ERTS_ENABLE_LOCK_COUNT
static void
thr_create_prepare_child(void *vtcdp)
@@ -3005,14 +2991,9 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
-
#ifdef ERTS_ENABLE_LOCK_COUNT
eid.thread_create_child_func = thr_create_prepare_child;
#endif
-
erts_thr_init(&eid);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init();
@@ -3020,7 +3001,6 @@ erts_sys_pre_init(void)
}
#endif
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_sys_env_init();
}
void noinherit_std_handle(DWORD type)
@@ -3036,6 +3016,8 @@ void erl_sys_init(void)
{
HANDLE handle;
+ erts_sys_env_init();
+
noinherit_std_handle(STD_OUTPUT_HANDLE);
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 5ec17a5e2a..a4c02da626 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -75,7 +75,6 @@ MODULES= \
node_container_SUITE \
nofrag_SUITE \
num_bif_SUITE \
- obsolete_SUITE \
op_SUITE \
port_SUITE \
port_bif_SUITE \
diff --git a/erts/emulator/test/decode_packet_SUITE.erl b/erts/emulator/test/decode_packet_SUITE.erl
index 6cde286871..d9e961be2f 100644
--- a/erts/emulator/test/decode_packet_SUITE.erl
+++ b/erts/emulator/test/decode_packet_SUITE.erl
@@ -304,6 +304,10 @@ http(Config) when is_list(Config) ->
{ok, {http_request, 'GET', ResB, {1,1}}, Rest} = decode_pkt(http_bin,Bin)
end,
lists:foreach(UriF, http_uri_variants()),
+
+ %% Response with empty phrase
+ ?line {ok,{http_response,{1,1},200,[]},<<>>} = decode_pkt(http, <<"HTTP/1.1 200\r\n">>, []),
+ ?line {ok,{http_response,{1,1},200,<<>>},<<>>} = decode_pkt(http_bin, <<"HTTP/1.1 200\r\n">>, []),
ok.
http_with_bin(http) ->
diff --git a/erts/emulator/test/hash_SUITE.erl b/erts/emulator/test/hash_SUITE.erl
index 85bdb8bff8..f5d1871bfb 100644
--- a/erts/emulator/test/hash_SUITE.erl
+++ b/erts/emulator/test/hash_SUITE.erl
@@ -480,14 +480,14 @@ otp_5292_test() ->
S2 = md5([md5(hash_int(S, E, PH)) || {Start, N, Sz} <- d(),
{S, E} <- int(Start, N, Sz)]),
?line Comment = case S1 of
- <<43,186,76,102,87,4,110,245,203,177,206,6,130,69,43,99>> ->
+ <<4,248,208,156,200,131,7,1,173,13,239,173,112,81,16,174>> ->
?line big = erlang:system_info(endian),
"Big endian machine";
- <<21,206,139,15,149,28,167,81,98,225,132,254,49,125,174,195>> ->
+ <<180,28,33,231,239,184,71,125,76,47,227,241,78,184,176,233>> ->
?line little = erlang:system_info(endian),
"Little endian machine"
end,
- ?line <<140,37,79,80,26,242,130,22,20,229,123,240,223,244,43,99>> = S2,
+ ?line <<124,81,198,121,174,233,19,137,10,83,33,80,226,111,238,99>> = S2,
?line 2 = erlang:hash(1, (1 bsl 27) -1),
?line {'EXIT', _} = (catch erlang:hash(1, (1 bsl 27))),
{comment, Comment}.
@@ -507,7 +507,7 @@ hash_int(Start, End, F) ->
{Start, End, md5(HL)}.
md5(T) ->
- erlang:md5(term_to_binary(T)).
+ erlang:md5(term_to_binary(T)).
bit_level_binaries() ->
?line [3511317,7022633,14044578,28087749,56173436,112344123,90467083|_] =
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 888bf582d8..f45cfa3e4a 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -30,7 +30,7 @@
fin_per_testcase/2, basic/1, reload/1, upgrade/1, heap_frag/1,
types/1, many_args/1, binaries/1, get_string/1, get_atom/1, api_macros/1,
from_array/1, iolist_as_binary/1, resource/1, resource_binary/1, resource_takeover/1,
- threading/1, send/1, send2/1, send_threaded/1, neg/1, is_checks/1,
+ threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1, is_checks/1,
get_length/1, make_atom/1, make_string/1]).
-export([many_args_100/100]).
@@ -51,7 +51,7 @@
all(suite) ->
[basic, reload, upgrade, heap_frag, types, many_args, binaries, get_string,
get_atom, api_macros, from_array, iolist_as_binary, resource, resource_binary,
- resource_takeover, threading, send, send2, send_threaded, neg, is_checks,
+ resource_takeover, threading, send, send2, send3, send_threaded, neg, is_checks,
get_length, make_atom, make_string].
%%init_per_testcase(_Case, Config) ->
@@ -916,7 +916,164 @@ forwarder(To, N) ->
other_term() ->
{fun(X,Y) -> X*Y end, make_ref()}.
-
+
+send3(doc) -> ["Message sending stress test"];
+send3(Config) when is_list(Config) ->
+    %% Let a number of processes send random message blobs to each other
+ %% using enif_send. Kill and spawn new ones randomly to keep a ~constant
+ %% number of workers running.
+ Seed = now(),
+ io:format("seed: ~p\n",[Seed]),
+ random:seed(Seed),
+ ets:new(nif_SUITE,[named_table,public]),
+ ?line true = ets:insert(nif_SUITE,{send3,0,0,0,0}),
+ timer:send_after(10000, timeout), % Run for 10 seconds
+ SpawnCnt = send3_controller(0, [], [], 20),
+ ?line [{_,Rcv,SndOk,SndFail,Balance}] = ets:lookup(nif_SUITE,send3),
+ io:format("spawns=~p received=~p, sent=~p send-failure=~p balance=~p\n",
+ [SpawnCnt,Rcv,SndOk,SndFail,Balance]),
+ ets:delete(nif_SUITE).
+
+send3_controller(SpawnCnt, [], _, infinity) ->
+ SpawnCnt;
+send3_controller(SpawnCnt0, Mons0, Pids0, Tick) ->
+ receive
+ timeout ->
+ io:format("Timeout. Sending 'halt' to ~p\n",[Pids0]),
+ lists:foreach(fun(P) -> P ! {halt,self()} end, Pids0),
+ lists:foreach(fun(P) -> receive {halted,P} -> ok end end, Pids0),
+ QTot = lists:foldl(fun(P,QSum) ->
+ {message_queue_len,QLen} =
+ erlang:process_info(P,message_queue_len),
+ QSum + QLen
+ end, 0, Pids0),
+ io:format("Total queue length ~p\n",[QTot]),
+ lists:foreach(fun(P) -> P ! die end, Pids0),
+ send3_controller(SpawnCnt0, Mons0, [], infinity);
+ {'DOWN', MonRef, process, _Pid, _} ->
+ Mons1 = lists:delete(MonRef, Mons0),
+ %%io:format("Got DOWN from ~p. Monitors left: ~p\n",[Pid,Mons1]),
+ send3_controller(SpawnCnt0, Mons1, Pids0, Tick)
+ after Tick ->
+ Max = 20,
+ N = length(Pids0),
+ PidN = random:uniform(Max),
+ %%io:format("N=~p PidN=~p Pids0=~p\n", [N,PidN,Pids0]),
+ case PidN > N of
+ true ->
+ {NewPid,Mon} = spawn_opt(fun send3_proc/0, [link,monitor]),
+ lists:foreach(fun(P) -> P ! {is_born,NewPid} end, Pids0),
+ ?line Balance = ets:lookup_element(nif_SUITE,send3,5),
+ Inject = (Balance =< 0),
+ case Inject of
+ true -> ok;
+ false -> ets:update_element(nif_SUITE,send3,{5,-1})
+ end,
+ NewPid ! {pids,Pids0,Inject},
+ send3_controller(SpawnCnt0+1, [Mon|Mons0], [NewPid|Pids0], Tick);
+ false ->
+ KillPid = lists:nth(PidN,Pids0),
+ KillPid ! die,
+ Pids1 = lists:delete(KillPid, Pids0),
+ lists:foreach(fun(P) -> P ! {is_dead,KillPid} end, Pids1),
+ send3_controller(SpawnCnt0, Mons0, Pids1, Tick)
+ end
+ end.
+
+send3_proc() ->
+ %%io:format("Process ~p spawned\n",[self()]),
+ send3_proc([self()], {0,0,0}, {1,2,3,4,5}).
+send3_proc(Pids0, Counters={Rcv,SndOk,SndFail}, State0) ->
+ %%io:format("~p: Pids0=~p", [self(), Pids0]),
+ %%timer:sleep(10),
+ receive
+ {pids, Pids1, Inject} ->
+ %%io:format("~p: got ~p Inject=~p\n", [self(), Pids1, Inject]),
+ ?line Pids0 = [self()],
+ Pids2 = [self() | Pids1],
+ case Inject of
+ true -> send3_proc_send(Pids2, Counters, State0);
+ false -> send3_proc(Pids2, Counters, State0)
+ end;
+ {is_born, Pid} ->
+ %%io:format("~p: is_born ~p, got ~p\n", [self(), Pid, Pids0]),
+ send3_proc([Pid | Pids0], Counters, State0);
+ {is_dead, Pid} ->
+ Pids1 = lists:delete(Pid,Pids0),
+ %%io:format("~p: is_dead ~p, got ~p\n", [self(), Pid, Pids1]),
+ send3_proc(Pids1, Counters, State0);
+ {blob, Blob0} ->
+ %%io:format("~p: blob ~p\n", [self(), Blob0]),
+ State1 = send3_new_state(State0, Blob0),
+ send3_proc_send(Pids0, {Rcv+1,SndOk,SndFail}, State1);
+ die ->
+ %%io:format("Process ~p terminating, stats = ~p\n",[self(),Counters]),
+ {message_queue_len,Dropped} = erlang:process_info(self(),message_queue_len),
+ _R = ets:update_counter(nif_SUITE,send3,
+ [{2,Rcv},{3,SndOk},{4,SndFail},{5,1-Dropped}]),
+ %%io:format("~p: dies R=~p\n", [self(), R]),
+ ok;
+ {halt,Papa} ->
+ Papa ! {halted,self()},
+ io:format("~p halted\n",[self()]),
+ receive die -> ok end,
+ io:format("~p dying\n",[self()])
+ end.
+
+send3_proc_send(Pids, {Rcv,SndOk,SndFail}, State0) ->
+ To = lists:nth(random:uniform(length(Pids)),Pids),
+ Blob = send3_make_blob(),
+ State1 = send3_new_state(State0,Blob),
+ case send3_send(To, Blob) of
+ true ->
+ send3_proc(Pids, {Rcv,SndOk+1,SndFail}, State1);
+ false ->
+ send3_proc(Pids, {Rcv,SndOk,SndFail+1}, State1)
+ end.
+
+
+send3_make_blob() ->
+ case random:uniform(20)-1 of
+ 0 -> {term,[]};
+ N ->
+ MsgEnv = alloc_msgenv(),
+ repeat(N bsr 1,
+ fun(_) -> grow_blob(MsgEnv,other_term(),random:uniform(1 bsl 20))
+ end, void),
+ case (N band 1) of
+ 0 -> {term,copy_blob(MsgEnv)};
+ 1 -> {msgenv,MsgEnv}
+ end
+ end.
+
+send3_send(Pid, Msg) ->
+ %% 90% enif_send and 10% normal bang
+ case random:uniform(10) of
+ 1 -> send3_send_bang(Pid,Msg);
+ _ -> send3_send_nif(Pid,Msg)
+ end.
+send3_send_nif(Pid, {term,Blob}) ->
+ %%io:format("~p send term nif\n",[self()]),
+ send_term(Pid, {blob, Blob}) =:= 1;
+send3_send_nif(Pid, {msgenv,MsgEnv}) ->
+ %%io:format("~p send blob nif\n",[self()]),
+ send3_blob(MsgEnv, Pid, blob) =:= 1.
+
+send3_send_bang(Pid, {term,Blob}) ->
+ %%io:format("~p send term bang\n",[self()]),
+ Pid ! {blob, Blob},
+ true;
+send3_send_bang(Pid, {msgenv,MsgEnv}) ->
+ %%io:format("~p send blob bang\n",[self()]),
+ Pid ! {blob, copy_blob(MsgEnv)},
+ true.
+
+send3_new_state(State, Blob) ->
+ case random:uniform(5+2) of
+        N when N =< 5 -> setelement(N, State, Blob);
+ _ -> State % Don't store blob
+ end.
+
neg(doc) -> ["Negative testing of load_nif"];
neg(Config) when is_list(Config) ->
TmpMem = tmpmem(),
@@ -1070,10 +1227,13 @@ send_new_blob(_,_) -> ?nif_stub.
alloc_msgenv() -> ?nif_stub.
clear_msgenv(_) -> ?nif_stub.
grow_blob(_,_) -> ?nif_stub.
+grow_blob(_,_,_) -> ?nif_stub.
send_blob(_,_) -> ?nif_stub.
+send3_blob(_,_,_) -> ?nif_stub.
send_blob_thread(_,_,_) -> ?nif_stub.
join_send_thread(_) -> ?nif_stub.
-
+copy_blob(_) -> ?nif_stub.
+send_term(_,_) -> ?nif_stub.
nif_stub_error(Line) ->
exit({nif_not_loaded,module,?MODULE,line,Line}).
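[Note: the ?nif_stub bodies above follow the usual NIF stub pattern: each Erlang function is replaced by its C implementation when the NIF library loads, and otherwise exits through nif_stub_error/1. A one-line sketch of the macro this suite is assumed to define:

    -define(nif_stub, nif_stub_error(?LINE)).]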
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index c8cd323b7e..17f644829f 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -1038,36 +1038,41 @@ static ERL_NIF_TERM make_term_copy(struct make_term_info* mti, int n)
{
return enif_make_copy(mti->dst_env, mti->other_term);
}
+
+typedef ERL_NIF_TERM Make_term_Func(struct make_term_info*, int);
+static Make_term_Func* make_funcs[] = {
+ make_term_binary,
+ make_term_int,
+ make_term_ulong,
+ make_term_double,
+ make_term_atom,
+ make_term_existing_atom,
+ make_term_string,
+ //make_term_ref,
+ make_term_sub_binary,
+ make_term_uint,
+ make_term_long,
+ make_term_tuple0,
+ make_term_list0,
+ make_term_resource,
+ make_term_new_binary,
+ make_term_caller_pid,
+ make_term_tuple,
+ make_term_list,
+ make_term_list_cell,
+ make_term_tuple_from_array,
+ make_term_list_from_array,
+ make_term_garbage,
+ make_term_copy
+};
+static unsigned num_of_make_funcs()
+{
+ return sizeof(make_funcs)/sizeof(*make_funcs);
+}
static int make_term_n(struct make_term_info* mti, int n, ERL_NIF_TERM* res)
{
- typedef ERL_NIF_TERM Make_term_Func(struct make_term_info*, int);
- static Make_term_Func* funcs[] = {
- make_term_binary,
- make_term_int,
- make_term_ulong,
- make_term_double,
- make_term_atom,
- make_term_existing_atom,
- make_term_string,
- //make_term_ref,
- make_term_sub_binary,
- make_term_uint,
- make_term_long,
- make_term_tuple0,
- make_term_list0,
- make_term_resource,
- make_term_new_binary,
- make_term_caller_pid,
- make_term_tuple,
- make_term_list,
- make_term_list_cell,
- make_term_tuple_from_array,
- make_term_list_from_array,
- make_term_garbage,
- make_term_copy
- };
- if (n < sizeof(funcs)/sizeof(*funcs)) {
- *res = funcs[n](mti, n);
+ if (n < num_of_make_funcs()) {
+ *res = make_funcs[n](mti, n);
push_term(mti, *res);
return 1;
}
@@ -1167,14 +1172,14 @@ static ERL_NIF_TERM grow_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
{
union { void* vp; struct make_term_info* p; }mti;
ERL_NIF_TERM term;
- if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || (argc>2 && !enif_get_uint(env,argv[2], &mti.p->n))) {
return enif_make_badarg(env);
}
mti.p->caller_env = env;
mti.p->other_term = argv[1];
- while (!make_term_n(mti.p, mti.p->n++, &term)) {
- mti.p->n = 0;
- }
+ mti.p->n %= num_of_make_funcs();
+ make_term_n(mti.p, mti.p->n++, &term);
mti.p->blob = enif_make_list_cell(mti.p->dst_env, term, mti.p->blob);
return atom_ok;
}
@@ -1194,6 +1199,23 @@ static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
return enif_make_tuple3(env, atom_ok, enif_make_int(env,res), copy);
}
+static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ ErlNifPid to;
+ ERL_NIF_TERM copy;
+ int res;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || !enif_get_local_pid(env, argv[1], &to)) {
+ return enif_make_badarg(env);
+ }
+ mti.p->blob = enif_make_tuple2(mti.p->dst_env,
+ enif_make_copy(mti.p->dst_env, argv[2]),
+ mti.p->blob);
+ res = enif_send(env, &to, mti.p->dst_env, mti.p->blob);
+ return enif_make_int(env,res);
+}
+
void* threaded_sender(void *arg)
{
@@ -1253,6 +1275,28 @@ static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TER
return enif_make_tuple2(env, atom_ok, enif_make_int(env, mti.p->send_res));
}
+static ERL_NIF_TERM copy_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
+ return enif_make_badarg(env);
+ }
+ return enif_make_copy(env, mti.p->blob);
+}
+
+static ERL_NIF_TERM send_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifEnv* menv;
+ ErlNifPid pid;
+ int ret;
+ if (!enif_get_local_pid(env, argv[0], &pid)) {
+ return enif_make_badarg(env);
+ }
+ menv = enif_alloc_env();
+ ret = enif_send(env, &pid, menv, enif_make_copy(menv, argv[1]));
+ enif_free_env(menv);
+ return enif_make_int(env, ret);
+}
static ErlNifFunc nif_funcs[] =
{
@@ -1291,9 +1335,13 @@ static ErlNifFunc nif_funcs[] =
{"alloc_msgenv", 0, alloc_msgenv},
{"clear_msgenv", 1, clear_msgenv},
{"grow_blob", 2, grow_blob},
+ {"grow_blob", 3, grow_blob},
{"send_blob", 2, send_blob},
+ {"send3_blob", 3, send3_blob},
{"send_blob_thread", 3, send_blob_thread},
- {"join_send_thread", 1, join_send_thread}
+ {"join_send_thread", 1, join_send_thread},
+ {"copy_blob", 1, copy_blob},
+ {"send_term", 2, send_term}
};
ERL_NIF_INIT(nif_SUITE,nif_funcs,load,reload,upgrade,unload)
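[Note: taken together, the new NIFs give the Erlang side two delivery paths: send_term/2 copies a term into a fresh process-independent environment and sends it immediately, while grow_blob/3, copy_blob/1 and send3_blob/3 work against the long-lived environment held in the msgenv resource. A hedged usage sketch, assuming the nif_SUITE library has been loaded:

    %% Send a term to ourselves via enif_send and receive it back.
    Self = self(),
    1 = nif_SUITE:send_term(Self, {blob, [1,2,3]}),
    receive {blob, [1,2,3]} -> ok end,

    %% Grow a blob inside a message environment, then snapshot it.
    MsgEnv = nif_SUITE:alloc_msgenv(),
    ok = nif_SUITE:grow_blob(MsgEnv, {some, term}, 1),
    Blob = nif_SUITE:copy_blob(MsgEnv).]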
diff --git a/erts/emulator/test/obsolete_SUITE.erl b/erts/emulator/test/obsolete_SUITE.erl
deleted file mode 100644
index b191f84ee0..0000000000
--- a/erts/emulator/test/obsolete_SUITE.erl
+++ /dev/null
@@ -1,123 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-
-
--module(obsolete_SUITE).
--author('[email protected]').
--compile(nowarn_obsolete_guard).
-
--export([all/1]).
-
--export([erl_threads/1]).
-
--include("test_server.hrl").
-
--define(DEFAULT_TIMETRAP_SECS, 240).
-
-all(doc) -> [];
-all(suite) ->
- case catch erlang:system_info({wordsize,external}) of
- 4 -> [erl_threads];
- _ -> {skip, "Only expected to work on true 32-bit architectures"}
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% %%
-%% Testcases %%
-%% %%
-
-erl_threads(suite) -> [];
-erl_threads(doc) -> [];
-erl_threads(Cfg) ->
- ?line case erlang:system_info(threads) of
- true ->
- ?line drv_case(Cfg, erl_threads);
- false ->
- ?line {skip, "Emulator not compiled with threads support"}
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% %%
-%% Internal functions %%
-%% %%
-
-drv_case(Config, CaseName) ->
- drv_case(Config, CaseName, "").
-
-drv_case(Config, CaseName, TimeTrap) when integer(TimeTrap) ->
- drv_case(Config, CaseName, "", TimeTrap);
-drv_case(Config, CaseName, Command) when list(Command) ->
- drv_case(Config, CaseName, Command, ?DEFAULT_TIMETRAP_SECS).
-
-drv_case(Config, CaseName, TimeTrap, Command) when list(Command),
- integer(TimeTrap) ->
- drv_case(Config, CaseName, Command, TimeTrap);
-drv_case(Config, CaseName, Command, TimeTrap) when list(Config),
- atom(CaseName),
- list(Command),
- integer(TimeTrap) ->
- case ?t:os_type() of
- {Family, _} when Family == unix; Family == win32 ->
- ?line run_drv_case(Config, CaseName, Command, TimeTrap);
- SkipOs ->
- ?line {skipped,
- lists:flatten(["Not run on "
- | io_lib:format("~p",[SkipOs])])}
- end.
-
-run_drv_case(Config, CaseName, Command, TimeTrap) ->
- ?line Dog = test_server:timetrap(test_server:seconds(TimeTrap)),
- ?line DataDir = ?config(data_dir,Config),
- case erl_ddll:load_driver(DataDir, CaseName) of
- ok -> ok;
- {error, Error} ->
- io:format("~s\n", [erl_ddll:format_error(Error)]),
- ?line ?t:fail()
- end,
- ?line Port = open_port({spawn, atom_to_list(CaseName)}, []),
- ?line true = is_port(Port),
- ?line Port ! {self(), {command, Command}},
- ?line Result = receive_drv_result(Port, CaseName),
- ?line Port ! {self(), close},
- ?line receive
- {Port, closed} ->
- ok
- end,
- ?line ok = erl_ddll:unload_driver(CaseName),
- ?line test_server:timetrap_cancel(Dog),
- ?line Result.
-
-receive_drv_result(Port, CaseName) ->
- ?line receive
- {print, Port, CaseName, Str} ->
- ?line ?t:format("~s", [Str]),
- ?line receive_drv_result(Port, CaseName);
- {'EXIT', Port, Error} ->
- ?line ?t:fail(Error);
- {'EXIT', error, Error} ->
- ?line ?t:fail(Error);
- {failed, Port, CaseName, Comment} ->
- ?line ?t:fail(Comment);
- {skipped, Port, CaseName, Comment} ->
- ?line {skipped, Comment};
- {succeeded, Port, CaseName, ""} ->
- ?line succeeded;
- {succeeded, Port, CaseName, Comment} ->
- ?line {comment, Comment}
- end.
diff --git a/erts/emulator/test/obsolete_SUITE_data/Makefile.src b/erts/emulator/test/obsolete_SUITE_data/Makefile.src
deleted file mode 100644
index d8e2b861c0..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/Makefile.src
+++ /dev/null
@@ -1,33 +0,0 @@
-# ``The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved via the world wide web at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# The Initial Developer of the Original Code is Ericsson Utvecklings AB.
-# Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
-# AB. All Rights Reserved.''
-#
-# $Id$
-#
-
-TEST_DRVS = erl_threads@dll@
-CC = @CC@
-LD = @LD@
-CFLAGS = @SHLIB_CFLAGS@ -I@erl_include@ @DEFS@
-SHLIB_EXTRA_LDLIBS = testcase_driver@obj@
-
-all: $(TEST_DRVS)
-
-@SHLIB_RULES@
-
-testcase_driver@obj@: testcase_driver.c testcase_driver.h
-$(TEST_DRVS): testcase_driver@obj@
-
-
-
diff --git a/erts/emulator/test/obsolete_SUITE_data/erl_threads.c b/erts/emulator/test/obsolete_SUITE_data/erl_threads.c
deleted file mode 100644
index 27a5163121..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/erl_threads.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#include "testcase_driver.h"
-
-#ifndef __WIN32__
-
-#define NO_OF_THREADS 2
-
-#include <unistd.h>
-#include <errno.h>
-
-static int die;
-static int cw_passed;
-static int res_tf0;
-static int res_tf1;
-static erl_mutex_t mtx;
-static erl_cond_t cnd;
-static erl_thread_t tid[NO_OF_THREADS];
-static int need_join[NO_OF_THREADS];
-
-typedef struct {
- int n;
-} thr_arg_t;
-
-
-static void *tf0(void *vta)
-{
- int r;
-
- if (((thr_arg_t *) vta)->n != 0)
- goto fail;
-
- r = erts_mutex_lock(mtx);
- if (r != 0) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_mutex_unlock(mtx);
- if (r != 0)
- goto fail;
-
- res_tf0 = 0;
-
- return (void *) &res_tf0;
-
- fail:
- return NULL;
-}
-
-
-static void *tf1(void *vta)
-{
- int r;
-
- if (((thr_arg_t *) vta)->n != 1)
- goto fail;
-
- r = erts_mutex_lock(mtx);
- if (r != 0) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_mutex_unlock(mtx);
- if (r != 0)
- goto fail;
-
- res_tf1 = 1;
-
- erts_thread_exit((void *) &res_tf1);
-
- res_tf1 = 4711;
-
- fail:
- return NULL;
-}
-
-#endif /* #ifndef __WIN32__ */
-
-void
-testcase_run(TestCaseState_t *tcs)
-{
-#ifdef __WIN32__
- testcase_skipped(tcs, "Nothing to test; not supported on windows.");
-#else
- int i, r;
- void *tres[NO_OF_THREADS];
- thr_arg_t ta[NO_OF_THREADS];
- erl_thread_t t1;
-
- die = 0;
- cw_passed = 0;
-
- for (i = 0; i < NO_OF_THREADS; i++)
- need_join[i] = 0;
-
- res_tf0 = 17;
- res_tf1 = 17;
-
- cnd = mtx = NULL;
-
- /* Create mutex and cond */
- mtx = erts_mutex_create();
- ASSERT(tcs, mtx);
- cnd = erts_cond_create();
- ASSERT(tcs, cnd);
-
- /* Create the threads */
- ta[0].n = 0;
- r = erts_thread_create(&tid[0], tf0, (void *) &ta[0], 0);
- ASSERT(tcs, r == 0);
- need_join[0] = 1;
-
- ta[1].n = 1;
- r = erts_thread_create(&tid[1], tf1, (void *) &ta[1], 0);
- ASSERT(tcs, r == 0);
- need_join[1] = 1;
-
-    /* Make sure the threads are waiting in cond wait */
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 0, (void) erts_mutex_unlock(mtx));
-
-
- /* Let one thread pass one cond wait */
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 1, (void) erts_mutex_unlock(mtx));
-
-
- /* Let both threads pass one cond wait */
- r = erts_cond_broadcast(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 3, (void) erts_mutex_unlock(mtx));
-
-
-    /* Let the thread that has only passed one cond wait pass the other one */
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 4, (void) erts_mutex_unlock(mtx));
-
- /* Both threads should have passed both cond waits and exited;
- join them and check returned values */
-
- r = erts_thread_join(tid[0], &tres[0]);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
- need_join[0] = 0;
-
- ASSERT_CLNUP(tcs, tres[0] == &res_tf0, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs, res_tf0 == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_thread_join(tid[1], &tres[1]);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
- need_join[1] = 0;
-
- ASSERT_CLNUP(tcs, tres[1] == &res_tf1, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs, res_tf1 == 1, (void) erts_mutex_unlock(mtx));
-
-    /* Test signaling when no one waits */
-
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
-    /* Test broadcasting when no one waits */
-
- r = erts_cond_broadcast(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- /* erts_cond_timedwait() not supported anymore */
- r = erts_cond_timedwait(cnd, mtx, 1000);
- ASSERT_CLNUP(tcs, r != 0, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs,
- strcmp(erl_errno_id(r), "enotsup") == 0,
- (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- r = erts_mutex_destroy(mtx);
- ASSERT(tcs, r == 0);
- mtx = NULL;
-
- r = erts_cond_destroy(cnd);
- ASSERT(tcs, r == 0);
- cnd = NULL;
-
- /* ... */
- t1 = erts_thread_self();
-
- if (cw_passed == 4711) {
-	/* We don't want to execute this; we only check that the
-	   symbol is defined */
- erts_thread_kill(t1);
- }
-
-#endif /* #ifndef __WIN32__ */
-}
-
-char *
-testcase_name(void)
-{
- return "erl_threads";
-}
-
-void
-testcase_cleanup(TestCaseState_t *tcs)
-{
- int i;
- for (i = 0; i < NO_OF_THREADS; i++) {
- if (need_join[i]) {
- erts_mutex_lock(mtx);
- die = 1;
- erts_cond_broadcast(cnd);
- erts_mutex_unlock(mtx);
- erts_thread_join(tid[1], NULL);
- }
- }
- if (mtx)
- erts_mutex_destroy(mtx);
- if (cnd)
- erts_cond_destroy(cnd);
-}
-
diff --git a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c b/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c
deleted file mode 100644
index 99d5adb041..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#include "testcase_driver.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <setjmp.h>
-#include <string.h>
-
-#ifdef __WIN32__
-#undef HAVE_VSNPRINTF
-#define HAVE_VSNPRINTF 1
-#define vsnprintf _vsnprintf
-#endif
-
-#ifndef HAVE_VSNPRINTF
-#define HAVE_VSNPRINTF 0
-#endif
-
-#define COMMENT_BUF_SZ 4096
-
-#define TESTCASE_FAILED 0
-#define TESTCASE_SKIPPED 1
-#define TESTCASE_SUCCEEDED 2
-
-typedef struct {
- TestCaseState_t visible;
- int port;
- int result;
- jmp_buf done_jmp_buf;
- char *comment;
- char comment_buf[COMMENT_BUF_SZ];
-} InternalTestCaseState_t;
-
-long testcase_drv_start(int port, char *command);
-int testcase_drv_stop(long drv_data);
-int testcase_drv_run(long drv_data, char *buf, int len);
-
-static DriverEntry testcase_drv_entry = {
- NULL,
- testcase_drv_start,
- testcase_drv_stop,
- testcase_drv_run
-};
-
-
-int DRIVER_INIT(testcase_drv)(void *arg)
-{
- testcase_drv_entry.driver_name = testcase_name();
- return (int) &testcase_drv_entry;
-}
-
-long
-testcase_drv_start(int port, char *command)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *)
- driver_alloc(sizeof(InternalTestCaseState_t));
- if (!itcs) {
- return -1;
- }
-
- itcs->visible.testcase_name = testcase_name();
- itcs->visible.extra = NULL;
- itcs->port = port;
- itcs->result = TESTCASE_FAILED;
- itcs->comment = "";
-
- return (long) itcs;
-}
-
-int
-testcase_drv_stop(long drv_data)
-{
- testcase_cleanup((TestCaseState_t *) drv_data);
- driver_free((void *) drv_data);
- return 0;
-}
-
-int
-testcase_drv_run(long drv_data, char *buf, int len)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) drv_data;
- DriverTermData result_atom;
- DriverTermData msg[12];
-
- itcs->visible.command = buf;
- itcs->visible.command_len = len;
-
- if (setjmp(itcs->done_jmp_buf) == 0) {
- testcase_run((TestCaseState_t *) itcs);
- itcs->result = TESTCASE_SUCCEEDED;
- }
-
- switch (itcs->result) {
- case TESTCASE_SUCCEEDED:
- result_atom = driver_mk_atom("succeeded");
- break;
- case TESTCASE_SKIPPED:
- result_atom = driver_mk_atom("skipped");
- break;
- case TESTCASE_FAILED:
- default:
- result_atom = driver_mk_atom("failed");
- break;
- }
-
- msg[0] = ERL_DRV_ATOM;
- msg[1] = (DriverTermData) result_atom;
-
- msg[2] = ERL_DRV_PORT;
- msg[3] = driver_mk_port(itcs->port);
-
- msg[4] = ERL_DRV_ATOM;
- msg[5] = driver_mk_atom(itcs->visible.testcase_name);
-
- msg[6] = ERL_DRV_STRING;
- msg[7] = (DriverTermData) itcs->comment;
- msg[8] = (DriverTermData) strlen(itcs->comment);
-
- msg[9] = ERL_DRV_TUPLE;
- msg[10] = (DriverTermData) 4;
-
- driver_output_term(itcs->port, msg, 11);
- return 0;
-}
-
-int
-testcase_assertion_failed(TestCaseState_t *tcs,
- char *file, int line, char *assertion)
-{
- testcase_failed(tcs, "%s:%d: Assertion failed: \"%s\"",
- file, line, assertion);
- return 0;
-}
-
-void
-testcase_printf(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- DriverTermData msg[12];
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- msg[0] = ERL_DRV_ATOM;
- msg[1] = (DriverTermData) driver_mk_atom("print");
-
- msg[2] = ERL_DRV_PORT;
- msg[3] = driver_mk_port(itcs->port);
-
- msg[4] = ERL_DRV_ATOM;
- msg[5] = driver_mk_atom(itcs->visible.testcase_name);
-
- msg[6] = ERL_DRV_STRING;
- msg[7] = (DriverTermData) itcs->comment_buf;
- msg[8] = (DriverTermData) strlen(itcs->comment_buf);
-
- msg[9] = ERL_DRV_TUPLE;
- msg[10] = (DriverTermData) 4;
-
- driver_output_term(itcs->port, msg, 11);
-}
-
-
-void testcase_succeeded(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_SUCCEEDED;
- itcs->comment = itcs->comment_buf;
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void testcase_skipped(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_SKIPPED;
- itcs->comment = itcs->comment_buf;
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void testcase_failed(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- char buf[10];
- size_t bufsz = sizeof(buf);
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_FAILED;
- itcs->comment = itcs->comment_buf;
-
- if (erl_drv_getenv("ERL_ABORT_ON_FAILURE", buf, &bufsz) == 0
- && strcmp("true", buf) == 0) {
- fprintf(stderr, "Testcase \"%s\" failed: %s\n",
- itcs->visible.testcase_name, itcs->comment);
- abort();
- }
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void *testcase_alloc(size_t size)
-{
- return driver_alloc(size);
-}
-
-void *testcase_realloc(void *ptr, size_t size)
-{
- return driver_realloc(ptr, size);
-}
-
-void testcase_free(void *ptr)
-{
- driver_free(ptr);
-}
diff --git a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h b/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h
deleted file mode 100644
index 3d85ca6df0..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#ifndef TESTCASE_DRIVER_H__
-#define TESTCASE_DRIVER_H__
-
-#include "obsolete/driver.h"
-#include <stdlib.h>
-
-typedef struct {
- char *testcase_name;
- char *command;
- int command_len;
- void *extra;
-} TestCaseState_t;
-
-#define ASSERT_CLNUP(TCS, B, CLN) \
-do { \
- if (!(B)) { \
- CLN; \
- testcase_assertion_failed((TCS), __FILE__, __LINE__, #B); \
- } \
-} while (0)
-
-#define ASSERT(TCS, B) ASSERT_CLNUP(TCS, B, (void) 0)
-
-void testcase_printf(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_succeeded(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_skipped(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_failed(TestCaseState_t *tcs, char *frmt, ...);
-int testcase_assertion_failed(TestCaseState_t *tcs, char *file, int line,
- char *assertion);
-void *testcase_alloc(size_t size);
-void *testcase_realloc(void *ptr, size_t size);
-void testcase_free(void *ptr);
-
-
-char *testcase_name(void);
-void testcase_run(TestCaseState_t *tcs);
-void testcase_cleanup(TestCaseState_t *tcs);
-
-#endif
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index ab727b7cb1..4f4458802c 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -33,7 +33,7 @@
-include("test_server.hrl").
%-compile(export_all).
--export([all/1, init_per_testcase/2, fin_per_testcase/2]).
+-export([all/1, init_per_testcase/2, fin_per_testcase/2, end_per_suite/1]).
-export([equal/1,
few_low/1,
@@ -49,7 +49,8 @@
cpu_topology/1,
sct_cmd/1,
sbt_cmd/1,
- scheduler_suspend/1]).
+ scheduler_suspend/1,
+ reader_groups/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(10)).
@@ -67,7 +68,8 @@ all(suite) ->
equal_with_high_max,
bound_process,
scheduler_bind,
- scheduler_suspend].
+ scheduler_suspend,
+ reader_groups].
init_per_testcase(Case, Config) when is_list(Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
@@ -81,6 +83,10 @@ fin_per_testcase(_Case, Config) when is_list(Config) ->
?t:timetrap_cancel(Dog),
ok.
+end_per_suite(Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
+ Config.
+
-define(ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED, (2000*2000)).
-define(DEFAULT_TEST_REDS_PER_SCHED, 200000000).
@@ -965,12 +971,361 @@ sst3_loop(S, N) ->
erlang:system_flag(schedulers_online, 1),
erlang:system_flag(schedulers_online, S),
sst3_loop(S, N-1).
+
+reader_groups(Config) when is_list(Config) ->
+    %% White box testing. These results are correct for the current
+    %% implementation, but other mappings could be correct as well.
+
+ %% The actual tilepro64 topology
+ CPUT0 = [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}],
+
+ ?line [{0,1},{1,1},{2,1},{3,3},{4,3},{5,3},{6,3},{7,3},{8,1},{9,1},{10,1},
+ {11,1},{12,3},{13,3},{14,4},{15,4},{16,2},{17,2},{18,2},{19,2},
+ {20,4},{21,4},{22,4},{23,4},{24,2},{25,2},{26,7},{27,2},{28,4},
+ {29,2},{30,4},{31,5},{32,7},{33,7},{34,7},{35,7},{36,5},{37,5},
+ {38,5},{39,7},{40,7},{41,8},{42,8},{43,8},{44,5},{45,5},{46,5},
+ {47,6},{48,8},{49,8},{50,8},{51,6},{52,6},{53,6},{54,6},{55,6},
+ {58,8},{60,6},{61,6}]
+ = reader_groups_map(CPUT0, 8),
+
+ CPUT1 = [n([p([c([t(l(0)),t(l(1)),t(l(2)),t(l(3))]),
+ c([t(l(4)),t(l(5)),t(l(6)),t(l(7))]),
+ c([t(l(8)),t(l(9)),t(l(10)),t(l(11))]),
+ c([t(l(12)),t(l(13)),t(l(14)),t(l(15))])]),
+ p([c([t(l(16)),t(l(17)),t(l(18)),t(l(19))]),
+ c([t(l(20)),t(l(21)),t(l(22)),t(l(23))]),
+ c([t(l(24)),t(l(25)),t(l(26)),t(l(27))]),
+ c([t(l(28)),t(l(29)),t(l(30)),t(l(31))])])]),
+ n([p([c([t(l(32)),t(l(33)),t(l(34)),t(l(35))]),
+ c([t(l(36)),t(l(37)),t(l(38)),t(l(39))]),
+ c([t(l(40)),t(l(41)),t(l(42)),t(l(43))]),
+ c([t(l(44)),t(l(45)),t(l(46)),t(l(47))])]),
+ p([c([t(l(48)),t(l(49)),t(l(50)),t(l(51))]),
+ c([t(l(52)),t(l(53)),t(l(54)),t(l(55))]),
+ c([t(l(56)),t(l(57)),t(l(58)),t(l(59))]),
+ c([t(l(60)),t(l(61)),t(l(62)),t(l(63))])])]),
+ n([p([c([t(l(64)),t(l(65)),t(l(66)),t(l(67))]),
+ c([t(l(68)),t(l(69)),t(l(70)),t(l(71))]),
+ c([t(l(72)),t(l(73)),t(l(74)),t(l(75))]),
+ c([t(l(76)),t(l(77)),t(l(78)),t(l(79))])]),
+ p([c([t(l(80)),t(l(81)),t(l(82)),t(l(83))]),
+ c([t(l(84)),t(l(85)),t(l(86)),t(l(87))]),
+ c([t(l(88)),t(l(89)),t(l(90)),t(l(91))]),
+ c([t(l(92)),t(l(93)),t(l(94)),t(l(95))])])]),
+ n([p([c([t(l(96)),t(l(97)),t(l(98)),t(l(99))]),
+ c([t(l(100)),t(l(101)),t(l(102)),t(l(103))]),
+ c([t(l(104)),t(l(105)),t(l(106)),t(l(107))]),
+ c([t(l(108)),t(l(109)),t(l(110)),t(l(111))])]),
+ p([c([t(l(112)),t(l(113)),t(l(114)),t(l(115))]),
+ c([t(l(116)),t(l(117)),t(l(118)),t(l(119))]),
+ c([t(l(120)),t(l(121)),t(l(122)),t(l(123))]),
+ c([t(l(124)),t(l(125)),t(l(126)),t(l(127))])])])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},
+ {10,3},{11,3},{12,4},{13,4},{14,4},{15,4},{16,5},{17,5},{18,5},
+ {19,5},{20,6},{21,6},{22,6},{23,6},{24,7},{25,7},{26,7},{27,7},
+ {28,8},{29,8},{30,8},{31,8},{32,9},{33,9},{34,9},{35,9},{36,10},
+ {37,10},{38,10},{39,10},{40,11},{41,11},{42,11},{43,11},{44,12},
+ {45,12},{46,12},{47,12},{48,13},{49,13},{50,13},{51,13},{52,14},
+ {53,14},{54,14},{55,14},{56,15},{57,15},{58,15},{59,15},{60,16},
+ {61,16},{62,16},{63,16},{64,17},{65,17},{66,17},{67,17},{68,18},
+ {69,18},{70,18},{71,18},{72,19},{73,19},{74,19},{75,19},{76,20},
+ {77,20},{78,20},{79,20},{80,21},{81,21},{82,21},{83,21},{84,22},
+ {85,22},{86,22},{87,22},{88,23},{89,23},{90,23},{91,23},{92,24},
+ {93,24},{94,24},{95,24},{96,25},{97,25},{98,25},{99,25},{100,26},
+ {101,26},{102,26},{103,26},{104,27},{105,27},{106,27},{107,27},
+ {108,28},{109,28},{110,28},{111,28},{112,29},{113,29},{114,29},
+ {115,29},{116,30},{117,30},{118,30},{119,30},{120,31},{121,31},
+ {122,31},{123,31},{124,32},{125,32},{126,32},{127,32}]
+ = reader_groups_map(CPUT1, 128),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,1},{17,1},{18,1},{19,1},
+ {20,1},{21,1},{22,1},{23,1},{24,1},{25,1},{26,1},{27,1},{28,1},
+ {29,1},{30,1},{31,1},{32,1},{33,1},{34,1},{35,1},{36,1},{37,1},
+ {38,1},{39,1},{40,1},{41,1},{42,1},{43,1},{44,1},{45,1},{46,1},
+ {47,1},{48,1},{49,1},{50,1},{51,1},{52,1},{53,1},{54,1},{55,1},
+ {56,1},{57,1},{58,1},{59,1},{60,1},{61,1},{62,1},{63,1},{64,2},
+ {65,2},{66,2},{67,2},{68,2},{69,2},{70,2},{71,2},{72,2},{73,2},
+ {74,2},{75,2},{76,2},{77,2},{78,2},{79,2},{80,2},{81,2},{82,2},
+ {83,2},{84,2},{85,2},{86,2},{87,2},{88,2},{89,2},{90,2},{91,2},
+ {92,2},{93,2},{94,2},{95,2},{96,2},{97,2},{98,2},{99,2},{100,2},
+ {101,2},{102,2},{103,2},{104,2},{105,2},{106,2},{107,2},{108,2},
+ {109,2},{110,2},{111,2},{112,2},{113,2},{114,2},{115,2},{116,2},
+ {117,2},{118,2},{119,2},{120,2},{121,2},{122,2},{123,2},{124,2},
+ {125,2},{126,2},{127,2}]
+ = reader_groups_map(CPUT1, 2),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},{10,3},
+ {11,3},{12,3},{13,3},{14,3},{15,3},{16,4},{17,4},{18,4},{19,4},
+ {20,4},{21,4},{22,4},{23,4},{24,5},{25,5},{26,5},{27,5},{28,5},
+ {29,5},{30,5},{31,5},{32,6},{33,6},{34,6},{35,6},{36,6},{37,6},
+ {38,6},{39,6},{40,7},{41,7},{42,7},{43,7},{44,7},{45,7},{46,7},
+ {47,7},{48,8},{49,8},{50,8},{51,8},{52,8},{53,8},{54,8},{55,8},
+ {56,9},{57,9},{58,9},{59,9},{60,9},{61,9},{62,9},{63,9},{64,10},
+ {65,10},{66,10},{67,10},{68,10},{69,10},{70,10},{71,10},{72,11},
+ {73,11},{74,11},{75,11},{76,11},{77,11},{78,11},{79,11},{80,12},
+ {81,12},{82,12},{83,12},{84,12},{85,12},{86,12},{87,12},{88,13},
+ {89,13},{90,13},{91,13},{92,13},{93,13},{94,13},{95,13},{96,14},
+ {97,14},{98,14},{99,14},{100,14},{101,14},{102,14},{103,14},
+ {104,15},{105,15},{106,15},{107,15},{108,15},{109,15},{110,15},
+ {111,15},{112,16},{113,16},{114,16},{115,16},{116,16},{117,16},
+ {118,16},{119,16},{120,17},{121,17},{122,17},{123,17},{124,17},
+ {125,17},{126,17},{127,17}]
+ = reader_groups_map(CPUT1, 17),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,2},{17,2},{18,2},{19,2},
+ {20,2},{21,2},{22,2},{23,2},{24,2},{25,2},{26,2},{27,2},{28,2},
+ {29,2},{30,2},{31,2},{32,3},{33,3},{34,3},{35,3},{36,3},{37,3},
+ {38,3},{39,3},{40,3},{41,3},{42,3},{43,3},{44,3},{45,3},{46,3},
+ {47,3},{48,4},{49,4},{50,4},{51,4},{52,4},{53,4},{54,4},{55,4},
+ {56,4},{57,4},{58,4},{59,4},{60,4},{61,4},{62,4},{63,4},{64,5},
+ {65,5},{66,5},{67,5},{68,5},{69,5},{70,5},{71,5},{72,5},{73,5},
+ {74,5},{75,5},{76,5},{77,5},{78,5},{79,5},{80,6},{81,6},{82,6},
+ {83,6},{84,6},{85,6},{86,6},{87,6},{88,6},{89,6},{90,6},{91,6},
+ {92,6},{93,6},{94,6},{95,6},{96,7},{97,7},{98,7},{99,7},{100,7},
+ {101,7},{102,7},{103,7},{104,7},{105,7},{106,7},{107,7},{108,7},
+ {109,7},{110,7},{111,7},{112,7},{113,7},{114,7},{115,7},{116,7},
+ {117,7},{118,7},{119,7},{120,7},{121,7},{122,7},{123,7},{124,7},
+ {125,7},{126,7},{127,7}]
+ = reader_groups_map(CPUT1, 7),
+
+ ?line CPUT2 = [p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))]),
+ p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},{6,2},{7,2},{8,2},{9,2},
+ {10,3},
+ {11,4},{12,4},{13,4},
+ {14,5},{15,5}] = reader_groups_map(CPUT2, 5),
+
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,5},{13,5},
+ {14,6},{15,6}] = reader_groups_map(CPUT2, 6),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,7}] = reader_groups_map(CPUT2, 7),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,8}] = reader_groups_map(CPUT2, 8),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,7},
+ {14,8},{15,9}] = reader_groups_map(CPUT2, 9),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,8},
+ {14,9},{15,10}] = reader_groups_map(CPUT2, 10),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,4},
+ {5,5},{6,5},{7,5},{8,5},{9,5},
+ {10,6},
+ {11,7},{12,8},{13,9},
+ {14,10},{15,11}] = reader_groups_map(CPUT2, 11),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,5},
+ {5,6},{6,6},{7,6},{8,6},{9,6},
+ {10,7},
+ {11,8},{12,9},{13,10},
+ {14,11},{15,12}] = reader_groups_map(CPUT2, 100),
+
+ CPUT3 = [p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))]),
+ p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))])],
+
+ ?line [{0,5},{1,5},{2,6},{3,6},{4,6},
+ {5,1},{6,1},{7,1},{8,1},{9,1},
+ {10,2},{11,3},{12,3},{13,3},
+ {14,4},{15,4}] = reader_groups_map(CPUT3, 6),
+
+ CPUT4 = [p([t(l(0)),t(l(1)),t(l(2)),t(l(3)),t(l(4))]),
+ p([t(l(5))]),
+ p([c(l(6)),c(l(7)),c(l(8))]),
+ p([c(l(9)),c(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13)),c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,3},{8,3},
+ {9,4},{10,4},
+ {11,5},{12,5},{13,6},{14,6},{15,6}] = reader_groups_map(CPUT4, 6),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,4},{8,4},
+ {9,5},{10,5},
+ {11,6},{12,6},{13,7},{14,7},{15,7}] = reader_groups_map(CPUT4, 7),
+
+ ?line [{0,1},{65535,2}] = reader_groups_map([c(l(0)),c(l(65535))], 10),
+
+ ?line ok.
+reader_groups_map(CPUT, Groups) ->
+ Old = erlang:system_info({cpu_topology, defined}),
+ erlang:system_flag(cpu_topology, CPUT),
+ enable_internal_state(),
+ Res = erts_debug:get_internal_state({reader_groups_map, Groups}),
+ erlang:system_flag(cpu_topology, Old),
+ lists:sort(Res).
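[Note: reader_groups_map/2 above temporarily installs a synthetic CPU topology, enables the internal state interface, asks the emulator for the resulting reader-group map, and then restores the defined topology. A toy invocation built from the constructor helpers under Utils below; the expected mapping is an assumption consistent with the CPUT2/CPUT3 cases, where processors tend to get groups of their own when enough groups are available:

    %% Two processors with two cores each, split into two reader groups.
    CPUT = [p([c(l(0)), c(l(1))]),
            p([c(l(2)), c(l(3))])],
    [{0,1},{1,1},{2,2},{3,2}] = reader_groups_map(CPUT, 2).]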
+
%%
%% Utils
%%
+tilera_cpu_topology() ->
+ [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}].
+
+l(Id) ->
+ {logical, Id}.
+
+t(X) ->
+ {thread, X}.
+
+c(X) ->
+ {core, X}.
+
+p(X) ->
+ {processor, X}.
+
+n(X) ->
+ {node, X}.
+
mcall(Node, Funs) ->
Parent = self(),
Refs = lists:map(fun (Fun) ->
diff --git a/erts/emulator/test/send_term_SUITE.erl b/erts/emulator/test/send_term_SUITE.erl
index 489adbd660..5fd01a9ac5 100644
--- a/erts/emulator/test/send_term_SUITE.erl
+++ b/erts/emulator/test/send_term_SUITE.erl
@@ -76,40 +76,43 @@ basic(Config) when is_list(Config) ->
?line ExpectedBinTup = term(P, 7),
%% single terms
- ?line [] = term(P, 8), % ERL_DRV_NIL
- ?line '' = term(P, 9), % ERL_DRV_ATOM
- ?line an_atom = term(P, 10), % ERL_DRV_ATOM
- ?line -4711 = term(P, 11), % ERL_DRV_INT
- ?line 4711 = term(P, 12), % ERL_DRV_UINT
- ?line P = term(P, 13), % ERL_DRV_PORT
- ?line <<>> = term(P, 14), % ERL_DRV_BINARY
- ?line <<"hejsan">> = term(P, 15), % ERL_DRV_BINARY
- ?line <<>> = term(P, 16), % ERL_DRV_BUF2BINARY
- ?line <<>> = term(P, 17), % ERL_DRV_BUF2BINARY
- ?line <<"hoppsan">> = term(P, 18), % ERL_DRV_BUF2BINARY
- ?line "" = term(P, 19), % ERL_DRV_STRING
- ?line "" = term(P, 20), % ERL_DRV_STRING
- ?line "hippsan" = term(P, 21), % ERL_DRV_STRING
- ?line {} = term(P, 22), % ERL_DRV_TUPLE
- ?line [] = term(P, 23), % ERL_DRV_LIST
- ?line Self = term(P, 24), % ERL_DRV_PID
- ?line [] = term(P, 25), % ERL_DRV_STRING_CONS
- ?line AFloat = term(P, 26), % ERL_DRV_FLOAT
+ Singles = [{[], 8}, % ERL_DRV_NIL
+ {'', 9}, % ERL_DRV_ATOM
+ {an_atom, 10}, % ERL_DRV_ATOM
+ {-4711, 11}, % ERL_DRV_INT
+ {4711, 12}, % ERL_DRV_UINT
+ {P, 13}, % ERL_DRV_PORT
+ {<<>>, 14}, % ERL_DRV_BINARY
+ {<<"hejsan">>, 15}, % ERL_DRV_BINARY
+ {<<>>, 16}, % ERL_DRV_BUF2BINARY
+ {<<>>, 17}, % ERL_DRV_BUF2BINARY
+ {<<"hoppsan">>, 18}, % ERL_DRV_BUF2BINARY
+ {"", 19}, % ERL_DRV_STRING
+ {"", 20}, % ERL_DRV_STRING
+ {"hippsan", 21}, % ERL_DRV_STRING
+ {{}, 22}, % ERL_DRV_TUPLE
+ {[], 23}, % ERL_DRV_LIST
+ {Self, 24}, % ERL_DRV_PID
+ {[], 25}, % ERL_DRV_STRING_CONS
+ {[], 27}, % ERL_DRV_EXT2TERM
+ {18446744073709551615, 28}, % ERL_DRV_UINT64
+ {20233590931456, 29}, % ERL_DRV_UINT64
+ {4711, 30}, % ERL_DRV_UINT64
+ {0, 31}, % ERL_DRV_UINT64
+ {9223372036854775807, 32}, % ERL_DRV_INT64
+ {20233590931456, 33}, % ERL_DRV_INT64
+ {4711, 34}, % ERL_DRV_INT64
+ {0, 35}, % ERL_DRV_INT64
+ {-1, 36}, % ERL_DRV_INT64
+ {-4711, 37}, % ERL_DRV_INT64
+ {-20233590931456, 38}, % ERL_DRV_INT64
+ {-9223372036854775808, 39}], % ERL_DRV_INT64
+ ?line {Terms, Ops} = lists:unzip(Singles),
+ ?line Terms = term(P,Ops),
+
+ AFloat = term(P, 26), % ERL_DRV_FLOAT
?line true = AFloat < 0.001,
?line true = AFloat > -0.001,
- ?line [] = term(P, 27), % ERL_DRV_EXT2TERM
- ?line 18446744073709551615 = term(P, 28), % ERL_DRV_UINT64
- ?line 20233590931456 = term(P, 29), % ERL_DRV_UINT64
- ?line 4711 = term(P, 30), % ERL_DRV_UINT64
- ?line 0 = term(P, 31), % ERL_DRV_UINT64
- ?line 9223372036854775807 = term(P, 32), % ERL_DRV_INT64
- ?line 20233590931456 = term(P, 33), % ERL_DRV_INT64
- ?line 4711 = term(P, 34), % ERL_DRV_INT64
- ?line 0 = term(P, 35), % ERL_DRV_INT64
- ?line -1 = term(P, 36), % ERL_DRV_INT64
- ?line -4711 = term(P, 37), % ERL_DRV_INT64
- ?line -20233590931456 = term(P, 38), % ERL_DRV_INT64
- ?line -9223372036854775808 = term(P, 39), % ERL_DRV_INT64
%% Failure cases.
?line [] = term(P, 127),
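[Note: the rewrite above batches the single-term cases: each expected term is paired with its driver opcode, the pairs are unzipped, and the whole opcode list is driven through one term/2 call whose result is pattern-matched against the expected terms. In shorthand (P is the suite's test port, term/2 its existing helper):

    Singles = [{[], 8},          % ERL_DRV_NIL
               {an_atom, 10},    % ERL_DRV_ATOM
               {-4711, 11}],     % ERL_DRV_INT
    {Terms, Ops} = lists:unzip(Singles),
    Terms = term(P, Ops).]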
diff --git a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
index 6638de0560..165cce2e9d 100644
--- a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
+++ b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
@@ -17,6 +17,7 @@
*/
#include "erl_driver.h"
+#include <stdio.h>
#include <errno.h>
#include <string.h>
@@ -65,12 +66,21 @@ static void fail_term(ErlDrvTermData* msg, int len, int line);
static void send_term_drv_run(ErlDrvData port, char *buf, int count)
{
- ErlDrvTermData msg[1024];
-
- switch (*buf) {
+ char buf7[1024];
+ ErlDrvTermData spec[1024];
+ ErlDrvTermData* msg = spec;
+ ErlDrvBinary* bins[15];
+ int bin_ix = 0;
+ ErlDrvSInt64 s64[15];
+ int s64_ix = 0;
+ ErlDrvUInt64 u64[15];
+ int u64_ix = 0;
+ int i = 0;
+
+ for (i=0; i<count; i++) switch (buf[i]) {
case 0:
msg[0] = ERL_DRV_NIL;
- output_term(msg, 1);
+ msg += 1;
break;
case 1: /* Most term types inside a tuple. */
@@ -102,7 +112,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[22] = driver_connected(erlang_port);
msg[23] = ERL_DRV_TUPLE;
msg[24] = (ErlDrvTermData) 7;
- output_term(msg, 25);
+ msg += 25;
}
break;
@@ -117,7 +127,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[i] = ERL_DRV_NIL;
msg[i+1] = ERL_DRV_LIST;
msg[i+2] = (ErlDrvTermData) 201;
- output_term(msg, i+3);
+ msg += i+3;
}
break;
@@ -126,7 +136,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
ErlDrvBinary* bin;
int i;
- bin = driver_alloc_binary(256);
+ bin = bins[bin_ix++] = driver_alloc_binary(256);
for (i = 0; i < 256; i++) {
bin->orig_bytes[i] = i;
}
@@ -140,8 +150,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[7] = (ErlDrvTermData) 23;
msg[8] = ERL_DRV_TUPLE;
msg[9] = (ErlDrvTermData) 2;
- output_term(msg, 10);
- driver_free_binary(bin);
+ msg += 10;
}
break;
@@ -152,11 +161,11 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[3] = driver_caller(erlang_port);
msg[4] = ERL_DRV_TUPLE;
msg[5] = (ErlDrvTermData) 2;
- output_term(msg, 6);
+ msg += 6;
break;
case 5:
- output_term(msg, make_ext_term_list(msg, 0));
+ msg += make_ext_term_list(msg, 0);
break;
case 6:
@@ -166,94 +175,91 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[3] = ~((ErlDrvTermData) 0);
msg[4] = ERL_DRV_TUPLE;
msg[5] = (ErlDrvTermData) 2;
- output_term(msg, 6);
+ msg += 6;
break;
case 7: {
int len = 0;
- char buf[1024];
- memset(buf, 17, sizeof(buf));
+ memset(buf7, 17, sizeof(buf7));
/* empty heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
msg[len++] = (ErlDrvTermData) NULL; /* NULL is ok if size == 0 */
msg[len++] = (ErlDrvTermData) 0;
/* empty heap binary again */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0]; /* ptr is ok if size == 0 */
+ msg[len++] = (ErlDrvTermData) buf7; /* ptr is ok if size == 0 */
msg[len++] = (ErlDrvTermData) 0;
/* heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0];
+ msg[len++] = (ErlDrvTermData) buf7;
msg[len++] = (ErlDrvTermData) 17;
/* off heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0];
- msg[len++] = (ErlDrvTermData) sizeof(buf);
+ msg[len++] = (ErlDrvTermData) buf7;
+ msg[len++] = (ErlDrvTermData) sizeof(buf7);
msg[len++] = ERL_DRV_TUPLE;
msg[len++] = (ErlDrvTermData) 4;
- output_term(msg, len);
+ msg += len;
break;
}
case 8:
msg[0] = ERL_DRV_NIL;
- output_term(msg, 1);
+ msg += 1;
break;
case 9:
msg[0] = ERL_DRV_ATOM;
msg[1] = (ErlDrvTermData) driver_mk_atom("");
- output_term(msg, 2);
+ msg += 2;
break;
case 10:
msg[0] = ERL_DRV_ATOM;
msg[1] = (ErlDrvTermData) driver_mk_atom("an_atom");
- output_term(msg, 2);
+ msg += 2;
break;
case 11:
msg[0] = ERL_DRV_INT;
msg[1] = (ErlDrvTermData) -4711;
- output_term(msg, 2);
+ msg += 2;
break;
case 12:
msg[0] = ERL_DRV_UINT;
msg[1] = (ErlDrvTermData) 4711;
- output_term(msg, 2);
+ msg += 2;
break;
case 13:
msg[0] = ERL_DRV_PORT;
msg[1] = driver_mk_port(erlang_port);
- output_term(msg, 2);
+ msg += 2;
break;
case 14: {
- ErlDrvBinary *dbin = driver_alloc_binary(0);
+ ErlDrvBinary *dbin = bins[bin_ix++] = driver_alloc_binary(0);
msg[0] = ERL_DRV_BINARY;
msg[1] = (ErlDrvTermData) dbin;
msg[2] = (ErlDrvTermData) 0;
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
- driver_free_binary(dbin);
+ msg += 4;
break;
}
case 15: {
- char buf[] = "hejsan";
- ErlDrvBinary *dbin = driver_alloc_binary(sizeof(buf)-1);
+ static const char buf[] = "hejsan";
+ ErlDrvBinary *dbin = bins[bin_ix++] = driver_alloc_binary(sizeof(buf)-1);
if (dbin)
memcpy((void *) dbin->orig_bytes, (void *) buf, sizeof(buf)-1);
msg[0] = ERL_DRV_BINARY;
msg[1] = (ErlDrvTermData) dbin;
msg[2] = (ErlDrvTermData) (dbin ? sizeof(buf)-1 : 0);
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
- driver_free_binary(dbin);
+ msg += 4;
break;
}
@@ -261,24 +267,24 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) NULL;
msg[2] = (ErlDrvTermData) 0;
- output_term(msg, 3);
+ msg += 3;
break;
case 17: {
- char buf[] = "";
+ static const char buf[] = "";
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 18: {
- char buf[] = "hoppsan";
+ static const char buf[] = "hoppsan";
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
@@ -286,44 +292,44 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) 0;
- output_term(msg, 3);
+ msg += 3;
break;
case 20: {
- char buf[] = "";
+ static const char buf[] = "";
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 21: {
- char buf[] = "hippsan";
+ static const char buf[] = "hippsan";
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 22:
msg[0] = ERL_DRV_TUPLE;
msg[1] = (ErlDrvTermData) 0;
- output_term(msg, 2);
+ msg += 2;
break;
case 23:
msg[0] = ERL_DRV_NIL;
msg[1] = ERL_DRV_LIST;
msg[2] = (ErlDrvTermData) 1;
- output_term(msg, 3);
+ msg += 3;
break;
case 24:
msg[0] = ERL_DRV_PID;
msg[1] = driver_connected(erlang_port);
- output_term(msg, 2);
+ msg += 2;
break;
case 25:
@@ -331,132 +337,131 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[1] = ERL_DRV_STRING_CONS;
msg[2] = (ErlDrvTermData) "";
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
+ msg += 4;
break;
case 26: {
- double my_float = 0.0;
+ static double my_float = 0.0;
msg[0] = ERL_DRV_FLOAT;
msg[1] = (ErlDrvTermData) &my_float;
- output_term(msg, 2);
+ msg += 2;
break;
}
case 27: {
- char buf[] = {131, 106}; /* [] */
+ static char buf[] = {131, 106}; /* [] */
msg[0] = ERL_DRV_EXT2TERM;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf);
- output_term(msg, 3);
+ msg += 3;
break;
}
case 28: {
- ErlDrvUInt64 x = ~((ErlDrvUInt64) 0);
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = ~((ErlDrvUInt64) 0);
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 29: {
- ErlDrvUInt64 x = ((ErlDrvUInt64) 4711) << 32;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = ((ErlDrvUInt64) 4711) << 32;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 30: {
- ErlDrvUInt64 x = 4711;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = 4711;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 31: {
- ErlDrvUInt64 x = 0;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = 0;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 32: {
- ErlDrvSInt64 x = ((((ErlDrvUInt64) 0x7fffffff) << 32)
- | ((ErlDrvUInt64) 0xffffffff));
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((((ErlDrvUInt64) 0x7fffffff) << 32) | ((ErlDrvUInt64) 0xffffffff));
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 33: {
- ErlDrvSInt64 x = (ErlDrvSInt64) (((ErlDrvUInt64) 4711) << 32);
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = (ErlDrvSInt64) (((ErlDrvUInt64) 4711) << 32);
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 34: {
- ErlDrvSInt64 x = 4711;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = 4711;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 35: {
- ErlDrvSInt64 x = 0;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = 0;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 36: {
- ErlDrvSInt64 x = -1;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = -1;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 37: {
- ErlDrvSInt64 x = -4711;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = -4711;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 38: {
- ErlDrvSInt64 x = ((ErlDrvSInt64) ((ErlDrvUInt64) 4711) << 32)*-1;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((ErlDrvSInt64) ((ErlDrvUInt64) 4711) << 32)*-1;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 39: {
- ErlDrvSInt64 x = ((ErlDrvSInt64) 1) << 63;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((ErlDrvSInt64) 1) << 63;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
@@ -464,7 +469,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
case 127: /* Error cases */
{
long refc;
- ErlDrvBinary* bin = driver_alloc_binary(256);
+ ErlDrvBinary* bin = bins[bin_ix++] = driver_alloc_binary(256);
FAIL_TERM(msg, 0);
@@ -537,7 +542,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
refc = driver_binary_get_refc(bin);
if (refc > 3) {
char sbuf[128];
- sprintf(sbuf, "bad_refc:%d", refc);
+ sprintf(sbuf, "bad_refc:%ld", refc);
driver_failure_atom(erlang_port, sbuf);
}
driver_free_binary(bin);
@@ -644,6 +649,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
/* Signal end of test case */
msg[0] = ERL_DRV_NIL;
driver_output_term(erlang_port, msg, 1);
+ return;
}
break;
@@ -651,6 +657,16 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
driver_failure_atom(erlang_port, "bad_request");
break;
}
+ if (count > 1) {
+ *msg++ = ERL_DRV_NIL;
+ *msg++ = ERL_DRV_LIST;
+ *msg++ = count + 1;
+ }
+ output_term(spec, msg-spec);
+ if ((bin_ix|s64_ix|u64_ix) > 15) abort();
+ while (bin_ix) {
+ driver_free_binary(bins[--bin_ix]);
+ }
}
static void output_term(ErlDrvTermData* msg, int len)
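
The driver's key change: each case now appends its term spec at *msg instead of emitting it immediately, and one combined message is sent at the end of the function, wrapped into a list whenever the request carried more than one opcode. Since a term spec holds pointers (to binaries, 64-bit integers, the float, string buffers), everything it references must survive until that final output call; hence the binaries and 64-bit values move into the function-scope bins[]/s64[]/u64[] arrays and the string literals become static. A minimal sketch of the same accumulate-then-emit pattern, using only the documented erl_driver term API (erlang_port is this driver's port, as in the code above):

    /* Storage referenced by a spec must outlive driver_output_term(). */
    ErlDrvSInt64 vals[2];              /* not per-case stack locals */
    ErlDrvTermData spec[16];
    ErlDrvTermData *p = spec;

    vals[0] = 4711;
    *p++ = ERL_DRV_INT64; *p++ = (ErlDrvTermData) &vals[0];
    vals[1] = -4711;
    *p++ = ERL_DRV_INT64; *p++ = (ErlDrvTermData) &vals[1];

    *p++ = ERL_DRV_NIL;                /* tail of the result list */
    *p++ = ERL_DRV_LIST;
    *p++ = (ErlDrvTermData) 3;         /* two integers + the NIL tail */

    driver_output_term(erlang_port, spec, p - spec);  /* -> [4711,-4711] */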
diff --git a/erts/epmd/src/epmd.c b/erts/epmd/src/epmd.c
index c509c49b39..9c2ce065bb 100644
--- a/erts/epmd/src/epmd.c
+++ b/erts/epmd/src/epmd.c
@@ -226,11 +226,17 @@ int main(int argc, char** argv)
else
usage(g);
epmd_cleanup_exit(g,0);
+ } else if (strcmp(argv[0], "-stop") == 0) {
+ if (argc == 2)
+ stop_cli(g, argv[1]);
+ else
+ usage(g);
+ epmd_cleanup_exit(g,0);
}
else
usage(g);
}
- dbg_printf(g,0,"epmd running - daemon = %d",g->is_daemon);
+ dbg_printf(g,1,"epmd running - daemon = %d",g->is_daemon);
#ifndef NO_SYSCONF
if ((g->max_conn = sysconf(_SC_OPEN_MAX)) <= 0)
@@ -382,7 +388,7 @@ static void run_daemon(EpmdVars *g)
static void usage(EpmdVars *g)
{
fprintf(stderr, "usage: epmd [-d|-debug] [DbgExtra...] [-port No] [-daemon]\n");
- fprintf(stderr, " [-d|-debug] [-port No] [-names|-kill]\n\n");
+ fprintf(stderr, " [-d|-debug] [-port No] [-names|-kill|-stop name]\n\n");
fprintf(stderr, "See the Erlang epmd manual page for info about the usage.\n");
fprintf(stderr, "The -port and DbgExtra options are\n\n");
fprintf(stderr, " -port No\n");
diff --git a/erts/epmd/src/epmd_cli.c b/erts/epmd/src/epmd_cli.c
index c12f711bc5..2aed861390 100644
--- a/erts/epmd/src/epmd_cli.c
+++ b/erts/epmd/src/epmd_cli.c
@@ -54,6 +54,42 @@ void kill_epmd(EpmdVars *g)
}
}
+void stop_cli(EpmdVars *g, char *name)
+{
+ char buf[1024];
+ int fd, rval, bsize;
+
+ bsize = strlen(name);
+ if (bsize > 1000) {
+ printf("epmd: Name too long!");
+ epmd_cleanup_exit(g, 1);
+ }
+
+ fd = conn_to_epmd(g);
+ bsize++;
+ put_int16(bsize, buf);
+ buf[2] = EPMD_STOP_REQ;
+ bsize += 2;
+ strcpy(buf+3, name);
+
+ if (write(fd, buf, bsize) != bsize) {
+ printf("epmd: Can't write to epmd\n");
+ epmd_cleanup_exit(g,1);
+ }
+ if ((rval = read_fill(fd,buf,7)) == 7) {
+ buf[7] = '\000';
+ printf("%s\n", buf);
+ epmd_cleanup_exit(g,0);
+ } else if (rval < 0) {
+ printf("epmd: failed to read answer from local epmd\n");
+ epmd_cleanup_exit(g,1);
+ } else { /* partial answer: 0 <= rval < 7 */
+ buf[rval] = '\0';
+ printf("epmd: local epmd responded with <%s>\n", buf);
+ epmd_cleanup_exit(g,1);
+ }
+}
+
/* what == EPMD_NAMES_REQ || EPMD_DUMP_REQ */
void epmd_call(EpmdVars *g,int what)
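
stop_cli() frames the request the same way as the other epmd client calls: a two-byte big-endian length, one opcode byte, then the payload. The fixed seven-byte read of the answer suggests the daemon replies with a fixed-width status string (in the epmd protocol the stop request answers "STOPPED" or "NOEXIST"). A sketch of the framing, with the layout inferred from the put_int16()/strcpy() calls above:

    #include <string.h>

    /* bytes 0-1 : big-endian length of what follows (1 + strlen(name))
     * byte  2   : EPMD_STOP_REQ (opcode, as used above)
     * bytes 3.. : the node name; no terminating NUL goes on the wire */
    static int pack_stop_req(char *buf, size_t bufsz, const char *name)
    {
        size_t n = strlen(name);
        if (n + 3 > bufsz)
            return -1;                        /* name does not fit */
        buf[0] = (char) (((n + 1) >> 8) & 0xff);
        buf[1] = (char) ((n + 1) & 0xff);
        buf[2] = EPMD_STOP_REQ;
        memcpy(buf + 3, name, n);
        return (int) (n + 3);                 /* bytes to write() */
    }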
diff --git a/erts/epmd/src/epmd_srv.c b/erts/epmd/src/epmd_srv.c
index 34f657fb16..c836bf0bb7 100644
--- a/erts/epmd/src/epmd_srv.c
+++ b/erts/epmd/src/epmd_srv.c
@@ -591,7 +591,7 @@ static void do_request(g, fd, s, buf, bsize)
if (bsize <= 1)
{
- dbg_printf(g,0,"packet to small for request PORT2_REQ (%d)", bsize);
+ dbg_printf(g,0,"packet too small for request PORT2_REQ (%d)", bsize);
return;
}
@@ -740,7 +740,7 @@ static void do_request(g, fd, s, buf, bsize)
dbg_printf(g,1,"** got STOP_REQ");
if (bsize <= 1 )
{
- dbg_printf(g,0,"packet to small for request STOP_REQ (%d)",bsize);
+ dbg_printf(g,0,"packet too small for request STOP_REQ (%d)",bsize);
return;
}
@@ -902,7 +902,7 @@ static void node_init(EpmdVars *g)
/* We have got a close on a connection and it may be a
- EPMD_ALIVE_CLOSE_REQ. Note that this call shouild be called
+ EPMD_ALIVE_CLOSE_REQ. Note that this function should be called
*before* calling conn_close() */
static int node_unreg(EpmdVars *g,char *name)
diff --git a/erts/etc/common/Makefile.in b/erts/etc/common/Makefile.in
index 6551b2999a..96655662b8 100644
--- a/erts/etc/common/Makefile.in
+++ b/erts/etc/common/Makefile.in
@@ -98,7 +98,7 @@ endif
ifeq ($(TARGET),win32)
ERTS_INTERNAL_LIBS=-L../../lib/internal/$(TARGET) -lerts_internal_r$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@
else
-ERTS_INTERNAL_LIBS=-L../../lib/internal/$(TARGET) -lerts_internal$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@
+ERTS_INTERNAL_LIBS=-L../../lib/internal/$(TARGET) -lerts_internal$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@ -lm
endif
# ----------------------------------------------------
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index f79f5cc978..76ce0a7e3c 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -131,6 +131,12 @@ static char *plush_val_switches[] = {
NULL
};
+/* +r arguments with values */
+static char *plusr_val_switches[] = {
+ "g",
+ NULL
+};
+
/*
* Define sleep(seconds) in terms of Sleep() on Windows.
@@ -872,6 +878,21 @@ int main(int argc, char **argv)
i++;
}
break;
+ case 'r':
+ if (!is_one_of_strings(&argv[i][2],
+ plusr_val_switches))
+ goto the_default;
+ else {
+ if (i+1 >= argc
+ || argv[i+1][0] == '-'
+ || argv[i+1][0] == '+')
+ usage(argv[i]);
+ argv[i][0] = '-';
+ add_Eargs(argv[i]);
+ add_Eargs(argv[i+1]);
+ i++;
+ }
+ break;
case 's':
if (!is_one_of_strings(&argv[i][2],
pluss_val_switches))
@@ -1069,11 +1090,12 @@ usage_aux(void)
"[-hybrid] "
#endif
"[-make] [-man [manopts] MANPAGE] [-x] [-emu_args] "
- "[-args_file FILENAME] "
- "[+A THREADS] [+a SIZE] [+B[c|d|i]] [+c] [+h HEAP_SIZE_OPTION] [+K BOOLEAN] "
+ "[-args_file FILENAME] [+A THREADS] [+a SIZE] [+B[c|d|i]] [+c] "
+ "[+h HEAP_SIZE_OPTION] [+K BOOLEAN] "
"[+l] [+M<SUBSWITCH> <ARGUMENT>] [+P MAX_PROCS] [+R COMPAT_REL] "
- "[+r] [+s SCHEDULER_OPTION] [+S NO_SCHEDULERS:NO_SCHEDULERS_ONLINE] [+T LEVEL] [+V] [+v] [+W<i|w>] "
- "[args ...]\n");
+ "[+r] [+rg READER_GROUPS_LIMIT] [+s SCHEDULER_OPTION] "
+ "[+S NO_SCHEDULERS:NO_SCHEDULERS_ONLINE] [+T LEVEL] [+V] [+v] "
+ "[+W<i|w>] [args ...]\n");
exit(1);
}
diff --git a/erts/include/internal/erl_misc_utils.h b/erts/include/internal/erl_misc_utils.h
index 82e9ba3798..507e1726f4 100644
--- a/erts/include/internal/erl_misc_utils.h
+++ b/erts/include/internal/erl_misc_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -34,7 +34,7 @@ typedef struct {
erts_cpu_info_t *erts_cpu_info_create(void);
void erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo);
-void erts_cpu_info_update(erts_cpu_info_t *cpuinfo);
+int erts_cpu_info_update(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_configured(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_online(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_available(erts_cpu_info_t *cpuinfo);
@@ -50,4 +50,9 @@ int erts_unbind_from_cpu_str(char *str);
int erts_milli_sleep(long);
+#ifdef __WIN32__
+int erts_map_win_error_to_errno(DWORD win_error);
+int erts_get_last_win_errno(void);
+#endif
+
#endif /* #ifndef ERL_MISC_UTILS_H_ */
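
erts_cpu_info_update() now returns an int instead of void, and Windows-only helpers for mapping Win32 error codes to errno values are declared. A plausible caller, assuming the new return value flags whether the CPU information changed since the last update (an assumption; the semantics live in erl_misc_utils.c, which is not part of this hunk):

    erts_cpu_info_t *cpuinfo = erts_cpu_info_create();

    /* ... later, e.g. on a topology-change hint ... */
    if (erts_cpu_info_update(cpuinfo)) {
        /* assumed: non-zero return means something changed */
        int online = erts_get_cpu_online(cpuinfo);
        /* react to the new value here (hypothetical handling) */
        (void) online;
    }

    erts_cpu_info_destroy(cpuinfo);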
diff --git a/erts/include/internal/ethr_internal.h b/erts/include/internal/ethr_internal.h
new file mode 100644
index 0000000000..e9c3daf783
--- /dev/null
+++ b/erts/include/internal/ethr_internal.h
@@ -0,0 +1,67 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Internal ethread exports
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_INTERNAL_H__
+#define ETHR_INTERNAL_H__
+
+#include "erl_misc_utils.h"
+
+extern ethr_memory_allocators ethr_mem__;
+extern erts_cpu_info_t *ethr_cpu_info__;
+extern size_t ethr_pagesize__;
+extern size_t ethr_min_stack_size__; /* kilo words */
+extern size_t ethr_max_stack_size__; /* kilo words */
+extern int ethr_not_completely_inited__;
+extern int ethr_not_inited__;
+
+extern void *(*ethr_thr_prepare_func__)(void);
+extern void (*ethr_thr_parent_func__)(void *);
+extern void (*ethr_thr_child_func__)(void *);
+
+#define ETHR_PAGE_ALIGN(SZ) \
+ (((((size_t) (SZ)) - 1)/ethr_pagesize__ + 1)*ethr_pagesize__)
+#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
+#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
+
+#undef ETHR_STACK_GUARD_SIZE
+#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
+# define ETHR_STACK_GUARD_SIZE (ethr_pagesize__)
+#endif
+
+/* implemented in lib_src/<thr-lib>/ethread.c */
+int ethr_set_tse__(ethr_ts_event *tsep);
+ethr_ts_event *ethr_get_tse__(void);
+ETHR_PROTO_NORETURN__ ethr_abort__(void);
+#ifdef ETHR_WIN32_THREADS
+int ethr_win_get_errno__(void);
+#endif
+
+/* implemented in lib_src/common/ethread_aux.c */
+int ethr_init_common__(ethr_init_data *id);
+int ethr_late_init_common__(ethr_late_init_data *lid);
+void ethr_run_exit_handlers__(void);
+void ethr_ts_event_destructor__(void *vtsep);
+
+
+#endif
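
ETHR_B2KW/ETHR_KW2B convert between bytes and "kilo words" (1024 pointers), rounding up, and ETHR_PAGE_ALIGN rounds a byte count up to whole pages; these serve the stack-size bookkeeping, since ethr_min_stack_size__/ethr_max_stack_size__ are kept in kilo words. A self-contained check of the arithmetic — the page size is a stand-in here, as ethr_pagesize__ is normally filled in at library init:

    #include <stdio.h>
    #include <stddef.h>

    static size_t ethr_pagesize__ = 4096;   /* stand-in for the demo */

    #define ETHR_PAGE_ALIGN(SZ) \
        (((((size_t) (SZ)) - 1)/ethr_pagesize__ + 1)*ethr_pagesize__)
    #define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
    #define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)

    int main(void)
    {
        /* On an LP64 system one kilo word is 8192 bytes. */
        printf("%zu\n", ETHR_PAGE_ALIGN(5000)); /* 8192            */
        printf("%zu\n", ETHR_B2KW(10000));      /* 2 kilo words    */
        printf("%zu\n", ETHR_KW2B(2));          /* 16384 bytes     */
        return 0;
    }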
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
new file mode 100644
index 0000000000..4ce3e75c78
--- /dev/null
+++ b/erts/include/internal/ethr_mutex.h
@@ -0,0 +1,510 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_MUTEX_H__
+#define ETHR_MUTEX_H__
+
+#define ETHR_RWMUTEX_INITIALIZED 0x99999999
+#define ETHR_MUTEX_INITIALIZED 0x77777777
+#define ETHR_COND_INITIALIZED 0x55555555
+
+#if 0
+# define ETHR_MTX_HARD_DEBUG
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+# ifdef __GNUC__
+# warning ETHR_MTX_HARD_DEBUG
+# endif
+/*# define ETHR_MTX_HARD_DEBUG_LFS*/
+/*# define ETHR_MTX_HARD_DEBUG_FENCE*/
+/*# define ETHR_MTX_HARD_DEBUG_Q*/
+# define ETHR_MTX_HARD_DEBUG_WSQ
+
+# if !defined(ETHR_MTX_HARD_DEBUG_WSQ) && defined(ETHR_MTX_HARD_DEBUG_Q)
+# define ETHR_MTX_HARD_DEBUG_WSQ
+# endif
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#if 0
+# define ETHR_MTX_Q_LOCK_SPINLOCK__
+# define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
+#elif defined(ETHR_PTHREADS)
+# define ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__
+# define ETHR_MTX_QLOCK_TYPE__ pthread_mutex_t
+#elif defined(ETHR_WIN32_THREADS)
+# define ETHR_MTX_Q_LOCK_CRITICAL_SECTION__
+# define ETHR_MTX_QLOCK_TYPE__ CRITICAL_SECTION
+#else
+# error Need a qlock implementation
+#endif
+
+#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+
+/* frequent read kind */
+#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
+#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_FLG__ - 1)
+#define ETHR_RWMTX_R_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+/* normal kind */
+#define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+#define ETHR_RWMTX_WAIT_FLGS__ \
+ (ETHR_RWMTX_W_WAIT_FLG__|ETHR_RWMTX_R_WAIT_FLG__)
+
+#define ETHR_CND_WAIT_FLG__ ETHR_RWMTX_R_WAIT_FLG__
+
+struct ethr_mutex_base_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long pre_fence;
+#endif
+ ethr_atomic_t flgs;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ int ws;
+#endif
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+ ethr_atomic_t hdbg_lfs;
+#endif
+};
+
+#endif
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_mutex_opt;
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_cond_opt;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ struct ethr_mutex_base_ mtxb;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ struct {
+ long pre_fence;
+ } mtxb; /* mtxb allows us to use the same macros as for mutex and rwmutex... */
+#endif
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread */
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ pthread_mutex_t pt_mtx;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ pthread_cond_t pt_cnd;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread */
+
+int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
+int ethr_mutex_init(ethr_mutex *);
+int ethr_mutex_destroy(ethr_mutex *);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+int ethr_mutex_trylock(ethr_mutex *);
+void ethr_mutex_lock(ethr_mutex *);
+void ethr_mutex_unlock(ethr_mutex *);
+#endif
+int ethr_cond_init_opt(ethr_cond *, ethr_cond_opt *);
+int ethr_cond_init(ethr_cond *);
+int ethr_cond_destroy(ethr_cond *);
+void ethr_cond_signal(ethr_cond *);
+void ethr_cond_broadcast(ethr_cond *);
+int ethr_cond_wait(ethr_cond *, ethr_mutex *);
+
+typedef enum {
+ ETHR_RWMUTEX_TYPE_NORMAL,
+ ETHR_RWMUTEX_TYPE_FREQUENT_READ,
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+} ethr_rwmutex_type;
+
+typedef enum {
+ ETHR_RWMUTEX_LONG_LIVED,
+ ETHR_RWMUTEX_SHORT_LIVED,
+ ETHR_RWMUTEX_UNKNOWN_LIVED
+} ethr_rwmutex_lived;
+
+typedef struct {
+ ethr_rwmutex_type type;
+ ethr_rwmutex_lived lived;
+ int main_spincount;
+ int aux_spincount;
+} ethr_rwmutex_opt;
+
+#define ETHR_RWMUTEX_OPT_DEFAULT_INITER \
+ {ETHR_RWMUTEX_TYPE_NORMAL, ETHR_RWMUTEX_UNKNOWN_LIVED, -1, -1}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+typedef union {
+ struct {
+ ethr_atomic_t readers;
+ int waiting_readers;
+ int byte_offset;
+ ethr_rwmutex_lived lived;
+ } data;
+ char align__[ETHR_CACHE_LINE_SIZE];
+} ethr_rwmtx_readers_array__;
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ struct ethr_mutex_base_ mtxb;
+ ethr_rwmutex_type type;
+ ethr_ts_event *rq_end;
+ union {
+ ethr_rwmtx_readers_array__ *ra;
+ int rs;
+ } tdata;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread_rwlock */
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ pthread_rwlock_t pt_rwlock;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread_rwlock */
+
+int ethr_rwmutex_set_reader_group(int);
+int ethr_rwmutex_init_opt(ethr_rwmutex *, ethr_rwmutex_opt *);
+int ethr_rwmutex_init(ethr_rwmutex *);
+int ethr_rwmutex_destroy(ethr_rwmutex *);
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) \
+ || !defined(ETHR_TRY_INLINE_FUNCS) \
+ || defined(ETHR_MUTEX_IMPL__)
+int ethr_rwmutex_tryrlock(ethr_rwmutex *);
+void ethr_rwmutex_rlock(ethr_rwmutex *);
+void ethr_rwmutex_runlock(ethr_rwmutex *);
+int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwunlock(ethr_rwmutex *);
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+#define ETHR_MTX_HARD_ASSERT(A) \
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
+#else
+#define ETHR_MTX_HARD_ASSERT(A) ((void) 1)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
+do { \
+ ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ > 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ >= 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == -1); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeadbeefdeadbeefL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeaddeaddeadL
+#else
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeaddeadL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeadL
+#endif
+
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ETHR_MTX_HARD_ASSERT((X)->mtxb.pre_fence == ETHR_MTX_HARD_DEBUG_PRE_FENCE);\
+ ETHR_MTX_HARD_ASSERT((X)->post_fence == ETHR_MTX_HARD_DEBUG_POST_FENCE); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X) \
+do { \
+ (X)->mtxb.pre_fence = ETHR_MTX_HARD_DEBUG_PRE_FENCE; \
+ (X)->post_fence = ETHR_MTX_HARD_DEBUG_POST_FENCE; \
+} while (0)
+#else
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
+#endif
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_MTX_DEFAULT_AUX_SPINCOUNT 50
+
+#define ETHR_CND_DEFAULT_MAIN_SPINCOUNT 0
+#define ETHR_CND_DEFAULT_AUX_SPINCOUNT 0
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+void ethr_mutex_lock_wait__(ethr_mutex *, long);
+void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ long act;
+ int res;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ res = (act == 0) ? 0 : EBUSY;
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ ethr_mutex_lock_wait__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_COMPILER_BARRIER;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
+
+ act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ ethr_mutex_unlock_wake__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#else /* pthread_mutex */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ int res;
+ res = pthread_mutex_trylock(&mtx->pt_mtx);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_lock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_unlock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_mutex */
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT 50
+
+#else /* pthread_rwlock */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_rwlock */
+
+int ethr_mutex_lib_init(int);
+int ethr_mutex_lib_late_init(int, int);
+
+#endif /* #ifndef ETHR_MUTEX_H__ */
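
Note the split in the API's error handling: init/destroy (and ethr_cond_wait) report failures through their int result, while lock/unlock return void — in both the own-implementation and pthread branches above they treat failure as fatal via ETHR_FATAL_ERROR__ rather than returning it. A minimal usage sketch, assuming the ethread library itself is already initialized:

    #include <stdlib.h>

    static ethr_mutex mtx;
    static ethr_cond cnd;
    static int ready = 0;

    void init_sync(void)
    {
        if (ethr_mutex_init(&mtx) != 0 || ethr_cond_init(&cnd) != 0)
            abort();                     /* init reports errors via int */
    }

    void wait_until_ready(void)
    {
        ethr_mutex_lock(&mtx);           /* void: fatal error on failure */
        while (!ready) {
            /* Re-checking the predicate in a loop also covers an
             * interrupted or spurious wakeup, whatever the int result. */
            ethr_cond_wait(&cnd, &mtx);
        }
        ethr_mutex_unlock(&mtx);
    }

    void signal_ready(void)
    {
        ethr_mutex_lock(&mtx);
        ready = 1;
        ethr_cond_signal(&cnd);
        ethr_mutex_unlock(&mtx);
    }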
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
new file mode 100644
index 0000000000..2f9f987d0b
--- /dev/null
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -0,0 +1,185 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: "Optimized" fallbacks used when native ops are missing
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
+#define ETHR_OPTIMIZED_FALLBACKS_H__
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+/* --- Optimized spinlocks using pthread spinlocks -------------------------- */
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef pthread_spinlock_t ethr_opt_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE int
+ethr_opt_spinlock_init(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_init((pthread_spinlock_t *) lock, 0);
+}
+
+static ETHR_INLINE int
+ethr_opt_spinlock_destroy(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_destroy((pthread_spinlock_t *) lock);
+}
+
+
+static ETHR_INLINE int
+ethr_opt_spin_unlock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_unlock((pthread_spinlock_t *) lock);
+}
+
+static ETHR_INLINE int
+ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_lock((pthread_spinlock_t *) lock);
+}
+
+#endif
+
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native spinlocks using native atomics -------------------------------- */
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ (long) 1, (long) 0) != 0) {
+ ETHR_SPIN_BODY;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native rwspinlocks using native atomics ------------------------------ */
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_rwlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+#ifdef DEBUG
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
+#endif
+ ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp+1, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
+ == ETHR_WLOCK_FLAG__);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp|ETHR_WLOCK_FLAG__, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = act & ~ETHR_WLOCK_FLAG__;
+ }
+ act |= ETHR_WLOCK_FLAG__;
+ /* Wait for readers to leave */
+ while (act != ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+#endif
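
The atomics-based rwspinlock packs the whole lock state into one word: bit 30 (ETHR_WLOCK_FLAG__) marks a writer and the low bits count readers. Readers cmpxchg the count upward while the flag is clear; a writer first claims the flag and then spins until the reader count drains. A self-contained restatement of the scheme in C11 atomics — an illustration, not the ethread code, which additionally uses acquire/release variants and ETHR_SPIN_BODY pauses in its wait loops:

    #include <stdatomic.h>

    #define WLOCK_FLAG (1L << 30)

    static atomic_long lock;             /* zero-initialized: unlocked */

    static void read_lock(void)
    {
        long exp = 0;
        for (;;) {
            if (atomic_compare_exchange_weak(&lock, &exp, exp + 1))
                break;                   /* reader count bumped */
            if (exp & WLOCK_FLAG)
                exp = 0;                 /* writer active: wait for idle */
        }
    }

    static void read_unlock(void)
    {
        atomic_fetch_sub(&lock, 1);
    }

    static void write_lock(void)
    {
        long exp = 0;
        for (;;) {
            if (atomic_compare_exchange_weak(&lock, &exp, exp | WLOCK_FLAG))
                break;                   /* flag claimed, readers may remain */
            exp &= ~WLOCK_FLAG;          /* cannot claim over another writer */
        }
        while (atomic_load(&lock) != WLOCK_FLAG)
            ;                            /* wait for readers to drain */
    }

    static void write_unlock(void)
    {
        atomic_store(&lock, 0);
    }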
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 4e7a38cd5c..4a205699bd 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -33,20 +33,9 @@
#include <stdlib.h>
#include "erl_errno.h"
-/*
- * Extra memory barrier requirements:
- * - ethr_atomic_or_old() needs to enforce a memory barrier sufficient
- * for a lock operation.
- * - ethr_atomic_and_old() needs to enforce a memory barrier sufficient
- * for an unlock operation.
- * - ethr_atomic_cmpxchg() needs to enforce a memory barrier sufficient
- * for a lock and unlock operation.
- */
-
-
-#undef ETHR_USE_RWMTX_FALLBACK
#undef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-#undef ETHR_HAVE_OPTIMIZED_LOCKS
+#undef ETHR_HAVE_OPTIMIZED_SPINLOCKS
+#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCKS
typedef struct {
long tv_sec;
@@ -54,6 +43,10 @@ typedef struct {
} ethr_timeval;
#if defined(DEBUG)
+# define ETHR_DEBUG
+#endif
+
+#if defined(ETHR_DEBUG)
# undef ETHR_XCHK
# define ETHR_XCHK 1
#else
@@ -68,47 +61,57 @@ typedef struct {
#elif defined(__WIN32__)
# define ETHR_INLINE __forceinline
#endif
-#if defined(DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
+#if defined(ETHR_DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
|| (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC))
# undef ETHR_INLINE
# define ETHR_INLINE
# undef ETHR_TRY_INLINE_FUNCS
#endif
-#ifdef ETHR_FORCE_INLINE_FUNCS
-# define ETHR_TRY_INLINE_FUNCS
-#endif
-#if !defined(ETHR_DISABLE_NATIVE_IMPLS) \
- && (defined(PURIFY) || defined(VALGRIND) || defined(ERTS_MIXED_CYGWIN_VC))
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && (defined(PURIFY)||defined(VALGRIND))
# define ETHR_DISABLE_NATIVE_IMPLS
#endif
-#define ETHR_RWMUTEX_INITIALIZED 0x99999999
-#define ETHR_MUTEX_INITIALIZED 0x77777777
-#define ETHR_COND_INITIALIZED 0x55555555
+/* Assume 64-byte cache line size */
+#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
-#define ETHR_CACHE_LINE_SIZE 64
+#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / ETHR_CACHE_LINE_SIZE) + 1) * ETHR_CACHE_LINE_SIZE)
-#ifdef ETHR_INLINE_FUNC_NAME_
-# define ETHR_CUSTOM_INLINE_FUNC_NAME_
-#else
+#ifndef ETHR_INLINE_FUNC_NAME_
# define ETHR_INLINE_FUNC_NAME_(X) X
#endif
-#define ETHR_COMPILER_BARRIER ethr_compiler_barrier()
-#ifdef __GNUC__
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER __asm__ __volatile__("":::"memory")
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
#endif
-#ifdef DEBUG
+int ethr_assert_failed(const char *file, int line, const char *func, char *a);
+#ifdef ETHR_DEBUG
#define ETHR_ASSERT(A) \
- ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
-int ethr_assert_failed(char *f, int l, char *a);
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, __func__, #A)))
#else
#define ETHR_ASSERT(A) ((void) 1)
#endif
+#if defined(__GNUC__)
+# define ETHR_PROTO_NORETURN__ void __attribute__((noreturn))
+# define ETHR_IMPL_NORETURN__ void
+#elif defined(__WIN32__) && defined(_MSC_VER)
+# define ETHR_PROTO_NORETURN__ __declspec(noreturn) void
+# define ETHR_IMPL_NORETURN__ __declspec(noreturn) void
+#else
+# define ETHR_PROTO_NORETURN__ void
+# define ETHR_IMPL_NORETURN__ void
+#endif
+
#if defined(ETHR_PTHREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The pthread implementation *
@@ -118,7 +121,9 @@ int ethr_assert_failed(char *f, int l, char *a);
#error "_GNU_SOURCE not defined. Please, compile all files with -D_GNU_SOURCE."
#endif
-#if defined(ETHR_HAVE_MIT_PTHREAD_H)
+#if defined(ETHR_NEED_NPTL_PTHREAD_H)
+#include <nptl/pthread.h>
+#elif defined(ETHR_HAVE_MIT_PTHREAD_H)
#include <pthread/mit/pthread.h>
#elif defined(ETHR_HAVE_PTHREAD_H)
#include <pthread.h>
@@ -128,130 +133,23 @@ int ethr_assert_failed(char *f, int l, char *a);
typedef pthread_t ethr_tid;
-typedef struct ethr_mutex_ ethr_mutex;
-struct ethr_mutex_ {
- pthread_mutex_t pt_mtx;
- int is_rec_mtx;
- ethr_mutex *prev;
- ethr_mutex *next;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-
-typedef struct ethr_cond_ ethr_cond;
-struct ethr_cond_ {
- pthread_cond_t pt_cnd;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
+typedef pthread_key_t ethr_tsd_key;
-#ifndef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-#define ETHR_USE_RWMTX_FALLBACK
-#else
-typedef struct ethr_rwmutex_ ethr_rwmutex;
-struct ethr_rwmutex_ {
- pthread_rwlock_t pt_rwlock;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-#endif
+#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-/* Static initializers */
-#if ETHR_XCHK
-#define ETHR_MUTEX_XCHK_INITER , ETHR_MUTEX_INITIALIZED
-#define ETHR_COND_XCHK_INITER , ETHR_COND_INITIALIZED
-#else
-#define ETHR_MUTEX_XCHK_INITER
-#define ETHR_COND_XCHK_INITER
+#if defined(PURIFY) || defined(VALGRIND)
+# define ETHR_FORCE_PTHREAD_RWLOCK
+# define ETHR_FORCE_PTHREAD_MUTEX
#endif
-#define ETHR_MUTEX_INITER {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-#define ETHR_COND_INITER {PTHREAD_COND_INITIALIZER ETHR_COND_XCHK_INITER}
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE) \
- || defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-# define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
-# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
-# define ETHR_REC_MUTEX_INITER \
- {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, 1, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-# endif
-#else
-# undef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+#if !defined(ETHR_FORCE_PTHREAD_RWLOCK)
+# define ETHR_USE_OWN_RWMTX_IMPL__
#endif
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-# define ETHR_NO_FORKSAFETY 1
+#if !defined(ETHR_FORCE_PTHREAD_MUTEX) && 0
+# define ETHR_USE_OWN_MTX_IMPL__
#endif
-typedef pthread_key_t ethr_tsd_key;
-
-#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- return pthread_mutex_trylock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- return pthread_mutex_lock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- return pthread_mutex_unlock(&mtx->pt_mtx);
-}
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-#endif /* ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
-
#elif defined(ETHR_WIN32_THREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The native win32 threads implementation *
@@ -273,409 +171,22 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
# undef WIN32_LEAN_AND_MEAN
#endif
-/* Types */
-typedef long ethr_tid; /* thread id type */
-typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
-#if ETHR_XCHK
- int is_rec_mtx;
-#endif
-} ethr_mutex;
-
-typedef struct cnd_wait_event__ cnd_wait_event_;
+struct ethr_join_data_;
+/* Types */
typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
- cnd_wait_event_ *queue;
- cnd_wait_event_ *queue_end;
-} ethr_cond;
-
-#define ETHR_USE_RWMTX_FALLBACK
-
-/* Static initializers */
-
-#define ETHR_MUTEX_INITER {0}
-#define ETHR_COND_INITER {0}
-
-#define ETHR_REC_MUTEX_INITER ETHR_MUTEX_INITER
-
-#define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
+ long id;
+ struct ethr_join_data_ *jdata;
+} ethr_tid; /* thread id type */
typedef DWORD ethr_tsd_key;
#undef ETHR_HAVE_ETHR_SIG_FUNCS
-#ifdef ETHR_TRY_INLINE_FUNCS
-int ethr_fake_static_mutex_init(ethr_mutex *mtx);
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return TryEnterCriticalSection(&mtx->cs) ? 0 : EBUSY;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&mtx->cs);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- LeaveCriticalSection(&mtx->cs);
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#ifdef ERTS_MIXED_CYGWIN_VC
-
-/* atomics */
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
-#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-#endif
-
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
-
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile LONG x___ = 0; \
- (void) _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
-
-typedef struct {
- volatile LONG value;
-} ethr_atomic_t;
-
-typedef struct {
- volatile LONG locked;
-} ethr_spinlock_t;
-
-typedef struct {
- volatile LONG counter;
-} ethr_rwlock_t;
-#define ETHR_WLOCK_FLAG__ (((LONG) 1) << 30)
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- *i = var->value;
-#else
- *i = InterlockedExchangeAdd(&var->value, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = InterlockedExchangeAdd(&var->value, (LONG) i);
- *testp += i;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- (void) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- (void) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedAnd() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedAnd(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedOr() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedOr(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedCompareExchange() provides a full
- * memory barrier.
- */
- *old = _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) expected);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = (long) InterlockedExchange(&var->value, (LONG) new);
- return 0;
-}
-
-/*
- * According to msdn InterlockedExchange() provides a full
- * memory barrier.
- */
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->locked = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->locked, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
-{
- return 0;
-}
-
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedExchange(&lock->locked, (LONG) 0);
-#ifdef DEBUG
- ETHR_ASSERT(old == 1);
-#endif
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
-{
- LONG old;
- do {
- old = InterlockedExchange(&lock->locked, (LONG) 1);
- } while (old != (LONG) 0);
- ETHR_COMPILER_BARRIER;
- return 0;
-}
+#define ETHR_USE_OWN_RWMTX_IMPL__
+#define ETHR_USE_OWN_MTX_IMPL__
-/*
- * According to msdn InterlockedIncrement, InterlockedDecrement,
- * and InterlockedExchangeAdd(), _InterlockedAnd, and _InterlockedOr
- * provides full memory barriers.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->counter = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->counter, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedDecrement(&lock->counter);
- ETHR_ASSERT(old != 0);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- while (1) {
- LONG old = InterlockedIncrement(&lock->counter);
- if ((old & ETHR_WLOCK_FLAG__) == 0)
- break; /* Got read lock */
- /* Restore and wait for writers to unlock */
- old = InterlockedDecrement(&lock->counter);
- while (old & ETHR_WLOCK_FLAG__) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- }
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- _InterlockedAnd(&lock->counter, ~ETHR_WLOCK_FLAG__);
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- LONG old;
- do {
- old = _InterlockedOr(&lock->counter, ETHR_WLOCK_FLAG__);
- } while (old & ETHR_WLOCK_FLAG__);
- /* We got the write part of the lock; wait for readers to unlock */
- while ((old & ~ETHR_WLOCK_FLAG__) != 0) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* #ifdef ERTS_MIXED_CYGWIN_VC */
+#define ETHR_YIELD() (Sleep(0), 0)
#else /* No supported thread lib found */
@@ -687,6 +198,12 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
#endif
+#ifdef SIZEOF_LONG
+#if SIZEOF_LONG < ETHR_SIZEOF_PTR
+#error size of long currently needs to be at least the same as size of void *
+#endif
+#endif
+
/* __builtin_expect() is needed by both native atomics code
* and the fallback code */
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
@@ -698,6 +215,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# if defined(__GNUC__)
# if defined(ETHR_PREFER_GCC_NATIVE_IMPLS)
# include "gcc/ethread.h"
+# elif defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
+# include "libatomic_ops/ethread.h"
# endif
# ifndef ETHR_HAVE_NATIVE_ATOMICS
# if ETHR_SIZEOF_PTR == 4
@@ -718,115 +237,141 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# endif
# endif
# include "gcc/ethread.h"
+# include "libatomic_ops/ethread.h"
# endif
+# elif defined(ETHR_WIN32_THREADS)
+# include "win/ethread.h"
# endif
-#endif /* !defined(ETHR_DISABLE_NATIVE_IMPLS) */
-
-#ifdef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-# undef ETHR_HAVE_NATIVE_ATOMICS
-#endif
-#ifdef ETHR_HAVE_OPTIMIZED_LOCKS
-# undef ETHR_HAVE_NATIVE_LOCKS
-#endif
+#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
+#if defined(__GNUC__)
+# ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+# endif
+# ifndef ETHR_SPIN_BODY
+# if defined(__i386__) || defined(__x86_64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
+# elif defined(__ia64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("hint @pause" : : : "memory")
+# elif defined(__sparc__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("membar #LoadLoad")
+# else
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+# endif
+# endif
+#elif defined(ETHR_WIN32_THREADS)
+# ifndef ETHR_COMPILER_BARRIER
+# include <intrin.h>
+# pragma intrinsic(_ReadWriteBarrier)
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+# ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
+# endif
#endif
-typedef struct {
- unsigned open;
- ethr_mutex mtx;
- ethr_cond cnd;
-} ethr_gate;
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Map ethread native atomics to ethread API atomics.
+ * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
+ * i.e. when our lock-based atomic fallback is used, a noop is sufficient.
*/
-typedef ethr_native_atomic_t ethr_atomic_t;
+#define ETHR_MEMORY_BARRIER do { } while (0)
+#define ETHR_WRITE_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER do { } while (0)
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-/*
- * Map ethread native spinlocks to ethread API spinlocks.
- */
-typedef ethr_native_spinlock_t ethr_spinlock_t;
-/*
- * Map ethread native rwlocks to ethread API rwlocks.
- */
-typedef ethr_native_rwlock_t ethr_rwlock_t;
+#ifndef ETHR_WRITE_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER_IS_FULL
#endif
-
-#ifdef ETHR_USE_RWMTX_FALLBACK
-typedef struct {
- ethr_mutex mtx;
- ethr_cond rcnd;
- ethr_cond wcnd;
- unsigned readers;
- unsigned waiting_readers;
- unsigned waiting_writers;
-#if ETHR_XCHK
- int initialized;
+#ifndef ETHR_READ_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER_IS_FULL
#endif
-} ethr_rwmutex;
+#ifndef ETHR_READ_DEPEND_MEMORY_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER_IS_COMPILER_BARRIER
#endif
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-typedef long ethr_atomic_t;
-#endif
+#define ETHR_FATAL_ERROR__(ERR) \
+ ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err);
-#if defined(ETHR_WIN32_THREADS)
-typedef struct {
- CRITICAL_SECTION cs;
-} ethr_spinlock_t;
-typedef struct {
- CRITICAL_SECTION cs;
- unsigned counter;
-} ethr_rwlock_t;
+void ethr_compiler_barrier_fallback(void);
+#ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+#endif
-int ethr_do_spinlock_init(ethr_spinlock_t *lock);
-int ethr_do_rwlock_init(ethr_rwlock_t *lock);
+#ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+#endif
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+#ifndef ETHR_YIELD
+# if defined(ETHR_HAVE_SCHED_YIELD)
+# ifdef ETHR_HAVE_SCHED_H
+# include <sched.h>
+# endif
+# include <errno.h>
+# if defined(ETHR_SCHED_YIELD_RET_INT)
+# define ETHR_YIELD() (sched_yield() < 0 ? errno : 0)
+# else
+# define ETHR_YIELD() (sched_yield(), 0)
+# endif
+# elif defined(ETHR_HAVE_PTHREAD_YIELD)
+# if defined(ETHR_PTHREAD_YIELD_RET_INT)
+# define ETHR_YIELD() pthread_yield()
+# else
+# define ETHR_YIELD() (pthread_yield(), 0)
+# endif
+# else
+# define ETHR_YIELD() (ethr_compiler_barrier(), 0)
+# endif
+#endif
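Taken together, ETHR_SPIN_BODY, ETHR_YIELD and ETHR_YIELD_AFTER_BUSY_LOOPS are
the ingredients of a classic bounded spin-then-yield wait. The actual wait
loops live elsewhere in the ethread library; the following is only a minimal
sketch of how these macros are meant to combine, with the flag argument
standing in for whatever condition a caller spins on:

    /* Hypothetical spin-then-yield loop built from the macros above. */
    static void spin_wait(volatile int *flag)
    {
        int spins = 0;
        while (!*flag) {
            ETHR_SPIN_BODY;                          /* pause/nop hint */
            if (++spins >= ETHR_YIELD_AFTER_BUSY_LOOPS) {
                (void) ETHR_YIELD();                 /* give up the CPU */
                spins = 0;
            }
        }
    }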
+
+#include "ethr_optimized_fallbacks.h"
-#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-typedef struct {
- pthread_spinlock_t spnlck;
-} ethr_spinlock_t;
typedef struct {
- pthread_spinlock_t spnlck;
- unsigned counter;
-} ethr_rwlock_t;
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+ void *(*thread_create_prepare_func)(void);
+ void (*thread_create_parent_func)(void *);
+ void (*thread_create_child_func)(void *);
+} ethr_init_data;
-#else /* ethr mutex/rwmutex */
+#define ETHR_INIT_DATA_DEFAULT_INITER {NULL, NULL, NULL}
typedef struct {
- ethr_mutex mtx;
-} ethr_spinlock_t;
+ void *(*alloc)(size_t);
+ void *(*realloc)(void *, size_t);
+ void (*free)(void *);
+} ethr_memory_allocator;
+
+#define ETHR_MEM_ALLOC_DEF_INITER__ {NULL, NULL, NULL}
typedef struct {
- ethr_rwmutex rwmtx;
-} ethr_rwlock_t;
+ ethr_memory_allocator std;
+ ethr_memory_allocator sl;
+ ethr_memory_allocator ll;
+} ethr_memory_allocators;
-#endif /* end mutex/rwmutex */
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
+#define ETHR_MEM_ALLOCS_DEF_INITER__ \
+ {ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__}
typedef struct {
- void *(*alloc)(size_t);
- void *(*realloc)(void *, size_t);
- void (*free)(void *);
- void *(*thread_create_prepare_func)(void);
- void (*thread_create_parent_func)(void *);
- void (*thread_create_child_func)(void *);
-} ethr_init_data;
+ ethr_memory_allocators mem;
+ int reader_groups;
+ int main_threads;
+} ethr_late_init_data;
-#define ETHR_INIT_DATA_DEFAULT_INITER {malloc, realloc, free, NULL, NULL, NULL}
+#define ETHR_LATE_INIT_DATA_DEFAULT_INITER \
+ {ETHR_MEM_ALLOCS_DEF_INITER__, 0, 0}
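The split into ethr_init_data and ethr_late_init_data reflects a two-stage
start-up: ethr_init() is called very early with only the thread create hooks,
while ethr_late_init() is called once the runtime knows which allocators to
hand the library and how many reader groups and main threads it will use. A
hypothetical caller (error handling elided, values illustrative rather than
taken from this diff) might do:

    ethr_init_data id = ETHR_INIT_DATA_DEFAULT_INITER;
    ethr_late_init_data lid = ETHR_LATE_INIT_DATA_DEFAULT_INITER;

    ethr_init(&id);                /* early: no allocators available yet */

    lid.mem.std.alloc = malloc;    /* plug in real allocators later */
    lid.mem.std.realloc = realloc;
    lid.mem.std.free = free;
    lid.reader_groups = 64;        /* example value */
    ethr_late_init(&lid);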
typedef struct {
int detached; /* boolean (default false) */
@@ -835,18 +380,15 @@ typedef struct {
#define ETHR_THR_OPTS_DEFAULT_INITER {0, -1}
-#if defined(ETHR_CUSTOM_INLINE_FUNC_NAME_) || !defined(ETHR_TRY_INLINE_FUNCS)
-# define ETHR_NEED_MTX_PROTOTYPES__
-# define ETHR_NEED_RWMTX_PROTOTYPES__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
# define ETHR_NEED_SPINLOCK_PROTOTYPES__
+# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
# define ETHR_NEED_ATOMIC_PROTOTYPES__
#endif
-#if !defined(ETHR_NEED_RWMTX_PROTOTYPES__) && defined(ETHR_USE_RWMTX_FALLBACK)
-# define ETHR_NEED_RWMTX_PROTOTYPES__
-#endif
-
int ethr_init(ethr_init_data *);
+int ethr_late_init(ethr_late_init_data *);
int ethr_install_exit_handler(void (*funcp)(void));
int ethr_thr_create(ethr_tid *, void * (*)(void *), void *, ethr_thr_opts *);
int ethr_thr_join(ethr_tid, void **);
@@ -854,65 +396,6 @@ int ethr_thr_detach(ethr_tid);
void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_mutex_init(ethr_mutex *);
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-int ethr_rec_mutex_init(ethr_mutex *);
-#endif
-int ethr_mutex_destroy(ethr_mutex *);
-int ethr_mutex_set_forksafe(ethr_mutex *);
-int ethr_mutex_unset_forksafe(ethr_mutex *);
-#ifdef ETHR_NEED_MTX_PROTOTYPES__
-int ethr_mutex_trylock(ethr_mutex *);
-int ethr_mutex_lock(ethr_mutex *);
-int ethr_mutex_unlock(ethr_mutex *);
-#endif
-int ethr_cond_init(ethr_cond *);
-int ethr_cond_destroy(ethr_cond *);
-int ethr_cond_signal(ethr_cond *);
-int ethr_cond_broadcast(ethr_cond *);
-int ethr_cond_wait(ethr_cond *, ethr_mutex *);
-int ethr_cond_timedwait(ethr_cond *, ethr_mutex *, ethr_timeval *);
-
-int ethr_rwmutex_init(ethr_rwmutex *);
-int ethr_rwmutex_destroy(ethr_rwmutex *);
-#ifdef ETHR_NEED_RWMTX_PROTOTYPES__
-int ethr_rwmutex_tryrlock(ethr_rwmutex *);
-int ethr_rwmutex_rlock(ethr_rwmutex *);
-int ethr_rwmutex_runlock(ethr_rwmutex *);
-int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwunlock(ethr_rwmutex *);
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-int ethr_atomic_init(ethr_atomic_t *, long);
-int ethr_atomic_set(ethr_atomic_t *, long);
-int ethr_atomic_read(ethr_atomic_t *, long *);
-int ethr_atomic_inctest(ethr_atomic_t *, long *);
-int ethr_atomic_dectest(ethr_atomic_t *, long *);
-int ethr_atomic_inc(ethr_atomic_t *);
-int ethr_atomic_dec(ethr_atomic_t *);
-int ethr_atomic_addtest(ethr_atomic_t *, long, long *);
-int ethr_atomic_add(ethr_atomic_t *, long);
-int ethr_atomic_and_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_or_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_xchg(ethr_atomic_t *, long, long *);
-int ethr_atomic_cmpxchg(ethr_atomic_t *, long, long, long *);
-#endif
-
-#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
-int ethr_spinlock_init(ethr_spinlock_t *);
-int ethr_spinlock_destroy(ethr_spinlock_t *);
-int ethr_spin_unlock(ethr_spinlock_t *);
-int ethr_spin_lock(ethr_spinlock_t *);
-
-int ethr_rwlock_init(ethr_rwlock_t *);
-int ethr_rwlock_destroy(ethr_rwlock_t *);
-int ethr_read_unlock(ethr_rwlock_t *);
-int ethr_read_lock(ethr_rwlock_t *);
-int ethr_write_unlock(ethr_rwlock_t *);
-int ethr_write_lock(ethr_rwlock_t *);
-#endif
int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
@@ -920,13 +403,6 @@ int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
void *ethr_tsd_get(ethr_tsd_key);
-int ethr_gate_init(ethr_gate *);
-int ethr_gate_destroy(ethr_gate *);
-int ethr_gate_close(ethr_gate *);
-int ethr_gate_let_through(ethr_gate *, unsigned);
-int ethr_gate_wait(ethr_gate *);
-int ethr_gate_swait(ethr_gate *, int);
-
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#include <signal.h>
int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset);
@@ -935,534 +411,579 @@ int ethr_sigwait(const sigset_t *set, int *sig);
void ethr_compiler_barrier(void);
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_init(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_set(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
- *i = ethr_native_atomic_read(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ethr_native_atomic_add(var, incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = ethr_native_atomic_add_return(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- ethr_native_atomic_inc(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- ethr_native_atomic_dec(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_inc_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_dec_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_and_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_or_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = ethr_native_atomic_xchg(var, new);
- return 0;
-}
-
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_cmpxchg(var, new, expected);
- return 0;
-}
+#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
+typedef ethr_native_spinlock_t ethr_spinlock_t;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+typedef ethr_opt_spinlock_t ethr_spinlock_t;
+#elif defined(__WIN32__)
+typedef CRITICAL_SECTION ethr_spinlock_t;
+#else
+typedef pthread_mutex_t ethr_spinlock_t;
+#endif
-#endif /* ETHR_HAVE_NATIVE_ATOMICS */
+#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
+int ethr_spinlock_init(ethr_spinlock_t *);
+int ethr_spinlock_destroy(ethr_spinlock_t *);
+void ethr_spin_unlock(ethr_spinlock_t *);
+void ethr_spin_lock(ethr_spinlock_t *);
+#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spinlock_init(lock);
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_init((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ if (!InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION *) lock, INT_MAX))
+ return ethr_win_get_errno__();
+ return 0;
+#else
+ return pthread_mutex_init((pthread_mutex_t *) lock, NULL);
+#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_destroy((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ DeleteCriticalSection((CRITICAL_SECTION *) lock);
+ return 0;
+#else
+ return pthread_mutex_destroy((pthread_mutex_t *) lock);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_unlock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_unlock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ LeaveCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_unlock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
- ethr_native_rwlock_init(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_lock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_lock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ EnterCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_lock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-#endif /* ETHR_HAVE_NATIVE_LOCKS */
-
#endif /* ETHR_TRY_INLINE_FUNCS */
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Fallbacks for atomics used in absence of optimized implementation.
+ * Map ethread native atomics to ethread API atomics.
*/
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+typedef ethr_native_atomic_t ethr_atomic_t;
+#else
+typedef long ethr_atomic_t;
+#endif
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+void ethr_atomic_init(ethr_atomic_t *, long);
+void ethr_atomic_set(ethr_atomic_t *, long);
+long ethr_atomic_read(ethr_atomic_t *);
+long ethr_atomic_inc_read(ethr_atomic_t *);
+long ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+long ethr_atomic_add_read(ethr_atomic_t *, long);
+void ethr_atomic_add(ethr_atomic_t *, long);
+long ethr_atomic_read_band(ethr_atomic_t *, long);
+long ethr_atomic_read_bor(ethr_atomic_t *, long);
+long ethr_atomic_xchg(ethr_atomic_t *, long);
+long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
+long ethr_atomic_read_acqb(ethr_atomic_t *);
+long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, long);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+long ethr_atomic_dec_read_relb(ethr_atomic_t *);
+long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
+long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * ETHR_MEMORY_BARRIER orders between locked and atomic accesses only,
- * i.e. when this atomic fallback is used a noop is sufficient.
+ * Fallbacks for atomics used in absence of a native implementation.
*/
-#define ETHR_MEMORY_BARRIER
#define ETHR_ATOMIC_ADDR_BITS 10
#define ETHR_ATOMIC_ADDR_SHIFT 6
typedef struct {
union {
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- pthread_spinlock_t spnlck;
-#else
- ethr_mutex mtx;
-#endif
+ ethr_spinlock_t lck;
char buf[ETHR_CACHE_LINE_SIZE];
} u;
} ethr_atomic_protection_t;
extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-
#define ETHR_ATOMIC_PTR2LCK__(PTR) \
(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.spnlck)
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
do { \
- pthread_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = pthread_spin_lock(slp__); \
- if (res__ != 0) \
- return res__; \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
{ EXPS; } \
- return pthread_spin_unlock(slp__); \
+ ethr_spin_unlock(slp__); \
} while (0)
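The fallback protects plain long variables with a fixed table of
1 << ETHR_ATOMIC_ADDR_BITS (1024) spinlocks, each padded to a cache line. A
variable's address picks its lock: shifting by ETHR_ATOMIC_ADDR_SHIFT (6)
discards the offset within a 64-byte cache line, and the low 10 bits of what
remains index the table, so two variables on the same cache line always share
a lock. A standalone sketch of the same index computation:

    /* Illustrative only; mirrors ETHR_ATOMIC_PTR2LCK__ above. */
    unsigned lock_index(void *ptr)
    {
        return (unsigned) ((((unsigned long) ptr) >> 6)  /* drop line offset */
                           & ((1u << 10) - 1));          /* keep 10 index bits */
    }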
-#else /* ethread mutex */
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.mtx)
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_mutex *mtxp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(mtxp__); \
- if (res__ != 0) \
- return res__; \
- { EXPS; } \
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(mtxp__); \
-} while (0)
-
-#endif /* end ethread mutex */
-
-#ifdef ETHR_TRY_INLINE_FUNCS
+#endif
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_init(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *i = (long) *var);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *incp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *testp = (long) ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_add(var, incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_add_return(var, i);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *decp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, *testp = (long) --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_inc(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *incp,
- long i,
- long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *incp += i; *testp = *incp);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *incp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *decp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var &= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_and_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var |= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_or_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
+ long new)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var = new);
-}
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_xchg(var, new);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
long new,
- long expected,
- long *old)
+ long exp)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(
- var,
- long old_val = *var;
- *old = old_val;
- if (__builtin_expect(old_val == expected, 1))
- *var = new;
- );
- return 0;
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS */
-
/*
- * Fallbacks for spin locks, and rw spin locks used in absence of
- * optimized implementation.
+ * Important memory barrier requirements.
+ *
+ * The following atomic operations *must* supply a memory barrier of
+ * at least the type specified by their suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
*/
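In practice the suffixed operations pair a release-ordered publish with an
acquire-ordered consume. A minimal sketch, assuming a producer/consumer pair
sharing one ethr_atomic_t flag (the surrounding data and names are
illustrative, not from this diff):

    /* Producer: make data visible before raising the flag. */
    data = 42;
    ethr_atomic_set_relb(&flag, 1);      /* release: orders prior stores */

    /* Consumer: only touch data after seeing the flag. */
    if (ethr_atomic_read_acqb(&flag)) {  /* acquire: orders later loads */
        use(data);
    }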
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-#ifdef ETHR_TRY_INLINE_FUNCS
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read_acqb(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return_acqb(var);
#else
- return ethr_mutex_init(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set_relb(var, val);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec_relb(var);
#else
- return ethr_mutex_destroy(&lock->mtx);
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
#endif
}
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return_relb(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ long new,
+ long exp)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ long new,
+ long exp)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_lock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_relb(var, new, exp);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
}
-#ifdef ETHR_USE_RWMTX_FALLBACK
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) X
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
+
+#if defined(ETHR_WIN32_THREADS)
+# include "win/ethr_event.h"
#else
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) ETHR_INLINE_FUNC_NAME_(X)
+# include "pthread/ethr_event.h"
+#endif
+
+int ethr_set_main_thr_status(int, int);
+int ethr_get_main_thr_status(int *);
+
+struct ethr_ts_event_ {
+ ethr_ts_event *next;
+ ethr_ts_event *prev;
+ ethr_event event;
+ void *udata;
+ ethr_atomic_t uaflgs;
+ unsigned uflgs;
+ unsigned iflgs; /* for ethr lib only */
+ short rgix; /* for ethr lib only */
+ short mtix; /* for ethr lib only */
+};
+
+#define ETHR_TS_EV_ETHREAD (((unsigned) 1) << 0)
+#define ETHR_TS_EV_INITED (((unsigned) 1) << 1)
+#define ETHR_TS_EV_TMP (((unsigned) 1) << 2)
+#define ETHR_TS_EV_MAIN_THR (((unsigned) 1) << 3)
+
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp);
+int ethr_free_ts_event__(ethr_ts_event *tsep);
+int ethr_make_ts_event__(ethr_ts_event **tsepp);
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+ethr_ts_event *ethr_get_ts_event(void);
+void ethr_leave_ts_event(ethr_ts_event *);
+#endif
+
+#if defined(ETHR_PTHREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern pthread_key_t ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = pthread_getspecific(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_make_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
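+    /* Nothing to do here: on pthreads the ts event stays bound to the
+     * thread via its thread-specific-data key and is reclaimed at
+     * thread exit (assumed; that cleanup path is not in this diff). */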
+
+}
+
#endif
+#elif defined(ETHR_WIN32_THREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern DWORD ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = TlsGetValue(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_get_tmp_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+ if (tsep->iflgs & ETHR_TS_EV_TMP) {
+ int res = ethr_free_ts_event__(tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+#endif
+
+#endif
+
+#include "ethr_mutex.h" /* Need atomic declarations and tse */
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+typedef ethr_native_rwlock_t ethr_rwlock_t;
+#else
+typedef ethr_rwmutex ethr_rwlock_t;
+#endif
+
+#ifdef ETHR_NEED_RWSPINLOCK_PROTOTYPES__
+int ethr_rwlock_init(ethr_rwlock_t *);
+int ethr_rwlock_destroy(ethr_rwlock_t *);
+void ethr_read_unlock(ethr_rwlock_t *);
+void ethr_read_lock(ethr_rwlock_t *);
+void ethr_write_unlock(ethr_rwlock_t *);
+void ethr_write_lock(ethr_rwlock_t *);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_rwlock_init(lock);
+ return 0;
#else
- return ethr_rwmutex_init(&lock->rwmtx);
+ return ethr_rwmutex_init_opt((ethr_rwmutex *) lock, NULL);
#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ return 0;
#else
- return ethr_rwmutex_destroy(&lock->rwmtx);
+ return ethr_rwmutex_destroy((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter--;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_runlock)(&lock->rwmtx);
+ ethr_rwmutex_runlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int locked = 0;
- do {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- if ((lock->counter & ETHR_RWLOCK_WRITERS) == 0) {
- lock->counter++;
- locked = 1;
- }
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- } while (!locked);
- return 0;
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rlock)(&lock->rwmtx);
+ ethr_rwmutex_rlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwunlock)(&lock->rwmtx);
+ ethr_rwmutex_rwunlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- while (1) {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter |= ETHR_RWLOCK_WRITERS;
- if (lock->counter == ETHR_RWLOCK_WRITERS)
- return 0;
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- }
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwlock)(&lock->rwmtx);
+ ethr_rwmutex_rwlock((ethr_rwmutex *) lock);
#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#if defined(ETHR_HAVE_OPTIMIZED_LOCKS) || defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-# define ETHR_HAVE_OPTIMIZED_SPINLOCK
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* #ifndef ETHREAD_H__ */
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index c9fd87c2f6..5debb44756 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -29,26 +29,54 @@
/* Define if you have pthreads */
#undef ETHR_PTHREADS
+/* Define if you need the <nptl/pthread.h> header file. */
+#undef ETHR_NEED_NPTL_PTHREAD_H
+
/* Define if you have the <pthread.h> header file. */
#undef ETHR_HAVE_PTHREAD_H
/* Define if the pthread.h header file is in pthread/mit directory. */
#undef ETHR_HAVE_MIT_PTHREAD_H
-/* Define if you have the pthread_mutexattr_settype function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE
+/* Define if you have the pthread_spin_lock function. */
+#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
-/* Define if you have the pthread_mutexattr_setkind_np function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP
+/* Define if you want to force usage of pthread rwlocks */
+#undef ETHR_FORCE_PTHREAD_RWLOCK
-/* Define if you have the pthread_atfork function. */
-#undef ETHR_HAVE_PTHREAD_ATFORK
+/* Define if you have the pthread_rwlockattr_setkind_np() function. */
+#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
-/* Define if you have the pthread_spin_lock function. */
-#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
+/* Define if you have the PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock
+ attribute. */
+#undef ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
-/* Define if you have a pthread_rwlock implementation that can be used */
-#undef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+/* Define if you have a linux futex implementation. */
+#undef ETHR_HAVE_LINUX_FUTEX
+
+/* Define if you have gcc atomic operations */
+#undef ETHR_HAVE_GCC_ATOMIC_OPS
+
+/* Define if you prefer gcc native ethread implementations */
+#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+
+/* Define if you have the <sched.h> header file. */
+#undef ETHR_HAVE_SCHED_H
+
+/* Define if you have the sched_yield() function. */
+#undef ETHR_HAVE_SCHED_YIELD
+
+/* Define if you have the pthread_yield() function. */
+#undef ETHR_HAVE_PTHREAD_YIELD
+
+/* Define if pthread_yield() returns an int. */
+#undef ETHR_PTHREAD_YIELD_RET_INT
+
+/* Define if sched_yield() returns an int. */
+#undef ETHR_SCHED_YIELD_RET_INT
+
+/* Define if you want compatibility with x86 processors before the Pentium 4. */
+#undef ETHR_PRE_PENTIUM4_COMPAT
/* Define if you have the pthread_rwlockattr_setkind_np() function. */
#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
@@ -63,6 +91,15 @@
/* Define if you prefer gcc native ethread implementations */
#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+/* Define if you have libatomic_ops atomic operations */
+#undef ETHR_HAVE_LIBATOMIC_OPS
+
+/* Define if you prefer libatomic_ops native ethread implementations */
+#undef ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS
+
+/* Define to the size of AO_t if libatomic_ops is used */
+#undef ETHR_SIZEOF_AO_T
+
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index 775030c8d5..5fe6e23477 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -55,6 +55,7 @@ do { \
volatile long x___ = 0; \
(void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
} while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
@@ -157,6 +158,24 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
return act;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ return __sync_add_and_fetch(&var->counter, (long) 0);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif
#endif
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 90b4c5f773..f28258059f 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -32,8 +32,11 @@ typedef struct {
volatile long counter;
} ethr_native_atomic_t;
-#ifdef __x86_64__
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
#else
#define ETHR_MEMORY_BARRIER \
do { \
@@ -42,7 +45,9 @@ do { \
} while (0)
#endif
-#ifdef ETHR_TRY_INLINE_FUNCS
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#ifdef __x86_64__
#define LONG_SUFFIX "q"
@@ -158,6 +163,22 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return tmp;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+#define ethr_native_atomic_read_acqb ethr_native_atomic_read
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+#define ethr_native_atomic_set_relb ethr_native_atomic_set
+#else
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#endif
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#undef LONG_SUFFIX
#endif /* ETHR_TRY_INLINE_FUNCS */
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index fad8b108fa..ed43e77279 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_I386_ETHREAD_H */
diff --git a/erts/include/internal/i386/rwlock.h b/erts/include/internal/i386/rwlock.h
index c009be8ef1..be47f459ce 100644
--- a/erts/include/internal/i386/rwlock.h
+++ b/erts/include/internal/i386/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#define ETHR_RWLOCK_OFFSET (1<<24)
diff --git a/erts/include/internal/i386/spinlock.h b/erts/include/internal/i386/spinlock.h
index 2b4832e26a..0325324895 100644
--- a/erts/include/internal/i386/spinlock.h
+++ b/erts/include/internal/i386/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
@@ -46,7 +46,7 @@ ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
* On i386 this needs to be a locked operation
* to avoid Pentium Pro errata 66 and 92.
*/
-#if defined(__x86_64__)
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
__asm__ __volatile__("" : : : "memory");
*(unsigned char*)&lock->lock = 0;
#else
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
new file mode 100644
index 0000000000..a6eb43a0bd
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -0,0 +1,292 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_ATOMIC_H__
+#define ETHR_LIBATOMIC_OPS_ATOMIC_H__
+
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if (defined(__i386__) && !defined(ETHR_PRE_PENTIUM4_COMPAT)) \
+ || defined(__x86_64__)
+#define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#include "atomic_ops.h"
+
+/*
+ * libatomic_ops can be downloaded from:
+ * http://www.hpl.hp.com/research/linux/atomic_ops/
+ *
+ * These operations need to be defined by libatomic_ops;
+ * otherwise, we won't compile:
+ * - AO_nop_full()
+ * - AO_load()
+ * - AO_store()
+ * - AO_compare_and_swap()
+ *
+ * The `AO_t' type also has to be at least as large as
+ * the `void *' and `long' types.
+ */
+
+#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
+#error The AO_t type is too small
+#endif
+
+typedef struct {
+ volatile AO_t counter;
+} ethr_native_atomic_t;
+
+#define ETHR_MEMORY_BARRIER AO_nop_full()
+#ifdef AO_HAVE_nop_write
+# define ETHR_WRITE_MEMORY_BARRIER AO_nop_write()
+#else
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_HAVE_nop_read
+# define ETHR_READ_MEMORY_BARRIER AO_nop_read()
+#else
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_NO_DD_ORDERING
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#else
+# define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("":::"memory")
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+{
+ AO_store(&var->counter, (AO_t) value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+{
+ ethr_native_atomic_set(var, value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return (long) AO_load(&var->counter);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+#ifdef AO_HAVE_fetch_and_add
+ return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+#else
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp + (AO_t) incr;
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) new;
+ }
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) ethr_native_atomic_add_return(var, incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1
+ return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+#else
+ return ethr_native_atomic_add_return(var, 1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_inc_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1
+ return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+#else
+ return ethr_native_atomic_add_return(var, -1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp & ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp | ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+{
+ long act;
+ do {
+ if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
+ return (long) exp;
+ }
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_load_acquire
+ return (long) AO_load_acquire(&var->counter);
+#else
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1_acquire
+ return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+#else
+ long res = ethr_native_atomic_add_return(var, 1);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+{
+#ifdef AO_HAVE_store_release
+ AO_store_release(&var->counter, (AO_t) value);
+#else
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1_release
+ return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return_relb(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_acquire
+ long act;
+ do {
+ if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ AO_nop_full();
+ return act;
+#else
+ long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ETHR_MEMORY_BARRIER;
+ return act;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_release
+ long act;
+ do {
+ if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#endif
+}
+
+
+#endif
+
+#endif
+
+#endif
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
new file mode 100644
index 0000000000..ee73ba73bc
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_LIBATOMIC_OPS_H__
+#define ETHREAD_LIBATOMIC_OPS_H__
+
+#include "ethr_atomic.h"
+
+#endif
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index 105d874995..f21f7c9588 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -34,7 +34,7 @@ typedef struct {
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
@@ -205,6 +205,26 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
return old;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_PPC_ATOMIC_H */
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
index d2a72c3dc1..12efc1b653 100644
--- a/erts/include/internal/ppc32/ethread.h
+++ b/erts/include/internal/ppc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
index 9bdab12826..19ec26ab68 100644
--- a/erts/include/internal/ppc32/rwlock.h
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
index 034c20c143..c8460a3e8a 100644
--- a/erts/include/internal/ppc32/spinlock.h
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
new file mode 100644
index 0000000000..104ec287e0
--- /dev/null
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -0,0 +1,151 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#if defined(ETHR_HAVE_LINUX_FUTEX) && defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Linux futex implementation of ethread events ------------------------- */
+#define ETHR_LINUX_FUTEX_IMPL__
+
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/time.h>
+
+/*
+ * Note: Linux futexes operate on 32-bit integers, but
+ *       ethr_native_atomic_t is 64 bits wide on 64-bit
+ *       platforms, and the futex syscall only inspects
+ *       32 of those bits. Every value used below is
+ *       therefore built from one repeated byte, so any
+ *       aligned 32-bit half of it compares equal to the
+ *       pattern the futex sees.
+ */
+
+#if ETHR_SIZEOF_PTR == 8
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
+#define ETHR_EVENT_OFF__ 0x7777777777777777L
+#define ETHR_EVENT_ON__ 0L
+
+#elif ETHR_SIZEOF_PTR == 4
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
+#define ETHR_EVENT_OFF__ 0x77777777L
+#define ETHR_EVENT_ON__ 0L
+
+#else
+
+#error "Unexpected pointer word size"
+
+#endif
+
+#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE_PRIVATE
+#else
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE
+#endif
+
+typedef struct {
+ ethr_atomic_t futex;
+} ethr_event;
+
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, (void *) (FTX), (OP), (int) (VAL), NULL, NULL, 0)\
+ ? errno : 0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->futex, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+typedef struct {
+ ethr_atomic_t state;
+ pthread_mutex_t mtx;
+ pthread_cond_t cnd;
+} ethr_event;
+
+#define ETHR_EVENT_OFF_WAITER__ -1L
+#define ETHR_EVENT_OFF__ 1L
+#define ETHR_EVENT_ON__ 0L
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_cond_signal(&e->cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_mutex_unlock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
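
The wait side is only declared above; as a minimal sketch of how a futex-based wait could look, assuming ethr_atomic_cmpxchg(var, new, expected) returns the previous value (the actual implementation lives in ethr_event.c and may differ):

    static int example_futex_wait(ethr_event *e)
    {
        /* Announce a waiter unless the event is already set. */
        long prev = ethr_atomic_cmpxchg(&e->futex,
                                        ETHR_EVENT_OFF_WAITER__,
                                        ETHR_EVENT_OFF__);
        while (prev != ETHR_EVENT_ON__) {
            /* Sleep until woken; the kernel only compares the low
               32 bits, which is why all bytes of each value are
               chosen to be identical. */
            int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__,
                                   ETHR_EVENT_OFF_WAITER__);
            if (res != 0 && res != EWOULDBLOCK && res != EINTR)
                return res;
            prev = ethr_atomic_read(&e->futex);
        }
        return 0;
    }
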
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 8fde449a52..2a995d4465 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -32,7 +32,7 @@ typedef struct {
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
: : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#if defined(__arch64__)
#define CASX "casx"
@@ -172,6 +172,43 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
return new;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_set(var, i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ return ethr_native_atomic_dec_return(var);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHR_SPARC32_ATOMIC_H */
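
A short illustration of what the acqb/relb variants buy, as a sketch under the usual release/acquire pairing (the function names here are illustrative, not part of the patch):

    /* Thread A publishes data, thread B consumes it.  The release
       barrier in set_relb orders the data write before the flag
       write; the acquire barrier in read_acqb orders the flag read
       before the data read. */
    static int shared_data;
    static ethr_native_atomic_t ready; /* init to 0 via ethr_native_atomic_init() */

    static void producer(void)
    {
        shared_data = 42;                           /* ordinary write */
        ethr_native_atomic_set_relb(&ready, 1);     /* release store */
    }

    static int consumer(void)
    {
        while (ethr_native_atomic_read_acqb(&ready) == 0)
            ;                                       /* acquire load */
        return shared_data;                         /* guaranteed to see 42 */
    }
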
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index 1d55399640..dca113b4d6 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_SPARC32_ETHREAD_H */
diff --git a/erts/include/internal/sparc32/rwlock.h b/erts/include/internal/sparc32/rwlock.h
index 12448e0b06..465ec96866 100644
--- a/erts/include/internal/sparc32/rwlock.h
+++ b/erts/include/internal/sparc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/sparc32/spinlock.h b/erts/include/internal/sparc32/spinlock.h
index b4fe48b714..493d514210 100644
--- a/erts/include/internal/sparc32/spinlock.h
+++ b/erts/include/internal/sparc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile unsigned char lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 5e4c7ac9fe..69569d82d1 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -34,7 +34,7 @@ typedef struct {
#define ETHR_MEMORY_BARRIER __insn_mf()
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
@@ -45,7 +45,6 @@ ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
static ETHR_INLINE void
ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
{
- __insn_mf();
atomic_exchange_acq(&var->counter, i);
}
@@ -58,28 +57,24 @@ ethr_native_atomic_read(ethr_native_atomic_t *var)
static ETHR_INLINE void
ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
ethr_native_atomic_inc(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_increment(&var->counter);
}
static ETHR_INLINE void
ethr_native_atomic_dec(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_decrement(&var->counter);
}
static ETHR_INLINE long
ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
@@ -98,33 +93,81 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
static ETHR_INLINE long
ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_and_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
{
- __insn_mf();
return atomic_or_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
{
- __insn_mf();
return atomic_exchange_acq(&var->counter, val);
}
static ETHR_INLINE long
ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_inc_return(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long val)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, val);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_TILE_ATOMIC_H */
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
new file mode 100644
index 0000000000..500459dd6c
--- /dev/null
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -0,0 +1,244 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_WIN_ATOMIC_H__
+#define ETHR_WIN_ATOMIC_H__
+
+#ifdef _MSC_VER
+# if _MSC_VER < 1300
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Don't trust really old compilers */
+# else
+# if defined(_M_IX86)
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else /* I.e. IA64 */
+# if _MSC_VER >= 1400
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# endif
+# endif
+# endif
+# if _MSC_VER >= 1400
+# include <intrin.h>
+# undef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+#endif
+
+/*
+ * No configure test checking for the _Interlocked*_{acq,rel} and
+ * Interlocked*{Acquire,Release} intrinsics has been written yet...
+ *
+ * Note that these are pure optimizations for the Itanium
+ * processor.
+ */
+
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+#pragma intrinsic(_InterlockedCompareExchange_acq)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+#pragma intrinsic(_InterlockedCompareExchange_rel)
+#endif
+
+
+typedef struct {
+ volatile LONG value;
+} ethr_native_atomic_t;
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile LONG x___ = 0; \
+ _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
+} while (0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+{
+ var->value = (LONG) i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ var->value = (LONG) i;
+#else
+ (void) InterlockedExchange(&var->value, (LONG) i);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ return var->value;
+#else
+ return InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+{
+ LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
+ return tmp + i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedAnd(&var->value, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedOr(&var->value, mask);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+{
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ return (long) InterlockedExchange(&var->value, (LONG) new);
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
+ return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#else
+ return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
+ return (long) InterlockedIncrementAcquire(&var->value);
+#else
+ return (long) InterlockedIncrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ (void) InterlockedExchange(&var->value, (LONG) i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ (void) InterlockedDecrementRelease(&var->value);
+#else
+ (void) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ return (long) InterlockedDecrementRelease(&var->value);
+#else
+ return (long) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+ return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+{
+
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+ return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+#endif
+
+#endif
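
Read-modify-write operations not provided directly can be composed from the compare-and-exchange above, which (like _InterlockedCompareExchange) returns the previous value; a hypothetical atomic-maximum sketch, not part of the patch:

    static long
    example_atomic_max(ethr_native_atomic_t *var, long with)
    {
        long old, seen;
        do {
            old = ethr_native_atomic_read(var);
            if (old >= with)
                return old;   /* already at least 'with' */
            /* retry if another thread changed the value in between */
            seen = ethr_native_atomic_cmpxchg(var, with, old);
        } while (seen != old);
        return with;
    }
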
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
new file mode 100644
index 0000000000..af57c20f91
--- /dev/null
+++ b/erts/include/internal/win/ethr_event.h
@@ -0,0 +1,62 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
+#define ETHR_EVENT_OFF__ ((LONG) 1)
+#define ETHR_EVENT_ON__ ((LONG) 0)
+
+typedef struct {
+ volatile LONG state;
+ HANDLE handle;
+} ethr_event;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important */
+ LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ if (state == ETHR_EVENT_OFF_WAITER__) {
+ if (!SetEvent(e->handle))
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important */
+ InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+}
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
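
The wait side is only declared above; a minimal sketch of its likely shape, assuming the state transitions used by ethr_event_set() (the actual implementation is in ethr_event.c):

    static int example_event_wait(ethr_event *e)
    {
        /* Move OFF -> OFF_WAITER so that ethr_event_set() knows it
           must call SetEvent(); InterlockedCompareExchange() returns
           the previous value. */
        LONG state = InterlockedCompareExchange(&e->state,
                                                ETHR_EVENT_OFF_WAITER__,
                                                ETHR_EVENT_OFF__);
        while (state != ETHR_EVENT_ON__) {
            if (WaitForSingleObject(e->handle, INFINITE) != WAIT_OBJECT_0)
                return EINVAL;
            /* interlocked add of zero acts as a barrier-protected read */
            state = InterlockedExchangeAdd(&e->state, (LONG) 0);
        }
        return 0;
    }
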
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
new file mode 100644
index 0000000000..b52710f6a3
--- /dev/null
+++ b/erts/include/internal/win/ethread.h
@@ -0,0 +1,31 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomic and spinlock ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_WIN_H__
+#define ETHREAD_WIN_H__
+
+#include "ethr_atomic.h"
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#endif
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index e7caac8072..0d3181cace 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -280,8 +280,13 @@ endif
#
# ethread library
#
+ETHR_THR_LIB_BASE_DIR=@ETHR_THR_LIB_BASE_DIR@
ifneq ($(strip $(ETHR_LIB_NAME)),)
-ETHREAD_LIB_SRC=common/ethread.c
+ETHREAD_LIB_SRC=common/ethr_aux.c \
+ common/ethr_mutex.c \
+ common/ethr_cbf.c \
+ $(ETHR_THR_LIB_BASE_DIR)/ethread.c \
+ $(ETHR_THR_LIB_BASE_DIR)/ethr_event.c
ETHREAD_LIB_NAME=ethread$(TYPE_SUFFIX)
ifeq ($(USING_VC),yes)
@@ -379,13 +384,13 @@ $(ERTS_LIB): $(ERTS_LIB_OBJS)
$(r_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
-$(r_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(r_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
$(OBJ_DIR)/%.o: common/%.c
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
-$(OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
# Win32 specific
@@ -393,25 +398,25 @@ $(OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
$(MD_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
-$(MD_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MD_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
$(MDd_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
-$(MDd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MDd_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
$(MT_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
-$(MT_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MT_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
$(MTd_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
-$(MTd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MTd_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
#
@@ -438,6 +443,8 @@ RELEASE_LIBS=$(ERTS_LIBS)
INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/README \
$(ERTS_INCL_INT)/ethread.h \
+ $(ERTS_INCL_INT)/ethr_mutex.h \
+ $(ERTS_INCL_INT)/ethr_optimized_fallbacks.h \
$(ERTS_INCL_INT)/$(TARGET)/ethread.mk \
$(ERTS_INCL_INT)/$(TARGET)/erts_internal.mk \
$(ERTS_INCL_INT)/$(TARGET)/ethread_header_config.h \
@@ -447,7 +454,8 @@ INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/erl_misc_utils.h \
$(ERTS_INCL_INT)/erl_errno.h
-INTERNAL_X_RELEASE_INCLUDE_DIRS= i386 x86_64 ppc32 sparc32 sparc64 tile gcc
+INTERNAL_X_RELEASE_INCLUDE_DIRS= \
+ i386 x86_64 ppc32 sparc32 sparc64 tile gcc pthread win libatomic_ops
INTERNAL_RELEASE_LIBS= \
../lib/internal/README \
diff --git a/erts/lib_src/common/erl_misc_utils.c b/erts/lib_src/common/erl_misc_utils.c
index f70db86960..116c9886d8 100644
--- a/erts/lib_src/common/erl_misc_utils.c
+++ b/erts/lib_src/common/erl_misc_utils.c
@@ -21,10 +21,13 @@
#include "config.h"
#endif
+#if defined(__WIN32__)
+# include <windows.h>
+#endif
+
#include "erl_misc_utils.h"
#if defined(__WIN32__)
-# include <windows.h>
#elif defined(VXWORKS)
# include <selectLib.h>
#else /* UNIX */
@@ -59,8 +62,25 @@
# endif
#endif
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(HAVE_SCHED_xETAFFINITY)
# include <sched.h>
+# define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
+#define ERTS_MU_GET_PROC_AFFINITY__(CPUINFOP, CPUSET) \
+ (sched_getaffinity((CPUINFOP)->pid, \
+ sizeof(cpu_set_t), \
+ (CPUSET)) != 0 ? -errno : 0)
+#define ERTS_MU_SET_THR_AFFINITY__(SETP) \
+ (sched_setaffinity(0, sizeof(cpu_set_t), (SETP)) != 0 ? -errno : 0)
+#elif defined(__WIN32__)
+# define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
+# define cpu_set_t DWORD
+# define CPU_SETSIZE (sizeof(DWORD)*8)
+# define CPU_ZERO(SETP) (*(SETP) = (DWORD) 0)
+# define CPU_SET(CPU, SETP) (*(SETP) |= (((DWORD) 1) << (CPU)))
+# define CPU_CLR(CPU, SETP) (*(SETP) &= ~(((DWORD) 1) << (CPU)))
+# define CPU_ISSET(CPU, SETP) ((*(SETP) & (((DWORD) 1) << (CPU))) != (DWORD) 0)
+#define ERTS_MU_GET_PROC_AFFINITY__ get_proc_affinity
+#define ERTS_MU_SET_THR_AFFINITY__ set_thr_affinity
#endif
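
On Windows the whole affinity set is a single DWORD bit mask, so the CPU_* macros above reduce to plain bit operations; an illustrative use (hypothetical function, not part of the patch):

    static void example_mask_usage(void)
    {
        cpu_set_t set;              /* DWORD on Windows */
        CPU_ZERO(&set);             /* set = 0 */
        CPU_SET(2, &set);           /* set |= 1 << 2 */
        if (CPU_ISSET(2, &set))     /* (set >> 2) & 1 */
            CPU_CLR(2, &set);       /* set &= ~(1 << 2) */
    }
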
#ifdef HAVE_PSET_INFO
# include <sys/pset.h>
@@ -82,6 +102,26 @@
static int read_topology(erts_cpu_info_t *cpuinfo);
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+static int
+cpu_sets_are_eq(cpu_set_t *x, cpu_set_t *y)
+{
+ int i;
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, x)) {
+ if (!CPU_ISSET(i, y))
+ return 0;
+ }
+ else {
+ if (CPU_ISSET(i, y))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+#endif
+
int
erts_milli_sleep(long ms)
{
@@ -105,30 +145,66 @@ struct erts_cpu_info_t_ {
int available;
int topology_size;
erts_cpu_topology_t *topology;
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
char *affinity_str;
char affinity_str_buf[CPU_SETSIZE/4+2];
cpu_set_t cpuset;
+#if defined(HAVE_SCHED_xETAFFINITY)
pid_t pid;
+#endif
#elif defined(HAVE_PSET_INFO)
processorid_t *cpuids;
#endif
};
+#if defined(__WIN32__)
+
+static __forceinline int
+get_proc_affinity(erts_cpu_info_t *cpuinfo, cpu_set_t *cpuset)
+{
+ DWORD pamask, samask;
+ if (GetProcessAffinityMask(GetCurrentProcess(), &pamask, &samask)) {
+ *cpuset = (cpu_set_t) pamask;
+ return 0;
+ }
+ else {
+ *cpuset = (cpu_set_t) 0;
+ return -erts_get_last_win_errno();
+ }
+}
+
+static __forceinline int
+set_thr_affinity(cpu_set_t *set)
+{
+ if (*set == (cpu_set_t) 0)
+ return -ENOTSUP;
+ if (SetThreadAffinityMask(GetCurrentThread(), *set) == 0)
+ return -erts_get_last_win_errno();
+ else
+ return 0;
+}
+
+#endif
+
erts_cpu_info_t *
erts_cpu_info_create(void)
{
erts_cpu_info_t *cpuinfo = malloc(sizeof(erts_cpu_info_t));
if (!cpuinfo)
return NULL;
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
cpuinfo->affinity_str = NULL;
+#if defined(HAVE_SCHED_xETAFFINITY)
cpuinfo->pid = getpid();
+#endif
#elif defined(HAVE_PSET_INFO)
cpuinfo->cpuids = NULL;
#endif
cpuinfo->topology_size = 0;
cpuinfo->topology = NULL;
+ cpuinfo->configured = -1;
+ cpuinfo->online = -1;
+ cpuinfo->available = -1;
erts_cpu_info_update(cpuinfo);
return cpuinfo;
}
@@ -153,31 +229,40 @@ erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo)
}
}
-void
+int
erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
{
- cpuinfo->configured = 0;
- cpuinfo->online = 0;
- cpuinfo->available = 0;
+ int changed = 0;
+ int configured = 0;
+ int online = 0;
+ int available = 0;
+ erts_cpu_topology_t *old_topology;
+ int old_topology_size;
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ cpu_set_t cpuset;
+#endif
#ifdef __WIN32__
{
+ int i;
SYSTEM_INFO sys_info;
GetSystemInfo(&sys_info);
- cpuinfo->configured = (int) sys_info.dwNumberOfProcessors;
-
+ configured = (int) sys_info.dwNumberOfProcessors;
+ for (i = 0; i < sizeof(DWORD)*8; i++)
+ if (sys_info.dwActiveProcessorMask & (((DWORD) 1) << i))
+ online++;
}
#elif !defined(NO_SYSCONF) && (defined(_SC_NPROCESSORS_CONF) \
|| defined(_SC_NPROCESSORS_ONLN))
#ifdef _SC_NPROCESSORS_CONF
- cpuinfo->configured = (int) sysconf(_SC_NPROCESSORS_CONF);
- if (cpuinfo->configured < 0)
- cpuinfo->configured = 0;
+ configured = (int) sysconf(_SC_NPROCESSORS_CONF);
+ if (configured < 0)
+ configured = 0;
#endif
#ifdef _SC_NPROCESSORS_ONLN
- cpuinfo->online = (int) sysconf(_SC_NPROCESSORS_ONLN);
- if (cpuinfo->online < 0)
- cpuinfo->online = 0;
+ online = (int) sysconf(_SC_NPROCESSORS_ONLN);
+ if (online < 0)
+ online = 0;
#endif
#elif defined(HAVE_SYS_SYSCTL_H) && defined(CTL_HW) && (defined(HW_NCPU) \
|| defined(HW_AVAILCPU))
@@ -189,71 +274,138 @@ erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
len = sizeof(int);
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
- if (sysctl(&mib[0], 2, &cpuinfo->configured, &len, NULL, 0) < 0)
- cpuinfo->configured = 0;
+ if (sysctl(&mib[0], 2, &configured, &len, NULL, 0) < 0)
+ configured = 0;
#endif
#ifdef HW_AVAILCPU
len = sizeof(int);
mib[0] = CTL_HW;
mib[1] = HW_AVAILCPU;
- if (sysctl(&mib[0], 2, &cpuinfo->online, &len, NULL, 0) < 0)
- cpuinfo->online = 0;
+ if (sysctl(&mib[0], 2, &online, &len, NULL, 0) < 0)
+ online = 0;
#endif
}
#endif
- if (cpuinfo->online > cpuinfo->configured)
- cpuinfo->online = cpuinfo->configured;
+ if (online > configured)
+ online = configured;
-#ifdef HAVE_SCHED_xETAFFINITY
- if (sched_getaffinity(cpuinfo->pid, sizeof(cpu_set_t), &cpuinfo->cpuset) == 0) {
- int i, c, cn, si;
- c = cn = 0;
- si = sizeof(cpuinfo->affinity_str_buf) - 1;
- cpuinfo->affinity_str_buf[si] = '\0';
- for (i = 0; i < CPU_SETSIZE; i++) {
- if (CPU_ISSET(i, &cpuinfo->cpuset)) {
- c |= 1 << cn;
- cpuinfo->available++;
+ if (cpuinfo->configured != configured)
+ changed = 1;
+ if (cpuinfo->online != online)
+ changed = 1;
+
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ if (ERTS_MU_GET_PROC_AFFINITY__(cpuinfo, &cpuset) == 0) {
+ if (!changed && !cpu_sets_are_eq(&cpuset, &cpuinfo->cpuset))
+ changed = 1;
+
+ if (!changed)
+ available = cpuinfo->available;
+ else {
+ int i, c, cn, si;
+
+ memcpy((void *) &cpuinfo->cpuset,
+ (void *) &cpuset,
+ sizeof(cpu_set_t));
+
+ c = cn = 0;
+ si = sizeof(cpuinfo->affinity_str_buf) - 1;
+ cpuinfo->affinity_str_buf[si] = '\0';
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, &cpuinfo->cpuset)) {
+ c |= 1 << cn;
+ available++;
+ }
+ cn++;
+ if (cn == 4) {
+ cpuinfo->affinity_str_buf[--si] = (c < 10
+ ? '0' + c
+ : 'A' + c - 10);
+ c = cn = 0;
+ }
}
- cn++;
- if (cn == 4) {
+ if (c)
cpuinfo->affinity_str_buf[--si] = (c < 10
? '0' + c
: 'A' + c - 10);
- c = cn = 0;
- }
+ while (cpuinfo->affinity_str_buf[si] == '0')
+ si++;
+ cpuinfo->affinity_str = &cpuinfo->affinity_str_buf[si];
}
- if (c)
- cpuinfo->affinity_str_buf[--si] = (c < 10
- ? '0' + c
- : 'A' + c - 10);
- while (cpuinfo->affinity_str_buf[si] == '0')
- si++;
- cpuinfo->affinity_str = &cpuinfo->affinity_str_buf[si];
}
#elif defined(HAVE_PSET_INFO)
{
- uint_t numcpus = cpuinfo->configured;
- if (cpuinfo->cpuids)
- free(cpuinfo->cpuids);
- cpuinfo->cpuids = malloc(sizeof(processorid_t)*numcpus);
- if (cpuinfo->cpuids) {
- if (pset_info(PS_MYID, NULL, &numcpus, &cpuinfo->cpuids) == 0)
- cpuinfo->available = (int) numcpus;
- if (cpuinfo->available < 0) {
- free(cpuinfo->cpuid);
- cpuinfo->available = 0;
+ processorid_t *cpuids;
+ uint_t numcpus = configured;
+ cpuids = malloc(sizeof(processorid_t)*numcpus);
+ if (cpuids) {
+ if (pset_info(PS_MYID, NULL, &numcpus, &cpuids) == 0)
+ available = (int) numcpus;
+ if (available < 0) {
+ free(cpuids);
+ cpuids = NULL;
+ available = 0;
}
}
+ if (!cpuids) {
+ if (cpuinfo->cpuids)
+ changed = 1;
+ }
+ else {
+ if (cpuinfo->cpuids)
+ changed = 1;
+ if (memcmp((void *) cpuinfo->cpuids,
+ (void *) cpuids,
+ sizeof(processorid_t)*numcpus) != 0)
+ changed = 1;
+
+ }
+ if (!changed) {
+ if (cpuids)
+ free(cpuids);
+ }
+ else {
+ if (cpuinfo->cpuids)
+ free(cpuinfo->cpuids);
+ cpuinfo->cpuids = cpuids;
+ }
}
#endif
- if (cpuinfo->available > cpuinfo->online)
- cpuinfo->available = cpuinfo->online;
+ if (available > online)
+ available = online;
+
+ if (cpuinfo->available != available)
+ changed = 1;
+
+ cpuinfo->configured = configured;
+ cpuinfo->online = online;
+ cpuinfo->available = available;
+
+ old_topology = cpuinfo->topology;
+ old_topology_size = cpuinfo->topology_size;
+ cpuinfo->topology = NULL;
read_topology(cpuinfo);
+ if (cpuinfo->topology_size != old_topology_size
+ || (old_topology_size != 0
+ && memcmp((void *) cpuinfo->topology,
+ (void *) old_topology,
+ (sizeof(erts_cpu_topology_t)
+ * old_topology_size)) != 0)) {
+ changed = 1;
+ if (old_topology)
+ free(old_topology);
+ }
+ else {
+ if (cpuinfo->topology)
+ free(cpuinfo->topology);
+ cpuinfo->topology = old_topology;
+ }
+
+ return changed;
}
int
@@ -289,7 +441,7 @@ erts_get_cpu_available(erts_cpu_info_t *cpuinfo)
char *
erts_get_unbind_from_cpu_str(erts_cpu_info_t *cpuinfo)
{
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
if (!cpuinfo)
return "false";
return cpuinfo->affinity_str;
@@ -303,7 +455,7 @@ erts_get_available_cpu(erts_cpu_info_t *cpuinfo, int no)
{
if (!cpuinfo || no < 1 || cpuinfo->available < no)
return -EINVAL;
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
{
cpu_set_t *allowed = &cpuinfo->cpuset;
int ix, n;
@@ -335,8 +487,8 @@ int
erts_is_cpu_available(erts_cpu_info_t *cpuinfo, int id)
{
if (cpuinfo && 0 <= id) {
-#ifdef HAVE_SCHED_xETAFFINITY
- if (id <= CPU_SETSIZE)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ if (id < CPU_SETSIZE)
return CPU_ISSET(id, &cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
int no;
@@ -388,7 +540,7 @@ erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
*/
if (!cpuinfo)
return -EINVAL;
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
{
cpu_set_t bind_set;
if (cpu < 0)
@@ -398,9 +550,7 @@ erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
CPU_ZERO(&bind_set);
CPU_SET(cpu, &bind_set);
- if (sched_setaffinity(0, sizeof(cpu_set_t), &bind_set) != 0)
- return -errno;
- return 0;
+ return ERTS_MU_SET_THR_AFFINITY__(&bind_set);
}
#elif defined(HAVE_PROCESSOR_BIND)
if (cpu < 0)
@@ -418,10 +568,8 @@ erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
{
if (!cpuinfo)
return -EINVAL;
-#if defined(HAVE_SCHED_xETAFFINITY)
- if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuinfo->cpuset) != 0)
- return -errno;
- return 0;
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ return ERTS_MU_SET_THR_AFFINITY__(&cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
return -errno;
@@ -434,7 +582,7 @@ erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
int
erts_unbind_from_cpu_str(char *str)
{
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
char *c = str;
int cpus = 0;
int shft = 0;
@@ -486,9 +634,7 @@ erts_unbind_from_cpu_str(char *str)
if (!cpus)
return -EINVAL;
- if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0)
- return -errno;
- return 0;
+ return ERTS_MU_SET_THR_AFFINITY__(&cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
return -errno;
@@ -541,6 +687,56 @@ cpu_cmp(const void *vx, const void *vy)
return 0;
}
+static void
+adjust_processor_nodes(erts_cpu_info_t *cpuinfo, int no_nodes)
+{
+ erts_cpu_topology_t *prev, *this, *last;
+ if (no_nodes > 1) {
+ int processor = -1;
+ int processor_node = 0;
+ int node = -1;
+
+ qsort(cpuinfo->topology,
+ cpuinfo->topology_size,
+ sizeof(erts_cpu_topology_t),
+ pn_cmp);
+
+ prev = NULL;
+ this = &cpuinfo->topology[0];
+ last = &cpuinfo->topology[cpuinfo->configured-1];
+ while (1) {
+ if (processor == this->processor) {
+ if (node != this->node)
+ processor_node = 1;
+ }
+ else {
+ if (processor_node) {
+ make_processor_node:
+ while (prev->processor == processor) {
+ prev->processor_node = prev->node;
+ prev->node = -1;
+ if (prev == &cpuinfo->topology[0])
+ break;
+ prev--;
+ }
+ processor_node = 0;
+ }
+ processor = this->processor;
+ node = this->node;
+ }
+ if (this == last) {
+ if (processor_node) {
+ prev = this;
+ goto make_processor_node;
+ }
+ break;
+ }
+ prev = this++;
+ }
+ }
+}
+
+
#ifdef __linux__
static int
@@ -594,9 +790,6 @@ read_topology(erts_cpu_info_t *cpuinfo)
errno = 0;
- if (cpuinfo->topology)
- free(cpuinfo->topology);
-
if (cpuinfo->configured < 1)
goto error;
@@ -710,49 +903,7 @@ read_topology(erts_cpu_info_t *cpuinfo)
cpuinfo->topology = t;
}
- if (no_nodes > 1) {
- int processor = -1;
- int processor_node = 0;
- int node = -1;
-
- qsort(cpuinfo->topology,
- cpuinfo->topology_size,
- sizeof(erts_cpu_topology_t),
- pn_cmp);
-
- prev = NULL;
- this = &cpuinfo->topology[0];
- last = &cpuinfo->topology[cpuinfo->configured-1];
- while (1) {
- if (processor == this->processor) {
- if (node != this->node)
- processor_node = 1;
- }
- else {
- if (processor_node) {
- make_processor_node:
- while (prev->processor == processor) {
- prev->processor_node = prev->node;
- prev->node = -1;
- if (prev == &cpuinfo->topology[0])
- break;
- prev--;
- }
- processor_node = 0;
- }
- processor = this->processor;
- node = this->node;
- }
- if (this == last) {
- if (processor_node) {
- prev = this;
- goto make_processor_node;
- }
- break;
- }
- prev = this++;
- }
- }
+ adjust_processor_nodes(cpuinfo, no_nodes);
qsort(cpuinfo->topology,
cpuinfo->topology_size,
@@ -849,9 +1000,6 @@ read_topology(erts_cpu_info_t *cpuinfo)
errno = 0;
- if (cpuinfo->topology)
- free(cpuinfo->topology);
-
if (cpuinfo->configured < 1)
goto error;
@@ -938,6 +1086,8 @@ read_topology(erts_cpu_info_t *cpuinfo)
}
}
+ adjust_processor_nodes(cpuinfo, 1);
+
error:
if (res == 0) {
@@ -956,6 +1106,275 @@ read_topology(erts_cpu_info_t *cpuinfo)
}
+#elif defined(__WIN32__)
+
+/*
+ * We cannot use Relation* out of the box since all of them are not
+ * always part of the LOGICAL_PROCESSOR_RELATIONSHIP enum. They are
+ * however documented as follows...
+ */
+#define ERTS_MU_RELATION_PROCESSOR_CORE 0 /* RelationProcessorCore */
+#define ERTS_MU_RELATION_NUMA_NODE 1 /* RelationNumaNode */
+#define ERTS_MU_RELATION_CACHE 2 /* RelationCache */
+#define ERTS_MU_RELATION_PROCESSOR_PACKAGE 3 /* RelationProcessorPackage */
+
+static __forceinline int
+rel_cmp_val(int r)
+{
+ switch (r) {
+ case ERTS_MU_RELATION_NUMA_NODE: return 0;
+ case ERTS_MU_RELATION_PROCESSOR_PACKAGE: return 1;
+ case ERTS_MU_RELATION_PROCESSOR_CORE: return 2;
+ default: /* currently not used */ return 3;
+ }
+}
+
+static int
+slpi_cmp(const void *vx, const void *vy)
+{
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION x, y;
+ x = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION) vx;
+ y = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION) vy;
+
+ if ((int) x->Relationship != (int) y->Relationship)
+ return (rel_cmp_val((int) x->Relationship)
+ - rel_cmp_val((int) y->Relationship));
+
+ switch ((int) x->Relationship) {
+ case ERTS_MU_RELATION_NUMA_NODE:
+ if (x->NumaNode.NodeNumber == y->NumaNode.NodeNumber)
+ break;
+ return ((int) x->NumaNode.NodeNumber) - ((int) y->NumaNode.NodeNumber);
+ case ERTS_MU_RELATION_PROCESSOR_CORE:
+ case ERTS_MU_RELATION_PROCESSOR_PACKAGE:
+ default:
+ break;
+ }
+
+ if (x->ProcessorMask == y->ProcessorMask)
+ return 0;
+ return x->ProcessorMask < y->ProcessorMask ? -1 : 1;
+}
+
+typedef BOOL (WINAPI *glpi_t)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);
+
+static int
+read_topology(erts_cpu_info_t *cpuinfo)
+{
+ int res = 0;
+ glpi_t glpi;
+ int *core_id = NULL;
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpip = NULL;
+ int wix, rix, max_l, l, packages, nodes, no_slpi;
+ DWORD slpi_size = 0;
+
+
+ glpi = (glpi_t) GetProcAddress(GetModuleHandle("kernel32"),
+ "GetLogicalProcessorInformation");
+ if (!glpi)
+ return -ENOTSUP;
+
+ cpuinfo->topology = NULL;
+
+ if (cpuinfo->configured < 1 || sizeof(ULONG_PTR)*8 < cpuinfo->configured)
+ goto error;
+
+ while (1) {
+ DWORD werr;
+ if (TRUE == glpi(slpip, &slpi_size))
+ break;
+ werr = GetLastError();
+ if (werr != ERROR_INSUFFICIENT_BUFFER) {
+ res = -erts_map_win_error_to_errno(werr);
+ goto error;
+ }
+ if (slpip)
+ free(slpip);
+ slpip = malloc(slpi_size);
+ if (!slpip) {
+ res = -ENOMEM;
+ goto error;
+ }
+ }
+
+ no_slpi = (int) slpi_size/sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+
+ qsort(slpip,
+ no_slpi,
+ sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION),
+ slpi_cmp);
+
+ /*
+ * Now NUMA node relations appear before package relations, which
+ * appear before core relations, which appear before relations
+ * we aren't interested in...
+ */
+
+ max_l = 0;
+ packages = 0;
+ nodes = 0;
+ for (rix = 0; rix < no_slpi; rix++) {
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION this = &slpip[rix];
+ for (l = sizeof(ULONG_PTR)*8 - 1; l > 0; l--) {
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ if (max_l < l)
+ max_l = l;
+ break;
+ }
+ }
+ if ((int) slpip[rix].Relationship == ERTS_MU_RELATION_PROCESSOR_PACKAGE)
+ packages++;
+ if ((int) slpip[rix].Relationship == ERTS_MU_RELATION_NUMA_NODE)
+ nodes++;
+ }
+
+ core_id = malloc(sizeof(int)*(packages ? packages : 1));
+ if (!core_id) {
+ res = -ENOMEM;
+ goto error;
+ }
+
+ for (rix = 0; rix < packages; rix++)
+ core_id[rix] = 0;
+
+ cpuinfo->topology_size = max_l + 1;
+ cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
+ * cpuinfo->topology_size);
+ if (!cpuinfo->topology) {
+ res = -ENOMEM;
+ goto error;
+ }
+
+ for (wix = 0; wix < cpuinfo->topology_size; wix++) {
+ cpuinfo->topology[wix].node = -1;
+ cpuinfo->topology[wix].processor = -1;
+ cpuinfo->topology[wix].processor_node = -1;
+ cpuinfo->topology[wix].core = -1;
+ cpuinfo->topology[wix].thread = -1;
+ cpuinfo->topology[wix].logical = -1;
+ }
+
+ nodes = 0;
+ packages = 0;
+
+ for (rix = 0; rix < no_slpi; rix++) {
+
+ switch ((int) slpip[rix].Relationship) {
+ case ERTS_MU_RELATION_NUMA_NODE:
+ for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ cpuinfo->topology[l].logical = l;
+ cpuinfo->topology[l].node = slpip[rix].NumaNode.NodeNumber;
+ }
+ }
+ nodes++;
+ break;
+ case ERTS_MU_RELATION_PROCESSOR_PACKAGE:
+ for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ cpuinfo->topology[l].logical = l;
+ cpuinfo->topology[l].processor = packages;
+ }
+ }
+ packages++;
+ break;
+ case ERTS_MU_RELATION_PROCESSOR_CORE: {
+ int thread = 0;
+ int processor = -1;
+ for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
+ /*
+ * Nodes and packages may not be supported; pretend
+ * that there is one if this is the case...
+ */
+ if (!nodes)
+ cpuinfo->topology[l].node = 0;
+ if (!packages)
+ cpuinfo->topology[l].processor = 0;
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ if (processor < 0) {
+ processor = cpuinfo->topology[l].processor;
+ if (processor < 0) {
+ res = -EINVAL;
+ goto error;
+ }
+ }
+ else if (processor != cpuinfo->topology[l].processor) {
+ res = -EINVAL;
+ goto error;
+ }
+ cpuinfo->topology[l].logical = l;
+ cpuinfo->topology[l].thread = thread;
+ cpuinfo->topology[l].core = core_id[processor];
+ thread++;
+ }
+ }
+ core_id[processor]++;
+ break;
+ }
+ default:
+ /*
+ * We have reached the end of the relationships
+ * that we (currently) are interested in...
+ */
+ goto relationships_done;
+ }
+ }
+
+ relationships_done:
+
+ /*
+ * There may be unused entries; remove them...
+ */
+ for (rix = wix = 0; rix < cpuinfo->topology_size; rix++) {
+ if (cpuinfo->topology[rix].logical >= 0) {
+ if (wix != rix)
+ cpuinfo->topology[wix] = cpuinfo->topology[rix];
+ wix++;
+ }
+ }
+
+ if (cpuinfo->topology_size != wix) {
+ erts_cpu_topology_t *new = cpuinfo->topology;
+ new = realloc(cpuinfo->topology,
+ sizeof(erts_cpu_topology_t)*wix);
+ if (!new) {
+ res = -ENOMEM;
+ goto error;
+ }
+ cpuinfo->topology = new;
+ cpuinfo->topology_size = wix;
+ }
+
+ res = wix;
+
+ adjust_processor_nodes(cpuinfo, nodes);
+
+ qsort(cpuinfo->topology,
+ cpuinfo->topology_size,
+ sizeof(erts_cpu_topology_t),
+ cpu_cmp);
+
+ if (res < cpuinfo->online)
+ res = -EINVAL;
+
+ error:
+
+ if (res <= 0) {
+ cpuinfo->topology_size = 0;
+ if (cpuinfo->topology) {
+ free(cpuinfo->topology);
+ cpuinfo->topology = NULL;
+ }
+ }
+
+ if (slpip)
+ free(slpip);
+ if (core_id)
+ free(core_id);
+
+ return res;
+}
+
#else
static int
@@ -965,3 +1384,98 @@ read_topology(erts_cpu_info_t *cpuinfo)
}
#endif
+
+#if defined(__WIN32__)
+
+int
+erts_map_win_error_to_errno(DWORD win_error)
+{
+ switch (win_error) {
+ case ERROR_INVALID_FUNCTION: return EINVAL; /* 1 */
+ case ERROR_FILE_NOT_FOUND: return ENOENT; /* 2 */
+ case ERROR_PATH_NOT_FOUND: return ENOENT; /* 3 */
+ case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; /* 4 */
+ case ERROR_ACCESS_DENIED: return EACCES; /* 5 */
+ case ERROR_INVALID_HANDLE: return EBADF; /* 6 */
+ case ERROR_ARENA_TRASHED: return ENOMEM; /* 7 */
+ case ERROR_NOT_ENOUGH_MEMORY: return ENOMEM; /* 8 */
+ case ERROR_INVALID_BLOCK: return ENOMEM; /* 9 */
+ case ERROR_BAD_ENVIRONMENT: return E2BIG; /* 10 */
+ case ERROR_BAD_FORMAT: return ENOEXEC; /* 11 */
+ case ERROR_INVALID_ACCESS: return EINVAL; /* 12 */
+ case ERROR_INVALID_DATA: return EINVAL; /* 13 */
+ case ERROR_OUTOFMEMORY: return ENOMEM; /* 14 */
+ case ERROR_INVALID_DRIVE: return ENOENT; /* 15 */
+ case ERROR_CURRENT_DIRECTORY: return EACCES; /* 16 */
+ case ERROR_NOT_SAME_DEVICE: return EXDEV; /* 17 */
+ case ERROR_NO_MORE_FILES: return ENOENT; /* 18 */
+ case ERROR_WRITE_PROTECT: return EACCES; /* 19 */
+ case ERROR_BAD_UNIT: return EACCES; /* 20 */
+ case ERROR_NOT_READY: return EACCES; /* 21 */
+ case ERROR_BAD_COMMAND: return EACCES; /* 22 */
+ case ERROR_CRC: return EACCES; /* 23 */
+ case ERROR_BAD_LENGTH: return EACCES; /* 24 */
+ case ERROR_SEEK: return EACCES; /* 25 */
+ case ERROR_NOT_DOS_DISK: return EACCES; /* 26 */
+ case ERROR_SECTOR_NOT_FOUND: return EACCES; /* 27 */
+ case ERROR_OUT_OF_PAPER: return EACCES; /* 28 */
+ case ERROR_WRITE_FAULT: return EACCES; /* 29 */
+ case ERROR_READ_FAULT: return EACCES; /* 30 */
+ case ERROR_GEN_FAILURE: return EACCES; /* 31 */
+ case ERROR_SHARING_VIOLATION: return EACCES; /* 32 */
+ case ERROR_LOCK_VIOLATION: return EACCES; /* 33 */
+ case ERROR_WRONG_DISK: return EACCES; /* 34 */
+ case ERROR_SHARING_BUFFER_EXCEEDED: return EACCES; /* 36 */
+ case ERROR_BAD_NETPATH: return ENOENT; /* 53 */
+ case ERROR_NETWORK_ACCESS_DENIED: return EACCES; /* 65 */
+ case ERROR_BAD_NET_NAME: return ENOENT; /* 67 */
+ case ERROR_FILE_EXISTS: return EEXIST; /* 80 */
+ case ERROR_CANNOT_MAKE: return EACCES; /* 82 */
+ case ERROR_FAIL_I24: return EACCES; /* 83 */
+ case ERROR_INVALID_PARAMETER: return EINVAL; /* 87 */
+ case ERROR_NO_PROC_SLOTS: return EAGAIN; /* 89 */
+ case ERROR_DRIVE_LOCKED: return EACCES; /* 108 */
+ case ERROR_BROKEN_PIPE: return EPIPE; /* 109 */
+ case ERROR_DISK_FULL: return ENOSPC; /* 112 */
+ case ERROR_INVALID_TARGET_HANDLE: return EBADF; /* 114 */
+ case ERROR_WAIT_NO_CHILDREN: return ECHILD; /* 128 */
+ case ERROR_CHILD_NOT_COMPLETE: return ECHILD; /* 129 */
+ case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; /* 130 */
+ case ERROR_NEGATIVE_SEEK: return EINVAL; /* 131 */
+ case ERROR_SEEK_ON_DEVICE: return EACCES; /* 132 */
+ case ERROR_DIR_NOT_EMPTY: return ENOTEMPTY;/* 145 */
+ case ERROR_NOT_LOCKED: return EACCES; /* 158 */
+ case ERROR_BAD_PATHNAME: return ENOENT; /* 161 */
+ case ERROR_MAX_THRDS_REACHED: return EAGAIN; /* 164 */
+ case ERROR_LOCK_FAILED: return EACCES; /* 167 */
+ case ERROR_ALREADY_EXISTS: return EEXIST; /* 183 */
+ case ERROR_INVALID_STARTING_CODESEG: return ENOEXEC; /* 188 */
+ case ERROR_INVALID_STACKSEG: return ENOEXEC; /* 189 */
+ case ERROR_INVALID_MODULETYPE: return ENOEXEC; /* 190 */
+ case ERROR_INVALID_EXE_SIGNATURE: return ENOEXEC; /* 191 */
+ case ERROR_EXE_MARKED_INVALID: return ENOEXEC; /* 192 */
+ case ERROR_BAD_EXE_FORMAT: return ENOEXEC; /* 193 */
+ case ERROR_ITERATED_DATA_EXCEEDS_64k: return ENOEXEC; /* 194 */
+ case ERROR_INVALID_MINALLOCSIZE: return ENOEXEC; /* 195 */
+ case ERROR_DYNLINK_FROM_INVALID_RING: return ENOEXEC; /* 196 */
+ case ERROR_IOPL_NOT_ENABLED: return ENOEXEC; /* 197 */
+ case ERROR_INVALID_SEGDPL: return ENOEXEC; /* 198 */
+ case ERROR_AUTODATASEG_EXCEEDS_64k: return ENOEXEC; /* 199 */
+ case ERROR_RING2SEG_MUST_BE_MOVABLE: return ENOEXEC; /* 200 */
+ case ERROR_RELOC_CHAIN_XEEDS_SEGLIM: return ENOEXEC; /* 201 */
+ case ERROR_INFLOOP_IN_RELOC_CHAIN: return ENOEXEC; /* 202 */
+ case ERROR_FILENAME_EXCED_RANGE: return ENOENT; /* 206 */
+ case ERROR_NESTING_NOT_ALLOWED: return EAGAIN; /* 215 */
+ case ERROR_NOT_ENOUGH_QUOTA: return ENOMEM; /* 1816 */
+ default: return EINVAL;
+ }
+}
+
+int
+erts_get_last_win_errno(void)
+{
+ return erts_map_win_error_to_errno(GetLastError());
+}
+
+
+#endif
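
Callers are expected to wrap Win32 API failures with erts_get_last_win_errno() and return negated errno values, matching the rest of this file; an illustrative sketch (hypothetical function):

    static int example_win_call(void)
    {
        /* SetThreadAffinityMask() returns 0 on failure and sets the
           thread-local Win32 error code queried by GetLastError(). */
        if (!SetThreadAffinityMask(GetCurrentThread(), (DWORD_PTR) 1))
            return -erts_get_last_win_errno();
        return 0;
    }
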
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
new file mode 100644
index 0000000000..4db4cffd3a
--- /dev/null
+++ b/erts/lib_src/common/ethr_aux.c
@@ -0,0 +1,762 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A Thread library for use in the ERTS and other OTP
+ * applications.
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_AUX_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+#include <string.h>
+#include <limits.h>
+
+#ifndef __WIN32__
+#include <unistd.h>
+#endif
+
+#define ERTS_TS_EV_ALLOC_DEFAULT_POOL_SIZE 100
+#define ERTS_TS_EV_ALLOC_POOL_SIZE 25
+
+erts_cpu_info_t *ethr_cpu_info__;
+
+int ethr_not_completely_inited__ = 1;
+int ethr_not_inited__ = 1;
+
+ethr_memory_allocators ethr_mem__ = ETHR_MEM_ALLOCS_DEF_INITER__;
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+#endif
+
+void *(*ethr_thr_prepare_func__)(void) = NULL;
+void (*ethr_thr_parent_func__)(void *) = NULL;
+void (*ethr_thr_child_func__)(void *) = NULL;
+
+typedef struct ethr_xhndl_list_ ethr_xhndl_list;
+struct ethr_xhndl_list_ {
+ ethr_xhndl_list *next;
+ void (*funcp)(void);
+};
+
+size_t ethr_pagesize__;
+size_t ethr_min_stack_size__; /* kilo words */
+size_t ethr_max_stack_size__; /* kilo words */
+
+ethr_rwmutex xhndl_rwmtx;
+ethr_xhndl_list *xhndl_list;
+
+static int main_threads;
+
+static int init_ts_event_alloc(void);
+
+int
+ethr_init_common__(ethr_init_data *id)
+{
+ int res;
+ if (id) {
+ ethr_thr_prepare_func__ = id->thread_create_prepare_func;
+ ethr_thr_parent_func__ = id->thread_create_parent_func;
+ ethr_thr_child_func__ = id->thread_create_child_func;
+ }
+
+ ethr_cpu_info__ = erts_cpu_info_create();
+ if (!ethr_cpu_info__)
+ return ENOMEM;
+
+#ifdef _SC_PAGESIZE
+ ethr_pagesize__ = (size_t) sysconf(_SC_PAGESIZE);
+#elif defined(HAVE_GETPAGESIZE)
+ ethr_pagesize__ = (size_t) getpagesize();
+#else
+ ethr_pagesize__ = (size_t) 4*1024; /* Guess 4 KB */
+#endif
+
+ /* User needs at least 4 KB */
+ ethr_min_stack_size__ = 4*1024;
+#if SIZEOF_VOID_P == 8
+ /* Double that on 64-bit archs */
+ ethr_min_stack_size__ *= 2;
+#endif
+ /* On some systems as much as about 4 KB is used by the system */
+ ethr_min_stack_size__ += 4*1024;
+ /* There should be room for signal handlers */
+#ifdef SIGSTKSZ
+ ethr_min_stack_size__ += SIGSTKSZ;
+#else
+ ethr_min_stack_size__ += ethr_pagesize__;
+#endif
+ /* The system may think that we need more stack */
+#if defined(PTHREAD_STACK_MIN)
+ if (ethr_min_stack_size__ < PTHREAD_STACK_MIN)
+ ethr_min_stack_size__ = PTHREAD_STACK_MIN;
+#elif defined(_SC_THREAD_STACK_MIN)
+ {
+ size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
+ if (ethr_min_stack_size__ < thr_min_stk_sz)
+ ethr_min_stack_size__ = thr_min_stk_sz;
+ }
+#endif
+ /* The guard is at least on some platforms included in the stack size
+ passed when creating threads */
+#ifdef ETHR_STACK_GUARD_SIZE
+ ethr_min_stack_size__ += ETHR_STACK_GUARD_SIZE;
+#endif
+ ethr_min_stack_size__ = ETHR_PAGE_ALIGN(ethr_min_stack_size__);
+
+ ethr_min_stack_size__ = ETHR_B2KW(ethr_min_stack_size__);
+
+ ethr_max_stack_size__ = 32*1024*1024;
+#if SIZEOF_VOID_P == 8
+ ethr_max_stack_size__ *= 2;
+#endif
+ ethr_max_stack_size__ = ETHR_B2KW(ethr_max_stack_size__);
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ {
+ int i;
+ for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
+ res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
+ if (res != 0)
+ return res;
+ }
+ }
+#endif
+
+ res = ethr_mutex_lib_init(erts_get_cpu_configured(ethr_cpu_info__));
+ if (res != 0)
+ return res;
+
+ xhndl_list = NULL;
+
+ return 0;
+}
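
As a worked example of the minimum-stack computation above: on a hypothetical 32-bit Linux host with 4 KB pages and SIGSTKSZ of 8 KB, the running total is 4 KB (user) + 4 KB (system) + 8 KB (signal stack) = 16 KB, which is already page aligned, and ETHR_B2KW converts those 16384 bytes to 4 kilo words, assuming neither PTHREAD_STACK_MIN nor ETHR_STACK_GUARD_SIZE pushes the figure higher.
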
+
+int
+ethr_late_init_common__(ethr_late_init_data *lid)
+{
+ ethr_ts_event *tsep = NULL;
+ int reader_groups;
+ int res;
+ int i;
+ ethr_memory_allocator *m[] = {&ethr_mem__.std,
+ &ethr_mem__.sl,
+ &ethr_mem__.ll};
+ if (lid)
+ ethr_mem__ = lid->mem;
+ if (!ethr_mem__.std.alloc
+ || !ethr_mem__.std.realloc
+ || !ethr_mem__.std.free) {
+ ethr_mem__.std.alloc = malloc;
+ ethr_mem__.std.realloc = realloc;
+ ethr_mem__.std.free = free;
+ }
+ for (i = 0; i < sizeof(m)/sizeof(m[0]); i++) {
+ if (!m[i]->alloc || !m[i]->realloc || !m[i]->free) {
+ m[i]->alloc = ethr_mem__.std.alloc;
+ m[i]->realloc = ethr_mem__.std.realloc;
+ m[i]->free = ethr_mem__.std.free;
+ }
+
+ }
+ res = init_ts_event_alloc();
+ if (res != 0)
+ return res;
+ res = ethr_make_ts_event__(&tsep);
+ if (res == 0)
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ if (!lid) {
+ main_threads = 0;
+ reader_groups = 0;
+ }
+ else {
+ if (lid->main_threads < 0 || USHRT_MAX < lid->main_threads)
+ return res;
+ main_threads = lid->main_threads;
+ reader_groups = lid->reader_groups;
+ }
+ res = ethr_mutex_lib_late_init(reader_groups, main_threads);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0; /* Need it for
+ rwmutex_init */
+ res = ethr_rwmutex_init(&xhndl_rwmtx);
+ ethr_not_completely_inited__ = 1;
+ if (res != 0)
+ return res;
+ return 0;
+}
+
+int
+ethr_install_exit_handler(void (*funcp)(void))
+{
+ ethr_xhndl_list *xhp;
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (!funcp)
+ return EINVAL;
+
+ xhp = (ethr_xhndl_list *) ethr_mem__.std.alloc(sizeof(ethr_xhndl_list));
+ if (!xhp)
+ return ENOMEM;
+
+ ethr_rwmutex_rwlock(&xhndl_rwmtx);
+
+ xhp->funcp = funcp;
+ xhp->next = xhndl_list;
+ xhndl_list = xhp;
+
+ ethr_rwmutex_rwunlock(&xhndl_rwmtx);
+
+ return 0;
+}
+
+void
+ethr_run_exit_handlers__(void)
+{
+ ethr_xhndl_list *xhp;
+
+ ethr_rwmutex_rlock(&xhndl_rwmtx);
+
+ xhp = xhndl_list;
+
+ ethr_rwmutex_runlock(&xhndl_rwmtx);
+
+ for (; xhp; xhp = xhp->next)
+ (*xhp->funcp)();
+}
+
+/*
+ * Thread specific event alloc, etc.
+ *
+ * Note that we don't know when it is safe to destroy an event, but
+ * we know when it is safe to reuse it. ts_event_free() therefore
+ * never destroys an event (but makes freed events available for
+ * reuse).
+ *
+ * We could easily keep track of the usage of events, and thereby
+ * make it possible to destroy them. We would, however, suffer a
+ * performance penalty for this and save very little memory.
+ */
+
+typedef union {
+ ethr_ts_event ts_ev;
+ char align[ETHR_CACHE_LINE_ALIGN_SIZE(sizeof(ethr_ts_event))];
+} ethr_aligned_ts_event;
+
+static ethr_spinlock_t ts_ev_alloc_lock;
+static ethr_ts_event *free_ts_ev;
+
+#if SIZEOF_VOID_P == SIZEOF_INT
+typedef unsigned int EthrPtrSzUInt;
+#elif SIZEOF_VOID_P == SIZEOF_LONG
+typedef unsigned long EthrPtrSzUInt;
+#else
+#error No pointer sized integer type
+#endif
+
+static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
+{
+ int i;
+ ethr_aligned_ts_event *atsev;
+ atsev = ethr_mem__.std.alloc(sizeof(ethr_aligned_ts_event) * size
+ + ETHR_CACHE_LINE_SIZE);
+ if (!atsev)
+ return NULL;
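+ /* Round a misaligned allocation up to the next cache-line
+ boundary; the extra ETHR_CACHE_LINE_SIZE bytes requested from
+ the allocator above make this safe. */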
+ if ((((EthrPtrSzUInt) atsev) & ETHR_CACHE_LINE_MASK) != 0)
+ atsev = ((ethr_aligned_ts_event *)
+ ((((EthrPtrSzUInt) atsev) & ~ETHR_CACHE_LINE_MASK)
+ + ETHR_CACHE_LINE_SIZE));
+ for (i = 1; i < size; i++) {
+ atsev[i-1].ts_ev.next = &atsev[i].ts_ev;
+ ethr_atomic_init(&atsev[i-1].ts_ev.uaflgs, 0);
+ atsev[i-1].ts_ev.iflgs = 0;
+ }
+ ethr_atomic_init(&atsev[size-1].ts_ev.uaflgs, 0);
+ atsev[size-1].ts_ev.iflgs = 0;
+ atsev[size-1].ts_ev.next = NULL;
+ if (endpp)
+ *endpp = &atsev[size-1].ts_ev;
+ return &atsev[0].ts_ev;
+}
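
With concrete numbers for the round-up above: assuming ETHR_CACHE_LINE_SIZE is 64 (so ETHR_CACHE_LINE_MASK is 0x3f), a misaligned pointer such as 0x1009 is masked down to 0x1000 and then bumped by 64 to 0x1040, the next 64-byte boundary; the extra cache line allocated keeps the result inside the allocation.
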
+
+static int init_ts_event_alloc(void)
+{
+ free_ts_ev = ts_event_pool(ERTS_TS_EV_ALLOC_DEFAULT_POOL_SIZE,
+ NULL);
+ if (!free_ts_ev)
+ return ENOMEM;
+ return ethr_spinlock_init(&ts_ev_alloc_lock);
+}
+
+static ethr_ts_event *ts_event_alloc(void)
+{
+ ethr_ts_event *ts_ev;
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ if (free_ts_ev) {
+ ts_ev = free_ts_ev;
+ free_ts_ev = ts_ev->next;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+ }
+ else {
+ ethr_ts_event *ts_ev_pool_end;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+
+ ts_ev = ts_event_pool(ERTS_TS_EV_ALLOC_POOL_SIZE, &ts_ev_pool_end);
+ if (!ts_ev)
+ return NULL;
+
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ ts_ev_pool_end->next = free_ts_ev;
+ free_ts_ev = ts_ev->next;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+ }
+ return ts_ev;
+}
+
+static void ts_event_free(ethr_ts_event *ts_ev)
+{
+ ETHR_ASSERT(!ts_ev->udata);
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ ts_ev->next = free_ts_ev;
+ free_ts_ev = ts_ev;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+}
+
+int ethr_make_ts_event__(ethr_ts_event **tsepp)
+{
+ int res;
+ ethr_ts_event *tsep = *tsepp;
+
+ if (!tsep) {
+ tsep = ts_event_alloc();
+ if (!tsep)
+ return ENOMEM;
+ }
+
+ if ((tsep->iflgs & ETHR_TS_EV_INITED) == 0) {
+ res = ethr_event_init(&tsep->event);
+ if (res != 0) {
+ ts_event_free(tsep);
+ return res;
+ }
+ }
+
+ tsep->iflgs = ETHR_TS_EV_INITED;
+ tsep->udata = NULL;
+ tsep->rgix = 0;
+ tsep->mtix = 0;
+
+ res = ethr_set_tse__(tsep);
+ if (res != 0 && tsepp && *tsepp) {
+ ts_event_free(tsep);
+ return res;
+ }
+
+ if (tsepp)
+ *tsepp = tsep;
+
+ return 0;
+}
+
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp)
+{
+ int res;
+ ethr_ts_event *tsep = *tsepp;
+
+ if (!tsep) {
+ tsep = ts_event_alloc();
+ if (!tsep)
+ return ENOMEM;
+ }
+
+ if ((tsep->iflgs & ETHR_TS_EV_INITED) == 0) {
+ res = ethr_event_init(&tsep->event);
+ if (res != 0) {
+ ts_event_free(tsep);
+ return res;
+ }
+ }
+
+ tsep->iflgs = ETHR_TS_EV_INITED|ETHR_TS_EV_TMP;
+ tsep->udata = NULL;
+
+ if (tsepp)
+ *tsepp = tsep;
+
+ return 0;
+}
+
+int ethr_free_ts_event__(ethr_ts_event *tsep)
+{
+ ts_event_free(tsep);
+ return 0;
+}
+
+void ethr_ts_event_destructor__(void *vtsep)
+{
+ if (vtsep) {
+ ethr_ts_event *tsep = (ethr_ts_event *) vtsep;
+ ts_event_free(tsep);
+ ethr_set_tse__(NULL);
+ }
+}
+
+int ethr_set_main_thr_status(int on, int no)
+{
+ ethr_ts_event *tsep = ethr_get_tse__();
+ if (!tsep)
+ return EINVAL;
+ if (on) {
+ if (no < 1 || main_threads < no)
+ return EINVAL;
+ tsep->mtix = (unsigned short) no;
+ tsep->iflgs |= ETHR_TS_EV_MAIN_THR;
+ }
+ else {
+ tsep->iflgs &= ~ETHR_TS_EV_MAIN_THR;
+ tsep->mtix = (unsigned short) 0;
+ }
+ return 0;
+}
+
+int ethr_get_main_thr_status(int *on)
+{
+ ethr_ts_event *tsep = ethr_get_tse__();
+ if (!tsep)
+ *on = 0;
+ else {
+ if (tsep->iflgs & ETHR_TS_EV_MAIN_THR)
+ *on = 1;
+ else
+ *on = 0;
+ }
+ return 0;
+}
+
+
+/* Atomics */
+
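+/*
+ * The functions below are thin, assertion-checked wrappers around the
+ * corresponding inline ethr_atomic_*__() implementations. A sketch of
+ * the cmpxchg retry loop used throughout this library (SOME_FLG__ is
+ * illustrative):
+ *
+ *   long exp, act = ethr_atomic_read(&var);
+ *   do {
+ *       exp = act;
+ *       act = ethr_atomic_cmpxchg(&var, exp | SOME_FLG__, exp);
+ *   } while (act != exp); // act == exp means our update won
+ */
+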
+void
+ethr_atomic_init(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(var);
+ ethr_atomic_init__(var, i);
+}
+
+void
+ethr_atomic_set(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set__(var, i);
+}
+
+long
+ethr_atomic_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read__(var);
+}
+
+
+long
+ethr_atomic_add_read(ethr_atomic_t *var, long incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_add_read__(var, incr);
+}
+
+long
+ethr_atomic_inc_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read__(var);
+}
+
+long
+ethr_atomic_dec_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read__(var);
+}
+
+void
+ethr_atomic_add(ethr_atomic_t *var, long incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_add__(var, incr);
+}
+
+void
+ethr_atomic_inc(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_inc__(var);
+}
+
+void
+ethr_atomic_dec(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec__(var);
+}
+
+long
+ethr_atomic_read_band(ethr_atomic_t *var, long mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_band__(var, mask);
+}
+
+long
+ethr_atomic_read_bor(ethr_atomic_t *var, long mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_bor__(var, mask);
+}
+
+long
+ethr_atomic_xchg(ethr_atomic_t *var, long new)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_xchg__(var, new);
+}
+
+long
+ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg__(var, new, expected);
+}
+
+long
+ethr_atomic_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_acqb__(var);
+}
+
+long
+ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read_acqb__(var);
+}
+
+void
+ethr_atomic_set_relb(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set_relb__(var, i);
+}
+
+void
+ethr_atomic_dec_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec_relb__(var);
+}
+
+long
+ethr_atomic_dec_read_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read_relb__(var);
+}
+
+long
+ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, long new, long exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_acqb__(var, new, exp);
+}
+
+long
+ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, long new, long exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_relb__(var, new, exp);
+}
+
+
+/* Spinlocks and rwspinlocks */
+
+int
+ethr_spinlock_init(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_init__(lock);
+}
+
+int
+ethr_spinlock_destroy(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_destroy__(lock);
+}
+
+void
+ethr_spin_unlock(ethr_spinlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_spin_unlock__(lock);
+}
+
+void
+ethr_spin_lock(ethr_spinlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_spin_lock__(lock);
+}
+
+int
+ethr_rwlock_init(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_init__(lock);
+}
+
+int
+ethr_rwlock_destroy(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_destroy__(lock);
+}
+
+void
+ethr_read_unlock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_read_unlock__(lock);
+}
+
+void
+ethr_read_lock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_read_lock__(lock);
+}
+
+void
+ethr_write_unlock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_write_unlock__(lock);
+}
+
+void
+ethr_write_lock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_write_lock__(lock);
+}
+
+ETHR_IMPL_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err)
+{
+ char *errstr;
+ if (err == ENOTSUP)
+ errstr = "Operation not supported";
+ else {
+ errstr = strerror(err);
+ if (!errstr)
+ errstr = "Unknown error";
+ }
+ fprintf(stderr, "%s:%d: Fatal error in %s(): %s (%d)\n",
+ file, line, func, errstr, err);
+ ethr_abort__();
+}
+
+int ethr_assert_failed(const char *file, int line, const char *func, char *a)
+{
+ fprintf(stderr, "%s:%d: %s(): Assertion failed: %s\n", file, line, func, a);
+ ethr_abort__();
+ return 0;
+}
diff --git a/erts/lib_src/common/ethr_cbf.c b/erts/lib_src/common/ethr_cbf.c
new file mode 100644
index 0000000000..04feceec89
--- /dev/null
+++ b/erts/lib_src/common/ethr_cbf.c
@@ -0,0 +1,36 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * We keep this function alone in a separate file so that the
+ * compiler won't optimize it away.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "ethread.h"
+
+void
+ethr_compiler_barrier_fallback(void)
+{
+
+}
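+
+/*
+ * Calling an out-of-line, empty function forces the compiler to assume
+ * that any memory may be read or written across the call, which is
+ * what makes it usable as a compiler barrier. A sketch of how it might
+ * be wired up (the macro name is an assumption, not necessarily the
+ * actual configuration):
+ *
+ *   #ifndef ETHR_COMPILER_BARRIER
+ *   #define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+ *   #endif
+ */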
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
new file mode 100644
index 0000000000..aac0d44a32
--- /dev/null
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -0,0 +1,2758 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_MUTEX_IMPL__
+
+#include <limits.h>
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#define ETHR_SPIN_WITH_WAITERS 1
+
+#define ETHR_MTX_MAX_FLGS_SPIN 1000
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+static int default_rwmtx_main_spincount;
+static int default_rwmtx_aux_spincount;
+#endif
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+static int default_mtx_main_spincount;
+static int default_mtx_aux_spincount;
+static int default_cnd_main_spincount;
+static int default_cnd_aux_spincount;
+#endif
+
+static int no_spin;
+
+#ifndef ETHR_USE_OWN_RWMTX_IMPL__
+static pthread_rwlockattr_t write_pref_attr_data;
+static pthread_rwlockattr_t *write_pref_attr;
+#endif
+
+#if defined(ETHR_MTX_Q_LOCK_SPINLOCK__)
+# define ETHR_MTX_QLOCK_INIT ethr_spinlock_init
+# define ETHR_MTX_QLOCK_DESTROY ethr_spinlock_destroy
+# define ETHR_MTX_Q_LOCK ethr_spin_lock
+# define ETHR_MTX_Q_UNLOCK ethr_spin_unlock
+#elif defined(ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__)
+# define ETHR_MTX_QLOCK_INIT(QL) pthread_mutex_init((QL), NULL)
+# define ETHR_MTX_QLOCK_DESTROY pthread_mutex_destroy
+# define ETHR_MTX_Q_LOCK(L) \
+do { \
+ int res__ = pthread_mutex_lock(L); \
+ if (res__ != 0) \
+ ETHR_FATAL_ERROR__(res__); \
+} while (0)
+# define ETHR_MTX_Q_UNLOCK(L) \
+do { \
+ int res__ = pthread_mutex_unlock(L); \
+ if (res__ != 0) \
+ ETHR_FATAL_ERROR__(res__); \
+} while (0)
+#elif defined(ETHR_MTX_Q_LOCK_CRITICAL_SECTION__)
+# define ETHR_MTX_QLOCK_INIT(QL) (InitializeCriticalSection((QL)), 0)
+# define ETHR_MTX_QLOCK_DESTROY(QL) (DeleteCriticalSection((QL)), 0)
+# define ETHR_MTX_Q_LOCK(QL) EnterCriticalSection((QL))
+# define ETHR_MTX_Q_UNLOCK(QL) LeaveCriticalSection((QL))
+#endif
+
+int
+ethr_mutex_lib_init(int cpu_conf)
+{
+ int res = 0;
+
+ no_spin = cpu_conf == 1;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+ default_mtx_main_spincount = ETHR_MTX_DEFAULT_MAIN_SPINCOUNT;
+ default_mtx_aux_spincount = ETHR_MTX_DEFAULT_AUX_SPINCOUNT;
+ default_cnd_main_spincount = ETHR_CND_DEFAULT_MAIN_SPINCOUNT;
+ default_cnd_aux_spincount = ETHR_CND_DEFAULT_AUX_SPINCOUNT;
+#endif
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+ default_rwmtx_main_spincount = ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT;
+ default_rwmtx_aux_spincount = ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT;
+
+#else
+
+#if defined(ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP) \
+ && defined(ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
+ res = pthread_rwlockattr_init(&write_pref_attr_data);
+ if (res != 0)
+ return res;
+ res = pthread_rwlockattr_setkind_np(
+ &write_pref_attr_data,
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+ write_pref_attr = &write_pref_attr_data;
+#else
+ write_pref_attr = NULL;
+#endif
+
+#endif
+
+ return res;
+}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#ifdef ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS
+#if 0 /*
+ * When inc and dec are real atomic instructions, as on x86, the
+ * ETHR_RLOCK_WITH_INC_DEC implementation performs better than the
+ * cmpxchg based implementation when read locks dominate. It does,
+ * however, perform worse with lots of mixed reads and writes. It
+ * could be used for rwlocks that are known to be mostly read
+ * locked, but the readers array based implementation outperforms
+ * it by far. Therefore, it has been disabled, and will probably
+ * be removed some time in the future.
+ */
+# define ETHR_RLOCK_WITH_INC_DEC
+#endif
+#endif
+
+static int reader_groups_array_size = 0;
+static int main_threads_array_size = 0;
+
+#endif
+
+int
+ethr_mutex_lib_late_init(int no_reader_groups, int no_main_threads)
+{
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+ reader_groups_array_size = (no_reader_groups <= 1
+ ? 1
+ : no_reader_groups + 1);
+ main_threads_array_size = (no_main_threads <= 1
+ ? 1
+ : no_main_threads + 1);
+#endif
+ return 0;
+}
+
+int
+ethr_rwmutex_set_reader_group(int ix)
+{
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+ ethr_ts_event *tse;
+
+ if (ix < 0 || reader_groups_array_size <= ix)
+ return EINVAL;
+
+ tse = ethr_get_ts_event();
+
+ if ((tse->iflgs & ETHR_TS_EV_ETHREAD) == 0) {
+ ethr_leave_ts_event(tse);
+ return EINVAL;
+ }
+
+ tse->rgix = ix;
+
+ ethr_leave_ts_event(tse);
+#endif
+ return 0;
+}
+
+#if defined(ETHR_MTX_HARD_DEBUG_Q) || defined(ETHR_MTX_HARD_DEBUG_WSQ)
+static void hard_debug_chk_q__(struct ethr_mutex_base_ *, int);
+#define ETHR_RWMTX_HARD_DEBUG_CHK_Q(RWMTX) hard_debug_chk_q__(&(RWMTX)->mtxb,1)
+#define ETHR_MTX_HARD_DEBUG_CHK_Q(MTX) hard_debug_chk_q__(&(MTX)->mtxb, 0)
+#else
+#define ETHR_RWMTX_HARD_DEBUG_CHK_Q(RWMTX)
+#define ETHR_MTX_HARD_DEBUG_CHK_Q(MTX)
+#endif
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+static void
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx,
+ int have_w,
+ long initial);
+static int
+rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
+ long initial,
+ ethr_ts_event *tse,
+ int start_next_ix,
+ int check_before_try,
+ int try_write_lock);
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+/* -- Utilities operating both on ordinary mutexes and read write mutexes -- */
+
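+/*
+ * Frequent-read rwmutexes count readers in an array of cache line
+ * separated slots (tdata.ra) instead of in the lock word. With the
+ * FREQUENT_READ type, each reader group (tse->rgix) shares a counter;
+ * with the EXTREMELY_FREQUENT_READ type, each main thread (tse->mtix)
+ * has a slot holding only 0 or 1, except slot 0, which is a shared
+ * counter for all threads without a main thread index.
+ */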
+static ETHR_INLINE void
+rwmutex_freqread_wtng_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix = (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ
+ ? tse->rgix
+ : tse->mtix);
+ rwmtx->tdata.ra[ix].data.waiting_readers++;
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_add(ethr_rwmutex *rwmtx,
+ ethr_rwmutex_type type,
+ int ix,
+ int inc)
+{
+ if (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ || ix == 0)
+ ethr_atomic_add(&rwmtx->tdata.ra[ix].data.readers, inc);
+ else {
+ ETHR_ASSERT(type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ETHR_ASSERT(inc == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ }
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_inc:
+ ethr_atomic_inc(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_inc;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ }
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_dec(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec:
+ ethr_atomic_dec(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_dec_read(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec_read:
+ return ethr_atomic_dec_read(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec_read;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ return (long) 0;
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_dec_read_relb(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec_read:
+ return ethr_atomic_dec_read_relb(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec_read;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set_relb(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ return (long) 0;
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
+{
+ long res = ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers);
+#ifdef ETHR_DEBUG
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ ETHR_ASSERT(res >= 0);
+ break;
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+ ETHR_ASSERT(res == 0 || res == 1);
+ break;
+ default:
+ ETHR_ASSERT(0);
+ break;
+ }
+#endif
+ return res;
+}
+
+
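+/*
+ * Waiters are kept in circular, doubly linked queues: *queue points at
+ * the head, and the tail is reachable as (*queue)->prev. enqueue()
+ * appends at the tail; e.g., enqueueing [C] on the queue [A, B] links
+ * C in between B and A, leaving *queue still pointing at A.
+ */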
+static ETHR_INLINE void
+enqueue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (!*queue) {
+ *queue = tse_start;
+ tse_start->prev = tse_end;
+ tse_end->next = tse_start;
+ }
+ else {
+ tse_end->next = *queue;
+ tse_start->prev = (*queue)->prev;
+ (*queue)->prev->next = tse_start;
+ (*queue)->prev = tse_end;
+ }
+}
+
+static ETHR_INLINE void
+insert(ethr_ts_event *tse_pred, ethr_ts_event *tse)
+{
+ tse->next = tse_pred->next;
+ tse->prev = tse_pred;
+ tse_pred->next->prev = tse;
+ tse_pred->next = tse;
+}
+
+static ETHR_INLINE void
+dequeue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (tse_start->prev == tse_end) {
+ ETHR_ASSERT(*queue == tse_start && tse_end->next == tse_start);
+ *queue = NULL;
+ }
+ else {
+ if (*queue == tse_start)
+ *queue = tse_end->next;
+ tse_end->next->prev = tse_start->prev;
+ tse_start->prev->next = tse_end->next;
+ }
+}
+
+static void
+event_wait(struct ethr_mutex_base_ *mtxb,
+ ethr_ts_event *tse,
+ int spincount,
+ long type,
+ int is_rwmtx,
+ int is_freq_read)
+{
+ int locked = 0;
+ long act;
+ int need_try_complete_runlock = 0;
+
+ /* Need to enqueue and wait... */
+
+ tse->uflgs = type;
+ ethr_atomic_set(&tse->uaflgs, type);
+
+ ETHR_MTX_Q_LOCK(&mtxb->qlck);
+ locked = 1;
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ act = ethr_atomic_read(&mtxb->flgs);
+
+ if (act & type) {
+
+ /* Wait bit already there; enqueue... */
+
+ ETHR_ASSERT(mtxb->q);
+ if (type == ETHR_RWMTX_W_WAIT_FLG__) {
+ enqueue(&mtxb->q, tse, tse);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws++;
+#endif
+ }
+ else {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_ASSERT(is_rwmtx);
+ ETHR_ASSERT(rwmtx->rq_end);
+ insert(rwmtx->rq_end, tse);
+ rwmtx->rq_end = tse;
+ if (is_freq_read)
+ rwmutex_freqread_wtng_rdrs_inc(rwmtx, tse);
+ else
+ rwmtx->tdata.rs++;
+ }
+ }
+ else {
+
+ /* Set wait bit */
+
+ while (1) {
+ long new, exp = act;
+ int freqread_tryrlock = 0;
+ need_try_complete_runlock = 0;
+
+ if (type == ETHR_RWMTX_W_WAIT_FLG__) {
+ if (is_freq_read && act == ETHR_RWMTX_R_FLG__)
+ need_try_complete_runlock = 1;
+ if (act != 0)
+ new = act | ETHR_RWMTX_W_WAIT_FLG__;
+ else
+ new = ETHR_RWMTX_W_FLG__; /* Try to get it */
+ }
+ else {
+ ETHR_ASSERT(is_rwmtx);
+
+ if (!is_freq_read) {
+ if (act & (ETHR_RWMTX_W_FLG__| ETHR_RWMTX_W_WAIT_FLG__))
+ new = act | ETHR_RWMTX_R_WAIT_FLG__;
+ else
+ new = act + 1; /* Try to get it */
+ }
+ else {
+ if (act & ~ETHR_RWMTX_R_FLG__)
+ new = act | ETHR_RWMTX_R_WAIT_FLG__;
+ else { /* Try to get it */
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+ ETHR_MEMORY_BARRIER;
+ new = act | ETHR_RWMTX_R_FLG__;
+ freqread_tryrlock = 1;
+ }
+ }
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs, new, exp);
+ if (exp == act) {
+ if (new & type) {
+ act = new;
+ break;
+ }
+ else {
+ /* Got it */
+ goto done;
+ }
+ }
+
+ if (freqread_tryrlock) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+
+ /* We didn't set ETHR_RWMTX_R_FLG__; however, someone
+ else might have */
+ if (act == ETHR_RWMTX_R_FLG__)
+ goto done; /* Got it by help from someone else */
+
+ ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
+ /*
+ * We know that no waiter flags have been set, i.e.,
+ * we cannot get into a situation where we need to wake
+ * someone up here. Just restore the readers counter
+ * and do it over again...
+ */
+ rwmutex_freqread_rdrs_dec(rwmtx, tse);
+ }
+ }
+
+ /* Enqueue */
+
+ if (type == ETHR_RWMTX_R_WAIT_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_ASSERT(is_rwmtx);
+ ETHR_ASSERT(!rwmtx->rq_end);
+ rwmtx->rq_end = tse;
+ if (is_freq_read)
+ rwmutex_freqread_wtng_rdrs_inc(rwmtx, tse);
+ else
+ rwmtx->tdata.rs++;
+ }
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ else {
+ mtxb->ws++;
+ }
+#endif
+
+ enqueue(&mtxb->q, tse, tse);
+ }
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ /* Wait */
+ locked = 0;
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+
+ if (need_try_complete_runlock) {
+ ETHR_ASSERT(((ethr_rwmutex *) mtxb)->type
+ != ETHR_RWMUTEX_TYPE_NORMAL);
+ /*
+ * We were the only one in the queue when we enqueued, and it
+ * was seemingly read locked. We need to try to complete a
+ * runlock; otherwise, we might hang forever. If the runlock
+ * can be completed, we will be dequeued and woken by
+ * ourselves.
+ */
+ rwmutex_try_complete_runlock((ethr_rwmutex *) mtxb,
+ act, tse, 0, 1, 0);
+ }
+
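+ /*
+ * Wakeup protocol: the waker first clears tse->uaflgs and then sets
+ * the event (see, e.g., wake_writer()). Re-reading uaflgs after the
+ * event has been reset ensures that a wakeup arriving in between
+ * cannot be lost.
+ */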
+ while (1) {
+ ethr_event_reset(&tse->event);
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ goto done; /* Got it */
+
+ ETHR_ASSERT(act == type);
+ ethr_event_swait(&tse->event, spincount);
+ /* swait result: 0 || EINTR */
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ goto done; /* Got it */
+ }
+
+ done:
+ if (locked)
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+}
+
+static void
+wake_writer(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+ ethr_ts_event *tse;
+
+ tse = mtxb->q;
+ ETHR_ASSERT(tse);
+ dequeue(&mtxb->q, tse, tse);
+
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_RWMTX_W_WAIT_FLG__);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws--;
+#endif
+#if defined(ETHR_MTX_HARD_DEBUG_Q) || defined(ETHR_MTX_HARD_DEBUG_WSQ)
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+
+ ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_event_set(&tse->event);
+}
+
+static ETHR_INLINE int
+initial_spincount(struct ethr_mutex_base_ *mtxb)
+{
+ return (mtxb->aux_scnt < ETHR_MTX_MAX_FLGS_SPIN
+ ? mtxb->aux_scnt
+ : ETHR_MTX_MAX_FLGS_SPIN);
+}
+
+static ETHR_INLINE int
+update_spincount(struct ethr_mutex_base_ *mtxb,
+ ethr_ts_event *tse,
+ int *start_scnt,
+ int *scnt)
+{
+ int sscnt = *start_scnt;
+ if (sscnt < 0) {
+ *scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? mtxb->main_scnt
+ : mtxb->aux_scnt);
+ *scnt -= ETHR_MTX_MAX_FLGS_SPIN;
+ }
+ else {
+ *scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? mtxb->main_scnt
+ : mtxb->aux_scnt);
+ *scnt -= sscnt;
+ if (*scnt > 0 && sscnt < ETHR_MTX_MAX_FLGS_SPIN) {
+ *scnt = ETHR_MTX_MAX_FLGS_SPIN - sscnt;
+ *start_scnt = -1;
+ return 0;
+ }
+ }
+ return 1;
+}
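+
+/*
+ * Worked example with illustrative numbers: assume aux_scnt == 400,
+ * main_scnt == 1500 and ETHR_MTX_MAX_FLGS_SPIN == 1000. A main thread
+ * first spins 400 times (initial_spincount() is used before the thread
+ * type is known). update_spincount() then grants 1000 - 400 = 600 more
+ * spins on the lock word and returns 0; when called again it returns 1
+ * with *scnt == 1500 - 1000 == 500, which is spent spinning inside
+ * ethr_event_swait() before blocking on the event.
+ */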
+
+int check_readers_array(ethr_rwmutex *rwmtx,
+ int start_rix,
+ int length,
+ int pre_check);
+
+static ETHR_INLINE void
+write_lock_wait(struct ethr_mutex_base_ *mtxb,
+ long initial,
+ int is_rwmtx,
+ int is_freq_read)
+{
+ long act = initial;
+ int scnt, start_scnt;
+ ethr_ts_event *tse = NULL;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ int res;
+ int freq_read_size = -1;
+ int freq_read_start_ix = -1;
+
+ ETHR_ASSERT(!is_freq_read || is_rwmtx);
+
+ start_scnt = scnt = initial_spincount(mtxb);
+
+ /*
+ * Spin trying to write lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+ long exp;
+
+ while (act != 0) {
+
+ if (is_freq_read && act == ETHR_RWMTX_R_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ if (!tse)
+ tse = ethr_get_ts_event();
+ if (freq_read_size < 0) {
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ freq_read_size = reader_groups_array_size;
+ freq_read_start_ix = tse->rgix;
+ }
+ else {
+ freq_read_size = main_threads_array_size;
+ freq_read_start_ix = tse->mtix;
+ }
+ }
+ res = check_readers_array(rwmtx,
+ freq_read_start_ix,
+ freq_read_size,
+ 1);
+ scnt--;
+ if (res == 0) {
+ act = ethr_atomic_read(&mtxb->flgs);
+ if (act & ETHR_RWMTX_R_MASK__) {
+ res = rwmutex_try_complete_runlock(rwmtx, act,
+ tse, 0, 0,
+ 1);
+ if (res != EBUSY)
+ goto done; /* Got it */
+ }
+ if (scnt <= 0)
+ goto chk_spin;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ continue;
+ }
+ }
+
+ if (scnt <= 0) {
+ chk_spin:
+ scnt = 0;
+
+ if (!tse)
+ tse = ethr_get_ts_event();
+ if (update_spincount(mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(mtxb, tse, scnt, ETHR_RWMTX_W_WAIT_FLG__,
+ is_rwmtx, is_freq_read);
+ goto done;
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&mtxb->flgs);
+ scnt--;
+ }
+ ETHR_ASSERT(scnt >= 0);
+
+ exp = act;
+
+ act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs,
+ ETHR_RWMTX_W_FLG__,
+ exp);
+ if (act == 0)
+ goto done; /* Got it */
+ }
+
+ done:
+ if (tse)
+ ethr_leave_ts_event(tse);
+}
+
+static int
+mtxb_init(struct ethr_mutex_base_ *mtxb,
+ int def_main_scnt,
+ int main_scnt,
+ int def_aux_scnt,
+ int aux_scnt)
+{
+ ETHR_MTX_HARD_DEBUG_LFS_INIT(mtxb);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws = 0;
+#endif
+ if (no_spin) {
+ mtxb->main_scnt = 0;
+ mtxb->aux_scnt = 0;
+ }
+ else {
+ if (main_scnt > SHRT_MAX)
+ mtxb->main_scnt = SHRT_MAX;
+ else if (main_scnt < 0)
+ mtxb->main_scnt = def_main_scnt;
+ else
+ mtxb->main_scnt = (short) main_scnt;
+ if (aux_scnt > SHRT_MAX)
+ mtxb->aux_scnt = SHRT_MAX;
+ else if (aux_scnt < 0)
+ mtxb->aux_scnt = def_aux_scnt;
+ else
+ mtxb->aux_scnt = (short) aux_scnt;
+ if (mtxb->main_scnt < mtxb->aux_scnt)
+ mtxb->main_scnt = mtxb->aux_scnt;
+
+ }
+ mtxb->q = NULL;
+ ethr_atomic_init(&mtxb->flgs, 0);
+ return ETHR_MTX_QLOCK_INIT(&mtxb->qlck);
+}
+
+static int
+mtxb_destroy(struct ethr_mutex_base_ *mtxb)
+{
+ long act;
+ ETHR_MTX_Q_LOCK(&mtxb->qlck);
+ act = ethr_atomic_read(&mtxb->flgs);
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+ if (act != 0)
+ return EINVAL;
+ return ETHR_MTX_QLOCK_DESTROY(&mtxb->qlck);
+}
+
+
+#endif /* ETHR_USE_OWN_RWMTX_IMPL__ || ETHR_USE_OWN_MTX_IMPL__ */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Mutex and condition variable implementation                                 *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+/* -- Mutex ---------------------------------------------------------------- */
+
+int
+ethr_mutex_init_opt(ethr_mutex *mtx, ethr_mutex_opt *opt)
+{
+ int res;
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(mtx);
+ res = mtxb_init(&mtx->mtxb,
+ default_mtx_main_spincount,
+ opt ? opt->main_spincount : -1,
+ default_mtx_aux_spincount,
+ opt ? opt->aux_spincount : -1);
+#if ETHR_XCHK
+ if (res != 0)
+ mtx->initialized = 0;
+#endif
+ return res;
+}
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+ return ethr_mutex_init_opt(mtx, NULL);
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = 0;
+#endif
+ return mtxb_destroy(&mtx->mtxb);
+}
+
+void
+ethr_mutex_lock_wait__(ethr_mutex *mtx, long initial)
+{
+ write_lock_wait(&mtx->mtxb, initial, 0, 0);
+}
+
+void
+ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
+{
+ ethr_ts_event *tse;
+
+ ETHR_MTX_Q_LOCK(&mtx->mtxb.qlck);
+ tse = mtx->mtxb.q;
+
+ ETHR_ASSERT(tse);
+ ETHR_ASSERT(ethr_atomic_read(&mtx->mtxb.flgs)
+ == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
+ ETHR_ASSERT(initial & ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+
+ /*
+ * If we have multiple waiters, there is no need to modify
+ * mtxb->flgs; otherwise, we need to clear the write wait bit...
+ */
+ if (tse->next == mtx->mtxb.q)
+ ethr_atomic_set(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__);
+
+ wake_writer(&mtx->mtxb, 0);
+}
+
+/* -- Condition variables -------------------------------------------------- */
+
+static void
+enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
+{
+ long act;
+
+ /*
+ * `ethr_cond_signal()' and `ethr_cond_broadcast()' end up here. If `mtx'
+ * is not currently locked by the calling thread, we almost certainly
+ * have a hard to debug race condition. There might, however, be some
+ * (strange) use for it. POSIX also allows a call to `pthread_cond_signal'
+ * or `pthread_cond_broadcast' even though the associated mutex isn't
+ * locked by the caller. Therefore, we also allow this kind of strange
+ * usage, but optimize for the case where the mutex is locked by the
+ * calling thread.
+ */
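+
+ /*
+ * The expected (sane) usage from the caller's side is thus, as a
+ * sketch:
+ *
+ *   ethr_mutex_lock(&mtx);
+ *   ... update the predicate ...
+ *   ethr_cond_signal(&cnd); // ends up here with mtx write locked
+ *   ethr_mutex_unlock(&mtx);
+ */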
+
+ ETHR_MTX_Q_LOCK(&mtx->mtxb.qlck);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ {
+ int dbg_nws__ = 0;
+ ethr_ts_event *dbg_tse__;
+ for (dbg_tse__ = tse_start;
+ dbg_tse__ != tse_end;
+ dbg_tse__ = dbg_tse__->next)
+ dbg_nws__++;
+ mtx->mtxb.ws += dbg_nws__ + 1;
+ }
+#endif
+
+ act = ethr_atomic_read(&mtx->mtxb.flgs);
+ ETHR_ASSERT(act == 0
+ || act == ETHR_RWMTX_W_FLG__
+ || act == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
+ if (act & ETHR_RWMTX_W_FLG__) {
+ /* The normal sane case */
+ if (!(act & ETHR_RWMTX_W_WAIT_FLG__)) {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs,
+ (ETHR_RWMTX_W_FLG__
+ | ETHR_RWMTX_W_WAIT_FLG__),
+ ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__) {
+ /*
+ * Sigh... this wasn't so sane after all, since the mutex was
+ * obviously not locked by the current thread...
+ */
+ ETHR_ASSERT(act == 0);
+ goto mtx_unlocked;
+ }
+ }
+
+#ifdef ETHR_DEBUG
+ if (act & ETHR_RWMTX_W_WAIT_FLG__)
+ ETHR_ASSERT(mtx->mtxb.q);
+ else
+ ETHR_ASSERT(!mtx->mtxb.q);
+#endif
+
+ enqueue(&mtx->mtxb.q, tse_start, tse_end);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ }
+ else {
+ int multi;
+ mtx_unlocked:
+ /* Sigh... mutex isn't locked... */
+
+ multi = tse_start != tse_end;
+
+ while (1) {
+ long new, exp = act;
+
+ if (multi || (act & ETHR_RWMTX_W_FLG__))
+ new = ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__;
+ else
+ new = ETHR_RWMTX_W_FLG__;
+
+ act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ if (act & ETHR_RWMTX_W_FLG__) {
+ enqueue(&mtx->mtxb.q, tse_start, tse_end);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ }
+ else {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ /*
+ * Acquired the mutex on behalf of the
+ * first thread in the queue; wake
+ * it and enqueue the rest...
+ */
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtx->mtxb.ws--;
+#endif
+ if (multi) {
+ enqueue(&mtx->mtxb.q, tse_start->next, tse_end);
+ ETHR_ASSERT(mtx->mtxb.q);
+ }
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ ethr_atomic_set(&tse_start->uaflgs, 0);
+ ethr_event_set(&tse_start->event);
+ }
+ break;
+ }
+ }
+ }
+}
+
+int
+ethr_cond_init_opt(ethr_cond *cnd, ethr_cond_opt *opt)
+{
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(cnd);
+ cnd->q = NULL;
+ if (no_spin) {
+ cnd->main_scnt = 0;
+ cnd->aux_scnt = 0;
+ }
+ else {
+ if (!opt || opt->main_spincount < 0)
+ cnd->main_scnt = default_cnd_main_spincount;
+ else if (opt->main_spincount > SHRT_MAX)
+ cnd->main_scnt = SHRT_MAX;
+ else
+ cnd->main_scnt = (short) opt->main_spincount;
+ if (!opt || opt->aux_spincount < 0)
+ cnd->aux_scnt = default_cnd_aux_spincount;
+ else if (opt->aux_spincount > SHRT_MAX)
+ cnd->aux_scnt = SHRT_MAX;
+ else
+ cnd->aux_scnt = (short) opt->aux_spincount;
+ if (cnd->main_scnt < cnd->aux_scnt)
+ cnd->main_scnt = cnd->aux_scnt;
+ }
+ return ETHR_MTX_QLOCK_INIT(&cnd->qlck);
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+ return ethr_cond_init_opt(cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = 0;
+#endif
+ return ETHR_MTX_QLOCK_DESTROY(&cnd->qlck);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ ethr_ts_event *tse;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ tse = cnd->q;
+
+ if (!tse) {
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+ }
+ else {
+ ethr_mutex *mtx = (ethr_mutex *) tse->udata;
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_CND_WAIT_FLG__);
+
+ ethr_atomic_set(&tse->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+
+ dequeue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ tse->next = tse->prev = NULL;
+
+ enqueue_mtx(mtx, tse, tse);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ }
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ int got_all;
+ ethr_ts_event *tse;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ do {
+ got_all = 1;
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ tse = cnd->q;
+
+ if (!tse) {
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+ }
+ else {
+ ethr_mutex *mtx = (ethr_mutex *) tse->udata;
+ ethr_ts_event *tse_tmp, *tse_end;
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ tse_end = cnd->q->prev;
+
+ tse_tmp = tse;
+
+ do {
+
+ if (mtx == (ethr_mutex *) tse_tmp->udata) {
+ /* The normal case */
+
+ ETHR_ASSERT(tse_tmp->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse_tmp->uaflgs)
+ == ETHR_CND_WAIT_FLG__);
+
+ ethr_atomic_set(&tse_tmp->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+ }
+ else {
+ /* Should be very unusual */
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ tse_end = tse_tmp->prev;
+ got_all = 0;
+ break;
+ }
+
+ tse_tmp = tse_tmp->next;
+
+ } while (tse_tmp != cnd->q);
+
+ dequeue(&cnd->q, tse, tse_end);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ enqueue_mtx(mtx, tse, tse_end);
+ }
+
+ } while (!got_all);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ int woken;
+ int scnt;
+ void *udata = NULL;
+ ethr_ts_event *tse;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ tse = ethr_get_ts_event();
+
+ scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? cnd->main_scnt
+ : cnd->aux_scnt);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ udata = tse->udata; /* Got to restore udata before returning */
+ tse->udata = (void *) mtx;
+
+ tse->uflgs = ETHR_RWMTX_W_WAIT_FLG__; /* Prep for mutex lock op */
+ ethr_atomic_set(&tse->uaflgs, ETHR_CND_WAIT_FLG__);
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ enqueue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ ethr_mutex_unlock(mtx);
+
+ /* Wait */
+ woken = 0;
+ while (1) {
+ long act;
+
+ ethr_event_reset(&tse->event);
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ break; /* Mtx locked */
+
+ /* First time, got EINTR, or spurious wakeup... */
+
+ ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
+ || act == ETHR_RWMTX_W_WAIT_FLG__);
+
+ if (woken) {
+ /*
+ * If act == ETHR_RWMTX_W_WAIT_FLG__, we have already been enqueued
+ * on the mutex; continue wait until locked...
+ */
+ if (act == ETHR_CND_WAIT_FLG__) {
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+ act = ethr_atomic_read(&tse->uaflgs);
+ ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
+ || act == ETHR_RWMTX_W_WAIT_FLG__);
+ /*
+ * If act == ETHR_RWMTX_W_WAIT_FLG__, we have already been
+ * enqueued on the mutex; continue wait until locked...
+ */
+ if (act == ETHR_CND_WAIT_FLG__)
+ dequeue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ if (act == ETHR_CND_WAIT_FLG__) {
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ethr_mutex_lock(mtx);
+ return EINTR;
+ }
+ }
+ ETHR_ASSERT(act == ETHR_RWMTX_W_WAIT_FLG__);
+ }
+ ethr_event_swait(&tse->event, scnt);
+ /* swait result: 0 || EINTR */
+ woken = 1;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+ return 0;
+}
+
+#else
+/* -- pthread mutex and condition variables -------------------------------- */
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ return pthread_mutex_init(&mtx->pt_mtx, NULL);
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+#if ETHR_XCHK
+ mtx->initialized = 0;
+#endif
+ return pthread_mutex_destroy(&mtx->pt_mtx);
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+ return pthread_cond_init(&cnd->pt_cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = 0;
+#endif
+ return pthread_cond_destroy(&cnd->pt_cnd);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ res = pthread_cond_signal(&cnd->pt_cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ res = pthread_cond_broadcast(&cnd->pt_cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ res = pthread_cond_wait(&cnd->pt_cnd, &mtx->pt_mtx);
+ if (res != 0 && res != EINTR)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+#endif /* pthread_mutex */
+
+/* -- Exported symbols of inline functions --------------------------------- */
+
+int
+ethr_mutex_trylock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ return ethr_mutex_trylock__(mtx);
+}
+
+void
+ethr_mutex_lock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ ethr_mutex_lock__(mtx);
+}
+
+void
+ethr_mutex_unlock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ ethr_mutex_unlock__(mtx);
+}
+
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Read/Write Mutex                                                            *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+static void
+wake_readers(ethr_rwmutex *rwmtx, int rs)
+{
+ ethr_ts_event *tse;
+#ifdef ETHR_DEBUG
+ int drs = 0;
+#endif
+
+ tse = rwmtx->mtxb.q;
+ ETHR_ASSERT(tse);
+ ETHR_ASSERT(rwmtx->rq_end);
+ dequeue(&rwmtx->mtxb.q, tse, rwmtx->rq_end);
+ rwmtx->rq_end->next = NULL;
+ rwmtx->rq_end = NULL;
+
+ ETHR_ASSERT(!rwmtx->mtxb.q
+ || (ethr_atomic_read(&rwmtx->mtxb.q->uaflgs)
+ == ETHR_RWMTX_W_WAIT_FLG__));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+ ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
+
+ while (tse) {
+ ethr_ts_event *tse_next;
+
+#ifdef ETHR_DEBUG
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs)
+ == ETHR_RWMTX_R_WAIT_FLG__);
+ drs++;
+#endif
+
+ tse_next = tse->next; /* we aren't allowed to read tse->next
+ after we have reset uaflgs */
+
+ ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_event_set(&tse->event);
+ tse = tse_next;
+ }
+
+ ETHR_ASSERT(rs == drs);
+}
+
+static ETHR_INLINE int
+is_w_waiter(ethr_ts_event *tse)
+{
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__
+ || tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ return tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__;
+}
+
+static ETHR_INLINE int
+multiple_w_waiters(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(rwmtx->mtxb.q);
+ ETHR_ASSERT(rwmtx->mtxb.q->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+
+ if (!rwmtx->rq_end)
+ return rwmtx->mtxb.q->next != rwmtx->mtxb.q;
+ else {
+ ETHR_ASSERT(rwmtx->mtxb.q->next != rwmtx->mtxb.q);
+ if (rwmtx->mtxb.q->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__)
+ return 1;
+ ETHR_ASSERT(rwmtx->rq_end->next == rwmtx->mtxb.q
+ || rwmtx->rq_end->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ return rwmtx->rq_end->next != rwmtx->mtxb.q;
+ }
+}
+
+int check_readers_array(ethr_rwmutex *rwmtx,
+ int start_rix,
+ int length,
+ int pre_check)
+{
+ int ix = start_rix;
+
+#ifndef ETHR_READ_MEMORY_BARRIER_IS_FULL
+ if (pre_check)
+ ETHR_READ_MEMORY_BARRIER;
+ else
+#endif
+ ETHR_MEMORY_BARRIER;
+
+ do {
+ long act = rwmutex_freqread_rdrs_read(rwmtx, ix);
+ if (act != 0)
+ return EBUSY;
+ ix++;
+ if (ix == length)
+ ix = 0;
+ } while (ix != start_rix);
+
+ return 0;
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
+ ethr_ts_event *tse)
+{
+ long act;
+ /*
+ * Restore failed increment
+ */
+ act = rwmutex_freqread_rdrs_dec_read(rwmtx, tse);
+
+ ETHR_WRITE_MEMORY_BARRIER;
+
+ if (act == 0) {
+
+#ifndef ETHR_WRITE_MEMORY_BARRIER_IS_FULL
+ ETHR_READ_MEMORY_BARRIER;
+#endif
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if ((act & ETHR_RWMTX_W_FLG__) == 0
+ && act & (ETHR_RWMTX_WAIT_FLGS__|ETHR_RWMTX_R_PEND_UNLCK_MASK__)) {
+ /*
+ * We either got waiters, or someone else trying
+ * to read unlock which we might have to help.
+ */
+ rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 1, 0);
+ }
+ }
+}
+
+static int
+rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
+ long initial,
+ ethr_ts_event *tse,
+ int start_next_ix,
+ int check_before_try,
+ int try_write_lock)
+{
+ ethr_ts_event *tse_tmp;
+ long act = initial;
+ int six, res, length;
+
+ tse_tmp = tse;
+ if (!tse_tmp)
+ tse_tmp = ethr_get_ts_event();
+
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0)
+ goto check_waiters;
+
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ length = reader_groups_array_size;
+ six = tse_tmp->rgix;
+ }
+ else {
+ length = main_threads_array_size;
+ six = tse_tmp->mtix;
+ }
+ if (start_next_ix) {
+ six++;
+ if (six >= length)
+ six = 0;
+ }
+
+ if (!tse)
+ ethr_leave_ts_event(tse_tmp);
+
+ if (check_before_try) {
+ res = check_readers_array(rwmtx, six, length, 1);
+ if (res == EBUSY)
+ return try_write_lock ? EBUSY : 0;
+ }
+
+ while (1) {
+ long exp = act;
+ long new = act+1;
+
+ ETHR_ASSERT((act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ < ETHR_RWMTX_R_PEND_UNLCK_MASK__);
+
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ act = new;
+ break;
+ }
+ if (!try_write_lock) {
+ if (act == ETHR_RWMTX_W_FLG__ || act == 0)
+ return 0;
+ if ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ if ((act & ETHR_RWMTX_R_FLG__) == 0)
+ return 0;
+ }
+ else if ((act & ETHR_RWMTX_R_FLG__) == 0) {
+ if (act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ return 0;
+ goto check_waiters;
+ }
+ }
+ else {
+ if (act == 0)
+ goto tryrwlock;
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_WAIT_FLGS__))
+ return EBUSY;
+ }
+ }
+
+ res = check_readers_array(rwmtx, six, length, 0);
+ if (res == EBUSY) {
+ act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ if (act & ETHR_RWMTX_R_MASK__)
+ return try_write_lock ? EBUSY : 0;
+ }
+ else {
+ while (1) {
+ long exp = act;
+ long new = act;
+ new &= ~ETHR_RWMTX_R_FLG__;
+ new--;
+
+ ETHR_ASSERT(act & ETHR_RWMTX_R_PEND_UNLCK_MASK__);
+
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ if (new & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ return try_write_lock ? EBUSY : 0;
+ act = new;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Read unlock completed, but we have to check if
+ * threads have to be woken (or if we should try
+ * to write lock it).
+ */
+
+ if (act & ETHR_RWMTX_W_FLG__)
+ return try_write_lock ? EBUSY : 0;
+
+ if (act & ETHR_RWMTX_WAIT_FLGS__) {
+ check_waiters:
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ return try_write_lock ? EBUSY : 0;
+ }
+
+ if (!try_write_lock)
+ return 0;
+
+ tryrwlock:
+ /* Try to write lock it */
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__,
+ 0);
+ return act == 0 ? 0 : EBUSY;
+}
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+
+static ETHR_INLINE void
+rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ /*
+ * Restore failed increment
+ */
+ act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ }
+}
+
+#endif
+
+static void
+rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
+ long initial)
+{
+ long act = initial, exp;
+ int scnt, start_scnt;
+ ethr_ts_event *tse = NULL;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ start_scnt = scnt = initial_spincount(&rwmtx->mtxb);
+
+ /*
+ * Spin trying to read lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ rwmutex_incdec_restore_failed_tryrlock(rwmtx);
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+#endif
+
+ while (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ if (scnt == 0) {
+ tse = ethr_get_ts_event();
+ if (update_spincount(&rwmtx->mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(&rwmtx->mtxb, tse, scnt,
+ ETHR_RWMTX_R_WAIT_FLG__, 1, 0);
+ goto done;
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ scnt--;
+ }
+ exp = act;
+ ETHR_ASSERT(scnt >= 0);
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_inc_read(&rwmtx->mtxb.flgs);
+ if ((act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) == 0)
+ goto done; /* Got it */
+#else
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp)
+ goto done; /* Got it */
+#endif
+ }
+
+ done:
+ if (tse)
+ ethr_leave_ts_event(tse);
+}
+
+static void
+rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
+ ethr_ts_event *tse,
+ long initial)
+{
+ long act = initial;
+ int scnt, start_scnt;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ start_scnt = scnt = initial_spincount(&rwmtx->mtxb);
+
+ /*
+ * Spin trying to read lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+
+ rwmutex_freqread_restore_failed_tryrlock(rwmtx, tse);
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ while (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ if (scnt == 0) {
+ if (update_spincount(&rwmtx->mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(&rwmtx->mtxb, tse, scnt,
+ ETHR_RWMTX_R_WAIT_FLG__, 1, 1);
+ return; /* Got it */
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ scnt--;
+ }
+
+ ETHR_ASSERT(scnt >= 0);
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if (act == ETHR_RWMTX_R_FLG__)
+ return; /* Got it */
+
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__))
+ break; /* Busy (need to restore inc) */
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ return; /* Got it */
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ return; /* Got it */
+ }
+ }
+}
+
+static void
+rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+{
+ write_lock_wait(&rwmtx->mtxb, initial, 1, 0);
+}
+
+static void
+rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+{
+ write_lock_wait(&rwmtx->mtxb, initial, 1, 1);
+}
+
+static ETHR_INLINE void
+rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, int act_initial)
+{
+ long act, act_mask;
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
+ /* r pend unlock mask may vary and must be retained */
+ act_mask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ }
+ else {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ /* rs mask may vary and must be retained */
+ act_mask = ETHR_RWMTX_RS_MASK__;
+#else
+ /* rs mask always zero */
+ ETHR_ASSERT((act_initial & ETHR_RWMTX_RS_MASK__) == 0);
+ ethr_atomic_set(&rwmtx->mtxb.flgs, new_initial);
+ return;
+#endif
+ }
+
+ act = act_initial;
+ while (1) {
+ long exp = act;
+ long new = new_initial + (act & act_mask);
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ exp = act;
+ }
+}
+
+#ifdef ETHR_DEBUG
+
+static void
+dbg_unlock_wake(ethr_rwmutex *rwmtx,
+ int have_w,
+ ethr_ts_event *tse)
+{
+ long exp, act, imask;
+
+ exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
+
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
+ imask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ else {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ imask = ETHR_RWMTX_RS_MASK__;
+#else
+ imask = 0;
+#endif
+ }
+
+ ETHR_ASSERT(tse);
+
+ if (is_w_waiter(tse)) {
+
+ exp |= ETHR_RWMTX_W_WAIT_FLG__;
+ if (rwmtx->rq_end) {
+ exp |= ETHR_RWMTX_R_WAIT_FLG__;
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((exp & ~imask) == (act & ~imask));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+
+ }
+ else {
+
+ exp |= ETHR_RWMTX_R_WAIT_FLG__;
+ if (rwmtx->rq_end->next != rwmtx->mtxb.q)
+ exp |= ETHR_RWMTX_W_WAIT_FLG__;
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((exp & ~imask) == (act & ~imask));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+
+ }
+}
+
+#endif
+
+static void
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial)
+{
+ long new, act = initial;
+ ethr_ts_event *tse;
+
+ if ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ if (!have_w)
+ return;
+ else {
+ while ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ long exp = act;
+ new = exp & ~ETHR_RWMTX_W_FLG__;
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ return;
+ }
+ }
+ }
+
+ ETHR_MTX_Q_LOCK(&rwmtx->mtxb.qlck);
+ tse = rwmtx->mtxb.q;
+
+ if (!have_w) {
+ if (!tse) {
+#ifdef ETHR_DEBUG
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
+#endif
+ goto already_served;
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act & ~ETHR_RWMTX_WAIT_FLGS__) {
+ already_served:
+ ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
+ return;
+ }
+ }
+
+#ifdef ETHR_DEBUG
+ dbg_unlock_wake(rwmtx, have_w, tse);
+#endif
+
+ if (is_w_waiter(tse)) {
+
+ if (!have_w) {
+ act = ethr_atomic_read_bor(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__);
+ ETHR_ASSERT((act & ~ETHR_RWMTX_WAIT_FLGS__) == 0);
+ ETHR_ASSERT(act & ETHR_RWMTX_W_WAIT_FLG__);
+ act |= ETHR_RWMTX_W_FLG__;
+ }
+
+ /*
+ * If we have multiple write waiters, there
+ * is no need to modify mtxb->flgs; otherwise,
+ * we need to clear the write wait bit...
+ */
+ if (!multiple_w_waiters(rwmtx)) {
+ new = ETHR_RWMTX_W_FLG__;
+ if (tse->next != rwmtx->mtxb.q) {
+ ETHR_ASSERT(tse->next->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ new |= ETHR_RWMTX_R_WAIT_FLG__;
+ }
+
+ rwlock_wake_set_flags(rwmtx, new, act);
+ }
+
+ wake_writer(&rwmtx->mtxb, 1);
+ }
+ else {
+ int rs;
+
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL) {
+ rs = rwmtx->tdata.rs;
+ new = (long) rs;
+ rwmtx->tdata.rs = 0;
+ }
+ else {
+ ethr_rwmutex_type type = rwmtx->type;
+ int length = (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ
+ ? reader_groups_array_size
+ : main_threads_array_size);
+ int ix;
+
+ rs = 0;
+ for (ix = 0; ix < length; ix++) {
+ int wrs = rwmtx->tdata.ra[ix].data.waiting_readers;
+ rwmtx->tdata.ra[ix].data.waiting_readers = 0;
+ ETHR_ASSERT(wrs >= 0);
+ if (wrs) {
+ rs += wrs;
+ rwmutex_freqread_rdrs_add(rwmtx, type, ix, wrs);
+ }
+ }
+ new = ETHR_RWMTX_R_FLG__;
+ }
+
+ if (rwmtx->rq_end->next != rwmtx->mtxb.q)
+ new |= ETHR_RWMTX_W_WAIT_FLG__;
+
+ rwlock_wake_set_flags(rwmtx, new, act);
+ wake_readers(rwmtx, rs);
+ }
+}
+
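+/*
+ * The readers array is allocated with one extra element so that it can
+ * be cache line aligned. Since the raw allocation may be unaligned,
+ * the distance back to the start of the allocation is remembered in
+ * data.byte_offset, which free_readers_array() uses to recover the
+ * pointer originally returned by the allocator.
+ */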
+static ethr_rwmtx_readers_array__ *
+alloc_readers_array(int length, ethr_rwmutex_lived lived)
+{
+ ethr_rwmtx_readers_array__ *ra;
+ size_t sz;
+ void *mem;
+
+ sz = sizeof(ethr_rwmtx_readers_array__) * (length + 1);
+
+ switch (lived) {
+ case ETHR_RWMUTEX_LONG_LIVED:
+ mem = ethr_mem__.ll.alloc(sz);
+ break;
+ case ETHR_RWMUTEX_SHORT_LIVED:
+ mem = ethr_mem__.sl.alloc(sz);
+ break;
+ default:
+ mem = ethr_mem__.std.alloc(sz);
+ break;
+ }
+ if (!mem)
+ return NULL;
+
+ if ((((unsigned long) mem) & ETHR_CACHE_LINE_MASK) == 0) {
+ ra = (ethr_rwmtx_readers_array__ *) mem;
+ ra->data.byte_offset = 0;
+ }
+ else {
+ ra = ((ethr_rwmtx_readers_array__ *)
+ ((((unsigned long) mem) & ~ETHR_CACHE_LINE_MASK)
+ + ETHR_CACHE_LINE_SIZE));
+ ra->data.byte_offset = (int) ((unsigned long) ra
+ - (unsigned long) mem);
+ }
+ ra->data.lived = lived;
+ return ra;
+}
+
+static void
+free_readers_array(ethr_rwmtx_readers_array__ *ra)
+{
+ void *ptr = (void *) (((char *) ra) - ra->data.byte_offset);
+ switch (ra->data.lived) {
+ case ETHR_RWMUTEX_LONG_LIVED:
+ ethr_mem__.ll.free(ptr);
+ break;
+ case ETHR_RWMUTEX_SHORT_LIVED:
+ ethr_mem__.sl.free(ptr);
+ break;
+ default:
+ ethr_mem__.std.free(ptr);
+ break;
+ }
+}
+
+int
+ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
+{
+ int res;
+ ethr_rwmtx_readers_array__ *ra = NULL;
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(rwmtx);
+ rwmtx->rq_end = NULL;
+ rwmtx->type = opt ? opt->type : ETHR_RWMUTEX_TYPE_NORMAL;
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ if (main_threads_array_size <= reader_groups_array_size) {
+ /* No point using reader groups... */
+ rwmtx->type = ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ;
+ }
+ /* Fall through */
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ int length;
+
+ length = (rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+ ? main_threads_array_size
+ : reader_groups_array_size);
+
+ if (length == 1) {
+ /* No point using a frequent reader type... */
+ rwmtx->type = ETHR_RWMUTEX_TYPE_NORMAL;
+ }
+ else {
+ int ix;
+ ra = alloc_readers_array(length,
+ (opt
+ ? opt->lived
+ : ETHR_RWMUTEX_UNKNOWN_LIVED));
+ if (!ra) {
+ res = ENOMEM;
+ goto error;
+ }
+
+ rwmtx->tdata.ra = ra;
+
+ for (ix = 0; ix < length; ix++) {
+ ethr_atomic_init(&rwmtx->tdata.ra[ix].data.readers, 0);
+ rwmtx->tdata.ra[ix].data.waiting_readers = 0;
+ }
+ break;
+ }
+ }
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ rwmtx->tdata.rs = 0;
+ break;
+ default:
+ res = EINVAL;
+ goto error;
+ }
+ res = mtxb_init(&rwmtx->mtxb,
+ default_rwmtx_main_spincount,
+ opt ? opt->main_spincount : -1,
+ default_rwmtx_aux_spincount,
+ opt ? opt->aux_spincount : -1);
+ if (res == 0)
+ return 0;
+
+ error:
+
+ if (ra)
+ free_readers_array(ra);
+
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return res;
+}
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+ return ethr_rwmutex_init_opt(rwmtx, NULL);
+}
+
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
+ long act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act == ETHR_RWMTX_R_FLG__)
+ rwmutex_try_complete_runlock(rwmtx, act, NULL, 0, 0, 0);
+ }
+ res = mtxb_destroy(&rwmtx->mtxb);
+ if (res != 0)
+ return res;
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
+ free_readers_array(rwmtx->tdata.ra);
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return 0;
+}
+
+#define ETHR_MAX_TRYRLOCK_TRIES 5
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+ int res = 0;
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL: {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
+ res = EBUSY;
+ else {
+ act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ rwmutex_incdec_restore_failed_tryrlock(rwmtx);
+ res = EBUSY;
+ }
+ }
+#else
+ long exp = 0;
+ int tries = 0;
+
+ while (1) {
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp) {
+ res = 0;
+ break;
+ }
+ if (tries > ETHR_MAX_TRYRLOCK_TRIES
+ || (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))) {
+ res = EBUSY;
+ break;
+ }
+ tries++;
+ exp = act;
+ }
+#endif
+ break;
+ }
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+
+ if (act != ETHR_RWMTX_R_FLG__) {
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ rwmutex_freqread_restore_failed_tryrlock(rwmtx, tse);
+ res = EBUSY;
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ break;
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ }
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(&rwmtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ return res;
+}
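
For the NORMAL type above, the try operation bumps the reader count with a
bounded number of compare-and-swap retries (ETHR_MAX_TRYRLOCK_TRIES): a
trylock must never block, so it gives up with EBUSY both when it observes a
writer or waiting writer and when other readers keep winning the CAS race.
A self-contained sketch of that bounded-retry pattern using C11 atomics;
the flag constant here is a placeholder, not the ethread one:

    #include <stdatomic.h>
    #include <errno.h>

    #define W_FLAGS   0x3UL /* placeholder writer/writer-wait bits */
    #define MAX_TRIES 5

    /* Try to take a read lock by incrementing the low reader-count bits.
     * Returns 0 on success, EBUSY if a writer is active/queued or other
     * readers keep interfering with the CAS. */
    static int try_read_lock(atomic_ulong *flgs)
    {
        unsigned long exp = 0;
        int tries = 0;
        for (;;) {
            unsigned long act = exp;
            if (atomic_compare_exchange_strong(flgs, &act, exp + 1))
                return 0; /* installed exp+1: one more reader */
            if ((act & W_FLAGS) || tries++ >= MAX_TRIES)
                return EBUSY; /* writer seen, or too much churn */
            exp = act; /* retry against the freshly read value */
        }
    }
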
+
+void
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL: {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
+ rwmutex_normal_rlock_wait(rwmtx, act);
+#else
+ long exp = 0;
+
+ while (1) {
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp) {
+ break;
+ }
+
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ rwmutex_normal_rlock_wait(rwmtx, act);
+ break;
+ }
+ exp = act;
+ }
+#endif
+ break;
+ }
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+
+ if (act != ETHR_RWMTX_R_FLG__) {
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ rwmutex_freqread_rlock_wait(rwmtx, tse, act);
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ break;
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ }
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK(&rwmtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
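
The frequent-read types invert the usual order of operations: a reader
first publishes itself by incrementing a per-thread or per-reader-group
counter (so uncontended read locks never hit one shared hot cache line),
issues a full memory barrier, and only then inspects the shared flag word;
if a writer is present, the increment is rolled back (tryrlock) or the
thread queues up (rlock). A simplified sketch of the increment-then-check
step, assuming a plain array of per-slot counters and treating any nonzero
flag word as "writer present" (the real code also tolerates
ETHR_RWMTX_R_FLG__ being set):

    #include <stdatomic.h>

    #define N_SLOTS 8 /* assumed number of reader slots/groups */

    typedef struct {
        atomic_long rdrs[N_SLOTS]; /* per-slot reader counts */
        atomic_long flgs;          /* writer/waiter flags, rarely written */
    } freqread_sketch;

    /* Optimistic read lock: publish ourselves as a reader first, then
     * look for writers. Returns 1 on success, 0 if the caller must undo
     * its increment and wait. */
    static int freqread_rlock_attempt(freqread_sketch *rw, int slot)
    {
        atomic_fetch_add(&rw->rdrs[slot], 1);      /* publish reader */
        atomic_thread_fence(memory_order_seq_cst); /* inc before read */
        if (atomic_load(&rw->flgs) != 0) {
            atomic_fetch_sub(&rw->rdrs[slot], 1);  /* roll back */
            return 0;
        }
        return 1;
    }
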
+
+void
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(&rwmtx->mtxb);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_dec_read_relb(&rwmtx->mtxb.flgs);
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ }
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ act = rwmutex_freqread_rdrs_dec_read_relb(rwmtx, tse);
+
+ ETHR_ASSERT(act >= 0);
+
+ ETHR_WRITE_MEMORY_BARRIER;
+
+ if (act == 0) {
+
+#ifndef ETHR_WRITE_MEMORY_BARRIER_IS_FULL
+ ETHR_READ_MEMORY_BARRIER;
+#endif
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if ((act & ETHR_RWMTX_W_FLG__) == 0
+ && (act & (ETHR_RWMTX_WAIT_FLGS__
+ | ETHR_RWMTX_R_PEND_UNLCK_MASK__))) {
+ rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 0, 0);
+ }
+
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+ int res = 0;
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ res = EBUSY;
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+
+ res = 0;
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ do {
+
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_WAIT_FLGS__)) {
+ res = EBUSY;
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_MASK__) {
+ res = rwmutex_try_complete_runlock(rwmtx, act, NULL,
+ 0, 1, 1);
+ break;
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+
+ } while (act != 0);
+
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&rwmtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ return res;
+}
+
+void
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ rwmutex_normal_rwlock_wait(rwmtx, act);
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ do {
+
+ if (act != 0) {
+ rwmutex_freqread_rwlock_wait(rwmtx, act);
+ break;
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+
+ } while (act != 0);
+
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&rwmtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+}
+
+void
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&rwmtx->mtxb);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs,
+ 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ rwmutex_unlock_wake(rwmtx, 1, act);
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+ act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs, 0,
+ ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ rwmutex_unlock_wake(rwmtx, 1, act);
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
+#else
+/* -- pthread read/write mutex --------------------------------------------- */
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (!rwmtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ return pthread_rwlock_init(&rwmtx->pt_rwlock, write_pref_attr);
+}
+
+int
+ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
+{
+ return ethr_rwmutex_init(rwmtx);
+}
+
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = pthread_rwlock_destroy(&rwmtx->pt_rwlock);
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return res;
+}
+
+/* -- Exported symbols of inline functions --------------------------------- */
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_tryrlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_rlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_runlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_tryrwlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_rwlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_rwunlock__(rwmtx);
+}
+
+#endif /* pthread */
+
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+static void
+hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+    long flgs = ethr_atomic_read(&mtxb->flgs);
+
+ ETHR_MTX_HARD_ASSERT(!(flgs & ETHR_RWMTX_R_WAIT_FLG__) || is_rwmtx);
+
+ if (!(flgs & ETHR_RWMTX_WAIT_FLGS__)) {
+ ETHR_MTX_HARD_ASSERT(!mtxb->q);
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rq_end);
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rs);
+ }
+ }
+ else {
+ ethr_ts_event *tse;
+ int ws = 0, rs = 0, rsf = 0, ref = 0;
+
+ ETHR_MTX_HARD_ASSERT(mtxb->q);
+
+ tse = mtxb->q;
+
+ do {
+ long type;
+
+ ETHR_MTX_HARD_ASSERT(tse->next->prev == tse);
+ ETHR_MTX_HARD_ASSERT(tse->prev->next == tse);
+
+ type = ethr_atomic_read(&tse->uaflgs);
+ ETHR_MTX_HARD_ASSERT(type == tse->uflgs);
+ switch (type) {
+ case ETHR_RWMTX_W_WAIT_FLG__:
+ ws++;
+ break;
+ case ETHR_RWMTX_R_WAIT_FLG__: {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(is_rwmtx);
+ if (!rsf)
+ rsf = 1;
+ ETHR_MTX_HARD_ASSERT(!ref);
+ if (rwmtx->rq_end == tse) {
+ ETHR_MTX_HARD_ASSERT(
+ tse->next == rwmtx->mtxb.q
+ || tse->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ref = 1;
+ }
+ rs++;
+ break;
+ }
+ default:
+ ETHR_MTX_HARD_ASSERT(! "invalid wait type found");
+ }
+
+ tse = tse->next;
+ } while (tse != mtxb->q);
+
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(rs == rwmtx->rs);
+ }
+
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ ETHR_MTX_HARD_ASSERT(ws == mtxb->ws);
+#endif
+
+ if (flgs & ETHR_RWMTX_W_WAIT_FLG__)
+ ETHR_MTX_HARD_ASSERT(ws);
+ else
+ ETHR_MTX_HARD_ASSERT(!ws);
+
+ if (flgs & ETHR_RWMTX_R_WAIT_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(is_rwmtx);
+ ETHR_MTX_HARD_ASSERT(rwmtx->rq_end);
+ ETHR_MTX_HARD_ASSERT(rsf);
+ ETHR_MTX_HARD_ASSERT(ref);
+ ETHR_MTX_HARD_ASSERT(rs);
+ }
+ else {
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rq_end);
+ }
+ ETHR_MTX_HARD_ASSERT(!rsf);
+ ETHR_MTX_HARD_ASSERT(!ref);
+ ETHR_MTX_HARD_ASSERT(!rs);
+ }
+ }
+}
+
+#elif defined(ETHR_MTX_HARD_DEBUG_WSQ)
+
+static void
+hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+ int ws = 0;
+ int rs = 0;
+
+ if (mtxb->q) {
+ ethr_ts_event *tse = mtxb->q;
+ do {
+ switch (tse->uflgs) {
+ case ETHR_RWMTX_W_WAIT_FLG__:
+ ws++;
+ break;
+ case ETHR_RWMTX_R_WAIT_FLG__:
+ rs++;
+ break;
+ default:
+ ETHR_MTX_HARD_ASSERT(0);
+ break;
+ }
+ tse = tse->next;
+ } while (tse != mtxb->q);
+ }
+
+ ETHR_MTX_HARD_ASSERT(mtxb->ws == ws);
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(rwmtx->rs == rs);
+ }
+}
+
+#endif
+
+#endif
diff --git a/erts/lib_src/common/ethread.c b/erts/lib_src/common/ethread.c
deleted file mode 100644
index 9c88233934..0000000000
--- a/erts/lib_src/common/ethread.c
+++ /dev/null
@@ -1,3369 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2010. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Description: A Thread library for use in the ERTS and other OTP
- * applications.
- * Author: Rickard Green
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#undef ETHR_STACK_GUARD_SIZE
-
-#if defined(ETHR_PTHREADS)
-
-#ifdef ETHR_TIME_WITH_SYS_TIME
-# include <time.h>
-# include <sys/time.h>
-#else
-# ifdef ETHR_HAVE_SYS_TIME_H
-# include <sys/time.h>
-# else
-# include <time.h>
-# endif
-#endif
-#include <sys/types.h>
-#include <unistd.h>
-#include <signal.h>
-
-#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
-# define ETHR_STACK_GUARD_SIZE (pagesize)
-#endif
-
-#elif defined(ETHR_WIN32_THREADS)
-
-#undef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <process.h>
-#include <winerror.h>
-
-#else
-#error "Missing thread implementation"
-#endif
-
-#include <limits.h>
-
-#define ETHR_FORCE_INLINE_FUNCS
-#define ETHR_INLINE_FUNC_NAME_(X) X ## __
-#include "ethread.h"
-
-#ifndef ETHR_HAVE_ETHREAD_DEFINES
-#error Missing configure defines
-#endif
-
-/*
- * ----------------------------------------------------------------------------
- * Common stuff
- * ----------------------------------------------------------------------------
- */
-
-#define ETHR_MAX_THREADS 2048 /* Has to be an even power of 2 */
-
-static int ethr_not_inited = 1;
-
-#define ASSERT(A) ETHR_ASSERT((A))
-
-static void *(*allocp)(size_t) = malloc;
-static void *(*reallocp)(void *, size_t) = realloc;
-static void (*freep)(void *) = free;
-
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-#endif
-
-void *(*thread_create_prepare_func)(void) = NULL;
-void (*thread_create_parent_func)(void *) = NULL;
-void (*thread_create_child_func)(void *) = NULL;
-
-typedef struct ethr_xhndl_list_ ethr_xhndl_list;
-struct ethr_xhndl_list_ {
- ethr_xhndl_list *next;
- void (*funcp)(void);
-};
-
-static size_t pagesize;
-#define ETHR_PAGE_ALIGN(SZ) (((((size_t) (SZ)) - 1)/pagesize + 1)*pagesize)
-static size_t min_stack_size; /* kilo words */
-static size_t max_stack_size; /* kilo words */
-#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
-#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
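
Stack limits are kept in "kilowords" so the same numbers scale with the
pointer width: ETHR_B2KW rounds a byte count up to whole kilowords and
ETHR_KW2B converts back. A worked example of the arithmetic on a 64-bit
target, where one kiloword is 8*1024 = 8192 bytes:

    #include <assert.h>
    #include <stddef.h>

    #define B2KW(B)  ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
    #define KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)

    int main(void)
    {
        if (sizeof(void *) == 8) {
            assert(B2KW(20000) == 3); /* 19999/8192 + 1 == 3 kilowords */
            assert(KW2B(3) == 24576); /* and back: 3 kw == 24576 bytes */
        }
        return 0;
    }
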
-
-ethr_mutex xhndl_mtx;
-ethr_xhndl_list *xhndl_list;
-
-static int
-init_common(ethr_init_data *id)
-{
- int res;
- if (id) {
- allocp = id->alloc;
- reallocp = id->realloc;
- freep = id->free;
- thread_create_prepare_func = id->thread_create_prepare_func;
- thread_create_parent_func = id->thread_create_parent_func;
- thread_create_child_func = id->thread_create_child_func;
- }
- if (!allocp || !reallocp || !freep)
- return EINVAL;
-
-#ifdef _SC_PAGESIZE
- pagesize = (size_t) sysconf(_SC_PAGESIZE);
-#elif defined(HAVE_GETPAGESIZE)
- pagesize = (size_t) getpagesize();
-#else
- pagesize = (size_t) 4*1024; /* Guess 4 KB */
-#endif
-
- /* User needs at least 4 KB */
- min_stack_size = 4*1024;
-#if SIZEOF_VOID_P == 8
- /* Double that on 64-bit archs */
- min_stack_size *= 2;
-#endif
-    /* On some systems, the system itself uses up to about 4 KB of the stack */
- min_stack_size += 4*1024;
- /* There should be room for signal handlers */
-#ifdef SIGSTKSZ
- min_stack_size += SIGSTKSZ;
-#else
- min_stack_size += pagesize;
-#endif
- /* The system may think that we need more stack */
-#if defined(PTHREAD_STACK_MIN)
- if (min_stack_size < PTHREAD_STACK_MIN)
- min_stack_size = PTHREAD_STACK_MIN;
-#elif defined(_SC_THREAD_STACK_MIN)
- {
- size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
- if (min_stack_size < thr_min_stk_sz)
- min_stack_size = thr_min_stk_sz;
- }
-#endif
-    /* On some platforms, the guard is included in the stack size
-       passed when creating threads */
-#ifdef ETHR_STACK_GUARD_SIZE
- min_stack_size += ETHR_STACK_GUARD_SIZE;
-#endif
- min_stack_size = ETHR_PAGE_ALIGN(min_stack_size);
-
- min_stack_size = ETHR_B2KW(min_stack_size);
-
- max_stack_size = 32*1024*1024;
-#if SIZEOF_VOID_P == 8
- max_stack_size *= 2;
-#endif
- max_stack_size = ETHR_B2KW(max_stack_size);
-
- xhndl_list = NULL;
-
- res = ethr_mutex_init(&xhndl_mtx);
- if (res != 0)
- return res;
-
- res = ethr_mutex_set_forksafe(&xhndl_mtx);
- if (res != 0 && res != ENOTSUP)
- return res;
-
- return 0;
-}
-
-int
-ethr_install_exit_handler(void (*funcp)(void))
-{
- ethr_xhndl_list *xhp;
- int res;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- if (!funcp)
- return EINVAL;
-
- xhp = (ethr_xhndl_list *) (*allocp)(sizeof(ethr_xhndl_list));
- if (!xhp)
- return ENOMEM;
-
- res = ethr_mutex_lock__(&xhndl_mtx);
- if (res != 0) {
- (*freep)((void *) xhp);
- return res;
- }
-
- xhp->funcp = funcp;
- xhp->next = xhndl_list;
- xhndl_list = xhp;
-
- res = ethr_mutex_unlock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- return res;
-}
-
-static void
-run_exit_handlers(void)
-{
- int res;
- ethr_xhndl_list *xhp;
-
- res = ethr_mutex_lock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- xhp = xhndl_list;
-
- res = ethr_mutex_unlock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- for (; xhp; xhp = xhp->next)
- (*xhp->funcp)();
-}
-
-#if defined(ETHR_PTHREADS)
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * pthread implementation *
-\* */
-
-typedef struct {
- pthread_mutex_t mtx;
- pthread_cond_t cnd;
- int initialized;
- void *(*thr_func)(void *);
- void *arg;
- void *prep_func_res;
-} thr_wrap_data_;
-
-static int no_ethreads;
-static ethr_mutex no_ethrs_mtx;
-
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-#define ETHR_HAVE_PTHREAD_ATFORK 0
-#endif
-
-#if !ETHR_HAVE_PTHREAD_ATFORK
-#warning "Cannot enforce fork-safety"
-#endif
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-static pthread_rwlockattr_t write_pref_attr_data;
-static pthread_rwlockattr_t *write_pref_attr;
-#endif
-
-/*
- * ----------------------------------------------------------------------------
- * Static functions
- * ----------------------------------------------------------------------------
- */
-
-/*
- * Functions with the safe_ prefix abort on failure. They are to be
- * used when we cannot recover after failure.
- */
-
-static ETHR_INLINE void
-safe_mutex_lock(pthread_mutex_t *mtxp)
-{
- int res = pthread_mutex_lock(mtxp);
- if (res != 0)
- abort();
-}
-
-static ETHR_INLINE void
-safe_mutex_unlock(pthread_mutex_t *mtxp)
-{
- int res = pthread_mutex_unlock(mtxp);
- if (res != 0)
- abort();
-}
-
-static ETHR_INLINE void
-safe_cond_signal(pthread_cond_t *cndp)
-{
- int res = pthread_cond_signal(cndp);
- if (res != 0)
- abort();
-}
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-static volatile int rec_mtx_attr_need_init = 1;
-static pthread_mutexattr_t rec_mtx_attr;
-
-static int init_rec_mtx_attr(void);
-
-#endif
-
-#if ETHR_HAVE_PTHREAD_ATFORK
-
-static ethr_mutex forksafe_mtx = ETHR_MUTEX_INITER;
-
-static void lock_mutexes(void)
-{
- ethr_mutex *m = &forksafe_mtx;
- do {
-
- safe_mutex_lock(&m->pt_mtx);
-
- m = m->next;
-
- } while (m != &forksafe_mtx);
-}
-
-static void unlock_mutexes(void)
-{
- ethr_mutex *m = forksafe_mtx.prev;
- do {
-
- safe_mutex_unlock(&m->pt_mtx);
-
- m = m->prev;
-
- } while (m->next != &forksafe_mtx);
-}
-
-#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
-
-static void reinit_mutexes(void)
-{
- ethr_mutex *m = forksafe_mtx.prev;
- do {
- pthread_mutexattr_t *attrp = NULL;
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
- if (m->is_rec_mtx) {
- if (rec_mtx_attr_need_init) {
- int res = init_rec_mtx_attr();
- if (res != 0)
- abort();
- }
- attrp = &rec_mtx_attr;
- }
-#endif
- if (pthread_mutex_init(&m->pt_mtx, attrp) != 0)
- abort();
-
- m = m->prev;
-
- } while (m->next != &forksafe_mtx);
-}
-
-#endif
-
-static int
-init_forksafe(void)
-{
- static int init_done = 0;
- int res = 0;
-
- if (init_done)
- return res;
-
- forksafe_mtx.prev = &forksafe_mtx;
- forksafe_mtx.next = &forksafe_mtx;
-
- res = pthread_atfork(lock_mutexes,
- unlock_mutexes,
-#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
- reinit_mutexes
-#else
- unlock_mutexes
-#endif
- );
-
- init_done = 1;
- return res;
-}
-
-#endif
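
The fork-safety machinery hangs every forksafe mutex on a circular list
rooted at forksafe_mtx and registers the handlers above with
pthread_atfork(): the prepare handler locks every listed mutex before the
fork, and the parent and child handlers unlock them afterwards (or, where
ETHR_INIT_MUTEX_IN_CHILD_AT_FORK is set, reinitialize them in the child).
That way no mutex can be forked while held by another thread. A minimal
single-mutex sketch of the same idea, with illustrative names:

    #include <pthread.h>

    static pthread_mutex_t guarded = PTHREAD_MUTEX_INITIALIZER;

    static void before_fork(void) { pthread_mutex_lock(&guarded); }
    static void after_fork(void)  { pthread_mutex_unlock(&guarded); }

    /* Call once at startup: the child can then never inherit 'guarded'
     * locked by a thread that does not exist in the child. */
    static int install_fork_handlers(void)
    {
        return pthread_atfork(before_fork, after_fork, after_fork);
    }
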
-
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE)
-
-#define SET_REC_MUTEX_ATTR(AP) \
- pthread_mutexattr_settype((AP), PTHREAD_MUTEX_RECURSIVE);
-
-#elif defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-
-#define SET_REC_MUTEX_ATTR(AP) \
- pthread_mutexattr_setkind_np((AP), PTHREAD_MUTEX_RECURSIVE_NP);
-
-#else
-
-#error "Don't know how to set recursive mutex attributes"
-
-#endif
-
-static int
-init_rec_mtx_attr(void)
-{
- int res, mres;
- static pthread_mutex_t attrinit_mtx = PTHREAD_MUTEX_INITIALIZER;
-
- mres = pthread_mutex_lock(&attrinit_mtx);
- if (mres != 0)
- return mres;
- /* Got here under race conditions; check again ... */
- if (!rec_mtx_attr_need_init)
- res = 0;
- else {
- res = pthread_mutexattr_init(&rec_mtx_attr);
- if (res == 0) {
- res = SET_REC_MUTEX_ATTR(&rec_mtx_attr);
- if (res == 0)
- rec_mtx_attr_need_init = 0;
- else
- (void) pthread_mutexattr_destroy(&rec_mtx_attr);
- }
- }
-
- mres = pthread_mutex_unlock(&attrinit_mtx);
- if (mres != 0)
- return mres;
- return res;
-}
-
-#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-
-static ETHR_INLINE void thr_exit_cleanup(void)
-{
- run_exit_handlers();
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- ASSERT(no_ethreads > 0);
- no_ethreads--;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
-}
-
-static void *thr_wrapper(void *vtwd)
-{
- void *res;
- thr_wrap_data_ *twd = (thr_wrap_data_ *) vtwd;
- void *(*thr_func)(void *) = twd->thr_func;
- void *arg = twd->arg;
-
- safe_mutex_lock(&twd->mtx);
-
- if (thread_create_child_func)
- (*thread_create_child_func)(twd->prep_func_res);
-
- twd->initialized = 1;
-
- safe_cond_signal(&twd->cnd);
- safe_mutex_unlock(&twd->mtx);
-
- res = (*thr_func)(arg);
- thr_exit_cleanup();
- return res;
-}
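
Thread creation uses a small handshake: ethr_thr_create() hands the child a
mutex/condvar pair in the stack-allocated thr_wrap_data_, and thr_wrapper()
above signals the condition once the child-creation callback has run, so
the creator neither returns early nor lets thr_wrap_data_ go out of scope
while the child is still reading it. A standalone sketch of that startup
handshake, with illustrative names:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t mtx;
        pthread_cond_t  cnd;
        int             initialized;
    } start_sync;

    static void *child(void *arg)
    {
        start_sync *ss = arg;
        pthread_mutex_lock(&ss->mtx);
        /* ... per-thread setup would run here ... */
        ss->initialized = 1;
        pthread_cond_signal(&ss->cnd);
        pthread_mutex_unlock(&ss->mtx);
        /* real work starts only after the parent has been released */
        return NULL;
    }

    static int spawn_synced(pthread_t *tid, start_sync *ss)
    {
        int res;
        pthread_mutex_init(&ss->mtx, NULL);
        pthread_cond_init(&ss->cnd, NULL);
        ss->initialized = 0;
        pthread_mutex_lock(&ss->mtx);
        res = pthread_create(tid, NULL, child, ss);
        while (res == 0 && !ss->initialized) /* wait for child setup */
            res = pthread_cond_wait(&ss->cnd, &ss->mtx);
        pthread_mutex_unlock(&ss->mtx);
        return res;
    }
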
-
-
-/*
- * ----------------------------------------------------------------------------
- * Exported functions
- * ----------------------------------------------------------------------------
- */
-
-int
-ethr_init(ethr_init_data *id)
-{
- int res;
-
- if (!ethr_not_inited)
- return EINVAL;
-
- ethr_not_inited = 0;
-
- res = init_common(id);
- if (res != 0)
- goto error;
-
-#if ETHR_HAVE_PTHREAD_ATFORK
- init_forksafe();
-#endif
-
- no_ethreads = 1;
- res = ethr_mutex_init(&no_ethrs_mtx);
- if (res != 0)
- goto error;
- res = ethr_mutex_set_forksafe(&no_ethrs_mtx);
- if (res != 0 && res != ENOTSUP)
- goto error;
-
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
- {
- int i;
- for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
-#ifdef ETHR_HAVE_PTHREAD_SPIN_LOCK
- res = pthread_spin_init(&ethr_atomic_protection__[i].u.spnlck, 0);
-#else
- res = ethr_mutex_init(&ethr_atomic_protection__[i].u.mtx);
-#endif
- if (res != 0)
- goto error;
- }
- }
-#endif
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-#if defined(ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP) \
- && defined(ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
- res = pthread_rwlockattr_init(&write_pref_attr_data);
- if (res != 0)
- goto error;
- res = pthread_rwlockattr_setkind_np(
- &write_pref_attr_data,
- PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
- if (res != 0)
- goto error;
- write_pref_attr = &write_pref_attr_data;
-#else
- write_pref_attr = NULL;
-#endif
-#endif
-
-
- return 0;
-
- error:
- ethr_not_inited = 1;
- return res;
-
-}
-
-int
-ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
- ethr_thr_opts *opts)
-{
- thr_wrap_data_ twd;
- pthread_attr_t attr;
- int res, dres;
- int use_stack_size = (opts && opts->suggested_stack_size >= 0
- ? opts->suggested_stack_size
- : -1 /* Use system default */);
-
-#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
- if (use_stack_size < 0)
- use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
-#endif
-
- twd.initialized = 0;
- twd.thr_func = func;
- twd.arg = arg;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!tid || !func) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
-    /* Call the prepare function if it exists */
- if (thread_create_prepare_func)
- twd.prep_func_res = (*thread_create_prepare_func)();
- else
- twd.prep_func_res = NULL;
-
-    /* Set some thread attributes */
- res = pthread_attr_init(&attr);
- if (res != 0)
- goto cleanup_parent_func;
- res = pthread_mutex_init(&twd.mtx, NULL);
- if (res != 0)
- goto cleanup_attr_destroy;
- res = pthread_cond_init(&twd.cnd, NULL);
- if (res != 0)
- goto cleanup_mutex_destroy;
-
- /* Schedule child thread in system scope (if possible) ... */
- res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
- if (res != 0 && res != ENOTSUP)
- goto cleanup_cond_destroy;
-
- if (use_stack_size >= 0) {
- size_t suggested_stack_size = (size_t) use_stack_size;
- size_t stack_size;
-#ifdef DEBUG
-        suggested_stack_size /= 2; /* Make sure we have some margin */
-#endif
-#ifdef ETHR_STACK_GUARD_SIZE
-        /* On some platforms, the guard is included in the stack size
-           passed when creating threads */
- suggested_stack_size += ETHR_B2KW(ETHR_STACK_GUARD_SIZE);
-#endif
- if (suggested_stack_size < min_stack_size)
- stack_size = ETHR_KW2B(min_stack_size);
- else if (suggested_stack_size > max_stack_size)
- stack_size = ETHR_KW2B(max_stack_size);
- else
- stack_size = ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
- (void) pthread_attr_setstacksize(&attr, stack_size);
- }
-
-#ifdef ETHR_STACK_GUARD_SIZE
- (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
-#endif
-
- /* Detached or joinable... */
- res = pthread_attr_setdetachstate(&attr,
- (opts && opts->detached
- ? PTHREAD_CREATE_DETACHED
- : PTHREAD_CREATE_JOINABLE));
- if (res != 0)
- goto cleanup_cond_destroy;
-
- res = pthread_mutex_lock(&twd.mtx);
-
- if (res != 0)
- goto cleanup_cond_destroy;
-
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- if (no_ethreads < ETHR_MAX_THREADS) {
- no_ethreads++;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- }
- else {
- res = EAGAIN;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- goto cleanup_mutex_unlock;
- }
-
- res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void *) &twd);
-
- if (res != 0) {
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- ASSERT(no_ethreads > 0);
- no_ethreads--;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- }
- else {
-
- /* Wait for child to initialize... */
- while (!twd.initialized) {
- res = pthread_cond_wait(&twd.cnd, &twd.mtx);
- if (res != 0 && res != EINTR)
- break;
- }
-
- }
-
- /* Cleanup... */
- cleanup_mutex_unlock:
- dres = pthread_mutex_unlock(&twd.mtx);
- if (res == 0)
- res = dres;
- cleanup_cond_destroy:
- dres = pthread_cond_destroy(&twd.cnd);
- if (res == 0)
- res = dres;
- cleanup_mutex_destroy:
- dres = pthread_mutex_destroy(&twd.mtx);
- if (res == 0)
- res = dres;
- cleanup_attr_destroy:
- dres = pthread_attr_destroy(&attr);
- if (res == 0)
- res = dres;
- cleanup_parent_func:
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- return res;
-}
-
-int
-ethr_thr_join(ethr_tid tid, void **res)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_join((pthread_t) tid, res);
-}
-
-int
-ethr_thr_detach(ethr_tid tid)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_detach((pthread_t) tid);
-}
-
-void
-ethr_thr_exit(void *res)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return;
- }
-#endif
- thr_exit_cleanup();
- pthread_exit(res);
-}
-
-ethr_tid
-ethr_self(void)
-{
- return (ethr_tid) pthread_self();
-}
-
-int
-ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
-{
- return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
-}
-
-
-/*
- * Mutex functions
- */
-
-
-int
-ethr_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#endif
- mtx->prev = NULL;
- mtx->next = NULL;
- mtx->is_rec_mtx = 0;
- return pthread_mutex_init(&mtx->pt_mtx, NULL);
-}
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-int
-ethr_rec_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#endif
- if (rec_mtx_attr_need_init)
- init_rec_mtx_attr();
-
- mtx->prev = NULL;
- mtx->next = NULL;
- mtx->is_rec_mtx = 1;
- return pthread_mutex_init(&mtx->pt_mtx, &rec_mtx_attr);
-}
-
-#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-
-int
-ethr_mutex_destroy(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (mtx->next) {
- ASSERT(mtx->prev);
- ethr_mutex_unset_forksafe(mtx);
- }
-#if ETHR_XCHK
- mtx->initialized = 0;
-#endif
- return pthread_mutex_destroy(&mtx->pt_mtx);
-}
-
-int ethr_mutex_set_forksafe(ethr_mutex *mtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-#if ETHR_HAVE_PTHREAD_ATFORK
- res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
- if (res != 0)
- return res;
- if (!forksafe_mtx.next) {
- ASSERT(!forksafe_mtx.prev);
- init_forksafe();
- }
- if (mtx->next) {
- /* forksafe already set for this mutex */
- ASSERT(mtx->prev);
- }
- else {
- mtx->next = forksafe_mtx.next;
- mtx->prev = &forksafe_mtx;
- forksafe_mtx.next->prev = mtx;
- forksafe_mtx.next = mtx;
- }
-
- res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
-
-#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
- res = ENOTSUP;
-#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
- return res;
-}
-
-int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-#if ETHR_HAVE_PTHREAD_ATFORK
- res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
- if (res != 0)
- return res;
- if (!forksafe_mtx.next) {
- ASSERT(!forksafe_mtx.prev);
- init_forksafe();
- }
- if (!mtx->next) {
- /* forksafe already unset for this mutex */
- ASSERT(!mtx->prev);
- }
- else {
- mtx->prev->next = mtx->next;
- mtx->next->prev = mtx->prev;
- mtx->next = NULL;
- mtx->prev = NULL;
- }
- res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
-
-#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
- res = ENOTSUP;
-#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
- return res;
-}
-
-int
-ethr_mutex_trylock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_trylock__(mtx);
-}
-
-int
-ethr_mutex_lock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_lock__(mtx);
-}
-
-int
-ethr_mutex_unlock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_unlock__(mtx);
-}
-
-/*
- * Condition variable functions
- */
-
-int
-ethr_cond_init(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd) {
- ASSERT(0);
- return EINVAL;
- }
- cnd->initialized = ETHR_COND_INITIALIZED;
-#endif
- return pthread_cond_init(&cnd->pt_cnd, NULL);
-}
-
-int
-ethr_cond_destroy(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
- cnd->initialized = 0;
-#endif
- return pthread_cond_destroy(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_signal(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_signal(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_broadcast(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_broadcast(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || cnd->initialized != ETHR_COND_INITIALIZED
- || !mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_wait(&cnd->pt_cnd, &mtx->pt_mtx);
-}
-
-int
-ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
-{
- struct timespec to;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || cnd->initialized != ETHR_COND_INITIALIZED
- || !mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED
- || !timeout) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- to.tv_sec = timeout->tv_sec;
- to.tv_nsec = timeout->tv_nsec;
-
- return pthread_cond_timedwait(&cnd->pt_cnd, &mtx->pt_mtx, &to);
-}
-
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-int
-ethr_rwmutex_init(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx) {
- ASSERT(0);
- return EINVAL;
- }
- rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
-#endif
- return pthread_rwlock_init(&rwmtx->pt_rwlock, write_pref_attr);
-}
-
-int
-ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = pthread_rwlock_destroy(&rwmtx->pt_rwlock);
-#if ETHR_XCHK
- rwmtx->initialized = 0;
-#endif
- return res;
-}
-
-int
-ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_tryrlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_runlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_tryrwlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rwlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rwunlock__(rwmtx);
-}
-
-#endif /* #ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-/*
- * Current time
- */
-
-int
-ethr_time_now(ethr_timeval *time)
-{
- int res;
- struct timeval tv;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- res = gettimeofday(&tv, NULL);
- time->tv_sec = (long) tv.tv_sec;
- time->tv_nsec = ((long) tv.tv_usec)*1000;
- return res;
-}
-
-/*
- * Thread specific data
- */
-
-int
-ethr_tsd_key_create(ethr_tsd_key *keyp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!keyp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_key_create((pthread_key_t *) keyp, NULL);
-}
-
-int
-ethr_tsd_key_delete(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_key_delete((pthread_key_t) key);
-}
-
-int
-ethr_tsd_set(ethr_tsd_key key, void *value)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_setspecific((pthread_key_t) key, value);
-}
-
-void *
-ethr_tsd_get(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return NULL;
- }
-#endif
- return pthread_getspecific((pthread_key_t) key);
-}
-
-/*
- * Signal functions
- */
-
-#if ETHR_HAVE_ETHR_SIG_FUNCS
-
-int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!set && !oset) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_sigmask(how, set, oset);
-}
-
-int ethr_sigwait(const sigset_t *set, int *sig)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!set || !sig) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (sigwait(set, sig) < 0)
- return errno;
- return 0;
-}
-
-#endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
-
-#elif defined(ETHR_WIN32_THREADS)
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Native win32 threads implementation *
-\* */
-
-#define INVALID_TID -1
-
-/* The spin count values are more or less taken out of the blue */
-#define ETHR_MUTEX_SPIN_COUNT 5000
-#define ETHR_COND_SPIN_COUNT 1000
-
-ethr_tid serial_shift; /* Bits to shift serial when constructing a tid */
-ethr_tid last_serial; /* Last thread table serial used */
-ethr_tid last_ix; /* Last thread table index used */
-ethr_tid thr_ix_mask; /* Mask used to mask out thread table index from a tid */
-
-/* Event used for condition variables. One per thread. */
-/*typedef struct cnd_wait_event__ cnd_wait_event_;*/
-struct cnd_wait_event__ {
- HANDLE handle;
- cnd_wait_event_ *prev;
- cnd_wait_event_ *next;
- int in_queue;
-};
-
-/* Thread specific data. Stored in the thread table */
-typedef struct {
- ethr_tid thr_id;
- HANDLE thr_handle;
- ethr_tid joiner;
- void *result;
- cnd_wait_event_ wait_event;
-} thr_data_;
-
-/* Argument passed to thr_wrapper() */
-typedef struct {
- void * (*func)(void *);
- void * arg;
- thr_data_ *ptd;
- thr_data_ *td;
- int res;
- void *prep_func_res;
-} thr_wrap_data_;
-
-
-static CRITICAL_SECTION thr_table_cs; /* Critical section used to protect
- the thread table from concurrent
- accesses. */
-static CRITICAL_SECTION fake_static_init_cs; /* Critical section used to protect
-                                                initialization of 'statically
-                                                initialized' mutexes */
-static thr_data_ * thr_table[ETHR_MAX_THREADS]; /* The thread table */
-
-static DWORD tls_own_thr_data;
-
-static thr_data_ main_thr_data;
-
-#define THR_IX(TID) ((TID) & thr_ix_mask)
-#define OWN_THR_DATA ((thr_data_ *) TlsGetValue(tls_own_thr_data))
-
-/*
- * ----------------------------------------------------------------------------
- * Static functions
- * ----------------------------------------------------------------------------
- */
-
-static int
-get_errno(void)
-{
- switch (GetLastError()) {
- case ERROR_INVALID_FUNCTION: return EINVAL; /* 1 */
- case ERROR_FILE_NOT_FOUND: return ENOENT; /* 2 */
- case ERROR_PATH_NOT_FOUND: return ENOENT; /* 3 */
- case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; /* 4 */
- case ERROR_ACCESS_DENIED: return EACCES; /* 5 */
- case ERROR_INVALID_HANDLE: return EBADF; /* 6 */
- case ERROR_ARENA_TRASHED: return ENOMEM; /* 7 */
- case ERROR_NOT_ENOUGH_MEMORY: return ENOMEM; /* 8 */
- case ERROR_INVALID_BLOCK: return ENOMEM; /* 9 */
- case ERROR_BAD_ENVIRONMENT: return E2BIG; /* 10 */
- case ERROR_BAD_FORMAT: return ENOEXEC; /* 11 */
- case ERROR_INVALID_ACCESS: return EINVAL; /* 12 */
- case ERROR_INVALID_DATA: return EINVAL; /* 13 */
- case ERROR_OUTOFMEMORY: return ENOMEM; /* 14 */
- case ERROR_INVALID_DRIVE: return ENOENT; /* 15 */
- case ERROR_CURRENT_DIRECTORY: return EACCES; /* 16 */
- case ERROR_NOT_SAME_DEVICE: return EXDEV; /* 17 */
- case ERROR_NO_MORE_FILES: return ENOENT; /* 18 */
- case ERROR_WRITE_PROTECT: return EACCES; /* 19 */
- case ERROR_BAD_UNIT: return EACCES; /* 20 */
- case ERROR_NOT_READY: return EACCES; /* 21 */
- case ERROR_BAD_COMMAND: return EACCES; /* 22 */
- case ERROR_CRC: return EACCES; /* 23 */
- case ERROR_BAD_LENGTH: return EACCES; /* 24 */
- case ERROR_SEEK: return EACCES; /* 25 */
- case ERROR_NOT_DOS_DISK: return EACCES; /* 26 */
- case ERROR_SECTOR_NOT_FOUND: return EACCES; /* 27 */
- case ERROR_OUT_OF_PAPER: return EACCES; /* 28 */
- case ERROR_WRITE_FAULT: return EACCES; /* 29 */
- case ERROR_READ_FAULT: return EACCES; /* 30 */
- case ERROR_GEN_FAILURE: return EACCES; /* 31 */
- case ERROR_SHARING_VIOLATION: return EACCES; /* 32 */
- case ERROR_LOCK_VIOLATION: return EACCES; /* 33 */
- case ERROR_WRONG_DISK: return EACCES; /* 34 */
- case ERROR_SHARING_BUFFER_EXCEEDED: return EACCES; /* 36 */
- case ERROR_BAD_NETPATH: return ENOENT; /* 53 */
- case ERROR_NETWORK_ACCESS_DENIED: return EACCES; /* 65 */
- case ERROR_BAD_NET_NAME: return ENOENT; /* 67 */
- case ERROR_FILE_EXISTS: return EEXIST; /* 80 */
- case ERROR_CANNOT_MAKE: return EACCES; /* 82 */
- case ERROR_FAIL_I24: return EACCES; /* 83 */
- case ERROR_INVALID_PARAMETER: return EINVAL; /* 87 */
- case ERROR_NO_PROC_SLOTS: return EAGAIN; /* 89 */
- case ERROR_DRIVE_LOCKED: return EACCES; /* 108 */
- case ERROR_BROKEN_PIPE: return EPIPE; /* 109 */
- case ERROR_DISK_FULL: return ENOSPC; /* 112 */
- case ERROR_INVALID_TARGET_HANDLE: return EBADF; /* 114 */
- case ERROR_WAIT_NO_CHILDREN: return ECHILD; /* 128 */
- case ERROR_CHILD_NOT_COMPLETE: return ECHILD; /* 129 */
- case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; /* 130 */
- case ERROR_NEGATIVE_SEEK: return EINVAL; /* 131 */
- case ERROR_SEEK_ON_DEVICE: return EACCES; /* 132 */
- case ERROR_DIR_NOT_EMPTY: return ENOTEMPTY;/* 145 */
- case ERROR_NOT_LOCKED: return EACCES; /* 158 */
- case ERROR_BAD_PATHNAME: return ENOENT; /* 161 */
- case ERROR_MAX_THRDS_REACHED: return EAGAIN; /* 164 */
- case ERROR_LOCK_FAILED: return EACCES; /* 167 */
- case ERROR_ALREADY_EXISTS: return EEXIST; /* 183 */
- case ERROR_INVALID_STARTING_CODESEG: return ENOEXEC; /* 188 */
- case ERROR_INVALID_STACKSEG: return ENOEXEC; /* 189 */
- case ERROR_INVALID_MODULETYPE: return ENOEXEC; /* 190 */
- case ERROR_INVALID_EXE_SIGNATURE: return ENOEXEC; /* 191 */
- case ERROR_EXE_MARKED_INVALID: return ENOEXEC; /* 192 */
- case ERROR_BAD_EXE_FORMAT: return ENOEXEC; /* 193 */
- case ERROR_ITERATED_DATA_EXCEEDS_64k: return ENOEXEC; /* 194 */
- case ERROR_INVALID_MINALLOCSIZE: return ENOEXEC; /* 195 */
- case ERROR_DYNLINK_FROM_INVALID_RING: return ENOEXEC; /* 196 */
- case ERROR_IOPL_NOT_ENABLED: return ENOEXEC; /* 197 */
- case ERROR_INVALID_SEGDPL: return ENOEXEC; /* 198 */
- case ERROR_AUTODATASEG_EXCEEDS_64k: return ENOEXEC; /* 199 */
- case ERROR_RING2SEG_MUST_BE_MOVABLE: return ENOEXEC; /* 200 */
- case ERROR_RELOC_CHAIN_XEEDS_SEGLIM: return ENOEXEC; /* 201 */
- case ERROR_INFLOOP_IN_RELOC_CHAIN: return ENOEXEC; /* 202 */
- case ERROR_FILENAME_EXCED_RANGE: return ENOENT; /* 206 */
- case ERROR_NESTING_NOT_ALLOWED: return EAGAIN; /* 215 */
- case ERROR_NOT_ENOUGH_QUOTA: return ENOMEM; /* 1816 */
- default: return EINVAL;
- }
-}
-
-static ETHR_INLINE thr_data_ *
-tid2thr(ethr_tid tid)
-{
- ethr_tid ix;
- thr_data_ *td;
-
- if (tid < 0)
- return NULL;
- ix = THR_IX(tid);
- if (ix >= ETHR_MAX_THREADS)
- return NULL;
- td = thr_table[ix];
- if (!td)
- return NULL;
- if (td->thr_id != tid)
- return NULL;
- return td;
-}
-
-static ETHR_INLINE void
-new_tid(ethr_tid *new_tid, ethr_tid *new_serial, ethr_tid *new_ix)
-{
- ethr_tid tmp_serial = last_serial;
- ethr_tid tmp_ix = last_ix + 1;
- ethr_tid start_ix = tmp_ix;
-
-
- do {
- if (tmp_ix >= ETHR_MAX_THREADS) {
- tmp_serial++;
- if ((tmp_serial << serial_shift) < 0)
- tmp_serial = 0;
- tmp_ix = 0;
- }
- if (!thr_table[tmp_ix]) {
- *new_tid = (tmp_serial << serial_shift) | tmp_ix;
- *new_serial = tmp_serial;
- *new_ix = tmp_ix;
- return;
- }
- tmp_ix++;
- } while (tmp_ix != start_ix);
-
- *new_tid = INVALID_TID;
- *new_serial = INVALID_TID;
- *new_ix = INVALID_TID;
-
-}
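
A win32 tid packs a thread-table index into its low serial_shift bits and a
reuse serial into the bits above; when a table slot is recycled the serial
changes, so a stale tid fails the td->thr_id != tid check in tid2thr()
above even though its index still names a live slot. A worked example of
the packing, assuming the default ETHR_MAX_THREADS of 2048 (hence 11 index
bits):

    /* With ETHR_MAX_THREADS == 2048 the index occupies 11 bits. */
    #define SERIAL_SHIFT 11
    #define IX_MASK      ((1L << SERIAL_SHIFT) - 1)

    static long make_tid(long serial, long ix)
    {
        return (serial << SERIAL_SHIFT) | ix;
    }
    static long tid_ix(long tid)     { return tid & IX_MASK; }
    static long tid_serial(long tid) { return tid >> SERIAL_SHIFT; }

    /* make_tid(3, 42) == 3*2048 + 42 == 6186;
     * tid_ix(6186) == 42 and tid_serial(6186) == 3. */
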
-
-
-static void thr_exit_cleanup(thr_data_ *td, void *res)
-{
-
- ASSERT(td == OWN_THR_DATA);
-
- run_exit_handlers();
-
- EnterCriticalSection(&thr_table_cs);
- CloseHandle(td->wait_event.handle);
- if (td->thr_handle == INVALID_HANDLE_VALUE) {
- /* We are detached; cleanup thread table */
- ASSERT(td->joiner == INVALID_TID);
- ASSERT(td == thr_table[THR_IX(td->thr_id)]);
- thr_table[THR_IX(td->thr_id)] = NULL;
- if (td != &main_thr_data)
- (*freep)((void *) td);
- }
- else {
- /* Save result and let joining thread cleanup */
- td->result = res;
- }
- LeaveCriticalSection(&thr_table_cs);
-}
-
-static unsigned __stdcall thr_wrapper(LPVOID args)
-{
- void *(*func)(void*) = ((thr_wrap_data_ *) args)->func;
- void *arg = ((thr_wrap_data_ *) args)->arg;
- thr_data_ *td = ((thr_wrap_data_ *) args)->td;
-
- td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (td->wait_event.handle == INVALID_HANDLE_VALUE
- || !TlsSetValue(tls_own_thr_data, (LPVOID) td)) {
- ((thr_wrap_data_ *) args)->res = get_errno();
- if (td->wait_event.handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->wait_event.handle);
- SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
- _endthreadex((unsigned) 0);
- ASSERT(0);
- }
-
- td->wait_event.prev = NULL;
- td->wait_event.next = NULL;
- td->wait_event.in_queue = 0;
-
- if (thread_create_child_func)
- (*thread_create_child_func)(((thr_wrap_data_ *) args)->prep_func_res);
-
- ASSERT(td == OWN_THR_DATA);
-
- ((thr_wrap_data_ *) args)->res = 0;
- SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
-
- thr_exit_cleanup(td, (*func)(arg));
- return 0;
-}
-
-int
-ethr_fake_static_mutex_init(ethr_mutex *mtx)
-{
- EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- /* Got here under race conditions; check again... */
- if (!mtx->initialized) {
- if (!InitializeCriticalSectionAndSpinCount(&mtx->cs,
- ETHR_MUTEX_SPIN_COUNT))
- return get_errno();
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
- }
- LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- return 0;
-}
-
-static int
-fake_static_cond_init(ethr_cond *cnd)
-{
- EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- /* Got here under race conditions; check again... */
- if (!cnd->initialized) {
- if (!InitializeCriticalSectionAndSpinCount(&cnd->cs,
- ETHR_COND_SPIN_COUNT))
- return get_errno();
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- cnd->initialized = ETHR_COND_INITIALIZED;
- }
- LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- return 0;
-}
-
-#ifdef __GNUC__
-#define LL_LITERAL(X) X##LL
-#else
-#define LL_LITERAL(X) X##i64
-#endif
-
-#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
-
-static ETHR_INLINE void
-get_curr_time(long *sec, long *nsec)
-{
- SYSTEMTIME t;
- FILETIME ft;
- LONGLONG lft;
-
- GetSystemTime(&t);
- SystemTimeToFileTime(&t, &ft);
- memcpy(&lft, &ft, sizeof(lft));
- *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
- *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
-}
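
A FILETIME counts 100 ns ticks since 1601-01-01, so the conversion above
splits the tick count by 10^7 and subtracts EPOCH_JULIAN_DIFF, the
11,644,473,600 seconds between 1601 and the Unix epoch of 1970. The same
arithmetic spelled out as a small sketch:

    #include <stdint.h>

    #define TICKS_PER_SEC     10000000LL  /* 100-ns ticks per second */
    #define SECS_1601_TO_1970 11644473600LL

    static void filetime_to_unix(int64_t lft, long *sec, long *nsec)
    {
        *nsec = (long) (lft % TICKS_PER_SEC) * 100;  /* leftover ticks */
        *sec  = (long) (lft / TICKS_PER_SEC - SECS_1601_TO_1970);
    }

    /* filetime_to_unix(116444736000000000, &s, &ns) yields s == 0 and
     * ns == 0: that tick count is exactly the Unix epoch. */
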
-
-static cnd_wait_event_ *cwe_freelist;
-static CRITICAL_SECTION cwe_cs;
-
-static int
-alloc_cwe(cnd_wait_event_ **cwe_res)
-{
- cnd_wait_event_ *cwe;
- EnterCriticalSection(&cwe_cs);
- cwe = cwe_freelist;
- if (cwe) {
- cwe_freelist = cwe->next;
- LeaveCriticalSection(&cwe_cs);
- }
- else {
- LeaveCriticalSection(&cwe_cs);
- cwe = (*allocp)(sizeof(cnd_wait_event_));
- if (!cwe)
- return ENOMEM;
- cwe->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (cwe->handle == INVALID_HANDLE_VALUE) {
- int res = get_errno();
- (*freep)(cwe);
- return res;
- }
- }
- *cwe_res = cwe;
- return 0;
-}
-
-static void
-free_cwe(cnd_wait_event_ *cwe)
-{
- EnterCriticalSection(&cwe_cs);
- cwe->next = cwe_freelist;
- cwe_freelist = cwe;
- LeaveCriticalSection(&cwe_cs);
-}
-
-static ETHR_INLINE int
-condwait(ethr_cond *cnd,
- ethr_mutex *mtx,
- int with_timeout,
- ethr_timeval *timeout)
-{
- int res;
- thr_data_ *td;
- cnd_wait_event_ *cwe;
- DWORD code;
-    long time; /* time until timeout in milliseconds */
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-
- if (!mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED
- || !cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
- || (with_timeout && !timeout)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- td = OWN_THR_DATA;
- if (td)
- cwe = &td->wait_event;
- else { /* A non-ethread thread */
- res = alloc_cwe(&cwe);
- if (res != 0)
- return res;
- }
-
- if (!cnd->initialized)
- fake_static_cond_init(cnd);
- EnterCriticalSection(&cnd->cs);
-
- ASSERT(!cwe->in_queue);
- if (cnd->queue_end) {
- ASSERT(cnd->queue);
- cwe->prev = cnd->queue_end;
- cwe->next = NULL;
- cnd->queue_end->next = cwe;
- cnd->queue_end = cwe;
- }
- else {
- ASSERT(!cnd->queue);
- cwe->prev = NULL;
- cwe->next = NULL;
- cnd->queue = cwe;
- cnd->queue_end = cwe;
- }
- cwe->in_queue = 1;
-
- LeaveCriticalSection(&cnd->cs);
-
- LeaveCriticalSection(&mtx->cs);
-
- if (!with_timeout)
- time = INFINITE;
- else {
- long sec, nsec;
- ASSERT(timeout);
- get_curr_time(&sec, &nsec);
- time = (timeout->tv_sec - sec)*1000;
- time += (timeout->tv_nsec - nsec + 500)/1000000;
- if (time < 0)
- time = 0;
- }
-
- /* wait for event to signal */
- code = WaitForSingleObject(cwe->handle, time);
-
- EnterCriticalSection(&mtx->cs);
-
- if (code == WAIT_OBJECT_0) {
- /* We were woken by a signal or a broadcast ... */
- res = 0;
-
- /* ... no need to remove event from wait queue since this was
- taken care of by the signal or broadcast */
-#ifdef DEBUG
- EnterCriticalSection(&cnd->cs);
- ASSERT(!cwe->in_queue);
- LeaveCriticalSection(&cnd->cs);
-#endif
-
- }
- else {
- /* We timed out... */
- res = ETIMEDOUT;
-
- /* ... probably have to remove event from wait queue ... */
- EnterCriticalSection(&cnd->cs);
-
- if (cwe->in_queue) { /* ... but we must check that we are in queue
- since a signal or broadcast after timeout
- may have removed us from the queue */
- if (cwe->prev) {
- cwe->prev->next = cwe->next;
- }
- else {
- ASSERT(cnd->queue == cwe);
- cnd->queue = cwe->next;
- }
-
- if (cwe->next) {
- cwe->next->prev = cwe->prev;
- }
- else {
- ASSERT(cnd->queue_end == cwe);
- cnd->queue_end = cwe->prev;
- }
- cwe->in_queue = 0;
- }
-
- LeaveCriticalSection(&cnd->cs);
-
- }
-
- if (!td)
- free_cwe(cwe);
-
- return res;
-
-}
-
-
-/*
- * ----------------------------------------------------------------------------
- * Exported functions
- * ----------------------------------------------------------------------------
- */
-
-int
-ethr_init(ethr_init_data *id)
-{
-#ifdef _WIN32_WINNT
- DWORD major = (_WIN32_WINNT >> 8) & 0xff;
- DWORD minor = _WIN32_WINNT & 0xff;
- OSVERSIONINFO os_version;
-#endif
- int err = 0;
- thr_data_ *td = &main_thr_data;
- unsigned long i;
-
- if (!ethr_not_inited)
- return EINVAL;
-
-#ifdef _WIN32_WINNT
- os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
- GetVersionEx(&os_version);
- if (os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
- || os_version.dwMajorVersion < major
- || (os_version.dwMajorVersion == major
- && os_version.dwMinorVersion < minor))
- return ENOTSUP;
-#endif
-
- ASSERT(ETHR_MAX_THREADS > 0);
- for (i = ETHR_MAX_THREADS - 1, serial_shift = 0;
- i;
- serial_shift++, i >>= 1);
- thr_ix_mask = ~(~((ethr_tid) 0) << serial_shift);
-
- tls_own_thr_data = TlsAlloc();
- if (tls_own_thr_data == TLS_OUT_OF_INDEXES)
- goto error;
-
- last_serial = 0;
- last_ix = 0;
-
- td->thr_id = 0;
- td->thr_handle = GetCurrentThread();
- td->joiner = INVALID_TID;
- td->result = NULL;
- td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (td->wait_event.handle == INVALID_HANDLE_VALUE)
- goto error;
- td->wait_event.prev = NULL;
- td->wait_event.next = NULL;
- td->wait_event.in_queue = 0;
- thr_table[0] = td;
-
- if (!TlsSetValue(tls_own_thr_data, (LPVOID) td))
- goto error;
-
- ASSERT(td == OWN_THR_DATA);
-
-
- cwe_freelist = NULL;
- if (!InitializeCriticalSectionAndSpinCount(&cwe_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
-
- for (i = 1; i < ETHR_MAX_THREADS; i++)
- thr_table[i] = NULL;
-
- if (!InitializeCriticalSectionAndSpinCount(&thr_table_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
- if (!InitializeCriticalSectionAndSpinCount(&fake_static_init_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
- ethr_not_inited = 0;
-
- err = init_common(id);
- if (err)
- goto error;
-
- return 0;
-
- error:
- ethr_not_inited = 1;
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
- if (td->thr_handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->thr_handle);
- if (td->wait_event.handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->wait_event.handle);
- return err;
-}
-
-/*
- * Thread functions.
- */
-
-int
-ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
- ethr_thr_opts *opts)
-{
- int err = 0;
- thr_wrap_data_ twd;
- thr_data_ *my_td, *child_td = NULL;
- ethr_tid child_tid, child_serial, child_ix;
- DWORD code;
- unsigned ID;
- unsigned stack_size = 0; /* 0 = system default */
- int use_stack_size = (opts && opts->suggested_stack_size >= 0
- ? opts->suggested_stack_size
- : -1 /* Use system default */);
-
-#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
- if (use_stack_size < 0)
- use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
-#endif
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!tid || !func) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- my_td = OWN_THR_DATA;
- if (!my_td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- if (use_stack_size >= 0) {
- size_t suggested_stack_size = (size_t) use_stack_size;
-#ifdef DEBUG
-        suggested_stack_size /= 2; /* Make sure we have some margin */
-#endif
- if (suggested_stack_size < min_stack_size)
- stack_size = (unsigned) ETHR_KW2B(min_stack_size);
- else if (suggested_stack_size > max_stack_size)
- stack_size = (unsigned) ETHR_KW2B(max_stack_size);
- else
- stack_size =
- (unsigned) ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
- }
-
- EnterCriticalSection(&thr_table_cs);
-
-    /* Call the prepare function if it exists */
- if (thread_create_prepare_func)
- twd.prep_func_res = (*thread_create_prepare_func)();
- else
- twd.prep_func_res = NULL;
-
- /* Find a new thread id to use */
- new_tid(&child_tid, &child_serial, &child_ix);
- if (child_tid == INVALID_TID) {
- err = EAGAIN;
- goto error;
- }
-
- ASSERT(child_ix == THR_IX(child_tid));
-
- *tid = child_tid;
-
- ASSERT(!thr_table[child_ix]);
-
- /* Alloc thread data */
- thr_table[child_ix] = child_td = (thr_data_ *) (*allocp)(sizeof(thr_data_));
- if (!child_td) {
- err = ENOMEM;
- goto error;
- }
-
- /* Init thread data */
-
- child_td->thr_id = child_tid;
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- child_td->joiner = INVALID_TID;
- child_td->result = NULL;
- /* 'child_td->wait_event' is initialized by child thread */
-
-
- /* Init thread wrapper data */
-
- twd.func = func;
- twd.arg = arg;
- twd.ptd = my_td;
- twd.td = child_td;
- twd.res = 0;
-
- ASSERT(!my_td->wait_event.in_queue);
-
- /* spawn the thr_wrapper function */
- child_td->thr_handle = (HANDLE) _beginthreadex(NULL,
- stack_size,
- thr_wrapper,
- (LPVOID) &twd,
- 0,
- &ID);
- if (child_td->thr_handle == (HANDLE) 0) {
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- goto error;
- }
-
- ASSERT(child_td->thr_handle != INVALID_HANDLE_VALUE);
-
- /* Wait for child to finish initialization */
- code = WaitForSingleObject(my_td->wait_event.handle, INFINITE);
- if (twd.res || code != WAIT_OBJECT_0) {
- err = twd.res;
- goto error;
- }
-
- if (opts && opts->detached) {
- CloseHandle(child_td->thr_handle);
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- }
-
- last_serial = child_serial;
- last_ix = child_ix;
-
- ASSERT(thr_table[child_ix] == child_td);
-
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- LeaveCriticalSection(&thr_table_cs);
-
- return 0;
-
- error:
-
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
-
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- if (child_ix != INVALID_TID) {
-
- if (child_td) {
- ASSERT(thr_table[child_ix] == child_td);
-
- if (child_td->thr_handle != INVALID_HANDLE_VALUE) {
- WaitForSingleObject(child_td->thr_handle, INFINITE);
- CloseHandle(child_td->thr_handle);
- }
-
- (*freep)((void *) child_td);
- thr_table[child_ix] = NULL;
- }
- }
-
- *tid = INVALID_TID;
-
- LeaveCriticalSection(&thr_table_cs);
- return err;
-}
-
-int ethr_thr_join(ethr_tid tid, void **res)
-{
- int err = 0;
- DWORD code;
- thr_data_ *td;
- thr_data_ *my_td;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- my_td = OWN_THR_DATA;
-
- if (!my_td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- EnterCriticalSection(&thr_table_cs);
-
- td = tid2thr(tid);
- if (!td)
- err = ESRCH;
- else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
- || td->joiner != INVALID_TID) /* i.e. someone else is joining */
- err = EINVAL;
- else if (my_td == td)
- err = EDEADLK;
- else
- td->joiner = my_td->thr_id;
-
- LeaveCriticalSection(&thr_table_cs);
-
- if (err)
- goto error;
-
- /* Wait for thread to terminate */
- code = WaitForSingleObject(td->thr_handle, INFINITE);
- if (code != WAIT_OBJECT_0)
- goto error;
-
- EnterCriticalSection(&thr_table_cs);
-
- ASSERT(td == tid2thr(tid));
- ASSERT(td->thr_handle != INVALID_HANDLE_VALUE);
- ASSERT(td->joiner == my_td->thr_id);
-
- if (res)
- *res = td->result;
-
- CloseHandle(td->thr_handle);
- ASSERT(td == thr_table[THR_IX(td->thr_id)]);
- thr_table[THR_IX(td->thr_id)] = NULL;
- if (td != &main_thr_data)
- (*freep)((void *) td);
-
- LeaveCriticalSection(&thr_table_cs);
-
- return 0;
-
- error:
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
- return err;
-}
-
-
-int
-ethr_thr_detach(ethr_tid tid)
-{
- int res;
- DWORD code;
- thr_data_ *td;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- if (!OWN_THR_DATA) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- EnterCriticalSection(&thr_table_cs);
-
- td = tid2thr(tid);
-    if (!td)
-	res = ESRCH;
-    else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
- || td->joiner != INVALID_TID) /* i.e. someone is joining */
- res = EINVAL;
- else {
- res = 0;
- CloseHandle(td->thr_handle);
- td->thr_handle = INVALID_HANDLE_VALUE;
- }
-
- LeaveCriticalSection(&thr_table_cs);
-
- return res;
-}
-
-
-void
-ethr_thr_exit(void *res)
-{
- thr_data_ *td;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return;
- }
-#endif
- td = OWN_THR_DATA;
- if (!td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return;
- }
- thr_exit_cleanup(td, res);
- _endthreadex((unsigned) 0);
-}
-
-ethr_tid
-ethr_self(void)
-{
- thr_data_ *td;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return INVALID_TID;
- }
-#endif
-    /* It is okay for non-ethreads (i.e. native win32 threads) to call
-       ethr_self(). They will, however, get INVALID_TID back. */
- td = OWN_THR_DATA;
- if (!td)
- return INVALID_TID;
- return td->thr_id;
-}
-
-int
-ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
-{
-    /* INVALID_TID does not equal any tid, not even INVALID_TID itself */
- return tid1 == tid2 && tid1 != INVALID_TID;
-}
-
-/*
- * Mutex functions.
- */
-
-int
-ethr_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!InitializeCriticalSectionAndSpinCount(&mtx->cs, ETHR_MUTEX_SPIN_COUNT))
- return get_errno();
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#if ETHR_XCHK
- mtx->is_rec_mtx = 0;
-#endif
- return 0;
-}
-
-int
-ethr_rec_mutex_init(ethr_mutex *mtx)
-{
- int res;
- res = ethr_mutex_init(mtx);
-#if ETHR_XCHK
- mtx->is_rec_mtx = 1;
-#endif
- return res;
-}
-
-int
-ethr_mutex_destroy(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- DeleteCriticalSection(&mtx->cs);
- mtx->initialized = 0;
- return 0;
-}
-
-int ethr_mutex_set_forksafe(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return 0; /* No fork() */
-}
-
-int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return 0; /* No fork() */
-}
-
-int
-ethr_mutex_trylock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx
- || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return ethr_mutex_trylock__(mtx);
-}
-
-int
-ethr_mutex_lock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx
- || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_lock__(mtx);
-}
-
-int
-ethr_mutex_unlock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_unlock__(mtx);
-}
-
-/*
- * Condition variable functions.
- */
-
-int
-ethr_cond_init(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!InitializeCriticalSectionAndSpinCount(&cnd->cs, ETHR_COND_SPIN_COUNT))
- return get_errno();
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- cnd->initialized = ETHR_COND_INITIALIZED;
- return 0;
-}
-
-int
-ethr_cond_destroy(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
- || cnd->queue) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- DeleteCriticalSection(&cnd->cs);
- cnd->initialized = 0;
- return 0;
-}
-
-int
-ethr_cond_signal(ethr_cond *cnd)
-{
- cnd_wait_event_ *cwe;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!cnd->initialized) {
- int res = fake_static_cond_init(cnd);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&cnd->cs);
- cwe = cnd->queue;
- if (cwe) {
- ASSERT(cwe->in_queue);
- SetEvent(cnd->queue->handle);
- if (cwe->next)
- cwe->next->prev = NULL;
- else {
- ASSERT(cnd->queue_end == cnd->queue);
- cnd->queue_end = NULL;
- }
- cnd->queue = cwe->next;
- cwe->in_queue = 0;
- }
- LeaveCriticalSection(&cnd->cs);
- return 0;
-}
-
-int
-ethr_cond_broadcast(ethr_cond *cnd)
-{
- cnd_wait_event_ *cwe;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!cnd->initialized) {
- int res = fake_static_cond_init(cnd);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&cnd->cs);
- for (cwe = cnd->queue; cwe; cwe = cwe->next) {
- ASSERT(cwe->in_queue);
- SetEvent(cwe->handle);
- cwe->in_queue = 0;
- }
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- LeaveCriticalSection(&cnd->cs);
- return 0;
-}
-
-int
-ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
-{
- return condwait(cnd, mtx, 0, NULL);
-}
-
-int
-ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
-{
- return condwait(cnd, mtx, 1, timeout);
-}
-
-int
-ethr_time_now(ethr_timeval *time)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- get_curr_time(&time->tv_sec, &time->tv_nsec);
- return 0;
-}
-
-/*
- * Thread specific data
- */
-
-int
-ethr_tsd_key_create(ethr_tsd_key *keyp)
-{
- DWORD key;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!keyp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- key = TlsAlloc();
- if (key == TLS_OUT_OF_INDEXES)
- return get_errno();
- *keyp = (ethr_tsd_key) key;
- return 0;
-}
-
-int
-ethr_tsd_key_delete(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- if (!TlsFree((DWORD) key))
- return get_errno();
- return 0;
-}
-
-int
-ethr_tsd_set(ethr_tsd_key key, void *value)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- if (!TlsSetValue((DWORD) key, (LPVOID) value))
- return get_errno();
- return 0;
-}
-
-void *
-ethr_tsd_get(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return NULL;
- }
-#endif
- return (void *) TlsGetValue((DWORD) key);
-}
-
-/* Misc */
-
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-
-int
-ethr_do_spinlock_init(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
- return 0;
- else
- return get_errno();
-}
-
-int
-ethr_do_rwlock_init(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- lock->counter = 0;
- if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
- return 0;
- else
- return get_errno();
-}
-
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#else
-#error "Missing thread implementation"
-#endif
-
-/* Atomics */
-
-int
-ethr_atomic_init(ethr_atomic_t *var, long i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_init__(var, i);
-}
-
-int
-ethr_atomic_set(ethr_atomic_t *var, long i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_set__(var, i);
-}
-
-int
-ethr_atomic_read(ethr_atomic_t *var, long *i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !i) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_read__(var, i);
-}
-
-
-int
-ethr_atomic_addtest(ethr_atomic_t *var, long incr, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_addtest__(var, incr, testp);
-}
-
-int
-ethr_atomic_inctest(ethr_atomic_t *incp, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!incp || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_inctest__(incp, testp);
-}
-
-int
-ethr_atomic_dectest(ethr_atomic_t *decp, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!decp || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_dectest__(decp, testp);
-}
-
-int
-ethr_atomic_add(ethr_atomic_t *var, long incr)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_add__(var, incr);
-}
-
-int
-ethr_atomic_inc(ethr_atomic_t *incp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!incp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_inc__(incp);
-}
-
-int
-ethr_atomic_dec(ethr_atomic_t *decp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!decp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_dec__(decp);
-}
-
-int
-ethr_atomic_and_old(ethr_atomic_t *var, long mask, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_and_old__(var, mask, old);
-}
-
-int
-ethr_atomic_or_old(ethr_atomic_t *var, long mask, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_or_old__(var, mask, old);
-}
-
-int
-ethr_atomic_xchg(ethr_atomic_t *var, long new, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_xchg__(var, new, old);
-}
-
-int
-ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_cmpxchg__(var, new, expected, old);
-}
-
-/* Spinlocks and rwspinlocks */
-
-int
-ethr_spinlock_init(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spinlock_init__(lock);
-}
-
-int
-ethr_spinlock_destroy(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spinlock_destroy__(lock);
-}
-
-
-int
-ethr_spin_unlock(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spin_unlock__(lock);
-}
-
-int
-ethr_spin_lock(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spin_lock__(lock);
-}
-
-int
-ethr_rwlock_init(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwlock_init__(lock);
-}
-
-int
-ethr_rwlock_destroy(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwlock_destroy__(lock);
-}
-
-int
-ethr_read_unlock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_read_unlock__(lock);
-}
-
-int
-ethr_read_lock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_read_lock__(lock);
-}
-
-int
-ethr_write_unlock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_write_unlock__(lock);
-}
-
-int
-ethr_write_lock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_write_lock__(lock);
-}
-
-
-int
-ethr_gate_init(ethr_gate *gp)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_init(&gp->mtx);
- if (res != 0)
- return res;
- res = ethr_cond_init(&gp->cnd);
- if (res != 0) {
- ethr_mutex_destroy(&gp->mtx);
- return res;
- }
- gp->open = 0;
- return 0;
-}
-
-int
-ethr_gate_destroy(ethr_gate *gp)
-{
- int res, dres;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_destroy(&gp->mtx);
- dres = ethr_cond_destroy(&gp->cnd);
- if (res == 0)
- res = dres;
- gp->open = 0;
- return res;
-}
-
-int
-ethr_gate_close(ethr_gate *gp)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- gp->open = 0;
- res = ethr_mutex_unlock__(&gp->mtx);
- return res;
-}
-
-int
-ethr_gate_let_through(ethr_gate *gp, unsigned no)
-{
- int res, ures;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- gp->open += no;
- res = (gp->open == 1
- ? ethr_cond_signal(&gp->cnd)
- : ethr_cond_broadcast(&gp->cnd));
- ures = ethr_mutex_unlock__(&gp->mtx);
-    if (res == 0)
-	res = ures;
- return res;
-}
-
-int
-ethr_gate_swait(ethr_gate *gp, int spincount)
-{
- int res, ures, n;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- n = spincount;
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- while (n >= 0 && !gp->open) {
- res = ethr_mutex_unlock__(&gp->mtx);
- if (res != 0)
- return res;
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- n--;
- }
- while (!gp->open) {
- res = ethr_cond_wait(&gp->cnd, &gp->mtx);
- if (res != 0 && res != EINTR)
- goto done;
- }
- gp->open--;
- done:
- ures = ethr_mutex_unlock__(&gp->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-
-int
-ethr_gate_wait(ethr_gate *gp)
-{
- return ethr_gate_swait(gp, 0);
-}
-
-
-/* rwmutex fallback */
-#ifdef ETHR_USE_RWMTX_FALLBACK
-
-int
-ethr_rwmutex_init(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_init(&rwmtx->mtx);
- if (res != 0)
- return res;
-    res = ethr_cond_init(&rwmtx->rcnd);
- if (res != 0)
- goto error_cleanup1;
- res = ethr_cond_init(&rwmtx->wcnd);
- if (res != 0)
- goto error_cleanup2;
- rwmtx->readers = 0;
- rwmtx->waiting_readers = 0;
- rwmtx->waiting_writers = 0;
-#if ETHR_XCHK
- rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
-#endif
- return 0;
- error_cleanup2:
- ethr_cond_destroy(&rwmtx->rcnd);
- error_cleanup1:
- ethr_mutex_destroy(&rwmtx->mtx);
- return res;
-}
-
-int
-ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
-{
- int res, pres;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
- rwmtx->initialized = 0;
-#endif
- res = ethr_mutex_destroy(&rwmtx->mtx);
- pres = ethr_cond_destroy(&rwmtx->rcnd);
- if (res == 0)
- res = pres;
- pres = ethr_cond_destroy(&rwmtx->wcnd);
- if (res == 0)
- res = pres;
- return res;
-}
-
-int
-ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_trylock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (rwmtx->waiting_writers) {
- res = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- return EBUSY;
- return res;
- }
- rwmtx->readers++;
- return ethr_mutex_unlock__(&rwmtx->mtx);
-}
-
-int
-ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- while (rwmtx->waiting_writers) {
- rwmtx->waiting_readers++;
- res = ethr_cond_wait(&rwmtx->rcnd, &rwmtx->mtx);
- rwmtx->waiting_readers--;
- if (res != 0 && res != EINTR) {
- (void) ethr_mutex_unlock__(&rwmtx->mtx);
- return res;
- }
- }
- rwmtx->readers++;
- return ethr_mutex_unlock__(&rwmtx->mtx);
-}
-
-int
-ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
-{
- int res, ures;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- rwmtx->readers--;
- if (!rwmtx->readers && rwmtx->waiting_writers)
- res = ethr_cond_signal(&rwmtx->wcnd);
- ures = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-int
-ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_trylock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (!rwmtx->readers && !rwmtx->waiting_writers)
- return 0;
- else {
- res = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- return EBUSY;
- return res;
- }
-}
-
-int
-ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (!rwmtx->readers && !rwmtx->waiting_writers)
- return 0;
-
- while (rwmtx->readers) {
- rwmtx->waiting_writers++;
- res = ethr_cond_wait(&rwmtx->wcnd, &rwmtx->mtx);
- rwmtx->waiting_writers--;
- if (res != 0 && res != EINTR) {
- (void) ethr_rwmutex_rwunlock(rwmtx);
- return res;
- }
- }
- return 0;
-}
-
-int
-ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
-{
- int res, ures;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = 0;
- if (rwmtx->waiting_writers)
- res = ethr_cond_signal(&rwmtx->wcnd);
- else if (rwmtx->waiting_readers)
- res = ethr_cond_broadcast(&rwmtx->rcnd);
- ures = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-#endif /* #ifdef ETHR_USE_RWMTX_FALLBACK */
-
-void
-ethr_compiler_barrier(void)
-{
-
-}
-
-#ifdef DEBUG
-
-#include <stdio.h>
-int ethr_assert_failed(char *f, int l, char *a)
-{
- fprintf(stderr, "%s:%d: Assertion failed: %s\n", f, l, a);
- abort();
- return 0;
-}
-
-#endif
-
-
diff --git a/erts/lib_src/pthread/ethr_event.c b/erts/lib_src/pthread/ethr_event.c
new file mode 100644
index 0000000000..6731c0eb46
--- /dev/null
+++ b/erts/lib_src/pthread/ethr_event.c
@@ -0,0 +1,219 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_EVENT_IMPL__
+
+#include "ethread.h"
+
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
+
+#if defined(ETHR_LINUX_FUTEX_IMPL__)
+/* --- Linux futex implementation of ethread events ------------------------- */
+
+#include <sched.h>
+#include <errno.h>
+
+int
+ethr_event_init(ethr_event *e)
+{
+ ethr_atomic_init(&e->futex, ETHR_EVENT_OFF__);
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ return 0;
+}
+
+static ETHR_INLINE int
+wait__(ethr_event *e, int spincount)
+{
+ unsigned sc = spincount;
+ int res;
+ long val;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ while (1) {
+ while (1) {
+ val = ethr_atomic_read(&e->futex);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (val != ETHR_EVENT_OFF_WAITER__) {
+ val = ethr_atomic_cmpxchg(&e->futex,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(val == ETHR_EVENT_OFF__);
+ }
+
+ res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__, ETHR_EVENT_OFF_WAITER__);
+ if (res == EINTR)
+ break;
+ if (res != 0 && res != EWOULDBLOCK)
+ ETHR_FATAL_ERROR__(res);
+ }
+
+ return res;
+}
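+
+/*
+ * The event word has three states: ETHR_EVENT_ON__ (set),
+ * ETHR_EVENT_OFF__ (clear, no sleepers), and ETHR_EVENT_OFF_WAITER__
+ * (clear, at least one thread blocked in futex wait). wait__() above
+ * only enters the kernel after advertising itself as a waiter via the
+ * cmpxchg. A minimal sketch of the matching set operation follows; it
+ * assumes a value-returning ethr_atomic_xchg (in the style of the
+ * ethr_atomic_cmpxchg used above) and a wake operation
+ * ETHR_FUTEX_WAKE__ -- the real setter is an inline in ethread.h and
+ * may differ.
+ */
+#if 0
+static void
+example_event_set(ethr_event *e)
+{
+    /* Unconditionally flip the event on... */
+    long old = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+    /* ...and only pay for a syscall if someone is actually asleep. */
+    if (old == ETHR_EVENT_OFF_WAITER__)
+	(void) ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
+}
+#endif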
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+int
+ethr_event_init(ethr_event *e)
+{
+ int res;
+ ethr_atomic_init(&e->state, ETHR_EVENT_OFF__);
+ res = pthread_mutex_init(&e->mtx, NULL);
+ if (res != 0)
+ return res;
+ res = pthread_cond_init(&e->cnd, NULL);
+ if (res != 0) {
+ pthread_mutex_destroy(&e->mtx);
+ return res;
+ }
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ int res;
+ res = pthread_mutex_destroy(&e->mtx);
+ if (res != 0)
+ return res;
+ res = pthread_cond_destroy(&e->cnd);
+ if (res != 0)
+ return res;
+ return 0;
+}
+
+static ETHR_INLINE int
+wait__(ethr_event *e, int spincount)
+{
+ int sc = spincount;
+ long val;
+ int res, ulres;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ while (1) {
+ val = ethr_atomic_read(&e->state);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (val != ETHR_EVENT_OFF_WAITER__) {
+ val = ethr_atomic_cmpxchg(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(val == ETHR_EVENT_OFF__);
+ }
+
+ ETHR_ASSERT(val == ETHR_EVENT_OFF_WAITER__
+ || val == ETHR_EVENT_OFF__);
+
+ res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+
+ while (1) {
+
+ val = ethr_atomic_read(&e->state);
+ if (val == ETHR_EVENT_ON__)
+ break;
+
+ res = pthread_cond_wait(&e->cnd, &e->mtx);
+ if (res == EINTR)
+ break;
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+
+ ulres = pthread_mutex_unlock(&e->mtx);
+ if (ulres != 0)
+ ETHR_FATAL_ERROR__(ulres);
+
+ return res; /* 0 || EINTR */
+}
+
+#else
+#error No ethread event implementation
+#endif
+
+void
+ethr_event_reset(ethr_event *e)
+{
+ ethr_event_reset__(e);
+}
+
+void
+ethr_event_set(ethr_event *e)
+{
+ ethr_event_set__(e);
+}
+
+int
+ethr_event_wait(ethr_event *e)
+{
+ return wait__(e, 0);
+}
+
+int
+ethr_event_swait(ethr_event *e, int spincount)
+{
+ return wait__(e, spincount);
+}
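+
+/*
+ * Hedged usage sketch (the ex_* names are made up for illustration):
+ * one thread parks on an event while another publishes work. The event
+ * only remembers "set", not a count, so the waiter owns the
+ * reset/re-check cycle, and ethr_event_wait() may return EINTR.
+ */
+#if 0
+static ethr_event ex_event;   /* assume ethr_event_init() was called */
+static volatile int ex_ready;
+
+static void ex_consumer(void)
+{
+    while (!ex_ready) {
+	ethr_event_reset(&ex_event);
+	if (ex_ready)                      /* re-check after arming... */
+	    break;
+	(void) ethr_event_wait(&ex_event); /* 0 or EINTR; loop re-checks */
+    }
+}
+
+static void ex_producer(void)
+{
+    ex_ready = 1;              /* publish the condition first... */
+    ethr_event_set(&ex_event); /* ...then wake any waiter */
+}
+#endif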
diff --git a/erts/lib_src/pthread/ethread.c b/erts/lib_src/pthread/ethread.c
new file mode 100644
index 0000000000..ea1d9d43f0
--- /dev/null
+++ b/erts/lib_src/pthread/ethread.c
@@ -0,0 +1,477 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Pthread implementation of the ethread library
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_CHILD_WAIT_SPIN_COUNT 4000
+
+#include <stdio.h>
+#ifdef ETHR_TIME_WITH_SYS_TIME
+# include <time.h>
+# include <sys/time.h>
+#else
+# ifdef ETHR_HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+#endif
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <limits.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHREAD_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+#error Missing configure defines
+#endif
+
+pthread_key_t ethr_ts_event_key__;
+static int child_wait_spin_count;
+
+/*
+ * --------------------------------------------------------------------------
+ * Static functions
+ * --------------------------------------------------------------------------
+ */
+
+static void thr_exit_cleanup(void)
+{
+ ethr_run_exit_handlers__();
+}
+
+
+/* Argument passed to thr_wrapper() */
+typedef struct {
+ ethr_atomic_t result;
+ ethr_ts_event *tse;
+ void *(*thr_func)(void *);
+ void *arg;
+ void *prep_func_res;
+} ethr_thr_wrap_data__;
+
+static void *thr_wrapper(void *vtwd)
+{
+ long result;
+ void *res;
+ ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
+ void *(*thr_func)(void *) = twd->thr_func;
+ void *arg = twd->arg;
+ ethr_ts_event *tsep = NULL;
+
+ result = (long) ethr_make_ts_event__(&tsep);
+
+ if (result == 0) {
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ if (ethr_thr_child_func__)
+ ethr_thr_child_func__(twd->prep_func_res);
+ }
+
+ tsep = twd->tse; /* We aren't allowed to follow twd after
+ result has been set! */
+
+ ethr_atomic_set(&twd->result, result);
+
+ ethr_event_set(&tsep->event);
+
+ res = result == 0 ? (*thr_func)(arg) : NULL;
+
+ thr_exit_cleanup();
+ return res;
+}
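+
+/*
+ * Note the ordering in thr_wrapper(): twd lives on the creating
+ * thread's stack, so everything needed from it (including twd->tse)
+ * must be copied out before the atomic set of twd->result, since that
+ * store is what releases the parent from its wait loop in
+ * ethr_thr_create(); the event set afterwards merely cuts the parent's
+ * spin short.
+ */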
+
+/* internal exports */
+
+int ethr_set_tse__(ethr_ts_event *tsep)
+{
+ return pthread_setspecific(ethr_ts_event_key__, (void *) tsep);
+}
+
+ethr_ts_event *ethr_get_tse__(void)
+{
+ return pthread_getspecific(ethr_ts_event_key__);
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Exported functions
+ * --------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+ int res;
+
+ if (!ethr_not_inited__)
+ return EINVAL;
+
+ ethr_not_inited__ = 0;
+
+ res = ethr_init_common__(id);
+ if (res != 0)
+ goto error;
+
+ child_wait_spin_count = ETHR_CHILD_WAIT_SPIN_COUNT;
+ if (erts_get_cpu_configured(ethr_cpu_info__) == 1)
+ child_wait_spin_count = 0;
+
+    res = pthread_key_create(&ethr_ts_event_key__, ethr_ts_event_destructor__);
+    if (res != 0)
+	goto error;
+
+    return 0;
+ error:
+ ethr_not_inited__ = 1;
+ return res;
+}
+
+int
+ethr_late_init(ethr_late_init_data *id)
+{
+ int res = ethr_late_init_common__(id);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0;
+ return res;
+}
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ ethr_thr_wrap_data__ twd;
+ pthread_attr_t attr;
+ int res, dres;
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ ethr_atomic_init(&twd.result, -1);
+ twd.tse = ethr_get_ts_event();
+ twd.thr_func = func;
+ twd.arg = arg;
+
+ res = pthread_attr_init(&attr);
+ if (res != 0)
+ return res;
+
+ /* Error cleanup needed after this point */
+
+ /* Schedule child thread in system scope (if possible) ... */
+ res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
+ if (res != 0 && res != ENOTSUP)
+ goto error;
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+ size_t stack_size;
+#ifdef ETHR_DEBUG
+	suggested_stack_size /= 2; /* Make sure we have some margin */
+#endif
+#ifdef ETHR_STACK_GUARD_SIZE
+ /* The guard is at least on some platforms included in the stack size
+ passed when creating threads */
+ suggested_stack_size += ETHR_B2KW(ETHR_STACK_GUARD_SIZE);
+#endif
+ if (suggested_stack_size < ethr_min_stack_size__)
+ stack_size = ETHR_KW2B(ethr_min_stack_size__);
+ else if (suggested_stack_size > ethr_max_stack_size__)
+ stack_size = ETHR_KW2B(ethr_max_stack_size__);
+ else
+ stack_size = ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ (void) pthread_attr_setstacksize(&attr, stack_size);
+ }
+
+#ifdef ETHR_STACK_GUARD_SIZE
+ (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
+#endif
+
+ /* Detached or joinable... */
+ res = pthread_attr_setdetachstate(&attr,
+ (opts && opts->detached
+ ? PTHREAD_CREATE_DETACHED
+ : PTHREAD_CREATE_JOINABLE));
+ if (res != 0)
+ goto error;
+
+    /* Call prepare func if it exists */
+ if (ethr_thr_prepare_func__)
+ twd.prep_func_res = ethr_thr_prepare_func__();
+ else
+ twd.prep_func_res = NULL;
+
+ res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void*) &twd);
+
+ if (res == 0) {
+ int spin_count = child_wait_spin_count;
+
+ /* Wait for child to initialize... */
+ while (1) {
+ long result;
+ ethr_event_reset(&twd.tse->event);
+
+ result = ethr_atomic_read(&twd.result);
+ if (result == 0)
+ break;
+
+ if (result > 0) {
+ res = (int) result;
+ goto error;
+ }
+
+ res = ethr_event_swait(&twd.tse->event, spin_count);
+ if (res != 0 && res != EINTR)
+ goto error;
+ spin_count = 0;
+ }
+ }
+
+ /* Cleanup... */
+
+ error:
+ dres = pthread_attr_destroy(&attr);
+ if (res == 0)
+ res = dres;
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+ return res;
+}
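+
+/*
+ * Hedged usage sketch of the create/join cycle (the ex_* names are
+ * made up; the ethr_* calls are the ones defined in this file):
+ */
+#if 0
+static int ex_val = 42;
+
+static void *ex_thr_func(void *arg)
+{
+    return arg; /* handed back to the joiner via ethr_thr_join() */
+}
+
+static int ex_spawn_and_join(void)
+{
+    ethr_tid tid;
+    void *res;
+    int err = ethr_thr_create(&tid, ex_thr_func, (void *) &ex_val, NULL);
+    if (err != 0)
+	return err;
+    return ethr_thr_join(tid, &res); /* on success res == &ex_val */
+}
+#endif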
+
+int
+ethr_thr_join(ethr_tid tid, void **res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_join((pthread_t) tid, res);
+}
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_detach((pthread_t) tid);
+}
+
+void
+ethr_thr_exit(void *res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return;
+ }
+#endif
+ thr_exit_cleanup();
+ pthread_exit(res);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ return (ethr_tid) pthread_self();
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+ return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
+}
+
+
+/*
+ * Thread specific events
+ */
+
+ethr_ts_event *
+ethr_get_ts_event(void)
+{
+ return ethr_get_ts_event__();
+}
+
+void
+ethr_leave_ts_event(ethr_ts_event *tsep)
+{
+ ethr_leave_ts_event__(tsep);
+}
+
+/*
+ * Current time
+ */
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+ int res;
+ struct timeval tv;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ res = gettimeofday(&tv, NULL);
+ time->tv_sec = (long) tv.tv_sec;
+ time->tv_nsec = ((long) tv.tv_usec)*1000;
+ return res;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_key_create((pthread_key_t *) keyp, NULL);
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_key_delete((pthread_key_t) key);
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_setspecific((pthread_key_t) key, value);
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return NULL;
+ }
+#endif
+ return pthread_getspecific((pthread_key_t) key);
+}
+
+/*
+ * Signal functions
+ */
+
+#if ETHR_HAVE_ETHR_SIG_FUNCS
+
+int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!set && !oset) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_sigmask(how, set, oset);
+}
+
+int ethr_sigwait(const sigset_t *set, int *sig)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!set || !sig) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (sigwait(set, sig) < 0)
+ return errno;
+ return 0;
+}
+
+#endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
+
+ETHR_IMPL_NORETURN__
+ethr_abort__(void)
+{
+ abort();
+}
diff --git a/erts/lib_src/win/ethr_event.c b/erts/lib_src/win/ethr_event.c
new file mode 100644
index 0000000000..ddb4780ff1
--- /dev/null
+++ b/erts/lib_src/win/ethr_event.c
@@ -0,0 +1,120 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_EVENT_IMPL__
+
+#include "ethread.h"
+
+/* --- Windows implementation of thread events ------------------------------ */
+
+int
+ethr_event_init(ethr_event *e)
+{
+ e->state = ETHR_EVENT_OFF__;
+    e->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
+    if (e->handle == NULL) /* CreateEvent() fails with NULL, not INVALID_HANDLE_VALUE */
+	return ethr_win_get_errno__();
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ BOOL res = CloseHandle(e->handle);
+ return res == 0 ? ethr_win_get_errno__() : 0;
+}
+
+void
+ethr_event_set(ethr_event *e)
+{
+ ethr_event_set__(e);
+}
+
+void
+ethr_event_reset(ethr_event *e)
+{
+ ethr_event_reset__(e);
+}
+
+static ETHR_INLINE int
+wait(ethr_event *e, int spincount)
+{
+ LONG state;
+ DWORD code;
+ int sc, res, until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ sc = spincount;
+
+ while (1) {
+ while (1) {
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ state = e->state;
+#else
+ state = InterlockedExchangeAdd(&e->state, (LONG) 0);
+#endif
+ if (state == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (state != ETHR_EVENT_OFF_WAITER__) {
+ state = _InterlockedCompareExchange(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+ if (state == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(state == ETHR_EVENT_OFF__);
+ }
+
+ code = WaitForSingleObject(e->handle, INFINITE);
+ if (code != WAIT_OBJECT_0)
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+
+}
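+
+/*
+ * The handle created in ethr_event_init() is an auto-reset event
+ * (bManualReset == FALSE), so a successful WaitForSingleObject() also
+ * consumes the signal. This is why the outer loop in wait() re-reads
+ * e->state after waking: a wakeup that does not observe
+ * ETHR_EVENT_ON__ simply re-arms the waiter.
+ */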
+
+int
+ethr_event_wait(ethr_event *e)
+{
+ return wait(e, 0);
+}
+
+int
+ethr_event_swait(ethr_event *e, int spincount)
+{
+ return wait(e, spincount);
+}
diff --git a/erts/lib_src/win/ethread.c b/erts/lib_src/win/ethread.c
new file mode 100644
index 0000000000..69523edf94
--- /dev/null
+++ b/erts/lib_src/win/ethread.c
@@ -0,0 +1,625 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Windows native threads implementation of the ethread library
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_CHILD_WAIT_SPIN_COUNT 4000
+
+#undef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+#include <winerror.h>
+#include <stdio.h>
+#include <limits.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHREAD_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+#error Missing configure defines
+#endif
+
+/* Argument passed to thr_wrapper() */
+typedef struct {
+ ethr_tid *tid;
+ ethr_atomic_t result;
+ ethr_ts_event *tse;
+ void *(*thr_func)(void *);
+ void *arg;
+ void *prep_func_res;
+} ethr_thr_wrap_data__;
+
+#define ETHR_INVALID_TID_ID -1
+
+struct ethr_join_data_ {
+ HANDLE handle;
+ void *res;
+};
+
+static ethr_atomic_t thread_id_counter;
+static DWORD own_tid_key;
+static ethr_tid main_thr_tid;
+static int child_wait_spin_count;
+
+DWORD ethr_ts_event_key__;
+
+#define ETHR_GET_OWN_TID__ ((ethr_tid *) TlsGetValue(own_tid_key))
+
+/*
+ * --------------------------------------------------------------------------
+ * Static functions
+ * --------------------------------------------------------------------------
+ */
+
+static void thr_exit_cleanup(ethr_tid *tid, void *res)
+{
+
+ ETHR_ASSERT(tid == ETHR_GET_OWN_TID__);
+
+ if (tid->jdata)
+ tid->jdata->res = res;
+
+ ethr_run_exit_handlers__();
+ ethr_ts_event_destructor__((void *) ethr_get_tse__());
+}
+
+static unsigned __stdcall thr_wrapper(LPVOID vtwd)
+{
+ ethr_tid my_tid;
+ long result;
+ void *res;
+ ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
+ void *(*thr_func)(void *) = twd->thr_func;
+ void *arg = twd->arg;
+ ethr_ts_event *tsep = NULL;
+
+ result = (long) ethr_make_ts_event__(&tsep);
+
+ if (result == 0) {
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ my_tid = *twd->tid;
+ if (!TlsSetValue(own_tid_key, (LPVOID) &my_tid)) {
+ result = (long) ethr_win_get_errno__();
+ ethr_free_ts_event__(tsep);
+ }
+ else {
+ if (ethr_thr_child_func__)
+ ethr_thr_child_func__(twd->prep_func_res);
+ }
+ }
+
+ tsep = twd->tse; /* We aren't allowed to follow twd after
+ result has been set! */
+
+ ethr_atomic_set(&twd->result, result);
+
+ ethr_event_set(&tsep->event);
+
+ res = result == 0 ? (*thr_func)(arg) : NULL;
+
+ thr_exit_cleanup(&my_tid, res);
+ return 0;
+}
+
+#ifdef __GNUC__
+#define LL_LITERAL(X) X##LL
+#else
+#define LL_LITERAL(X) X##i64
+#endif
+
+#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
+
+static ETHR_INLINE void
+get_curr_time(long *sec, long *nsec)
+{
+ SYSTEMTIME t;
+ FILETIME ft;
+ LONGLONG lft;
+
+ GetSystemTime(&t);
+ SystemTimeToFileTime(&t, &ft);
+ memcpy(&lft, &ft, sizeof(lft));
+ *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
+ *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
+}
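+
+/*
+ * FILETIME counts 100-nanosecond intervals since 1601-01-01 (UTC), so
+ * dividing by 10^7 yields whole seconds and the remainder times 100
+ * yields nanoseconds; EPOCH_JULIAN_DIFF (11644473600, the number of
+ * seconds from 1601-01-01 to 1970-01-01) then rebases the result onto
+ * the Unix epoch.
+ */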
+
+/* internal exports */
+
+int
+ethr_win_get_errno__(void)
+{
+ return erts_get_last_win_errno();
+}
+
+int ethr_set_tse__(ethr_ts_event *tsep)
+{
+ return (TlsSetValue(ethr_ts_event_key__, (LPVOID) tsep)
+ ? 0
+ : ethr_win_get_errno__());
+}
+
+ethr_ts_event *ethr_get_tse__(void)
+{
+ return (ethr_ts_event *) TlsGetValue(ethr_ts_event_key__);
+}
+
+ETHR_IMPL_NORETURN__
+ethr_abort__(void)
+{
+#if 1
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Exported functions
+ * ----------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+#ifdef _WIN32_WINNT
+ DWORD major = (_WIN32_WINNT >> 8) & 0xff;
+ DWORD minor = _WIN32_WINNT & 0xff;
+ OSVERSIONINFO os_version;
+#endif
+ int err = 0;
+
+ if (!ethr_not_inited__)
+ return EINVAL;
+
+#ifdef _WIN32_WINNT
+ os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&os_version);
+ if (os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
+ || os_version.dwMajorVersion < major
+ || (os_version.dwMajorVersion == major
+ && os_version.dwMinorVersion < minor))
+ return ENOTSUP;
+#endif
+ err = ethr_init_common__(id);
+ if (err)
+ goto error;
+
+ own_tid_key = TlsAlloc();
+ if (own_tid_key == TLS_OUT_OF_INDEXES)
+ goto error;
+
+ ethr_atomic_init(&thread_id_counter, 0);
+
+ main_thr_tid.id = 0;
+ main_thr_tid.jdata = NULL;
+
+ if (!TlsSetValue(own_tid_key, (LPVOID) &main_thr_tid))
+ goto error;
+
+ ETHR_ASSERT(&main_thr_tid == ETHR_GET_OWN_TID__);
+
+ ethr_ts_event_key__ = TlsAlloc();
+ if (ethr_ts_event_key__ == TLS_OUT_OF_INDEXES)
+ goto error;
+
+ child_wait_spin_count = ETHR_CHILD_WAIT_SPIN_COUNT;
+ if (erts_get_cpu_configured(ethr_cpu_info__) == 1)
+ child_wait_spin_count = 0;
+
+ ethr_not_inited__ = 0;
+
+ return 0;
+
+ error:
+ ethr_not_inited__ = 1;
+ if (err == 0)
+ err = ethr_win_get_errno__();
+ ETHR_ASSERT(err != 0);
+ return err;
+}
+
+int
+ethr_late_init(ethr_late_init_data *id)
+{
+ int res = ethr_late_init_common__(id);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0;
+ return res;
+}
+
+
+/*
+ * Thread functions.
+ */
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ HANDLE handle = INVALID_HANDLE_VALUE;
+ int err = 0;
+ ethr_thr_wrap_data__ twd;
+ unsigned ID;
+ unsigned stack_size = 0; /* 0 = system default */
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ do {
+ tid->id = ethr_atomic_inc_read(&thread_id_counter);
+ } while (tid->id == ETHR_INVALID_TID_ID);
+
+ if (opts && opts->detached)
+ tid->jdata = NULL;
+ else {
+ tid->jdata = ethr_mem__.std.alloc(sizeof(struct ethr_join_data_));
+ if (!tid->jdata)
+ return ENOMEM;
+ tid->jdata->handle = INVALID_HANDLE_VALUE;
+ tid->jdata->res = NULL;
+ }
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+#ifdef ETHR_DEBUG
+	suggested_stack_size /= 2; /* Make sure we have some margin */
+#endif
+ if (suggested_stack_size < ethr_min_stack_size__)
+ stack_size = (unsigned) ETHR_KW2B(ethr_min_stack_size__);
+ else if (suggested_stack_size > ethr_max_stack_size__)
+ stack_size = (unsigned) ETHR_KW2B(ethr_max_stack_size__);
+ else
+ stack_size = (unsigned)
+ ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ }
+
+ ethr_atomic_init(&twd.result, -1);
+
+ twd.tid = tid;
+ twd.thr_func = func;
+ twd.arg = arg;
+ twd.tse = ethr_get_ts_event();
+
+    /* Call prepare func if it exists */
+ if (ethr_thr_prepare_func__)
+ twd.prep_func_res = ethr_thr_prepare_func__();
+ else
+ twd.prep_func_res = NULL;
+
+ /* spawn the thr_wrapper function */
+ handle = (HANDLE) _beginthreadex(NULL, stack_size, thr_wrapper,
+ (LPVOID) &twd, 0, &ID);
+ if (handle == (HANDLE) 0) {
+ handle = INVALID_HANDLE_VALUE;
+ goto error;
+ }
+ else {
+ int spin_count = child_wait_spin_count;
+
+ ETHR_ASSERT(handle != INVALID_HANDLE_VALUE);
+
+	if (!tid->jdata) {
+	    CloseHandle(handle);
+	    handle = INVALID_HANDLE_VALUE; /* avoid a double close in the error path */
+	}
+	else
+	    tid->jdata->handle = handle;
+
+ /* Wait for child to initialize... */
+ while (1) {
+ long result;
+ ethr_event_reset(&twd.tse->event);
+
+ result = ethr_atomic_read(&twd.result);
+ if (result == 0)
+ break;
+
+ if (result > 0) {
+ err = (int) result;
+ goto error;
+ }
+
+ err = ethr_event_swait(&twd.tse->event, spin_count);
+ if (err && err != EINTR)
+ goto error;
+ spin_count = 0;
+ }
+ }
+
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+
+ if (twd.tse)
+ ethr_leave_ts_event(twd.tse);
+
+ return 0;
+
+ error:
+
+ if (err == 0)
+ err = ethr_win_get_errno__();
+ ETHR_ASSERT(err != 0);
+
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+
+ if (handle != INVALID_HANDLE_VALUE) {
+ WaitForSingleObject(handle, INFINITE);
+ CloseHandle(handle);
+ }
+
+ if (tid->jdata) {
+ ethr_mem__.std.free(tid->jdata);
+ tid->jdata = NULL;
+ }
+
+ tid->id = ETHR_INVALID_TID_ID;
+
+ if (twd.tse)
+ ethr_leave_ts_event(twd.tse);
+
+ return err;
+}
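+
+/*
+ * Design note: unlike the pthread implementation, joinability is
+ * carried in the tid itself. A joinable thread gets a heap-allocated
+ * jdata (handle plus result slot) that ethr_thr_join() or
+ * ethr_thr_detach() later frees; a detached thread's handle is closed
+ * right after creation, so only the plain id remains.
+ */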
+
+int ethr_thr_join(ethr_tid tid, void **res)
+{
+ DWORD code;
+
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (tid.id == ETHR_INVALID_TID_ID || !tid.jdata)
+ return EINVAL;
+
+ /* Wait for thread to terminate */
+ code = WaitForSingleObject(tid.jdata->handle, INFINITE);
+ if (code != WAIT_OBJECT_0)
+ return ethr_win_get_errno__();
+
+ CloseHandle(tid.jdata->handle);
+ tid.jdata->handle = INVALID_HANDLE_VALUE;
+
+ if (res)
+ *res = tid.jdata->res;
+
+    /*
+     * The user must not join or detach this thread again; doing so
+     * leads to undefined behavior (the user's responsibility).
+     */
+
+ ethr_mem__.std.free(tid.jdata);
+
+ return 0;
+}
+
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (tid.id == ETHR_INVALID_TID_ID || !tid.jdata)
+ return EINVAL;
+
+ CloseHandle(tid.jdata->handle);
+ tid.jdata->handle = INVALID_HANDLE_VALUE;
+
+    /*
+     * The user must not join or detach this thread again; doing so
+     * leads to undefined behavior (the user's responsibility).
+     */
+
+ ethr_mem__.std.free(tid.jdata);
+
+ return 0;
+}
+
+
+void
+ethr_thr_exit(void *res)
+{
+ ethr_tid *tid;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return;
+ }
+#endif
+ tid = ETHR_GET_OWN_TID__;
+ if (!tid) {
+ ETHR_ASSERT(0);
+ _endthreadex((unsigned) 0);
+ }
+ thr_exit_cleanup(tid, res);
+ _endthreadex((unsigned) 0);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ ethr_tid *tid;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ethr_tid dummy_tid = {ETHR_INVALID_TID_ID, NULL};
+ ETHR_ASSERT(0);
+ return dummy_tid;
+ }
+#endif
+    /* It is okay for non-ethreads (i.e. native win32 threads) to call
+       ethr_self(). They will, however, get an invalid tid back. */
+ tid = ETHR_GET_OWN_TID__;
+ if (!tid) {
+ ethr_tid dummy_tid = {ETHR_INVALID_TID_ID, NULL};
+ return dummy_tid;
+ }
+ return *tid;
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+ /* An invalid tid does not equal any tid, not even an invalid tid */
+ return tid1.id == tid2.id && tid1.id != ETHR_INVALID_TID_ID;
+}
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ get_curr_time(&time->tv_sec, &time->tv_nsec);
+ return 0;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+ DWORD key;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ key = TlsAlloc();
+ if (key == TLS_OUT_OF_INDEXES)
+ return ethr_win_get_errno__();
+ *keyp = (ethr_tsd_key) key;
+ return 0;
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsFree((DWORD) key))
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsSetValue((DWORD) key, (LPVOID) value))
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return NULL;
+ }
+#endif
+ return (void *) TlsGetValue((DWORD) key);
+}
+
+
+/*
+ * Thread specific events
+ */
+
+ethr_ts_event *
+ethr_get_ts_event(void)
+{
+ return ethr_get_ts_event__();
+}
+
+void
+ethr_leave_ts_event(ethr_ts_event *tsep)
+{
+ ethr_leave_ts_event__(tsep);
+}
+
+ethr_ts_event *
+ethr_create_ts_event__(void)
+{
+ ethr_ts_event *tsep;
+ ethr_make_ts_event__(&tsep);
+ return tsep;
+}
diff --git a/erts/test/ethread_SUITE.erl b/erts/test/ethread_SUITE.erl
index bbc79e9381..0cc315e9be 100644
--- a/erts/test/ethread_SUITE.erl
+++ b/erts/test/ethread_SUITE.erl
@@ -37,21 +37,16 @@
equal_tids/1,
mutex/1,
try_lock_mutex/1,
- recursive_mutex/1,
time_now/1,
cond_wait/1,
- cond_timedwait/1,
broadcast/1,
detached_thread/1,
max_threads/1,
- forksafety/1,
- vfork/1,
tsd/1,
spinlock/1,
rwspinlock/1,
rwmutex/1,
- atomic/1,
- gate/1]).
+ atomic/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -60,21 +55,16 @@ tests() ->
equal_tids,
mutex,
try_lock_mutex,
- recursive_mutex,
time_now,
cond_wait,
- cond_timedwait,
broadcast,
detached_thread,
max_threads,
- forksafety,
- vfork,
tsd,
spinlock,
rwspinlock,
rwmutex,
- atomic,
- gate].
+ atomic].
all(doc) -> [];
all(suite) -> tests().
@@ -114,13 +104,6 @@ try_lock_mutex(suite) ->
try_lock_mutex(Config) ->
run_case(Config, "try_lock_mutex", "").
-recursive_mutex(doc) ->
- ["Tests recursive mutexes."];
-recursive_mutex(suite) ->
- [];
-recursive_mutex(Config) ->
- run_case(Config, "recursive_mutex", "").
-
time_now(doc) ->
["Tests ethr_time_now by comparing time values with Erlang."];
time_now(suite) ->
@@ -169,13 +152,6 @@ cond_wait(suite) ->
cond_wait(Config) ->
run_case(Config, "cond_wait", "").
-cond_timedwait(doc) ->
- ["Tests ethr_cond_timedwait with ethr_cond_signal and ethr_cond_broadcast."];
-cond_timedwait(suite) ->
- [];
-cond_timedwait(Config) ->
- run_case(Config, "cond_timedwait", "").
-
broadcast(doc) ->
    ["Tests that an ethr_cond_broadcast really wakes up all waiting threads"];
broadcast(suite) ->
@@ -197,25 +173,6 @@ max_threads(suite) ->
max_threads(Config) ->
run_case(Config, "max_threads", "").
-forksafety(doc) ->
- ["Tests forksafety."];
-forksafety(suite) ->
- [];
-forksafety(Config) ->
- run_case(Config, "forksafety", "").
-
-vfork(doc) ->
- ["Tests vfork with threads."];
-vfork(suite) ->
- case ?t:os_type() of
- {unix, osf1} ->
- {skip, "vfork() known to hang multi-threaded applications on osf1"};
- _ ->
- []
- end;
-vfork(Config) ->
- run_case(Config, "vfork", "").
-
tsd(doc) ->
["Tests thread specific data."];
tsd(suite) ->
@@ -251,13 +208,6 @@ atomic(suite) ->
atomic(Config) ->
run_case(Config, "atomic", "").
-gate(doc) ->
- ["Tests gates."];
-gate(suite) ->
- [];
-gate(Config) ->
- run_case(Config, "gate", "").
-
%%
%%
%% Auxiliary functions
diff --git a/erts/test/ethread_SUITE_data/ethread_tests.c b/erts/test/ethread_SUITE_data/ethread_tests.c
index f779f13c51..7fc71d8047 100644
--- a/erts/test/ethread_SUITE_data/ethread_tests.c
+++ b/erts/test/ethread_SUITE_data/ethread_tests.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -90,10 +90,12 @@ static void print_line(char *frmt,...)
print_eol();
}
+#if 0 /* Currently not used; silence annoying warning... */
static void print(char *frmt,...)
{
PRINT_VA_LIST(frmt);
}
+#endif
static void fail(char *frmt,...)
{
@@ -197,8 +199,8 @@ create_join_thread_test(void)
}
for (i = 1; i <= CJTT_NO_THREADS; i++) {
- int *tres;
- res = ethr_thr_join(cjtt_tids[i], (void **) &tres);
+ void *tres;
+ res = ethr_thr_join(cjtt_tids[i], &tres);
ASSERT(res == 0);
ASSERT(tres == &cjtt_res[i]);
ASSERT(cjtt_res[i] == i);
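
The hunk above switches the join result from "int *tres" to "void *tres": as used in this file, ethr_thr_join writes the joined thread's void * return value through a void ** out-parameter, and smuggling an int ** in through a cast is undefined behaviour. A minimal sketch of the corrected pattern; the "ethread.h" header name and the suite's ASSERT macro are assumed from context:

    #include "ethread.h"            /* assumed header for the ethr_* API */

    static int my_result = 42;

    static void *my_thread(void *unused)
    {
        return &my_result;          /* exit value handed back via join */
    }

    static void join_sketch(void)
    {
        ethr_tid tid;
        void *tres;                 /* genuinely void *, matching void ** */
        int res = ethr_thr_create(&tid, my_thread, NULL, NULL);
        ASSERT(res == 0);
        res = ethr_thr_join(tid, &tres);
        ASSERT(res == 0);
        ASSERT(tres == &my_result); /* the pointer the thread returned */
    }
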
@@ -216,8 +218,8 @@ create_join_thread_test(void)
#define ETT_THREADS 100000
static ethr_tid ett_tids[3];
-static ethr_mutex ett_mutex = ETHR_MUTEX_INITER;
-static ethr_cond ett_cond = ETHR_COND_INITER;
+static ethr_mutex ett_mutex;
+static ethr_cond ett_cond;
static int ett_terminate;
static void *
@@ -234,14 +236,12 @@ static void *
ett_thread2(void *unused)
{
int res;
- res = ethr_mutex_lock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&ett_mutex);
while (!ett_terminate) {
res = ethr_cond_wait(&ett_cond, &ett_mutex);
ASSERT(res == 0);
}
- res = ethr_mutex_unlock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&ett_mutex);
return NULL;
}
@@ -250,6 +250,10 @@ equal_tids_test(void)
{
int res, i;
+ res = ethr_mutex_init(&ett_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&ett_cond);
+ ASSERT(res == 0);
ett_tids[0] = ethr_self();
res = ethr_thr_create(&ett_tids[1], ett_thread, (void *) &ett_tids[1], NULL);
@@ -298,13 +302,10 @@ equal_tids_test(void)
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&ett_mutex);
ett_terminate = 1;
- res = ethr_cond_signal(&ett_cond);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_cond_signal(&ett_cond);
+ ethr_mutex_unlock(&ett_mutex);
res = ethr_thr_join(ett_tids[1], NULL);
ASSERT(res == 0);
@@ -321,17 +322,14 @@ equal_tids_test(void)
* Tests mutexes.
*/
-static ethr_mutex mt_mutex = ETHR_MUTEX_INITER;
+static ethr_mutex mt_mutex;
static int mt_data;
void *
mt_thread(void *unused)
{
- int res;
-
print_line("Aux thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Aux thread locked mutex");
ASSERT(mt_data == 0);
@@ -345,8 +343,7 @@ mt_thread(void *unused)
ASSERT(mt_data == 1);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Aux thread unlocked mutex");
return NULL;
@@ -356,18 +353,18 @@ mt_thread(void *unused)
static void
mutex_test(void)
{
- int do_restart = 1;
int res;
ethr_tid tid;
- print_line("Running test with statically initialized mutex");
+ print_line("Trying to initialize mutex");
+ res = ethr_mutex_init(&mt_mutex);
+ ASSERT(res == 0);
+ print_line("Initialized mutex");
- restart:
mt_data = 0;
print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Main thread locked mutex");
ASSERT(mt_data == 0);
@@ -383,8 +380,7 @@ mutex_test(void)
ASSERT(mt_data == 0);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Main thread unlocked mutex");
print_line("Main thread goes to sleep for 1 second");
@@ -392,8 +388,7 @@ mutex_test(void)
print_line("Main thread woke up");
print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Main thread locked mutex");
ASSERT(mt_data == 1);
@@ -404,8 +399,7 @@ mutex_test(void)
ASSERT(mt_data == 1);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Main thread unlocked mutex");
res = ethr_thr_join(tid, NULL);
@@ -416,20 +410,6 @@ mutex_test(void)
ASSERT(res == 0);
print_line("Main thread destroyed mutex");
- if (do_restart) {
- do_restart = 0;
-
- print_line("Running test with dynamically initialized mutex");
-
- print_line("Trying to initialize mutex");
- res = ethr_mutex_init(&mt_mutex);
- ASSERT(res == 0);
- print_line("Initialized mutex");
-
- goto restart;
-
- }
-
}
/*
@@ -438,9 +418,9 @@ mutex_test(void)
* Tests try lock mutex operation.
*/
-static ethr_mutex tlmt_mtx1 = ETHR_MUTEX_INITER;
-static ethr_mutex tlmt_mtx2 = ETHR_MUTEX_INITER;
-static ethr_cond tlmt_cnd2 = ETHR_COND_INITER;
+static ethr_mutex tlmt_mtx1;
+static ethr_mutex tlmt_mtx2;
+static ethr_cond tlmt_cnd2;
static int tlmt_mtx1_locked;
static int tlmt_mtx1_do_unlock;
@@ -450,32 +430,24 @@ tlmt_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&tlmt_mtx1);
- ASSERT(res == 0);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx1);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_locked = 1;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
while (!tlmt_mtx1_do_unlock) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx1);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
+ ethr_mutex_unlock(&tlmt_mtx1);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_locked = 0;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
+ ethr_mutex_unlock(&tlmt_mtx2);
return NULL;
}
@@ -486,48 +458,49 @@ try_lock_mutex_test(void)
int i, res;
ethr_tid tid;
+ res = ethr_mutex_init(&tlmt_mtx1);
+ ASSERT(res == 0);
+ res = ethr_mutex_init(&tlmt_mtx2);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&tlmt_cnd2);
+ ASSERT(res == 0);
+
tlmt_mtx1_locked = 0;
tlmt_mtx1_do_unlock = 0;
res = ethr_thr_create(&tid, tlmt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
while (!tlmt_mtx1_locked) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
for (i = 0; i < 10; i++) {
res = ethr_mutex_trylock(&tlmt_mtx1);
ASSERT(res == EBUSY);
}
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_do_unlock = 1;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
while (tlmt_mtx1_locked) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
res = ethr_mutex_trylock(&tlmt_mtx1);
ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx1);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx1);
res = ethr_thr_join(tid, NULL);
ASSERT(res == 0);
@@ -541,159 +514,6 @@ try_lock_mutex_test(void)
}
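
The trylock contract exercised above is unchanged: ethr_mutex_trylock still returns an error code, 0 on success and EBUSY while another thread holds the lock, even though plain lock and unlock are now void. A sketch of the distinction, with EBUSY taken from <errno.h>:

    #include <errno.h>

    static void trylock_sketch(ethr_mutex *held_elsewhere, ethr_mutex *free_mtx)
    {
        int res = ethr_mutex_trylock(held_elsewhere);
        ASSERT(res == EBUSY);        /* owner keeps it; we did not block */

        res = ethr_mutex_trylock(free_mtx);
        ASSERT(res == 0);            /* acquired without blocking */
        ethr_mutex_unlock(free_mtx); /* unlock is void in the new API */
    }
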
/*
- * The recursive mutex test case.
- *
- * Tests recursive mutexes.
- */
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-static ethr_mutex rmt_mutex
-#ifdef ETHR_REC_MUTEX_INITER
- = ETHR_REC_MUTEX_INITER
-#endif
- ;
-static int rmt_data;
-
-void *
-rmt_thread(void *unused)
-{
- int res;
-
- print_line("Aux thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Aux thread locked mutex");
-
- ASSERT(rmt_data == 0);
-
- rmt_data = 1;
- print_line("Aux thread wrote");
-
- print_line("Aux thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Aux thread woke up");
-
- ASSERT(rmt_data == 1);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Aux thread unlocked mutex");
-
- return NULL;
-}
-
-#endif
-
-static void
-recursive_mutex_test(void)
-{
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
- int do_restart = 1;
- int res;
- ethr_tid tid;
-
-#ifdef ETHR_REC_MUTEX_INITER
- print_line("Running test with statically initialized mutex");
-#else
- goto dynamic_init;
-#endif
-
- restart:
- rmt_data = 0;
-
- print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex");
-
- print_line("Main thread tries to lock mutex again");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex again");
-
- ASSERT(rmt_data == 0);
-
- print_line("Main thread about to create aux thread");
- res = ethr_thr_create(&tid, rmt_thread, NULL, NULL);
- ASSERT(res == 0);
- print_line("Main thread created aux thread");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 0);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 0);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex again");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex");
-
- ASSERT(rmt_data == 1);
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 1);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex");
-
- res = ethr_thr_join(tid, NULL);
- ASSERT(res == 0);
- print_line("Main thread joined aux thread");
-
- res = ethr_mutex_destroy(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread destroyed mutex");
-
- if (do_restart) {
-#ifndef ETHR_REC_MUTEX_INITER
- dynamic_init:
-#endif
- do_restart = 0;
-
- print_line("Running test with dynamically initialized mutex");
-
- print_line("Trying to initialize mutex");
- res = ethr_rec_mutex_init(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Initialized mutex");
-
- goto restart;
- }
-
-#ifndef ETHR_REC_MUTEX_INITER
- succeed("Static initializer for recursive mutexes not supported");
-#endif
-
-#else /* #ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT */
- skip("Recursive mutexes not supported");
-#endif /* #ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-}
-
-/*
* The time now test.
*
* Tests ethr_time_now by comparing time values with Erlang.
@@ -763,106 +583,82 @@ time_now_test(void)
*/
-static ethr_mutex cwt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond cwt_cond = ETHR_COND_INITER;
+static ethr_mutex cwt_mutex;
+static ethr_cond cwt_cond;
static int cwt_counter;
void *
-cwt_thread(void *is_timedwait_test_ptr)
+cwt_thread(void *unused)
{
- int use_timedwait = *((int *) is_timedwait_test_ptr);
int res;
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
-
- if (use_timedwait) {
- ethr_timeval tv;
- res = ethr_time_now(&tv);
- ASSERT(res == 0);
- tv.tv_sec += 3600; /* Make sure we won't time out */
+ ethr_mutex_lock(&cwt_mutex);
- do {
- res = ethr_cond_timedwait(&cwt_cond, &cwt_mutex, &tv);
- } while (res == EINTR);
- ASSERT(res == 0);
- }
- else {
- do {
- res = ethr_cond_wait(&cwt_cond, &cwt_mutex);
- } while (res == EINTR);
- ASSERT(res == 0);
- }
+ do {
+ res = ethr_cond_wait(&cwt_cond, &cwt_mutex);
+ } while (res == EINTR);
+ ASSERT(res == 0);
cwt_counter++;
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
return NULL;
}
static void
-cond_wait_test(int is_timedwait_test)
+cond_wait_test(void)
{
- int do_restart = !is_timedwait_test;
ethr_tid tid1, tid2;
int res;
- if (!is_timedwait_test)
- print_line("Running test with statically initialized mutex and cond");
+ res = ethr_mutex_init(&cwt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&cwt_cond);
+ ASSERT(res == 0);
- restart:
/* Wake with signal */
cwt_counter = 0;
- res = ethr_thr_create(&tid1, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid1, cwt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_thr_create(&tid2, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid2, cwt_thread, NULL, NULL);
ASSERT(res == 0);
    do_sleep(1); /* Make sure threads wait on cond var */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
- res = ethr_cond_signal(&cwt_cond); /* Wake one thread */
- ASSERT(res == 0);
+ ethr_cond_signal(&cwt_cond); /* Wake one thread */
do_sleep(1); /* Make sure awakened thread waits on mutex */
ASSERT(cwt_counter == 0);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened thread proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 1);
- res = ethr_cond_signal(&cwt_cond); /* Wake the other thread */
- ASSERT(res == 0);
+ ethr_cond_signal(&cwt_cond); /* Wake the other thread */
do_sleep(1); /* Make sure awakened thread waits on mutex */
ASSERT(cwt_counter == 1);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened thread proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 2);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
res = ethr_thr_join(tid1, NULL);
ASSERT(res == 0);
@@ -875,35 +671,30 @@ cond_wait_test(int is_timedwait_test)
cwt_counter = 0;
- res = ethr_thr_create(&tid1, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid1, cwt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_thr_create(&tid2, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid2, cwt_thread, NULL, NULL);
ASSERT(res == 0);
    do_sleep(1); /* Make sure threads wait on cond var */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
- res = ethr_cond_broadcast(&cwt_cond); /* Wake the threads */
- ASSERT(res == 0);
+ ethr_cond_broadcast(&cwt_cond); /* Wake the threads */
do_sleep(1); /* Make sure awakened threads wait on mutex */
ASSERT(cwt_counter == 0);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened threads proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 2);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
res = ethr_thr_join(tid1, NULL);
ASSERT(res == 0);
@@ -916,113 +707,6 @@ cond_wait_test(int is_timedwait_test)
res = ethr_cond_destroy(&cwt_cond);
ASSERT(res == 0);
- if (do_restart) {
- do_restart = 0;
- res = ethr_mutex_init(&cwt_mutex);
- ASSERT(res == 0);
- res = ethr_cond_init(&cwt_cond);
- ASSERT(res == 0);
- print_line("Running test with dynamically initialized mutex and cond");
- goto restart;
- }
-}
-
-/*
- * The cond timedwait test case.
- *
- * Tests ethr_cond_timedwait with ethr_cond_signal and ethr_cond_broadcast.
- */
-
-#define CTWT_MAX_TIME_DIFF 100000
-
-static long
-ctwt_check_timeout(long to)
-{
- int res;
- ethr_timeval tva, tvb;
- long diff, abs_diff;
-
- res = ethr_time_now(&tva);
- ASSERT(res == 0);
-
- tva.tv_sec += to / 1000;
- tva.tv_nsec += (to % 1000) * 1000000;
- if (tva.tv_nsec >= 1000000000) {
- tva.tv_sec++;
- tva.tv_nsec -= 1000000000;
- ASSERT(tva.tv_nsec < 1000000000);
- }
-
- do {
- res = ethr_cond_timedwait(&cwt_cond, &cwt_mutex, &tva);
- } while (res == EINTR);
- ASSERT(res == ETIMEDOUT);
-
- res = ethr_time_now(&tvb);
- ASSERT(res == 0);
-
- diff = (tvb.tv_sec - tva.tv_sec) * 1000000;
- diff += (tvb.tv_nsec - tva.tv_nsec)/1000;
-
- print("Timeout=%ld; ", to);
- print("tva.tv_sec=%ld tva.tv_nsec=%ld; ", tva.tv_sec, tva.tv_nsec);
- print("tvb.tv_sec=%ld tvb.tv_nsec=%ld; ", tvb.tv_sec, tvb.tv_nsec);
- print_line("diff (tvb - tva) = %ld us", diff);
-
- abs_diff = (long) abs((int) diff);
-
- ASSERT(CTWT_MAX_TIME_DIFF >= abs_diff);
- return abs_diff;
-}
-
-static void
-cond_timedwait_test(void)
-{
- int do_restart = 1;
- long abs_diff, max_abs_diff = 0;
- int res;
-
-#define CTWT_UPD_MAX_DIFF if (abs_diff > max_abs_diff) max_abs_diff = abs_diff;
-
- print_line("Running test with statically initialized mutex and cond");
-
- print_line("CTWT_MAX_TIME_DIFF=%d", CTWT_MAX_TIME_DIFF);
-
- restart:
-
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
-
- abs_diff = ctwt_check_timeout(300);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(700);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(1000);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(2300);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(5100);
- CTWT_UPD_MAX_DIFF;
-
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
-
- cond_wait_test(1);
-
- if (do_restart) {
- do_restart = 0;
- res = ethr_mutex_init(&cwt_mutex);
- ASSERT(res == 0);
- res = ethr_cond_init(&cwt_cond);
- ASSERT(res == 0);
- print_line("Running test with dynamically initialized mutex and cond");
- goto restart;
- }
-
- print_line("Max absolute diff = %d us", max_abs_diff);
- succeed("Max absolute diff = %d us", max_abs_diff);
-
-#undef CTWT_UPD_MAX_DIFF
}
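
The surviving wait loops all follow the same shape: take the mutex (now a void call), re-check the predicate around every wait, and tolerate EINTR since ethr_cond_wait may be interrupted by signals. Distilled into one helper, under the same header and ASSERT assumptions:

    static void wait_until_set(ethr_mutex *mtx, ethr_cond *cnd, int *flag)
    {
        int res;
        ethr_mutex_lock(mtx);            /* void in the new API */
        while (!*flag) {                 /* predicate re-checked on wakeup */
            res = ethr_cond_wait(cnd, mtx);
            ASSERT(res == 0 || res == EINTR);
        }
        ethr_mutex_unlock(mtx);
    }
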
/*
@@ -1037,9 +721,9 @@ cond_timedwait_test(void)
static int bct_woken = 0;
static int bct_waiting = 0;
static int bct_done = 0;
-static ethr_mutex bct_mutex = ETHR_MUTEX_INITER;
-static ethr_cond bct_cond = ETHR_COND_INITER;
-static ethr_cond bct_cntrl_cond = ETHR_COND_INITER;
+static ethr_mutex bct_mutex;
+static ethr_cond bct_cond;
+static ethr_cond bct_cntrl_cond;
static void *
@@ -1047,30 +731,24 @@ bct_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&bct_mutex);
while (!bct_done) {
bct_waiting++;
- if (bct_waiting == BCT_THREADS) {
- res = ethr_cond_signal(&bct_cntrl_cond);
- ASSERT(res == 0);
- }
+ if (bct_waiting == BCT_THREADS)
+ ethr_cond_signal(&bct_cntrl_cond);
do {
res = ethr_cond_wait(&bct_cond, &bct_mutex);
} while (res == EINTR);
ASSERT(res == 0);
bct_woken++;
- if (bct_woken == BCT_THREADS) {
- res = ethr_cond_signal(&bct_cntrl_cond);
- ASSERT(res == 0);
- }
+ if (bct_woken == BCT_THREADS)
+ ethr_cond_signal(&bct_cntrl_cond);
}
- res = ethr_mutex_unlock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&bct_mutex);
return NULL;
}
@@ -1081,14 +759,20 @@ broadcast_test(void)
int res, i;
ethr_tid tid[BCT_THREADS];
+ res = ethr_mutex_init(&bct_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&bct_cntrl_cond);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&bct_cond);
+ ASSERT(res == 0);
+
for (i = 0; i < BCT_THREADS; i++) {
res = ethr_thr_create(&tid[i], bct_thread, NULL, NULL);
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&bct_mutex);
for (i = 0; i < BCT_NO_OF_WAITS; i++) {
@@ -1101,8 +785,7 @@ broadcast_test(void)
bct_woken = 0;
/* Wake all threads */
- res = ethr_cond_broadcast(&bct_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&bct_cond);
while (bct_woken != BCT_THREADS) {
res = ethr_cond_wait(&bct_cntrl_cond, &bct_mutex);
@@ -1114,13 +797,11 @@ broadcast_test(void)
bct_done = 1;
/* Wake all threads */
- res = ethr_cond_broadcast(&bct_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&bct_cond);
- res = ethr_mutex_unlock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&bct_mutex);
- for (i = 0; i < BCT_THREADS - 1; i++) {
+ for (i = 0; i < BCT_THREADS; i++) {
res = ethr_thr_join(tid[i], NULL);
ASSERT(res == 0);
}
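
The loop-bound change above is a real fix, not cleanup: BCT_THREADS threads are created, so joining only BCT_THREADS - 1 of them leaked one joinable thread every run. For non-detached threads the create and join loops must use the same bound; condensed from the function above:

    /* creation and join must be symmetric for non-detached threads */
    for (i = 0; i < BCT_THREADS; i++) {
        res = ethr_thr_create(&tid[i], bct_thread, NULL, NULL);
        ASSERT(res == 0);
    }
    /* ... signal the threads to terminate ... */
    for (i = 0; i < BCT_THREADS; i++) {  /* same bound: join them all */
        res = ethr_thr_join(tid[i], NULL);
        ASSERT(res == 0);
    }
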
@@ -1143,26 +824,22 @@ broadcast_test(void)
#define DT_THREADS (50*1024)
#define DT_BATCH_SIZE 64
-static ethr_mutex dt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond dt_cond = ETHR_COND_INITER;
+static ethr_mutex dt_mutex;
+static ethr_cond dt_cond;
static int dt_count;
static int dt_limit;
static void *
dt_thread(void *unused)
{
- int res;
-
- res = ethr_mutex_lock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&dt_mutex);
dt_count++;
if (dt_count >= dt_limit)
ethr_cond_signal(&dt_cond);
- res = ethr_mutex_unlock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&dt_mutex);
return NULL;
}
@@ -1174,6 +851,11 @@ detached_thread_test(void)
ethr_tid tid[DT_BATCH_SIZE];
int i, j, res;
+ res = ethr_mutex_init(&dt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&dt_cond);
+ ASSERT(res == 0);
+
thr_opts.detached = 1;
dt_count = 0;
dt_limit = 0;
@@ -1187,14 +869,12 @@ detached_thread_test(void)
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&dt_mutex);
while (dt_count < dt_limit) {
res = ethr_cond_wait(&dt_cond, &dt_mutex);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&dt_mutex);
print_line("dt_count = %d", dt_count);
}
@@ -1211,8 +891,8 @@ detached_thread_test(void)
#define MTT_TIMES 10
static int mtt_terminate;
-static ethr_mutex mtt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond mtt_cond = ETHR_COND_INITER;
+static ethr_mutex mtt_mutex;
+static ethr_cond mtt_cond;
static char mtt_string[22*MTT_TIMES]; /* 22 is enough for ", %d" */
@@ -1220,16 +900,14 @@ void *mtt_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mtt_mutex);
while (!mtt_terminate) {
res = ethr_cond_wait(&mtt_cond, &mtt_mutex);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mtt_mutex);
return NULL;
}
@@ -1265,16 +943,13 @@ mtt_create_join_threads(void)
print_line("%d = ethr_thr_create()", res);
print_line("Number of created threads: %d", no_threads);
- res = ethr_mutex_lock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mtt_mutex);
mtt_terminate = 1;
- res = ethr_cond_broadcast(&mtt_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&mtt_cond);
- res = ethr_mutex_unlock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mtt_mutex);
while (ix) {
res = ethr_thr_join(tids[--ix], NULL);
@@ -1292,9 +967,14 @@ mtt_create_join_threads(void)
static void
max_threads_test(void)
{
- int no_threads[MTT_TIMES], i, up, down, eq;
+ int no_threads[MTT_TIMES], i, up, down, eq, res;
char *str;
+ res = ethr_mutex_init(&mtt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&mtt_cond);
+ ASSERT(res == 0);
+
for (i = 0; i < MTT_TIMES; i++) {
no_threads[i] = mtt_create_join_threads();
}
@@ -1326,272 +1006,6 @@ max_threads_test(void)
}
-
-/*
- * The forksafety test case.
- *
- * Tests forksafety.
- */
-#ifdef __WIN32__
-#define NO_FORK_PRESENT
-#endif
-
-#ifndef NO_FORK_PRESENT
-
-static ethr_mutex ft_test_inner_mutex = ETHR_MUTEX_INITER;
-static ethr_mutex ft_test_outer_mutex = ETHR_MUTEX_INITER;
-static ethr_mutex ft_go_mutex = ETHR_MUTEX_INITER;
-static ethr_cond ft_go_cond = ETHR_COND_INITER;
-static int ft_go;
-static int ft_have_forked;
-
-static void *
-ft_thread(void *unused)
-{
- int res;
-
- res = ethr_mutex_lock(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
- res = ethr_mutex_lock(&ft_go_mutex);
- ASSERT(res == 0);
-
- ft_go = 1;
- res = ethr_cond_signal(&ft_go_cond);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&ft_go_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(!ft_have_forked);
-
- res = ethr_mutex_lock(&ft_test_inner_mutex);
- ASSERT(res == 0);
-
- res = ethr_mutex_unlock(&ft_test_inner_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(!ft_have_forked);
-
- res = ethr_mutex_unlock(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(ft_have_forked);
-
-
- return NULL;
-}
-
-#endif /* #ifndef NO_FORK_PRESENT */
-
-static void
-forksafety_test(void)
-{
-#ifdef NO_FORK_PRESENT
- skip("No fork() present; nothing to test");
-#elif defined(DEBUG)
- skip("Doesn't work in debug build");
-#else
- char snd_msg[] = "ok, bye!";
- char rec_msg[sizeof(snd_msg)*2];
- ethr_tid tid;
- int res;
- int fds[2];
-
-
- res = ethr_mutex_set_forksafe(&ft_test_inner_mutex);
- if (res == ENOTSUP) {
- skip("Forksafety not supported on this platform!");
- }
- ASSERT(res == 0);
- res = ethr_mutex_set_forksafe(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
-
- res = pipe(fds);
- ASSERT(res == 0);
-
- ft_go = 0;
- ft_have_forked = 0;
-
- res = ethr_mutex_lock(&ft_go_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_create(&tid, ft_thread, NULL, NULL);
- ASSERT(res == 0);
-
- do {
- res = ethr_cond_wait(&ft_go_cond, &ft_go_mutex);
- } while (res == EINTR || !ft_go);
- ASSERT(res == 0);
-
- res = ethr_mutex_unlock(&ft_go_mutex);
- ASSERT(res == 0);
-
- res = fork();
- ft_have_forked = 1;
- if (res == 0) {
- close(fds[0]);
- res = ethr_mutex_lock(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_lock(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_unlock(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_unlock(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
-
- res = ethr_mutex_destroy(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_destroy(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
-
- res = (int) write(fds[1], (void *) snd_msg, sizeof(snd_msg));
- if (res != sizeof(snd_msg))
- _exit(1);
- close(fds[1]);
- _exit(0);
- }
- ASSERT(res > 0);
- close(fds[1]);
-
- res = (int) read(fds[0], (void *) rec_msg, sizeof(rec_msg));
- ASSERT(res == (int) sizeof(snd_msg));
- ASSERT(strcmp(snd_msg, rec_msg) == 0);
-
- close(fds[0]);
-#endif
-}
-
-
-/*
- * The vfork test case.
- *
- * Tests vfork with threads.
- */
-
-#ifdef __WIN32__
-#define NO_VFORK_PRESENT
-#endif
-
-#ifndef NO_VFORK_PRESENT
-
-#undef vfork
-
-static ethr_mutex vt_mutex = ETHR_MUTEX_INITER;
-
-static void *
-vt_thread(void *vprog)
-{
- char *prog = (char *) vprog;
- int res;
- char snd_msg[] = "ok, bye!";
- char rec_msg[sizeof(snd_msg)*2];
- int fds[2];
- char closefd[20];
- char writefd[20];
-
- res = pipe(fds);
- ASSERT(res == 0);
-
- res = sprintf(closefd, "%d", fds[0]);
- ASSERT(res <= 20);
- res = sprintf(writefd, "%d", fds[1]);
- ASSERT(res <= 20);
-
- print("parent: About to vfork and execute ");
- print("execlp(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", NULL)",
- prog, prog, "vfork", "exec", snd_msg, closefd, writefd);
- print_line(" in child");
- res = vfork();
- if (res == 0) {
- execlp(prog, prog, "vfork", "exec", snd_msg, closefd, writefd, NULL);
- _exit(1);
- }
- ASSERT(res > 0);
-
- print_line("parent: I'm back");
-
- close(fds[1]);
-
- res = (int) read(fds[0], (void *) rec_msg, sizeof(rec_msg));
- print_line("parent: %d = read()", res);
- print_line("parent: rec_msg=\"%s\"", rec_msg);
- ASSERT(res == (int) sizeof(snd_msg));
- ASSERT(strcmp(snd_msg, rec_msg) == 0);
-
- close(fds[0]);
-
- return NULL;
-}
-
-#endif /* #ifndef NO_VFORK_PRESENT */
-
-static void
-vfork_test(int argc, char *argv[])
-{
-#ifdef NO_VFORK_PRESENT
- skip("No vfork() present; nothing to test");
-#else
- int res;
- ethr_tid tid;
-
- if (argc == 6 && strcmp("exec", argv[2]) == 0) {
- /* We are child after vfork() and execlp() ... */
-
- char *snd_msg;
- int closefd;
- int writefd;
-
- snd_msg = argv[3];
- closefd = atoi(argv[4]);
- writefd = atoi(argv[5]);
-
- print_line("child: snd_msg=\"%s\"; closefd=%d writefd=%d",
- snd_msg, closefd, writefd);
-
- close(closefd);
-
- res = (int) write(writefd, (void *) snd_msg, strlen(snd_msg)+1);
- print_line("child: %d = write()", res);
- if (res != strlen(snd_msg)+1)
- exit(1);
- close(writefd);
- print_line("child: bye");
- exit(0);
- }
- ASSERT(argc == 2);
-
- res = ethr_mutex_set_forksafe(&vt_mutex);
- ASSERT(res == 0 || res == ENOTSUP);
- res = ethr_mutex_lock(&vt_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_create(&tid, vt_thread, (void *) argv[0], NULL);
- ASSERT(res == 0);
-
- do_sleep(1);
-
- res = ethr_mutex_unlock(&vt_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_join(tid, NULL);
- ASSERT(res == 0);
-
- res = ethr_mutex_destroy(&vt_mutex);
- ASSERT(res == 0);
-#endif
-}
-
-
/*
* The tsd test case.
*
@@ -1651,11 +1065,8 @@ static int st_data;
void *
st_thread(void *unused)
{
- int res;
-
print_line("Aux thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Aux thread locked spinlock");
ASSERT(st_data == 0);
@@ -1669,8 +1080,7 @@ st_thread(void *unused)
ASSERT(st_data == 1);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Aux thread unlocked spinlock");
return NULL;
@@ -1691,8 +1101,7 @@ spinlock_test(void)
st_data = 0;
print_line("Main thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Main thread locked spinlock");
ASSERT(st_data == 0);
@@ -1708,8 +1117,7 @@ spinlock_test(void)
ASSERT(st_data == 0);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Main thread unlocked spinlock");
print_line("Main thread goes to sleep for 1 second");
@@ -1717,8 +1125,7 @@ spinlock_test(void)
print_line("Main thread woke up");
print_line("Main thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Main thread locked spinlock");
ASSERT(st_data == 1);
@@ -1729,8 +1136,7 @@ spinlock_test(void)
ASSERT(st_data == 1);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Main thread unlocked spinlock");
res = ethr_thr_join(tid, NULL);
@@ -1757,23 +1163,19 @@ void *
rwst_thread(void *unused)
{
int data;
- int res;
print_line("Aux thread tries to read lock rwspinlock");
- res = ethr_read_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_lock(&rwst_rwspinlock);
print_line("Aux thread read locked rwspinlock");
ASSERT(rwst_data == 4711);
print_line("Aux thread tries to read unlock rwspinlock");
- res = ethr_read_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_unlock(&rwst_rwspinlock);
print_line("Aux thread read unlocked rwspinlock");
print_line("Aux thread tries to write lock rwspinlock");
- res = ethr_write_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_lock(&rwst_rwspinlock);
print_line("Aux thread write locked rwspinlock");
data = ++rwst_data;
@@ -1787,8 +1189,7 @@ rwst_thread(void *unused)
++rwst_data;
print_line("Aux thread tries to write unlock rwspinlock");
- res = ethr_write_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_unlock(&rwst_rwspinlock);
print_line("Aux thread write unlocked rwspinlock");
return NULL;
@@ -1810,8 +1211,7 @@ rwspinlock_test(void)
rwst_data = 4711;
print_line("Main thread tries to read lock rwspinlock");
- res = ethr_read_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_lock(&rwst_rwspinlock);
print_line("Main thread read locked rwspinlock");
ASSERT(rwst_data == 4711);
@@ -1828,13 +1228,11 @@ rwspinlock_test(void)
ASSERT(rwst_data == 4711);
print_line("Main thread tries to read unlock rwspinlock");
- res = ethr_read_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_unlock(&rwst_rwspinlock);
print_line("Main thread read unlocked rwspinlock");
print_line("Main thread tries to write lock rwspinlock");
- res = ethr_write_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_lock(&rwst_rwspinlock);
print_line("Main thread write locked rwspinlock");
data = ++rwst_data;
@@ -1847,8 +1245,7 @@ rwspinlock_test(void)
++rwst_data;
print_line("Main thread tries to write unlock rwspinlock");
- res = ethr_write_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_unlock(&rwst_rwspinlock);
print_line("Main thread write unlocked rwspinlock");
res = ethr_thr_join(tid, NULL);
@@ -1875,23 +1272,19 @@ void *
rwmt_thread(void *unused)
{
int data;
- int res;
print_line("Aux thread tries to read lock rwmutex");
- res = ethr_rwmutex_rlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rlock(&rwmt_rwmutex);
print_line("Aux thread read locked rwmutex");
ASSERT(rwmt_data == 4711);
print_line("Aux thread tries to read unlock rwmutex");
- res = ethr_rwmutex_runlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_runlock(&rwmt_rwmutex);
print_line("Aux thread read unlocked rwmutex");
print_line("Aux thread tries to write lock rwmutex");
- res = ethr_rwmutex_rwlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwlock(&rwmt_rwmutex);
print_line("Aux thread write locked rwmutex");
data = ++rwmt_data;
@@ -1905,8 +1298,7 @@ rwmt_thread(void *unused)
++rwmt_data;
print_line("Aux thread tries to write unlock rwmutex");
- res = ethr_rwmutex_rwunlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwunlock(&rwmt_rwmutex);
print_line("Aux thread write unlocked rwmutex");
return NULL;
@@ -1928,8 +1320,7 @@ rwmutex_test(void)
rwmt_data = 4711;
print_line("Main thread tries to read lock rwmutex");
- res = ethr_rwmutex_rlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rlock(&rwmt_rwmutex);
print_line("Main thread read locked rwmutex");
ASSERT(rwmt_data == 4711);
@@ -1946,13 +1337,11 @@ rwmutex_test(void)
ASSERT(rwmt_data == 4711);
print_line("Main thread tries to read unlock rwmutex");
- res = ethr_rwmutex_runlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_runlock(&rwmt_rwmutex);
print_line("Main thread read unlocked rwmutex");
print_line("Main thread tries to write lock rwmutex");
- res = ethr_rwmutex_rwlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwlock(&rwmt_rwmutex);
print_line("Main thread write locked rwmutex");
data = ++rwmt_data;
@@ -1965,8 +1354,7 @@ rwmutex_test(void)
++rwmt_data;
print_line("Main thread tries to write unlock rwmutex");
- res = ethr_rwmutex_rwunlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwunlock(&rwmt_rwmutex);
print_line("Main thread write unlocked rwmutex");
res = ethr_thr_join(tid, NULL);
@@ -1998,65 +1386,53 @@ static ethr_atomic_t at_data;
void *
at_thread(void *unused)
{
- int res, i;
+ int i;
long val, go;
- res = ethr_atomic_inctest(&at_ready, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_inc_read(&at_ready);
ASSERT(val > 0);
ASSERT(val <= AT_THREADS);
do {
- res = ethr_atomic_read(&at_go, &go);
- ASSERT(res == 0);
+ go = ethr_atomic_read(&at_go);
} while (!go);
for (i = 0; i < AT_ITER; i++) {
- res = ethr_atomic_or_old(&at_data, at_set_val, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read_bor(&at_data, at_set_val);
ASSERT(val >= (i == 0 ? 0 : at_set_val) + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_and_old(&at_data, ~at_rm_val, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read_band(&at_data, ~at_rm_val);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_inctest(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_inc_read(&at_data);
ASSERT(val > at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_dectest(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_dec_read(&at_data);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_inc(&at_data);
- ASSERT(res == 0);
+ ethr_atomic_inc(&at_data);
- res = ethr_atomic_dec(&at_data);
- ASSERT(res == 0);
+ ethr_atomic_dec(&at_data);
- res = ethr_atomic_addtest(&at_data, (long) 4711, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_add_read(&at_data, (long) 4711);
ASSERT(val >= at_set_val + (long) 2*4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_add(&at_data, (long) -4711);
- ASSERT(res == 0);
+ ethr_atomic_add(&at_data, (long) -4711);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
}
- res = ethr_atomic_inc(&at_done);
- ASSERT(res == 0);
+ ethr_atomic_inc(&at_done);
return NULL;
}
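
The atomic rewrite above tracks an API rename in which the read-modify-write operations return a value instead of an error code. Judging by the assertions in this hunk: ethr_atomic_inc_read, dec_read and add_read return the new value, ethr_atomic_read_bor and read_band return the previous value, and ethr_atomic_xchg returns the value swapped out. A worked trace under those assumptions:

    ethr_atomic_t a;
    long v;

    ethr_atomic_init(&a, 0);             /* init is void now */
    ethr_atomic_inc(&a);                 /* a = 1, no result wanted       */
    v = ethr_atomic_inc_read(&a);        /* a = 2,  v = 2  (new value)    */
    v = ethr_atomic_read_bor(&a, 4);     /* a = 6,  v = 2  (old value)    */
    v = ethr_atomic_read_band(&a, ~2L);  /* a = 4,  v = 6  (old value)    */
    v = ethr_atomic_xchg(&a, 17);        /* a = 17, v = 4  (old value)    */
    v = ethr_atomic_read(&a);            /* v = 17                        */
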
@@ -2069,14 +1445,13 @@ atomic_test(void)
ethr_tid tid[AT_THREADS];
ethr_thr_opts thr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
- if (sizeof(long) > 4) {
+#if ETHR_SIZEOF_PTR > 4
at_rm_val = ((long) 1) << 57;
at_set_val = ((long) 1) << 60;
- }
- else {
+#else
at_rm_val = ((long) 1) << 27;
at_set_val = ((long) 1) << 30;
- }
+#endif
at_max_val = at_set_val + at_rm_val + ((long) AT_THREADS + 1) * 4711;
data_init = at_rm_val + (long) 4711;
@@ -2085,22 +1460,15 @@ atomic_test(void)
thr_opts.detached = 1;
print_line("Initializing");
- res = ethr_atomic_init(&at_ready, 0);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_go, 0);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_done, data_init);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_data, data_init);
- ASSERT(res == 0);
+ ethr_atomic_init(&at_ready, 0);
+ ethr_atomic_init(&at_go, 0);
+ ethr_atomic_init(&at_done, data_init);
+ ethr_atomic_init(&at_data, data_init);
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val == data_init);
- res = ethr_atomic_set(&at_done, 0);
- ASSERT(res == 0);
- res = ethr_atomic_read(&at_done, &val);
- ASSERT(res == 0);
+ ethr_atomic_set(&at_done, 0);
+ val = ethr_atomic_read(&at_done);
ASSERT(val == 0);
print_line("Creating threads");
@@ -2111,31 +1479,27 @@ atomic_test(void)
print_line("Waiting for threads to ready up");
do {
- res = ethr_atomic_read(&at_ready, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_ready);
ASSERT(val >= 0);
ASSERT(val <= AT_THREADS);
} while (val != AT_THREADS);
print_line("Letting threads loose");
- res = ethr_atomic_xchg(&at_go, 17, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_xchg(&at_go, 17);
ASSERT(val == 0);
- res = ethr_atomic_read(&at_go, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_go);
ASSERT(val == 17);
print_line("Waiting for threads to finish");
do {
- res = ethr_atomic_read(&at_done, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_done);
ASSERT(val >= 0);
ASSERT(val <= AT_THREADS);
} while (val != AT_THREADS);
print_line("Checking result");
-    res = ethr_atomic_read(&at_data, &val);
-    ASSERT(res == 0);
+    val = ethr_atomic_read(&at_data);
ASSERT(val == data_final);
print_line("Result ok");
@@ -2143,190 +1507,6 @@ atomic_test(void)
}
-/*
- * The gate test case.
- *
- * Tests gates.
- */
-
-#define GT_THREADS 10
-
-static ethr_atomic_t gt_wait1;
-static ethr_atomic_t gt_wait2;
-static ethr_atomic_t gt_done;
-
-static ethr_gate gt_gate1;
-static ethr_gate gt_gate2;
-
-void *
-gt_thread(void *thr_no)
-{
- int no = (int)(long) thr_no;
- int swait = no % 2 == 0;
- int res;
- long done;
-
-
- do {
-
- res = ethr_atomic_inc(&gt_wait1);
- ASSERT(res == 0);
-
- if (swait)
- res = ethr_gate_swait(&gt_gate1, INT_MAX);
- else
- res = ethr_gate_wait(&gt_gate1);
- ASSERT(res == 0);
-
- res = ethr_atomic_dec(&gt_wait1);
- ASSERT(res == 0);
-
- res = ethr_atomic_inc(&gt_wait2);
- ASSERT(res == 0);
-
- if (swait)
- res = ethr_gate_swait(&gt_gate2, INT_MAX);
- else
- res = ethr_gate_wait(&gt_gate2);
- ASSERT(res == 0);
-
- res = ethr_atomic_dec(&gt_wait2);
- ASSERT(res == 0);
-
- res = ethr_atomic_read(&gt_done, &done);
- ASSERT(res == 0);
- } while (!done);
- return NULL;
-}
-
-
-static void
-gate_test(void)
-{
- long val;
- int res, i;
- ethr_tid tid[GT_THREADS];
-
- print_line("Initializing");
- res = ethr_atomic_init(&gt_wait1, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_atomic_init(&gt_wait2, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_atomic_init(&gt_done, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_init(&gt_gate1);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_init(&gt_gate2);
- ASSERT_EQ(res, 0, "%d");
-
- print_line("Creating threads");
- for (i = 0; i < GT_THREADS; i++) {
- res = ethr_thr_create(&tid[i], gt_thread, (void *) i, NULL);
- ASSERT_EQ(res, 0, "%d");
- }
-
- print_line("Waiting for threads to ready up");
- do {
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT(0 <= val && val <= GT_THREADS);
- } while (val != GT_THREADS);
-
- print_line("Testing");
-
- res = ethr_gate_let_through(&gt_gate1, 8);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 8)),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 8, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 8, "%ld");
-
- res = ethr_gate_let_through(&gt_gate2, 4);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 4)),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 4, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 4, "%ld");
-
- res = ethr_gate_let_through(&gt_gate1, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == GT_THREADS)),
- 60);
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS, "%ld");
-
- res = ethr_gate_let_through(&gt_gate2, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 4)),
- 60);
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 4, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 4, "%ld");
-
- res = ethr_atomic_set(&gt_done, 1);
- ASSERT_EQ(res, 0, "%d");
-
- res = ethr_gate_let_through(&gt_gate2, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_let_through(&gt_gate1, GT_THREADS - 4);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM(((res = ethr_atomic_read(&gt_wait1, &val)) != 0
- || (val == 0
- && ((res = ethr_atomic_read(&gt_wait2, &val)) != 0
- || val == 0))),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- print_line("Joining threads");
- for (i = 0; i < GT_THREADS; i++) {
- res = ethr_thr_join(tid[i], NULL);
- ASSERT_EQ(res, 0, "%d");
- }
-
- res = ethr_gate_destroy(&gt_gate1);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_destroy(&gt_gate2);
- ASSERT_EQ(res, 0, "%d");
-
-}
-
#endif /* #ifndef ETHR_NO_THREAD_LIB */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
@@ -2342,14 +1522,12 @@ main(int argc, char *argv[])
#ifndef ETHR_NO_THREAD_LIB
{
char *testcase;
- int res;
send_my_pid();
testcase = argv[1];
- res = ethr_init(NULL);
- if (res != 0)
+ if (ethr_init(NULL) != 0 || ethr_late_init(NULL) != 0)
fail("Failed to initialize the ethread library");
if (strcmp(testcase, "create_join_thread") == 0)
@@ -2360,24 +1538,16 @@ main(int argc, char *argv[])
mutex_test();
else if (strcmp(testcase, "try_lock_mutex") == 0)
try_lock_mutex_test();
- else if (strcmp(testcase, "recursive_mutex") == 0)
- recursive_mutex_test();
else if (strcmp(testcase, "time_now") == 0)
time_now_test();
else if (strcmp(testcase, "cond_wait") == 0)
- cond_wait_test(0);
- else if (strcmp(testcase, "cond_timedwait") == 0)
- cond_timedwait_test();
+ cond_wait_test();
else if (strcmp(testcase, "broadcast") == 0)
broadcast_test();
else if (strcmp(testcase, "detached_thread") == 0)
detached_thread_test();
else if (strcmp(testcase, "max_threads") == 0)
max_threads_test();
- else if (strcmp(testcase, "forksafety") == 0)
- forksafety_test();
- else if (strcmp(testcase, "vfork") == 0)
- vfork_test(argc, argv);
else if (strcmp(testcase, "tsd") == 0)
tsd_test();
else if (strcmp(testcase, "spinlock") == 0)
@@ -2388,8 +1558,6 @@ main(int argc, char *argv[])
rwmutex_test();
else if (strcmp(testcase, "atomic") == 0)
atomic_test();
- else if (strcmp(testcase, "gate") == 0)
- gate_test();
else
skip("Test case \"%s\" not implemented yet", testcase);
diff --git a/erts/vsn.mk b/erts/vsn.mk
index 6e1338cff5..ce6330e6dd 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -17,8 +17,8 @@
# %CopyrightEnd%
#
-VSN = 5.8
-SYSTEM_VSN = R14A
+VSN = 5.8.1
+SYSTEM_VSN = R14B
# Port number 4365 in 4.2
# Port number 4366 in 4.3