Diffstat (limited to 'erts')
-rw-r--r--  erts/Makefile.in | 3
-rw-r--r--  erts/aclocal.m4 | 13
-rwxr-xr-x  erts/autoconf/win32.config.cache.static | 2
-rw-r--r--  erts/configure.in | 67
-rw-r--r--  erts/doc/src/absform.xml | 3
-rw-r--r--  erts/doc/src/alt_dist.xml | 4
-rw-r--r--  erts/doc/src/erl.xml | 22
-rw-r--r--  erts/doc/src/erl_driver.xml | 19
-rw-r--r-- [-rwxr-xr-x]  erts/doc/src/erl_ext_fig.gif | bin 3834 -> 3834 bytes
-rw-r--r--  erts/doc/src/erl_nif.xml | 10
-rw-r--r--  erts/doc/src/erl_prim_loader.xml | 6
-rw-r--r--  erts/doc/src/erlang.xml | 146
-rw-r--r--  erts/doc/src/erlsrv.xml | 19
-rw-r--r--  erts/doc/src/erts_alloc.xml | 64
-rw-r--r--  erts/doc/src/make.dep | 32
-rw-r--r--  erts/doc/src/notes.xml | 358
-rw-r--r--  erts/doc/src/start_erl.xml | 31
-rw-r--r--  erts/doc/src/zlib.xml | 60
-rw-r--r--  erts/emulator/Makefile.in | 43
-rw-r--r--  erts/emulator/beam/atom.names | 5
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 149
-rw-r--r--  erts/emulator/beam/beam_bp.c | 38
-rw-r--r--  erts/emulator/beam/beam_catches.c | 46
-rw-r--r--  erts/emulator/beam/beam_debug.c | 32
-rw-r--r--  erts/emulator/beam/beam_emu.c | 562
-rw-r--r--  erts/emulator/beam/beam_load.c | 1428
-rw-r--r--  erts/emulator/beam/beam_load.h | 15
-rw-r--r--  erts/emulator/beam/bif.c | 202
-rw-r--r--  erts/emulator/beam/bif.h | 155
-rw-r--r--  erts/emulator/beam/bif.tab | 14
-rw-r--r--  erts/emulator/beam/big.c | 56
-rw-r--r--  erts/emulator/beam/big.h | 1
-rw-r--r--  erts/emulator/beam/binary.c | 10
-rw-r--r--  erts/emulator/beam/break.c | 13
-rw-r--r--  erts/emulator/beam/dist.c | 62
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c | 20
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 1316
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 202
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 62
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 1015
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 109
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 18
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.h | 2
-rw-r--r--  erts/emulator/beam/erl_arith.c | 8
-rw-r--r--  erts/emulator/beam/erl_async.c | 737
-rw-r--r--  erts/emulator/beam/erl_async.h | 66
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 18
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.h | 2
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 64
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 137
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 382
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 104
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 25
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 24
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 75
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 135
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 3
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 144
-rw-r--r--  erts/emulator/beam/erl_bits.c | 12
-rw-r--r--  erts/emulator/beam/erl_bits.h | 4
-rw-r--r--  erts/emulator/beam/erl_db.c | 155
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 24
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 3
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 10
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 53
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 6
-rw-r--r--  erts/emulator/beam/erl_driver.h | 15
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 8
-rw-r--r--  erts/emulator/beam/erl_gc.c | 102
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c | 20
-rw-r--r--  erts/emulator/beam/erl_init.c | 173
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 2
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 14
-rw-r--r--  erts/emulator/beam/erl_message.c | 4
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 8
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 8
-rw-r--r--  erts/emulator/beam/erl_nif.c | 60
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 200
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 5
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 26
-rw-r--r--  erts/emulator/beam/erl_process.c | 1571
-rw-r--r--  erts/emulator/beam/erl_process.h | 129
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 18
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 17
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 305
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h | 239
-rw-r--r--  erts/emulator/beam/erl_smp.h | 10
-rw-r--r--  erts/emulator/beam/erl_term.h | 8
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 1373
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 233
-rw-r--r--  erts/emulator/beam/erl_thr_queue.c | 745
-rw-r--r--  erts/emulator/beam/erl_thr_queue.h | 211
-rw-r--r--  erts/emulator/beam/erl_threads.h | 29
-rw-r--r--  erts/emulator/beam/erl_trace.c | 126
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 36
-rw-r--r--  erts/emulator/beam/erl_vm.h | 8
-rw-r--r--  erts/emulator/beam/export.c | 6
-rw-r--r--  erts/emulator/beam/external.c | 92
-rw-r--r--  erts/emulator/beam/external.h | 7
-rw-r--r--  erts/emulator/beam/fix_alloc.c | 287
-rw-r--r--  erts/emulator/beam/global.h | 46
-rw-r--r--  erts/emulator/beam/io.c | 6
-rw-r--r--  erts/emulator/beam/module.c | 5
-rw-r--r--  erts/emulator/beam/ops.tab | 178
-rw-r--r--  erts/emulator/beam/sys.h | 301
-rw-r--r--  erts/emulator/beam/time.c | 2
-rw-r--r--  erts/emulator/beam/utils.c | 806
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 11
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 733
-rw-r--r--  erts/emulator/drivers/unix/ttsl_drv.c | 3
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/drivers/win32/win_efile.c | 6
-rw-r--r--  erts/emulator/hipe/hipe_abi.txt | 2
-rw-r--r--  erts/emulator/hipe/hipe_amd64_bifs.m4 | 51
-rw-r--r--  erts/emulator/hipe/hipe_arm_bifs.m4 | 39
-rw-r--r--  erts/emulator/hipe/hipe_bif0.h | 2
-rw-r--r--  erts/emulator/hipe/hipe_bif2.c | 23
-rw-r--r--  erts/emulator/hipe/hipe_bif_list.m4 | 26
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c | 74
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 52
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c | 45
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.h | 17
-rw-r--r--  erts/emulator/hipe/hipe_ppc_asm.m4 | 11
-rw-r--r--  erts/emulator/hipe/hipe_ppc_bifs.m4 | 59
-rw-r--r--  erts/emulator/hipe/hipe_process.h | 3
-rw-r--r--  erts/emulator/hipe/hipe_risc_glue.h | 16
-rw-r--r--  erts/emulator/hipe/hipe_sparc_bifs.m4 | 65
-rw-r--r--  erts/emulator/hipe/hipe_x86_bifs.m4 | 55
-rw-r--r--  erts/emulator/hipe/hipe_x86_glue.h | 19
-rw-r--r--  erts/emulator/pcre/Makefile | 26
-rw-r--r--  erts/emulator/pcre/Makefile.in | 165
-rw-r--r--  erts/emulator/pcre/pcre.mk | 113
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 25
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h | 9
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 668
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h | 11
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 191
-rw-r--r--  erts/emulator/sys/common/erl_poll.h | 5
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 6
-rw-r--r--  erts/emulator/sys/unix/sys.c | 249
-rw-r--r--  erts/emulator/sys/vxworks/sys.c | 20
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 6
-rw-r--r--  erts/emulator/sys/win32/sys.c | 149
-rw-r--r--  erts/emulator/sys/win32/sys_env.c | 14
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c | 5
-rw-r--r--  erts/emulator/test/binary_SUITE.erl | 16
-rw-r--r--  erts/emulator/test/bs_construct_SUITE.erl | 17
-rw-r--r--  erts/emulator/test/bs_match_misc_SUITE.erl | 14
-rw-r--r--  erts/emulator/test/bs_utf_SUITE.erl | 12
-rw-r--r--  erts/emulator/test/busy_port_SUITE.erl | 20
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl | 26
-rw-r--r--  erts/emulator/test/code_SUITE.erl | 196
-rw-r--r--  erts/emulator/test/code_SUITE_data/fun_confusion.erl | 31
-rw-r--r--  erts/emulator/test/code_SUITE_data/literals.erl | 21
-rw-r--r--  erts/emulator/test/distribution_SUITE.erl | 31
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 158
-rw-r--r--  erts/emulator/test/driver_SUITE_data/Makefile.src | 4
-rw-r--r--  erts/emulator/test/driver_SUITE_data/async_blast_drv.c | 124
-rw-r--r--  erts/emulator/test/driver_SUITE_data/thr_free_drv.c | 241
-rw-r--r--  erts/emulator/test/exception_SUITE.erl | 246
-rw-r--r--  erts/emulator/test/float_SUITE.erl | 101
-rw-r--r--  erts/emulator/test/guard_SUITE.erl | 8
-rw-r--r--  erts/emulator/test/hibernate_SUITE.erl | 31
-rw-r--r--  erts/emulator/test/mtx_SUITE.erl | 13
-rw-r--r--  erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c | 48
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 54
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 65
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/process_SUITE.erl | 99
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 13
-rw-r--r--  erts/emulator/test/system_info_SUITE.erl | 313
-rw-r--r--  erts/emulator/test/trace_local_SUITE.erl | 32
-rwxr-xr-x  erts/emulator/utils/beam_makeops | 261
-rwxr-xr-x  erts/emulator/utils/make_preload | 1
-rwxr-xr-x  erts/emulator/utils/make_tables | 39
-rw-r--r--  erts/emulator/valgrind/suppress.patched.3.6.0 | 307
-rw-r--r--  erts/emulator/valgrind/suppress.standard | 268
-rw-r--r--  erts/epmd/src/epmd.c | 6
-rw-r--r--  erts/epmd/src/epmd_cli.c | 7
-rw-r--r--  erts/epmd/src/epmd_int.h | 2
-rw-r--r--  erts/epmd/src/epmd_srv.c | 3
-rw-r--r--  erts/etc/common/erlc.c | 1
-rw-r--r--  erts/etc/common/erlexec.c | 4
-rw-r--r--  erts/etc/win32/erlsrv/erlsrv_interactive.c | 191
-rw-r--r--  erts/etc/win32/erlsrv/erlsrv_interactive.h | 4
-rw-r--r-- [-rwxr-xr-x]  erts/etc/win32/nsis/erlang_uninst.ico | bin 766 -> 766 bytes
-rw-r--r--  erts/etc/win32/start_erl.c | 161
-rw-r--r--  erts/example/matrix_nif.c | 87
-rw-r--r--  erts/include/internal/ethr_mutex.h | 115
-rw-r--r--  erts/include/internal/ethread.h | 1
-rw-r--r--  erts/lib_src/common/erl_misc_utils.c | 8
-rw-r--r--  erts/lib_src/common/erl_printf.c | 14
-rw-r--r--  erts/lib_src/common/erl_printf_format.c | 4
-rw-r--r--  erts/lib_src/common/ethr_mutex.c | 496
-rw-r--r--  erts/preloaded/ebin/erl_prim_loader.beam | bin 50400 -> 53080 bytes
-rw-r--r--  erts/preloaded/ebin/erlang.beam | bin 26772 -> 39568 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam | bin 45172 -> 45264 bytes
-rw-r--r--  erts/preloaded/ebin/otp_ring0.beam | bin 1432 -> 1528 bytes
-rw-r--r--  erts/preloaded/ebin/prim_file.beam | bin 32332 -> 32448 bytes
-rw-r--r--  erts/preloaded/ebin/prim_inet.beam | bin 64884 -> 69916 bytes
-rw-r--r--  erts/preloaded/ebin/prim_zip.beam | bin 22436 -> 22532 bytes
-rw-r--r--  erts/preloaded/ebin/zlib.beam | bin 12016 -> 11836 bytes
-rw-r--r--  erts/preloaded/src/erl_prim_loader.erl | 26
-rw-r--r--  erts/preloaded/src/erlang.erl | 404
-rw-r--r--  erts/preloaded/src/init.erl | 2
-rw-r--r--  erts/preloaded/src/prim_file.erl | 6
-rw-r--r--  erts/preloaded/src/prim_inet.erl | 162
-rw-r--r--  erts/preloaded/src/prim_zip.erl | 4
-rw-r--r--  erts/preloaded/src/zlib.erl | 115
-rw-r--r--  erts/test/autoimport_SUITE.erl | 11
-rw-r--r--  erts/test/erlc_SUITE.erl | 27
-rw-r--r--  erts/test/nt_SUITE.erl | 8
-rw-r--r--  erts/test/z_SUITE.erl | 23
212 files changed, 16484 insertions, 7258 deletions
diff --git a/erts/Makefile.in b/erts/Makefile.in
index 2e63fc469e..8b86fbadf2 100644
--- a/erts/Makefile.in
+++ b/erts/Makefile.in
@@ -16,6 +16,9 @@
#
# %CopyrightEnd%
#
+
+.NOTPARALLEL:
+
include $(ERL_TOP)/make/target.mk
include vsn.mk
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 3ebea66d30..bd228a2d1f 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -125,6 +125,9 @@ AC_DEFUN(LM_FIND_EMU_CC,
ac_cv_prog_emu_cc,
[
AC_TRY_COMPILE([],[
+#ifdef __llvm__
+#error "llvm is currently unable to compile beam_emu.c"
+#endif
__label__ lbl1;
__label__ lbl2;
int x = magic();
@@ -140,7 +143,7 @@ lbl2:
],ac_cv_prog_emu_cc=$CC,ac_cv_prog_emu_cc=no)
if test $ac_cv_prog_emu_cc = no; then
- for ac_progname in emu_cc.sh gcc; do
+ for ac_progname in emu_cc.sh gcc-4.2 gcc; do
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
ac_dummy="$PATH"
for ac_dir in $ac_dummy; do
@@ -1363,8 +1366,6 @@ case "$GCC-$host_cpu" in
;;
esac
-
-
AC_DEFINE(ETHR_HAVE_ETHREAD_DEFINES, 1, \
[Define if you have all ethread defines])
@@ -1645,11 +1646,11 @@ dnl Freely inspired by AC_TRY_LINK. (Maybe better to create a
dnl AC_LANG_JAVA instead...)
AC_DEFUN(ERL_TRY_LINK_JAVA,
[java_link='$JAVAC conftest.java 1>&AC_FD_CC'
-changequote(«, »)dnl
+changequote(, )dnl
cat > conftest.java <<EOF
-«$1»
+$1
class conftest { public static void main(String[] args) {
- «$2»
+ $2
; return; }}
EOF
changequote([, ])dnl
diff --git a/erts/autoconf/win32.config.cache.static b/erts/autoconf/win32.config.cache.static
index d25b1df9d9..b387db2b22 100755
--- a/erts/autoconf/win32.config.cache.static
+++ b/erts/autoconf/win32.config.cache.static
@@ -96,7 +96,6 @@ ac_cv_func_sbrk=${ac_cv_func_sbrk=no}
ac_cv_func_select=${ac_cv_func_select=no}
ac_cv_func_setlocale=${ac_cv_func_setlocale=yes}
ac_cv_func_setsid=${ac_cv_func_setsid=no}
-ac_cv_func_setvbuf_reversed=${ac_cv_func_setvbuf_reversed=yes}
ac_cv_func_socket=${ac_cv_func_socket=no}
ac_cv_func_strchr=${ac_cv_func_strchr=yes}
ac_cv_func_strerror=${ac_cv_func_strerror=yes}
@@ -124,7 +123,6 @@ ac_cv_header_ieeefp_h=${ac_cv_header_ieeefp_h=no}
ac_cv_header_inttypes_h=${ac_cv_header_inttypes_h=no}
ac_cv_header_langinfo_h=${ac_cv_header_langinfo_h=no}
ac_cv_header_limits_h=${ac_cv_header_limits_h=yes}
-ac_cv_header_mach_o_dyld_h=${ac_cv_header_mach_o_dyld_h=no}
ac_cv_header_malloc_h=${ac_cv_header_malloc_h=yes}
ac_cv_header_memory_h=${ac_cv_header_memory_h=yes}
ac_cv_header_net_errno_h=${ac_cv_header_net_errno_h=no}
diff --git a/erts/configure.in b/erts/configure.in
index fac07f8b6a..d865e675c4 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -191,7 +191,7 @@ AS_HELP_STRING([--disable-kernel-poll], [disable kernel poll support]),
AC_ARG_ENABLE(sctp,
-AS_HELP_STRING([--enable-sctp], [enable sctp support])
+AS_HELP_STRING([--enable-sctp], [enable sctp support (default)])
AS_HELP_STRING([--disable-sctp], [disable sctp support]),
[ case "$enableval" in
no) enable_sctp=no ;;
@@ -259,13 +259,6 @@ AS_HELP_STRING([--enable-m32-build],
esac
],enable_m32_build=no)
-AC_ARG_ENABLE(fixalloc,
-AS_HELP_STRING([--disable-fixalloc], [disable the use of fix_alloc]))
-if test x${enable_fixalloc} = xno ; then
- AC_DEFINE(NO_FIX_ALLOC,[],
- [Define if you don't want the fix allocator in Erlang])
-fi
-
AC_SUBST(PERFCTR_PATH)
AC_ARG_WITH(perfctr,
AS_HELP_STRING([--with-perfctr=PATH],
@@ -914,16 +907,6 @@ fi
AC_SUBST(ERLANG_OSTYPE)
-dnl Which sysv4 would this be, and what is it for???
-dnl XXX: replace with feature tests.
-case $host_os in
- sysv4*)
- AC_DEFINE(SOCKOPT_CONNECT_STAT,[],[Obscure SYSV feature])
- AC_DEFINE(NO_PRAGMA_WEAK,[],[Obscure SYSV feature])
- LIBS="$LIBS -lgen -lc -L /usr/ucblib -lucb"
- ;;
-esac
-
# Check how to export functions from the emulator executable, needed
# when dynamically loaded drivers are loaded (so that they can find
# emulator functions).
@@ -1484,7 +1467,7 @@ AC_CHECK_HEADERS(fcntl.h limits.h unistd.h syslog.h dlfcn.h ieeefp.h \
sys/types.h sys/stropts.h sys/sysctl.h \
sys/ioctl.h sys/time.h sys/uio.h \
sys/socket.h sys/sockio.h sys/socketio.h \
- net/errno.h malloc.h mach-o/dyld.h arpa/nameser.h \
+ net/errno.h malloc.h arpa/nameser.h \
pty.h util.h utmp.h langinfo.h poll.h sdkddkver.h)
AC_CHECK_HEADER(sys/resource.h,
@@ -1503,7 +1486,7 @@ AC_CHECK_HEADER(sys/devpoll.h, have_kernel_poll=/dev/poll)
dnl Check for kernel SCTP support
AC_SUBST(LIBSCTP)
-if test "x$enable_sctp" = "xyes" ; then
+if test "x$enable_sctp" != "xno" ; then
AC_CHECK_HEADER(netinet/sctp.h,
[LIBSCTP=libsctp.so.1
AC_DEFINE(HAVE_SCTP_H, [1],
@@ -1513,8 +1496,21 @@ if test "x$enable_sctp" = "xyes" ; then
#include <sys/socket.h>
#endif
])
+ AC_CHECK_FUNCS([sctp_bindx sctp_peeloff])
AC_CHECK_DECLS([SCTP_UNORDERED, SCTP_ADDR_OVER, SCTP_ABORT,
- SCTP_EOF, SCTP_SENDALL, SCTP_ADDR_CONFIRMED], [], [],
+ SCTP_EOF, SCTP_SENDALL, SCTP_ADDR_CONFIRMED,
+ SCTP_DELAYED_ACK_TIME,
+ SCTP_EMPTY,
+ SCTP_CLOSED, SCTPS_IDLE,
+ SCTP_BOUND, SCTPS_BOUND,
+ SCTP_LISTEN, SCTPS_LISTEN,
+ SCTP_COOKIE_WAIT, SCTPS_COOKIE_WAIT,
+ SCTP_COOKIE_ECHOED, SCTPS_COOKIE_ECHOED,
+ SCTP_ESTABLISHED, SCTPS_ESTABLISHED,
+ SCTP_SHUTDOWN_PENDING, SCTPS_SHUTDOWN_PENDING,
+ SCTP_SHUTDOWN_SENT, SCTPS_SHUTDOWN_SENT,
+ SCTP_SHUTDOWN_RECEIVED, SCTPS_SHUTDOWN_RECEIVED,
+ SCTP_SHUTDOWN_ACK_SENT, SCTPS_SHUTDOWN_ACK_SENT], [], [],
[#if HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
@@ -1673,6 +1669,15 @@ esac
AC_C_BIGENDIAN
+dnl fdatasync syscall (Unix only)
+AC_CHECK_FUNCS([fdatasync])
+
+dnl Find which C libraries are required to use fdatasync
+dnl TODO: Remove check once SunOS >= 5.11 is required by erts.
+dnl fdatasync requires linking against -lrt on SunOS <= 5.10.
+dnl OpenSolaris 2009.06 is SunOS 5.11 and does not require -lrt.
+AC_SEARCH_LIBS(fdatasync, [rt])
+
dnl ----------------------------------------------------------------------
dnl Checks for library functions.
dnl ----------------------------------------------------------------------
@@ -1800,11 +1805,6 @@ AC_CHECK_FUNCS([ieee_handler fpsetmask finite isnan isinf res_gethostbyname dlop
AC_CHECK_DECLS([posix2time],,,[#include <time.h>])
-if test "X$host" = "Xwin32"; then
- ac_cv_func_setvbuf_reversed=yes
-fi
-AC_FUNC_SETVBUF_REVERSED
-
disable_vfork=false
if test "x$EMU_THR_LIB_NAME" != "x"; then
AC_MSG_CHECKING([if vfork is known to hang multithreaded applications])
@@ -1860,12 +1860,6 @@ fi
dnl Need by run_erl.
AC_CHECK_FUNCS([openpty])
-dnl fdatasync syscall (Unix only)
-AC_CHECK_FUNCS([fdatasync])
-
-dnl Find which C libraries are required to use fdatasync
-AC_SEARCH_LIBS(fdatasync, [rt])
-
AC_CHECK_HEADERS(net/if_dl.h ifaddrs.h netpacket/packet.h)
AC_CHECK_FUNCS([getifaddrs])
@@ -3535,7 +3529,7 @@ AC_SUBST(STATIC_KERBEROS_LIBS)
AC_SUBST(SSL_LINK_WITH_ZLIB)
AC_SUBST(STATIC_ZLIB_LIBS)
-std_ssl_locations="/usr/local /usr/sfw /opt/local /usr /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
+std_ssl_locations="/usr/local /usr/sfw /usr /opt/local /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
AC_ARG_WITH(ssl-zlib,
AS_HELP_STRING([--with-ssl-zlib=PATH],
@@ -3722,7 +3716,7 @@ case "$erl_xcomp_without_sysroot-$with_ssl" in
SSL_RUNTIME_LIBDIR="$rdir/lib"
SSL_LIBDIR="$dir/lib"
SSL_CRYPTO_LIBNAME=libeay32
- SSL_CRYPTO_LIBNAME=ssleay32
+ SSL_SSL_LIBNAME=ssleay32
elif test -f "$dir/lib/openssl.lib"; then
SSL_RUNTIME_LIBDIR="$rdir/lib"
SSL_LIBDIR="$dir/lib"
@@ -3904,7 +3898,7 @@ dnl so it is - be adoptable
elif test -f "$with_ssl/lib/libeay32.lib"; then
SSL_LIBDIR="$with_ssl/lib"
SSL_CRYPTO_LIBNAME=libeay32
- SSL_CRYPTO_LIBNAME=ssleay32
+ SSL_SSL_LIBNAME=ssleay32
else
# This probably wont work, but that's what the user said, so...
SSL_LIBDIR="$with_ssl/lib"
@@ -4309,7 +4303,6 @@ dnl Note that the output files are relative to $srcdir
AC_OUTPUT(
emulator/$host/Makefile:emulator/Makefile.in
emulator/zlib/$host/Makefile:emulator/zlib/Makefile.in
- emulator/pcre/$host/Makefile:emulator/pcre/Makefile.in
epmd/src/$host/Makefile:epmd/src/Makefile.in
etc/common/$host/Makefile:etc/common/Makefile.in
include/internal/$host/ethread.mk:include/internal/ethread.mk.in
@@ -4323,7 +4316,7 @@ dnl The ones below should be moved to their respective lib
dnl
../lib/ic/c_src/$host/Makefile:../lib/ic/c_src/Makefile.in
../lib/os_mon/c_src/$host/Makefile:../lib/os_mon/c_src/Makefile.in
- ../lib/ssl/c_src/$host/Makefile:../lib/ssl/c_src/Makefile.in
+dnl ../lib/ssl/c_src/$host/Makefile:../lib/ssl/c_src/Makefile.in
../lib/crypto/c_src/$host/Makefile:../lib/crypto/c_src/Makefile.in
../lib/orber/c_src/$host/Makefile:../lib/orber/c_src/Makefile.in
../lib/runtime_tools/c_src/$host/Makefile:../lib/runtime_tools/c_src/Makefile.in
diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml
index 4c84412dd6..88e8b284fb 100644
--- a/erts/doc/src/absform.xml
+++ b/erts/doc/src/absform.xml
@@ -285,7 +285,8 @@
<item>If E is <c><![CDATA[fun Name / Arity]]></c>, then
Rep(E) = <c><![CDATA[{'fun',LINE,{function,Name,Arity}}]]></c>.</item>
<item>If E is <c><![CDATA[fun Module:Name/Arity]]></c>, then
- Rep(E) = <c><![CDATA[{'fun',LINE,{function,Module,Name,Arity}}]]></c>.</item>
+ Rep(E) = <c><![CDATA[{'fun',LINE,{function,Rep(Module),Rep(Name),Rep(Arity)}}]]></c>.
+ (Before the R15 release: Rep(E) = <c><![CDATA[{'fun',LINE,{function,Module,Name,Arity}}]]></c>.)</item>
<item>If E is <c><![CDATA[fun Fc_1 ; ... ; Fc_k end]]></c>
where each <c><![CDATA[Fc_i]]></c> is a function clause then Rep(E) =
<c><![CDATA[{'fun',LINE,{clauses,[Rep(Fc_1), ..., Rep(Fc_k)]}}]]></c>.</item>
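To make the representation change above concrete, here is a minimal sketch of what the parser produces for a literal fun expression from R15 on (the module, function and arity below are arbitrary examples):

    %% Parse "fun lists:map/2." and inspect the abstract form (R15 and later).
    1> {ok, Tokens, _} = erl_scan:string("fun lists:map/2. ").
    2> {ok, [AbsForm]} = erl_parse:parse_exprs(Tokens).
    3> AbsForm.
    {'fun',1,{function,{atom,1,lists},{atom,1,map},{integer,1,2}}}

Before R15 the same expression was represented with bare atoms and an integer, as noted in the parenthesis above.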
diff --git a/erts/doc/src/alt_dist.xml b/erts/doc/src/alt_dist.xml
index 36d83a685b..038950b54d 100644
--- a/erts/doc/src/alt_dist.xml
+++ b/erts/doc/src/alt_dist.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2000</year><year>2010</year>
+ <year>2000</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -377,7 +377,7 @@
( 1) typedef enum {
( 2) portTypeUnknown, /* An uninitialized port */
( 3) portTypeListener, /* A listening port/socket */
-( 4) portTypeAcceptor, /* An intermidiate stage when accepting
+( 4) portTypeAcceptor, /* An intermediate stage when accepting
( 5) on a listen port */
( 6) portTypeConnector, /* An intermediate stage when connecting */
( 7) portTypeCommand, /* A connected open port in command mode */
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index 02082e57c6..d0a0ceaeba 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -587,6 +587,13 @@
<p>Enables auto load tracing, displaying info while loading
code.</p>
</item>
+ <tag><c><![CDATA[+L]]></c></tag>
+ <item>
+ <p>Don't load information about source filenames and line numbers.
+ This will save some memory, but exceptions will not contain
+ information about the filenames and line numbers.
+ </p>
+ </item>
<tag><marker id="erts_alloc"><c><![CDATA[+MFlag Value]]></c></marker></tag>
<item>
<p>Memory allocator specific flags, see
@@ -738,6 +745,19 @@
<seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, SchedulerBindType)</seealso>.
</p>
</item>
+ <tag><marker id="+scl"><c>+scl true|false</c></marker></tag>
+ <item>
+ <p>Enable or disable scheduler compaction of load. By default
+ scheduler compaction of load is enabled. When enabled, load
+ balancing will strive for a load distribution which causes
+ as many scheduler threads as possible to be fully loaded (i.e.,
+ not run out of work). This is accomplished by migrating load
+ (e.g. runnable processes) into a smaller set of schedulers
+ when schedulers frequently run out of work. When disabled,
+ the frequency with which schedulers run out of work will
+ not be taken into account by the load balancing logic.
+ </p>
+ </item>
<tag><marker id="+sct"><c>+sct CpuTopology</c></marker></tag>
<item>
<list type="bulleted">
@@ -987,7 +1007,7 @@
the <c><![CDATA[-extra]]></c> section, i.e. the end of the command line
following after an <c><![CDATA[-extra]]></c> flag.</p>
</item>
- <tag><c><![CDATA[ERL_ZFLAGS]]></c>and <c><![CDATA[ERL_FLAGS]]></c></tag>
+ <tag><c><![CDATA[ERL_ZFLAGS]]></c> and <c><![CDATA[ERL_FLAGS]]></c></tag>
<item>
<p>The content of these environment variables will be added to the
end of the command line for <c><![CDATA[erl]]></c>.</p>
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml
index 2fb03954b6..8e18dd6657 100644
--- a/erts/doc/src/erl_driver.xml
+++ b/erts/doc/src/erl_driver.xml
@@ -1638,12 +1638,19 @@ ERL_DRV_EXT2TERM char *buf, ErlDrvUInt len
<fsummary>Cancel an asynchronous call</fsummary>
<desc>
<marker id="driver_async_cancel"></marker>
- <p>This function cancels an asynchronous operation, by removing
- it from the queue. Only functions in the queue can be
- cancelled; if a function is executing, it's too late to
- cancel it. The <c>async_free</c> function is also called.</p>
- <p>The return value is 1 if the operation was removed from the
- queue, otherwise 0.</p>
+ <p>This function used to cancel a scheduled asynchronous operation,
+ if it was still in the queue. It returned 1 if it succeeded, and
+ 0 if it failed.</p>
+ <p>Since it could not guarantee success, it was more or less useless.
+ The user had to implement synchronization of cancellation anyway.
+ It also unnecessarily complicated the implementation. Therefore,
+ as of OTP-R15B <c>driver_async_cancel()</c> is deprecated, and
+ scheduled for removal in OTP-R16. It will currently always fail,
+ and return 0.</p>
+ <warning><p><c>driver_async_cancel()</c> is deferred and will
+ be removed in the OTP-R16 release.</p>
+ </warning>
+
</desc>
</func>
<func>
diff --git a/erts/doc/src/erl_ext_fig.gif b/erts/doc/src/erl_ext_fig.gif
index 14d6bbc871..14d6bbc871 100755..100644
--- a/erts/doc/src/erl_ext_fig.gif
+++ b/erts/doc/src/erl_ext_fig.gif
Binary files differ
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index 382e446dce..8daa67aa87 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -216,14 +216,14 @@ ok
<p/>
<code type="none">
ERL_NIF_TERM term;
- MyStruct* ptr = enif_alloc_resource(my_resource_type, sizeof(MyStruct));
+ MyStruct* obj = enif_alloc_resource(my_resource_type, sizeof(MyStruct));
/* initialize struct ... */
- term = enif_make_resource(env, ptr);
+ term = enif_make_resource(env, obj);
if (keep_a_reference_of_our_own) {
- /* store 'ptr' in static variable, private data or other resource object */
+ /* store 'obj' in static variable, private data or other resource object */
}
else {
enif_release_resource(obj);
@@ -692,6 +692,10 @@ typedef enum {
<fsummary>Determine if a term is an exception</fsummary>
<desc><p>Return true if <c>term</c> is an exception.</p></desc>
</func>
+ <func><name><ret>int</ret><nametext>enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
+ <fsummary>Determine if a term is a number (integer or float)</fsummary>
+ <desc><p>Return true if <c>term</c> is a number.</p></desc>
+ </func>
<func><name><ret>int</ret><nametext>enif_is_fun(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
<fsummary>Determine if a term is a fun</fsummary>
<desc><p>Return true if <c>term</c> is a fun.</p></desc>
diff --git a/erts/doc/src/erl_prim_loader.xml b/erts/doc/src/erl_prim_loader.xml
index fa3daaeecc..9f5b3f385b 100644
--- a/erts/doc/src/erl_prim_loader.xml
+++ b/erts/doc/src/erl_prim_loader.xml
@@ -101,7 +101,7 @@
<c><anno>Bin</anno></c> is the contents of the file as a binary.</p>
<p>The <c><anno>Filename</anno></c> can also be a file in an archive. For example
- <c>/otp/root/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin/mnesia_backup.beam</c>.
+ <c>$OTPROOT/lib/</c><c>mnesia-4.4.7.ez/mnesia-4.4.7/ebin/</c><c>mnesia.beam</c>.
See <seealso marker="kernel:code">code(3)</seealso> about archive files.</p>
</desc>
</func>
@@ -124,7 +124,7 @@
the names of all the files in the directory. The names are
not sorted.</p>
<p>The <c><anno>Dir</anno></c> can also be a directory in an archive. For example
- <c>/otp/root/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin</c>.
+ <c>$OTPROOT/lib/</c><c>mnesia-4.4.7.ez/mnesia-4.4.7/ebin</c>.
See <seealso marker="kernel:code">code(3)</seealso> about archive files.</p>
</desc>
</func>
@@ -143,7 +143,7 @@
<p>See <seealso marker="kernel:file">file(3)</seealso> for more info about
the record <c>file_info</c>.</p>
<p>The <c><anno>Filename</anno></c> can also be a file in an archive. For example
- <c>/otp/root/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin/mnesia_backup.beam</c>.
+ <c>$OTPROOT/lib/</c><c>mnesia-4.4.7.ez/mnesia-4.4.7/ebin/</c><c>mnesia</c>.
See <seealso marker="kernel:code">code(3)</seealso> about archive files.</p>
</desc>
</func>
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index b7d775541f..2ea144eb3f 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -518,6 +518,18 @@
</func>
<func>
+ <name>check_old_code(Module) -> boolean()</name>
+ <fsummary>Check if a module has old code</fsummary>
+ <type>
+ <v>Module = atom()</v>
+ </type>
+ <desc>
+ <p>Returns <c>true</c> if the <c>Module</c> has old code,
+ and <c>false</c> otherwise.</p>
+ <p>See also <seealso marker="kernel:code">code(3)</seealso>.</p>
+ </desc>
+ </func>
+ <func>
<name>check_process_code(Pid, Module) -> boolean()</name>
<fsummary>Check if a process is executing old code for a module</fsummary>
<type>
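A short usage sketch of the new BIF documented above (the module name m is hypothetical); a module has old code after it has been reloaded while the previous version has not yet been purged:

    %% Assuming m has just been reloaded (e.g. via code:load_file/1) while
    %% the previous version was still loaded:
    1> erlang:check_old_code(m).
    true
    %% Purging removes the old version again:
    2> code:purge(m).
    3> erlang:check_old_code(m).
    false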
@@ -790,8 +802,7 @@ false</pre>
<type>
<v>MonitorRef = reference()</v>
<v>OptionList = [Option]</v>
- <v>Option = flush</v>
- <v>Option = info</v>
+ <v>&nbsp;Option = flush | info</v>
</type>
<desc>
<p>The returned value is <c>true</c> unless <c>info</c> is part
@@ -1014,6 +1025,56 @@ b</pre>
</desc>
</func>
<func>
+ <name>erlang:external_size(Term) -> integer() >= 0</name>
+ <fsummary>Calculate the maximum size for a term encoded in the Erlang
+ external term format</fsummary>
+ <type>
+ <v>Term = term()</v>
+ </type>
+ <desc>
+ <p>Calculates, without doing the encoding, the maximum byte size for
+ a term encoded in the Erlang external term format. The following
+ condition applies always:</p>
+ <p>
+ <pre>
+> <input>Size1 = byte_size(term_to_binary(Term)),</input>
+> <input>Size2 = erlang:external_size(Term),</input>
+> <input>true = Size1 =&lt; Size2.</input>
+true
+ </pre>
+ </p>
+ <p>This is equivalent to a call to: <code>erlang:external_size(Term, [])
+ </code></p>
+ </desc>
+ </func>
+ <func>
+ <name>erlang:external_size(Term, [Option]) -> integer() >= 0</name>
+ <fsummary>Calculate the maximum size for a term encoded in the Erlang
+ external term format</fsummary>
+ <type>
+ <v>Term = term()</v>
+ <v>Option = {minor_version, Version}</v>
+ </type>
+ <desc>
+ <p>Calculates, without doing the encoding, the maximum byte size for
+ a term encoded in the Erlang external term format. The following
+ condition applies always:</p>
+ <p>
+ <pre>
+> <input>Size1 = byte_size(term_to_binary(Term, Options)),</input>
+> <input>Size2 = erlang:external_size(Term, Options),</input>
+> <input>true = Size1 =&lt; Size2.</input>
+true
+ </pre>
+ </p>
+ <p>The option <c>{minor_version, Version}</c> specifies how floats
+ are encoded. See
+ <seealso marker="#term_to_binary/2">term_to_binary/2</seealso> for
+ a more detailed description.
+ </p>
+ </desc>
+ </func>
+ <func>
<name>float(Number) -> float()</name>
<fsummary>Convert a number to a float</fsummary>
<type>
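The bound documented above also holds per option list; a small sketch with an arbitrary term. The option {minor_version, 1} encodes floats as 8-byte IEEE values instead of 31-byte strings, so both sizes shrink, but the relation stays the same:

    1> Term = {3.14159, [1, 2, 3], <<"abc">>}.
    2> true = byte_size(term_to_binary(Term, [{minor_version, 1}]))
                  =< erlang:external_size(Term, [{minor_version, 1}]).
    true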
@@ -1134,11 +1195,16 @@ b</pre>
</item>
<tag><c>{new_uniq, Uniq}</c></tag>
<item>
- <p><c>Uniq</c> (a binary) is a unique value for this fun.</p>
+ <p><c>Uniq</c> (a binary) is a unique value for this fun.
+ It is calculated from the compiled code for the entire module.</p>
</item>
<tag><c>{uniq, Uniq}</c></tag>
<item>
- <p><c>Uniq</c> (an integer) is a unique value for this fun.</p>
+ <p><c>Uniq</c> (an integer) is a unique value for this fun.
+ Starting in the R15 release, this integer is calculated from
+ the compiled code for the entire module. Before R15, this
+ integer was based on only the body of the fun.
+ </p>
</item>
</taglist>
</desc>
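A tiny sketch of reading these values for an arbitrary fun:

    1> F = fun(X) -> X + 1 end.
    2> erlang:fun_info(F, uniq).       % {uniq, Integer}, per-module in R15+
    3> erlang:fun_info(F, new_uniq).   % {new_uniq, Binary}, also per-module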
@@ -1282,17 +1348,18 @@ b</pre>
</desc>
</func>
<func>
- <name>erlang:get_stacktrace() -> [{Module, Function, Arity | Args}]</name>
+ <name>erlang:get_stacktrace() -> [{Module, Function, Arity | Args, Location}]</name>
<fsummary>Get the call stack back-trace of the last exception</fsummary>
<type>
<v>Module = Function = atom()</v>
<v>Arity = arity()</v>
<v>Args = [term()]</v>
+ <v>Location = [{atom(),term()}]</v>
</type>
<desc>
<p>Get the call stack back-trace (<em>stacktrace</em>) of the last
exception in the calling process as a list of
- <c>{Module,Function,Arity}</c> tuples.
+ <c>{Module,Function,Arity,Location}</c> tuples.
The <c>Arity</c> field in the first tuple may be the argument
list of that function call instead of an arity integer,
depending on the exception.</p>
@@ -1302,6 +1369,25 @@ b</pre>
<p>The stacktrace is the same data as the <c>catch</c> operator
returns, for example:</p>
<p><c>{'EXIT',{badarg,Stacktrace}} = catch abs(x)</c></p>
+ <p><c>Location</c> is a (possibly empty) list of two-tuples that
+ may indicate the location in the source code of the function.
+ The first element is an atom that describes the type of
+ information in the second element. Currently the following
+ items may occur:</p>
+ <taglist>
+ <tag><c>file</c></tag>
+ <item>
+ <p>The second element of the tuple is a string (list of
+ characters) representing the filename of the source file
+ of the function.</p>
+ </item>
+ <tag><c>line</c></tag>
+ <item>
+ <p>The second element of the tuple is the line number
+ (an integer greater than zero) in the source file
+ where the exception occurred or the function was called.</p>
+ </item>
+ </taglist>
<p>See also
<seealso marker="#error/1">erlang:error/1</seealso> and
<seealso marker="#error/2">erlang:error/2</seealso>.</p>
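To make the shape of the Location element concrete, a minimal sketch (the module foo and the line number are invented for the example):

    %% In a module foo.erl compiled with line information (the default in R15):
    bad() ->
        try abs(x)
        catch
            error:badarg -> erlang:get_stacktrace()
        end.

    %% A call to foo:bad() then returns a list on the form
    %% [{erlang,abs,[x],[]},
    %%  {foo,bad,0,[{file,"foo.erl"},{line,3}]},
    %%  ...]
    %% where BIF frames carry an empty Location list.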
@@ -2827,11 +2913,11 @@ os_prompt%</pre>
<seealso marker="stdlib:unicode_usage">stdlib users guide
</seealso> for details.</p>
- <note>The characters in the name (if given as a list)
+ <note><p>The characters in the name (if given as a list)
can only be &gt; 255 if the Erlang VM is started in
Unicode file name translation mode, otherwise the name
of the executable is limited to the ISO-latin-1
- character set.</note>
+ character set.</p></note>
<p>If the <c>Command</c> cannot be run, an error
exception, with the posix error code as the reason, is
@@ -2944,11 +3030,11 @@ os_prompt%</pre>
Unicode translation of arguments, they can be supplied as
binaries in whatever encoding is deemed appropriate.</p>
- <note>The characters in the arguments (if given as a
+ <note><p>The characters in the arguments (if given as a
list of characters) can only be &gt; 255 if the Erlang
VM is started in Unicode file name mode,
otherwise the arguments are limited to the
- ISO-latin-1 character set.</note>
+ ISO-latin-1 character set.</p></note>
<p>If one, for any reason, wants to explicitly set the
program name in the argument vector, the <c>arg0</c>
@@ -3633,12 +3719,6 @@ os_prompt%</pre>
<tag><c>process_flag(save_calls, N)</c></tag>
<item>
- <p>When there are runnable processes on priority <c>max</c>
- no processes on priority <c>low</c>, <c>normal</c>, or
- <c>high</c> will be selected for execution. As with the
- <c>high</c> priority, processes on lower priorities might
- execute in parallel with processes on priority <c>max</c>.
- </p>
<p><c>N</c> must be an integer in the interval 0..10000.
If <c>N</c> &gt; 0, call saving is made active for the
process, which means that information about the <c>N</c>
@@ -3809,11 +3889,26 @@ os_prompt%</pre>
catches in this process. This <c>InfoTuple</c> may be
changed or removed without prior notice.</p>
</item>
- <tag><c>{current_function, {Module, Function, Args}}</c></tag>
+ <tag><c>{current_function, {Module, Function, Arity}}</c></tag>
<item>
- <p><c>Module</c>, <c>Function</c>, <c>Args</c> is
+ <p><c>Module</c>, <c>Function</c>, <c>Arity</c> is
the current function call of the process.</p>
</item>
+ <tag><c>{current_location, {Module, Function, Arity, Location}}</c></tag>
+ <item>
+ <p><c>Module</c>, <c>Function</c>, <c>Arity</c> is
+ the current function call of the process.
+ <c>Location</c> is a list of two-tuples that describes the
+ location in the source code.
+ </p>
+ </item>
+ <tag><c>{current_stacktrace, Stack}</c></tag>
+ <item>
+ <p>Return the current call stack back-trace (<em>stacktrace</em>)
+ of the process. The stack has the same format as returned by
+ <seealso marker="#get_stacktrace/1">erlang:get_stacktrace/0</seealso>.
+ </p>
+ </item>
<tag><c>{dictionary, Dictionary}</c></tag>
<item>
<p><c>Dictionary</c> is the dictionary of the process.</p>
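A short sketch of inspecting another process with the two new items (the spawned process is just an example):

    1> Pid = spawn(fun() -> timer:sleep(infinity) end).
    %% Where is the process executing right now?
    2> process_info(Pid, current_function).
    %% current_location adds {file, ...} and {line, ...} information, and
    %% current_stacktrace returns the whole backtrace in the same format
    %% as erlang:get_stacktrace/0:
    3> process_info(Pid, current_stacktrace).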
@@ -4080,11 +4175,14 @@ os_prompt%</pre>
equivalent to <c>erlang:Class(Reason)</c>.
<c>Reason</c> is any term and <c>Stacktrace</c> is a list as
returned from <c>get_stacktrace()</c>, that is a list of
- 3-tuples <c>{Module, Function, Arity | Args}</c> where
- <c>Module</c> and <c>Function</c> are atoms and the third
- element is an integer arity or an argument list. The
- stacktrace may also contain <c>{Fun, Args}</c> tuples where
+ 4-tuples <c>{Module, Function, Arity | Args,
+ Location}</c> where <c>Module</c> and <c>Function</c>
+ are atoms and the third element is an integer arity or an
+ argument list. The stacktrace may also contain <c>{Fun,
+ Args, Location}</c> tuples where
<c>Fun</c> is a local fun and <c>Args</c> is an argument list.</p>
+ <p>The <c>Location</c> element at the end is optional.
+ Omitting it is equivalent to specifying an empty list.</p>
<p>The stacktrace is used as the exception stacktrace for the
calling process; it will be truncated to the current
maximum stacktrace depth.</p>
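A sketch of re-raising with an explicit stacktrace in the 4-tuple form (the module, function, file and line below are made up):

    rethrow() ->
        Stack = [{mymod, myfun, 1, [{file, "mymod.erl"}, {line, 17}]}],
        erlang:raise(error, badarg, Stack).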
@@ -5879,7 +5977,7 @@ true</pre>
</item>
<tag><c>wordsize</c></tag>
<item>
- <p>Same as <c>{wordsize, internal}</c></p>
+ <p>Same as <c>{wordsize, internal}.</c></p>
</item>
<tag><c>{wordsize, internal}</c></tag>
<item>
@@ -5888,7 +5986,7 @@ true</pre>
and on a pure 64-bit architecture 8 is returned. On a
halfword 64-bit emulator, 4 is returned, as the Erlang
terms are stored using a virtual wordsize of half the
- systems wordsize.</p>
+ system's wordsize.</p>
</item>
<tag><c>{wordsize, external}</c></tag>
<item>
diff --git a/erts/doc/src/erlsrv.xml b/erts/doc/src/erlsrv.xml
index 0dfad2a112..c1ecbc7b77 100644
--- a/erts/doc/src/erlsrv.xml
+++ b/erts/doc/src/erlsrv.xml
@@ -4,7 +4,7 @@
<comref>
<header>
<copyright>
- <year>1998</year><year>2010</year>
+ <year>1998</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -273,7 +273,7 @@
</desc>
</func>
<func>
- <name>erlsrv {start | stop | disable | enable} &lt;service-name></name>
+ <name>erlsrv {start | start_disabled | stop | disable | enable} &lt;service-name></name>
<fsummary>Manipulate the current service status.</fsummary>
<desc>
<p>These commands are only added for convenience, the normal
@@ -287,6 +287,21 @@
service actually is stopped. Enabling a service sets it in
automatic mode, that is started at boot. This command cannot
set the service to manual. </p>
+
+ <p>The <c>start_disabled</c> command operates on a service
+ regardless of if it's enabled/disabled or started/stopped. It
+ does this by first enabling it (regardless of if it's enabled
+ or not), then starting it (if it's not already started) and
+ then disabling it. The result will be a disabled but started
+ service, regardless of its earlier state. This is useful for
+ starting services temporarily during a release upgrade. The
+ difference between using <c>start_disabled</c> and the
+ sequence <c>enable</c>, <c>start</c> and <c>disable</c> is
+ that all other <c>erlsrv</c> commands are locked out during
+ the sequence of operations in <c>start_disable</c>, making the
+ operation atomic from an <c>erlsrv</c> user's point of
+ view.</p>
+
</desc>
</func>
<func>
diff --git a/erts/doc/src/erts_alloc.xml b/erts/doc/src/erts_alloc.xml
index 90347824d5..3b5ee5391c 100644
--- a/erts/doc/src/erts_alloc.xml
+++ b/erts/doc/src/erts_alloc.xml
@@ -4,7 +4,7 @@
<cref>
<header>
<copyright>
- <year>2002</year><year>2010</year>
+ <year>2002</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -58,11 +58,8 @@
<item>Allocator used for memory blocks that are expected to be
long-lived, for example Erlang code.</item>
<tag><c>fix_alloc</c></tag>
- <item>A very fast allocator used for some fix-sized
- data. <c>fix_alloc</c> manages a set of memory pools from
- which memory blocks are handed out. <c>fix_alloc</c>
- allocates memory pools from <c>ll_alloc</c>. Memory pools
- that have been allocated are never deallocated.</item>
+ <item>A fast allocator used for some frequently used
+ fixed size data types.</item>
<tag><c>std_alloc</c></tag>
<item>Allocator used for most memory blocks not allocated via any of
the other allocators described above.</item>
@@ -83,7 +80,7 @@
where only small blocks are placed. Currently this allocator is
disabled by default.</item>
</taglist>
- <p><c>sys_alloc</c> and <c>fix_alloc</c> are always enabled and
+ <p><c>sys_alloc</c> is always enabled and
cannot be disabled. <c>mseg_alloc</c> is always enabled if it is
available and an allocator that uses it is enabled. All other
allocators can be <seealso marker="#M_e">enabled or disabled</seealso>.
@@ -104,7 +101,7 @@
<marker id="alloc_util"></marker>
<title>The alloc_util framework</title>
<p>Internally a framework called <c>alloc_util</c> is used for
- implementing allocators. <c>sys_alloc</c>, <c>fix_alloc</c>, and
+ implementing allocators. <c>sys_alloc</c>, and
<c>mseg_alloc</c> do not use this framework; hence, the
following does <em>not</em> apply to them.</p>
<p>An allocator manages multiple areas, called carriers, in which
@@ -126,9 +123,8 @@
carrier". Main multiblock carriers are never deallocated. The
size of the main multiblock carrier is determined by the value
of the <seealso marker="#M_mmbcs">mmbcs</seealso> parameter.</p>
- <p> <marker id="mseg_mbc_sizes"></marker>
-
- Sizes of multiblock carriers allocated via <c>mseg_alloc</c> are
+ <p><marker id="mseg_mbc_sizes"></marker>Sizes of multiblock carriers
+ allocated via <c>mseg_alloc</c> are
decided based on the values of the largest multiblock carrier
size (<seealso marker="#M_lmbcs">lmbcs</seealso>), the smallest
multiblock carrier size (<seealso marker="#M_smbcs">smbcs</seealso>),
@@ -157,9 +153,8 @@
<p>Coalescing of free blocks are always performed immediately.
Boundary tags (headers and footers) in free blocks are used
which makes the time complexity for coalescing constant.</p>
- <p> <marker id="strategy"></marker>
-
- The memory allocation strategy used for multiblock carriers by an
+ <p><marker id="strategy"></marker>The memory allocation strategy
+ used for multiblock carriers by an
allocator is configurable via the <seealso marker="#M_as">as</seealso>
parameter. Currently the following strategies are available:</p>
<taglist>
@@ -214,6 +209,14 @@
This since it will only cause problems for other allocators.</p>
</item>
</taglist>
+ <p>Apart from the ordinary allocators described above a number of
+ pre-allocators are used for some specific data types. These
+ pre-allocators pre-allocate a fixed amount of memory for certain data
+ types when the run-time system starts. As long as there are available
+ pre-allocated memory, it will be used. When no pre-allocated memory is
+ available, memory will be allocated in ordinary allocators. These
+ pre-allocators are typically much faster than the ordinary allocators,
+ but can only satisfy a limited amount of requests.</p>
</section>
<note><p>
@@ -274,18 +277,6 @@
Max cached segments. The maximum number of memory segments
stored in the memory segment cache. Valid range is
0-30. Default value is 5.</item>
- <tag><marker id="MMcci"><c><![CDATA[+MMcci <time>]]></c></marker></tag>
- <item>
- Cache check interval (in milliseconds). The memory segment
- cache is checked for segments to destroy at an interval
- determined by this parameter. Default value is 1000.</item>
- </taglist>
- <p>The following flags are available for configuration of
- <c>fix_alloc</c>:</p>
- <taglist>
- <tag><marker id="MFe"><c>+MFe true</c></marker></tag>
- <item>
- Enable <c>fix_alloc</c>. Note: <c>fix_alloc</c> cannot be disabled.</item>
</taglist>
<p>The following flags are available for configuration of
<c>sys_alloc</c>:</p>
@@ -324,7 +315,7 @@
based on <c>alloc_util</c>. If <c>u</c> is used as subsystem
identifier (i.e., <c><![CDATA[<S> = u]]></c>) all allocators based on
<c>alloc_util</c> will be effected. If <c>B</c>, <c>D</c>, <c>E</c>,
- <c>H</c>, <c>L</c>, <c>R</c>, <c>S</c>, or <c>T</c> is used as
+ <c>F</c>, <c>H</c>, <c>L</c>, <c>R</c>, <c>S</c>, or <c>T</c> is used as
subsystem identifier, only the specific allocator identified will be
effected:</p>
<taglist>
@@ -443,26 +434,23 @@
kilobytes). See <seealso marker="#mseg_mbc_sizes">the description
on how sizes for mseg_alloc multiblock carriers are decided</seealso>
in "the <c>alloc_util</c> framework" section.</item>
- <tag><marker id="M_t"><c><![CDATA[+M<S>t true|false|<amount>]]></c></marker></tag>
+ <tag><marker id="M_t"><c><![CDATA[+M<S>t true|false]]></c></marker></tag>
<item>
Multiple, thread specific instances of the allocator.
This option will only have any effect on the runtime system
with SMP support. Default behaviour on the runtime system with
- SMP support (<c>N</c> equals the number of scheduler threads):
+ SMP support:
<taglist>
- <tag><c>temp_alloc</c></tag>
- <item><c>N + 1</c> instances.</item>
<tag><c>ll_alloc</c></tag>
<item><c>1</c> instance.</item>
<tag>Other allocators</tag>
- <item><c>N</c> instances when <c>N</c> is less than or equal to
- <c>16</c>. <c>16</c> instances when <c>N</c> is greater than
- <c>16</c>.</item>
+ <item><c>NoSchedulers+1</c> instances. Each scheduler will use
+ a lock-free instance of its own and other threads will use
+ a common instance.</item>
</taglist>
- <c>temp_alloc</c> will always use <c>N + 1</c> instances when
- this option has been enabled regardless of the amount passed.
- Other allocators will use the same amount of instances as the
- amount passed as long as it isn't greater than <c>N</c>.
+ It was previously (before ERTS version 5.9) possible to configure
+ a smaller amount of thread specific instances than schedulers.
+ This is, however, not possible any more.
</item>
</taglist>
<p>Currently the following flags are available for configuration of
diff --git a/erts/doc/src/make.dep b/erts/doc/src/make.dep
deleted file mode 100644
index 98bac78235..0000000000
--- a/erts/doc/src/make.dep
+++ /dev/null
@@ -1,32 +0,0 @@
-# ----------------------------------------------------
-# >>>> Do not edit this file <<<<
-# This file was automaticly generated by
-# /home/gandalf/otp/bin/docdepend
-# ----------------------------------------------------
-
-
-# ----------------------------------------------------
-# TeX files that the DVI file depend on
-# ----------------------------------------------------
-
-book.dvi: absform.tex alt_dist.tex book.tex crash_dump.tex \
- driver.tex driver_entry.tex epmd.tex erl.tex \
- erl_dist_protocol.tex erl_driver.tex erl_ext_dist.tex \
- erl_prim_loader.tex erl_set_memory_block.tex \
- erlang.tex erlc.tex erlsrv.tex erts_alloc.tex \
- escript.tex inet_cfg.tex init.tex match_spec.tex \
- part.tex ref_man.tex run_erl.tex start.tex \
- start_erl.tex tty.tex werl.tex zlib.tex
-
-# ----------------------------------------------------
-# Source inlined when transforming from source to LaTeX
-# ----------------------------------------------------
-
-book.tex: ref_man.xml
-
-# ----------------------------------------------------
-# Pictures that the DVI file depend on
-# ----------------------------------------------------
-
-book.dvi: erl_ext_fig.ps
-
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 3733fb2db9..4cef9669dd 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -30,6 +30,344 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 5.8.5</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Several bugs concerning constant binary constructions
+ such as &lt;&lt;0:4294967295&gt;&gt; have been corrected.
+ Depending on the actual size of the binary and the type
+ of run-time system (32-bit, halfword, 64-bit), such
+ expression could either crash the run-time system or make
+ the loader refuse loading of the module.</p>
+ <p>
+ Own Id: OTP-9284</p>
+ </item>
+ <item>
+ <p>
+ The Erlsrv utility failed to stop the erlang machine if
+ no StopAction was defined when the service was stopped.
+ This is now corrected.</p>
+ <p>
+ Own Id: OTP-9344</p>
+ </item>
+ <item>
+ <p>
+ Due to a bug in glibc the runtime system could abort
+ while trying to destroy a mutex. A fix for this was
+ introduced in R14B02. This fix did, however, not solve
+ the problem. The runtime system will now issue a warning
+ instead of aborting.</p>
+ <p>
+ Own Id: OTP-9373 Aux Id: OTP-9009 </p>
+ </item>
+ <item>
+ <p>
+ Replace atom in DRV macro in prim_file with string</p>
+ <p>
+ An experimental version of Dialyzer discovered that the
+ atom that replaced the DRV macro in prim_file ends up in
+ calls to erlang:open_port({spawn, Driver}, Portopts) as
+ the Driver argument. The documentation states that this
+ call requires a string there.</p>
+ <p>
+ This change is also consistent with the one introduced in
+ commit 0f03b1e9d2bef3bc830c31a369261af4c5234727 by Kostis
+ Sagonas.</p>
+ <p>
+ Own Id: OTP-9377</p>
+ </item>
+ <item>
+ <p>
+ Fix typos in the epmd documentation (Thanks to Holger
Weiß )</p>
+ <p>
+ Own Id: OTP-9387</p>
+ </item>
+ <item>
+ <p>
+ Fix faulty integer terms created by NIF API from 64-bit
+ integers on halfword emulator. (Thanks to Paolo Negri and
+ Paul Davis)</p>
+ <p>
+ Own Id: OTP-9394</p>
+ </item>
+ <item>
+ <p>
+ Fix <c>epmd</c> crash on vxworks caused by faulty
+ argument to select() system call.</p>
+ <p>
+ Own Id: OTP-9427 Aux Id: seq11855 </p>
+ </item>
+ <item>
+ <p>
+ The ets:test_ms function could in rare cases truncate the
+ error messages. This is now corrected.</p>
+ <p>
+ Own Id: OTP-9435</p>
+ </item>
+ <item>
+ <p>
+ Fix bug related to hibernate and HiPE (clear
+ F_HIBERNATE_SCHED flag)</p>
+ <p>
+ F_HIBERNATE_SCHED flag that was introduced in
+ b7ecdcd1ae9e11b8f75e must be cleared in hipe_mode_switch
+ as well. Otherwise, processes running HiPE code that
+ hibernate, wake up and then trap into a BIF will not be
+ rescheduled.(Thanks to Paul Guyot)</p>
+ <p>
+ Own Id: OTP-9452</p>
+ </item>
+ <item>
+ <p>
+ Fix bug in FreeBSD topology detection code (Thanks to
+ Paul Guyot)</p>
+ <p>
+ Own Id: OTP-9453</p>
+ </item>
+ <item>
+ <p>
+ Fix use of logical operator &amp;&amp; with constant
+ operand instead of bitwise &amp; (Thanks to Cristian
+ Greco)</p>
+ <p>
+ Own Id: OTP-9454</p>
+ </item>
+ <item>
+ <p>
+ inet: error if fd does not match socket domain</p>
+ <p>
+ If an IPv4 fd is opened as an IPv6 socket, unexpected
+ behaviour can occur. For example, if an IPv4 UDP socket
+ is opened and passed into Erlang as an IPv6 socket, the
+ first 3 bytes (corresponding to 1 byte representing the
+ protocol family, 2 bytes set to the port) are stripped
+ from the payload. The cause of the UDP payload truncation
+ happens in inet_drv.c:packet_inet_input when a call to
+ inet_get_address fails silently because the family is set
+ to PF_INET6 but the buffer len is the size of an IPv4
+ struct sockaddr_in.</p>
+ <p>
+ (Thanks to Andrew Tunnell-Jones for finding the bug and
+ the test case!)</p>
+ <p>
+ Own Id: OTP-9455</p>
+ </item>
+ <item>
+ <p>
+ erts: use a union to avoid strict aliasing issues</p>
+ <p>
+ Use a union for pointer type conversion to avoid compiler
+ warnings about strict-aliasing violations with gcc-4.1.
+ gcc >= 4.2 does not emit the warning. erts: adapt
+ matrix_nif to R14 erl_nif API changes (Thanks To Tuncer
+ Ayaz)</p>
+ <p>
+ Own Id: OTP-9487</p>
+ </item>
+ <item>
+ <p>
+ fix 64-bit issues in the garbage collection (Thanks to
+ Richard Carlsson)</p>
+ <p>
+ Own Id: OTP-9488</p>
+ </item>
+ <item>
+ <p>
+ epmd: fix compiler warnings</p>
+ <p>
+ Suppress compiler warnings about ignored return values.
+ (Thanks to Michael Santos )</p>
+ <p>
+ Own Id: OTP-9500</p>
+ </item>
+ <item>
+ <p>
+ Fix non-existing function (erlang:disconnect/1) in
distributed reference manual (Thanks to Fabian Król)</p>
+ <p>
+ Own Id: OTP-9504</p>
+ </item>
+ <item>
+ <p>
+ Document fdatasync -lrt requirement (SunOS &lt;= 5.10)
+ (Thanks to Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-9512</p>
+ </item>
+ <item>
+ <p>
+ Let epmd ignore empty ERL_EPMD_ADDRESS</p>
+ <p>
+ If the environment variable ERL_EPMD_ADDRESS is set to
+ the empty string, empd now behaves like it does by
+ default when ERL_EPMD_ADDRESS is unset. That is, in this
+ case, epmd now listens on all available interfaces
+ instead of using only the loopback interface, which
+ happened because epmd added the loopback address to the
+ (in this case empty) list of addresses specified via
+ ERL_EPMD_ADDRESS.</p>
+ <p>
+ Also, epmd now ignores ERL_EPMD_ADDRESS if it contains
+ only separator characters (comma and space).</p>
+ <p>
+ The same applies to epmd's -address option.(Thanks to
Holger Weiß)</p>
+ <p>
+ Own Id: OTP-9525</p>
+ </item>
+ <item>
+ <p>
+ Remove dead code in erl_compile (Thanks to Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-9527</p>
+ </item>
+ <item>
+ <p>
+ Add erlang:external_size/2 BIF</p>
+ <p>
+ This BIF's second parameter is a list of options.
+ Currently the only allowed option is {minor_version,
+ Version} where version is either 0 (default) or 1.
+ (Thanks to Filipe David Manana )</p>
+ <p>
+ Own Id: OTP-9528</p>
+ </item>
+ <item>
+ <p>
+ Fix enif_compare on 64bits machines</p>
+ <p>
+ In 64bits machines the Sint type has a size of 8 bytes,
+ while on 32bits machines it has a 4 bytes size.
+ enif_compare was ignoring this and therefore returning
+ incorrect values when the result of the CMP function
+ (which returns a Sint value) doesn't fit in 4 bytes.
+ (Thanks to Filipe David Manana)</p>
+ <p>
+ Own Id: OTP-9533</p>
+ </item>
+ <item>
+ <p>
+ Implement or fix -Werror option</p>
+ <p>
+ If -Werror is enabled and there are warnings no output
+ file is written. Also make sure that error/warning
+ reporting is consistent. (Thanks to Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-9536</p>
+ </item>
+ <item>
+ <p>In some rare cases we did not have a run queue when
+ scheduling misc ops. This is now fixed.</p>
+ <p>
+ Own Id: OTP-9537</p>
+ </item>
+ <item>
+ <p>Remove misc. compiler warnings</p>
+ <p>
+ Own Id: OTP-9542</p>
+ </item>
+ <item>
+ <p>
+ Two bugs in gen_sctp has been corrected: getopts/setopts
+ hence also send could only be called from socket owner,
+ and options 'linger', 'rcvbuf' and 'sndbuf' was read from
+ wrong protocol layer hence read wrong values by getopts.</p>
+ <p>
+ Own Id: OTP-9544</p>
+ </item>
+ <item>
+ <p>
+ Erlang/OTP can now be built on MacOS X Lion.</p>
+ <p>
+ Own Id: OTP-9547</p>
+ </item>
+ <item>
+ <p> XML files have been corrected. </p>
+ <p>
+ Own Id: OTP-9550 Aux Id: OTP-9541 </p>
+ </item>
+ <item>
+ <p>
+ Fix potential errors inspired by running cppcheck(1)
+ (Thanks to Christian von Roques)</p>
+ <p>
+ Own Id: OTP-9557</p>
+ </item>
+ <item>
+ <p>When auxiliary work was enqueued on a scheduler, the
+ wakeup of the scheduler in order to handle this work
+ could be lost. Wakeups in order to handle ordinary work
+ were not effected by this bug. The bug only effected
+ runtime systems with SMP support as follows:</p> <list>
+ <item>Deallocation of some ETS data structures could be
+ delayed.</item> <item>On Linux systems not using the NPTL
+ thread library (typically ancient systems with kernel
+ versions prior to 2.6) and Windows systems, the <c>{Port,
+ {exit_status, Status}}</c> message from a terminating
+ port program could be delayed. That is, it only effected
+ port programs which had been started by passing
+ <c>exit_status</c> as an option to
+ <c>open_port/2</c>.</item> </list>
+ <p>
+ Own Id: OTP-9567</p>
+ </item>
+ <item>
+ <p>
+ Handle rare race in the crypto key server functionality</p>
+ <p>
+ Own Id: OTP-9586</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p> Types and specifications have been added. </p>
+ <p>
+ Own Id: OTP-9356</p>
+ </item>
+ <item>
+ <p>
+ New allocator strategy "address order first fit". May
+ ease the emptying of memory carriers and thereby real
+ release of memory back to the OS.</p>
+ <p>
+ Own Id: OTP-9424</p>
+ </item>
+ <item>
+ <p>
+ The new <c>erlang:check_old_code/1</c> BIF checks whether
+ a module has old code.</p>
+ <p>
+ Own Id: OTP-9495</p>
+ </item>
+ <item>
+ <p> Update documentation and specifications of some of
+ the zlib functions. </p>
+ <p>
+ Own Id: OTP-9506</p>
+ </item>
+ <item>
+ <p>
+ Detect the available CPUs on IRIX</p>
+ <p>
+ Add support for querying the number of configured and
+ online processors on SGI systems running IRIX.(Thanks to
Holger Weiß)</p>
+ <p>
+ Own Id: OTP-9531</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 5.8.4</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -1949,6 +2287,24 @@
</section>
+<section><title>Erts 5.7.5.2</title>
+
+ <section><title>Known Bugs and Problems</title>
+ <list>
+ <item>
+ <p>
+ Two bugs in gen_sctp has been corrected: getopts/setopts
+ hence also send could only be called from socket owner,
+ and options 'linger', 'rcvbuf' and 'sndbuf' was read from
+ wrong protocol layer hence read wrong values by getopts.</p>
+ <p>
+ Own Id: OTP-9544</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 5.7.5.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -4534,7 +4890,7 @@
The race occurred when a process removed a table during
termination simultaneously as another process removed the
same table via <c>ets:delete/1</c> and a third process
- created a table that accidentaly got the same internal
+ created a table that accidentally got the same internal
table index as the table being removed.</p>
<p>
Own Id: OTP-7349</p>
diff --git a/erts/doc/src/start_erl.xml b/erts/doc/src/start_erl.xml
index 21cc901f52..92d87b095a 100644
--- a/erts/doc/src/start_erl.xml
+++ b/erts/doc/src/start_erl.xml
@@ -4,7 +4,7 @@
<comref>
<header>
<copyright>
- <year>1998</year><year>2009</year>
+ <year>1998</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -69,12 +69,29 @@
<c><![CDATA[erl]]></c> program. Everything <em>after</em><c><![CDATA[++]]></c> is
interpreted as options to <c><![CDATA[start_erl]]></c> itself.</item>
<tag>-reldir &lt;release root&gt;</tag>
- <item>Mandatory if the environment variable <c><![CDATA[RELDIR]]></c> is not
- specified. Tells start_erl where the root of the
- release tree is placed in the file-system
- (like &lt;Erlang root&gt;\\releases). The
- <c><![CDATA[start_erl.data]]></c> file is expected to be placed in
- this directory (if not otherwise specified).</item>
+
+ <item>Mandatory if the environment variable
+ <c><![CDATA[RELDIR]]></c> is not specified and no
+ <c>-rootdir</c> option is given. Tells start_erl where the
+ root of the release tree is placed in the file-system (typically
+ &lt;Erlang root&gt;\\releases). The
+ <c><![CDATA[start_erl.data]]></c> file is expected to be
+ placed in this directory (if not otherwise specified). If
+ only the <c>-rootdir</c> option is given, the directory is
+ assumed to be &lt;Erlang root&gt;\\releases.</item>
+
+ <tag>-rootdir &lt;Erlang root directory&gt;</tag>
+
+ <item>Mandatory if <c>-reldir</c> is not given and there is
+ no <c><![CDATA[RELDIR]]></c> in the environment. This
+ specifies the Erlang installation root directory (under
+ which the <c>lib</c>, <c>releases</c> and
+ <c>erts-&lt;Version&gt;</c> directories are placed). If only
+ <c>-reldir</c> (or the environment variable
+ <c><![CDATA[RELDIR]]></c>) is given, the Erlang root is assumed to
+ be the directory exactly one level above the release
+ directory.</item>
+
<tag>-data &lt;data file name&gt;</tag>
<item>Optional, specifies another data file than start_erl.data
in the &lt;release root&gt;. It is specified relative to the
diff --git a/erts/doc/src/zlib.xml b/erts/doc/src/zlib.xml
index 47a649af02..8917ab5c3a 100644
--- a/erts/doc/src/zlib.xml
+++ b/erts/doc/src/zlib.xml
@@ -378,31 +378,31 @@ unpack(Z, Compressed, Dict) ->
<name name="crc32" arity="2"/>
<fsummary>Calculate CRC</fsummary>
<desc>
- <p>Calculate the CRC checksum for <c><anno>Binary</anno></c>.</p>
+ <p>Calculate the CRC checksum for <c><anno>Data</anno></c>.</p>
</desc>
</func>
<func>
<name name="crc32" arity="3"/>
<fsummary>Calculate CRC</fsummary>
<desc>
- <p>Update a running CRC checksum for <c><anno>Binary</anno></c>.
- If <c><anno>Binary</anno></c> is the empty binary, this function returns
+ <p>Update a running CRC checksum for <c><anno>Data</anno></c>.
+ If <c><anno>Data</anno></c> is the empty binary or the empty iolist, this function returns
the required initial value for the crc.</p>
<pre>
-Crc = lists:foldl(fun(Bin,Crc0) ->
- zlib:crc32(Z, Crc0, Bin),
- end, zlib:crc32(Z,&lt;&lt; &gt;&gt;), Bins)</pre>
+Crc = lists:foldl(fun(Data,Crc0) ->
+          zlib:crc32(Z, Crc0, Data)
+ end, zlib:crc32(Z,&lt;&lt; &gt;&gt;), Datas)</pre>
</desc>
</func>
<func>
<name name="crc32_combine" arity="4"/>
<fsummary>Combine two CRC's</fsummary>
<desc>
- <p>Combine two CRC checksums into one. For two binaries,
- <c>Bin1</c> and <c>Bin2</c> with sizes of <c>Size1</c> and
+ <p>Combine two CRC checksums into one. For two binaries or iolists,
+ <c>Data1</c> and <c>Data2</c> with sizes of <c>Size1</c> and
<c><anno>Size2</anno></c>, with CRC checksums <c><anno>CRC1</anno></c> and
<c><anno>CRC2</anno></c>. <c>crc32_combine/4</c> returns the <c><anno>CRC</anno></c>
- checksum of <c>&lt;&lt;Bin1/binary,Bin2/binary&gt;&gt;</c>, requiring
+ checksum of <c>[Data1,Data2]</c>, requiring
only <c><anno>CRC1</anno></c>, <c><anno>CRC2</anno></c>, and <c><anno>Size2</anno></c>.
</p>
</desc>
@@ -411,75 +411,75 @@ Crc = lists:foldl(fun(Bin,Crc0) ->
<name name="adler32" arity="2"/>
<fsummary>Calculate the adler checksum</fsummary>
<desc>
- <p>Calculate the Adler-32 checksum for <c><anno>Binary</anno></c>.</p>
+ <p>Calculate the Adler-32 checksum for <c><anno>Data</anno></c>.</p>
</desc>
</func>
<func>
<name name="adler32" arity="3"/>
<fsummary>Calculate the adler checksum</fsummary>
<desc>
- <p>Update a running Adler-32 checksum for <c><anno>Binary</anno></c>.
- If <c><anno>Binary</anno></c> is the empty binary, this function returns
+ <p>Update a running Adler-32 checksum for <c><anno>Data</anno></c>.
+ If <c><anno>Data</anno></c> is the empty binary or the empty iolist, this function returns
the required initial value for the checksum.</p>
<pre>
-Crc = lists:foldl(fun(Bin,Crc0) ->
- zlib:adler32(Z, Crc0, Bin),
- end, zlib:adler32(Z,&lt;&lt; &gt;&gt;), Bins)</pre>
+Crc = lists:foldl(fun(Data,Crc0) ->
+          zlib:adler32(Z, Crc0, Data)
+ end, zlib:adler32(Z,&lt;&lt; &gt;&gt;), Datas)</pre>
</desc>
</func>
<func>
<name name="adler32_combine" arity="4"/>
<fsummary>Combine two Adler-32 checksums</fsummary>
<desc>
- <p>Combine two Adler-32 checksums into one. For two binaries,
- <c>Bin1</c> and <c>Bin2</c> with sizes of <c>Size1</c> and
+ <p>Combine two Adler-32 checksums into one. For two binaries or iolists,
+ <c>Data1</c> and <c>Data2</c> with sizes of <c>Size1</c> and
<c><anno>Size2</anno></c>, with Adler-32 checksums <c><anno>Adler1</anno></c> and
<c><anno>Adler2</anno></c>. <c>adler32_combine/4</c> returns the <c><anno>Adler</anno></c>
- checksum of <c>&lt;&lt;Bin1/binary,Bin2/binary&gt;&gt;</c>, requiring
+ checksum of <c>[Data1,Data2]</c>, requiring
only <c><anno>Adler1</anno></c>, <c><anno>Adler2</anno></c>, and <c><anno>Size2</anno></c>.
</p>
</desc>
</func>
<func>
<name name="compress" arity="1"/>
- <fsummary>Compress a binary with standard zlib functionality</fsummary>
+ <fsummary>Compress data with standard zlib functionality</fsummary>
<desc>
- <p>Compress a binary (with zlib headers and checksum).</p>
+ <p>Compress data (with zlib headers and checksum).</p>
</desc>
</func>
<func>
<name name="uncompress" arity="1"/>
- <fsummary>Uncompress a binary with standard zlib functionality</fsummary>
+ <fsummary>Uncompress data with standard zlib functionality</fsummary>
<desc>
- <p>Uncompress a binary (with zlib headers and checksum).</p>
+ <p>Uncompress data (with zlib headers and checksum).</p>
</desc>
</func>
<func>
<name name="zip" arity="1"/>
- <fsummary>Compress a binary without the zlib headers</fsummary>
+ <fsummary>Compress data without the zlib headers</fsummary>
<desc>
- <p>Compress a binary (without zlib headers and checksum).</p>
+ <p>Compress data (without zlib headers and checksum).</p>
</desc>
</func>
<func>
<name name="unzip" arity="1"/>
- <fsummary>Uncompress a binary without the zlib headers</fsummary>
+ <fsummary>Uncompress data without the zlib headers</fsummary>
<desc>
- <p>Uncompress a binary (without zlib headers and checksum).</p>
+ <p>Uncompress data (without zlib headers and checksum).</p>
</desc>
</func>
<func>
<name name="gzip" arity="1"/>
- <fsummary>Compress a binary with gz header</fsummary>
+ <fsummary>Compress data with gz header</fsummary>
<desc>
- <p>Compress a binary (with gz headers and checksum).</p>
+ <p>Compress data (with gz headers and checksum).</p>
</desc>
</func>
<func>
<name name="gunzip" arity="1"/>
- <fsummary>Uncompress a binary with gz header</fsummary>
+ <fsummary>Uncompress data with gz header</fsummary>
<desc>
- <p>Uncompress a binary (with gz headers and checksum).</p>
+ <p>Uncompress data (with gz headers and checksum).</p>
</desc>
</func>
</funcs>
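
The crc32_combine/4 and adler32_combine/4 descriptions above mirror zlib's own combine functions: the checksum of a concatenation can be computed from the two partial checksums and the length of the second part. As a rough illustration of that property, here is a small C sketch against the zlib C API that the Erlang zlib module wraps; it is not part of this patch.

    /* Sketch: crc32 of a concatenation equals crc32_combine() of the parts. */
    #include <assert.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        const unsigned char part1[] = "hello ";
        const unsigned char part2[] = "world";
        uLong len1 = strlen((const char *)part1);
        uLong len2 = strlen((const char *)part2);

        uLong crc1 = crc32(crc32(0L, Z_NULL, 0), part1, (uInt)len1);
        uLong crc2 = crc32(crc32(0L, Z_NULL, 0), part2, (uInt)len2);

        /* Checksum of the whole message, computed in one go. */
        unsigned char whole[32];
        memcpy(whole, part1, len1);
        memcpy(whole + len1, part2, len2);
        uLong crc_all = crc32(crc32(0L, Z_NULL, 0), whole, (uInt)(len1 + len2));

        /* Combining needs only crc1, crc2 and the length of the second part. */
        assert(crc32_combine(crc1, crc2, (z_off_t)len2) == crc_all);
        return 0;
    }
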
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index b658e79378..708d4ca0a3 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -291,7 +291,8 @@ else
LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
endif
-DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
+EPCRE_LIB = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
+DEPLIBS += $(EPCRE_LIB)
PERFCTR_PATH=@PERFCTR_PATH@
USE_PERFCTR=@USE_PERFCTR@
@@ -382,7 +383,7 @@ ifeq ($(FLAVOR)-@ERTS_BUILD_SMP_EMU@,smp-no)
all:
@echo '*** Omitted build of emulator with smp support'
else
-all: generate erts_lib zlib pcre $(BINDIR)/$(EMULATOR_EXECUTABLE) $(UNIX_ONLY_BUILDS)
+all: generate erts_lib zlib $(BINDIR)/$(EMULATOR_EXECUTABLE) $(UNIX_ONLY_BUILDS)
ifeq ($(OMIT_OMIT_FP),yes)
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
@@ -403,8 +404,8 @@ zlib:
@set -e ; cd zlib && $(MAKE) TYPE=$(TYPE) $(TYPE)
endif
-pcre:
- @set -e ; cd pcre && $(MAKE) TYPE=$(TYPE) $(TYPE)
+
+include pcre/pcre.mk
erts_lib:
cd $(ERL_TOP)/erts/lib_src && $(MAKE) $(TYPE)
@@ -420,9 +421,9 @@ endif
$(RM) -rf $(BINDIR)/child_setup $(BINDIR)/child_setup.*
$(RM) -f $(BINDIR)/hipe_mkliterals $(BINDIR)/hipe_mkliterals.*
@set -e ; cd zlib && $(MAKE) clean
- @set -e ; cd pcre && $(MAKE) clean
+ rm -f $(OBJS) $(OBJDIR)/libepcre.a
-.PHONY: all zlib pcre clean
+.PHONY: all zlib clean
docs:
@@ -467,10 +468,11 @@ release_docs_spec:
# Generated source code. Put in $(TARGET) directory
#
+_create_dirs := $(shell mkdir -p $(CREATE_DIRS))
+
.PHONY : generate
-GENERATE= $(CREATE_DIRS) \
- $(TTF_DIR)/beam_opcodes.h \
+GENERATE= $(TTF_DIR)/beam_opcodes.h \
$(TARGET)/erl_bif_table.c \
$(TARGET)/erl_version.h \
$(TTF_DIR)/driver_tab.c \
@@ -598,11 +600,6 @@ INCLUDES += -I$(ERL_TOP)/erts/etc/vxworks
endif
ifeq ($(TARGET),win32)
-# Usually the same as the default rule, but certain platforms (i.e. win32) mix
-# different compilers
-$(OBJDIR)/beam_emu.o: beam/beam_emu.c
- $(EMU_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/dll_sys.o: sys/$(ERLANG_OSTYPE)/sys.c
$(CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@
@@ -616,6 +613,11 @@ $(OBJDIR)/beam_emu.o: beam/beam_emu.c
$(CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) \
-OPT:Olimit=0 -WOPT:lpre=off:spre=off:epre=off \
$(INCLUDES) -c $< -o $@
+else
+# Usually the same as the default rule, but certain platforms (e.g. win32) mix
+# different compilers
+$(OBJDIR)/beam_emu.o: beam/beam_emu.c
+ $(EMU_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
endif
@@ -672,14 +674,8 @@ endif
# rebuilding (is this a good idea?) add a dummy dependency to this target.
#
-ifeq ($(findstring clearmake,$(MAKE)),clearmake)
-BEAMFILE_MAKEFLAG=-T
-else
-BEAMFILE_MAKEFLAG=
-endif
-
$(ERL_TOP)/lib/%.beam:
- cd $(@D)/../src && $(MAKE) $(BEAMFILE_MAKEFLAG) ../ebin/$(@F)
+ cd $(@D)/../src && $(MAKE) ../ebin/$(@F)
# ----------------------------------------------------------------------
@@ -725,7 +721,7 @@ RUN_OBJS = \
$(OBJDIR)/external.o $(OBJDIR)/dist.o \
$(OBJDIR)/binary.o $(OBJDIR)/erl_db.o \
$(OBJDIR)/erl_db_util.o $(OBJDIR)/erl_db_hash.o \
- $(OBJDIR)/erl_db_tree.o $(OBJDIR)/fix_alloc.o \
+ $(OBJDIR)/erl_db_tree.o $(OBJDIR)/erl_thr_progress.o \
$(OBJDIR)/big.o $(OBJDIR)/hash.o \
$(OBJDIR)/index.o $(OBJDIR)/atom.o \
$(OBJDIR)/module.o $(OBJDIR)/export.o \
@@ -742,7 +738,8 @@ RUN_OBJS = \
$(OBJDIR)/erl_bif_re.o $(OBJDIR)/erl_unicode.o \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
$(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o \
- $(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o
+ $(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o \
+ $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
@@ -857,7 +854,7 @@ $(OBJDIR)/%.o: hipe/%.c
$(BINDIR)/hipe_mkliterals$(TF_MARKER): $(OBJDIR)/hipe_mkliterals.o
$(CC) $(CFLAGS) $(INCLUDES) -o $@ $<
-$(OBJDIR)/hipe_mkliterals.o: $(TTF_DIR)/hipe_x86_asm.h $(TTF_DIR)/hipe_ppc_asm.h
+$(OBJDIR)/hipe_mkliterals.o: $(TTF_DIR)/hipe_x86_asm.h $(TTF_DIR)/hipe_ppc_asm.h $(TTF_DIR)/beam_opcodes.h
$(TTF_DIR)/hipe_literals.h: $(BINDIR)/hipe_mkliterals$(TF_MARKER)
$(BINDIR)/hipe_mkliterals$(TF_MARKER) -c > $@
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 68d64fb7b0..71454b3e57 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -69,6 +69,8 @@ atom ac
atom active
atom all
atom all_but_first
+atom alloc_info
+atom alloc_sizes
atom allocated
atom allocated_areas
atom allocator
@@ -156,6 +158,8 @@ atom cr
atom crlf
atom creation
atom current_function
+atom current_location
+atom current_stacktrace
atom data
atom debug_flags
atom delay_trap
@@ -553,5 +557,6 @@ atom warning_msg
atom wordsize
atom write_concurrency
atom xor
+atom x86
atom yes
atom yield
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index d76a7d8e9f..bc8c001454 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -33,12 +33,14 @@
#include "beam_catches.h"
#include "erl_binary.h"
#include "erl_nif.h"
+#include "erl_thr_progress.h"
static void set_default_trace_pattern(Eterm module);
static Eterm check_process_code(Process* rp, Module* modp);
static void delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp);
static void delete_export_references(Eterm module);
static int purge_module(int module);
+static void decrement_refc(BeamInstr* code);
static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
@@ -49,11 +51,11 @@ load_module_2(BIF_ALIST_2)
{
Eterm reason;
Eterm* hp;
- int i;
int sz;
byte* code;
Eterm res;
byte* temp_alloc = NULL;
+ struct LoaderState* stp;
if (is_not_atom(BIF_ARG_1)) {
error:
@@ -63,49 +65,37 @@ load_module_2(BIF_ALIST_2)
if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
-
- erts_export_consolidate();
-
hp = HAlloc(BIF_P, 3);
+
+ /*
+ * Read the BEAM file and prepare the module for loading.
+ */
+ stp = erts_alloc_loader_state();
sz = binary_size(BIF_ARG_2);
- if ((i = erts_load_module(BIF_P, 0,
- BIF_P->group_leader, &BIF_ARG_1, code, sz)) < 0) {
- switch (i) {
- case -1: reason = am_badfile; break;
- case -2: reason = am_nofile; break;
- case -3: reason = am_not_purged; break;
- case -4:
- reason = am_atom_put("native_code", sizeof("native_code")-1);
- break;
- case -5:
- {
- /*
- * The module contains an on_load function. The loader
- * has loaded the module as usual, except that the
- * export entries does not point into the module, so it
- * is not possible to call any code in the module.
- */
-
- ERTS_DECL_AM(on_load);
- reason = AM_on_load;
- break;
- }
- default: reason = am_badfile; break;
- }
+ reason = erts_prepare_loading(stp, BIF_P, BIF_P->group_leader,
+ &BIF_ARG_1, code, sz);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ if (reason != NIL) {
res = TUPLE2(hp, am_error, reason);
- goto done;
+ BIF_RET(res);
}
- set_default_trace_pattern(BIF_ARG_1);
- res = TUPLE2(hp, am_module, BIF_ARG_1);
+ /*
+ * Stop all other processes and finish the loading of the module.
+ */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ reason = erts_finish_loading(stp, BIF_P, 0, &BIF_ARG_1);
+ if (reason != NIL) {
+ res = TUPLE2(hp, am_error, reason);
+ } else {
+ set_default_trace_pattern(BIF_ARG_1);
+ res = TUPLE2(hp, am_module, BIF_ARG_1);
+ }
- done:
- erts_free_aligned_binary_bytes(temp_alloc);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
BIF_RET(res);
}
@@ -118,12 +108,12 @@ BIF_RETTYPE purge_module_1(BIF_ALIST_1)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_export_consolidate();
purge_res = purge_module(atom_val(BIF_ARG_1));
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (purge_res < 0) {
@@ -152,16 +142,33 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
Eterm res;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_export_consolidate();
res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
return res;
}
+BIF_RETTYPE
+check_old_code_1(BIF_ALIST_1)
+{
+ Module* modp;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ modp = erts_get_module(BIF_ARG_1);
+ if (modp == NULL) { /* Doesn't exist. */
+ BIF_RET(am_false);
+ } else if (modp->old_code == NULL) { /* No old code. */
+ BIF_RET(am_false);
+ }
+ BIF_RET(am_true);
+}
+
Eterm
check_process_code_2(BIF_ALIST_2)
{
@@ -175,6 +182,13 @@ check_process_code_2(BIF_ALIST_2)
Eterm res;
if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
goto error;
+ modp = erts_get_module(BIF_ARG_2);
+ if (modp == NULL) { /* Doesn't exist. */
+ return am_false;
+ } else if (modp->old_code == NULL) { /* No old code. */
+ return am_false;
+ }
+
#ifdef ERTS_SMP
rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
@@ -188,7 +202,6 @@ check_process_code_2(BIF_ALIST_2)
ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
BIF_ARG_1, BIF_ARG_2);
}
- modp = erts_get_module(BIF_ARG_2);
res = check_process_code(rp, modp);
#ifdef ERTS_SMP
if (BIF_P != rp) {
@@ -216,7 +229,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
goto badarg;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
{
Module *modp = erts_get_module(BIF_ARG_1);
@@ -237,7 +250,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (res == am_badarg) {
@@ -329,7 +342,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (BIF_ARG_2 == am_true) {
int i;
@@ -368,7 +381,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
modp->catches = BEAM_CATCHES_NIL;
remove_from_address_table(code);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
}
@@ -412,11 +425,6 @@ check_process_code(Process* rp, Module* modp)
#endif
#define INSIDE(a) (start <= (a) && (a) < end)
- if (modp == NULL) { /* Doesn't exist. */
- return am_false;
- } else if (modp->old_code == NULL) { /* No old code. */
- return am_false;
- }
/*
* Pick up limits for the module.
@@ -546,6 +554,7 @@ check_process_code(Process* rp, Module* modp)
} else {
Eterm* literals;
Uint lit_size;
+ struct erl_off_heap_header* oh;
/*
	 * Try to get rid of constants by garbage collecting.
@@ -559,7 +568,9 @@ check_process_code(Process* rp, Module* modp)
(void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
literals = (Eterm *) modp->old_code[MI_LITERALS_START];
lit_size = (Eterm *) modp->old_code[MI_LITERALS_END] - literals;
- erts_garbage_collect_literals(rp, literals, lit_size);
+ oh = (struct erl_off_heap_header *)
+ modp->old_code[MI_LITERALS_OFF_HEAP];
+ erts_garbage_collect_literals(rp, literals, lit_size, oh);
}
}
return am_false;
@@ -637,9 +648,6 @@ purge_module(int module)
* Any code to purge?
*/
if (modp->old_code == 0) {
- if (display_loads) {
- erts_printf("No code to purge for %T\n", make_atom(module));
- }
return -1;
}
@@ -660,6 +668,7 @@ purge_module(int module)
end = (BeamInstr *)((char *)code + modp->old_code_length);
erts_cleanup_funs_on_purge(code, end);
beam_catches_delmod(modp->old_catches, code, modp->old_code_length);
+ decrement_refc(code);
erts_free(ERTS_ALC_T_CODE, (void *) code);
modp->old_code = NULL;
modp->old_code_length = 0;
@@ -669,6 +678,23 @@ purge_module(int module)
}
static void
+decrement_refc(BeamInstr* code)
+{
+ struct erl_off_heap_header* oh =
+ (struct erl_off_heap_header *) code[MI_LITERALS_OFF_HEAP];
+
+ while (oh) {
+ Binary* bptr;
+ ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
+ bptr = ((ProcBin*)oh)->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ oh = oh->next;
+ }
+}
+
+static void
remove_from_address_table(BeamInstr* code)
{
int i;
@@ -710,10 +736,10 @@ delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp)
if (modp->code != NULL && modp->code[MI_NUM_BREAKPOINTS] > 0) {
if (c_p && c_p_locks)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_clear_module_break(modp);
modp->code[MI_NUM_BREAKPOINTS] = 0;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
if (c_p && c_p_locks)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
@@ -755,7 +781,7 @@ delete_export_references(Eterm module)
}
-int
+Eterm
beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
{
Module* modp = erts_put_module(module);
@@ -766,15 +792,12 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
*/
if (modp->code != NULL && modp->old_code != NULL) {
- return -3;
+ return am_not_purged;
} else if (modp->old_code == NULL) { /* Make the current version old. */
- if (display_loads) {
- erts_printf("saving old code\n");
- }
delete_code(c_p, c_p_locks, modp);
delete_export_references(module);
}
- return 0;
+ return NIL;
}
static int
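
The beam_bif_load.c change above splits module loading into a preparation phase (erts_prepare_loading, run while the other schedulers keep working) and a commit phase (erts_finish_loading, run with thread progress blocked). The sketch below is a generic, simplified illustration of that prepare-outside/commit-inside pattern; all names in it are hypothetical stand-ins, not ERTS functions.

    /* Generic sketch of the prepare/commit split used by load_module_2 above. */
    #include <stddef.h>

    struct prepared { int ok; /* parsed and validated representation */ };

    /* Expensive work: parse and validate without stopping anyone. */
    static int prepare_code(struct prepared *p, const unsigned char *beam, size_t sz)
    {
        (void)beam; (void)sz;
        p->ok = 1;               /* pretend parsing succeeded */
        return 0;                /* 0 = success, nonzero = error */
    }

    /* Cheap work: publish the prepared code while everything else is stopped. */
    static void commit_code(struct prepared *p) { (void)p; }

    static void block_other_threads(void)   { /* cf. erts_smp_thr_progress_block() */ }
    static void unblock_other_threads(void) { /* cf. erts_smp_thr_progress_unblock() */ }

    int load_module(const unsigned char *beam, size_t sz)
    {
        struct prepared p;

        if (prepare_code(&p, beam, sz) != 0)   /* no global blocking needed here */
            return -1;

        block_other_threads();                 /* keep the critical section short */
        commit_code(&p);
        unblock_other_threads();
        return 0;
    }
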
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 773baad01f..dd31376a2d 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -167,7 +167,7 @@ erts_bp_init(void) {
int
erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, match_spec,
(BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
}
@@ -175,7 +175,7 @@ erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
int
erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, match_spec,
(BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
}
@@ -184,7 +184,7 @@ erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
void
erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
}
@@ -198,35 +198,35 @@ void erts_clear_time_trace_bif(BeamInstr *pc) {
int
erts_set_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, NULL,
(BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL);
}
int
erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, NULL,
(BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
}
int
erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, NULL,
(BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
}
int
erts_clear_trace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_trace_breakpoint));
}
int
erts_clear_mtrace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_mtrace_breakpoint));
}
@@ -238,41 +238,41 @@ erts_clear_mtrace_bif(BeamInstr *pc) {
int
erts_clear_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_debug_breakpoint));
}
int
erts_clear_count_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_count_breakpoint));
}
int
erts_clear_time_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_time_breakpoint));
}
int
erts_clear_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified, 0);
}
int
erts_clear_module_break(Module *modp) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
return clear_module_break(modp, NULL, 0, 0);
}
int
erts_clear_function_break(Module *modp, BeamInstr *pc) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
}
@@ -612,9 +612,13 @@ static void bp_hash_delete(bp_time_hash_t *hash) {
static void bp_time_diff(bp_data_time_item_t *item, /* out */
process_breakpoint_time_t *pbt, /* in */
Uint ms, Uint s, Uint us) {
- int dms,ds,dus;
+ int ds,dus;
+#ifdef DEBUG
+ int dms;
+
dms = ms - pbt->ms;
+#endif
ds = s - pbt->s;
dus = us - pbt->us;
@@ -622,7 +626,9 @@ static void bp_time_diff(bp_data_time_item_t *item, /* out */
* this is ok.
*/
+#ifdef DEBUG
ASSERT(dms >= 0 || ds >= 0 || dus >= 0);
+#endif
if (dus < 0) {
dus += 1000000;
@@ -975,7 +981,7 @@ static int set_function_break(Module *modp, BeamInstr *pc, int bif,
BpDataTime *bdt = (BpDataTime *) bd;
Uint i = 0;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
if (count_op == erts_break_stop) {
bdt->pause = 1;
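
The bp_time_diff change earlier in this file moves a variable that is only consumed by an ASSERT into #ifdef DEBUG, so that non-debug builds do not warn about a set-but-unused variable. A minimal stand-alone illustration of the same pattern (names invented for the example):

    /* Pattern sketch: keep a value that only feeds an assertion out of
     * release builds, avoiding "set but not used" warnings. */
    #include <assert.h>

    int elapsed_us(int now_us, int then_us, int now_s, int then_s)
    {
        int dus = now_us - then_us;
    #ifdef DEBUG
        int ds = now_s - then_s;          /* only needed for the sanity check */
        assert(ds >= 0 || dus >= 0);
    #else
        (void)now_s; (void)then_s;        /* silence unused-parameter warnings */
    #endif
        return dus;
    }
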
diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c
index e795b4efbd..a550ec5ad0 100644
--- a/erts/emulator/beam/beam_catches.c
+++ b/erts/emulator/beam/beam_catches.c
@@ -22,21 +22,27 @@
#endif
#include "sys.h"
#include "beam_catches.h"
+#include "global.h"
-/* XXX: should use dynamic reallocation */
-#define TABSIZ (16*1024)
-static struct {
+/* R14B04 has about 380 catches when starting erlang */
+#define DEFAULT_TABSIZE (1024)
+typedef struct {
BeamInstr *cp;
unsigned cdr;
-} beam_catches[TABSIZ];
+} beam_catch_t;
static int free_list;
static unsigned high_mark;
+static unsigned tabsize;
+static beam_catch_t *beam_catches;
void beam_catches_init(void)
{
+ tabsize = DEFAULT_TABSIZE;
free_list = -1;
high_mark = 0;
+
+ beam_catches = erts_alloc(ERTS_ALC_T_CODE, sizeof(beam_catch_t)*DEFAULT_TABSIZE);
}
unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
@@ -50,16 +56,21 @@ unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
* This avoids the need to initialise the free list in
* beam_catches_init(), which would cost O(TABSIZ) time.
*/
- if( (i = free_list) >= 0 ) {
+ if( free_list >= 0 ) {
+ i = free_list;
free_list = beam_catches[i].cdr;
- } else if( (i = high_mark) < TABSIZ ) {
- high_mark = i + 1;
+ } else if( high_mark < tabsize ) {
+ i = high_mark;
+ high_mark++;
} else {
- fprintf(stderr, "beam_catches_cons: no free slots :-(\r\n");
- exit(1);
+ /* No free slots and table is full: realloc table */
+ tabsize = 2*tabsize;
+ beam_catches = erts_realloc(ERTS_ALC_T_CODE, beam_catches, sizeof(beam_catch_t)*tabsize);
+ i = high_mark;
+ high_mark++;
}
- beam_catches[i].cp = cp;
+ beam_catches[i].cp = cp;
beam_catches[i].cdr = cdr;
return i;
@@ -67,10 +78,8 @@ unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
BeamInstr *beam_catches_car(unsigned i)
{
- if( i >= TABSIZ ) {
- fprintf(stderr,
- "beam_catches_car: index %#x is out of range\r\n", i);
- abort();
+ if( i >= tabsize ) {
+	erl_exit(1, "beam_catches_car: index %#x is out of range\r\n", i);
}
return beam_catches[i].cp;
}
@@ -80,18 +89,15 @@ void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes)
unsigned i, cdr;
for(i = head; i != (unsigned)-1;) {
- if( i >= TABSIZ ) {
- fprintf(stderr,
- "beam_catches_delmod: index %#x is out of range\r\n", i);
- abort();
+ if( i >= tabsize ) {
+ erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
if( (char*)beam_catches[i].cp - (char*)code >= code_bytes ) {
- fprintf(stderr,
+ erl_exit(1,
"beam_catches_delmod: item %#x has cp %#lx which is not "
"in module's range [%#lx,%#lx[\r\n",
i, (long)beam_catches[i].cp,
(long)code, (long)((char*)code + code_bytes));
- abort();
}
beam_catches[i].cp = 0;
cdr = beam_catches[i].cdr;
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index fffb172c68..8041c92162 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -37,6 +37,7 @@
#include "beam_load.h"
#include "beam_bp.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
#ifdef ARCH_64
# define HEXF "%016bpX"
@@ -49,15 +50,18 @@ void dbg_bt(Process* p, Eterm* sp);
void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
static int print_op(int to, void *to_arg, int op, int size, BeamInstr* addr);
-Eterm
-erts_debug_same_2(Process* p, Eterm term1, Eterm term2)
+
+BIF_RETTYPE
+erts_debug_same_2(BIF_ALIST_2)
{
- return (term1 == term2) ? am_true : am_false;
+ return (BIF_ARG_1 == BIF_ARG_2) ? am_true : am_false;
}
-Eterm
-erts_debug_flat_size_1(Process* p, Eterm term)
+BIF_RETTYPE
+erts_debug_flat_size_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm term = BIF_ARG_1;
Uint size = size_object(term);
if (IS_USMALL(0, size)) {
@@ -68,9 +72,13 @@ erts_debug_flat_size_1(Process* p, Eterm term)
}
}
-Eterm
-erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
+
+BIF_RETTYPE
+erts_debug_breakpoint_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm MFA = BIF_ARG_1;
+ Eterm bool = BIF_ARG_2;
Eterm* tp;
Eterm mfa[3];
int i;
@@ -107,7 +115,7 @@ erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (bool == am_true) {
res = make_small(erts_set_debug_break(mfa, specified));
@@ -115,7 +123,7 @@ erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
res = make_small(erts_clear_debug_break(mfa, specified));
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
return res;
@@ -175,9 +183,11 @@ erts_debug_instructions_0(BIF_ALIST_0)
return res;
}
-Eterm
-erts_debug_disassemble_1(Process* p, Eterm addr)
+BIF_RETTYPE
+erts_debug_disassemble_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm addr = BIF_ARG_1;
erts_dsprintf_buf_t *dsbufp;
Eterm* hp;
Eterm* tp;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index fb90a7d4f7..9c5450bd48 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -36,6 +36,7 @@
#include "dist.h"
#include "beam_bp.h"
#include "beam_catches.h"
+#include "erl_thr_progress.h"
#ifdef HIPE
#include "hipe_mode_switch.h"
#include "hipe_bif1.h"
@@ -70,7 +71,7 @@ do { \
} \
else \
erts_lc_check_exact(NULL, 0); \
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
} while (0)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
@@ -303,44 +304,6 @@ extern int count_instructions;
PROCESS_MAIN_CHK_LOCKS((P)); \
ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
-#if defined(HYBRID)
-# define POST_BIF_GC_SWAPIN_0(_p, _res) \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
- } \
- SWAPIN
-
-# define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _regs[0] = r(0); \
- _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
- r(0) = _regs[0]; \
- } \
- SWAPIN
-#else
-# define POST_BIF_GC_SWAPIN_0(_p, _res) \
- ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
- PROCESS_MAIN_CHK_LOCKS((_p)); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
- E = (_p)->stop; \
- } \
- HTOP = HEAP_TOP((_p))
-
-# define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
- ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
- PROCESS_MAIN_CHK_LOCKS((_p)); \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _regs[0] = r(0); \
- _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
- r(0) = _regs[0]; \
- E = (_p)->stop; \
- } \
- HTOP = HEAP_TOP((_p))
-#endif
-
#define db(N) (N)
#define tb(N) (N)
#define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
@@ -794,11 +757,11 @@ extern int count_instructions;
} \
} while (0)
-#define IsFunction2(F, A, Action) \
- do { \
- if (is_function_2(c_p, F, A) != am_true ) {\
- Action; \
- } \
+#define IsFunction2(F, A, Action) \
+ do { \
+ if (erl_is_function(c_p, F, A) != am_true ) { \
+ Action; \
+ } \
} while (0)
#define IsTupleOfArity(Src, Arity, Fail) \
@@ -1145,26 +1108,11 @@ void process_main(void)
Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
#endif
-#ifndef ERTS_SMP
-#if !HALFWORD_HEAP
- static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
- /* X registers -- not used directly, but
- * through 'reg', because using it directly
- * needs two instructions on a SPARC,
- * while using it through reg needs only
- * one.
- */
-#endif
/*
- * Floating point registers.
- */
- static FloatDef freg[MAX_REG];
-#else
- /* X regisers and floating point registers are located in
+ * X registers and floating point registers are located in
* scheduler specific data.
*/
register FloatDef *freg;
-#endif
/*
* For keeping the negative old value of 'reds' when call saving is active.
@@ -1201,14 +1149,6 @@ void process_main(void)
init_done = 1;
goto init_emulator;
}
-#ifndef ERTS_SMP
-#if !HALFWORD_HEAP
- reg = save_reg; /* XXX: probably wastes a register on x86 */
-#else
- /* Registers need to be heap allocated (correct memory range) for tracing to work */
- reg = erts_alloc(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * sizeof(Eterm));
-#endif
-#endif
c_p = NULL;
reds_used = 0;
goto do_schedule1;
@@ -1229,10 +1169,8 @@ void process_main(void)
#endif
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
-#ifdef ERTS_SMP
- reg = c_p->scheduler_data->save_reg;
- freg = c_p->scheduler_data->freg;
-#endif
+ reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
+ freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
#if !HEAP_ON_C_STACK
tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
#endif
@@ -1566,9 +1504,17 @@ void process_main(void)
PRE_BIF_SWAPOUT(c_p);
c_p->fcalls = FCALLS - 1;
- result = send_2(c_p, r(0), x(1));
+ reg[0] = r(0);
+ result = erl_send(c_p, r(0), x(1));
PreFetch(0, next);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
+ result = erts_gc_after_bif_call(c_p, result, reg, 2);
+ r(0) = reg[0];
+ E = c_p->stop;
+ }
+ HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
if (is_value(result)) {
r(0) = result;
@@ -1576,10 +1522,9 @@ void process_main(void)
NextPF(0, next);
} else if (c_p->freason == TRAP) {
SET_CP(c_p, I+1);
- SET_I(*((BeamInstr **) (BeamInstr) ((c_p)->def_arg_reg + 3)));
+ SET_I(c_p->i);
SWAPIN;
- r(0) = c_p->def_arg_reg[0];
- x(1) = c_p->def_arg_reg[1];
+ r(0) = reg[0];
Dispatch();
}
goto find_func_info;
@@ -2234,16 +2179,16 @@ void process_main(void)
OpCase(bif1_fbsd):
{
- Eterm (*bf)(Process*, Eterm);
- Eterm arg;
+ Eterm (*bf)(Process*, Eterm*);
+ Eterm tmp_reg[1];
Eterm result;
- GetArg1(2, arg);
+ GetArg1(2, tmp_reg[0]);
bf = (BifFunction) Arg(1);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, arg);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2262,17 +2207,17 @@ void process_main(void)
OpCase(bif1_body_bsd):
{
- Eterm (*bf)(Process*, Eterm);
+ Eterm (*bf)(Process*, Eterm*);
- Eterm arg;
+ Eterm tmp_reg[1];
Eterm result;
- GetArg1(1, arg);
+ GetArg1(1, tmp_reg[0]);
bf = (BifFunction) Arg(0);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, arg);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2281,7 +2226,7 @@ void process_main(void)
if (is_value(result)) {
StoreBifResult(2, result);
}
- reg[0] = arg;
+ reg[0] = tmp_reg[0];
SWAPOUT;
I = handle_error(c_p, I, reg, bf);
goto post_error_handling;
@@ -2405,14 +2350,15 @@ void process_main(void)
*/
OpCase(i_bif2_fbd):
{
- Eterm (*bf)(Process*, Eterm, Eterm);
+ Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
+ Eterm (*bf)(Process*, Eterm*);
Eterm result;
bf = (BifFunction) Arg(1);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_arg1, tmp_arg2);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2430,13 +2376,14 @@ void process_main(void)
*/
OpCase(i_bif2_body_bd):
{
- Eterm (*bf)(Process*, Eterm, Eterm);
+ Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
+ Eterm (*bf)(Process*, Eterm*);
Eterm result;
bf = (BifFunction) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_arg1, tmp_arg2);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2456,77 +2403,9 @@ void process_main(void)
* The most general BIF call. The BIF may build any amount of data
* on the heap. The result is always returned in r(0).
*/
- OpCase(call_bif0_e):
- {
- Eterm (*bf)(Process*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
-
- PRE_BIF_SWAPOUT(c_p);
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- r(0) = (*bf)(c_p, I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(r(0)));
- ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN_0(c_p, r(0));
- FCALLS = c_p->fcalls;
- if (is_value(r(0))) {
- CHECK_TERM(r(0));
- Next(1);
- }
- else if (c_p->freason == TRAP) {
- goto call_bif_trap3;
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
- reg[0] = r(0);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(call_bif1_e):
- {
- Eterm (*bf)(Process*, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
- Eterm result;
- BeamInstr *next;
-
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
- PreFetch(1, next);
- PRE_BIF_SWAPOUT(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, r(0), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 1);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(1, next);
- } else if (c_p->freason == TRAP) {
- goto call_bif_trap3;
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
- reg[0] = r(0);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(call_bif2_e):
+ OpCase(call_bif_e):
{
- Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm (*bf)(Process*, Eterm*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
Eterm result;
BeamInstr *next;
@@ -2536,61 +2415,29 @@ void process_main(void)
save_calls(c_p, (Export *) Arg(0));
}
PreFetch(1, next);
- CHECK_TERM(r(0));
- CHECK_TERM(x(1));
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, r(0), x(1), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(1, next);
- } else if (c_p->freason == TRAP) {
- goto call_bif_trap3;
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
reg[0] = r(0);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(call_bif3_e):
- {
- Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
- Eterm result;
- BeamInstr *next;
-
- PRE_BIF_SWAPOUT(c_p);
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
- PreFetch(1, next);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, r(0), x(1), x(2), I);
+ result = (*bf)(c_p, reg, I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 3);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
+ Uint arity = ((Export *)Arg(0))->code[2];
+ result = erts_gc_after_bif_call(c_p, result, reg, arity);
+ E = c_p->stop;
+ }
+ HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
if (is_value(result)) {
r(0) = result;
CHECK_TERM(r(0));
NextPF(1, next);
} else if (c_p->freason == TRAP) {
- call_bif_trap3:
SET_CP(c_p, I+2);
- SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
+ SET_I(c_p->i);
SWAPIN;
- r(0) = c_p->def_arg_reg[0];
- x(1) = c_p->def_arg_reg[1];
- x(2) = c_p->def_arg_reg[2];
+ r(0) = reg[0];
Dispatch();
}
@@ -2598,7 +2445,6 @@ void process_main(void)
* Error handling. SWAPOUT is not needed because it was done above.
*/
ASSERT(c_p->stop == E);
- reg[0] = r(0);
I = handle_error(c_p, I, reg, bf);
goto post_error_handling;
}
@@ -3351,64 +3197,23 @@ void process_main(void)
ASSERT(bif_nif_arity <= 3);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- switch (bif_nif_arity) {
- case 3:
- {
- Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, r(0), x(1), x(2), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
- break;
- case 2:
- {
- Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, r(0), x(1), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
- break;
- case 1:
- {
- Eterm (*bf)(Process*, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, r(0), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
- break;
- case 0:
- {
- Eterm (*bf)(Process*, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- break;
- }
- default:
- erl_exit(1, "apply_bif: invalid arity: %u\n",
- bif_nif_arity);
+ reg[0] = r(0);
+ {
+ Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ nif_bif_result = (*bf)(c_p, reg, I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
}
apply_bif_or_nif_epilogue:
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
ERTS_HOLE_CHECK(c_p);
if (c_p->mbuf) {
- reg[0] = r(0);
nif_bif_result = erts_gc_after_bif_call(c_p, nif_bif_result,
reg, bif_nif_arity);
- r(0) = reg[0];
}
SWAPIN; /* There might have been a garbage collection. */
FCALLS = c_p->fcalls;
@@ -3419,17 +3224,14 @@ void process_main(void)
c_p->cp = 0;
Goto(*I);
} else if (c_p->freason == TRAP) {
- SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
- r(0) = c_p->def_arg_reg[0];
- x(1) = c_p->def_arg_reg[1];
- x(2) = c_p->def_arg_reg[2];
+ SET_I(c_p->i);
+ r(0) = reg[0];
if (c_p->flags & F_HIBERNATE_SCHED) {
c_p->flags &= ~F_HIBERNATE_SCHED;
goto do_schedule;
}
Dispatch();
}
- reg[0] = r(0);
I = handle_error(c_p, c_p->cp, reg, vbf);
goto post_error_handling;
}
@@ -3561,7 +3363,7 @@ void process_main(void)
* Operands: NotUsed Live Dst
*/
do_bs_init_bits_known:
- num_bytes = (num_bits+7) >> 3;
+ num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3;
if (num_bits & 7) {
alloc += ERL_SUB_BIN_SIZE;
}
@@ -3992,8 +3794,7 @@ void process_main(void)
* too big numbers).
*/
if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL)) ||
- val == make_small(0xFFFEUL) || val == make_small(0xFFFFUL)) {
+ (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) {
goto badarg;
}
Next(2);
@@ -4012,8 +3813,8 @@ void process_main(void)
* the valid range).
*/
if (is_not_small(tmp_arg1) || tmp_arg1 > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= tmp_arg1 && tmp_arg1 <= make_small(0xDFFFUL)) ||
- tmp_arg1 == make_small(0xFFFEUL) || tmp_arg1 == make_small(0xFFFFUL)) {
+ (make_small(0xD800UL) <= tmp_arg1 &&
+ tmp_arg1 <= make_small(0xDFFFUL))) {
ErlBinMatchBuffer *mb = ms_matchbuffer(tmp_arg2);
mb->offset -= 32;
@@ -4888,92 +4689,6 @@ void process_main(void)
}
/*
- * Instructions for allocating on the message area.
- */
-
- OpCase(i_global_cons):
- {
- BeamInstr *next;
-#ifdef HYBRID
- Eterm *hp;
-
- PreFetch(0,next);
- TestGlobalHeap(2,2,hp);
- hp[0] = r(0);
- hp[1] = x(1);
- r(0) = make_list(hp);
-#ifndef INCREMENTAL
- global_htop += 2;
-#endif
- NextPF(0,next);
-#else
- PreFetch(0,next);
- c_p->freason = EXC_INTERNAL_ERROR;
- goto find_func_info;
-#endif
- }
-
- OpCase(i_global_tuple):
- {
- BeamInstr *next;
- int len;
-#ifdef HYBRID
- Eterm list;
- Eterm *hp;
-#endif
-
- if ((len = list_length(r(0))) < 0) {
- goto badarg;
- }
-
- PreFetch(0,next);
-#ifdef HYBRID
- TestGlobalHeap(len + 1,1,hp);
- list = r(0);
- r(0) = make_tuple(hp);
- *hp++ = make_arityval(len);
- while(is_list(list))
- {
- Eterm* cons = list_val(list);
- *hp++ = CAR(cons);
- list = CDR(cons);
- }
-#ifndef INCREMENTAL
- global_htop += len + 1;
-#endif
- NextPF(0,next);
-#else
- c_p->freason = EXC_INTERNAL_ERROR;
- goto find_func_info;
-#endif
- }
-
- OpCase(i_global_copy):
- {
- BeamInstr *next;
- PreFetch(0,next);
-#ifdef HYBRID
- if (!IS_CONST(r(0)))
- {
- BM_SWAP_TIMER(system,copy);
- SWAPOUT;
- reg[0] = r(0);
- reg[1] = NIL;
- r(0) = copy_struct_lazy(c_p,r(0),0);
- ASSERT(ma_src_top == 0);
- ASSERT(ma_dst_top == 0);
- ASSERT(ma_offset_top == 0);
- SWAPIN;
- BM_SWAP_TIMER(copy,system);
- }
- NextPF(0,next);
-#else
- c_p->freason = EXC_INTERNAL_ERROR;
- goto find_func_info;
-#endif
- }
-
- /*
* New floating point instructions.
*/
@@ -5151,10 +4866,8 @@ void process_main(void)
c_p->def_arg_reg[4] = -neg_o_reds;
reg[0] = r(0);
c_p = hipe_mode_switch(c_p, cmd, reg);
-#ifdef ERTS_SMP
- reg = c_p->scheduler_data->save_reg;
- freg = c_p->scheduler_data->freg;
-#endif
+ reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
+ freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
neg_o_reds = -c_p->def_arg_reg[4];
FCALLS = c_p->fcalls;
@@ -5268,8 +4981,8 @@ void process_main(void)
OpCase(int_code_end):
OpCase(label_L):
- OpCase(too_old_compiler):
OpCase(on_load):
+ OpCase(line_I):
erl_exit(1, "meta op\n");
/*
@@ -5686,6 +5399,25 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
* that c_p->ftrace will point to a cons cell which holds the given args
* and the saved data (encoded as a bignum).
*
+ * There is an issue with line number information. Line number
+ * information is associated with the address *before* an operation
+ * that may fail or be stored on the stack. But continuation
+ * pointers point after its call instruction, not before. To avoid
+ * finding the wrong line number, we'll need to adjust them so that
+ * they point at the beginning of the call instruction or inside the
+ * call instruction. Since it's impractical to point at the beginning,
+ * we'll do the simplest thing and decrement the continuation pointers
+ * by one.
+ *
+ * Here is an example of what can go wrong. Without the adjustment
+ * of continuation pointers, the call at line 42 below would seem to
+ * be at line 43:
+ *
+ * line 42
+ * call ...
+ * line 43
+ * gc_bif ...
+ *
* (It would be much better to put the arglist - when it exists - in the
* error value instead of in the actual trace; e.g. '{badarg, Args}'
* instead of using 'badarg' with Args in the trace. The arglist may
@@ -5752,7 +5484,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
}
/* Save second stack entry if CP is valid and different from pc */
if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
- s->trace[s->depth++] = c_p->cp;
+ s->trace[s->depth++] = c_p->cp - 1;
depth--;
}
s->pc = NULL;
@@ -5772,13 +5504,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
/* Save first stack entry */
ASSERT(c_p->cp);
if (depth > 0) {
- s->trace[s->depth++] = c_p->cp;
+ s->trace[s->depth++] = c_p->cp - 1;
depth--;
}
s->pc = NULL; /* Ignore pc */
} else {
if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
- s->trace[s->depth++] = c_p->cp;
+ s->trace[s->depth++] = c_p->cp - 1;
depth--;
}
s->pc = pc;
@@ -5793,24 +5525,31 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
}
/* Save the actual stack trace */
+ erts_save_stacktrace(c_p, s, depth);
+}
+
+void
+erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
+{
if (depth > 0) {
Eterm *ptr;
BeamInstr *prev = s->depth ? s->trace[s->depth-1] : NULL;
BeamInstr i_return_trace = beam_return_trace[0];
BeamInstr i_return_to_trace = beam_return_to_trace[0];
+
/*
* Traverse the stack backwards and add all unique continuation
* pointers to the buffer, up to the maximum stack trace size.
*
* Skip trace stack frames.
*/
- ptr = c_p->stop;
- if (ptr < STACK_START(c_p)
- && (is_not_CP(*ptr)|| (*cp_val(*ptr) != i_return_trace &&
- *cp_val(*ptr) != i_return_to_trace))
- && c_p->cp) {
- /* Can not follow cp here - code may be unloaded */
- BeamInstr *cpp = c_p->cp;
+ ptr = p->stop;
+ if (ptr < STACK_START(p) &&
+ (is_not_CP(*ptr)|| (*cp_val(*ptr) != i_return_trace &&
+ *cp_val(*ptr) != i_return_to_trace)) &&
+ p->cp) {
+ /* Cannot follow cp here - code may be unloaded */
+ BeamInstr *cpp = p->cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
@@ -5819,7 +5558,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
ptr += 1;
}
}
- while (ptr < STACK_START(c_p) && depth > 0) {
+ while (ptr < STACK_START(p) && depth > 0) {
if (is_CP(*ptr)) {
if (*cp_val(*ptr) == i_return_trace) {
/* Skip stack frame variables */
@@ -5834,7 +5573,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
if (cp != prev) {
/* Record non-duplicates only */
prev = cp;
- s->trace[s->depth++] = cp;
+ s->trace[s->depth++] = cp - 1;
depth--;
}
ptr++;
@@ -5902,9 +5641,14 @@ build_stacktrace(Process* c_p, Eterm exc) {
struct StackTrace* s;
Eterm args;
int depth;
- BeamInstr* current;
- Eterm Where = NIL;
- Eterm *next_p = &Where;
+ FunctionInfo fi;
+ FunctionInfo* stk;
+ FunctionInfo* stkp;
+ Eterm res = NIL;
+ Uint heap_size;
+ Eterm* hp;
+ Eterm mfa;
+ int i;
if (! (s = get_trace_from_exc(exc))) {
return NIL;
@@ -5923,64 +5667,56 @@ build_stacktrace(Process* c_p, Eterm exc) {
* saved s->current should already contain the proper value.
*/
if (s->pc != NULL) {
- current = find_function_from_pc(s->pc);
+ erts_lookup_function_info(&fi, s->pc, 1);
+ } else if (GET_EXC_INDEX(s->freason) ==
+ GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
+ erts_lookup_function_info(&fi, s->current, 1);
} else {
- current = s->current;
+ erts_set_current_function(&fi, s->current);
}
+
/*
- * If current is still NULL, default to the initial function
+ * If fi.current is still NULL, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
- if (current == NULL) {
- current = c_p->initial;
+ if (fi.current == NULL) {
+ erts_set_current_function(&fi, c_p->initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
}
- depth = s->depth;
-
/*
- * Add the {M,F,A} for the current function
- * (where A is arity or [Argument]).
+ * Look up all saved continuation pointers and calculate
+ * needed heap space.
*/
- {
- int i;
- Eterm mfa;
- Uint heap_size = 6*(depth+1);
- Eterm* hp = HAlloc(c_p, heap_size);
- Eterm* hp_end = hp + heap_size;
-
- if (args != am_true) {
- /* We have an arglist - use it */
- mfa = TUPLE3(hp, current[0], current[1], args);
- } else {
- Eterm arity = make_small(current[2]);
- mfa = TUPLE3(hp, current[0], current[1], arity);
+ depth = s->depth;
+ stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
+ depth*sizeof(FunctionInfo));
+ heap_size = fi.needed + 2;
+ for (i = 0; i < depth; i++) {
+ erts_lookup_function_info(stkp, s->trace[i], 1);
+ if (stkp->current) {
+ heap_size += stkp->needed + 2;
+ stkp++;
}
- hp += 4;
- ASSERT(*next_p == NIL);
- *next_p = CONS(hp, mfa, NIL);
- next_p = &CDR(list_val(*next_p));
- hp += 2;
+ }
- /*
- * Finally, we go through the saved continuation pointers.
- */
- for (i = 0; i < depth; i++) {
- BeamInstr *fi = find_function_from_pc((BeamInstr *) s->trace[i]);
- if (fi == NULL) continue;
- mfa = TUPLE3(hp, fi[0], fi[1], make_small(fi[2]));
- hp += 4;
- ASSERT(*next_p == NIL);
- *next_p = CONS(hp, mfa, NIL);
- next_p = &CDR(list_val(*next_p));
- hp += 2;
- }
- ASSERT(hp <= hp_end);
- HRelease(c_p, hp_end, hp);
+ /*
+ * Allocate heap space and build the stacktrace.
+ */
+ hp = HAlloc(c_p, heap_size);
+ while (stkp > stk) {
+ stkp--;
+ hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
+ res = CONS(hp, mfa, res);
+ hp += 2;
}
- return Where;
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) stk);
+ return res;
}
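
A recurring change throughout beam_emu.c above is that BIFs are no longer called through fixed-arity C signatures (Process*, Eterm, Eterm, ...) but through one signature that takes the X register array, so a single dispatch path (call_bif_e) covers all arities. The following is a simplified, hypothetical sketch of old versus new dispatch; "Eterm" and "Process" here are stand-ins, not the real ERTS declarations.

    /* Simplified sketch of the BIF calling-convention change in beam_emu.c. */
    #include <stdio.h>

    typedef unsigned long Eterm;
    typedef struct Process Process;

    /* Old style: one C signature per arity, so the emulator needed
     * separate call_bif0_e, call_bif1_e, ... instructions. */
    static Eterm old_style_bif2(Process *p, Eterm a, Eterm b) { (void)p; return a + b; }

    /* New style: every BIF takes the X register array. */
    static Eterm new_style_bif(Process *p, Eterm *reg) { (void)p; return reg[0] + reg[1]; }

    int main(void)
    {
        Eterm reg[3] = {1, 2, 0};        /* pretend X registers */
        Eterm (*bf)(Process *, Eterm *) = new_style_bif;

        printf("old: %lu\n", old_style_bif2(NULL, reg[0], reg[1]));
        printf("new: %lu\n", (*bf)(NULL, reg));
        return 0;
    }
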
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 57fe25453d..e43d364add 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -158,6 +158,7 @@ typedef struct {
#define LITERAL_CHUNK 6
#define ATTR_CHUNK 7
#define COMPILE_CHUNK 8
+#define LINE_CHUNK 9
#define NUM_CHUNK_TYPES (sizeof(chunk_types)/sizeof(chunk_types[0]))
@@ -182,6 +183,7 @@ static Uint chunk_types[] = {
MakeIffId('L', 'i', 't', 'T'), /* 6 */
MakeIffId('A', 't', 't', 'r'), /* 7 */
MakeIffId('C', 'I', 'n', 'f'), /* 8 */
+ MakeIffId('L', 'i', 'n', 'e'), /* 9 */
};
/*
@@ -204,6 +206,7 @@ typedef struct {
Eterm term; /* The tagged term (in the heap). */
Uint heap_size; /* (Exact) size on the heap. */
Uint offset; /* Offset from temporary location to final. */
+ ErlOffHeap off_heap; /* Start of linked list of ProcBins. */
Eterm* heap; /* Heap for term. */
} Literal;
@@ -231,10 +234,19 @@ struct string_patch {
};
/*
+ * This structure associates a code offset with a source code location.
+ */
+
+typedef struct {
+ int pos; /* Position in code */
+ Uint32 loc; /* Location in source code */
+} LineInstr;
+
+/*
* This structure contains all information about the module being loaded.
*/
-typedef struct {
+typedef struct LoaderState {
/*
* The current logical file within the binary.
*/
@@ -276,7 +288,6 @@ typedef struct {
BeamInstr* code; /* Loaded code. */
int ci; /* Current index into loaded code. */
Label* labels;
- BeamInstr new_bs_put_strings; /* Linked list of i_new_bs_put_string instructions. */
StringPatch* string_patches; /* Linked list of position into string table to patch. */
BeamInstr catches; /* Linked list of catch_yf instructions. */
unsigned loaded_size; /* Final size of code when loaded. */
@@ -325,27 +336,58 @@ typedef struct {
Literal* literals; /* Array of literals. */
LiteralPatch* literal_patches; /* Operands that need to be patched. */
Uint total_literal_size; /* Total heap size for all literals. */
+
+ /*
+ * Line table.
+ */
+ BeamInstr* line_item; /* Line items from the BEAM file. */
+ int num_line_items; /* Number of line items. */
+ LineInstr* line_instr; /* Line instructions */
+ int num_line_instrs; /* Maximum number of line instructions */
+ int current_li; /* Current line instruction */
+ int* func_line; /* Mapping from function to first line instr */
+ Eterm* fname; /* List of file names */
+ int num_fnames; /* Number of filenames in fname table */
+ int loc_size; /* Size of location info in bytes (2/4) */
} LoaderState;
-typedef struct {
- unsigned num_functions; /* Number of functions. */
- Eterm* func_tab[1]; /* Pointers to each function. */
-} LoadedCode;
-
-#define GetTagAndValue(Stp, Tag, Val) \
- do { \
- BeamInstr __w; \
- GetByte(Stp, __w); \
- Tag = __w & 0x07; \
- if ((__w & 0x08) == 0) { \
- Val = __w >> 4; \
- } else if ((__w & 0x10) == 0) { \
- Val = ((__w >> 5) << 8); \
- GetByte(Stp, __w); \
- Val |= __w; \
- } else { \
- if (!get_int_val(Stp, __w, &(Val))) goto load_error; \
- } \
+/*
+ * Layout of the line table.
+ */
+
+#define MI_LINE_FNAME_PTR 0
+#define MI_LINE_LOC_TAB 1
+#define MI_LINE_LOC_SIZE 2
+#define MI_LINE_FUNC_TAB 3
+
+#define LINE_INVALID_LOCATION (0)
+
+/*
+ * Macros for manipulating locations.
+ */
+
+#define IS_VALID_LOCATION(File, Line) \
+ ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1))
+#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
+#define LOC_FILE(Loc) ((Loc) >> 24)
+#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
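
The location macros above pack a file index and a line number into one 32-bit word: the file index goes in the top 8 bits and the line number in the low 24 bits, with 0 reserved as the invalid location. A minimal stand-alone sketch of that packing (hypothetical helper names; only the bit layout is taken from the macros):

    #include <assert.h>
    #include <stdint.h>

    /* Pack a file index (top 8 bits) and a line number (low 24 bits)
     * into a single word; 0 is reserved as the invalid location. */
    static uint32_t make_location(unsigned file, unsigned line)
    {
        assert(file < 255 && line < (1u << 24) - 1);
        return ((uint32_t) file << 24) | line;
    }

    static unsigned loc_file(uint32_t loc) { return loc >> 24; }
    static unsigned loc_line(uint32_t loc) { return loc & ((1u << 24) - 1); }

    int main(void)
    {
        uint32_t loc = make_location(2, 4711);  /* third file, line 4711 */
        assert(loc_file(loc) == 2 && loc_line(loc) == 4711);
        return 0;
    }
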
+#define GetTagAndValue(Stp, Tag, Val) \
+ do { \
+ BeamInstr __w; \
+ GetByte(Stp, __w); \
+ Tag = __w & 0x07; \
+ if ((__w & 0x08) == 0) { \
+ Val = __w >> 4; \
+ } else if ((__w & 0x10) == 0) { \
+ Val = ((__w >> 5) << 8); \
+ GetByte(Stp, __w); \
+ Val |= __w; \
+ } else { \
+ int __res = get_tag_and_value(Stp, __w, (Tag), &(Val)); \
+ if (__res < 0) goto load_error; \
+ Tag = (unsigned) __res; \
+ } \
} while (0)
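
The GetTagAndValue macro above decodes the compact tag/value encoding of the BEAM code chunk: the low three bits of the first byte are the tag; if bit 3 is clear the value sits in the top four bits, if bit 3 is set but bit 4 is clear an 11-bit value is spread over this byte and the next, and otherwise the multi-byte form is handled by get_tag_and_value(). A stand-alone sketch of the two short forms only (hypothetical function name, assuming the same bit layout):

    #include <stddef.h>

    /* Decode one tag/value pair in the two short forms of the BEAM
     * compact term encoding. Returns the number of bytes consumed,
     * or 0 if the value uses the longer multi-byte encoding. */
    static size_t decode_short_tag(const unsigned char* p,
                                   unsigned* tag, unsigned long* val)
    {
        unsigned char w = p[0];

        *tag = w & 0x07;                  /* low three bits: the tag */
        if ((w & 0x08) == 0) {
            *val = w >> 4;                /* 4-bit value, same byte */
            return 1;
        } else if ((w & 0x10) == 0) {
            *val = ((unsigned long) (w >> 5) << 8) | p[1]; /* 11-bit value */
            return 2;
        }
        return 0;                         /* longer encoding, not handled */
    }
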
@@ -453,12 +495,10 @@ typedef struct {
} while (0)
-static int bin_load(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* modp, byte* bytes, int unloaded_size);
-static void init_state(LoaderState* stp);
-static int insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module,
- BeamInstr* code, Uint size, BeamInstr catches);
+static void free_state(LoaderState* stp);
+static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module,
+ BeamInstr* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
Uint num_types, Uint num_mandatory);
static int load_atom_table(LoaderState* stp);
@@ -466,6 +506,7 @@ static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
static int read_lambda_table(LoaderState* stp);
static int read_literal_table(LoaderState* stp);
+static int read_line_table(LoaderState* stp);
static int read_code_header(LoaderState* stp);
static int load_code(LoaderState* stp);
static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
@@ -489,8 +530,8 @@ static void load_printf(int line, LoaderState* context, char *fmt, ...);
static int transform_engine(LoaderState* st);
static void id_to_string(Uint id, char* s);
static void new_genop(LoaderState* stp);
-static int get_int_val(LoaderState* stp, Uint len_code, BeamInstr* result);
-static int get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result);
+static int get_tag_and_value(LoaderState* stp, Uint len_code,
+ unsigned tag, BeamInstr* result);
static int new_label(LoaderState* stp);
static void new_literal_patch(LoaderState* stp, int pos);
static void new_string_patch(LoaderState* stp, int pos);
@@ -504,6 +545,8 @@ static Eterm native_addresses(Process* p, Eterm mod);
int patch_funentries(Eterm Patchlist);
int patch(Eterm Addresses, Uint fe);
static int safe_mul(UWord a, UWord b, UWord* resp);
+static void lookup_loc(FunctionInfo* fi, BeamInstr* pc,
+ BeamInstr* modp, int idx);
static int must_swap_floats;
@@ -548,7 +591,7 @@ define_file(LoaderState* stp, char* name, int idx)
stp->file_left = stp->chunks[idx].size;
}
-int
+Eterm
erts_load_module(Process *c_p,
ErtsProcLocks c_p_locks,
Eterm group_leader, /* Group leader or NIL if none. */
@@ -557,29 +600,17 @@ erts_load_module(Process *c_p,
* On return, contains the actual module name.
*/
byte* code, /* Points to the code to load */
- int size) /* Size of code to load. */
+ Uint size) /* Size of code to load. */
{
- ErlDrvBinary* bin;
- int result;
+ LoaderState* stp = erts_alloc_loader_state();
+ Eterm retval;
- if (size >= 4 && code[0] == 'F' && code[1] == 'O' &&
- code[2] == 'R' && code[3] == '1') {
- /*
- * The BEAM module is not compressed.
- */
- result = bin_load(c_p, c_p_locks, group_leader, modp, code, size);
- } else {
- /*
- * The BEAM module is compressed (or possibly invalid/corrupted).
- */
- if ((bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)code, size)) == NULL) {
- return -1;
- }
- result = bin_load(c_p, c_p_locks, group_leader, modp,
- (byte*)bin->orig_bytes, bin->orig_size);
- driver_free_binary(bin);
+ retval = erts_prepare_loading(stp, c_p, group_leader, modp,
+ code, size);
+ if (retval != NIL) {
+ return retval;
}
- return result;
+ return erts_finish_loading(stp, c_p, c_p_locks, modp);
}
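
With this change erts_load_module() is only a convenience wrapper: erts_prepare_loading() parses and validates the BEAM binary without touching any global tables (and frees the loader state itself on failure), while erts_finish_loading() installs the prepared code. A hedged sketch of driving the two phases the same way the wrapper does (signatures and error atoms as introduced by this patch; the caller is assumed to hold whatever locks erts_finish_loading() requires):

    /* Sketch only: the two-phase loading sequence used by
     * erts_load_module() after this patch. */
    static Eterm load_module_sketch(Process* c_p, ErtsProcLocks locks,
                                    Eterm group_leader, Eterm* modp,
                                    byte* code, Uint size)
    {
        LoaderState* stp = erts_alloc_loader_state();
        Eterm res;

        /* Phase 1: parse and validate; the state is freed on error. */
        res = erts_prepare_loading(stp, c_p, group_leader, modp, code, size);
        if (res != NIL) {
            return res;             /* e.g. am_badfile */
        }

        /* Phase 2: install the code; all other schedulers must be
         * blocked, since the export table is updated without locks. */
        return erts_finish_loading(stp, c_p, locks, modp);
    }
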
/* #define LOAD_MEMORY_HARD_DEBUG 1*/
@@ -594,16 +625,30 @@ extern void check_allocated_block(Uint type, void *blk);
#define CHKBLK(TYPE,BLK) /* nothing */
#endif
-static int
-bin_load(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* modp, byte* bytes, int unloaded_size)
+Eterm
+erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
+ Eterm* modp, byte* code, Uint unloaded_size)
{
- LoaderState state;
- int rval = -1;
+ Eterm retval = am_badfile;
+ ErlDrvBinary* bin = NULL;
+
+ stp->module = *modp;
+ stp->group_leader = group_leader;
- init_state(&state);
- state.module = *modp;
- state.group_leader = group_leader;
+ /*
+ * Check if the module is compressed (or possibly invalid/corrupted).
+ */
+ if ( !(unloaded_size >= 4 &&
+ code[0] == 'F' && code[1] == 'O' &&
+ code[2] == 'R' && code[3] == '1') ) {
+ bin = (ErlDrvBinary *)
+ erts_gzinflate_buffer((char*)code, unloaded_size);
+ if (bin == NULL) {
+ goto load_error;
+ }
+ code = (byte*)bin->orig_bytes;
+ unloaded_size = bin->orig_size;
+ }
/*
* Scan the IFF file.
@@ -614,11 +659,11 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
#endif
CHKALLOC();
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- state.file_name = "IFF header for Beam file";
- state.file_p = bytes;
- state.file_left = unloaded_size;
- if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ stp->file_name = "IFF header for Beam file";
+ stp->file_p = code;
+ stp->file_left = unloaded_size;
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
goto load_error;
}
@@ -626,19 +671,38 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the header for the code chunk.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "code chunk header", CODE_CHUNK);
- if (!read_code_header(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "code chunk header", CODE_CHUNK);
+ if (!read_code_header(stp)) {
goto load_error;
}
/*
+ * Initialize code area.
+ */
+ stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
+ stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
+ sizeof(BeamInstr) * stp->code_buffer_size);
+
+ stp->code[MI_NUM_FUNCTIONS] = stp->num_functions;
+ stp->ci = MI_FUNCTIONS + stp->num_functions + 1;
+
+ stp->code[MI_ATTR_PTR] = 0;
+ stp->code[MI_ATTR_SIZE] = 0;
+ stp->code[MI_ATTR_SIZE_ON_HEAP] = 0;
+ stp->code[MI_COMPILE_PTR] = 0;
+ stp->code[MI_COMPILE_SIZE] = 0;
+ stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
+ stp->code[MI_NUM_BREAKPOINTS] = 0;
+
+
+ /*
* Read the atom table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "atom table", ATOM_CHUNK);
- if (!load_atom_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp)) {
goto load_error;
}
@@ -646,9 +710,9 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the import table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "import table", IMP_CHUNK);
- if (!load_import_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "import table", IMP_CHUNK);
+ if (!load_import_table(stp)) {
goto load_error;
}
@@ -656,10 +720,10 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the lambda (fun) table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- if (state.chunks[LAMBDA_CHUNK].size > 0) {
- define_file(&state, "lambda (fun) table", LAMBDA_CHUNK);
- if (!read_lambda_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (stp->chunks[LAMBDA_CHUNK].size > 0) {
+ define_file(stp, "lambda (fun) table", LAMBDA_CHUNK);
+ if (!read_lambda_table(stp)) {
goto load_error;
}
}
@@ -668,10 +732,22 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the literal table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- if (state.chunks[LITERAL_CHUNK].size > 0) {
- define_file(&state, "literals table (constant pool)", LITERAL_CHUNK);
- if (!read_literal_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (stp->chunks[LITERAL_CHUNK].size > 0) {
+ define_file(stp, "literals table (constant pool)", LITERAL_CHUNK);
+ if (!read_literal_table(stp)) {
+ goto load_error;
+ }
+ }
+
+ /*
+ * Read the line table (if present).
+ */
+
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (stp->chunks[LINE_CHUNK].size > 0) {
+ define_file(stp, "line table", LINE_CHUNK);
+ if (!read_line_table(stp)) {
goto load_error;
}
}
@@ -680,15 +756,15 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Load the code chunk.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- state.file_name = "code chunk";
- state.file_p = state.code_start;
- state.file_left = state.code_size;
- if (!load_code(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ stp->file_name = "code chunk";
+ stp->file_p = stp->code_start;
+ stp->file_left = stp->code_size;
+ if (!load_code(stp)) {
goto load_error;
}
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- if (!freeze_code(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (!freeze_code(stp)) {
goto load_error;
}
@@ -698,9 +774,52 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* loading the code, because it contains labels.)
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "export table", EXP_CHUNK);
- if (!read_export_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "export table", EXP_CHUNK);
+ if (!read_export_table(stp)) {
+ goto load_error;
+ }
+
+ /*
+ * Good so far.
+ */
+
+ retval = NIL;
+
+ load_error:
+ if (bin) {
+ driver_free_binary(bin);
+ }
+ if (retval != NIL) {
+ free_state(stp);
+ }
+ return retval;
+}
+
+Eterm
+erts_finish_loading(LoaderState* stp, Process* c_p,
+ ErtsProcLocks c_p_locks, Eterm* modp)
+{
+ Eterm retval;
+
+ /*
+ * No other process may run since we will update the export
+ * table which is not protected by any locks.
+ */
+
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0 ||
+ erts_smp_thr_progress_is_blocking());
+
+ /*
+ * Make current code for the module old and insert the new code
+ * as current. This will fail if there already exists old code
+ * for the module.
+ */
+
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ retval = insert_new_code(c_p, c_p_locks, stp->group_leader, stp->module,
+ stp->code, stp->loaded_size);
+ if (retval != NIL) {
goto load_error;
}
@@ -709,88 +828,42 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* exported and imported functions. This can't fail.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- rval = insert_new_code(c_p, c_p_locks, state.group_leader, state.module,
- state.code, state.loaded_size, state.catches);
- if (rval < 0) {
- goto load_error;
- }
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- final_touch(&state);
+ erts_export_consolidate();
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ final_touch(stp);
/*
 * Loading succeeded.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
erts_fprintf(stderr,"Loaded %T\n",*modp);
#if 0
- debug_dump_code(state.code,state.ci);
+ debug_dump_code(stp->code,stp->ci);
#endif
#endif
- rval = 0;
- state.code = NULL; /* Prevent code from being freed. */
- *modp = state.module;
+ stp->code = NULL; /* Prevent code from being freed. */
+ *modp = stp->module;
/*
* If there is an on_load function, signal an error to
* indicate that the on_load function must be run.
*/
- if (state.on_load) {
- rval = -5;
+ if (stp->on_load) {
+ retval = am_on_load;
}
load_error:
- if (state.code != 0) {
- erts_free(ERTS_ALC_T_CODE, state.code);
- }
- if (state.labels != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.labels);
- }
- if (state.atom != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.atom);
- }
- if (state.import != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.import);
- }
- if (state.export != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.export);
- }
- if (state.lambdas != state.def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
- }
- if (state.literals != NULL) {
- int i;
- for (i = 0; i < state.num_literals; i++) {
- if (state.literals[i].heap != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literals[i].heap);
- }
- }
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literals);
- }
- while (state.literal_patches != NULL) {
- LiteralPatch* next = state.literal_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literal_patches);
- state.literal_patches = next;
- }
- while (state.string_patches != NULL) {
- StringPatch* next = state.string_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.string_patches);
- state.string_patches = next;
- }
- while (state.genop_blocks) {
- GenOpBlock* next = state.genop_blocks->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.genop_blocks);
- state.genop_blocks = next;
- }
-
- return rval;
+ free_state(stp);
+ return retval;
}
-
-static void
-init_state(LoaderState* stp)
+LoaderState*
+erts_alloc_loader_state(void)
{
+ LoaderState* stp;
+
+ stp = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LoaderState));
stp->function = THE_NON_VALUE; /* Function not known yet */
stp->arity = 0;
stp->specific_op = -1;
@@ -814,23 +887,94 @@ init_state(LoaderState* stp)
stp->string_patches = 0;
stp->may_load_nif = 0;
stp->on_load = 0;
+ stp->line_item = 0;
+ stp->line_instr = 0;
+ stp->func_line = 0;
+ stp->fname = 0;
+ return stp;
}
-static int
+static void
+free_state(LoaderState* stp)
+{
+ if (stp->code != 0) {
+ erts_free(ERTS_ALC_T_CODE, stp->code);
+ }
+ if (stp->labels != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->labels);
+ }
+ if (stp->atom != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->atom);
+ }
+ if (stp->import != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->import);
+ }
+ if (stp->export != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->export);
+ }
+ if (stp->lambdas != stp->def_lambdas) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->lambdas);
+ }
+ if (stp->literals != NULL) {
+ int i;
+ for (i = 0; i < stp->num_literals; i++) {
+ if (stp->literals[i].heap != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP,
+ (void *) stp->literals[i].heap);
+ }
+ }
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literals);
+ }
+ while (stp->literal_patches != NULL) {
+ LiteralPatch* next = stp->literal_patches->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literal_patches);
+ stp->literal_patches = next;
+ }
+ while (stp->string_patches != NULL) {
+ StringPatch* next = stp->string_patches->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->string_patches);
+ stp->string_patches = next;
+ }
+ while (stp->genop_blocks) {
+ GenOpBlock* next = stp->genop_blocks->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks);
+ stp->genop_blocks = next;
+ }
+
+ if (stp->line_item != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_item);
+ }
+
+ if (stp->line_instr != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_instr);
+ }
+
+ if (stp->func_line != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->func_line);
+ }
+
+ if (stp->fname != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->fname);
+ }
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp);
+}
+
+static Eterm
insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module, BeamInstr* code, Uint size, BeamInstr catches)
+ Eterm group_leader, Eterm module, BeamInstr* code,
+ Uint size)
{
Module* modp;
- int rval;
+ Eterm retval;
int i;
- if ((rval = beam_make_current_old(c_p, c_p_locks, module)) < 0) {
+ if ((retval = beam_make_current_old(c_p, c_p_locks, module)) < 0) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Module %T must be purged before loading\n",
module);
erts_send_error_to_logger(group_leader, dsbufp);
- return rval;
+ return retval;
}
/*
@@ -841,7 +985,7 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modp = erts_put_module(module);
modp->code = code;
modp->code_length = size;
- modp->catches = catches;
+ modp->catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
/*
* Update address table (used for finding a function from a PC value).
@@ -863,7 +1007,7 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modules[i].end = (BeamInstr *) (((byte *)code) + size);
num_loaded_modules++;
mid_module = &modules[num_loaded_modules/2];
- return 0;
+ return NIL;
}
static int
@@ -1276,12 +1420,14 @@ read_literal_table(LoaderState* stp)
GetInt(stp, 4, sz); /* Size of external term format. */
GetString(stp, p, sz);
- if ((heap_size = erts_decode_ext_size(p, sz, 1)) < 0) {
+ if ((heap_size = erts_decode_ext_size(p, sz)) < 0) {
LoadError1(stp, "literal %d: bad external format", i);
}
hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_LOADER_TMP,
heap_size*sizeof(Eterm));
- val = erts_decode_ext(&hp, NULL, &p);
+ stp->literals[i].off_heap.first = 0;
+ stp->literals[i].off_heap.overhead = 0;
+ val = erts_decode_ext(&hp, &stp->literals[i].off_heap, &p);
stp->literals[i].heap_size = hp - stp->literals[i].heap;
if (stp->literals[i].heap_size > heap_size) {
erl_exit(1, "overrun by %d word(s) for literal heap, term %d",
@@ -1303,6 +1449,138 @@ read_literal_table(LoaderState* stp)
return 0;
}
+static int
+read_line_table(LoaderState* stp)
+{
+ unsigned version;
+ unsigned flags;
+ int num_line_items;
+ BeamInstr* lp;
+ int i;
+ BeamInstr fname_index;
+ BeamInstr tag;
+
+ /*
+     * If the emulator was started with the flag that ignores line
+     * information, return immediately.
+ */
+
+ if (erts_no_line_info) {
+ return 1;
+ }
+
+ /*
+ * Check version of line table.
+ */
+
+ GetInt(stp, 4, version);
+ if (version != 0) {
+ /*
+ * Wrong version. Silently ignore the line number chunk.
+ */
+ return 1;
+ }
+
+ /*
+ * Read the remaining header words. The flag word is reserved
+ * for possible future use; for the moment we ignore it.
+ */
+ GetInt(stp, 4, flags);
+ GetInt(stp, 4, stp->num_line_instrs);
+ GetInt(stp, 4, num_line_items);
+ GetInt(stp, 4, stp->num_fnames);
+
+ /*
+ * Calculate space and allocate memory for the line item table.
+ */
+
+ num_line_items++;
+ lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ num_line_items * sizeof(BeamInstr));
+ stp->line_item = lp;
+ stp->num_line_items = num_line_items;
+
+ /*
+ * The zeroth entry in the line item table is special.
+ * It contains the undefined location.
+ */
+
+ *lp++ = LINE_INVALID_LOCATION;
+ num_line_items--;
+
+ /*
+ * Read all the line items.
+ */
+
+ stp->loc_size = stp->num_fnames ? 4 : 2;
+ fname_index = 0;
+ while (num_line_items-- > 0) {
+ BeamInstr val;
+ BeamInstr loc;
+
+ GetTagAndValue(stp, tag, val);
+ if (tag == TAG_i) {
+ if (IS_VALID_LOCATION(fname_index, val)) {
+ loc = MAKE_LOCATION(fname_index, val);
+ } else {
+ /*
+ * Too many files or huge line number. Silently invalidate
+ * the location.
+ */
+ loc = LINE_INVALID_LOCATION;
+ }
+ *lp++ = loc;
+ if (val > 0xFFFF) {
+ stp->loc_size = 4;
+ }
+ } else if (tag == TAG_a) {
+ if (val > stp->num_fnames) {
+ LoadError2(stp, "file index overflow (%d/%d)",
+ val, stp->num_fnames);
+ }
+ fname_index = val;
+ num_line_items++;
+ } else {
+ LoadError1(stp, "bad tag '%c' (expected 'a' or 'i')",
+ tag_to_letter[tag]);
+ }
+ }
+
+ /*
+ * Read all filenames.
+ */
+
+ if (stp->num_fnames != 0) {
+ stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_fnames *
+ sizeof(Eterm));
+ for (i = 0; i < stp->num_fnames; i++) {
+ byte* fname;
+ Uint n;
+
+ GetInt(stp, 2, n);
+ GetString(stp, fname, n);
+ stp->fname[i] = am_atom_put((char*)fname, n);
+ }
+ }
+
+ /*
+ * Allocate the arrays to be filled while code is being loaded.
+ */
+ stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_line_instrs *
+ sizeof(LineInstr));
+ stp->current_li = 0;
+ stp->func_line = (int *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_functions *
+ sizeof(int));
+
+ return 1;
+
+ load_error:
+ return 0;
+}
+
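
A 'Line' chunk entry is either an atom-tagged value, which selects the current file name index, or an integer-tagged value, which is a line number in the currently selected file; only the latter produces a line item in the table above. A reduced sketch of that little state machine over already-decoded (tag, value) pairs (hypothetical types and tag names; the packing matches the location macros earlier in the patch):

    #include <stddef.h>
    #include <stdint.h>

    enum { TAG_LINE = 0, TAG_FILE = 1 };   /* stand-ins for TAG_i / TAG_a */

    struct tagged { int tag; uint32_t val; };

    /* Expand a sequence of tagged values into packed locations.
     * Returns the number of locations written to out[]. */
    static size_t decode_line_items(const struct tagged* in, size_t n,
                                    uint32_t* out)
    {
        uint32_t file = 0;                 /* 0 means "the module itself" */
        size_t count = 0;

        for (size_t i = 0; i < n; i++) {
            if (in[i].tag == TAG_FILE) {
                file = in[i].val;          /* switch the current file */
            } else {                       /* TAG_LINE: emit a location */
                out[count++] = (file << 24) | in[i].val;
            }
        }
        return count;
    }
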
static int
read_code_header(LoaderState* stp)
@@ -1337,10 +1615,15 @@ read_code_header(LoaderState* stp)
/*
* Verify the number of the highest opcode used.
*/
-
GetInt(stp, 4, opcode_max);
if (opcode_max > MAX_GENERIC_OPCODE) {
- LoadError2(stp, "use of opcode %d; this emulator supports only up to %d",
+ LoadError2(stp,
+ "This BEAM file was compiled for a later version"
+ " of the run-time system than " ERLANG_OTP_RELEASE ".\n"
+ " To fix this, please recompile this module with an "
+ ERLANG_OTP_RELEASE " compiler.\n"
+ " (Use of opcode %d; this emulator supports "
+ "only up to %d.)",
opcode_max, MAX_GENERIC_OPCODE);
}
@@ -1361,25 +1644,6 @@ read_code_header(LoaderState* stp)
#endif
}
- /*
- * Initialize code area.
- */
- stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
- stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
- sizeof(BeamInstr) * stp->code_buffer_size);
-
- stp->code[MI_NUM_FUNCTIONS] = stp->num_functions;
- stp->ci = MI_FUNCTIONS + stp->num_functions + 1;
-
- stp->code[MI_ATTR_PTR] = 0;
- stp->code[MI_ATTR_SIZE] = 0;
- stp->code[MI_ATTR_SIZE_ON_HEAP] = 0;
- stp->code[MI_COMPILE_PTR] = 0;
- stp->code[MI_COMPILE_SIZE] = 0;
- stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
- stp->code[MI_NUM_BREAKPOINTS] = 0;
-
- stp->new_bs_put_strings = 0;
stp->catches = 0;
return 1;
@@ -1412,7 +1676,7 @@ load_code(LoaderState* stp)
{
int i;
int ci;
- int last_func_start = 0;
+ int last_func_start = 0; /* Needed by nif loading and line instructions */
char* sign;
int arg; /* Number of current argument. */
int num_specific; /* Number of specific ops for current. */
@@ -1425,6 +1689,14 @@ load_code(LoaderState* stp)
GenOp** last_op_next = NULL;
int arity;
+ /*
+ * The size of the loaded func_info instruction is needed
+ * by both the nif functionality and line instructions.
+ */
+ enum {
+ FUNC_INFO_SZ = 5
+ };
+
code = stp->code;
code_buffer_size = stp->code_buffer_size;
ci = stp->ci;
@@ -1470,46 +1742,15 @@ load_code(LoaderState* stp)
last_op->arity = 0;
ASSERT(arity <= MAX_OPARGS);
-#define GetValue(Stp, First, Val) \
- do { \
- if (((First) & 0x08) == 0) { \
- Val = (First) >> 4; \
- } else if (((First) & 0x10) == 0) { \
- BeamInstr __w; \
- GetByte(Stp, __w); \
- Val = (((First) >> 5) << 8) | __w; \
- } else { \
- if (!get_int_val(Stp, (First), &(Val))) goto load_error; \
- } \
- } while (0)
-
for (arg = 0; arg < arity; arg++) {
- BeamInstr first;
-
- GetByte(stp, first);
- last_op->a[arg].type = first & 0x07;
+ GetTagAndValue(stp, last_op->a[arg].type, last_op->a[arg].val);
switch (last_op->a[arg].type) {
case TAG_i:
- if ((first & 0x08) == 0) {
- last_op->a[arg].val = first >> 4;
- } else if ((first & 0x10) == 0) {
- BeamInstr w;
- GetByte(stp, w);
- ASSERT(first < 0x800);
- last_op->a[arg].val = ((first >> 5) << 8) | w;
- } else {
- int i = get_erlang_integer(stp, first, &(last_op->a[arg].val));
- if (i < 0) {
- goto load_error;
- }
- last_op->a[arg].type = i;
- }
- break;
case TAG_u:
- GetValue(stp, first, last_op->a[arg].val);
+ case TAG_q:
+ case TAG_o:
break;
case TAG_x:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val == 0) {
last_op->a[arg].type = TAG_r;
} else if (last_op->a[arg].val >= MAX_REG) {
@@ -1518,7 +1759,6 @@ load_code(LoaderState* stp)
}
break;
case TAG_y:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val >= MAX_REG) {
LoadError1(stp, "invalid y register number: %u",
last_op->a[arg].val);
@@ -1526,7 +1766,6 @@ load_code(LoaderState* stp)
last_op->a[arg].val += CP_SIZE;
break;
case TAG_a:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val == 0) {
last_op->a[arg].type = TAG_n;
} else if (last_op->a[arg].val >= stp->num_atoms) {
@@ -1536,7 +1775,6 @@ load_code(LoaderState* stp)
}
break;
case TAG_f:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val == 0) {
last_op->a[arg].type = TAG_p;
} else if (last_op->a[arg].val >= stp->num_labels) {
@@ -1544,7 +1782,6 @@ load_code(LoaderState* stp)
}
break;
case TAG_h:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val > 65535) {
LoadError1(stp, "invalid range for character data type: %u",
last_op->a[arg].val);
@@ -1552,11 +1789,9 @@ load_code(LoaderState* stp)
break;
case TAG_z:
{
- BeamInstr ext_tag;
unsigned tag;
- GetValue(stp, first, ext_tag);
- switch (ext_tag) {
+ switch (last_op->a[arg].val) {
case 0: /* Floating point number */
{
Eterm* hp;
@@ -1648,7 +1883,8 @@ load_code(LoaderState* stp)
break;
}
default:
- LoadError1(stp, "invalid extended tag %d", ext_tag);
+ LoadError1(stp, "invalid extended tag %d",
+ last_op->a[arg].val);
break;
}
}
@@ -1659,7 +1895,6 @@ load_code(LoaderState* stp)
}
last_op->arity++;
}
-#undef GetValue
ASSERT(arity == last_op->arity);
@@ -1701,14 +1936,6 @@ load_code(LoaderState* stp)
}
/*
- * Special error message instruction.
- */
- if (stp->genop->op == genop_too_old_compiler_0) {
- LoadError0(stp, "please re-compile this module with an "
- ERLANG_OTP_RELEASE " compiler");
- }
-
- /*
* From the collected generic instruction, find the specific
* instruction.
*/
@@ -1759,7 +1986,27 @@ load_code(LoaderState* stp)
ERLANG_OTP_RELEASE " compiler ");
}
- LoadError0(stp, "no specific operation found");
+ /*
+ * Some generic instructions should have a special
+ * error message.
+ */
+ switch (stp->genop->op) {
+ case genop_too_old_compiler_0:
+ LoadError0(stp, "please re-compile this module with an "
+ ERLANG_OTP_RELEASE " compiler");
+ case genop_unsupported_guard_bif_3:
+ {
+ Eterm Mod = (Eterm) stp->genop->a[0].val;
+ Eterm Name = (Eterm) stp->genop->a[1].val;
+ Uint arity = (Uint) stp->genop->a[2].val;
+ FREE_GENOP(stp, stp->genop);
+ stp->genop = 0;
+ LoadError3(stp, "unsupported guard BIF: %T:%T/%d\n",
+ Mod, Name, arity);
+ }
+ default:
+ LoadError0(stp, "no specific operation found");
+ }
}
stp->specific_op = specific;
@@ -2048,7 +2295,6 @@ load_code(LoaderState* stp)
case op_i_func_info_IaaI:
{
Uint offset;
- enum { FINFO_SZ = 5 };
if (function_number >= stp->num_functions) {
LoadError1(stp, "too many functions in module (header said %d)",
@@ -2056,27 +2302,37 @@ load_code(LoaderState* stp)
}
if (stp->may_load_nif) {
- const int finfo_ix = ci - FINFO_SZ;
+ const int finfo_ix = ci - FUNC_INFO_SZ;
enum { MIN_FUNC_SZ = 3 };
if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
CodeNeed(pad);
- sys_memmove(&code[finfo_ix+pad], &code[finfo_ix], FINFO_SZ*sizeof(BeamInstr));
+ sys_memmove(&code[finfo_ix+pad], &code[finfo_ix],
+ FUNC_INFO_SZ*sizeof(BeamInstr));
sys_memset(&code[finfo_ix], 0, pad*sizeof(BeamInstr));
ci += pad;
stp->labels[last_label].value += pad;
}
}
last_func_start = ci;
+
+ /*
+	     * Save the current offset into the line instruction array.
+ */
+
+ if (stp->func_line) {
+ stp->func_line[function_number] = stp->current_li;
+ }
+
/*
* Save context for error messages.
*/
stp->function = code[ci-2];
stp->arity = code[ci-1];
- ASSERT(stp->labels[last_label].value == ci - FINFO_SZ);
+ ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ);
offset = MI_FUNCTIONS + function_number;
code[offset] = stp->labels[last_label].patches;
stp->labels[last_label].patches = offset;
@@ -2099,32 +2355,6 @@ load_code(LoaderState* stp)
stp->on_load = ci;
break;
case op_bs_put_string_II:
- {
- /*
- * At entry:
- *
- * code[ci-3] &&lb_i_new_bs_put_string_II
- * code[ci-2] length of string
- * code[ci-1] offset into string table
- *
- * Since we don't know the address of the string table yet,
- * just check the offset and length for validity, and use
- * the instruction field as a link field to link all put_string
- * instructions into a single linked list. At exit:
- *
- * code[ci-3] pointer to next i_new_bs_put_string instruction (or 0
- * if this is the last)
- */
- Uint offset = code[ci-1];
- Uint len = code[ci-2];
- unsigned strtab_size = stp->chunks[STR_CHUNK].size;
- if (offset > strtab_size || offset + len > strtab_size) {
- LoadError2(stp, "invalid string reference %d, size %d", offset, len);
- }
- code[ci-3] = stp->new_bs_put_strings;
- stp->new_bs_put_strings = ci - 3;
- }
- break;
case op_i_bs_match_string_rfII:
case op_i_bs_match_string_xfII:
new_string_patch(stp, ci-1);
@@ -2139,6 +2369,45 @@ load_code(LoaderState* stp)
stp->catches = ci-3;
break;
+ case op_line_I:
+ if (stp->line_item) {
+ BeamInstr item = code[ci-1];
+ BeamInstr loc;
+ int li;
+ if (item >= stp->num_line_items) {
+ LoadError2(stp, "line instruction index overflow (%d/%d)",
+ item, stp->num_line_items);
+ }
+ li = stp->current_li;
+ if (li >= stp->num_line_instrs) {
+ LoadError2(stp, "line instruction table overflow (%d/%d)",
+ li, stp->num_line_instrs);
+ }
+ loc = stp->line_item[item];
+
+ if (ci - 2 == last_func_start) {
+ /*
+ * This line instruction directly follows the func_info
+ * instruction. Its address must be adjusted to point to
+		     * instruction. Its address must be adjusted to point to
+		     * the func_info instruction.
+ stp->line_instr[li].pos = last_func_start - FUNC_INFO_SZ;
+ stp->line_instr[li].loc = stp->line_item[item];
+ stp->current_li++;
+ } else if (li <= stp->func_line[function_number-1] ||
+ stp->line_instr[li-1].loc != loc) {
+ /*
+ * Only store the location if it is different
+ * from the previous location in the same function.
+ */
+ stp->line_instr[li].pos = ci - 2;
+ stp->line_instr[li].loc = stp->line_item[item];
+ stp->current_li++;
+ }
+ }
+ ci -= 2; /* Get rid of the instruction */
+ break;
+
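
Note that the line instruction itself never makes it into the loaded code: ci is rewound past it, and only a (code position, location) pair is appended to the side table, with consecutive duplicates inside a function dropped so that straight-line code contributes one entry per source line. A reduced sketch of the dedup rule (hypothetical container; the func_info address adjustment is left out):

    #include <stdint.h>

    struct line_entry { int pos; uint32_t loc; };

    /* Append (pos, loc) unless it repeats the previous location recorded
     * for the current function (whose first entry is at index func_start). */
    static void record_line(struct line_entry* tab, int* n, int func_start,
                            int pos, uint32_t loc)
    {
        if (*n > func_start && tab[*n - 1].loc == loc) {
            return;                        /* same location as before */
        }
        tab[*n].pos = pos;
        tab[*n].loc = loc;
        (*n)++;
    }
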
/*
* End of code found.
*/
@@ -2175,6 +2444,8 @@ load_code(LoaderState* stp)
#define no_fpe_signals(St) 0
#endif
+#define never(St) 0
+
/*
* Predicate that tests whether a jump table can be used.
*/
@@ -2562,13 +2833,8 @@ should_gen_heap_bin(LoaderState* stp, GenOpArg Src)
static int
binary_too_big(LoaderState* stp, GenOpArg Size)
{
- return Size.type == TAG_u && ((Size.val >> (8*sizeof(Uint)-3)) != 0);
-}
-
-static int
-binary_too_big_bits(LoaderState* stp, GenOpArg Size)
-{
- return Size.type == TAG_u && (((Size.val+7)/8) >> (8*sizeof(Uint)-3) != 0);
+ return Size.type == TAG_o ||
+ (Size.type == TAG_u && ((Size.val >> (8*sizeof(Uint)-3)) != 0));
}
static GenOp*
@@ -3435,10 +3701,7 @@ gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
BifFunction bf;
NEW_GENOP(stp, op);
- op->op = genop_i_gc_bif1_5;
- op->arity = 5;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->next = NULL;
bf = stp->import[Bif.val].bf;
/* The translations here need to have a reverse counterpart in
beam_emu.c:translate_gc_bif for error handling to work properly. */
@@ -3459,19 +3722,30 @@ gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
} else if (bf == trunc_1) {
op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1;
} else {
- abort();
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
}
+ op->op = genop_i_gc_bif1_5;
+ op->arity = 5;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
op->a[2] = Src;
op->a[3] = Live;
op->a[4] = Dst;
- op->next = NULL;
return op;
}
/*
- * This is used by the ops.tab rule that rewrites gc_bifs with two parameters
+ * This is used by the ops.tab rule that rewrites gc_bifs with two parameters.
* The instruction returned is then again rewritten to an i_load instruction
- * folowed by i_gc_bif2_jIId, to handle literals properly.
+ * followed by i_gc_bif2_jIId, to handle literals properly.
* As opposed to the i_gc_bif1_jIsId, the instruction i_gc_bif2_jIId is
* always rewritten, regardless of if there actually are any literals.
*/
@@ -3483,31 +3757,39 @@ gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
BifFunction bf;
NEW_GENOP(stp, op);
- op->op = genop_ii_gc_bif2_6;
- op->arity = 6;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->next = NULL;
bf = stp->import[Bif.val].bf;
/* The translations here need to have a reverse counterpart in
beam_emu.c:translate_gc_bif for error handling to work properly. */
if (bf == binary_part_2) {
op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2;
} else {
- abort();
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
}
+ op->op = genop_ii_gc_bif2_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
op->a[2] = S1;
op->a[3] = S2;
op->a[4] = Live;
op->a[5] = Dst;
- op->next = NULL;
return op;
}
/*
- * This is used by the ops.tab rule that rewrites gc_bifs with three parameters
+ * This is used by the ops.tab rule that rewrites gc_bifs with three parameters.
* The instruction returned is then again rewritten to a move instruction that
* uses r[0] for temp storage, followed by an i_load instruction,
- * folowed by i_gc_bif3_jIsId, to handle literals properly. Rewriting
+ * followed by i_gc_bif3_jIsId, to handle literals properly. Rewriting
* always occur, as with the gc_bif2 counterpart.
*/
static GenOp*
@@ -3518,18 +3800,27 @@ gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
BifFunction bf;
NEW_GENOP(stp, op);
- op->op = genop_ii_gc_bif3_7;
- op->arity = 7;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->next = NULL;
bf = stp->import[Bif.val].bf;
/* The translations here need to have a reverse counterpart in
beam_emu.c:translate_gc_bif for error handling to work properly. */
if (bf == binary_part_3) {
op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3;
} else {
- abort();
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
}
+ op->op = genop_ii_gc_bif3_7;
+ op->arity = 7;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
op->a[2] = S1;
op->a[3] = S2;
op->a[4] = S3;
@@ -3600,15 +3891,14 @@ freeze_code(LoaderState* stp)
{
BeamInstr* code = stp->code;
Uint *literal_end = NULL;
- Uint index;
int i;
byte* str_table;
unsigned strtab_size = stp->chunks[STR_CHUNK].size;
unsigned attr_size = stp->chunks[ATTR_CHUNK].size;
unsigned compile_size = stp->chunks[COMPILE_CHUNK].size;
Uint size;
- unsigned catches;
Sint decoded_size;
+ Uint line_size;
/*
* Verify that there was a correct 'FunT' chunk if there were
@@ -3619,13 +3909,19 @@ freeze_code(LoaderState* stp)
LoadError0(stp, stp->lambda_error);
}
-
/*
* Calculate the final size of the code.
*/
-
- size = (stp->ci * sizeof(BeamInstr)) + (stp->total_literal_size * sizeof(Eterm)) +
- strtab_size + attr_size + compile_size;
+ if (stp->line_instr == 0) {
+ line_size = 0;
+ } else {
+ line_size = (MI_LINE_FUNC_TAB + (stp->num_functions + 1) +
+ (stp->current_li+1) + stp->num_fnames) *
+ sizeof(Eterm) + (stp->current_li+1) * stp->loc_size;
+ }
+ size = (stp->ci * sizeof(BeamInstr)) +
+ (stp->total_literal_size * sizeof(Eterm)) +
+ strtab_size + attr_size + compile_size + line_size;
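
For modules with line information the extra line_size term above accounts for: MI_LINE_FUNC_TAB header words, one code pointer per function plus a sentinel, one code pointer per recorded line instruction plus a sentinel, one word per file name, and one 2- or 4-byte packed location per recorded line instruction. A small sketch of the same arithmetic (illustrative only; word_size stands for sizeof(Eterm)):

    #define MI_LINE_FUNC_TAB 3   /* words in front of the function table */

    /* Bytes needed for the line table, as computed in freeze_code(). */
    static unsigned long line_table_bytes(unsigned num_functions,
                                          unsigned num_line_instrs,
                                          unsigned num_fnames,
                                          unsigned loc_size,  /* 2 or 4 */
                                          unsigned word_size)
    {
        return (MI_LINE_FUNC_TAB + (num_functions + 1)
                + (num_line_instrs + 1) + num_fnames)
                   * (unsigned long) word_size
               + (num_line_instrs + 1) * (unsigned long) loc_size;
    }
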
/*
* Move the code to its final location.
@@ -3662,6 +3958,8 @@ freeze_code(LoaderState* stp)
Uint* low;
Uint* high;
LiteralPatch* lp;
+ struct erl_off_heap_header* off_heap = 0;
+ struct erl_off_heap_header** off_heap_last = &off_heap;
low = (Uint *) (code+stp->ci);
high = low + stp->total_literal_size;
@@ -3670,6 +3968,7 @@ freeze_code(LoaderState* stp)
ptr = low;
for (i = 0; i < stp->num_literals; i++) {
Uint offset;
+ struct erl_off_heap_header* t_off_heap;
sys_memcpy(ptr, stp->literals[i].heap,
stp->literals[i].heap_size*sizeof(Eterm));
@@ -3684,9 +3983,19 @@ freeze_code(LoaderState* stp)
*ptr++ = offset_ptr(val, offset);
break;
case TAG_PRIMARY_HEADER:
- ptr++;
- if (header_is_thing(val)) {
- ptr += thing_arityval(val);
+ if (header_is_transparent(val)) {
+ ptr++;
+ } else {
+ if (thing_subtag(val) == REFC_BINARY_SUBTAG) {
+ struct erl_off_heap_header* oh;
+
+ oh = (struct erl_off_heap_header*) ptr;
+ if (oh->next) {
+ Eterm** uptr = (Eterm **) (void *) &oh->next;
+ *uptr += offset;
+ }
+ }
+ ptr += 1 + thing_arityval(val);
}
break;
default:
@@ -3695,7 +4004,23 @@ freeze_code(LoaderState* stp)
}
}
ASSERT(ptr == high);
+
+ /*
+ * Re-link the off_heap list for this term onto the
+ * off_heap list for the entire module.
+ */
+ t_off_heap = stp->literals[i].off_heap.first;
+ if (t_off_heap) {
+ t_off_heap = (struct erl_off_heap_header *)
+ offset_ptr((UWord) t_off_heap, offset);
+ while (t_off_heap) {
+ *off_heap_last = t_off_heap;
+ off_heap_last = &t_off_heap->next;
+ t_off_heap = t_off_heap->next;
+ }
+ }
}
+ code[MI_LITERALS_OFF_HEAP] = (BeamInstr) off_heap;
lp = stp->literal_patches;
while (lp != 0) {
BeamInstr* op_ptr;
@@ -3713,21 +4038,72 @@ freeze_code(LoaderState* stp)
}
literal_end += stp->total_literal_size;
}
-
+ CHKBLK(ERTS_ALC_T_CODE,code);
+
/*
- * Place the string table and, optionally, attributes, after the literal heap.
+ * If there is line information, place it here.
*/
- CHKBLK(ERTS_ALC_T_CODE,code);
+ if (stp->line_instr == 0) {
+ code[MI_LINE_TABLE] = (BeamInstr) 0;
+ str_table = (byte *) literal_end;
+ } else {
+ Eterm* line_tab = (Eterm *) literal_end;
+ Eterm* p;
+ int ftab_size = stp->num_functions;
+ int num_instrs = stp->current_li;
+ Eterm* first_line_item;
+
+ code[MI_LINE_TABLE] = (BeamInstr) line_tab;
+ p = line_tab + MI_LINE_FUNC_TAB;
+
+ first_line_item = (p + ftab_size + 1);
+ for (i = 0; i < ftab_size; i++) {
+ *p++ = (Eterm) (BeamInstr) (first_line_item + stp->func_line[i]);
+ }
+ *p++ = (Eterm) (BeamInstr) (first_line_item + num_instrs);
+ ASSERT(p == first_line_item);
+ for (i = 0; i < num_instrs; i++) {
+ *p++ = (Eterm) (BeamInstr) (code + stp->line_instr[i].pos);
+ }
+ *p++ = (Eterm) (BeamInstr) (code + stp->ci - 1);
+
+ line_tab[MI_LINE_FNAME_PTR] = (Eterm) (BeamInstr) p;
+ memcpy(p, stp->fname, stp->num_fnames*sizeof(Eterm));
+ p += stp->num_fnames;
+
+ line_tab[MI_LINE_LOC_TAB] = (Eterm) (BeamInstr) p;
+ line_tab[MI_LINE_LOC_SIZE] = stp->loc_size;
+ if (stp->loc_size == 2) {
+ Uint16* locp = (Uint16 *) p;
+ for (i = 0; i < num_instrs; i++) {
+ *locp++ = (Uint16) stp->line_instr[i].loc;
+ }
+ *locp++ = LINE_INVALID_LOCATION;
+ str_table = (byte *) locp;
+ } else {
+ Uint32* locp = (Uint32 *) p;
+ ASSERT(stp->loc_size == 4);
+ for (i = 0; i < num_instrs; i++) {
+ *locp++ = stp->line_instr[i].loc;
+ }
+ *locp++ = LINE_INVALID_LOCATION;
+ str_table = (byte *) locp;
+ }
- sys_memcpy(literal_end, stp->chunks[STR_CHUNK].start, strtab_size);
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ }
+
+ /*
+ * Place the string table and, optionally, attributes here.
+ */
+ sys_memcpy(str_table, stp->chunks[STR_CHUNK].start, strtab_size);
CHKBLK(ERTS_ALC_T_CODE,code);
- str_table = (byte *) literal_end;
if (attr_size) {
byte* attr = str_table + strtab_size;
sys_memcpy(attr, stp->chunks[ATTR_CHUNK].start, stp->chunks[ATTR_CHUNK].size);
code[MI_ATTR_PTR] = (BeamInstr) attr;
code[MI_ATTR_SIZE] = (BeamInstr) stp->chunks[ATTR_CHUNK].size;
- decoded_size = erts_decode_ext_size(attr, attr_size, 0);
+ decoded_size = erts_decode_ext_size(attr, attr_size);
if (decoded_size < 0) {
LoadError0(stp, "bad external term representation of module attributes");
}
@@ -3745,7 +4121,7 @@ freeze_code(LoaderState* stp)
CHKBLK(ERTS_ALC_T_CODE,code);
code[MI_COMPILE_SIZE] = (BeamInstr) stp->chunks[COMPILE_CHUNK].size;
CHKBLK(ERTS_ALC_T_CODE,code);
- decoded_size = erts_decode_ext_size(compile_info, compile_size, 0);
+ decoded_size = erts_decode_ext_size(compile_info, compile_size);
CHKBLK(ERTS_ALC_T_CODE,code);
if (decoded_size < 0) {
LoadError0(stp, "bad external term representation of compilation information");
@@ -3762,20 +4138,8 @@ freeze_code(LoaderState* stp)
((byte *) code) + size);
/*
- * Go through all i_new_bs_put_strings instructions, restore the pointer to
- * the instruction and convert string offsets to pointers (to the
- * FIRST character).
+ * Patch all instructions that refer to the string table.
*/
-
- index = stp->new_bs_put_strings;
- while (index != 0) {
- Uint next = code[index];
- code[index] = BeamOpCode(op_bs_put_string_II);
- code[index+2] = (BeamInstr) (str_table + code[index+2]);
- index = next;
- }
- CHKBLK(ERTS_ALC_T_CODE,code);
-
{
StringPatch* sp = stp->string_patches;
@@ -3816,21 +4180,6 @@ freeze_code(LoaderState* stp)
CHKBLK(ERTS_ALC_T_CODE,code);
/*
- * Fix all catch_yf instructions.
- */
- index = stp->catches;
- catches = BEAM_CATCHES_NIL;
- while (index != 0) {
- BeamInstr next = code[index];
- code[index] = BeamOpCode(op_catch_yf);
- catches = beam_catches_cons((BeamInstr *)code[index+2], catches);
- code[index+2] = make_catch(catches);
- index = next;
- }
- stp->catches = catches;
- CHKBLK(ERTS_ALC_T_CODE,code);
-
- /*
* Save the updated code pointer and code size.
*/
@@ -3855,6 +4204,26 @@ final_touch(LoaderState* stp)
{
int i;
int on_load = stp->on_load;
+ unsigned catches;
+ Uint index;
+ BeamInstr* code = stp->code;
+ Module* modp;
+
+ /*
+ * Allocate catch indices and fix up all catch_yf instructions.
+ */
+
+ index = stp->catches;
+ catches = BEAM_CATCHES_NIL;
+ while (index != 0) {
+ BeamInstr next = code[index];
+ code[index] = BeamOpCode(op_catch_yf);
+ catches = beam_catches_cons((BeamInstr *)code[index+2], catches);
+ code[index+2] = make_catch(catches);
+ index = next;
+ }
+ modp = erts_put_module(stp->module);
+ modp->catches = catches;
/*
* Export functions.
@@ -3938,6 +4307,7 @@ transform_engine(LoaderState* st)
GenOp* instr;
Uint* pc;
int rval;
+ static Uint restart_fail[1] = {TOP_fail};
ASSERT(gen_opc[st->genop->op].transform != -1);
pc = op_transform + gen_opc[st->genop->op].transform;
@@ -3951,7 +4321,6 @@ transform_engine(LoaderState* st)
ASSERT(restart != NULL);
pc = restart;
ASSERT(*pc < NUM_TOPS); /* Valid instruction? */
- ASSERT(*pc == TOP_try_me_else || *pc == TOP_fail);
instr = st->genop;
#define RETURN(r) rval = (r); goto do_return;
@@ -3964,7 +4333,9 @@ transform_engine(LoaderState* st)
op = *pc++;
switch (op) {
- case TOP_is_op:
+ case TOP_next_instr:
+ instr = instr->next;
+ ap = 0;
if (instr == NULL) {
/*
* We'll need at least one more instruction to decide whether
@@ -4151,10 +4522,6 @@ transform_engine(LoaderState* st)
case TOP_next_arg:
ap++;
break;
- case TOP_next_instr:
- instr = instr->next;
- ap = 0;
- break;
case TOP_commit:
instr = instr->next; /* The next_instr was optimized away. */
@@ -4172,8 +4539,8 @@ transform_engine(LoaderState* st)
#endif
break;
-#if defined(TOP_call)
- case TOP_call:
+#if defined(TOP_call_end)
+ case TOP_call_end:
{
GenOp** lastp;
GenOp* new_instr;
@@ -4210,7 +4577,7 @@ transform_engine(LoaderState* st)
*lastp = st->genop;
st->genop = new_instr;
}
- break;
+ RETURN(TE_OK);
#endif
case TOP_new_instr:
/*
@@ -4219,12 +4586,10 @@ transform_engine(LoaderState* st)
NEW_GENOP(st, instr);
instr->next = st->genop;
st->genop = instr;
+ instr->op = op = *pc++;
+ instr->arity = gen_opc[op].arity;
ap = 0;
break;
- case TOP_store_op:
- instr->op = *pc++;
- instr->arity = *pc++;
- break;
case TOP_store_type:
i = *pc++;
instr->a[ap].type = i;
@@ -4234,21 +4599,25 @@ transform_engine(LoaderState* st)
i = *pc++;
instr->a[ap].val = i;
break;
- case TOP_store_var:
+ case TOP_store_var_next_arg:
i = *pc++;
ASSERT(i < TE_MAX_VARS);
instr->a[ap].type = var[i].type;
instr->a[ap].val = var[i].val;
+ ap++;
break;
case TOP_try_me_else:
restart = pc + 1;
restart += *pc++;
ASSERT(*pc < NUM_TOPS); /* Valid instruction? */
break;
+ case TOP_try_me_else_fail:
+ restart = restart_fail;
+ break;
case TOP_end:
RETURN(TE_OK);
case TOP_fail:
- RETURN(TE_FAIL)
+ RETURN(TE_FAIL);
default:
ASSERT(0);
}
@@ -4317,41 +4686,9 @@ load_printf(int line, LoaderState* context, char *fmt,...)
erts_send_error_to_logger(context->group_leader, dsbufp);
}
-
-static int
-get_int_val(LoaderState* stp, Uint len_code, BeamInstr* result)
-{
- Uint count;
- Uint val;
-
- len_code >>= 5;
- ASSERT(len_code < 8);
- if (len_code == 7) {
- LoadError0(stp, "can't load integers bigger than 8 bytes yet\n");
- }
- count = len_code + 2;
- if (count == 5) {
- Uint msb;
- GetByte(stp, msb);
- if (msb == 0) {
- count--;
- }
- GetInt(stp, 4, *result);
- } else if (count <= 4) {
- GetInt(stp, count, val);
- *result = ((val << 8*(sizeof(val)-count)) >> 8*(sizeof(val)-count));
- } else {
- LoadError1(stp, "too big integer; %d bytes\n", count);
- }
- return 1;
-
- load_error:
- return 0;
-}
-
-
static int
-get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result)
+get_tag_and_value(LoaderState* stp, Uint len_code,
+ unsigned tag, BeamInstr* result)
{
Uint count;
Sint val;
@@ -4371,17 +4708,62 @@ get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result)
if (len_code < 7) {
count = len_code + 2;
} else {
- Uint tag;
+ unsigned sztag;
UWord len_word;
ASSERT(len_code == 7);
- GetTagAndValue(stp, tag, len_word);
- VerifyTag(stp, TAG_u, tag);
+ GetTagAndValue(stp, sztag, len_word);
+ VerifyTag(stp, sztag, TAG_u);
count = len_word + 9;
}
/*
- * Handle values up to the size of an int, meaning either a small or bignum.
+     * The value for all tags except TAG_i must be an unsigned integer
+ * fitting in an Uint. If it does not fit, we'll indicate overflow
+ * by changing the tag to TAG_o.
+ */
+
+ if (tag != TAG_i) {
+ if (count == sizeof(Uint)+1) {
+ Uint msb;
+
+ /*
+ * The encoded value has one more byte than an Uint.
+ * It will still fit in an Uint if the most significant
+ * byte is 0.
+ */
+ GetByte(stp, msb);
+ GetInt(stp, sizeof(Uint), *result);
+ if (msb != 0) {
+ /* Overflow: Negative or too big. */
+ return TAG_o;
+ }
+ } else if (count == sizeof(Uint)) {
+ /*
+ * The value must be positive (or the encoded value would
+ * have been one byte longer).
+ */
+ GetInt(stp, count, *result);
+ } else if (count < sizeof(Uint)) {
+ GetInt(stp, count, *result);
+
+ /*
+ * If the sign bit is set, the value is negative
+ * (not allowed).
+ */
+ if (*result & ((Uint)1 << (count*8-1))) {
+ return TAG_o;
+ }
+ } else {
+ GetInt(stp, count, *result);
+ return TAG_o;
+ }
+ return tag;
+ }
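
For every tag other than TAG_i the decoded value has to be a non-negative integer that fits in an Uint; when it does not, the code above returns TAG_o so that later stages (for instance binary_too_big()) can reject the instruction instead of the loader aborting. A stand-alone sketch of the same fit test on a big-endian two's-complement byte string (hypothetical helper; assumes 8-bit bytes):

    #include <stddef.h>

    /* Does a count-byte big-endian two's-complement integer fit in an
     * unsigned word of word_bytes bytes? Mirrors the three cases above. */
    static int fits_in_uword(const unsigned char* p, size_t count,
                             size_t word_bytes)
    {
        if (count == word_bytes + 1) {
            return p[0] == 0;            /* extra byte must be a zero MSB */
        } else if (count == word_bytes) {
            return 1;                    /* minimal encoding: non-negative */
        } else if (count < word_bytes) {
            return (p[0] & 0x80) == 0;   /* sign bit set means negative */
        }
        return 0;                        /* wider than a word: overflow */
    }
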
+
+ /*
+ * TAG_i: First handle values up to the size of an Uint (i.e. either
+ * a small or a bignum).
*/
if (count <= sizeof(val)) {
@@ -4558,6 +4940,8 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
lit->heap_size = heap_size;
lit->heap = erts_alloc(ERTS_ALC_T_LOADER_TMP, heap_size*sizeof(Eterm));
lit->term = make_boxed(lit->heap);
+ lit->off_heap.first = 0;
+ lit->off_heap.overhead = 0;
*hpp = lit->heap;
return stp->num_literals++;
}
@@ -4836,17 +5220,24 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
return result;
}
-
/*
- * Returns a pointer to {module, function, arity}, or NULL if not found.
+ * Find the function containing the given pc and fill in the
+ * fields of the FunctionInfo struct. If full_info is non-zero, fill
+ * in all available information (including the location in the
+ * source code). If no function is found, the 'current' field
+ * will be set to NULL.
*/
-BeamInstr *
-find_function_from_pc(BeamInstr* pc)
+
+void
+erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
{
Range* low = modules;
Range* high = low + num_loaded_modules;
Range* mid = mid_module;
+ fi->current = NULL;
+ fi->needed = 5;
+ fi->loc = LINE_INVALID_LOCATION;
while (low < high) {
if (pc < mid->start) {
high = mid;
@@ -4863,25 +5254,159 @@ find_function_from_pc(BeamInstr* pc)
high1 = mid1;
} else if (pc < mid1[1]) {
mid_module = mid;
- return mid1[0]+2;
+ fi->current = mid1[0]+2;
+ if (full_info) {
+ BeamInstr** fp = (BeamInstr **) (mid->start +
+ MI_FUNCTIONS);
+ int idx = mid1 - fp;
+ lookup_loc(fi, pc, mid->start, idx);
+ }
+ return;
} else {
low1 = mid1 + 1;
}
}
- return NULL;
+ return;
}
mid = low + (high-low) / 2;
}
- return NULL;
+}
+
+static void
+lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
+{
+ Eterm* line = (Eterm *) modp[MI_LINE_TABLE];
+ Eterm* low;
+ Eterm* high;
+ Eterm* mid;
+ Eterm pc;
+
+ if (line == 0) {
+ return;
+ }
+
+ pc = (Eterm) (BeamInstr) orig_pc;
+ fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR];
+ low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx];
+ high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1];
+ while (high > low) {
+ mid = low + (high-low) / 2;
+ if (pc < mid[0]) {
+ high = mid;
+ } else if (pc < mid[1]) {
+ int file;
+ int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB];
+
+ if (line[MI_LINE_LOC_SIZE] == 2) {
+ Uint16* loc_table =
+ (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ fi->loc = loc_table[index];
+ } else {
+ Uint32* loc_table =
+ (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ ASSERT(line[MI_LINE_LOC_SIZE] == 4);
+ fi->loc = loc_table[index];
+ }
+ if (fi->loc == LINE_INVALID_LOCATION) {
+ return;
+ }
+ fi->needed += 3+2+3+2;
+ file = LOC_FILE(fi->loc);
+ if (file == 0) {
+ /* Special case: Module name with ".erl" appended */
+ Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ fi->needed += 2*(mod_atom->len+4);
+ } else {
+ Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
+ fi->needed += 2*ap->len;
+ }
+ return;
+ } else {
+ low = mid + 1;
+ }
+ }
+}
+
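
lookup_loc() above binary-searches the per-function slice of the line table for the entry whose half-open address range [addr[i], addr[i+1]) contains the pc, and then uses that index into the parallel location table. The same search on plain integers, as a hedged reference sketch (hypothetical helper; -1 plays the role of the invalid location):

    #include <stddef.h>
    #include <stdint.h>

    /* Find i such that addr[i] <= pc < addr[i+1], where addr[] holds n+1
     * increasing code addresses (the last one being a sentinel).
     * Returns -1 if pc lies outside [addr[0], addr[n]). */
    static ptrdiff_t find_range(const uintptr_t* addr, size_t n, uintptr_t pc)
    {
        size_t low = 0, high = n;

        while (low < high) {
            size_t mid = low + (high - low) / 2;
            if (pc < addr[mid]) {
                high = mid;
            } else if (pc < addr[mid + 1]) {
                return (ptrdiff_t) mid;     /* pc falls in this entry */
            } else {
                low = mid + 1;
            }
        }
        return -1;
    }
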
+/*
+ * Build a single {M,F,A,Location} item to be part of
+ * a stack trace.
+ */
+Eterm*
+erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
+{
+ BeamInstr* current = fi->current;
+ Eterm loc = NIL;
+
+ if (fi->loc != LINE_INVALID_LOCATION) {
+ Eterm tuple;
+ int line = LOC_LINE(fi->loc);
+ int file = LOC_FILE(fi->loc);
+ Eterm file_term = NIL;
+
+ if (file == 0) {
+ Atom* ap = atom_tab(atom_val(fi->current[0]));
+ file_term = buf_to_intlist(&hp, ".erl", 4, NIL);
+ file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, file_term);
+ } else {
+ Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
+ file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, NIL);
+ }
+
+ tuple = TUPLE2(hp, am_line, make_small(line));
+ hp += 3;
+ loc = CONS(hp, tuple, loc);
+ hp += 2;
+ tuple = TUPLE2(hp, am_file, file_term);
+ hp += 3;
+ loc = CONS(hp, tuple, loc);
+ hp += 2;
+ }
+
+ if (is_list(args) || is_nil(args)) {
+ *mfa_p = TUPLE4(hp, current[0], current[1], args, loc);
+ } else {
+ Eterm arity = make_small(current[2]);
+ *mfa_p = TUPLE4(hp, current[0], current[1], arity, loc);
+ }
+ return hp + 5;
+}
+
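
A stacktrace entry is now the 4-tuple {Module, Function, ArityOrArgs, Location}, where Location is a list such as [{file,"t.erl"},{line,42}] or [] when no location is known. The fi->needed bookkeeping in lookup_loc() pre-computes the heap words this builder consumes: 5 for the 4-tuple, 3+2 for each location 2-tuple with its cons cell, and 2 words per character of the file name (which defaults to the module name with ".erl" appended). A sketch of that accounting (hypothetical helper):

    /* Heap words needed for one {M,F,A,Location} stacktrace item,
     * mirroring lookup_loc() and erts_build_mfa_item(). */
    static unsigned mfa_item_heap_words(int has_location,
                                        unsigned file_name_chars)
    {
        unsigned words = 5;                  /* the 4-tuple itself        */

        if (has_location) {
            words += 3 + 2;                  /* {line,L} plus a cons cell */
            words += 3 + 2;                  /* {file,F} plus a cons cell */
            words += 2 * file_name_chars;    /* file name as a char list  */
        }
        return words;
    }
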
+/*
+ * Force setting of the current function in a FunctionInfo
+ * structure. No source code location will be associated with
+ * the function.
+ */
+void
+erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
+{
+ fi->current = current;
+ fi->needed = 5;
+ fi->loc = LINE_INVALID_LOCATION;
+}
+
+
+/*
+ * Returns a pointer to {module, function, arity}, or NULL if not found.
+ */
+BeamInstr*
+find_function_from_pc(BeamInstr* pc)
+{
+ FunctionInfo fi;
+
+ erts_lookup_function_info(&fi, pc, 0);
+ return fi.current;
}
/*
* Read a specific chunk from a Beam binary.
*/
-Eterm
-code_get_chunk_2(Process* p, Eterm Bin, Eterm Chunk)
+BIF_RETTYPE
+code_get_chunk_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm Bin = BIF_ARG_1;
+ Eterm Chunk = BIF_ARG_2;
LoaderState state;
Uint chunk = 0;
ErlSubBin* sb;
@@ -4946,9 +5471,11 @@ code_get_chunk_2(Process* p, Eterm Bin, Eterm Chunk)
* Calculate the MD5 for a module.
*/
-Eterm
-code_module_md5_1(Process* p, Eterm Bin)
+BIF_RETTYPE
+code_module_md5_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm Bin = BIF_ARG_1;
LoaderState state;
byte* temp_alloc = NULL;
@@ -4998,7 +5525,7 @@ stub_copy_info(LoaderState* stp,
if (size != 0) {
memcpy(info, stp->chunks[chunk].start, size);
*ptr_word = (BeamInstr) info;
- decoded_size = erts_decode_ext_size(info, size, 0);
+ decoded_size = erts_decode_ext_size(info, size);
if (decoded_size < 0) {
return 0;
}
@@ -5205,7 +5732,17 @@ patch_funentries(Eterm Patchlist)
fe = erts_get_fun_entry(Mod, uniq, index);
fe->native_address = (Uint *)native_address;
- erts_refc_dec(&fe->refc, 1);
+
+ /* Deliberate MEMORY LEAK of native fun entries!!!
+ *
+ * Uncomment line below when hipe code upgrade and purging works correctly.
+ * Today we may get cases when old (leaked) native code of a purged module
+ * gets called and tries to create instances of a deleted fun entry.
+ *
+ * Reproduced on a debug emulator with stdlib_test/qlc_SUITE:join_merge
+ *
+ * erts_refc_dec(&fe->refc, 1);
+ */
if (!patch(Addresses, (Uint) fe))
return 0;
@@ -5226,7 +5763,7 @@ patch_funentries(Eterm Patchlist)
Eterm
erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
{
- LoaderState state;
+ LoaderState* stp;
BeamInstr Funcs;
BeamInstr Patchlist;
Eterm* tp;
@@ -5245,10 +5782,10 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
Uint size;
/*
- * Must initialize state.lambdas here because the error handling code
+ * Must initialize stp->lambdas here because the error handling code
* at label 'error' uses it.
*/
- init_state(&state);
+ stp = erts_alloc_loader_state();
if (is_not_atom(Mod)) {
goto error;
@@ -5288,31 +5825,31 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Scan the Beam binary and read the interesting sections.
*/
- state.file_name = "IFF header for Beam file";
- state.file_p = bytes;
- state.file_left = size;
- state.module = Mod;
- state.group_leader = p->group_leader;
- state.num_functions = n;
- if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ stp->file_name = "IFF header for Beam file";
+ stp->file_p = bytes;
+ stp->file_left = size;
+ stp->module = Mod;
+ stp->group_leader = p->group_leader;
+ stp->num_functions = n;
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
goto error;
}
- define_file(&state, "code chunk header", CODE_CHUNK);
- if (!read_code_header(&state)) {
+ define_file(stp, "code chunk header", CODE_CHUNK);
+ if (!read_code_header(stp)) {
goto error;
}
- define_file(&state, "atom table", ATOM_CHUNK);
- if (!load_atom_table(&state)) {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp)) {
goto error;
}
- define_file(&state, "export table", EXP_CHUNK);
- if (!stub_read_export_table(&state)) {
+ define_file(stp, "export table", EXP_CHUNK);
+ if (!stub_read_export_table(stp)) {
goto error;
}
- if (state.chunks[LAMBDA_CHUNK].size > 0) {
- define_file(&state, "lambda (fun) table", LAMBDA_CHUNK);
- if (!read_lambda_table(&state)) {
+ if (stp->chunks[LAMBDA_CHUNK].size > 0) {
+ define_file(stp, "lambda (fun) table", LAMBDA_CHUNK);
+ if (!read_lambda_table(stp)) {
goto error;
}
}
@@ -5322,8 +5859,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
code_size = ((WORDS_PER_FUNCTION+1)*n + MI_FUNCTIONS + 2) * sizeof(BeamInstr);
- code_size += state.chunks[ATTR_CHUNK].size;
- code_size += state.chunks[COMPILE_CHUNK].size;
+ code_size += stp->chunks[ATTR_CHUNK].size;
+ code_size += stp->chunks[COMPILE_CHUNK].size;
code = erts_alloc_fnf(ERTS_ALC_T_CODE, code_size);
if (!code) {
goto error;
@@ -5341,6 +5878,9 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
code[MI_COMPILE_SIZE] = 0;
code[MI_COMPILE_SIZE_ON_HEAP] = 0;
code[MI_NUM_BREAKPOINTS] = 0;
+ code[MI_LITERALS_START] = 0;
+ code[MI_LITERALS_END] = 0;
+ code[MI_LITERALS_OFF_HEAP] = 0;
code[MI_ON_LOAD_FUNCTION_PTR] = 0;
ci = MI_FUNCTIONS + n + 1;
@@ -5413,12 +5953,12 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
info = (byte *) fp;
- info = stub_copy_info(&state, ATTR_CHUNK, info,
+ info = stub_copy_info(stp, ATTR_CHUNK, info,
code+MI_ATTR_PTR, code+MI_ATTR_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
}
- info = stub_copy_info(&state, COMPILE_CHUNK, info,
+ info = stub_copy_info(stp, COMPILE_CHUNK, info,
code+MI_COMPILE_PTR, code+MI_COMPILE_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
@@ -5428,9 +5968,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Insert the module in the module table.
*/
- rval = insert_new_code(p, 0, p->group_leader, Mod, code, code_size,
- BEAM_CATCHES_NIL);
- if (rval < 0) {
+ rval = insert_new_code(p, 0, p->group_leader, Mod, code, code_size);
+ if (rval != NIL) {
goto error;
}
@@ -5440,18 +5979,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
fp = code + ci;
for (i = 0; i < n; i++) {
- stub_final_touch(&state, fp);
+ stub_final_touch(stp, fp);
fp += WORDS_PER_FUNCTION;
}
if (patch_funentries(Patchlist)) {
erts_free_aligned_binary_bytes(temp_alloc);
- if (state.lambdas != state.def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
- }
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.labels);
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.atom);
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.export);
+ free_state(stp);
if (bin != NULL) {
driver_free_binary(bin);
}
@@ -5459,27 +5993,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
}
error:
- erts_free_aligned_binary_bytes(temp_alloc);
- if (code != NULL) {
- erts_free(ERTS_ALC_T_CODE, code);
- }
- if (state.labels != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.labels);
- }
- if (state.lambdas != state.def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
- }
- if (state.atom != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.atom);
- }
- if (state.export != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.export);
- }
- if (bin != NULL) {
- driver_free_binary(bin);
- }
-
-
+ free_state(stp);
BIF_ERROR(p, BADARG);
}
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 26e3054c4b..4e22ee4d79 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -23,7 +23,9 @@
#include "beam_opcodes.h"
#include "erl_process.h"
-int beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module);
+Eterm beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm module);
+
typedef struct gen_op_entry {
char* name;
@@ -101,11 +103,18 @@ extern Uint erts_total_code_size;
*/
#define MI_LITERALS_START 8
#define MI_LITERALS_END 9
+#define MI_LITERALS_OFF_HEAP 10
+
/*
* Pointer to the on_load function (or NULL if none).
*/
-#define MI_ON_LOAD_FUNCTION_PTR 10
+#define MI_ON_LOAD_FUNCTION_PTR 11
+
+/*
+ * Pointer to the line table (or NULL if none).
+ */
+#define MI_LINE_TABLE 12
/*
* Start of function pointer table. This table contains pointers to
@@ -116,5 +125,5 @@ extern Uint erts_total_code_size;
* this table.
*/
-#define MI_FUNCTIONS 11
+#define MI_FUNCTIONS 13
#endif /* _BEAM_LOAD_H */
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 98dde066fc..8ab363a1ec 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -36,6 +36,7 @@
#include "beam_bp.h"
#include "erl_db_util.h"
#include "register.h"
+#include "erl_thr_progress.h"
static Export* flush_monitor_message_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
@@ -1107,9 +1108,9 @@ BIF_RETTYPE hibernate_3(BIF_ALIST_3)
/**********************************************************************/
-BIF_RETTYPE get_stacktrace_0(Process* p)
+BIF_RETTYPE get_stacktrace_0(BIF_ALIST_0)
{
- Eterm t = build_stacktrace(p, p->ftrace);
+ Eterm t = build_stacktrace(BIF_P, BIF_P->ftrace);
BIF_RET(t);
}
@@ -1119,10 +1120,10 @@ BIF_RETTYPE get_stacktrace_0(Process* p)
* the process, and the final error value will be {Term,StackTrace}.
*/
-BIF_RETTYPE error_1(Process* p, Eterm term)
+BIF_RETTYPE error_1(BIF_ALIST_1)
{
- p->fvalue = term;
- BIF_ERROR(p, EXC_ERROR);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, EXC_ERROR);
}
/**********************************************************************/
@@ -1131,12 +1132,12 @@ BIF_RETTYPE error_1(Process* p, Eterm term)
* in the stacktrace.
*/
-BIF_RETTYPE error_2(Process* p, Eterm value, Eterm args)
+BIF_RETTYPE error_2(BIF_ALIST_2)
{
- Eterm* hp = HAlloc(p, 3);
+ Eterm* hp = HAlloc(BIF_P, 3);
- p->fvalue = TUPLE2(hp, value, args);
- BIF_ERROR(p, EXC_ERROR_2);
+ BIF_P->fvalue = TUPLE2(hp, BIF_ARG_1, BIF_ARG_2);
+ BIF_ERROR(BIF_P, EXC_ERROR_2);
}
/**********************************************************************/
@@ -1146,10 +1147,10 @@ BIF_RETTYPE error_2(Process* p, Eterm value, Eterm args)
* It is useful in stub functions for NIFs.
*/
-BIF_RETTYPE nif_error_1(Process* p, Eterm term)
+BIF_RETTYPE nif_error_1(BIF_ALIST_1)
{
- p->fvalue = term;
- BIF_ERROR(p, EXC_ERROR);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, EXC_ERROR);
}
/**********************************************************************/
@@ -1159,12 +1160,12 @@ BIF_RETTYPE nif_error_1(Process* p, Eterm term)
* It is useful in stub functions for NIFs.
*/
-BIF_RETTYPE nif_error_2(Process* p, Eterm value, Eterm args)
+BIF_RETTYPE nif_error_2(BIF_ALIST_2)
{
- Eterm* hp = HAlloc(p, 3);
+ Eterm* hp = HAlloc(BIF_P, 3);
- p->fvalue = TUPLE2(hp, value, args);
- BIF_ERROR(p, EXC_ERROR_2);
+ BIF_P->fvalue = TUPLE2(hp, BIF_ARG_1, BIF_ARG_2);
+ BIF_ERROR(BIF_P, EXC_ERROR_2);
}
/**********************************************************************/
@@ -1183,14 +1184,19 @@ BIF_RETTYPE exit_1(BIF_ALIST_1)
* If there is an error in the argument format,
* return the atom 'badarg' instead.
*/
-Eterm
-raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
+BIF_RETTYPE raise_3(BIF_ALIST_3)
+{
+ Process *c_p = BIF_P;
+ Eterm class = BIF_ARG_1;
+ Eterm value = BIF_ARG_2;
+ Eterm stacktrace = BIF_ARG_3;
Eterm reason;
Eterm l, *hp, *hp_end, *tp;
int depth, cnt;
size_t sz;
+ int must_copy = 0;
struct StackTrace *s;
-
+
if (class == am_error) {
c_p->fvalue = value;
reason = EXC_ERROR;
@@ -1206,35 +1212,74 @@ raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
/* Check syntax of stacktrace, and count depth.
* Accept anything that can be returned from erlang:get_stacktrace/0,
* as well as a 2-tuple with a fun as first element that the
- * error_handler may need to give us.
+ * error_handler may need to give us. Also allow old-style
+ * MFA three-tuples.
*/
for (l = stacktrace, depth = 0;
is_list(l);
l = CDR(list_val(l)), depth++) {
Eterm t = CAR(list_val(l));
- int arity;
+ Eterm location = NIL;
+
if (is_not_tuple(t)) goto error;
tp = tuple_val(t);
- arity = arityval(tp[0]);
- if ((arity == 3) && is_atom(tp[1]) && is_atom(tp[2])) continue;
- if ((arity == 2) && is_fun(tp[1])) continue;
- goto error;
+ switch (arityval(tp[0])) {
+ case 2:
+ /* {Fun,Args} */
+ if (is_fun(tp[1])) {
+ must_copy = 1;
+ } else {
+ goto error;
+ }
+ break;
+ case 3:
+ /*
+ * One of:
+ * {Fun,Args,Location}
+ * {M,F,A}
+ */
+ if (is_fun(tp[1])) {
+ location = tp[3];
+ } else if (is_atom(tp[1]) && is_atom(tp[2])) {
+ must_copy = 1;
+ } else {
+ goto error;
+ }
+ break;
+ case 4:
+ if (!(is_atom(tp[1]) && is_atom(tp[2]))) {
+ goto error;
+ }
+ location = tp[4];
+ break;
+ default:
+ goto error;
+ }
+ if (is_not_list(location) && is_not_nil(location)) {
+ goto error;
+ }
}
if (is_not_nil(l)) goto error;
/* Create stacktrace and store */
- if (depth <= erts_backtrace_depth) {
+ if (erts_backtrace_depth < depth) {
+ depth = erts_backtrace_depth;
+ must_copy = 1;
+ }
+ if (must_copy) {
+ cnt = depth;
+ c_p->ftrace = NIL;
+ } else {
+ /* No need to copy the stacktrace */
cnt = 0;
c_p->ftrace = stacktrace;
- } else {
- cnt = depth = erts_backtrace_depth;
- c_p->ftrace = NIL;
}
+
tp = &c_p->ftrace;
sz = (offsetof(struct StackTrace, trace) + sizeof(Eterm) - 1)
/ sizeof(Eterm);
- hp = HAlloc(c_p, sz + 2*(cnt + 1));
- hp_end = hp + sz + 2*(cnt + 1);
+ hp = HAlloc(c_p, sz + (2+6)*(cnt + 1));
+ hp_end = hp + sz + (2+6)*(cnt + 1);
s = (struct StackTrace *) hp;
s->header = make_neg_bignum_header(sz - 1);
s->freason = reason;
@@ -1242,13 +1287,29 @@ raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
s->current = NULL;
s->depth = 0;
hp += sz;
- if (cnt > 0) {
+ if (must_copy) {
+ int cnt;
+
/* Copy list up to depth */
for (cnt = 0, l = stacktrace;
cnt < depth;
cnt++, l = CDR(list_val(l))) {
+ Eterm t;
+ Eterm *tpp;
+ int arity;
+
ASSERT(*tp == NIL);
- *tp = CONS(hp, CAR(list_val(l)), *tp);
+ t = CAR(list_val(l));
+ tpp = tuple_val(t);
+ arity = arityval(tpp[0]);
+ if (arity == 2) {
+ t = TUPLE3(hp, tpp[1], tpp[2], NIL);
+ hp += 4;
+ } else if (arity == 3 && is_atom(tpp[1])) {
+ t = TUPLE4(hp, tpp[1], tpp[2], tpp[3], NIL);
+ hp += 5;
+ }
+ *tp = CONS(hp, t, *tp);
tp = &CDR(list_val(*tp));
hp += 2;
}
@@ -1256,7 +1317,7 @@ raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
c_p->ftrace = CONS(hp, c_p->ftrace, make_big((Eterm *) s));
hp += 2;
ASSERT(hp <= hp_end);
-
+ HRelease(c_p, hp_end, hp);
BIF_ERROR(c_p, reason);
error:
@@ -1674,10 +1735,10 @@ BIF_RETTYPE whereis_1(BIF_ALIST_1)
* erlang:'!'/2
*/
-Eterm
-ebif_bang_2(Process* p, Eterm To, Eterm Message)
+BIF_RETTYPE
+ebif_bang_2(BIF_ALIST_2)
{
- return send_2(p, To, Message);
+ return erl_send(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
@@ -2014,8 +2075,13 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
}
-Eterm
-send_3(Process *p, Eterm to, Eterm msg, Eterm opts) {
+BIF_RETTYPE send_3(BIF_ALIST_3)
+{
+ Process *p = BIF_P;
+ Eterm to = BIF_ARG_1;
+ Eterm msg = BIF_ARG_2;
+ Eterm opts = BIF_ARG_3;
+
int connect = !0;
int suspend = !0;
Eterm l = opts;
@@ -2079,8 +2145,13 @@ send_3(Process *p, Eterm to, Eterm msg, Eterm opts) {
BIF_ERROR(p, BADARG);
}
-Eterm
-send_2(Process *p, Eterm to, Eterm msg) {
+BIF_RETTYPE send_2(BIF_ALIST_2)
+{
+ return erl_send(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+Eterm erl_send(Process *p, Eterm to, Eterm msg)
+{
Sint result = do_send(p, to, msg, !0);
if (result > 0) {
@@ -3256,8 +3327,11 @@ time_to_parts(Eterm date, Sint* year, Sint* month, Sint* day,
/* return the universal time */
BIF_RETTYPE
-localtime_to_universaltime_2(Process *p, Eterm localtime, Eterm dst)
+localtime_to_universaltime_2(BIF_ALIST_2)
{
+ Process *p = BIF_P;
+ Eterm localtime = BIF_ARG_1;
+ Eterm dst = BIF_ARG_2;
Sint year, month, day;
Sint hour, minute, second;
int isdst;
@@ -3506,9 +3580,10 @@ BIF_RETTYPE erts_debug_display_1(BIF_ALIST_1)
}
-Eterm
-display_string_1(Process* p, Eterm string)
+BIF_RETTYPE display_string_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm string = BIF_ARG_1;
int len = is_string(string);
char *str;
@@ -3524,8 +3599,7 @@ display_string_1(Process* p, Eterm string)
BIF_RET(am_true);
}
-Eterm
-display_nl_0(Process* p)
+BIF_RETTYPE display_nl_0(BIF_ALIST_0)
{
erts_fprintf(stderr, "\n");
BIF_RET(am_true);
@@ -3589,8 +3663,13 @@ BIF_RETTYPE function_exported_3(BIF_ALIST_3)
/**********************************************************************/
-BIF_RETTYPE is_builtin_3(Process* p, Eterm Mod, Eterm Name, Eterm Arity)
+BIF_RETTYPE is_builtin_3(BIF_ALIST_3)
{
+ Process* p = BIF_P;
+ Eterm Mod = BIF_ARG_1;
+ Eterm Name = BIF_ARG_2;
+ Eterm Arity = BIF_ARG_3;
+
if (is_not_atom(Mod) || is_not_atom(Name) || is_not_small(Arity)) {
BIF_ERROR(p, BADARG);
}
@@ -3655,9 +3734,11 @@ BIF_RETTYPE make_fun_3(BIF_ALIST_3)
BIF_RET(make_export(hp));
}
-Eterm
-fun_to_list_1(Process* p, Eterm fun)
+BIF_RETTYPE fun_to_list_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+
if (is_not_any_fun(fun))
BIF_ERROR(p, BADARG);
BIF_RET(term2list_dsprintf(p, fun));
@@ -3953,11 +4034,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
H_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
@@ -3969,11 +4050,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
@@ -3995,7 +4076,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
erts_backtrace_depth = n;
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_trace_control_word) {
- BIF_RET(db_set_trace_control_word_1(BIF_P, BIF_ARG_2));
+ BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_2));
} else if (BIF_ARG_1 == am_sequential_tracer) {
Eterm old_value = erts_set_system_seq_tracer(BIF_P,
ERTS_PROC_LOCK_MAIN,
@@ -4007,7 +4088,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
Uint i;
ErlMessage* mp;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
for (i = 0; i < erts_max_processes; i++) {
if (process_tab[i] != (Process*) 0) {
@@ -4024,7 +4105,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
}
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
@@ -4235,8 +4316,7 @@ void
erts_bif_prep_await_proc_exit_data_trap(Process *c_p, Eterm pid, Eterm ret)
{
if (skip_current_msgq(c_p)) {
- Eterm unused;
- ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p, pid, am_data, ret);
+ ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p, pid, am_data, ret);
}
}
@@ -4244,8 +4324,7 @@ void
erts_bif_prep_await_proc_exit_reason_trap(Process *c_p, Eterm pid)
{
if (skip_current_msgq(c_p)) {
- Eterm unused;
- ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p,
+ ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p,
pid, am_reason, am_undefined);
}
}
@@ -4260,7 +4339,6 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
{
ASSERT(is_atom(module) && is_atom(function));
if (skip_current_msgq(c_p)) {
- Eterm unused;
Eterm term;
Eterm *hp;
int i;
@@ -4272,7 +4350,7 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
hp += 2;
}
term = TUPLE3(hp, module, function, term);
- ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p, pid, am_apply, term);
+ ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p, pid, am_apply, term);
}
}
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 8faa09feb8..d20089a9fb 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -26,14 +26,14 @@ extern Export* erts_format_cpu_topology_trap;
#define BIF_P A__p
-#define BIF_ALIST_0 Process* A__p
-#define BIF_ALIST_1 Process* A__p, Eterm A_1
-#define BIF_ALIST_2 Process* A__p, Eterm A_1, Eterm A_2
-#define BIF_ALIST_3 Process* A__p, Eterm A_1, Eterm A_2, Eterm A_3
+#define BIF_ALIST_0 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST_1 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST_2 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST_3 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ARG_1 A_1
-#define BIF_ARG_2 A_2
-#define BIF_ARG_3 A_3
+#define BIF_ARG_1 (BIF__ARGS[0])
+#define BIF_ARG_2 (BIF__ARGS[1])
+#define BIF_ARG_3 (BIF__ARGS[2])
#define BUMP_ALL_REDS(p) do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \
@@ -122,89 +122,106 @@ do { \
} while (0)
-#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
-do { \
- (Proc)->arity = 0; \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
- (Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
+#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
+do { \
+ (Proc)->arity = 0; \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
} while (0)
-#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
-do { \
- (Proc)->arity = 1; \
- (Proc)->def_arg_reg[0] = (Eterm) (A0); \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
- (Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
+#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->arity = 1; \
+ reg[0] = (Eterm) (A0); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
} while (0)
-#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
-do { \
- (Proc)->arity = 2; \
- (Proc)->def_arg_reg[0] = (Eterm) (A0); \
- (Proc)->def_arg_reg[1] = (Eterm) (A1); \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
- (Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
+#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->arity = 2; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->arity = 3; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
} while (0)
-#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2)\
+#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
(Proc)->arity = 3; \
- (Proc)->def_arg_reg[0] = (Eterm) (A0); \
- (Proc)->def_arg_reg[1] = (Eterm) (A1); \
- (Proc)->def_arg_reg[2] = (Eterm) (A2); \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
(Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
} while (0)
-#define BIF_TRAP0(p, Trap_) do { \
- (p)->arity = 0; \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP0(p, Trap_) do { \
+ (p)->arity = 0; \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP1(Trap_, p, A0) do { \
- (p)->arity = 1; \
- (p)->def_arg_reg[0] = (A0); \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP1(Trap_, p, A0) do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ (p)->arity = 1; \
+ reg[0] = (A0); \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP2(Trap_, p, A0, A1) do { \
- (p)->arity = 2; \
- (p)->def_arg_reg[0] = (A0); \
- (p)->def_arg_reg[1] = (A1); \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP2(Trap_, p, A0, A1) do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ (p)->arity = 2; \
+ reg[0] = (A0); \
+ reg[1] = (A1); \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
- (p)->arity = 3; \
- (p)->def_arg_reg[0] = (A0); \
- (p)->def_arg_reg[1] = (A1); \
- (p)->def_arg_reg[2] = (A2); \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ (p)->arity = 3; \
+ reg[0] = (A0); \
+ reg[1] = (A1); \
+ reg[2] = (A2); \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
- (p)->arity = 0; \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) (Code_); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
+ (p)->arity = 0; \
+ (p)->i = (BeamInstr*) (Code_); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP_CODE_PTR_(p, Code_) do { \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) (Code_); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP_CODE_PTR_(p, Code_) do { \
+ (p)-> i = (BeamInstr*) (Code_); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
extern Export bif_return_trap_export;
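[Editorial illustration, not part of the diff: with the reworked macros above, every BIF now receives the process pointer plus a single argument vector, and trap setup goes through the scheduler's x register array and (Proc)->i instead of def_arg_reg. A minimal sketch of a BIF body under this convention — the function name example_2 is hypothetical; BIF_ALIST_2, BIF_P, BIF_ARG_1/2, BIF_RET and BIF_ERROR are the macros defined in this header.]

    BIF_RETTYPE example_2(BIF_ALIST_2)  /* expands to (Process* A__p, Eterm* BIF__ARGS) */
    {
        Eterm key = BIF_ARG_1;          /* BIF__ARGS[0] */
        Eterm val = BIF_ARG_2;          /* BIF__ARGS[1] */
        Eterm* hp;

        if (is_not_atom(key))
            BIF_ERROR(BIF_P, BADARG);   /* sets freason and returns THE_NON_VALUE */
        hp = HAlloc(BIF_P, 3);
        BIF_RET(TUPLE2(hp, key, val));
    }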
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index d9dd80fa8b..987008c937 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2010. All Rights Reserved.
+# Copyright Ericsson AB 1996-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -87,6 +87,8 @@ bif erlang:exit/2
bif 'erl.lang.proc':signal/2 ebif_signal_2 exit_2
bif erlang:external_size/1
bif 'erl.lang.term':external_size/1 ebif_external_size_1
+bif erlang:external_size/2
+bif 'erl.lang.term':external_size/2 ebif_external_size_2
ubif erlang:float/1
ubif 'erl.lang.number':to_float/1 ebif_to_float_1 float_1
bif erlang:float_to_list/1
@@ -158,10 +160,6 @@ bif erlang:md5_update/2
bif 'erl.util.crypt.md5':update/2 ebif_md5_update_2
bif erlang:md5_final/1
bif 'erl.util.crypt.md5':final/1 ebif_md5_final_1
-bif erlang:memory/0
-bif 'erl.lang':memory/0 ebif_memory_0
-bif erlang:memory/1
-bif 'erl.lang':memory/1 ebif_memory_1
bif erlang:module_loaded/1
bif 'erl.system.code':is_loaded/1 ebif_is_loaded_1 module_loaded_1
bif erlang:function_exported/3
@@ -802,6 +800,12 @@ bif prim_file:internal_name2native/1
bif prim_file:internal_native2name/1
bif prim_file:internal_normalize_utf8/1
bif file:native_name_encoding/0
+
+#
+# New in R14B04.
+#
+bif erlang:check_old_code/1
+
#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index d18de9ae5d..b90ea6b478 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1584,6 +1584,62 @@ big_to_double(Wterm x, double* resp)
return 0;
}
+/*
+ * Logic has been copied from erl_bif_guard.c and slightly
+ * modified to use a static instead of dynamic heap
+ */
+Eterm
+double_to_big(double x, Eterm *heap)
+{
+ int is_negative;
+ int ds;
+ ErtsDigit* xp;
+ Eterm res;
+ int i;
+ size_t sz;
+ Eterm* hp;
+ double dbase;
+
+ if (x >= 0) {
+ is_negative = 0;
+ } else {
+ is_negative = 1;
+ x = -x;
+ }
+
+ /* Unscale & (calculate exponent) */
+ ds = 0;
+ dbase = ((double) (D_MASK) + 1);
+ while (x >= 1.0) {
+ x /= dbase; /* "shift" right */
+ ds++;
+ }
+ sz = BIG_NEED_SIZE(ds); /* number of words including arity */
+
+ hp = heap;
+ res = make_big(hp);
+ xp = (ErtsDigit*) (hp + 1);
+
+ for (i = ds - 1; i >= 0; i--) {
+ ErtsDigit d;
+
+ x *= dbase; /* "shift" left */
+ d = x; /* trunc */
+ xp[i] = d; /* store digit */
+ x -= d; /* remove integer part */
+ }
+ while ((ds & (BIG_DIGITS_PER_WORD - 1)) != 0) {
+ xp[ds++] = 0;
+ }
+
+ if (is_negative) {
+ *hp = make_neg_bignum_header(sz-1);
+ } else {
+ *hp = make_pos_bignum_header(sz-1);
+ }
+ return res;
+}
+
/*
** Estimate the number of decimal digits (include sign)
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 2afc37004f..256f1c2b45 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -140,6 +140,7 @@ Eterm big_lshift(Eterm, Sint, Eterm*);
int big_comp (Wterm, Wterm);
int big_ucomp (Eterm, Eterm);
int big_to_double(Wterm x, double* resp);
+Eterm double_to_big(double, Eterm*);
Eterm small_to_big(Sint, Eterm*);
Eterm uint_to_big(Uint, Eterm*);
Eterm uword_to_big(UWord, Eterm*);
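[Editorial illustration, not part of the diff: the new double_to_big/2 writes the bignum into a heap supplied by the caller. A minimal usage sketch follows; the size of the temporary buffer is an assumption that comfortably covers doubles of modest magnitude, since the real requirement grows with the number of digits the value needs.]

    Eterm tmp_heap[16];                          /* caller-owned storage, assumed large enough */
    Eterm big = double_to_big(1.0e19, tmp_heap); /* 'big' is a boxed bignum whose digits live in tmp_heap */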
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 1fb39c6c67..29461877c5 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -356,8 +356,10 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
{
Eterm bin;
Uint size;
- int offset;
byte* bytes;
+#ifdef DEBUG
+ int offset;
+#endif
if (is_nil(arg)) {
BIF_RET(new_binary(p,(byte*)"",0));
@@ -372,7 +374,11 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
}
bin = new_binary(p, (byte *)NULL, size);
bytes = binary_bytes(bin);
- offset = io_list_to_buf(arg, (char*) bytes, size);
+#ifdef DEBUG
+ offset =
+#endif
+ io_list_to_buf(arg, (char*) bytes, size);
+
ASSERT(offset == 0);
BIF_RET(bin);
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 432b3d0780..784e55ecd2 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -37,6 +37,7 @@
#include "beam_load.h"
#include "erl_instrument.h"
#include "erl_bif_timer.h"
+#include "erl_thr_progress.h"
/* Forward declarations -- should really appear somewhere else */
static void process_killer(void);
@@ -94,7 +95,7 @@ process_killer(void)
erts_printf("(k)ill (n)ext (r)eturn:\n");
while(1) {
if ((j = sys_get_key(0)) <= 0)
- halt_0(0);
+ erl_exit(0, "");
switch(j) {
case 'k':
if (rp->status == P_WAITING) {
@@ -653,20 +654,18 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
- /* Wait for all threads to block. If all threads haven't blocked
+#ifdef ERTS_SMP
+ /*
+ * Wait for all managed threads to block. If all threads haven't blocked
* after a minute, we go anyway and hope for the best...
*
* We do not release system again. We expect an exit() or abort() after
* dump has been written.
- *
- * NOTE: We allow gc therefore it is important not to lock *any*
- * process locks.
*/
- erts_smp_emergency_block_system(60000, ERTS_BS_FLG_ALLOW_GC);
+ erts_thr_progress_fatal_error_block(60000);
/* Either worked or not... */
/* Allow us to pass certain places without locking... */
-#ifdef ERTS_SMP
erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
#else
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index ad042ec088..44c5ba1e26 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -41,6 +41,7 @@
#include "bif.h"
#include "external.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
/* Turn this on to get printouts of all distribution messages
* which go on the line
@@ -430,11 +431,11 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
nodename = erts_this_dist_entry->sysname;
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_smp_thr_progress_block();
erts_set_this_node(am_Noname, 0);
erts_is_alive = 0;
send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nd_reason);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
else { /* recursive call via erts_do_exit_port() will end up here */
@@ -967,7 +968,7 @@ int erts_net_message(Port *prt,
res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache);
if (res >= 0)
- res = ctl_len = erts_decode_dist_ext_size(&ede, 0);
+ res = ctl_len = erts_decode_dist_ext_size(&ede);
else {
#ifdef ERTS_DIST_MSG_DBG
erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n");
@@ -2330,11 +2331,11 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
#endif
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_smp_thr_progress_block();
erts_set_this_node(BIF_ARG_1, (Uint32) creation);
erts_is_alive = 1;
send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
@@ -2730,85 +2731,92 @@ BIF_RETTYPE is_alive_0(BIF_ALIST_0)
/**********************************************************************/
/* erlang:monitor_node(Node, Bool, Options) -> Bool */
-BIF_RETTYPE monitor_node_3(BIF_ALIST_3)
+static BIF_RETTYPE
+monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
{
DistEntry *dep;
ErtsLink *lnk;
Eterm l;
- for (l = BIF_ARG_3; l != NIL && is_list(l); l = CDR(list_val(l))) {
+ for (l = Options; l != NIL && is_list(l); l = CDR(list_val(l))) {
Eterm t = CAR(list_val(l));
/* allow_passive_connect the only available option right now */
if (t != am_allow_passive_connect) {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
}
if (l != NIL) {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- if (is_not_atom(BIF_ARG_1) ||
- ((BIF_ARG_2 != am_true) && (BIF_ARG_2 != am_false)) ||
+ if (is_not_atom(Node) ||
+ ((Bool != am_true) && (Bool != am_false)) ||
((erts_this_node->sysname == am_Noname)
- && (BIF_ARG_1 != erts_this_node->sysname))) {
- BIF_ERROR(BIF_P, BADARG);
+ && (Node != erts_this_node->sysname))) {
+ BIF_ERROR(p, BADARG);
}
- dep = erts_sysname_to_connected_dist_entry(BIF_ARG_1);
+ dep = erts_sysname_to_connected_dist_entry(Node);
if (!dep) {
do_trap:
- BIF_TRAP3(dmonitor_node_trap, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ BIF_TRAP3(dmonitor_node_trap, p, Node, Bool, Options);
}
if (dep == erts_this_dist_entry)
goto done;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
erts_smp_de_rlock(dep);
if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
erts_smp_de_runlock(dep);
goto do_trap;
}
erts_smp_de_links_lock(dep);
erts_smp_de_runlock(dep);
- if (BIF_ARG_2 == am_true) {
+ if (Bool == am_true) {
ASSERT(dep->cid != NIL);
lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE,
- BIF_P->id);
+ p->id);
++ERTS_LINK_REFC(lnk);
- lnk = erts_add_or_lookup_link(&(BIF_P->nlinks), LINK_NODE, BIF_ARG_1);
+ lnk = erts_add_or_lookup_link(&(p->nlinks), LINK_NODE, Node);
++ERTS_LINK_REFC(lnk);
}
else {
- lnk = erts_lookup_link(dep->node_links, BIF_P->id);
+ lnk = erts_lookup_link(dep->node_links, p->id);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
erts_destroy_link(erts_remove_link(&(dep->node_links),
- BIF_P->id));
+ p->id));
}
}
- lnk = erts_lookup_link(BIF_P->nlinks, BIF_ARG_1);
+ lnk = erts_lookup_link(p->nlinks, Node);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
- erts_destroy_link(erts_remove_link(&(BIF_P->nlinks),
- BIF_ARG_1));
+ erts_destroy_link(erts_remove_link(&(p->nlinks),
+ Node));
}
}
}
erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
done:
erts_deref_dist_entry(dep);
BIF_RET(am_true);
}
+BIF_RETTYPE monitor_node_3(BIF_ALIST_3)
+{
+ BIF_RET(monitor_node(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3));
+}
+
+
/* monitor_node(Node, Bool) -> Bool */
BIF_RETTYPE monitor_node_2(BIF_ALIST_2)
{
- BIF_RET(monitor_node_3(BIF_P,BIF_ARG_1,BIF_ARG_2,NIL));
+ BIF_RET(monitor_node(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL));
}
BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1)
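[Editorial note, not part of the diff: the erts_smp_block_system/erts_smp_release_system pairs replaced throughout bif.c and dist.c above all follow the same thread-progress pattern. Restated as a sketch, with the surrounding main-lock handling copied from system_flag_2 and setnode_2:]

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();      /* returns once all managed threads have blocked */
    /* ... mutate emulator-global state (heap defaults, node name, ...) ... */
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);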
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index d397cd8848..570cc59be2 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -65,16 +65,20 @@ erts_afalc_start(AFAllctr_t *afallctr,
AFAllctrInit_t *afinit,
AllctrInit_t *init)
{
- AFAllctr_t nulled_state = {{0}};
- /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
- warning. gcc warns if {0} is used as initializer of a struct when
- the first member is a struct (not if, for example, the third member
- is a struct). */
+ struct {
+ int dummy;
+ AFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
Allctr_t *allctr = (Allctr_t *) afallctr;
- init->sbmbct = 0; /* Small mbc not supported by afit */
+ sys_memcpy((void *) afallctr, (void *) &zero.allctr, sizeof(AFAllctr_t));
- sys_memcpy((void *) afallctr, (void *) &nulled_state, sizeof(AFAllctr_t));
+ init->sbmbct = 0; /* Small mbc not supported by afit */
allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index bbc8a445a7..140a84d5fc 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -40,6 +40,8 @@
#include "erl_mseg.h"
#include "erl_monitors.h"
#include "erl_bif_timer.h"
+#include "erl_cpu_topology.h"
+#include "erl_thr_queue.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
#include "erl_check_io.h"
#endif
@@ -54,7 +56,14 @@
#include "erl_ao_firstfit_alloc.h"
-#define ERTS_ALC_DEFAULT_MAX_THR_PREF 16
+#if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_AU_MAX_PREF_ALLOC_INSTANCES
+# error "Too many schedulers; cannot create that many pref alloc instances"
+#endif
+
+#define ERTS_ALC_FIX_TYPE_IX(T) \
+ (ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE)
+
+#define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS
#if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND)
#define AU_ALLOC_DEFAULT_ENABLE(X) 0
@@ -106,24 +115,43 @@ static ErtsAllocatorState_t eheap_alloc_state;
static ErtsAllocatorState_t binary_alloc_state;
static ErtsAllocatorState_t ets_alloc_state;
static ErtsAllocatorState_t driver_alloc_state;
+static ErtsAllocatorState_t fix_alloc_state;
-ErtsAlcType_t erts_fix_core_allocator_ix;
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-static void *(*fix_core_allocator)(ErtsAlcType_t, void *, Uint);
-static void *fix_core_extra;
-static void *fix_core_alloc(Uint size)
+typedef struct {
+ erts_smp_atomic32_t refc;
+ int only_sz;
+ Uint req_sched;
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[REF_THING_SIZE];
+ int allocs[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+1+2];
+} ErtsAllocInfoReq;
+
+#define ERTS_ALC_INFO_A_ALLOC_UTIL (ERTS_ALC_A_MAX + 1)
+#define ERTS_ALC_INFO_A_MSEG_ALLOC (ERTS_ALC_A_MAX + 2)
+#define ERTS_ALC_INFO_A_MAX ERTS_ALC_INFO_A_MSEG_ALLOC
+
+#if !HALFWORD_HEAP
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
+ ErtsAllocInfoReq,
+ 5,
+ ERTS_ALC_T_AINFO_REQ)
+#else
+static ERTS_INLINE ErtsAllocInfoReq *
+aireq_alloc(void)
{
- void *res;
- res = (*fix_core_allocator)(ERTS_ALC_T_UNDEF, fix_core_extra, size);
- if (erts_mtrace_enabled)
- erts_mtrace_crr_alloc(res,
- ERTS_ALC_A_FIXED_SIZE,
- erts_fix_core_allocator_ix,
- size);
- return res;
+ return erts_alloc(ERTS_ALC_T_AINFO_REQ, sizeof(ErtsAllocInfoReq));
+}
+
+static ERTS_INLINE void
+aireq_free(ErtsAllocInfoReq *ptr)
+{
+ erts_free(ERTS_ALC_T_AINFO_REQ, ptr);
}
#endif
+ErtsAlcType_t erts_fix_core_allocator_ix;
+
enum allctr_type {
GOODFIT,
BESTFIT,
@@ -181,6 +209,7 @@ typedef struct {
struct au_init binary_alloc;
struct au_init ets_alloc;
struct au_init driver_alloc;
+ struct au_init fix_alloc;
#if HALFWORD_HEAP
struct au_init sbmbc_low_alloc;
struct au_init std_low_alloc;
@@ -393,46 +422,52 @@ set_default_driver_alloc_opts(struct au_init *ip)
ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
}
+static void
+set_default_fix_alloc_opts(struct au_init *ip,
+ size_t *fix_type_sizes)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = BESTFIT;
+ ip->init.bf.ao = 1;
+ ip->init.util.name_prefix = "fix_";
+ ip->init.util.fix_type_size = fix_type_sizes;
+ ip->init.util.alloc_no = ERTS_ALC_A_FIXED_SIZE;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE;
+}
+
#ifdef ERTS_SMP
static void
adjust_tpref(struct au_init *ip, int no_sched)
{
if (ip->thr_spec) {
- Uint allocs;
- if (ip->thr_spec < 0) {/* User specified amount */
- allocs = abs(ip->thr_spec);
- if (allocs > no_sched)
- allocs = no_sched;
- }
- else if (no_sched > ERTS_ALC_DEFAULT_MAX_THR_PREF)
- allocs = ERTS_ALC_DEFAULT_MAX_THR_PREF;
- else
- allocs = no_sched;
- if (allocs <= 1)
- ip->thr_spec = 0;
- else {
- ip->thr_spec = (int) allocs;
- ip->thr_spec *= -1; /* thread preferred */
-
- /* If default ... */
-
- /* ... shrink main multi-block carrier size */
- if (ip->default_.mmbcs)
- ip->init.util.mmbcs /= ERTS_MIN(4, allocs);
- /* ... shrink largest multi-block carrier size */
- if (ip->default_.lmbcs)
- ip->init.util.lmbcs /= ERTS_MIN(2, allocs);
- /* ... shrink smallest multi-block carrier size */
- if (ip->default_.smbcs)
- ip->init.util.smbcs /= ERTS_MIN(4, allocs);
- /* ... and more than three allocators shrink
- max mseg multi-block carriers */
- if (ip->default_.mmmbc && allocs > 2) {
- ip->init.util.mmmbc /= ERTS_MIN(4, allocs - 1);
- if (ip->init.util.mmmbc < 3)
- ip->init.util.mmmbc = 3;
- }
+ ip->thr_spec = no_sched;
+ ip->thr_spec *= -1; /* thread preferred */
+
+ /* If default ... */
+
+ /* ... shrink main multi-block carrier size */
+ if (ip->default_.mmbcs)
+ ip->init.util.mmbcs /= ERTS_MIN(4, no_sched);
+ /* ... shrink largest multi-block carrier size */
+ if (ip->default_.lmbcs)
+ ip->init.util.lmbcs /= ERTS_MIN(2, no_sched);
+ /* ... shrink smallest multi-block carrier size */
+ if (ip->default_.smbcs)
+ ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
+ /* ... and more than three allocators shrink
+ max mseg multi-block carriers */
+ if (ip->default_.mmmbc && no_sched > 2) {
+ ip->init.util.mmmbc /= ERTS_MIN(4, no_sched - 1);
+ if (ip->init.util.mmmbc < 3)
+ ip->init.util.mmmbc = 3;
}
}
}
@@ -442,7 +477,7 @@ adjust_tpref(struct au_init *ip, int no_sched)
static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
static void
-set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init);
+set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu);
static void
start_au_allocator(ErtsAlcType_t alctr_n,
@@ -456,8 +491,6 @@ refuse_af_strategy(struct au_init *init)
init->atype = GOODFIT;
}
-static void init_thr_ix(int static_ixs);
-
#ifdef HARD_DEBUG
static void hdbg_init(void);
#endif
@@ -466,7 +499,7 @@ void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
UWord extra_block_size = 0;
- int i;
+ int i, ncpu;
erts_alc_hndl_args_init_t init = {
0,
#if HAVE_ERTS_MSEG
@@ -474,17 +507,38 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#endif
ERTS_DEFAULT_TRIM_THRESHOLD,
ERTS_DEFAULT_TOP_PAD,
- ERTS_DEFAULT_ALCU_INIT
+ ERTS_DEFAULT_ALCU_INIT,
};
+ size_t fix_type_sizes[ERTS_ALC_NO_FIXED_SIZES] = {0};
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
+ = sizeof(Process);
+#if !HALFWORD_HEAP
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR_SH)]
+ = ERTS_MONITOR_SH_SIZE;
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NLINK_SH)]
+ = ERTS_LINK_SH_SIZE;
+#endif
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_EV_D_STATE)]
+ = sizeof(ErtsDrvEventDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
+ = sizeof(ErtsDrvSelectDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
+ = sizeof(ErlMessage);
+#ifdef ERTS_SMP
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
+ = sizeof(ErtsThrQElement_t);
+#endif
#ifdef HARD_DEBUG
hdbg_init();
#endif
erts_have_sbmbc_alloc = 0;
+ ncpu = eaiop->ncpu;
+ if (ncpu < 1)
+ ncpu = 1;
erts_sys_alloc_init();
- init_thr_ix(erts_no_schedulers);
erts_init_utils_mem();
set_default_sbmbc_alloc_opts(&init.sbmbc_alloc);
@@ -496,20 +550,23 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
set_default_binary_alloc_opts(&init.binary_alloc);
set_default_ets_alloc_opts(&init.ets_alloc);
set_default_driver_alloc_opts(&init.driver_alloc);
+ set_default_fix_alloc_opts(&init.fix_alloc,
+ fix_type_sizes);
if (argc && argv)
handle_args(argc, argv, &init);
- if (erts_no_schedulers <= 1) {
- init.sbmbc_alloc.thr_spec = 0;
- init.sl_alloc.thr_spec = 0;
- init.std_alloc.thr_spec = 0;
- init.ll_alloc.thr_spec = 0;
- init.eheap_alloc.thr_spec = 0;
- init.binary_alloc.thr_spec = 0;
- init.ets_alloc.thr_spec = 0;
- init.driver_alloc.thr_spec = 0;
- }
+#ifndef ERTS_SMP
+ init.sbmbc_alloc.thr_spec = 0;
+ init.sl_alloc.thr_spec = 0;
+ init.std_alloc.thr_spec = 0;
+ init.ll_alloc.thr_spec = 0;
+ init.eheap_alloc.thr_spec = 0;
+ init.binary_alloc.thr_spec = 0;
+ init.ets_alloc.thr_spec = 0;
+ init.driver_alloc.thr_spec = 0;
+ init.fix_alloc.thr_spec = 0;
+#endif
if (init.erts_alloc_config) {
/* Adjust flags that erts_alloc_config won't like */
@@ -522,6 +579,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.binary_alloc.thr_spec = 0;
init.ets_alloc.thr_spec = 0;
init.driver_alloc.thr_spec = 0;
+ init.fix_alloc.thr_spec = 0;
}
#ifdef ERTS_SMP
@@ -538,6 +596,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
adjust_tpref(&init.binary_alloc, erts_no_schedulers);
adjust_tpref(&init.ets_alloc, erts_no_schedulers);
adjust_tpref(&init.driver_alloc, erts_no_schedulers);
+ adjust_tpref(&init.fix_alloc, erts_no_schedulers);
#else
/* No thread specific if not smp */
@@ -556,6 +615,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
refuse_af_strategy(&init.binary_alloc);
refuse_af_strategy(&init.ets_alloc);
refuse_af_strategy(&init.driver_alloc);
+ refuse_af_strategy(&init.fix_alloc);
#ifdef ERTS_SMP
if (!init.temp_alloc.thr_spec)
@@ -564,6 +624,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
erts_mtrace_pre_init();
#if HAVE_ERTS_MSEG
+ init.mseg.nos = erts_no_schedulers;
erts_mseg_init(&init.mseg);
#endif
erts_alcu_init(&init.alloc_util);
@@ -583,20 +644,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
erts_allctrs_info[i].extra = NULL;
}
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-#if !defined(PURIFY) && !defined(VALGRIND)
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_fix_alloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_fix_realloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_fix_free;
- erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 1;
-#else
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_sys_alloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_sys_realloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_sys_free;
- erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 0;
-#endif
-#endif
-
erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
@@ -621,20 +668,21 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.ll_low_alloc.init.util.force = 1;
init.ll_low_alloc.init.util.low_mem = 1;
- set_au_allocator(ERTS_ALC_A_SBMBC_LOW, &init.sbmbc_low_alloc);
- set_au_allocator(ERTS_ALC_A_STANDARD_LOW, &init.std_low_alloc);
- set_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW, &init.ll_low_alloc);
+ set_au_allocator(ERTS_ALC_A_SBMBC_LOW, &init.sbmbc_low_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_STANDARD_LOW, &init.std_low_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW, &init.ll_low_alloc, ncpu);
#endif /* HALFWORD */
- set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc);
- set_au_allocator(ERTS_ALC_A_SBMBC, &init.sbmbc_alloc);
- set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc);
- set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc);
- set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc);
- set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc);
- set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc);
- set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc);
- set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc);
+ set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_SBMBC, &init.sbmbc_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_FIXED_SIZE, &init.fix_alloc, ncpu);
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
if (!erts_allctrs[i].alloc)
@@ -650,10 +698,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
- if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
- erts_fix_core_allocator_ix = ERTS_FIX_CORE_ALLOCATOR;
- else
- erts_fix_core_allocator_ix = ERTS_ALC_A_SYSTEM;
erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
@@ -710,49 +754,40 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
&init.driver_alloc,
&driver_alloc_state);
- fix_core_allocator = erts_allctrs[erts_fix_core_allocator_ix].alloc;
- fix_core_extra = erts_allctrs[erts_fix_core_allocator_ix].extra;
+ start_au_allocator(ERTS_ALC_A_FIXED_SIZE,
+ &init.fix_alloc,
+ &fix_alloc_state);
erts_mtrace_install_wrapper_functions();
extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
+#if !HALFWORD_HEAP
+ init_aireq_alloc();
+#endif
+
#ifdef DEBUG
extra_block_size += install_debug_functions();
#endif
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-
- erts_init_fix_alloc(extra_block_size, fix_core_alloc);
-
+}
-#if !defined(PURIFY) && !defined(VALGRIND)
- erts_set_fix_size(ERTS_ALC_T_PROC, sizeof(Process));
- erts_set_fix_size(ERTS_ALC_T_DB_TABLE, sizeof(DbTable));
- erts_set_fix_size(ERTS_ALC_T_ATOM, sizeof(Atom));
+void
+erts_alloc_late_init(void)
+{
- erts_set_fix_size(ERTS_ALC_T_MODULE, sizeof(Module));
- erts_set_fix_size(ERTS_ALC_T_REG_PROC, sizeof(RegProc));
- erts_set_fix_size(ERTS_ALC_T_FUN_ENTRY, sizeof(ErlFunEntry));
-#ifdef ERTS_ALC_T_DRV_EV_D_STATE
- erts_set_fix_size(ERTS_ALC_T_DRV_EV_D_STATE,
- sizeof(ErtsDrvEventDataState));
-#endif
-#ifdef ERTS_ALC_T_DRV_SEL_D_STATE
- erts_set_fix_size(ERTS_ALC_T_DRV_SEL_D_STATE,
- sizeof(ErtsDrvSelectDataState));
-#endif
-#if !HALFWORD_HEAP
- erts_set_fix_size(ERTS_ALC_T_EXPORT, sizeof(Export));
- erts_set_fix_size(ERTS_ALC_T_MONITOR_SH, ERTS_MONITOR_SH_SIZE*sizeof(Uint));
- erts_set_fix_size(ERTS_ALC_T_NLINK_SH, ERTS_LINK_SH_SIZE*sizeof(Uint));
-#endif
-#endif
-#endif
+}
+static void *
+erts_realloc_fixed_size(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ erl_exit(ERTS_ABORT_EXIT,
+ "Attempt to reallocate a block of the fixed size type %s\n",
+ ERTS_ALC_T2TD(type));
}
+
static void
-set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
+set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
{
ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
@@ -764,6 +799,12 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
if (init->init.util.force)
init->enable = 1;
+ tspec->enabled = 0;
+ tspec->dd = 0;
+ tspec->aix = alctr_n;
+ tspec->size = 0;
+ ai->thr_spec = 0;
+
if (!init->enable) {
af->alloc = erts_sys_alloc;
af->realloc = erts_sys_realloc;
@@ -775,14 +816,14 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
return;
}
- tspec->enabled = 0;
- tspec->all_thr_safe = 0;
- ai->thr_spec = 0;
#ifdef USE_THREADS
+#ifdef ERTS_SMP
if (init->thr_spec) {
if (init->thr_spec > 0) {
af->alloc = erts_alcu_alloc_thr_spec;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_thr_spec;
else
af->realloc = erts_alcu_realloc_thr_spec;
@@ -790,12 +831,14 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
}
else {
af->alloc = erts_alcu_alloc_thr_pref;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_thr_pref;
else
af->realloc = erts_alcu_realloc_thr_pref;
af->free = erts_alcu_free_thr_pref;
- tspec->all_thr_safe = 1;
+ tspec->dd = 1;
}
tspec->enabled = 1;
@@ -803,9 +846,13 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
ai->thr_spec = tspec->size;
}
- else if (init->init.util.ts) {
+ else
+#endif
+ if (init->init.util.ts) {
af->alloc = erts_alcu_alloc_ts;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_ts;
else
af->realloc = erts_alcu_realloc_ts;
@@ -815,7 +862,9 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
#endif
{
af->alloc = erts_alcu_alloc;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv;
else
af->realloc = erts_alcu_realloc;
@@ -838,12 +887,14 @@ start_au_allocator(ErtsAlcType_t alctr_n,
ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
+ ErtsAlcFixList_t *fix_lists = NULL;
+ size_t fix_list_size = 0;
if (!init->enable)
return;
if (init->thr_spec) {
- void *states = erts_sys_alloc(0,
+ char *states = erts_sys_alloc(0,
NULL,
((sizeof(Allctr_t *)
* (tspec->size + 1))
@@ -855,18 +906,40 @@ start_au_allocator(ErtsAlcType_t alctr_n,
"Failed to allocate allocator states for %salloc\n",
init->init.util.name_prefix);
tspec->allctr = (Allctr_t **) states;
- states = ((char *) states) + sizeof(Allctr_t *) * (tspec->size + 1);
+ states += sizeof(Allctr_t *) * (tspec->size + 1);
states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
- ? (void *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
+ ? (char *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE)
- : (void *) states);
- tspec->allctr[0] = init->thr_spec > 0 ? (Allctr_t *) state : (Allctr_t *) NULL;
+ : (char *) states);
+ tspec->allctr[0] = (Allctr_t *) state;
size = tspec->size;
for (i = 1; i < size; i++)
tspec->allctr[i] = (Allctr_t *)
&((ErtsAllocatorState_t *) states)[i-1];
}
+ if (init->init.util.fix_type_size) {
+ size_t tot_fix_list_size;
+ fix_list_size = sizeof(ErtsAlcFixList_t)*ERTS_ALC_NO_FIXED_SIZES;
+ fix_list_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(fix_list_size);
+ tot_fix_list_size = fix_list_size;
+ if (init->thr_spec)
+ tot_fix_list_size *= tspec->size;
+ fix_lists = erts_sys_alloc(0,
+ NULL,
+ (tot_fix_list_size
+ + ERTS_CACHE_LINE_SIZE - 1));
+ if (!fix_lists)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to allocate fix lists for %salloc\n",
+ init->init.util.name_prefix);
+
+ if (((UWord) fix_lists) & ERTS_CACHE_LINE_MASK)
+ fix_lists = ((ErtsAlcFixList_t *)
+ ((((UWord) fix_lists) & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ }
+
for (i = 0; i < size; i++) {
void *as;
atype = init->atype;
@@ -877,25 +950,32 @@ start_au_allocator(ErtsAlcType_t alctr_n,
as0 = (void *) tspec->allctr[i];
if (!as0)
continue;
- if (i == 0) {
- if (atype == AFIT)
- atype = GOODFIT;
- init->init.util.ts = 1;
+ if (init->thr_spec < 0) {
+ init->init.util.ts = i == 0;
+ init->init.util.tspec = 0;
+ init->init.util.tpref = -1*init->thr_spec + 1;
}
else {
- if (init->thr_spec < 0) {
+ if (i != 0)
+ init->init.util.ts = 0;
+ else {
+ if (atype == AFIT)
+ atype = GOODFIT;
init->init.util.ts = 1;
- init->init.util.tspec = 0;
- init->init.util.tpref = -1*init->thr_spec;
}
- else {
- init->init.util.ts = 0;
- init->init.util.tspec = init->thr_spec + 1;
- init->init.util.tpref = 0;
- }
- }
+ init->init.util.tspec = init->thr_spec + 1;
+ init->init.util.tpref = 0;
+ }
+ }
+
+ if (fix_lists) {
+ init->init.util.fix = fix_lists;
+ fix_lists = ((ErtsAlcFixList_t *)
+ (((char *) fix_lists) + fix_list_size));
}
+ init->init.util.ix = i;
+
switch (atype) {
case GOODFIT:
as = (void *) erts_gfalc_start((GFAllctr_t *) as0,
@@ -931,11 +1011,8 @@ start_au_allocator(ErtsAlcType_t alctr_n,
af->extra = as;
}
- if (init->thr_spec) {
+ if (init->thr_spec)
af->extra = tspec;
- init->init.util.ts = 1;
- }
-
ai->extra = af->extra;
}
@@ -1055,34 +1132,6 @@ get_amount_value(char *param_end, char** argv, int* ip)
return (Uint) tmp;
}
-static int
-get_bool_or_possitive_amount_value(int *bool, Uint *amount,
- char *param_end, char** argv, int* ip)
-{
- char *param = argv[*ip]+1;
- char *value = get_value(param_end, argv, ip);
- if (strcmp(value, "true") == 0) {
- *bool = 1;
- return 1;
- }
- else if (strcmp(value, "false") == 0) {
- *bool = 0;
- return 1;
- }
- else {
- Sint tmp;
- char *rest;
- errno = 0;
- tmp = (Sint) strtol(value, &rest, 10);
- if (errno != 0 || rest == value || tmp <= 0) {
- bad_value(param, param_end, value);
- return -1;
- }
- *amount = (Uint) tmp;
- return 0;
- }
-}
-
static void
handle_au_arg(struct au_init *auip,
char* sub_param,
@@ -1197,25 +1246,16 @@ handle_au_arg(struct au_init *auip,
goto bad_switch;
break;
case 't': {
- Uint no;
- int enable;
- int res = get_bool_or_possitive_amount_value(&enable,
- &no,
- sub_param+1,
- argv,
- ip);
- if (res > 0)
- auip->thr_spec = enable ? 1 : 0;
+ int res = get_bool_value(sub_param+1, argv, ip);
+ if (res > 0) {
+ auip->thr_spec = 1;
+ break;
+ }
else if (res == 0) {
- int allocs = (int) no;
- if (allocs < 0)
- allocs = INT_MIN;
- else {
- allocs *= -1;
- }
- auip->thr_spec = allocs;
+ auip->thr_spec = 0;
+ break;
}
- break;
+ goto bad_switch;
}
default:
bad_switch:
@@ -1234,6 +1274,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
&init->eheap_alloc,
&init->ll_alloc,
&init->driver_alloc,
+ &init->fix_alloc,
&init->sl_alloc,
&init->temp_alloc
};
@@ -1264,14 +1305,8 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case 'E':
handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i);
break;
- case 'F': /* fix_alloc */
- if (has_prefix("e", param+2)) {
- arg = get_value(param+3, argv, &i);
- if (strcmp("true", arg) != 0)
- bad_value(param, param+3, arg);
- }
- else
- bad_param(param, param+2);
+ case 'F':
+ handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i);
break;
case 'H':
handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i);
@@ -1298,12 +1333,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
#endif
get_amount_value(argv[i]+6, argv, &i);
}
- else if (has_prefix("cci", argv[i]+3)) {
-#if HAVE_ERTS_MSEG
- init->mseg.cci =
-#endif
- get_amount_value(argv[i]+6, argv, &i);
- }
else {
bad_param(param, param+2);
}
@@ -1389,6 +1418,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
set_default_binary_alloc_opts(&init->binary_alloc);
set_default_ets_alloc_opts(&init->ets_alloc);
set_default_driver_alloc_opts(&init->driver_alloc);
+ set_default_driver_alloc_opts(&init->fix_alloc);
init->driver_alloc.enable = 0;
if (strcmp("r9c", arg) == 0) {
@@ -1523,43 +1553,74 @@ static char *type_no_str(ErtsAlcType_t n)
#define type_str(T) type_no_str(ERTS_ALC_T2N((T)))
-erts_tsd_key_t thr_ix_key;
-erts_spinlock_t alloc_thr_ix_lock;
-int last_thr_ix;
-int first_dyn_thr_ix;
-
-static void
-init_thr_ix(int static_ixs)
+void
+erts_alloc_register_scheduler(void *vesdp)
{
- erts_tsd_key_create(&thr_ix_key);
- erts_spinlock_init(&alloc_thr_ix_lock, "alloc_thr_ix_lock");
- last_thr_ix = -4711;
- first_dyn_thr_ix = static_ixs+1;
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ int ix = (int) esdp->no;
+ int aix;
+
+ for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
+ esdp->alloc_data.deallctr[aix] = NULL;
+ esdp->alloc_data.pref_ix[aix] = -1;
+ if (tspec->enabled) {
+ if (!tspec->dd)
+ esdp->alloc_data.pref_ix[aix] = ix;
+ else {
+ Allctr_t *allctr = tspec->allctr[ix];
+ ASSERT(allctr);
+ esdp->alloc_data.deallctr[aix] = allctr;
+ esdp->alloc_data.pref_ix[aix] = ix;
+ }
+ }
+ }
}
-int
-erts_alc_get_thr_ix(void)
+void
+erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
+ int *need_thr_progress,
+ int *more_work)
{
- int ix = (int)(long) erts_tsd_get(thr_ix_key);
- if (ix == 0) {
- erts_spin_lock(&alloc_thr_ix_lock);
- last_thr_ix++;
- if (last_thr_ix < 0)
- last_thr_ix = first_dyn_thr_ix;
- ix = last_thr_ix;
- erts_spin_unlock(&alloc_thr_ix_lock);
- erts_tsd_set(thr_ix_key, (void *)(long) ix);
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ int aix;
+ for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
+ Allctr_t *allctr;
+ if (esdp)
+ allctr = esdp->alloc_data.deallctr[aix];
+ else {
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
+ if (tspec->enabled && tspec->dd)
+ allctr = tspec->allctr[0];
+ else
+ allctr = NULL;
+ }
+ if (allctr) {
+ erts_alcu_check_delayed_dealloc(allctr,
+ 1,
+ need_thr_progress,
+ more_work);
+ }
}
- ASSERT(ix > 0);
- return ix;
}
-void erts_alloc_reg_scheduler_id(Uint id)
+erts_aint32_t
+erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
{
- int ix = (int) id;
- ASSERT(0 < ix && ix <= first_dyn_thr_ix);
- ASSERT(0 == (int) (long) erts_tsd_get(thr_ix_key));
- erts_tsd_set(thr_ix_key, (void *)(long) ix);
+#ifdef ERTS_SMP
+ ErtsAllocatorThrSpec_t *tspec;
+ tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
+ if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
+ return erts_alcu_fix_alloc_shrink(tspec->allctr[ix], flgs);
+ if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
+ return erts_alcu_fix_alloc_shrink(
+ erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
+#else
+ if (ix == 1 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
+ return erts_alcu_fix_alloc_shrink(
+ erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
+#endif
+ return 0;
}
static void
@@ -1574,14 +1635,12 @@ erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr)
if (erts_allctrs_info[ERTS_ALC_A_TEMPORARY].alloc_util
&& erts_allctrs_info[ERTS_ALC_A_TEMPORARY].thr_spec) {
ErtsAllocatorThrSpec_t *tspec;
+ int ix = ERTS_ALC_GET_THR_IX();
tspec = &erts_allctr_thr_spec[ERTS_ALC_A_TEMPORARY];
- if (!tspec->all_thr_safe) {
- int ix = erts_alc_get_thr_ix();
- if (ix < tspec->size) {
- *allctr = tspec->allctr[ix];
- return erts_alcu_verify_unused;
- }
+ if (ix < tspec->size) {
+ *allctr = tspec->allctr[ix];
+ return erts_alcu_verify_unused;
}
}
@@ -1680,7 +1739,7 @@ erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
}
static ERTS_INLINE UWord
-alcu_size(ErtsAlcType_t ai)
+alcu_size(ErtsAlcType_t ai, ErtsAlcUFixInfo_t *fi, int fisz)
{
UWord res = 0;
@@ -1690,22 +1749,20 @@ alcu_size(ErtsAlcType_t ai)
if (!erts_allctrs_info[ai].thr_spec) {
Allctr_t *allctr = erts_allctrs_info[ai].extra;
AllctrSize_t asize;
- erts_alcu_current_size(allctr, &asize);
+ erts_alcu_current_size(allctr, &asize, fi, fisz);
res += asize.blocks;
}
else {
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai];
int i;
- ASSERT(tspec->all_thr_safe);
-
ASSERT(tspec->enabled);
for (i = tspec->size - 1; i >= 0; i--) {
Allctr_t *allctr = tspec->allctr[i];
AllctrSize_t asize;
if (allctr) {
- erts_alcu_current_size(allctr, &asize);
+ erts_alcu_current_size(allctr, &asize, fi, fisz);
res += asize.blocks;
}
}
@@ -1733,7 +1790,6 @@ alcu_is_low(ErtsAlcType_t ai)
int found_one = 0;
# endif
- ASSERT(tspec->all_thr_safe);
ASSERT(tspec->enabled);
for (i = tspec->size - 1; i >= 0; i--) {
@@ -1757,11 +1813,24 @@ alcu_is_low(ErtsAlcType_t ai)
}
#endif /* HALFWORD */
+static ERTS_INLINE void
+add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
+{
+ int ix = ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ASSERT(0 <= ix && ix < ERTS_ALC_NO_FIXED_SIZES);
+
+ *ap += (UWord) fi[ix].allocated;
+ *up += (UWord) fi[ix].used;
+}
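+/*
+ * Folds the per-type fix-alloc sizes collected in fi[] (filled in by
+ * erts_alcu_current_size() via alcu_size()) into the given allocated/used
+ * accumulators; used below when building the erlang:memory() result.
+ */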
+
Eterm
erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
{
+/*
+ * NOTE! When updating this function, make sure to also update
+ * erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
+ */
#define ERTS_MEM_NEED_ALL_ALCU (!erts_instr_stat && want_tot_or_sys)
- ErtsFixInfo efi;
struct {
int total;
int processes;
@@ -1800,6 +1869,9 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
Eterm res = THE_NON_VALUE;
ErtsAlcType_t ai;
int only_one_value = 0;
+ ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
+
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
/* Figure out whats wanted... */
@@ -1969,7 +2041,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
switch (ai) {
case ERTS_ALC_A_SYSTEM:
- case ERTS_ALC_A_FIXED_SIZE:
case ERTS_ALC_A_SBMBC:
#if HALFWORD_HEAP
case ERTS_ALC_A_SBMBC_LOW:
@@ -2029,11 +2100,15 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
case ERTS_ALC_A_BINARY:
save = &size.binary;
break;
+ case ERTS_ALC_A_FIXED_SIZE:
+ asz = alcu_size(ai, fi, ERTS_ALC_NO_FIXED_SIZES);
+ size.total += asz;
+ continue;
default:
save = NULL;
break;
}
- asz = alcu_size(ai);
+ asz = alcu_size(ai, NULL, 0);
if (save)
*save = asz;
size.total += asz;
@@ -2053,8 +2128,11 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (ERTS_MEM_NEED_ALL_ALCU)
tmp = size.processes;
- else
- tmp = alcu_size(ERTS_ALC_A_EHEAP);
+ else {
+ alcu_size(ERTS_ALC_A_FIXED_SIZE,
+ fi, ERTS_ALC_NO_FIXED_SIZES);
+ tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
+ }
tmp += erts_max_processes*sizeof(Process*);
#ifdef HYBRID
tmp += erts_max_processes*sizeof(Process*);
@@ -2064,69 +2142,54 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
size.processes = size.processes_used = tmp;
-#if HALFWORD_HEAP
- /* BUG: We ignore link and monitor memory */
-#else
- erts_fix_info(ERTS_ALC_T_NLINK_SH, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_PROC);
+#if !HALFWORD_HEAP
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_MONITOR_SH);
- erts_fix_info(ERTS_ALC_T_MONITOR_SH, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_NLINK_SH);
#endif
-
- erts_fix_info(ERTS_ALC_T_PROC, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
-
- erts_fix_info(ERTS_ALC_T_REG_PROC, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
-
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_MSG_REF);
}
if (want.atom || want.atom_used) {
Uint reserved_atom_space, atom_space;
erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
size.atom = size.atom_used = atom_table_sz();
- erts_fix_info(ERTS_ALC_T_ATOM, &efi);
- if (want.atom) {
+ if (want.atom)
size.atom += reserved_atom_space;
- size.atom += efi.total;
- }
- if (want.atom_used) {
+ if (want.atom_used)
size.atom_used += atom_space;
- size.atom_used += efi.used;
- }
}
if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
- size.binary = alcu_size(ERTS_ALC_A_BINARY);
+ size.binary = alcu_size(ERTS_ALC_A_BINARY, NULL, 0);
if (want.code) {
size.code = module_table_sz();
- erts_fix_info(ERTS_ALC_T_MODULE, &efi);
- size.code += efi.used;
size.code += export_table_sz();
-#if HALFWORD_HEAP
size.code += export_list_size() * sizeof(Export);
-#else
- erts_fix_info(ERTS_ALC_T_EXPORT, &efi);
- size.code += efi.used;
-#endif
size.code += erts_fun_table_sz();
- erts_fix_info(ERTS_ALC_T_FUN_ENTRY, &efi);
- size.code += efi.used;
size.code += allocated_modules*sizeof(Range);
size.code += erts_total_code_size;
}
if (want.ets) {
if (!ERTS_MEM_NEED_ALL_ALCU)
- size.ets = alcu_size(ERTS_ALC_A_ETS);
+ size.ets = alcu_size(ERTS_ALC_A_ETS, NULL, 0);
size.ets += erts_get_ets_misc_mem_size();
}
@@ -2199,13 +2262,10 @@ struct aa_values {
Eterm
erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
{
-#define MAX_AA_VALUES \
- (20 + (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1))
-
+#define MAX_AA_VALUES (23)
struct aa_values values[MAX_AA_VALUES];
Eterm res = THE_NON_VALUE;
int i, length;
- ErtsFixInfo efi;
Uint reserved_atom_space, atom_space;
if (proc) {
@@ -2270,6 +2330,11 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
i++;
values[i].arity = 2;
+ values[i].name = "export_list";
+ values[i].ui[0] = export_list_size() * sizeof(Export);
+ i++;
+
+ values[i].arity = 2;
values[i].name = "register_table";
values[i].ui[0] = process_reg_sz();
i++;
@@ -2314,22 +2379,15 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].ui[0] = erts_tot_link_lh_size();
i++;
- {
- Uint n;
-
- for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
- n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
- n++) {
- erts_fix_info(ERTS_ALC_N2T(n), &efi);
-
- values[i].arity = 3;
- values[i].name = ERTS_ALC_N2TD(n);
- values[i].ui[0] = efi.total;
- values[i].ui[1] = efi.used;
- i++;
- }
+ values[i].arity = 2;
+ values[i].name = "process_table";
+ values[i].ui[0] = erts_max_processes*sizeof(Process*);
+ i++;
- }
+ values[i].arity = 2;
+ values[i].name = "ets_misc";
+ values[i].ui[0] = erts_get_ets_misc_mem_size();
+ i++;
length = i;
ASSERT(length <= MAX_AA_VALUES);
@@ -2423,17 +2481,16 @@ erts_alloc_util_allocators(void *proc)
Uint sz;
int i;
/*
- * Currently all allocators except sys_alloc and fix_alloc are
+ * Currently all allocators except sys_alloc are
* alloc_util allocators.
*/
- sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
+ sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 1)*2;
ASSERT(sz > 0);
hp = HAlloc((Process *) proc, sz);
res = NIL;
for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
switch (i) {
case ERTS_ALC_A_SYSTEM:
- case ERTS_ALC_A_FIXED_SIZE:
break;
default: {
char *alc_str = (char *) ERTS_ALC_A2AD(i);
@@ -2447,267 +2504,12 @@ erts_alloc_util_allocators(void *proc)
return res;
}
-Eterm
-erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
-{
-#define ERTS_AIT_RET(R) \
- do { res = (R); goto done; } while (0)
-#define ERTS_AIT_HALLOC(P, S) \
- do { hp = HAlloc((P), (S)); hp_end = hp + (S); } while (0)
-
- ErtsAlcType_t i;
- Uint sz = 0;
- Uint *hp = NULL;
- Uint *hp_end = NULL;
- Eterm res = am_undefined;
-
- if (is_not_atom(which_alloc))
- goto done;
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_is_atom_str((char *) ERTS_ALC_A2AD(i), which_alloc)) {
- if (!erts_allctrs_info[i].enabled)
- ERTS_AIT_RET(am_false);
- else {
- if (erts_allctrs_info[i].alloc_util) {
- Eterm ires, tmp;
- Eterm **hpp;
- Uint *szp;
- Eterm (*info_func)(Allctr_t *,
- int,
- int *,
- void *,
- Uint **,
- Uint *);
-
- info_func = (only_sz
- ? erts_alcu_sz_info
- : erts_alcu_info);
-
- if (erts_allctrs_info[i].thr_spec) {
- ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[i];
- int j;
- int block_system = !tspec->all_thr_safe;
-
- if (block_system) {
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
- }
- ASSERT(tspec->enabled);
-
- szp = &sz;
- hpp = NULL;
-
- while (1) {
- ires = NIL;
- for (j = tspec->size - 1; j >= 0; j--) {
- Allctr_t *allctr = tspec->allctr[j];
- if (allctr) {
- tmp = erts_bld_tuple(hpp,
- szp,
- 3,
- erts_bld_atom(hpp,
- szp,
- "instance"),
- make_small((Uint) j),
- (*info_func)(allctr,
- hpp != NULL,
- NULL,
- NULL,
- hpp,
- szp));
- ires = erts_bld_cons(hpp, szp, tmp, ires);
- }
- }
- if (hpp)
- break;
- ERTS_AIT_HALLOC((Process *) proc, sz);
- hpp = &hp;
- szp = NULL;
- }
-
- if (block_system) {
- erts_smp_release_system();
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
- }
- }
- else {
- Allctr_t *allctr = erts_allctrs_info[i].extra;
- szp = &sz;
- hpp = NULL;
- while (1) {
- ires = NIL;
- tmp = erts_bld_tuple(hpp,
- szp,
- 3,
- erts_bld_atom(hpp,
- szp,
- "instance"),
- make_small((Uint) 0),
- (*info_func)(allctr,
- hpp != NULL,
- NULL,
- NULL,
- hpp,
- szp));
- ires = erts_bld_cons(hpp, szp, tmp, ires);
- if (hpp)
- break;
- ERTS_AIT_HALLOC((Process *) proc, sz);
- hpp = &hp;
- szp = NULL;
- }
- }
- ERTS_AIT_RET(ires);
- }
- else {
- Eterm *szp, **hpp;
-
- switch (i) {
- case ERTS_ALC_A_SYSTEM: {
- SysAllocStat sas;
- Eterm opts_am;
- Eterm opts;
- Eterm as[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
- Eterm ts[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
- int l;
-
- if (only_sz)
- ERTS_AIT_RET(NIL);
-
- sys_alloc_stat(&sas);
- opts_am = am_atom_put("options", 7);
-
- szp = &sz;
- hpp = NULL;
-
- restart_sys_alloc:
- l = 0;
- as[l] = am_atom_put("e", 1);
- ts[l++] = am_true;
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("libc", 4);
- if(sas.trim_threshold >= 0) {
- as[l] = am_atom_put("tt", 2);
- ts[l++] = erts_bld_uint(hpp, szp,
- (Uint) sas.trim_threshold);
- }
- if(sas.top_pad >= 0) {
- as[l] = am_atom_put("tp", 2);
- ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
- }
-
- opts = erts_bld_2tup_list(hpp, szp, l, as, ts);
- res = erts_bld_2tup_list(hpp, szp, 1, &opts_am, &opts);
-
- if (szp) {
- ERTS_AIT_HALLOC((Process *) proc, sz);
- szp = NULL;
- hpp = &hp;
- goto restart_sys_alloc;
- }
- ERTS_AIT_RET(res);
- }
- case ERTS_ALC_A_FIXED_SIZE: {
- ErtsAlcType_t n;
- Eterm as[2], vs[2];
-
- if (only_sz)
- ERTS_AIT_RET(NIL);
-
- as[0] = am_atom_put("options", 7);
- as[1] = am_atom_put("pools", 5);
-
- szp = &sz;
- hpp = NULL;
-
- restart_fix_alloc:
-
- vs[0] = erts_bld_cons(hpp, szp,
- erts_bld_tuple(hpp, szp, 2,
- am_atom_put("e",
- 1),
- am_true),
- NIL);
-
- vs[1] = NIL;
- for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
- n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
- n++) {
- ErtsFixInfo efi;
- erts_fix_info(ERTS_ALC_N2T(n), &efi);
-
- vs[1] = erts_bld_cons(
- hpp, szp,
- erts_bld_tuple(
- hpp, szp, 3,
- am_atom_put((char *) ERTS_ALC_N2TD(n),
- strlen(ERTS_ALC_N2TD(n))),
- erts_bld_uint(hpp, szp, efi.total),
- erts_bld_uint(hpp, szp, efi.used)),
- vs[1]);
-
- }
-
- res = erts_bld_2tup_list(hpp, szp, 2, as, vs);
- if (szp) {
- ERTS_AIT_HALLOC((Process *) proc, sz);
- szp = NULL;
- hpp = &hp;
- goto restart_fix_alloc;
- }
- ERTS_AIT_RET(res);
- }
- default:
- ASSERT(0);
- goto done;
- }
- }
- }
- }
- }
-
- if (ERTS_IS_ATOM_STR("mseg_alloc", which_alloc)) {
-#if HAVE_ERTS_MSEG
- if (only_sz)
- ERTS_AIT_RET(NIL);
- erts_mseg_info(NULL, NULL, 0, NULL, &sz);
- if (sz)
- ERTS_AIT_HALLOC((Process *) proc, sz);
- ERTS_AIT_RET(erts_mseg_info(NULL, NULL, 1, &hp, NULL));
-#else
- ERTS_AIT_RET(am_false);
-#endif
-
- }
- else if (ERTS_IS_ATOM_STR("alloc_util", which_alloc)) {
- if (only_sz)
- ERTS_AIT_RET(NIL);
- erts_alcu_au_info_options(NULL, NULL, NULL, &sz);
- if (sz)
- ERTS_AIT_HALLOC((Process *) proc, sz);
- ERTS_AIT_RET(erts_alcu_au_info_options(NULL, NULL, &hp, NULL));
- }
-
- done:
- if (hp) {
- ASSERT(hp_end >= hp);
- HRelease((Process *) proc, hp_end, hp);
- }
- return res;
-
-#undef ERTS_AIT_RET
-#undef ERTS_AIT_HALLOC
-}
-
void
erts_allocator_info(int to, void *arg)
{
ErtsAlcType_t a;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_smp_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
int ai;
@@ -2748,22 +2550,6 @@ erts_allocator_info(int to, void *arg)
erts_print(to, arg, "option tp: %d\n", sas.top_pad);
break;
}
- case ERTS_ALC_A_FIXED_SIZE: {
- ErtsAlcType_t n;
- erts_print(to, arg, "option e: true\n");
-
- for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
- n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
- n++) {
- ErtsFixInfo efi;
- erts_fix_info(ERTS_ALC_N2T(n), &efi);
- erts_print(to, arg, "%s: %lu %lu\n",
- ERTS_ALC_N2TD(n),
- efi.total,
- efi.used);
- }
- break;
- }
default:
ASSERT(0);
break;
@@ -2774,8 +2560,18 @@ erts_allocator_info(int to, void *arg)
}
#if HAVE_ERTS_MSEG
- erts_print(to, arg, "=allocator:mseg_alloc\n");
- erts_mseg_info(&to, arg, 0, NULL, NULL);
+ {
+#ifdef ERTS_SMP
+ int max = (int) erts_no_schedulers;
+#else
+ int max = 0;
+#endif
+ int i;
+ for (i = 0; i <= max; i++) {
+ erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
+ erts_mseg_info(i, &to, arg, 0, NULL, NULL);
+ }
+ }
#endif
erts_print(to, arg, "=allocator:alloc_util\n");
@@ -2829,7 +2625,7 @@ erts_allocator_options(void *proc)
use_mseg++;
#endif
if (erts_allctr_thr_spec[a].enabled)
- allctr = erts_allctr_thr_spec[a].allctr[1];
+ allctr = erts_allctr_thr_spec[a].allctr[0];
else
allctr = erts_allctrs_info[a].extra;
tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
@@ -2878,7 +2674,7 @@ erts_allocator_options(void *proc)
#if HAVE_ERTS_MSEG
if (use_mseg) {
atoms[length] = am_atom_put("mseg_alloc", 10);
- terms[length++] = erts_mseg_info_options(NULL, NULL, hpp, szp);
+ terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
}
#endif
@@ -2960,6 +2756,338 @@ erts_allocator_options(void *proc)
return res;
}
+void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
+{
+ UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1)
+#ifdef VALGRIND
+ + sizeof(UWord)
+#endif
+ );
+
+#ifdef VALGRIND
+ { /* Link them to avoid Leak_PossiblyLost */
+ static UWord* first_in_list = NULL;
+ *(UWord**)v = first_in_list;
+ first_in_list = (UWord*) v;
+ v += sizeof(UWord);
+ }
+#endif
+
+ if (v & ERTS_CACHE_LINE_MASK) {
+ v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ }
+ ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
+ return (void*)v;
+}
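+/*
+ * The block is over-allocated by ERTS_CACHE_LINE_SIZE-1 bytes and the
+ * returned pointer is rounded up to the next cache-line boundary; the
+ * padding is never reclaimed (hence "permanent"). Under valgrind an extra
+ * UWord links these blocks together so they are not reported as possibly
+ * lost.
+ */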
+
+static void
+reply_alloc_info(void *vair)
+{
+ ErtsAllocInfoReq *air = (ErtsAllocInfoReq *) vair;
+ Uint sched_id = erts_get_scheduler_id();
+ int global_instances = air->req_sched == sched_id;
+ ErtsProcLocks rp_locks;
+ Process *rp = air->proc;
+ Eterm ref_copy = NIL, ai_list, msg;
+ Eterm *hp = NULL, *hp_end = NULL, *hp_start = NULL;
+ Eterm **hpp;
+ Uint sz, *szp;
+ ErlOffHeap *ohp = NULL;
+ ErlHeapFragment *bp = NULL;
+ int i;
+ Eterm (*info_func)(Allctr_t *,
+ int,
+ int *,
+ void *,
+ Uint **,
+ Uint *) = (air->only_sz
+ ? erts_alcu_sz_info
+ : erts_alcu_info);
+
+ rp_locks = air->req_sched == sched_id ? ERTS_PROC_LOCK_MAIN : 0;
+
+ sz = 0;
+ hpp = NULL;
+ szp = &sz;
+
+ while (1) {
+
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, air->ref);
+ else
+ *szp += REF_THING_SIZE;
+
+ ai_list = NIL;
+ for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++); /* count entries */
+ for (i--; i >= 0; i--) {
+ int ai = air->allocs[i];
+ Allctr_t *allctr;
+ Eterm ainfo;
+ Eterm alloc_atom;
+ if (global_instances) {
+ switch (ai) {
+ case ERTS_ALC_A_SYSTEM: {
+ alloc_atom = erts_bld_atom(hpp, szp, "sys_alloc");
+ ainfo = NIL;
+ if (!air->only_sz) {
+ SysAllocStat sas;
+ if (hpp)
+ sys_alloc_stat(&sas);
+ if (szp) {
+ /* ensure enough heap */
+ sas.top_pad = INT_MAX;
+ sas.trim_threshold = INT_MAX;
+ }
+ if (sas.top_pad >= 0) {
+ ainfo = erts_bld_cons(
+ hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp, "tp"),
+ erts_bld_uint(
+ hpp, szp,
+ (Uint) sas.top_pad)),
+ ainfo);
+ }
+ if (sas.trim_threshold >= 0) {
+ ainfo = erts_bld_cons(
+ hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp, "tt"),
+ erts_bld_uint(
+ hpp, szp,
+ (Uint) sas.trim_threshold)),
+ ainfo);
+ }
+ ainfo = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp,
+ "m"),
+ erts_bld_atom(hpp, szp,
+ "libc")),
+ ainfo);
+ ainfo = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp,
+ "e"),
+ am_true),
+ ainfo);
+ ainfo = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_atom(hpp, szp,
+ "otps"),
+ ainfo);
+ }
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+ break;
+ }
+ case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ alloc_atom = erts_bld_atom(hpp, szp, "alloc_util");
+ ainfo = (air->only_sz
+ ? NIL
+ : erts_alcu_au_info_options(NULL, NULL,
+ hpp, szp));
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+ break;
+ case ERTS_ALC_INFO_A_MSEG_ALLOC:
+ alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
+#if HAVE_ERTS_MSEG
+ ainfo = (air->only_sz
+ ? NIL
+ : erts_mseg_info(0, NULL, NULL, hpp != NULL,
+ hpp, szp));
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+#else
+ ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
+ am_false);
+#endif
+ break;
+ default:
+ alloc_atom = erts_bld_atom(hpp, szp,
+ (char *) ERTS_ALC_A2AD(ai));
+ if (!erts_allctrs_info[ai].enabled)
+ ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
+ am_false);
+ else if (erts_allctrs_info[ai].alloc_util) {
+ if (erts_allctrs_info[ai].thr_spec)
+ allctr = erts_allctr_thr_spec[ai].allctr[0];
+ else
+ allctr = erts_allctrs_info[ai].extra;
+ ainfo = info_func(allctr, hpp != NULL, NULL,
+ NULL, hpp, szp);
+ ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom,
+ make_small(0), ainfo);
+ }
+ else {
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
+ __FILE__, __LINE__);
+ }
+ }
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ }
+ switch (ai) {
+ case ERTS_ALC_A_SYSTEM:
+ case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ break;
+ case ERTS_ALC_INFO_A_MSEG_ALLOC:
+#if HAVE_ERTS_MSEG && defined(ERTS_SMP)
+ alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
+ ainfo = (air->only_sz
+ ? NIL
+ : erts_mseg_info(sched_id, NULL, NULL,
+ hpp != NULL, hpp, szp));
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(sched_id),
+ ainfo);
+ ai_list = erts_bld_cons(hpp, szp, ainfo, ai_list);
+#endif
+ break;
+ default:
+ if (erts_allctrs_info[ai].thr_spec) {
+ alloc_atom = erts_bld_atom(hpp, szp,
+ (char *) ERTS_ALC_A2AD(ai));
+ allctr = erts_allctr_thr_spec[ai].allctr[sched_id];
+ ainfo = info_func(allctr, hpp != NULL, NULL,
+ NULL, hpp, szp);
+ ai_list = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(
+ hpp, szp,
+ 3,
+ alloc_atom,
+ make_small(sched_id),
+ ainfo),
+ ai_list);
+ }
+ break;
+ }
+ msg = erts_bld_tuple(hpp, szp,
+ 3,
+ ref_copy,
+ make_small(sched_id),
+ ai_list);
+
+ }
+ if (hpp)
+ break;
+
+ hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ hp_start = hp;
+ hp_end = hp + sz;
+ szp = NULL;
+ hpp = &hp;
+ }
+ if (bp)
+ bp = erts_resize_message_buffer(bp, hp - hp_start, &msg, 1);
+ else {
+ ASSERT(hp);
+ HRelease(rp, hp_end, hp);
+ }
+
+ erts_queue_message(rp, &rp_locks, bp, msg, NIL);
+
+ if (air->req_sched == sched_id)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+
+ if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0)
+ aireq_free(air);
+}
+
+int
+erts_request_alloc_info(struct process *c_p,
+ Eterm ref,
+ Eterm allocs,
+ int only_sz)
+{
+ ErtsAllocInfoReq *air = aireq_alloc();
+ Eterm req_ai[ERTS_ALC_A_MAX+1+2] = {0};
+ Eterm alist;
+ Eterm *hp;
+ int airix = 0, ai;
+
+ air->req_sched = erts_get_scheduler_id();
+
+ air->only_sz = only_sz;
+
+ air->proc = c_p;
+
+ if (is_not_internal_ref(ref))
+ return 0;
+
+ hp = &air->ref_heap[0];
+ air->ref = STORE_NC(&hp, NULL, ref);
+
+ if (is_not_list(allocs))
+ return 0;
+
+ alist = allocs;
+
+ while (is_list(alist)) {
+ int saved = 0;
+ Eterm* consp = list_val(alist);
+ Eterm alloc = CAR(consp);
+
+ for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
+ if (erts_is_atom_str((char *) erts_alc_a2ad[ai], alloc))
+ goto save_alloc;
+ if (erts_is_atom_str("mseg_alloc", alloc)) {
+ ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
+ goto save_alloc;
+ }
+ if (erts_is_atom_str("alloc_util", alloc)) {
+ ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
+ save_alloc:
+ if (req_ai[ai])
+ return 0;
+ air->allocs[airix++] = ai;
+ req_ai[ai] = 1;
+ saved = 1;
+ }
+
+ if (!saved)
+ return 0;
+
+ alist = CDR(consp);
+ }
+
+ if (is_not_nil(alist))
+ return 0;
+
+ air->allocs[airix] = ERTS_ALC_A_INVALID;
+
+ erts_smp_atomic32_init_nob(&air->refc,
+ (erts_aint32_t) erts_no_schedulers);
+
+ erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+
+#ifdef ERTS_SMP
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_alloc_info,
+ (void *) air);
+#endif
+
+ reply_alloc_info((void *) air);
+
+ return 1;
+}
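+/*
+ * Allocator info requests are answered asynchronously: the requested
+ * allocator list is validated and stored in the request, the reference
+ * count is set to the number of schedulers, and reply_alloc_info() is run
+ * on every scheduler (scheduled as misc aux work on SMP and called directly
+ * for the requesting scheduler). Each scheduler sends a
+ * {Ref, SchedulerId, InfoList} message back to the caller, and the last one
+ * to finish frees the request.
+ */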
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Deprecated functions *
* *
@@ -3583,6 +3711,4 @@ install_debug_functions(void)
return FENCE_SZ;
}
-
-
#endif /* #ifdef DEBUG */
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index c35a60da22..f4133cdb1a 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -43,43 +43,40 @@
# define ERTS_ALC_INLINE
#endif
-#define ERTS_FIX_CORE_ALLOCATOR ERTS_ALC_A_LONG_LIVED
-extern ErtsAlcType_t erts_fix_core_allocator_ix;
-
-typedef struct {
- Uint total;
- Uint used;
-} ErtsFixInfo;
+#define ERTS_ALC_NO_FIXED_SIZES \
+ (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1)
void erts_sys_alloc_init(void);
void *erts_sys_alloc(ErtsAlcType_t, void *, Uint);
void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint);
void erts_sys_free(ErtsAlcType_t, void *, void *);
-
-void erts_init_fix_alloc(Uint, void *(*)(Uint));
-Uint erts_get_fix_size(ErtsAlcType_t);
-void erts_set_fix_size(ErtsAlcType_t, Uint);
-void erts_fix_info(ErtsAlcType_t, ErtsFixInfo *);
-void *erts_fix_alloc(ErtsAlcType_t, void *, Uint);
-void *erts_fix_realloc(ErtsAlcType_t, void *, void*, Uint);
-void erts_fix_free(ErtsAlcType_t, void *, void*);
-
-
Eterm erts_memory(int *, void *, void *, Eterm);
Eterm erts_allocated_areas(int *, void *, void *);
Eterm erts_alloc_util_allocators(void *proc);
void erts_allocator_info(int, void *);
-Eterm erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz);
Eterm erts_allocator_options(void *proc);
+struct process;
+
+int erts_request_alloc_info(struct process *c_p, Eterm ref, Eterm allocs,
+ int only_sz);
+
#define ERTS_ALLOC_INIT_DEF_OPTS_INITER {0}
typedef struct {
- int dummy;
+ int ncpu;
} ErtsAllocInitOpts;
+typedef struct {
+ Allctr_t *deallctr[ERTS_ALC_A_MAX+1];
+ int pref_ix[ERTS_ALC_A_MAX+1];
+ int flist_ix[ERTS_ALC_A_MAX+1];
+ int pre_alc_ix;
+} ErtsSchedAllocData;
+
void erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop);
+void erts_alloc_late_init(void);
#if defined(GET_ERTS_ALC_TEST) || defined(ERTS_ALC_INTERNAL__)
/* Only for testing */
@@ -126,15 +123,19 @@ extern ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
typedef struct {
int enabled;
- int all_thr_safe;
+ int dd;
+ int aix;
int size;
Allctr_t **allctr;
} ErtsAllocatorThrSpec_t;
extern ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
-int erts_alc_get_thr_ix(void);
-void erts_alloc_reg_scheduler_id(Uint id);
+void erts_alloc_register_scheduler(void *vesdp);
+void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
+ int *need_thr_progress,
+ int *more_work);
+erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs);
__decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint)
__noreturn;
@@ -180,11 +181,11 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size);
void erts_free(ErtsAlcType_t type, void *ptr);
void *erts_alloc_fnf(ErtsAlcType_t type, Uint size);
void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size);
-void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
-
#endif /* #if !ERTS_ALC_DO_INLINE */
+void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
+
#ifndef ERTS_CACHE_LINE_SIZE
/* Assume a cache line size of 64 bytes */
# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
@@ -250,20 +251,10 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
size);
}
-ERTS_ALC_INLINE
-void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
-{
- UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1));
-
- if (v & ERTS_CACHE_LINE_MASK) {
- v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
- }
- ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
- return (void*)v;
-}
-
#endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
+#define ERTS_ALC_GET_THR_IX() ((int) erts_get_scheduler_id())
+
typedef void (*erts_alloc_verify_func_t)(Allctr_t *);
erts_alloc_verify_func_t
@@ -448,136 +439,41 @@ NAME##_free(TYPE *p) \
} \
}
-typedef struct {
- void *start;
- void *end;
- int chunks_mem_size;
-} erts_sched_pref_quick_alloc_data_t;
-
-#ifdef DEBUG
-#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P) \
-do { \
- ASSERT((void *) (C) < (void *) (P)); \
- ASSERT((void *) (P) \
- < (void *) (((char *) (C)) + (A)->chunks_mem_size)); \
-} while (0)
-#else
-#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P)
-#endif
+#include "erl_sched_spec_pre_alloc.h"
#define ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) \
-union erts_qa_##NAME##__ { \
+union erts_sspa_##NAME##__ { \
+ erts_sspa_blk_t next; \
TYPE type; \
- union erts_qa_##NAME##__ *next; \
}; \
-typedef struct { \
- erts_smp_spinlock_t lock; \
- union erts_qa_##NAME##__ *freelist; \
- union erts_qa_##NAME##__ pre_alloced[1]; \
-} erts_qa_##NAME##_chunk__; \
-static erts_sched_pref_quick_alloc_data_t *qa_data_##NAME##__; \
-static ERTS_INLINE erts_qa_##NAME##_chunk__ * \
-get_##NAME##_chunk_ix(int cix) \
-{ \
- char *ptr = (char *) qa_data_##NAME##__->start; \
- ptr += cix*qa_data_##NAME##__->chunks_mem_size; \
- return (erts_qa_##NAME##_chunk__ *) ptr; \
-} \
-static ERTS_INLINE erts_qa_##NAME##_chunk__ * \
-get_##NAME##_chunk_ptr(void *ptr) \
-{ \
- int cix; \
- size_t diff; \
- if (ptr < qa_data_##NAME##__->start || qa_data_##NAME##__->end <= ptr)\
- return NULL; \
- diff = ((char *) ptr) - ((char *) qa_data_##NAME##__->start); \
- cix = diff / qa_data_##NAME##__->chunks_mem_size; \
- return get_##NAME##_chunk_ix(cix); \
-} \
+ \
+static erts_sspa_data_t *sspa_data_##NAME##__; \
+ \
static void \
init_##NAME##_alloc(void) \
{ \
- size_t tot_size; \
- size_t chunk_mem_size; \
- char *chunk_start; \
- int cix; \
- int no_blocks = ERTS_PRE_ALLOC_SIZE((PASZ)); \
- int no_blocks_per_chunk = 2*((no_blocks-1)/erts_no_schedulers + 1); \
- no_blocks = no_blocks_per_chunk * erts_no_schedulers; \
- chunk_mem_size = sizeof(erts_qa_##NAME##_chunk__); \
- chunk_mem_size += (sizeof(union erts_qa_##NAME##__) \
- * (no_blocks_per_chunk - 1)); \
- chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size); \
- tot_size = sizeof(erts_sched_pref_quick_alloc_data_t); \
- tot_size += ERTS_CACHE_LINE_SIZE - 1; \
- tot_size += chunk_mem_size*erts_no_schedulers; \
- qa_data_##NAME##__ = erts_alloc(ERTS_ALC_T_PRE_ALLOC_DATA,tot_size);\
- chunk_start = (((char *) qa_data_##NAME##__) \
- + sizeof(erts_sched_pref_quick_alloc_data_t)); \
- if ((((UWord) chunk_start) & ERTS_CACHE_LINE_MASK) != ((UWord) 0)) \
- chunk_start = ((char *) \
- ((((UWord) chunk_start) & ~ERTS_CACHE_LINE_MASK) \
- + ERTS_CACHE_LINE_SIZE)); \
- qa_data_##NAME##__->chunks_mem_size = chunk_mem_size; \
- qa_data_##NAME##__->start = (void *) chunk_start; \
- qa_data_##NAME##__->end = (chunk_start \
- + chunk_mem_size*erts_no_schedulers); \
- for (cix = 0; cix < erts_no_schedulers; cix++) { \
- int i; \
- erts_qa_##NAME##_chunk__ *chunk = get_##NAME##_chunk_ix(cix); \
- erts_smp_spinlock_init(&chunk->lock, #NAME "_alloc_lock"); \
- chunk->freelist = &chunk->pre_alloced[0]; \
- for (i = 1; i < no_blocks_per_chunk; i++) { \
- ERTS_PRE_ALLOC_CLOBBER(&chunk->pre_alloced[i-1], \
- union erts_qa_##NAME##__); \
- chunk->pre_alloced[i-1].next = &chunk->pre_alloced[i]; \
- } \
- ERTS_PRE_ALLOC_CLOBBER(&chunk->pre_alloced[no_blocks_per_chunk-1],\
- union erts_qa_##NAME##__); \
- chunk->pre_alloced[no_blocks_per_chunk-1].next = NULL; \
- } \
+ sspa_data_##NAME##__ = \
+ erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
+ ERTS_PRE_ALLOC_SIZE((PASZ))); \
} \
-static ERTS_INLINE TYPE * \
+ \
+static TYPE * \
NAME##_alloc(void) \
{ \
- int cix = ((int) erts_get_scheduler_id()) - 1; \
- TYPE *res; \
- if (cix < 0) \
- res = NULL; \
- else { \
- erts_qa_##NAME##_chunk__ *chunk = get_##NAME##_chunk_ix(cix); \
- erts_smp_spin_lock(&chunk->lock); \
- if (!chunk->freelist) \
- res = NULL; \
- else { \
- res = &chunk->freelist->type; \
- chunk->freelist = chunk->freelist->next; \
- ERTS_SPPA_DBG_CHK_IN_CHNK(qa_data_##NAME##__, chunk, res); \
- } \
- erts_smp_spin_unlock(&chunk->lock); \
- } \
- return res; \
+ ErtsSchedulerData *esdp = erts_get_scheduler_data(); \
+ if (!esdp) \
+ return NULL; \
+ return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \
+ (int) esdp->no - 1); \
} \
-static ERTS_INLINE int \
+ \
+static int \
NAME##_free(TYPE *p) \
{ \
- erts_qa_##NAME##_chunk__ *chunk; \
- chunk = get_##NAME##_chunk_ptr((void *) p); \
- if (!chunk) \
- return 0; \
- else { \
- union erts_qa_##NAME##__ *up; \
- ERTS_SPPA_DBG_CHK_IN_CHNK(qa_data_##NAME##__, chunk, p); \
- up = ((union erts_qa_##NAME##__ *) \
- (((char *) p) \
- - ((char *) &((union erts_qa_##NAME##__ *) 0)->type))); \
- erts_smp_spin_lock(&chunk->lock); \
- ERTS_PRE_ALLOC_CLOBBER(up, union erts_qa_##NAME##__); \
- up->next = chunk->freelist; \
- chunk->freelist = up; \
- erts_smp_spin_unlock(&chunk->lock); \
- return 1; \
- } \
+ ErtsSchedulerData *esdp = erts_get_scheduler_data(); \
+ return erts_sspa_free(sspa_data_##NAME##__, \
+ esdp ? (int) esdp->no - 1 : -1, \
+ (char *) p); \
}
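+/*
+ * The scheduler-preferred pre-allocators are now thin wrappers around the
+ * common erl_sched_spec_pre_alloc implementation: allocation only succeeds
+ * from a scheduler thread (using that scheduler's chunk), while freeing may
+ * be done from any thread (index -1 is passed for non-scheduler threads).
+ */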
#ifdef DEBUG
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index eda0831441..962db8b831 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -50,6 +50,15 @@
# command line argument to make_alloc_types. The variable X is false
# after a "+disable X" statement or if it has never been mentioned.
++if smp
++disable threads_no_smp
++else
++if threads
++enable threads_no_smp
++else
++disable threads_no_smp
++endif
++endif
# --- Allocator declarations -------------------------------------------------
#
@@ -133,29 +142,25 @@ class SYSTEM system_data
# should be deallocated before the emulator starts executing Erlang
# code again.
#
-# NOTE: When adding or removing a type which uses the FIXED_SIZE allocator,
-# also add or remove initialization of the type in erts_alloc_init()
-# (erl_alloc.c).
-#
# <TYPE> <ALLOCATOR> <CLASS> <DESCRIPTION>
type SBMBC SBMBC SYSTEM small_block_mbc
type PROC FIXED_SIZE PROCESSES proc
-type ATOM FIXED_SIZE ATOM atom_entry
-type MODULE FIXED_SIZE CODE module_entry
-type REG_PROC FIXED_SIZE PROCESSES reg_proc
+type ATOM LONG_LIVED ATOM atom_entry
+type MODULE LONG_LIVED CODE module_entry
+type REG_PROC STANDARD PROCESSES reg_proc
type LINK_LH STANDARD PROCESSES link_lh
type SUSPEND_MON STANDARD PROCESSES suspend_monitor
type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
type PROC_LIST SHORT_LIVED PROCESSES proc_list
-type FUN_ENTRY FIXED_SIZE CODE fun_entry
+type FUN_ENTRY LONG_LIVED CODE fun_entry
type ATOM_TXT LONG_LIVED ATOM atom_text
type BEAM_REGISTER EHEAP PROCESSES beam_register
type HEAP EHEAP PROCESSES heap
type OLD_HEAP EHEAP PROCESSES old_heap
type HEAP_FRAG EHEAP PROCESSES heap_frag
type TMP_HEAP TEMPORARY PROCESSES tmp_heap
-type MSG_REF SHORT_LIVED PROCESSES msg_ref
+type MSG_REF FIXED_SIZE PROCESSES msg_ref
type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
@@ -196,10 +201,10 @@ type LINEBUF STANDARD SYSTEM line_buf
type IOQ STANDARD SYSTEM io_queue
type BITS_BUF STANDARD SYSTEM bits_buf
type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf
-type ASYNC_Q LONG_LIVED SYSTEM async_queue
+type ASYNC_DATA LONG_LIVED SYSTEM internal_async_data
type ESTACK TEMPORARY SYSTEM estack
type PORT_CALL_BUF TEMPORARY SYSTEM port_call_buf
-type DB_TABLE FIXED_SIZE ETS db_tab
+type DB_TABLE ETS ETS db_tab
type DB_FIXATION SHORT_LIVED ETS db_fixation
type DB_FIX_DEL SHORT_LIVED ETS fixed_del
type DB_TABLES LONG_LIVED ETS db_tabs
@@ -256,6 +261,23 @@ type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type ZLIB STANDARD SYSTEM zlib
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
+type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
+type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
+
++if threads_no_smp
+# Need thread safe allocs, but std_alloc and fix_alloc are not;
+# use driver_alloc which is...
+type THR_Q_EL DRIVER SYSTEM thr_q_element
+type THR_Q_EL_SL DRIVER SYSTEM sl_thr_q_element
+type MISC_AUX_WORK DRIVER SYSTEM misc_aux_work
++else
+type THR_Q_EL STANDARD SYSTEM thr_q_element
+type THR_Q_EL_SL FIXED_SIZE SYSTEM sl_thr_q_element
+type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
++endif
+type THR_Q STANDARD SYSTEM thr_queue
+type THR_Q_SL SHORT_LIVED SYSTEM short_lived_thr_queue
+type THR_Q_LL LONG_LIVED SYSTEM long_lived_thr_queue
+if smp
type ASYNC SHORT_LIVED SYSTEM async
@@ -271,8 +293,9 @@ type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
type PROC_LCK_QS LONG_LIVED SYSTEM proc_lock_queues
type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
-type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
-type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
+type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data
+type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data
+type T_THR_PRGR_DATA SHORT_LIVED SYSTEM temp_thr_prgr_data
+endif
#
@@ -285,12 +308,6 @@ type ETHR_STD STANDARD SYSTEM ethread_standard
type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
-+ifnot smp
-
-type ARCALLBACK LONG_LIVED SYSTEM async_ready_callback
-
-+endif
-
+endif
+if shared_heap
@@ -346,10 +363,10 @@ type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term
-# no FIXED_SIZE for low memory
-type EXPORT STANDARD_LOW CODE export_entry
+type EXPORT LONG_LIVED_LOW CODE export_entry
type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh
type NLINK_SH STANDARD_LOW PROCESSES nlink_sh
+type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request
+else # "fullword"
@@ -362,9 +379,10 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
-type EXPORT FIXED_SIZE CODE export_entry
+type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
+type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
+endif
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index d51ed0c36d..af386c9197 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -46,6 +46,7 @@
#include "erl_alloc_util.h"
#include "erl_mseg.h"
#include "erl_threads.h"
+#include "erl_thr_progress.h"
#ifdef ERTS_ENABLE_LOCK_COUNT
#include "erl_lock_count.h"
@@ -61,6 +62,13 @@
#warning "* * * * * * * * * *"
#endif
+#define ERTS_ALCU_DD_OPS_LIM_HIGH 20
+#define ERTS_ALCU_DD_OPS_LIM_LOW 2
+
+/* Fix alloc limit */
+#define ERTS_ALCU_FIX_MAX_LIST_SZ 1000
+#define ERTS_ALC_FIX_MAX_SHRINK_OPS 30
+
#define ALLOC_ZERO_EQ_NULL 0
static int atoms_initialized = 0;
@@ -269,7 +277,6 @@ static void check_blk_carrier(Allctr_t *, Block_t *);
#define HARD_CHECK_BLK_CARRIER(A, B)
#endif
-
/* Statistics updating ... */
#ifdef DEBUG
@@ -465,26 +472,34 @@ do { \
#ifdef DEBUG
#ifdef USE_THREADS
-#define ERTS_ALCU_DBG_CHK_THR_SPEC(A) \
+#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
do { \
if (!(A)->thread_safe) { \
- if (!(A)->debug.saved_tid) \
+ if (!(A)->debug.saved_tid) { \
(A)->debug.tid = erts_thr_self(); \
+ (A)->debug.saved_tid = 1; \
+ } \
else { \
- ASSERT(ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
+ ERTS_SMP_LC_ASSERT( \
+ ethr_equal_tids((A)->debug.tid, erts_thr_self()) \
+ || erts_thr_progress_is_blocking()); \
} \
} \
} while (0)
#else
-#define ERTS_ALCU_DBG_CHK_THR_SPEC(A)
+#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
#else
-#define ERTS_ALCU_DBG_CHK_THR_SPEC(A)
+#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
static void make_name_atoms(Allctr_t *allctr);
+static Block_t *create_carrier(Allctr_t *, Uint, UWord);
+static void destroy_carrier(Allctr_t *, Block_t *);
+static void mbc_free(Allctr_t *allctr, void *p);
+
/* mseg ... */
@@ -651,6 +666,446 @@ static void destroy_sbmbc(Allctr_t *allctr, Block_t *blk);
static Block_t *create_carrier(Allctr_t *, Uint, UWord);
static void destroy_carrier(Allctr_t *, Block_t *);
+#if 0
+#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
+ do { if ((FIX)) chk_fix_list((A), (FIX), (IX), (B)); } while (0)
+static void
+chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
+{
+ void *p;
+ int n;
+ for (n = 0, p = fix[ix].list; p; p = *((void **) p))
+ n++;
+ if (n != fix[ix].list_size) {
+ erts_fprintf(stderr, "FOUND IT ts=%d, sched=%d, ix=%d, n=%d, ls=%d %s!\n",
+ allctr->thread_safe, allctr->ix, ix, n, fix[ix].list_size, before ? "before" : "after");
+ abort();
+ }
+}
+#else
+#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)
+#endif
+
+erts_aint32_t
+erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
+{
+ int all_empty = 1;
+ erts_aint32_t res = 0;
+ int ix, o;
+ ErtsAlcFixList_t *fix = allctr->fix;
+ int flush = flgs == 0;
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+
+ for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
+ fix[ix].limit = fix[ix].max_used;
+ if (fix[ix].limit < fix[ix].used)
+ fix[ix].limit = fix[ix].used;
+ fix[ix].max_used = fix[ix].used;
+ ASSERT(fix[ix].limit >= 0);
+
+ }
+ if (flush) {
+ fix[ix].limit = 0;
+ fix[ix].max_used = fix[ix].used;
+ ASSERT(fix[ix].limit >= 0);
+ }
+ for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
+ Block_t *blk;
+ void *ptr;
+
+ if (!flush && fix[ix].limit >= fix[ix].allocated)
+ break;
+ if (fix[ix].list_size == 0)
+ break;
+ ptr = fix[ix].list;
+ fix[ix].list = *((void **) ptr);
+ fix[ix].list_size--;
+
+ blk = UMEM2BLK(ptr);
+
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, ptr);
+
+ fix[ix].allocated--;
+ }
+ if (fix[ix].list_size != 0) {
+ if (fix[ix].limit < fix[ix].allocated)
+ res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
+ all_empty = 0;
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ }
+
+ if (all_empty && allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 0;
+ erts_set_aux_work_timeout(allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 0);
+ }
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+
+ return res;
+}
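+/*
+ * Shrinks the fix-alloc free lists. With the LOWER_LIM flag the per-type
+ * limit is lowered towards the usage high-water mark seen since the last
+ * pass; with flgs == 0 everything is flushed. At most
+ * ERTS_ALC_FIX_MAX_SHRINK_OPS blocks per type are released per call unless
+ * flushing, and the aux-work timeout is cancelled once all lists are empty.
+ */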
+
+#ifdef ERTS_SMP
+
+#define ERTS_ALCU_DD_FIX_TYPE_OFFS \
+ ((sizeof(ErtsAllctrDDBlock_t)-1)/sizeof(UWord) + 1)
+
+#define ERTS_AU_PREF_ALLOC_IX_MASK \
+ ((((UWord) 1) << ERTS_AU_PREF_ALLOC_BITS) - 1)
+#define ERTS_AU_PREF_ALLOC_SIZE_MASK \
+ ((((UWord) 1) << (sizeof(UWord)*8 - ERTS_AU_PREF_ALLOC_BITS)) - 1)
+
+static ERTS_INLINE int
+get_pref_allctr(void *extra, Allctr_t **allctr)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int pref_ix;
+
+ pref_ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
+ ASSERT(0 <= pref_ix && pref_ix < tspec->size);
+
+ *allctr = tspec->allctr[pref_ix];
+ return pref_ix;
+}
+
+static ERTS_INLINE void *
+get_used_allctr(void *extra, void *p, Allctr_t **allctr, UWord *sizep)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ void *ptr = (void *) (((char *) p) - sizeof(UWord));
+ UWord ainfo = *((UWord *) ptr);
+ int aix = (int) (ainfo & ERTS_AU_PREF_ALLOC_IX_MASK);
+ *allctr = tspec->allctr[aix];
+ if (sizep)
+ *sizep = ((ainfo >> ERTS_AU_PREF_ALLOC_BITS)
+ & ERTS_AU_PREF_ALLOC_SIZE_MASK);
+ return ptr;
+}
+
+static ERTS_INLINE void *
+put_used_allctr(void *p, int ix, UWord size)
+{
+ UWord ainfo = (size >= ERTS_AU_PREF_ALLOC_SIZE_MASK
+ ? ERTS_AU_PREF_ALLOC_SIZE_MASK
+ : size);
+ ainfo <<= ERTS_AU_PREF_ALLOC_BITS;
+ ainfo |= (UWord) ix;
+ *((UWord *) p) = ainfo;
+ return (void *) (((char *) p) + sizeof(UWord));
+}
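+/*
+ * Thread-preferred allocations carry a one-UWord header just in front of
+ * the user pointer: the low ERTS_AU_PREF_ALLOC_BITS hold the index of the
+ * allocator instance that made the allocation and the remaining bits hold
+ * the (saturated) block size. get_used_allctr() reads the header back so a
+ * free or realloc performed by another thread can locate the owning
+ * instance.
+ */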
+
+static void
+init_dd_queue(ErtsAllctrDDQueue_t *ddq)
+{
+ erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&ddq->tail.data.last,
+ (erts_aint_t) &ddq->tail.data.marker);
+ erts_atomic_init_nob(&ddq->tail.data.um_refc[0], 0);
+ erts_atomic_init_nob(&ddq->tail.data.um_refc[1], 0);
+ erts_atomic32_init_nob(&ddq->tail.data.um_refc_ix, 0);
+ ddq->head.first = &ddq->tail.data.marker;
+ ddq->head.unref_end = &ddq->tail.data.marker;
+ ddq->head.next.thr_progress = erts_thr_progress_current();
+ ddq->head.next.thr_progress_reached = 1;
+ ddq->head.next.um_refc_ix = 1;
+ ddq->head.next.unref_end = &ddq->tail.data.marker;
+ ddq->head.used_marker = 1;
+}
+
+static ERTS_INLINE erts_aint_t
+ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr)
+{
+ erts_aint_t ilast, itmp;
+ ErtsAllctrDDBlock_t *this = ptr;
+
+ erts_atomic_init_nob(&this->atmc_next, ERTS_AINT_NULL);
+
+ /* Enqueue at end of list... */
+
+ ilast = erts_atomic_read_nob(&ddq->tail.data.last);
+ while (1) {
+ ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast;
+ itmp = erts_atomic_cmpxchg_mb(&last->atmc_next,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL)
+ break;
+ ilast = itmp;
+ }
+
+ /* Move last pointer forward... */
+ while (1) {
+ if (erts_atomic_read_rb(&this->atmc_next) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return erts_atomic_read_rb(&ddq->tail.data.last);
+ }
+ itmp = erts_atomic_cmpxchg_mb(&ddq->tail.data.last,
+ (erts_aint_t) this,
+ ilast);
+ if (ilast == itmp)
+ return (erts_aint_t) this;
+ ilast = itmp;
+ }
+}
+
+static ERTS_INLINE int
+ddq_enqueue(ErtsAlcType_t type, ErtsAllctrDDQueue_t *ddq, void *ptr)
+{
+ erts_aint_t ilast;
+ int um_refc_ix = 0;
+ int managed_thread = erts_thr_progress_is_managed_thread();
+ if (!managed_thread) {
+ um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
+ while (1) {
+ int tmp_um_refc_ix;
+ erts_atomic_inc_acqb(&ddq->tail.data.um_refc[um_refc_ix]);
+ tmp_um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
+ if (tmp_um_refc_ix == um_refc_ix)
+ break;
+ erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
+ um_refc_ix = tmp_um_refc_ix;
+ }
+ }
+
+ ilast = ddq_managed_thread_enqueue(ddq, ptr);
+
+ if (!managed_thread)
+ erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
+ return ilast == (erts_aint_t) ptr;
+}
+
+static ERTS_INLINE void *
+ddq_dequeue(ErtsAllctrDDQueue_t *ddq)
+{
+ ErtsAllctrDDBlock_t *blk;
+
+ if (ddq->head.first == ddq->head.unref_end)
+ return NULL;
+
+ blk = ddq->head.first;
+ if (blk == &ddq->tail.data.marker) {
+ ASSERT(ddq->head.used_marker);
+ ddq->head.used_marker = 0;
+ blk = ((ErtsAllctrDDBlock_t *)
+ erts_atomic_read_nob(&blk->atmc_next));
+ if (blk == ddq->head.unref_end) {
+ ddq->head.first = blk;
+ return NULL;
+ }
+ }
+
+ ddq->head.first = ((ErtsAllctrDDBlock_t *)
+ erts_atomic_read_nob(&blk->atmc_next));
+
+ ASSERT(ddq->head.first);
+
+ return (void *) blk;
+}
+
+static int
+ddq_check_incoming(ErtsAllctrDDQueue_t *ddq)
+{
+ erts_aint_t ilast = erts_atomic_read_nob(&ddq->tail.data.last);
+ if (((ErtsAllctrDDBlock_t *) ilast) == &ddq->tail.data.marker
+ && ddq->head.first == &ddq->tail.data.marker) {
+ /* Nothing more to do... */
+ return 0;
+ }
+
+ if (ddq->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(ddq->head.next.thr_progress)) {
+ int um_refc_ix;
+ ddq->head.next.thr_progress_reached = 1;
+ um_refc_ix = ddq->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&ddq->tail.data.um_refc[um_refc_ix]) == 0) {
+ /* Move unreferenced end pointer forward... */
+
+ ddq->head.unref_end = ddq->head.next.unref_end;
+
+ if (!ddq->head.used_marker
+ && ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast) {
+ ddq->head.used_marker = 1;
+ ilast = ddq_managed_thread_enqueue(ddq, &ddq->tail.data.marker);
+ }
+
+ if (ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast)
+ ERTS_THR_MEMORY_BARRIER;
+ else {
+ ddq->head.next.unref_end = (ErtsAllctrDDBlock_t *) ilast;
+ ERTS_THR_MEMORY_BARRIER;
+ ddq->head.next.thr_progress = erts_thr_progress_later();
+ erts_atomic32_set_relb(&ddq->tail.data.um_refc_ix,
+ um_refc_ix);
+ ddq->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
+ ddq->head.next.thr_progress_reached = 0;
+ }
+ }
+ }
+ return 1;
+}
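+/*
+ * The delayed-dealloc queue is a lock-free multiple-producer queue:
+ * producers append with a CAS on the last element's atmc_next (unmanaged
+ * threads additionally hold an um_refc reference while enqueueing). The
+ * owner only dequeues up to unref_end, which is advanced once thread
+ * progress indicates that earlier enqueuers can no longer reference the
+ * elements before it.
+ */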
+
+static ERTS_INLINE int
+handle_delayed_dealloc(Allctr_t *allctr,
+ int allctr_locked,
+ int use_limit,
+ int ops_limit,
+ int *need_thr_progress,
+ int *need_more_work)
+{
+ int need_thr_prgr = 0;
+ int need_mr_wrk = 0;
+ int have_checked_incoming = 0;
+ int ops = 0;
+ ErtsAlcFixList_t *fix;
+ int res;
+ ErtsAllctrDDQueue_t *ddq;
+
+ if (allctr->thread_safe && !allctr_locked)
+ erts_mtx_lock(&allctr->mutex);
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
+
+ fix = allctr->fix;
+
+ ddq = &allctr->dd.q;
+
+ res = 0;
+
+ while (1) {
+ Block_t *blk;
+ void *ptr;
+ int ix;
+
+ if (use_limit && ++ops > ops_limit) {
+ if (ddq->head.first != ddq->head.unref_end) {
+ need_mr_wrk = 1;
+ if (need_more_work)
+ *need_more_work |= 1;
+ }
+ break;
+ }
+
+ dequeue:
+ ptr = ddq_dequeue(ddq);
+ if (!ptr) {
+ if (have_checked_incoming)
+ break;
+ need_thr_prgr = ddq_check_incoming(ddq);
+ if (need_thr_progress)
+ *need_thr_progress |= need_thr_prgr;
+ have_checked_incoming = 1;
+ goto dequeue;
+ }
+
+ res = 1;
+
+ INC_CC(allctr->calls.this_free);
+
+ if (fix) {
+ ErtsAlcType_t type;
+
+ type = (ErtsAlcType_t) ((UWord *) ptr)[ERTS_ALCU_DD_FIX_TYPE_OFFS];
+ ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix[ix].used--;
+ if (fix[ix].allocated < fix[ix].limit
+ && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
+ *((void **) ptr) = fix[ix].list;
+ fix[ix].list = ptr;
+ fix[ix].list_size++;
+ if (!allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 1;
+ erts_set_aux_work_timeout(
+ allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 1);
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ continue;
+ }
+ fix[ix].allocated--;
+ if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
+ blk = UMEM2BLK(ptr);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, ptr);
+ ptr = fix[ix].list;
+ fix[ix].list = *((void **) ptr);
+ fix[ix].list_size--;
+ fix[ix].allocated--;
+ }
+ }
+
+ blk = UMEM2BLK(ptr);
+
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, ptr);
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ }
+
+ if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
+ need_thr_prgr = ddq_check_incoming(ddq);
+ *need_thr_progress |= need_thr_prgr;
+ }
+
+ if (allctr->thread_safe && !allctr_locked)
+ erts_mtx_unlock(&allctr->mutex);
+ return res;
+}
+
+static ERTS_INLINE void
+enqueue_dealloc_other_instance(ErtsAlcType_t type, Allctr_t *allctr, void *ptr)
+{
+ if (allctr->fix)
+ ((UWord *) ptr)[ERTS_ALCU_DD_FIX_TYPE_OFFS] = (UWord) type;
+
+ if (ddq_enqueue(type, &allctr->dd.q, ptr))
+ erts_alloc_notify_delayed_dealloc(allctr->ix);
+}
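+/*
+ * A block freed by a thread that does not own the allocator instance is
+ * pushed onto that instance's delayed-dealloc queue instead of being freed
+ * directly; for fix-alloc instances the allocation type is stashed in the
+ * block so the owner can credit the right fix list. The owner is notified
+ * via erts_alloc_notify_delayed_dealloc() when the enqueue indicates it may
+ * need to be woken.
+ */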
+
+#endif
+
+void
+erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
+ int limit,
+ int *need_thr_progress,
+ int *more_work)
+{
+#ifdef ERTS_SMP
+ handle_delayed_dealloc(allctr,
+ 0,
+ limit,
+ ERTS_ALCU_DD_OPS_LIM_HIGH,
+ need_thr_progress,
+ more_work);
+#endif
+}
+
+#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
+ handle_delayed_dealloc((Allctr), (Locked), 1, \
+ ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL)
+
/* Multi block carrier alloc/realloc/free ... */
/* NOTE! mbc_alloc() may in case of memory shortage place the requested
@@ -680,8 +1135,21 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp, Uint32 *alcu_flgsp)
}
}
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
+
blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0, *alcu_flgsp);
+#ifdef ERTS_SMP
+ if (!blk && allctr->dd.use) {
+ if (ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1))
+ blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0,
+ *alcu_flgsp);
+ }
+#endif
+
if (!blk) {
if ((*alcu_flgsp) & ERTS_ALCU_FLG_SBMBC)
blk = create_sbmbc(allctr, get_blk_sz);
@@ -939,6 +1407,11 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
Uint is_last_blk;
#endif /* #ifndef MBC_REALLOC_ALWAYS_MOVES */
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
+
ASSERT(p);
ASSERT(size);
ASSERT(size < allctr->sbc_threshold);
@@ -1005,7 +1478,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
cand_blk,
cand_blk_sz,
alcu_flgs);
-
if (new_blk || cand_blk != blk)
goto move_into_new_blk;
}
@@ -1441,7 +1913,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
goto try_sys_alloc;
if (flags & CFLG_FORCE_MSEG)
goto try_mseg;
- if (erts_mseg_no() >= max_mseg_carriers)
+ if (erts_mseg_no(&allctr->mseg_opt) >= max_mseg_carriers)
goto try_sys_alloc;
if (flags & CFLG_SBC) {
if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
@@ -1840,8 +2312,12 @@ static struct {
Eterm ycs;
/* Eterm sbmbcs; */
+
+ Eterm fix_types;
+
Eterm mbcs;
Eterm sbcs;
+
Eterm sys_alloc_carriers_size;
#if HAVE_ERTS_MSEG
Eterm mseg_alloc_carriers_size;
@@ -1871,6 +2347,8 @@ static struct {
#endif
} am;
+static Eterm fix_type_atoms[ERTS_ALC_NO_FIXED_SIZES];
+
static ERTS_INLINE void atom_init(Eterm *atom, char *name)
{
*atom = am_atom_put(name, strlen(name));
@@ -1891,6 +2369,7 @@ init_atoms(Allctr_t *allctr)
erts_mtx_lock(&init_atoms_mtx);
if (!atoms_initialized) {
+ int ix;
#ifdef DEBUG
Eterm *atom;
@@ -1933,8 +2412,12 @@ init_atoms(Allctr_t *allctr)
AM_INIT(ycs);
/*AM_INIT(sbmbcs);*/
+
+ AM_INIT(fix_types);
+
AM_INIT(mbcs);
AM_INIT(sbcs);
+
AM_INIT(sys_alloc_carriers_size);
#if HAVE_ERTS_MSEG
AM_INIT(mseg_alloc_carriers_size);
@@ -1965,6 +2448,13 @@ init_atoms(Allctr_t *allctr)
ASSERT(*atom != THE_NON_VALUE);
}
#endif
+
+ for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
+ ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
+ char *name = (char *) ERTS_ALC_N2TD(n);
+ size_t len = strlen(name);
+ fix_type_atoms[ix] = am_atom_put(name, len);
+ }
}
@@ -2043,6 +2533,48 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
}
static Eterm
+sz_info_fix(Allctr_t *allctr,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res;
+ int ix;
+ ErtsAlcFixList_t *fix = allctr->fix;
+
+ ASSERT(fix);
+
+ res = NIL;
+
+ for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
+ ErtsAlcType_t n = ix + ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ Uint alloced = (fix[ix].type_size * fix[ix].allocated);
+ Uint used = fix[ix].type_size*fix[ix].used;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to,
+ arg,
+ "fix type: %s %bpu %bpu\n",
+ (char *) ERTS_ALC_N2TD(n),
+ alloced,
+ used);
+ }
+
+ if (hpp || szp) {
+ add_3tup(hpp, szp, &res,
+ fix_type_atoms[ix],
+ bld_unstable_uint(hpp, szp, alloced),
+ bld_unstable_uint(hpp, szp, used));
+ }
+ }
+
+ return res;
+}
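+/*
+ * Builds the fix_types part of the info/size result: one
+ * {TypeName, Allocated, Used} triple per fixed-size type, with both values
+ * in bytes (type_size times the respective block counts).
+ */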
+
+static Eterm
sz_info_carriers(Allctr_t *allctr,
CarriersStats_t *cs,
char *prefix,
@@ -2590,7 +3122,7 @@ erts_alcu_sz_info(Allctr_t *allctr,
Uint **hpp,
Uint *szp)
{
- Eterm res, sbmbcs, mbcs, sbcs;
+ Eterm res, sbmbcs, mbcs, sbcs, fix = THE_NON_VALUE;
res = THE_NON_VALUE;
@@ -2607,6 +3139,8 @@ erts_alcu_sz_info(Allctr_t *allctr,
erts_mtx_lock(&allctr->mutex);
#endif
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
+
if (hpp || szp)
ensure_atoms_initialized(allctr);
@@ -2619,6 +3153,8 @@ erts_alcu_sz_info(Allctr_t *allctr,
update_max_ever_values(&allctr->mbcs);
update_max_ever_values(&allctr->sbcs);
+ if (allctr->fix)
+ fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp);
sbmbcs = sz_info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p,
print_to_arg, hpp, szp);
mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
@@ -2631,6 +3167,8 @@ erts_alcu_sz_info(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs);
+ if (allctr->fix)
+ add_2tup(hpp, szp, &res, am.fix_types, fix);
}
if (begin_max_period) {
@@ -2656,7 +3194,7 @@ erts_alcu_info(Allctr_t *allctr,
Uint **hpp,
Uint *szp)
{
- Eterm res, sett, sbmbcs, mbcs, sbcs, calls;
+ Eterm res, sett, sbmbcs, mbcs, sbcs, calls, fix = THE_NON_VALUE;
res = THE_NON_VALUE;
@@ -2673,6 +3211,8 @@ erts_alcu_info(Allctr_t *allctr,
erts_mtx_lock(&allctr->mutex);
#endif
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
+
if (hpp || szp)
ensure_atoms_initialized(allctr);
@@ -2694,6 +3234,8 @@ erts_alcu_info(Allctr_t *allctr,
}
sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
+ if (allctr->fix)
+ fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp);
sbmbcs = info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p,
print_to_arg, hpp, szp);
mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
@@ -2709,6 +3251,8 @@ erts_alcu_info(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs);
+ if (allctr->fix)
+ add_2tup(hpp, szp, &res, am.fix_types, fix);
add_2tup(hpp, szp, &res, am.options, sett);
add_3tup(hpp, szp, &res,
am.versions,
@@ -2733,7 +3277,7 @@ erts_alcu_info(Allctr_t *allctr,
void
-erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size)
+erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz)
{
#ifdef USE_THREADS
@@ -2751,6 +3295,18 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size)
size->blocks += allctr->sbmbcs.blocks.curr.size;
size->blocks += allctr->sbcs.blocks.curr.size;
+ if (fi) {
+ int ix;
+ for (ix = 0; ix < fisz; ix++) {
+ if (allctr->fix) {
+ fi[ix].allocated += (allctr->fix[ix].type_size
+ * allctr->fix[ix].allocated);
+ fi[ix].used += (allctr->fix[ix].type_size
+ * allctr->fix[ix].used);
+ }
+ }
+ }
+
#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -2764,12 +3320,16 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
void *res;
+ ErtsAlcFixList_t *fix;
ASSERT(initialized);
ASSERT(allctr);
- ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+ ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ || erts_lc_mtx_is_locked(&allctr->mutex));
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
#if ALLOC_ZERO_EQ_NULL
if (!size)
@@ -2778,18 +3338,61 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
INC_CC(allctr->calls.this_alloc);
+ fix = allctr->fix;
+ if (fix) {
+ int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix[ix].used++;
+ res = fix[ix].list;
+ if (res) {
+ fix[ix].list_size--;
+ fix[ix].list = *((void **) res);
+ if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
+ void *p = fix[ix].list;
+ Block_t *blk;
+ fix[ix].list = *((void **) p);
+ fix[ix].list_size--;
+ blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, p);
+ fix[ix].allocated--;
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ return res;
+ }
+ if (size < 2*sizeof(UWord))
+ size += sizeof(UWord);
+ if (fix[ix].limit < fix[ix].used)
+ fix[ix].limit = fix[ix].used;
+ if (fix[ix].max_used < fix[ix].used)
+ fix[ix].max_used = fix[ix].used;
+ fix[ix].allocated++;
+ }
+
if (size >= allctr->sbc_threshold) {
+ Block_t *blk;
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
#if HALFWORD_HEAP
- Block_t *blk = create_carrier(allctr, size,
- CFLG_SBC | CFLG_FORCE_MSEG);
+ blk = create_carrier(allctr, size,
+ CFLG_SBC | CFLG_FORCE_MSEG);
#else
- Block_t *blk = create_carrier(allctr, size, CFLG_SBC);
+ blk = create_carrier(allctr, size, CFLG_SBC);
#endif
res = blk ? BLK2UMEM(blk) : NULL;
}
else
res = mbc_alloc(allctr, size);
+ if (!res && fix) {
+ int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ fix[ix].allocated--;
+ fix[ix].used--;
+ }
return res;
}
@@ -2818,29 +3421,28 @@ erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
return res;
}
+#ifdef ERTS_SMP
+
void *
erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
+ int ix;
Allctr_t *allctr;
- int unlock;
void *res;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
+ ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- }
res = do_erts_alcu_alloc(type, allctr, size);
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
@@ -2851,51 +3453,96 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
void *
erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
- Allctr_t *allctr;
+ int pref_ix;
+ Allctr_t *pref_allctr;
void *res;
- ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
- ASSERT(ix > 0);
- if (ix >= tspec->size)
- ix = (ix % (tspec->size - 1)) + 1;
- allctr = tspec->allctr[ix];
- erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_alloc(type, allctr, size + sizeof(UWord));
- if (res) {
- *((Allctr_t **) res) = allctr;
- res = (void *) (((char *) res) + sizeof(UWord));
- }
- erts_mtx_unlock(&allctr->mutex);
+ pref_ix = get_pref_allctr(extra, &pref_allctr);
+
+ if (pref_allctr->thread_safe)
+ erts_mtx_lock(&pref_allctr->mutex);
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
+
+ res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
+ if (pref_allctr->thread_safe)
+ erts_mtx_unlock(&pref_allctr->mutex);
+
+ if (res)
+ res = put_used_allctr(res, pref_ix, size);
+
DEBUG_CHECK_ALIGNMENT(res);
+
+
return res;
}
#endif
+#endif
+
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void
do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
{
+ int ix;
Allctr_t *allctr = (Allctr_t *) extra;
ASSERT(initialized);
ASSERT(allctr);
- ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+ ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ || erts_lc_mtx_is_locked(&allctr->mutex));
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (p) {
+ ErtsAlcFixList_t *fix = allctr->fix;
Block_t *blk;
INC_CC(allctr->calls.this_free);
+ if (fix) {
+ ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix[ix].used--;
+ if (fix[ix].allocated < fix[ix].limit
+ && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
+ *((void **) p) = fix[ix].list;
+ fix[ix].list = p;
+ fix[ix].list_size++;
+ if (!allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 1;
+ erts_set_aux_work_timeout(
+ allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 1);
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ return;
+ }
+ fix[ix].allocated--;
+ if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
+ blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, p);
+ p = fix[ix].list;
+ fix[ix].list = *((void **) p);
+ fix[ix].list_size--;
+ fix[ix].allocated--;
+ }
+ }
+
blk = UMEM2BLK(p);
if (IS_SBC_BLK(blk))
destroy_carrier(allctr, blk);
else
mbc_free(allctr, p);
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
}
}
@@ -2915,44 +3562,56 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
erts_mtx_unlock(&allctr->mutex);
}
+#ifdef ERTS_SMP
+
void
erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
- int unlock;
+ int ix;
Allctr_t *allctr;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
+ ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- }
do_erts_alcu_free(type, allctr, p);
- if (unlock)
+
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
}
void
-erts_alcu_free_thr_pref(ErtsAlcType_t type, void *unused, void *p)
+erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
{
if (p) {
- void *ptr = (void *) (((char *) p) - sizeof(UWord));
- Allctr_t *allctr = *((Allctr_t **) ptr);
- erts_mtx_lock(&allctr->mutex);
- do_erts_alcu_free(type, allctr, ptr);
- erts_mtx_unlock(&allctr->mutex);
+ Allctr_t *pref_allctr, *used_allctr;
+ void *ptr;
+
+ get_pref_allctr(extra, &pref_allctr);
+ ptr = get_used_allctr(extra, p, &used_allctr, NULL);
+ if (pref_allctr != used_allctr)
+ enqueue_dealloc_other_instance(type, used_allctr, ptr);
+ else {
+ if (used_allctr->thread_safe)
+ erts_mtx_lock(&used_allctr->mutex);
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
+ do_erts_alcu_free(type, used_allctr, ptr);
+ if (used_allctr->thread_safe)
+ erts_mtx_unlock(&used_allctr->mutex);
+ }
}
}
#endif
+#endif
+
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void *
@@ -2970,7 +3629,10 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
ASSERT(allctr);
- ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+ ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ || erts_lc_mtx_is_locked(&allctr->mutex));
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (!p) {
res = do_erts_alcu_alloc(type, extra, size);
@@ -3063,6 +3725,10 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
}
else {
Block_t *new_blk;
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
if(IS_SBC_BLK(blk)) {
do_carrier_resize:
#if HALFWORD_HEAP
@@ -3166,30 +3832,29 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
return res;
}
+#ifdef ERTS_SMP
+
void *
erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
void *ptr, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
+ int ix;
Allctr_t *allctr;
- int unlock;
void *res;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
+ ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- }
res = do_erts_alcu_realloc(type, allctr, ptr, size, 0);
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
@@ -3202,26 +3867,22 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
void *ptr, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
+ int ix;
Allctr_t *allctr;
- int unlock;
void *res;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
- erts_mtx_lock(&allctr->mutex);
- }
+ ix = ERTS_ALC_GET_THR_IX();
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_alloc(type, allctr, size);
if (!res) {
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
res = erts_alcu_realloc_thr_spec(type, allctr, ptr, size);
}
@@ -3235,7 +3896,7 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
cpy_size = size;
sys_memcpy(res, ptr, cpy_size);
do_erts_alcu_free(type, allctr, ptr);
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
}
@@ -3244,129 +3905,101 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
return res;
}
-void *
-erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
+static ERTS_INLINE void *
+realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
+ int force_move)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix;
+ int pref_ix;
void *ptr, *res;
Allctr_t *pref_allctr, *used_allctr;
+ UWord old_user_size;
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
- ptr = (void *) (((char *) p) - sizeof(UWord));
- used_allctr = *((Allctr_t **) ptr);
+ pref_ix = get_pref_allctr(extra, &pref_allctr);
+ ptr = get_used_allctr(extra, p, &used_allctr, &old_user_size);
- ix = erts_alc_get_thr_ix();
- ASSERT(ix > 0);
- if (ix >= tspec->size)
- ix = (ix % (tspec->size - 1)) + 1;
- pref_allctr = tspec->allctr[ix];
ASSERT(used_allctr && pref_allctr);
- erts_mtx_lock(&used_allctr->mutex);
- res = do_erts_alcu_realloc(type,
- used_allctr,
- ptr,
- size + sizeof(UWord),
- (pref_allctr != used_allctr
- ? ERTS_ALCU_FLG_FAIL_REALLOC_MOVE
- : 0));
- erts_mtx_unlock(&used_allctr->mutex);
- if (res) {
- ASSERT(used_allctr == *((Allctr_t **) res));
- res = (void *) (((char *) res) + sizeof(UWord));
- DEBUG_CHECK_ALIGNMENT(res);
+ if (!force_move && used_allctr == pref_allctr) {
+ if (used_allctr->thread_safe)
+ erts_mtx_lock(&used_allctr->mutex);
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
+ res = do_erts_alcu_realloc(type,
+ used_allctr,
+ ptr,
+ size + sizeof(UWord),
+ 0);
+ if (used_allctr->thread_safe)
+ erts_mtx_unlock(&used_allctr->mutex);
+ if (res)
+ res = put_used_allctr(res, pref_ix, size);
}
else {
- erts_mtx_lock(&pref_allctr->mutex);
+ if (pref_allctr->thread_safe)
+ erts_mtx_lock(&pref_allctr->mutex);
res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
- erts_mtx_unlock(&pref_allctr->mutex);
+ if (pref_allctr->thread_safe && (!force_move
+ || used_allctr != pref_allctr))
+ erts_mtx_unlock(&pref_allctr->mutex);
if (res) {
Block_t *blk;
size_t cpy_size;
- *((Allctr_t **) res) = pref_allctr;
- res = (void *) (((char *) res) + sizeof(UWord));
+ res = put_used_allctr(res, pref_ix, size);
DEBUG_CHECK_ALIGNMENT(res);
- erts_mtx_lock(&used_allctr->mutex);
blk = UMEM2BLK(ptr);
- cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(UWord);
+ if (old_user_size != ERTS_AU_PREF_ALLOC_SIZE_MASK)
+ cpy_size = old_user_size;
+ else {
+ if (used_allctr->thread_safe && (!force_move
+ || used_allctr != pref_allctr))
+ erts_mtx_lock(&used_allctr->mutex);
+ ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&used_allctr->mutex));
+ cpy_size = BLK_SZ(blk);
+ if (used_allctr->thread_safe && (!force_move
+ || used_allctr != pref_allctr))
+ erts_mtx_unlock(&used_allctr->mutex);
+ cpy_size -= ABLK_HDR_SZ + sizeof(UWord);
+ }
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, used_allctr, ptr);
- erts_mtx_unlock(&used_allctr->mutex);
+
+ if (!force_move || used_allctr != pref_allctr)
+ enqueue_dealloc_other_instance(type, used_allctr, ptr);
+ else {
+ do_erts_alcu_free(type, used_allctr, ptr);
+ ASSERT(pref_allctr == used_allctr);
+ if (pref_allctr->thread_safe)
+ erts_mtx_unlock(&pref_allctr->mutex);
+ }
}
}
return res;
}
+void *
+erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ return realloc_thr_pref(type, extra, p, size, 0);
+}
void *
erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
void *p, Uint size)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix;
- void *ptr, *res;
- Allctr_t *pref_allctr, *used_allctr;
-
- if (!p)
- return erts_alcu_alloc_thr_pref(type, extra, size);
-
- ptr = (void *) (((char *) p) - sizeof(UWord));
- used_allctr = *((Allctr_t **) ptr);
-
- ix = erts_alc_get_thr_ix();
- ASSERT(ix > 0);
- if (ix >= tspec->size)
- ix = (ix % (tspec->size - 1)) + 1;
- pref_allctr = tspec->allctr[ix];
- ASSERT(used_allctr && pref_allctr);
-
- erts_mtx_lock(&pref_allctr->mutex);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
- if (!res) {
- erts_mtx_unlock(&pref_allctr->mutex);
- res = erts_alcu_realloc_thr_pref(type, extra, p, size);
- }
- else {
- Block_t *blk;
- size_t cpy_size;
- Allctr_t *allctr;
-
- *((Allctr_t **) res) = pref_allctr;
- res = (void *) (((char *) res) + sizeof(UWord));
-
- DEBUG_CHECK_ALIGNMENT(res);
-
- if (used_allctr == pref_allctr)
- allctr = pref_allctr;
- else {
- erts_mtx_unlock(&pref_allctr->mutex);
- allctr = used_allctr;
- erts_mtx_lock(&allctr->mutex);
- }
-
- blk = UMEM2BLK(ptr);
- cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(UWord);
- if (cpy_size > size)
- cpy_size = size;
- sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, allctr, ptr);
- erts_mtx_unlock(&allctr->mutex);
- }
-
- return res;
+ return realloc_thr_pref(type, extra, p, size, 1);
}
#endif
+#endif
+
/* ------------------------------------------------------------------------- */
int
@@ -3381,6 +4014,10 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
sys_memcpy((void *) &allctr->mseg_opt,
(void *) &erts_mseg_default_opt,
sizeof(ErtsMsegOpt_t));
+#ifdef ERTS_SMP
+ if (init->tspec || init->tpref)
+ allctr->mseg_opt.sched_spec = 1;
+#endif
# if HALFWORD_HEAP
allctr->mseg_opt.low_mem = init->low_mem;
# endif
@@ -3390,6 +4027,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (!allctr->name_prefix)
goto error;
+ allctr->ix = init->ix;
allctr->alloc_no = init->alloc_no;
if (allctr->alloc_no < ERTS_ALC_A_MIN
|| ERTS_ALC_A_MAX < allctr->alloc_no)
@@ -3431,6 +4069,18 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ sizeof(UWord));
+#if ERTS_SMP
+ if (init->tpref) {
+ Uint sz = sizeof(Block_t);
+ sz += ERTS_ALCU_DD_FIX_TYPE_OFFS*sizeof(UWord);
+ if (init->fix)
+ sz += sizeof(UWord);
+ sz = UNIT_CEILING(sz);
+ if (sz > allctr->min_block_size)
+ allctr->min_block_size = sz;
+ }
+#endif
+
allctr->sbmbc_threshold = init->sbmbct;
@@ -3493,7 +4143,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (allctr->mbc_header_size < sizeof(Carrier_t))
goto error;
-#ifdef USE_THREADS
+#ifdef ERTS_SMP
+ allctr->dd.use = 0;
if (init->tpref) {
allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ FBLK_FTR_SZ
@@ -3507,6 +4158,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
+ sizeof(UWord))
- ABLK_HDR_SZ
- sizeof(UWord));
+
+ allctr->dd.use = 1;
+ init_dd_queue(&allctr->dd.q);
}
else
#endif
@@ -3548,6 +4202,21 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
}
+ if (init->fix) {
+ int i;
+ allctr->fix = init->fix;
+ allctr->fix_shrink_scheduled = 0;
+ for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) {
+ allctr->fix[i].max_used = 0;
+ allctr->fix[i].limit = 0;
+ allctr->fix[i].type_size = init->fix_type_size[i];
+ allctr->fix[i].list_size = 0;
+ allctr->fix[i].list = NULL;
+ allctr->fix[i].allocated = 0;
+ allctr->fix[i].used = 0;
+ }
+ }
+
return 1;
error:
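
The fix[ix].list handling added above caches freed blocks of fixed-size
types on an intrusive singly linked list, storing the link pointer in the
first word of the freed block itself. A minimal standalone sketch of that
scheme follows; plain malloc/free stand in for the carrier/mbc code, and
the names are illustrative rather than the ERTS ones. As in the patch,
each block must be large enough to hold a pointer for the link.

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct {
        size_t type_size;   /* size of one block of this fixed type     */
        void  *list;        /* head of the free list (NULL when empty)  */
        long   list_size;   /* number of blocks currently cached        */
    } FixCache;

    static void *fix_alloc(FixCache *fc)
    {
        void *p = fc->list;
        if (p) {                           /* reuse a cached block       */
            fc->list = *((void **) p);     /* link is stored in the block */
            fc->list_size--;
            return p;
        }
        return malloc(fc->type_size);      /* fall back to the allocator */
    }

    static void fix_free(FixCache *fc, void *p)
    {
        *((void **) p) = fc->list;         /* push onto the free list    */
        fc->list = p;
        fc->list_size++;
    }
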
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index fed4d3dbe6..df560a0de2 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -20,10 +20,13 @@
#ifndef ERL_ALLOC_UTIL__
#define ERL_ALLOC_UTIL__
-#define ERTS_ALCU_VSN_STR "2.2"
+#define ERTS_ALCU_VSN_STR "3.0"
#include "erl_alloc_types.h"
+#define ERTS_AU_PREF_ALLOC_BITS 11
+#define ERTS_AU_MAX_PREF_ALLOC_INSTANCES (1 << ERTS_AU_PREF_ALLOC_BITS)
+
typedef struct Allctr_t_ Allctr_t;
typedef struct {
@@ -35,6 +38,7 @@ typedef struct {
char *name_prefix;
ErtsAlcType_t alloc_no;
int force;
+ int ix;
int ts;
int tspec;
int tpref;
@@ -53,6 +57,9 @@ typedef struct {
UWord mbcgs;
UWord sbmbct;
UWord sbmbcs;
+
+ void *fix;
+ size_t *fix_type_size;
} AllctrInit_t;
typedef struct {
@@ -60,6 +67,11 @@ typedef struct {
UWord carriers;
} AllctrSize_t;
+typedef struct {
+ UWord allocated;
+ UWord used;
+} ErtsAlcUFixInfo_t;
+
#ifndef SMALL_MEMORY
#define ERTS_DEFAULT_ALCU_INIT { \
@@ -71,6 +83,7 @@ typedef struct {
NULL, \
ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
0, /* (bool) force: force enabled */\
+ 0, /* (number) ix: instance index */\
1, /* (bool) ts: thread safe */\
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
@@ -88,7 +101,10 @@ typedef struct {
1024*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
256, /* (bytes) sbmbct: small block mbc threshold */\
- 8*1024 /* (bytes) sbmbcs: small block mbc size */\
+ 8*1024, /* (bytes) sbmbcs: small block mbc size */ \
+ /* --- Data not options -------------------------------------------- */\
+ NULL, /* (ptr) fix */\
+ NULL /* (ptr) fix_type_size */\
}
#else /* if SMALL_MEMORY */
@@ -102,6 +118,7 @@ typedef struct {
NULL, \
ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
0, /* (bool) force: force enabled */\
+ 0, /* (number) ix: instance index */\
1, /* (bool) ts: thread safe */\
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
@@ -118,7 +135,10 @@ typedef struct {
128*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
256, /* (bytes) sbmbct: small block mbc threshold */\
- 8*1024 /* (bytes) sbmbcs: small block mbc size */\
+ 8*1024, /* (bytes) sbmbcs: small block mbc size */ \
+ /* --- Data not options -------------------------------------------- */\
+ NULL, /* (ptr) fix */\
+ NULL /* (ptr) fix_type_size */\
}
#endif
@@ -132,6 +152,7 @@ void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_ts(ErtsAlcType_t, void *, void *);
+#ifdef ERTS_SMP
void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint);
@@ -141,12 +162,16 @@ void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *);
#endif
+#endif
Eterm erts_alcu_au_info_options(int *, void *, Uint **, Uint *);
Eterm erts_alcu_info_options(Allctr_t *, int *, void *, Uint **, Uint *);
Eterm erts_alcu_sz_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
Eterm erts_alcu_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
void erts_alcu_init(AlcUInit_t *);
-void erts_alcu_current_size(Allctr_t *, AllctrSize_t *);
+void erts_alcu_current_size(Allctr_t *, AllctrSize_t *,
+ ErtsAlcUFixInfo_t *, int);
+void erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, int *);
+erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#endif
@@ -246,7 +271,74 @@ typedef struct {
} blocks;
} CarriersStats_t;
+#ifdef ERTS_SMP
+
+typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
+
+union ErtsAllctrDDBlock_t_ {
+ erts_atomic_t atmc_next;
+ ErtsAllctrDDBlock_t *ptr_next;
+};
+
+typedef struct {
+ ErtsAllctrDDBlock_t marker;
+ erts_atomic_t last;
+ erts_atomic_t um_refc[2];
+ erts_atomic32_t um_refc_ix;
+} ErtsDDTail_t;
+
+typedef struct {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /* Modified by threads returning memory to this allocator */
+ ErtsDDTail_t data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsDDTail_t))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread owning the allocator.
+ */
+ struct {
+ ErtsAllctrDDBlock_t *first;
+ ErtsAllctrDDBlock_t *unref_end;
+ struct {
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+ int um_refc_ix;
+ ErtsAllctrDDBlock_t *unref_end;
+ } next;
+ int used_marker;
+ } head;
+} ErtsAllctrDDQueue_t;
+
+#endif
+
+typedef struct {
+ size_t type_size;
+ SWord list_size;
+ void *list;
+ SWord max_used;
+ SWord limit;
+ SWord allocated;
+ SWord used;
+} ErtsAlcFixList_t;
+
struct Allctr_t_ {
+#ifdef ERTS_SMP
+ struct {
+ /*
+ * We want the queue at the beginning of
+ * the Allctr_t struct, due to cache line
+ * alignment reasons.
+ */
+ ErtsAllctrDDQueue_t q;
+ int use;
+ int ix;
+ } dd;
+#endif
/* Allocator name prefix */
char * name_prefix;
@@ -254,6 +346,9 @@ struct Allctr_t_ {
/* Allocator number */
ErtsAlcType_t alloc_no;
+ /* Instance index */
+ int ix;
+
/* Alloc, realloc and free names as atoms */
struct {
Eterm alloc;
@@ -278,6 +373,7 @@ struct Allctr_t_ {
Uint mbc_growth_stages;
Uint sbmbc_threshold;
Uint sbmbc_size;
+
#if HAVE_ERTS_MSEG
ErtsMsegOpt_t mseg_opt;
#endif
@@ -315,6 +411,10 @@ struct Allctr_t_ {
void (*check_mbc) (Allctr_t *, Carrier_t *);
#endif
+ int fix_n_base;
+ int fix_shrink_scheduled;
+ ErtsAlcFixList_t *fix;
+
#ifdef USE_THREADS
/* Mutex for this allocator */
erts_mtx_t mutex;
@@ -323,6 +423,7 @@ struct Allctr_t_ {
Allctr_t *prev;
Allctr_t *next;
} ts_list;
+
#endif
int atoms_initialized;
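
The new ErtsAllctrDDQueue_t above keeps the tail (written by other threads
returning memory) and the head (touched only by the owning thread) on
separate cache lines by padding the tail with a union. A minimal sketch of
that padding idiom, assuming a 64-byte cache line (the real code uses
ERTS_ALC_CACHE_LINE_ALIGN_SIZE and the erts atomic types):

    #include <stdatomic.h>

    #define CACHE_LINE 64
    #define LINE_ALIGN(sz) ((((sz) + CACHE_LINE - 1) / CACHE_LINE) * CACHE_LINE)

    typedef struct {
        _Atomic(void *) last;                 /* written by enqueuing threads */
    } Tail;

    typedef struct {
        union {
            Tail data;
            char pad[LINE_ALIGN(sizeof(Tail))]; /* round up to a full line    */
        } tail;
        /* Everything below is only touched by the owning thread. */
        void *first;
        void *unref_end;
    } DDQueueSketch;
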
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 002852cdad..5bdb752d3a 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -170,14 +170,18 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
AOFFAllctrInit_t* aoffinit,
AllctrInit_t *init)
{
- AOFFAllctr_t nulled_state = {{0}};
- /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
- warning. gcc warns if {0} is used as initializer of a struct when
- the first member is a struct (not if, for example, the third member
- is a struct). */
+ struct {
+ int dummy;
+ AOFFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
Allctr_t *allctr = (Allctr_t *) alc;
- sys_memcpy((void *) alc, (void *) &nulled_state, sizeof(AOFFAllctr_t));
+ sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t));
allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
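
The initializer change above works around a gcc "missing braces" warning:
some gcc versions warn when {0} initializes a struct whose first member is
itself a struct. Wrapping the target in a local struct whose first member
is a plain int silences the (incorrect) warning while still zero-initializing
everything. A small self-contained sketch of the idiom:

    struct inner { int a, b; };
    struct outer { struct inner in; int c; };

    void zero_init(struct outer *dst)
    {
        struct {
            int dummy;                  /* plain first member: no warning */
            struct outer o;
        } zero = {0};                   /* everything zero-initialized    */

        *dst = zero.o;                  /* copy the zeroed state over     */
    }
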
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
index 0bf0ec8cee..6fa626f723 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.h
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c
index 64fad9fe0e..5150a8a507 100644
--- a/erts/emulator/beam/erl_arith.c
+++ b/erts/emulator/beam/erl_arith.c
@@ -164,14 +164,14 @@ BIF_RETTYPE bxor_2(BIF_ALIST_2)
BIF_RET(erts_bxor(BIF_P, BIF_ARG_1, BIF_ARG_2));
}
-BIF_RETTYPE bsl_2(Process* p, Eterm arg1, Eterm arg2)
+BIF_RETTYPE bsl_2(BIF_ALIST_2)
{
- BIF_RET(shift(p, arg1, arg2, 0));
+ BIF_RET(shift(BIF_P, BIF_ARG_1, BIF_ARG_2, 0));
}
-BIF_RETTYPE bsr_2(Process* p, Eterm arg1, Eterm arg2)
+BIF_RETTYPE bsr_2(BIF_ALIST_2)
{
- BIF_RET(shift(p, arg1, arg2, 1));
+ BIF_RET(shift(BIF_P, BIF_ARG_1, BIF_ARG_2, 1));
}
static Eterm
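
The bsl_2/bsr_2 change above is part of the switch to the BIF_ALIST_n
calling convention, where a BIF picks up its arguments through BIF_ARG_n
macros instead of named C parameters. The actual macros live in bif.h,
which is changed elsewhere in this commit; a hedged sketch of definitions
along these lines, shown only to make the rewritten signatures readable:

    /* Assumed shape, not the verbatim ERTS definitions. */
    #define BIF_ALIST_2  Process* A__p, Eterm* BIF__ARGS
    #define BIF_P        A__p
    #define BIF_ARG_1    (BIF__ARGS[0])
    #define BIF_ARG_2    (BIF__ARGS[1])

Passing the arguments as an array lets the emulator invoke BIFs of any
arity through a single function-pointer type.
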
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 91b64411d4..2dc7237f7c 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -24,10 +24,18 @@
#include "erl_sys_driver.h"
#include "global.h"
#include "erl_threads.h"
+#include "erl_thr_queue.h"
+#include "erl_async.h"
+
+#define ERTS_MAX_ASYNC_READY_CALLS_IN_SEQ 20
+
+#define ERTS_ASYNC_PRINT_JOB 0
+
+#if !defined(ERTS_SMP) && defined(USE_THREADS) && !ERTS_USE_ASYNC_READY_Q
+# error "Need async ready queue in non-smp case"
+#endif
typedef struct _erl_async {
- struct _erl_async* next;
- struct _erl_async* prev;
DE_Handle* hndl; /* The DE_Handle is needed when port is gone */
Eterm port;
long async_id;
@@ -35,345 +43,498 @@ typedef struct _erl_async {
ErlDrvPDL pdl;
void (*async_invoke)(void*);
void (*async_free)(void*);
-} ErlAsync;
+#if ERTS_USE_ASYNC_READY_Q
+ Uint sched_id;
+ union {
+ ErtsThrQPrepEnQ_t *prep_enq;
+ ErtsThrQFinDeQ_t fin_deq;
+ } q;
+#endif
+} ErtsAsync;
+
+#if ERTS_USE_ASYNC_READY_Q
+
+/*
+ * We can do without the enqueue mutex since it isn't needed for
+ * thread safety. Its only purpose is to put async threads to sleep
+ * during a blast of ready async jobs, in order to reduce
+ * contention on the enqueue end of the async ready queues. During
+ * such a blast, without the enqueue mutex much cpu time would be
+ * consumed by the async threads without them making much progress,
+ * which in turn slows down progress of the scheduler threads.
+ */
+#define ERTS_USE_ASYNC_READY_ENQ_MTX 1
+
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
typedef struct {
- erts_mtx_t mtx;
- erts_cnd_t cv;
- erts_tid_t thr;
- int len;
-#ifndef ERTS_SMP
- int hndl;
+ erts_mtx_t enq_mtx;
+} ErtsAsyncReadyQXData;
+
#endif
- ErlAsync* head;
- ErlAsync* tail;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- int no;
+
+typedef struct {
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ union {
+ ErtsAsyncReadyQXData data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(ErtsAsyncReadyQXData))];
+ } x;
#endif
-} AsyncQueue;
+ ErtsThrQ_t thr_q;
+ ErtsThrQFinDeQ_t fin_deq;
+} ErtsAsyncReadyQ;
-static erts_smp_spinlock_t async_id_lock;
-static long async_id = 0;
+typedef union {
+ ErtsAsyncReadyQ arq;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncReadyQ))];
+} ErtsAlgndAsyncReadyQ;
-#ifndef ERTS_SMP
+#endif /* ERTS_USE_ASYNC_READY_Q */
-erts_mtx_t async_ready_mtx;
-static ErlAsync* async_ready_list = NULL;
+typedef struct {
+ ErtsThrQ_t thr_q;
+ erts_tid_t thr_id;
+} ErtsAsyncQ;
+
+typedef union {
+ ErtsAsyncQ aq;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncQ))];
+} ErtsAlgndAsyncQ;
+typedef struct {
+ int no_initialized;
+ erts_mtx_t mtx;
+ erts_cnd_t cnd;
+ erts_atomic_t id;
+} ErtsAsyncInit;
+
+typedef struct {
+ union {
+ ErtsAsyncInit data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncInit))];
+ } init;
+ ErtsAlgndAsyncQ *queue;
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsAlgndAsyncReadyQ *ready_queue;
#endif
+} ErtsAsyncData;
-/*
-** Initialize worker threads (if supported)
-*/
+int erts_async_max_threads; /* Initialized by erl_init.c */
+int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */
-/* Detach from driver */
-static void async_detach(DE_Handle* dh)
-{
- return;
-}
+static ErtsAsyncData *async;
+#ifndef USE_THREADS
-#ifdef USE_THREADS
+void
+erts_init_async(void)
+{
-static AsyncQueue* async_q;
+}
-static void* async_main(void*);
-static void async_add(ErlAsync*, AsyncQueue*);
+#else
-#ifndef ERTS_SMP
-typedef struct ErtsAsyncReadyCallback_ ErtsAsyncReadyCallback;
-struct ErtsAsyncReadyCallback_ {
- struct ErtsAsyncReadyCallback_ *next;
- void (*callback)(void);
-};
+static void *async_main(void *);
-static ErtsAsyncReadyCallback *callbacks;
-static int async_handle;
+static ERTS_INLINE ErtsAsyncQ *
+async_q(int i)
+{
+ return &async->queue[i].aq;
+}
+
+#if ERTS_USE_ASYNC_READY_Q
-int erts_register_async_ready_callback(void (*funcp)(void))
+static ERTS_INLINE ErtsAsyncReadyQ *
+async_ready_q(Uint sched_id)
{
- ErtsAsyncReadyCallback *cb = erts_alloc(ERTS_ALC_T_ARCALLBACK,
- sizeof(ErtsAsyncReadyCallback));
- cb->next = callbacks;
- cb->callback = funcp;
- erts_mtx_lock(&async_ready_mtx);
- callbacks = cb;
- erts_mtx_unlock(&async_ready_mtx);
- return async_handle;
+ return &async->ready_queue[((int)sched_id)-1].arq;
}
+
#endif
-int init_async(int hndl)
+void
+erts_init_async(void)
{
- erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
- AsyncQueue* q;
- int i;
+ async = NULL;
+ if (erts_async_max_threads > 0) {
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
+#endif
+ erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
+ char *ptr;
+ size_t tot_size = 0;
+ int i;
+
+ tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
+ tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
+#if ERTS_USE_ASYNC_READY_Q
+ tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
+#endif
- thr_opts.detached = 0;
- thr_opts.suggested_stack_size = erts_async_thread_suggested_stack_size;
-
-#ifndef ERTS_SMP
- callbacks = NULL;
- async_handle = hndl;
- erts_mtx_init(&async_ready_mtx, "async_ready");
- async_ready_list = NULL;
-#endif
-
- async_id = 0;
- erts_smp_spinlock_init(&async_id_lock, "async_id");
-
- async_q = q = (AsyncQueue*)
- (erts_async_max_threads
- ? erts_alloc(ERTS_ALC_T_ASYNC_Q,
- erts_async_max_threads * sizeof(AsyncQueue))
- : NULL);
- for (i = 0; i < erts_async_max_threads; i++) {
- q->head = NULL;
- q->tail = NULL;
- q->len = 0;
-#ifndef ERTS_SMP
- q->hndl = hndl;
-#endif
-#ifdef ERTS_ENABLE_LOCK_CHECK
- q->no = i;
-#endif
- erts_mtx_init(&q->mtx, "asyncq");
- erts_cnd_init(&q->cv);
- erts_thr_create(&q->thr, async_main, (void*)q, &thr_opts);
- q++;
- }
- return 0;
-}
+ ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA,
+ tot_size);
+ async = (ErtsAsyncData *) ptr;
+ ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
-int exit_async()
-{
- int i;
+ async->init.data.no_initialized = 0;
+ erts_mtx_init(&async->init.data.mtx, "async_init_mtx");
+ erts_cnd_init(&async->init.data.cnd);
+ erts_atomic_init_nob(&async->init.data.id, 0);
- /* terminate threads */
- for (i = 0; i < erts_async_max_threads; i++) {
- ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC,
- sizeof(ErlAsync));
- a->port = NIL;
- async_add(a, &async_q[i]);
- }
+ async->queue = (ErtsAlgndAsyncQ *) ptr;
+ ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
- for (i = 0; i < erts_async_max_threads; i++) {
- erts_thr_join(async_q[i].thr, NULL);
- erts_mtx_destroy(&async_q[i].mtx);
- erts_cnd_destroy(&async_q[i].cv);
- }
-#ifndef ERTS_SMP
- erts_mtx_destroy(&async_ready_mtx);
+#if ERTS_USE_ASYNC_READY_Q
+
+ qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
+ qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
+ qinit.notify = erts_notify_check_async_ready_queue;
+
+ async->ready_queue = (ErtsAlgndAsyncReadyQ *) ptr;
+ ptr += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
+
+ for (i = 1; i <= erts_no_schedulers; i++) {
+ ErtsAsyncReadyQ *arq = async_ready_q(i);
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx");
#endif
- if (async_q)
- erts_free(ERTS_ALC_T_ASYNC_Q, (void *) async_q);
- return 0;
+ erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
+ qinit.arg = (void *) (SWord) i;
+ erts_thr_q_initialize(&arq->thr_q, &qinit);
+ }
+
+#endif
+
+ /* Create async threads... */
+
+ thr_opts.detached = 0;
+ thr_opts.suggested_stack_size
+ = erts_async_thread_suggested_stack_size;
+
+ for (i = 0; i < erts_async_max_threads; i++) {
+ ErtsAsyncQ *aq = async_q(i);
+ erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
+ }
+
+ /* Wait for async threads to initialize... */
+
+ erts_mtx_lock(&async->init.data.mtx);
+ while (async->init.data.no_initialized != erts_async_max_threads)
+ erts_cnd_wait(&async->init.data.cnd, &async->init.data.mtx);
+ erts_mtx_unlock(&async->init.data.mtx);
+
+ erts_mtx_destroy(&async->init.data.mtx);
+ erts_cnd_destroy(&async->init.data.cnd);
+
+ }
}
+#if ERTS_USE_ASYNC_READY_Q
-static void async_add(ErlAsync* a, AsyncQueue* q)
+void *
+erts_get_async_ready_queue(Uint sched_id)
+{
+ return (void *) async ? async_ready_q(sched_id) : NULL;
+}
+
+#endif
+
+static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
{
if (is_internal_port(a->port)) {
- ERTS_LC_ASSERT(erts_drvportid2port(a->port));
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id);
+ a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q);
+#endif
/* make sure the driver will stay around */
- driver_lock_driver(internal_port_index(a->port));
+ if (a->hndl)
+ erts_ddll_reference_referenced_driver(a->hndl);
}
- erts_mtx_lock(&q->mtx);
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "-> %ld\n", a->async_id);
+#endif
- if (q->len == 0) {
- q->head = a;
- q->tail = a;
- q->len = 1;
- erts_cnd_signal(&q->cv);
- }
- else { /* no need to signal (since the worker is working) */
- a->next = q->head;
- q->head->prev = a;
- q->head = a;
- q->len++;
- }
- erts_mtx_unlock(&q->mtx);
+ erts_thr_q_enqueue(&q->thr_q, a);
}
-static ErlAsync* async_get(AsyncQueue* q)
+static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
+ erts_tse_t *tse,
+ ErtsThrQPrepEnQ_t **prep_enq)
{
- ErlAsync* a;
+#if ERTS_USE_ASYNC_READY_Q
+ int saved_fin_deq = 0;
+ ErtsThrQFinDeQ_t fin_deq;
+#endif
- erts_mtx_lock(&q->mtx);
- while((a = q->tail) == NULL) {
- erts_cnd_wait(&q->cv, &q->mtx);
- }
+ while (1) {
+ ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
+ if (a) {
+
+#if ERTS_USE_ASYNC_READY_Q
+ *prep_enq = a->q.prep_enq;
+ erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq);
+ if (saved_fin_deq)
+ erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq);
+#endif
+
+ return a;
+ }
+
+ if (ERTS_THR_Q_DIRTY != erts_thr_q_clean(q)) {
+ ErtsThrQFinDeQ_t tmp_fin_deq;
+
+ erts_tse_reset(tse);
+
+#if ERTS_USE_ASYNC_READY_Q
+ chk_fin_deq:
+ if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) {
+ if (!saved_fin_deq) {
+ erts_thr_q_finalize_dequeue_state_init(&fin_deq);
+ saved_fin_deq = 1;
+ }
+ erts_thr_q_append_finalize_dequeue_data(&fin_deq,
+ &tmp_fin_deq);
+ }
+#endif
+
+ switch (erts_thr_q_inspect(q, 1)) {
+ case ERTS_THR_Q_DIRTY:
+ break;
#ifdef ERTS_SMP
- ASSERT(a && q->tail == a);
+ case ERTS_THR_Q_NEED_THR_PRGR: {
+ ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
+ erts_thr_progress_wakeup(NULL, prgr);
+ /*
+ * We do no dequeue finalizing here, in the hope that a new
+ * async job will arrive before we are woken due to thread
+ * progress...
+ */
+ erts_tse_wait(tse);
+ break;
+ }
#endif
- if (q->head == q->tail) {
- q->head = q->tail = NULL;
- q->len = 0;
- }
- else {
- q->tail->prev->next = NULL;
- q->tail = q->tail->prev;
- q->len--;
+ case ERTS_THR_Q_CLEAN:
+
+#if ERTS_USE_ASYNC_READY_Q
+ if (saved_fin_deq) {
+ if (erts_thr_q_finalize_dequeue(&fin_deq))
+ goto chk_fin_deq;
+ else
+ saved_fin_deq = 0;
+ }
+#endif
+
+ erts_tse_wait(tse);
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ }
}
- erts_mtx_unlock(&q->mtx);
- return a;
}
-
-static int async_del(long id)
+static ERTS_INLINE void call_async_ready(ErtsAsync *a)
{
- int i;
- /* scan all queue for an entry with async_id == 'id' */
-
- for (i = 0; i < erts_async_max_threads; i++) {
- ErlAsync* a;
- erts_mtx_lock(&async_q[i].mtx);
-
- a = async_q[i].head;
- while(a != NULL) {
- if (a->async_id == id) {
- if (a->prev != NULL)
- a->prev->next = a->next;
- else
- async_q[i].head = a->next;
- if (a->next != NULL)
- a->next->prev = a->prev;
- else
- async_q[i].tail = a->prev;
- async_q[i].len--;
- erts_mtx_unlock(&async_q[i].mtx);
- if (a->async_free != NULL)
- a->async_free(a->async_data);
- async_detach(a->hndl);
- erts_free(ERTS_ALC_T_ASYNC, a);
- return 1;
- }
- a = a->next;
+ Port *p = erts_id2port_sflgs(a->port,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+ if (!p) {
+ if (a->async_free)
+ a->async_free(a->async_data);
+ }
+ else {
+ if (async_ready(p, a->async_data)) {
+ if (a->async_free)
+ a->async_free(a->async_data);
}
- erts_mtx_unlock(&async_q[i].mtx);
+ erts_port_release(p);
}
- return 0;
+ if (a->hndl)
+ erts_ddll_dereference_driver(a->hndl);
}
-static void* async_main(void* arg)
+static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
{
- AsyncQueue* q = (AsyncQueue*) arg;
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsAsyncReadyQ *arq;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- {
- char buf[27];
- erts_snprintf(&buf[0], 27, "async %d", q->no);
- erts_lc_set_thread_name(&buf[0]);
- }
+ if (a->pdl)
+ driver_pdl_dec_refc(a->pdl);
+
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "=>> %ld\n", a->async_id);
#endif
- while(1) {
- ErlAsync* a = async_get(q);
+ arq = async_ready_q(a->sched_id);
- if (a->port == NIL) { /* TIME TO DIE SIGNAL */
- erts_free(ERTS_ALC_T_ASYNC, (void *) a);
- break;
- }
- else {
- (*a->async_invoke)(a->async_data);
- /* Major problem if the code for async_invoke
- or async_free is removed during a blocking operation */
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ erts_mtx_lock(&arq->x.data.enq_mtx);
+#endif
+
+ erts_thr_q_enqueue_prepared(&arq->thr_q, (void *) a, prep_enq);
+
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ erts_mtx_unlock(&arq->x.data.enq_mtx);
+#endif
+
+#else /* ERTS_USE_ASYNC_READY_Q */
+
+ call_async_ready(a);
+ if (a->pdl)
+ driver_pdl_dec_refc(a->pdl);
+ erts_free(ERTS_ALC_T_ASYNC, (void *) a);
+
+#endif /* ERTS_USE_ASYNC_READY_Q */
+}
+
+
+static void
+async_wakeup(void *vtse)
+{
+ erts_tse_set((erts_tse_t *) vtse);
+}
+
+static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
+{
+ ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
+ erts_tse_t *tse = erts_tse_fetch();
#ifdef ERTS_SMP
- {
- Port *p;
- p = erts_id2port_sflgs(a->port,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
- if (!p) {
- if (a->async_free)
- (*a->async_free)(a->async_data);
- }
- else {
- if (async_ready(p, a->async_data)) {
- if (a->async_free)
- (*a->async_free)(a->async_data);
- }
- async_detach(a->hndl);
- erts_port_release(p);
- }
- if (a->pdl) {
- driver_pdl_dec_refc(a->pdl);
- }
- erts_free(ERTS_ALC_T_ASYNC, (void *) a);
- }
-#else
- if (a->pdl) {
- driver_pdl_dec_refc(a->pdl);
- }
- erts_mtx_lock(&async_ready_mtx);
- a->next = async_ready_list;
- async_ready_list = a;
- erts_mtx_unlock(&async_ready_mtx);
- sys_async_ready(q->hndl);
+ ErtsThrPrgrCallbacks callbacks;
+
+ callbacks.arg = (void *) tse;
+ callbacks.wakeup = async_wakeup;
+ callbacks.prepare_wait = NULL;
+ callbacks.wait = NULL;
+
+ erts_thr_progress_register_unmanaged_thread(&callbacks);
#endif
- }
- }
- return NULL;
+ qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
+ qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
+ qinit.arg = (void *) tse;
+ qinit.notify = async_wakeup;
+#if ERTS_USE_ASYNC_READY_Q
+ qinit.auto_finalize_dequeue = 0;
+#endif
+
+ erts_thr_q_initialize(&aq->thr_q, &qinit);
+
+ /* Inform main thread that we are done initializing... */
+ erts_mtx_lock(&async->init.data.mtx);
+ async->init.data.no_initialized++;
+ erts_cnd_signal(&async->init.data.cnd);
+ erts_mtx_unlock(&async->init.data.mtx);
+
+ return tse;
}
+static void *async_main(void* arg)
+{
+ ErtsAsyncQ *aq = (ErtsAsyncQ *) arg;
+ erts_tse_t *tse = async_thread_init(aq);
+
+ while (1) {
+ ErtsThrQPrepEnQ_t *prep_enq;
+ ErtsAsync *a = async_get(&aq->thr_q, tse, &prep_enq);
+ if (is_nil(a->port))
+ break; /* Time to die */
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "<- %ld\n", a->async_id);
#endif
-#ifndef ERTS_SMP
+ a->async_invoke(a->async_data);
+
+ async_reply(a, prep_enq);
+ }
+
+ return NULL;
+}
+
+#endif /* USE_THREADS */
-int check_async_ready(void)
+void
+erts_exit_flush_async(void)
{
#ifdef USE_THREADS
- ErtsAsyncReadyCallback *cbs;
+ int i;
+ ErtsAsync a;
+ a.port = NIL;
+ /*
+ * Terminate threads in order to flush queues. We do not
+ * bother to clean everything up since we are about to
+ * terminate the runtime system and a cleanup would only
+ * delay the termination.
+ */
+ for (i = 0; i < erts_async_max_threads; i++)
+ async_add(&a, async_q(i));
+ for (i = 0; i < erts_async_max_threads; i++)
+ erts_thr_join(async->queue[i].aq.thr_id, NULL);
#endif
- ErlAsync* a;
- int count = 0;
+}
- erts_mtx_lock(&async_ready_mtx);
- a = async_ready_list;
- async_ready_list = NULL;
-#ifdef USE_THREADS
- cbs = callbacks;
-#endif
- erts_mtx_unlock(&async_ready_mtx);
-
- while(a != NULL) {
- ErlAsync* a_next = a->next;
- /* Every port not dead */
- Port *p = erts_id2port_sflgs(a->port,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
- if (!p) {
- if (a->async_free)
- (*a->async_free)(a->async_data);
- }
- else {
- count++;
- if (async_ready(p, a->async_data)) {
- if (a->async_free != NULL)
- (*a->async_free)(a->async_data);
- }
- async_detach(a->hndl);
- erts_port_release(p);
+#if defined(USE_THREADS) && ERTS_USE_ASYNC_READY_Q
+
+int erts_check_async_ready(void *varq)
+{
+ ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq;
+ int res = 1;
+ int i;
+
+ for (i = 0; i < ERTS_MAX_ASYNC_READY_CALLS_IN_SEQ; i++) {
+ ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(&arq->thr_q);
+ if (!a) {
+ res = 0;
+ break;
}
+
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "<<= %ld\n", a->async_id);
+#endif
+ erts_thr_q_append_finalize_dequeue_data(&arq->fin_deq, &a->q.fin_deq);
+ call_async_ready(a);
erts_free(ERTS_ALC_T_ASYNC, (void *) a);
- a = a_next;
}
-#ifdef USE_THREADS
- for (; cbs; cbs = cbs->next)
- (*cbs->callback)();
-#endif
- return count;
+
+ erts_thr_q_finalize_dequeue(&arq->fin_deq);
+
+ return res;
}
+int erts_async_ready_clean(void *varq, void *val)
+{
+ ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq;
+ ErtsThrQCleanState_t cstate;
+
+ cstate = erts_thr_q_clean(&arq->thr_q);
+
+ if (erts_thr_q_finalize_dequeue(&arq->fin_deq))
+ return ERTS_ASYNC_READY_DIRTY;
+
+ switch (cstate) {
+ case ERTS_THR_Q_DIRTY:
+ return ERTS_ASYNC_READY_DIRTY;
+#ifdef ERTS_SMP
+ case ERTS_THR_Q_NEED_THR_PRGR:
+ *((ErtsThrPrgrVal *) val)
+ = erts_thr_q_need_thr_progress(&arq->thr_q);
+ return ERTS_ASYNC_READY_NEED_THR_PRGR;
#endif
+ case ERTS_THR_Q_CLEAN:
+ break;
+ }
+ return ERTS_ASYNC_READY_CLEAN;
+}
+#endif
/*
** Schedule async_invoke on a worker thread
@@ -393,19 +554,29 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
void (*async_invoke)(void*), void* async_data,
void (*async_free)(void*))
{
- ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErlAsync));
- Port* prt = erts_drvport2port(ix);
+ ErtsAsync* a;
+ Port* prt;
long id;
unsigned int qix;
+#if ERTS_USE_ASYNC_READY_Q
+ Uint sched_id;
+ sched_id = erts_get_scheduler_id();
+ if (!sched_id)
+ sched_id = 1;
+#endif
+ prt = erts_drvport2port(ix);
if (!prt)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- a->next = NULL;
- a->prev = NULL;
+ a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync));
+
+#if ERTS_USE_ASYNC_READY_Q
+ a->sched_id = sched_id;
+#endif
a->hndl = (DE_Handle*)prt->drv_ptr->handle;
a->port = prt->id;
a->pdl = NULL;
@@ -413,12 +584,16 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
a->async_invoke = async_invoke;
a->async_free = async_free;
- erts_smp_spin_lock(&async_id_lock);
- async_id = (async_id + 1) & 0x7fffffff;
- if (async_id == 0)
- async_id++;
- id = async_id;
- erts_smp_spin_unlock(&async_id_lock);
+ if (!async)
+ id = 0;
+ else {
+ do {
+ id = erts_atomic_inc_read_nob(&async->init.data.id);
+ } while (id == 0);
+ if (id < 0)
+ id *= -1;
+ ASSERT(id > 0);
+ }
a->async_id = id;
@@ -437,7 +612,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
driver_pdl_inc_refc(prt->port_data_lock);
a->pdl = prt->port_data_lock;
}
- async_add(a, &async_q[qix]);
+ async_add(a, async_q(qix));
return id;
}
#endif
@@ -455,10 +630,16 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
int driver_async_cancel(unsigned int id)
{
-#ifdef USE_THREADS
- if (erts_async_max_threads > 0)
- return async_del(id);
-#endif
+ /*
+ * Not supported anymore. Always fail (which is backward
+ * compatible).
+ *
+ * This functionality could be implemented again. However,
+ * it is (and always has been) completely useless since
+ * it doesn't give you any guarantees whatsoever. The user
+ * needs to (and always has had to) synchronize in his/her
+ * own code in order to get any guarantees.
+ */
return 0;
}
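
The rewritten async_main/async_get above implement the classic worker-pool
loop: each async thread dequeues jobs from its own queue, blocks when the
queue is empty, runs the job, and then hands the finished job to the owning
scheduler's ready queue. A simplified analogy using plain pthreads (a mutex
and condition variable standing in for the lock-free erts_thr_q and the
thread event primitives):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct job {
        struct job *next;
        void (*invoke)(void *);
        void *data;
        int die;                         /* "time to die" marker        */
    } Job;

    typedef struct {
        pthread_mutex_t mtx;
        pthread_cond_t  cnd;
        Job *head, *tail;
    } JobQ;

    static Job *job_dequeue(JobQ *q)
    {
        pthread_mutex_lock(&q->mtx);
        while (!q->head)                 /* sleep until a job arrives   */
            pthread_cond_wait(&q->cnd, &q->mtx);
        Job *j = q->head;
        q->head = j->next;
        if (!q->head)
            q->tail = NULL;
        pthread_mutex_unlock(&q->mtx);
        return j;
    }

    static void *worker_main(void *arg)
    {
        JobQ *q = (JobQ *) arg;
        for (;;) {
            Job *j = job_dequeue(q);
            if (j->die)                  /* queue flush/termination     */
                break;
            j->invoke(j->data);          /* run the async work          */
            /* ...then enqueue j on the owning scheduler's ready queue,
             * as async_reply() does above. */
        }
        return NULL;
    }
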
diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h
new file mode 100644
index 0000000000..95374a8fc9
--- /dev/null
+++ b/erts/emulator/beam/erl_async.h
@@ -0,0 +1,66 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_ASYNC_H__
+#define ERL_ASYNC_H__
+
+#define ERTS_MAX_NO_OF_ASYNC_THREADS 1024
+extern int erts_async_max_threads;
+#define ERTS_ASYNC_THREAD_MIN_STACK_SIZE 16 /* Kilo words */
+#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
+extern int erts_async_thread_suggested_stack_size;
+
+#ifdef USE_THREADS
+
+#ifdef ERTS_SMP
+/*
+ * With smp support we can choose to have, or not to
+ * have an async ready queue.
+ */
+#define ERTS_USE_ASYNC_READY_Q 1
+#endif
+
+#ifndef ERTS_SMP
+/* In non-smp case we *need* the async ready queue */
+# undef ERTS_USE_ASYNC_READY_Q
+# define ERTS_USE_ASYNC_READY_Q 1
+#endif
+
+#ifndef ERTS_USE_ASYNC_READY_Q
+# define ERTS_USE_ASYNC_READY_Q 0
+#endif
+
+#if ERTS_USE_ASYNC_READY_Q
+int erts_check_async_ready(void *);
+int erts_async_ready_clean(void *, void *);
+void *erts_get_async_ready_queue(Uint sched_id);
+#define ERTS_ASYNC_READY_CLEAN 0
+#define ERTS_ASYNC_READY_DIRTY 1
+#ifdef ERTS_SMP
+#define ERTS_ASYNC_READY_NEED_THR_PRGR 2
+#endif
+#endif /* ERTS_USE_ASYNC_READY_Q */
+
+#endif /* USE_THREADS */
+
+void erts_init_async(void);
+void erts_exit_flush_async(void);
+
+
+#endif /* ERL_ASYNC_H__ */
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 5e3032ddaa..c50fdeb4e8 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -161,14 +161,18 @@ erts_bfalc_start(BFAllctr_t *bfallctr,
BFAllctrInit_t *bfinit,
AllctrInit_t *init)
{
- BFAllctr_t nulled_state = {{0}};
- /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
- warning. gcc warns if {0} is used as initializer of a struct when
- the first member is a struct (not if, for example, the third member
- is a struct). */
+ struct {
+ int dummy;
+ BFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
Allctr_t *allctr = (Allctr_t *) bfallctr;
- sys_memcpy((void *) bfallctr, (void *) &nulled_state, sizeof(BFAllctr_t));
+ sys_memcpy((void *) bfallctr, (void *) &zero.allctr, sizeof(BFAllctr_t));
bfallctr->address_order = bfinit->ao;
diff --git a/erts/emulator/beam/erl_bestfit_alloc.h b/erts/emulator/beam/erl_bestfit_alloc.h
index faa2d9742e..0c29662852 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.h
+++ b/erts/emulator/beam/erl_bestfit_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 684fa5d12f..6d022e0d11 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -65,6 +65,10 @@ static Export binary_copy_trap_export;
static BIF_RETTYPE binary_copy_trap(BIF_ALIST_2);
static Uint max_loop_limit;
+static BIF_RETTYPE
+binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
+static BIF_RETTYPE
+binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
void erts_init_bif_binary(void)
{
@@ -1399,6 +1403,12 @@ static BIF_RETTYPE binary_matches_trap(BIF_ALIST_3)
BIF_RETTYPE binary_match_3(BIF_ALIST_3)
{
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
Uint hsstart;
Uint hsend;
Eterm *tp;
@@ -1408,17 +1418,17 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3)
int runres;
Eterm result;
- if (is_not_binary(BIF_ARG_1)) {
+ if (is_not_binary(arg1)) {
goto badarg;
}
- if (parse_match_opts_list(BIF_ARG_3,BIF_ARG_1,&hsstart,&hsend)) {
+ if (parse_match_opts_list(arg3,arg1,&hsstart,&hsend)) {
goto badarg;
}
if (hsend == 0) {
BIF_RET(am_nomatch);
}
- if (is_tuple(BIF_ARG_2)) {
- tp = tuple_val(BIF_ARG_2);
+ if (is_tuple(arg2)) {
+ tp = tuple_val(arg2);
if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
goto badarg;
}
@@ -1437,13 +1447,13 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3)
goto badarg;
}
bin_term = tp[2];
- } else if (do_binary_match_compile(BIF_ARG_2,&type,&bin)) {
+ } else if (do_binary_match_compile(arg2,&type,&bin)) {
goto badarg;
}
- runres = do_binary_match(BIF_P,BIF_ARG_1,hsstart,hsend,type,bin,NIL,&result);
+ runres = do_binary_match(p,arg1,hsstart,hsend,type,bin,NIL,&result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
+ bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1451,17 +1461,23 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3)
case DO_BIN_MATCH_OK:
BIF_RET(result);
case DO_BIN_MATCH_RESTART:
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP3(&binary_match_trap_export, BIF_P, BIF_ARG_1, result, bin_term);
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(&binary_match_trap_export, p, arg1, result, bin_term);
default:
goto badarg;
}
badarg:
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
{
+ return binary_matches(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
Uint hsstart, hsend;
Eterm *tp;
Eterm type;
@@ -1470,17 +1486,17 @@ BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
int runres;
Eterm result;
- if (is_not_binary(BIF_ARG_1)) {
+ if (is_not_binary(arg1)) {
goto badarg;
}
- if (parse_match_opts_list(BIF_ARG_3,BIF_ARG_1,&hsstart,&hsend)) {
+ if (parse_match_opts_list(arg3,arg1,&hsstart,&hsend)) {
goto badarg;
}
if (hsend == 0) {
BIF_RET(NIL);
}
- if (is_tuple(BIF_ARG_2)) {
- tp = tuple_val(BIF_ARG_2);
+ if (is_tuple(arg2)) {
+ tp = tuple_val(arg2);
if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
goto badarg;
}
@@ -1499,14 +1515,14 @@ BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
goto badarg;
}
bin_term = tp[2];
- } else if (do_binary_match_compile(BIF_ARG_2,&type,&bin)) {
+ } else if (do_binary_match_compile(arg2,&type,&bin)) {
goto badarg;
}
- runres = do_binary_matches(BIF_P,BIF_ARG_1,hsstart,hsend,type,bin,
+ runres = do_binary_matches(p,arg1,hsstart,hsend,type,bin,
NIL,&result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
+ bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1514,26 +1530,26 @@ BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
case DO_BIN_MATCH_OK:
BIF_RET(result);
case DO_BIN_MATCH_RESTART:
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP3(&binary_matches_trap_export, BIF_P, BIF_ARG_1, result,
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(&binary_matches_trap_export, p, arg1, result,
bin_term);
default:
goto badarg;
}
badarg:
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
BIF_RETTYPE binary_match_2(BIF_ALIST_2)
{
- return binary_match_3(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+ return binary_match(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
}
BIF_RETTYPE binary_matches_2(BIF_ALIST_2)
{
- return binary_matches_3(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+ return binary_matches(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
}
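
The binary_match/binary_matches refactoring above presumably follows from
the same calling-convention change: binary_match_2 can no longer simply
call binary_match_3 with an extra positional argument, so the shared logic
moves into static helpers taking explicit Eterm arguments, and the exported
BIFs become thin wrappers. A minimal sketch of the pattern (illustrative
types and names, not the ERTS ones):

    typedef unsigned long Term;

    /* Shared implementation; options may be a sentinel 0. */
    static Term do_match(Term subject, Term pattern, Term options)
    {
        (void) subject; (void) pattern; (void) options;
        return 0;
    }

    Term match_3(Term subject, Term pattern, Term options)
    {
        return do_match(subject, pattern, options);
    }

    Term match_2(Term subject, Term pattern)
    {
        return do_match(subject, pattern, (Term) 0);   /* no options */
    }
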
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index d714eacd06..b2d5722e9b 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -142,9 +142,11 @@ static void ddll_no_more_references(void *vdh);
* really load and add as LOADED {ok,loaded} {ok,pending_driver}
* {error, permanent} {error,load_error()}
*/
-BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
- Eterm name_term, Eterm options)
+BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
{
+ Eterm path_term = BIF_ARG_1;
+ Eterm name_term = BIF_ARG_2;
+ Eterm options = BIF_ARG_3;
char *path = NULL;
Uint path_len;
char *name = NULL;
@@ -236,7 +238,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
sys_strcpy(path+path_len,name);
#if DDLL_SMP
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
#endif
if ((drv = lookup_driver(name)) != NULL) {
@@ -247,7 +249,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
} else {
dh = drv->handle;
if (dh->status == ERL_DE_OK) {
- int is_last = is_last_user(dh,p);
+ int is_last = is_last_user(dh, BIF_P);
if (reload == 1 && !is_last) {
/*Want reload if no other users,
but there are others...*/
@@ -261,7 +263,8 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
soft_error_term = am_inconsistent;
goto soft_error;
}
- if ((old = find_proc_entry(dh, p, ERL_DE_PROC_LOADED)) ==
+ if ((old = find_proc_entry(dh, BIF_P,
+ ERL_DE_PROC_LOADED)) ==
NULL) {
soft_error_term = am_not_loaded_by_this_process;
goto soft_error;
@@ -272,7 +275,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
}
/* Reload requested and granted */
dereference_all_processes(dh);
- set_driver_reloading(dh, p, path, name, flags);
+ set_driver_reloading(dh, BIF_P, path, name, flags);
if (dh->flags & ERL_DE_FL_KILL_PORTS) {
kill_ports = 1;
}
@@ -286,7 +289,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
soft_error_term = am_inconsistent;
goto soft_error;
}
- add_proc_loaded(dh,p);
+ add_proc_loaded(dh, BIF_P);
erts_ddll_reference_driver(dh);
monitor = 0;
ok_term = mkatom("already_loaded");
@@ -308,7 +311,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
notify_all(dh, drv->name,
ERL_DE_PROC_AWAIT_UNLOAD, am_UP,
am_unload_cancelled);
- add_proc_loaded(dh,p);
+ add_proc_loaded(dh, BIF_P);
erts_ddll_reference_driver(dh);
monitor = 0;
ok_term = mkatom("already_loaded");
@@ -325,7 +328,8 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
goto soft_error;
}
/* Load of granted unload... */
- add_proc_loaded_deref(dh,p); /* Dont reference, will happen after reload */
+ /* Don't reference, will happen after reload */
+ add_proc_loaded_deref(dh, BIF_P);
++monitor;
ok_term = am_pending_driver;
} else { /* ERL_DE_PERMANENT */
@@ -345,7 +349,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
goto soft_error;
} else {
dh->flags = flags;
- add_proc_loaded(dh,p);
+ add_proc_loaded(dh, BIF_P);
first_ddll_reference(dh);
monitor = 0;
ok_term = mkatom("loaded");
@@ -397,18 +401,18 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
#endif
- p->flags |= F_USING_DDLL;
+ BIF_P->flags |= F_USING_DDLL;
if (monitor) {
- Eterm mref = add_monitor(p, dh, ERL_DE_PROC_AWAIT_LOAD);
- hp = HAlloc(p,4);
+ Eterm mref = add_monitor(BIF_P, dh, ERL_DE_PROC_AWAIT_LOAD);
+ hp = HAlloc(BIF_P, 4);
t = TUPLE3(hp, am_ok, ok_term, mref);
} else {
- hp = HAlloc(p,3);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_ok, ok_term);
}
#if DDLL_SMP
@@ -416,33 +420,33 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
soft_error:
#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
#endif
if (do_build_load_error) {
- soft_error_term = build_load_error(p, build_this_load_error);
+ soft_error_term = build_load_error(BIF_P, build_this_load_error);
}
- hp = HAlloc(p,3);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_error, soft_error_term);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
error:
assert_drv_list_not_locked();
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
if (path != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
}
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
- BIF_ERROR(p,BADARG);
+ BIF_ERROR(BIF_P, BADARG);
}
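The change running through this file (and most of the patch) is the switch from explicit `(Process *p, Eterm arg1, ...)` signatures to the uniform `BIF_ALIST_N` calling convention, where the process is reached through `BIF_P` and the arguments through `BIF_ARG_1`..`BIF_ARG_N`. Below is a minimal standalone model of that convention; the macro bodies and names are illustrative assumptions, not the actual definitions from `bif.h`.

```c
#include <stdio.h>

typedef unsigned long Eterm;              /* stand-in for the real Eterm   */
typedef struct { int dummy; } Process;    /* stand-in for the real Process */

/* Illustrative model: every BIF takes the calling process plus an
 * argument vector, and the body only ever names BIF_P / BIF_ARG_n. */
#define BIF_ALIST_2 Process *A__p, Eterm *BIF__ARGS
#define BIF_P       A__p
#define BIF_ARG_1   (BIF__ARGS[0])
#define BIF_ARG_2   (BIF__ARGS[1])

static Eterm example_add_2(BIF_ALIST_2)
{
    (void) BIF_P;                         /* process context, unused here  */
    return BIF_ARG_1 + BIF_ARG_2;
}

int main(void)
{
    Process self;
    Eterm args[2] = { 40, 2 };
    printf("%lu\n", (unsigned long) example_add_2(&self, args));  /* 42 */
    return 0;
}
```

With the arguments in one vector, the same dispatch code can carry any arity, which is why converted bodies such as erl_ddll_try_unload_2 below simply unpack BIF_ARG_1/BIF_ARG_2 into locals at the top.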
/*
@@ -481,8 +485,10 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
any AWAIT_LOAD-waiters with {'DOWN', ref(), driver, name(), load_cancelled}
If the driver made itself permanent, {'UP', ref(), driver, name(), permanent}
*/
-Eterm erl_ddll_try_unload_2(Process *p, Eterm name_term, Eterm options)
+Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
{
+ Eterm name_term = BIF_ARG_1;
+ Eterm options = BIF_ARG_2;
char *name = NULL;
Eterm ok_term = NIL;
Eterm soft_error_term = NIL;
@@ -495,7 +501,7 @@ Eterm erl_ddll_try_unload_2(Process *p, Eterm name_term, Eterm options)
Eterm l;
int kill_ports = 0;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
for(l = options; is_list(l); l = CDR(list_val(l))) {
Eterm opt = CAR(list_val(l));
@@ -548,7 +554,7 @@ Eterm erl_ddll_try_unload_2(Process *p, Eterm name_term, Eterm options)
if (dh->flags & ERL_DE_FL_KILL_PORTS) {
kill_ports = 1;
}
- if ((pe = find_proc_entry(dh, p, ERL_DE_PROC_LOADED)) == NULL) {
+ if ((pe = find_proc_entry(dh, BIF_P, ERL_DE_PROC_LOADED)) == NULL) {
if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
soft_error_term = am_not_loaded_by_this_process;
goto soft_error;
@@ -624,22 +630,22 @@ done:
#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- p->flags |= F_USING_DDLL;
+ BIF_P->flags |= F_USING_DDLL;
if (monitor > 0) {
- Eterm mref = add_monitor(p, dh, ERL_DE_PROC_AWAIT_UNLOAD);
- hp = HAlloc(p,4);
+ Eterm mref = add_monitor(BIF_P, dh, ERL_DE_PROC_AWAIT_UNLOAD);
+ hp = HAlloc(BIF_P, 4);
t = TUPLE3(hp, am_ok, ok_term, mref);
} else {
- hp = HAlloc(p,3);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_ok, ok_term);
}
if (kill_ports > 1) {
- ERTS_BIF_CHK_EXITED(p); /* May be exited by port killing */
+ ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */
}
#if DDLL_SMP
unlock_drv_list();
@@ -651,8 +657,8 @@ soft_error:
unlock_drv_list();
#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- hp = HAlloc(p,3);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_error, soft_error_term);
BIF_RET(t);
@@ -661,21 +667,21 @@ soft_error:
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(p,BADARG);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_ERROR(BIF_P, BADARG);
}
/*
* A shadow of the "real" demonitor BIF
*/
-BIF_RETTYPE erl_ddll_demonitor_1(Process *p, Eterm ref)
+BIF_RETTYPE erl_ddll_demonitor_1(BIF_ALIST_1)
{
- if (is_not_internal_ref(ref)) {
- BIF_ERROR(p, BADARG);
+ if (is_not_internal_ref(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
}
- if (p->flags & F_USING_DDLL) {
- erts_ddll_remove_monitor(p, ref, ERTS_PROC_LOCK_MAIN);
+ if (BIF_P->flags & F_USING_DDLL) {
+ erts_ddll_remove_monitor(BIF_P, BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -683,18 +689,18 @@ BIF_RETTYPE erl_ddll_demonitor_1(Process *p, Eterm ref)
/*
* A shadow of the "real" monitor BIF
*/
-BIF_RETTYPE erl_ddll_monitor_2(Process *p, Eterm dr, Eterm what)
+BIF_RETTYPE erl_ddll_monitor_2(BIF_ALIST_2)
{
- if (dr != am_driver) {
- BIF_ERROR(p,BADARG);
+ if (BIF_ARG_1 != am_driver) {
+ BIF_ERROR(BIF_P, BADARG);
}
- return erts_ddll_monitor_driver(p, what, ERTS_PROC_LOCK_MAIN);
+ return erts_ddll_monitor_driver(BIF_P, BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
}
/*
* Return list of loaded drivers {ok,[string()]}
*/
-Eterm erl_ddll_loaded_drivers_0(Process *p)
+BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
{
Eterm *hp;
int need = 3;
@@ -706,7 +712,7 @@ Eterm erl_ddll_loaded_drivers_0(Process *p)
for (drv = driver_list; drv; drv = drv->next) {
need += sys_strlen(drv->name)*2+2;
}
- hp = HAlloc(p,need);
+ hp = HAlloc(BIF_P, need);
for (drv = driver_list; drv; drv = drv->next) {
Eterm l;
l = buf_to_intlist(&hp, drv->name, sys_strlen(drv->name), NIL);
@@ -726,8 +732,11 @@ Eterm erl_ddll_loaded_drivers_0(Process *p)
* item is processes, driver_options, port_count, linked_in_driver,
* permanent, awaiting_load, awaiting_unload
*/
-Eterm erl_ddll_info_2(Process *p, Eterm name_term, Eterm item)
+BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
{
+ Process *p = BIF_P;
+ Eterm name_term = BIF_ARG_1;
+ Eterm item = BIF_ARG_2;
char *name = NULL;
Eterm res = NIL;
erts_driver_t *drv;
@@ -850,8 +859,10 @@ Eterm erl_ddll_info_2(Process *p, Eterm name_term, Eterm item)
* Backend for erl_ddll:format_error, handles all "soft" errors returned by builtins,
* possibly by calling the system specific error handler
*/
-Eterm erl_ddll_format_error_int_1(Process *p, Eterm code_term)
+BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm code_term = BIF_ARG_1;
char *errstring = NULL;
int errint;
int len;
@@ -1558,14 +1569,14 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
if ((res = erts_sys_ddll_load_driver_init(dh->handle,
&init_handle)) != ERL_DE_NO_ERROR) {
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_NO_INIT;
+ res = ERL_DE_LOAD_ERROR_NO_INIT;
+ goto error;
}
dp = erts_sys_ddll_call_init(init_handle);
if (dp == NULL) {
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_FAILED_INIT;
+ res = ERL_DE_LOAD_ERROR_FAILED_INIT;
+ goto error;
}
switch (dp->extended_marker) {
@@ -1583,24 +1594,27 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
|| dp->handle2 != NULL
|| dp->process_exit != NULL) {
/* Old driver; needs to be recompiled... */
- return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ goto error;
}
break;
case ERL_DRV_EXTENDED_MARKER:
if (ERL_DRV_EXTENDED_MAJOR_VERSION != dp->major_version
|| ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version) {
/* Incompatible driver version */
- return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ goto error;
}
break;
default:
/* Old driver; needs to be recompiled... */
- return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ goto error;
}
if (strcmp(name, dp->driver_name) != 0) {
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_BAD_NAME;
+ res = ERL_DE_LOAD_ERROR_BAD_NAME;
+ goto error;
}
erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
dh->port_count = 0;
@@ -1615,11 +1629,14 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
*/
erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->full_path);
dh->full_path = NULL;
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_FAILED_INIT;
+ res = ERL_DE_LOAD_ERROR_FAILED_INIT;
+ goto error;
}
-
return ERL_DE_NO_ERROR;
+
+error:
+ erts_sys_ddll_close(dh->handle);
+ return res;
}
static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
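The do_load_driver_entry() hunk just above also replaces the scattered `erts_sys_ddll_close(dh->handle); return <code>;` failure returns with a single `error:` label: each failure path now only sets `res` and jumps, and the handle is closed in exactly one place. A small self-contained sketch of that single-exit cleanup idiom (plain C, with a hypothetical resource and error codes):

```c
#include <stdio.h>

/* Single-exit cleanup: every failure path after the resource is acquired
 * sets `res` and jumps to one label that releases it exactly once. */
static int load_resource(const char *path)
{
    int res = 0;
    FILE *handle = fopen(path, "rb");

    if (handle == NULL)
        return -1;                           /* nothing acquired yet       */

    if (fgetc(handle) == EOF) {              /* e.g. "failed init"         */
        res = -2;
        goto error;
    }
    if (fseek(handle, 0, SEEK_SET) != 0) {   /* e.g. "bad name"            */
        res = -3;
        goto error;
    }

    fclose(handle);
    return 0;                                /* success path               */

error:
    fclose(handle);                          /* cleanup lives in one place */
    return res;
}

int main(void)
{
    printf("%d\n", load_resource("/etc/hostname"));
    return 0;
}
```

The payoff shows when another failure path is added later: it cannot forget to close the handle.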
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 6a74596f76..a79feaebdb 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -39,6 +39,8 @@
#include "dist.h"
#include "erl_gc.h"
#include "erl_cpu_topology.h"
+#include "erl_async.h"
+#include "erl_thr_progress.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -52,6 +54,9 @@
#include <valgrind/memcheck.h>
#endif
+static Export* alloc_info_trap = NULL;
+static Export* alloc_sizes_trap = NULL;
+
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/* Keep erts_system_version as a global variable for easy access from a core */
@@ -119,6 +124,16 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
# define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
#endif
+/* Cached, pre-built {OsType,OsFlavor} and {Major,Minor,Build} tuples */
+static Eterm os_type_tuple;
+static Eterm os_version_tuple;
+
+static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item);
+
+static Eterm
+current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
+static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
+
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
@@ -554,6 +569,8 @@ static Eterm pi_args[] = {
am_suspending,
am_min_heap_size,
am_min_bin_vheap_size,
+ am_current_location,
+ am_current_stacktrace,
#ifdef HYBRID
am_message_binary
#endif
@@ -602,8 +619,10 @@ pi_arg2ix(Eterm arg)
case am_suspending: return 26;
case am_min_heap_size: return 27;
case am_min_bin_vheap_size: return 28;
+ case am_current_location: return 29;
+ case am_current_stacktrace: return 30;
#ifdef HYBRID
- case am_message_binary: return 29;
+ case am_message_binary: return 31;
#endif
default: return -1;
}
@@ -1006,35 +1025,15 @@ process_info_aux(Process *BIF_P,
break;
case am_current_function:
- if (rp->current == NULL) {
- rp->current = find_function_from_pc(rp->i);
- }
- if (rp->current == NULL) {
- hp = HAlloc(BIF_P, 3);
- res = am_undefined;
- } else {
- BeamInstr* current;
-
- if (rp->current[0] == am_erlang &&
- rp->current[1] == am_process_info &&
- (rp->current[2] == 1 || rp->current[2] == 2) &&
- (current = find_function_from_pc(rp->cp)) != NULL) {
-
- /*
- * The current function is erlang:process_info/2,
- * which is not the answer that the application want.
- * We will use the function pointed into by rp->cp
- * instead.
- */
+ res = current_function(BIF_P, rp, &hp, 0);
+ break;
- rp->current = current;
- }
+ case am_current_location:
+ res = current_function(BIF_P, rp, &hp, 1);
+ break;
- hp = HAlloc(BIF_P, 3+4);
- res = TUPLE3(hp, rp->current[0],
- rp->current[1], make_small(rp->current[2]));
- hp += 4;
- }
+ case am_current_stacktrace:
+ res = current_stacktrace(BIF_P, rp, &hp);
break;
case am_initial_call:
@@ -1608,6 +1607,113 @@ process_info_aux(Process *BIF_P,
}
#undef MI_INC
+static Eterm
+current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
+{
+ Eterm* hp;
+ Eterm res;
+ FunctionInfo fi;
+
+ if (rp->current == NULL) {
+ erts_lookup_function_info(&fi, rp->i, full_info);
+ rp->current = fi.current;
+ } else if (full_info) {
+ erts_lookup_function_info(&fi, rp->i, full_info);
+ if (fi.current == NULL) {
+ /* Use the current function without location info */
+ erts_set_current_function(&fi, rp->current);
+ }
+ }
+
+ if (BIF_P->id == rp->id) {
+ FunctionInfo fi2;
+
+ /*
+ * The current function is erlang:process_info/{1,2},
+ * which is not the answer that the application want.
+ * We will use the function pointed into by rp->cp
+ * instead if it can be looked up.
+ */
+ erts_lookup_function_info(&fi2, rp->cp, full_info);
+ if (fi2.current) {
+ fi = fi2;
+ rp->current = fi2.current;
+ }
+ }
+
+ /*
+ * Return the result.
+ */
+ if (rp->current == NULL) {
+ hp = HAlloc(BIF_P, 3);
+ res = am_undefined;
+ } else if (full_info) {
+ hp = HAlloc(BIF_P, 3+fi.needed);
+ hp = erts_build_mfa_item(&fi, hp, am_true, &res);
+ } else {
+ hp = HAlloc(BIF_P, 3+4);
+ res = TUPLE3(hp, rp->current[0],
+ rp->current[1], make_small(rp->current[2]));
+ hp += 4;
+ }
+ *hpp = hp;
+ return res;
+}
+
+static Eterm
+current_stacktrace(Process* p, Process* rp, Eterm** hpp)
+{
+ Uint sz;
+ struct StackTrace* s;
+ int depth;
+ FunctionInfo* stk;
+ FunctionInfo* stkp;
+ Uint heap_size;
+ int i;
+ Eterm* hp = *hpp;
+ Eterm mfa;
+ Eterm res = NIL;
+
+ depth = 8;
+ sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
+ s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
+ s->depth = 0;
+ if (rp->i) {
+ s->trace[s->depth++] = rp->i;
+ depth--;
+ }
+ if (depth > 0 && rp->cp != 0) {
+ s->trace[s->depth++] = rp->cp - 1;
+ depth--;
+ }
+ erts_save_stacktrace(rp, s, depth);
+
+ depth = s->depth;
+ stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
+ depth*sizeof(FunctionInfo));
+ heap_size = 3;
+ for (i = 0; i < depth; i++) {
+ erts_lookup_function_info(stkp, s->trace[i], 1);
+ if (stkp->current) {
+ heap_size += stkp->needed + 2;
+ stkp++;
+ }
+ }
+
+ hp = HAlloc(p, heap_size);
+ while (stkp > stk) {
+ stkp--;
+ hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
+ res = CONS(hp, mfa, res);
+ hp += 2;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, stk);
+ erts_free(ERTS_ALC_T_TMP, s);
+ *hpp = hp;
+ return res;
+}
+
#if defined(VALGRIND)
static int check_if_xml(void)
{
@@ -1633,9 +1739,19 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
sel = *tp++;
- if (sel == am_allocator_sizes && arity == 2) {
- return erts_allocator_info_term(BIF_P, *tp, 1);
- } else if (sel == am_wordsize && arity == 2) {
+ if (sel == am_allocator_sizes) {
+ switch (arity) {
+ case 2:
+ ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
+ return ret;
+ case 3:
+ if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1))
+ return am_true;
+ default:
+ goto badarg;
+ }
+ }
+ else if (sel == am_wordsize && arity == 2) {
if (tp[0] == am_internal) {
return make_small(sizeof(Eterm));
}
@@ -1682,8 +1798,17 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
}
else
goto badarg;
- } else if (sel == am_allocator && arity == 2) {
- return erts_allocator_info_term(BIF_P, *tp, 0);
+ } else if (sel == am_allocator) {
+ switch (arity) {
+ case 2:
+ ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
+ return ret;
+ case 3:
+ if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0))
+ return am_true;
+ default:
+ goto badarg;
+ }
} else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
return erts_get_cpu_topology_term(BIF_P, *tp);
} else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
@@ -2005,7 +2130,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_undefined);
#endif
} else if (BIF_ARG_1 == am_trace_control_word) {
- BIF_RET(db_get_trace_control_word_0(BIF_P));
+ BIF_RET(db_get_trace_control_word(BIF_P));
} else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
} else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
@@ -2065,7 +2190,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
/* Need to be the only thread running... */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (BIF_ARG_1 == am_info)
info(ERTS_PRINT_DSBUF, (void *) dsbufp);
@@ -2076,7 +2201,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else
distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
ASSERT(dsbufp && dsbufp->str);
@@ -2088,7 +2213,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
i = 0;
/* Need to be the only thread running... */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
for (dep = erts_visible_dist_entries; dep; dep = dep->next)
++i;
for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
@@ -2111,7 +2236,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = CONS(hp, tpl, res);
hp += 2;
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if (BIF_ARG_1 == am_system_version) {
@@ -2132,16 +2257,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
return erts_instr_get_type_info(BIF_P);
}
else if (BIF_ARG_1 == am_os_type) {
- Eterm type = am_atom_put(os_type, strlen(os_type));
- Eterm flav, tup;
- char *buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
-
- os_flavor(buf, 1024);
- flav = am_atom_put(buf, strlen(buf));
- hp = HAlloc(BIF_P, 3);
- tup = TUPLE2(hp, type, flav);
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- BIF_RET(tup);
+ BIF_RET(os_type_tuple);
}
else if (BIF_ARG_1 == am_allocator) {
BIF_RET(erts_allocator_options((void *) BIF_P));
@@ -2167,16 +2283,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_false);
}
else if (BIF_ARG_1 == am_os_version) {
- int major, minor, build;
- Eterm tup;
-
- os_version(&major, &minor, &build);
- hp = HAlloc(BIF_P, 4);
- tup = TUPLE3(hp,
- make_small(major),
- make_small(minor),
- make_small(build));
- BIF_RET(tup);
+ BIF_RET(os_version_tuple);
}
else if (BIF_ARG_1 == am_version) {
int n = strlen(ERLANG_VERSION);
@@ -2546,8 +2653,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("print_ethread_info", BIF_ARG_1)) {
+#if defined(ETHR_NATIVE_ATOMIC32_IMPL) \
+ || defined(ETHR_NATIVE_ATOMIC64_IMPL) \
+ || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
int i;
char **str;
+#endif
#ifdef ETHR_NATIVE_ATOMIC32_IMPL
erts_printf("32-bit native atomics: %s\n",
ETHR_NATIVE_ATOMIC32_IMPL);
@@ -2610,13 +2721,21 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
#endif
BIF_RET(am_true);
}
+#ifdef ERTS_SMP
+ else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
+ erts_thr_progress_dbg_print_state();
+ BIF_RET(am_true);
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
-Eterm
-port_info_1(Process* p, Eterm pid)
+BIF_RETTYPE
+port_info_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm pid = BIF_ARG_1;
static Eterm keys[] = {
am_name,
am_links,
@@ -2639,7 +2758,7 @@ port_info_1(Process* p, Eterm pid)
for (i = 0; i < ASIZE(keys); i++) {
Eterm item;
- item = port_info_2(p, pid, keys[i]);
+ item = port_info(p, pid, keys[i]);
if (is_non_value(item)) {
return THE_NON_VALUE;
}
@@ -2648,7 +2767,7 @@ port_info_1(Process* p, Eterm pid)
}
items[i] = item;
}
- reg_name = port_info_2(p, pid, am_registered_name);
+ reg_name = port_info(p, pid, am_registered_name);
/*
* Build the resulting list.
@@ -2684,24 +2803,27 @@ port_info_1(Process* p, Eterm pid)
BIF_RETTYPE port_info_2(BIF_ALIST_2)
{
+ return port_info(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
+{
BIF_RETTYPE ret;
- Eterm portid = BIF_ARG_1;
Port *prt;
- Eterm item = BIF_ARG_2;
Eterm res;
Eterm* hp;
int count;
if (is_internal_port(portid))
- prt = erts_id2port(portid, BIF_P, ERTS_PROC_LOCK_MAIN);
+ prt = erts_id2port(portid, p, ERTS_PROC_LOCK_MAIN);
else if (is_atom(portid))
- erts_whereis_name(BIF_P, ERTS_PROC_LOCK_MAIN,
+ erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
portid, NULL, 0, 0, &prt);
else if (is_external_port(portid)
&& external_port_dist_entry(portid) == erts_this_dist_entry)
BIF_RET(am_undefined);
else {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
if (!prt) {
@@ -2709,7 +2831,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
}
if (item == am_id) {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = make_small(internal_port_number(portid));
}
else if (item == am_links) {
@@ -2721,10 +2843,10 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
erts_doforall_links(prt->nlinks, &collect_one_link, &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ hp = HAlloc(p, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -2740,11 +2862,11 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ hp = HAlloc(p, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
t = TUPLE2(hp, am_process, item);
hp += 3;
res = CONS(hp, t, res);
@@ -2756,25 +2878,25 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
else if (item == am_name) {
count = sys_strlen(prt->name);
- hp = HAlloc(BIF_P, 3 + 2*count);
+ hp = HAlloc(p, 3 + 2*count);
res = buf_to_intlist(&hp, prt->name, count, NIL);
}
else if (item == am_connected) {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = prt->connected; /* internal pid */
}
else if (item == am_input) {
Uint hsz = 3;
Uint n = prt->bytes_in;
(void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, n);
}
else if (item == am_output) {
Uint hsz = 3;
Uint n = prt->bytes_out;
(void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, n);
}
else if (item == am_registered_name) {
@@ -2784,7 +2906,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
ERTS_BIF_PREP_RET(ret, NIL);
goto done;
} else {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = reg->name;
}
}
@@ -2796,7 +2918,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
Uint size = 0;
ErlHeapFragment* bp;
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
erts_doforall_links(prt->nlinks, &one_link_size, &size);
@@ -2813,18 +2935,18 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
hard to retrieve... */
(void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, size);
}
else if (item == am_queue_size) {
Uint ioq_size = erts_port_ioq_size(prt);
Uint hsz = 3;
(void) erts_bld_uint(NULL, &hsz, ioq_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, ioq_size);
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
#ifndef ERTS_SMP
res = am_false;
#else
@@ -2843,7 +2965,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
#endif
}
else {
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(ret, p, BADARG);
goto done;
}
@@ -2857,9 +2979,12 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
}
-Eterm
-fun_info_2(Process* p, Eterm fun, Eterm what)
+BIF_RETTYPE
+fun_info_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm what = BIF_ARG_2;
Eterm* hp;
Eterm val;
@@ -3170,26 +3295,6 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE memory_0(BIF_ALIST_0)
-{
- BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
- switch (res) {
- case am_badarg: BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); /* never... */
- case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
- default: BIF_RET(res);
- }
-}
-
-BIF_RETTYPE memory_1(BIF_ALIST_1)
-{
- BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, BIF_ARG_1);
- switch (res) {
- case am_badarg: BIF_ERROR(BIF_P, BADARG);
- case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
- default: BIF_RET(res);
- }
-}
-
BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
{
BIF_RET(erts_error_logger_warnings);
@@ -3291,6 +3396,15 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(am_false);
#endif
}
+ else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
+ Eterm res;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
+ erts_smp_thr_progress_unblock();
+ BIF_RET(res);
+ }
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3493,6 +3607,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
static erts_smp_atomic_t hipe_test_reschedule_flag;
+
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
/*
@@ -3543,10 +3658,10 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (ms > 0) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (block)
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
while (erts_milli_sleep((long) ms) != 0);
if (block)
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
@@ -3756,16 +3871,23 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
old_use_opt = !erts_disable_proc_not_running_opt;
erts_disable_proc_not_running_opt = !use_opt;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(old_use_opt ? am_true : am_false);
#else
BIF_ERROR(BIF_P, EXC_NOTSUP);
#endif
}
+ else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
+ if (erts_debug_wait_deallocations(BIF_P)) {
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+ }
+ }
+ }
}
BIF_ERROR(BIF_P, BADARG);
@@ -3924,7 +4046,7 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
Eterm* hp;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
data = erts_lcnt_get_data();
@@ -3942,17 +4064,17 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if (BIF_ARG_1 == am_clear) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_lcnt_clear_counters();
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_ok);
@@ -3963,7 +4085,7 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
case 2:
if (ERTS_IS_ATOM_STR("copy_save", tp[1])) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (tp[2] == am_true) {
res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
@@ -3973,17 +4095,17 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
res = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
} else {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(BIF_P, BADARG);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (tp[2] == am_true) {
res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
@@ -3993,11 +4115,11 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
} else {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(BIF_P, BADARG);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
}
@@ -4012,11 +4134,35 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+static void os_info_init(void)
+{
+ Eterm type = am_atom_put(os_type, strlen(os_type));
+ Eterm flav;
+ int major, minor, build;
+ char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
+ Eterm* hp;
+
+ os_flavor(buf, 1024);
+ flav = am_atom_put(buf, strlen(buf));
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM, (3+4)*sizeof(Eterm));
+ os_type_tuple = TUPLE2(hp, type, flav);
+ hp += 3;
+ os_version(&major, &minor, &build);
+ os_version_tuple = TUPLE3(hp,
+ make_small(major),
+ make_small(minor),
+ make_small(build));
+}
+
void
erts_bif_info_init(void)
{
erts_smp_atomic_init_nob(&available_internal_state, 0);
erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0);
+ alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
+ alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
process_info_init();
+ os_info_init();
}
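Throughout erl_bif_info.c (and in erl_bif_trace.c further down) the `erts_smp_block_system(0)` / `erts_smp_release_system()` pair is replaced by `erts_smp_thr_progress_block()` / `erts_smp_thr_progress_unblock()`, always bracketed by dropping and re-taking the caller's `ERTS_PROC_LOCK_MAIN`. The ordering is the interesting part; a minimal pthread sketch of the same bracket follows, using stand-in locks rather than the ERTS primitives.

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_main_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t thr_progress   = PTHREAD_MUTEX_INITIALIZER;

static void dump_global_state(void)
{
    puts("inspecting global state while everything else is stopped");
}

/* The pattern at every converted call site: release the per-process lock
 * first, then block global progress, do the work, and restore the locks
 * in the opposite order. */
static void blocked_section(void)
{
    pthread_mutex_unlock(&proc_main_lock);   /* erts_smp_proc_unlock(p, MAIN)   */
    pthread_mutex_lock(&thr_progress);       /* erts_smp_thr_progress_block()   */

    dump_global_state();

    pthread_mutex_unlock(&thr_progress);     /* erts_smp_thr_progress_unblock() */
    pthread_mutex_lock(&proc_main_lock);     /* erts_smp_proc_lock(p, MAIN)     */
}

int main(void)
{
    pthread_mutex_lock(&proc_main_lock);     /* a BIF runs with this lock held  */
    blocked_section();
    pthread_mutex_unlock(&proc_main_lock);
    return 0;
}
```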
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 47c48e74d6..1805366cfe 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -34,27 +34,7 @@
static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
-/*
- * erlang:'++'/2
- */
-
-Eterm
-ebif_plusplus_2(Process* p, Eterm A, Eterm B)
-{
- return append_2(p, A, B);
-}
-
-/*
- * erlang:'--'/2
- */
-
-Eterm
-ebif_minusminus_2(Process* p, Eterm A, Eterm B)
-{
- return subtract_2(p, A, B);
-}
-
-BIF_RETTYPE append_2(BIF_ALIST_2)
+static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
{
Eterm list;
Eterm copy;
@@ -63,18 +43,18 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
Eterm* hp;
int i;
- if ((i = list_length(BIF_ARG_1)) < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((i = list_length(A)) < 0) {
+ BIF_ERROR(p, BADARG);
}
if (i == 0) {
- BIF_RET(BIF_ARG_2);
- } else if (is_nil(BIF_ARG_2)) {
- BIF_RET(BIF_ARG_1);
+ BIF_RET(B);
+ } else if (is_nil(B)) {
+ BIF_RET(A);
}
need = 2*i;
- hp = HAlloc(BIF_P, need);
- list = BIF_ARG_1;
+ hp = HAlloc(p, need);
+ list = A;
copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
list = CDR(list_val(list));
hp += 2;
@@ -85,12 +65,31 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
list = CDR(listp);
hp += 2;
}
- CDR(list_val(last)) = BIF_ARG_2;
+ CDR(list_val(last)) = B;
BIF_RET(copy);
}
+/*
+ * erlang:'++'/2
+ */
+
+Eterm
+ebif_plusplus_2(BIF_ALIST_2)
+{
+ return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+BIF_RETTYPE append_2(BIF_ALIST_2)
+{
+ return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+/*
+ * erlang:'--'/2
+ */
+
#define SMALL_VEC_SIZE 10
-BIF_RETTYPE subtract_2(BIF_ALIST_2)
+static Eterm subtract(Process* p, Eterm A, Eterm B)
{
Eterm list;
Eterm* hp;
@@ -103,17 +102,17 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
int n;
int m;
- if ((n = list_length(BIF_ARG_1)) < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((n = list_length(A)) < 0) {
+ BIF_ERROR(p, BADARG);
}
- if ((m = list_length(BIF_ARG_2)) < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((m = list_length(B)) < 0) {
+ BIF_ERROR(p, BADARG);
}
if (n == 0)
BIF_RET(NIL);
if (m == 0)
- BIF_RET(BIF_ARG_1);
+ BIF_RET(A);
/* allocate element vector */
if (n <= SMALL_VEC_SIZE)
@@ -123,7 +122,7 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
/* PUT ALL ELEMENTS IN VP */
vp = vec_p;
- list = BIF_ARG_1;
+ list = A;
i = n;
while(i--) {
Eterm* listp = list_val(list);
@@ -132,7 +131,7 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
}
/* UNMARK ALL DELETED CELLS */
- list = BIF_ARG_2;
+ list = B;
m = 0; /* number of deleted elements */
while(is_list(list)) {
Eterm* listp = list_val(list);
@@ -153,11 +152,11 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
if (m == n) /* All deleted ? */
res = NIL;
else if (m == 0) /* None deleted ? */
- res = BIF_ARG_1;
+ res = A;
else { /* REBUILD LIST */
res = NIL;
need = 2*(n - m);
- hp = HAlloc(BIF_P, need);
+ hp = HAlloc(p, need);
vp = vec_p + n - 1;
while(vp >= vec_p) {
if (is_value(*vp)) {
@@ -172,6 +171,16 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
BIF_RET(res);
}
+BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2)
+{
+ return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+BIF_RETTYPE subtract_2(BIF_ALIST_2)
+{
+ return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
BIF_RETTYPE lists_member_2(BIF_ALIST_2)
{
Eterm term;
@@ -278,11 +287,12 @@ BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
}
BIF_RETTYPE
-lists_keymember_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+lists_keymember_3(BIF_ALIST_3)
{
Eterm res;
- res = keyfind(BIF_lists_keymember_3, p, Key, Pos, List);
+ res = keyfind(BIF_lists_keymember_3, BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
if (is_value(res) && is_tuple(res)) {
return am_true;
} else {
@@ -291,23 +301,25 @@ lists_keymember_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
}
BIF_RETTYPE
-lists_keysearch_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+lists_keysearch_3(BIF_ALIST_3)
{
Eterm res;
- res = keyfind(BIF_lists_keysearch_3, p, Key, Pos, List);
+ res = keyfind(BIF_lists_keysearch_3, BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
if (is_non_value(res) || is_not_tuple(res)) {
return res;
} else { /* Tuple */
- Eterm* hp = HAlloc(p, 3);
+ Eterm* hp = HAlloc(BIF_P, 3);
return TUPLE2(hp, am_value, res);
}
}
BIF_RETTYPE
-lists_keyfind_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+lists_keyfind_3(BIF_ALIST_3)
{
- return keyfind(BIF_lists_keyfind_3, p, Key, Pos, List);
+ return keyfind(BIF_lists_keyfind_3, BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
static Eterm
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index deda7adc1f..13f8b1f63c 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -225,18 +225,23 @@ BIF_RETTYPE is_function_1(BIF_ALIST_1)
BIF_RETTYPE is_function_2(BIF_ALIST_2)
{
+ BIF_RET(erl_is_function(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
+{
Sint arity;
/*
* Verify argument 2 (arity); arity must be >= 0.
*/
- if (is_small(BIF_ARG_2)) {
- arity = signed_val(BIF_ARG_2);
+ if (is_small(arg2)) {
+ arity = signed_val(arg2);
if (arity < 0) {
error:
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- } else if (is_big(BIF_ARG_2) && !bignum_header_is_neg(*big_val(BIF_ARG_2))) {
+ } else if (is_big(arg2) && !bignum_header_is_neg(*big_val(arg2))) {
/* A positive bignum is OK, but can't possibly match. */
arity = -1;
} else {
@@ -244,20 +249,20 @@ BIF_RETTYPE is_function_2(BIF_ALIST_2)
goto error;
}
- if (is_fun(BIF_ARG_1)) {
- ErlFunThing* funp = (ErlFunThing *) fun_val(BIF_ARG_1);
+ if (is_fun(arg1)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(arg1);
if (funp->arity == (Uint) arity) {
BIF_RET(am_true);
}
- } else if (is_export(BIF_ARG_1)) {
- Export* exp = (Export *) EXPAND_POINTER((export_val(BIF_ARG_1))[1]);
+ } else if (is_export(arg1)) {
+ Export* exp = (Export *) EXPAND_POINTER((export_val(arg1))[1]);
if (exp->code[2] == (Uint) arity) {
BIF_RET(am_true);
}
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* tp = tuple_val(BIF_ARG_1);
+ } else if (is_tuple(arg1)) {
+ Eterm* tp = tuple_val(arg1);
if (tp[0] == make_arityval(2) && is_atom(tp[1]) && is_atom(tp[2])) {
BIF_RET(am_true);
}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 954b1f9729..58d48199fa 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -53,20 +53,18 @@ BIF_RETTYPE os_timestamp_0(BIF_ALIST_0)
}
-Eterm
-os_getpid_0(Process* p)
+BIF_RETTYPE os_getpid_0(BIF_ALIST_0)
{
char pid_string[21]; /* enough for a 64 bit number */
int n;
Eterm* hp;
sys_get_pid(pid_string); /* In sys.c */
n = sys_strlen(pid_string);
- hp = HAlloc(p, n*2);
+ hp = HAlloc(BIF_P, n*2);
BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL));
}
-Eterm
-os_getenv_0(Process* p)
+BIF_RETTYPE os_getenv_0(BIF_ALIST_0)
{
GETENV_STATE state;
char *cp;
@@ -80,7 +78,7 @@ os_getenv_0(Process* p)
ret = NIL;
while ((cp = getenv_string(&state)) != NULL) {
len = strlen(cp);
- hp = HAlloc(p, len*2+2);
+ hp = HAlloc(BIF_P, len*2+2);
str = buf_to_intlist(&hp, cp, len, NIL);
ret = CONS(hp, str, ret);
}
@@ -90,9 +88,11 @@ os_getenv_0(Process* p)
return ret;
}
-Eterm
-os_getenv_1(Process* p, Eterm key)
+
+BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm key = BIF_ARG_1;
Eterm str;
int len, res;
char *key_str, *val;
@@ -145,9 +145,11 @@ os_getenv_1(Process* p, Eterm key)
BIF_RET(str);
}
-Eterm
-os_putenv_2(Process* p, Eterm key, Eterm value)
+BIF_RETTYPE os_putenv_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm key = BIF_ARG_1;
+ Eterm value = BIF_ARG_2;
char def_buf[1024];
char *buf = NULL;
int sep_ix, i, key_len, value_len, tot_len;
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 3fd35dd963..6b8f1b21fd 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -48,6 +48,9 @@ static void free_args(char **);
char *erts_default_arg0 = "default";
+static BIF_RETTYPE
+port_call(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
+
BIF_RETTYPE open_port_2(BIF_ALIST_2)
{
int port_num;
@@ -117,11 +120,9 @@ id_or_name2port(Process *c_p, Eterm id)
#define ERTS_PORT_COMMAND_FLAG_FORCE (((Uint32) 1) << 0)
#define ERTS_PORT_COMMAND_FLAG_NOSUSPEND (((Uint32) 1) << 1)
-static BIF_RETTYPE do_port_command(Process *BIF_P,
- Eterm BIF_ARG_1,
- Eterm BIF_ARG_2,
- Eterm BIF_ARG_3,
- Uint32 flags)
+static BIF_RETTYPE
+do_port_command(Process *BIF_P, Eterm arg1, Eterm arg2, Eterm arg3,
+ Uint32 flags)
{
BIF_RETTYPE res;
Port *p;
@@ -135,7 +136,7 @@ static BIF_RETTYPE do_port_command(Process *BIF_P,
profile_runnable_proc(BIF_P, am_inactive);
}
- p = id_or_name2port(BIF_P, BIF_ARG_1);
+ p = id_or_name2port(BIF_P, arg1);
if (!p) {
if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
trace_virtual_sched(BIF_P, am_in);
@@ -172,13 +173,13 @@ static BIF_RETTYPE do_port_command(Process *BIF_P,
monitor_generic(BIF_P, am_busy_port, p->id);
}
ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ arg1, arg2, arg3);
}
} else {
int wres;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
ERTS_SMP_CHK_NO_PROC_LOCKS;
- wres = erts_write_to_port(BIF_P->id, p, BIF_ARG_2);
+ wres = erts_write_to_port(BIF_P->id, p, arg2);
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (wres != 0) {
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
@@ -237,11 +238,17 @@ BIF_RETTYPE port_command_3(BIF_ALIST_3)
BIF_RETTYPE port_call_2(BIF_ALIST_2)
{
- return port_call_3(BIF_P,BIF_ARG_1,make_small(0),BIF_ARG_2);
+ return port_call(BIF_P,BIF_ARG_1, make_small(0), BIF_ARG_2);
}
BIF_RETTYPE port_call_3(BIF_ALIST_3)
{
+ return port_call(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
Uint op;
Port *p;
Uint size;
@@ -266,15 +273,15 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
/* trace of port scheduling with virtual process descheduling
* lock wait
*/
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_out);
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(c_p, am_out);
}
if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_inactive);
+ profile_runnable_proc(c_p, am_inactive);
}
- p = id_or_name2port(BIF_P, BIF_ARG_1);
+ p = id_or_name2port(c_p, arg1);
if (!p) {
error:
if (port_resp != port_result &&
@@ -286,22 +293,22 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
/* Need to virtual schedule in the process if there
* was an error.
*/
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(c_p, am_in);
}
if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
+ profile_runnable_proc(c_p, am_active);
}
if (p)
erts_port_release(p);
#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
- ERTS_BIF_CHK_EXITED(BIF_P);
+ ERTS_BIF_CHK_EXITED(c_p);
#endif
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(c_p, BADARG);
}
if ((drv = p->drv_ptr) == NULL) {
@@ -310,10 +317,10 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
if (drv->call == NULL) {
goto error;
}
- if (!term_to_Uint(BIF_ARG_2, &op)) {
+ if (!term_to_Uint(arg2, &op)) {
goto error;
}
- p->caller = BIF_P->id;
+ p->caller = c_p->id;
/* Lock taken, virtual schedule of port */
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
@@ -323,19 +330,19 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
profile_runnable_port(p, am_active);
}
- size = erts_encode_ext_size(BIF_ARG_3);
+ size = erts_encode_ext_size(arg3);
if (size > sizeof(port_input))
bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);
endp = bytes;
- erts_encode_ext(BIF_ARG_3, &endp);
+ erts_encode_ext(arg3, &endp);
real_size = endp - bytes;
if (real_size > size) {
erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
__FILE__, __LINE__, endp - (bytes + size));
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
prc = (char *) port_resp;
fpe_was_unmasked = erts_block_fpe();
ret = drv->call((ErlDrvData)p->drv_data,
@@ -356,7 +363,7 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
port_resp = (byte *) prc;
p->caller = NIL;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef HARDDEBUG
{
int z;
@@ -378,18 +385,18 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
/* Error or a binary without magic/ with wrong magic */
goto error;
}
- result_size = erts_decode_ext_size(port_resp, ret, 0);
+ result_size = erts_decode_ext_size(port_resp, ret);
if (result_size < 0) {
goto error;
}
- hp = HAlloc(BIF_P, result_size);
+ hp = HAlloc(c_p, result_size);
hp_end = hp + result_size;
endp = port_resp;
- res = erts_decode_ext(&hp, &MSO(BIF_P), &endp);
+ res = erts_decode_ext(&hp, &MSO(c_p), &endp);
if (res == THE_NON_VALUE) {
goto error;
}
- HRelease(BIF_P, hp_end, hp);
+ HRelease(c_p, hp_end, hp);
if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
driver_free(port_resp);
}
@@ -398,16 +405,16 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
if (p)
erts_port_release(p);
#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
- ERTS_BIF_CHK_EXITED(BIF_P);
+ ERTS_BIF_CHK_EXITED(c_p);
#endif
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(c_p, am_in);
}
if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
+ profile_runnable_proc(c_p, am_active);
}
return res;
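do_port_command() and port_call() now receive their arguments explicitly (arg1..arg3, c_p) instead of leaning on BIF_ARG_n, which is what lets port_command_2/3 and port_call_2/3 share one body; in particular, when the command cannot be delivered immediately, ERTS_BIF_PREP_YIELD3 reschedules bif_export[BIF_port_command_3] with those same three arguments. A tiny standalone model of that yield-and-retry shape (illustrative names, not the ERTS scheduler API):

```c
#include <stdio.h>

typedef unsigned long Eterm;
#define THE_NON_VALUE 0UL                    /* illustrative "no result yet"   */

/* Saved call so the caller can re-run the operation later with the very
 * same arguments (illustrative struct, not an ERTS type). */
struct yielded_call { int pending; Eterm a1, a2, a3; };

static int port_busy_once = 1;

static Eterm do_port_command(struct yielded_call *y, Eterm a1, Eterm a2, Eterm a3)
{
    if (port_busy_once) {                    /* cannot deliver yet: yield      */
        port_busy_once = 0;
        y->pending = 1; y->a1 = a1; y->a2 = a2; y->a3 = a3;
        return THE_NON_VALUE;
    }
    return a1 + a2 + a3;                     /* port ready: do the real work   */
}

int main(void)
{
    struct yielded_call y = { 0, 0, 0, 0 };
    Eterm r = do_port_command(&y, 1, 2, 3);
    while (y.pending) {                      /* "scheduler" re-enters the call */
        y.pending = 0;
        r = do_port_command(&y, y.a1, y.a2, y.a3);
    }
    printf("%lu\n", (unsigned long) r);      /* 6 */
    return 0;
}
```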
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index 26891c4348..6b843d2e08 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -45,6 +45,7 @@ static Export *urun_trap_exportp = NULL;
static Export *ucompile_trap_exportp = NULL;
static BIF_RETTYPE re_exec_trap(BIF_ALIST_3);
+static BIF_RETTYPE re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
static void *erts_erts_pcre_malloc(size_t size) {
return erts_alloc(ERTS_ALC_T_RE_HEAP,size);
@@ -414,8 +415,8 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con
* Compile BIFs
*/
-BIF_RETTYPE
-re_compile_2(BIF_ALIST_2)
+static BIF_RETTYPE
+re_compile(Process* p, Eterm arg1, Eterm arg2)
{
Uint slen;
char *expr;
@@ -429,43 +430,49 @@ re_compile_2(BIF_ALIST_2)
int unicode = 0;
- if (parse_options(BIF_ARG_2,&options,NULL,&pflags,NULL,NULL)
+ if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL)
< 0) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
if (pflags & PARSE_FLAG_UNIQUE_EXEC_OPT) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
unicode = (pflags & PARSE_FLAG_UNICODE) ? 1 : 0;
- if (pflags & PARSE_FLAG_UNICODE && !is_binary(BIF_ARG_1)) {
- BIF_TRAP2(ucompile_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (pflags & PARSE_FLAG_UNICODE && !is_binary(arg1)) {
+ BIF_TRAP2(ucompile_trap_exportp, p, arg1, arg2);
}
- if (erts_iolist_size(BIF_ARG_1, &slen)) {
- BIF_ERROR(BIF_P,BADARG);
+ if (erts_iolist_size(arg1, &slen)) {
+ BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(BIF_ARG_1, expr, slen) != 0) {
+ if (io_list_to_buf(arg1, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
expr[slen]='\0';
result = erts_pcre_compile2(expr, options, &errcode,
&errstr, &errofset, default_table);
- ret = build_compile_result(BIF_P, am_error, result, errcode,
+ ret = build_compile_result(p, am_error, result, errcode,
errstr, errofset, unicode, 1);
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
BIF_RET(ret);
}
BIF_RETTYPE
+re_compile_2(BIF_ALIST_2)
+{
+ return re_compile(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+BIF_RETTYPE
re_compile_1(BIF_ALIST_1)
{
- return re_compile_2(BIF_P,BIF_ARG_1,NIL);
+ return re_compile(BIF_P, BIF_ARG_1, NIL);
}
/*
@@ -845,8 +852,8 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
/*
* The actual re:run/2,3 BIFs
*/
-BIF_RETTYPE
-re_run_3(BIF_ALIST_3)
+static BIF_RETTYPE
+re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
{
const pcre *code_tmp;
RestartContext restart;
@@ -865,15 +872,15 @@ re_run_3(BIF_ALIST_3)
Eterm capture[CAPSPEC_SIZE];
int is_list_cap;
- if (parse_options(BIF_ARG_3,&comp_options,&options,&pflags,&startoffset,capture)
+ if (parse_options(arg3,&comp_options,&options,&pflags,&startoffset,capture)
< 0) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
is_list_cap = ((pflags & PARSE_FLAG_CAPTURE_OPT) &&
(capture[CAPSPEC_TYPE] == am_list));
- if (is_not_tuple(BIF_ARG_2) || (arityval(*tuple_val(BIF_ARG_2)) != 4)) {
- if (is_binary(BIF_ARG_2) || is_list(BIF_ARG_2) || is_nil(BIF_ARG_2)) {
+ if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 4)) {
+ if (is_binary(arg2) || is_list(arg2) || is_nil(arg2)) {
/* Compile from textual RE */
Uint slen;
char *expr;
@@ -884,19 +891,19 @@ re_run_3(BIF_ALIST_3)
int capture_count;
if (pflags & PARSE_FLAG_UNICODE &&
- (!is_binary(BIF_ARG_2) || !is_binary(BIF_ARG_1) ||
+ (!is_binary(arg2) || !is_binary(arg1) ||
(is_list_cap && !(pflags & PARSE_FLAG_GLOBAL)))) {
- BIF_TRAP3(urun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ BIF_TRAP3(urun_trap_exportp, p, arg1, arg2, arg3);
}
- if (erts_iolist_size(BIF_ARG_2, &slen)) {
- BIF_ERROR(BIF_P,BADARG);
+ if (erts_iolist_size(arg2, &slen)) {
+ BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(BIF_ARG_2, expr, slen) != 0) {
+ if (io_list_to_buf(arg2, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
expr[slen]='\0';
result = erts_pcre_compile2(expr, comp_options, &errcode,
@@ -905,11 +912,11 @@ re_run_3(BIF_ALIST_3)
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
/* Compilation error gives badarg except in the compile
function */
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
if (pflags & PARSE_FLAG_GLOBAL) {
Eterm precompiled =
- build_compile_result(BIF_P, am_error,
+ build_compile_result(p, am_error,
result, errcode,
errstr, errofset,
(pflags &
@@ -917,13 +924,13 @@ re_run_3(BIF_ALIST_3)
0);
Eterm *hp,r;
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- hp = HAlloc(BIF_P,4);
- /* BIF_ARG_2 is in the tuple just to make exceptions right */
- r = TUPLE3(hp,BIF_ARG_3,
+ hp = HAlloc(p,4);
+ /* arg2 is in the tuple just to make exceptions right */
+ r = TUPLE3(hp,arg3,
((pflags & PARSE_FLAG_UNIQUE_COMPILE_OPT) ?
am_true :
- am_false), BIF_ARG_2);
- BIF_TRAP3(grun_trap_exportp, BIF_P, BIF_ARG_1, precompiled, r);
+ am_false), arg2);
+ BIF_TRAP3(grun_trap_exportp, p, arg1, precompiled, r);
}
erts_pcre_fullinfo(result, NULL, PCRE_INFO_SIZE, &code_size);
@@ -935,31 +942,31 @@ re_run_3(BIF_ALIST_3)
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
/*unicode = (pflags & PARSE_FLAG_UNICODE) ? 1 : 0;*/
} else {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
} else {
if (pflags & PARSE_FLAG_UNIQUE_COMPILE_OPT) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
- tp = tuple_val(BIF_ARG_2);
+ tp = tuple_val(arg2);
if (tp[1] != am_re_pattern || is_not_small(tp[2]) ||
is_not_small(tp[3]) || is_not_binary(tp[4])) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
if (unsigned_val(tp[3]) &&
- (!is_binary(BIF_ARG_1) ||
+ (!is_binary(arg1) ||
(is_list_cap && !(pflags & PARSE_FLAG_GLOBAL)))) { /* unicode */
- BIF_TRAP3(urun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2,
- BIF_ARG_3);
+ BIF_TRAP3(urun_trap_exportp, p, arg1, arg2,
+ arg3);
}
if (pflags & PARSE_FLAG_GLOBAL) {
Eterm *hp,r;
- hp = HAlloc(BIF_P,3);
- r = TUPLE2(hp,BIF_ARG_3,am_false);
- BIF_TRAP3(grun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2,
+ hp = HAlloc(p,3);
+ r = TUPLE2(hp,arg3,am_false);
+ BIF_TRAP3(grun_trap_exportp, p, arg1, arg2,
r);
}
@@ -968,7 +975,7 @@ re_run_3(BIF_ALIST_3)
if ((code_tmp = (const pcre *)
erts_get_aligned_binary_bytes(tp[4], &temp_alloc)) == NULL) {
erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size);
memcpy(restart.code, code_tmp, code_size);
@@ -980,7 +987,7 @@ re_run_3(BIF_ALIST_3)
restart.ovector = erts_alloc(ERTS_ALC_T_RE_SUBJECT, ovsize * sizeof(int));
restart.extra.flags = PCRE_EXTRA_TABLES | PCRE_EXTRA_LOOP_LIMIT;
restart.extra.tables = default_table;
- restart.extra.loop_limit = ERTS_BIF_REDS_LEFT(BIF_P) * LOOP_FACTOR;
+ restart.extra.loop_limit = ERTS_BIF_REDS_LEFT(p) * LOOP_FACTOR;
loop_limit_tmp = max_loop_limit; /* To lesser probability of race in debug
situation (erts_debug) */
if (restart.extra.loop_limit > loop_limit_tmp) {
@@ -996,7 +1003,7 @@ re_run_3(BIF_ALIST_3)
if ((restart.ret_info = build_capture(capture,restart.code)) == NULL) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
}
@@ -1004,7 +1011,7 @@ re_run_3(BIF_ALIST_3)
copying, also binary returns can be sub binaries in that case */
restart.flags = 0;
- if (is_binary(BIF_ARG_1)) {
+ if (is_binary(arg1)) {
Eterm real_bin;
Uint offset;
Eterm* bptr;
@@ -1012,9 +1019,9 @@ re_run_3(BIF_ALIST_3)
int bitsize;
ProcBin* pb;
- ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, offset, bitoffs, bitsize);
+ ERTS_GET_REAL_BIN(arg1, real_bin, offset, bitoffs, bitsize);
- slength = binary_size(BIF_ARG_1);
+ slength = binary_size(arg1);
bptr = binary_val(real_bin);
if (bitsize != 0 || bitoffs != 0 || (*bptr != HEADER_PROC_BIN)) {
goto handle_iolist;
@@ -1027,24 +1034,24 @@ re_run_3(BIF_ALIST_3)
restart.flags |= RESTART_FLAG_SUBJECT_IN_BINARY;
} else {
handle_iolist:
- if (erts_iolist_size(BIF_ARG_1, &slength)) {
+ if (erts_iolist_size(arg1, &slength)) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
if (restart.ret_info != NULL) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info);
}
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength);
- if (io_list_to_buf(BIF_ARG_1, restart.subject, slength) != 0) {
+ if (io_list_to_buf(arg1, restart.subject, slength) != 0) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject);
if (restart.ret_info != NULL) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info);
}
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
}
@@ -1056,7 +1063,7 @@ handle_iolist:
rc = erts_pcre_exec(restart.code, &(restart.extra), restart.subject, slength, startoffset,
options, restart.ovector, ovsize);
ASSERT(loop_count != 0xFFFFFFFF);
- BUMP_REDS(BIF_P, loop_count / LOOP_FACTOR);
+ BUMP_REDS(p, loop_count / LOOP_FACTOR);
if (rc == PCRE_ERROR_LOOP_LIMIT) {
/* Trap */
Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
@@ -1065,17 +1072,17 @@ handle_iolist:
Eterm magic_bin;
Eterm *hp;
memcpy(restartp,&restart,sizeof(RestartContext));
- BUMP_ALL_REDS(BIF_P);
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
+ BUMP_ALL_REDS(p);
+ hp = HAlloc(p, PROC_BIN_SIZE);
+ magic_bin = erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
BIF_TRAP3(&re_exec_trap_export,
- BIF_P,
- BIF_ARG_1,
- BIF_ARG_2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
+ p,
+ arg1,
+ arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
magic_bin);
}
- res = build_exec_return(BIF_P, rc, &restart, BIF_ARG_1);
+ res = build_exec_return(p, rc, &restart, arg1);
cleanup_restart_context(&restart);
@@ -1083,9 +1090,15 @@ handle_iolist:
}
BIF_RETTYPE
+re_run_3(BIF_ALIST_3)
+{
+ return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+BIF_RETTYPE
re_run_2(BIF_ALIST_2)
{
- return re_run_3(BIF_P,BIF_ARG_1, BIF_ARG_2, NIL);
+ return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, NIL);
}
/*
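re_run() keeps the pre-existing trapping scheme: when erts_pcre_exec() hits its loop limit, the RestartContext is copied into a magic binary, the remaining reductions are consumed with BUMP_ALL_REDS, and the BIF traps to re_exec_trap so the match resumes on the next schedule with its state intact. A self-contained sketch of that "work in slices, park the state, resume later" pattern (plain C, illustrative names):

```c
#include <stdio.h>
#include <string.h>

/* Illustrative restart context: just enough state to continue a long
 * computation after the work budget for this slice runs out. */
struct restart_ctx { size_t pos; unsigned long acc; };

/* Returns 1 when finished, 0 when the budget was exhausted and ctx
 * holds everything needed to resume. */
static int sum_with_budget(const char *subject, struct restart_ctx *ctx, int budget)
{
    while (subject[ctx->pos] != '\0') {
        if (budget-- == 0)
            return 0;                        /* "trap": caller reschedules us  */
        ctx->acc += (unsigned char) subject[ctx->pos++];
    }
    return 1;
}

int main(void)
{
    const char *subject = "a fairly long subject string";
    struct restart_ctx ctx;
    memset(&ctx, 0, sizeof ctx);

    while (!sum_with_budget(subject, &ctx, 5))
        ;                                    /* each pass models one schedule  */

    printf("%lu\n", ctx.acc);
    return 0;
}
```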
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index db771bd216..a922a33da3 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -26,6 +26,7 @@
#include "bif.h"
#include "error.h"
#include "big.h"
+#include "erl_thr_progress.h"
/****************************************************************************
** BIF Timer support
@@ -686,7 +687,7 @@ erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
{
int i;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
ErtsBifTimer *btm;
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 0509e51a6f..b0a58c80ea 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -37,6 +37,7 @@
#include "erl_version.h"
#include "beam_bp.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -47,6 +48,11 @@ static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static Eterm erts_default_meta_tracer_pid;
+static Eterm
+trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
+static BIF_RETTYPE
+system_monitor(Process *p, Eterm monitor_pid, Eterm list);
+
static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
static int already_traced(Process *p, Process *tracee_p, Eterm tracer);
static int port_already_traced(Process *p, Port *tracee_port, Eterm tracer);
@@ -76,13 +82,19 @@ erts_bif_trace_init(void)
*/
Eterm
-trace_pattern_2(Process* p, Eterm MFA, Eterm Pattern)
+trace_pattern_2(BIF_ALIST_2)
{
- return trace_pattern_3(p,MFA,Pattern,NIL);
+ return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL);
}
Eterm
-trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
+trace_pattern_3(BIF_ALIST_3)
+{
+ return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static Eterm
+trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
@@ -97,7 +109,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
Eterm meta_tracer_pid = p->id;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
UseTmpHeap(3,p);
/*
@@ -326,7 +338,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
done:
UnUseTmpHeap(3,p);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
return make_small(matches);
@@ -336,7 +348,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetUnref(match_prog_set);
UnUseTmpHeap(3,p);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(p, BADARG);
}
@@ -435,9 +447,12 @@ erts_trace_flags(Eterm List,
return 0;
}
-Eterm
-trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
+Eterm trace_3(BIF_ALIST_3)
{
+ Process* p = BIF_P;
+ Eterm pid_spec = BIF_ARG_1;
+ Eterm how = BIF_ARG_2;
+ Eterm list = BIF_ARG_3;
int on;
Eterm tracer = NIL;
int matches = 0;
@@ -630,7 +645,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
#ifdef ERTS_SMP
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
system_blocked = 1;
#endif
@@ -679,7 +694,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
} else if (tracer != NIL) {
tracee_port->tracer_proc = tracer;
}
- /* matches are not counted for ports since it would violate compability */
+ /* matches are not counted for ports since it would violate compatibility */
/* This could be a reason to modify this function or make a new one. */
}
}
@@ -711,7 +726,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
#ifdef ERTS_SMP
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -726,7 +741,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
#ifdef ERTS_SMP
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -820,9 +835,11 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
* Return information about a process or an external function being traced.
*/
-Eterm
-trace_info_2(Process* p, Eterm What, Eterm Key)
+Eterm trace_info_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm What = BIF_ARG_1;
+ Eterm Key = BIF_ARG_2;
Eterm res;
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
@@ -1060,7 +1077,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
}
#endif
@@ -1068,7 +1085,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -1752,23 +1769,20 @@ new_seq_trace_token(Process* p)
}
}
-BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
+BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
{
- Eterm item;
Eterm res;
Eterm* hp;
Uint current_flag;
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_atom(item)) {
+ BIF_ERROR(p, BADARG);
}
- item = BIF_ARG_1;
-
- if (SEQ_TRACE_TOKEN(BIF_P) == NIL) {
+ if (SEQ_TRACE_TOKEN(p) == NIL) {
if ((item == am_send) || (item == am_receive) ||
(item == am_print) || (item == am_timestamp)) {
- hp = HAlloc(BIF_P,3);
+ hp = HAlloc(p,3);
res = TUPLE2(hp, item, am_false);
BIF_RET(res);
} else if ((item == am_label) || (item == am_serial)) {
@@ -1778,35 +1792,40 @@ BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
}
}
- if (BIF_ARG_1 == am_send) {
+ if (item == am_send) {
current_flag = SEQ_TRACE_SEND;
- } else if (BIF_ARG_1 == am_receive) {
+ } else if (item == am_receive) {
current_flag = SEQ_TRACE_RECEIVE;
- } else if (BIF_ARG_1 == am_print) {
+ } else if (item == am_print) {
current_flag = SEQ_TRACE_PRINT;
- } else if (BIF_ARG_1 == am_timestamp) {
+ } else if (item == am_timestamp) {
current_flag = SEQ_TRACE_TIMESTAMP;
} else {
current_flag = 0;
}
if (current_flag) {
- res = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(BIF_P)) & current_flag ?
+ res = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(p)) & current_flag ?
am_true : am_false;
} else if (item == am_label) {
- res = SEQ_TRACE_TOKEN_LABEL(BIF_P);
+ res = SEQ_TRACE_TOKEN_LABEL(p);
} else if (item == am_serial) {
- hp = HAlloc(BIF_P, 3);
- res = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(BIF_P), SEQ_TRACE_TOKEN_SERIAL(BIF_P));
+ hp = HAlloc(p, 3);
+ res = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(p), SEQ_TRACE_TOKEN_SERIAL(p));
} else {
error:
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = TUPLE2(hp, item, res);
BIF_RET(res);
}
+BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
+{
+ BIF_RET(erl_seq_trace_info(BIF_P, BIF_ARG_1));
+}
+
/*
seq_trace_print(Message) -> true | false
This function passes Message to the system_tracer
@@ -1852,7 +1871,7 @@ void erts_system_monitor_clear(Process *c_p) {
#ifdef ERTS_SMP
if (c_p) {
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
}
#endif
erts_set_system_monitor(NIL);
@@ -1862,7 +1881,7 @@ void erts_system_monitor_clear(Process *c_p) {
erts_system_monitor_flags.busy_dist_port = 0;
#ifdef ERTS_SMP
if (c_p) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -1919,23 +1938,35 @@ static Eterm system_monitor_get(Process *p)
}
-BIF_RETTYPE system_monitor_0(Process *p) {
- BIF_RET(system_monitor_get(p));
+BIF_RETTYPE system_monitor_0(BIF_ALIST_0)
+{
+ BIF_RET(system_monitor_get(BIF_P));
}
-BIF_RETTYPE system_monitor_1(Process *p, Eterm spec) {
+BIF_RETTYPE system_monitor_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm spec = BIF_ARG_1;
+
if (spec == am_undefined) {
- BIF_RET(system_monitor_2(p, spec, NIL));
+ BIF_RET(system_monitor(p, spec, NIL));
} else if (is_tuple(spec)) {
Eterm *tp = tuple_val(spec);
if (tp[0] != make_arityval(2)) goto error;
- BIF_RET(system_monitor_2(p, tp[1], tp[2]));
+ BIF_RET(system_monitor(p, tp[1], tp[2]));
}
error:
BIF_ERROR(p, BADARG);
}
-BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
+BIF_RETTYPE system_monitor_2(BIF_ALIST_2)
+{
+ return system_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+static BIF_RETTYPE
+system_monitor(Process *p, Eterm monitor_pid, Eterm list)
+{
Eterm prev;
int system_blocked = 0;
@@ -1951,7 +1982,7 @@ BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
system_blocked = 1;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0))
goto error;
@@ -1985,7 +2016,7 @@ BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
erts_system_monitor_flags.busy_port = !!busy_port;
erts_system_monitor_flags.busy_dist_port = !!busy_dist_port;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
}
@@ -1993,7 +2024,7 @@ BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
error:
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
@@ -2006,7 +2037,7 @@ void erts_system_profile_clear(Process *c_p) {
#ifdef ERTS_SMP
if (c_p) {
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
}
#endif
erts_set_system_profile(NIL);
@@ -2016,7 +2047,7 @@ void erts_system_profile_clear(Process *c_p) {
erts_system_profile_flags.exclusive = 0;
#ifdef ERTS_SMP
if (c_p) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -2053,11 +2084,16 @@ static Eterm system_profile_get(Process *p) {
}
}
-BIF_RETTYPE system_profile_0(Process *p) {
- BIF_RET(system_profile_get(p));
+BIF_RETTYPE system_profile_0(BIF_ALIST_0)
+{
+ BIF_RET(system_profile_get(BIF_P));
}
-BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
+BIF_RETTYPE system_profile_2(BIF_ALIST_2)
+{
+ Process *p = BIF_P;
+ Eterm profiler = BIF_ARG_1;
+ Eterm list = BIF_ARG_2;
Eterm prev;
int system_blocked = 0;
Process *profiler_p = NULL;
@@ -2075,7 +2111,7 @@ BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
system_blocked = 1;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* Check if valid process, no locks are taken */
@@ -2117,7 +2153,7 @@ BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
erts_system_profile_flags.runnable_procs = !!runnable_procs;
erts_system_profile_flags.exclusive = !!exclusive;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
@@ -2126,7 +2162,7 @@ BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
error:
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 326a5c136b..6f7309f493 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -845,8 +845,7 @@ erts_bs_put_utf8(ERL_BITS_PROTO_1(Eterm arg))
dst[1] = 0x80 | (val & 0x3F);
num_bits = 16;
} else if (val < 0x10000UL) {
- if ((0xD800 <= val && val <= 0xDFFF) ||
- val == 0xFFFE || val == 0xFFFF) {
+ if (0xD800 <= val && val <= 0xDFFF) {
return 0;
}
dst[0] = 0xE0 | (val >> 12);
@@ -886,8 +885,7 @@ erts_bs_put_utf16(ERL_BITS_PROTO_2(Eterm arg, Uint flags))
return 0;
}
val = unsigned_val(arg);
- if (val > 0x10FFFF || (0xD800 <= val && val <= 0xDFFF) ||
- val == 0xFFFE || val == 0xFFFF) {
+ if (val > 0x10FFFF || (0xD800 <= val && val <= 0xDFFF)) {
return 0;
}
@@ -1652,8 +1650,7 @@ erts_bs_get_utf8(ErlBinMatchBuffer* mb)
return THE_NON_VALUE;
}
result = (((result << 6) + a) << 6) + b - (Eterm) 0x000E2080UL;
- if ((0xD800 <= result && result <= 0xDFFF) ||
- result == 0xFFFE || result == 0xFFFF) {
+ if (0xD800 <= result && result <= 0xDFFF) {
return THE_NON_VALUE;
}
mb->offset += 24;
@@ -1723,9 +1720,6 @@ erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags)
w1 = (src[0] << 8) | src[1];
}
if (w1 < 0xD800 || w1 > 0xDFFF) {
- if (w1 == 0xFFFE || w1 == 0xFFFF) {
- return THE_NON_VALUE;
- }
mb->offset += 16;
return make_small(w1);
} else if (w1 > 0xDBFF) {
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 0f67733fa4..388d943755 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -150,7 +150,7 @@ void erts_bits_destroy_state(ERL_BITS_PROTO_0);
* NBYTES(x) returns the number of bytes needed to store x bits.
*/
-#define NBYTES(x) (((x) + 7) >> 3)
+#define NBYTES(x) (((Uint64)(x) + (Uint64) 7) >> 3)
#define BYTE_OFFSET(ofs) ((Uint) (ofs) >> 3)
#define BIT_OFFSET(ofs) ((ofs) & 7)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 59ef8fc3ea..0079c13287 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -129,8 +129,6 @@ static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed tab
static Uint meta_main_tab_seq_incr;
static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
-
-
/*
** The meta hash table of all NAMED ets tables
*/
@@ -202,12 +200,17 @@ static int free_table_cont(Process *p,
int first,
int clean_meta_tab);
static void print_table(int to, void *to_arg, int show, DbTable* tb);
-static BIF_RETTYPE ets_select_delete_1(Process *p, Eterm a1);
-static BIF_RETTYPE ets_select_count_1(Process *p, Eterm a1);
-static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1);
-static BIF_RETTYPE ets_delete_trap(Process *p, Eterm a1);
+static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1);
static Eterm table_info(Process* p, DbTable* tb, Eterm What);
+static BIF_RETTYPE ets_select1(Process* p, Eterm arg1);
+static BIF_RETTYPE ets_select2(Process* p, Eterm arg1, Eterm arg2);
+static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
+
+
/*
* Exported global
*/
@@ -277,8 +280,7 @@ static void schedule_free_dbtable(DbTable* tb)
ASSERT(scheds >= 1);
ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
erts_refc_init(&tb->common.ref, scheds);
- ERTS_THR_MEMORY_BARRIER;
- erts_smp_schedule_misc_aux_work(0, scheds, chk_free_dbtable, tb);
+ erts_schedule_multi_misc_aux_work(0, scheds, chk_free_dbtable, tb);
#else
free_dbtable(tb);
#endif
@@ -1297,7 +1299,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Uint32 status;
Sint keypos;
int is_named, is_fine_locked, frequent_read, is_compressed;
+#ifdef DEBUG
int cret;
+#endif
DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
erts_smp_rwmtx_t *mmtl;
@@ -1445,7 +1449,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.fixations = NULL;
tb->common.compress = is_compressed;
- cret = meth->db_create(BIF_P, tb);
+#ifdef DEBUG
+ cret =
+#endif
+ meth->db_create(BIF_P, tb);
ASSERT(cret == DB_ERROR_NONE);
erts_smp_spin_lock(&meta_main_tab_main_lock);
@@ -1942,8 +1949,10 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
/*
** This is for trapping, cannot be called directly.
*/
-static BIF_RETTYPE ets_select_delete_1(Process *p, Eterm a1)
+static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2109,7 +2118,7 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
BIF_RETTYPE ets_match_1(BIF_ALIST_1)
{
- return ets_select_1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ARG_1);
}
BIF_RETTYPE ets_match_2(BIF_ALIST_2)
@@ -2125,7 +2134,7 @@ BIF_RETTYPE ets_match_2(BIF_ALIST_2)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2143,7 +2152,7 @@ BIF_RETTYPE ets_match_3(BIF_ALIST_3)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2151,6 +2160,12 @@ BIF_RETTYPE ets_match_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_3(BIF_ALIST_3)
{
+ return ets_select3(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2161,22 +2176,22 @@ BIF_RETTYPE ets_select_3(BIF_ALIST_3)
CHECK_TABLES();
/* Chunk size strictly greater than 0 */
- if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_small(arg3) || (chunk_size = signed_val(arg3)) <= 0) {
+ BIF_ERROR(p, BADARG);
}
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- safety = ITERATION_SAFETY(BIF_P,tb);
+ safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(BIF_P, tb,
- BIF_ARG_2, chunk_size,
+ cret = tb->common.meth->db_select_chunk(p, tb,
+ arg2, chunk_size,
0 /* not reversed */,
&ret);
- if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
- fix_table_locked(BIF_P, tb);
+ if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ fix_table_locked(p, tb);
}
if (safety == ITER_UNSAFE) {
local_unfix_table(tb);
@@ -2188,22 +2203,24 @@ BIF_RETTYPE ets_select_3(BIF_ALIST_3)
ERTS_BIF_PREP_RET(result, ret);
break;
case DB_ERROR_SYSRES:
- ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
break;
default:
- ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
break;
}
- erts_match_set_release_result(BIF_P);
+ erts_match_set_release_result(p);
return result;
}
/* We get here instead of in the real BIF when trapping */
-static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1)
+static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2248,6 +2265,11 @@ static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1)
BIF_RETTYPE ets_select_1(BIF_ALIST_1)
{
+ return ets_select1(BIF_P, BIF_ARG_1);
+}
+
+static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
+{
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2261,28 +2283,27 @@ BIF_RETTYPE ets_select_1(BIF_ALIST_1)
* Make sure that the table exists.
*/
- if (!is_tuple(BIF_ARG_1)) {
- if (BIF_ARG_1 == am_EOT) {
+ if (!is_tuple(arg1)) {
+ if (arg1 == am_EOT) {
BIF_RET(am_EOT);
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- tptr = tuple_val(BIF_ARG_1);
+ tptr = tuple_val(arg1);
if (arityval(*tptr) < 1 ||
- (tb = db_get_table(BIF_P, tptr[1], DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ (tb = db_get_table(p, tptr[1], DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- safety = ITERATION_SAFETY(BIF_P,tb);
+ safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_continue(BIF_P,tb,
- BIF_ARG_1, &ret);
+ cret = tb->common.meth->db_select_continue(p,tb, arg1, &ret);
- if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
- fix_table_locked(BIF_P, tb);
+ if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ fix_table_locked(p, tb);
}
if (safety == ITER_UNSAFE) {
local_unfix_table(tb);
@@ -2294,20 +2315,26 @@ BIF_RETTYPE ets_select_1(BIF_ALIST_1)
ERTS_BIF_PREP_RET(result, ret);
break;
case DB_ERROR_SYSRES:
- ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
break;
default:
- ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
break;
}
- erts_match_set_release_result(BIF_P);
+ erts_match_set_release_result(p);
return result;
}
BIF_RETTYPE ets_select_2(BIF_ALIST_2)
{
+ return ets_select2(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+static BIF_RETTYPE
+ets_select2(Process* p, Eterm arg1, Eterm arg2)
+{
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2320,19 +2347,19 @@ BIF_RETTYPE ets_select_2(BIF_ALIST_2)
* Make sure that the table exists.
*/
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- safety = ITERATION_SAFETY(BIF_P,tb);
+ safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(BIF_P, tb, BIF_ARG_2,
+ cret = tb->common.meth->db_select(p, tb, arg2,
0, &ret);
- if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
- fix_table_locked(BIF_P, tb);
+ if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ fix_table_locked(p, tb);
}
if (safety == ITER_UNSAFE) {
local_unfix_table(tb);
@@ -2344,21 +2371,23 @@ BIF_RETTYPE ets_select_2(BIF_ALIST_2)
ERTS_BIF_PREP_RET(result, ret);
break;
case DB_ERROR_SYSRES:
- ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
break;
default:
- ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
break;
}
- erts_match_set_release_result(BIF_P);
+ erts_match_set_release_result(p);
return result;
}
/* We get here instead of in the real BIF when trapping */
-static BIF_RETTYPE ets_select_count_1(Process *p, Eterm a1)
+static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2499,7 +2528,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_reverse_1(BIF_ALIST_1)
{
- return ets_select_1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ARG_1);
}
BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
@@ -2553,7 +2582,7 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
*/
BIF_RETTYPE ets_match_object_1(BIF_ALIST_1)
{
- return ets_select_1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ARG_1);
}
BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
@@ -2569,7 +2598,7 @@ BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2587,7 +2616,7 @@ BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2606,7 +2635,9 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
int i;
Eterm* hp;
/*Process* rp = NULL;*/
+ /* If/when we implement lockless private tables:
Eterm owner;
+ */
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
@@ -2615,7 +2646,9 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+ /* If/when we implement lockless private tables:
owner = tb->common.owner;
+ */
/* If/when we implement lockless private tables:
if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->id) {
@@ -3521,8 +3554,10 @@ static void free_heir_data(DbTable* tb)
#endif
}
-static BIF_RETTYPE ets_delete_trap(Process *p, Eterm cont)
+static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm cont = BIF_ARG_1;
int trap;
Eterm* ptr = big_val(cont);
DbTable *tb = *((DbTable **) (UWord) (ptr + 1));
@@ -3659,9 +3694,6 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_true;
else
ret = am_false;
- } else if (What == am_atom_put("kept_objects",12)) {
- ret = make_small(IS_HASH_TABLE(tb->common.status)
- ? db_kept_items_hash(&tb->hash) : 0);
} else if (What == am_atom_put("safe_fixed",10)) {
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
@@ -3703,7 +3735,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
Eterm* hp;
db_calc_stats_hash(&tb->hash, &stats);
- hp = HAlloc(p, 1 + 6 + FLOAT_SIZE_OBJECT*3);
+ hp = HAlloc(p, 1 + 7 + FLOAT_SIZE_OBJECT*3);
f.fd = stats.avg_chain_len;
avg = make_float(hp);
PUT_DOUBLE(f, hp);
@@ -3718,10 +3750,11 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
std_dev_exp = make_float(hp);
PUT_DOUBLE(f, hp);
hp += FLOAT_SIZE_OBJECT;
- ret = TUPLE6(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)),
+ ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)),
avg, std_dev_real, std_dev_exp,
make_small(stats.min_chain_len),
- make_small(stats.max_chain_len));
+ make_small(stats.max_chain_len),
+ make_small(db_kept_items_hash(&tb->hash)));
}
else {
ret = am_false;
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index e3380a57b2..038a667b06 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -312,15 +312,24 @@ struct ext_segment {
struct segment* segtab[1]; /* The segment table */
};
#define SIZEOF_EXTSEG(NSEGS) \
- (sizeof(struct ext_segment) - sizeof(struct segment*) + sizeof(struct segment*)*(NSEGS))
+ (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS))
-#ifdef DEBUG
-# include <stddef.h> /* offsetof */
+#if defined(DEBUG) || defined(VALGRIND)
# define EXTSEG(SEGTAB_PTR) \
((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab)))
#endif
+static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
+ struct segment** segtab)
+{
+ erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
+#ifdef VALGRIND
+ tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab);
+#endif
+}
+
+
/* How the table segments relate to each other:
ext_segment: ext_segment: "plain" segment
@@ -649,7 +658,8 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK);
erts_smp_atomic_init_nob(&tb->nactive, SEGSZ);
erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
- erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t) alloc_ext_seg(tb,0,NULL)->segtab);
+ erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
+ SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab);
tb->nsegs = NSEG_1;
tb->nslots = SEGSZ;
@@ -2357,7 +2367,7 @@ static int alloc_seg(DbTableHash *tb)
struct ext_segment* eseg;
eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) eseg->segtab);
+ SET_SEGTAB(tb, eseg->segtab);
tb->nsegs = eseg->nsegs;
}
ASSERT(seg_ix < tb->nsegs);
@@ -2429,7 +2439,7 @@ static int free_seg(DbTableHash *tb, int free_records)
MY_ASSERT(newtop->s.is_ext_segment);
if (newtop->prev_segtab != NULL) {
/* Time to use a smaller segtab */
- erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
+ SET_SEGTAB(tb, newtop->prev_segtab);
tb->nsegs = seg_ix;
ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
}
@@ -2446,7 +2456,7 @@ static int free_seg(DbTableHash *tb, int free_records)
if (seg_ix > 0) {
if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
} else {
- erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t)NULL);
+ SET_SEGTAB(tb, NULL);
}
#endif
tb->nslots -= SEGSZ;
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index e0285fa5ed..23ac493118 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -58,6 +58,9 @@ typedef struct db_table_hash {
#ifdef ERTS_SMP
DbTableHashFineLocks* locks;
#endif
+#ifdef VALGRIND
+ struct ext_segment* top_ptr_to_segment_with_active_segtab;
+#endif
} DbTableHash;
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index c6f0d80e32..312050b931 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -344,8 +344,8 @@ static int do_partly_bound_can_match_lesser(Eterm a, Eterm b,
int *done);
static int do_partly_bound_can_match_greater(Eterm a, Eterm b,
int *done);
-static BIF_RETTYPE ets_select_reverse(Process *p, Eterm a1,
- Eterm a2, Eterm a3);
+static BIF_RETTYPE ets_select_reverse(BIF_ALIST_3);
+
/* Method interface functions */
static int db_first_tree(Process *p, DbTable *tbl,
@@ -844,8 +844,12 @@ static int db_slot_tree(Process *p, DbTable *tbl,
-static BIF_RETTYPE ets_select_reverse(Process *p, Eterm a1, Eterm a2, Eterm a3)
+static BIF_RETTYPE ets_select_reverse(BIF_ALIST_3)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
+ Eterm a2 = BIF_ARG_2;
+ Eterm a3 = BIF_ARG_3;
Eterm list;
Eterm result;
Eterm* hp;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 7dfbb2ed02..4821a7d9fb 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -35,6 +35,7 @@
#include "bif.h"
#include "big.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
#include "erl_db_util.h"
@@ -495,7 +496,7 @@ static erts_smp_atomic32_t trace_control_word;
/* This needs to be here, before the bif table... */
-static Eterm db_set_trace_control_word_fake_1(Process *p, Eterm val);
+static Eterm db_set_trace_control_word_fake_1(BIF_ALIST_1);
/*
** The table of callable bif's, i e guard bif's and
@@ -908,14 +909,18 @@ static void db_free_tmp_uncompressed(DbTerm* obj);
/*
** Pseudo BIF:s to be callable from the PAM VM.
*/
-
-BIF_RETTYPE db_get_trace_control_word_0(Process *p)
+BIF_RETTYPE db_get_trace_control_word(Process *p)
{
Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word);
BIF_RET(erts_make_integer((Uint) tcw, p));
}
-BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new)
+BIF_RETTYPE db_get_trace_control_word_0(BIF_ALIST_0)
+{
+ BIF_RET(db_get_trace_control_word(BIF_P));
+}
+
+BIF_RETTYPE db_set_trace_control_word(Process *p, Eterm new)
{
Uint val;
Uint32 old_tcw;
@@ -923,20 +928,27 @@ BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new)
BIF_ERROR(p, BADARG);
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
-
+
old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word,
(erts_aint32_t) val);
BIF_RET(erts_make_integer((Uint) old_tcw, p));
}
-static Eterm db_set_trace_control_word_fake_1(Process *p, Eterm new)
+BIF_RETTYPE db_set_trace_control_word_1(BIF_ALIST_1)
{
+ BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_1));
+}
+
+static Eterm db_set_trace_control_word_fake_1(BIF_ALIST_1)
+{
+ Process *p = BIF_P;
+ Eterm new = BIF_ARG_1;
Uint val;
if (!term_to_Uint(new, &val))
BIF_ERROR(p, BADARG);
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
- BIF_RET(db_get_trace_control_word_0(p));
+ BIF_RET(db_get_trace_control_word(p));
}
/*
@@ -1704,6 +1716,7 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
Process *current_scheduled;
ErtsSchedulerData *esdp;
Eterm (*bif)(Process*, ...);
+ Eterm bif_args[3];
int fail_label;
int atomic_trace;
#if HALFWORD_HEAP
@@ -1734,14 +1747,14 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
if (! atomic_trace) { \
erts_refc_inc(&bprog->refc, 2); \
erts_smp_proc_unlock((p), ERTS_PROC_LOCK_MAIN); \
- erts_smp_block_system(0); \
+ erts_smp_thr_progress_block(); \
atomic_trace = !0; \
} \
} while (0)
#define END_ATOMIC_TRACE(p) \
do { \
if (atomic_trace) { \
- erts_smp_release_system(); \
+ erts_smp_thr_progress_unblock(); \
erts_smp_proc_lock((p), ERTS_PROC_LOCK_MAIN); \
if (erts_refc_dectest(&bprog->refc, 0) == 0) {\
erts_bin_free(bprog); \
@@ -1957,7 +1970,7 @@ restart:
break;
case matchCall0:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc);
+ t = (*bif)(build_proc, bif_args);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
@@ -1968,7 +1981,7 @@ restart:
break;
case matchCall1:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc, esp[-1]);
+ t = (*bif)(build_proc, esp-1);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
@@ -1979,7 +1992,9 @@ restart:
break;
case matchCall2:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc, esp[-1], esp[-2]);
+ bif_args[0] = esp[-1];
+ bif_args[1] = esp[-2];
+ t = (*bif)(build_proc, bif_args);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
@@ -1991,7 +2006,10 @@ restart:
break;
case matchCall3:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc, esp[-1], esp[-2], esp[-3]);
+ bif_args[0] = esp[-1];
+ bif_args[1] = esp[-2];
+ bif_args[2] = esp[-3];
+ t = (*bif)(build_proc, bif_args);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
@@ -2846,7 +2864,9 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
Uint new_sz = offset + db_size_dbterm_comp(tb, obj);
byte* basep;
DbTerm* newp;
+#ifdef DEBUG
byte* top;
+#endif
ASSERT(tb->compress);
if (old != 0) {
@@ -2868,7 +2888,10 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
}
newp->size = size_object(obj);
- top = copy_to_comp(tb, obj, newp, new_sz);
+#ifdef DEBUG
+ top =
+#endif
+ copy_to_comp(tb, obj, newp, new_sz);
ASSERT(top <= basep + new_sz);
/* ToDo: Maybe realloc if ((basep+new_sz) - top) > WASTED_SPACE_LIMIT */
@@ -4970,7 +4993,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
static Eterm seq_trace_fake(Process *p, Eterm arg1)
{
- Eterm result = seq_trace_info_1(p,arg1);
+ Eterm result = erl_seq_trace_info(p, arg1);
if (is_tuple(result) && *tuple_val(result) == 2) {
return (tuple_val(result))[2];
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index bb1751d309..6a96e174e1 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -326,8 +326,10 @@ ERTS_GLB_INLINE int db_eq(DbTableCommon* tb, Eterm a, DbTerm* b)
(T)->common.owner == (P)->id)
/* Function prototypes */
-Eterm db_get_trace_control_word_0(Process *p);
-Eterm db_set_trace_control_word_1(Process *p, Eterm val);
+BIF_RETTYPE db_get_trace_control_word(Process* p);
+BIF_RETTYPE db_set_trace_control_word(Process* p, Eterm tcw);
+BIF_RETTYPE db_get_trace_control_word_0(BIF_ALIST_0);
+BIF_RETTYPE db_set_trace_control_word_1(BIF_ALIST_1);
void db_initialize_util(void);
Eterm db_getkey(int keypos, Eterm obj);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 401967a8de..ae0c9def90 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -28,6 +28,14 @@
# include "config.h"
#endif
+#define ERL_DRV_DEPRECATED_FUNC
+#ifdef __GNUC__
+# if __GNUC__ >= 3
+# undef ERL_DRV_DEPRECATED_FUNC
+# define ERL_DRV_DEPRECATED_FUNC __attribute__((deprecated))
+# endif
+#endif
+
#ifdef SIZEOF_CHAR
# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
# undef SIZEOF_CHAR
@@ -582,8 +590,11 @@ EXTERN long driver_async(ErlDrvPort ix,
void* async_data,
void (*async_free)(void*));
-
-EXTERN int driver_async_cancel(unsigned int key);
+/*
+ * driver_async_cancel() is deprecated. It is scheduled for removal
+ * in OTP-R16. For more information see the erl_driver(3) documentation.
+ */
+EXTERN int driver_async_cancel(unsigned int key) ERL_DRV_DEPRECATED_FUNC;
/* Locks the driver in the machine "forever", there is
no unlock function. Note that this is almost never useful, as an open
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index dc578f6d2a..a49a155701 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -158,7 +158,9 @@ erl_drv_mutex_create(char *name)
(sizeof(ErlDrvMutex)
+ (name ? sys_strlen(name) + 1 : 0)));
if (dmtx) {
- if (ethr_mutex_init(&dmtx->mtx) != 0) {
+ ethr_mutex_opt opt = ETHR_MUTEX_OPT_DEFAULT_INITER;
+ opt.posix_compliant = 1;
+ if (ethr_mutex_init_opt(&dmtx->mtx, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
dmtx = NULL;
}
@@ -226,7 +228,9 @@ erl_drv_cond_create(char *name)
(sizeof(ErlDrvCond)
+ (name ? sys_strlen(name) + 1 : 0)));
if (dcnd) {
- if (ethr_cond_init(&dcnd->cnd) != 0) {
+ ethr_cond_opt opt = ETHR_COND_OPT_DEFAULT_INITER;
+ opt.posix_compliant = 1;
+ if (ethr_cond_init_opt(&dcnd->cnd, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd);
dcnd = NULL;
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 5edcd667e7..eb2b945877 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -100,14 +100,14 @@ static Uint combined_message_size(Process* p);
static void remove_message_buffers(Process* p);
static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
-static void do_minor(Process *p, int new_sz, Eterm* objv, int nobj);
+static void do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj);
static Eterm* sweep_rootset(Rootset *rootset, Eterm* htop, char* src, Uint src_size);
static Eterm* sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size);
static Eterm* sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
char* src, Uint src_size);
static Eterm* collect_heap_frags(Process* p, Eterm* heap,
Eterm* htop, Eterm* objv, int nobj);
-static Uint adjust_after_fullsweep(Process *p, int size_before,
+static Uint adjust_after_fullsweep(Process *p, Uint size_before,
int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
@@ -315,7 +315,12 @@ erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
if (is_non_value(result)) {
if (p->freason == TRAP) {
- cost = erts_garbage_collect(p, 0, p->def_arg_reg, p->arity);
+ #if HIPE
+ if (regs == NULL) {
+ regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array;
+ }
+ #endif
+ cost = erts_garbage_collect(p, 0, regs, p->arity);
} else {
cost = erts_garbage_collect(p, 0, regs, arity);
}
@@ -357,8 +362,6 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
-
ERTS_CHK_OFFHEAP(p);
ErtsGcQuickSanityCheck(p);
@@ -392,8 +395,6 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
trace_gc(p, am_gc_end);
}
- erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
-
if (erts_system_monitor_long_gc != 0) {
Uint ms2, s2, us2;
Sint t;
@@ -441,7 +442,15 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
p->last_old_htop = p->old_htop;
#endif
- return ((int) (HEAP_TOP(p) - HEAP_START(p)) / 10);
+ /* FIXME: This function should really return an Sint, i.e., a possibly
+ 64 bit wide signed integer, but that requires updating all the code
+ that calls it. For now, we just return INT_MAX if the result is too
+ large for an int. */
+ {
+ Sint result = (HEAP_TOP(p) - HEAP_START(p)) / 10;
+ if (result >= INT_MAX) return INT_MAX;
+ else return (int) result;
+ }
}
/*
@@ -469,7 +478,6 @@ erts_garbage_collect_hibernate(Process* p)
p->gcstatus = p->status;
p->status = P_GARBING;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
ErtsGcQuickSanityCheck(p);
ASSERT(p->mbuf_sz == 0);
ASSERT(p->mbuf == 0);
@@ -583,12 +591,13 @@ erts_garbage_collect_hibernate(Process* p)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
p->status = p->gcstatus;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
}
void
-erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
+erts_garbage_collect_literals(Process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh)
{
Uint byte_lit_size = sizeof(Eterm)*lit_size;
Uint old_heap_size;
@@ -599,7 +608,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
char* area;
Uint area_size;
Eterm* old_htop;
- int n;
+ Uint n;
+ struct erl_off_heap_header** prev;
/*
* Set GC state.
@@ -608,7 +618,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
p->gcstatus = p->status;
p->status = P_GARBING;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
/*
* We assume that the caller has already done a major collection
@@ -634,6 +643,9 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
offset_heap(temp_lit, lit_size, offs, (char *) literals, byte_lit_size);
offset_heap(p->heap, p->htop - p->heap, offs, (char *) literals, byte_lit_size);
offset_rootset(p, offs, (char *) literals, byte_lit_size, p->arg_reg, p->arity);
+ if (oh) {
+ oh = (struct erl_off_heap_header *) ((Eterm *)(void *) oh + offs);
+ }
/*
* Now the literals are placed in memory that is safe to write into,
@@ -701,6 +713,45 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
p->old_htop = old_htop;
/*
+ * Prepare to sweep binaries. Since all MSOs on the new heap
+ * must come before MSOs on the old heap, find the end of the
+ * current MSO list and use that as a starting point.
+ */
+
+ if (oh) {
+ prev = &MSO(p).first;
+ while (*prev) {
+ prev = &(*prev)->next;
+ }
+ }
+
+ /*
+ * Sweep through all binaries in the temporary literal area.
+ */
+
+ while (oh) {
+ if (IS_MOVED_BOXED(oh->thing_word)) {
+ Binary* bptr;
+ struct erl_off_heap_header* ptr;
+
+ ptr = (struct erl_off_heap_header*) boxed_val(oh->thing_word);
+ ASSERT(thing_subtag(ptr->thing_word) == REFC_BINARY_SUBTAG);
+ bptr = ((ProcBin*)ptr)->val;
+
+ /*
+ * This binary has been copied to the heap.
+ * We must increment its reference count and
+ * link it into the MSO list for the process.
+ */
+
+ erts_refc_inc(&bptr->refc, 1);
+ *prev = ptr;
+ prev = &ptr->next;
+ }
+ oh = oh->next;
+ }
+
+ /*
* We no longer need this temporary area.
*/
erts_free(ERTS_ALC_T_TMP, (void *) temp_lit);
@@ -711,7 +762,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
p->status = p->gcstatus;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
}
static int
@@ -731,7 +781,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* This improved Estone by more than 1200 estones on my computer
* (Ultra Sparc 10).
*/
- size_t new_sz = erts_next_heap_size(HEAP_TOP(p) - HEAP_START(p), 1);
+ Uint new_sz = erts_next_heap_size(HEAP_TOP(p) - HEAP_START(p), 1);
/* Create new, empty old_heap */
n_old = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_OLD_HEAP,
@@ -871,12 +921,12 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
#endif /* HIPE */
static void
-do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
+do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
{
Rootset rootset; /* Rootset for GC (stack, dictionary, etc). */
Roots* roots;
Eterm* n_htop;
- int n;
+ Uint n;
Eterm* ptr;
Eterm val;
Eterm gval;
@@ -1079,14 +1129,14 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
{
Rootset rootset;
Roots* roots;
- int size_before;
+ Uint size_before;
Eterm* n_heap;
Eterm* n_htop;
char* src = (char *) HEAP_START(p);
Uint src_size = (char *) HEAP_TOP(p) - src;
char* oh = (char *) OLD_HEAP(p);
Uint oh_size = (char *) OLD_HTOP(p) - oh;
- int n;
+ Uint n;
Uint new_sz;
Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
ErlMessage *msgp;
@@ -1312,10 +1362,10 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
}
static Uint
-adjust_after_fullsweep(Process *p, int size_before, int need, Eterm *objv, int nobj)
+adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int nobj)
{
- int wanted, sz, size_after, need_after;
- int stack_size = STACK_SZ_ON_HEAP(p);
+ Uint wanted, sz, size_after, need_after;
+ Uint stack_size = STACK_SZ_ON_HEAP(p);
Uint reclaimed_now;
size_after = (HEAP_TOP(p) - HEAP_START(p));
@@ -1915,8 +1965,8 @@ static void
grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj)
{
Eterm* new_heap;
- int heap_size = HEAP_TOP(p) - HEAP_START(p);
- int stack_size = p->hend - p->stop;
+ Uint heap_size = HEAP_TOP(p) - HEAP_START(p);
+ Uint stack_size = p->hend - p->stop;
Sint offs;
ASSERT(HEAP_SIZE(p) < new_sz);
@@ -1954,10 +2004,10 @@ static void
shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj)
{
Eterm* new_heap;
- int heap_size = HEAP_TOP(p) - HEAP_START(p);
+ Uint heap_size = HEAP_TOP(p) - HEAP_START(p);
Sint offs;
- int stack_size = p->hend - p->stop;
+ Uint stack_size = p->hend - p->stop;
ASSERT(new_sz < p->heap_sz);
sys_memmove(p->heap + new_sz - stack_size, p->stop, stack_size *
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index 1cc508ac5a..e7d4ac2b67 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -190,16 +190,20 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
GFAllctrInit_t *gfinit,
AllctrInit_t *init)
{
- GFAllctr_t nulled_state = {{0}};
- /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
- warning. gcc warns if {0} is used as initializer of a struct when
- the first member is a struct (not if, for example, the third member
- is a struct). */
+ struct {
+ int dummy;
+ GFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
Allctr_t *allctr = (Allctr_t *) gfallctr;
- init->sbmbct = 0; /* Small mbc not yet supported by goodfit */
+ sys_memcpy((void *) gfallctr, (void *) &zero.allctr, sizeof(GFAllctr_t));
- sys_memcpy((void *) gfallctr, (void *) &nulled_state, sizeof(GFAllctr_t));
+ init->sbmbct = 0; /* Small mbc not yet supported by goodfit */
allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 5f3f653e99..6c4ba2af68 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -42,6 +42,9 @@
#include "erl_misc_utils.h"
#include "packet_parser.h"
#include "erl_cpu_topology.h"
+#include "erl_thr_progress.h"
+#include "erl_thr_queue.h"
+#include "erl_async.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -68,6 +71,8 @@ static void erl_init(int ncpu);
#define ERTS_MIN_COMPAT_REL 7
+static erts_atomic_t exiting;
+
#ifdef ERTS_SMP
erts_smp_atomic32_t erts_writing_erl_crash_dump;
erts_tsd_key_t erts_is_crash_dumping_key;
@@ -87,7 +92,6 @@ int erts_use_sender_punish;
*/
Uint display_items; /* no of items to display in traces etc */
-Uint display_loads; /* print info about loaded modules */
int H_MIN_SIZE; /* The minimum heap grain */
int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/
@@ -99,8 +103,6 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace
* in error codes.
*/
-int erts_async_max_threads; /* number of threads for async support */
-int erts_async_thread_suggested_stack_size;
erts_smp_atomic32_t erts_max_gen_gcs;
Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
@@ -127,6 +129,8 @@ int erts_modified_timing_level;
int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
+int erts_no_line_info = 0; /* -L: Don't load line information */
+
/*
* Other global variables.
*/
@@ -245,10 +249,6 @@ erl_init(int ncpu)
{
init_benchmarking();
-#ifdef ERTS_SMP
- erts_system_block_init();
-#endif
-
erts_init_monitors();
erts_init_gc();
erts_init_time();
@@ -258,6 +258,8 @@ erl_init(int ncpu)
no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
+ erts_alloc_late_init();
+
H_MIN_SIZE = erts_next_heap_size(H_MIN_SIZE, 0);
BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
@@ -279,6 +281,7 @@ erl_init(int ncpu)
erts_init_node_tables();
init_dist();
erl_drv_thr_init();
+ erts_init_async();
init_io();
init_copy();
init_load();
@@ -433,7 +436,7 @@ static void
load_preloaded(void)
{
int i;
- int res;
+ Eterm res;
Preload* preload_p;
Eterm module_name;
byte* code;
@@ -452,8 +455,9 @@ load_preloaded(void)
name);
res = erts_load_module(NULL, 0, NIL, &module_name, code, length);
sys_preload_end(&preload_p[i]);
- if (res < 0)
- erl_exit(1,"Failed loading preloaded module %s\n", name);
+ if (res != NIL)
+ erl_exit(1,"Failed loading preloaded module %s (%T)\n",
+ name, res);
i++;
}
}
@@ -495,8 +499,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
- erts_fprintf(stderr, "-l turn on auto load tracing\n");
-
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
@@ -511,6 +513,8 @@ void erts_usage(void)
erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
+ erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
@@ -605,12 +609,14 @@ early_init(int *argc, char **argv) /*
int max_main_threads;
int max_reader_groups;
int reader_groups;
+ char envbuf[21]; /* enough for any 64-bit integer */
+ size_t envbufsz;
+ erts_sched_compact_load = 1;
use_multi_run_queue = 1;
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
display_items = 200;
- display_loads = 0;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = 0;
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
@@ -642,6 +648,10 @@ early_init(int *argc, char **argv) /*
erts_use_r9_pids_ports = 0;
erts_sys_pre_init();
+ erts_atomic_init_nob(&exiting, 0);
+#ifdef ERTS_SMP
+ erts_thr_progress_pre_init();
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_init();
@@ -673,6 +683,16 @@ early_init(int *argc, char **argv) /*
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ envbufsz = sizeof(envbuf);
+
+ /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
+ if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
+ erts_async_max_threads = atoi(envbuf);
+ else
+ erts_async_max_threads = 0;
+ if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)
+ erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -700,6 +720,20 @@ early_init(int *argc, char **argv) /*
}
break;
}
+ case 'A': {
+ /* set number of threads in thread pool */
+ char *arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if (((erts_async_max_threads = atoi(arg)) < 0) ||
+ (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
+ erts_fprintf(stderr,
+ "bad number of async threads %s\n",
+ arg);
+ erts_usage();
+ VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
+ erts_async_max_threads));
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -765,11 +799,29 @@ early_init(int *argc, char **argv) /*
erts_no_schedulers = (Uint) no_schedulers;
#endif
+ erts_early_init_scheduling(no_schedulers);
+ alloc_opts.ncpu = ncpu;
erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
-M flags. */
/* Require allocators */
- erts_early_init_scheduling();
+#ifdef ERTS_SMP
+ /*
+ * Thread progress management:
+ *
+ * * Managed threads:
+ * ** Scheduler threads (see erl_process.c)
+ * ** Aux thread (see erl_process.c)
+ * ** Sys message dispatcher thread (see erl_trace.c)
+ *
+ * * Unmanaged threads that need to register:
+ * ** Async threads (see erl_async.c)
+ */
+ erts_thr_progress_init(no_schedulers,
+ no_schedulers+2,
+ erts_async_max_threads);
+#endif
+ erts_thr_q_init();
erts_init_utils();
erts_early_init_cpu_topology(no_schedulers,
&max_main_threads,
@@ -806,10 +858,12 @@ early_init(int *argc, char **argv) /*
#if defined(HIPE)
hipe_signal_init(); /* must be done very early */
#endif
- erl_sys_init();
erl_sys_args(argc, argv);
+ /* Creates threads on Windows that depend on the arguments, so has to be after erl_sys_args */
+ erl_sys_init();
+
erts_ets_realloc_always_moves = 0;
erts_ets_always_compress = 0;
erts_dist_buf_busy_limit = ERTS_DE_BUSY_LIMIT;
@@ -847,7 +901,6 @@ erl_start(int argc, char **argv)
int have_break_handler = 1;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
- int async_max_threads = erts_async_max_threads;
int ncpu = early_init(&argc, argv);
envbufsz = sizeof(envbuf);
@@ -863,11 +916,6 @@ erl_start(int argc, char **argv)
(erts_aint32_t) max_gen_gcs);
}
- envbufsz = sizeof(envbuf);
- if (erts_sys_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0) {
- async_max_threads = atoi(envbuf);
- }
-
#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
/*
* The default stack size on MacOS X is too small for pcre.
@@ -933,10 +981,9 @@ erl_start(int argc, char **argv)
erts_fprintf(stderr, "%s unknown flag %s\n", argv[0], argv[i]);
erts_usage();
}
- case 'l':
- display_loads++;
+ case 'L':
+ erts_no_line_info = 1;
break;
-
case 'v':
#ifdef DEBUG
if (argv[i][2] == '\0') {
@@ -1154,6 +1201,19 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("cl", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (sys_strcmp("true", arg) == 0)
+ erts_sched_compact_load = 1;
+ else if (sys_strcmp("false", arg) == 0)
+ erts_sched_compact_load = 0;
+ else {
+ erts_fprintf(stderr,
+ "bad scheduler compact load value '%s'\n",
+ arg);
+ erts_usage();
+ }
+ }
else if (has_prefix("ct", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
res = erts_init_cpu_topology_string(arg);
@@ -1295,17 +1355,8 @@ erl_start(int argc, char **argv)
break;
}
- case 'A':
- /* set number of threads in thread pool */
- arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (((async_max_threads = atoi(arg)) < 0) ||
- (async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
- erts_fprintf(stderr, "bad number of async threads %s\n", arg);
- erts_usage();
- }
-
- VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
- async_max_threads));
+ case 'A': /* Was handled in early init; just read past it */
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
break;
case 'a':
@@ -1394,10 +1445,6 @@ erl_start(int argc, char **argv)
i++;
}
-#ifdef USE_THREADS
- erts_async_max_threads = async_max_threads;
-#endif
-
/* Delayed check of +P flag */
if (erts_max_processes < ERTS_MIN_PROCESSES
|| erts_max_processes > ERTS_MAX_PROCESSES
@@ -1443,6 +1490,10 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
erts_thr_set_main_status(1, 1);
+#if ERTS_USE_ASYNC_READY_Q
+ erts_get_scheduler_data()->aux_work_data.async_ready.queue
+ = erts_get_async_ready_queue(1);
+#endif
set_main_stack_size();
process_main();
#endif
@@ -1468,6 +1519,29 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what)
static void
system_cleanup(int exit_code)
{
+ /*
+ * Make sure only one thread exits the runtime system.
+ */
+ if (erts_atomic_inc_read_nob(&exiting) != 1) {
+ /*
+ * Another thread is currently exiting the system;
+ * wait for it to do its job.
+ */
+#ifdef ERTS_SMP
+ if (erts_thr_progress_is_managed_thread()) {
+ /*
+ * The exiting thread might be waiting for
+ * us to block; need to update status...
+ */
+ erts_thr_progress_active(NULL, 0);
+ erts_thr_progress_prepare_wait(NULL);
+ }
+#endif
+ /* Wait forever... */
+ while (1)
+ erts_milli_sleep(10000000);
+ }
+
/* No cleanup wanted if ...
* 1. we are about to do an abnormal exit
* 2. we haven't finished initializing, or
@@ -1487,7 +1561,6 @@ system_cleanup(int exit_code)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
#endif
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */
#endif
#ifdef HYBRID
@@ -1516,17 +1589,7 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS)
- exit_async();
-#endif
-#if HAVE_ERTS_MSEG
- erts_mseg_exit();
-#endif
-
- /*
- * A lot more cleaning could/should have been done...
- */
-
+ erts_exit_flush_async();
}
/*
@@ -1543,10 +1606,10 @@ __decl_noreturn void erl_exit0(char *file, int line, int n, char *fmt,...)
va_start(args, fmt);
- save_statistics();
-
system_cleanup(n);
+ save_statistics();
+
an = abs(n);
if (erts_mtrace_enabled)
@@ -1583,10 +1646,10 @@ __decl_noreturn void erl_exit(int n, char *fmt,...)
va_start(args, fmt);
- save_statistics();
-
system_cleanup(n);
+ save_statistics();
+
an = abs(n);
if (erts_mtrace_enabled)
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index 04ea004ef7..963c8b3c58 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 587d82f2bb..44da6b6c51 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -110,10 +110,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "fun_tab", NULL },
{ "environ", NULL },
#endif
- { "asyncq", "address" },
-#ifndef ERTS_SMP
- { "async_ready", NULL },
-#endif
{ "efile_drv", "address" },
#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
{ "child_status", NULL },
@@ -125,7 +121,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
{ "pollset_rm_list", NULL },
- { "removed_fd_pre_alloc_lock", NULL },
+ { "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
{ "schdlr_sspnd", NULL },
{ "run_queue", "address" },
@@ -138,6 +134,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "alcu_init_atoms", NULL },
{ "mseg_init_atoms", NULL },
{ "drv_tsd", NULL },
+ { "async_enq_mtx", NULL },
#ifdef ERTS_SMP
{ "sys_msg_q", NULL },
{ "atom_tab", NULL },
@@ -151,10 +148,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
- { "fix_alloc", "index" },
{ "alcu_allocator", "index" },
{ "sbmbc_alloc", "index" },
- { "alcu_delayed_free", "index" },
{ "mseg", NULL },
#if HALFWORD_HEAP
{ "pmmap", NULL },
@@ -175,15 +170,12 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "timeofday", NULL },
{ "breakpoints", NULL },
{ "pollsets_lock", NULL },
- { "async_id", NULL },
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
- { "misc_aux_work_queue", "index" },
- { "misc_aux_work_pre_alloc_lock", "address" },
{ "sched_stat", NULL },
{ "run_queue_sleep_list", "address" },
#endif
- { "alloc_thr_ix_lock", NULL },
+ { "async_init_mtx", NULL },
#ifdef ERTS_SMP
{ "proc_lck_qs_alloc", NULL },
#endif
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 82f272d28a..16be47d540 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -240,7 +240,7 @@ erts_msg_distext2heap(Process *pp,
Sint sz;
*bpp = NULL;
- sz = erts_decode_dist_ext_size(dist_extp, 0);
+ sz = erts_decode_dist_ext_size(dist_extp);
if (sz < 0)
goto decode_error;
if (is_not_nil(*tokenp)) {
@@ -713,7 +713,7 @@ erts_msg_attached_data_size_aux(ErlMessage *msg)
ASSERT(msg->data.dist_ext);
ASSERT(msg->data.dist_ext->heap_size < 0);
- sz = erts_decode_dist_ext_size(msg->data.dist_ext, 0);
+ sz = erts_decode_dist_ext_size(msg->data.dist_ext);
if (sz < 0) {
/* Bad external; remove it */
if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 47597f302b..1a84950120 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -948,8 +948,10 @@ static void erts_dump_links(ErtsLink *root, int indent)
erts_destroy_tmp_dsbuf(dsbufp);
}
-Eterm erts_debug_dump_monitors_1(Process *p, Eterm pid)
+Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm pid = BIF_ARG_1;
Process *rp;
DistEntry *dep;
rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
@@ -976,8 +978,10 @@ Eterm erts_debug_dump_monitors_1(Process *p, Eterm pid)
}
}
-Eterm erts_debug_dump_links_1(Process *p, Eterm pid)
+Eterm erts_debug_dump_links_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm pid = BIF_ARG_1;
Process *rp;
DistEntry *dep;
if (is_internal_port(pid)) {
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index b1478758a1..358c67bf20 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -503,12 +503,6 @@ write_trace_header(char *nodename, char *pid, char *hostname)
case ERTS_ALC_A_SYSTEM:
PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
break;
- case ERTS_ALC_A_FIXED_SIZE:
- if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
- PUT_UI16(tracep, ERTS_FIX_CORE_ALLOCATOR);
- else
- PUT_UI16(tracep, ERTS_ALC_A_SYSTEM);
- break;
default:
PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
break;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ea781a6cd0..62798bb2c1 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -32,6 +32,7 @@
#include "error.h"
#include "big.h"
#include "beam_bp.h"
+#include "erl_thr_progress.h"
#include <limits.h>
#include <stddef.h> /* offsetof */
@@ -130,10 +131,13 @@ static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
env->tmp_obj_list = NULL;
}
-/* Temporary object header, auto-deallocated when NIF returns. */
+/* Temporary object header, auto-deallocated when NIF returns
+ * or when independent environment is cleared.
+ */
struct enif_tmp_obj_t {
struct enif_tmp_obj_t* next;
void (*dtor)(struct enif_tmp_obj_t*);
+ ErtsAlcType_t allocator;
/*char data[];*/
};
@@ -244,7 +248,7 @@ ErlNifEnv* enif_alloc_env(void)
msg_env->env.hp_end = phony_heap;
msg_env->env.heap_frag = NULL;
msg_env->env.mod_nif = NULL;
- msg_env->env.tmp_obj_list = (struct enif_tmp_obj_t*) 1; /* invalid non-NULL */
+ msg_env->env.tmp_obj_list = NULL;
msg_env->env.proc = &msg_env->phony_proc;
memset(&msg_env->phony_proc, 0, sizeof(Process));
HEAP_START(&msg_env->phony_proc) = phony_heap;
@@ -289,6 +293,7 @@ void enif_clear_env(ErlNifEnv* env)
menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
ASSERT(!is_offheap(&MSO(p)));
+ free_tmp_objs(env);
}
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
@@ -435,24 +440,36 @@ int enif_is_exception(ErlNifEnv* env, ERL_NIF_TERM term)
return term == THE_NON_VALUE;
}
+int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
+{
+ return is_number(term);
+}
+
+static ERTS_INLINE int is_proc_bound(ErlNifEnv* env)
+{
+ return env->mod_nif != NULL;
+}
+
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
- erts_free_aligned_binary_bytes_extra((byte*)obj,ERTS_ALC_T_TMP);
+ erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
}
int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
{
+ ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
union {
struct enif_tmp_obj_t* tmp;
byte* raw_ptr;
}u;
u.tmp = NULL;
- bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, ERTS_ALC_T_TMP,
+ bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
sizeof(struct enif_tmp_obj_t));
if (bin->data == NULL) {
return 0;
}
if (u.tmp != NULL) {
+ u.tmp->allocator = allocator;
u.tmp->next = env->tmp_obj_list;
u.tmp->dtor = &aligned_binary_dtor;
env->tmp_obj_list = u.tmp;
@@ -466,12 +483,13 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
static void tmp_alloc_dtor(struct enif_tmp_obj_t* obj)
{
- erts_free(ERTS_ALC_T_TMP, obj);
+ erts_free(obj->allocator, obj);
}
int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
struct enif_tmp_obj_t* tobj;
+ ErtsAlcType_t allocator;
Uint sz;
if (is_binary(term)) {
return enif_inspect_binary(env,term,bin);
@@ -486,8 +504,10 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
if (erts_iolist_size(term, &sz)) {
return 0;
}
-
- tobj = erts_alloc(ERTS_ALC_T_TMP, sz + sizeof(struct enif_tmp_obj_t));
+
+ allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
+ tobj = erts_alloc(allocator, sz + sizeof(struct enif_tmp_obj_t));
+ tobj->allocator = allocator;
tobj->next = env->tmp_obj_list;
tobj->dtor = &tmp_alloc_dtor;
env->tmp_obj_list = tobj;
@@ -578,7 +598,15 @@ int enif_is_identical(Eterm lhs, Eterm rhs)
int enif_compare(Eterm lhs, Eterm rhs)
{
- return CMP(lhs,rhs);
+ Sint result = CMP(lhs,rhs);
+
+ if (result < 0) {
+ return -1;
+ } else if (result > 0) {
+ return 1;
+ }
+
+ return result;
}
int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array)
@@ -668,6 +696,7 @@ Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
ErlSubBin* sb;
Eterm orig;
Uint offset, bit_offset, bit_size;
+#ifdef DEBUG
unsigned src_size;
ASSERT(is_binary(bin_term));
@@ -675,6 +704,7 @@ Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
ASSERT(pos <= src_size);
ASSERT(size <= src_size);
ASSERT(pos + size <= src_size);
+#endif
sb = (ErlSubBin*) alloc_heap(env, ERL_SUB_BIN_SIZE);
ERTS_GET_REAL_BIN(bin_term, orig, offset, bit_offset, bit_size);
sb->thing_word = HEADER_SUB_BIN;
@@ -1175,7 +1205,7 @@ enif_open_resource_type(ErlNifEnv* env,
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
- ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(module_str == NULL); /* for now... */
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -1469,7 +1499,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/* Block system (is this the right place to do it?) */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* Find calling module */
ASSERT(BIF_P->current != NULL);
@@ -1658,7 +1688,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_sys_ddll_free_error(&errdesc);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_free(ERTS_ALC_T_TMP, lib_name);
BIF_RET(ret);
@@ -1670,7 +1700,7 @@ erts_unload_nif(struct erl_module_nif* lib)
{
ErlNifResourceType* rt;
ErlNifResourceType* next;
- ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
for (rt = resource_type_list.next;
@@ -1730,8 +1760,10 @@ struct readonly_check_t
};
static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
{
- struct readonly_check_t* obj = erts_alloc(ERTS_ALC_T_TMP,
+ ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
+ struct readonly_check_t* obj = erts_alloc(allocator,
sizeof(struct readonly_check_t));
+ obj->hdr.allocator = allocator;
obj->hdr.next = env->tmp_obj_list;
env->tmp_obj_list = &obj->hdr;
obj->hdr.dtor = &readonly_check_dtor;
@@ -1748,7 +1780,7 @@ static void readonly_check_dtor(struct enif_tmp_obj_t* o)
" %x != %x\r\nABORTING\r\n", chksum, obj->checksum);
abort();
}
- erts_free(ERTS_ALC_T_TMP, obj);
+ erts_free(obj->hdr.allocator, obj);
}
static unsigned calc_checksum(unsigned char* ptr, unsigned size)
{
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 4af9f61000..6396af09d0 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -137,6 +137,7 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64));
#endif
ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term, ERL_NIF_TERM *list));
+ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
/*
** Add new entries here to keep compatibility on Windows!!!
@@ -258,12 +259,206 @@ ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term,
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
+# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
/*
** Add new entries here
*/
#endif
+
+#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+
+/* Inline functions for compile time type checking of arguments to
+ variadic functions.
+*/
+
+# define ERL_NIF_INLINE __inline__
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple1(ErlNifEnv* env,
+ ERL_NIF_TERM e1)
+{
+ return enif_make_tuple(env, 1, e1);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple2(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2)
+{
+ return enif_make_tuple(env, 2, e1, e2);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple3(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3)
+{
+ return enif_make_tuple(env, 3, e1, e2, e3);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple4(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4)
+{
+ return enif_make_tuple(env, 4, e1, e2, e3, e4);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple5(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5)
+{
+ return enif_make_tuple(env, 5, e1, e2, e3, e4, e5);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple6(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6)
+{
+ return enif_make_tuple(env, 6, e1, e2, e3, e4, e5, e6);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple7(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7)
+{
+ return enif_make_tuple(env, 7, e1, e2, e3, e4, e5, e6, e7);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple8(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8)
+{
+ return enif_make_tuple(env, 8, e1, e2, e3, e4, e5, e6, e7, e8);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple9(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8,
+ ERL_NIF_TERM e9)
+{
+ return enif_make_tuple(env, 9, e1, e2, e3, e4, e5, e6, e7, e8, e9);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list1(ErlNifEnv* env,
+ ERL_NIF_TERM e1)
+{
+ return enif_make_list(env, 1, e1);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list2(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2)
+{
+ return enif_make_list(env, 2, e1, e2);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list3(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3)
+{
+ return enif_make_list(env, 3, e1, e2, e3);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list4(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4)
+{
+ return enif_make_list(env, 4, e1, e2, e3, e4);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list5(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5)
+{
+ return enif_make_list(env, 5, e1, e2, e3, e4, e5);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list6(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6)
+{
+ return enif_make_list(env, 6, e1, e2, e3, e4, e5, e6);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list7(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7)
+{
+ return enif_make_list(env, 7, e1, e2, e3, e4, e5, e6, e7);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list8(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8)
+{
+ return enif_make_list(env, 8, e1, e2, e3, e4, e5, e6, e7, e8);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list9(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8,
+ ERL_NIF_TERM e9)
+{
+ return enif_make_list(env, 9, e1, e2, e3, e4, e5, e6, e7, e8, e9);
+}
+
+# undef ERL_NIF_INLINE
+
+#else /* fallback with macros */
+
#ifndef enif_make_list1
# define enif_make_list1(ENV,E1) enif_make_list(ENV,1,E1)
# define enif_make_list2(ENV,E1,E2) enif_make_list(ENV,2,E1,E2)
@@ -283,6 +478,11 @@ ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term,
# define enif_make_tuple7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_tuple(ENV,7,E1,E2,E3,E4,E5,E6,E7)
# define enif_make_tuple8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_tuple(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
# define enif_make_tuple9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_tuple(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
+#endif
+
+#endif /* __GNUC__ && !WIN32 */
+
+#ifndef enif_make_pid
# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index af3873995e..908ba755ed 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -26,6 +26,7 @@
#include "dist.h"
#include "big.h"
#include "error.h"
+#include "erl_thr_progress.h"
Hash erts_dist_table;
Hash erts_node_table;
@@ -907,7 +908,7 @@ erts_get_node_and_dist_references(struct process *proc)
#endif
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* No need to lock any thing since we are alone... */
if (references_atoms_need_init) {
@@ -951,7 +952,7 @@ erts_get_node_and_dist_references(struct process *proc)
delete_reference_table();
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
return res;
}
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 6aa5161b08..87b8e5131b 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -711,23 +711,6 @@ typedef struct {
int *resp;
} ErtsPortTaskExeBlockData;
-static void
-prepare_for_block(void *vd)
-{
- ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
- erts_smp_runq_unlock(d->runq);
-}
-
-static void
-resume_after_block(void *vd)
-{
- ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
- erts_smp_runq_lock(d->runq);
- if (d->resp)
- *d->resp = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
-}
-
/*
* Run all scheduled tasks for the first port in run queue. If
* new tasks appear while running reschedule port (free task is
@@ -752,11 +735,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- erts_smp_activity_begin(ERTS_ACTIVITY_IO,
- prepare_for_block,
- resume_after_block,
- (void *) &blk_data);
-
ERTS_PT_CHK_PORTQ(runq);
pp = pop_port(runq);
@@ -988,10 +966,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
done:
blk_data.resp = &res;
- erts_smp_activity_end(ERTS_ACTIVITY_IO,
- prepare_for_block,
- resume_after_block,
- (void *) &blk_data);
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index bbdcf79d00..a1f5069b2d 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -39,6 +39,9 @@
#include "erl_binary.h"
#include "beam_bp.h"
#include "erl_cpu_topology.h"
+#include "erl_thr_progress.h"
+#include "erl_thr_queue.h"
+#include "erl_async.h"
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
@@ -111,6 +114,7 @@ static Sint p_serial;
static Uint p_serial_mask;
static Uint p_serial_shift;
+int erts_sched_compact_load;
Uint erts_no_schedulers;
Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
@@ -124,9 +128,10 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
#ifdef ERTS_SMP
-
int erts_disable_proc_not_running_opt;
+static ErtsAuxWorkData *aux_thread_aux_work_data;
+
#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((erts_aint32_t) 1) << 0)
#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
@@ -213,8 +218,6 @@ Uint erts_no_run_queues;
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
-#ifdef ERTS_SMP
-
typedef union {
ErtsSchedulerSleepInfo ssi;
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
@@ -222,8 +225,6 @@ typedef union {
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
-#endif
-
#ifndef BM_COUNTERS
static int processes_busy;
#endif
@@ -285,8 +286,9 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
ERTS_ALC_T_PROC_LIST)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
- &aligned_sched_sleep_info[(IX)].ssi)
+ (ASSERT_EXPR(-1 <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_schedulers)), \
+ &aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -339,6 +341,66 @@ static void exec_misc_ops(ErtsRunQueue *);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
int yreg);
+
+static void aux_work_timeout(void *unused);
+static void aux_work_timeout_early_init(int no_schedulers);
+static void aux_work_timeout_late_init(void);
+static void setup_aux_work_timer(void);
+
+#if defined(DEBUG) || 0
+#define ERTS_DBG_CHK_AUX_WORK_VAL(V) dbg_chk_aux_work_val((V))
+static void
+dbg_chk_aux_work_val(erts_aint32_t value)
+{
+ erts_aint32_t valid = 0;
+
+#ifdef ERTS_SSI_AUX_WORK_SET_TMO
+ valid |= ERTS_SSI_AUX_WORK_SET_TMO;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_CHECK_CHILDREN
+ valid |= ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_MISC
+ valid |= ERTS_SSI_AUX_WORK_MISC;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_MISC_THR_PRGR
+ valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_ASYNC_READY
+ valid |= ERTS_SSI_AUX_WORK_ASYNC_READY;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN
+ valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+#endif
+
+#ifdef ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC
+ valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_DD
+ valid |= ERTS_SSI_AUX_WORK_DD;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_DD
+ valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK
+ valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
+#endif
+
+ if (~valid & value)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Invalid aux_work value found: 0x%x\n",
+ ~valid & value);
+}
+#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI) \
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&(SSI)->aux_work))
+#else
+#define ERTS_DBG_CHK_AUX_WORK_VAL(V)
+#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI)
+#endif
+
#ifdef ERTS_SMP
static void handle_pending_exiters(ErtsProcList *);
@@ -577,6 +639,13 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
case ERTS_SSI_FLG_POLL_SLEEPING:
erts_sys_schedule_interrupt(1);
break;
+ case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING:
+ /*
+ * Thread progress blocking while poll sleeping; need
+ * to signal on both...
+ */
+ erts_sys_schedule_interrupt(1);
+ /* fall through */
case ERTS_SSI_FLG_TSE_SLEEPING:
erts_tse_set(ssi->event);
break;
@@ -589,189 +658,712 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
}
}
+#endif
+
+static ERTS_INLINE void
+set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t flgs)
+{
+ erts_aint32_t old_flgs;
+
+ ERTS_DBG_CHK_SSI_AUX_WORK(ssi);
+
+ old_flgs = erts_atomic32_read_nob(&ssi->aux_work);
+ if ((old_flgs & flgs) == 0) {
+
+ old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
+
+ if ((old_flgs & flgs) == 0) {
+#ifdef ERTS_SMP
+ erts_sched_poke(ssi);
+#else
+ erts_sys_schedule_interrupt(1);
+#endif
+ }
+ }
+}
+
+#if 0 /* Currently not used */
+
+static ERTS_INLINE void
+set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t flgs)
+{
+ erts_aint32_t old_flgs;
+
+ ERTS_DBG_CHK_SSI_AUX_WORK(ssi);
+
+ old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs);
+
+ if ((old_flgs & flgs) == 0) {
+#ifdef ERTS_SMP
+ erts_sched_poke(ssi);
+#else
+ erts_sys_schedule_interrupt(1);
+#endif
+ }
+}
+
+#endif
+
+static ERTS_INLINE erts_aint32_t
+set_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
+{
+ return erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
+}
+
+static ERTS_INLINE erts_aint32_t
+unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
+{
+ return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs);
+}
+
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
struct erts_misc_aux_work_t_ {
- erts_misc_aux_work_t *next;
void (*func)(void *);
void *arg;
};
-typedef struct {
- erts_smp_mtx_t mtx;
- erts_misc_aux_work_t *first;
- erts_misc_aux_work_t *last;
-} erts_misc_aux_work_q_t;
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_aux_work,
+ erts_misc_aux_work_t,
+ 200,
+ ERTS_ALC_T_MISC_AUX_WORK)
typedef union {
- erts_misc_aux_work_q_t data;
- char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_misc_aux_work_q_t))];
+ ErtsThrQ_t q;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrQ_t))];
} erts_algnd_misc_aux_work_q_t;
static erts_algnd_misc_aux_work_q_t *misc_aux_work_queues;
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_aux_work,
- erts_misc_aux_work_t,
- 200,
- ERTS_ALC_T_MISC_AUX_WORK)
+static void
+notify_aux_work(void *vssi)
+{
+ set_aux_work_flags_wakeup_nob((ErtsSchedulerSleepInfo *) vssi,
+ ERTS_SSI_AUX_WORK_MISC);
+}
static void
init_misc_aux_work(void)
{
int ix;
+ ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
+ qinit.notify = notify_aux_work;
init_misc_aux_work_alloc();
misc_aux_work_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MISC_AUX_WORK_Q,
- erts_no_schedulers *
- sizeof(erts_algnd_misc_aux_work_q_t));
+ sizeof(erts_algnd_misc_aux_work_q_t)
+ * (erts_no_schedulers+1));
+
+#ifdef ERTS_SMP
+ ix = 0; /* aux_thread + schedulers */
+#else
+ ix = 1; /* scheduler only */
+#endif
- for (ix = 0; ix < erts_no_schedulers; ix++) {
- erts_smp_mtx_init_x(&misc_aux_work_queues[ix].data.mtx,
- "misc_aux_work_queue",
- make_small(ix + 1));
- misc_aux_work_queues[ix].data.first = NULL;
- misc_aux_work_queues[ix].data.last = NULL;
+ for (; ix <= erts_no_schedulers; ix++) {
+ qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1);
+ erts_thr_q_initialize(&misc_aux_work_queues[ix].q, &qinit);
}
}
-static void
-handle_misc_aux_work(ErtsSchedulerData *esdp)
-{
- int ix = (int) esdp->no - 1;
- erts_misc_aux_work_t *mawp;
+static erts_aint32_t
+misc_aux_work_clean(ErtsThrQ_t *q,
+ ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ switch (erts_thr_q_clean(q)) {
+ case ERTS_THR_Q_DIRTY:
+ set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
+ return aux_work | ERTS_SSI_AUX_WORK_MISC;
+#ifdef ERTS_SMP
+ case ERTS_THR_Q_NEED_THR_PRGR:
+ set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
+ erts_thr_progress_wakeup(awdp->esdp,
+ erts_thr_q_need_thr_progress(q));
+#endif
+ case ERTS_THR_Q_CLEAN:
+ break;
+ }
+ return aux_work;
+}
- erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
- mawp = misc_aux_work_queues[ix].data.first;
- misc_aux_work_queues[ix].data.first = NULL;
- misc_aux_work_queues[ix].data.last = NULL;
- erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
+static erts_aint32_t
+handle_misc_aux_work(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ ErtsThrQ_t *q = &misc_aux_work_queues[awdp->sched_id].q;
- while (mawp) {
- erts_misc_aux_work_t *free_mawp;
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
+ while (1) {
+ erts_misc_aux_work_t *mawp = erts_thr_q_dequeue(q);
+ if (!mawp)
+ break;
mawp->func(mawp->arg);
- free_mawp = mawp;
- mawp = mawp->next;
- misc_aux_work_free(free_mawp);
+ misc_aux_work_free(mawp);
}
+
+ return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC);
+}
+
+#ifdef ERTS_SMP
+
+static erts_aint32_t
+handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ if (!erts_thr_progress_has_reached(awdp->misc.thr_prgr))
+ return aux_work;
+
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
+
+ return misc_aux_work_clean(&misc_aux_work_queues[awdp->sched_id].q,
+ awdp,
+ aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
+}
+
+#endif
+
+static ERTS_INLINE void
+schedule_misc_aux_work(int sched_id,
+ void (*func)(void *),
+ void *arg)
+{
+ ErtsThrQ_t *q;
+ erts_misc_aux_work_t *mawp;
+
+#ifdef ERTS_SMP
+ ASSERT(0 <= sched_id && sched_id <= erts_no_schedulers);
+#else
+ ASSERT(sched_id == 1);
+#endif
+
+ q = &misc_aux_work_queues[sched_id].q;
+ mawp = misc_aux_work_alloc();
+ mawp->func = func;
+ mawp->arg = arg;
+ erts_thr_q_enqueue(q, mawp);
+}
+
+void
+erts_schedule_misc_aux_work(int sched_id,
+ void (*func)(void *),
+ void *arg)
+{
+ schedule_misc_aux_work(sched_id, func, arg);
}
void
-erts_smp_schedule_misc_aux_work(int ignore_self,
- int max_sched,
- void (*func)(void *),
- void *arg)
+erts_schedule_multi_misc_aux_work(int ignore_self,
+ int max_sched,
+ void (*func)(void *),
+ void *arg)
{
- int ix, ignore_ix = -1;
+ int id, self = 0;
if (ignore_self) {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
if (esdp)
- ignore_ix = (int) esdp->no - 1;
+ self = (int) esdp->no;
}
- ASSERT(0 <= max_sched && max_sched <= erts_no_schedulers);
+ ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
- for (ix = 0; ix < max_sched; ix++) {
- erts_aint32_t aux_work;
- erts_misc_aux_work_t *mawp;
- ErtsSchedulerSleepInfo *ssi;
- if (ix == ignore_ix)
+ for (id = 1; id <= max_sched; id++) {
+ if (id == self)
continue;
+ schedule_misc_aux_work(id, func, arg);
+ }
+}
- mawp = misc_aux_work_alloc();
+#if ERTS_USE_ASYNC_READY_Q
- mawp->func = func;
- mawp->arg = arg;
- mawp->next = NULL;
+void
+erts_notify_check_async_ready_queue(void *vno)
+{
+ int ix = ((int) (SWord) vno) -1;
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_SSI_AUX_WORK_ASYNC_READY);
+}
- erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
- if (!misc_aux_work_queues[ix].data.last)
- misc_aux_work_queues[ix].data.first = mawp;
- else
- misc_aux_work_queues[ix].data.last->next = mawp;
- misc_aux_work_queues[ix].data.last = mawp;
- erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
-
- ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- aux_work = erts_smp_atomic32_read_bor_nob(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_MISC);
- if ((aux_work & ERTS_SSI_AUX_WORK_MISC) == 0)
- erts_sched_poke(ssi);
- }
+static erts_aint32_t
+handle_async_ready(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
+ if (erts_check_async_ready(awdp->async_ready.queue)) {
+ if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
+ & ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN) {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
+ aux_work &= ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+ }
+ return aux_work;
+ }
+#ifdef ERTS_SMP
+ awdp->async_ready.need_thr_prgr = 0;
+#endif
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
+ return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY)
+ | ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
}
+static erts_aint32_t
+handle_async_ready_clean(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ void *thr_prgr_p;
+
+#ifdef ERTS_SMP
+ if (awdp->async_ready.need_thr_prgr
+ && !erts_thr_progress_has_reached(awdp->misc.thr_prgr)) {
+ return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+ }
+
+ awdp->async_ready.need_thr_prgr = 0;
+ thr_prgr_p = (void *) &awdp->async_ready.thr_prgr;
+#else
+ thr_prgr_p = NULL;
+#endif
+
+ switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) {
+ case ERTS_ASYNC_READY_CLEAN:
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
+ return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+#ifdef ERTS_SMP
+ case ERTS_ASYNC_READY_NEED_THR_PRGR:
+ erts_thr_progress_wakeup(awdp->esdp,
+ awdp->async_ready.thr_prgr);
+ awdp->async_ready.need_thr_prgr = 1;
+#endif
+ default:
+ return aux_work;
+ }
+}
+
+#endif
+
+static erts_aint32_t
+handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ erts_aint32_t res;
+
+ unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
+ aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC);
+ res = erts_alloc_fix_alloc_shrink(awdp->sched_id, aux_work);
+ if (res) {
+ set_aux_work_flags(ssi, res);
+ aux_work |= res;
+ }
+
+ return aux_work;
+}
+
+#ifdef ERTS_SMP
+
+void
+erts_alloc_notify_delayed_dealloc(int ix)
+{
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
+ ERTS_SSI_AUX_WORK_DD);
+}
+
+static erts_aint32_t
+handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ int need_thr_progress = 0;
+ int more_work = 0;
+
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
+ erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
+ &need_thr_progress,
+ &more_work);
+ if (more_work) {
+ if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD)
+ & ERTS_SSI_AUX_WORK_DD_THR_PRGR) {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ aux_work &= ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+ }
+ return aux_work;
+ }
+
+ if (need_thr_progress) {
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ awdp->dd.thr_prgr = erts_thr_progress_later();
+ erts_thr_progress_wakeup(awdp->esdp, awdp->dd.thr_prgr);
+ }
+ else if (awdp->dd.completed_callback) {
+ awdp->dd.completed_callback(awdp->dd.completed_arg);
+ awdp->dd.completed_callback = NULL;
+ awdp->dd.completed_arg = NULL;
+ }
+ return aux_work & ~ERTS_SSI_AUX_WORK_DD;
+}
+
+static erts_aint32_t
+handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi;
+ int need_thr_progress;
+ int more_work;
+
+ if (!erts_thr_progress_has_reached(awdp->dd.thr_prgr))
+ return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+
+ ssi = awdp->ssi;
+ need_thr_progress = 0;
+ more_work = 0;
+
+ erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
+ &need_thr_progress,
+ &more_work);
+ if (more_work) {
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ return ((aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR)
+ | ERTS_SSI_AUX_WORK_DD);
+ }
+
+ if (need_thr_progress) {
+ awdp->dd.thr_prgr = erts_thr_progress_later();
+ erts_thr_progress_wakeup(awdp->esdp, awdp->dd.thr_prgr);
+ }
+ else {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ if (awdp->dd.completed_callback) {
+ awdp->dd.completed_callback(awdp->dd.completed_arg);
+ awdp->dd.completed_callback = NULL;
+ awdp->dd.completed_arg = NULL;
+ }
+ }
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+}
+
+static erts_atomic32_t completed_dealloc_count;
+
+static void
+completed_dealloc(void *vproc)
+{
+ if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) {
+ erts_resume((Process *) vproc, (ErtsProcLocks) 0);
+ erts_smp_proc_dec_refc((Process *) vproc);
+ }
+}
+
+static void
+setup_completed_dealloc(void *vproc)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsAuxWorkData *awdp = (esdp
+ ? &esdp->aux_work_data
+ : aux_thread_aux_work_data);
+ erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
+ set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DD);
+ awdp->dd.completed_callback = completed_dealloc;
+ awdp->dd.completed_arg = vproc;
+}
+
+static void
+prep_setup_completed_dealloc(void *vproc)
+{
+ erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers+1);
+ if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == count) {
+ /* scheduler threads */
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ setup_completed_dealloc,
+ vproc);
+ /* aux_thread */
+ erts_schedule_misc_aux_work(0,
+ setup_completed_dealloc,
+ vproc);
+ }
+}
+
+#endif /* ERTS_SMP */
+
+int
+erts_debug_wait_deallocations(Process *c_p)
+{
+#ifndef ERTS_SMP
+ erts_alloc_fix_alloc_shrink(1, 0);
+ return 1;
+#else
+ /* Only one process at a time can do this */
+ erts_aint32_t count = (erts_aint32_t) (2*(erts_no_schedulers+1));
+ if (0 == erts_atomic32_cmpxchg_mb(&completed_dealloc_count,
+ count,
+ 0)) {
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ erts_smp_proc_inc_refc(c_p);
+ /* scheduler threads */
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ prep_setup_completed_dealloc,
+ (void *) c_p);
+ /* aux_thread */
+ erts_schedule_misc_aux_work(0,
+ prep_setup_completed_dealloc,
+ (void *) c_p);
+ return 1;
+ }
+ return 0;
+#endif
+}
+
+
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
void
erts_smp_notify_check_children_needed(void)
{
int i;
+ for (i = 0; i < erts_no_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+}
- for (i = 0; i < erts_no_schedulers; i++) {
- erts_aint32_t aux_work;
- ErtsSchedulerSleepInfo *ssi;
- ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
- aux_work = erts_smp_atomic32_read_bor_nob(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
- erts_sched_poke(ssi);
- }
+static erts_aint32_t
+handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ erts_check_children();
+ return aux_work & ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+}
+
+#endif
+
+#ifdef ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK
+
+static erts_aint32_t
+handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK);
+ erts_mseg_cache_check();
+ return aux_work & ~ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
}
+
#endif
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+static erts_aint32_t
+handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
+ setup_aux_work_timer();
+ return aux_work & ~ERTS_SSI_AUX_WORK_SET_TMO;
+}
+
static ERTS_INLINE erts_aint32_t
-blockable_aux_work(ErtsSchedulerData *esdp,
- ErtsSchedulerSleepInfo *ssi,
- erts_aint32_t aux_work)
+handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
- if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
- if (aux_work & ERTS_SSI_AUX_WORK_MISC) {
- aux_work = erts_smp_atomic32_read_band_nob(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_MISC);
- aux_work &= ~ERTS_SSI_AUX_WORK_MISC;
- handle_misc_aux_work(esdp);
- }
+ /*
+ * Handlers are *only* allowed to modify flags in return value
+ * and ssi flags that are explicity handled by the handler.
+ * Handlers are, e.g., not allowed to read the ssi flag field and
+ * then unconditionally return that value.
+ */
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ if (aux_work & ERTS_SSI_AUX_WORK_SET_TMO) {
+ aux_work = handle_setup_aux_work_timer(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+#ifdef ERTS_SMP
+ if (aux_work & ERTS_SSI_AUX_WORK_MISC_THR_PRGR) {
+ aux_work = handle_misc_aux_work_thr_prgr(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+#endif
+ if (aux_work & ERTS_SSI_AUX_WORK_MISC) {
+ aux_work = handle_misc_aux_work(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+#if ERTS_USE_ASYNC_READY_Q
+ if (aux_work & ERTS_SSI_AUX_WORK_ASYNC_READY) {
+ aux_work = handle_async_ready(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+ if (aux_work & ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN) {
+ aux_work = handle_async_ready_clean(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+#endif
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
- aux_work = erts_smp_atomic32_band_nob(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
- erts_check_children();
- }
+ if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
+ aux_work = handle_check_children(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+#endif
+ if (aux_work & (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC)) {
+ aux_work = handle_fix_alloc(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+#ifdef ERTS_SMP
+ if (aux_work & ERTS_SSI_AUX_WORK_DD) {
+ aux_work = handle_delayed_dealloc(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
+ if (aux_work & ERTS_SSI_AUX_WORK_DD_THR_PRGR) {
+ aux_work = handle_delayed_dealloc_thr_prgr(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ }
#endif
+#ifdef ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK
+ if (aux_work & ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK) {
+ aux_work = handle_mseg_cache_check(awdp, aux_work);
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
}
+#endif
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
return aux_work;
}
-#endif
+typedef struct {
+ union {
+ ErlTimer data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErlTimer))];
+ } timer;
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-static ERTS_INLINE erts_aint32_t
-nonblockable_aux_work(ErtsSchedulerData *esdp,
- ErtsSchedulerSleepInfo *ssi,
- erts_aint32_t aux_work)
+ int initialized;
+ erts_atomic32_t refc;
+ erts_atomic32_t type[1];
+} ErtsAuxWorkTmo;
+
+static ErtsAuxWorkTmo *aux_work_tmo;
+
+static void
+aux_work_timeout_early_init(int no_schedulers)
{
- if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
+ int i;
+ UWord p;
+
+ /*
+ * This is done really early. Our own allocators have
+ * not been started yet.
+ */
+
+ p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ + sizeof(erts_atomic32_t)*(no_schedulers+1))
+ + ERTS_CACHE_LINE_SIZE-1);
+ if (p & ERTS_CACHE_LINE_MASK)
+ p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
+ aux_work_tmo = (ErtsAuxWorkTmo *) p;
+ aux_work_tmo->initialized = 0;
+ erts_atomic32_init_nob(&aux_work_tmo->refc, 0);
+ for (i = 0; i <= no_schedulers; i++)
+ erts_atomic32_init_nob(&aux_work_tmo->type[i], 0);
+}
+
+void
+aux_work_timeout_late_init(void)
+{
+ aux_work_tmo->initialized = 1;
+ if (erts_atomic32_read_nob(&aux_work_tmo->refc)) {
+ aux_work_tmo->timer.data.active = 0;
+ erts_set_timer(&aux_work_tmo->timer.data,
+ aux_work_timeout,
+ NULL,
+ NULL,
+ 1000);
}
}
-#endif
static void
-prepare_for_block(void *vrq)
+aux_work_timeout(void *unused)
{
- erts_smp_runq_unlock((ErtsRunQueue *) vrq);
+ erts_aint32_t refc;
+ int i;
+#ifdef ERTS_SMP
+ i = 0;
+#else
+ i = 1;
+#endif
+
+ for (; i <= erts_no_schedulers; i++) {
+ erts_aint32_t type;
+ type = erts_atomic32_read_acqb(&aux_work_tmo->type[i]);
+ if (type)
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i-1),
+ type);
+ }
+
+ refc = erts_atomic32_read_nob(&aux_work_tmo->refc);
+ ASSERT(refc >= 1);
+ if (refc != 1
+ || 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) {
+ /* Setup next timeout... */
+ aux_work_tmo->timer.data.active = 0;
+ erts_set_timer(&aux_work_tmo->timer.data,
+ aux_work_timeout,
+ NULL,
+ NULL,
+ 1000);
+ }
}
static void
-resume_after_block(void *vrq)
+setup_aux_work_timer(void)
{
- erts_smp_runq_lock((ErtsRunQueue *) vrq);
+#ifndef ERTS_SMP
+ if (!erts_get_scheduler_data())
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(0),
+ ERTS_SSI_AUX_WORK_SET_TMO);
+ else
+#endif
+ {
+ aux_work_tmo->timer.data.active = 0;
+ erts_set_timer(&aux_work_tmo->timer.data,
+ aux_work_timeout,
+ NULL,
+ NULL,
+ 1000);
+ }
}
+erts_aint32_t
+erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
+{
+ erts_aint32_t old, refc;
+
+#ifndef ERTS_SMP
+ ix = 1;
#endif
+ ERTS_DBG_CHK_AUX_WORK_VAL(type);
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
+// erts_fprintf(stderr, "t(%d, 0x%x, %d)\n", ix, type, enable);
+
+ if (!enable) {
+ old = erts_atomic32_read_band_mb(&aux_work_tmo->type[ix], ~type);
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
+ if (old != 0 && (old & ~type) == 0)
+ erts_atomic32_dec_relb(&aux_work_tmo->refc);
+ return old;
+ }
+
+ old = erts_atomic32_read_bor_mb(&aux_work_tmo->type[ix], type);
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
+ if (old == 0 && type != 0) {
+ refc = erts_atomic32_inc_read_acqb(&aux_work_tmo->refc);
+ if (refc == 1) {
+ erts_atomic32_inc_acqb(&aux_work_tmo->refc);
+ if (aux_work_tmo->initialized)
+ setup_aux_work_timer();
+ }
+ }
+ return old;
+}
+
+
+
static ERTS_INLINE void
sched_waiting_sys(Uint no, ErtsRunQueue *rq)
{
@@ -800,8 +1392,6 @@ sched_active_sys(Uint no, ErtsRunQueue *rq)
Uint
erts_active_schedulers(void)
{
- /* RRRRRRRRR */
-
Uint as = erts_no_schedulers;
ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
@@ -988,6 +1578,10 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
erts_tse_reset(ssi->event);
+ else {
+ ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING);
+ erts_sys_schedule_interrupt(0);
+ }
while (1) {
oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
@@ -1006,16 +1600,127 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
(((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
!= ERTS_SSI_FLG_WAITING)
+
+static void
+thr_prgr_wakeup(void *vssi)
+{
+ erts_sched_poke((ErtsSchedulerSleepInfo *) vssi);
+}
+
+static void
+thr_prgr_prep_wait(void *vssi)
+{
+ ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
+ erts_smp_atomic32_read_bor_acqb(&ssi->flags,
+ ERTS_SSI_FLG_SLEEPING);
+}
+
+static void
+thr_prgr_wait(void *vssi)
+{
+ ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
+ erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING;
+
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ erts_aint32_t aflgs, nflgs;
+ nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING;
+ aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ if (aflgs == xflgs) {
+ erts_tse_wait(ssi->event);
+ break;
+ }
+ if ((aflgs & ERTS_SSI_FLG_SLEEPING) == 0)
+ break;
+ xflgs = aflgs;
+ }
+}
+
+static void
+thr_prgr_fin_wait(void *vssi)
+{
+ ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~(ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING));
+}
+
+static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp);
+
+static void *
+aux_thread(void *unused)
+{
+ ErtsAuxWorkData *awdp = aux_thread_aux_work_data;
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(-1);
+ erts_aint32_t aux_work;
+ ErtsThrPrgrCallbacks callbacks;
+ int thr_prgr_active = 1;
+
+ ssi->event = erts_tse_fetch();
+
+ callbacks.arg = (void *) ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = thr_prgr_prep_wait;
+ callbacks.wait = thr_prgr_wait;
+ callbacks.finalize_wait = thr_prgr_fin_wait;
+
+ erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
+ init_aux_work_data(awdp, NULL);
+ awdp->ssi = ssi;
+
+ sched_prep_spin_wait(ssi);
+
+ while (1) {
+ erts_aint32_t flgs;
+
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ aux_work = handle_aux_work(awdp, aux_work);
+ if (aux_work && erts_thr_progress_update(NULL))
+ erts_thr_progress_leader_update(NULL);
+ }
+
+ if (!aux_work) {
+ if (thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_prepare_wait(NULL);
+
+ flgs = sched_spin_wait(ssi, 0);
+
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+ erts_thr_progress_finalize_wait(NULL);
+ }
+
+ flgs = sched_prep_spin_wait(ssi);
+ }
+ return NULL;
+}
+
+#endif /* ERTS_SMP */
+
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
int spincount;
+ erts_aint32_t aux_work = 0;
+#ifdef ERTS_SMP
+ int thr_prgr_active = 1;
erts_aint32_t flgs;
-#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
- || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
- erts_aint32_t aux_work;
-#endif
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -1049,34 +1754,38 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
- tse_blockable_aux_work:
- aux_work = blockable_aux_work(esdp, ssi, aux_work);
-#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
-
while (1) {
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
-#endif
- nonblockable_aux_work(esdp, ssi, aux_work);
-#endif
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
+ }
- flgs = sched_spin_wait(ssi, spincount);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (aux_work)
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ else {
+ if (thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_prepare_wait(esdp);
+
+ flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
}
+ erts_thr_progress_finalize_wait(esdp);
}
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
@@ -1092,26 +1801,21 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
break;
}
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
- if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- goto tse_blockable_aux_work;
- }
-#endif
-
}
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
-
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+
erts_smp_runq_lock(rq);
sched_active(esdp->no, rq);
}
- else {
+ else
+#endif
+ {
erts_aint_t dt;
erts_smp_atomic32_set_relb(&function_calls, 0);
@@ -1135,30 +1839,32 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (dt) erts_bump_timer(dt);
sys_aux_work:
-
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
- aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#ifndef ERTS_SMP
+ erts_sys_schedule_interrupt(0);
#endif
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
+
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+#ifdef ERTS_SMP
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
#endif
- nonblockable_aux_work(esdp, ssi, aux_work);
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+#ifdef ERTS_SMP
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
#endif
+ }
+#ifndef ERTS_SMP
+ if (rq->len != 0 || rq->misc.start)
+ goto sys_woken;
+#else
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
goto sys_woken;
}
- if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
- flgs = sched_prep_cont_spin_wait(ssi);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
- }
/*
* If we got new I/O tasks we aren't allowed to
@@ -1175,10 +1881,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
goto tse_wait;
}
}
+#endif
}
erts_smp_runq_lock(rq);
+#ifdef ERTS_SMP
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
@@ -1190,64 +1898,88 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (prepare_for_sys_schedule())
- goto do_sys_schedule;
-
- /*
- * Not allowed to wait in erl_sys_schedule;
- * do tse wait instead...
- */
- sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ if (!prepare_for_sys_schedule()) {
+ /*
+ * Not allowed to wait in erl_sys_schedule;
+ * do tse wait instead...
+ */
+ sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ erts_smp_runq_unlock(rq);
+ spincount = 0;
+ goto tse_wait;
+ }
+ }
+#endif
+ if (aux_work) {
erts_smp_runq_unlock(rq);
- spincount = 0;
- goto tse_wait;
+ goto sys_poll_aux_work;
}
- else {
- do_sys_schedule:
- erts_sys_schedule_interrupt(0);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
- if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
- if (!(flgs & ERTS_SSI_FLG_WAITING))
- goto sys_locked_woken;
- erts_smp_runq_unlock(rq);
- flgs = sched_prep_cont_spin_wait(ssi);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- goto sys_poll_aux_work;
+#ifdef ERTS_SMP
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_locked_woken;
+ }
+ erts_smp_runq_unlock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
}
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ goto sys_poll_aux_work;
+ }
- ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+#endif
- erts_smp_runq_unlock(rq);
+ erts_smp_runq_unlock(rq);
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
+#ifdef ERTS_SMP
+ if (thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+#endif
- erl_sys_schedule(0);
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
+ erl_sys_schedule(0);
- flgs = sched_prep_cont_spin_wait(ssi);
- if (flgs & ERTS_SSI_FLG_WAITING)
- goto sys_aux_work;
+ dt = erts_do_time_read_and_reset();
+ if (dt) erts_bump_timer(dt);
+
+#ifndef ERTS_SMP
+ if (rq->len == 0 && !rq->misc.start)
+ goto sys_aux_work;
+ sys_woken:
+#else
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_WAITING)
+ goto sys_aux_work;
- sys_woken:
+ sys_woken:
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_smp_runq_lock(rq);
+ sys_locked_woken:
+ if (!thr_prgr_active) {
+ erts_smp_runq_unlock(rq);
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
erts_smp_runq_lock(rq);
- sys_locked_woken:
- clear_sys_scheduling();
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
- sched_active_sys(esdp->no, rq);
}
+ clear_sys_scheduling();
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+#endif
+ sched_active_sys(esdp->no, rq);
}
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
}
+#ifdef ERTS_SMP
+
static ERTS_INLINE erts_aint32_t
ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
{
@@ -2218,6 +2950,9 @@ check_balance(ErtsRunQueue *c_rq)
mmax_len = run_queue_info[qix].max_len;
}
+ if (!erts_sched_compact_load)
+ goto all_active;
+
if (!forced && half_full_scheds != blnc_no_rqs) {
int min = 1;
if (min < half_full_scheds)
@@ -2554,8 +3289,9 @@ erts_debug_nbalance(void)
}
void
-erts_early_init_scheduling(void)
+erts_early_init_scheduling(int no_schedulers)
{
+ aux_work_timeout_early_init(no_schedulers);
wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM;
}
@@ -2576,12 +3312,32 @@ erts_sched_set_wakeup_limit(char *str)
return EINVAL;
return 0;
}
-
+
+static void
+init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp)
+{
+ awdp->sched_id = esdp ? (int) esdp->no : 0;
+ awdp->esdp = esdp;
+ awdp->ssi = esdp ? esdp->ssi : NULL;
+#ifdef ERTS_SMP
+ awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
+ awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
+ awdp->dd.completed_callback = NULL;
+ awdp->dd.completed_arg = NULL;
+#endif
+#ifdef ERTS_USE_ASYNC_READY_Q
+#ifdef ERTS_SMP
+ awdp->async_ready.need_thr_prgr = 0;
+ awdp->async_ready.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
+#endif
+ awdp->async_ready.queue = NULL;
+#endif
+}
void
erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
{
- int ix, n;
+ int ix, n, no_ssi;
#ifndef ERTS_SMP
mrq = 0;
@@ -2691,23 +3447,31 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
n = (int) no_schedulers;
erts_no_schedulers = n;
-#ifdef ERTS_SMP
/* Create and initialize scheduler sleep info */
-
+#ifdef ERTS_SMP
+ no_ssi = n+1;
+#else
+ no_ssi = 1;
+#endif
aligned_sched_sleep_info =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_SLP_INFO,
- n * sizeof(ErtsAlignedSchedulerSleepInfo));
-
- for (ix = 0; ix < n; ix++) {
- ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_SLP_INFO,
+ no_ssi*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_ssi; ix++) {
+ ErtsSchedulerSleepInfo *ssi = &aligned_sched_sleep_info[ix].ssi;
+#ifdef ERTS_SMP
#if 0 /* no need to initialize these... */
ssi->next = NULL;
ssi->prev = NULL;
#endif
erts_smp_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
- erts_smp_atomic32_init_nob(&ssi->aux_work, 0);
+#endif
+ erts_atomic32_init_nob(&ssi->aux_work, 0);
}
+
+#ifdef ERTS_SMP
+ aligned_sched_sleep_info++;
#endif
/* Create and initialize scheduler specific data */
@@ -2721,17 +3485,20 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
- esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->free_process = NULL;
-#if HALFWORD_HEAP
- /* Registers need to be heap allocated (correct memory range) for tracing to work */
- esdp->save_reg = erts_alloc(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * sizeof(Eterm));
-#endif
#endif
+ esdp->x_reg_array =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
+ ERTS_X_REGS_ALLOCATED *
+ sizeof(Eterm));
+ esdp->f_reg_array =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
+ MAX_REG * sizeof(FloatDef));
#if !HEAP_ON_C_STACK
esdp->num_tmp_heap_used = 0;
#endif
esdp->no = (Uint) ix+1;
+ esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->current_process = NULL;
esdp->current_port = NULL;
@@ -2752,9 +3519,19 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&esdp->chk_cpu_bind, 0);
#endif
+ init_aux_work_data(&esdp->aux_work_data, esdp);
}
+ init_misc_aux_work();
+
#ifdef ERTS_SMP
+
+ erts_atomic32_init_nob(&completed_dealloc_count, 0); /* debug only */
+
+ aux_thread_aux_work_data =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
+ sizeof(ErtsAuxWorkData));
+
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
@@ -2816,6 +3593,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
/* init port tasks */
erts_port_task_init();
+ aux_work_timeout_late_init();
+
#ifndef ERTS_SMP
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_scheduler_data->verify_unused_temp_alloc
@@ -2950,18 +3729,6 @@ erts_get_max_no_executing_schedulers(void)
#ifdef ERTS_SMP
static void
-susp_sched_prep_block(void *unused)
-{
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-}
-
-static void
-susp_sched_resume_block(void *unused)
-{
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-}
-
-static void
scheduler_ix_resume_wake(Uint ix)
{
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
@@ -3065,10 +3832,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
long active_schedulers;
int curr_online = 1;
int wake = 0;
-#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
- || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
erts_aint32_t aux_work;
-#endif
+ int thr_prgr_active = 1;
/*
* Schedulers may be suspended in two different ways:
@@ -3141,38 +3906,40 @@ suspend_scheduler(ErtsSchedulerData *esdp)
break;
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
- blockable_aux_work:
- blockable_aux_work(esdp, ssi, aux_work);
-#endif
-
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
while (1) {
erts_aint32_t flgs;
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
-#endif
- nonblockable_aux_work(esdp, ssi, aux_work);
-#endif
- flgs = sched_spin_suspended(ssi,
- ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- flgs = sched_set_suspended_sleeptype(ssi);
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
+ }
+
+ if (!aux_work) {
+ if (thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_prepare_wait(esdp);
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED)) {
- int res;
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
}
+ erts_thr_progress_finalize_wait(esdp);
}
flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
@@ -3182,20 +3949,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
break;
-
-
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
- if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- goto blockable_aux_work;
- }
-#endif
-
}
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
-
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
}
@@ -3220,6 +3975,9 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_active);
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
@@ -3302,12 +4060,16 @@ erts_set_schedulers_online(Process *p,
Sint new_no,
Sint *old_no)
{
- int ix, res, no, have_unlocked_plocks;
+ ErtsSchedulerData *esdp;
+ int ix, res, no, have_unlocked_plocks, end_wait;
erts_aint32_t changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
+ esdp = ERTS_PROC_GET_SCHDATA(p);
+ end_wait = 0;
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
have_unlocked_plocks = 0;
@@ -3424,16 +4186,21 @@ erts_set_schedulers_online(Process *p,
}
}
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (plocks && !have_unlocked_plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+ end_wait = 1;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
+
ASSERT(res != ERTS_SCHDLR_SSPND_DONE
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
& erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
@@ -3441,10 +4208,15 @@ erts_set_schedulers_online(Process *p,
== erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+
}
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (end_wait) {
+ erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_active(esdp, 1);
+ }
if (have_unlocked_plocks)
erts_smp_proc_lock(p, plocks);
@@ -3529,17 +4301,38 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
- != schdlr_sspnd.msb.wait_active)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
+
+ if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
+ != schdlr_sspnd.msb.wait_active) {
+ ErtsSchedulerData *esdp;
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ if (plocks && !have_unlocked_plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+
+ esdp = ERTS_PROC_GET_SCHDATA(p);
+
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
+ != schdlr_sspnd.msb.wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd,
+ &schdlr_sspnd.mtx);
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ erts_thr_progress_active(esdp, 1);
+ erts_thr_progress_finalize_wait(esdp);
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ }
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
& erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
@@ -3727,8 +4520,19 @@ erts_multi_scheduling_blockers(Process *p)
static void *
sched_thread_func(void *vesdp)
{
+ ErtsThrPrgrCallbacks callbacks;
+ ErtsSchedulerData *esdp = vesdp;
+ Uint no = esdp->no;
#ifdef ERTS_SMP
- Uint no = ((ErtsSchedulerData *) vesdp)->no;
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+ callbacks.arg = (void *) esdp->ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = thr_prgr_prep_wait;
+ callbacks.wait = thr_prgr_wait;
+ callbacks.finalize_wait = thr_prgr_fin_wait;
+
+ erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
+ erts_alloc_register_scheduler(vesdp);
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -3737,22 +4541,30 @@ sched_thread_func(void *vesdp)
erts_lc_set_thread_name(&buf[0]);
}
#endif
- erts_alloc_reg_scheduler_id(no);
erts_tsd_set(sched_data_key, vesdp);
#ifdef ERTS_SMP
+#if HAVE_ERTS_MSEG
+ erts_mseg_late_init();
+#endif
+#if ERTS_USE_ASYNC_READY_Q
+ esdp->aux_work_data.async_ready.queue = erts_get_async_ready_queue(no);
+#endif
- erts_sched_init_check_cpu_bind((ErtsSchedulerData *) vesdp);
+ erts_sched_init_check_cpu_bind(esdp);
erts_proc_lock_prepare_proc_lock_waiter();
- ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
-
-
#endif
- erts_register_blockable_thread();
+
#ifdef HIPE
hipe_thread_signal_init();
#endif
erts_thread_init_float();
+
+ if (no == 1) {
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+ }
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)
@@ -3761,41 +4573,39 @@ sched_thread_func(void *vesdp)
if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
~ERTS_SCHDLR_SSPND_CHNG_ONLN);
- if (((ErtsSchedulerData *) vesdp)->no != 1)
+ if (no != 1)
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
}
- if (((ErtsSchedulerData *) vesdp)->no == 1) {
- if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- }
+ if (no == 1) {
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (no == 1) {
+ erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_active(esdp, 1);
+ }
+
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
- ((ErtsSchedulerData *) vesdp)->verify_unused_temp_alloc
+ esdp->verify_unused_temp_alloc
= erts_alloc_get_verify_unused_temp_alloc(
- &((ErtsSchedulerData *) vesdp)->verify_unused_temp_alloc_data);
+ &esdp->verify_unused_temp_alloc_data);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
process_main();
/* No schedulers should *ever* terminate */
- erl_exit(ERTS_ABORT_EXIT, "Scheduler thread number %beu terminated\n",
- ((ErtsSchedulerData *) vesdp)->no);
+ erl_exit(ERTS_ABORT_EXIT,
+ "Scheduler thread number %beu terminated\n",
+ no);
return NULL;
}
+static ethr_tid aux_tid;
+
void
erts_start_schedulers(void)
{
@@ -3815,8 +4625,6 @@ erts_start_schedulers(void)
res = ENOTSUP;
}
- erts_block_system(0);
-
while (actual < wanted) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
actual++;
@@ -3829,7 +4637,12 @@ erts_start_schedulers(void)
}
erts_no_schedulers = actual;
- erts_release_system();
+
+ ERTS_THR_MEMORY_BARRIER;
+
+ res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
+ if (res != 0)
+ erl_exit(1, "Failed to create aux thread\n");
if (actual < 1)
erl_exit(1,
@@ -5190,7 +6003,7 @@ Process *schedule(Process *p, int calls)
input_reductions = INPUT_REDUCTIONS;
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
/*
* Clean up after the process being scheduled out.
@@ -5328,7 +6141,8 @@ Process *schedule(Process *p, int calls)
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+
check_activities_to_run: {
#ifdef ERTS_SMP
@@ -5338,7 +6152,7 @@ Process *schedule(Process *p, int calls)
check_balance(rq);
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
if (rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK)
@@ -5362,42 +6176,38 @@ Process *schedule(Process *p, int calls)
}
}
-#if defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
- || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
{
- ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- erts_aint32_t aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
- if (aux_work) {
+ erts_aint32_t aux_work;
+ int leader_update = erts_thr_progress_update(esdp);
+ aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
+ if (aux_work | leader_update) {
erts_smp_runq_unlock(rq);
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = blockable_aux_work(esdp, ssi, aux_work);
-#endif
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
- nonblockable_aux_work(esdp, ssi, aux_work);
-#endif
+ if (leader_update)
+ erts_thr_progress_leader_update(esdp);
+ if (aux_work)
+ handle_aux_work(&esdp->aux_work_data, aux_work);
erts_smp_runq_lock(rq);
}
}
-#endif
-
- erts_smp_chk_system_block(prepare_for_block,
- resume_after_block,
- (void *) rq);
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#endif
+#else /* ERTS_SMP */
+ {
+ erts_aint32_t aux_work;
+ aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
+ if (aux_work)
+ handle_aux_work(&esdp->aux_work_data, aux_work);
+ }
+#endif /* ERTS_SMP */
ASSERT(rq->len == rq->procs.len + rq->ports.info.len);
-#ifndef ERTS_SMP
+ if (rq->len == 0 && !rq->misc.start) {
- if (rq->len == 0 && !rq->misc.start)
- goto do_sys_schedule;
+#ifdef ERTS_SMP
-#else /* ERTS_SMP */
- if (rq->len == 0 && !rq->misc.start) {
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
rq->wakeup_other = 0;
@@ -5428,26 +6238,17 @@ Process *schedule(Process *p, int calls)
}
}
+#endif
+
scheduler_wait(&fcalls, esdp, rq);
+#ifdef ERTS_SMP
non_empty_runq(rq);
+#endif
goto check_activities_to_run;
}
- else
-#endif /* ERTS_SMP */
- if (fcalls > input_reductions && prepare_for_sys_schedule()) {
- int runnable;
-
-#ifdef ERTS_SMP
- runnable = 1;
-#else
- do_sys_schedule:
- runnable = rq->len != 0;
- if (!runnable)
- sched_waiting_sys(esdp->no, rq);
-#endif
-
+ else if (fcalls > input_reductions && prepare_for_sys_schedule()) {
/*
* Schedule system-level activities.
*/
@@ -5457,11 +6258,11 @@ Process *schedule(Process *p, int calls)
ASSERT(!erts_port_task_have_outstanding_io_tasks());
-#ifdef ERTS_SMP
- /* erts_sys_schedule_interrupt(0); */
+#if 0 /* Not needed since we won't wait in sys schedule */
+ erts_sys_schedule_interrupt(0);
#endif
erts_smp_runq_unlock(rq);
- erl_sys_schedule(runnable);
+ erl_sys_schedule(1);
dt = erts_do_time_read_and_reset();
if (dt) erts_bump_timer(dt);
#ifdef ERTS_SMP
@@ -5469,8 +6270,6 @@ Process *schedule(Process *p, int calls)
clear_sys_scheduling();
goto continue_check_activities_to_run;
#else
- if (!runnable)
- sched_active_sys(esdp->no, rq);
goto check_activities_to_run;
#endif
}
@@ -5718,14 +6517,14 @@ erts_sched_stat_modify(int what)
int ix;
switch (what) {
case ERTS_SCHED_STAT_MODIFY_ENABLE:
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_sched_stat.enabled = 1;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_DISABLE:
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
	erts_sched_stat.enabled = 0;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_CLEAR:
erts_smp_spin_lock(&erts_sched_stat.lock);
@@ -5785,18 +6584,10 @@ erts_sched_stat_term(Process *p, int total)
void
erts_schedule_misc_op(void (*func)(void *), void *arg)
{
- ErtsRunQueue *rq = erts_get_runq_current(NULL);
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsRunQueue *rq = esdp ? esdp->run_queue : ERTS_RUNQ_IX(0);
ErtsMiscOpList *molp = misc_op_list_alloc();
- if (!rq) {
- /*
- * This can only happen when the sys msg dispatcher
- * thread schedules misc ops (this happens *very*
- * seldom; only when trace drivers are unloaded).
- */
- rq = ERTS_RUNQ_IX(0);
- }
-
erts_smp_runq_lock(rq);
while (rq->misc.evac_runq) {
@@ -5888,7 +6679,7 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
* Wait for other schedulers to schedule out their processes
* and update 'reductions'.
*/
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++)
reds += ERTS_RUNQ_IX(ix)->procs.reductions;
if (redsp)
@@ -5896,7 +6687,7 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
if (diffp)
*diffp = reds - last_exact_reductions;
last_exact_reductions = reds;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
@@ -8701,6 +9492,22 @@ init_processes_bif(void)
* Debug stuff
*/
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int
+erts_dbg_check_halloc_lock(Process *p)
+{
+ if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
+ return 1;
+ if (p->id == ERTS_INVALID_PID)
+ return 1;
+ if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ return 1;
+ if (erts_thr_progress_is_blocking())
+ return 1;
+ return 0;
+}
+#endif
+
Eterm
erts_debug_processes(Process *c_p)
{
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 739aef3130..f0c86a0851 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -53,11 +53,18 @@ typedef struct process Process;
#include "erl_time.h"
#include "erl_atom_table.h"
#include "external.h"
+#include "erl_mseg.h"
+#include "erl_async.h"
#ifdef HIPE
#include "hipe_process.h"
#endif
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+
struct ErtsNodesMonitor_;
struct port;
@@ -88,6 +95,7 @@ struct saved_calls {
};
extern Export exp_send, exp_receive, exp_timeout;
+extern int erts_sched_compact_load;
extern Uint erts_no_schedulers;
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
@@ -242,16 +250,25 @@ typedef enum {
| ERTS_SSI_FLG_WAITING \
| ERTS_SSI_FLG_SUSPENDED)
-#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
-
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 0)
-#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 1)
+#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 0)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 1)
+#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 2)
+#ifdef ERTS_SMP
+#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 3)
+#endif
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 5)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 6)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 7)
+#ifdef ERTS_SMP
+#define ERTS_SSI_AUX_WORK_DD (((erts_aint32_t) 1) << 8)
+#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 9)
+#endif
+#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 10)
-#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
- (ERTS_SSI_AUX_WORK_CHECK_CHILDREN \
- | ERTS_SSI_AUX_WORK_MISC)
-#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
- (0)
+#if !HAVE_ERTS_MSEG
+# undef ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK
+#endif
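Each of the flags above occupies one bit in the per-scheduler aux_work word (ssi->aux_work). A scheduler reads the word with an acquire barrier and dispatches on the bits that are set, which is what handle_aux_work() does in the scheduler loop changes above. A minimal consumer-side sketch, not part of this patch (the dispatch bodies are placeholders):

    /* given a scheduler's sleep info pointer ssi */
    erts_aint32_t aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
    if (aux_work & ERTS_SSI_AUX_WORK_ASYNC_READY) {
        /* service this scheduler's async-ready queue */
    }
    if (aux_work & ERTS_SSI_AUX_WORK_MISC) {
        /* run scheduled misc aux work */
    }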
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -261,11 +278,13 @@ typedef struct {
} ErtsSchedulerSleepList;
struct ErtsSchedulerSleepInfo_ {
+#ifdef ERTS_SMP
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
erts_smp_atomic32_t flags;
erts_tse_t *event;
- erts_smp_atomic32_t aux_work;
+#endif
+ erts_atomic32_t aux_work;
};
/* times to reschedule low prio process before running */
@@ -386,25 +405,49 @@ do { \
(RQ)->wakeup_other_reds += (REDS); \
} while (0)
-struct ErtsSchedulerData_ {
-
+typedef struct {
+ int sched_id;
+ ErtsSchedulerData *esdp;
+ ErtsSchedulerSleepInfo *ssi;
+ struct {
+ int ix;
#ifdef ERTS_SMP
+ ErtsThrPrgrVal thr_prgr;
+#endif
+ } misc;
+#ifdef ERTS_SMP
+ struct {
+ ErtsThrPrgrVal thr_prgr;
+ void (*completed_callback)(void *);
+ void (*completed_arg)(void *);
+ } dd;
+#endif
+#ifdef ERTS_USE_ASYNC_READY_Q
+ struct {
+#ifdef ERTS_SMP
+ int need_thr_prgr;
+ ErtsThrPrgrVal thr_prgr;
+#endif
+ void *queue;
+ } async_ready;
+#endif
+} ErtsAuxWorkData;
+
+struct ErtsSchedulerData_ {
/*
* Keep X registers first (so we get as many low
* numbered registers as possible in the same cache
* line).
*/
-#if !HALFWORD_HEAP
- Eterm save_reg[ERTS_X_REGS_ALLOCATED]; /* X registers */
-#else
- Eterm *save_reg;
-#endif
- FloatDef freg[MAX_REG]; /* Floating point registers. */
+ Eterm* x_reg_array; /* X registers */
+ FloatDef* f_reg_array; /* Floating point registers. */
+
+#ifdef ERTS_SMP
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
- ErtsSchedulerSleepInfo *ssi;
Process *free_process;
+ ErtsThrPrgrData thr_progress_data;
#endif
#if !HEAP_ON_C_STACK
Eterm tmp_heap[TMP_HEAP_SIZE];
@@ -413,16 +456,19 @@ struct ErtsSchedulerData_ {
Eterm cmp_tmp_heap[CMP_TMP_HEAP_SIZE];
Eterm erl_arith_tmp_heap[ERL_ARITH_TMP_HEAP_SIZE];
#endif
-
+ ErtsSchedulerSleepInfo *ssi;
Process *current_process;
Uint no; /* Scheduler number */
struct port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
int cpu_id; /* >= 0 when bound */
+ ErtsAuxWorkData aux_work_data;
ErtsAtomCacheMap atom_cache_map;
+ ErtsSchedAllocData alloc_data;
+
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
erts_smp_atomic32_t chk_cpu_bind; /* Only used when common run queue */
@@ -1032,7 +1078,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
void erts_pre_init_process(void);
void erts_late_init_process(void);
-void erts_early_init_scheduling(void);
+void erts_early_init_scheduling(int);
void erts_init_scheduling(int, int, int);
ErtsProcList *erts_proclist_create(Process *);
@@ -1041,6 +1087,9 @@ int erts_proclist_same(ErtsProcList *, Process *);
int erts_sched_set_wakeup_limit(char *str);
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_dbg_check_halloc_lock(Process *p);
+#endif
#ifdef DEBUG
void erts_dbg_multi_scheduling_return_trap(Process *, Eterm);
#endif
@@ -1058,13 +1107,20 @@ erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int);
int erts_is_multi_scheduling_blocked(void);
Eterm erts_multi_scheduling_blockers(Process *);
void erts_start_schedulers(void);
+void erts_alloc_notify_delayed_dealloc(int);
void erts_smp_notify_check_children_needed(void);
-void
-erts_smp_schedule_misc_aux_work(int ignore_self,
- int max_sched,
- void (*func)(void *),
- void *arg);
#endif
+#if ERTS_USE_ASYNC_READY_Q
+void erts_notify_check_async_ready_queue(void *);
+#endif
+void erts_schedule_misc_aux_work(int sched_id,
+ void (*func)(void *),
+ void *arg);
+void erts_schedule_multi_misc_aux_work(int ignore_self,
+ int max_sched,
+ void (*func)(void *),
+ void *arg);
+erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int);
@@ -1148,6 +1204,7 @@ Sint erts_test_next_pid(int, Uint);
Eterm erts_debug_processes(Process *c_p);
Eterm erts_debug_processes_bif_info(Process *c_p);
Uint erts_debug_nbalance(void);
+int erts_debug_wait_deallocations(Process *c_p);
#ifdef ERTS_SMP
# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data)
@@ -1218,16 +1275,11 @@ erts_psd_get(Process *p, int ix)
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks)
- ERTS_SMP_LC_ASSERT(locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
else {
locks &= erts_psd_required_locks[ix].get_locks;
ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ || erts_thr_progress_is_blocking());
}
#endif
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
@@ -1244,16 +1296,11 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
- ERTS_SMP_LC_ASSERT(locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
else {
locks &= erts_psd_required_locks[ix].set_locks;
ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ || erts_thr_progress_is_blocking());
}
#endif
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
@@ -1600,8 +1647,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t flags;
ERTS_THR_MEMORY_BARRIER;
flags = erts_smp_atomic32_read_nob(&ssi->flags);
- ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
- || (flags & ERTS_SSI_FLG_WAITING));
if (flags & ERTS_SSI_FLG_SLEEPING) {
flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
erts_sched_finish_poke(ssi, flags);
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 83379d7352..b4d20480c5 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -669,7 +669,9 @@ proc_safelock(Process *a_proc,
ErtsProcLocks b_need_locks)
{
Process *p1, *p2;
+#ifdef ERTS_ENABLE_LOCK_CHECK
Eterm pid1, pid2;
+#endif
erts_pix_lock_t *pix_lck1, *pix_lck2;
ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
ErtsProcLocks unlock_mask;
@@ -684,24 +686,32 @@ proc_safelock(Process *a_proc,
if (a_proc) {
if (a_proc->id < b_proc->id) {
p1 = a_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = a_proc->id;
+#endif
pix_lck1 = a_pix_lck;
need_locks1 = a_need_locks;
have_locks1 = a_have_locks;
p2 = b_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = b_proc->id;
+#endif
pix_lck2 = b_pix_lck;
need_locks2 = b_need_locks;
have_locks2 = b_have_locks;
}
else if (a_proc->id > b_proc->id) {
p1 = b_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = b_proc->id;
+#endif
pix_lck1 = b_pix_lck;
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = a_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = a_proc->id;
+#endif
pix_lck2 = a_pix_lck;
need_locks2 = a_need_locks;
have_locks2 = a_have_locks;
@@ -710,12 +720,16 @@ proc_safelock(Process *a_proc,
ERTS_LC_ASSERT(a_proc == b_proc);
ERTS_LC_ASSERT(a_proc->id == b_proc->id);
p1 = a_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = a_proc->id;
+#endif
pix_lck1 = a_pix_lck;
need_locks1 = a_need_locks | b_need_locks;
have_locks1 = a_have_locks | b_have_locks;
p2 = NULL;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = 0;
+#endif
pix_lck2 = NULL;
need_locks2 = 0;
have_locks2 = 0;
@@ -723,12 +737,16 @@ proc_safelock(Process *a_proc,
}
else {
p1 = b_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = b_proc->id;
+#endif
pix_lck1 = b_pix_lck;
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = NULL;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = 0;
+#endif
pix_lck2 = NULL;
need_locks2 = 0;
have_locks2 = 0;
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index cd3b2182fd..97f250138e 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -651,7 +651,7 @@ ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks);
ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *);
ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *);
-
+ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -737,6 +737,21 @@ ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
#endif
}
+ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 refc)
+{
+#ifdef ERTS_SMP
+ Process *fp;
+ erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock(pixlck);
+ ERTS_LC_ASSERT(p->lock.refc > 0);
+ p->lock.refc += refc;
+ fp = p->lock.refc == 0 ? p : NULL;
+ erts_pix_unlock(pixlck);
+ if (fp)
+ erts_free_proc(fp);
+#endif
+}
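The new erts_smp_proc_add_refc() adjusts the process lock reference count by an arbitrary, possibly negative, amount under the pix lock and frees the process if the count drops to zero. A hedged usage sketch (n is an illustrative count, not from the patch):

    erts_smp_proc_add_refc(p, (Sint32) n);   /* take n extra references */
    /* ... */
    erts_smp_proc_add_refc(p, (Sint32) -n);  /* drop them again; p is freed
                                                here if the count reached zero */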
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#ifdef ERTS_SMP
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
new file mode 100644
index 0000000000..a7ccea7403
--- /dev/null
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -0,0 +1,305 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Scheduler specific pre-allocators. Each scheduler
+ * thread allocates memory in its own private chunk of
+ * memory. Memory blocks deallocated by remote
+ * schedulers (or other threads) are passed back to
+ * the chunk owner via a lock-free data structure.
+ *
+ * Author: Rickard Green
+ */
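A minimal usage sketch based on the API declared in erl_sched_spec_pre_alloc.h further down in this diff; the block type, pool size, and variable names are illustrative assumptions, not part of this patch:

    erts_sspa_data_t *pool;
    char *blk;

    /* at init time: one chunk per scheduler, blocks of the given size */
    pool = erts_sspa_create(sizeof(my_blk_t), 1000);

    /* allocate from the chunk owned by scheduler cix (0-based) */
    blk = erts_sspa_alloc(pool, cix);

    /* free from any scheduler; when cix is not the owning scheduler the
       block is enqueued back to the owner via the lock-free tail queue */
    if (!erts_sspa_free(pool, cix, blk))
        ; /* pointer was not from this pool; use the ordinary free path */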
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef ERTS_SMP
+
+#include "erl_process.h"
+#include "erl_thr_progress.h"
+
+erts_sspa_data_t *
+erts_sspa_create(size_t blk_sz, int pa_size)
+{
+ erts_sspa_data_t *data;
+ size_t tot_size;
+ size_t chunk_mem_size;
+ char *p;
+ char *chunk_start;
+ int cix;
+ int no_blocks = pa_size;
+ int no_blocks_per_chunk;
+
+ if (erts_no_schedulers == 1)
+ no_blocks_per_chunk = no_blocks;
+ else {
+ int extra = (no_blocks - 1)/4 + 1;
+ if (extra == 0)
+ extra = 1;
+ no_blocks_per_chunk = no_blocks;
+ no_blocks_per_chunk += extra*erts_no_schedulers;
+ no_blocks_per_chunk /= erts_no_schedulers;
+ }
+ no_blocks = no_blocks_per_chunk * erts_no_schedulers;
+ chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
+ chunk_mem_size += blk_sz * no_blocks_per_chunk;
+ chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
+ tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
+ tot_size += chunk_mem_size*erts_no_schedulers;
+
+ p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
+ data = (erts_sspa_data_t *) p;
+ p += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
+ chunk_start = p;
+
+ data->chunks_mem_size = chunk_mem_size;
+ data->start = chunk_start;
+ data->end = chunk_start + chunk_mem_size*erts_no_schedulers;
+
+ /* Initialize all chunks */
+ for (cix = 0; cix < erts_no_schedulers; cix++) {
+ erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
+ erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
+ erts_sspa_blk_t *blk;
+ int i;
+
+ erts_atomic_init_nob(&chdr->tail.data.last, (erts_aint_t) &chdr->tail.data.marker);
+ erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&chdr->tail.data.um_refc[0], 0);
+ erts_atomic_init_nob(&chdr->tail.data.um_refc[1], 0);
+ erts_atomic32_init_nob(&chdr->tail.data.um_refc_ix, 0);
+
+ chdr->head.no_thr_progress_check = 0;
+ chdr->head.used_marker = 1;
+ chdr->head.first = &chdr->tail.data.marker;
+ chdr->head.unref_end = &chdr->tail.data.marker;
+ chdr->head.next.thr_progress = erts_thr_progress_current();
+ chdr->head.next.thr_progress_reached = 1;
+ chdr->head.next.um_refc_ix = 1;
+ chdr->head.next.unref_end = &chdr->tail.data.marker;
+
+ p = &chnk->data[0];
+ chdr->local.first = (erts_sspa_blk_t *) p;
+ blk = (erts_sspa_blk_t *) p;
+ for (i = 0; i < no_blocks_per_chunk; i++) {
+ blk = (erts_sspa_blk_t *) p;
+ p += blk_sz;
+ blk->next_ptr = (erts_sspa_blk_t *) p;
+ }
+
+ blk->next_ptr = NULL;
+ chdr->local.last = blk;
+ chdr->local.cnt = no_blocks_per_chunk;
+ chdr->local.lim = no_blocks_per_chunk / 3;
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+
+ return data;
+}
+
+static ERTS_INLINE erts_aint_t
+enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *this,
+ int want_last)
+{
+ erts_aint_t ilast, itmp;
+
+ erts_atomic_init_nob(&this->next_atmc, ERTS_AINT_NULL);
+
+ /* Enqueue at end of list... */
+
+ ilast = erts_atomic_read_nob(&chdr->tail.data.last);
+ while (1) {
+ erts_sspa_blk_t *last = (erts_sspa_blk_t *) ilast;
+ itmp = erts_atomic_cmpxchg_mb(&last->next_atmc,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL)
+ break;
+ ilast = itmp;
+ }
+
+ /* Move last pointer forward... */
+ while (1) {
+ erts_aint_t itmp;
+ if (want_last) {
+ if (erts_atomic_read_rb(&this->next_atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return erts_atomic_read_nob(&chdr->tail.data.last);
+ }
+ }
+ else {
+ if (erts_atomic_read_nob(&this->next_atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return ERTS_AINT_NULL;
+ }
+ }
+ itmp = erts_atomic_cmpxchg_mb(&chdr->tail.data.last,
+ (erts_aint_t) this,
+ ilast);
+ if (ilast == itmp)
+ return want_last ? (erts_aint_t) this : ERTS_AINT_NULL;
+ ilast = itmp;
+ }
+}
+
+void
+erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr, erts_sspa_blk_t *blk)
+{
+ int um_refc_ix = 0;
+ int managed_thread = erts_thr_progress_is_managed_thread();
+ if (!managed_thread) {
+ um_refc_ix = erts_atomic32_read_acqb(&chdr->tail.data.um_refc_ix);
+ while (1) {
+ int tmp_um_refc_ix;
+ erts_atomic_inc_acqb(&chdr->tail.data.um_refc[um_refc_ix]);
+ tmp_um_refc_ix = erts_atomic32_read_acqb(&chdr->tail.data.um_refc_ix);
+ if (tmp_um_refc_ix == um_refc_ix)
+ break;
+ erts_atomic_dec_relb(&chdr->tail.data.um_refc[um_refc_ix]);
+ um_refc_ix = tmp_um_refc_ix;
+ }
+ }
+
+ (void) enqueue_remote_managed_thread(chdr, blk, 0);
+
+ if (!managed_thread)
+ erts_atomic_dec_relb(&chdr->tail.data.um_refc[um_refc_ix]);
+}
+
+static ERTS_INLINE void
+fetch_remote(erts_sspa_chunk_header_t *chdr, int max)
+{
+ int new_local = 0;
+
+ if (chdr->head.no_thr_progress_check < ERTS_SSPA_FORCE_THR_CHECK_PROGRESS)
+ chdr->head.no_thr_progress_check++;
+ else {
+ erts_aint_t ilast;
+
+ chdr->head.no_thr_progress_check = 0;
+
+ ilast = erts_atomic_read_nob(&chdr->tail.data.last);
+ if (((erts_sspa_blk_t *) ilast) == &chdr->tail.data.marker
+ && chdr->head.first == &chdr->tail.data.marker)
+ return;
+
+ if (chdr->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(chdr->head.next.thr_progress)) {
+ int um_refc_ix;
+ chdr->head.next.thr_progress_reached = 1;
+ um_refc_ix = chdr->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&chdr->tail.data.um_refc[um_refc_ix]) == 0) {
+
+ /* Move unreferenced end pointer forward... */
+
+ chdr->head.unref_end = chdr->head.next.unref_end;
+
+ if (!chdr->head.used_marker
+ && chdr->head.unref_end == (erts_sspa_blk_t *) ilast) {
+		    /* Need to enqueue marker */
+ chdr->head.used_marker = 1;
+ ilast = enqueue_remote_managed_thread(chdr,
+ &chdr->tail.data.marker,
+ 1);
+ }
+
+ if (chdr->head.unref_end == (erts_sspa_blk_t *) ilast)
+ ERTS_THR_MEMORY_BARRIER;
+ else {
+ chdr->head.next.unref_end = (erts_sspa_blk_t *) ilast;
+ ERTS_THR_MEMORY_BARRIER;
+ chdr->head.next.thr_progress = erts_thr_progress_later();
+ erts_atomic32_set_relb(&chdr->tail.data.um_refc_ix,
+ um_refc_ix);
+ chdr->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
+ chdr->head.next.thr_progress_reached = 0;
+ }
+ }
+ }
+ }
+
+ if (new_local < max && chdr->head.first != chdr->head.unref_end) {
+ erts_sspa_blk_t *first, *this, *next, *last;
+ first = chdr->head.first;
+ if (first == &chdr->tail.data.marker) {
+ chdr->head.used_marker = 0;
+ first = ((erts_sspa_blk_t *)
+ erts_atomic_read_nob(&first->next_atmc));
+ chdr->head.first = first;
+ }
+ if (first != chdr->head.unref_end) {
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+
+ this = last = first;
+ do {
+ next = (erts_sspa_blk_t *) erts_atomic_read_nob(&this->next_atmc);
+ if (this == &chdr->tail.data.marker)
+ chdr->head.used_marker = 0;
+ else {
+ last->next_ptr = this;
+ last = this;
+ new_local++;
+ }
+ this = next;
+ } while (new_local < max && this != chdr->head.unref_end);
+ chdr->head.first = this;
+ if (!chdr->local.last)
+ chdr->local.first = first;
+ else
+ chdr->local.last->next_ptr = first;
+ chdr->local.last = last;
+ last->next_ptr = NULL;
+ chdr->local.cnt += new_local;
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+ }
+
+}
+
+erts_sspa_blk_t *
+erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *old_res)
+{
+ erts_sspa_blk_t *res = old_res;
+
+ fetch_remote(chdr, ERTS_SSPA_MAX_GET_NEW_LOCAL);
+
+ if (!res && chdr->local.first) {
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+
+ res = chdr->local.first;
+ chdr->local.first = res->next_ptr;
+ chdr->local.cnt--;
+ if (!chdr->local.first)
+ chdr->local.last = NULL;
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+
+ return res;
+}
+
+#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
new file mode 100644
index 0000000000..d36066c399
--- /dev/null
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -0,0 +1,239 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Scheduler specific pre-allocators. Each scheduler
+ * thread allocates memory in its own private chunk of
+ * memory. Memory blocks deallocated by remote
+ * schedulers (or other threads) are passed back to
+ * the chunk owner via a lock-free data structure.
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERTS_SCHED_SPEC_PRE_ALLOC_H__
+#define ERTS_SCHED_SPEC_PRE_ALLOC_H__
+
+#ifdef ERTS_SMP
+
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+
+#ifdef DEBUG
+#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P) \
+do { \
+ ASSERT((void *) (C) < (void *) (P)); \
+ ASSERT((void *) (P) \
+ < (void *) (((char *) (C)) + (A)->chunks_mem_size)); \
+} while (0)
+#else
+#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P)
+#endif
+
+#ifdef DEBUG
+extern Uint erts_no_schedulers;
+#endif
+
+#define ERTS_SSPA_FORCE_THR_CHECK_PROGRESS 10
+#define ERTS_SSPA_MAX_GET_NEW_LOCAL 5
+
+typedef struct {
+ char *start;
+ char *end;
+ int chunks_mem_size;
+} erts_sspa_data_t;
+
+typedef union erts_sspa_blk_t_ erts_sspa_blk_t;
+union erts_sspa_blk_t_ {
+ erts_atomic_t next_atmc;
+ erts_sspa_blk_t *next_ptr;
+};
+
+typedef struct {
+ erts_sspa_blk_t *first;
+ erts_sspa_blk_t *last;
+ int cnt;
+ int lim;
+} erts_sspa_local_freelist_t;
+
+typedef struct {
+ erts_sspa_blk_t marker;
+ erts_atomic_t last;
+ erts_atomic_t um_refc[2];
+ erts_atomic32_t um_refc_ix;
+} erts_sspa_tail_t;
+
+typedef struct {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /* Modified by threads returning memory to this chunk */
+ erts_sspa_tail_t data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_tail_t))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread owning this chunk.
+ */
+ struct {
+ int no_thr_progress_check;
+ int used_marker;
+ erts_sspa_blk_t *first;
+ erts_sspa_blk_t *unref_end;
+ struct {
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+ int um_refc_ix;
+ erts_sspa_blk_t *unref_end;
+ } next;
+ } head;
+ erts_sspa_local_freelist_t local;
+} erts_sspa_chunk_header_t;
+
+typedef struct {
+ union {
+ erts_sspa_chunk_header_t header;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(erts_sspa_chunk_header_t))];
+ } aligned;
+ char data[1];
+} erts_sspa_chunk_t;
+
+#ifdef DEBUG
+ERTS_GLB_INLINE void
+check_local_list(erts_sspa_chunk_header_t *chdr);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void
+check_local_list(erts_sspa_chunk_header_t *chdr)
+{
+ erts_sspa_blk_t *blk;
+ int n = 0;
+ for (blk = chdr->local.first; blk; blk = blk->next_ptr)
+ n++;
+ ASSERT(n == chdr->local.cnt);
+}
+#endif
+#define ERTS_SSPA_DBG_CHK_LCL(CHDR) check_local_list((CHDR))
+#else
+#define ERTS_SSPA_DBG_CHK_LCL(CHDR)
+#endif
+
+erts_sspa_data_t *erts_sspa_create(size_t blk_sz,
+ int pa_size);
+void erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *blk);
+erts_sspa_blk_t *erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *old_res);
+
+ERTS_GLB_INLINE erts_sspa_chunk_t *erts_sspa_cix2chunk(erts_sspa_data_t *data,
+ int cix);
+ERTS_GLB_INLINE int erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr);
+ERTS_GLB_INLINE char *erts_sspa_alloc(erts_sspa_data_t *data, int cix);
+ERTS_GLB_INLINE int erts_sspa_free(erts_sspa_data_t *data, int cix, char *blk);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE erts_sspa_chunk_t *
+erts_sspa_cix2chunk(erts_sspa_data_t *data, int cix)
+{
+ ASSERT(0 <= cix && cix < erts_no_schedulers);
+ return (erts_sspa_chunk_t *) (data->start + cix*data->chunks_mem_size);
+}
+
+ERTS_GLB_INLINE int
+erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr)
+{
+ int cix;
+ size_t diff;
+ if ((char *) ptr < data->start || data->end <= (char *) ptr)
+ return -1;
+ diff = ((char *) ptr) - data->start;
+ cix = (int) diff / data->chunks_mem_size;
+ ASSERT(0 <= cix && cix < erts_no_schedulers);
+ return cix;
+}
+
+ERTS_GLB_INLINE char *
+erts_sspa_alloc(erts_sspa_data_t *data, int cix)
+{
+ erts_sspa_chunk_t *chnk;
+ erts_sspa_chunk_header_t *chdr;
+ erts_sspa_blk_t *res;
+
+ chnk = erts_sspa_cix2chunk(data, cix);
+ chdr = &chnk->aligned.header;
+ res = chdr->local.first;
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ if (res) {
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ chdr->local.first = res->next_ptr;
+ chdr->local.cnt--;
+ if (!chdr->local.first)
+ chdr->local.last = NULL;
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+ if (chdr->local.cnt <= chdr->local.lim)
+ return (char *) erts_sspa_process_remote_frees(chdr, res);
+ else if (chdr->head.no_thr_progress_check < ERTS_SSPA_FORCE_THR_CHECK_PROGRESS)
+ chdr->head.no_thr_progress_check++;
+ ASSERT(res);
+ return (char *) res;
+}
+
+ERTS_GLB_INLINE int
+erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk)
+{
+ erts_sspa_chunk_t *chnk;
+ erts_sspa_chunk_header_t *chdr;
+ erts_sspa_blk_t *blk = (erts_sspa_blk_t *) cblk;
+ int chnk_cix = erts_sspa_ptr2cix(data, blk);
+
+ if (chnk_cix < 0)
+ return 0;
+
+ chnk = erts_sspa_cix2chunk(data, chnk_cix);
+ chdr = &chnk->aligned.header;
+ if (chnk_cix != cix) {
+ /* Remote chunk */
+ erts_sspa_remote_free(chdr, blk);
+ }
+ else {
+ /* Local chunk */
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ blk->next_ptr = chdr->local.first;
+ chdr->local.first = blk;
+ if (!chdr->local.last)
+ chdr->local.last = blk;
+ chdr->local.cnt++;
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+
+ return 1;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERTS_SMP */
+
+#endif /* ERTS_SCHED_SPEC_PRE_ALLOC_H__ */
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index a89ddfbcc1..63179dfad4 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -822,6 +822,16 @@ erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
#endif
}
+/*
+ * IMPORTANT note about erts_smp_cnd_signal() and erts_smp_cnd_broadcast()
+ *
+ * POSIX allows calls to `pthread_cond_signal' or `pthread_cond_broadcast'
+ * even when the associated mutex or mutexes are not locked by the
+ * caller. Our implementation does not allow that, in order to avoid a
+ * performance penalty. That is, all associated mutexes *need* to be
+ * locked by the caller of erts_smp_cnd_signal()/erts_smp_cnd_broadcast()!
+ */
+
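A minimal sketch of the required calling pattern; the mutex, condition variable, and predicate names are illustrative assumptions, not part of this patch:

    erts_smp_mtx_lock(&my_mtx);
    my_predicate = 1;              /* update the state the waiter tests */
    erts_smp_cnd_signal(&my_cnd);  /* the mutex *must* be held here */
    erts_smp_mtx_unlock(&my_mtx);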
ERTS_GLB_INLINE void
erts_smp_cnd_signal(erts_smp_cnd_t *cnd)
{
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 1d75fa313c..bc20b2d798 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -331,7 +331,13 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm)
* we now use a non-zero bit-pattern in debug mode.
*/
#if ET_DEBUG
-#define THE_NON_VALUE _make_header(0,_TAG_HEADER_FLOAT)
+# ifdef HIPE
+  /* A very large (or negative) value used as a workaround for ugly hipe bifs
+     that return untagged integers (e.g. hipe_bs_put_utf8) */
+# define THE_NON_VALUE _make_header((Uint)~0,_TAG_HEADER_FLOAT)
+# else
+# define THE_NON_VALUE _make_header(0,_TAG_HEADER_FLOAT)
+# endif
#else
#define THE_NON_VALUE (0)
#endif
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
new file mode 100644
index 0000000000..9324bcde51
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -0,0 +1,1373 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Thread progress information. Used by lock free algorithms
+ * to determine when all involved threads are guaranteed to
+ * have passed a specific point of execution.
+ *
+ * Usage instructions below.
+ *
+ * Author: Rickard Green
+ */
+
+/*
+ * ------ Usage instructions -----------------------------------------------
+ *
+ * This module keeps track of the progress of a set of managed threads. Only
+ * threads that behave well can be allowed to be managed. A managed thread
+ * should update its thread progress frequently. Currently only scheduler
+ * threads and the aux_thread are managed threads. We typically do not want
+ * any async threads as managed threads, since they execute user-implemented
+ * driver code and therefore cannot guarantee frequent thread progress
+ * updates.
+ *
+ * erts_thr_progress_current() returns the global current thread progress
+ * value of managed threads. I.e., the latest progress value that all
+ * managed threads have reached. Thread progress values are opaque.
+ *
+ * erts_thr_progress_has_reached(VAL) returns a value != 0 if current
+ * global thread progress has reached or passed VAL.
+ *
+ * erts_thr_progress_later() returns a thread progress value in the future
+ * which no managed thread has yet reached.
+ *
+ * All threads issue a full memory barrier when reaching a new thread
+ * progress value. They only reach new thread progress values in specific
+ * controlled states when calling erts_thr_progress_update(). Schedulers
+ * call erts_thr_progress_update() in between execution of processes,
+ * when going to sleep and when waking up.
+ *
+ * Sleeping managed threads are considered to have reached the next thread
+ * progress value immediately. They are not woken, and therefore do not
+ * issue any memory barriers when reaching a new thread progress value.
+ * A sleeping thread does, however, issue a memory barrier immediately
+ * upon wakeup.
+ *
+ * Both managed and registered unmanaged threads may request wakeup when
+ * the global thread progress reaches a certain value using
+ * erts_thr_progress_wakeup().
+ *
+ * Note that thread progress values are opaque, and that you are only
+ * allowed to use thread progress values retrieved from this API!
+ *
+ * -------------------------------------------------------------------------
+ */
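A minimal sketch of the deferred-reclamation pattern this module enables; the block structure and its retire_val field are illustrative assumptions, not part of this patch:

    /* when retiring a block that other managed threads may still read */
    blk->retire_val = erts_thr_progress_later();

    /* later, e.g. from work driven by erts_thr_progress_update() */
    if (erts_thr_progress_has_reached(blk->retire_val)) {
        /* all managed threads have passed the retire point;
           blk can no longer be referenced and is safe to free */
    }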
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof() */
+#include "erl_thr_progress.h"
+#include "global.h"
+
+#ifdef ERTS_SMP
+
+/*
+ * We use a 64-bit value for thread progress. Because of this, wrapping
+ * of the thread progress value will more or less never occur.
+ *
+ * On 32-bit systems we therefore need a double word atomic.
+ */
+
+#define ERTS_THR_PRGR_PRINT_LEADER 0
+#define ERTS_THR_PRGR_PRINT_VAL 0
+#define ERTS_THR_PRGR_PRINT_BLOCKERS 0
+
+#define ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL 100
+
+#define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30)
+#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
+ | ERTS_THR_PRGR_LFLG_BLOCK))
+
+#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \
+ ((LFLGS) & ERTS_THR_PRGR_LFLG_ACTIVE_MASK)
+
+#define ERTS_THR_PRGR_LFLGS_ALL_WAITING(LFLGS) \
+ (((LFLGS) & (ERTS_THR_PRGR_LFLG_NO_LEADER \
+ |ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) \
+ == ERTS_THR_PRGR_LFLG_NO_LEADER)
+
+#define read_acqb erts_thr_prgr_read_acqb__
+
+#ifdef ARCH_64
+
+static ERTS_INLINE void
+set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_atomic_set_mb(atmc, val);
+}
+
+static ERTS_INLINE void
+set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_atomic_set_nob(atmc, val);
+}
+
+static ERTS_INLINE ErtsThrPrgrVal
+read_nob(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ return (ErtsThrPrgrVal) erts_atomic_read_nob(atmc);
+}
+
+static ERTS_INLINE void
+init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_atomic_init_nob(atmc, val);
+}
+
+#else
+
+#undef dw_sint_to_val
+#define dw_sint_to_val erts_thr_prgr_dw_sint_to_val__
+
+static void
+val_to_dw_sint(ethr_dw_sint_t *dw_sint, ErtsThrPrgrVal val)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw_sint->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
+#else
+ dw_sint->sint[ETHR_DW_SINT_LOW_WORD]
+ = (ethr_sint_t) (val & 0xffffffff);
+ dw_sint->sint[ETHR_DW_SINT_HIGH_WORD]
+ = (ethr_sint_t) ((val >> 32) & 0xffffffff);
+#endif
+}
+
+static ERTS_INLINE void
+set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ ethr_dw_sint_t dw_sint;
+ val_to_dw_sint(&dw_sint, val);
+ erts_dw_atomic_set_mb(atmc, &dw_sint);
+}
+
+static ERTS_INLINE void
+set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ ethr_dw_sint_t dw_sint;
+ val_to_dw_sint(&dw_sint, val);
+ erts_dw_atomic_set_nob(atmc, &dw_sint);
+}
+
+static ERTS_INLINE ErtsThrPrgrVal
+read_nob(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ ethr_dw_sint_t dw_sint;
+ erts_dw_atomic_read_nob(atmc, &dw_sint);
+ return erts_thr_prgr_dw_sint_to_val__(&dw_sint);
+}
+
+static ERTS_INLINE void
+init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ ethr_dw_sint_t dw_sint;
+ val_to_dw_sint(&dw_sint, val);
+ erts_dw_atomic_init_nob(atmc, &dw_sint);
+}
+
+#endif
+
+/* #define ERTS_THR_PROGRESS_STATE_DEBUG */
+
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+
+#ifdef __GNUC__
+#warning "Thread progress state debug is on"
+#endif
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER (((erts_aint32_t) 1) << 0)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE (((erts_aint32_t) 1) << 1)
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_INIT(ID) \
+ erts_atomic32_init_nob(&intrnl->thr[(ID)].data.state_debug, \
+ ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE)
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(ID, ON) \
+do { \
+ erts_aint32_t state_debug__; \
+ state_debug__ = erts_atomic32_read_nob(&intrnl->thr[(ID)].data.state_debug); \
+ if ((ON)) \
+ state_debug__ |= ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE; \
+ else \
+ state_debug__ &= ~ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE; \
+ erts_atomic32_set_nob(&intrnl->thr[(ID)].data.state_debug, state_debug__); \
+} while (0)
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(ID, ON) \
+do { \
+ erts_aint32_t state_debug__; \
+ state_debug__ = erts_atomic32_read_nob(&intrnl->thr[(ID)].data.state_debug); \
+ if ((ON)) \
+ state_debug__ |= ERTS_THR_PROGRESS_STATE_DEBUG_LEADER; \
+ else \
+ state_debug__ &= ~ERTS_THR_PROGRESS_STATE_DEBUG_LEADER; \
+ erts_atomic32_set_nob(&intrnl->thr[(ID)].data.state_debug, state_debug__); \
+} while (0)
+
+#else
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_INIT(ID)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(ID, ON)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(ID, ON)
+
+#endif /* ERTS_THR_PROGRESS_STATE_DEBUG */
+
+#define ERTS_THR_PRGR_BLCKR_INVALID (~((erts_aint32_t) 0))
+#define ERTS_THR_PRGR_BLCKR_UNMANAGED (((erts_aint32_t) 1) << 31)
+
+#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING (((erts_aint32_t) 1) << 31)
+
+#define ERTS_THR_PRGR_BM_BITS 32
+#define ERTS_THR_PRGR_BM_SHIFT 5
+#define ERTS_THR_PRGR_BM_MASK 0x1f
+
+#define ERTS_THR_PRGR_WAKEUP_DATA_MASK (ERTS_THR_PRGR_WAKEUP_DATA_SIZE - 1)
+
+#define ERTS_THR_PRGR_WAKEUP_IX(V) \
+ ((int) ((V) & ERTS_THR_PRGR_WAKEUP_DATA_MASK))
+
+typedef struct {
+ erts_atomic32_t len;
+ int id[1];
+} ErtsThrPrgrManagedWakeupData;
+
+typedef struct {
+ erts_atomic32_t len;
+ int high_sz;
+ int low_sz;
+ erts_atomic32_t *high;
+ erts_atomic32_t *low;
+} ErtsThrPrgrUnmanagedWakeupData;
+
+typedef struct {
+ erts_atomic32_t lflgs;
+ erts_atomic32_t block_count;
+ erts_atomic_t blocker_event;
+ erts_atomic32_t pref_wakeup_used;
+ erts_atomic32_t managed_count;
+ erts_atomic32_t managed_id;
+ erts_atomic32_t unmanaged_id;
+} ErtsThrPrgrMiscData;
+
+typedef struct {
+ ERTS_THR_PRGR_ATOMIC current;
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ erts_atomic32_t state_debug;
+#endif
+} ErtsThrPrgrElement;
+
+typedef union {
+ ErtsThrPrgrElement data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrPrgrElement))];
+} ErtsThrPrgrArray;
+
+typedef struct {
+ union {
+ ErtsThrPrgrMiscData data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(ErtsThrPrgrMiscData))];
+ } misc;
+ ErtsThrPrgrArray *thr;
+ struct {
+ int no;
+ ErtsThrPrgrCallbacks *callbacks;
+ ErtsThrPrgrManagedWakeupData *data[ERTS_THR_PRGR_WAKEUP_DATA_SIZE];
+ } managed;
+ struct {
+ int no;
+ ErtsThrPrgrCallbacks *callbacks;
+ ErtsThrPrgrUnmanagedWakeupData *data[ERTS_THR_PRGR_WAKEUP_DATA_SIZE];
+ } unmanaged;
+} ErtsThrPrgrInternalData;
+
+static ErtsThrPrgrInternalData *intrnl;
+
+ErtsThrPrgr erts_thr_prgr__;
+
+erts_tsd_key_t erts_thr_prgr_data_key__;
+
+static void handle_wakeup_requests(ErtsThrPrgrVal current);
+static int got_sched_wakeups(void);
+static erts_aint32_t block_thread(ErtsThrPrgrData *tpd);
+
+static ERTS_INLINE void
+wakeup_managed(int id)
+{
+ ErtsThrPrgrCallbacks *cbp = &intrnl->managed.callbacks[id];
+ ASSERT(0 <= id && id < intrnl->managed.no);
+ cbp->wakeup(cbp->arg);
+}
+
+
+static ERTS_INLINE void
+wakeup_unmanaged(int id)
+{
+ ErtsThrPrgrCallbacks *cbp = &intrnl->unmanaged.callbacks[id];
+ ASSERT(0 <= id && id < intrnl->unmanaged.no);
+ cbp->wakeup(cbp->arg);
+}
+
+static ERTS_INLINE ErtsThrPrgrData *
+perhaps_thr_prgr_data(ErtsSchedulerData *esdp)
+{
+ if (esdp)
+ return &esdp->thr_progress_data;
+ else
+ return erts_tsd_get(erts_thr_prgr_data_key__);
+}
+
+static ERTS_INLINE ErtsThrPrgrData *
+thr_prgr_data(ErtsSchedulerData *esdp)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
+ ASSERT(tpd);
+ return tpd;
+}
+
+static void
+init_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
+{
+ tpd->id = -1;
+ tpd->is_managed = 0;
+ tpd->is_blocking = 0;
+ tpd->is_temporary = 1;
+
+ erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
+}
+
+static ERTS_INLINE ErtsThrPrgrData *
+tmp_thr_prgr_data(ErtsSchedulerData *esdp)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
+
+ if (!tpd) {
+ /*
+ * We only allocate the part up to the wakeup_request field
+ * which is the first field only used by registered threads
+ */
+ tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
+ offsetof(ErtsThrPrgrData, wakeup_request));
+ init_tmp_thr_prgr_data(tpd);
+ }
+
+ return tpd;
+}
+
+static ERTS_INLINE void
+return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
+{
+ if (tpd->is_temporary) {
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ }
+}
+
+static ERTS_INLINE int
+block_count_dec(void)
+{
+ erts_aint32_t block_count;
+ block_count = erts_atomic32_dec_read_mb(&intrnl->misc.data.block_count);
+ if (block_count == 0) {
+ erts_tse_t *event;
+ event = ((erts_tse_t*)
+ erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
+ if (event)
+ erts_tse_set(event);
+ return 1;
+ }
+
+ return (block_count & ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING) == 0;
+}
+
+static ERTS_INLINE int
+block_count_inc(void)
+{
+ erts_aint32_t block_count;
+ block_count = erts_atomic32_inc_read_mb(&intrnl->misc.data.block_count);
+ return (block_count & ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING) == 0;
+}
+
+
+void
+erts_thr_progress_pre_init(void)
+{
+ intrnl = NULL;
+ erts_tsd_key_create(&erts_thr_prgr_data_key__);
+ init_nob(&erts_thr_prgr__.current, 0);
+}
+
+void
+erts_thr_progress_init(int no_schedulers, int managed, int unmanaged)
+{
+ int i, j, um_low, um_high;
+ char *ptr;
+ size_t cb_sz, intrnl_sz, thr_arr_sz, m_wakeup_size, um_wakeup_size,
+ tot_size;
+
+ intrnl_sz = sizeof(ErtsThrPrgrInternalData);
+ intrnl_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(intrnl_sz);
+
+ cb_sz = sizeof(ErtsThrPrgrCallbacks)*(managed+unmanaged);
+ cb_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(cb_sz);
+
+ thr_arr_sz = sizeof(ErtsThrPrgrArray)*managed;
+ ASSERT(thr_arr_sz == ERTS_ALC_CACHE_LINE_ALIGN_SIZE(thr_arr_sz));
+
+ m_wakeup_size = sizeof(ErtsThrPrgrManagedWakeupData);
+ m_wakeup_size += (managed - 1)*sizeof(int);
+ m_wakeup_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(m_wakeup_size);
+
+ um_low = (unmanaged - 1)/ERTS_THR_PRGR_BM_BITS + 1;
+ um_high = (um_low - 1)/ERTS_THR_PRGR_BM_BITS + 1;
+
+ um_wakeup_size = sizeof(ErtsThrPrgrUnmanagedWakeupData);
+ um_wakeup_size += (um_high + um_low)*sizeof(erts_atomic32_t);
+ um_wakeup_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(um_wakeup_size);
+
+ tot_size = intrnl_sz;
+ tot_size += cb_sz;
+ tot_size += thr_arr_sz;
+ tot_size += m_wakeup_size*ERTS_THR_PRGR_WAKEUP_DATA_SIZE;
+ tot_size += um_wakeup_size*ERTS_THR_PRGR_WAKEUP_DATA_SIZE;
+
+ ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_THR_PRGR_IDATA,
+ tot_size);
+
+ intrnl = (ErtsThrPrgrInternalData *) ptr;
+ ptr += intrnl_sz;
+
+ erts_atomic32_init_nob(&intrnl->misc.data.lflgs,
+ ERTS_THR_PRGR_LFLG_NO_LEADER);
+ erts_atomic32_init_nob(&intrnl->misc.data.block_count,
+ (ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING
+ | (erts_aint32_t) managed));
+ erts_atomic_init_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);
+ erts_atomic32_init_nob(&intrnl->misc.data.pref_wakeup_used, 0);
+ erts_atomic32_init_nob(&intrnl->misc.data.managed_count, 0);
+ erts_atomic32_init_nob(&intrnl->misc.data.managed_id, no_schedulers);
+ erts_atomic32_init_nob(&intrnl->misc.data.unmanaged_id, -1);
+
+ intrnl->thr = (ErtsThrPrgrArray *) ptr;
+ ptr += thr_arr_sz;
+ for (i = 0; i < managed; i++)
+ init_nob(&intrnl->thr[i].data.current, 0);
+
+ intrnl->managed.callbacks = (ErtsThrPrgrCallbacks *) ptr;
+ intrnl->unmanaged.callbacks = &intrnl->managed.callbacks[managed];
+ ptr += cb_sz;
+
+ intrnl->managed.no = managed;
+ for (i = 0; i < managed; i++) {
+ intrnl->managed.callbacks[i].arg = NULL;
+ intrnl->managed.callbacks[i].wakeup = NULL;
+ }
+
+ intrnl->unmanaged.no = unmanaged;
+ for (i = 0; i < unmanaged; i++) {
+ intrnl->unmanaged.callbacks[i].arg = NULL;
+ intrnl->unmanaged.callbacks[i].wakeup = NULL;
+ }
+
+ for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
+ intrnl->managed.data[i] = (ErtsThrPrgrManagedWakeupData *) ptr;
+ erts_atomic32_init_nob(&intrnl->managed.data[i]->len, 0);
+ ptr += m_wakeup_size;
+ }
+
+ for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
+ erts_atomic32_t *bm;
+ intrnl->unmanaged.data[i] = (ErtsThrPrgrUnmanagedWakeupData *) ptr;
+ erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->len, 0);
+ bm = (erts_atomic32_t *) (ptr + sizeof(ErtsThrPrgrUnmanagedWakeupData));
+ intrnl->unmanaged.data[i]->high = bm;
+ intrnl->unmanaged.data[i]->high_sz = um_high;
+ for (j = 0; j < um_high; j++)
+ erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->high[j], 0);
+ intrnl->unmanaged.data[i]->low
+ = &intrnl->unmanaged.data[i]->high[um_high];
+ intrnl->unmanaged.data[i]->low_sz = um_low;
+ for (j = 0; j < um_low; j++)
+ erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->low[j], 0);
+ ptr += um_wakeup_size;
+ }
+ ERTS_THR_MEMORY_BARRIER;
+}
+
+static void
+init_wakeup_request_array(ErtsThrPrgrVal *w)
+{
+ int i;
+ ErtsThrPrgrVal current;
+
+ current = read_acqb(&erts_thr_prgr__.current);
+ for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
+ w[i] = current - ((ErtsThrPrgrVal) (ERTS_THR_PRGR_WAKEUP_DATA_SIZE + i));
+ if (w[i] > current)
+ w[i]--;
+ }
+}
+
+void
+erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ int is_blocking = 0;
+
+ if (tpd) {
+ if (!tpd->is_temporary)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Double register of thread\n",
+ __FILE__, __LINE__, __func__);
+ is_blocking = tpd->is_blocking;
+ return_tmp_thr_prgr_data(tpd);
+ }
+
+ /*
+ * We only allocate the part up to the leader field
+ * which is the first field only used by managed threads
+ */
+ tpd = erts_alloc(ERTS_ALC_T_THR_PRGR_DATA,
+ offsetof(ErtsThrPrgrData, leader));
+ tpd->id = (int) erts_atomic32_inc_read_nob(&intrnl->misc.data.unmanaged_id);
+ tpd->is_managed = 0;
+ tpd->is_blocking = is_blocking;
+ tpd->is_temporary = 0;
+ ASSERT(tpd->id >= 0);
+ if (tpd->id >= intrnl->unmanaged.no)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Too many unmanaged registered threads\n",
+ __FILE__, __LINE__, __func__);
+
+ init_wakeup_request_array(&tpd->wakeup_request[0]);
+ erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
+
+ ASSERT(callbacks->wakeup);
+
+ intrnl->unmanaged.callbacks[tpd->id] = *callbacks;
+}
+
+
+void
+erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
+ ErtsThrPrgrCallbacks *callbacks,
+ int pref_wakeup)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ int is_blocking = 0, managed;
+
+ if (tpd) {
+ if (!tpd->is_temporary)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Double register of thread\n",
+ __FILE__, __LINE__, __func__);
+ is_blocking = tpd->is_blocking;
+ return_tmp_thr_prgr_data(tpd);
+ }
+
+ if (esdp)
+ tpd = &esdp->thr_progress_data;
+ else
+ tpd = erts_alloc(ERTS_ALC_T_THR_PRGR_DATA, sizeof(ErtsThrPrgrData));
+
+ if (pref_wakeup
+ && !erts_atomic32_xchg_nob(&intrnl->misc.data.pref_wakeup_used, 1))
+ tpd->id = 0;
+ else if (esdp)
+ tpd->id = (int) esdp->no;
+ else
+ tpd->id = erts_atomic32_inc_read_nob(&intrnl->misc.data.managed_id);
+ ASSERT(tpd->id >= 0);
+ if (tpd->id >= intrnl->managed.no)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Too many managed registered threads\n",
+ __FILE__, __LINE__, __func__);
+
+ tpd->is_managed = 1;
+ tpd->is_blocking = is_blocking;
+ tpd->is_temporary = 0;
+
+ init_wakeup_request_array(&tpd->wakeup_request[0]);
+
+ ERTS_THR_PROGRESS_STATE_DEBUG_INIT(tpd->id);
+
+ tpd->leader = 0;
+ tpd->active = 1;
+ tpd->previous.local = 0;
+ tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+ erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
+
+ erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
+
+ ASSERT(callbacks->wakeup);
+ ASSERT(callbacks->prepare_wait);
+ ASSERT(callbacks->wait);
+ ASSERT(callbacks->finalize_wait);
+
+ intrnl->managed.callbacks[tpd->id] = *callbacks;
+
+ callbacks->prepare_wait(callbacks->arg);
+ managed = erts_atomic32_inc_read_relb(&intrnl->misc.data.managed_count);
+ if (managed != intrnl->managed.no) {
+ /* Wait until all managed threads have registered... */
+ do {
+ callbacks->wait(callbacks->arg);
+ callbacks->prepare_wait(callbacks->arg);
+ managed = erts_atomic32_read_acqb(&intrnl->misc.data.managed_count);
+ } while (managed != intrnl->managed.no);
+ }
+ else {
+ int id;
+ /* All managed threads have registered; lets go... */
+ for (id = 0; id < managed; id++)
+ if (id != tpd->id)
+ wakeup_managed(id);
+ }
+ callbacks->finalize_wait(callbacks->arg);
+}
+
+static ERTS_INLINE int
+leader_update(ErtsThrPrgrData *tpd)
+{
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+ if (!tpd->leader) {
+ /* Probably need to block... */
+ block_thread(tpd);
+ }
+ else {
+ erts_aint32_t lflgs;
+ ErtsThrPrgrVal next;
+ int ix, sz, make_progress;
+
+ if (tpd->previous.current == ERTS_THR_PRGR_VAL_WAITING) {
+ /* Took over as leader from another thread */
+ tpd->previous.current = read_acqb(&erts_thr_prgr__.current);
+ tpd->previous.next = tpd->previous.current;
+ tpd->previous.next++;
+ if (tpd->previous.next == ERTS_THR_PRGR_VAL_WAITING)
+ tpd->previous.next = 0;
+ }
+
+ if (tpd->previous.local == tpd->previous.current) {
+ ErtsThrPrgrVal val = tpd->previous.current + 1;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->previous.local = val;
+ set_mb(&intrnl->thr[tpd->id].data.current, val);
+ }
+
+ next = tpd->previous.next;
+
+ make_progress = 1;
+ sz = intrnl->managed.no;
+ for (ix = 0; ix < sz; ix++) {
+ ErtsThrPrgrVal tmp;
+ tmp = read_nob(&intrnl->thr[ix].data.current);
+ if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
+ make_progress = 0;
+ ASSERT(erts_thr_progress_has_passed__(next, tmp));
+ break;
+ }
+ }
+
+ if (make_progress) {
+ ErtsThrPrgrVal current = next;
+
+ next++;
+ if (next == ERTS_THR_PRGR_VAL_WAITING)
+ next = 0;
+
+ set_nob(&intrnl->thr[tpd->id].data.current, next);
+ set_mb(&erts_thr_prgr__.current, current);
+ tpd->previous.local = next;
+ tpd->previous.next = next;
+ tpd->previous.current = current;
+
+#if ERTS_THR_PRGR_PRINT_VAL
+ if (current % 1000 == 0)
+ erts_fprintf(stderr, "%b64u\n", current);
+#endif
+ handle_wakeup_requests(current);
+ }
+
+ if (tpd->active) {
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ (void) block_thread(tpd);
+ }
+ else {
+ tpd->leader = 0;
+ tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+#if ERTS_THR_PRGR_PRINT_LEADER
+ erts_fprintf(stderr, "L <- %d\n", tpd->id);
+#endif
+
+ ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);
+
+ lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
+ ERTS_THR_PRGR_LFLG_NO_LEADER);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ lflgs = block_thread(tpd);
+ if (ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0 && got_sched_wakeups())
+ wakeup_managed(0);
+ }
+ }
+
+ return tpd->leader;
+}
+
+static int
+update(ErtsThrPrgrData *tpd)
+{
+ int res;
+ ErtsThrPrgrVal val;
+
+ if (tpd->leader)
+ res = 1;
+ else {
+ erts_aint32_t lflgs;
+ res = 0;
+ val = read_acqb(&erts_thr_prgr__.current);
+ if (tpd->previous.local == val) {
+ val++;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->previous.local = val;
+ set_mb(&intrnl->thr[tpd->id].data.current, val);
+ }
+
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ res = 1; /* Need to block in leader_update() */
+
+ if ((lflgs & ERTS_THR_PRGR_LFLG_NO_LEADER)
+ && (tpd->active || ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0)) {
+ /* Try to take over leadership... */
+ erts_aint32_t olflgs;
+ olflgs = erts_atomic32_read_band_acqb(
+ &intrnl->misc.data.lflgs,
+ ~ERTS_THR_PRGR_LFLG_NO_LEADER);
+ if (olflgs & ERTS_THR_PRGR_LFLG_NO_LEADER) {
+ tpd->leader = 1;
+#if ERTS_THR_PRGR_PRINT_LEADER
+ erts_fprintf(stderr, "L -> %d\n", tpd->id);
+#endif
+ ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 1);
+ }
+ }
+ res |= tpd->leader;
+ }
+ return res;
+}
+
+int
+erts_thr_progress_update(ErtsSchedulerData *esdp)
+{
+ return update(thr_prgr_data(esdp));
+}
+
+
+int
+erts_thr_progress_leader_update(ErtsSchedulerData *esdp)
+{
+ return leader_update(thr_prgr_data(esdp));
+}
+
+void
+erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
+{
+ erts_aint32_t lflgs;
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+
+ block_count_dec();
+
+ tpd->previous.local = ERTS_THR_PRGR_VAL_WAITING;
+ set_mb(&intrnl->thr[tpd->id].data.current, ERTS_THR_PRGR_VAL_WAITING);
+
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (ERTS_THR_PRGR_LFLGS_ALL_WAITING(lflgs) && got_sched_wakeups())
+ wakeup_managed(0); /* Someone needs to make progress */
+}
+
+void
+erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
+{
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+ ErtsThrPrgrVal current, val;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+
+ /*
+ * We aren't allowed to continue until our thread
+ * progress is past global current.
+ */
+ val = current = read_acqb(&erts_thr_prgr__.current);
+ while (1) {
+ val++;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->previous.local = val;
+ set_mb(&intrnl->thr[tpd->id].data.current, val);
+ val = read_acqb(&erts_thr_prgr__.current);
+ if (current == val)
+ break;
+ current = val;
+ }
+ if (block_count_inc())
+ block_thread(tpd);
+ if (update(tpd))
+ leader_update(tpd);
+}
+
+void
+erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
+{
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+
+ ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(tpd->id, on);
+
+ if (on) {
+ ASSERT(!tpd->active);
+ tpd->active = 1;
+ erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
+ }
+ else {
+ ASSERT(tpd->active);
+ tpd->active = 0;
+ erts_atomic32_dec_nob(&intrnl->misc.data.lflgs);
+ if (update(tpd))
+ leader_update(tpd);
+ }
+
+#ifdef DEBUG
+ {
+ erts_aint32_t n = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ n &= ERTS_THR_PRGR_LFLG_ACTIVE_MASK;
+ ASSERT(tpd->active <= n && n <= intrnl->managed.no);
+ }
+#endif
+
+}
+
+static ERTS_INLINE int
+has_reached_wakeup(ErtsThrPrgrVal wakeup)
+{
+ /*
+ * Exactly the same as erts_thr_progress_has_reached(), but
+ * also verify valid wakeup requests in debug mode.
+ */
+ ErtsThrPrgrVal current;
+
+ current = read_acqb(&erts_thr_prgr__.current);
+
+#if ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE
+ {
+ ErtsThrPrgrVal limit;
+ /*
+ * erts_thr_progress_later() returns values equal to
+ * 'current + 2'; users should never get hold of
+ * values larger than that.
+ *
+ * That is, valid values are values less than 'current + 3'.
+ *
+ * Values larger than this won't work with the wakeup
+ * algorithm.
+ */
+
+ limit = current + 3;
+ if (limit == ERTS_THR_PRGR_VAL_WAITING)
+ limit = 0;
+ else if (limit < current) /* Wrapped */
+ limit += 1;
+
+ if (!erts_thr_progress_has_passed__(limit, wakeup))
+ erl_exit(ERTS_ABORT_EXIT,
+ "Invalid wakeup request value found:"
+ " current=%b64u, wakeup=%b64u, limit=%b64u",
+ current, wakeup, limit);
+ }
+#endif
+
+ if (current == wakeup)
+ return 1;
+ return erts_thr_progress_has_passed__(current, wakeup);
+}
+
+static void
+request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
+{
+ ErtsThrPrgrManagedWakeupData *mwd;
+ int ix, wix;
+
+ /*
+ * Only managed threads that aren't in waiting state
+ * are allowed to call this function.
+ */
+
+ ASSERT(tpd->is_managed);
+ ASSERT(tpd->previous.local != ERTS_THR_PRGR_VAL_WAITING);
+
+ if (has_reached_wakeup(value))
+ wakeup_managed(tpd->id);
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(value);
+ if (tpd->wakeup_request[wix] == value)
+ return; /* Already got a request registered */
+
+ ASSERT(erts_thr_progress_has_passed__(value,
+ tpd->wakeup_request[wix]));
+
+
+ if (tpd->previous.local == value) {
+ /*
+ * We have already confirmed this value. We need to request
+ * wakeup for a value later than our latest confirmed value in
+ * order to prevent progress from reaching the requested value
+ * while we are writing the request.
+ *
+ * It is ok to move the wakeup request forward since the only
+ * guarantee we make (and can make) is that the thread will be
+ * woken some time *after* the requested value has been reached.
+ */
+ value++;
+ if (value == ERTS_THR_PRGR_VAL_WAITING)
+ value = 0;
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(value);
+ if (tpd->wakeup_request[wix] == value)
+ return; /* Already got a request registered */
+
+ ASSERT(erts_thr_progress_has_passed__(value,
+ tpd->wakeup_request[wix]));
+ }
+
+ tpd->wakeup_request[wix] = value;
+
+ mwd = intrnl->managed.data[wix];
+
+ ix = erts_atomic32_inc_read_nob(&mwd->len) - 1;
+ mwd->id[ix] = tpd->id;
+
+ ASSERT(!erts_thr_progress_has_reached(value));
+
+ /*
+ * This thread is guaranteed to issue a full memory barrier:
+ * - after the request has been written, but
+ * - before the global thread progress reach the (possibly
+ * increased) requested wakeup value.
+ */
+}
+
+static void
+request_wakeup_unmanaged(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
+{
+ int wix, ix, id, bit;
+ ErtsThrPrgrUnmanagedWakeupData *umwd;
+
+ ASSERT(!tpd->is_managed);
+
+ /*
+ * Thread progress *can* reach and pass our requested value while
+ * we are writing the request.
+ */
+
+ if (has_reached_wakeup(value))
+ wakeup_unmanaged(tpd->id);
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(value);
+
+ if (tpd->wakeup_request[wix] == value)
+ return; /* Already got a request registered */
+
+ ASSERT(erts_thr_progress_has_passed__(value,
+ tpd->wakeup_request[wix]));
+
+ umwd = intrnl->unmanaged.data[wix];
+
+ id = tpd->id;
+
+ bit = id & ERTS_THR_PRGR_BM_MASK;
+ ix = id >> ERTS_THR_PRGR_BM_SHIFT;
+ ASSERT(0 <= ix && ix < umwd->low_sz);
+ erts_atomic32_read_bor_nob(&umwd->low[ix], 1 << bit);
+
+ bit = ix & ERTS_THR_PRGR_BM_MASK;
+ ix >>= ERTS_THR_PRGR_BM_SHIFT;
+ ASSERT(0 <= ix && ix < umwd->high_sz);
+ erts_atomic32_read_bor_nob(&umwd->high[ix], 1 << bit);
+
+ erts_atomic32_inc_mb(&umwd->len);
+
+ if (erts_thr_progress_has_reached(value))
+ wakeup_unmanaged(tpd->id);
+ else
+ tpd->wakeup_request[wix] = value;
+}
+
+void
+erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+ ErtsThrPrgrVal value)
+{
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+ ASSERT(!tpd->is_temporary);
+ if (tpd->is_managed)
+ request_wakeup_managed(tpd, value);
+ else
+ request_wakeup_unmanaged(tpd, value);
+}
+
+static void
+wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
+{
+ int hix;
+ for (hix = 0; hix < umwd->high_sz; hix++) {
+ erts_aint32_t hmask = erts_atomic32_read_nob(&umwd->high[hix]);
+ if (hmask) {
+ int hbase = hix << ERTS_THR_PRGR_BM_SHIFT;
+ int hbit;
+ for (hbit = 0; hbit < ERTS_THR_PRGR_BM_BITS; hbit++) {
+ if (hmask & (1 << hbit)) {
+ erts_aint_t lmask;
+ int lix = hbase + hbit;
+ ASSERT(0 <= lix && lix < umwd->low_sz);
+ lmask = erts_atomic32_read_nob(&umwd->low[lix]);
+ if (lmask) {
+ int lbase = lix << ERTS_THR_PRGR_BM_SHIFT;
+ int lbit;
+ for (lbit = 0; lbit < ERTS_THR_PRGR_BM_BITS; lbit++) {
+ if (lmask & (1 << lbit)) {
+ int id = lbase + lbit;
+ wakeup_unmanaged(id);
+ }
+ }
+ erts_atomic32_set_nob(&umwd->low[lix], 0);
+ }
+ }
+ }
+ erts_atomic32_set_nob(&umwd->high[hix], 0);
+ }
+ }
+}
+
+
+static void
+handle_wakeup_requests(ErtsThrPrgrVal current)
+{
+ ErtsThrPrgrManagedWakeupData *mwd;
+ ErtsThrPrgrUnmanagedWakeupData *umwd;
+ int wix, len, i;
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(current);
+
+ mwd = intrnl->managed.data[wix];
+ len = erts_atomic32_read_nob(&mwd->len);
+ ASSERT(len >= 0);
+ if (len) {
+ for (i = 0; i < len; i++)
+ wakeup_managed(mwd->id[i]);
+ erts_atomic32_set_nob(&mwd->len, 0);
+ }
+
+ umwd = intrnl->unmanaged.data[wix];
+ len = erts_atomic32_read_nob(&umwd->len);
+ ASSERT(len >= 0);
+ if (len) {
+ wakeup_unmanaged_threads(umwd);
+ erts_atomic32_set_nob(&umwd->len, 0);
+ }
+
+}
+
+static int
+got_sched_wakeups(void)
+{
+ int wix;
+
+ ERTS_THR_MEMORY_BARRIER;
+
+ for (wix = 0; wix < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; wix++) {
+ ErtsThrPrgrManagedWakeupData **mwd = intrnl->managed.data;
+ if (erts_atomic32_read_nob(&mwd[wix]->len))
+ return 1;
+ }
+ for (wix = 0; wix < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; wix++) {
+ ErtsThrPrgrUnmanagedWakeupData **umwd = intrnl->unmanaged.data;
+ if (erts_atomic32_read_nob(&umwd[wix]->len))
+ return 1;
+ }
+ return 0;
+}
+
+static erts_aint32_t
+block_thread(ErtsThrPrgrData *tpd)
+{
+ erts_aint32_t lflgs;
+ ErtsThrPrgrCallbacks *cbp = &intrnl->managed.callbacks[tpd->id];
+
+ do {
+ block_count_dec();
+
+ while (1) {
+ cbp->prepare_wait(cbp->arg);
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ cbp->wait(cbp->arg);
+ else
+ break;
+ }
+
+ } while (block_count_inc());
+
+ cbp->finalize_wait(cbp->arg);
+
+ return lflgs;
+}
+
+static erts_aint32_t
+thr_progress_block(ErtsThrPrgrData *tpd, int wait)
+{
+ erts_tse_t *event = NULL; /* Remove erroneous warning... sigh... */
+ erts_aint32_t lflgs, bc;
+
+ if (tpd->is_blocking++)
+ return (erts_aint32_t) 0;
+
+ while (1) {
+ lflgs = erts_atomic32_read_bor_nob(&intrnl->misc.data.lflgs,
+ ERTS_THR_PRGR_LFLG_BLOCK);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ block_thread(tpd);
+ else
+ break;
+ }
+
+#if ERTS_THR_PRGR_PRINT_BLOCKERS
+ erts_fprintf(stderr, "block(%d)\n", tpd->id);
+#endif
+
+ ASSERT(ERTS_AINT_NULL
+ == erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
+
+ if (wait) {
+ event = erts_tse_fetch();
+ erts_tse_reset(event);
+ erts_atomic_set_nob(&intrnl->misc.data.blocker_event,
+ (erts_aint_t) event);
+ }
+ if (tpd->is_managed)
+ erts_atomic32_dec_nob(&intrnl->misc.data.block_count);
+ bc = erts_atomic32_read_band_mb(&intrnl->misc.data.block_count,
+ ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
+ bc &= ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING;
+ if (wait) {
+ while (bc != 0) {
+ erts_tse_wait(event);
+ erts_tse_reset(event);
+ bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
+ }
+ }
+ return bc;
+
+}
+
+void
+erts_thr_progress_block(void)
+{
+ thr_progress_block(tmp_thr_prgr_data(NULL), 1);
+}
+
+void
+erts_thr_progress_fatal_error_block(SWord timeout)
+{
+ ErtsThrPrgrData tpd_buf;
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ erts_aint32_t bc;
+ SWord time_left = timeout;
+ SysTimeval to;
+
+ /*
+ * Counting poll intervals may give us too long a timeout
+ * if the CPU is busy. If tolerant time of day is available,
+ * we use it to prevent this.
+ */
+ if (!erts_disable_tolerant_timeofday) {
+ erts_get_timeval(&to);
+ to.tv_sec += timeout / 1000;
+ to.tv_usec += (timeout % 1000)*1000;
+ if (to.tv_usec >= 1000000) {
+     to.tv_sec++;
+     to.tv_usec -= 1000000;
+ }
+ }
+
+ if (!tpd) {
+ /*
+ * We stack allocate since failure to allocate memory may
+ * have caused the problem in the first place. This is ok
+ * since we never complete an unblock after a fatal error
+ * block.
+ */
+ tpd = &tpd_buf;
+ init_tmp_thr_prgr_data(tpd);
+ }
+
+ bc = thr_progress_block(tpd, 0);
+ if (bc == 0)
+ return; /* Successfully blocked all managed threads */
+
+ while (1) {
+ if (erts_milli_sleep(ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL) == 0)
+ time_left -= ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL;
+ bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
+ if (bc == 0)
+ break; /* Successfully blocked all managed threads */
+ if (time_left <= 0)
+ break; /* Timeout */
+ if (!erts_disable_tolerant_timeofday) {
+ SysTimeval now;
+ erts_get_timeval(&now);
+ if (now.tv_sec > to.tv_sec)
+ break; /* Timeout */
+ if (now.tv_sec == to.tv_sec && now.tv_usec >= to.tv_usec)
+ break; /* Timeout */
+ }
+ }
+}
+
+void
+erts_thr_progress_unblock(void)
+{
+ erts_tse_t *event;
+ int id, break_id, sz, wakeup;
+ ErtsThrPrgrData *tpd = thr_prgr_data(NULL);
+
+ ASSERT(tpd->is_blocking);
+ if (--tpd->is_blocking)
+ return;
+
+ sz = intrnl->managed.no;
+
+ wakeup = 1;
+ if (!tpd->is_managed)
+ id = break_id = tpd->id < 0 ? 0 : tpd->id % sz;
+ else {
+ break_id = tpd->id;
+ id = break_id + 1;
+ if (id >= sz)
+ id = 0;
+ if (id == break_id)
+ wakeup = 0;
+ erts_atomic32_inc_nob(&intrnl->misc.data.block_count);
+ }
+
+ event = ((erts_tse_t *)
+ erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
+ ASSERT(event);
+ erts_atomic_set_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);
+
+ erts_atomic32_read_bor_relb(&intrnl->misc.data.block_count,
+ ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
+#if ERTS_THR_PRGR_PRINT_BLOCKERS
+ erts_fprintf(stderr, "unblock(%d)\n", tpd->id);
+#endif
+ erts_atomic32_read_band_mb(&intrnl->misc.data.lflgs,
+ ~ERTS_THR_PRGR_LFLG_BLOCK);
+
+ if (wakeup) {
+ do {
+ ErtsThrPrgrVal tmp;
+ tmp = read_nob(&intrnl->thr[id].data.current);
+ if (tmp != ERTS_THR_PRGR_VAL_WAITING)
+ wakeup_managed(id);
+ if (++id >= sz)
+ id = 0;
+ } while (id != break_id);
+ }
+
+ return_tmp_thr_prgr_data(tpd);
+ erts_tse_return(event);
+}
+
+int
+erts_thr_progress_is_blocking(void)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ return tpd && tpd->is_blocking;
+}
+
+void erts_thr_progress_dbg_print_state(void)
+{
+ int id;
+ int sz = intrnl->managed.no;
+
+ erts_fprintf(stderr, "--- thread progress ---\n");
+ erts_fprintf(stderr,"current=%b64u\n", erts_thr_progress_current());
+ for (id = 0; id < sz; id++) {
+ ErtsThrPrgrVal current = read_nob(&intrnl->thr[id].data.current);
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ erts_aint32_t state_debug;
+ char *active, *leader;
+
+ state_debug = erts_atomic32_read_nob(&intrnl->thr[id].data.state_debug);
+ active = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE
+ ? "true"
+ : "false");
+ leader = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_LEADER
+ ? "true"
+ : "false");
+#endif
+ if (current == ERTS_THR_PRGR_VAL_WAITING)
+ erts_fprintf(stderr,
+ " id=%d, current=WAITING"
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ ", active=%s, leader=%s"
+#endif
+ "\n", id
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ , active, leader
+#endif
+ );
+ else
+ erts_fprintf(stderr,
+ " id=%d, current=%b64u"
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ ", active=%s, leader=%s"
+#endif
+ "\n", id, current
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ , active, leader
+#endif
+ );
+ }
+ erts_fprintf(stderr, "-----------------------\n");
+
+
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
new file mode 100644
index 0000000000..68d14174b9
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -0,0 +1,233 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Thread progress information. Used by lock free algorithms
+ * to determine when all involved threads are guaranteed to
+ * have passed a specific point of execution.
+ *
+ * Usage instructions can be found in erl_thr_progress.c
+ *
+ * Author: Rickard Green
+ */
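
For orientation, a minimal sketch of the deferred-reclamation pattern this functionality is meant for (illustrative only and not part of the patch; deferred_free_t, my_unlink() and my_free() are hypothetical names):

typedef struct {
    void *block;
    ErtsThrPrgrVal safe_at;   /* progress value after which no thread can hold a reference */
} deferred_free_t;

static void
schedule_deferred_free(deferred_free_t *df, void *block)
{
    my_unlink(block);                        /* hypothetical: unlink from the shared structure */
    df->block = block;
    df->safe_at = erts_thr_progress_later(); /* all managed threads must pass this point */
}

static int
try_deferred_free(ErtsSchedulerData *esdp, deferred_free_t *df)
{
    if (!erts_thr_progress_has_reached(df->safe_at)) {
        erts_thr_progress_wakeup(esdp, df->safe_at); /* ask to be woken when it is safe */
        return 0;
    }
    my_free(df->block);                      /* hypothetical */
    df->block = NULL;
    return 1;
}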
+
+#if !defined(ERL_THR_PROGRESS_H__TSD_TYPE__)
+#define ERL_THR_PROGRESS_H__TSD_TYPE__
+
+#include "sys.h"
+
+#ifndef ERTS_SMP
+
+#define erts_smp_thr_progress_block() ((void) 0)
+#define erts_smp_thr_progress_unblock() ((void) 0)
+#define erts_smp_thr_progress_is_blocking() 1
+
+#else /* ERTS_SMP */
+
+#define erts_smp_thr_progress_block erts_thr_progress_block
+#define erts_smp_thr_progress_unblock erts_thr_progress_unblock
+#define erts_smp_thr_progress_is_blocking erts_thr_progress_is_blocking
+
+void erts_thr_progress_fatal_error_block(SWord timeout);
+void erts_thr_progress_block(void);
+void erts_thr_progress_unblock(void);
+int erts_thr_progress_is_blocking(void);
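
A minimal sketch of how the blocking API is typically used around a global mutation (illustrative only; mutate_global_state() is a hypothetical placeholder):

static void
with_all_threads_blocked(void)
{
    erts_smp_thr_progress_block();                  /* wait for all managed threads to block */
    ASSERT(erts_smp_thr_progress_is_blocking());
    mutate_global_state();                          /* hypothetical: safe while blocking */
    erts_smp_thr_progress_unblock();
}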
+
+typedef Uint64 ErtsThrPrgrVal;
+
+#define ERTS_THR_PRGR_WAKEUP_DATA_SIZE 4 /* Needs to be an even power of 2. */
+
+typedef struct {
+ int id;
+ int is_managed;
+ int is_blocking;
+ int is_temporary;
+
+ /* --- Part below only for registered threads --- */
+
+ ErtsThrPrgrVal wakeup_request[ERTS_THR_PRGR_WAKEUP_DATA_SIZE];
+
+ /* --- Part below only for managed threads --- */
+
+ int leader; /* Needs to be first in the managed threads part */
+ int active;
+ struct {
+ ErtsThrPrgrVal local;
+ ErtsThrPrgrVal next;
+ ErtsThrPrgrVal current;
+ } previous;
+} ErtsThrPrgrData;
+#endif /* ERTS_SMP */
+
+#endif
+
+#if !defined(ERL_THR_PROGRESS_H__) && !defined(ERL_THR_PROGRESS_TSD_TYPE_ONLY)
+#define ERL_THR_PROGRESS_H__
+
+#include "erl_threads.h"
+#include "erl_process.h"
+
+#ifdef ERTS_SMP
+
+#define ERTS_THR_PRGR_VAL_WAITING (~((ErtsThrPrgrVal) 0))
+
+extern erts_tsd_key_t erts_thr_prgr_data_key__;
+
+#ifdef ARCH_64
+# define ERTS_THR_PRGR_ATOMIC erts_atomic_t
+#else /* ARCH_32 */
+# define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
+#endif
+
+typedef struct {
+ void *arg;
+ void (*wakeup)(void *);
+ void (*prepare_wait)(void *);
+ void (*wait)(void *);
+ void (*finalize_wait)(void *);
+} ErtsThrPrgrCallbacks;
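
A sketch of a callback set built on a thread-specific event, roughly what a managed thread would pass to erts_thr_progress_register_managed_thread() (illustrative only; the thr_* names are hypothetical):

static void thr_wakeup(void *arg)        { erts_tse_set((erts_tse_t *) arg); }
static void thr_prepare_wait(void *arg)  { erts_tse_reset((erts_tse_t *) arg); }
static void thr_wait(void *arg)          { (void) erts_tse_wait((erts_tse_t *) arg); }
static void thr_finalize_wait(void *arg) { (void) arg; /* nothing to clean up here */ }

/* Registration, typically done once per managed thread:
 *
 *     ErtsThrPrgrCallbacks cbs;
 *     cbs.arg = (void *) erts_tse_fetch();
 *     cbs.wakeup = thr_wakeup;
 *     cbs.prepare_wait = thr_prepare_wait;
 *     cbs.wait = thr_wait;
 *     cbs.finalize_wait = thr_finalize_wait;
 *     erts_thr_progress_register_managed_thread(esdp, &cbs, 0);
 */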
+
+typedef struct {
+ ERTS_THR_PRGR_ATOMIC current;
+} ErtsThrPrgr;
+
+extern ErtsThrPrgr erts_thr_prgr__;
+
+void erts_thr_progress_pre_init(void);
+void erts_thr_progress_init(int no_schedulers, int managed, int unmanaged);
+void erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
+ ErtsThrPrgrCallbacks *,
+ int);
+void erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *);
+void erts_thr_progress_active(ErtsSchedulerData *esdp, int on);
+void erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+ ErtsThrPrgrVal value);
+int erts_thr_progress_update(ErtsSchedulerData *esdp);
+int erts_thr_progress_leader_update(ErtsSchedulerData *esdp);
+void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp);
+void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp);
+
+void erts_thr_progress_dbg_print_state(void);
+
+#ifdef ARCH_32
+#define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_dw_sint_to_val__(ethr_dw_sint_t *dw_sint);
+#endif
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
+
+ERTS_GLB_INLINE int erts_thr_progress_is_managed_thread(void);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(void);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current(void);
+ERTS_GLB_INLINE int erts_thr_progress_has_passed__(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2);
+ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ARCH_64
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ return (ErtsThrPrgrVal) erts_atomic_read_acqb(atmc);
+}
+
+#else /* ARCH_32 */
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_dw_sint_to_val__(ethr_dw_sint_t *dw_sint)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (ErtsThrPrgrVal) dw_sint->dw_sint;
+#else
+ ErtsThrPrgrVal res;
+ res = (ErtsThrPrgrVal) ((Uint32) dw_sint->sint[ETHR_DW_SINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (ErtsThrPrgrVal) ((Uint32) dw_sint->sint[ETHR_DW_SINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ ethr_dw_sint_t dw_sint;
+ erts_dw_atomic_read_acqb(atmc, &dw_sint);
+ return erts_thr_prgr_dw_sint_to_val__(&dw_sint);
+}
+
+#endif
+
+ERTS_GLB_INLINE int
+erts_thr_progress_is_managed_thread(void)
+{
+ ErtsThrPrgrData *tpd = erts_tsd_get(erts_thr_prgr_data_key__);
+ return tpd && tpd->is_managed;
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_progress_later(void)
+{
+ ErtsThrPrgrVal val = erts_thr_prgr_read_acqb__(&erts_thr_prgr__.current);
+ if (val == (ERTS_THR_PRGR_VAL_WAITING-((ErtsThrPrgrVal)2)))
+ return ((ErtsThrPrgrVal) 0);
+ else if (val == (ERTS_THR_PRGR_VAL_WAITING-((ErtsThrPrgrVal)1)))
+ return ((ErtsThrPrgrVal) 1);
+ else
+ return val + ((ErtsThrPrgrVal) 2);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_progress_current(void)
+{
+ return erts_thr_prgr_read_acqb__(&erts_thr_prgr__.current);
+}
+
+ERTS_GLB_INLINE int
+erts_thr_progress_has_passed__(ErtsThrPrgrVal val1, ErtsThrPrgrVal val0)
+{
+ if ((((((ErtsThrPrgrVal) 1) << 63) & val1)
+ ^ ((((ErtsThrPrgrVal) 1) << 63) & val0)) != 0) {
+ /* May have wrapped... */
+ if (val1 < (((ErtsThrPrgrVal) 1) << 62)
+ && val0 > (((ErtsThrPrgrVal) 3) << 62)) {
+ /*
+ * 'val1' has wrapped but 'val0' has not yet wrapped. While in
+ * these ranges 'current' is considered later than 'val0'.
+ */
+ return 1;
+ }
+ }
+ return val1 > val0;
+}
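
A tiny standalone illustration of the wrap handling above (hypothetical, using plain uint64_t instead of ErtsThrPrgrVal):

#include <assert.h>
#include <stdint.h>

static void
wrap_illustration(void)
{
    uint64_t val0 = UINT64_MAX - 10;  /* not yet wrapped; top bit set */
    uint64_t val1 = 5;                /* has wrapped past 0           */
    /* A plain comparison would say val1 has not passed val0 ...      */
    assert(!(val1 > val0));
    /* ... but the range check above classifies val1 as later.        */
    assert(val1 < (UINT64_C(1) << 62) && val0 > (UINT64_C(3) << 62));
}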
+
+ERTS_GLB_INLINE int
+erts_thr_progress_has_reached(ErtsThrPrgrVal val)
+{
+ ErtsThrPrgrVal current;
+ current = erts_thr_prgr_read_acqb__(&erts_thr_prgr__.current);
+ if (current == val)
+ return 1;
+ return erts_thr_progress_has_passed__(current, val);
+}
+
+#endif
+
+#endif /* ERTS_SMP */
+
+#endif
diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c
new file mode 100644
index 0000000000..9ac4cd4b8e
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_queue.c
@@ -0,0 +1,745 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Lock-free queue for communication between threads.
+ *
+ * Currently only a many-to-one version has been
+ * implemented, i.e., many threads can enqueue but
+ * only one thread can dequeue at a time. It doesn't
+ * have to be the same thread dequeuing every time, but
+ * synchronization so that only one thread dequeues
+ * at a time has to be provided by other means.
+ *
+ * When/If the need for a many-to-many queue arises,
+ * this implementation can relatively easily be extended
+ * to support that too.
+ *
+ * Usage instructions below.
+ *
+ * Author: Rickard Green
+ */
+
+/*
+ * ------ Usage instructions -----------------------------------------------
+ *
+ * Dequeuing generates garbage that needs to be cleaned up.
+ * erts_thr_q_dequeue() automatically cleans, but garbage may also have to
+ * be cleaned up when the queue is empty. This is done by calling
+ * erts_thr_q_clean(). In the SMP case thread progress may have to be made
+ * before cleaning can continue. If so, erts_thr_q_need_thr_progress() in
+ * combination with erts_thr_progress_wakeup() can be used in order to
+ * request a wakeup at an appropriate time.
+ *
+ * Enqueuing implies memory allocation and dequeuing implies memory
+ * deallocation. Memory allocation can be moved to another more suitable
+ * thread using erts_thr_q_prepare_enqueue() together with
+ * erts_thr_q_enqueue_prepared() instead of using erts_thr_q_enqueue().
+ * Memory deallocation can be moved to another more suitable thread by
+ * disabling auto_finalize_dequeue when initializing the queue and then
+ * using erts_thr_q_get_finalize_dequeue_data() together with
+ * erts_thr_q_finalize_dequeue() after dequeuing or cleaning.
+ *
+ * Ending the life of the queue using either erts_thr_q_destroy()
+ * or erts_thr_q_finalize() implies cleaning the queue. Both functions
+ * return the cleaning result and may have to be called multiple times
+ * until the queue is clean. Once one of these functions has been called,
+ * enqueuing is not allowed. This has to be synchronized by the user.
+ * If auto_finalize_dequeue has been disabled, the finalize dequeue
+ * functionality has to be called after ending the life of the queue just
+ * as when dequeuing or cleaning on a queue that is alive.
+ *
+ * -------------------------------------------------------------------------
+ */
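
A consumer-side sketch following the instructions above (illustrative only; handle_object() is a hypothetical consumer and esdp is the dequeuing scheduler's data):

static void
drain_queue(ErtsSchedulerData *esdp, ErtsThrQ_t *q)
{
    void *obj;
    ErtsThrQCleanState_t state;

    while ((obj = erts_thr_q_dequeue(q)) != NULL)
        handle_object(obj);              /* hypothetical consumer */

    /* Garbage may remain even when the queue is empty... */
    state = erts_thr_q_clean(q);
#ifdef ERTS_SMP
    if (state == ERTS_THR_Q_NEED_THR_PRGR)
        /* Cleaning is waiting for thread progress; request a wakeup. */
        erts_thr_progress_wakeup(esdp, erts_thr_q_need_thr_progress(q));
#endif
    /* If state is ERTS_THR_Q_DIRTY, another clean should be scheduled later. */
}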
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_thr_queue.h"
+
+#if defined(DEBUG)
+#define ERTS_THR_Q_DBG_CHK_DATA 1
+#else
+#define ERTS_THR_Q_DBG_CHK_DATA 0
+#endif
+
+#define ERTS_THR_Q_MAX_CLEAN_REACHED_HEAD_COUNT 100
+#define ERTS_THR_Q_MAX_SCHED_CLEAN_OPS 50
+#define ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS 3
+
+#define ERTS_THR_Q_MAX_FINI_DEQ_OPS 50
+
+#ifdef ERTS_SMP
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(sl_element,
+ ErtsThrQElement_t,
+ 1000,
+ ERTS_ALC_T_THR_Q_EL_SL)
+#else
+
+static void
+init_sl_element_alloc(void)
+{
+}
+
+static ErtsThrQElement_t *
+sl_element_alloc(void)
+{
+ return erts_alloc(ERTS_ALC_T_THR_Q_EL_SL,
+ sizeof(ErtsThrQElement_t));
+}
+
+static void
+sl_element_free(ErtsThrQElement_t *p)
+{
+ erts_free(ERTS_ALC_T_THR_Q_EL_SL, p);
+}
+
+#endif
+
+typedef union {
+ ErtsThrQ_t q;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrQ_t))];
+} ErtsAlignedThrQ_t;
+
+void
+erts_thr_q_init(void)
+{
+ init_sl_element_alloc();
+}
+
+static void noop_callback(void *arg) { }
+
+void
+erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
+{
+#ifndef USE_THREADS
+ q->init = *qi;
+ if (!q->init.notify)
+ q->init.notify = noop_callback;
+ q->first = NULL;
+ q->last = NULL;
+ q->q.blk = NULL;
+#else
+ erts_atomic_init_nob(&q->tail.data.marker.next.atmc, ERTS_AINT_NULL);
+ q->tail.data.marker.data.ptr = NULL;
+ erts_atomic_init_nob(&q->tail.data.last,
+ (erts_aint_t) &q->tail.data.marker);
+ erts_atomic_init_nob(&q->tail.data.um_refc[0], 0);
+ erts_atomic_init_nob(&q->tail.data.um_refc[1], 0);
+ erts_atomic32_init_nob(&q->tail.data.um_refc_ix, 0);
+ q->tail.data.live = qi->live.objects;
+ q->tail.data.arg = qi->arg;
+ q->tail.data.notify = qi->notify;
+ if (!q->tail.data.notify)
+ q->tail.data.notify = noop_callback;
+
+ q->head.head.ptr = &q->tail.data.marker;
+ q->head.live = qi->live.objects;
+ q->head.first = &q->tail.data.marker;
+ q->head.unref_end = &q->tail.data.marker;
+ q->head.clean_reached_head_count = 0;
+ q->head.deq_fini.automatic = qi->auto_finalize_dequeue;
+ q->head.deq_fini.start = NULL;
+ q->head.deq_fini.end = NULL;
+#ifdef ERTS_SMP
+ q->head.next.thr_progress = erts_thr_progress_current();
+ q->head.next.thr_progress_reached = 1;
+#endif
+ q->head.next.um_refc_ix = 1;
+ q->head.next.unref_end = &q->tail.data.marker;
+ q->head.used_marker = 1;
+ q->head.arg = qi->arg;
+ q->head.notify = q->tail.data.notify;
+ q->q.finalizing = 0;
+ q->q.live = qi->live.queue;
+ q->q.blk = NULL;
+#endif
+}
+
+ErtsThrQCleanState_t
+erts_thr_q_finalize(ErtsThrQ_t *q)
+{
+#ifdef USE_THREADS
+ q->q.finalizing = 1;
+#endif
+ while (erts_thr_q_dequeue(q));
+ return erts_thr_q_clean(q);
+}
+
+ErtsThrQ_t *
+erts_thr_q_create(ErtsThrQInit_t *qi)
+{
+ ErtsAlcType_t atype;
+ ErtsThrQ_t *q, *qblk;
+ UWord qw;
+
+ switch (qi->live.queue) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ atype = ERTS_ALC_T_THR_Q_SL;
+ break;
+ case ERTS_THR_Q_LIVE_LONG:
+ atype = ERTS_ALC_T_THR_Q_LL;
+ break;
+ default:
+ atype = ERTS_ALC_T_THR_Q;
+ break;
+ }
+
+ qw = (UWord) erts_alloc(atype,
+ sizeof(ErtsThrQ_t) + (ERTS_CACHE_LINE_SIZE-1));
+ qblk = (ErtsThrQ_t *) qw;
+ if (qw & ERTS_CACHE_LINE_MASK)
+ qw = (qw & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ ASSERT((qw & ERTS_CACHE_LINE_MASK) == 0);
+ q = (ErtsThrQ_t *) qw;
+ erts_thr_q_initialize(q, qi);
+ q->q.blk = qblk;
+ return q;
+}
+
+ErtsThrQCleanState_t
+erts_thr_q_destroy(ErtsThrQ_t *q)
+{
+ if (!q->q.blk)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Trying to destroy not created thread queue\n");
+ return erts_thr_q_finalize(q);
+}
+
+#ifdef USE_THREADS
+
+static void
+destroy(ErtsThrQ_t *q)
+{
+ ErtsAlcType_t atype;
+ switch (q->q.live) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ atype = ERTS_ALC_T_THR_Q_SL;
+ break;
+ case ERTS_THR_Q_LIVE_LONG:
+ atype = ERTS_ALC_T_THR_Q_LL;
+ break;
+ default:
+ atype = ERTS_ALC_T_THR_Q;
+ break;
+ }
+ erts_free(atype, q->q.blk);
+}
+
+#endif
+
+static ERTS_INLINE ErtsThrQElement_t *
+element_live_alloc(ErtsThrQLive_t live)
+{
+ switch (live) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ return sl_element_alloc();
+ default:
+ return (ErtsThrQElement_t *) erts_alloc(ERTS_ALC_T_THR_Q_EL,
+ sizeof(ErtsThrQElement_t));
+ }
+}
+
+static ERTS_INLINE ErtsThrQElement_t *
+element_alloc(ErtsThrQ_t *q)
+{
+ ErtsThrQLive_t live;
+#ifdef USE_THREADS
+ live = q->tail.data.live;
+#else
+ live = q->init.live.objects;
+#endif
+ return element_live_alloc(live);
+}
+
+static ERTS_INLINE void
+element_live_free(ErtsThrQLive_t live, ErtsThrQElement_t *el)
+{
+ switch (live) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ sl_element_free(el);
+ break;
+ default:
+ erts_free(ERTS_ALC_T_THR_Q_EL, el);
+ }
+}
+
+static ERTS_INLINE void
+element_free(ErtsThrQ_t *q, ErtsThrQElement_t *el)
+{
+ ErtsThrQLive_t live;
+#ifdef USE_THREADS
+ live = q->head.live;
+#else
+ live = q->init.live.objects;
+#endif
+ element_live_free(live, el);
+}
+
+#ifdef USE_THREADS
+
+static ERTS_INLINE ErtsThrQElement_t *
+enqueue_managed(ErtsThrQ_t *q, ErtsThrQElement_t *this, int want_last)
+{
+ erts_aint_t ilast, itmp;
+
+ erts_atomic_init_nob(&this->next.atmc, ERTS_AINT_NULL);
+ /* Enqueue at end of list... */
+
+ ilast = erts_atomic_read_nob(&q->tail.data.last);
+ while (1) {
+ ErtsThrQElement_t *last = (ErtsThrQElement_t *) ilast;
+ itmp = erts_atomic_cmpxchg_mb(&last->next.atmc,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL)
+ break;
+ ilast = itmp;
+ }
+
+ /* Move last pointer forward... */
+ while (1) {
+ if (want_last) {
+ if (erts_atomic_read_rb(&this->next.atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ ilast = erts_atomic_read_rb(&q->tail.data.last);
+ return (ErtsThrQElement_t *) ilast;
+ }
+ }
+ else {
+ if (erts_atomic_read_nob(&this->next.atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return NULL;
+ }
+ }
+ itmp = erts_atomic_cmpxchg_mb(&q->tail.data.last,
+ (erts_aint_t) this,
+ ilast);
+ if (ilast == itmp)
+ return want_last ? this : NULL;
+ ilast = itmp;
+ }
+}
+
+static ErtsThrQCleanState_t
+clean(ErtsThrQ_t *q, int max_ops, int do_notify)
+{
+ erts_aint_t ilast;
+ int um_refc_ix;
+ int ops;
+
+ for (ops = 0; ops < max_ops; ops++) {
+ ErtsThrQElement_t *tmp;
+ restart:
+ ASSERT(q->head.first);
+ if (q->head.first == q->head.head.ptr) {
+ q->head.clean_reached_head_count++;
+ if (q->head.clean_reached_head_count
+ >= ERTS_THR_Q_MAX_CLEAN_REACHED_HEAD_COUNT) {
+ q->head.clean_reached_head_count = 0;
+ break;
+ }
+ goto inspect_head;
+ }
+ if (q->head.first == q->head.unref_end)
+ break;
+ if (q->head.first == &q->tail.data.marker) {
+ q->head.used_marker = 0;
+ q->head.first = q->head.first->next.ptr;
+ goto restart;
+ }
+ tmp = q->head.first;
+ q->head.first = q->head.first->next.ptr;
+ if (q->head.deq_fini.automatic)
+ element_free(q, tmp);
+ else {
+ tmp->data.ptr = (void *) (UWord) q->head.live;
+ if (!q->head.deq_fini.start)
+ q->head.deq_fini.start = tmp;
+ else if (q->head.deq_fini.end->next.ptr == &q->tail.data.marker)
+ q->head.deq_fini.end->next.ptr = tmp;
+ q->head.deq_fini.end = tmp;
+ }
+ }
+
+ ilast = erts_atomic_read_nob(&q->tail.data.last);
+ if (q->head.first == ((ErtsThrQElement_t *) ilast)
+ && ((ErtsThrQElement_t *) ilast) == &q->tail.data.marker
+ && q->head.first == &q->tail.data.marker) {
+ /* Empty and clean queue */
+ if (q->q.finalizing)
+ destroy(q);
+ return ERTS_THR_Q_CLEAN;
+ }
+
+#ifdef ERTS_SMP
+ if (q->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(q->head.next.thr_progress)) {
+ q->head.next.thr_progress_reached = 1;
+#endif
+ um_refc_ix = q->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
+ /* Move unreferenced end pointer forward... */
+ q->head.clean_reached_head_count = 0;
+ q->head.unref_end = q->head.next.unref_end;
+
+ if (!q->head.used_marker
+ && q->head.unref_end == (ErtsThrQElement_t *) ilast) {
+ q->head.used_marker = 1;
+ ilast = (erts_aint_t) enqueue_managed(q,
+ &q->tail.data.marker,
+ 1);
+ if (q->head.head.ptr == q->head.unref_end) {
+ ErtsThrQElement_t *next;
+ next = ((ErtsThrQElement_t *)
+ erts_atomic_read_acqb(&q->head.head.ptr->next.atmc));
+ if (next == &q->tail.data.marker) {
+ q->head.head.ptr->next.ptr = &q->tail.data.marker;
+ q->head.head.ptr = &q->tail.data.marker;
+ }
+ }
+ }
+
+ if (q->head.unref_end == (ErtsThrQElement_t *) ilast)
+ ERTS_THR_MEMORY_BARRIER;
+ else {
+ q->head.next.unref_end = (ErtsThrQElement_t *) ilast;
+ ERTS_THR_MEMORY_BARRIER;
+#ifdef ERTS_SMP
+ q->head.next.thr_progress = erts_thr_progress_later();
+#endif
+ erts_atomic32_set_relb(&q->tail.data.um_refc_ix,
+ um_refc_ix);
+ q->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
+#ifdef ERTS_SMP
+ q->head.next.thr_progress_reached = 0;
+#endif
+ }
+ }
+#ifdef ERTS_SMP
+ }
+#endif
+
+ if (q->head.first == q->head.head.ptr) {
+ inspect_head:
+ if (!q->head.used_marker) {
+ erts_aint_t inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL) {
+ q->head.used_marker = 1;
+ (void) enqueue_managed(q, &q->tail.data.marker, 0);
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == (erts_aint_t) &q->tail.data.marker) {
+ q->head.head.ptr->next.ptr = &q->tail.data.marker;
+ q->head.head.ptr = &q->tail.data.marker;
+#ifdef ERTS_SMP
+ if (!q->head.next.thr_progress_reached)
+ return ERTS_THR_Q_NEED_THR_PRGR;
+#else
+ if (do_notify)
+ q->head.notify(q->head.arg);
+#endif
+ return ERTS_THR_Q_DIRTY;
+ }
+ }
+ }
+ return ERTS_THR_Q_CLEAN;
+ }
+
+ if (q->head.first != q->head.unref_end) {
+ if (do_notify)
+ q->head.notify(q->head.arg);
+ return ERTS_THR_Q_DIRTY;
+ }
+
+#ifdef ERTS_SMP
+ if (!q->head.next.thr_progress_reached)
+ return ERTS_THR_Q_NEED_THR_PRGR;
+#endif
+
+ return ERTS_THR_Q_CLEAN; /* Waiting for unmanaged threads to complete... */
+}
+
+#endif
+
+ErtsThrQCleanState_t
+erts_thr_q_clean(ErtsThrQ_t *q)
+{
+#ifdef USE_THREADS
+ return clean(q, ERTS_THR_Q_MAX_SCHED_CLEAN_OPS, 0);
+#else
+ return ERTS_THR_Q_CLEAN;
+#endif
+}
+
+ErtsThrQCleanState_t
+erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
+{
+#ifdef USE_THREADS
+ if (ensure_empty) {
+ erts_aint_t inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext != ERTS_AINT_NULL) {
+ if (&q->tail.data.marker != (ErtsThrQElement_t *) inext)
+ return ERTS_THR_Q_DIRTY;
+ else {
+ q->head.head.ptr->next.ptr = (ErtsThrQElement_t *) inext;
+ q->head.head.ptr = (ErtsThrQElement_t *) inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext != ERTS_AINT_NULL)
+ return ERTS_THR_Q_DIRTY;
+ }
+ }
+ }
+
+ if (q->head.first == q->head.head.ptr) {
+ if (!q->head.used_marker) {
+ erts_aint_t inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL)
+ return ERTS_THR_Q_DIRTY;
+ }
+ return ERTS_THR_Q_CLEAN;
+ }
+
+ if (q->head.first != q->head.unref_end)
+ return ERTS_THR_Q_DIRTY;
+
+#ifdef ERTS_SMP
+ if (!q->head.next.thr_progress_reached)
+ return ERTS_THR_Q_NEED_THR_PRGR;
+#endif
+#endif
+ return ERTS_THR_Q_CLEAN;
+}
+
+static void
+enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
+{
+#ifndef USE_THREADS
+ ASSERT(data);
+
+ this->next.ptr = NULL;
+ this->data.ptr = data;
+
+ if (q->last)
+ q->last->next.ptr = this;
+ else {
+ q->first = q->last = this;
+ q->init.notify(q->init.arg);
+ }
+#else
+ int notify;
+ int um_refc_ix = 0;
+#ifdef ERTS_SMP
+ int unmanaged_thread;
+#endif
+
+#if ERTS_THR_Q_DBG_CHK_DATA
+ if (!data)
+ erl_exit(ERTS_ABORT_EXIT, "Missing data in enqueue\n");
+#endif
+
+ ASSERT(!q->q.finalizing);
+
+ this->data.ptr = data;
+
+#ifdef ERTS_SMP
+ unmanaged_thread = !erts_thr_progress_is_managed_thread();
+ if (unmanaged_thread)
+#endif
+ {
+ um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix);
+ while (1) {
+ int tmp_um_refc_ix;
+ erts_atomic_inc_acqb(&q->tail.data.um_refc[um_refc_ix]);
+ tmp_um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix);
+ if (tmp_um_refc_ix == um_refc_ix)
+ break;
+ erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]);
+ um_refc_ix = tmp_um_refc_ix;
+ }
+ }
+
+ notify = this == enqueue_managed(q, this, 1);
+
+
+#ifdef ERTS_SMP
+ if (unmanaged_thread)
+#endif
+ {
+ if (notify)
+ erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]);
+ else if (erts_atomic_dec_read_relb(&q->tail.data.um_refc[um_refc_ix]) == 0)
+ notify = 1;
+ }
+ if (notify)
+ q->tail.data.notify(q->tail.data.arg);
+#endif
+}
+
+void
+erts_thr_q_enqueue(ErtsThrQ_t *q, void *data)
+{
+ enqueue(q, data, element_alloc(q));
+}
+
+ErtsThrQPrepEnQ_t *
+erts_thr_q_prepare_enqueue(ErtsThrQ_t *q)
+{
+ return (ErtsThrQPrepEnQ_t *) element_alloc(q);
+}
+
+int
+erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
+{
+#ifndef USE_THREADS
+ return 0;
+#else
+#ifdef DEBUG
+ if (!q->head.deq_fini.start) {
+ ASSERT(!q->head.deq_fini.end);
+ }
+ else {
+ ErtsThrQElement_t *e = q->head.deq_fini.start;
+ ErtsThrQElement_t *end = q->head.deq_fini.end;
+ while (e != end) {
+ ASSERT(q->head.head.ptr != e);
+ ASSERT(q->head.first != e);
+ ASSERT(q->head.unref_end != e);
+ e = e->next.ptr;
+ }
+ }
+#endif
+ fdp->start = q->head.deq_fini.start;
+ fdp->end = q->head.deq_fini.end;
+ if (fdp->end)
+ fdp->end->next.ptr = NULL;
+ q->head.deq_fini.start = NULL;
+ q->head.deq_fini.end = NULL;
+ return fdp->start != NULL;
+#endif
+}
+
+void
+erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
+ ErtsThrQFinDeQ_t *fdp1)
+{
+#ifdef USE_THREADS
+ if (fdp1->start) {
+ if (fdp0->end)
+ fdp0->end->next.ptr = fdp1->start;
+ else
+ fdp0->start = fdp1->start;
+ fdp0->end = fdp1->end;
+ }
+#endif
+}
+
+
+int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
+{
+#ifdef USE_THREADS
+ ErtsThrQElement_t *start = state->start;
+ if (start) {
+ ErtsThrQLive_t live;
+ int i;
+ for (i = 0; i < ERTS_THR_Q_MAX_FINI_DEQ_OPS; i++) {
+ ErtsThrQElement_t *tmp;
+ if (!start)
+ break;
+ tmp = start;
+ start = start->next.ptr;
+ live = (ErtsThrQLive_t) (UWord) tmp->data.ptr;
+ element_live_free(live, tmp);
+ }
+ state->start = start;
+ if (start)
+ return 1; /* More to do */
+ state->end = NULL;
+ }
+#endif
+ return 0;
+}
+
+void
+erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *state)
+{
+#ifdef USE_THREADS
+ state->start = NULL;
+ state->end = NULL;
+#endif
+}
+
+
+void
+erts_thr_q_enqueue_prepared(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep)
+{
+ ASSERT(prep);
+ enqueue(q, data, (ErtsThrQElement_t *) prep);
+}
+
+void *
+erts_thr_q_dequeue(ErtsThrQ_t *q)
+{
+#ifndef USE_THREADS
+ void *res;
+ ErtsThrQElement_t *tmp;
+
+ if (!q->first)
+ return NULL;
+ tmp = q->first;
+ res = tmp->data.ptr;
+ q->first = tmp->next.ptr;
+ if (!q->first)
+ q->last = NULL;
+
+ element_free(q, tmp);
+
+ return res;
+#else
+ erts_aint_t inext;
+ void *res;
+
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL)
+ return NULL;
+ q->head.head.ptr->next.ptr = (ErtsThrQElement_t *) inext;
+ q->head.head.ptr = (ErtsThrQElement_t *) inext;
+ if (q->head.head.ptr == &q->tail.data.marker) {
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL)
+ return NULL;
+ q->head.head.ptr->next.ptr = (ErtsThrQElement_t *) inext;
+ q->head.head.ptr = (ErtsThrQElement_t *) inext;
+ }
+ res = q->head.head.ptr->data.ptr;
+#if ERTS_THR_Q_DBG_CHK_DATA
+ q->head.head.ptr->data.ptr = NULL;
+ if (!res)
+ erl_exit(ERTS_ABORT_EXIT, "Missing data in dequeue\n");
+#endif
+ clean(q,
+ (q->head.deq_fini.automatic
+ ? ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS
+ : ERTS_THR_Q_MAX_SCHED_CLEAN_OPS), 1);
+ return res;
+#endif
+}
diff --git a/erts/emulator/beam/erl_thr_queue.h b/erts/emulator/beam/erl_thr_queue.h
new file mode 100644
index 0000000000..407c23f5eb
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_queue.h
@@ -0,0 +1,211 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Lock-free queue for communication between threads.
+ *
+ * Currently only a many-to-one version has been
+ * implemented, i.e., many threads can enqueue but
+ * only one thread can dequeue at a time. It doesn't
+ * have to be the same thread dequeuing every time, but
+ * synchronization so that only one thread dequeues
+ * at a time has to be provided by other means.
+ *
+ * When/If the need for a many-to-many queue arises,
+ * this implementation can relatively easily be extended
+ * to support that too.
+ *
+ * Usage instructions can be found in erl_thr_queue.c
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_THR_QUEUE_H__
+#define ERL_THR_QUEUE_H__
+
+#include "sys.h"
+#include "erl_threads.h"
+#include "erl_alloc.h"
+#include "erl_thr_progress.h"
+
+typedef enum {
+ ERTS_THR_Q_LIVE_UNDEF,
+ ERTS_THR_Q_LIVE_SHORT,
+ ERTS_THR_Q_LIVE_LONG
+} ErtsThrQLive_t;
+
+#define ERTS_THR_Q_INIT_DEFAULT \
+{ \
+ { \
+ ERTS_THR_Q_LIVE_UNDEF, \
+ ERTS_THR_Q_LIVE_SHORT \
+ }, \
+ NULL, \
+ NULL, \
+ 1 \
+}
+
+typedef struct ErtsThrQ_t_ ErtsThrQ_t;
+
+typedef struct {
+ struct {
+ ErtsThrQLive_t queue;
+ ErtsThrQLive_t objects;
+ } live;
+ void *arg;
+ void (*notify)(void *);
+ int auto_finalize_dequeue;
+} ErtsThrQInit_t;
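
A sketch of creating a queue from these settings (illustrative only; wake_consumer() and the consumer_state argument are hypothetical):

static void
wake_consumer(void *arg)
{
    (void) arg;  /* hypothetical: wake whatever thread is responsible for dequeuing */
}

static ErtsThrQ_t *
make_work_queue(void *consumer_state)
{
    ErtsThrQInit_t qi = ERTS_THR_Q_INIT_DEFAULT;
    qi.arg = consumer_state;     /* passed back to the notify callback */
    qi.notify = wake_consumer;   /* called when the queue needs attention */
    return erts_thr_q_create(&qi);
}

/* Any thread may then call erts_thr_q_enqueue(q, object); only one thread
 * at a time may call erts_thr_q_dequeue(q). */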
+
+typedef struct ErtsThrQElement_t_ ErtsThrQElement_t;
+typedef struct ErtsThrQElement_t ErtsThrQPrepEnQ_t;
+
+typedef union {
+ erts_atomic_t atmc;
+ ErtsThrQElement_t *ptr;
+} ErtsThrQPtr_t;
+
+struct ErtsThrQElement_t_ {
+ ErtsThrQPtr_t next;
+ union {
+ erts_atomic_t atmc;
+ void *ptr;
+ } data;
+};
+
+typedef struct {
+ ErtsThrQElement_t *start;
+ ErtsThrQElement_t *end;
+} ErtsThrQFinDeQ_t;
+
+typedef enum {
+ ERTS_THR_Q_CLEAN,
+#ifdef ERTS_SMP
+ ERTS_THR_Q_NEED_THR_PRGR,
+#endif
+ ERTS_THR_Q_DIRTY,
+} ErtsThrQCleanState_t;
+
+#ifdef USE_THREADS
+
+typedef struct {
+ ErtsThrQElement_t marker;
+ erts_atomic_t last;
+ erts_atomic_t um_refc[2];
+ erts_atomic32_t um_refc_ix;
+ ErtsThrQLive_t live;
+#ifdef ERTS_SMP
+ erts_atomic32_t thr_prgr_clean_scheduled;
+#endif
+ void *arg;
+ void (*notify)(void *);
+} ErtsThrQTail_t;
+
+struct ErtsThrQ_t_ {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /* Modified by threads enqueuing */
+ ErtsThrQTail_t data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrQTail_t))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread dequeuing.
+ */
+ struct {
+ ErtsThrQPtr_t head;
+ ErtsThrQLive_t live;
+ ErtsThrQElement_t *first;
+ ErtsThrQElement_t *unref_end;
+ int clean_reached_head_count;
+ struct {
+ int automatic;
+ ErtsThrQElement_t *start;
+ ErtsThrQElement_t *end;
+ } deq_fini;
+ struct {
+#ifdef ERTS_SMP
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+#endif
+ int um_refc_ix;
+ ErtsThrQElement_t *unref_end;
+ } next;
+ int used_marker;
+ void *arg;
+ void (*notify)(void *);
+ } head;
+ struct {
+ int finalizing;
+ ErtsThrQLive_t live;
+ void *blk;
+ } q;
+};
+
+#else /* !USE_THREADS */
+
+struct ErtsThrQ_t_ {
+ ErtsThrQInit_t init;
+ ErtsThrQElement_t *first;
+ ErtsThrQElement_t *last;
+ struct {
+ void *blk;
+ } q;
+};
+
+#endif
+
+void erts_thr_q_init(void);
+void erts_thr_q_initialize(ErtsThrQ_t *, ErtsThrQInit_t *);
+ErtsThrQCleanState_t erts_thr_q_finalize(ErtsThrQ_t *);
+ErtsThrQ_t *erts_thr_q_create(ErtsThrQInit_t *);
+ErtsThrQCleanState_t erts_thr_q_destroy(ErtsThrQ_t *);
+ErtsThrQCleanState_t erts_thr_q_clean(ErtsThrQ_t *);
+ErtsThrQCleanState_t erts_thr_q_inspect(ErtsThrQ_t *, int);
+ErtsThrQPrepEnQ_t *erts_thr_q_prepare_enqueue(ErtsThrQ_t *);
+void erts_thr_q_enqueue_prepared(ErtsThrQ_t *, void *, ErtsThrQPrepEnQ_t *);
+void erts_thr_q_enqueue(ErtsThrQ_t *, void *);
+void * erts_thr_q_dequeue(ErtsThrQ_t *);
+int erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *,
+ ErtsThrQFinDeQ_t *);
+void erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *,
+ ErtsThrQFinDeQ_t *);
+int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *);
+void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *);
+
+#ifdef ERTS_SMP
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q);
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ERTS_SMP
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_q_need_thr_progress(ErtsThrQ_t *q)
+{
+ return q->head.next.thr_progress;
+}
+#endif
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_THR_QUEUE_H__ */
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 9b897ffd24..065e7077c0 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -92,6 +92,8 @@ typedef struct {
#endif
} erts_rwmtx_t;
+#define ERTS_MTX_OPT_DEFAULT_INITER ETHR_MUTEX_OPT_DEFAULT_INITER
+#define ERTS_CND_OPT_DEFAULT_INITER ETHR_COND_OPT_DEFAULT_INITER
#define ERTS_RWMTX_OPT_DEFAULT_INITER ETHR_RWMUTEX_OPT_DEFAULT_INITER
#define ERTS_RWMTX_TYPE_NORMAL ETHR_RWMUTEX_TYPE_NORMAL
#define ERTS_RWMTX_TYPE_FREQUENT_READ ETHR_RWMUTEX_TYPE_FREQUENT_READ
@@ -193,6 +195,8 @@ typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#endif /* #ifdef USE_THREADS */
+#define ERTS_AINT_NULL ((erts_aint_t) NULL)
+
#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
#define ERTS_AINT_T_MIN ((((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
#define ERTS_AINT32_T_MAX (~(((erts_aint32_t) 1) << (sizeof(erts_aint32_t)*8-1)))
@@ -990,8 +994,9 @@ erts_mtx_destroy(erts_mtx_t *mtx)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy mutex");
+ erts_thr_fatal_error(res, "destroy mutex");
}
#endif
}
@@ -1094,8 +1099,9 @@ erts_cnd_destroy(erts_cnd_t *cnd)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy condition variable");
+ erts_thr_fatal_error(res, "destroy condition variable");
}
#endif
}
@@ -1126,6 +1132,16 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
#endif
}
+/*
+ * IMPORTANT note about erts_cnd_signal() and erts_cnd_broadcast()
+ *
+ * POSIX allows calling `pthread_cond_signal' or `pthread_cond_broadcast'
+ * even when the associated mutex or mutexes are not locked by the
+ * caller. Our implementation does not allow that, in order to avoid a
+ * performance penalty. That is, all associated mutexes *need* to be
+ * locked by the caller of erts_cnd_signal()/erts_cnd_broadcast()!
+ */
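+
+/*
+ * A minimal sketch of the required pattern (illustration only, not part
+ * of this patch), assuming a mutex mtx, a condition variable cnd, and a
+ * flag protected by mtx:
+ *
+ *     erts_mtx_lock(&mtx);
+ *     flag = 1;
+ *     erts_cnd_signal(&cnd);    -- signal while mtx is still held
+ *     erts_mtx_unlock(&mtx);
+ */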
+
ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
@@ -1229,8 +1245,9 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy rwmutex");
+ erts_thr_fatal_error(res, "destroy rwmutex");
}
#endif
}
@@ -1693,8 +1710,9 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy rwlock");
+ erts_thr_fatal_error(res, "destroy rwlock");
}
#else
(void)lock;
@@ -1811,8 +1829,9 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy rwlock");
+ erts_thr_fatal_error(res, "destroy rwlock");
}
#else
(void)lock;
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 8833137112..b487dbf054 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -36,6 +36,7 @@
#include "error.h"
#include "erl_binary.h"
#include "erl_bits.h"
+#include "erl_thr_progress.h"
#if 0
#define DEBUG_PRINTOUTS
@@ -159,7 +160,7 @@ static Uint active_sched;
void
erts_system_profile_setup_active_schedulers(void)
{
- ERTS_SMP_LC_ASSERT(erts_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
active_sched = erts_active_schedulers();
}
@@ -1940,7 +1941,8 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
Eterm* hp;
int need;
- ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0) || erts_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
+ || erts_thr_progress_is_blocking());
if (is_internal_port(t_p->tracer_proc)) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
@@ -2092,8 +2094,7 @@ void save_calls(Process *p, Export *e)
* entries instead of the original BIF functions.
*/
Eterm
-erts_bif_trace(int bif_index, Process* p,
- Eterm arg1, Eterm arg2, Eterm arg3, BeamInstr *I)
+erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
Eterm result;
int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
@@ -2107,10 +2108,10 @@ erts_bif_trace(int bif_index, Process* p,
* no tracing will occur. Doing the whole else branch will
* also do nothing, only slower.
*/
- Eterm (*func)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = bif_table[bif_index].f;
- result = func(p, arg1, arg2, arg3, I);
+ Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f;
+ result = func(p, args, I);
} else {
- Eterm (*func)(Process*, Eterm, Eterm, Eterm, BeamInstr*);
+ Eterm (*func)(Process*, Eterm*, BeamInstr*);
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
@@ -2122,8 +2123,6 @@ erts_bif_trace(int bif_index, Process* p,
* export entry */
BeamInstr *cp = p->cp;
- Eterm args[3] = {arg1, arg2, arg3};
-
/*
* Make continuation pointer OK, it is not during direct BIF calls,
* but it is correct during apply of bif.
@@ -2155,7 +2154,7 @@ erts_bif_trace(int bif_index, Process* p,
func = bif_table[bif_index].f;
- result = func(p, arg1, arg2, arg3, I);
+ result = func(p, args, I);
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
BeamInstr i_return_trace = beam_return_trace[0];
@@ -2745,7 +2744,8 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
Eterm mess;
Eterm* hp;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
if (is_internal_port(t_p->tracer_proc)) {
#define LOCAL_HEAP_SIZE (5+5)
@@ -3021,8 +3021,6 @@ static ErtsSysMsgQ *sys_message_queue_end;
static erts_tid_t sys_msg_dispatcher_tid;
static erts_cnd_t smq_cnd;
-static int dispatcher_waiting;
-
ERTS_QUALLOC_IMPL(smq_element, ErtsSysMsgQ, 20, ERTS_ALC_T_SYS_MSG_Q)
static void
@@ -3066,18 +3064,6 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
erts_smp_mtx_unlock(&smq_mtx);
}
-static void
-prepare_for_block(void *unused)
-{
- erts_smp_mtx_unlock(&smq_mtx);
-}
-
-static void
-resume_after_block(void *unused)
-{
- erts_smp_mtx_lock(&smq_mtx);
-}
-
void
erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
{
@@ -3143,10 +3129,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_monitor_flags.busy_port
&& !erts_system_monitor_flags.busy_dist_port)
break; /* Everything is disabled */
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_smp_thr_progress_block();
if (system_monitor == receiver || receiver == NIL)
erts_system_monitor_clear(NULL);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case SYS_MSG_TYPE_SYSPROF:
if (receiver == NIL
@@ -3156,11 +3142,11 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_profile_flags.scheduler)
break;
/* Block system to clear flags */
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (system_profile == receiver || receiver == NIL) {
erts_system_profile_clear(NULL);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case SYS_MSG_TYPE_ERRLGR: {
char *no_elgger = "(no error logger present)";
@@ -3201,22 +3187,68 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
}
}
+static void
+sys_msg_dispatcher_wakeup(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ *wait_p = 0;
+ erts_smp_cnd_signal(&smq_cnd);
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+sys_msg_dispatcher_prep_wait(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ *wait_p = 1;
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+sys_msg_dispatcher_fin_wait(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ *wait_p = 0;
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+sys_msg_dispatcher_wait(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ while (*wait_p)
+ erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
+ erts_smp_mtx_unlock(&smq_mtx);
+}
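+
+/*
+ * The four callbacks above implement the thread-progress wait protocol
+ * for the dispatcher thread: prep_wait sets the wait flag under smq_mtx,
+ * wait blocks on smq_cnd while the flag remains set, wakeup clears the
+ * flag and signals smq_cnd, and fin_wait clears the flag again in case
+ * no wakeup arrived.
+ */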
+
static void *
sys_msg_dispatcher_func(void *unused)
{
+ ErtsThrPrgrCallbacks callbacks;
ErtsSysMsgQ *local_sys_message_queue = NULL;
+ int wait = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_set_thread_name("system message dispatcher");
#endif
- erts_register_blockable_thread();
- erts_smp_activity_begin(ERTS_ACTIVITY_IO, NULL, NULL, NULL);
+ callbacks.arg = (void *) &wait;
+ callbacks.wakeup = sys_msg_dispatcher_wakeup;
+ callbacks.prepare_wait = sys_msg_dispatcher_prep_wait;
+ callbacks.wait = sys_msg_dispatcher_wait;
+ callbacks.finalize_wait = sys_msg_dispatcher_fin_wait;
+
+ erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
while (1) {
+ int end_wait = 0;
ErtsSysMsgQ *smqp;
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
erts_smp_mtx_lock(&smq_mtx);
@@ -3228,20 +3260,16 @@ sys_msg_dispatcher_func(void *unused)
}
/* Fetch current trace message queue ... */
- erts_smp_activity_change(ERTS_ACTIVITY_IO,
- ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- NULL);
- dispatcher_waiting = 1;
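+ /*
+ * No pending messages: mark this thread as inactive and prepared
+ * to wait, so that thread progress does not have to wait for it
+ * while we block on smq_cnd below.
+ */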
+ if (!sys_message_queue) {
+ erts_smp_mtx_unlock(&smq_mtx);
+ end_wait = 1;
+ erts_thr_progress_active(NULL, 0);
+ erts_thr_progress_prepare_wait(NULL);
+ erts_smp_mtx_lock(&smq_mtx);
+ }
+
while (!sys_message_queue)
erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
- dispatcher_waiting = 0;
- erts_smp_activity_change(ERTS_ACTIVITY_WAIT,
- ERTS_ACTIVITY_IO,
- prepare_for_block,
- resume_after_block,
- NULL);
local_sys_message_queue = sys_message_queue;
sys_message_queue = NULL;
@@ -3249,6 +3277,11 @@ sys_msg_dispatcher_func(void *unused)
erts_smp_mtx_unlock(&smq_mtx);
+ if (end_wait) {
+ erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_active(NULL, 1);
+ }
+
/* Send trace messages ... */
ASSERT(local_sys_message_queue);
@@ -3259,6 +3292,9 @@ sys_msg_dispatcher_func(void *unused)
Process *proc = NULL;
Port *port = NULL;
+ if (erts_thr_progress_update(NULL))
+ erts_thr_progress_leader_update(NULL);
+
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
#endif
@@ -3372,7 +3408,6 @@ sys_msg_dispatcher_func(void *unused)
}
}
- erts_smp_activity_end(ERTS_ACTIVITY_IO, NULL, NULL, NULL);
return NULL;
}
@@ -3422,7 +3457,6 @@ init_sys_msg_dispatcher(void)
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
- dispatcher_waiting = 0;
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 158eb361a4..fca785a4de 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -47,7 +47,7 @@ typedef struct _restart_context {
static Uint max_loop_limit;
-static BIF_RETTYPE utf8_to_list(BIF_ALIST_1);
+static BIF_RETTYPE utf8_to_list(Process *p, Eterm arg1);
static BIF_RETTYPE finalize_list_to_list(Process *p,
byte *bytes,
Eterm rest,
@@ -348,12 +348,6 @@ static int copy_utf8_bin(byte *target, byte *source, Uint size,
return copied;
}
- if (((*source) == 0xEF) && (source[1] == 0xBF) &&
- ((source[2] == 0xBE) || (source[2] == 0xBF))) {
- *err_pos = source;
- return copied;
- }
-
*(target++) = *(source++);
*(target++) = *(source++);
*(target++) = *(source++);
@@ -714,9 +708,8 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
target[(*pos)++] = (((byte) (x & 0x3F)) |
((byte) 0x80));
} else if (x < 0x10000) {
- if ((x >= 0xD800 && x <= 0xDFFF) ||
- (x == 0xFFFE) ||
- (x == 0xFFFF)) { /* Invalid unicode range */
+ if (x >= 0xD800 && x <= 0xDFFF) {
+ /* Invalid unicode range */
*err = 1;
goto done;
}
@@ -1230,10 +1223,6 @@ int erts_analyze_utf8(byte *source, Uint size,
((source[1] & 0x20) != 0)) {
return ERTS_UTF8_ERROR;
}
- if (((*source) == 0xEF) && (source[1] == 0xBF) &&
- ((source[2] == 0xBE) || (source[2] == 0xBF))) {
- return ERTS_UTF8_ERROR;
- }
source += 3;
size -= 3;
} else if (((*source) & ((byte) 0xF8)) == 0xF0) {
@@ -1839,13 +1828,13 @@ static BIF_RETTYPE characters_to_list_trap_4(BIF_ALIST_1)
* Instead of building an utf8 buffer, we analyze the binary given and use that.
*/
-static BIF_RETTYPE utf8_to_list(BIF_ALIST_1)
+static BIF_RETTYPE utf8_to_list(Process* p, Eterm arg)
{
- if (!is_binary(BIF_ARG_1) || aligned_binary_size(BIF_ARG_1) < 0) {
- BIF_ERROR(BIF_P,BADARG);
+ if (!is_binary(arg) || aligned_binary_size(arg) < 0) {
+ BIF_ERROR(p, BADARG);
}
- return do_bif_utf8_to_list(BIF_P, BIF_ARG_1, 0U, 0U, 0U,
- ERTS_UTF8_ANALYZE_MORE,NIL);
+ return do_bif_utf8_to_list(p, arg, 0U, 0U, 0U,
+ ERTS_UTF8_ANALYZE_MORE, NIL);
}
@@ -2166,9 +2155,8 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
} else if (x < 0x800) {
need += 2;
} else if (x < 0x10000) {
- if ((x >= 0xD800 && x <= 0xDFFF) ||
- (x == 0xFFFE) ||
- (x == 0xFFFF)) { /* Invalid unicode range */
+ if (x >= 0xD800 && x <= 0xDFFF) {
+ /* Invalid unicode range */
DESTROY_ESTACK(stack);
return ((Sint) -1);
}
@@ -2314,9 +2302,7 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
*p++ = (((byte) (x & 0x3F)) |
((byte) 0x80));
} else if (x < 0x10000) {
- ASSERT(!((x >= 0xD800 && x <= 0xDFFF) ||
- (x == 0xFFFE) ||
- (x == 0xFFFF)));
+ ASSERT(!(x >= 0xD800 && x <= 0xDFFF));
*p++ = (((byte) (x >> 12)) |
((byte) 0xE0));
*p++ = ((((byte) (x >> 6)) & 0x3F) |
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index e7fd144ec3..5dc307e383 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -55,7 +55,7 @@
heap data on the C stack or if we use the buffers in the scheduler data. */
#define TMP_HEAP_SIZE 128 /* Number of Eterm in the schedulers
small heap for transient heap data */
-#define CMP_TMP_HEAP_SIZE 2 /* cmp wants its own tmp-heap... */
+#define CMP_TMP_HEAP_SIZE 32 /* cmp wants its own tmp-heap... */
#define ERL_ARITH_TMP_HEAP_SIZE 4 /* as does erl_arith... */
#define BEAM_EMU_TMP_HEAP_SIZE 2 /* and beam_emu... */
@@ -83,11 +83,7 @@
#define CP_SIZE 1
#define ErtsHAllocLockCheck(P) \
- ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks((P))) \
- || ((P)->id == ERTS_INVALID_PID) \
- || ((P)->scheduler_data \
- && (P) == (P)->scheduler_data->match_pseudo_process) \
- || erts_is_system_blocked(0))
+ ERTS_SMP_LC_ASSERT(erts_dbg_check_halloc_lock((P)))
#ifdef DEBUG
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 5bc402fe22..18d62dac1d 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -208,7 +208,8 @@ erts_export_put(Eterm mod, Eterm func, unsigned int arity)
Export e;
int ix;
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_smp_thr_progress_is_blocking());
ASSERT(is_atom(mod));
ASSERT(is_atom(func));
e.code[0] = mod;
@@ -265,7 +266,8 @@ erts_export_consolidate(void)
HashInfo hi;
#endif
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_smp_thr_progress_is_blocking());
export_write_lock();
erts_index_merge(&secondary_export_table, &export_table);
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 1a102f7187..4b867f2b10 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -88,7 +88,7 @@ static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*);
static byte* dec_pid(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
-static Sint decoded_size(byte *ep, byte* endp, int only_heap_bins, int internal_tags);
+static Sint decoded_size(byte *ep, byte* endp, int internal_tags);
static Uint encode_size_struct2(ErtsAtomCacheMap *, Eterm, unsigned);
@@ -459,6 +459,12 @@ Uint erts_encode_ext_size(Eterm term)
+ 1 /* VERSION_MAGIC */;
}
+Uint erts_encode_ext_size_2(Eterm term, unsigned dflags)
+{
+ return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS|dflags)
+ + 1 /* VERSION_MAGIC */;
+}
+
Uint erts_encode_ext_size_ets(Eterm term)
{
return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS|DFLAGS_INTERNAL_TAGS);
@@ -804,7 +810,7 @@ bad_dist_ext(ErtsDistExternal *edep)
}
Sint
-erts_decode_dist_ext_size(ErtsDistExternal *edep, int no_refc_bins)
+erts_decode_dist_ext_size(ErtsDistExternal *edep)
{
Sint res;
byte *ep;
@@ -823,7 +829,7 @@ erts_decode_dist_ext_size(ErtsDistExternal *edep, int no_refc_bins)
goto fail;
ep = edep->extp+1;
}
- res = decoded_size(ep, edep->ext_endp, no_refc_bins, 0);
+ res = decoded_size(ep, edep->ext_endp, 0);
if (res >= 0)
return res;
fail:
@@ -831,16 +837,16 @@ erts_decode_dist_ext_size(ErtsDistExternal *edep, int no_refc_bins)
return -1;
}
-Sint erts_decode_ext_size(byte *ext, Uint size, int no_refc_bins)
+Sint erts_decode_ext_size(byte *ext, Uint size)
{
if (size == 0 || *ext != VERSION_MAGIC)
return -1;
- return decoded_size(ext+1, ext+size, no_refc_bins, 0);
+ return decoded_size(ext+1, ext+size, 0);
}
Sint erts_decode_ext_size_ets(byte *ext, Uint size)
{
- Sint sz = decoded_size(ext, ext+size, 0, 1);
+ Sint sz = decoded_size(ext, ext+size, 1);
ASSERT(sz >= 0);
return sz;
}
@@ -962,7 +968,7 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
ede.extp = binary_bytes(real_bin)+offset;
ede.ext_endp = ede.extp + size;
- hsz = erts_decode_dist_ext_size(&ede, 0);
+ hsz = erts_decode_dist_ext_size(&ede);
if (hsz < 0)
goto badarg;
@@ -982,16 +988,16 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
}
-Eterm
-term_to_binary_1(Process* p, Eterm Term)
+BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
{
- return erts_term_to_binary(p, Term, 0, TERM_TO_BINARY_DFLAGS);
+ return erts_term_to_binary(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS);
}
-
-Eterm
-term_to_binary_2(Process* p, Eterm Term, Eterm Flags)
+BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm Term = BIF_ARG_1;
+ Eterm Flags = BIF_ARG_2;
int level = 0;
Uint flags = TERM_TO_BINARY_DFLAGS;
@@ -1100,7 +1106,7 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size)
goto error;
size = (Sint) dest_len;
}
- res = decoded_size(state->extp, state->extp + size, 0, 0);
+ res = decoded_size(state->extp, state->extp + size, 0);
if (res < 0)
goto error;
return res;
@@ -1250,8 +1256,11 @@ BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
}
Eterm
-external_size_1(Process* p, Eterm Term)
+external_size_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm Term = BIF_ARG_1;
+
Uint size = erts_encode_ext_size(Term);
if (IS_USMALL(0, size)) {
BIF_RET(make_small(size));
@@ -1262,6 +1271,49 @@ external_size_1(Process* p, Eterm Term)
}
Eterm
+external_size_2(BIF_ALIST_2)
+{
+ Uint size;
+ Uint flags = TERM_TO_BINARY_DFLAGS;
+
+ while (is_list(BIF_ARG_2)) {
+ Eterm arg = CAR(list_val(BIF_ARG_2));
+ Eterm* tp;
+
+ if (is_tuple(arg) && *(tp = tuple_val(arg)) == make_arityval(2)) {
+ if (tp[1] == am_minor_version && is_small(tp[2])) {
+ switch (signed_val(tp[2])) {
+ case 0:
+ break;
+ case 1:
+ flags |= DFLAG_NEW_FLOATS;
+ break;
+ default:
+ goto error;
+ }
+ } else {
+ goto error;
+ }
+ } else {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_ARG_2 = CDR(list_val(BIF_ARG_2));
+ }
+ if (is_not_nil(BIF_ARG_2)) {
+ goto error;
+ }
+
+ size = erts_encode_ext_size_2(BIF_ARG_1, flags);
+ if (IS_USMALL(0, size)) {
+ BIF_RET(make_small(size));
+ } else {
+ Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(size, hp));
+ }
+}
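+
+/*
+ * The external_size_2() BIF above takes an option list; the only
+ * recognized option is {minor_version, 0 | 1}, where 1 adds the
+ * DFLAG_NEW_FLOATS flag when computing the encoded size.
+ */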
+
+Eterm
erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags)
{
Uint size;
@@ -2402,7 +2454,7 @@ dec_term_atom_common:
n = get_int32(ep);
ep += 4;
- if (n <= ERL_ONHEAP_BIN_LIMIT || off_heap == NULL) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hb = (ErlHeapBin *) hp;
hb->thing_word = header_heap_bin(n);
@@ -2440,7 +2492,7 @@ dec_term_atom_common:
n = get_int32(ep);
bitsize = ep[4];
ep += 5;
- if (n <= ERL_ONHEAP_BIN_LIMIT || off_heap == NULL) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hb = (ErlHeapBin *) hp;
hb->thing_word = header_heap_bin(n);
@@ -3009,7 +3061,7 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
}
static Sint
-decoded_size(byte *ep, byte* endp, int no_refc_bins, int internal_tags)
+decoded_size(byte *ep, byte* endp, int internal_tags)
{
int heap_size = 0;
int terms;
@@ -3171,7 +3223,7 @@ decoded_size(byte *ep, byte* endp, int no_refc_bins, int internal_tags)
CHKSIZE(4);
n = get_int32(ep);
SKIP2(n, 4);
- if (n <= ERL_ONHEAP_BIN_LIMIT || no_refc_bins) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
heap_size += heap_bin_size(n);
} else {
heap_size += PROC_BIN_SIZE;
@@ -3182,7 +3234,7 @@ decoded_size(byte *ep, byte* endp, int no_refc_bins, int internal_tags)
CHKSIZE(5);
n = get_int32(ep);
SKIP2(n, 5);
- if (n <= ERL_ONHEAP_BIN_LIMIT || no_refc_bins) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
heap_size += heap_bin_size(n) + ERL_SUB_BIN_SIZE;
} else {
heap_size += PROC_BIN_SIZE + ERL_SUB_BIN_SIZE;
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index d8287b96a4..eddd4571dd 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -160,6 +160,7 @@ Uint erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap *);
void erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *);
Uint erts_encode_ext_size(Eterm);
+Uint erts_encode_ext_size_2(Eterm, unsigned);
Uint erts_encode_ext_size_ets(Eterm);
void erts_encode_ext(Eterm, byte **);
byte* erts_encode_ext_ets(Eterm, byte *, struct erl_off_heap_header** ext_off_heap);
@@ -174,10 +175,10 @@ void *erts_dist_ext_trailer(ErtsDistExternal *);
void erts_destroy_dist_ext_copy(ErtsDistExternal *);
int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint,
DistEntry *, ErtsAtomCache *);
-Sint erts_decode_dist_ext_size(ErtsDistExternal *, int);
+Sint erts_decode_dist_ext_size(ErtsDistExternal *);
Eterm erts_decode_dist_ext(Eterm **, ErlOffHeap *, ErtsDistExternal *);
-Sint erts_decode_ext_size(byte*, Uint, int);
+Sint erts_decode_ext_size(byte*, Uint);
Sint erts_decode_ext_size_ets(byte*, Uint);
Eterm erts_decode_ext(Eterm **, ErlOffHeap *, byte**);
Eterm erts_decode_ext_ets(Eterm **, ErlOffHeap *, byte*);
diff --git a/erts/emulator/beam/fix_alloc.c b/erts/emulator/beam/fix_alloc.c
deleted file mode 100644
index 5637281597..0000000000
--- a/erts/emulator/beam/fix_alloc.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/* General purpose Memory allocator for fixed block size objects */
-/* This allocater is at least an order of magnitude faster than malloc() */
-
-
-#define NOPERBLOCK 20
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_vm.h"
-#include "global.h"
-#include "erl_db.h"
-
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-
-#if ERTS_ALC_MTA_FIXED_SIZE
-#include "erl_threads.h"
-#include "erl_smp.h"
-# ifdef ERTS_SMP
-# define FA_LOCK(FA) erts_smp_spin_lock(&(FA)->slck)
-# define FA_UNLOCK(FA) erts_smp_spin_unlock(&(FA)->slck)
-# else
-# define FA_LOCK(FA) erts_mtx_lock(&(FA)->mtx)
-# define FA_UNLOCK(FA) erts_mtx_unlock(&(FA)->mtx)
-# endif
-#else
-# define FA_LOCK(FA)
-# define FA_UNLOCK(FA)
-#endif
-
-typedef union {double d; long l;} align_t;
-
-typedef struct fix_alloc_block {
- struct fix_alloc_block *next;
- align_t mem[1];
-} FixAllocBlock;
-
-typedef struct fix_alloc {
- Uint item_size;
- void *freelist;
- Uint no_free;
- Uint no_blocks;
- FixAllocBlock *blocks;
-#if ERTS_ALC_MTA_FIXED_SIZE
-# ifdef ERTS_SMP
- erts_smp_spinlock_t slck;
-# else
- erts_mtx_t mtx;
-# endif
-#endif
-} FixAlloc;
-
-static void *(*core_alloc)(Uint);
-static Uint xblk_sz;
-
-static FixAlloc **fa;
-#define FA_SZ (1 + ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE)
-
-#define FIX_IX(N) ((N) - ERTS_ALC_N_MIN_A_FIXED_SIZE)
-
-#define FIX_POOL_SZ(I_SZ) \
- ((I_SZ)*NOPERBLOCK + sizeof(FixAllocBlock) - sizeof(align_t))
-
-#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
-static int first_time;
-#endif
-
-void erts_init_fix_alloc(Uint extra_block_size,
- void *(*alloc)(Uint))
-{
- int i;
-
- xblk_sz = extra_block_size;
- core_alloc = alloc;
-
- fa = (FixAlloc **) (*core_alloc)(FA_SZ * sizeof(FixAlloc *));
- if (!fa)
- erts_alloc_enomem(ERTS_ALC_T_UNDEF, FA_SZ * sizeof(FixAlloc *));
-
- for (i = 0; i < FA_SZ; i++)
- fa[i] = NULL;
-#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
- first_time = 1;
-#endif
-}
-
-Uint
-erts_get_fix_size(ErtsAlcType_t type)
-{
- Uint i = FIX_IX(ERTS_ALC_T2N(type));
- return i < FA_SZ && fa[i] ? fa[i]->item_size : 0;
-}
-
-void
-erts_set_fix_size(ErtsAlcType_t type, Uint size)
-{
- Uint sz;
- Uint i;
- FixAlloc *fs;
- ErtsAlcType_t t_no = ERTS_ALC_T2N(type);
- sz = xblk_sz + size;
-
-#ifdef DEBUG
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-#endif
-
- while (sz % sizeof(align_t) != 0) /* Alignment */
- sz++;
-
- i = FIX_IX(t_no);
- fs = (FixAlloc *) (*core_alloc)(sizeof(FixAlloc));
- if (!fs)
- erts_alloc_n_enomem(t_no, sizeof(FixAlloc));
-
- fs->item_size = sz;
- fs->no_blocks = 0;
- fs->no_free = 0;
- fs->blocks = NULL;
- fs->freelist = NULL;
- if (fa[i])
- erl_exit(-1, "Attempt to overwrite existing fix size (%d)", i);
- fa[i] = fs;
-
-#if ERTS_ALC_MTA_FIXED_SIZE
-#ifdef ERTS_SMP
- erts_smp_spinlock_init_x(&fs->slck, "fix_alloc", make_small(i));
-#else
- erts_mtx_init_x(&fs->mtx, "fix_alloc", make_small(i));
-#endif
-#endif
-
-}
-
-void
-erts_fix_info(ErtsAlcType_t type, ErtsFixInfo *efip)
-{
- Uint i;
- FixAlloc *f;
-#ifdef DEBUG
- FixAllocBlock *b;
- void *fp;
-#endif
- Uint real_item_size;
- ErtsAlcType_t t_no = ERTS_ALC_T2N(type);
-
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-
- i = FIX_IX(t_no);
- f = fa[i];
-
- efip->total = sizeof(FixAlloc *);
- efip->used = 0;
- if (!f)
- return;
-
- real_item_size = f->item_size - xblk_sz;
-
- FA_LOCK(f);
-
- efip->total += sizeof(FixAlloc);
- efip->total += f->no_blocks*FIX_POOL_SZ(real_item_size);
- efip->used = efip->total - f->no_free*real_item_size;
-
-#ifdef DEBUG
- ASSERT(efip->total >= efip->used);
- for(i = 0, b = f->blocks; b; i++, b = b->next);
- ASSERT(f->no_blocks == i);
- for (i = 0, fp = f->freelist; fp; i++, fp = *((void **) fp));
- ASSERT(f->no_free == i);
-#endif
-
- FA_UNLOCK(f);
-
-}
-
-void
-erts_fix_free(ErtsAlcType_t t_no, void *extra, void* ptr)
-{
- Uint i;
- FixAlloc *f;
-
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-
- i = FIX_IX(t_no);
- f = fa[i];
-
- FA_LOCK(f);
- *((void **) ptr) = f->freelist;
- f->freelist = ptr;
- f->no_free++;
- FA_UNLOCK(f);
-}
-
-
-void *erts_fix_realloc(ErtsAlcType_t t_no, void *extra, void* ptr, Uint size)
-{
- erts_alc_fatal_error(ERTS_ALC_E_NOTSUP, ERTS_ALC_O_REALLOC, t_no);
- return NULL;
-}
-
-void *erts_fix_alloc(ErtsAlcType_t t_no, void *extra, Uint size)
-{
- void *ret;
- int i;
- FixAlloc *f;
-
-#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
- if (first_time) { /* Check that all sizes have been initialized */
- int i;
- for (i = 0; i < FA_SZ; i++)
- ASSERT(fa[i]);
- first_time = 0;
- }
-#endif
-
-
- i = FIX_IX(t_no);
- f = fa[i];
-
- ASSERT(f);
- ASSERT(f->item_size >= size);
-
- FA_LOCK(f);
- if (f->freelist == NULL) { /* Gotta alloc some more mem */
- char *ptr;
- FixAllocBlock *bl;
- Uint n;
-
-
- FA_UNLOCK(f);
- bl = (*core_alloc)(FIX_POOL_SZ(f->item_size));
- if (!bl)
- return NULL;
-
- FA_LOCK(f);
- bl->next = f->blocks; /* link in first */
- f->blocks = bl;
-
- n = NOPERBLOCK;
- ptr = (char *) &f->blocks->mem[0];
- while(n--) {
- *((void **) ptr) = f->freelist;
- f->freelist = (void *) ptr;
- ptr += f->item_size;
- }
-#if !ERTS_ALC_MTA_FIXED_SIZE
- ASSERT(f->no_free == 0);
-#endif
- f->no_free += NOPERBLOCK;
- f->no_blocks++;
- }
-
- ret = f->freelist;
- f->freelist = *((void **) f->freelist);
- ASSERT(f->no_free > 0);
- f->no_free--;
-
- FA_UNLOCK(f);
-
- return ret;
-}
-
-#endif /* #ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 249df54015..b247576f1c 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -37,16 +37,11 @@
#include "erl_process.h"
#include "erl_sys_driver.h"
#include "erl_debug.h"
+#include "error.h"
typedef struct port Port;
#include "erl_port_task.h"
-#define ERTS_MAX_NO_OF_ASYNC_THREADS 1024
-extern int erts_async_max_threads;
-#define ERTS_ASYNC_THREAD_MIN_STACK_SIZE 16 /* Kilo words */
-#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
-extern int erts_async_thread_suggested_stack_size;
-
typedef struct erts_driver_t_ erts_driver_t;
#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
@@ -546,7 +541,7 @@ ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
tombstone = (Eterm*) erts_smp_atomic_add_read_nob(&erts_dead_ports_ptr,
-(erts_aint_t)sizeof(Eterm));
ASSERT(tombstone+1 != NULL);
- ASSERT(prt->snapshot == erts_smp_atomic_read_nob(&erts_ports_snapshot) - 1);
+ ASSERT(prt->snapshot == erts_smp_atomic32_read_nob(&erts_ports_snapshot) - 1);
*tombstone = prt->id;
}
/*else no ongoing snapshot or port was already included or created after snapshot */
@@ -560,7 +555,6 @@ extern Eterm node_cookie;
extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
extern Uint display_items; /* no of items to display in traces etc */
-extern Uint display_loads; /* print info about loaded modules */
extern int erts_backtrace_depth;
extern erts_smp_atomic32_t erts_max_gen_gcs;
@@ -851,18 +845,41 @@ void erts_queue_monitor_message(Process *,
Eterm,
Eterm);
void erts_init_bif(void);
+Eterm erl_send(Process *p, Eterm to, Eterm msg);
+
+/* erl_bif_op.c */
+
+Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
/* erl_bif_port.c */
/* erl_bif_trace.c */
+Eterm erl_seq_trace_info(Process *p, Eterm arg1);
void erts_system_monitor_clear(Process *c_p);
void erts_system_profile_clear(Process *c_p);
/* beam_load.c */
-int erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* mod, byte* code, int size);
+typedef struct {
+ BeamInstr* current; /* Pointer to: Mod, Name, Arity */
+ Uint needed; /* Heap space needed for entire tuple */
+ Uint32 loc; /* Location in source code */
+ Eterm* fname_ptr; /* Pointer to fname table */
+} FunctionInfo;
+
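+/*
+ * Code loading is split into two phases: erts_prepare_loading() parses
+ * and prepares the code in the supplied LoaderState without affecting
+ * the running system, and erts_finish_loading() then makes the prepared
+ * code current.
+ */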
+struct LoaderState* erts_alloc_loader_state(void);
+Eterm erts_prepare_loading(struct LoaderState*, Process *c_p,
+ Eterm group_leader, Eterm* modp,
+ byte* code, Uint size);
+Eterm erts_finish_loading(struct LoaderState* stp, Process* c_p,
+ ErtsProcLocks c_p_locks, Eterm* modp);
+Eterm erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
BeamInstr* find_function_from_pc(BeamInstr* pc);
+Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
+ Eterm args, Eterm* mfa_p);
+void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
+void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
@@ -1053,6 +1070,7 @@ void init_emulator(void);
void process_main(void);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
+void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
/* erl_init.c */
@@ -1074,6 +1092,7 @@ extern ErtsModifiedTimings erts_modified_timings[];
#define ERTS_MODIFIED_TIMING_INPUT_REDS \
(erts_modified_timings[erts_modified_timing_level].input_reds)
+extern int erts_no_line_info;
extern Eterm erts_error_logger_warnings;
extern int erts_initialized;
extern int erts_compat_rel;
@@ -1107,7 +1126,9 @@ void erts_init_gc(void);
int erts_garbage_collect(Process*, int, Eterm*, int);
void erts_garbage_collect_hibernate(Process* p);
Eterm erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity);
-void erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size);
+void erts_garbage_collect_literals(Process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh);
Uint erts_next_heap_size(Uint, Uint);
Eterm erts_heap_sizes(Process* p);
@@ -1627,8 +1648,7 @@ void monitor_generic(Process *p, Eterm type, Eterm spec);
Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
-Eterm erts_bif_trace(int bif_index, Process* p,
- Eterm arg1, Eterm arg2, Eterm arg3, BeamInstr *I);
+Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 151c776a3d..fff720634d 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -42,6 +42,7 @@
#include "erl_bits.h"
#include "erl_version.h"
#include "error.h"
+#include "erl_async.h"
extern ErlDrvEntry fd_driver_entry;
extern ErlDrvEntry vanilla_driver_entry;
@@ -4579,7 +4580,10 @@ int driver_lock_driver(ErlDrvPort ix)
erts_smp_mtx_lock(&erts_driver_list_lock);
- if (prt == NULL) return -1;
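+ /* Release erts_driver_list_lock before bailing out on an unknown port */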
+ if (prt == NULL) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ return -1;
+ }
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) {
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index 91e4ccce70..b93b1ad09a 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -108,7 +108,8 @@ erts_put_module(Eterm mod)
int index;
ASSERT(is_atom(mod));
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_smp_thr_progress_is_blocking());
e.module = atom_val(mod);
index = index_put(&module_table, (void*) &e);
return (Module*) erts_index_lookup(&module_table, index);
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 8a5763b4bb..fc53a88a3a 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -25,30 +25,12 @@
# instruction transformations; thus, they never occur in BEAM files.
#
-# Special instruction used to generate an error message when
-# trying to load a module compiled by the V1 compiler (R5 & R6).
-# (Specially treated in beam_load.c.)
+# The too_old_compiler/0 instruction is specially handled in beam_load.c
+# to produce a user-friendly message informing the user that the module
+# needs to be re-compiled with a modern compiler.
too_old_compiler/0
-too_old_compiler
-
-#
-# Obsolete instruction usage follow. (Nowdays we use f with
-# a zero label instead of p.)
-#
-
-is_list p S => too_old_compiler
-is_nonempty_list p R => too_old_compiler
-is_nil p R => too_old_compiler
-
-is_tuple p S => too_old_compiler
-test_arity p S Arity => too_old_compiler
-
-is_integer p R => too_old_compiler
-is_float p R => too_old_compiler
-is_atom p R => too_old_compiler
-
-is_eq_exact p S1 S2 => too_old_compiler
+too_old_compiler | never() =>
# In R9C and earlier, the loader used to insert special instructions inside
# the module_info/0,1 functions. (In R10B and later, the compiler inserts
@@ -88,12 +70,42 @@ i_time_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
-i_global_cons
-i_global_tuple
-i_global_copy
return
+#
+# To ensure that a "move Src x(0)" instruction can be combined
+# with the following call instruction, we need to make sure that
+# there is no line/1 instruction between the move and the call.
+#
+
+move S r | line Loc | call_ext Ar Func => \
+ line Loc | move S r | call_ext Ar Func
+move S r | line Loc | call_ext_last Ar Func=u$is_bif D => \
+ line Loc | move S r | call_ext_last Ar Func D
+move S r | line Loc | call_ext_only Ar Func=u$is_bif => \
+ line Loc | move S r | call_ext_only Ar Func
+move S r | line Loc | call Ar Func => \
+ line Loc | move S r | call Ar Func
+
+#
+# For a tail-recursive call to an external function (non-BIF), no
+# return address is saved on the stack, so there is no reason to keep
+# the line instruction. (The compiler did not remove the line
+# instruction because it cannot tell the difference between
+# BIFs and ordinary Erlang functions.)
+#
+
+line Loc | call_ext_last Ar Func=u$is_not_bif D => \
+ call_ext_last Ar Func D
+line Loc | call_ext_only Ar Func=u$is_not_bif => \
+ call_ext_only Ar Func
+
+line Loc | func_info M F A => func_info M F A | line Loc
+
+line I
+
+
%macro: allocate Allocate -pack
%macro: allocate_zero AllocateZero -pack
%macro: allocate_heap AllocateHeap -pack
@@ -277,8 +289,6 @@ raise s s
badarg j
system_limit j
-move R R =>
-
move C=cxy r | jump Lbl => move_jump Lbl C
%macro: move_jump MoveJump -nonext
@@ -585,8 +595,6 @@ get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst | original_reg Reg
original_reg Reg Pos =>
-get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst
-
original_reg/2
extract_next_element D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
@@ -837,11 +845,11 @@ call_ext_only u==3 u$func:erlang:apply/3 => i_apply_only
# thus there is no need to generate any return instruction.
#
-call_ext_last u==1 Bif=u$bif:erlang:exit/1 D => call_bif1 Bif
-call_ext_last u==1 Bif=u$bif:erlang:throw/1 D => call_bif1 Bif
+call_ext_last u==1 Bif=u$bif:erlang:exit/1 D => call_bif Bif
+call_ext_last u==1 Bif=u$bif:erlang:throw/1 D => call_bif Bif
-call_ext_only u==1 Bif=u$bif:erlang:exit/1 => call_bif1 Bif
-call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif1 Bif
+call_ext_only u==1 Bif=u$bif:erlang:exit/1 => call_bif Bif
+call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif Bif
#
# The error/1 and error/2 BIFs never execute the instruction following them;
@@ -851,13 +859,13 @@ call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif1 Bif
# the continuation pointer on the stack.
#
-call_ext_last u==1 Bif=u$bif:erlang:error/1 D => call_bif1 Bif
-call_ext_last u==2 Bif=u$bif:erlang:error/2 D => call_bif2 Bif
+call_ext_last u==1 Bif=u$bif:erlang:error/1 D => call_bif Bif
+call_ext_last u==2 Bif=u$bif:erlang:error/2 D => call_bif Bif
call_ext_only Ar=u==1 Bif=u$bif:erlang:error/1 => \
- allocate u Ar | call_bif1 Bif
+ allocate u Ar | call_bif Bif
call_ext_only Ar=u==2 Bif=u$bif:erlang:error/2 => \
- allocate u Ar | call_bif2 Bif
+ allocate u Ar | call_bif Bif
#
# The yield/0 BIF is an instruction
@@ -875,47 +883,18 @@ call_ext_last u==3 u$func:erlang:hibernate/3 D => i_hibernate
call_ext_only u==3 u$func:erlang:hibernate/3 => i_hibernate
#
-# Hybrid memory architecture need special cons and tuple instructions
-# that allocate on the message area. These looks like BIFs in the BEAM code.
-#
-
-call_ext u==2 u$func:hybrid:cons/2 => i_global_cons
-call_ext_last u==2 u$func:hybrid:cons/2 D => i_global_cons | deallocate_return D
-call_ext_only Ar=u==2 u$func:hybrid:cons/2 => i_global_cons | return
-
-call_ext u==1 u$func:hybrid:tuple/1 => i_global_tuple
-call_ext_last u==1 u$func:hybrid:tuple/1 D => i_global_tuple | deallocate_return D
-call_ext_only Ar=u==1 u$func:hybrid:tuple/1 => i_global_tuple | return
-
-call_ext u==1 u$func:hybrid:copy/1 => i_global_copy
-call_ext_last u==1 u$func:hybrid:copy/1 D => i_global_copy | deallocate_return D
-call_ext_only u==1 Ar=u$func:hybrid:copy/1 => i_global_copy | return
-
-#
# The general case for BIFs that have no special instructions.
# A BIF used in the tail must be followed by a return instruction.
#
# To make trapping and stack backtraces work correctly, we make sure that
# the continuation pointer is always stored on the stack.
-call_ext u==0 Bif=u$is_bif => call_bif0 Bif
-call_ext u==1 Bif=u$is_bif => call_bif1 Bif
-call_ext u==2 Bif=u$is_bif => call_bif2 Bif
-call_ext u==3 Bif=$is_bif => call_bif3 Bif
+call_ext u Bif=u$is_bif => call_bif Bif
-call_ext_last u==0 Bif=u$is_bif D => call_bif0 Bif | deallocate_return D
-call_ext_last u==1 Bif=u$is_bif D => call_bif1 Bif | deallocate_return D
-call_ext_last u==2 Bif=u$is_bif D => call_bif2 Bif | deallocate_return D
-call_ext_last u==3 Bif=u$is_bif D => call_bif3 Bif | deallocate_return D
+call_ext_last u Bif=u$is_bif D => call_bif Bif | deallocate_return D
-call_ext_only Ar=u==0 Bif=u$is_bif => \
- allocate u Ar | call_bif0 Bif | deallocate_return u
-call_ext_only Ar=u==1 Bif=u$is_bif => \
- allocate u Ar | call_bif1 Bif | deallocate_return u
-call_ext_only Ar=u==2 Bif=u$is_bif => \
- allocate u Ar | call_bif2 Bif | deallocate_return u
-call_ext_only Ar=u==3 Bif=u$is_bif => \
- allocate u Ar | call_bif3 Bif | deallocate_return u
+call_ext_only Ar=u Bif=u$is_bif => \
+ allocate u Ar | call_bif Bif | deallocate_return u
#
# Any remaining calls are calls to Erlang functions, not BIFs.
@@ -928,9 +907,9 @@ move S=c r | call_ext Ar=u Func=u$is_not_bif => i_move_call_ext S r Func
move S=c r | call_ext_last Ar=u Func=u$is_not_bif D => i_move_call_ext_last Func D S r
move S=c r | call_ext_only Ar=u Func=u$is_not_bif => i_move_call_ext_only Func S r
-call_ext Ar=u Func => i_call_ext Func
-call_ext_last Ar=u Func D => i_call_ext_last Func D
-call_ext_only Ar=u Func => i_call_ext_only Func
+call_ext Ar Func => i_call_ext Func
+call_ext_last Ar Func D => i_call_ext_last Func D
+call_ext_only Ar Func => i_call_ext_only Func
i_apply
i_apply_last P
@@ -942,10 +921,7 @@ i_apply_fun_only
i_hibernate
-call_bif0 e
-call_bif1 e
-call_bif2 e
-call_bif3 e
+call_bif e
#
# Calls to non-building and guard BIFs.
@@ -964,7 +940,7 @@ bif1 p Bif S1 Dst => bif1_body Bif S1 Dst
bif1_body Bif Literal=q Dst => move Literal x | bif1_body Bif x Dst
bif2 p Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2_body Bif Dst
-bif2 Fail=f Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2 Fail Bif Dst
+bif2 Fail Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2 Fail Bif Dst
i_get s d
@@ -1047,8 +1023,8 @@ i_move_call_ext_only e c r
# Fun calls.
-call_fun Arity=u | deallocate D | return => i_call_fun_last Arity D
-call_fun Arity=u => i_call_fun Arity
+call_fun Arity | deallocate D | return => i_call_fun_last Arity D
+call_fun Arity => i_call_fun Arity
i_call_fun I
i_call_fun_last I P
@@ -1236,7 +1212,7 @@ i_bs_init_heap I I I d
i_bs_init_heap_bin_heap I I I d
-bs_init_bits Fail Sz Words Regs Flags Dst | binary_too_big_bits(Sz) => system_limit Fail
+bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail
bs_init_bits Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init_bits Sz Regs Dst
bs_init_bits Fail Sz=u Words Regs Flags Dst => i_bs_init_bits_heap Sz Words Regs Dst
@@ -1304,13 +1280,13 @@ i_bs_utf16_size s d
bs_put_utf8 Fail=j Flags=u Literal=q => \
move Literal x | bs_put_utf8 Fail Flags x
-bs_put_utf8 Fail=j u Src=s => i_bs_put_utf8 Fail Src
+bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src
i_bs_put_utf8 j s
bs_put_utf16 Fail=j Flags=u Literal=q => \
move Literal x | bs_put_utf16 Fail Flags x
-bs_put_utf16 Fail=j Flags=u Src=s => i_bs_put_utf16 Fail Flags Src
+bs_put_utf16 Fail Flags=u Src=s => i_bs_put_utf16 Fail Flags Src
i_bs_put_utf16 j I s
@@ -1475,34 +1451,13 @@ bif1 Fail u$bif:erlang:trunc/1 s d => too_old_compiler
#
# Guard BIFs.
#
-gc_bif1 Fail I Bif=u$bif:erlang:length/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:size/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:bit_size/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:byte_size/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:abs/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:float/1 Src Dst=d => \
+gc_bif1 Fail I Bif Src Dst => \
gen_guard_bif1(Fail, I, Bif, Src, Dst)
-gc_bif1 Fail I Bif=u$bif:erlang:round/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:trunc/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif2 Fail I Bif=u$bif:erlang:binary_part/2 S1 S2 Dst=d => \
+gc_bif2 Fail I Bif S1 S2 Dst => \
gen_guard_bif2(Fail, I, Bif, S1, S2, Dst)
-gc_bif3 Fail I Bif=u$bif:erlang:binary_part/3 S1 S2 S3 Dst=d => \
+gc_bif3 Fail I Bif S1 S2 S3 Dst => \
gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst)
i_gc_bif1 Fail Bif V=q Live D => move V x | i_gc_bif1 Fail Bif x Live D
@@ -1520,6 +1475,15 @@ ii_gc_bif3/7
ii_gc_bif3 Fail Bif S1 S2 S3 Live D => move S1 x | i_fetch S2 S3 | i_gc_bif3 Fail Bif x Live D
i_gc_bif3 j I s I d
+
+#
+# The following instruction is specially handled in beam_load.c
+# to produce a user-friendly message if an unsupported guard BIF is
+# encountered.
+#
+unsupported_guard_bif/3
+unsupported_guard_bif A B C | never() =>
+
#
# R13B03
#
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 669a601b35..f9cbcc5892 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -475,15 +475,6 @@ __decl_noreturn void __noreturn erl_exit(int n, char*, ...);
#define ERTS_ABORT_EXIT (INT_MIN + 1) /* no crash dump; only abort() */
#define ERTS_DUMP_EXIT (127) /* crash dump; then exit() */
-
-#ifndef ERTS_SMP
-int check_async_ready(void);
-#ifdef USE_THREADS
-void sys_async_ready(int hndl);
-int erts_register_async_ready_callback(void (*funcp)(void));
-#endif
-#endif
-
Eterm erts_check_io_info(void *p);
/* Size of misc memory allocated from system dependent code */
@@ -616,13 +607,10 @@ extern char *erts_sys_ddll_error(int code);
* System interfaces for startup.
*/
-
-#ifdef ERTS_SMP
void erts_sys_schedule_interrupt(int set);
+#ifdef ERTS_SMP
void erts_sys_schedule_interrupt_timed(int set, long msec);
void erts_sys_main_thread(void);
-#else
-#define erts_sys_schedule_interrupt(Set)
#endif
extern void erts_sys_prepare_crash_dump(void);
@@ -674,6 +662,8 @@ int erts_sys_putenv(char *key_value, int sep_ix);
*size), a value > 0 if value buffer is too small (*size is set to needed
size), and a value < 0 on failure. */
int erts_sys_getenv(char *key, char *value, size_t *size);
+/* erts_sys_getenv__() is only allowed to be used in the early init phase */
+int erts_sys_getenv__(char *key, char *value, size_t *size);
/* Easier to use, but not as efficient, environment functions */
char *erts_read_env(char *key);
@@ -697,291 +687,14 @@ int erts_write_env(char *key, char *value);
int sys_alloc_opt(int, int);
typedef struct {
- Sint trim_threshold;
- Sint top_pad;
- Sint mmap_threshold;
- Sint mmap_max;
+ int trim_threshold;
+ int top_pad;
+ int mmap_threshold;
+ int mmap_max;
} SysAllocStat;
void sys_alloc_stat(SysAllocStat *);
-/* Block the whole system... */
-
-#define ERTS_BS_FLG_ALLOW_GC (((Uint32) 1) << 0)
-#define ERTS_BS_FLG_ALLOW_IO (((Uint32) 1) << 1)
-
-/* Activities... */
-typedef enum {
- ERTS_ACTIVITY_UNDEFINED, /* Undefined activity */
- ERTS_ACTIVITY_WAIT, /* Waiting */
- ERTS_ACTIVITY_GC, /* Garbage collecting */
- ERTS_ACTIVITY_IO /* I/O including message passing to erl procs */
-} erts_activity_t;
-
-#ifdef ERTS_SMP
-
-typedef enum {
- ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
- ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
- ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY
-} erts_activity_error_t;
-
-typedef struct {
- erts_smp_atomic32_t do_block;
- struct {
- erts_smp_atomic32_t wait;
- erts_smp_atomic32_t gc;
- erts_smp_atomic32_t io;
- } in_activity;
-} erts_system_block_state_t;
-
-extern erts_system_block_state_t erts_system_block_state;
-
-int erts_is_system_blocked(erts_activity_t allowed_activities);
-void erts_block_me(void (*prepare)(void *), void (*resume)(void *), void *arg);
-void erts_register_blockable_thread(void);
-void erts_unregister_blockable_thread(void);
-void erts_note_activity_begin(erts_activity_t activity);
-void
-erts_check_block(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg);
-void erts_block_system(Uint32 allowed_activities);
-int erts_emergency_block_system(long timeout, Uint32 allowed_activities);
-void erts_release_system(void);
-void erts_system_block_init(void);
-void erts_set_activity_error(erts_activity_error_t, char *, int);
-#ifdef ERTS_ENABLE_LOCK_CHECK
-void erts_lc_activity_change_begin(void);
-void erts_lc_activity_change_end(void);
-int erts_lc_is_blocking(void);
-#define ERTS_LC_IS_BLOCKING \
- (erts_smp_pending_system_block() && erts_lc_is_blocking())
-#endif
-#endif
-
-#define erts_smp_activity_begin(NACT, PRP, RSM, ARG) \
- erts_smp_set_activity(ERTS_ACTIVITY_UNDEFINED, \
- (NACT), \
- 0, \
- (PRP), \
- (RSM), \
- (ARG), \
- __FILE__, \
- __LINE__)
-#define erts_smp_activity_change(OACT, NACT, PRP, RSM, ARG) \
- erts_smp_set_activity((OACT), \
- (NACT), \
- 0, \
- (PRP), \
- (RSM), \
- (ARG), \
- __FILE__, \
- __LINE__)
-#define erts_smp_activity_end(OACT, PRP, RSM, ARG) \
- erts_smp_set_activity((OACT), \
- ERTS_ACTIVITY_UNDEFINED, \
- 0, \
- (PRP), \
- (RSM), \
- (ARG), \
- __FILE__, \
- __LINE__)
-
-#define erts_smp_locked_activity_begin(NACT) \
- erts_smp_set_activity(ERTS_ACTIVITY_UNDEFINED, \
- (NACT), \
- 1, \
- NULL, \
- NULL, \
- NULL, \
- __FILE__, \
- __LINE__)
-#define erts_smp_locked_activity_change(OACT, NACT) \
- erts_smp_set_activity((OACT), \
- (NACT), \
- 1, \
- NULL, \
- NULL, \
- NULL, \
- __FILE__, \
- __LINE__)
-#define erts_smp_locked_activity_end(OACT) \
- erts_smp_set_activity((OACT), \
- ERTS_ACTIVITY_UNDEFINED, \
- 1, \
- NULL, \
- NULL, \
- NULL, \
- __FILE__, \
- __LINE__)
-
-
-ERTS_GLB_INLINE int erts_smp_is_system_blocked(erts_activity_t allowed_activities);
-ERTS_GLB_INLINE void erts_smp_block_system(Uint32 allowed_activities);
-ERTS_GLB_INLINE int erts_smp_emergency_block_system(long timeout,
- Uint32 allowed_activities);
-ERTS_GLB_INLINE void erts_smp_release_system(void);
-ERTS_GLB_INLINE int erts_smp_pending_system_block(void);
-ERTS_GLB_INLINE void erts_smp_chk_system_block(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg);
-ERTS_GLB_INLINE void
-erts_smp_set_activity(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg,
- char *file,
- int line);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-
-ERTS_GLB_INLINE int
-erts_smp_is_system_blocked(erts_activity_t allowed_activities)
-{
-#ifdef ERTS_SMP
- return erts_is_system_blocked(allowed_activities);
-#else
- return 1;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_block_system(Uint32 allowed_activities)
-{
-#ifdef ERTS_SMP
- erts_block_system(allowed_activities);
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_emergency_block_system(long timeout, Uint32 allowed_activities)
-{
-#ifdef ERTS_SMP
- return erts_emergency_block_system(timeout, allowed_activities);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_release_system(void)
-{
-#ifdef ERTS_SMP
- erts_release_system();
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_pending_system_block(void)
-{
-#ifdef ERTS_SMP
- return (int) erts_smp_atomic32_read_nob(&erts_system_block_state.do_block);
-#else
- return 0;
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_chk_system_block(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg)
-{
-#ifdef ERTS_SMP
- if (erts_smp_pending_system_block())
- erts_block_me(prepare, resume, arg);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_set_activity(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg,
- char *file,
- int line)
-{
-#ifdef ERTS_SMP
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_activity_change_begin();
-#endif
- switch (old_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- break;
- case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic32_dec_acqb(&erts_system_block_state.in_activity.wait);
- if (locked) {
- /* You are not allowed to leave activity waiting
- * without supplying the possibility to block
- * unlocked.
- */
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
- file, line);
- }
- break;
- case ERTS_ACTIVITY_GC:
- erts_smp_atomic32_dec_acqb(&erts_system_block_state.in_activity.gc);
- break;
- case ERTS_ACTIVITY_IO:
- erts_smp_atomic32_dec_acqb(&erts_system_block_state.in_activity.io);
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
- file, line);
- break;
- }
-
- /* We are not allowed to block when going to activity waiting... */
- if (new_activity != ERTS_ACTIVITY_WAIT && erts_smp_pending_system_block())
- erts_check_block(old_activity,new_activity,locked,prepare,resume,arg);
-
- switch (new_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- break;
- case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic32_inc_mb(&erts_system_block_state.in_activity.wait);
- break;
- case ERTS_ACTIVITY_GC:
- erts_smp_atomic32_inc_mb(&erts_system_block_state.in_activity.gc);
- break;
- case ERTS_ACTIVITY_IO:
- erts_smp_atomic32_inc_mb(&erts_system_block_state.in_activity.io);
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
- file, line);
- break;
- }
-
- switch (new_activity) {
- case ERTS_ACTIVITY_WAIT:
- case ERTS_ACTIVITY_GC:
- case ERTS_ACTIVITY_IO:
- if (erts_smp_pending_system_block())
- erts_note_activity_begin(new_activity);
- break;
- default:
- break;
- }
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_activity_change_end();
-#endif
-
-#endif
-}
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
#undef ERTS_REFC_DEBUG
#define ERTS_REFC_DEBUG
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 8fa8c1cfe0..db9a24e0a3 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -444,7 +444,7 @@ erts_time_left(ErlTimer *p)
}
#ifdef DEBUG
-void erts_p_slpq()
+void erts_p_slpq(void)
{
int i;
ErlTimer* p;
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 3f6accba2d..1bd178f280 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -42,6 +42,9 @@
#include "erl_threads.h"
#include "erl_smp.h"
#include "erl_time.h"
+#include "erl_thr_progress.h"
+#include "erl_thr_queue.h"
+#include "erl_sched_spec_pre_alloc.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -75,6 +78,7 @@ typedef struct {
#ifdef ERTS_SMP
+#if 0 /* Unused */
static void
dispatch_profile_msg_q(profile_sched_msg_q *psmq)
{
@@ -86,6 +90,7 @@ dispatch_profile_msg_q(profile_sched_msg_q *psmq)
profile_scheduler_q(make_small(msg->scheduler_id), msg->state, am_undefined, msg->Ms, msg->s, msg->us);
}
}
+#endif
#endif
@@ -2642,7 +2647,7 @@ tailrecur_ne:
FloatDef f1, f2;
Eterm big;
#if HEAP_ON_C_STACK
- Eterm big_buf[2]; /* If HEAP_ON_C_STACK */
+ Eterm big_buf[32]; /* If HEAP_ON_C_STACK */
#else
Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap;
#endif
@@ -2653,41 +2658,108 @@ tailrecur_ne:
Eterm aw = a;
Eterm bw = b;
#endif
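+/*
+ * Limits within which a small integer converted to a double compares
+ * without loss of precision; integers of magnitude up to 2^53 are
+ * exactly representable as IEEE doubles.
+ */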
+#define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
+#define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
b_tag = tag_val_def(bw);
switch(_NUMBER_CODE(a_tag, b_tag)) {
case SMALL_BIG:
- big = small_to_big(signed_val(a), big_buf);
- j = big_comp(big, bw);
+ j = big_sign(bw) ? 1 : -1;
+ break;
+ case BIG_SMALL:
+ j = big_sign(aw) ? -1 : 1;
break;
case SMALL_FLOAT:
- f1.fd = signed_val(a);
GET_DOUBLE(bw, f2);
- j = float_comp(f1.fd, f2.fd);
- break;
- case BIG_SMALL:
- big = small_to_big(signed_val(b), big_buf);
- j = big_comp(aw, big);
+ if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
+ // Float is within the lossless limits
+ f1.fd = signed_val(aw);
+ j = float_comp(f1.fd, f2.fd);
+#if ERTS_SIZEOF_ETERM == 8
+ } else if (f2.fd > (double) (MAX_SMALL + 1)) {
+ // Float is a positive bignum, i.e. bigger
+ j = -1;
+ } else if (f2.fd < (double) (MIN_SMALL - 1)) {
+ // Float is a negative bignum, i.e. smaller
+ j = 1;
+ } else { // Float is a Sint but less precise
+ j = signed_val(aw) - (Sint) f2.fd;
+ }
+#else
+ } else {
+ // If float is positive it is bigger than small
+ j = (f2.fd > 0.0) ? -1 : 1;
+ }
+#endif // ERTS_SIZEOF_ETERM == 8
break;
case BIG_FLOAT:
- if (big_to_double(aw, &f1.fd) < 0) {
- j = big_sign(a) ? -1 : 1;
+ GET_DOUBLE(bw, f2);
+ if ((f2.fd < (double) (MAX_SMALL + 1))
+ && (f2.fd > (double) (MIN_SMALL - 1))) {
+ // Float is a Sint
+ j = big_sign(aw) ? -1 : 1;
+ } else if ((pow(2.0,(big_arity(aw)-1.0)*D_EXP)-1.0) > fabs(f2.fd)) {
+ // If bignum size shows that it is bigger than the abs float
+ j = big_sign(aw) ? -1 : 1;
+ } else if ((pow(2.0,(big_arity(aw))*D_EXP)-1.0) < fabs(f2.fd)) {
+ // If bignum size shows that it is smaller than the abs float
+ j = f2.fd < 0 ? 1 : -1;
+ } else if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
+ // Float is within the lossless limits
+ if (big_to_double(aw, &f1.fd) < 0) {
+ j = big_sign(aw) ? -1 : 1;
+ } else {
+ j = float_comp(f1.fd, f2.fd);
+ }
} else {
- GET_DOUBLE(bw, f2);
- j = float_comp(f1.fd, f2.fd);
+ big = double_to_big(f2.fd, big_buf);
+ j = big_comp(aw, big);
}
break;
case FLOAT_SMALL:
GET_DOUBLE(aw, f1);
- f2.fd = signed_val(b);
- j = float_comp(f1.fd, f2.fd);
+ if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
+ // Float is within the no loss limit
+ f2.fd = signed_val(bw);
+ j = float_comp(f1.fd, f2.fd);
+#if ERTS_SIZEOF_ETERM == 8
+ } else if (f1.fd > (double) (MAX_SMALL + 1)) {
+ // Float is a positive bignum, i.e. bigger
+ j = 1;
+ } else if (f1.fd < (double) (MIN_SMALL - 1)) {
+ // Float is a negative bignum, i.e. smaller
+ j = -1;
+ } else { // Float is a Sint but less precise
+ j = (Sint) f1.fd - signed_val(bw);
+ }
+#else
+ } else {
+ // If float is positive it is bigger than small
+ j = (f1.fd > 0.0) ? 1 : -1;
+ }
+#endif // ERTS_SIZEOF_ETERM == 8
break;
case FLOAT_BIG:
- if (big_to_double(bw, &f2.fd) < 0) {
- j = big_sign(b) ? 1 : -1;
+ GET_DOUBLE(aw, f1);
+ if ((f1.fd < (double) (MAX_SMALL + 1))
+ && (f1.fd > (double) (MIN_SMALL - 1))) { // Float is a Sint
+ j = big_sign(bw) ? 1 : -1;
+ } else if ((pow(2.0, (big_arity(bw) - 1.0) * D_EXP) - 1.0) > fabs(f1.fd)) {
+ // If bignum size shows that it is bigger than the abs float
+ j = big_sign(bw) ? 1 : -1;
+ } else if ((pow(2.0,(big_arity(bw))*D_EXP)-1.0) < fabs(f1.fd)) {
+ // If bignum size shows that it is smaller than the abs float
+ j = f1.fd < 0 ? -1 : 1;
+ } else if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
+ // Float is within the no loss limit
+ if (big_to_double(bw, &f2.fd) < 0) {
+ j = big_sign(bw) ? 1 : -1;
+ } else {
+ j = float_comp(f1.fd, f2.fd);
+ }
} else {
- GET_DOUBLE(aw, f1);
- j = float_comp(f1.fd, f2.fd);
+ big = double_to_big(f1.fd, big_buf);
+ j = big_comp(big, bw);
}
break;
default:
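The MAX_LOSSLESS_FLOAT / MIN_LOSSLESS_FLOAT guards above rest on the fact that an IEEE-754 double has a 53-bit significand: every integer of magnitude below 2^53 is exactly representable, while larger integers can collapse onto the same double, so comparing via float_comp() is only safe inside that window (the patch uses the slightly conservative bound 2^53 - 2). A standalone sketch, not part of the patch, showing where exactness ends:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t below = (INT64_C(1) << 53) - 1;   /* still exactly representable */
        int64_t above = (INT64_C(1) << 53) + 1;   /* rounds to 2^53 as a double */

        printf("%d\n", (double) below == (double) (below - 1));  /* 0: distinct doubles */
        printf("%d\n", (double) above == (double) (above - 1));  /* 1: both become 2^53 */
        return 0;
    }
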
@@ -3250,10 +3322,10 @@ erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer)
#endif
-static Sint trim_threshold;
-static Sint top_pad;
-static Sint mmap_threshold;
-static Sint mmap_max;
+static int trim_threshold;
+static int top_pad;
+static int mmap_threshold;
+static int mmap_max;
Uint tot_bin_allocated;
@@ -3276,8 +3348,8 @@ int
sys_alloc_opt(int opt, int value)
{
#if HAVE_MALLOPT
- Sint m_opt;
- Sint *curr_val;
+ int m_opt;
+ int *curr_val;
switch(opt) {
case SYS_ALLOC_OPT_TRIM_THRESHOLD:
@@ -3317,7 +3389,7 @@ sys_alloc_opt(int opt, int value)
}
if(mallopt(m_opt, value)) {
- *curr_val = (Sint) value;
+ *curr_val = value;
return 1;
}
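The Sint-to-int change above matches the underlying C library interface: on glibc, mallopt() takes and returns plain int (nonzero on success, 0 on failure), so caching the value as int removes the need for a cast. A minimal usage sketch, assuming glibc:

    #include <malloc.h>
    #include <stdio.h>

    int main(void)
    {
        int value = 128 * 1024;
        if (mallopt(M_TRIM_THRESHOLD, value))      /* nonzero on success */
            printf("trim threshold set to %d bytes\n", value);
        else
            printf("mallopt failed\n");
        return 0;
    }
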
@@ -3336,688 +3408,6 @@ sys_alloc_stat(SysAllocStat *sasp)
}
-#ifdef ERTS_SMP
-
-/* Local system block state */
-
-struct {
- int emergency;
- long emergency_timeout;
- erts_smp_cnd_t watchdog_cnd;
- erts_smp_tid_t watchdog_tid;
- int threads_to_block;
- int have_blocker;
- erts_smp_tid_t blocker_tid;
- int recursive_block;
- Uint32 allowed_activities;
- erts_smp_tsd_key_t blockable_key;
- erts_smp_mtx_t mtx;
- erts_smp_cnd_t cnd;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- int activity_changing;
- int checking;
-#endif
-} system_block_state;
-
-/* Global system block state */
-erts_system_block_state_t erts_system_block_state;
-
-
-static ERTS_INLINE int
-is_blockable_thread(void)
-{
- return erts_smp_tsd_get(system_block_state.blockable_key) != NULL;
-}
-
-static ERTS_INLINE int
-is_blocker(void)
-{
- return (system_block_state.have_blocker
- && erts_smp_equal_tids(system_block_state.blocker_tid,
- erts_smp_thr_self()));
-}
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
-int
-erts_lc_is_blocking(void)
-{
- int res;
- erts_smp_mtx_lock(&system_block_state.mtx);
- res = erts_smp_pending_system_block() && is_blocker();
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return res;
-}
-#endif
-
-static ERTS_INLINE void
-block_me(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg,
- int mtx_locked,
- int want_to_block,
- int update_act_changing,
- profile_sched_msg_q *psmq)
-{
- if (prepare)
- (*prepare)(arg);
-
- /* Locks might be held... */
-
- if (!mtx_locked)
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- if (erts_smp_pending_system_block() && !is_blocker()) {
- int is_blockable = is_blockable_thread();
- ASSERT(is_blockable);
-
- if (is_blockable)
- system_block_state.threads_to_block--;
-
- if (erts_system_profile_flags.scheduler && psmq) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- if (esdp) {
- profile_sched_msg *msg = NULL;
-
- ASSERT(psmq->n < 2);
- msg = &((psmq->msg)[psmq->n]);
- msg->scheduler_id = esdp->no;
- get_now(&(msg->Ms), &(msg->s), &(msg->us));
- msg->no_schedulers = 0;
- msg->state = am_inactive;
- psmq->n++;
- }
- }
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (update_act_changing)
- system_block_state.activity_changing--;
-#endif
-
- erts_smp_cnd_broadcast(&system_block_state.cnd);
-
- do {
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- } while (erts_smp_pending_system_block()
- && !(want_to_block && !system_block_state.have_blocker));
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (update_act_changing)
- system_block_state.activity_changing++;
-#endif
- if (erts_system_profile_flags.scheduler && psmq) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- if (esdp) {
- profile_sched_msg *msg = NULL;
-
- ASSERT(psmq->n < 2);
- msg = &((psmq->msg)[psmq->n]);
- msg->scheduler_id = esdp->no;
- get_now(&(msg->Ms), &(msg->s), &(msg->us));
- msg->no_schedulers = 0;
- msg->state = am_active;
- psmq->n++;
- }
- }
-
- if (is_blockable)
- system_block_state.threads_to_block++;
- }
-
- if (!mtx_locked)
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (resume)
- (*resume)(arg);
-}
-
-void
-erts_block_me(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg)
-{
- profile_sched_msg_q psmq;
- psmq.n = 0;
- if (prepare)
- (*prepare)(arg);
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-#endif
-
- block_me(NULL, NULL, NULL, 0, 0, 0, &psmq);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
-
- if (resume)
- (*resume)(arg);
-}
-
-void
-erts_register_blockable_thread(void)
-{
- profile_sched_msg_q psmq;
- psmq.n = 0;
- if (!is_blockable_thread()) {
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.threads_to_block++;
- erts_smp_tsd_set(system_block_state.blockable_key,
- (void *) &erts_system_block_state);
-
- /* Someone might be waiting for us to block... */
- if (erts_smp_pending_system_block())
- block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
- }
-}
-
-void
-erts_unregister_blockable_thread(void)
-{
- if (is_blockable_thread()) {
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.threads_to_block--;
- ASSERT(system_block_state.threads_to_block >= 0);
- erts_smp_tsd_set(system_block_state.blockable_key, NULL);
-
- /* Someone might be waiting for us to block... */
- if (erts_smp_pending_system_block())
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- erts_smp_mtx_unlock(&system_block_state.mtx);
- }
-}
-
-void
-erts_note_activity_begin(erts_activity_t activity)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- if (erts_smp_pending_system_block()) {
- Uint32 broadcast = 0;
- switch (activity) {
- case ERTS_ACTIVITY_GC:
- broadcast = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_GC);
- break;
- case ERTS_ACTIVITY_IO:
- broadcast = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_IO);
- break;
- case ERTS_ACTIVITY_WAIT:
- broadcast = 1;
- break;
- default:
- abort();
- break;
- }
- if (broadcast)
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- }
- erts_smp_mtx_unlock(&system_block_state.mtx);
-}
-
-void
-erts_check_block(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg)
-{
- int do_block;
- profile_sched_msg_q psmq;
-
- psmq.n = 0;
- if (!locked && prepare)
- (*prepare)(arg);
-
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- /* First check if it is ok to block... */
- if (!locked)
- do_block = 1;
- else {
- switch (old_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- do_block = 0;
- break;
- case ERTS_ACTIVITY_GC:
- do_block = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_GC);
- break;
- case ERTS_ACTIVITY_IO:
- do_block = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_IO);
- break;
- case ERTS_ACTIVITY_WAIT:
- /* You are not allowed to leave activity waiting
- * without supplying the possibility to block
- * unlocked.
- */
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
- __FILE__, __LINE__);
- do_block = 0;
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
- __FILE__, __LINE__);
- do_block = 0;
- break;
- }
- }
-
- if (do_block) {
- /* ... then check if it is necessary to block... */
-
- switch (new_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- do_block = 1;
- break;
- case ERTS_ACTIVITY_GC:
- do_block = !(system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_GC);
- break;
- case ERTS_ACTIVITY_IO:
- do_block = !(system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_IO);
- break;
- case ERTS_ACTIVITY_WAIT:
- /* No need to block if we are going to wait */
- do_block = 0;
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
- __FILE__, __LINE__);
- break;
- }
- }
-
- if (do_block) {
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (!locked) {
- /* Only system_block_state.mtx should be held */
- erts_lc_check_exact(&system_block_state.mtx.lc, 1);
- }
-#endif
-
- block_me(NULL, NULL, NULL, 1, 0, 1, &psmq);
-
- }
-
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
-
- if (!locked && resume)
- (*resume)(arg);
-}
-
-
-
-void
-erts_set_activity_error(erts_activity_error_t error, char *file, int line)
-{
- switch (error) {
- case ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED:
- erl_exit(1, "%s:%d: Fatal error: Leaving activity waiting without "
- "supplying the possibility to block unlocked.",
- file, line);
- break;
- case ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY:
- erl_exit(1, "%s:%d: Fatal error: Leaving unknown activity.",
- file, line);
- break;
- case ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY:
- erl_exit(1, "%s:%d: Fatal error: Leaving unknown activity.",
- file, line);
- break;
- default:
- erl_exit(1, "%s:%d: Internal error in erts_smp_set_activity()",
- file, line);
- break;
- }
-
-}
-
-
-static ERTS_INLINE erts_aint32_t
-threads_not_under_control(void)
-{
- erts_aint32_t res = system_block_state.threads_to_block;
-
- ERTS_THR_MEMORY_BARRIER;
-
- /* Waiting is always an allowed activity... */
- res -= erts_smp_atomic32_read_nob(&erts_system_block_state.in_activity.wait);
-
- if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_GC)
- res -= erts_smp_atomic32_read_nob(&erts_system_block_state.in_activity.gc);
-
- if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_IO)
- res -= erts_smp_atomic32_read_nob(&erts_system_block_state.in_activity.io);
-
- if (res < 0) {
- ASSERT(0);
- return 0;
- }
- return res;
-}
-
-/*
- * erts_block_system() blocks all threads registered as blockable.
- * It doesn't return until either all threads have blocked (0 is returned)
- * or it has timed out (ETIMEDOUT) is returned.
- *
- * If allowed activities == 0, blocked threads will release all locks
- * before blocking.
- *
- * If allowed_activities is != 0, erts_block_system() will allow blockable
- * threads to continue executing as long as they are doing an allowed
- * activity. When they are done with the allowed activity they will block,
- * *but* they will block holding locks. Therefore, the thread calling
- * erts_block_system() must *not* try to aquire any locks that might be
- * held by blocked threads holding locks from allowed activities.
- *
- * Currently allowed_activities are:
- * * ERTS_BS_FLG_ALLOW_GC Thread continues with garbage
- * collection and blocks with
- * main process lock on current
- * process locked.
- * * ERTS_BS_FLG_ALLOW_IO Thread continues with I/O
- */
-
-void
-erts_block_system(Uint32 allowed_activities)
-{
- int do_block;
- profile_sched_msg_q psmq;
-
- psmq.n = 0;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-#endif
-
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- do_block = erts_smp_pending_system_block();
- if (do_block
- && system_block_state.have_blocker
- && erts_smp_equal_tids(system_block_state.blocker_tid,
- erts_smp_thr_self())) {
- ASSERT(system_block_state.recursive_block >= 0);
- system_block_state.recursive_block++;
-
- /* You are not allowed to restrict allowed activites
- in a recursive block! */
- ERTS_SMP_LC_ASSERT((system_block_state.allowed_activities
- & ~allowed_activities) == 0);
- }
- else {
-
- erts_smp_atomic32_inc_nob(&erts_system_block_state.do_block);
-
- /* Someone else might be waiting for us to block... */
- if (do_block) {
- do_block_me:
- block_me(NULL, NULL, NULL, 1, 1, 0, &psmq);
- }
-
- ASSERT(!system_block_state.have_blocker);
- system_block_state.have_blocker = 1;
- system_block_state.blocker_tid = erts_smp_thr_self();
- system_block_state.allowed_activities = allowed_activities;
-
- if (is_blockable_thread())
- system_block_state.threads_to_block--;
-
- while (threads_not_under_control() && !system_block_state.emergency)
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
-
- if (system_block_state.emergency) {
- system_block_state.have_blocker = 0;
- goto do_block_me;
- }
- }
-
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0 )
- dispatch_profile_msg_q(&psmq);
-}
-
-/*
- * erts_emergency_block_system() should only be called when we are
- * about to write a crash dump...
- */
-
-int
-erts_emergency_block_system(long timeout, Uint32 allowed_activities)
-{
- int res = 0;
- long another_blocker;
-
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- if (system_block_state.emergency) {
- /* Argh... */
- res = EINVAL;
- goto done;
- }
-
- another_blocker = erts_smp_pending_system_block();
- system_block_state.emergency = 1;
- erts_smp_atomic32_inc_nob(&erts_system_block_state.do_block);
-
- if (another_blocker) {
- if (is_blocker()) {
- erts_smp_atomic32_dec_nob(&erts_system_block_state.do_block);
- res = 0;
- goto done;
- }
- /* kick the other blocker */
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- while (system_block_state.have_blocker)
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- }
-
- ASSERT(!system_block_state.have_blocker);
- system_block_state.have_blocker = 1;
- system_block_state.blocker_tid = erts_smp_thr_self();
- system_block_state.allowed_activities = allowed_activities;
-
- if (is_blockable_thread())
- system_block_state.threads_to_block--;
-
- if (timeout < 0) {
- while (threads_not_under_control())
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- }
- else {
- system_block_state.emergency_timeout = timeout;
- erts_smp_cnd_signal(&system_block_state.watchdog_cnd);
-
- while (system_block_state.emergency_timeout >= 0
- && threads_not_under_control()) {
- erts_smp_cnd_wait(&system_block_state.cnd,
- &system_block_state.mtx);
- }
- }
- done:
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return res;
-}
-
-void
-erts_release_system(void)
-{
- long do_block;
- profile_sched_msg_q psmq;
-
- psmq.n = 0;
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-#endif
-
- erts_smp_mtx_lock(&system_block_state.mtx);
- ASSERT(is_blocker());
-
- ASSERT(system_block_state.recursive_block >= 0);
-
- if (system_block_state.recursive_block)
- system_block_state.recursive_block--;
- else {
- do_block = erts_smp_atomic32_dec_read_nob(&erts_system_block_state.do_block);
- system_block_state.have_blocker = 0;
- if (is_blockable_thread())
- system_block_state.threads_to_block++;
- else
- do_block = 0;
-
- /* Someone else might be waiting for us to block... */
- if (do_block)
- block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);
- else
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- }
-
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
-}
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
-
-void
-erts_lc_activity_change_begin(void)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.activity_changing++;
- erts_smp_mtx_unlock(&system_block_state.mtx);
-}
-
-void
-erts_lc_activity_change_end(void)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.activity_changing--;
- if (system_block_state.checking && !system_block_state.activity_changing)
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- erts_smp_mtx_unlock(&system_block_state.mtx);
-}
-
-#endif
-
-int
-erts_is_system_blocked(erts_activity_t allowed_activities)
-{
- int blkd;
-
- erts_smp_mtx_lock(&system_block_state.mtx);
- blkd = (erts_smp_pending_system_block()
- && system_block_state.have_blocker
- && erts_smp_equal_tids(system_block_state.blocker_tid,
- erts_smp_thr_self())
- && !(system_block_state.allowed_activities & ~allowed_activities));
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (blkd) {
- system_block_state.checking = 1;
- while (system_block_state.activity_changing)
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- system_block_state.checking = 0;
- blkd = !threads_not_under_control();
- }
-#endif
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return blkd;
-}
-
-static void *
-emergency_watchdog(void *unused)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- while (1) {
- long timeout;
- while (system_block_state.emergency_timeout < 0)
- erts_smp_cnd_wait(&system_block_state.watchdog_cnd, &system_block_state.mtx);
- timeout = system_block_state.emergency_timeout;
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_disable_tolerant_timeofday)
- erts_milli_sleep(timeout);
- else {
- SysTimeval to;
- erts_get_timeval(&to);
- to.tv_sec += timeout / 1000;
- to.tv_usec += timeout % 1000;
-
- while (1) {
- SysTimeval curr;
- erts_milli_sleep(timeout);
- erts_get_timeval(&curr);
- if (curr.tv_sec > to.tv_sec
- || (curr.tv_sec == to.tv_sec && curr.tv_usec >= to.tv_usec)) {
- break;
- }
- timeout = (to.tv_sec - curr.tv_sec)*1000;
- timeout += (to.tv_usec - curr.tv_usec)/1000;
- }
- }
-
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.emergency_timeout = -1;
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- }
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return NULL;
-}
-
-void
-erts_system_block_init(void)
-{
- erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
- /* Local state... */
- system_block_state.emergency = 0;
- system_block_state.emergency_timeout = -1;
- erts_smp_cnd_init(&system_block_state.watchdog_cnd);
- system_block_state.threads_to_block = 0;
- system_block_state.have_blocker = 0;
- /* system_block_state.block_tid */
- system_block_state.recursive_block = 0;
- system_block_state.allowed_activities = 0;
- erts_smp_tsd_key_create(&system_block_state.blockable_key);
- erts_smp_mtx_init(&system_block_state.mtx, "system_block");
- erts_smp_cnd_init(&system_block_state.cnd);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- system_block_state.activity_changing = 0;
- system_block_state.checking = 0;
-#endif
-
- thr_opts.suggested_stack_size = 8;
- erts_smp_thr_create(&system_block_state.watchdog_tid,
- emergency_watchdog,
- NULL,
- &thr_opts);
-
- /* Global state... */
-
- erts_smp_atomic32_init_nob(&erts_system_block_state.do_block, 0);
- erts_smp_atomic32_init_nob(&erts_system_block_state.in_activity.wait, 0);
- erts_smp_atomic32_init_nob(&erts_system_block_state.in_activity.gc, 0);
- erts_smp_atomic32_init_nob(&erts_system_block_state.in_activity.io, 0);
-
- /* Make sure blockable threads unregister when exiting... */
- erts_smp_install_exit_handler(erts_unregister_blockable_thread);
-}
-
-
-#endif /* #ifdef ERTS_SMP */
-
char *
erts_read_env(char *key)
{
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 68987b3493..52f1b5312b 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -2493,13 +2493,20 @@ file_flush(ErlDrvData e) {
static int
file_control(ErlDrvData e, unsigned int command,
char* buf, int len, char **rbuf, int rlen) {
+ /*
+ * warning: variable ‘desc’ set but not used
+ * [-Wunused-but-set-variable]
+ * ... no kidding ...
+ *
+ *
file_descriptor *desc = (file_descriptor *)e;
switch (command) {
default:
return 0;
- } /* switch (command) */
+ }
ASSERT(0);
- desc = NULL; /* XXX Avoid warning while empty switch */
+ desc = NULL;
+ */
return 0;
}
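The efile_drv.c hunk above silences -Wunused-but-set-variable by commenting the whole body out. An alternative worth noting, shown only as an illustrative sketch and not what the patch does, is to keep the variable and mark it with GCC's unused attribute, which is the suppression mechanism the compiler documents:

    #include <stdio.h>

    /* sketch: keep a set-but-unused variable without triggering
     * -Wunused-but-set-variable */
    static int control_sketch(void *e)
    {
        __attribute__((unused)) void *desc = e;   /* reserved for future use */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", control_sketch(NULL));
        return 0;
    }
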
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 40c4a0df08..2ff5f744d6 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -280,6 +280,57 @@ static unsigned long one_value = 1;
# define SCTP_EOF MSG_EOF
#endif
+/* More Solaris 10 fixes: */
+#if ! HAVE_DECL_SCTP_CLOSED && HAVE_DECL_SCTPS_IDLE
+# define SCTP_CLOSED SCTPS_IDLE
+# undef HAVE_DECL_SCTP_CLOSED
+# define HAVE_DECL_SCTP_CLOSED 1
+#endif
+#if ! HAVE_DECL_SCTP_BOUND && HAVE_DECL_SCTPS_BOUND
+# define SCTP_BOUND SCTPS_BOUND
+# undef HAVE_DECL_SCTP_BOUND
+# define HAVE_DECL_SCTP_BOUND 1
+#endif
+#if ! HAVE_DECL_SCTP_LISTEN && HAVE_DECL_SCTPS_LISTEN
+# define SCTP_LISTEN SCTPS_LISTEN
+# undef HAVE_DECL_SCTP_LISTEN
+# define HAVE_DECL_SCTP_LISTEN 1
+#endif
+#if ! HAVE_DECL_SCTP_COOKIE_WAIT && HAVE_DECL_SCTPS_COOKIE_WAIT
+# define SCTP_COOKIE_WAIT SCTPS_COOKIE_WAIT
+# undef HAVE_DECL_SCTP_COOKIE_WAIT
+# define HAVE_DECL_SCTP_COOKIE_WAIT 1
+#endif
+#if ! HAVE_DECL_SCTP_COOKIE_ECHOED && HAVE_DECL_SCTPS_COOKIE_ECHOED
+# define SCTP_COOKIE_ECHOED SCTPS_COOKIE_ECHOED
+# undef HAVE_DECL_SCTP_COOKIE_ECHOED
+# define HAVE_DECL_SCTP_COOKIE_ECHOED 1
+#endif
+#if ! HAVE_DECL_SCTP_ESTABLISHED && HAVE_DECL_SCTPS_ESTABLISHED
+# define SCTP_ESTABLISHED SCTPS_ESTABLISHED
+# undef HAVE_DECL_SCTP_ESTABLISHED
+# define HAVE_DECL_SCTP_ESTABLISHED 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_PENDING && HAVE_DECL_SCTPS_SHUTDOWN_PENDING
+# define SCTP_SHUTDOWN_PENDING SCTPS_SHUTDOWN_PENDING
+# undef HAVE_DECL_SCTP_SHUTDOWN_PENDING
+# define HAVE_DECL_SCTP_SHUTDOWN_PENDING 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_SENT && HAVE_DECL_SCTPS_SHUTDOWN_SENT
+# define SCTP_SHUTDOWN_SENT SCTPS_SHUTDOWN_SENT
+# undef HAVE_DECL_SCTP_SHUTDOWN_SENT
+# define HAVE_DECL_SCTP_SHUTDOWN_SENT 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_RECEIVED && HAVE_DECL_SCTPS_SHUTDOWN_RECEIVED
+# define SCTP_SHUTDOWN_RECEIVED SCTPS_SHUTDOWN_RECEIVED
+# undef HAVE_DECL_SCTP_SHUTDOWN_RECEIVED
+# define HAVE_DECL_SCTP_SHUTDOWN_RECEIVED 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_ACK_SENT && HAVE_DECL_SCTPS_SHUTDOWN_ACK_SENT
+# define SCTP_SHUTDOWN_ACK_SENT SCTPS_SHUTDOWN_ACK_SENT
+# undef HAVE_DECL_SCTP_SHUTDOWN_ACK_SENT
+# define HAVE_DECL_SCTP_SHUTDOWN_ACK_SENT 1
+#endif
/* New spelling in lksctp 2.6.22 or maybe even earlier:
* adaption -> adaptation
*/
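The Solaris 10 guards above use the HAVE_DECL_* convention rather than plain #ifdef: with autoconf's AC_CHECK_DECLS, each HAVE_DECL_x macro is always defined, to 1 when the declaration exists and 0 when it does not, so "#if ! HAVE_DECL_SCTP_CLOSED" is a well-defined test even on platforms that lack the symbol. A compressed illustration of the pattern (NEW_NAME/OLD_NAME are hypothetical placeholders):

    /* configure (AC_CHECK_DECLS) always defines HAVE_DECL_x, to 1 or 0 */
    #define HAVE_DECL_NEW_NAME 0      /* declaration missing on this platform */
    #define HAVE_DECL_OLD_NAME 1      /* platform-specific spelling is present */
    #define OLD_NAME 42

    #if ! HAVE_DECL_NEW_NAME && HAVE_DECL_OLD_NAME
    # define NEW_NAME OLD_NAME        /* fall back to the old spelling */
    #endif

    #include <stdio.h>
    int main(void) { printf("%d\n", NEW_NAME); return 0; }
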
@@ -294,12 +345,13 @@ static unsigned long one_value = 1;
# define sctp_adaptation_layer_event sctp_adaption_layer_event
#endif
-static void *h_libsctp = NULL;
#ifdef __GNUC__
static typeof(sctp_bindx) *p_sctp_bindx = NULL;
+static typeof(sctp_peeloff) *p_sctp_peeloff = NULL;
#else
static int (*p_sctp_bindx)(int sd, struct sockaddr *addrs,
int addrcnt, int flags) = NULL;
+static int (*p_sctp_peeloff)(int sd, sctp_assoc_t assoc_id) = NULL;
#endif
#endif /* SCTP supported */
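The new p_sctp_peeloff pointer above is resolved the same way as p_sctp_bindx and backs the SCTP_REQ_PEELOFF request added further down: it detaches a single association from a one-to-many SCTP socket into its own one-to-one socket. A minimal usage sketch of the underlying lksctp call (the wrapper name peel_off is illustrative only; link with -lsctp):

    #include <netinet/sctp.h>

    /* returns a new one-to-one socket for the association, or -1 on error */
    static int peel_off(int one_to_many_fd, sctp_assoc_t assoc_id)
    {
        return sctp_peeloff(one_to_many_fd, assoc_id);
    }
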
@@ -427,7 +479,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_AF_ANY 3 /* INADDR_ANY or IN6ADDR_ANY_INIT */
#define INET_AF_LOOPBACK 4 /* INADDR_LOOPBACK or IN6ADDR_LOOPBACK_INIT */
-/* INET_REQ_GETTYPE enumeration */
+/* open and INET_REQ_GETTYPE enumeration */
#define INET_TYPE_STREAM 1
#define INET_TYPE_DGRAM 2
#define INET_TYPE_SEQPACKET 3
@@ -484,16 +536,19 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_REQ_IFSET 23
#define INET_REQ_SUBSCRIBE 24
#define INET_REQ_GETIFADDRS 25
+#define INET_REQ_ACCEPT 26
+#define INET_REQ_LISTEN 27
/* TCP requests */
-#define TCP_REQ_ACCEPT 40
-#define TCP_REQ_LISTEN 41
+/* #define TCP_REQ_ACCEPT 40 MOVED */
+/* #define TCP_REQ_LISTEN 41 MERGED */
#define TCP_REQ_RECV 42
#define TCP_REQ_UNRECV 43
#define TCP_REQ_SHUTDOWN 44
/* UDP and SCTP requests */
#define PACKET_REQ_RECV 60 /* Common for UDP and SCTP */
-#define SCTP_REQ_LISTEN 61 /* Different from TCP; not for UDP */
+/* #define SCTP_REQ_LISTEN 61 MERGED Different from TCP; not for UDP */
#define SCTP_REQ_BINDX 62 /* Multi-home SCTP bind */
+#define SCTP_REQ_PEELOFF 63
/* INET_REQ_SUBSCRIBE sub-requests */
#define INET_SUBS_EMPTY_OUT_Q 1
@@ -507,7 +562,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
/* *_REQ_* replies */
#define INET_REP_ERROR 0
#define INET_REP_OK 1
-#define INET_REP_SCTP 2
+#define INET_REP 2
/* INET_REQ_SETOPTS and INET_REQ_GETOPTS options */
#define INET_OPT_REUSEADDR 0 /* enable/disable local address reuse */
@@ -628,10 +683,14 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
** End of interface constants.
**--------------------------------------------------------------------------*/
-#define INET_STATE_CLOSED 0
-#define INET_STATE_OPEN (INET_F_OPEN)
-#define INET_STATE_BOUND (INET_STATE_OPEN | INET_F_BOUND)
-#define INET_STATE_CONNECTED (INET_STATE_BOUND | INET_F_ACTIVE)
+#define INET_STATE_CLOSED (0)
+#define INET_STATE_OPEN (INET_F_OPEN)
+#define INET_STATE_BOUND (INET_STATE_OPEN | INET_F_BOUND)
+#define INET_STATE_CONNECTED (INET_STATE_BOUND | INET_F_ACTIVE)
+#define INET_STATE_LISTENING (INET_STATE_BOUND | INET_F_LISTEN)
+#define INET_STATE_CONNECTING (INET_STATE_BOUND | INET_F_CON)
+#define INET_STATE_ACCEPTING (INET_STATE_LISTENING | INET_F_ACC)
+#define INET_STATE_MULTI_ACCEPTING (INET_STATE_ACCEPTING | INET_F_MULTI_CLIENT)
#define IS_OPEN(d) \
(((d)->state & INET_F_OPEN) == INET_F_OPEN)
@@ -674,7 +733,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#endif
-#define BIN_REALLOC_LIMIT(x) (((x)*3)/4) /* 75% */
+#define BIN_REALLOC_MARGIN(x) ((x)/4) /* 25% */
/* The general purpose sockaddr */
typedef union {
@@ -809,16 +868,6 @@ typedef struct {
-#define TCP_STATE_CLOSED INET_STATE_CLOSED
-#define TCP_STATE_OPEN (INET_F_OPEN)
-#define TCP_STATE_BOUND (TCP_STATE_OPEN | INET_F_BOUND)
-#define TCP_STATE_CONNECTED (TCP_STATE_BOUND | INET_F_ACTIVE)
-#define TCP_STATE_LISTEN (TCP_STATE_BOUND | INET_F_LISTEN)
-#define TCP_STATE_CONNECTING (TCP_STATE_BOUND | INET_F_CON)
-#define TCP_STATE_ACCEPTING (TCP_STATE_LISTEN | INET_F_ACC)
-#define TCP_STATE_MULTI_ACCEPTING (TCP_STATE_ACCEPTING | INET_F_MULTI_CLIENT)
-
-
#define TCP_MAX_PACKET_SIZE 0x4000000 /* 64 M */
#define MAX_VSIZE 16 /* Max number of entries allowed in an I/O
@@ -874,12 +923,6 @@ static struct erl_drv_entry tcp_inet_driver_entry =
inet_stop_select
};
-#define PACKET_STATE_CLOSED INET_STATE_CLOSED
-#define PACKET_STATE_OPEN (INET_F_OPEN)
-#define PACKET_STATE_BOUND (PACKET_STATE_OPEN | INET_F_BOUND)
-#define SCTP_STATE_LISTEN (PACKET_STATE_BOUND | INET_F_LISTEN)
-#define SCTP_STATE_CONNECTING (PACKET_STATE_BOUND | INET_F_CON)
-#define PACKET_STATE_CONNECTED (PACKET_STATE_BOUND | INET_F_ACTIVE)
static int packet_inet_init(void);
@@ -997,6 +1040,9 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event);
typedef struct {
inet_descriptor inet; /* common data structure (DON'T MOVE) */
int read_packets; /* Number of packets to read per invocation */
+ int i_bufsz; /* current input buffer size */
+ ErlDrvBinary* i_buf; /* current binary buffer */
+ char* i_ptr; /* current pos in buf */
} udp_descriptor;
@@ -1851,6 +1897,26 @@ static int inet_reply_ok(inet_descriptor* desc)
return driver_send_term(desc->port, caller, spec, i);
}
+#ifdef HAVE_SCTP
+static int inet_reply_ok_port(inet_descriptor* desc, ErlDrvTermData dport)
+{
+ ErlDrvTermData spec[2*LOAD_ATOM_CNT + 2*LOAD_PORT_CNT + 2*LOAD_TUPLE_CNT];
+ ErlDrvTermData caller = desc->caller;
+ int i = 0;
+
+ i = LOAD_ATOM(spec, i, am_inet_reply);
+ i = LOAD_PORT(spec, i, desc->dport);
+ i = LOAD_ATOM(spec, i, am_ok);
+ i = LOAD_PORT(spec, i, dport);
+ i = LOAD_TUPLE(spec, i, 2);
+ i = LOAD_TUPLE(spec, i, 3);
+ ASSERT(i == sizeof(spec)/sizeof(*spec));
+
+ desc->caller = 0;
+ return driver_send_term(desc->port, caller, spec, i);
+}
+#endif
+
/* send:
** {inet_reply, S, {error, Reason}}
*/
@@ -2389,14 +2455,19 @@ static ErlDrvTermData am_sctp_rtoinfo, /* Option names */
am_active, am_inactive,
/* For #sctp_status{}: */
- am_empty, am_closed,
+# if HAVE_DECL_SCTP_EMPTY
+ am_empty,
+# endif
+# if HAVE_DECL_SCTP_BOUND
+ am_bound,
+# endif
+# if HAVE_DECL_SCTP_LISTEN
+ am_listen,
+# endif
am_cookie_wait, am_cookie_echoed,
am_established, am_shutdown_pending,
am_shutdown_sent, am_shutdown_received,
am_shutdown_ack_sent;
- /* Not yet implemented in the Linux kernel:
- ** am_bound, am_listen;
- */
/*
** Parsing of "sctp_sndrcvinfo": ancillary data coming with received msgs.
@@ -2665,7 +2736,8 @@ static int sctp_parse_async_event
# ifdef HAVE_STRUCT_SCTP_REMOTE_ERROR_SRE_DATA
chunk = (char*) (&(sptr->sre_data));
# else
- chunk = ((char*)sptr) + sizeof(*sptr);
+ chunk = ((char*) &(sptr->sre_assoc_id))
+ + sizeof(sptr->sre_assoc_id);
# endif
chlen = sptr->sre_length - (chunk - (char *)sptr);
i = sctp_parse_error_chunk(spec, i, chunk, chlen);
@@ -2716,7 +2788,8 @@ static int sctp_parse_async_event
# ifdef HAVE_STRUCT_SCTP_SEND_FAILED_SSF_DATA
chunk = (char*) (&(sptr->ssf_data));
# else
- chunk = ((char*)sptr) + sizeof(*sptr);
+ chunk = ((char*) &(sptr->ssf_assoc_id))
+ + sizeof(sptr->ssf_assoc_id);
# endif
chlen = sptr->ssf_length - (chunk - (char*) sptr);
choff = chunk - bin->orig_bytes;
@@ -3390,8 +3463,15 @@ static void inet_init_sctp(void) {
INIT_ATOM(inactive);
/* For #sctp_status{}: */
+# if HAVE_DECL_SCTP_EMPTY
INIT_ATOM(empty);
- INIT_ATOM(closed);
+# endif
+# if HAVE_DECL_SCTP_BOUND
+ INIT_ATOM(bound);
+# endif
+# if HAVE_DECL_SCTP_LISTEN
+ INIT_ATOM(listen);
+# endif
INIT_ATOM(cookie_wait);
INIT_ATOM(cookie_echoed);
INIT_ATOM(established);
@@ -3399,10 +3479,6 @@ static void inet_init_sctp(void) {
INIT_ATOM(shutdown_sent);
INIT_ATOM(shutdown_received);
INIT_ATOM(shutdown_ack_sent);
- /* Not yet implemented in the Linux kernel:
- ** INIT_ATOM(bound);
- ** INIT_ATOM(listen);
- */
}
#endif /* HAVE_SCTP */
@@ -3453,17 +3529,32 @@ static int inet_init()
/* Check the size of SCTP AssocID -- currently both this driver and the
Erlang part require 32 bit: */
ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN);
-# ifndef LIBSCTP
-# error LIBSCTP not defined
-# endif
- if (erts_sys_ddll_open_noext(STRINGIFY(LIBSCTP), &h_libsctp, NULL) == 0) {
- void *ptr;
- if (erts_sys_ddll_sym(h_libsctp, "sctp_bindx", &ptr) == 0) {
- p_sctp_bindx = ptr;
- inet_init_sctp();
- add_driver_entry(&sctp_inet_driver_entry);
+# if defined(HAVE_SCTP_BINDX) && defined (HAVE_SCTP_PEELOFF)
+ p_sctp_bindx = sctp_bindx;
+ p_sctp_peeloff = sctp_peeloff;
+ inet_init_sctp();
+ add_driver_entry(&sctp_inet_driver_entry);
+# else
+# ifndef LIBSCTP
+# error LIBSCTP not defined
+# endif
+ {
+ static void *h_libsctp = NULL;
+
+ if (erts_sys_ddll_open_noext(STRINGIFY(LIBSCTP), &h_libsctp, NULL)
+ == 0) {
+ void *ptr;
+ if (erts_sys_ddll_sym(h_libsctp, "sctp_bindx", &ptr) == 0) {
+ p_sctp_bindx = ptr;
+ inet_init_sctp();
+ add_driver_entry(&sctp_inet_driver_entry);
+ if (erts_sys_ddll_sym(h_libsctp, "sctp_peeloff", &ptr) == 0) {
+ p_sctp_peeloff = ptr;
+ }
+ }
}
}
+# endif
#endif
/* remove the dummy inet driver */
@@ -3709,6 +3800,8 @@ static int inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
/* check that it is a socket and that the socket is bound */
if (IS_SOCKET_ERROR(sock_name(s, (struct sockaddr*) &name, &sz)))
return ctl_error(sock_errno(), rbuf, rsize);
+ if (name.sa.sa_family != domain)
+ return ctl_error(EINVAL, rbuf, rsize);
desc->s = s;
if ((desc->event = sock_create_event(desc)) == INVALID_EVENT)
return ctl_error(sock_errno(), rbuf, rsize);
@@ -4457,6 +4550,7 @@ static int inet_ctl_ifset(inet_descriptor* desc, char* buf, int len,
+#if defined(__WIN32__) || defined(HAVE_GETIFADDRS)
/* Latin-1 to utf8 */
static int utf8_len(const char *c, int m) {
@@ -4479,6 +4573,7 @@ static void utf8_encode(const char *c, int m, char *p) {
}
}
}
+#endif
#if defined(__WIN32__)
@@ -6537,7 +6632,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
struct linger lg;
unsigned int sz = sizeof(lg);
- if (sock_getopt(desc->s, IPPROTO_SCTP, SO_LINGER,
+ if (sock_getopt(desc->s, SOL_SOCKET, SO_LINGER,
&lg, &sz) < 0) continue;
/* Fill in the response: */
PLACE_FOR(spec, i,
@@ -6573,7 +6668,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
{
case INET_OPT_RCVBUF :
{
- proto = IPPROTO_SCTP;
+ proto = SOL_SOCKET;
type = SO_RCVBUF;
is_int = 1;
tag = am_recbuf;
@@ -6581,7 +6676,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
}
case INET_OPT_SNDBUF :
{
- proto = IPPROTO_SCTP;
+ proto = SOL_SOCKET;
type = SO_SNDBUF;
is_int = 1;
tag = am_sndbuf;
@@ -6734,7 +6829,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
2*LOAD_ATOM_CNT + LOAD_INT_CNT + 2*LOAD_TUPLE_CNT);
i = LOAD_ATOM (spec, i, am_sctp_adaptation_layer);
i = LOAD_ATOM (spec, i, am_sctp_setadaptation);
- i = LOAD_INT (spec, i, ad.ssb_adaptation_ind);
+ i = LOAD_INT (spec, i, sock_ntohl(ad.ssb_adaptation_ind));
i = LOAD_TUPLE (spec, i, 2);
i = LOAD_TUPLE (spec, i, 2);
break;
@@ -6877,7 +6972,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
break;
}
/* The following option is not available in Solaris 10: */
-# ifdef SCTP_DELAYED_ACK_TIME
+# if HAVE_DECL_SCTP_DELAYED_ACK_TIME
case SCTP_OPT_DELAYED_ACK_TIME:
{
struct sctp_assoc_value av;
@@ -6924,7 +7019,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
switch(st.sstat_state)
{
/* SCTP_EMPTY is not supported on SOLARIS10: */
-# ifdef SCTP_EMPTY
+# if HAVE_DECL_SCTP_EMPTY
case SCTP_EMPTY:
i = LOAD_ATOM (spec, i, am_empty);
break;
@@ -6932,14 +7027,16 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
case SCTP_CLOSED:
i = LOAD_ATOM (spec, i, am_closed);
break;
- /* The following states are not supported by Linux Kernel SCTP yet:
+# if HAVE_DECL_SCTP_BOUND
case SCTP_BOUND:
i = LOAD_ATOM (spec, i, am_bound);
break;
+# endif
+# if HAVE_DECL_SCTP_LISTEN
case SCTP_LISTEN:
i = LOAD_ATOM (spec, i, am_listen);
break;
- */
+# endif
case SCTP_COOKIE_WAIT:
i = LOAD_ATOM (spec, i, am_cookie_wait);
break;
@@ -7010,7 +7107,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
default:
RETURN_ERROR(spec, -EINVAL); /* No more valid options */
}
- /* If we get here one result has been succesfully loaded */
+ /* If we get here one result has been successfully loaded */
length ++;
}
if (buflen != 0) RETURN_ERROR(spec, -EINVAL); /* Optparam mismatch */
@@ -7027,11 +7124,10 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
i = LOAD_TUPLE(spec, i, 3);
/* Now, convert "spec" into the returnable term: */
- /* desc->caller = 0; What does it mean? */
- driver_output_term(desc->port, spec, i);
+ driver_send_term(desc->port, driver_caller(desc->port), spec, i);
FREE(spec);
- (*dest)[0] = INET_REP_SCTP;
+ (*dest)[0] = INET_REP;
return 1; /* Response length */
# undef PLACE_FOR
# undef RETURN_ERROR
@@ -7806,22 +7902,22 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
static void tcp_close_check(tcp_descriptor* desc)
{
/* XXX:PaN - multiple clients to handle! */
- if (desc->inet.state == TCP_STATE_ACCEPTING) {
+ if (desc->inet.state == INET_STATE_ACCEPTING) {
inet_async_op *this_op = desc->inet.opt;
sock_select(INETP(desc), FD_ACCEPT, 0);
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
if (this_op != NULL) {
driver_demonitor_process(desc->inet.port, &(this_op->monitor));
}
async_error_am(INETP(desc), am_closed);
}
- else if (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ else if (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
int id,req;
ErlDrvTermData caller;
ErlDrvMonitor monitor;
sock_select(INETP(desc), FD_ACCEPT, 0);
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
while (deq_multi_op(desc,&id,&req,&caller,NULL,&monitor) == 0) {
driver_demonitor_process(desc->inet.port, &monitor);
send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_closed);
@@ -7829,10 +7925,10 @@ static void tcp_close_check(tcp_descriptor* desc)
clean_multi_timers(&(desc->mtd), desc->inet.port);
}
- else if (desc->inet.state == TCP_STATE_CONNECTING) {
+ else if (desc->inet.state == INET_STATE_CONNECTING) {
async_error_am(INETP(desc), am_closed);
}
- else if (desc->inet.state == TCP_STATE_CONNECTED) {
+ else if (desc->inet.state == INET_STATE_CONNECTED) {
async_error_am_all(INETP(desc), am_closed);
}
}
@@ -7864,40 +7960,62 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
{
tcp_descriptor* desc = (tcp_descriptor*)e;
switch(cmd) {
- case INET_REQ_OPEN: /* open socket and return internal index */
+ case INET_REQ_OPEN: { /* open socket and return internal index */
+ int domain;
DEBUGF(("tcp_inet_ctl(%ld): OPEN\r\n", (long)desc->inet.port));
- if ((len == 1) && (buf[0] == INET_AF_INET))
- return
- inet_ctl_open(INETP(desc), AF_INET, SOCK_STREAM, rbuf, rsize);
+ if (len != 2) return ctl_error(EINVAL, rbuf, rsize);
+ switch(buf[0]) {
+ case INET_AF_INET:
+ domain = AF_INET;
+ break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- else if ((len == 1) && (buf[0] == INET_AF_INET6))
- return
- inet_ctl_open(INETP(desc), AF_INET6, SOCK_STREAM, rbuf, rsize);
+ case INET_AF_INET6:
+ domain = AF_INET6;
+ break;
#else
- else if ((len == 1) && (buf[0] == INET_AF_INET6))
- return ctl_xerror("eafnosupport",rbuf,rsize);
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
#endif
- else
+ default:
return ctl_error(EINVAL, rbuf, rsize);
+ }
+ if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize);
+ return inet_ctl_open(INETP(desc), domain, SOCK_STREAM, rbuf, rsize);
+ break;
+ }
- case INET_REQ_FDOPEN: /* pass in an open socket */
- DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port));
- if ((len == 5) && (buf[0] == INET_AF_INET))
- return inet_ctl_fdopen(INETP(desc), AF_INET, SOCK_STREAM,
- (SOCKET) get_int32(buf+1), rbuf, rsize);
+ case INET_REQ_FDOPEN: { /* pass in an open socket */
+ int domain;
+ DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port));
+ if (len != 6) return ctl_error(EINVAL, rbuf, rsize);
+ switch(buf[0]) {
+ case INET_AF_INET:
+ domain = AF_INET;
+ break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- else if ((len == 5) && (buf[0] == INET_AF_INET6))
- return inet_ctl_fdopen(INETP(desc), AF_INET6, SOCK_STREAM,
- (SOCKET) get_int32(buf+1), rbuf, rsize);
+ case INET_AF_INET6:
+ domain = AF_INET6;
+ break;
+#else
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
#endif
- else
+ default:
return ctl_error(EINVAL, rbuf, rsize);
+ }
+ if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize);
+ return inet_ctl_fdopen(INETP(desc), domain, SOCK_STREAM,
+ (SOCKET) get_int32(buf+2), rbuf, rsize);
+ break;
+ }
- case TCP_REQ_LISTEN: { /* argument backlog */
+ case INET_REQ_LISTEN: { /* argument backlog */
int backlog;
DEBUGF(("tcp_inet_ctl(%ld): LISTEN\r\n", (long)desc->inet.port));
- if (desc->inet.state == TCP_STATE_CLOSED)
+ if (desc->inet.state == INET_STATE_CLOSED)
return ctl_xerror(EXBADPORT, rbuf, rsize);
if (!IS_OPEN(INETP(desc)))
return ctl_xerror(EXBADPORT, rbuf, rsize);
@@ -7908,7 +8026,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
backlog = get_int16(buf);
if (IS_SOCKET_ERROR(sock_listen(desc->inet.s, backlog)))
return ctl_error(sock_errno(), rbuf, rsize);
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
@@ -7944,13 +8062,13 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
((sock_errno() == ERRNO_BLOCK) || /* Winsock2 */
(sock_errno() == EINPROGRESS))) { /* Unix & OSE!! */
sock_select(INETP(desc), FD_CONNECT, 1);
- desc->inet.state = TCP_STATE_CONNECTING;
+ desc->inet.state = INET_STATE_CONNECTING;
if (timeout != INET_INFINITY)
driver_set_timer(desc->inet.port, timeout);
enq_async(INETP(desc), tbuf, INET_REQ_CONNECT);
}
else if (code == 0) { /* ok we are connected */
- desc->inet.state = TCP_STATE_CONNECTED;
+ desc->inet.state = INET_STATE_CONNECTED;
if (desc->inet.active)
sock_select(INETP(desc), (FD_READ|FD_CLOSE), 1);
enq_async(INETP(desc), tbuf, INET_REQ_CONNECT);
@@ -7962,7 +8080,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
}
- case TCP_REQ_ACCEPT: { /* do async accept */
+ case INET_REQ_ACCEPT: { /* do async accept */
char tbuf[2];
unsigned timeout;
inet_address remote;
@@ -7972,14 +8090,14 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
DEBUGF(("tcp_inet_ctl(%ld): ACCEPT\r\n", (long)desc->inet.port));
/* INPUT: Timeout(4) */
- if ((desc->inet.state != TCP_STATE_LISTEN && desc->inet.state != TCP_STATE_ACCEPTING &&
- desc->inet.state != TCP_STATE_MULTI_ACCEPTING) || len != 4) {
+ if ((desc->inet.state != INET_STATE_LISTENING && desc->inet.state != INET_STATE_ACCEPTING &&
+ desc->inet.state != INET_STATE_MULTI_ACCEPTING) || len != 4) {
return ctl_error(EINVAL, rbuf, rsize);
}
timeout = get_int32(buf);
- if (desc->inet.state == TCP_STATE_ACCEPTING) {
+ if (desc->inet.state == INET_STATE_ACCEPTING) {
unsigned long time_left = 0;
int oid = 0;
ErlDrvTermData ocaller = ERL_DRV_NIL;
@@ -8008,10 +8126,10 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
mtd = add_multi_timer(&(desc->mtd), desc->inet.port, caller,
timeout, &tcp_inet_multi_timeout);
}
- enq_multi_op(desc, tbuf, TCP_REQ_ACCEPT, caller, mtd, &monitor);
- desc->inet.state = TCP_STATE_MULTI_ACCEPTING;
+ enq_multi_op(desc, tbuf, INET_REQ_ACCEPT, caller, mtd, &monitor);
+ desc->inet.state = INET_STATE_MULTI_ACCEPTING;
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
- } else if (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ } else if (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
ErlDrvTermData caller = driver_caller(desc->inet.port);
MultiTimerData *mtd = NULL;
ErlDrvMonitor monitor;
@@ -8023,7 +8141,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
mtd = add_multi_timer(&(desc->mtd), desc->inet.port, caller,
timeout, &tcp_inet_multi_timeout);
}
- enq_multi_op(desc, tbuf, TCP_REQ_ACCEPT, caller, mtd, &monitor);
+ enq_multi_op(desc, tbuf, INET_REQ_ACCEPT, caller, mtd, &monitor);
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
} else {
n = sizeof(desc->inet.remote);
@@ -8035,8 +8153,8 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
&monitor) != 0) {
return ctl_xerror("noproc", rbuf, rsize);
}
- enq_async_w_tmo(INETP(desc), tbuf, TCP_REQ_ACCEPT, timeout, &monitor);
- desc->inet.state = TCP_STATE_ACCEPTING;
+ enq_async_w_tmo(INETP(desc), tbuf, INET_REQ_ACCEPT, timeout, &monitor);
+ desc->inet.state = INET_STATE_ACCEPTING;
sock_select(INETP(desc),FD_ACCEPT,1);
if (timeout != INET_INFINITY) {
driver_set_timer(desc->inet.port, timeout);
@@ -8063,8 +8181,8 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
driver_select(accept_desc->inet.port, accept_desc->inet.event,
ERL_DRV_READ, 1);
#endif
- accept_desc->inet.state = TCP_STATE_CONNECTED;
- enq_async(INETP(desc), tbuf, TCP_REQ_ACCEPT);
+ accept_desc->inet.state = INET_STATE_CONNECTED;
+ enq_async(INETP(desc), tbuf, INET_REQ_ACCEPT);
async_ok_port(INETP(desc), accept_desc->inet.dport);
}
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
@@ -8170,7 +8288,7 @@ static void tcp_inet_timeout(ErlDrvData e)
(long)desc->inet.port, desc->inet.s));
if ((state & INET_F_MULTI_CLIENT)) { /* Multi-client always means multi-timers */
fire_multi_timers(&(desc->mtd), desc->inet.port, e);
- } else if ((state & TCP_STATE_CONNECTED) == TCP_STATE_CONNECTED) {
+ } else if ((state & INET_STATE_CONNECTED) == INET_STATE_CONNECTED) {
if (desc->busy_on_send) {
ASSERT(IS_BUSY(INETP(desc)));
desc->inet.caller = desc->inet.busy_caller;
@@ -8190,20 +8308,20 @@ static void tcp_inet_timeout(ErlDrvData e)
async_error_am(INETP(desc), am_timeout);
}
}
- else if ((state & TCP_STATE_CONNECTING) == TCP_STATE_CONNECTING) {
+ else if ((state & INET_STATE_CONNECTING) == INET_STATE_CONNECTING) {
/* assume connect timeout */
/* close the socket since it's not usable (see man pages) */
erl_inet_close(INETP(desc));
async_error_am(INETP(desc), am_timeout);
}
- else if ((state & TCP_STATE_ACCEPTING) == TCP_STATE_ACCEPTING) {
+ else if ((state & INET_STATE_ACCEPTING) == INET_STATE_ACCEPTING) {
inet_async_op *this_op = desc->inet.opt;
/* timer is set on accept */
sock_select(INETP(desc), FD_ACCEPT, 0);
if (this_op != NULL) {
driver_demonitor_process(desc->inet.port, &(this_op->monitor));
}
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
async_error_am(INETP(desc), am_timeout);
}
DEBUGF(("tcp_inet_timeout(%ld) }\r\n", (long)desc->inet.port));
@@ -8221,7 +8339,7 @@ static void tcp_inet_multi_timeout(ErlDrvData e, ErlDrvTermData caller)
driver_demonitor_process(desc->inet.port, &monitor);
if (desc->multi_first == NULL) {
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_timeout);
}
@@ -8287,7 +8405,7 @@ static void tcp_inet_process_exit(ErlDrvData e, ErlDrvMonitor *monitorp)
ErlDrvTermData who = driver_get_monitored_process(desc->inet.port,monitorp);
int state = desc->inet.state;
- if ((state & TCP_STATE_MULTI_ACCEPTING) == TCP_STATE_MULTI_ACCEPTING) {
+ if ((state & INET_STATE_MULTI_ACCEPTING) == INET_STATE_MULTI_ACCEPTING) {
int id,req;
MultiTimerData *timeout;
if (remove_multi_op(desc, &id, &req, who, &timeout, NULL) != 0) {
@@ -8298,15 +8416,15 @@ static void tcp_inet_process_exit(ErlDrvData e, ErlDrvMonitor *monitorp)
}
if (desc->multi_first == NULL) {
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
- } else if ((state & TCP_STATE_ACCEPTING) == TCP_STATE_ACCEPTING) {
+ } else if ((state & INET_STATE_ACCEPTING) == INET_STATE_ACCEPTING) {
int did,drid;
ErlDrvTermData dcaller;
deq_async(INETP(desc), &did, &dcaller, &drid);
driver_cancel_timer(desc->inet.port);
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
}
@@ -8496,32 +8614,29 @@ static int tcp_deliver(tcp_descriptor* desc, int len)
}
while (len > 0) {
- int code = 0;
+ int code;
inet_input_count(INETP(desc), len);
/* deliver binary? */
if (len*4 >= desc->i_buf->orig_size*3) { /* >=75% */
+ code = tcp_reply_binary_data(desc, desc->i_buf,
+ (desc->i_ptr_start -
+ desc->i_buf->orig_bytes),
+ len);
+ if (code < 0)
+ return code;
+
/* something after? */
if (desc->i_ptr_start + len == desc->i_ptr) { /* no */
- code = tcp_reply_binary_data(desc, desc->i_buf,
- (desc->i_ptr_start -
- desc->i_buf->orig_bytes),
- len);
tcp_clear_input(desc);
}
else { /* move trail to beginning of a new buffer */
- ErlDrvBinary* bin;
+ ErlDrvBinary* bin = alloc_buffer(desc->i_bufsz);
char* ptr_end = desc->i_ptr_start + len;
int sz = desc->i_ptr - ptr_end;
- bin = alloc_buffer(desc->i_bufsz);
memcpy(bin->orig_bytes, ptr_end, sz);
-
- code = tcp_reply_binary_data(desc, desc->i_buf,
- (desc->i_ptr_start-
- desc->i_buf->orig_bytes),
- len);
free_buffer(desc->i_buf);
desc->i_buf = bin;
desc->i_ptr_start = desc->i_buf->orig_bytes;
@@ -8533,17 +8648,15 @@ static int tcp_deliver(tcp_descriptor* desc, int len)
code = tcp_reply_data(desc, desc->i_ptr_start, len);
/* XXX The buffer gets thrown away on error (code < 0) */
/* Windows needs workaround for this in tcp_inet_event... */
+ if (code < 0)
+ return code;
desc->i_ptr_start += len;
if (desc->i_ptr_start == desc->i_ptr)
tcp_clear_input(desc);
else
desc->i_remain = 0;
-
}
- if (code < 0)
- return code;
-
count++;
len = 0;
@@ -8848,8 +8961,8 @@ static void tcp_inet_event(ErlDrvData e, ErlDrvEvent event)
/* socket has input:
-** 1. TCP_STATE_ACCEPTING => non block accept ?
-** 2. TCP_STATE_CONNECTED => read input
+** 1. INET_STATE_ACCEPTING => non block accept ?
+** 2. INET_STATE_CONNECTED => read input
*/
static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
{
@@ -8858,7 +8971,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
long port = (long) desc->inet.port; /* Used after driver_exit() */
#endif
DEBUGF(("tcp_inet_input(%ld) {s=%d\r\n", port, desc->inet.s));
- if (desc->inet.state == TCP_STATE_ACCEPTING) {
+ if (desc->inet.state == INET_STATE_ACCEPTING) {
SOCKET s;
unsigned int len;
inet_address remote;
@@ -8873,7 +8986,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
}
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
if (this_op != NULL) {
driver_demonitor_process(desc->inet.port, &(this_op->monitor));
@@ -8913,11 +9026,11 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
driver_select(accept_desc->inet.port, accept_desc->inet.event,
ERL_DRV_READ, 1);
#endif
- accept_desc->inet.state = TCP_STATE_CONNECTED;
+ accept_desc->inet.state = INET_STATE_CONNECTED;
ret = async_ok_port(INETP(desc), accept_desc->inet.dport);
goto done;
}
- } else if (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ } else if (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
SOCKET s;
unsigned int len;
inet_address remote;
@@ -8929,7 +9042,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
int times = 0;
#endif
- while (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ while (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
len = sizeof(desc->inet.remote);
s = sock_accept(desc->inet.s, (struct sockaddr*) &remote, &len);
@@ -8949,7 +9062,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
if (desc->multi_first == NULL) {
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
if (timeout != NULL) {
@@ -8980,7 +9093,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
driver_select(accept_desc->inet.port, accept_desc->inet.event,
ERL_DRV_READ, 1);
#endif
- accept_desc->inet.state = TCP_STATE_CONNECTED;
+ accept_desc->inet.state = INET_STATE_CONNECTED;
ret = send_async_ok_port(desc->inet.port, desc->inet.dport,
id, caller, accept_desc->inet.dport);
}
@@ -9258,8 +9371,8 @@ static void tcp_inet_drv_input(ErlDrvData data, ErlDrvEvent event)
}
/* socket ready for output:
-** 1. TCP_STATE_CONNECTING => non block connect ?
-** 2. TCP_STATE_CONNECTED => write output
+** 1. INET_STATE_CONNECTING => non block connect ?
+** 2. INET_STATE_CONNECTED => write output
*/
static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
{
@@ -9268,7 +9381,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
DEBUGF(("tcp_inet_output(%ld) {s=%d\r\n",
(long)desc->inet.port, desc->inet.s));
- if (desc->inet.state == TCP_STATE_CONNECTING) {
+ if (desc->inet.state == INET_STATE_CONNECTING) {
sock_select(INETP(desc),FD_CONNECT,0);
driver_cancel_timer(ix); /* possibly cancel a timer */
@@ -9288,7 +9401,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
(struct sockaddr*) &desc->inet.remote, &sz);
if (IS_SOCKET_ERROR(code)) {
- desc->inet.state = TCP_STATE_BOUND; /* restore state */
+ desc->inet.state = INET_STATE_BOUND; /* restore state */
ret = async_error(INETP(desc), sock_errno());
goto done;
}
@@ -9301,15 +9414,15 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
(void *)&error, &sz);
if ((code < 0) || error) {
- desc->inet.state = TCP_STATE_BOUND; /* restore state */
+ desc->inet.state = INET_STATE_BOUND; /* restore state */
ret = async_error(INETP(desc), error);
goto done;
}
}
-#endif /* SOCKOPT_CONNECT_STAT */
+#endif /* SO_ERROR */
#endif /* !__WIN32__ */
- desc->inet.state = TCP_STATE_CONNECTED;
+ desc->inet.state = INET_STATE_CONNECTED;
if (desc->inet.active)
sock_select(INETP(desc),(FD_READ|FD_CLOSE),1);
async_ok(INETP(desc));
@@ -9409,6 +9522,59 @@ static int should_use_so_bsdcompat(void)
#endif /* __linux__ */
#endif /* HAVE_SO_BSDCOMPAT */
+
+
+#ifdef HAVE_SCTP
+/* Copy a descriptor, by creating a new port with same settings
+ * as the descriptor desc.
+ * return NULL on error (ENFILE no ports avail)
+ */
+static udp_descriptor* sctp_inet_copy(udp_descriptor* desc, SOCKET s, int* err)
+{
+ ErlDrvPort port = desc->inet.port;
+ udp_descriptor* copy_desc;
+
+ copy_desc = (udp_descriptor*) sctp_inet_start(port, NULL);
+
+ /* Setup event if needed */
+ if ((copy_desc->inet.s = s) != INVALID_SOCKET) {
+ if ((copy_desc->inet.event = sock_create_event(INETP(copy_desc))) ==
+ INVALID_EVENT) {
+ *err = sock_errno();
+ FREE(copy_desc);
+ return NULL;
+ }
+ }
+
+ /* Some flags must be inherited at this point */
+ copy_desc->inet.mode = desc->inet.mode;
+ copy_desc->inet.exitf = desc->inet.exitf;
+ copy_desc->inet.bit8f = desc->inet.bit8f;
+ copy_desc->inet.deliver = desc->inet.deliver;
+ copy_desc->inet.htype = desc->inet.htype;
+ copy_desc->inet.psize = desc->inet.psize;
+ copy_desc->inet.stype = desc->inet.stype;
+ copy_desc->inet.sfamily = desc->inet.sfamily;
+ copy_desc->inet.hsz = desc->inet.hsz;
+ copy_desc->inet.bufsz = desc->inet.bufsz;
+
+ /* The new port will be linked and connected to the caller */
+ port = driver_create_port(port, desc->inet.caller, "sctp_inet",
+ (ErlDrvData) copy_desc);
+ if ((long)port == -1) {
+ *err = ENFILE;
+ FREE(copy_desc);
+ return NULL;
+ }
+ copy_desc->inet.port = port;
+ copy_desc->inet.dport = driver_mk_port(port);
+ *err = 0;
+ return copy_desc;
+}
+#endif
+
+
+
static int packet_inet_init()
{
return 0;
@@ -9427,6 +9593,9 @@ static ErlDrvData packet_inet_start(ErlDrvPort port, char* args, int protocol)
return ERL_DRV_ERROR_ERRNO;
desc->read_packets = INET_PACKET_POLL;
+ desc->i_bufsz = 0;
+ desc->i_buf = NULL;
+ desc->i_ptr = NULL;
return drvd;
}
@@ -9451,6 +9620,10 @@ static void packet_inet_stop(ErlDrvData e)
*/
udp_descriptor * udesc = (udp_descriptor*) e;
inet_descriptor* descr = INETP(udesc);
+ if (udesc->i_buf != NULL) {
+ release_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
+ }
ASSERT(NO_SUBSCRIBERS(&(descr->empty_out_q_subs)));
inet_stop(descr);
@@ -9475,21 +9648,31 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
udp_descriptor * udesc = (udp_descriptor *) e;
inet_descriptor* desc = INETP(udesc);
int type = SOCK_DGRAM;
- int af;
-#ifdef HAVE_SCTP
- if (IS_SCTP(desc)) type = SOCK_SEQPACKET;
-#endif
+ int af = AF_INET;
switch(cmd) {
case INET_REQ_OPEN: /* open socket and return internal index */
DEBUGF(("packet_inet_ctl(%ld): OPEN\r\n", (long)desc->port));
- if (len != 1) {
+ if (len != 2) {
return ctl_error(EINVAL, rbuf, rsize);
}
switch (buf[0]) {
case INET_AF_INET: af = AF_INET; break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- case INET_AF_INET6: af = AF_INET6; break;
+ case INET_AF_INET6: af = AF_INET6; break;
+#else
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
+#endif
+ default:
+ return ctl_error(EINVAL, rbuf, rsize);
+ }
+ switch (buf[1]) {
+ case INET_TYPE_STREAM: type = SOCK_STREAM; break;
+ case INET_TYPE_DGRAM: type = SOCK_DGRAM; break;
+#ifdef HAVE_SCTP
+ case INET_TYPE_SEQPACKET: type = SOCK_SEQPACKET; break;
#endif
default:
return ctl_error(EINVAL, rbuf, rsize);
@@ -9516,18 +9699,35 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return replen;
- case INET_REQ_FDOPEN: /* pass in an open (and bound) socket */
+ case INET_REQ_FDOPEN: { /* pass in an open (and bound) socket */
+ SOCKET s;
DEBUGF(("packet inet_ctl(%ld): FDOPEN\r\n", (long)desc->port));
- if ((len == 5) && (buf[0] == INET_AF_INET))
- replen = inet_ctl_fdopen(desc, AF_INET, SOCK_DGRAM,
- (SOCKET)get_int32(buf+1),rbuf,rsize);
+ if (len != 6) {
+ return ctl_error(EINVAL, rbuf, rsize);
+ }
+ switch (buf[0]) {
+ case INET_AF_INET: af = AF_INET; break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- else if ((len == 5) && (buf[0] == INET_AF_INET6))
- replen = inet_ctl_fdopen(desc, AF_INET6, SOCK_DGRAM,
- (SOCKET)get_int32(buf+1),rbuf,rsize);
+ case INET_AF_INET6: af = AF_INET6; break;
+#else
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
#endif
- else
+ default:
return ctl_error(EINVAL, rbuf, rsize);
+ }
+ switch (buf[1]) {
+ case INET_TYPE_STREAM: type = SOCK_STREAM; break;
+ case INET_TYPE_DGRAM: type = SOCK_DGRAM; break;
+#ifdef HAVE_SCTP
+ case INET_TYPE_SEQPACKET: type = SOCK_SEQPACKET; break;
+#endif
+ default:
+ return ctl_error(EINVAL, rbuf, rsize);
+ }
+ s = (SOCKET)get_int32(buf+2);
+ replen = inet_ctl_fdopen(desc, af, type, s, rbuf, rsize);
if ((*rbuf)[0] != INET_REP_ERROR) {
if (desc->active)
@@ -9547,6 +9747,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
#endif
}
return replen;
+ }
case INET_REQ_CLOSE:
@@ -9599,14 +9800,14 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (IS_SOCKET_ERROR(code) && (sock_errno() == EINPROGRESS)) {
/* XXX: Unix only -- WinSock would have a different cond! */
- desc->state = SCTP_STATE_CONNECTING;
+ desc->state = INET_STATE_CONNECTING;
if (timeout != INET_INFINITY)
driver_set_timer(desc->port, timeout);
enq_async(desc, tbuf, INET_REQ_CONNECT);
}
else if (code == 0) { /* OK we are connected */
sock_select(desc, FD_CONNECT, 0);
- desc->state = PACKET_STATE_CONNECTED;
+ desc->state = INET_STATE_CONNECTED;
enq_async(desc, tbuf, INET_REQ_CONNECT);
async_ok(desc);
}
@@ -9652,11 +9853,11 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
}
#ifdef HAVE_SCTP
- case SCTP_REQ_LISTEN:
+ case INET_REQ_LISTEN:
{ /* LISTEN is only for SCTP sockets, not UDP. This code is borrowed
from the TCP section. Returns: {ok,[]} on success.
*/
- int flag;
+ int backlog;
DEBUGF(("packet_inet_ctl(%ld): LISTEN\r\n", (long)desc->port));
if (!IS_SCTP(desc))
@@ -9666,15 +9867,14 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (!IS_BOUND(desc))
return ctl_xerror(EXBADSEQ, rbuf, rsize);
- /* The arg is a binary value: 1:enable, 0:disable */
- if (len != 1)
+ if (len != 2)
return ctl_error(EINVAL, rbuf, rsize);
- flag = get_int8(buf);
+ backlog = get_int16(buf);
- if (IS_SOCKET_ERROR(sock_listen(desc->s, flag)))
+ if (IS_SOCKET_ERROR(sock_listen(desc->s, backlog)))
return ctl_error(sock_errno(), rbuf, rsize);
- desc->state = SCTP_STATE_LISTEN; /* XXX: not used? */
+ desc->state = INET_STATE_LISTENING; /* XXX: not used? */
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
@@ -9720,6 +9920,46 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
+
+ case SCTP_REQ_PEELOFF:
+ {
+ Uint32 assoc_id;
+ udp_descriptor* new_udesc;
+ int err;
+ SOCKET new_socket;
+
+ DEBUGF(("packet_inet_ctl(%ld): PEELOFF\r\n", (long)desc->port));
+ if (!IS_SCTP(desc))
+ return ctl_xerror(EXBADPORT, rbuf, rsize);
+ if (!IS_OPEN(desc))
+ return ctl_xerror(EXBADPORT, rbuf, rsize);
+ if (!IS_BOUND(desc))
+ return ctl_xerror(EXBADSEQ, rbuf, rsize);
+ if (! p_sctp_peeloff)
+ return ctl_error(ENOTSUP, rbuf, rsize);
+
+ if (len != 4)
+ return ctl_error(EINVAL, rbuf, rsize);
+ assoc_id = get_int32(buf);
+
+ new_socket = p_sctp_peeloff(desc->s, assoc_id);
+ if (IS_SOCKET_ERROR(new_socket)) {
+ return ctl_error(sock_errno(), rbuf, rsize);
+ }
+
+ desc->caller = driver_caller(desc->port);
+ if ((new_udesc = sctp_inet_copy(udesc, new_socket, &err)) == NULL) {
+ sock_close(new_socket);
+ desc->caller = 0;
+ return ctl_error(err, rbuf, rsize);
+ }
+ new_udesc->inet.state = INET_STATE_CONNECTED;
+ new_udesc->inet.stype = SOCK_STREAM;
+
+ inet_reply_ok_port(desc, new_udesc->inet.dport);
+ (*rbuf)[0] = INET_REP;
+ return 1;
+ }
#endif /* HAVE_SCTP */
case PACKET_REQ_RECV:
@@ -9739,7 +9979,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (desc->active || (len != 8))
return ctl_error(EINVAL, rbuf, rsize);
timeout = get_int32(buf);
- /* The 2nd arg, Length(4), is ignored for both UDP ans SCTP protocols,
+ /* The 2nd arg, Length(4), is ignored for both UDP and SCTP protocols,
since they are msg-oriented. */
if (enq_async(desc, tbuf, PACKET_REQ_RECV) < 0)
@@ -9918,12 +10158,8 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
{
inet_descriptor* desc = INETP(udesc);
int n;
- unsigned int len;
inet_address other;
char abuf[sizeof(inet_address)]; /* buffer address; enough??? */
- int sz;
- char* ptr;
- ErlDrvBinary* buf; /* binary */
int packet_count = udesc->read_packets;
int count = 0; /* number of packets delivered to owner */
#ifdef HAVE_SCTP
@@ -9934,23 +10170,39 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
#endif
while(packet_count--) {
- len = sizeof(other);
- sz = desc->bufsz;
- /* Allocate space for message and address. NB: "bufsz" is in "desc",
- but the "buf" itself is allocated separately:
- */
- if ((buf = alloc_buffer(sz+len)) == NULL)
- return packet_error(udesc, ENOMEM);
- ptr = buf->orig_bytes + len; /* pointer to message part */
+ unsigned int len = sizeof(other);
+
+ /* udesc->i_buf is only kept between SCTP fragments */
+ if (udesc->i_buf == NULL) {
+ udesc->i_bufsz = desc->bufsz + len;
+ if ((udesc->i_buf = alloc_buffer(udesc->i_bufsz)) == NULL)
+ return packet_error(udesc, ENOMEM);
+ /* pointer to message start */
+ udesc->i_ptr = udesc->i_buf->orig_bytes + len;
+ } else {
+ ErlDrvBinary* tmp;
+ int bufsz;
+ bufsz = desc->bufsz + (udesc->i_ptr - udesc->i_buf->orig_bytes);
+ if ((tmp = realloc_buffer(udesc->i_buf, bufsz)) == NULL) {
+ release_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
+ return packet_error(udesc, ENOMEM);
+ } else {
+ udesc->i_ptr =
+ tmp->orig_bytes + (udesc->i_ptr - udesc->i_buf->orig_bytes);
+ udesc->i_buf = tmp;
+ udesc->i_bufsz = bufsz;
+ }
+ }
/* Note: On Windows NT, recvfrom() fails if the socket is connected. */
#ifdef HAVE_SCTP
/* For SCTP we must use recvmsg() */
if (IS_SCTP(desc)) {
- iov->iov_base = ptr; /* Data will come here */
- iov->iov_len = sz; /* Remaining buffer space */
+ iov->iov_base = udesc->i_ptr; /* Data will come here */
+ iov->iov_len = desc->bufsz; /* Remaining buffer space */
- mhdr.msg_name = &other; /* Peer addr comes into "other" */
+ mhdr.msg_name = &other; /* Peer addr comes into "other" */
mhdr.msg_namelen = len;
mhdr.msg_iov = iov;
mhdr.msg_iovlen = 1;
@@ -9960,42 +10212,28 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
/* Do the actual SCTP receive: */
n = sock_recvmsg(desc->s, &mhdr, 0);
+ len = mhdr.msg_namelen;
goto check_result;
}
#endif
/* Use recv() instead on connected sockets. */
if ((desc->state & INET_F_ACTIVE)) {
- n = sock_recv(desc->s, ptr, sz, 0);
+ n = sock_recv(desc->s, udesc->i_ptr, desc->bufsz, 0);
other = desc->remote;
+ goto check_result;
}
- else
- n = sock_recvfrom(desc->s, ptr, sz, 0, &other.sa, &len);
-
-#ifdef HAVE_SCTP
+ n = sock_recvfrom(desc->s, udesc->i_ptr, desc->bufsz,
+ 0, &other.sa, &len);
check_result:
-#endif
/* Analyse the result: */
- if (IS_SOCKET_ERROR(n)
-#ifdef HAVE_SCTP
- || (short_recv = (IS_SCTP(desc) && !(mhdr.msg_flags & MSG_EOR)))
- /* NB: here we check for EOR not being set -- this is an error as
- well, we don't support partial msgs:
- */
-#endif
- ) {
+ if (IS_SOCKET_ERROR(n)) {
int err = sock_errno();
- release_buffer(buf);
if (err != ERRNO_BLOCK) {
+ /* real error */
+ release_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
if (!desc->active) {
-#ifdef HAVE_SCTP
- if (short_recv) {
- async_error_am(desc, am_short_recv);
- } else {
- async_error(desc, err);
- }
-#else
async_error(desc, err);
-#endif
driver_cancel_timer(desc->port);
sock_select(desc,FD_READ,0);
}
@@ -10003,46 +10241,69 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
/* This is for an active desc only: */
packet_error_message(udesc, err);
}
+ return count;
}
- else if (!desc->active)
+ /* would block error - try again */
+ if (!desc->active
+#ifdef HAVE_SCTP
+ || short_recv
+#endif
+ ) {
sock_select(desc,FD_READ,1);
+ }
return count; /* strange, not ready */
}
- else {
- int offs;
- int nsz;
+
+#ifdef HAVE_SCTP
+ if (IS_SCTP(desc) && (short_recv = !(mhdr.msg_flags & MSG_EOR))) {
+ /* SCTP non-final message fragment */
+ inet_input_count(desc, n);
+ udesc->i_ptr += n;
+ continue; /* wait for more fragments */
+ }
+#endif
+
+ {
+ /* message received */
int code;
- unsigned int alen = len;
void * extra = NULL;
+ char * ptr;
inet_input_count(desc, n);
- inet_get_address(desc->sfamily, abuf, &other, &alen);
- /* Copy formatted address to the buffer allocated; "alen" is the
- actual length which must be <= than the original reserved "len".
+ udesc->i_ptr += n;
+ inet_get_address(desc->sfamily, abuf, &other, &len);
+ /* Copy formatted address to the buffer allocated; "len" is the
+ actual length, which must be <= the originally reserved length.
This means that the addr + data in the buffer are contiguous,
- but they may start not at the "orig_bytes", but with some "offs"
- from them:
+ but they may not start at "orig_bytes"; instead they start at "ptr":
*/
- ASSERT (alen <= len);
- sys_memcpy(ptr - alen, abuf, alen);
- ptr -= alen;
- nsz = n + alen; /* nsz = data + address */
- offs = ptr - buf->orig_bytes; /* initial pointer offset */
+ ASSERT (len <= sizeof(other));
+ ptr = udesc->i_buf->orig_bytes + sizeof(other) - len;
+ sys_memcpy(ptr, abuf, len);
/* Check if we need to reallocate binary */
if ((desc->mode == INET_MODE_BINARY) &&
- (desc->hsz < n) && (nsz < BIN_REALLOC_LIMIT(sz))) {
+ (desc->hsz < (udesc->i_ptr - ptr)) &&
+ ((udesc->i_ptr - ptr) + BIN_REALLOC_MARGIN(desc->bufsz) >=
+ udesc->i_bufsz)) {
ErlDrvBinary* tmp;
- if ((tmp = realloc_buffer(buf,nsz+offs)) != NULL)
- buf = tmp;
+ int bufsz;
+ bufsz = udesc->i_ptr - udesc->i_buf->orig_bytes;
+ if ((tmp = realloc_buffer(udesc->i_buf, bufsz)) != NULL) {
+ udesc->i_buf = tmp;
+ udesc->i_bufsz = bufsz;
+ }
}
#ifdef HAVE_SCTP
if (IS_SCTP(desc)) extra = &mhdr;
#endif
/* Actual parsing and return of the data received, occur here: */
- code = packet_reply_binary_data(desc, (unsigned int)alen,
- buf, offs, nsz, extra);
- free_buffer(buf);
+ code = packet_reply_binary_data(desc, len, udesc->i_buf,
+ ptr - udesc->i_buf->orig_bytes,
+ udesc->i_ptr - ptr,
+ extra);
+ free_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
if (code < 0)
return count;
count++;
@@ -10052,7 +10313,17 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
return count; /* passive mode (read one packet only) */
}
}
+ } /* while(packet_count--) { */
+
+ /* we ran out of tries (packet_count) either on an active socket
+ * that got that many messages or an SCTP socket that got that
+ * many message fragments but still not the final one.
+ */
+#ifdef HAVE_SCTP
+ if (short_recv) {
+ sock_select(desc, FD_READ, 1);
}
+#endif
return count;
}
@@ -10062,7 +10333,7 @@ static void packet_inet_drv_output(ErlDrvData e, ErlDrvEvent event)
}
/* UDP/SCTP socket ready for output:
-** This is a Back-End for Non-Block SCTP Connect (SCTP_STATE_CONNECTING)
+** This is a Back-End for Non-Block SCTP Connect (INET_STATE_CONNECTING)
*/
static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
{
@@ -10073,7 +10344,7 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
DEBUGF(("packet_inet_output(%ld) {s=%d\r\n",
(long)desc->port, desc->s));
- if (desc->state == SCTP_STATE_CONNECTING) {
+ if (desc->state == INET_STATE_CONNECTING) {
sock_select(desc, FD_CONNECT, 0);
driver_cancel_timer(ix); /* possibly cancel a timer */
@@ -10093,7 +10364,7 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
(struct sockaddr*) &desc->remote, &sz);
if (IS_SOCKET_ERROR(code)) {
- desc->state = PACKET_STATE_BOUND; /* restore state */
+ desc->state = INET_STATE_BOUND; /* restore state */
ret = async_error(desc, sock_errno());
goto done;
}
@@ -10106,15 +10377,15 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
(void *)&error, &sz);
if ((code < 0) || error) {
- desc->state = PACKET_STATE_BOUND; /* restore state */
+ desc->state = INET_STATE_BOUND; /* restore state */
ret = async_error(desc, error);
goto done;
}
}
-#endif /* SOCKOPT_CONNECT_STAT */
+#endif /* SO_ERROR */
#endif /* !__WIN32__ */
- desc->state = PACKET_STATE_CONNECTED;
+ desc->state = INET_STATE_CONNECTED;
async_ok(desc);
}
else {
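
The SCTP_REQ_PEELOFF handler added above branches one association off a one-to-many SCTP socket and wraps the resulting descriptor in a new port via sctp_inet_copy(). The driver reaches the kernel through the dynamically resolved p_sctp_peeloff pointer; outside the emulator the same operation is the plain lksctp-tools call sketched below (socket and association ids and the error handling are illustrative, not driver code).

    #include <stdio.h>
    #include <netinet/sctp.h>

    /* Minimal sketch of the underlying peel-off call, assuming lksctp-tools.
     * 'sd' is a one-to-many SCTP socket; 'assoc_id' is an association id
     * previously obtained from an SCTP_ASSOC_CHANGE notification. */
    int peeloff_example(int sd, sctp_assoc_t assoc_id)
    {
        int new_sd = sctp_peeloff(sd, assoc_id);  /* detach the association */
        if (new_sd < 0) {
            perror("sctp_peeloff");
            return -1;
        }
        /* new_sd now behaves like a one-to-one SCTP socket */
        return new_sd;
    }
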
diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c
index d782b044a9..45d39a559f 100644
--- a/erts/emulator/drivers/unix/ttsl_drv.c
+++ b/erts/emulator/drivers/unix/ttsl_drv.c
@@ -242,7 +242,7 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
#ifndef HAVE_TERMCAP
return ERL_DRV_ERROR_GENERAL;
#else
- char *s, *t, c, *l;
+ char *s, *t, *l;
int canon, echo, sig; /* Terminal characteristics */
int flag;
extern int using_oldshell; /* set this to let the rest of erts know */
@@ -262,7 +262,6 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
s++;
/* Find end of this argument (start of next) and insert NUL. */
if ((t = strchr(s, ' '))) {
- c = *t;
*t = '\0';
}
if ((flag = ((*s == '+') ? 1 : ((*s == '-') ? -1 : 0)))) {
diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c
index 3d59564f7b..931bb196f1 100755..100644
--- a/erts/emulator/drivers/win32/win_efile.c
+++ b/erts/emulator/drivers/win32/win_efile.c
@@ -127,6 +127,8 @@ static int errno_map(DWORD last_error) {
return EBUSY;
case ERROR_NO_PROC_SLOTS:
return EAGAIN;
+ case ERROR_CANT_RESOLVE_FILENAME:
+ return EMLINK;
case ERROR_ARENA_TRASHED:
case ERROR_INVALID_BLOCK:
case ERROR_BAD_ENVIRONMENT:
@@ -1405,7 +1407,7 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
DWORD fileAttributes = GetFileAttributesW(wname);
if ((fileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
BOOLEAN success = 0;
- HANDLE h = CreateFileW(wname, GENERIC_READ, 0,NULL, OPEN_EXISTING, 0, NULL);
+ HANDLE h = CreateFileW(wname, GENERIC_READ, 0,NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
int len;
if(h != INVALID_HANDLE_VALUE) {
success = pGetFinalPathNameByHandle(h, wbuffer, size,0);
@@ -1421,7 +1423,7 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
if (*wbuffer == L'\\')
*wbuffer = L'/';
CloseHandle(h);
- }
+ }
FreeLibrary(hModule);
if (success) {
return 1;
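
The win_efile.c change above adds a mapping from ERROR_CANT_RESOLVE_FILENAME to EMLINK and makes efile_readlink() open the reparse point with FILE_FLAG_BACKUP_SEMANTICS, which CreateFileW() requires in order to open a directory handle at all. A standalone illustration of that flag (path and error handling are hypothetical):

    #include <windows.h>
    #include <stdio.h>

    /* Sketch: opening a handle to a directory (e.g. a junction/reparse point)
     * only works when FILE_FLAG_BACKUP_SEMANTICS is passed. */
    static HANDLE open_dir_handle(const wchar_t *path)
    {
        HANDLE h = CreateFileW(path, GENERIC_READ,
                               FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
                               OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
        if (h == INVALID_HANDLE_VALUE)
            fprintf(stderr, "CreateFileW failed: %lu\n", GetLastError());
        return h;
    }
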
diff --git a/erts/emulator/hipe/hipe_abi.txt b/erts/emulator/hipe/hipe_abi.txt
index d0ec162342..9d4726de9d 100644
--- a/erts/emulator/hipe/hipe_abi.txt
+++ b/erts/emulator/hipe/hipe_abi.txt
@@ -62,7 +62,7 @@ exceptional condition, it puts an error code in p->freason
and returns THE_NON_VALUE (zero, except in debug mode).
If p->freason == TRAP, then the BIF redirects its call to some
-other function, given by p->def_arg_reg[].
+other function, given by p->i.
The BIF and the new callee may have different arities.
The "hipe_${ARCH}_bifs.m4" macro files take care of these issues
diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4
index 0ba763cbea..97a8267647 100644
--- a/erts/emulator/hipe/hipe_amd64_bifs.m4
+++ b/erts/emulator/hipe/hipe_amd64_bifs.m4
@@ -20,24 +20,37 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_amd64_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
+
`#if THE_NON_VALUE == 0
#define TEST_GOT_EXN testq %rax, %rax
#else
#define TEST_GOT_EXN cmpq $THE_NON_VALUE, %rax
#endif'
-`#define TEST_GOT_MBUF movq P_MBUF(P), %rdx; testq %rdx, %rdx; jnz 3f; 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: call JOIN3(nbif_,ARITY,_gc_after_bif); jmp 2b'
+define(TEST_GOT_MBUF,`movq P_MBUF(P), %rdx # `TEST_GOT_MBUF'
+ testq %rdx, %rdx
+ jnz 3f
+2:')
+define(HANDLE_GOT_MBUF,`
+3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF'
+ jmp 2b')
+
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) movq $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
+#else
+# define CALL_BIF(F) call CSYM(F)
+#endif'
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -54,7 +67,11 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ pushq %rsi
+ movq %rsp, %rsi /* Eterm* BIF__ARGS */
+ sub $(8), %rsp /* stack frame 16-byte alignment */
+ CALL_BIF($2)
+ add $(1*8 + 8), %rsp
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
@@ -82,7 +99,11 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ pushq %rdx
+ pushq %rsi
+ movq %rsp, %rsi /* Eterm* BIF__ARGS */
+ CALL_BIF($2)
+ add $(2*8), %rsp
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
@@ -111,7 +132,13 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ pushq %rcx
+ pushq %rdx
+ pushq %rsi
+ movq %rsp, %rsi /* Eterm* BIF__ARGS */
+ sub $(8), %rsp /* stack frame 16-byte alignment */
+ CALL_BIF($2)
+ add $(3*8 + 8), %rsp
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
@@ -124,13 +151,7 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -143,7 +164,7 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ CALL_BIF($2)
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4
index 3664fb6502..e0c6f09796 100644
--- a/erts/emulator/hipe/hipe_arm_bifs.m4
+++ b/erts/emulator/hipe/hipe_arm_bifs.m4
@@ -20,18 +20,27 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_arm_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
.text
.p2align 2
-`#define JOIN3(A,B,C) A##B##C
-#define TEST_GOT_MBUF(ARITY) ldr r1, [P, #P_MBUF]; cmp r1, #0; blne JOIN3(nbif_,ARITY,_gc_after_bif)'
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) mov r14, #F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
+#else
+# define CALL_BIF(F) bl F
+#endif'
+
+define(TEST_GOT_MBUF,`ldr r1, [P, #P_MBUF] /* `TEST_GOT_MBUF' */
+ cmp r1, #0
+ blne nbif_$1_gc_after_bif')
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
* Generate native interface for a BIF with 1-3 parameters and
* standard failure mode.
@@ -48,7 +57,9 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(1)
/* Restore registers. Check for exception. */
@@ -73,7 +84,10 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ str r2, [r0, #P_ARG1]
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(2)
/* Restore registers. Check for exception. */
@@ -99,7 +113,11 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ str r2, [r0, #P_ARG1]
+ str r3, [r0, #P_ARG2]
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(3)
/* Restore registers. Check for exception. */
@@ -111,13 +129,7 @@ $1:
.type $1, %function
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -128,7 +140,8 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF(0)
/* Restore registers. Check for exception. */
diff --git a/erts/emulator/hipe/hipe_bif0.h b/erts/emulator/hipe/hipe_bif0.h
index f02e8862dc..c512d66f9d 100644
--- a/erts/emulator/hipe/hipe_bif0.h
+++ b/erts/emulator/hipe/hipe_bif0.h
@@ -29,7 +29,7 @@ extern Uint *hipe_bifs_find_pc_from_mfa(Eterm mfa);
extern void hipe_mfa_info_table_init(void);
extern void *hipe_get_remote_na(Eterm m, Eterm f, unsigned int a);
-extern Eterm hipe_find_na_or_make_stub(Process*, Eterm, Eterm, Eterm);
+extern BIF_RETTYPE hipe_find_na_or_make_stub(BIF_ALIST_3);
extern int hipe_find_mfa_from_ra(const void *ra, Eterm *m, Eterm *f, unsigned int *a);
#if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
extern void *hipe_mfa_get_trampoline(Eterm m, Eterm f, unsigned int a);
diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c
index 2660f74a82..ee97541e15 100644
--- a/erts/emulator/hipe/hipe_bif2.c
+++ b/erts/emulator/hipe/hipe_bif2.c
@@ -166,3 +166,26 @@ BIF_RETTYPE hipe_bifs_show_message_area_0(BIF_ALIST_0)
BIF_RET(am_false);
#endif
}
+
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+
+BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1);
+
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
+
+BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1)
+{
+ typedef BIF_RETTYPE Bif(BIF_ALIST_1);
+ Bif* fp = (Bif*) (BIF_P->hipe.bif_callee);
+ BIF_RETTYPE res;
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(BIF_P);
+ res = (*fp)(BIF_P, BIF__ARGS);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(BIF_P);
+ return res;
+}
+
+#endif /* ERTS_ENABLE_LOCK_CHECK && ERTS_SMP */
+
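
When lock checking is enabled (ERTS_ENABLE_LOCK_CHECK together with ERTS_SMP), the BIF glue generated by the m4 files no longer calls the C BIF directly: it stashes the real callee in p->hipe.bif_callee and goes through hipe_debug_bif_wrapper() above, which drops and re-acquires the lock checker's "main lock required" state around the call. A rough C analogue of one CALL_BIF expansion (the callee and argument vector are placeholders, not actual glue code):

    /* Hypothetical rendering of the CALL_BIF(F) path under lock checking;
     * assumes the usual ERTS headers (Process, Eterm) are in scope. */
    static Eterm call_bif_checked(Process *p, Eterm *args,
                                  Eterm (*callee)(Process*, Eterm*))
    {
        p->hipe.bif_callee = (void (*)(void)) callee;  /* remember real BIF */
        return hipe_debug_bif_wrapper(p, args);        /* unrequire/require
                                                          ERTS_PROC_LOCK_MAIN
                                                          around the call */
    }
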
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index 083788997b..48c7c1bc9b 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -70,24 +70,18 @@
****************************************************************/
/*
+ * standard_bif_interface_0(nbif_name, cbif_name)
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
*
- * A BIF with implicit P parameter, 1-3 ordinary parameters,
+ * A BIF with implicit P parameter, 0-3 ordinary parameters,
* which may fail.
* HP and FCALLS may be read and updated.
* HP_LIMIT, NSP, NSP_LIMIT, and NRA may not be accessed.
*/
/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * A zero-arity BIF which may fail, otherwise
- * identical to standard_bif_interface_N.
- */
-
-/*
* nofail_primop_interface_0(nbif_name, cbif_name)
* nofail_primop_interface_1(nbif_name, cbif_name)
* nofail_primop_interface_2(nbif_name, cbif_name)
@@ -150,8 +144,7 @@
/*
* Zero-arity BIFs that can fail.
*/
-fail_bif_interface_0(nbif_memory_0, memory_0)
-fail_bif_interface_0(nbif_processes_0, processes_0)
+standard_bif_interface_0(nbif_processes_0, processes_0)
/*
* BIFs and primops that may do a GC (change heap limit and walk the native stack).
@@ -176,10 +169,10 @@ gc_bif_interface_0(nbif_hipe_bifs_nstack_used_size_0, hipe_bifs_nstack_used_size
/*
* Arithmetic operators called indirectly by the HiPE compiler.
*/
-standard_bif_interface_2(nbif_add_2, erts_mixed_plus)
-standard_bif_interface_2(nbif_sub_2, erts_mixed_minus)
-standard_bif_interface_2(nbif_mul_2, erts_mixed_times)
-standard_bif_interface_2(nbif_div_2, erts_mixed_div)
+standard_bif_interface_2(nbif_add_2, splus_2)
+standard_bif_interface_2(nbif_sub_2, sminus_2)
+standard_bif_interface_2(nbif_mul_2, stimes_2)
+standard_bif_interface_2(nbif_div_2, div_2)
standard_bif_interface_2(nbif_intdiv_2, intdiv_2)
standard_bif_interface_2(nbif_rem_2, rem_2)
standard_bif_interface_2(nbif_bsl_2, bsl_2)
@@ -261,11 +254,6 @@ noproc_primop_interface_1(nbif_atomic_inc, hipe_atomic_inc)
',)dnl
/*
- * Implement standard_bif_interface_0 as nofail_primop_interface_0.
- */
-define(standard_bif_interface_0,`nofail_primop_interface_0($1, $2)')
-
-/*
* Standard BIFs.
* BIF_LIST(ModuleAtom,FunctionAtom,Arity,CFun,Index)
*/
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index bced90785d..61e15f1d58 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -1,9 +1,8 @@
/*
* %CopyrightBegin%
-
- *
+ *
* Copyright Ericsson AB 2001-2011. All Rights Reserved.
- *
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
@@ -212,6 +211,11 @@ static const unsigned int CRCTABLE[256] = {
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D,
};
+/* For the HiPE cross compiler: hard-code all values, so the HiPE compiler
+   makes no calls to query the running emulator.
+*/
+static int is_xcomp = 0;
+
/*
* The algorithm for calculating the 32 bit CRC checksum is based upon
* documentation and algorithms provided by Dr. Ross N. Williams in the
@@ -243,7 +247,7 @@ crc_update_buf(unsigned int crc_value,
}
static unsigned int
-crc_update_int(unsigned int crc_value, const unsigned int *p)
+crc_update_int(unsigned int crc_value, const int *p)
{
return crc_update_buf(crc_value, p, sizeof *p);
}
@@ -256,7 +260,7 @@ crc_update_int(unsigned int crc_value, const unsigned int *p)
*/
static const struct literal {
const char *name;
- unsigned int value;
+ int value;
} literals[] = {
/* Field offsets in a process struct */
{ "P_HP", offsetof(struct process, htop) },
@@ -289,6 +293,9 @@ static const struct literal {
{ "P_NRA", offsetof(struct process, hipe.nra) },
#endif
{ "P_NARITY", offsetof(struct process, hipe.narity) },
+# if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ { "P_BIF_CALLEE", offsetof(struct process, hipe.bif_callee) },
+# endif
#endif /* HIPE */
/* process flags bits */
@@ -298,7 +305,7 @@ static const struct literal {
{ "FREASON_TRAP", TRAP },
/* special Erlang constants */
- { "THE_NON_VALUE", THE_NON_VALUE },
+ { "THE_NON_VALUE", (int)THE_NON_VALUE },
/* funs */
#ifdef HIPE
@@ -452,7 +459,7 @@ static const struct rts_param {
unsigned int nr;
const char *name;
unsigned int is_defined;
- unsigned int value;
+ int value;
} rts_params[] = {
{ 1, "P_OFF_HEAP_FUNS",
#if !defined(HYBRID)
@@ -528,12 +535,12 @@ static void compute_crc(void)
static void c_define_literal(FILE *fp, const struct literal *literal)
{
- fprintf(fp, "#define %s %u\n", literal->name, literal->value);
+ fprintf(fp, "#define %s %d\n", literal->name, literal->value);
}
static void e_define_literal(FILE *fp, const struct literal *literal)
{
- fprintf(fp, "-define(%s, %u).\n", literal->name, literal->value);
+ fprintf(fp, "-define(%s, %d).\n", literal->name, literal->value);
}
static void print_literals(FILE *fp, void (*print_literal)(FILE*, const struct literal*))
@@ -560,7 +567,7 @@ static void print_atom_literals(FILE *fp, void (*print_atom_literal)(FILE*, cons
static void c_define_param(FILE *fp, const struct rts_param *param)
{
if (param->is_defined)
- fprintf(fp, "#define %s %u\n", param->name, param->value);
+ fprintf(fp, "#define %s %d\n", param->name, param->value);
}
static void c_case_param(FILE *fp, const struct rts_param *param)
@@ -568,7 +575,7 @@ static void c_case_param(FILE *fp, const struct rts_param *param)
fprintf(fp, " \\\n");
fprintf(fp, "\tcase %u: ", param->nr);
if (param->is_defined)
- fprintf(fp, "value = %u", param->value);
+ fprintf(fp, "value = %d", param->value);
else
fprintf(fp, "is_defined = 0");
fprintf(fp, "; break;");
@@ -576,7 +583,15 @@ static void c_case_param(FILE *fp, const struct rts_param *param)
static void e_define_param(FILE *fp, const struct rts_param *param)
{
- fprintf(fp, "-define(%s, hipe_bifs:get_rts_param(%u)).\n", param->name, param->nr);
+ if (is_xcomp) {
+ if (param->is_defined)
+ fprintf(fp, "-define(%s, %d).\n", param->name, param->value);
+ else
+ fprintf(fp, "-define(%s, []).\n", param->name);
+ }
+ else {
+ fprintf(fp, "-define(%s, hipe_bifs:get_rts_param(%u)).\n", param->name, param->nr);
+ }
}
static void print_params(FILE *fp, void (*print_param)(FILE*,const struct rts_param*))
@@ -613,19 +628,40 @@ static int do_e(FILE *fp, const char* this_exe)
fprintf(fp, "\n");
print_params(fp, e_define_param);
fprintf(fp, "\n");
- fprintf(fp, "-define(HIPE_SYSTEM_CRC, hipe_bifs:system_crc(%u)).\n", literals_crc);
+ if (is_xcomp) {
+ fprintf(fp, "-define(HIPE_SYSTEM_CRC, %u).\n", system_crc);
+ }
+ else {
+ fprintf(fp, "-define(HIPE_SYSTEM_CRC, hipe_bifs:system_crc(%u)).\n",
+ literals_crc);
+ }
return 0;
}
int main(int argc, const char **argv)
{
+ int i;
+ int (*do_func_ptr)(FILE *, const char*) = NULL;
+
compute_crc();
- if (argc == 2) {
- if (strcmp(argv[1], "-c") == 0)
- return do_c(stdout, argv[0]);
- if (strcmp(argv[1], "-e") == 0)
- return do_e(stdout, argv[0]);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-c") == 0)
+ do_func_ptr = &do_c;
+ else if (strcmp(argv[i], "-e") == 0)
+ do_func_ptr = &do_e;
+ else if (strcmp(argv[i], "-x") == 0)
+ is_xcomp = 1;
+ else
+ goto error;
+ }
+ if (do_func_ptr) {
+ return do_func_ptr(stdout, argv[0]);
}
- fprintf(stderr, "usage: %s [-c | -e] > output-file\n", argv[0]);
+error:
+ fprintf(stderr, "usage: %s [-x] [-c | -e] > output-file\n"
+ "\t-c\tC header file\n"
+ "\t-e\tErlang header file\n"
+ "\t-x\tCross compile. No dependencies to compiling emulator\n",
+ argv[0]);
return 1;
}
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index 16f8fb1347..4d75883fc5 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -35,6 +35,17 @@
#include "hipe_stack.h"
#include "hipe_bif0.h" /* hipe_mfa_info_table_init() */
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
+#else
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+#endif
+
+
/*
* Internal debug support.
* #define HIPE_DEBUG to the desired debug level:
@@ -318,8 +329,8 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
* Native code called a BIF, which "failed" with a TRAP to BEAM.
* Prior to returning, the BIF stored (see BIF_TRAP<N>):
- * the callee's address in p->def_arg_reg[3]
- * the callee's parameters in p->def_arg_reg[0..2]
+ * the callee's address in p->i
+ * the callee's parameters in reg[0..2]
* the callee's arity in p->arity (for BEAM gc purposes)
*
* We need to remove the BIF's parameters from the native
@@ -331,32 +342,25 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
*/
unsigned int i, is_recursive = 0;
- /* Save p->arity, then update it with the original BIF's arity.
- Get rid of any stacked parameters in that call. */
- /* XXX: hipe_call_from_native_is_recursive() copies data to
- reg[], which is useless in the TRAP case. Maybe write a
- specialised hipe_trap_from_native_is_recursive() later. */
if (p->hipe.nsp != NULL) {
- unsigned int callee_arity;
- callee_arity = p->arity;
- p->arity = p->hipe.narity; /* caller's arity */
- is_recursive = hipe_call_from_native_is_recursive(p, reg);
-
- p->i = (Eterm *)(p->def_arg_reg[3]);
- p->arity = callee_arity;
+ is_recursive = hipe_trap_from_native_is_recursive(p);
}
- /* If process is in P_WAITING state, we schedule the next process */
+ /* Schedule next process if current process was hibernated or is waiting
+ for messages */
+ if (p->flags & F_HIBERNATE_SCHED) {
+ p->flags &= ~F_HIBERNATE_SCHED;
+ goto do_schedule;
+ }
if (p->status == P_WAITING) {
+ for (i = 0; i < p->arity; ++i)
+ p->arg_reg[i] = reg[i];
goto do_schedule;
}
- for (i = 0; i < p->arity; ++i)
- reg[i] = p->def_arg_reg[i];
-
if (is_recursive)
hipe_push_beam_trap_frame(p, reg, p->arity);
-
+
result = HIPE_MODE_SWITCH_RES_CALL;
break;
}
@@ -465,10 +469,12 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
#if !(NR_ARG_REGS > 5)
int reds_in = p->def_arg_reg[5];
#endif
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(p);
p = schedule(p, reds_in - p->fcalls);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(p);
#ifdef ERTS_SMP
p->hipe_smp.have_receive_locks = 0;
- reg = p->scheduler_data->save_reg;
+ reg = p->scheduler_data->x_reg_array;
#endif
}
{
@@ -643,7 +649,7 @@ Eterm hipe_build_stacktrace(Process *p, struct StackTrace *s)
if (depth < 1)
return NIL;
- heap_size = 6 * depth; /* each [{M,F,A}|_] is 2+4 == 6 words */
+ heap_size = 7 * depth; /* each [{M,F,A,[]}|_] is 2+5 == 7 words */
hp = HAlloc(p, heap_size);
hp_end = hp + heap_size;
@@ -654,8 +660,8 @@ Eterm hipe_build_stacktrace(Process *p, struct StackTrace *s)
ra = (const void*)s->trace[i];
if (!hipe_find_mfa_from_ra(ra, &m, &f, &a))
continue;
- mfa = TUPLE3(hp, m, f, make_small(a));
- hp += 4;
+ mfa = TUPLE4(hp, m, f, make_small(a), NIL);
+ hp += 5;
next = CONS(hp, mfa, NIL);
*next_p = next;
next_p = &CDR(list_val(next));
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index dfb4ca794a..77dee6f9e9 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -41,9 +41,9 @@
*/
/* for -Wmissing-prototypes :-( */
-extern Eterm hipe_check_process_code_2(Process*, Eterm, Eterm);
-extern Eterm hipe_garbage_collect_1(Process*, Eterm);
-extern Eterm hipe_show_nstack_1(Process*, Eterm);
+extern Eterm hipe_check_process_code_2(BIF_ALIST_2);
+extern Eterm hipe_garbage_collect_1(BIF_ALIST_1);
+extern Eterm hipe_show_nstack_1(BIF_ALIST_1);
/* Used when a BIF can trigger a stack walk. */
static __inline__ void hipe_set_narity(Process *p, unsigned int arity)
@@ -56,7 +56,7 @@ Eterm hipe_check_process_code_2(BIF_ALIST_2)
Eterm ret;
hipe_set_narity(BIF_P, 2);
- ret = check_process_code_2(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ ret = check_process_code_2(BIF_P, BIF__ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
@@ -66,7 +66,7 @@ Eterm hipe_garbage_collect_1(BIF_ALIST_1)
Eterm ret;
hipe_set_narity(BIF_P, 1);
- ret = garbage_collect_1(BIF_P, BIF_ARG_1);
+ ret = garbage_collect_1(BIF_P, BIF__ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
@@ -76,7 +76,7 @@ Eterm hipe_show_nstack_1(BIF_ALIST_1)
Eterm ret;
hipe_set_narity(BIF_P, 1);
- ret = hipe_bifs_show_nstack_1(BIF_P, BIF_ARG_1);
+ ret = hipe_bifs_show_nstack_1(BIF_P, BIF__ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
@@ -99,8 +99,10 @@ void hipe_gc(Process *p, Eterm need)
* has begun.
* XXX: BUG: native code should check return status
*/
-Eterm hipe_set_timeout(Process *p, Eterm timeout_value)
+BIF_RETTYPE hipe_set_timeout(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm timeout_value = BIF_ARG_1;
#if !defined(ARCH_64)
Uint time_val;
#endif
@@ -286,8 +288,13 @@ static struct StackTrace *get_trace_from_exc(Eterm exc)
* This does what the (misnamed) Beam instruction 'raise_ss' does,
* namely, a proper re-throw of an exception that was caught by 'try'.
*/
-Eterm hipe_rethrow(Process *c_p, Eterm exc, Eterm value)
+
+BIF_RETTYPE hipe_rethrow(BIF_ALIST_2)
{
+ Process* c_p = BIF_P;
+ Eterm exc = BIF_ARG_1;
+ Eterm value = BIF_ARG_2;
+
c_p->fvalue = value;
if (c_p->freason == EXC_NULL) {
/* a safety check for the R10-0 case; should not happen */
@@ -411,8 +418,12 @@ Eterm hipe_bs_utf8_size(Eterm arg)
return make_small(4);
}
-Eterm hipe_bs_put_utf8(Process *p, Eterm arg, byte *base, unsigned int offset)
+BIF_RETTYPE hipe_bs_put_utf8(BIF_ALIST_3)
{
+ Process* p = BIF_P;
+ Eterm arg = BIF_ARG_1;
+ byte* base = (byte*) BIF_ARG_2;
+ Uint offset = (Uint) BIF_ARG_3;
byte *save_bin_buf;
Uint save_bin_offset;
int res;
@@ -468,13 +479,21 @@ Eterm hipe_bs_put_utf16(Process *p, Eterm arg, byte *base, unsigned int offset,
return new_offset;
}
-Eterm hipe_bs_put_utf16be(Process *p, Eterm arg, byte *base, unsigned int offset)
+BIF_RETTYPE hipe_bs_put_utf16be(BIF_ALIST_3)
{
+ Process *p = BIF_P;
+ Eterm arg = BIF_ARG_1;
+ byte *base = (byte*) BIF_ARG_2;
+ Uint offset = (Uint) BIF_ARG_3;
return hipe_bs_put_utf16(p, arg, base, offset, 0);
}
-Eterm hipe_bs_put_utf16le(Process *p, Eterm arg, byte *base, unsigned int offset)
+BIF_RETTYPE hipe_bs_put_utf16le(BIF_ALIST_3)
{
+ Process *p = BIF_P;
+ Eterm arg = BIF_ARG_1;
+ byte *base = (byte*) BIF_ARG_2;
+ Uint offset = (Uint) BIF_ARG_3;
return hipe_bs_put_utf16(p, arg, base, offset, BSF_LITTLE);
}
@@ -489,8 +508,10 @@ static int validate_unicode(Eterm arg)
return 1;
}
-Eterm hipe_bs_validate_unicode(Process *p, Eterm arg)
+BIF_RETTYPE hipe_bs_validate_unicode(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm arg = BIF_ARG_1;
if (!validate_unicode(arg))
BIF_ERROR(p, BADARG);
return NIL;
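
The conversions above (hipe_rethrow, hipe_bs_put_utf8, hipe_set_timeout, ...) move HiPE's C primops onto the BIF_ALIST/BIF__ARGS convention: every BIF receives the process pointer plus a single Eterm argument vector instead of separate Eterm parameters. In effect the C signature becomes the one sketched below (names are illustrative; the real macros live in bif.h):

    /* Sketch of the argument-vector calling convention the diff adopts. */
    Eterm example_bif_2(Process* p, Eterm* args)   /* roughly BIF_ALIST_2 */
    {
        Eterm arg1 = args[0];                      /* roughly BIF_ARG_1   */
        Eterm arg2 = args[1];                      /* roughly BIF_ARG_2   */
        (void) p;
        return arg1 == arg2 ? arg1 : arg2;         /* dummy body          */
    }
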
diff --git a/erts/emulator/hipe/hipe_native_bif.h b/erts/emulator/hipe/hipe_native_bif.h
index 13a02b84a2..8c9dec180e 100644
--- a/erts/emulator/hipe/hipe_native_bif.h
+++ b/erts/emulator/hipe/hipe_native_bif.h
@@ -23,6 +23,7 @@
#ifndef HIPE_NATIVE_BIF_H
#define HIPE_NATIVE_BIF_H
+#include "bif.h"
#include "hipe_arch.h"
/*
@@ -71,24 +72,24 @@ AEXTERN(void,nbif_select_msg,(Process*));
AEXTERN(Eterm,nbif_cmp_2,(void));
AEXTERN(Eterm,nbif_eq_2,(void));
-Eterm hipe_nonclosure_address(Process*, Eterm, Uint);
-Eterm hipe_conv_big_to_float(Process*, Eterm);
+BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2);
+BIF_RETTYPE hipe_conv_big_to_float(BIF_ALIST_1);
void hipe_fclearerror_error(Process*);
void hipe_select_msg(Process*);
void hipe_gc(Process*, Eterm);
-Eterm hipe_set_timeout(Process*, Eterm);
+BIF_RETTYPE hipe_set_timeout(BIF_ALIST_1);
void hipe_handle_exception(Process*);
-Eterm hipe_rethrow(Process *c_p, Eterm exc, Eterm value);
+BIF_RETTYPE hipe_rethrow(BIF_ALIST_2);
char *hipe_bs_allocate(int);
Binary *hipe_bs_reallocate(Binary*, int);
int hipe_bs_put_small_float(Process*, Eterm, Uint, byte*, unsigned, unsigned);
void hipe_bs_put_bits(Eterm, Uint, byte*, unsigned, unsigned);
Eterm hipe_bs_utf8_size(Eterm);
-Eterm hipe_bs_put_utf8(Process*, Eterm, byte*, unsigned int);
+BIF_RETTYPE hipe_bs_put_utf8(BIF_ALIST_3);
Eterm hipe_bs_utf16_size(Eterm);
-Eterm hipe_bs_put_utf16be(Process*, Eterm, byte*, unsigned int);
-Eterm hipe_bs_put_utf16le(Process*, Eterm, byte*, unsigned int);
-Eterm hipe_bs_validate_unicode(Process*, Eterm);
+BIF_RETTYPE hipe_bs_put_utf16be(BIF_ALIST_3);
+BIF_RETTYPE hipe_bs_put_utf16le(BIF_ALIST_3);
+BIF_RETTYPE hipe_bs_validate_unicode(BIF_ALIST_1);
struct erl_bin_match_buffer;
int hipe_bs_validate_unicode_retract(struct erl_bin_match_buffer*, Eterm);
diff --git a/erts/emulator/hipe/hipe_ppc_asm.m4 b/erts/emulator/hipe/hipe_ppc_asm.m4
index 0eb5c441e6..343402f9f0 100644
--- a/erts/emulator/hipe/hipe_ppc_asm.m4
+++ b/erts/emulator/hipe/hipe_ppc_asm.m4
@@ -31,12 +31,23 @@ define(LOAD,ld)dnl
define(STORE,std)dnl
define(CMPI,cmpdi)dnl
define(WSIZE,8)dnl
+`#define STORE_IA(ADDR, DST, TMP) \
+ addis TMP, 0, ADDR@highest SEMI\
+ ori TMP, TMP, ADDR@higher SEMI\
+ rldicr TMP, TMP, 32, 31 SEMI\
+ oris TMP, TMP, ADDR@h SEMI\
+ ori TMP, TMP, ADDR@l SEMI\
+ std TMP, DST'
',`
/* 32-bit PowerPC */
define(LOAD,lwz)dnl
define(STORE,stw)dnl
define(CMPI,cmpwi)dnl
define(WSIZE,4)dnl
+`#define STORE_IA(ADDR, DST, TMP) \
+ lis TMP, ADDR@ha SEMI\
+ addi TMP, TMP, ADDR@l SEMI\
+ stw TMP, DST'
')dnl
`#define LOAD 'LOAD
`#define STORE 'STORE
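
The 64-bit STORE_IA macro above builds a full 64-bit immediate address in TMP before storing it: addis/ori assemble the upper 32 bits from the @highest and @higher halves, rldicr shifts them into place, and oris/ori fill in the @h and @l halves. In C the five instructions compute roughly the following (a sketch of the arithmetic only):

    #include <stdint.h>

    /* How the 64-bit STORE_IA sequence composes an address from its four
     * 16-bit pieces. */
    static uint64_t store_ia_value(uint16_t highest, uint16_t higher,
                                   uint16_t h, uint16_t l)
    {
        uint64_t tmp = ((uint64_t)highest << 16) | higher; /* addis + ori          */
        tmp <<= 32;                                        /* rldicr tmp,tmp,32,31 */
        tmp |= ((uint64_t)h << 16) | l;                    /* oris + ori           */
        return tmp;
    }
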
diff --git a/erts/emulator/hipe/hipe_ppc_bifs.m4 b/erts/emulator/hipe/hipe_ppc_bifs.m4
index 203fefe1a1..d09551d10d 100644
--- a/erts/emulator/hipe/hipe_ppc_bifs.m4
+++ b/erts/emulator/hipe/hipe_ppc_bifs.m4
@@ -20,21 +20,34 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_ppc_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) STORE_IA(CSYM(F), P_BIF_CALLEE(P), r29); bl CSYM(hipe_debug_bif_wrapper)
+#else
+# define CALL_BIF(F) bl CSYM(F)
+#endif'
+
.text
.p2align 2
-`#define TEST_GOT_MBUF LOAD r4, P_MBUF(P) SEMI CMPI r4, 0 SEMI bne- 3f SEMI 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: bl CSYM(JOIN3(nbif_,ARITY,_gc_after_bif)) SEMI b 2b'
+define(TEST_GOT_MBUF,`LOAD r4, P_MBUF(P) # `TEST_GOT_MBUF'
+ CMPI r4, 0
+ bne- 3f
+2:')
+define(HANDLE_GOT_MBUF,`
+3: bl CSYM(nbif_$1_gc_after_bif) # `HANDLE_GOT_MBUF'
+ b 2b')
+
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -49,7 +62,9 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -77,7 +92,10 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ STORE r5, P_ARG1(r3)
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -106,7 +124,11 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ STORE r5, P_ARG1(r3)
+ STORE r6, P_ARG2(r3)
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -121,13 +143,7 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -138,7 +154,8 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -173,7 +190,8 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl CSYM($2)
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. */
@@ -196,7 +214,9 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -224,7 +244,10 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ STORE r5, P_ARG1(r3)
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h
index 5effacb398..43f47d1a28 100644
--- a/erts/emulator/hipe/hipe_process.h
+++ b/erts/emulator/hipe/hipe_process.h
@@ -42,6 +42,9 @@ struct hipe_process_state {
void (*nra)(void); /* Native code return address. */
#endif
unsigned int narity; /* Arity of BIF call, for stack walks. */
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ void (*bif_callee)(void); /* When calling BIFs via the debug wrapper */
+#endif
};
extern void hipe_arch_print_pcb(struct hipe_process_state *p);
diff --git a/erts/emulator/hipe/hipe_risc_glue.h b/erts/emulator/hipe/hipe_risc_glue.h
index e74023e3e9..cc2671c016 100644
--- a/erts/emulator/hipe/hipe_risc_glue.h
+++ b/erts/emulator/hipe/hipe_risc_glue.h
@@ -199,6 +199,22 @@ hipe_call_from_native_is_recursive(Process *p, Eterm reg[])
return 0;
}
+/* BEAM called native code, which called a BIF that returned a TRAP.
+ * Discard the BIF's parameters.
+ * If this is a tailcall, also clean up the native stub continuation. */
+static __inline__ int
+hipe_trap_from_native_is_recursive(Process *p)
+{
+ if (p->hipe.narity > NR_ARG_REGS) {
+ p->hipe.nsp += (p->hipe.narity - NR_ARG_REGS);
+ }
+ if (p->hipe.nra != (void(*)(void))&nbif_return)
+ return 1;
+ hipe_pop_risc_nra_frame(p);
+ return 0;
+}
+
+
/* Native makes a call which needs to unload the parameters.
This differs from hipe_call_from_native_is_recursive() in
that it doesn't check for or pop the BEAM-calls-native frame.
diff --git a/erts/emulator/hipe/hipe_sparc_bifs.m4 b/erts/emulator/hipe/hipe_sparc_bifs.m4
index 03db7f3413..ca5af45d58 100644
--- a/erts/emulator/hipe/hipe_sparc_bifs.m4
+++ b/erts/emulator/hipe/hipe_sparc_bifs.m4
@@ -20,27 +20,42 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_sparc_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
.section ".text"
.align 4
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) set F, %o7; st %o7, [%o0+P_BIF_CALLEE]; call hipe_debug_bif_wrapper
+#else
+# define CALL_BIF(F) call F
+#endif'
+
/*
* Test for exception. This macro executes its delay slot.
*/
-`#define __TEST_GOT_EXN(LABEL) cmp %o0, THE_NON_VALUE; bz,pn %icc, LABEL
-#define TEST_GOT_EXN(ARITY) __TEST_GOT_EXN(JOIN3(nbif_,ARITY,_simple_exception))'
+define(TEST_GOT_EXN,`cmp %o0, THE_NON_VALUE ! `TEST_GOT_EXN'
+ bz,pn %icc, nbif_$1_simple_exception')
-`#define TEST_GOT_MBUF ld [P+P_MBUF], %o1; cmp %o1, 0; bne 3f; nop; 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: call JOIN3(nbif_,ARITY,_gc_after_bif); nop; b 2b; nop'
+define(TEST_GOT_MBUF,`ld [P+P_MBUF], %o1 ! `TEST_GOT_MBUF'
+ cmp %o1, 0
+ bne 3f
+ nop
+2:')
+define(HANDLE_GOT_MBUF,`
+3: call nbif_$1_gc_after_bif ! `HANDLE_GOT_MBUF'
+ nop
+ b 2b
+ nop')
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -55,7 +70,9 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -81,7 +98,10 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ st %o2, [%o0+P_ARG1]
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -108,7 +128,11 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ st %o2, [%o0+P_ARG1]
+ st %o3, [%o0+P_ARG2]
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -121,13 +145,7 @@ $1:
.type $1, #function
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -138,7 +156,8 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -171,7 +190,8 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- call $2
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -195,7 +215,9 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -221,7 +243,10 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ st %o2, [%o0+P_ARG1]
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4
index 1bb6488b00..2ea69bde3c 100644
--- a/erts/emulator/hipe/hipe_x86_bifs.m4
+++ b/erts/emulator/hipe/hipe_x86_bifs.m4
@@ -20,6 +20,7 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_x86_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
`#if THE_NON_VALUE == 0
@@ -28,16 +29,27 @@ include(`hipe/hipe_x86_asm.m4')
#define TEST_GOT_EXN cmpl $THE_NON_VALUE,%eax
#endif'
-`#define TEST_GOT_MBUF movl P_MBUF(P), %edx; testl %edx, %edx; jnz 3f; 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: call JOIN3(nbif_,ARITY,_gc_after_bif); jmp 2b'
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) movl $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
+#else
+# define CALL_BIF(F) call CSYM(F)
+#endif'
+
+define(TEST_GOT_MBUF,`movl P_MBUF(P), %edx # `TEST_GOT_MBUF'
+ testl %edx, %edx
+ jnz 3f
+2:')
+define(HANDLE_GOT_MBUF,`
+3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF'
+ jmp 2b')
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -56,8 +68,10 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- NBIF_ARG(1,1,0)
- call CSYM($2)
+ NBIF_ARG(2,1,0)
+ lea 8(%esp), %eax
+ NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
@@ -88,9 +102,11 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- NBIF_ARG(1,2,0)
- NBIF_ARG(2,2,1)
- call CSYM($2)
+ NBIF_ARG(2,2,0)
+ NBIF_ARG(3,2,1)
+ lea 8(%esp), %eax
+ NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
@@ -121,10 +137,12 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- NBIF_ARG(1,3,0)
- NBIF_ARG(2,3,1)
- NBIF_ARG(3,3,2)
- call CSYM($2)
+ NBIF_ARG(2,3,0)
+ NBIF_ARG(3,3,1)
+ NBIF_ARG(4,3,2)
+ lea 8(%esp), %eax
+ NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
@@ -139,13 +157,7 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -158,7 +170,8 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- call CSYM($2)
+ /* skip BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
diff --git a/erts/emulator/hipe/hipe_x86_glue.h b/erts/emulator/hipe/hipe_x86_glue.h
index a7b0f164be..b0db93267c 100644
--- a/erts/emulator/hipe/hipe_x86_glue.h
+++ b/erts/emulator/hipe/hipe_x86_glue.h
@@ -186,6 +186,25 @@ hipe_call_from_native_is_recursive(Process *p, Eterm reg[])
return 0;
}
+/* BEAM called native code, which called a BIF that returned a TRAP.
+ * Discard the BIF's parameters.
+ * If this is a tailcall, also clean up the native stub continuation. */
+static __inline__ int
+hipe_trap_from_native_is_recursive(Process *p)
+{
+ Eterm nra = *(p->hipe.nsp++);
+
+ if (p->hipe.narity > NR_ARG_REGS) {
+ p->hipe.nsp += (p->hipe.narity - NR_ARG_REGS);
+ }
+ if (nra != (Eterm)nbif_return) {
+ *--(p->hipe.nsp) = nra;
+ return 1;
+ }
+ return 0;
+}
+
+
/* Native makes a call which needs to unload the parameters.
This differs from hipe_call_from_native_is_recursive() in
that it doesn't check for or pop the BEAM-calls-native frame.
diff --git a/erts/emulator/pcre/Makefile b/erts/emulator/pcre/Makefile
deleted file mode 100644
index 72eea01130..0000000000
--- a/erts/emulator/pcre/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2008-2009. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-#
-#
-# Invoke with GNU make or clearmake -C gnu.
-#
-
-include $(ERL_TOP)/make/run_make.mk
-
-table:
- $(MAKE) -f $(TARGET)/Makefile $@
\ No newline at end of file
diff --git a/erts/emulator/pcre/Makefile.in b/erts/emulator/pcre/Makefile.in
deleted file mode 100644
index f62700ec4e..0000000000
--- a/erts/emulator/pcre/Makefile.in
+++ /dev/null
@@ -1,165 +0,0 @@
-# Makefile for zlib
-# Copyright (C) 1995-1996 Jean-loup Gailly.
-# For conditions of distribution and use, see copyright notice in zlib.h
-
-# To compile and test, type:
-# ./configure; make test
-# The call of configure is optional if you don't have special requirements
-
-# To install /usr/local/lib/libz.* and /usr/local/include/zlib.h, type:
-# make install
-# To install in $HOME instead of /usr/local, use:
-# make install prefix=$HOME
-
-# %ExternalCopyright%
-
-ARFLAGS = rc
-
-O = \
-pcre_latin_1_table.o \
-pcre_compile.o \
-pcre_config.o \
-pcre_dfa_exec.o \
-pcre_exec.o \
-pcre_fullinfo.o \
-pcre_get.o \
-pcre_globals.o \
-pcre_info.o \
-pcre_maketables.o \
-pcre_newline.o \
-pcre_ord2utf8.o \
-pcre_refcount.o \
-pcre_study.o \
-pcre_tables.o \
-pcre_try_flipped.o \
-pcre_ucp_searchfuncs.o \
-pcre_valid_utf8.o \
-pcre_version.o \
-pcre_xclass.o
-
-OBJS = $(O:%=$(OBJDIR)/%)
-
-GENINC = pcre_exec_loop_break_cases.inc
-
-#### Begin OTP targets
-
-include $(ERL_TOP)/make/target.mk
-
-# On windows we need a separate zlib during debug build
-ifeq ($(TARGET),win32)
-
-ifeq ($(TYPE),debug)
-CFLAGS = $(subst -O2, -g, @CFLAGS@ @DEFS@ @DEBUG_FLAGS@ @EMU_THR_DEFS@ -DERLANG_INTEGRATION)
-else # debug
-CFLAGS = @CFLAGS@ @DEFS@ @EMU_THR_DEFS@ -DERLANG_INTEGRATION
-endif # debug
-
-else # win32
-
-ifeq ($(TYPE),debug)
-TYPE_FLAGS = @DEBUG_CFLAGS@
-else # debug
-ifeq ($(TYPE),gcov)
-TYPE_FLAGS = -O0 -fprofile-arcs -ftest-coverage
-else # gcov
-TYPE_FLAGS = -O3
-endif # gcov
-endif # debug
-
-CFLAGS = $(TYPE_FLAGS) $(subst -O2,, @CFLAGS@) @DEFS@ @EMU_THR_DEFS@ -DERLANG_INTEGRATION
-
-endif # win32
-
-OBJDIR = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)
-
-include $(ERL_TOP)/make/$(TARGET)/otp.mk
-
-ifeq ($(TARGET), win32)
-LIBRARY=$(OBJDIR)/epcre.lib
-else
-LIBRARY=$(OBJDIR)/libepcre.a
-endif
-
-all: $(LIBRARY)
-
-# ----------------------------------------------------
-# Release Target
-# ----------------------------------------------------
-include $(ERL_TOP)/make/otp_release_targets.mk
-
-release_spec: opt
-
-tests release_tests:
-
-docs release_docs release_docs_spec:
-
-clean:
- rm -f $(OBJS) $(OBJDIR)/libepcre.a
-
-#### end OTP targets
-
-ifeq ($(TARGET), win32)
-$(LIBRARY): $(OBJS)
- $(AR) -out:$@ $(OBJS)
-else
-$(LIBRARY): $(OBJS)
- $(AR) $(ARFLAGS) $@ $(OBJS)
- -@ ($(RANLIB) $@ || true) 2>/dev/null
-endif
-
-$(OBJDIR)/%.o: %.c
- $(CC) -c $(CFLAGS) -o $@ $<
-
-$(GENINC): pcre_exec.c
- for x in `grep -n COST_CHK pcre_exec.c | grep -v 'COST_CHK(N)' | awk -F: '{print $$1}'`; \
- do \
- N=`expr $$x + 100`; \
- echo "case $$N: goto L_LOOP_COUNT_$${x};"; \
- done > $(GENINC)
-
-table: ./gen_table
- ./gen_table pcre_latin_1_table.c
-
-./gen_table: pcre_make_latin1_default.c make_latin1_table.c
- $(CC) $(CFLAGS) -o gen_table pcre_make_latin1_default.c make_latin1_table.c
-
-# DO NOT DELETE THIS LINE -- make depend depends on it.
-
-$(OBJDIR)/pcre_chartables.o: pcre_chartables.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_compile.o: pcre_compile.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_config.o: pcre_config.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_dfa_exec.o: pcre_dfa_exec.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_exec.o: pcre_exec.c pcre_internal.h local_config.h pcre.h ucp.h \
- $(GENINC)
-$(OBJDIR)/pcre_fullinfo.o: pcre_fullinfo.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_get.o: pcre_get.c pcre_internal.h local_config.h pcre.h ucp.h
-$(OBJDIR)/pcre_globals.o: pcre_globals.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_info.o: pcre_info.c pcre_internal.h local_config.h pcre.h ucp.h
-$(OBJDIR)/pcre_maketables.o: pcre_maketables.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_newline.o: pcre_newline.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_ord2utf8.o: pcre_ord2utf8.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_refcount.o: pcre_refcount.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_study.o: pcre_study.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_tables.o: pcre_tables.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_try_flipped.o: pcre_try_flipped.c pcre_internal.h \
- local_config.h pcre.h ucp.h
-$(OBJDIR)/pcre_ucp_searchfuncs.o: pcre_ucp_searchfuncs.c pcre_internal.h \
- local_config.h pcre.h ucp.h ucpinternal.h ucptable.h
-$(OBJDIR)/pcre_valid_utf8.o: pcre_valid_utf8.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-pcre_version.o: pcre_version.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_xclass.o: pcre_xclass.c pcre_internal.h local_config.h pcre.h \
- ucp.h
diff --git a/erts/emulator/pcre/pcre.mk b/erts/emulator/pcre/pcre.mk
new file mode 100644
index 0000000000..b752c11459
--- /dev/null
+++ b/erts/emulator/pcre/pcre.mk
@@ -0,0 +1,113 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+ARFLAGS = rc
+
+PCRE_O = \
+pcre_latin_1_table.o \
+pcre_compile.o \
+pcre_config.o \
+pcre_dfa_exec.o \
+pcre_exec.o \
+pcre_fullinfo.o \
+pcre_get.o \
+pcre_globals.o \
+pcre_info.o \
+pcre_maketables.o \
+pcre_newline.o \
+pcre_ord2utf8.o \
+pcre_refcount.o \
+pcre_study.o \
+pcre_tables.o \
+pcre_try_flipped.o \
+pcre_ucp_searchfuncs.o \
+pcre_valid_utf8.o \
+pcre_version.o \
+pcre_xclass.o
+
+PCRE_OBJS = $(PCRE_O:%=$(PCRE_OBJDIR)/%)
+
+GENINC = pcre/pcre_exec_loop_break_cases.inc
+
+PCRE_OBJDIR = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)
+
+PCRE_CFLAGS = $(filter-out -DDEBUG,$(CFLAGS)) -DERLANG_INTEGRATION
+
+ifeq ($(TARGET), win32)
+$(EPCRE_LIB): $(PCRE_OBJS)
+ $(AR) -out:$@ $(PCRE_OBJS)
+else
+$(EPCRE_LIB): $(PCRE_OBJS)
+ $(AR) $(ARFLAGS) $@ $(PCRE_OBJS)
+ -@ ($(RANLIB) $@ || true) 2>/dev/null
+endif
+
+$(PCRE_OBJDIR)/%.o: pcre/%.c
+ $(CC) -c $(PCRE_CFLAGS) -o $@ $<
+
+$(GENINC): pcre/pcre_exec.c
+ for x in `grep -n COST_CHK pcre/pcre_exec.c | grep -v 'COST_CHK(N)' | awk -F: '{print $$1}'`; \
+ do \
+ N=`expr $$x + 100`; \
+ echo "case $$N: goto L_LOOP_COUNT_$${x};"; \
+ done > $(GENINC)
+
+# Dependencies.
+
+$(PCRE_OBJDIR)/pcre_chartables.o: pcre/pcre_chartables.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_compile.o: pcre/pcre_compile.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_config.o: pcre/pcre_config.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_dfa_exec.o: pcre/pcre_dfa_exec.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_exec.o: pcre/pcre_exec.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h $(GENINC)
+$(PCRE_OBJDIR)/pcre_fullinfo.o: pcre/pcre_fullinfo.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_get.o: pcre/pcre_get.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_globals.o: pcre/pcre_globals.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_info.o: pcre/pcre_info.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_maketables.o: pcre/pcre_maketables.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_newline.o: pcre/pcre_newline.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_ord2utf8.o: pcre/pcre_ord2utf8.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_refcount.o: pcre/pcre_refcount.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_study.o: pcre/pcre_study.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_tables.o: pcre/pcre_tables.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_try_flipped.o: pcre/pcre_try_flipped.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_ucp_searchfuncs.o: pcre/pcre_ucp_searchfuncs.c \
+ pcre/pcre_internal.h pcre/local_config.h pcre/pcre.h pcre/ucp.h \
+ pcre/ucpinternal.h pcre/ucptable.h
+$(PCRE_OBJDIR)/pcre_valid_utf8.o: pcre/pcre_valid_utf8.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_version.o: pcre/pcre_version.c pcre/pcre_internal.h pcre/local_config.h \
+ pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_xclass.o: pcre/pcre_xclass.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 57321259f9..ba88fd1d39 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -35,6 +35,7 @@
#include "sys.h"
#include "global.h"
#include "erl_check_io.h"
+#include "erl_thr_progress.h"
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
# define ERTS_DRV_EV_STATE_EXTRA_SIZE 128
@@ -66,6 +67,9 @@ typedef char EventStateFlags;
#define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control)
#define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait)
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)
+#endif
#define ERTS_CIO_POLL_INTR ERTS_POLL_EXPORT(erts_poll_interrupt)
#define ERTS_CIO_POLL_INTR_TMD ERTS_POLL_EXPORT(erts_poll_interrupt_timed)
#define ERTS_CIO_NEW_POLLSET ERTS_POLL_EXPORT(erts_poll_create_pollset)
@@ -1115,6 +1119,14 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data)
static void bad_fd_in_pollset( ErtsDrvEventState *, Eterm, Eterm, ErtsPollEvents);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void
+ERTS_CIO_EXPORT(erts_check_io_async_sig_interrupt)(void)
+{
+ ERTS_CIO_POLL_AS_INTR(pollset.ps);
+}
+#endif
+
void
ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
{
@@ -1153,7 +1165,6 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
pollres_len = sizeof(pollres)/sizeof(ErtsPollResFd);
erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1);
@@ -1163,7 +1174,6 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
erts_deliver_time(); /* sync the machine's idea of time */
@@ -1870,13 +1880,12 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
erts_printf("--- fds in pollset --------------------------------------\n");
-#ifdef ERTS_SMP
-# ifdef ERTS_ENABLE_LOCK_CHECK
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-# endif
- erts_block_system(0); /* stop the world to avoid messy locking */
#endif
+ erts_smp_thr_progress_block(); /* stop the world to avoid messy locking */
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
counters.epep = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds);
ERTS_POLL_EXPORT(erts_poll_get_selected_events)(pollset.ps, counters.epep, max_fds);
@@ -1898,9 +1907,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
safe_hash_for_each(&drv_ev_state_tab, &doit_erts_check_io_debug, (void *) &counters);
#endif
-#ifdef ERTS_SMP
- erts_release_system();
-#endif
+ erts_smp_thr_progress_unblock();
erts_printf("\n");
erts_printf("used fds=%d\n", counters.used_fds);
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index 9b45a63913..7cc1658062 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -40,6 +40,10 @@ Eterm erts_check_io_info_kp(void *);
Eterm erts_check_io_info_nkp(void *);
int erts_check_io_max_files_kp(void);
int erts_check_io_max_files_nkp(void);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void erts_check_io_async_sig_interrupt_kp(void);
+void erts_check_io_async_sig_interrupt_nkp(void);
+#endif
void erts_check_io_interrupt_kp(int);
void erts_check_io_interrupt_nkp(int);
void erts_check_io_interrupt_timed_kp(int, long);
@@ -56,6 +60,9 @@ int erts_check_io_debug_nkp(void);
Uint erts_check_io_size(void);
Eterm erts_check_io_info(void *);
int erts_check_io_max_files(void);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void erts_check_io_async_sig_interrupt(void);
+#endif
void erts_check_io_interrupt(int);
void erts_check_io_interrupt_timed(int, long);
void erts_check_io(int);
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index eaef6680dd..49750ff6ce 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -36,14 +36,12 @@
#include "erl_threads.h"
#include "erl_mtrace.h"
#include "erl_time.h"
+#include "erl_alloc.h"
#include "big.h"
+#include "erl_thr_progress.h"
#if HAVE_ERTS_MSEG
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-# define ERTS_THREADS_NO_SMP
-#endif
-
#define SEGTYPE ERTS_MTRACE_SEGMENT_ID
#ifndef HAVE_GETPAGESIZE
@@ -75,16 +73,9 @@
static int atoms_initialized;
-static Uint cache_check_interval;
-
typedef struct mem_kind_t MemKind;
-static void check_cache(void *unused);
static void mseg_clear_cache(MemKind*);
-static int is_cache_check_scheduled;
-#ifdef ERTS_THREADS_NO_SMP
-static int is_cache_check_requested;
-#endif
#if HALFWORD_HEAP
static int initialize_pmmap(void);
@@ -138,7 +129,8 @@ const ErtsMsegOpt_t erts_mseg_default_opt = {
1, /* Use cache */
1, /* Preserv data */
0, /* Absolute shrink threshold */
- 0 /* Relative shrink threshold */
+ 0, /* Relative shrink threshold */
+ 0 /* Scheduler specific */
#if HALFWORD_HEAP
,0 /* need low memory */
#endif
@@ -157,11 +149,10 @@ typedef struct {
Uint32 no;
} CallCounter;
-static int is_init_done;
static Uint page_size;
static Uint page_shift;
-static struct {
+typedef struct {
CallCounter alloc;
CallCounter dealloc;
CallCounter realloc;
@@ -172,7 +163,9 @@ static struct {
#endif
CallCounter clear_cache;
CallCounter check_cache;
-} calls;
+} ErtsMsegCalls;
+
+typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
struct mem_kind_t {
cache_desc_t cache_descs[MAX_CACHE_SIZE];
@@ -201,25 +194,84 @@ struct mem_kind_t {
} max_ever;
} segments;
+ ErtsMsegAllctr_t *ma;
const char* name;
MemKind* next;
};/*MemKind*/
+struct ErtsMsegAllctr_t_ {
+ int ix;
+
+ int is_init_done;
+ int is_thread_safe;
+ erts_mtx_t mtx;
+
+ int is_cache_check_scheduled;
+
+ MemKind* mk_list;
+
#if HALFWORD_HEAP
-static MemKind low_mem, hi_mem;
+ MemKind low_mem;
+ MemKind hi_mem;
#else
-static MemKind the_mem;
+ MemKind the_mem;
#endif
-static MemKind* mk_list = NULL;
-static Uint max_cache_size;
-static Uint abs_max_cache_bad_fit;
-static Uint rel_max_cache_bad_fit;
+ Uint max_cache_size;
+ Uint abs_max_cache_bad_fit;
+ Uint rel_max_cache_bad_fit;
+
+ ErtsMsegCalls calls;
#if CAN_PARTLY_DESTROY
-static Uint min_seg_size;
+ Uint min_seg_size;
+#endif
+
+};
+
+typedef union {
+ ErtsMsegAllctr_t mseg_alloc;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMsegAllctr_t))];
+} ErtsAlgndMsegAllctr_t;
+
+static int no_mseg_allocators;
+static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr;
+
+#ifdef ERTS_SMP
+
+#define ERTS_MSEG_ALLCTR_IX(IX) \
+ (&aligned_mseg_allctr[(IX)].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_SS() \
+ ERTS_MSEG_ALLCTR_IX((int) erts_get_scheduler_id())
+
+#define ERTS_MSEG_ALLCTR_OPT(OPT) \
+ ((OPT)->sched_spec ? ERTS_MSEG_ALLCTR_SS() : ERTS_MSEG_ALLCTR_IX(0))
+
+#else
+
+#define ERTS_MSEG_ALLCTR_IX(IX) \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_SS() \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_OPT(OPT) \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
#endif
+#define ERTS_MSEG_LOCK(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ erts_mtx_lock(&(MA)->mtx); \
+} while (0)
+
+#define ERTS_MSEG_UNLOCK(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ erts_mtx_unlock(&(MA)->mtx); \
+} while (0)
#define ERTS_MSEG_ALLOC_STAT(C,SZ) \
do { \
@@ -250,104 +302,44 @@ do { \
#define ONE_GIGA (1000000000)
-#define ZERO_CC(CC) (calls.CC.no = 0, calls.CC.giga_no = 0)
+#define ZERO_CC(MA, CC) ((MA)->calls.CC.no = 0, \
+ (MA)->calls.CC.giga_no = 0)
-#define INC_CC(CC) (calls.CC.no == ONE_GIGA - 1 \
- ? (calls.CC.giga_no++, calls.CC.no = 0) \
- : calls.CC.no++)
+#define INC_CC(MA, CC) ((MA)->calls.CC.no == ONE_GIGA - 1 \
+ ? ((MA)->calls.CC.giga_no++, \
+ (MA)->calls.CC.no = 0) \
+ : (MA)->calls.CC.no++)
-#define DEC_CC(CC) (calls.CC.no == 0 \
- ? (calls.CC.giga_no--, \
- calls.CC.no = ONE_GIGA - 1) \
- : calls.CC.no--)
+#define DEC_CC(MA, CC) ((MA)->calls.CC.no == 0 \
+ ? ((MA)->calls.CC.giga_no--, \
+ (MA)->calls.CC.no = ONE_GIGA - 1) \
+ : (MA)->calls.CC.no--)
-static erts_mtx_t mseg_mutex; /* Also needed when !USE_THREADS */
static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */
-#ifdef USE_THREADS
-#ifdef ERTS_THREADS_NO_SMP
-static erts_tid_t main_tid;
-static int async_handle = -1;
-#endif
-
-static void thread_safe_init(void)
-{
- erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
- erts_mtx_init(&mseg_mutex, "mseg");
-
-#ifdef ERTS_THREADS_NO_SMP
- main_tid = erts_thr_self();
-#endif
-}
-
-#endif
-
-static ErlTimer cache_check_timer;
static ERTS_INLINE void
-schedule_cache_check(void)
-{
- if (!is_cache_check_scheduled && is_init_done) {
-#ifdef ERTS_THREADS_NO_SMP
- if (!erts_equal_tids(erts_thr_self(), main_tid)) {
- if (!is_cache_check_requested) {
- is_cache_check_requested = 1;
- sys_async_ready(async_handle);
- }
- }
- else
-#endif
- {
- cache_check_timer.active = 0;
- erts_set_timer(&cache_check_timer,
- check_cache,
- NULL,
- NULL,
- cache_check_interval);
- is_cache_check_scheduled = 1;
-#ifdef ERTS_THREADS_NO_SMP
- is_cache_check_requested = 0;
-#endif
- }
- }
-}
-
-#ifdef ERTS_THREADS_NO_SMP
-
-static void
-check_schedule_cache_check(void)
+schedule_cache_check(ErtsMsegAllctr_t *ma)
{
- erts_mtx_lock(&mseg_mutex);
- if (is_cache_check_requested
- && !is_cache_check_scheduled) {
- schedule_cache_check();
- }
- erts_mtx_unlock(&mseg_mutex);
-}
-
-#endif
-static void
-mseg_shutdown(void)
-{
- MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
- mseg_clear_cache(mk);
+ if (!ma->is_cache_check_scheduled && ma->is_init_done) {
+ erts_set_aux_work_timeout(ma->ix,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
+ 1);
+ ma->is_cache_check_scheduled = 1;
}
- erts_mtx_unlock(&mseg_mutex);
}
static ERTS_INLINE void *
-mseg_create(MemKind* mk, Uint size)
+mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
{
void *seg;
ASSERT(size % page_size == 0);
#if HALFWORD_HEAP
- if (mk == &low_mem) {
+ if (mk == &ma->low_mem) {
seg = pmmap(size);
if ((unsigned long) seg & CHECK_POINTER_MASK) {
erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
@@ -371,18 +363,18 @@ mseg_create(MemKind* mk, Uint size)
#endif
}
- INC_CC(create);
+ INC_CC(ma, create);
return seg;
}
static ERTS_INLINE void
-mseg_destroy(MemKind* mk, void *seg, Uint size)
+mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
{
int res;
#if HALFWORD_HEAP
- if (mk == &low_mem) {
+ if (mk == &ma->low_mem) {
res = pmunmap((void *) seg, size);
}
else
@@ -401,14 +393,14 @@ mseg_destroy(MemKind* mk, void *seg, Uint size)
ASSERT(size % page_size == 0);
ASSERT(res == 0);
- INC_CC(destroy);
+ INC_CC(ma, destroy);
}
#if HAVE_MSEG_RECREATE
static ERTS_INLINE void *
-mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
+mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
{
void *new_seg;
@@ -416,7 +408,7 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
ASSERT(new_size % page_size == 0);
#if HALFWORD_HEAP
- if (mk == &low_mem) {
+ if (mk == &ma->low_mem) {
new_seg = (void *) pmremap((void *) old_seg,
(size_t) old_size,
(size_t) new_size);
@@ -447,19 +439,37 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
#endif
}
- INC_CC(recreate);
+ INC_CC(ma, recreate);
return new_seg;
}
#endif /* #if HAVE_MSEG_RECREATE */
+#ifdef DEBUG
+#define ERTS_DBG_MA_CHK_THR_ACCESS(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&(MA)->mtx) \
+ || erts_smp_thr_progress_is_blocking() \
+ || ERTS_IS_CRASH_DUMPING); \
+ else \
+ ERTS_LC_ASSERT((MA)->ix == (int) erts_get_scheduler_id() \
+ || erts_smp_thr_progress_is_blocking() \
+ || ERTS_IS_CRASH_DUMPING); \
+} while (0)
+#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \
+ ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma)
+#else
+#define ERTS_DBG_MA_CHK_THR_ACCESS(MA)
+#define ERTS_DBG_MK_CHK_THR_ACCESS(MK)
+#endif
static ERTS_INLINE cache_desc_t *
alloc_cd(MemKind* mk)
{
cache_desc_t *cd = mk->free_cache_descs;
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (cd)
mk->free_cache_descs = cd->next;
return cd;
@@ -468,7 +478,7 @@ alloc_cd(MemKind* mk)
static ERTS_INLINE void
free_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
cd->next = mk->free_cache_descs;
mk->free_cache_descs = cd;
}
@@ -477,7 +487,7 @@ free_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
link_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (mk->cache)
mk->cache->prev = cd;
cd->next = mk->cache;
@@ -496,7 +506,7 @@ link_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
end_link_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (mk->cache_end)
mk->cache_end->next = cd;
cd->next = NULL;
@@ -515,7 +525,7 @@ end_link_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
unlink_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (cd->next)
cd->next->prev = cd->prev;
else
@@ -533,7 +543,7 @@ static ERTS_INLINE void
check_cache_limits(MemKind* mk)
{
cache_desc_t *cd;
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
mk->max_cached_seg_size = 0;
mk->min_cached_seg_size = ~((Uint) 0);
for (cd = mk->cache; cd; cd = cd->next) {
@@ -551,7 +561,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
int check_limits = force_check_limits;
Sint max_cached = ((Sint) mk->segments.current.watermark
- (Sint) mk->segments.current.no);
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
ASSERT(mk->cache_end);
cd = mk->cache_end;
@@ -562,7 +572,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk, cd->seg, cd->size);
+ mseg_destroy(mk->ma, mk, cd->seg, cd->size);
unlink_cd(mk,cd);
free_cd(mk,cd);
}
@@ -571,7 +581,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
check_cache_limits(mk);
}
-static void
+static Uint
check_one_cache(MemKind* mk)
{
if (mk->segments.current.watermark > mk->segments.current.no)
@@ -579,23 +589,37 @@ check_one_cache(MemKind* mk)
adjust_cache_size(mk, 0);
if (mk->cache_size)
- schedule_cache_check();
+ schedule_cache_check(mk->ma);
+ return mk->cache_size;
}
-static void check_cache(void* unused)
+static void do_cache_check(ErtsMsegAllctr_t *ma)
{
+ int empty_cache = 1;
MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- is_cache_check_scheduled = 0;
+ ERTS_MSEG_LOCK(ma);
- for (mk=mk_list; mk; mk=mk->next) {
- check_one_cache(mk);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
+ if (check_one_cache(mk))
+ empty_cache = 0;
+ }
+
+ if (empty_cache) {
+ ma->is_cache_check_scheduled = 0;
+ erts_set_aux_work_timeout(ma->ix,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
+ 0);
}
- INC_CC(check_cache);
+ INC_CC(ma, check_cache);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
+}
+
+void erts_mseg_cache_check(void)
+{
+ do_cache_check(ERTS_MSEG_ALLCTR_SS());
}
static void
@@ -611,42 +635,44 @@ mseg_clear_cache(MemKind* mk)
mk->segments.current.watermark = mk->segments.current.no;
- INC_CC(clear_cache);
+ INC_CC(mk->ma, clear_cache);
}
-static ERTS_INLINE MemKind* memkind(const ErtsMsegOpt_t *opt)
+static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
+ const ErtsMsegOpt_t *opt)
{
#if HALFWORD_HEAP
- return opt->low_mem ? &low_mem : &hi_mem;
+ return opt->low_mem ? &ma->low_mem : &ma->hi_mem;
#else
- return &the_mem;
+ return &ma->the_mem;
#endif
}
static void *
-mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
+mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
+ const ErtsMsegOpt_t *opt)
{
Uint max, min, diff_size, size;
cache_desc_t *cd, *cand_cd;
void *seg;
- MemKind* mk = memkind(opt);
+ MemKind* mk = memkind(ma, opt);
- INC_CC(alloc);
+ INC_CC(ma, alloc);
size = PAGE_CEILING(*size_p);
#if CAN_PARTLY_DESTROY
- if (size < min_seg_size)
- min_seg_size = size;
+ if (size < ma->min_seg_size)
+ ma->min_seg_size = size;
#endif
if (!opt->cache) {
create_seg:
adjust_cache_size(mk,0);
- seg = mseg_create(mk, size);
+ seg = mseg_create(ma, mk, size);
if (!seg) {
mseg_clear_cache(mk);
- seg = mseg_create(mk, size);
+ seg = mseg_create(ma, mk, size);
if (!seg)
size = 0;
}
@@ -667,10 +693,10 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
diff_size = mk->min_cached_seg_size - size;
- if (diff_size > abs_max_cache_bad_fit)
+ if (diff_size > ma->abs_max_cache_bad_fit)
goto create_seg;
- if (100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size))
+ if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size))
goto create_seg;
}
@@ -708,8 +734,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
diff_size = cand_cd->size - size;
- if (diff_size > abs_max_cache_bad_fit
- || 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) {
+ if (diff_size > ma->abs_max_cache_bad_fit
+ || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) {
if (mk->max_cached_seg_size < cand_cd->size)
mk->max_cached_seg_size = cand_cd->size;
if (mk->min_cached_seg_size > cand_cd->size)
@@ -740,18 +766,18 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
static void
-mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
+mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
- MemKind* mk = memkind(opt);
+ MemKind* mk = memkind(ma, opt);
cache_desc_t *cd;
ERTS_MSEG_DEALLOC_STAT(mk,size);
- if (!opt->cache || max_cache_size == 0) {
+ if (!opt->cache || ma->max_cache_size == 0) {
if (erts_mtrace_enabled)
erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(mk, seg, size);
+ mseg_destroy(ma, mk, seg, size);
}
else {
int check_limits = 0;
@@ -769,7 +795,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk, cd->seg, cd->size);
+ mseg_destroy(ma, mk, cd->seg, cd->size);
unlink_cd(mk,cd);
free_cd(mk,cd);
}
@@ -790,33 +816,34 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
if (check_limits)
check_cache_limits(mk);
- schedule_cache_check();
+ schedule_cache_check(ma);
}
- INC_CC(dealloc);
+ INC_CC(ma, dealloc);
}
static void *
-mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
- const ErtsMsegOpt_t *opt)
+mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt)
{
- MemKind* mk = memkind(opt);
+ MemKind* mk;
void *new_seg;
Uint new_size;
if (!seg || !old_size) {
- new_seg = mseg_alloc(atype, new_size_p, opt);
- DEC_CC(alloc);
+ new_seg = mseg_alloc(ma, atype, new_size_p, opt);
+ DEC_CC(ma, alloc);
return new_seg;
}
if (!(*new_size_p)) {
- mseg_dealloc(atype, seg, old_size, opt);
- DEC_CC(dealloc);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
+ DEC_CC(ma, dealloc);
return NULL;
}
+ mk = memkind(ma, opt);
new_seg = seg;
new_size = PAGE_CEILING(*new_size_p);
@@ -826,8 +853,8 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
Uint shrink_sz = old_size - new_size;
#if CAN_PARTLY_DESTROY
- if (new_size < min_seg_size)
- min_seg_size = new_size;
+ if (new_size < ma->min_seg_size)
+ ma->min_seg_size = new_size;
#endif
if (shrink_sz < opt->abs_shrink_th
@@ -838,7 +865,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if CAN_PARTLY_DESTROY
- if (shrink_sz > min_seg_size
+ if (shrink_sz > ma->min_seg_size
&& mk->free_cache_descs
&& opt->cache) {
cache_desc_t *cd;
@@ -857,7 +884,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
new_size);
erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size);
}
- schedule_cache_check();
+ schedule_cache_check(ma);
}
else {
if (erts_mtrace_enabled)
@@ -866,7 +893,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
SEGTYPE,
seg,
new_size);
- mseg_destroy(mk, ((char *) seg) + new_size, shrink_sz);
+ mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
}
#elif HAVE_MSEG_RECREATE
@@ -875,14 +902,14 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#else
- new_seg = mseg_alloc(atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
sys_memcpy(((char *) new_seg),
((char *) seg),
MIN(new_size, old_size));
- mseg_dealloc(atype, seg, old_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
@@ -892,34 +919,34 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
else {
if (!opt->preserv) {
- mseg_dealloc(atype, seg, old_size, opt);
- new_seg = mseg_alloc(atype, &new_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
}
else {
#if HAVE_MSEG_RECREATE
#if !CAN_PARTLY_DESTROY
do_recreate:
#endif
- new_seg = mseg_recreate(mk, (void *) seg, old_size, new_size);
+ new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size);
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
if (!new_seg)
new_size = old_size;
#else
- new_seg = mseg_alloc(atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
sys_memcpy(((char *) new_seg),
((char *) seg),
MIN(new_size, old_size));
- mseg_dealloc(atype, seg, old_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
}
}
- INC_CC(realloc);
+ INC_CC(ma, realloc);
*new_size_p = new_size;
@@ -937,7 +964,6 @@ static struct {
Eterm amcbf;
Eterm rmcbf;
Eterm mcs;
- Eterm cci;
Eterm memkind;
Eterm name;
@@ -973,13 +999,13 @@ static void ERTS_INLINE atom_init(Eterm *atom, char *name)
#define AM_INIT(AM) atom_init(&am.AM, #AM)
static void
-init_atoms(void)
+init_atoms(ErtsMsegAllctr_t *ma)
{
#ifdef DEBUG
Eterm *atom;
#endif
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
erts_mtx_lock(&init_atoms_mutex);
if (!atoms_initialized) {
@@ -997,7 +1023,6 @@ init_atoms(void)
AM_INIT(amcbf);
AM_INIT(rmcbf);
AM_INIT(mcs);
- AM_INIT(cci);
AM_INIT(status);
AM_INIT(cached_segments);
@@ -1025,7 +1050,7 @@ init_atoms(void)
#endif
}
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
atoms_initialized = 1;
erts_mtx_unlock(&init_atoms_mutex);
}
@@ -1082,7 +1107,8 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
}
static Eterm
-info_options(char *prefix,
+info_options(ErtsMsegAllctr_t *ma,
+ char *prefix,
int *print_to_p,
void *print_to_arg,
Uint **hpp,
@@ -1093,30 +1119,26 @@ info_options(char *prefix,
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "%samcbf: %beu\n", prefix, abs_max_cache_bad_fit);
- erts_print(to, arg, "%srmcbf: %beu\n", prefix, rel_max_cache_bad_fit);
- erts_print(to, arg, "%smcs: %beu\n", prefix, max_cache_size);
- erts_print(to, arg, "%scci: %beu\n", prefix, cache_check_interval);
+ erts_print(to, arg, "%samcbf: %beu\n", prefix, ma->abs_max_cache_bad_fit);
+ erts_print(to, arg, "%srmcbf: %beu\n", prefix, ma->rel_max_cache_bad_fit);
+ erts_print(to, arg, "%smcs: %beu\n", prefix, ma->max_cache_size);
}
if (hpp || szp) {
if (!atoms_initialized)
- init_atoms();
+ init_atoms(ma);
res = NIL;
add_2tup(hpp, szp, &res,
- am.cci,
- bld_uint(hpp, szp, cache_check_interval));
- add_2tup(hpp, szp, &res,
am.mcs,
- bld_uint(hpp, szp, max_cache_size));
+ bld_uint(hpp, szp, ma->max_cache_size));
add_2tup(hpp, szp, &res,
am.rmcbf,
- bld_uint(hpp, szp, rel_max_cache_bad_fit));
+ bld_uint(hpp, szp, ma->rel_max_cache_bad_fit));
add_2tup(hpp, szp, &res,
am.amcbf,
- bld_uint(hpp, szp, abs_max_cache_bad_fit));
+ bld_uint(hpp, szp, ma->abs_max_cache_bad_fit));
}
@@ -1124,18 +1146,18 @@ info_options(char *prefix,
}
static Eterm
-info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
if (print_to_p) {
-#define PRINT_CC(TO, TOA, CC) \
- if (calls.CC.giga_no == 0) \
- erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, calls.CC.no); \
- else \
+#define PRINT_CC(TO, TOA, CC) \
+ if (ma->calls.CC.giga_no == 0) \
+ erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, ma->calls.CC.no); \
+ else \
erts_print(TO, TOA, "mseg_%s calls: %b32u%09b32u\n", #CC, \
- calls.CC.giga_no, calls.CC.no)
+ ma->calls.CC.giga_no, ma->calls.CC.no)
int to = *print_to_p;
void *arg = print_to_arg;
@@ -1161,48 +1183,48 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
add_3tup(hpp, szp, &res,
am.mseg_check_cache,
- bld_unstable_uint(hpp, szp, calls.check_cache.giga_no),
- bld_unstable_uint(hpp, szp, calls.check_cache.no));
+ bld_unstable_uint(hpp, szp, ma->calls.check_cache.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.check_cache.no));
add_3tup(hpp, szp, &res,
am.mseg_clear_cache,
- bld_unstable_uint(hpp, szp, calls.clear_cache.giga_no),
- bld_unstable_uint(hpp, szp, calls.clear_cache.no));
+ bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no));
#if HAVE_MSEG_RECREATE
add_3tup(hpp, szp, &res,
am.mseg_recreate,
- bld_unstable_uint(hpp, szp, calls.recreate.giga_no),
- bld_unstable_uint(hpp, szp, calls.recreate.no));
+ bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.recreate.no));
#endif
add_3tup(hpp, szp, &res,
am.mseg_destroy,
- bld_unstable_uint(hpp, szp, calls.destroy.giga_no),
- bld_unstable_uint(hpp, szp, calls.destroy.no));
+ bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.destroy.no));
add_3tup(hpp, szp, &res,
am.mseg_create,
- bld_unstable_uint(hpp, szp, calls.create.giga_no),
- bld_unstable_uint(hpp, szp, calls.create.no));
+ bld_unstable_uint(hpp, szp, ma->calls.create.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.create.no));
add_3tup(hpp, szp, &res,
am.mseg_realloc,
- bld_unstable_uint(hpp, szp, calls.realloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.realloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.realloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.realloc.no));
add_3tup(hpp, szp, &res,
am.mseg_dealloc,
- bld_unstable_uint(hpp, szp, calls.dealloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.dealloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.dealloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.dealloc.no));
add_3tup(hpp, szp, &res,
am.mseg_alloc,
- bld_unstable_uint(hpp, szp, calls.alloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.alloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.alloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.alloc.no));
}
return res;
}
static Eterm
-info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
+info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
int begin_new_max_period, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1258,7 +1280,7 @@ info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
return res;
}
-static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
+static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
int begin_max_per, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1274,8 +1296,8 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
atoms[2] = am.calls;
values[0] = erts_bld_string(hpp, szp, mk->name);
}
- values[1] = info_status(mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[2] = info_calls(print_to_p, print_to_arg, hpp, szp);
+ values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
if (hpp || szp)
res = bld_2tup_list(hpp, szp, 3, atoms, values);
@@ -1285,7 +1307,7 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
static Eterm
-info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1306,56 +1328,64 @@ info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
\* */
Eterm
-erts_mseg_info_options(int *print_to_p, void *print_to_arg,
+erts_mseg_info_options(int ix,
+ int *print_to_p, void *print_to_arg,
Uint **hpp, Uint *szp)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix);
Eterm res;
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
- res = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- erts_mtx_unlock(&mseg_mutex);
+ res = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
+
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
Eterm
-erts_mseg_info(int *print_to_p,
+erts_mseg_info(int ix,
+ int *print_to_p,
void *print_to_arg,
int begin_max_per,
Uint **hpp,
Uint *szp)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix);
Eterm res = THE_NON_VALUE;
Eterm atoms[4];
Eterm values[4];
Uint n = 0;
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
if (hpp || szp) {
if (!atoms_initialized)
- init_atoms();
+ init_atoms(ma);
atoms[0] = am.version;
atoms[1] = am.options;
atoms[2] = am.memkind;
atoms[3] = am.memkind;
}
- values[n++] = info_version(print_to_p, print_to_arg, hpp, szp);
- values[n++] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
#if HALFWORD_HEAP
- values[n++] = info_memkind(&low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[n++] = info_memkind(&hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
#else
- values[n++] = info_memkind(&the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
#endif
if (hpp || szp)
res = bld_2tup_list(hpp, szp, n, atoms, values);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
@@ -1363,10 +1393,12 @@ erts_mseg_info(int *print_to_p,
void *
erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *seg;
- erts_mtx_lock(&mseg_mutex);
- seg = mseg_alloc(atype, size_p, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ seg = mseg_alloc(ma, atype, size_p, opt);
+ ERTS_MSEG_UNLOCK(ma);
return seg;
}
@@ -1377,12 +1409,14 @@ erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
}
void
-erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size,
- const ErtsMsegOpt_t *opt)
+erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg,
+ Uint size, const ErtsMsegOpt_t *opt)
{
- erts_mtx_lock(&mseg_mutex);
- mseg_dealloc(atype, seg, size, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ mseg_dealloc(ma, atype, seg, size, opt);
+ ERTS_MSEG_UNLOCK(ma);
}
void
@@ -1392,44 +1426,60 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
}
void *
-erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size,
- Uint *new_size_p, const ErtsMsegOpt_t *opt)
+erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p,
+ const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *new_seg;
- erts_mtx_lock(&mseg_mutex);
- new_seg = mseg_realloc(atype, seg, old_size, new_size_p, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt);
+ ERTS_MSEG_UNLOCK(ma);
return new_seg;
}
void *
-erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
- Uint *new_size_p)
+erts_mseg_realloc(ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p)
{
- return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &erts_mseg_default_opt);
+ return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p,
+ &erts_mseg_default_opt);
}
void
erts_mseg_clear_cache(void)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
+
+start:
+
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
mseg_clear_cache(mk);
}
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
+
+ if (ma->ix != 0) {
+ ma = ERTS_MSEG_ALLCTR_IX(0);
+ goto start;
+ }
}
Uint
-erts_mseg_no(void)
+erts_mseg_no(const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
MemKind* mk;
Uint n = 0;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
n += mk->segments.current.no;
}
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
return n;
}
@@ -1439,7 +1489,7 @@ erts_mseg_unit_size(void)
return page_size;
}
-static void mem_kind_init(MemKind* mk, const char* name)
+static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
{
unsigned i;
@@ -1450,10 +1500,10 @@ static void mem_kind_init(MemKind* mk, const char* name)
mk->cache_size = 0;
mk->cache_hits = 0;
- if (max_cache_size > 0) {
- for (i = 0; i < max_cache_size - 1; i++)
+ if (ma->max_cache_size > 0) {
+ for (i = 0; i < ma->max_cache_size - 1; i++)
mk->cache_descs[i].next = &mk->cache_descs[i + 1];
- mk->cache_descs[max_cache_size - 1].next = NULL;
+ mk->cache_descs[ma->max_cache_size - 1].next = NULL;
mk->free_cache_descs = &mk->cache_descs[0];
}
else
@@ -1467,30 +1517,38 @@ static void mem_kind_init(MemKind* mk, const char* name)
mk->segments.max_ever.no = 0;
mk->segments.max_ever.sz = 0;
+ mk->ma = ma;
mk->name = name;
- mk->next = mk_list;
- mk_list = mk;
+ mk->next = ma->mk_list;
+ ma->mk_list = mk;
}
+
+
void
erts_mseg_init(ErtsMsegInit_t *init)
{
- atoms_initialized = 0;
- is_init_done = 0;
+ int i;
+ UWord x;
- /* Options ... */
+#ifdef ERTS_SMP
+ no_mseg_allocators = init->nos + 1;
+#else
+ no_mseg_allocators = 1;
+#endif
- abs_max_cache_bad_fit = init->amcbf;
- rel_max_cache_bad_fit = init->rmcbf;
- max_cache_size = init->mcs;
- cache_check_interval = init->cci;
+ x = (UWord) malloc(sizeof(ErtsAlgndMsegAllctr_t)
+ *no_mseg_allocators
+ + (ERTS_CACHE_LINE_SIZE-1));
+ if (x & ERTS_CACHE_LINE_MASK)
+ x = (x & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ ASSERT((x & ERTS_CACHE_LINE_MASK) == 0);
+ aligned_mseg_allctr = (ErtsAlgndMsegAllctr_t *) x;
- /* */
+ atoms_initialized = 0;
-#ifdef USE_THREADS
- thread_safe_init();
-#endif
+ erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
#if HAVE_MMAP && !defined(MAP_ANON)
mmap_fd = open("/dev/zero", O_RDWR);
@@ -1512,34 +1570,55 @@ erts_mseg_init(ErtsMsegInit_t *init)
page_shift++;
}
- sys_memzero((void *) &calls, sizeof(calls));
+ for (i = 0; i < no_mseg_allocators; i++) {
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i);
-#if CAN_PARTLY_DESTROY
- min_seg_size = ~((Uint) 0);
-#endif
+ ma->ix = i;
+
+ ma->is_init_done = 0;
+
+ if (i != 0)
+ ma->is_thread_safe = 0;
+ else {
+ ma->is_thread_safe = 1;
+ erts_mtx_init(&ma->mtx, "mseg");
+ }
+
+ ma->is_cache_check_scheduled = 0;
+
+ /* Options ... */
+
+ ma->abs_max_cache_bad_fit = init->amcbf;
+ ma->rel_max_cache_bad_fit = init->rmcbf;
+ ma->max_cache_size = init->mcs;
- if (max_cache_size > MAX_CACHE_SIZE)
- max_cache_size = MAX_CACHE_SIZE;
+ if (ma->max_cache_size > MAX_CACHE_SIZE)
+ ma->max_cache_size = MAX_CACHE_SIZE;
+
+ ma->mk_list = NULL;
#if HALFWORD_HEAP
- mem_kind_init(&low_mem, "low memory");
- mem_kind_init(&hi_mem, "high memory");
+ mem_kind_init(ma, &ma->low_mem, "low memory");
+ mem_kind_init(ma, &ma->hi_mem, "high memory");
#else
- mem_kind_init(&the_mem, "all memory");
+ mem_kind_init(ma, &ma->the_mem, "all memory");
#endif
- is_cache_check_scheduled = 0;
-#ifdef ERTS_THREADS_NO_SMP
- is_cache_check_requested = 0;
+ sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls));
+
+#if CAN_PARTLY_DESTROY
+ ma->min_seg_size = ~((Uint) 0);
#endif
+ }
}
-static ERTS_INLINE Uint tot_cache_size(void)
+static ERTS_INLINE Uint tot_cache_size(ErtsMsegAllctr_t *ma)
{
MemKind* mk;
Uint sz = 0;
- for (mk=mk_list; mk; mk=mk->next) {
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
sz += mk->cache_size;
}
return sz;
@@ -1552,25 +1631,13 @@ static ERTS_INLINE Uint tot_cache_size(void)
void
erts_mseg_late_init(void)
{
-#ifdef ERTS_THREADS_NO_SMP
- int handle =
- erts_register_async_ready_callback(
- check_schedule_cache_check);
-#endif
- erts_mtx_lock(&mseg_mutex);
- is_init_done = 1;
-#ifdef ERTS_THREADS_NO_SMP
- async_handle = handle;
-#endif
- if (tot_cache_size())
- schedule_cache_check();
- erts_mtx_unlock(&mseg_mutex);
-}
-
-void
-erts_mseg_exit(void)
-{
- mseg_shutdown();
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ ma->is_init_done = 1;
+ if (tot_cache_size(ma))
+ schedule_cache_check(ma);
+ ERTS_MSEG_UNLOCK(ma);
}
#endif /* #if HAVE_ERTS_MSEG */
@@ -1599,12 +1666,13 @@ erts_mseg_test(unsigned long op,
erts_mseg_clear_cache();
return (unsigned long) 0;
case 0x405:
- return (unsigned long) erts_mseg_no();
+ return (unsigned long) erts_mseg_no(&erts_mseg_default_opt);
case 0x406: {
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(0);
unsigned long res;
- erts_mtx_lock(&mseg_mutex);
- res = (unsigned long) tot_cache_size();
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ res = (unsigned long) tot_cache_size(ma);
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
#else /* #if HAVE_ERTS_MSEG */
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 8f116030a8..741080fb78 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -44,7 +44,7 @@ typedef struct {
Uint amcbf;
Uint rmcbf;
Uint mcs;
- Uint cci;
+ Uint nos;
} ErtsMsegInit_t;
#define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
@@ -60,6 +60,7 @@ typedef struct {
int preserv;
UWord abs_shrink_th;
UWord rel_shrink_th;
+ int sched_spec;
#if HALFWORD_HEAP
int low_mem;
#endif
@@ -75,14 +76,14 @@ void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *);
void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *,
const ErtsMsegOpt_t *);
void erts_mseg_clear_cache(void);
-Uint erts_mseg_no(void);
+void erts_mseg_cache_check(void);
+Uint erts_mseg_no( const ErtsMsegOpt_t *);
Uint erts_mseg_unit_size(void);
void erts_mseg_init(ErtsMsegInit_t *init);
void erts_mseg_late_init(void); /* Have to be called after all allocators,
threads and timers have been initialized. */
-void erts_mseg_exit(void);
-Eterm erts_mseg_info_options(int *, void*, Uint **, Uint *);
-Eterm erts_mseg_info(int *, void*, int, Uint **, Uint *);
+Eterm erts_mseg_info_options(int, int *, void*, Uint **, Uint *);
+Eterm erts_mseg_info(int, int *, void*, int, Uint **, Uint *);
#endif /* #if HAVE_ERTS_MSEG */
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 81f1c95020..80db2055a2 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -68,6 +68,7 @@
# endif
# endif
#endif
+#include "erl_thr_progress.h"
#include "erl_driver.h"
#include "erl_alloc.h"
@@ -114,7 +115,7 @@
#endif
#define ERTS_POLL_USE_WAKEUP_PIPE \
- (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP))
+ (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(USE_THREADS))
#ifdef ERTS_SMP
@@ -261,7 +262,6 @@ struct ErtsPollSet_ {
#ifdef ERTS_SMP
erts_atomic32_t polled;
erts_smp_mtx_t mtx;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
int wake_fds[2];
@@ -269,10 +269,8 @@ struct ErtsPollSet_ {
#if ERTS_POLL_USE_FALLBACK
int fallback_used;
#endif
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_t wakeup_state;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- volatile int wakeup_state;
#endif
erts_smp_atomic32_t timeout;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
@@ -345,21 +343,16 @@ static void print_misc_debug_info(void);
static ERTS_INLINE void
reset_wakeup_state(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
- erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
- ERTS_THR_MEMORY_BARRIER;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->wakeup_state = 0;
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ erts_atomic32_set_mb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#endif
}
static ERTS_INLINE int
is_woken(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- return ps->wakeup_state != ERTS_POLL_NOT_WOKEN;
#else
return 0;
#endif
@@ -368,13 +361,9 @@ is_woken(ErtsPollSet ps)
static ERTS_INLINE int
is_interrupted_reset(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
return (erts_atomic32_xchg_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
== ERTS_POLL_WOKEN_INTR);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- int res = ps->wakeup_state == ERTS_POLL_WOKEN_INTR;
- ps->wakeup_state = ERTS_POLL_NOT_WOKEN;
- return res;
#else
return 0;
#endif
@@ -383,16 +372,13 @@ is_interrupted_reset(ErtsPollSet ps)
static ERTS_INLINE void
woke_up(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
if (wakeup_state == ERTS_POLL_NOT_WOKEN)
(void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
ERTS_POLL_WOKEN,
ERTS_POLL_NOT_WOKEN);
ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- if (ps->wakeup_state == ERTS_POLL_NOT_WOKEN)
- ps->wakeup_state = ERTS_POLL_WOKEN;
#endif
}
@@ -403,28 +389,27 @@ woke_up(ErtsPollSet ps)
#if ERTS_POLL_USE_WAKEUP_PIPE
static ERTS_INLINE void
-wake_poller(ErtsPollSet ps, int interrupted)
+wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe)
{
int wake;
-#ifdef ERTS_SMP
- erts_aint32_t wakeup_state;
- if (!interrupted)
- wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
- ERTS_POLL_WOKEN,
- ERTS_POLL_NOT_WOKEN);
+ if (async_signal_safe)
+ wake = 1;
else {
- /*
- * We might unnecessarily write to the pipe, however,
- * that isn't problematic.
- */
- wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
- erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ erts_aint32_t wakeup_state;
+ if (!interrupted)
+ wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ else {
+ /*
+ * We might unnecessarily write to the pipe, however,
+ * that isn't problematic.
+ */
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
+ erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ }
+ wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
}
- wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- wake = ps->wakeup_state == ERTS_POLL_NOT_WOKEN;
- ps->wakeup_state = interrupted ? ERTS_POLL_WOKEN_INTR : ERTS_POLL_NOT_WOKEN;
-#endif
/*
* NOTE: This function might be called from signal handlers in the
* non-smp case; therefore, it has to be async-signal safe in
@@ -439,9 +424,17 @@ wake_poller(ErtsPollSet ps, int interrupted)
res = write(ps->wake_fds[1], "!", 1);
} while (res < 0 && errno == EINTR);
if (res <= 0 && errno != ERRNO_BLOCK) {
- fatal_error_async_signal_safe(__FILE__
- ":XXX:wake_poller(): "
- "Failed to write on wakeup pipe\n");
+ if (async_signal_safe)
+ fatal_error_async_signal_safe(__FILE__
+ ":XXX:wake_poller(): "
+ "Failed to write on wakeup pipe\n");
+ else
+ fatal_error("%s:%d:wake_poller(): "
+ "Failed to write to wakeup pipe fd=%d: "
+ "%s (%d)\n",
+ __FILE__, __LINE__,
+ ps->wake_fds[1],
+ erl_errno_id(errno), errno);
}
}
}
@@ -449,11 +442,18 @@ wake_poller(ErtsPollSet ps, int interrupted)
static ERTS_INLINE void
cleanup_wakeup_pipe(ErtsPollSet ps)
{
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ int intr = 0;
+#endif
int fd = ps->wake_fds[0];
int res;
do {
char buf[32];
res = read(fd, buf, sizeof(buf));
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (res > 0)
+ intr = 1;
+#endif
} while (res > 0 || (res < 0 && errno == EINTR));
if (res < 0 && errno != ERRNO_BLOCK) {
fatal_error("%s:%d:cleanup_wakeup_pipe(): "
@@ -463,6 +463,10 @@ cleanup_wakeup_pipe(ErtsPollSet ps)
fd,
erl_errno_id(errno), errno);
}
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (intr)
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+#endif
}
static void
@@ -1497,7 +1501,7 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
#ifdef ERTS_SMP
if (final_do_wake)
- wake_poller(ps, 0);
+ wake_poller(ps, 0, 0);
#endif /* ERTS_SMP */
}
@@ -1520,7 +1524,7 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps,
#ifdef ERTS_SMP
if (*do_wake) {
- wake_poller(ps, 0);
+ wake_poller(ps, 0, 0);
}
#endif /* ERTS_SMP */
@@ -1893,9 +1897,9 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
}
static ERTS_INLINE int
-check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
+check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res)
{
- ASSERT(!*ps_locked);
+ int res;
if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0
&& tv->tv_usec == 0 && tv->tv_sec == 0) {
/* Nothing to poll and zero timeout; done... */
@@ -1915,16 +1919,23 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
timeout = INT_MAX;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
- return epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ res = epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
struct timespec ts;
- ts.tv_sec = tv->tv_sec;
- ts.tv_nsec = tv->tv_usec*1000;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
- return kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec*1000;
+ res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
#endif /* ----------------------------------------- */
-
}
else /* use fallback (i.e. poll() or select()) */
#endif /* ERTS_POLL_USE_FALLBACK */
@@ -1947,22 +1958,38 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
if (poll_res.dp_nfds > ps->res_events_len)
grow_res_events(ps, poll_res.dp_nfds);
poll_res.dp_fds = ps->res_events;
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
poll_res.dp_timeout = (int) timeout;
- return ioctl(ps->kp_fd, DP_POLL, &poll_res);
+ res = ioctl(ps->kp_fd, DP_POLL, &poll_res);
#elif ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
if (timeout > INT_MAX)
timeout = INT_MAX;
- return poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ res = poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
- int res;
+ SysTimeval to = *tv;
+
ps->res_input_fds = ps->input_fds;
ps->res_output_fds = ps->output_fds;
+
+#ifdef ERTS_SMP
+ if (to.tv_sec || to.tv_usec)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
res = select(ps->max_fd + 1,
&ps->res_input_fds,
&ps->res_output_fds,
NULL,
- tv);
+ &to);
#ifdef ERTS_SMP
+ if (to.tv_sec || to.tv_usec)
+ erts_thr_progress_finalize_wait(NULL);
if (res < 0
&& errno == EBADF
&& ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
@@ -1978,15 +2005,16 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* have triggered, we fake an EAGAIN error and let the caller
* restart us.
*/
- SysTimeval zero_tv = {0, 0};
- *ps_locked = 1;
+ to.tv_sec = 0;
+ to.tv_usec = 0;
ERTS_POLLSET_LOCK(ps);
handle_update_requests(ps);
+ ERTS_POLLSET_UNLOCK(ps);
res = select(ps->max_fd + 1,
&ps->res_input_fds,
&ps->res_output_fds,
NULL,
- &zero_tv);
+ &to);
if (res == 0) {
errno = EAGAIN;
res = -1;
@@ -1996,6 +2024,11 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
return res;
#endif /* ----------------------------------------- */
}
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_finalize_wait(NULL);
+#endif
+ return res;
}
}
@@ -2007,7 +2040,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
{
int res, no_fds;
int ebadf = 0;
- int ps_locked;
+#ifdef ERTS_SMP
+ int ps_locked = 0;
+#endif
SysTimeval *tvp;
SysTimeval itv;
@@ -2049,8 +2084,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
}
#endif
- ps_locked = 0;
- res = check_fd_events(ps, tvp, no_fds, &ps_locked);
+ res = check_fd_events(ps, tvp, no_fds);
woke_up(ps);
@@ -2072,10 +2106,8 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
#ifdef ERTS_SMP
- if (!ps_locked) {
- ps_locked = 1;
- ERTS_POLLSET_LOCK(ps);
- }
+ ps_locked = 1;
+ ERTS_POLLSET_LOCK(ps);
#endif
no_fds = save_poll_result(ps, pr, no_fds, res, ebadf);
@@ -2111,19 +2143,26 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
void
ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
{
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
- /*
- * NOTE: This function might be called from signal handlers in the
- * non-smp case; therefore, it has to be async-signal safe in
- * the non-smp case.
- */
+#if defined(USE_THREADS)
if (!set)
reset_wakeup_state(ps);
else
- wake_poller(ps, 1);
+ wake_poller(ps, 1, 0);
#endif
}
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+void
+ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps)
+{
+ /*
+     * NOTE: This function is called from signal handlers; it
+     * therefore has to be async-signal safe.
+ */
+ wake_poller(ps, 1, 1);
+}
+#endif
+
/*
* erts_poll_interrupt_timed():
* If 'set' != 0, interrupt thread blocked in erts_poll_wait() if it
@@ -2139,7 +2178,7 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
reset_wakeup_state(ps);
else {
if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
- wake_poller(ps, 1);
+ wake_poller(ps, 1, 0);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
else {
if (ERTS_POLLSET_IS_POLLED(ps))
@@ -2266,10 +2305,8 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
erts_atomic32_init_nob(&ps->polled, 0);
erts_smp_mtx_init(&ps->mtx, "pollset");
#endif
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->wakeup_state = 0;
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
create_wakeup_pipe(ps);
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index 725a77a152..e0296c6a33 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -216,6 +216,9 @@ typedef struct {
#endif
} ErtsPollInfo;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet);
+#endif
void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet,
int);
void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet,
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index d8d51b192c..9a5ed9f5bc 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -129,10 +129,12 @@
#define HAVE_ERTS_CHECK_IO_DEBUG
int erts_check_io_debug(void);
-
-#ifndef ENABLE_CHILD_WAITER_THREAD
+#ifndef ERTS_SMP
# undef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
# define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#endif
+
+#ifndef ENABLE_CHILD_WAITER_THREAD
# ifdef ERTS_SMP
# define ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
void erts_check_children(void);
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index fd15635168..c6b63350e5 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -52,6 +52,7 @@
#define ERTS_WANT_GOT_SIGUSR1
#define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
#include "sys.h"
+#include "erl_thr_progress.h"
#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
#define __DARWIN__ 1
@@ -127,7 +128,6 @@ static ErtsSysReportExit *report_exit_list;
static ErtsSysReportExit *report_exit_transit_list;
#endif
-extern int check_async_ready(void);
extern int driver_interrupt(int, int);
extern void do_break(void);
@@ -263,6 +263,7 @@ int erts_use_kernel_poll = 0;
struct {
int (*select)(ErlDrvPort, ErlDrvEvent, int, int);
int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+ void (*check_io_as_interrupt)(void);
void (*check_io_interrupt)(int);
void (*check_io_interrupt_tmd)(int, long);
void (*check_io)(int);
@@ -302,6 +303,9 @@ init_check_io(void)
if (erts_use_kernel_poll) {
io_func.select = driver_select_kp;
io_func.event = driver_event_kp;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+ io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_kp;
+#endif
io_func.check_io_interrupt = erts_check_io_interrupt_kp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp;
io_func.check_io = erts_check_io_kp;
@@ -314,6 +318,9 @@ init_check_io(void)
else {
io_func.select = driver_select_nkp;
io_func.event = driver_event_nkp;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+ io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_nkp;
+#endif
io_func.check_io_interrupt = erts_check_io_interrupt_nkp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp;
io_func.check_io = erts_check_io_nkp;
@@ -325,6 +332,11 @@ init_check_io(void)
}
}
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_as_interrupt)()
+#else
+#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_interrupt)(1)
+#endif
#define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt)
#define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd)
#define ERTS_CHK_IO (*io_func.check_io)
@@ -339,6 +351,11 @@ init_check_io(void)
max_files = erts_check_io_max_files();
}
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_async_sig_interrupt()
+#else
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_interrupt(1)
+#endif
#define ERTS_CHK_IO_INTR erts_check_io_interrupt
#define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed
#define ERTS_CHK_IO erts_check_io
@@ -346,13 +363,13 @@ init_check_io(void)
#endif
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt(int set)
{
ERTS_CHK_IO_INTR(set);
}
+#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt_timed(int set, long msec)
{
@@ -527,7 +544,6 @@ erts_sys_pre_init(void)
void
erl_sys_init(void)
{
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
{
int res;
@@ -732,7 +748,7 @@ break_requested(void)
erl_exit(ERTS_INTR_EXIT, "");
ERTS_SET_BREAK_REQUESTED;
- ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+ ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
}
/* set up signal handlers for break and quit */
@@ -932,18 +948,13 @@ void
os_flavor(char* namebuf, /* Where to return the name. */
unsigned size) /* Size of name buffer. */
{
- static int called = 0;
- static struct utsname uts; /* Information about the system. */
-
- if (!called) {
- char* s;
+ struct utsname uts; /* Information about the system. */
+ char* s;
- (void) uname(&uts);
- called = 1;
- for (s = uts.sysname; *s; s++) {
- if (isupper((int) *s)) {
- *s = tolower((int) *s);
- }
+ (void) uname(&uts);
+ for (s = uts.sysname; *s; s++) {
+ if (isupper((int) *s)) {
+ *s = tolower((int) *s);
}
}
strcpy(namebuf, uts.sysname);
@@ -1108,31 +1119,6 @@ struct erl_drv_entry vanilla_driver_entry = {
stop_select
};
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-static int async_drv_init(void);
-static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
-static void async_drv_stop(ErlDrvData);
-static void async_drv_input(ErlDrvData, ErlDrvEvent);
-
-/* INTERNAL use only */
-
-struct erl_drv_entry async_driver_entry = {
- async_drv_init,
- async_drv_start,
- async_drv_stop,
- NULL,
- async_drv_input,
- NULL,
- "async",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL
-};
-#endif
-
/* Handle SIGCHLD signals. */
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
static RETSIGTYPE onchld(void)
@@ -1146,7 +1132,7 @@ static RETSIGTYPE onchld(int signum)
smp_sig_notify('C');
#else
children_died = 1;
- ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+ ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
#endif
}
@@ -2317,87 +2303,6 @@ static void stop_select(ErlDrvEvent fd, void* _)
close((int)fd);
}
-/*
-** Async opertation support
-*/
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-static void
-sys_async_ready_failed(int fd, int r, int err)
-{
- char buf[120];
- sprintf(buf, "sys_async_ready(): Fatal error: fd=%d, r=%d, errno=%d\n",
- fd, r, err);
- erts_silence_warn_unused_result(write(2, buf, strlen(buf)));
- abort();
-}
-
-/* called from threads !! */
-void sys_async_ready(int fd)
-{
- int r;
- while (1) {
- r = write(fd, "0", 1); /* signal main thread fd MUST be async_fd[1] */
- if (r == 1) {
- DEBUGF(("sys_async_ready(): r = 1\r\n"));
- break;
- }
- if (r < 0 && errno == EINTR) {
- DEBUGF(("sys_async_ready(): r = %d\r\n", r));
- continue;
- }
- sys_async_ready_failed(fd, r, errno);
- }
-}
-
-static int async_drv_init(void)
-{
- async_fd[0] = -1;
- async_fd[1] = -1;
- return 0;
-}
-
-static ErlDrvData async_drv_start(ErlDrvPort port_num,
- char* name, SysDriverOpts* opts)
-{
- if (async_fd[0] != -1)
- return ERL_DRV_ERROR_GENERAL;
- if (pipe(async_fd) < 0)
- return ERL_DRV_ERROR_GENERAL;
-
- DEBUGF(("async_drv_start: %d\r\n", port_num));
-
- SET_NONBLOCKING(async_fd[0]);
- driver_select(port_num, async_fd[0], ERL_DRV_READ, 1);
-
- if (init_async(async_fd[1]) < 0)
- return ERL_DRV_ERROR_GENERAL;
- return (ErlDrvData)port_num;
-}
-
-static void async_drv_stop(ErlDrvData e)
-{
- int port_num = (int)(long)e;
-
- DEBUGF(("async_drv_stop: %d\r\n", port_num));
-
- exit_async();
-
- driver_select(port_num, async_fd[0], ERL_DRV_READ, 0);
-
- close(async_fd[0]);
- close(async_fd[1]);
- async_fd[0] = async_fd[1] = -1;
-}
-
-
-static void async_drv_input(ErlDrvData e, ErlDrvEvent fd)
-{
- char *buf[32];
- DEBUGF(("async_drv_input\r\n"));
- while (read((int) fd, (void *) buf, 32) > 0); /* fd MUST be async_fd[0] */
- check_async_ready(); /* invoke all async_ready */
-}
-#endif
void erts_do_break_handling(void)
{
@@ -2409,11 +2314,7 @@ void erts_do_break_handling(void)
* therefore, make sure that all threads but this one are blocked before
* proceeding!
*/
- erts_smp_block_system(0);
- /*
- * NOTE: since we allow gc we are not allowed to lock
- * (any) process main locks while blocking system...
- */
+ erts_smp_thr_progress_block();
/* during break we revert to initial settings */
/* this is done differently for oldshell */
@@ -2441,7 +2342,7 @@ void erts_do_break_handling(void)
tcsetattr(0,TCSANOW,&temp_mode);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
/* Fills in the systems representation of the jam/beam process identifier.
@@ -2475,12 +2376,10 @@ erts_sys_putenv(char *buffer, int sep_ix)
}
int
-erts_sys_getenv(char *key, char *value, size_t *size)
+erts_sys_getenv__(char *key, char *value, size_t *size)
{
- char *orig_value;
int res;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
- orig_value = getenv(key);
+ char *orig_value = getenv(key);
if (!orig_value)
res = -1;
else {
@@ -2495,6 +2394,15 @@ erts_sys_getenv(char *key, char *value, size_t *size)
res = 0;
}
}
+ return res;
+}
+
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ res = erts_sys_getenv__(key, value, size);
erts_smp_rwmtx_runlock(&environ_rwmtx);
return res;
}
@@ -2506,31 +2414,6 @@ sys_init_io(void)
erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
erts_smp_atomic_add_nob(&sys_misc_mem_sz,
max_files * sizeof(struct fd_data));
-
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
- if (init_async(-1) < 0)
- erl_exit(1, "Failed to initialize async-threads\n");
-#else
- {
- /* This is speical stuff, starting a driver from the
- * system routines, but is a nice way of handling stuff
- * the erlang way
- */
- SysDriverOpts dopts;
- int ret;
-
- sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
- add_driver_entry(&async_driver_entry);
- ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
- DEBUGF(("open_driver = %d\n", ret));
- if (ret < 0)
- erl_exit(1, "Failed to open async driver\n");
- erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
- }
-#endif
-#endif
-
}
#if (0) /* unused? */
@@ -2757,15 +2640,7 @@ initiate_report_exit_status(ErtsSysReportExit *rep, int status)
rep->next = report_exit_transit_list;
rep->status = status;
report_exit_transit_list = rep;
- /*
- * We need the scheduler thread to call check_children().
- * If the scheduler thread is sleeping in a poll with a
- * timeout, we need to wake the scheduler thread. We use the
- * functionality of the async driver to do this, instead of
- * implementing yet another driver doing the same thing. A
- * little bit ugly, but it works...
- */
- sys_async_ready(async_fd[1]);
+ erts_sys_schedule_interrupt(1);
}
static int check_children(void)
@@ -2852,20 +2727,11 @@ erl_sys_schedule(int runnable)
{
#ifdef ERTS_SMP
ERTS_CHK_IO(!runnable);
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
#else
- ERTS_CHK_IO_INTR(0);
- if (runnable) {
- ERTS_CHK_IO(0); /* Poll for I/O */
- check_async_ready(); /* Check async completions */
- } else {
- int wait_for_io = !check_async_ready();
- if (wait_for_io)
- wait_for_io = !check_children();
- ERTS_CHK_IO(wait_for_io);
- }
- (void) check_children();
+ ERTS_CHK_IO(runnable ? 0 : !check_children());
#endif
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ (void) check_children();
}
@@ -2893,8 +2759,8 @@ smp_sig_notify(char c)
static void *
signal_dispatcher_thread_func(void *unused)
{
- int initialized = 0;
#if !CHLDWTHR
+ int initialized = 0;
int notify_check_children = 0;
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2922,20 +2788,20 @@ signal_dispatcher_thread_func(void *unused)
* to other threads.
*
* NOTE 2: The signal dispatcher thread is not a blockable
- * thread (i.e., it hasn't called
- * erts_register_blockable_thread()). This is
- * intentional. We want to be able to interrupt
- * writing of a crash dump by hitting C-c twice.
- * Since it isn't a blockable thread it is important
- * that it doesn't change the state of any data that
- * a blocking thread expects to have exclusive access
- * to (unless the signal dispatcher itself explicitly
- * is blocking all blockable threads).
+ * thread (i.e., not a thread managed by the
+ * erl_thr_progress module). This is intentional.
+ * We want to be able to interrupt writing of a crash
+ * dump by hitting C-c twice. Since it isn't a
+ * blockable thread it is important that it doesn't
+ * change the state of any data that a blocking thread
+ * expects to have exclusive access to (unless the
+ * signal dispatcher itself explicitly is blocking all
+ * blockable threads).
*/
switch (buf[i]) {
case 0: /* Emulator initialized */
- initialized = 1;
#if !CHLDWTHR
+ initialized = 1;
if (!notify_check_children)
#endif
break;
@@ -2970,7 +2836,7 @@ signal_dispatcher_thread_func(void *unused)
buf[i]);
}
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
return NULL;
}
@@ -3090,6 +2956,8 @@ erl_sys_args(int* argc, char** argv)
{
int i, j;
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+
i = 1;
ASSERT(argc && argv);
@@ -3151,4 +3019,5 @@ erl_sys_args(int* argc, char** argv)
argv[j++] = argv[i];
}
*argc = j;
+
}
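The sys.c changes above route signal-handler wakeups through a dedicated macro: ERTS_CHK_IO_AS_INTR() resolves to erts_check_io_async_sig_interrupt() when ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT is defined, and falls back to the ordinary erts_check_io_interrupt(1) otherwise. A hedged sketch of how a break-style signal handler is expected to use it; the flag and handler names below are hypothetical.

#include <signal.h>

static volatile sig_atomic_t sketch_break_flag = 0;

/* Sketch only: a signal handler must stay async-signal safe, so it
 * records the request in a flag and wakes the poller through the
 * async-signal-safe macro instead of the ordinary interrupt path. */
static void
sketch_break_handler(int signo)
{
    (void) signo;
    sketch_break_flag = 1;   /* picked up later by the scheduler loop */
    ERTS_CHK_IO_AS_INTR();   /* make sure we don't keep sleeping in poll */
}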
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
index c6e7b65f32..d6d1fe64e0 100644
--- a/erts/emulator/sys/vxworks/sys.c
+++ b/erts/emulator/sys/vxworks/sys.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -238,6 +238,12 @@ erl_sys_args(int* argc, char** argv)
ASSERT(max_files <= erts_vxworks_max_files);
}
+void
+erts_sys_schedule_interrupt(int set)
+{
+ erts_check_io_interrupt(set);
+}
+
/*
* Called from schedule() when it runs out of runnable processes,
* or when Erlang code has performed INPUT_REDUCTIONS reduction
@@ -246,7 +252,6 @@ erl_sys_args(int* argc, char** argv)
void
erl_sys_schedule(int runnable)
{
- erts_check_io_interrupt(0);
erts_check_io(!runnable);
}
@@ -309,7 +314,7 @@ static void request_break(void)
fprintf(stderr,"break!\n");
#endif
erts_break_requested = 1;
- erts_check_io_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
+ erts_check_io_async_sig_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
}
static void do_quit(void)
@@ -1515,6 +1520,12 @@ erts_sys_getenv(char *key, char *value, size_t *size)
return res;
}
+int
+erts_sys_getenv__(char *key, char *value, size_t *size)
+{
+ return erts_sys_getenv(key, value, size);
+}
+
void
sys_init_io(void)
{
@@ -2025,9 +2036,6 @@ int erl_memory_show(int p0, int p1, int p2, int p3, int p4, int p5,
erts_printf("The memory block used by elib is save_malloc'ed "
"at 0x%08x.\n", (unsigned int) alloc_pool_ptr);
}
-#ifdef NO_FIX_ALLOC
- erts_printf("Fix_alloc is disabled in this build\n");
-#endif
erts_printf("Statistics from elib_malloc:\n");
ELIB_LOCK;
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 735c420d8e..ab4ef05118 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -1159,7 +1159,13 @@ int erts_poll_wait(ErtsPollSet ps,
HARDDEBUGF(("Start waiting %d [%d]",num_h, (int) timeout));
ERTS_POLLSET_UNLOCK(ps);
+#ifdef ERTS_SMP
+ erts_thr_progress_prepare_wait(NULL);
+#endif
WaitForMultipleObjects(num_h, harr, FALSE, timeout);
+#ifdef ERTS_SMP
+ erts_thr_progress_finalize_wait(NULL);
+#endif
ERTS_POLLSET_LOCK(ps);
HARDDEBUGF(("Stop waiting %d [%d]",num_h, (int) timeout));
woke_up(ps);
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index ce1d376a54..6f33ef7ad6 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -216,6 +216,9 @@ void sys_tty_reset(int exit_code)
void erl_sys_args(int* argc, char** argv)
{
char *event_name;
+
+ erts_sys_env_init();
+
nohup = get_and_remove_option(argc, argv, "-nohup");
#ifdef DEBUG
@@ -566,51 +569,6 @@ struct erl_drv_entry vanilla_driver_entry = {
stop_select
};
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-
-static int async_drv_init(void);
-static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
-static void async_drv_stop(ErlDrvData);
-static void async_drv_input(ErlDrvData, ErlDrvEvent);
-
-/* INTERNAL use only */
-
-void null_output(ErlDrvData drv_data, char* buf, int len)
-{
-}
-
-void null_ready_output(ErlDrvData drv_data, ErlDrvEvent event)
-{
-}
-
-struct erl_drv_entry async_driver_entry = {
- async_drv_init,
- async_drv_start,
- async_drv_stop,
- null_output,
- async_drv_input,
- null_ready_output,
- "async",
- NULL, /* finish */
- NULL, /* handle */
- NULL, /* control */
- NULL, /* timeout */
- NULL, /* outputv */
- NULL, /* ready_async */
- NULL, /* flush */
- NULL, /* call */
- NULL, /* event */
- ERL_DRV_EXTENDED_MARKER,
- ERL_DRV_EXTENDED_MAJOR_VERSION,
- ERL_DRV_EXTENDED_MINOR_VERSION,
- 0, /* ERL_DRV_FLAGs */
- NULL,
- NULL, /* process_exit */
- stop_select
-};
-
-#endif
-
/*
* Initialises a DriverData structure.
*
@@ -2772,7 +2730,7 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
DEBUGF(("ready_output(%d, 0x%x)\n", drv_data, ready_event));
set_busy_port(dp->port_num, 0);
if (!(dp->outbuf)) {
- /* Happens because event sometimes get signalled during a succesful
+ /* Happens because event sometimes get signalled during a successful
write... */
return;
}
@@ -2825,30 +2783,6 @@ sys_init_io(void)
We estimate the number to twice the amount of ports.
We really dont know on windows, do we? */
max_files = 2*erts_max_ports;
-
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
- if (init_async(-1) < 0)
- erl_exit(1, "Failed to initialize async-threads\n");
-#else
- {
- /* This is special stuff, starting a driver from the
- * system routines, but is a nice way of handling stuff
- * the erlang way
- */
- SysDriverOpts dopts;
- int ret;
-
- sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
- add_driver_entry(&async_driver_entry);
- ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
- DEBUGF(("open_driver = %d\n", ret));
- if (ret < 0)
- erl_exit(1, "Failed to open async driver\n");
- erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
- }
-#endif
-#endif
}
#ifdef ERTS_SMP
@@ -3298,8 +3232,6 @@ void erl_sys_init(void)
{
HANDLE handle;
- erts_sys_env_init();
-
noinherit_std_handle(STD_OUTPUT_HANDLE);
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
@@ -3361,13 +3293,13 @@ void erl_sys_init(void)
SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX);
}
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt(int set)
{
erts_check_io_interrupt(set);
}
+#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt_timed(int set, long msec)
{
@@ -3383,76 +3315,7 @@ erts_sys_schedule_interrupt_timed(int set, long msec)
void
erl_sys_schedule(int runnable)
{
-#ifdef ERTS_SMP
erts_check_io(!runnable);
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
-#else
- erts_check_io_interrupt(0);
- if (runnable) {
- erts_check_io(0); /* Poll for I/O */
- check_async_ready(); /* Check async completions */
- } else {
- erts_check_io(check_async_ready() ? 0 : 1);
- }
-#endif
-}
-
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-/*
- * Async operation support.
- */
-
-static ErlDrvEvent async_drv_event;
-
-void
-sys_async_ready(int fd)
-{
- SetEvent((HANDLE)async_drv_event);
-}
-
-static int
-async_drv_init(void)
-{
- async_drv_event = (ErlDrvEvent) NULL;
- return 0;
-}
-
-static ErlDrvData
-async_drv_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
-{
- if (async_drv_event != (ErlDrvEvent) NULL) {
- return ERL_DRV_ERROR_GENERAL;
- }
- if ((async_drv_event = (ErlDrvEvent)CreateAutoEvent(FALSE)) == (ErlDrvEvent) NULL) {
- return ERL_DRV_ERROR_GENERAL;
- }
-
- driver_select(port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 1);
- if (init_async(async_drv_event) < 0) {
- return ERL_DRV_ERROR_GENERAL;
- }
- return (ErlDrvData)port_num;
-}
-
-static void
-async_drv_stop(ErlDrvData port_num)
-{
- exit_async();
- driver_select((ErlDrvPort)port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 0);
- /*CloseHandle((HANDLE)async_drv_event);*/
- async_drv_event = (ErlDrvEvent) NULL;
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
-
-static void
-async_drv_input(ErlDrvData port_num, ErlDrvEvent e)
-{
- check_async_ready();
-
- /*
- * Our event is auto-resetting.
- */
-}
-
-#endif
-
diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c
index 02c8433a10..7acc7f07ee 100644
--- a/erts/emulator/sys/win32/sys_env.c
+++ b/erts/emulator/sys/win32/sys_env.c
@@ -55,19 +55,17 @@ erts_sys_putenv(char *key_value, int sep_ix)
}
int
-erts_sys_getenv(char *key, char *value, size_t *size)
+erts_sys_getenv__(char *key, char *value, size_t *size)
{
size_t req_size = 0;
int res = 0;
DWORD new_size;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
SetLastError(0);
new_size = GetEnvironmentVariable((LPCTSTR) key,
(LPTSTR) value,
(DWORD) *size);
res = !new_size && GetLastError() == ERROR_ENVVAR_NOT_FOUND ? -1 : 0;
- erts_smp_rwmtx_runlock(&environ_rwmtx);
if (res < 0)
return res;
res = new_size > *size ? 1 : 0;
@@ -75,6 +73,16 @@ erts_sys_getenv(char *key, char *value, size_t *size)
return res;
}
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ res = erts_sys_getenv__(key, value, size);
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ return res;
+}
+
struct win32_getenv_state {
char *env;
char *next;
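Both the unix and win32 versions of erts_sys_getenv() are split the same way: erts_sys_getenv__() performs the raw lookup without touching environ_rwmtx, and the public function wraps it with the read lock. One plausible reading of the split, shown with a hypothetical caller, is that code already holding the lock can use the raw variant directly.

/* Sketch only: a caller that takes environ_rwmtx itself can use the
 * lock-free erts_sys_getenv__(), while ordinary callers go through
 * erts_sys_getenv(), which acquires and releases the read lock. */
static int
sketch_lookup_while_locked(char *key, char *value, size_t *size)
{
    int res;
    erts_smp_rwmtx_rlock(&environ_rwmtx);       /* shared read access */
    res = erts_sys_getenv__(key, value, size);  /* no extra locking inside */
    /* ... possibly more reads under the same lock ... */
    erts_smp_rwmtx_runlock(&environ_rwmtx);
    return res;
}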
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index 1d73edd30b..93aaa23f97 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -21,6 +21,7 @@
*/
#include "sys.h"
#include "erl_alloc.h"
+#include "erl_thr_progress.h"
#include "erl_driver.h"
#include "../../drivers/win32/win_con.h"
@@ -52,14 +53,14 @@ void erts_do_break_handling(void)
* therefore, make sure that all threads but this one are blocked before
* proceeding!
*/
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* call the break handling function, reset the flag */
do_break();
ResetEvent(erts_sys_break_event);
ERTS_UNSET_BREAK_REQUESTED;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
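Both erts_do_break_handling() implementations switch from erts_smp_block_system()/erts_smp_release_system() to the thread-progress blocking API. A minimal sketch of the resulting idiom; the critical-section body is hypothetical.

/* Sketch only: block all managed threads, do work that needs exclusive
 * access to emulator state (as the break handlers do), then let the
 * other threads resume. */
static void
sketch_exclusive_section(void)
{
    erts_smp_thr_progress_block();    /* wait until managed threads are stopped */

    /* ... e.g. run do_break() and restore terminal settings ... */

    erts_smp_thr_progress_unblock();  /* let the system run again */
}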
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index 459dc84565..d9fc876482 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -441,6 +441,11 @@ terms(Config) when is_list(Config) ->
Sz when is_integer(Sz), size(Bin) =< Sz ->
ok
end,
+ Bin1 = term_to_binary(Term, [{minor_version, 1}]),
+ case erlang:external_size(Bin1, [{minor_version, 1}]) of
+ Sz1 when is_integer(Sz1), size(Bin1) =< Sz1 ->
+ ok
+ end,
Term = binary_to_term(Bin),
Term = binary_to_term(Bin, [safe]),
Unaligned = make_unaligned_sub_binary(Bin),
@@ -473,7 +478,12 @@ terms_float(Config) when is_list(Config) ->
Term = binary_to_term(Bin0),
Bin1 = term_to_binary(Term, [{minor_version,1}]),
Term = binary_to_term(Bin1),
- true = size(Bin1) < size(Bin0)
+ true = size(Bin1) < size(Bin0),
+ Size0 = erlang:external_size(Term),
+ Size00 = erlang:external_size(Term, [{minor_version, 0}]),
+ Size1 = erlang:external_size(Term, [{minor_version, 1}]),
+ true = (Size0 =:= Size00),
+ true = Size1 < Size0
end).
external_size(Config) when is_list(Config) ->
@@ -489,7 +499,9 @@ external_size(Config) when is_list(Config) ->
io:format(" Aligned size: ~p\n", [Sz1]),
io:format("Unaligned size: ~p\n", [Sz2]),
?line ?t:fail()
- end.
+ end,
+ ?line erlang:external_size(Bin) =:= erlang:external_size(Bin, [{minor_version, 1}]),
+ ?line erlang:external_size(Unaligned) =:= erlang:external_size(Unaligned, [{minor_version, 1}]).
external_size_1(Term, Size0, Limit) when Size0 < Limit ->
case erlang:external_size(Term) of
diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl
index 1959803385..7fdf36711b 100644
--- a/erts/emulator/test/bs_construct_SUITE.erl
+++ b/erts/emulator/test/bs_construct_SUITE.erl
@@ -553,6 +553,11 @@ huge_float_check({'EXIT',{badarg,_}}) -> ok.
huge_binary(Config) when is_list(Config) ->
?line 16777216 = size(<<0:(id(1 bsl 26)),(-1):(id(1 bsl 26))>>),
+ ?line garbage_collect(),
+ ?line id(<<0:((1 bsl 32)-1)>>),
+ ?line garbage_collect(),
+ ?line id(<<0:(id((1 bsl 32)-1))>>),
+ ?line garbage_collect(),
ok.
system_limit(Config) when is_list(Config) ->
@@ -565,6 +570,10 @@ system_limit(Config) when is_list(Config) ->
?line {'EXIT',{system_limit,_}} =
(catch <<(id(<<>>))/binary,0:(id(1 bsl 100))>>),
+ %% Would fail to load.
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:(1 bsl 67)>>),
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:((1 bsl 64)+1)>>),
+
case WordSize of
4 ->
system_limit_32();
@@ -581,6 +590,14 @@ system_limit_32() ->
?line {'EXIT',{system_limit,_}} = (catch <<0:(id(8)),42:536870912/unit:8>>),
?line {'EXIT',{system_limit,_}} =
(catch <<0:(id(8)),42:(id(536870912))/unit:8>>),
+
+ %% The size would be silently truncated, resulting in a crash.
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:(1 bsl 35)>>),
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:((1 bsl 32)+1)>>),
+
+ %% Would fail to load.
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:(1 bsl 43)>>),
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:((1 bsl 40)+1)>>),
ok.
badarg(Config) when is_list(Config) ->
diff --git a/erts/emulator/test/bs_match_misc_SUITE.erl b/erts/emulator/test/bs_match_misc_SUITE.erl
index b022f96740..15427661f3 100644
--- a/erts/emulator/test/bs_match_misc_SUITE.erl
+++ b/erts/emulator/test/bs_match_misc_SUITE.erl
@@ -23,7 +23,7 @@
bound_var/1,bound_tail/1,t_float/1,little_float/1,sean/1,
kenneth/1,encode_binary/1,native/1,happi/1,
size_var/1,wiger/1,x0_context/1,huge_float_field/1,
- writable_binary_matched/1,otp_7198/1]).
+ writable_binary_matched/1,otp_7198/1,unordered_bindings/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -33,7 +33,7 @@ all() ->
[bound_var, bound_tail, t_float, little_float, sean,
kenneth, encode_binary, native, happi, size_var, wiger,
x0_context, huge_float_field, writable_binary_matched,
- otp_7198].
+ otp_7198, unordered_bindings].
groups() ->
[].
@@ -553,5 +553,15 @@ otp_7198_scan(<<C, Rest/binary>>, TokAcc) when
otp_7198_scan(Rest, [{'KEYWORD', C} | TokAcc])
end.
+unordered_bindings(Config) when is_list(Config) ->
+ {<<1,2,3,4>>,<<42,42>>,<<3,3,3>>} =
+ unordered_bindings(4, 2, 3, <<1,2,3,4, 42,42, 3,3,3, 3>>),
+ ok.
+
+unordered_bindings(CompressedLength, HashSize, PadLength, T) ->
+ <<Content:CompressedLength/binary,Mac:HashSize/binary,
+ Padding:PadLength/binary,PadLength>> = T,
+ {Content,Mac,Padding}.
+
id(I) -> I.
diff --git a/erts/emulator/test/bs_utf_SUITE.erl b/erts/emulator/test/bs_utf_SUITE.erl
index 72c656c400..4ab7d674a6 100644
--- a/erts/emulator/test/bs_utf_SUITE.erl
+++ b/erts/emulator/test/bs_utf_SUITE.erl
@@ -64,8 +64,7 @@ end_per_group(_GroupName, Config) ->
utf8_roundtrip(Config) when is_list(Config) ->
?line utf8_roundtrip(0, 16#D7FF),
- ?line utf8_roundtrip(16#E000, 16#FFFD),
- ?line utf8_roundtrip(16#10000, 16#10FFFF),
+ ?line utf8_roundtrip(16#E000, 16#10FFFF),
ok.
utf8_roundtrip(First, Last) when First =< Last ->
@@ -91,8 +90,7 @@ utf16_roundtrip(Config) when is_list(Config) ->
do_utf16_roundtrip(Fun) ->
do_utf16_roundtrip(0, 16#D7FF, Fun),
- do_utf16_roundtrip(16#E000, 16#FFFD, Fun),
- do_utf16_roundtrip(16#10000, 16#10FFFF, Fun).
+ do_utf16_roundtrip(16#E000, 16#10FFFF, Fun).
do_utf16_roundtrip(First, Last, Fun) when First =< Last ->
Fun(First),
@@ -129,8 +127,7 @@ utf32_roundtrip(Config) when is_list(Config) ->
do_utf32_roundtrip(Fun) ->
do_utf32_roundtrip(0, 16#D7FF, Fun),
- do_utf32_roundtrip(16#E000, 16#FFFD, Fun),
- do_utf32_roundtrip(16#10000, 16#10FFFF, Fun).
+ do_utf32_roundtrip(16#E000, 16#10FFFF, Fun).
do_utf32_roundtrip(First, Last, Fun) when First =< Last ->
Fun(First),
@@ -158,7 +155,6 @@ utf32_little_roundtrip(Char) ->
utf8_illegal_sequences(Config) when is_list(Config) ->
?line fail_range(16#10FFFF+1, 16#10FFFF+512), %Too large.
?line fail_range(16#D800, 16#DFFF), %Reserved for UTF-16.
- ?line fail_range(16#FFFE, 16#FFFF), %Non-characters.
%% Illegal first character.
?line [fail(<<I,16#8F,16#8F,16#8F>>) || I <- lists:seq(16#80, 16#BF)],
@@ -251,7 +247,6 @@ fail_1(_) -> ok.
utf16_illegal_sequences(Config) when is_list(Config) ->
?line utf16_fail_range(16#10FFFF+1, 16#10FFFF+512), %Too large.
?line utf16_fail_range(16#D800, 16#DFFF), %Reserved for UTF-16.
- ?line utf16_fail_range(16#FFFE, 16#FFFF), %Non-characters.
?line lonely_hi_surrogate(16#D800, 16#DFFF),
?line leading_lo_surrogate(16#DC00, 16#DFFF),
@@ -300,7 +295,6 @@ leading_lo_surrogate(_, _, _) -> ok.
utf32_illegal_sequences(Config) when is_list(Config) ->
?line utf32_fail_range(16#10FFFF+1, 16#10FFFF+512), %Too large.
?line utf32_fail_range(16#D800, 16#DFFF), %Reserved for UTF-16.
- ?line utf32_fail_range(16#FFFE, 16#FFFF), %Non-characters.
?line utf32_fail_range(-100, -1),
ok.
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 8365e1c540..3a29fd4d68 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -20,7 +20,7 @@
-module(busy_port_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2,
+ init_per_group/2,end_per_group/2,end_per_testcase/2,
io_to_busy/1, message_order/1, send_3/1,
system_monitor/1, no_trap_exit/1,
no_trap_exit_unlinked/1, trap_exit/1, multiple_writers/1,
@@ -53,6 +53,20 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
+end_per_testcase(_Case, Config) when is_list(Config) ->
+ case whereis(busy_drv_server) of
+ undefined ->
+ ok;
+ Pid when is_pid(Pid) ->
+ Ref = monitor(process, Pid),
+ unlink(Pid),
+ exit(Pid, kill),
+ receive
+ {'DOWN',Ref,process,Pid,_} ->
+ ok
+ end
+ end,
+ Config.
%% Tests I/O operations to a busy port, to make sure a suspended send
%% operation is correctly restarted. This used to crash Beam.
@@ -495,12 +509,12 @@ hs_busy_pcmd(Prt, Opts, StartFun, EndFun) ->
P = spawn_link(fun () ->
erlang:yield(),
Tester ! {self(), doing_port_command},
- Start = os:timestamp(),
+ Start = now(),
Res = try {return,
port_command(Prt, [], Opts)}
catch Exception:Error -> {Exception, Error}
end,
- End = os:timestamp(),
+ End = now(),
Time = round(timer:now_diff(End, Start)/1000),
Tester ! {self(), port_command_result, Res, Time}
end),
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index 93fdc157f7..3e2bee06d1 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -934,6 +934,10 @@ exception_nocatch(Config) when is_list(Config) ->
exception_nocatch().
exception_nocatch() ->
+ Deep4LocThrow = get_deep_4_loc({throw,[42]}),
+ Deep4LocError = get_deep_4_loc({error,[42]}),
+ Deep4LocBadmatch = get_deep_4_loc({'=',[a,b]}),
+
Prog = [{'_',[],[{exception_trace}]}],
?line 1 = erlang:trace_pattern({?MODULE,deep_1,'_'}, Prog),
?line 1 = erlang:trace_pattern({?MODULE,deep_2,'_'}, Prog),
@@ -959,8 +963,9 @@ exception_nocatch() ->
{trace,t2,exception_from,{erlang,throw,1},
{error,{nocatch,Q2}}}],
exception_from, {error,{nocatch,Q2}}),
- ?line expect({trace,T2,exit,{{nocatch,Q2},[{erlang,throw,[Q2]},
- {?MODULE,deep_4,1}]}}),
+ ?line expect({trace,T2,exit,{{nocatch,Q2},[{erlang,throw,[Q2],[]},
+ {?MODULE,deep_4,1,
+ Deep4LocThrow}]}}),
?line Q3 = {dump,[dump,{dump}]},
?line T3 =
exception_nocatch(?LINE, error, [Q3], 4,
@@ -968,18 +973,29 @@ exception_nocatch() ->
{trace,t3,exception_from,{erlang,error,1},
{error,Q3}}],
exception_from, {error,Q3}),
- ?line expect({trace,T3,exit,{Q3,[{erlang,error,[Q3]},
- {?MODULE,deep_4,1}]}}),
+ ?line expect({trace,T3,exit,{Q3,[{erlang,error,[Q3],[]},
+ {?MODULE,deep_4,1,Deep4LocError}]}}),
?line T4 =
exception_nocatch(?LINE, '=', [17,4711], 5, [],
exception_from, {error,{badmatch,4711}}),
- ?line expect({trace,T4,exit,{{badmatch,4711},[{?MODULE,deep_4,1}]}}),
+ ?line expect({trace,T4,exit,{{badmatch,4711},
+ [{?MODULE,deep_4,1,Deep4LocBadmatch}]}}),
%%
?line erlang:trace_pattern({?MODULE,'_','_'}, false),
?line erlang:trace_pattern({erlang,'_','_'}, false),
?line expect(),
?line ok.
+get_deep_4_loc(Arg) ->
+ try
+ deep_4(Arg),
+ ?t:fail(should_not_return_to_here)
+ catch
+ _:_ ->
+ [{?MODULE,deep_4,1,Loc0}|_] = erlang:get_stacktrace(),
+ Loc0
+ end.
+
exception_nocatch(Line, B, Q, N, Extra, Tag, R) ->
?line io:format("== Subtest: ~w", [Line]),
?line Go = make_ref(),
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index a062cea117..2f9b01cc92 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -20,28 +20,34 @@
-module(code_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
- new_binary_types/1,t_check_process_code/1,t_check_process_code_ets/1,
+ new_binary_types/1,
+ t_check_process_code/1,t_check_old_code/1,
+ t_check_process_code_ets/1,
external_fun/1,get_chunk/1,module_md5/1,make_stub/1,
- make_stub_many_funs/1,constant_pools/1,
- false_dependency/1,coverage/1]).
+ make_stub_many_funs/1,constant_pools/1,constant_refc_binaries/1,
+ false_dependency/1,coverage/1,fun_confusion/1]).
+-define(line_trace, 1).
-include_lib("test_server/include/test_server.hrl").
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[new_binary_types, t_check_process_code,
- t_check_process_code_ets, external_fun, get_chunk,
+ t_check_process_code_ets, t_check_old_code, external_fun, get_chunk,
module_md5, make_stub, make_stub_many_funs,
- constant_pools, false_dependency, coverage].
+ constant_pools, constant_refc_binaries, false_dependency,
+ coverage, fun_confusion].
groups() ->
[].
init_per_suite(Config) ->
+ erts_debug:set_internal_state(available_internal_state, true),
Config.
end_per_suite(_Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
ok.
init_per_group(_GroupName, Config) ->
@@ -248,9 +254,36 @@ fun_refc(F) ->
Count.
+%% Test the erlang:check_old_code/1 BIF.
+t_check_old_code(Config) when is_list(Config) ->
+ ?line Data = ?config(data_dir, Config),
+ ?line File = filename:join(Data, "my_code_test"),
+
+ ?line erlang:purge_module(my_code_test),
+ ?line erlang:delete_module(my_code_test),
+ ?line catch erlang:purge_module(my_code_test),
+
+ ?line false = erlang:check_old_code(my_code_test),
+
+ ?line {ok,my_code_test,Code} = compile:file(File, [binary]),
+ ?line {module,my_code_test} = code:load_binary(my_code_test, File, Code),
+
+ ?line false = erlang:check_old_code(my_code_test),
+ ?line {module,my_code_test} = code:load_binary(my_code_test, File, Code),
+ ?line true = erlang:check_old_code(my_code_test),
+
+ ?line true = erlang:purge_module(my_code_test),
+ ?line true = erlang:delete_module(my_code_test),
+ ?line true = erlang:purge_module(my_code_test),
+
+ ?line {'EXIT',_} = (catch erlang:check_old_code([])),
+
+ ok.
+
external_fun(Config) when is_list(Config) ->
?line false = erlang:function_exported(another_code_test, x, 1),
- ?line ExtFun = erlang:make_fun(id(another_code_test), x, 1),
+ AnotherCodeTest = id(another_code_test),
+ ExtFun = fun AnotherCodeTest:x/1,
?line {'EXIT',{undef,_}} = (catch ExtFun(answer)),
?line false = erlang:function_exported(another_code_test, x, 1),
?line false = lists:member(another_code_test, erlang:loaded()),
@@ -375,7 +408,7 @@ make_stub_many_funs(Config) when is_list(Config) ->
constant_pools(Config) when is_list(Config) ->
?line Data = ?config(data_dir, Config),
?line File = filename:join(Data, "literals"),
- ?line {ok,literals,Code} = compile:file(File, [report,binary,constant_pool]),
+ ?line {ok,literals,Code} = compile:file(File, [report,binary]),
?line {module,literals} = erlang:load_module(literals,
make_sub_binary(Code)),
@@ -446,6 +479,131 @@ create_old_heap() ->
create_old_heap()
end.
+constant_refc_binaries(Config) when is_list(Config) ->
+ wait_for_memory_deallocations(),
+ Bef = memory_binary(),
+ io:format("Binary data (bytes) before test: ~p\n", [Bef]),
+
+ %% Compile the the literals module.
+ Data = ?config(data_dir, Config),
+ File = filename:join(Data, "literals"),
+ {ok,literals,Code} = compile:file(File, [report,binary]),
+
+ %% Load the code and make sure that the binary is a refc binary.
+ {module,literals} = erlang:load_module(literals, Code),
+ Bin = literals:binary(),
+ Sz = byte_size(Bin),
+ Check = erlang:md5(Bin),
+ io:format("Size of literal refc binary: ~p\n", [Sz]),
+ {refc_binary,Sz,_,_} = erts_debug:get_internal_state({binary_info,Bin}),
+ true = erlang:delete_module(literals),
+ false = erlang:check_process_code(self(), literals),
+ true = erlang:purge_module(literals),
+
+ %% Now try to provoke a memory leak.
+ provoke_mem_leak(10, Code, Check),
+
+ %% Calculate the change in allocated binary data.
+ erlang:garbage_collect(),
+ wait_for_memory_deallocations(),
+ Aft = memory_binary(),
+ io:format("Binary data (bytes) after test: ~p", [Aft]),
+ Diff = Aft - Bef,
+ if
+ Diff < 0 ->
+ io:format("~p less bytes", [abs(Diff)]);
+ Diff > 0 ->
+ io:format("~p more bytes", [Diff]);
+ true ->
+ ok
+ end,
+
+ %% Test for leaks. We must accept some natural variations in
+ %% the size of allocated binaries.
+ if
+ Diff > 64*1024 ->
+ ?t:fail(binary_leak);
+ true ->
+ ok
+ end.
+
+memory_binary() ->
+ try
+ erlang:memory(binary)
+ catch
+ error:notsup ->
+ 0
+ end.
+
+provoke_mem_leak(0, _, _) -> ok;
+provoke_mem_leak(N, Code, Check) ->
+ {module,literals} = erlang:load_module(literals, Code),
+
+ %% Create several processes with references to the literal binary.
+ Self = self(),
+ Pids = [spawn_link(fun() ->
+ create_binaries(Self, NumRefs, Check)
+ end) || NumRefs <- lists:seq(1, 10)],
+ [receive {started,Pid} -> ok end || Pid <- Pids],
+
+ %% Make the code old and remove references to the constant pool
+ %% in all processes.
+ true = erlang:delete_module(literals),
+ Ms = [spawn_monitor(fun() ->
+ false = erlang:check_process_code(Pid, literals)
+ end) || Pid <- Pids],
+ [receive
+ {'DOWN',R,process,P,normal} ->
+ ok
+ end || {P,R} <- Ms],
+
+ %% Purge the code.
+ true = erlang:purge_module(literals),
+
+ %% Tell the processes that the code has been purged.
+ [begin
+ monitor(process, Pid),
+ Pid ! purged
+ end || Pid <- Pids],
+
+ %% Wait for all processes to terminate.
+ [receive
+ {'DOWN',_,process,Pid,normal} ->
+ ok
+ end || Pid <- Pids],
+
+ %% We now expect that the binary has been deallocated.
+ provoke_mem_leak(N-1, Code, Check).
+
+create_binaries(Parent, NumRefs, Check) ->
+ Bin = literals:binary(),
+ Bins = lists:duplicate(NumRefs, Bin),
+ {bits,Bits} = literals:bits(),
+ Parent ! {started,self()},
+ receive
+ purged ->
+ %% The code has been purged. Now make sure that
+ %% the binaries haven't been corrupted.
+ Check = erlang:md5(Bin),
+ [Bin = B || B <- Bins],
+ <<42:13,Bin/binary>> = Bits,
+
+ %% Remove all references to the binaries
+ %% Doing it explicitly like this ensures that
+ %% the binaries are gone when the parent process
+ %% receives the 'DOWN' message.
+ erlang:garbage_collect()
+ end.
+
+wait_for_memory_deallocations() ->
+ try
+ erts_debug:set_internal_state(wait, deallocations)
+ catch
+ error:undef ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ wait_for_memory_deallocations()
+ end.
+
%% OTP-7559: c_p->cp could contain garbage and create a false dependency
%% to a module in a process. (Thanks to Richard Carlsson.)
false_dependency(Config) when is_list(Config) ->
@@ -527,6 +685,30 @@ coverage(Config) when is_list(Config) ->
?line {'EXIT',{badarg,_}} = (catch erlang:module_loaded(42)),
ok.
+fun_confusion(Config) when is_list(Config) ->
+ Data = ?config(data_dir, Config),
+ Src = filename:join(Data, "fun_confusion"),
+ Mod = fun_confusion,
+
+ %% Load first version of module.
+ compile_load(Mod, Src, 1),
+ F1 = Mod:f(),
+ 1 = F1(),
+
+ %% Load second version of module.
+ compile_load(Mod, Src, 2),
+ F2 = Mod:f(),
+
+ %% F1 should refer to the old code, not the newly loaded code.
+ 1 = F1(),
+ 2 = F2(),
+ ok.
+
+compile_load(Mod, Src, Ver) ->
+ {ok,Mod,Code1} = compile:file(Src, [binary,{d,version,Ver}]),
+ {module,Mod} = code:load_binary(Mod, "fun_confusion.beam", Code1),
+ ok.
+
%% Utilities.
make_sub_binary(Bin) when is_binary(Bin) ->
diff --git a/erts/emulator/test/code_SUITE_data/fun_confusion.erl b/erts/emulator/test/code_SUITE_data/fun_confusion.erl
new file mode 100644
index 0000000000..16000861df
--- /dev/null
+++ b/erts/emulator/test/code_SUITE_data/fun_confusion.erl
@@ -0,0 +1,31 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(fun_confusion).
+
+-export([f/0]).
+
+f() ->
+ fun() -> version() end.
+
+version() ->
+ %% Changing the value returned here should change
+ %% the identity of the fun in f/0.
+ ?version.
+
diff --git a/erts/emulator/test/code_SUITE_data/literals.erl b/erts/emulator/test/code_SUITE_data/literals.erl
index 9f99b1a780..d9cb8938db 100644
--- a/erts/emulator/test/code_SUITE_data/literals.erl
+++ b/erts/emulator/test/code_SUITE_data/literals.erl
@@ -18,7 +18,7 @@
%%
-module(literals).
--export([a/0,b/0,huge_bignum/0]).
+-export([a/0,b/0,huge_bignum/0,binary/0,unused_binaries/0,bits/0]).
a() ->
{a,42.0,[7,38877938333399637266518333334747]}.
@@ -81,3 +81,22 @@ b() ->
huge_bignum() ->
36#9987333333392789234879423987243987423432879423879234897423879423874328794323248423872348742323487423987423879243872347824374238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR0737376766798779987333333392789234879423987243987423432879423879234897423879423874328794323248423872348742323487423987423879243872347824374238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987779JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR0737376766798779987333333392789234879423987243987423432879423879234897423879423874328794323248423872348742323487423987423879243872347824374238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR073737676679877.
+
+-define(TIMES_FOUR(X), X,X,X,X).
+-define(BYTES_256, 0:256,1:256,2:256,3:256, 4:256,5:256,6:256,7:256).
+-define(KB_1, ?TIMES_FOUR(?BYTES_256)).
+-define(KB_4, ?TIMES_FOUR(?KB_1)).
+-define(KB_16, ?TIMES_FOUR(?KB_4)).
+-define(KB_64, ?TIMES_FOUR(?KB_16)).
+-define(KB_128, ?TIMES_FOUR(?KB_64)).
+-define(MB_1, ?TIMES_FOUR(?KB_128)).
+
+binary() ->
+ %% Too big to be a heap binary.
+ <<?MB_1>>.
+
+unused_binaries() ->
+ {<<?KB_128>>,<<?BYTES_256>>}.
+
+bits() ->
+ {bits,<<42:13,?MB_1>>}.
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index 4bebae51cc..19281f6d58 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -173,15 +173,20 @@ bulk_sendsend(Terms, BinSize) ->
Ratio = if MonitorCount2 == 0 -> MonitorCount1 / 1.0;
true -> MonitorCount1 / MonitorCount2
end,
- %% A somewhat arbitrary ratio, but hopefully one that will accomodate
- %% a wide range of CPU speeds.
- true = (Ratio > 8.0),
- {comment,
- integer_to_list(Rate1) ++ " K/s, " ++
- integer_to_list(Rate2) ++ " K/s, " ++
- integer_to_list(MonitorCount1) ++ " monitor msgs, " ++
- integer_to_list(MonitorCount2) ++ " monitor msgs, " ++
- float_to_list(Ratio) ++ " monitor ratio"}.
+ Comment = integer_to_list(Rate1) ++ " K/s, " ++
+ integer_to_list(Rate2) ++ " K/s, " ++
+ integer_to_list(MonitorCount1) ++ " monitor msgs, " ++
+ integer_to_list(MonitorCount2) ++ " monitor msgs, " ++
+ float_to_list(Ratio) ++ " monitor ratio",
+ if
+ %% A somewhat arbitrary ratio, but hopefully one that will
+ %% accommodate a wide range of CPU speeds.
+ Ratio > 8.0 ->
+ {comment,Comment};
+ true ->
+ io:put_chars(Comment),
+ ?line ?t:fail(ratio_too_low)
+ end.
bulk_sendsend2(Terms, BinSize, BusyBufSize) ->
?line Dog = test_server:timetrap(test_server:seconds(30)),
@@ -331,7 +336,7 @@ receiver2(Num, TotSize) ->
link_to_busy(doc) -> "Test that link/1 to a busy distribution port works.";
link_to_busy(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(30)),
+ ?line Dog = test_server:timetrap(test_server:seconds(60)),
?line {ok, Node} = start_node(link_to_busy),
?line Recv = spawn(Node, erlang, apply, [fun sink/1, [link_to_busy_sink]]),
@@ -378,7 +383,7 @@ tail_applied_linker(Pid) ->
exit_to_busy(doc) -> "Test that exit/2 to a busy distribution port works.";
exit_to_busy(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(30)),
+ ?line Dog = test_server:timetrap(test_server:seconds(60)),
?line {ok, Node} = start_node(exit_to_busy),
Tracer = case os:getenv("TRACE_BUSY_DIST_PORT") of
@@ -1597,8 +1602,8 @@ bad_dist_ext_control(Config) when is_list(Config) ->
?line stop_node(Victim).
bad_dist_ext_connection_id(Config) when is_list(Config) ->
- ?line {ok, Offender} = start_node(bad_dist_ext_receive_offender),
- ?line {ok, Victim} = start_node(bad_dist_ext_receive_victim),
+ ?line {ok, Offender} = start_node(bad_dist_ext_connection_id_offender),
+ ?line {ok, Victim} = start_node(bad_dist_ext_connection_id_victim),
?line start_node_monitors([Offender,Victim]),
?line Parent = self(),
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index f6cf01ce16..c07dbc5871 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -75,7 +75,9 @@
smp_select/1,
driver_select_use/1,
thread_mseg_alloc_cache_clean/1,
- otp_9302/1]).
+ otp_9302/1,
+ thr_free_drv/1,
+ async_blast/1]).
-export([bin_prefix/2]).
@@ -143,7 +145,9 @@ all() ->
otp_6879, caller, many_events, missing_callbacks,
smp_select, driver_select_use,
thread_mseg_alloc_cache_clean,
- otp_9302].
+ otp_9302,
+ thr_free_drv,
+ async_blast].
groups() ->
[{timer, [],
@@ -1590,7 +1594,7 @@ otp_6879(Config) when is_list(Config) ->
end
end,
Procs),
- %% Also try it when input exeeds default buffer (256 bytes)
+ %% Also try it when input exceeds default buffer (256 bytes)
?line Data = lists:seq(1, 1000),
?line case open_port({spawn, Drv}, []) of
Port when is_port(Port) ->
@@ -1792,7 +1796,7 @@ driver_select_use0(Config) ->
thread_mseg_alloc_cache_clean(Config) when is_list(Config) ->
case {erlang:system_info(threads),
- erlang:system_info({allocator,mseg_alloc}),
+ mseg_inst_info(0),
driver_alloc_sbct()} of
{_, false, _} ->
?line {skipped, "No mseg_alloc"};
@@ -1804,13 +1808,13 @@ thread_mseg_alloc_cache_clean(Config) when is_list(Config) ->
?line {skipped, "driver_alloc() using too large single block threshold"};
{_, _, 0} ->
?line {skipped, "driver_alloc() using too low single block threshold"};
- {true, MsegAllocInfo, SBCT} ->
+ {true, _MsegAllocInfo, SBCT} ->
?line DrvName = 'thr_alloc_drv',
?line Path = ?config(data_dir, Config),
?line erl_ddll:start(),
?line ok = load_driver(Path, DrvName),
?line Port = open_port({spawn, DrvName}, []),
- ?line CCI = mseg_alloc_cci(MsegAllocInfo),
+ ?line CCI = 1000,
?line ?t:format("CCI = ~p~n", [CCI]),
?line CCC = mseg_alloc_ccc(),
?line ?t:format("CCC = ~p~n", [CCC]),
@@ -1831,7 +1835,7 @@ mseg_alloc_cci(MsegAllocInfo) ->
?line CCI.
mseg_alloc_ccc() ->
- mseg_alloc_ccc(erlang:system_info({allocator,mseg_alloc})).
+ mseg_alloc_ccc(mseg_inst_info(0)).
mseg_alloc_ccc(MsegAllocInfo) ->
?line {value,{memkind, MKL}} = lists:keysearch(memkind,1,MsegAllocInfo),
@@ -1841,7 +1845,7 @@ mseg_alloc_ccc(MsegAllocInfo) ->
?line GigaCCC*1000000000 + CCC.
mseg_alloc_cached_segments() ->
- mseg_alloc_cached_segments(erlang:system_info({allocator,mseg_alloc})).
+ mseg_alloc_cached_segments(mseg_inst_info(0)).
mseg_alloc_cached_segments(MsegAllocInfo) ->
MemName = case is_halfword_vm() of
@@ -1859,6 +1863,13 @@ mseg_alloc_cached_segments(MsegAllocInfo) ->
= lists:keysearch(cached_segments, 1, SL),
?line CS.
+mseg_inst_info(I) ->
+ {value, {instance, I, Value}}
+ = lists:keysearch(I,
+ 2,
+ erlang:system_info({allocator,mseg_alloc})),
+ Value.
+
is_halfword_vm() ->
case {erlang:system_info({wordsize, internal}),
erlang:system_info({wordsize, external})} of
@@ -1902,18 +1913,105 @@ otp_9302(Config) when is_list(Config) ->
?line port_command(Port, ""),
?line {msg, block} = get_port_msg(Port, infinity),
?line {msg, job} = get_port_msg(Port, infinity),
- ?line case erlang:system_info(thread_pool_size) of
- 0 ->
- {msg, cancel} = get_port_msg(Port, infinity);
- _ ->
- ok
- end,
- ?line {msg, job} = get_port_msg(Port, infinity),
+ ?line C = case erlang:system_info(thread_pool_size) of
+ 0 ->
+ ?line {msg, cancel} = get_port_msg(Port, infinity),
+ ?line {msg, job} = get_port_msg(Port, infinity),
+ ?line false;
+ _ ->
+ case get_port_msg(Port, infinity) of
+ {msg, cancel} -> %% Cancel always fails in Rel >= 15
+ ?line {msg, job} = get_port_msg(Port, infinity),
+ ?line false;
+ {msg, job} ->
+ ?line ok,
+ ?line true
+ end
+ end,
?line {msg, end_of_jobs} = get_port_msg(Port, infinity),
?line no_msg = get_port_msg(Port, 2000),
?line port_close(Port),
+ ?line case C of
+ true ->
+ ?line {comment, "Async job cancelled"};
+ false ->
+ ?line {comment, "Async job not cancelled"}
+ end.
+
+thr_free_drv(Config) when is_list(Config) ->
+ ?line Path = ?config(data_dir, Config),
+ ?line erl_ddll:start(),
+ ?line ok = load_driver(Path, thr_free_drv),
+ ?line MemBefore = driver_alloc_size(),
+% io:format("SID=~p", [erlang:system_info(scheduler_id)]),
+ ?line Port = open_port({spawn, thr_free_drv}, []),
+ ?line MemPeek = driver_alloc_size(),
+ ?line true = is_port(Port),
+ ?line ok = thr_free_drv_control(Port, 0),
+ ?line port_close(Port),
+ ?line MemAfter = driver_alloc_size(),
+ ?line io:format("MemPeek=~p~n", [MemPeek]),
+ ?line io:format("MemBefore=~p, MemAfter=~p~n", [MemBefore, MemAfter]),
+ ?line MemBefore = MemAfter,
+ ?line case MemPeek of
+ undefined -> ok;
+ _ ->
+ ?line true = MemPeek > MemBefore
+ end,
?line ok.
+thr_free_drv_control(Port, N) ->
+ case erlang:port_control(Port, 0, "") of
+ "done" ->
+ ok;
+ "more" ->
+ erlang:yield(),
+% io:format("N=~p, SID=~p", [N, erlang:system_info(scheduler_id)]),
+ thr_free_drv_control(Port, N+1)
+ end.
+
+async_blast(Config) when is_list(Config) ->
+ ?line Path = ?config(data_dir, Config),
+ ?line erl_ddll:start(),
+ ?line ok = load_driver(Path, async_blast_drv),
+ ?line SchedOnln = erlang:system_info(schedulers_online),
+ ?line MemBefore = driver_alloc_size(),
+ ?line Start = os:timestamp(),
+ ?line Blast = fun () ->
+ Port = open_port({spawn, async_blast_drv}, []),
+ true = is_port(Port),
+ port_command(Port, ""),
+ receive
+ {Port, done} ->
+ ok
+ end,
+ port_close(Port)
+ end,
+ ?line Ps = lists:map(fun (N) ->
+ spawn_opt(Blast,
+ [{scheduler,
+ (N rem SchedOnln)+ 1},
+ monitor])
+ end,
+ lists:seq(1, 100)),
+ ?line MemMid = driver_alloc_size(),
+ ?line lists:foreach(fun ({Pid, Mon}) ->
+ receive
+ {'DOWN',Mon,process,Pid,_} -> ok
+ end
+ end, Ps),
+ ?line End = os:timestamp(),
+ ?line MemAfter = driver_alloc_size(),
+ ?line io:format("MemBefore=~p, MemMid=~p, MemAfter=~p~n",
+ [MemBefore, MemMid, MemAfter]),
+ ?line AsyncBlastTime = timer:now_diff(End,Start)/1000000,
+ ?line io:format("AsyncBlastTime=~p~n", [AsyncBlastTime]),
+ ?line MemBefore = MemAfter,
+ ?line erlang:display({async_blast_time, AsyncBlastTime}),
+ ?line ok.
+
+
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Utilities
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2077,3 +2175,33 @@ start_node(Config) when is_list(Config) ->
stop_node(Node) ->
?t:stop_node(Node).
+
+wait_deallocations() ->
+ try
+ erts_debug:set_internal_state(wait, deallocations)
+ catch error:undef ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ wait_deallocations()
+ end.
+
+driver_alloc_size() ->
+ wait_deallocations(),
+ case erlang:system_info({allocator_sizes, driver_alloc}) of
+ false ->
+ undefined;
+ MemInfo ->
+ CS = lists:foldl(
+ fun ({instance, _, L}, Acc) ->
+ {value,{_,SBMBCS}} = lists:keysearch(sbmbcs, 1, L),
+ {value,{_,MBCS}} = lists:keysearch(mbcs, 1, L),
+ {value,{_,SBCS}} = lists:keysearch(sbcs, 1, L),
+ [SBMBCS,MBCS,SBCS | Acc]
+ end,
+ [],
+ MemInfo),
+ lists:foldl(
+ fun(L, Sz0) ->
+ {value,{_,Sz,_,_}} = lists:keysearch(blocks_size, 1, L),
+ Sz0+Sz
+ end, 0, CS)
+ end.
diff --git a/erts/emulator/test/driver_SUITE_data/Makefile.src b/erts/emulator/test/driver_SUITE_data/Makefile.src
index 5b3ba1557e..dd48f6a0f7 100644
--- a/erts/emulator/test/driver_SUITE_data/Makefile.src
+++ b/erts/emulator/test/driver_SUITE_data/Makefile.src
@@ -12,7 +12,9 @@ MISC_DRVS = outputv_drv@dll@ \
many_events_drv@dll@ \
missing_callback_drv@dll@ \
thr_alloc_drv@dll@ \
- otp_9302_drv@dll@
+ otp_9302_drv@dll@ \
+ thr_free_drv@dll@ \
+ async_blast_drv@dll@
SYS_INFO_DRVS = sys_info_1_0_drv@dll@ \
sys_info_1_1_drv@dll@ \
diff --git a/erts/emulator/test/driver_SUITE_data/async_blast_drv.c b/erts/emulator/test/driver_SUITE_data/async_blast_drv.c
new file mode 100644
index 0000000000..3821f7e3dc
--- /dev/null
+++ b/erts/emulator/test/driver_SUITE_data/async_blast_drv.c
@@ -0,0 +1,124 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#include "erl_driver.h"
+
+#define NO_ASYNC_JOBS 10000
+
+static void stop(ErlDrvData drv_data);
+static ErlDrvData start(ErlDrvPort port,
+ char *command);
+static void output(ErlDrvData drv_data,
+ char *buf, int len);
+static void ready_async(ErlDrvData drv_data,
+ ErlDrvThreadData thread_data);
+
+static ErlDrvEntry async_blast_drv_entry = {
+ NULL /* init */,
+ start,
+ stop,
+ output,
+ NULL /* ready_input */,
+ NULL /* ready_output */,
+ "async_blast_drv",
+ NULL /* finish */,
+ NULL /* handle */,
+ NULL /* control */,
+ NULL /* timeout */,
+ NULL /* outputv */,
+ ready_async,
+ NULL /* flush */,
+ NULL /* call */,
+ NULL /* event */,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL /* handle2 */,
+ NULL /* handle_monitor */
+};
+
+typedef struct {
+ ErlDrvPort port;
+ ErlDrvTermData caller;
+ int counter;
+} async_blast_data_t;
+
+
+DRIVER_INIT(async_blast_drv)
+{
+ return &async_blast_drv_entry;
+}
+
+static void stop(ErlDrvData drv_data)
+{
+ driver_free((void *) drv_data);
+}
+
+static ErlDrvData start(ErlDrvPort port,
+ char *command)
+{
+ async_blast_data_t *abd;
+
+ abd = driver_alloc(sizeof(async_blast_data_t));
+ if (!abd)
+ return ERL_DRV_ERROR_GENERAL;
+
+ abd->port = port;
+ abd->counter = 0;
+ return (ErlDrvData) abd;
+}
+
+static void async_invoke(void *data)
+{
+
+}
+#include <stdio.h>
+
+static void ready_async(ErlDrvData drv_data,
+ ErlDrvThreadData thread_data)
+{
+ async_blast_data_t *abd = (async_blast_data_t *) drv_data;
+ if (--abd->counter == 0) {
+ ErlDrvTermData spec[] = {
+ ERL_DRV_PORT, driver_mk_port(abd->port),
+ ERL_DRV_ATOM, driver_mk_atom("done"),
+ ERL_DRV_TUPLE, 2
+ };
+ driver_send_term(abd->port, abd->caller,
+ spec, sizeof(spec)/sizeof(spec[0]));
+ }
+}
+
+static void output(ErlDrvData drv_data,
+ char *buf, int len)
+{
+ async_blast_data_t *abd = (async_blast_data_t *) drv_data;
+ if (abd->counter == 0) {
+ int i;
+ abd->caller = driver_caller(abd->port);
+ abd->counter = NO_ASYNC_JOBS;
+ for (i = 0; i < NO_ASYNC_JOBS; i++) {
+ if (0 > driver_async(abd->port, NULL, async_invoke, NULL, NULL)) {
+ driver_failure_atom(abd->port, "driver_async_failed");
+ break;
+ }
+ }
+ }
+}
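
A hedged sketch, not part of the patch, of how Erlang code might exercise the
async_blast_drv driver above: any output to the port schedules NO_ASYNC_JOBS
empty driver_async jobs, and when the last ready_async callback has run the
driver sends {Port, done} to the process that issued the output. The function
below is illustrative only; the real test case lives in driver_SUITE.

    async_blast(DataDir) ->
        ok = erl_ddll:load_driver(DataDir, async_blast_drv),
        Port = open_port({spawn, async_blast_drv}, []),
        port_command(Port, "go"),
        receive
            {Port, done} -> erlang:port_close(Port)
        after 10000 ->
            timeout
        end.
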
diff --git a/erts/emulator/test/driver_SUITE_data/thr_free_drv.c b/erts/emulator/test/driver_SUITE_data/thr_free_drv.c
new file mode 100644
index 0000000000..622a62ebea
--- /dev/null
+++ b/erts/emulator/test/driver_SUITE_data/thr_free_drv.c
@@ -0,0 +1,241 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include "erl_driver.h"
+
+#define BLOCKS_PER_THREAD 100000
+#define NO_THREADS 10
+#define BLOCKS_PER_CTRL 1000
+
+typedef struct {
+ ErlDrvMutex *mtx;
+ ErlDrvCond *cnd;
+ int b;
+ int *go;
+ int *skip;
+ void *blocks[BLOCKS_PER_THREAD];
+} test_thread_data;
+
+typedef struct {
+ ErlDrvPort port;
+ int b;
+ int go;
+ int skip;
+ test_thread_data ttd[NO_THREADS+1];
+ ErlDrvTid tids[NO_THREADS+1];
+} test_data;
+
+static ErlDrvData start(ErlDrvPort port, char *command);
+static void stop(ErlDrvData data);
+static int control(ErlDrvData drv_data, unsigned int command, char *buf,
+ int len, char **rbuf, int rlen);
+
+static ErlDrvEntry thr_free_drv_entry = {
+ NULL /* init */,
+ start,
+ stop,
+ NULL /* output */,
+ NULL /* ready_input */,
+ NULL /* ready_output */,
+ "thr_free_drv",
+ NULL /* finish */,
+ NULL /* handle */,
+ control,
+ NULL /* timeout */,
+ NULL /* outputv */,
+ NULL /* ready_async */,
+ NULL /* flush */,
+ NULL /* call */,
+ NULL /* event */,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL /* handle2 */,
+ NULL /* handle_monitor */
+};
+
+DRIVER_INIT(thr_free_drv)
+{
+ return &thr_free_drv_entry;
+}
+
+void *
+test_thread(void *vttd)
+{
+ test_thread_data *ttd = (test_thread_data *) vttd;
+ int i, skip;
+
+ erl_drv_mutex_lock(ttd->mtx);
+
+ while (!*ttd->go)
+ erl_drv_cond_wait(ttd->cnd, ttd->mtx);
+ skip = *ttd->skip;
+ erl_drv_mutex_unlock(ttd->mtx);
+
+ if (!skip) {
+ for (i = 0; i < BLOCKS_PER_THREAD; i++)
+ driver_free(ttd->blocks[i]);
+ }
+ return NULL;
+}
+
+ErlDrvData start(ErlDrvPort port, char *command)
+{
+ int join = 0, t, b, res;
+ test_thread_data *ttd;
+ test_data *td = driver_alloc(sizeof(test_data));
+ if (!td)
+ return ERL_DRV_ERROR_GENERAL;
+ ttd = td->ttd;
+ for (b = 0; b < BLOCKS_PER_THREAD; b++)
+ for (t = 0; t <= NO_THREADS; t++)
+ ttd[t].blocks[b] = NULL;
+ ttd[0].mtx = NULL;
+ ttd[0].cnd = NULL;
+
+ for (b = 0; b < BLOCKS_PER_THREAD; b++) {
+ for (t = 0; t <= NO_THREADS; t++) {
+ ttd[t].blocks[b] = driver_alloc(1);
+ if (ttd[t].blocks[b] == NULL)
+ goto fail;
+ }
+ }
+
+ td->b = -1;
+ td->go = 0;
+ td->skip = 0;
+
+ ttd[0].mtx = erl_drv_mutex_create("test_mutex");
+ if (!ttd[0].mtx)
+ goto fail;
+ ttd[0].cnd = erl_drv_cond_create("test_cnd");
+ if (!ttd[0].cnd)
+ goto fail;
+ ttd[0].go = &td->go;
+ ttd[0].skip = &td->skip;
+
+ for (t = 1; t <= NO_THREADS; t++) {
+ ttd[t].mtx = ttd[0].mtx;
+ ttd[t].cnd = ttd[0].cnd;
+ ttd[t].go = ttd[0].go;
+ ttd[t].skip = ttd[0].skip;
+ res = erl_drv_thread_create("test_thread",
+ &td->tids[t],
+ test_thread,
+ &ttd[t],
+ NULL);
+ if (res != 0)
+ goto fail;
+ join = t;
+ }
+
+ td->port = port;
+
+ return (ErlDrvData) td;
+
+fail:
+
+ if (join) {
+ erl_drv_mutex_lock(ttd[0].mtx);
+ td->go = 1;
+ td->skip = 1;
+ erl_drv_cond_broadcast(ttd[0].cnd);
+ erl_drv_mutex_unlock(ttd[0].mtx);
+ for (t = 1; t <= join; t++)
+ erl_drv_thread_join(td->tids[t], NULL);
+ }
+
+ if (ttd[0].mtx)
+ erl_drv_mutex_destroy(ttd[0].mtx);
+ if (ttd[0].cnd)
+ erl_drv_cond_destroy(ttd[0].cnd);
+
+ for (b = 0; b < BLOCKS_PER_THREAD; b++) {
+ for (t = 0; t <= NO_THREADS; t++) {
+ if (ttd[t].blocks[b] != NULL)
+ driver_free(ttd[t].blocks[b]);
+ }
+ }
+
+ return ERL_DRV_ERROR_GENERAL;
+}
+
+static void stop(ErlDrvData drv_data)
+{
+ test_data *td = (test_data *) drv_data;
+ int t, b;
+ for (t = 1; t <= NO_THREADS; t++)
+ erl_drv_thread_join(td->tids[t], NULL);
+ for (b = 0; b < BLOCKS_PER_THREAD; b++) {
+ if (td->ttd[0].blocks[b])
+ driver_free(td->ttd[0].blocks[b]);
+ }
+ erl_drv_mutex_destroy(td->ttd[0].mtx);
+ erl_drv_cond_destroy(td->ttd[0].cnd);
+ driver_free(td);
+}
+
+static int control(ErlDrvData drv_data, unsigned int command, char *buf,
+ int len, char **rbuf, int rlen)
+{
+ test_data *td = (test_data *) drv_data;
+ char *result = "failure";
+ int i, b;
+ int res;
+ int result_len;
+
+ if (td->b == -1) {
+ erl_drv_mutex_lock(td->ttd[0].mtx);
+ td->go = 1;
+ erl_drv_cond_broadcast(td->ttd[0].cnd);
+ erl_drv_mutex_unlock(td->ttd[0].mtx);
+ td->b = 0;
+ }
+
+ for (i = 0, b = td->b; i < BLOCKS_PER_CTRL && b < BLOCKS_PER_THREAD; i++, b++) {
+ driver_free(td->ttd[0].blocks[b]);
+ td->ttd[0].blocks[b] = NULL;
+ }
+
+ td->b = b;
+ if (b >= BLOCKS_PER_THREAD)
+ result = "done";
+ else
+ result = "more";
+
+ result_len = strlen(result);
+ if (result_len <= rlen) {
+ memcpy(*rbuf, result, result_len);
+ return result_len;
+ }
+ else {
+ *rbuf = driver_alloc(result_len);
+ if (!*rbuf) {
+ driver_failure_posix(td->port, ENOMEM);
+ return 0;
+ }
+ else {
+ memcpy(*rbuf, result, result_len);
+ return result_len;
+ }
+ }
+}
diff --git a/erts/emulator/test/exception_SUITE.erl b/erts/emulator/test/exception_SUITE.erl
index 9d6fc9521d..109cec25cb 100644
--- a/erts/emulator/test/exception_SUITE.erl
+++ b/erts/emulator/test/exception_SUITE.erl
@@ -23,9 +23,10 @@
init_per_group/2,end_per_group/2,
badmatch/1, pending_errors/1, nil_arith/1,
stacktrace/1, nested_stacktrace/1, raise/1, gunilla/1, per/1,
- exception_with_heap_frag/1]).
+ exception_with_heap_frag/1, line_numbers/1]).
-export([bad_guy/2]).
+-export([crash/1]).
-include_lib("test_server/include/test_server.hrl").
-import(lists, [foreach/2]).
@@ -35,7 +36,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[badmatch, pending_errors, nil_arith, stacktrace,
nested_stacktrace, raise, gunilla, per,
- exception_with_heap_frag].
+ exception_with_heap_frag, line_numbers].
groups() ->
[].
@@ -141,14 +142,20 @@ pending_exit_message(Args, Expected) ->
end,
process_flag(trap_exit, false).
-pending({badarg, [{erlang,Bif,BifArgs},{?MODULE,Func,Arity}|_]}, Func, Args, _Code)
- when is_atom(Bif), is_list(BifArgs), length(Args) == Arity ->
+pending({badarg,[{erlang,Bif,BifArgs,Loc1},
+ {?MODULE,Func,Arity,Loc2}|_]},
+ Func, Args, _Code)
+ when is_atom(Bif), is_list(BifArgs), length(Args) =:= Arity,
+ is_list(Loc1), is_list(Loc2) ->
ok;
-pending({undef,[{non_existing_module,foo,[]}|_]}, _, _, _) ->
+pending({undef,[{non_existing_module,foo,[],Loc}|_]}, _, _, _)
+ when is_list(Loc) ->
ok;
-pending({function_clause,[{?MODULE,Func,Args}|_]}, Func, Args, _Code) ->
+pending({function_clause,[{?MODULE,Func,Args,Loc}|_]}, Func, Args, _Code)
+ when is_list(Loc) ->
ok;
-pending({Code,[{?MODULE,Func,Arity}|_]}, Func, Args, Code) when length(Args) == Arity ->
+pending({Code,[{?MODULE,Func,Arity,Loc}|_]}, Func, Args, Code)
+ when length(Args) =:= Arity, is_list(Loc) ->
ok;
pending(Reason, _Function, _Args, _Code) ->
test_server:fail({bad_exit_reason,Reason}).
@@ -255,24 +262,24 @@ stacktrace(Conf) when is_list(Conf) ->
?line {_,Mref} = spawn_monitor(fun() -> exit({Tag,erlang:get_stacktrace()}) end),
?line {Tag,[]} = receive {'DOWN',Mref,_,_,Info} -> Info end,
V = [make_ref()|self()],
- ?line {value2,{caught1,badarg,[{erlang,abs,[V]}|_]=St1}} =
+ ?line {value2,{caught1,badarg,[{erlang,abs,[V],_}|_]=St1}} =
stacktrace_1({'abs',V}, error, {value,V}),
?line St1 = erase(stacktrace1),
?line St1 = erase(stacktrace2),
?line St1 = erlang:get_stacktrace(),
- ?line {caught2,{error,badarith},[{?MODULE,my_add,2}|_]=St2} =
+ ?line {caught2,{error,badarith},[{?MODULE,my_add,2,_}|_]=St2} =
stacktrace_1({'div',{1,0}}, error, {'add',{0,a}}),
- ?line [{?MODULE,my_div,2}|_] = erase(stacktrace1),
+ ?line [{?MODULE,my_div,2,_}|_] = erase(stacktrace1),
?line St2 = erase(stacktrace2),
?line St2 = erlang:get_stacktrace(),
- ?line {caught2,{error,{try_clause,V}},[{?MODULE,stacktrace_1,3}|_]=St3} =
+ ?line {caught2,{error,{try_clause,V}},[{?MODULE,stacktrace_1,3,_}|_]=St3} =
stacktrace_1({value,V}, error, {value,V}),
?line St3 = erase(stacktrace1),
?line St3 = erase(stacktrace2),
?line St3 = erlang:get_stacktrace(),
- ?line {caught2,{throw,V},[{?MODULE,foo,1}|_]=St4} =
+ ?line {caught2,{throw,V},[{?MODULE,foo,1,_}|_]=St4} =
stacktrace_1({value,V}, error, {throw,V}),
- ?line [{?MODULE,stacktrace_1,3}|_] = erase(stacktrace1),
+ ?line [{?MODULE,stacktrace_1,3,_}|_] = erase(stacktrace1),
?line St4 = erase(stacktrace2),
?line St4 = erlang:get_stacktrace(),
@@ -280,8 +287,8 @@ stacktrace(Conf) when is_list(Conf) ->
?line stacktrace_2()
catch
error:{badmatch,_} ->
- [{?MODULE,stacktrace_2,0},
- {?MODULE,stacktrace,1}|_] =
+ [{?MODULE,stacktrace_2,0,_},
+ {?MODULE,stacktrace,1,_}|_] =
erlang:get_stacktrace(),
ok
end.
@@ -315,15 +322,15 @@ nested_stacktrace(Conf) when is_list(Conf) ->
nested_stacktrace_1({{value,{V,x1}},void,{V,x1}},
{void,void,void}),
?line {caught1,
- [{?MODULE,my_add,2}|_],
+ [{?MODULE,my_add,2,_}|_],
value2,
- [{?MODULE,my_add,2}|_]} =
+ [{?MODULE,my_add,2,_}|_]} =
nested_stacktrace_1({{'add',{V,x1}},error,badarith},
{{value,{V,x2}},void,{V,x2}}),
?line {caught1,
- [{?MODULE,my_add,2}|_],
- {caught2,[{erlang,abs,[V]}|_]},
- [{erlang,abs,[V]}|_]} =
+ [{?MODULE,my_add,2,_}|_],
+ {caught2,[{erlang,abs,[V],_}|_]},
+ [{erlang,abs,[V],_}|_]} =
nested_stacktrace_1({{'add',{V,x1}},error,badarith},
{{'abs',V},error,badarg}),
ok.
@@ -362,14 +369,14 @@ raise(Conf) when is_list(Conf) ->
end,
?line A = erlang:get_stacktrace(),
?line A = get(raise),
- ?line [{?MODULE,my_div,2}|_] = A,
+ ?line [{?MODULE,my_div,2,_}|_] = A,
%%
N = 8, % Must be even
?line N = erlang:system_flag(backtrace_depth, N),
+ ?line B = odd_even(N, []),
?line try even(N)
catch error:function_clause -> ok
end,
- ?line B = odd_even(N, []),
?line B = erlang:get_stacktrace(),
%%
?line C0 = odd_even(N+1, []),
@@ -387,19 +394,12 @@ raise(Conf) when is_list(Conf) ->
odd_even(N, R) when is_integer(N), N > 1 ->
odd_even(N-1,
[if (N rem 2) == 0 ->
- {?MODULE,even,1};
+ {?MODULE,even,1,[{file,"odd_even.erl"},{line,3}]};
true ->
- {?MODULE,odd,1}
+ {?MODULE,odd,1,[{file,"odd_even.erl"},{line,6}]}
end|R]);
odd_even(1, R) ->
- [{?MODULE,odd,[1]}|R].
-
-even(N) when is_integer(N), N > 1, (N rem 2) == 0 ->
- odd(N-1)++[N].
-
-odd(N) when is_integer(N), N > 1, (N rem 2) == 1 ->
- even(N-1)++[N].
-
+ [{?MODULE,odd,[1],[{file,"odd_even.erl"},{line,5}]}|R].
foo({value,Value}) -> Value;
foo({'div',{A,B}}) ->
@@ -526,4 +526,186 @@ do_exception_with_heap_frag(Bin, [Sz|Sizes]) ->
do_exception_with_heap_frag(Bin, Sizes);
do_exception_with_heap_frag(_, []) -> ok.
+line_numbers(Config) when is_list(Config) ->
+ {'EXIT',{{case_clause,bad_tag},
+ [{?MODULE,line1,2,
+ [{file,"fake_file.erl"},{line,3}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(bad_tag, 0)),
+ {'EXIT',{badarith,
+ [{?MODULE,line1,2,
+ [{file,"fake_file.erl"},{line,5}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(a, not_an_integer)),
+ {'EXIT',{{badmatch,{ok,1}},
+ [{?MODULE,line1,2,
+ [{file,"fake_file.erl"},{line,7}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(a, 0)),
+ {'EXIT',{crash,
+ [{?MODULE,crash,1,
+ [{file,"fake_file.erl"},{line,14}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(a, 41)),
+
+ ModFile = ?MODULE_STRING++".erl",
+ [{?MODULE,maybe_crash,1,[{file,"call.erl"},{line,28}]},
+ {?MODULE,call1,0,[{file,"call.erl"},{line,14}]},
+ {?MODULE,close_calls,1,[{file,"call.erl"},{line,5}]},
+ {?MODULE,line_numbers,1,[{file,ModFile},{line,_}]}|_] =
+ close_calls(call1),
+ [{?MODULE,maybe_crash,1,[{file,"call.erl"},{line,28}]},
+ {?MODULE,call2,0,[{file,"call.erl"},{line,18}]},
+ {?MODULE,close_calls,1,[{file,"call.erl"},{line,6}]},
+ {?MODULE,line_numbers,1,[{file,ModFile},{line,_}]}|_] =
+ close_calls(call2),
+ [{?MODULE,maybe_crash,1,[{file,"call.erl"},{line,28}]},
+ {?MODULE,call3,0,[{file,"call.erl"},{line,22}]},
+ {?MODULE,close_calls,1,[{file,"call.erl"},{line,7}]},
+ {?MODULE,line_numbers,1,[{file,ModFile},{line,_}]}|_] =
+ close_calls(call3),
+ no_crash = close_calls(other),
+
+ <<0,0>> = build_binary1(16),
+ {'EXIT',{badarg,
+ [{?MODULE,build_binary1,1,
+ [{file,"bit_syntax.erl"},{line,72503}]},
+ {?MODULE,line_numbers,1,
+ [{file,ModFile},{line,_}]}|_]}} =
+ (catch build_binary1(bad_size)),
+
+ <<7,1,2,3>> = build_binary2(8, <<1,2,3>>),
+ {'EXIT',{badarg,
+ [{?MODULE,build_binary2,2,
+ [{file,"bit_syntax.erl"},{line,72507}]},
+ {?MODULE,line_numbers,1,
+ [{file,ModFile},{line,_}]}|_]}} =
+ (catch build_binary2(bad_size, <<>>)),
+ {'EXIT',{badarg,
+ [{erlang,bit_size,[bad_binary],[]},
+ {?MODULE,build_binary2,2,
+ [{file,"bit_syntax.erl"},{line,72507}]},
+ {?MODULE,line_numbers,1,
+ [{file,ModFile},{line,_}]}|_]}} =
+ (catch build_binary2(8, bad_binary)),
+
+ {'EXIT',{function_clause,
+ [{?MODULE,do_call_abs,[y,y],
+ [{file,"gc_bif.erl"},{line,18}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch do_call_abs(y, y)),
+ {'EXIT',{badarg,
+ [{erlang,abs,[[]],[]},
+ {?MODULE,do_call_abs,2,
+ [{file,"gc_bif.erl"},{line,19}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch do_call_abs(x, [])),
+
+ {'EXIT',{{badmatch,"42"},
+ [{?MODULE,applied_bif_1,1,[{file,"applied_bif.erl"},{line,5}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch applied_bif_1(42)),
+
+ {'EXIT',{{badmatch,{current_location,
+ {?MODULE,applied_bif_2,0,
+ [{file,"applied_bif.erl"},{line,9}]}}},
+ [{?MODULE,applied_bif_2,0,[{file,"applied_bif.erl"},{line,10}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch applied_bif_2()),
+
+ ok.
+
id(I) -> I.
+
+-file("odd_even.erl", 1). %Line 1
+even(N) when is_integer(N), N > 1, (N rem 2) == 0 ->
+ odd(N-1)++[N]. %Line 3
+
+odd(N) when is_integer(N), N > 1, (N rem 2) == 1 ->
+ even(N-1)++[N]. %Line 6
+
+%%
+%% If the compiler removes redundant line instructions (any
+%% line instruction with the same location as the previous),
+%% and the loader also removes line instructions before
+%% tail-recursive calls to external functions, then the
+%% badmatch exception in line 7 below will be reported as
+%% occurring in line 6.
+%%
+%% That means that any removal of redundant line instructions
+%% must all be done in the compiler OR in the loader.
+%%
+-file("fake_file.erl", 1). %Line 1
+line1(Tag, X) -> %Line 2
+ case Tag of %Line 3
+ a ->
+ Y = X + 1, %Line 5
+ Res = id({ok,Y}), %Line 6
+ ?MODULE:crash({ok,42} = Res); %Line 7
+ b ->
+ x = id(x), %Line 9
+ ok %Line 10
+ end. %Line 11
+
+crash(_) -> %Line 13
+ erlang:error(crash). %Line 14
+
+-file("call.erl", 1). %Line 1
+close_calls(Where) -> %Line 2
+ put(where_to_crash, Where), %Line 3
+ try
+ call1(), %Line 5
+ call2(), %Line 6
+ call3(), %Line 7
+ no_crash %Line 8
+ catch error:crash ->
+ erlang:get_stacktrace() %Line 10
+ end. %Line 11
+
+call1() -> %Line 13
+ maybe_crash(call1), %Line 14
+ ok. %Line 15
+
+call2() -> %Line 17
+ maybe_crash(call2), %Line 18
+ ok. %Line 19
+
+call3() -> %Line 21
+ maybe_crash(call3), %Line 22
+ ok. %Line 23
+
+maybe_crash(Name) -> %Line 25
+ case get(where_to_crash) of %Line 26
+ Name ->
+ erlang:error(crash); %Line 28
+ _ ->
+ ok %Line 30
+ end.
+
+-file("bit_syntax.erl", 72500). %Line 72500
+build_binary1(Size) -> %Line 72501
+ id(42), %Line 72502
+ <<0:Size>>. %Line 72503
+
+build_binary2(Size, Bin) -> %Line 72505
+ id(0), %Line 72506
+ <<7:Size,Bin/binary>>. %Line 72507
+
+-file("gc_bif.erl", 17).
+do_call_abs(x, Arg) -> %Line 18
+ abs(Arg). %Line 19
+
+%% Make sure a BIF that is applied does not leave the p->cp
+%% set (and thus generating an extra entry on the stack).
+
+-file("applied_bif.erl", 1).
+%% Explicit apply.
+applied_bif_1(I) -> %Line 3
+ L = apply(erlang, integer_to_list, [I]), %Line 4
+ fail = L, %Line 5
+ ok. %Line 6
+%% Implicit apply.
+applied_bif_2() -> %Line 8
+ R = process_info(self(), current_location), %Line 9
+ fail = R, %Line 10
+ ok. %Line 11
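
A hedged illustration, not part of the patch: line_numbers/1 above relies on
the extended stacktrace format where each entry is
{Module,Function,ArityOrArgs,Location}, with Location a (possibly empty) list
of {file,File} and {line,Line} pairs; the -file attributes let the test pin
the expected File and Line values. A minimal check of that shape:

    stk_shape_demo() ->
        try erlang:error(crash)
        catch
            error:crash ->
                %% Head entry: module, function, arity or argument list,
                %% and a location list (possibly []).
                [{Mod,Fun,_ArityOrArgs,Loc}|_] = erlang:get_stacktrace(),
                true = is_atom(Mod) andalso is_atom(Fun) andalso is_list(Loc)
        end.
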
diff --git a/erts/emulator/test/float_SUITE.erl b/erts/emulator/test/float_SUITE.erl
index 736510339f..46466427c5 100644
--- a/erts/emulator/test/float_SUITE.erl
+++ b/erts/emulator/test/float_SUITE.erl
@@ -25,7 +25,7 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
fpe/1,fp_drv/1,fp_drv_thread/1,denormalized/1,match/1,
- bad_float_unpack/1]).
+ bad_float_unpack/1,cmp_zero/1, cmp_integer/1, cmp_bignum/1]).
-export([otp_7178/1]).
@@ -41,10 +41,10 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[fpe, fp_drv, fp_drv_thread, otp_7178, denormalized,
- match, bad_float_unpack].
+ match, bad_float_unpack, {group, comparison}].
groups() ->
- [].
+ [{comparison, [parallel], [cmp_zero, cmp_integer, cmp_bignum]}].
init_per_suite(Config) ->
Config.
@@ -187,6 +187,101 @@ bad_float_unpack(Config) when is_list(Config) ->
bad_float_unpack_match(<<F:64/float>>) -> F;
bad_float_unpack_match(<<I:64/integer-signed>>) -> I.
+cmp_zero(_Config) ->
+ cmp(0.5e-323,0).
+
+cmp_integer(_Config) ->
+ Axis = (1 bsl 53)-2.0, %% The point where floating point numbers become imprecise
+ span_cmp(Axis,2,200),
+ cmp(Axis*Axis,round(Axis)).
+
+cmp_bignum(_Config) ->
+ span_cmp((1 bsl 58) - 1.0),%% Smallest bignum float
+
+ %% Test when the big num goes from I to I+1 in size
+ [span_cmp((1 bsl (32*I)) - 1.0) || I <- lists:seq(2,30)],
+
+ %% Test bignum greater than largest float
+ cmp((1 bsl (64*16)) - 1, (1 bsl (64*15)) * 1.0),
+ %% Test when num is much larger than float
+ [cmp((1 bsl (32*I)) - 1, (1 bsl (32*(I-2))) * 1.0) || I <- lists:seq(3,30)],
+ %% Test when float is much larger than num
+ [cmp((1 bsl (64*15)) * 1.0, (1 bsl (32*(I)))) || I <- lists:seq(1,29)],
+
+ %% Test that all int == float comparisons work as they should
+ [true = 1 bsl N == (1 bsl N)*1.0 || N <- lists:seq(0, 1023)],
+ [true = (1 bsl N)*-1 == (1 bsl N)*-1.0 || N <- lists:seq(0, 1023)].
+
+span_cmp(Axis) ->
+ span_cmp(Axis, 25).
+span_cmp(Axis, Length) ->
+ span_cmp(Axis, round(Axis) bsr 52, Length).
+span_cmp(Axis, Incr, Length) ->
+ [span_cmp(Axis, Incr, Length, 1 bsl (1 bsl I)) || I <- lists:seq(0,6)].
+%% This function creates tests around a number axis. <, > and == are all
+%% tested for both negative and positive numbers.
+%%
+%% Axis: The number around which to do the tests, e.g. (1 bsl 58) - 1.0
+%% Incr: How much to increment the test numbers between each test.
+%% Length: Length/2 is the number of Incr away from Axis to test on the
+%% negative and positive plane.
+%% Diff: How much the float and int should differ when comparing
+span_cmp(Axis, Incr, Length, Diff) ->
+ [begin
+ cmp(round(Axis*-1.0)+Diff+I*Incr,Axis*-1.0+I*Incr),
+ cmp(Axis*-1.0+I*Incr,round(Axis*-1.0)-Diff+I*Incr)
+ end || I <- lists:seq((Length div 2)*-1,(Length div 2))],
+ [begin
+ cmp(round(Axis)+Diff+I*Incr,Axis+I*Incr),
+ cmp(Axis+I*Incr,round(Axis)-Diff+I*Incr)
+ end || I <- lists:seq((Length div 2)*-1,(Length div 2))].
+
+cmp(Big,Small) when is_float(Big) ->
+ BigGtSmall = lists:flatten(
+ io_lib:format("~f > ~p",[Big,Small])),
+ BigLtSmall = lists:flatten(
+ io_lib:format("~f < ~p",[Big,Small])),
+ BigEqSmall = lists:flatten(
+ io_lib:format("~f == ~p",[Big,Small])),
+ SmallGtBig = lists:flatten(
+ io_lib:format("~p > ~f",[Small,Big])),
+ SmallLtBig = lists:flatten(
+ io_lib:format("~p < ~f",[Small,Big])),
+ SmallEqBig = lists:flatten(
+ io_lib:format("~p == ~f",[Small,Big])),
+ cmp(Big,Small,BigGtSmall,BigLtSmall,SmallGtBig,SmallLtBig,
+ SmallEqBig,BigEqSmall);
+cmp(Big,Small) when is_float(Small) ->
+ BigGtSmall = lists:flatten(
+ io_lib:format("~p > ~f",[Big,Small])),
+ BigLtSmall = lists:flatten(
+ io_lib:format("~p < ~f",[Big,Small])),
+ BigEqSmall = lists:flatten(
+ io_lib:format("~p == ~f",[Big,Small])),
+ SmallGtBig = lists:flatten(
+ io_lib:format("~f > ~p",[Small,Big])),
+ SmallLtBig = lists:flatten(
+ io_lib:format("~f < ~p",[Small,Big])),
+ SmallEqBig = lists:flatten(
+ io_lib:format("~f == ~p",[Small,Big])),
+ cmp(Big,Small,BigGtSmall,BigLtSmall,SmallGtBig,SmallLtBig,
+ SmallEqBig,BigEqSmall).
+
+cmp(Big,Small,BigGtSmall,BigLtSmall,SmallGtBig,SmallLtBig,
+ SmallEqBig,BigEqSmall) ->
+ {_,_,_,true} = {Big,Small,BigGtSmall,
+ Big > Small},
+ {_,_,_,false} = {Big,Small,BigLtSmall,
+ Big < Small},
+ {_,_,_,false} = {Big,Small,SmallGtBig,
+ Small > Big},
+ {_,_,_,true} = {Big,Small,SmallLtBig,
+ Small < Big},
+ {_,_,_,false} = {Big,Small,SmallEqBig,
+ Small == Big},
+ {_,_,_,false} = {Big,Small,BigEqSmall,
+ Big == Small}.
+
id(I) -> I.
start_node(Config) when is_list(Config) ->
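
A small illustration, not part of the patch, of why the comparison tests above
pivot around 1 bsl 53: beyond that point a double can no longer represent
every integer, so mixed integer/float comparisons have to be exact instead of
being done by converting the integer to a float.

    precision_demo() ->
        Axis = 1 bsl 53,
        true  = (Axis == float(Axis)),      %% exactly representable
        false = (Axis + 1 == float(Axis)),  %% must not compare equal
        true  = (Axis + 1 > float(Axis)).   %% ordering must still hold
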
diff --git a/erts/emulator/test/guard_SUITE.erl b/erts/emulator/test/guard_SUITE.erl
index f41324c2cc..a5df9b59a0 100644
--- a/erts/emulator/test/guard_SUITE.erl
+++ b/erts/emulator/test/guard_SUITE.erl
@@ -421,7 +421,7 @@ try_gbif(Id, X, Y) ->
try_fail_gbif(Id, X, Y) ->
case catch guard_bif(Id, X, Y) of
- {'EXIT', {function_clause,[{?MODULE,guard_bif,[Id,X,Y]}|_]}} ->
+ {'EXIT',{function_clause,[{?MODULE,guard_bif,[Id,X,Y],_}|_]}} ->
io:format("guard_bif(~p, ~p, ~p) -- ok", [Id,X,Y]);
Other ->
?line ok = io:format("guard_bif(~p, ~p, ~p) -- bad result: ~p\n",
@@ -493,9 +493,9 @@ type_tests(Test, [Type|T], Allowed) ->
end;
false ->
case catch type_test(Test, Value) of
- {'EXIT', {function_clause, {?MODULE,type_test,[Test,Value]}}} ->
- ok;
- {'EXIT', {function_clause,[{?MODULE,type_test,[Test,Value]}|_]}} ->
+ {'EXIT',{function_clause,
+ [{?MODULE,type_test,[Test,Value],Loc}|_]}}
+ when is_list(Loc) ->
ok;
{'EXIT',Other} ->
?line test_server:fail({unexpected_error_reason,Other});
diff --git a/erts/emulator/test/hibernate_SUITE.erl b/erts/emulator/test/hibernate_SUITE.erl
index 203fa6b48e..82a0aad189 100644
--- a/erts/emulator/test/hibernate_SUITE.erl
+++ b/erts/emulator/test/hibernate_SUITE.erl
@@ -25,16 +25,16 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
basic/1,dynamic_call/1,min_heap_size/1,bad_args/1,
- messages_in_queue/1,undefined_mfa/1, no_heap/1]).
+ messages_in_queue/1,undefined_mfa/1,no_heap/1,wake_up_and_bif_trap/1]).
%% Used by test cases.
--export([basic_hibernator/1,dynamic_call_hibernator/2,messages_in_queue_restart/2, no_heap_loop/0]).
+-export([basic_hibernator/1,dynamic_call_hibernator/2,messages_in_queue_restart/2, no_heap_loop/0,characters_to_list_trap/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[basic, dynamic_call, min_heap_size, bad_args, messages_in_queue,
- undefined_mfa, no_heap].
+ undefined_mfa, no_heap, wake_up_and_bif_trap].
groups() ->
[].
@@ -384,6 +384,31 @@ clean_dict() ->
lists:foreach(fun ({Key, _}) -> erase(Key) end, Dict).
%%
+%% Wake up and then immediately bif trap with a lengthy computation.
+%%
+
+wake_up_and_bif_trap(doc) -> [];
+wake_up_and_bif_trap(suite) -> [];
+wake_up_and_bif_trap(Config) when is_list(Config) ->
+ ?line Self = self(),
+ ?line Pid = spawn_link(fun() -> erlang:hibernate(?MODULE, characters_to_list_trap, [Self]) end),
+ ?line Pid ! wakeup,
+ ?line receive
+ {ok, Pid0} when Pid0 =:= Pid -> ok
+ after 5000 ->
+ ?line ?t:fail(process_blocked)
+ end,
+ ?line unlink(Pid),
+ ?line exit(Pid, bye).
+
+%% Lengthy computation that traps (in characters_to_list_trap_3).
+characters_to_list_trap(Parent) ->
+ Bin0 = <<"abcdefghijklmnopqrstuvwxz0123456789">>,
+ Bin = binary:copy(Bin0, 1500),
+ unicode:characters_to_list(Bin),
+ Parent ! {ok, self()}.
+
+%%
%% Misc
%%
diff --git a/erts/emulator/test/mtx_SUITE.erl b/erts/emulator/test/mtx_SUITE.erl
index e0a7878bd8..879d2f61dd 100644
--- a/erts/emulator/test/mtx_SUITE.erl
+++ b/erts/emulator/test/mtx_SUITE.erl
@@ -62,16 +62,29 @@ init_per_suite(Config) when is_list(Config) ->
Config.
end_per_suite(Config) when is_list(Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
Config.
init_per_testcase(_Case, Config) ->
Dog = ?t:timetrap(?t:minutes(15)),
+ %% Wait for deallocations to complete since we measure
+ %% runtime in test cases.
+ wait_deallocations(),
[{watchdog, Dog}|Config].
end_per_testcase(_Func, Config) ->
Dog = ?config(watchdog, Config),
?t:timetrap_cancel(Dog).
+wait_deallocations() ->
+ try
+ erts_debug:set_internal_state(wait, deallocations)
+ catch
+ error:undef ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ wait_deallocations()
+ end.
+
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
diff --git a/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
index 818023211c..7c8137dc83 100644
--- a/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
+++ b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -552,13 +552,19 @@ create_rwlock(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
rwlock_op(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
- rwlock_resource_t *rwlr;
+ /*
+ * Use a union for pointer type conversion to avoid compiler warnings
+ * about strict-aliasing violations with gcc-4.1. gcc >= 4.2 does not
+ * emit the warning.
+ * TODO: Reconsider use of union once gcc-4.1 is obsolete?
+ */
+ union { void* vp; rwlock_resource_t *p; } rwlr;
int blocking, write, wait_locked, wait_unlocked;
if (argc != 5)
goto badarg;
- if (!enif_get_resource(env, argv[0], enif_priv_data(env), (void **) &rwlr))
+ if (!enif_get_resource(env, argv[0], enif_priv_data(env), &rwlr.vp))
goto badarg;
blocking = get_bool(env, argv[1]);
@@ -581,22 +587,22 @@ rwlock_op(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
if (write) {
if (blocking)
- RWMUTEX_WLOCK(rwlr->rwlock);
+ RWMUTEX_WLOCK(rwlr.p->rwlock);
else
- while (EBUSY == RWMUTEX_TRYWLOCK(rwlr->rwlock));
- if (rwlr->lock_check) {
- ASSERT(!ATOMIC_READ(&rwlr->is_locked));
- ATOMIC_SET(&rwlr->is_locked, -1);
+ while (EBUSY == RWMUTEX_TRYWLOCK(rwlr.p->rwlock));
+ if (rwlr.p->lock_check) {
+ ASSERT(!ATOMIC_READ(&rwlr.p->is_locked));
+ ATOMIC_SET(&rwlr.p->is_locked, -1);
}
}
else {
if (blocking)
- RWMUTEX_RLOCK(rwlr->rwlock);
+ RWMUTEX_RLOCK(rwlr.p->rwlock);
else
- while (EBUSY == RWMUTEX_TRYRLOCK(rwlr->rwlock));
- if (rwlr->lock_check) {
- ASSERT(ATOMIC_READ(&rwlr->is_locked) >= 0);
- ATOMIC_INC(&rwlr->is_locked);
+ while (EBUSY == RWMUTEX_TRYRLOCK(rwlr.p->rwlock));
+ if (rwlr.p->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr.p->is_locked) >= 0);
+ ATOMIC_INC(&rwlr.p->is_locked);
}
}
@@ -604,18 +610,18 @@ rwlock_op(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
milli_sleep(wait_locked);
if (write) {
- if (rwlr->lock_check) {
- ASSERT(ATOMIC_READ(&rwlr->is_locked) == -1);
- ATOMIC_SET(&rwlr->is_locked, 0);
+ if (rwlr.p->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr.p->is_locked) == -1);
+ ATOMIC_SET(&rwlr.p->is_locked, 0);
}
- RWMUTEX_WUNLOCK(rwlr->rwlock);
+ RWMUTEX_WUNLOCK(rwlr.p->rwlock);
}
else {
- if (rwlr->lock_check) {
- ASSERT(ATOMIC_READ(&rwlr->is_locked) > 0);
- ATOMIC_DEC(&rwlr->is_locked);
+ if (rwlr.p->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr.p->is_locked) > 0);
+ ATOMIC_DEC(&rwlr.p->is_locked);
}
- RWMUTEX_RUNLOCK(rwlr->rwlock);
+ RWMUTEX_RUNLOCK(rwlr.p->rwlock);
}
if (wait_unlocked)
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 9c31b7f78d..370363bf9e 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -35,7 +35,9 @@
resource_takeover/1,
threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1,
is_checks/1,
- get_length/1, make_atom/1, make_string/1, reverse_list_test/1]).
+ get_length/1, make_atom/1, make_string/1, reverse_list_test/1,
+ otp_9668/1
+ ]).
-export([many_args_100/100]).
@@ -60,7 +62,9 @@ all() ->
iolist_as_binary, resource, resource_binary,
resource_takeover, threading, send, send2, send3,
send_threaded, neg, is_checks, get_length, make_atom,
- make_string,reverse_list_test].
+ make_string,reverse_list_test,
+ otp_9668
+ ].
groups() ->
[].
@@ -281,6 +285,12 @@ types(Config) when is_list(Config) ->
end, int_list()),
?line verify_tmpmem(TmpMem),
+ ?line true = (compare(-1294536544000, -1178704800000) < 0),
+ ?line true = (compare(-1178704800000, -1294536544000) > 0),
+ ?line true = (compare(-295147905179352825856, -36893488147419103232) < 0),
+ ?line true = (compare(-36893488147419103232, -295147905179352825856) > 0),
+ ?line true = (compare(-29514790517935282585612345678, -36893488147419103232) < 0),
+ ?line true = (compare(-36893488147419103232, -29514790517935282585612345678) > 0),
ok.
int_list() ->
@@ -1159,7 +1169,28 @@ is_checks(Config) when is_list(Config) ->
?line ensure_lib_loaded(Config, 1),
?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
self(), hd(erlang:ports()), [], [1,9,9,8],
- {hejsan, "hejsan", [$h,"ejs",<<"an">>]}),
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 12),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -12),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 18446744073709551617),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -18446744073709551617),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 99.146),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -99.146),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 18446744073709551616.2e2),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -18446744073709551616.2e2),
try
?line error = check_is_exception(),
?line throw(expected_badarg)
@@ -1209,6 +1240,20 @@ reverse_list_test(Config) ->
?line RevList = reverse_list(List),
?line badarg = reverse_list(foo).
+otp_9668(doc) -> ["Memory leak of tmp-buffer when inspecting iolist or unaligned binary in unbound environment"];
+otp_9668(Config) ->
+ ensure_lib_loaded(Config, 1),
+ TmpMem = tmpmem(),
+ IOList = ["This",' ',<<"is">>,' ',[<<"an iolist">>,'.']],
+ otp_9668_nif(IOList),
+
+ <<_:5/bitstring,UnalignedBin:10/binary,_/bitstring>> = <<"Abuse me as unaligned">>,
+ otp_9668_nif(UnalignedBin),
+
+ ?line verify_tmpmem(TmpMem),
+ ok.
+
+
tmpmem() ->
case erlang:system_info({allocator,temp_alloc}) of
false -> undefined;
@@ -1297,7 +1342,7 @@ get_resource(_,_) -> ?nif_stub.
release_resource(_) -> ?nif_stub.
last_resource_dtor_call() -> ?nif_stub.
make_new_resource(_,_) -> ?nif_stub.
-check_is(_,_,_,_,_,_,_,_,_,_) -> ?nif_stub.
+check_is(_,_,_,_,_,_,_,_,_,_,_) -> ?nif_stub.
check_is_exception() -> ?nif_stub.
length_test(_,_,_,_,_) -> ?nif_stub.
make_atoms() -> ?nif_stub.
@@ -1318,6 +1363,7 @@ send_term(_,_) -> ?nif_stub.
reverse_list(_) -> ?nif_stub.
echo_int(_) -> ?nif_stub.
type_sizes() -> ?nif_stub.
+otp_9668_nif(_) -> ?nif_stub.
nif_stub_error(Line) ->
exit({nif_not_loaded,module,?MODULE,line,Line}).
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index bdf1549862..7d7903af25 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -41,7 +41,18 @@ typedef struct
CallInfo* call_history;
NifModPrivData* nif_mod;
union { ErlNifResourceType* t; long l; } rt_arr[2];
-}PrivData;
+} PrivData;
+
+/*
+ * Use a union for pointer type conversion to avoid compiler warnings
+ * about strict-aliasing violations with gcc-4.1. gcc >= 4.2 does not
+ * emit the warning.
+ * TODO: Reconsider use of union once gcc-4.1 is obsolete?
+ */
+typedef union {
+ void* vp;
+ struct make_term_info* p;
+} mti_t;
void add_call(ErlNifEnv* env, PrivData* data, const char* func_name)
{
@@ -707,7 +718,7 @@ static ERL_NIF_TERM get_resource_type(ErlNifEnv* env, int argc, const ERL_NIF_TE
static ERL_NIF_TERM alloc_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary data_bin;
- union { ErlNifResourceType* t; long l;} type;
+ union { ErlNifResourceType* t; long l; } type;
union { void* p; long l;} data;
if (!enif_get_long(env, argv[0], &type.l)
|| !enif_inspect_binary(env, argv[1], &data_bin)
@@ -731,7 +742,7 @@ static ERL_NIF_TERM make_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM a
static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary data_bin;
- union { ErlNifResourceType* t; long l;} type;
+ union { ErlNifResourceType* t; long l; } type;
void* data;
ERL_NIF_TERM ret;
if (!enif_get_long(env, argv[0], &type.l)
@@ -749,7 +760,7 @@ static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TE
static ERL_NIF_TERM make_new_resource_binary(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary data_bin;
- union { struct binary_resource* p; void* vp; long l;} br;
+ union { struct binary_resource* p; void* vp; long l; } br;
void* buf;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &data_bin)
@@ -821,6 +832,7 @@ static ERL_NIF_TERM release_resource(ErlNifEnv* env, int argc, const ERL_NIF_TER
* argv[7] an empty list
* argv[8] a non-empty list
* argv[9] a tuple
+ * argv[10] a number (small, big integer or float)
*/
static ERL_NIF_TERM check_is(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
@@ -837,6 +849,7 @@ static ERL_NIF_TERM check_is(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]
if (!enif_is_list(env, argv[7])) return enif_make_badarg(env);
if (!enif_is_list(env, argv[8])) return enif_make_badarg(env);
if (!enif_is_tuple(env, argv[9])) return enif_make_badarg(env);
+ if (!enif_is_number(env, argv[10])) return enif_make_badarg(env);
return ok_atom;
}
@@ -1269,10 +1282,7 @@ static void msgenv_dtor(ErlNifEnv* env, void* obj)
static ERL_NIF_TERM clear_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union {
- void* vp;
- struct make_term_info* p;
- }mti;
+ mti_t mti;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
return enif_make_badarg(env);
}
@@ -1285,7 +1295,7 @@ static ERL_NIF_TERM clear_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
static ERL_NIF_TERM grow_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ERL_NIF_TERM term;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
|| (argc>2 && !enif_get_uint(env,argv[2], &mti.p->n))) {
@@ -1301,7 +1311,7 @@ static ERL_NIF_TERM grow_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ErlNifPid to;
ERL_NIF_TERM copy;
int res;
@@ -1316,7 +1326,7 @@ static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ErlNifPid to;
ERL_NIF_TERM copy;
int res;
@@ -1334,7 +1344,7 @@ static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv
void* threaded_sender(void *arg)
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
mti.vp = arg;
enif_mutex_lock(mti.p->mtx);
@@ -1349,7 +1359,7 @@ void* threaded_sender(void *arg)
static ERL_NIF_TERM send_blob_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ERL_NIF_TERM copy;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
|| !enif_get_local_pid(env,argv[1], &mti.p->to_pid)) {
@@ -1375,7 +1385,7 @@ static ERL_NIF_TERM send_blob_thread(ErlNifEnv* env, int argc, const ERL_NIF_TER
static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
int err;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
return enif_make_badarg(env);
@@ -1392,7 +1402,7 @@ static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TER
static ERL_NIF_TERM copy_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
return enif_make_badarg(env);
}
@@ -1421,6 +1431,26 @@ static ERL_NIF_TERM reverse_list(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
return rev_list;
}
+static ERL_NIF_TERM otp_9668_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ /* Inspect in process independent env */
+ ErlNifEnv* myenv = enif_alloc_env();
+ ERL_NIF_TERM mycopy = enif_make_copy(myenv, argv[0]);
+ ErlNifBinary obin, cbin;
+
+ if ((enif_inspect_binary(env, argv[0], &obin)
+ && enif_inspect_binary(myenv, mycopy, &cbin))
+ ||
+ (enif_inspect_iolist_as_binary(env, argv[0], &obin)
+ && enif_inspect_iolist_as_binary(myenv, mycopy, &cbin)))
+ {
+ assert(obin.size == cbin.size);
+ assert(memcmp(obin.data, cbin.data, obin.size) == 0);
+ }
+ enif_free_env(myenv);
+ return atom_ok;
+}
+
static ErlNifFunc nif_funcs[] =
{
{"lib_version", 0, lib_version},
@@ -1447,7 +1477,7 @@ static ErlNifFunc nif_funcs[] =
{"release_resource", 1, release_resource},
{"last_resource_dtor_call", 0, last_resource_dtor_call},
{"make_new_resource", 2, make_new_resource},
- {"check_is", 10, check_is},
+ {"check_is", 11, check_is},
{"check_is_exception", 0, check_is_exception},
{"length_test", 5, length_test},
{"make_atoms", 0, make_atoms},
@@ -1468,7 +1498,8 @@ static ErlNifFunc nif_funcs[] =
{"send_term", 2, send_term},
{"reverse_list",1, reverse_list},
{"echo_int", 1, echo_int},
- {"type_sizes", 0, type_sizes}
+ {"type_sizes", 0, type_sizes},
+ {"otp_9668_nif", 1, otp_9668_nif}
};
ERL_NIF_INIT(nif_SUITE,nif_funcs,load,reload,upgrade,unload)
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index eac56a867d..0a1ef5a78f 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -724,6 +724,8 @@ open_ports(Name, Settings) ->
[];
system_limit ->
[];
+ enomem ->
+ [];
Other ->
?line test_server:fail({open_ports, Other})
end;
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index f68e712268..fdc55a4cc5 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -35,7 +35,7 @@
self_exit/1, normal_suicide_exit/1, abnormal_suicide_exit/1,
t_exit_2_catch/1, trap_exit_badarg/1, trap_exit_badarg_in_bif/1,
exit_and_timeout/1, exit_twice/1,
- t_process_info/1, process_info_other_msg/1,
+ t_process_info/1, process_info_other/1, process_info_other_msg/1,
process_info_other_dist_msg/1,
process_info_2_list/1, process_info_lock_reschedule/1,
process_info_lock_reschedule2/1,
@@ -64,7 +64,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[spawn_with_binaries, t_exit_1, {group, t_exit_2},
trap_exit_badarg, trap_exit_badarg_in_bif,
- t_process_info, process_info_other_msg,
+ t_process_info, process_info_other, process_info_other_msg,
process_info_other_dist_msg, process_info_2_list,
process_info_lock_reschedule,
process_info_lock_reschedule2,
@@ -258,7 +258,9 @@ trap_exit_badarg() ->
?line Pid = fun_spawn(fun() -> bad_guy(kb_128()) end),
?line Garbage = kb_128(),
?line receive
- {'EXIT', Pid, {badarg,[{erlang,abs,[Garbage]},{?MODULE,bad_guy,1}|_]}} ->
+ {'EXIT',Pid,{badarg,[{erlang,abs,[Garbage],Loc1},
+ {?MODULE,bad_guy,1,Loc2}|_]}}
+ when is_list(Loc1), is_list(Loc2) ->
ok;
Other ->
?line ok = io:format("Bad EXIT message: ~P", [Other, 30]),
@@ -410,7 +412,7 @@ etwice_high(Low) ->
exit(Low, first),
exit(Low, second).
-%% Tests the process_info/1 BIF.
+%% Tests the process_info/2 BIF.
t_process_info(Config) when is_list(Config) ->
?line [] = process_info(self(), registered_name),
?line register(my_name, self()),
@@ -418,13 +420,100 @@ t_process_info(Config) when is_list(Config) ->
?line {status, running} = process_info(self(), status),
?line {min_heap_size, 233} = process_info(self(), min_heap_size),
?line {min_bin_vheap_size, 46368} = process_info(self(), min_bin_vheap_size),
- ?line {current_function, {?MODULE, t_process_info, 1}} =
+ ?line {current_function,{?MODULE,t_process_info,1}} =
process_info(self(), current_function),
+ ?line {current_function,{?MODULE,t_process_info,1}} =
+ apply(erlang, process_info, [self(),current_function]),
+
+ %% current_location and current_stacktrace
+ {Line1,Res1} = {?LINE,process_info(self(), current_location)},
+ verify_loc(Line1, Res1),
+ {Line2,Res2} = {?LINE,apply(erlang, process_info,
+ [self(),current_location])},
+ verify_loc(Line2, Res2),
+ pi_stacktrace([{?MODULE,t_process_info,1,?LINE}]),
+
?line Gleader = group_leader(),
?line {group_leader, Gleader} = process_info(self(), group_leader),
?line {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
ok.
+pi_stacktrace(Expected0) ->
+ {Line,Res} = {?LINE,erlang:process_info(self(), current_stacktrace)},
+ {current_stacktrace,Stack} = Res,
+ Expected = [{?MODULE,pi_stacktrace,1,Line}|Expected0],
+ pi_stacktrace_1(Stack, Expected).
+
+pi_stacktrace_1([{M,F,A,Loc}|Stk], [{M,F,A,Line}|Exp]) ->
+ case Loc of
+ [] ->
+ %% No location info for some reason (+L, native code).
+ io:format("Missing location information for ~w:~w/~w",
+ [M,F,A]),
+ ok;
+ [_|_] ->
+ Line = proplists:get_value(line, Loc),
+ File = proplists:get_value(file, Loc),
+ File = ?MODULE_STRING ++ ".erl"
+ end,
+ pi_stacktrace_1(Stk, Exp);
+pi_stacktrace_1([_|_], []) -> ok.
+
+verify_loc(Line, {current_location,{?MODULE,t_process_info=F,1=A,Loc}}) ->
+ case Loc of
+ [] ->
+ %% No location info for some reason (+L, native code).
+ io:format("Missing location information for ~w:~w/~w",
+ [?MODULE,F,A]),
+ ok;
+ [_|_] ->
+ Line = proplists:get_value(line, Loc),
+ File = proplists:get_value(file, Loc),
+ File = ?MODULE_STRING ++ ".erl"
+ end.
+
+process_info_other(Config) when is_list(Config) ->
+ Self = self(),
+ Pid = spawn_link(fun() -> process_info_looper(Self) end),
+ receive after 1 -> ok end,
+ pio_current_location(10000, Pid, 0, 0),
+ pio_current_stacktrace().
+
+pio_current_location(0, _, Pi, Looper) ->
+ io:format("~w call(s) to erlang:process_info/2", [Pi]),
+ io:format("~w call(s) to ~w:process_info_looper/1", [Looper,?MODULE]);
+pio_current_location(N, Pid, Pi, Looper) ->
+ erlang:yield(),
+ {current_location,Where} = process_info(Pid, current_location),
+ case Where of
+ {erlang,process_info,2,[]} ->
+ pio_current_location(N-1, Pid, Pi+1, Looper);
+ {?MODULE,process_info_looper,1,Loc} when is_list(Loc) ->
+ pio_current_location(N-1, Pid, Pi, Looper+1)
+ end.
+
+pio_current_stacktrace() ->
+ L = [begin
+ {current_stacktrace,Stk} = process_info(P, current_stacktrace),
+ {P,Stk}
+ end || P <- processes()],
+ [erlang:garbage_collect(P) || {P,_} <- L],
+ erlang:garbage_collect(),
+ [verify_stacktrace(Stk) || {_,Stk} <- L],
+ ok.
+
+verify_stacktrace([{M,F,A,Loc}|T])
+ when is_atom(M),
+ is_atom(F),
+ is_integer(A),
+ is_list(Loc) ->
+ verify_stacktrace(T);
+verify_stacktrace([]) -> ok.
+
+process_info_looper(Parent) ->
+ process_info(Parent, current_location),
+ process_info_looper(Parent).
+
%% Tests the process_info/1 BIF on another process with messages.
process_info_other_msg(Config) when is_list(Config) ->
Self = self(),
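
A hedged illustration, not part of the patch: the new process_info/2 items
exercised above return the current call location and a stacktrace whose
entries carry the same {file,File}/{line,Line} location lists as exceptions
do (or [] when no such information is available, e.g. for native code).

    current_info_demo(Pid) ->
        {current_location, {M, F, A, Loc}} =
            process_info(Pid, current_location),
        {current_stacktrace, Stk} =
            process_info(Pid, current_stacktrace),
        true = is_atom(M) andalso is_atom(F)
            andalso is_integer(A) andalso is_list(Loc),
        [true = is_list(L) || {_, _, _, L} <- Stk],
        ok.
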
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index f16d0ea429..debb54579b 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -87,8 +87,17 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
-
+init_per_testcase(update_cpu_info, Config) ->
+ case os:find_executable("taskset") of
+ false ->
+ {skip,"Could not find 'taskset' in path"};
+ _ ->
+ init_per_tc(update_cpu_info, Config)
+ end;
init_per_testcase(Case, Config) when is_list(Config) ->
+ init_per_tc(Case, Config).
+
+init_per_tc(Case, Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
process_flag(priority, max),
erlang:display({'------------', ?MODULE, Case, '------------'}),
@@ -1030,7 +1039,7 @@ sbt_test(Config, CpuTCmd, ClBt, Bt, LP) ->
?line ok.
scheduler_suspend(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:minutes(2)),
+ ?line Dog = ?t:timetrap(?t:minutes(5)),
?line lists:foreach(fun (S) -> scheduler_suspend_test(Config, S) end,
[64, 32, 16, default]),
?line ?t:timetrap_cancel(Dog),
diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl
index 9b782b35a2..0350eb671d 100644
--- a/erts/emulator/test/system_info_SUITE.erl
+++ b/erts/emulator/test/system_info_SUITE.erl
@@ -37,7 +37,7 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2, end_per_testcase/2]).
--export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1]).
+-export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1, memory/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(2)).
@@ -45,7 +45,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[process_count, system_version, misc_smoke_tests,
- heap_size, wordsize].
+ heap_size, wordsize, memory].
groups() ->
[].
@@ -187,3 +187,312 @@ wordsize(Config) when is_list(Config) ->
Other ->
exit({unexpected_wordsizes,Other})
end.
+
+memory(doc) -> ["Verify that erlang:memory/0 and memory results in crashdump produce are similar"];
+memory(Config) when is_list(Config) ->
+ %%
+ %% Verify that erlang:memory/0 and the memory results
+ %% produced in a crash dump are similar.
+ %%
+ %% erlang:memory/0 requests information from each scheduler
+ %% thread and puts the information together in erlang code
+ %% (erlang.erl).
+ %%
+ %% When a crash dump is written we cannot use the
+ %% erlang:memory/0 implementation. The crashdump implementation
+ %% is a pure C implementation inspecting all allocator instances
+ %% after the system has been blocked (erts_memory() in erl_alloc.c).
+ %%
+ %% Since we have two implementations, modifications can easily
+ %% cause them to produce different results.
+ %%
+ %% erts_debug:get_internal_state(memory) blocks the system and
+ %% executes the same code that the crash dump writing uses.
+ %%
+
+ erts_debug:set_internal_state(available_internal_state, true),
+ %% Use a large heap size on the controlling process in
+ %% order to avoid changes in its heap size during
+ %% comparisons.
+ MinHeapSize = process_flag(min_heap_size, 1024*1024),
+ Prio = process_flag(priority, max),
+ try
+ erlang:memory(), %% first call will init stat atoms
+ garbage_collect(), %% blow up heap
+ memory_test(Config)
+ catch
+ error:notsup -> {skipped, "erlang:memory() not supported"}
+ after
+ process_flag(min_heap_size, MinHeapSize),
+ process_flag(priority, Prio),
+ catch erts_debug:set_internal_state(available_internal_state, false)
+ end.
+
+memory_test(_Config) ->
+
+ MWs = spawn_mem_workers(),
+
+ DPs = mem_workers_call(MWs,
+ fun () ->
+ mapn(fun (_) ->
+ spawn(fun () ->
+ receive
+ after infinity ->
+ ok
+ end
+ end)
+ end,
+ 1000 div erlang:system_info(schedulers_online))
+ end,
+ []),
+ cmp_memory(MWs, "spawn procs"),
+
+ Ps = lists:flatten(DPs),
+
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (P) -> link(P) end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "link procs"),
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (P) -> unlink(P) end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "unlink procs"),
+
+ DMs = mem_workers_call(MWs,
+ fun () ->
+ lists:map(fun (P) ->
+ monitor(process, P)
+ end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "monitor procs"),
+ Ms = lists:flatten(DMs),
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (M) ->
+ demonitor(M)
+ end, Ms)
+ end,
+ []),
+ cmp_memory(MWs, "demonitor procs"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (P) ->
+ P ! {a, "message", make_ref()}
+ end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "message procs"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ Mons = lists:map(fun (P) ->
+ exit(P, kill),
+ monitor(process, P)
+ end,
+ Ps),
+ lists:foreach(fun (Mon) ->
+ receive
+ {'DOWN', Mon, _, _, _} -> ok
+ end
+ end,
+ Mons)
+ end, []),
+ cmp_memory(MWs, "kill procs"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ put(binary_data,
+ mapn(fun (_) -> list_to_binary(lists:duplicate(256,$?)) end, 100))
+ end,
+ []),
+
+ cmp_memory(MWs, "store binary data"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ put(binary_data, false),
+ garbage_collect()
+ end,
+ []),
+ cmp_memory(MWs, "release binary data"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ list_to_atom("an ugly atom "++integer_to_list(erlang:system_info(scheduler_id))),
+ list_to_atom("another ugly atom "++integer_to_list(erlang:system_info(scheduler_id))),
+ list_to_atom("yet another ugly atom "++integer_to_list(erlang:system_info(scheduler_id)))
+ end,
+ []),
+ cmp_memory(MWs, "new atoms"),
+
+
+ mem_workers_call(MWs,
+ fun () ->
+ T = ets:new(?MODULE, []),
+ ets:insert(T, {gurka, lists:seq(1,10000)}),
+ ets:insert(T, {banan, lists:seq(1,1024)}),
+ ets:insert(T, {appelsin, make_ref()}),
+ put(ets_id, T)
+ end,
+ []),
+ cmp_memory(MWs, "store ets data"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ ets:delete(get(ets_id)),
+ put(ets_id, false)
+ end,
+ []),
+ cmp_memory(MWs, "remove ets data"),
+
+ lists:foreach(fun (MW) ->
+ unlink(MW),
+ Mon = monitor(process, MW),
+ exit(MW, kill),
+ receive
+ {'DOWN', Mon, _, _, _} -> ok
+ end
+ end,
+ MWs),
+ ok.
+
+mem_worker() ->
+ receive
+ {call, From, Fun, Args} ->
+ From ! {reply, self(), apply(Fun, Args)},
+ mem_worker();
+ {cast, _From, Fun, Args} ->
+ apply(Fun, Args),
+ mem_worker()
+ end.
+
+mem_workers_call(MWs, Fun, Args) ->
+ lists:foreach(fun (MW) ->
+ MW ! {call, self(), Fun, Args}
+ end,
+ MWs),
+ lists:map(fun (MW) ->
+ receive
+ {reply, MW, Res} ->
+ Res
+ end
+ end,
+ MWs).
+
+mem_workers_cast(MWs, Fun, Args) ->
+ lists:foreach(fun (MW) ->
+ MW ! {cast, self(), Fun, Args}
+ end,
+ MWs).
+
+spawn_mem_workers() ->
+ spawn_mem_workers(erlang:system_info(schedulers_online)).
+
+spawn_mem_workers(0) ->
+ [];
+spawn_mem_workers(N) ->
+ [spawn_opt(fun () -> mem_worker() end,
+ [{scheduler, N rem erlang:system_info(schedulers_online) + 1},
+ link]) | spawn_mem_workers(N-1)].
+
+
+
+mem_get(X, Mem) ->
+ case lists:keyfind(X, 1, Mem) of
+ {X, Val} -> Val;
+ false -> false
+ end.
+
+cmp_memory(What, Mem1, Mem2, 1) ->
+ R1 = mem_get(What, Mem1),
+ R2 = mem_get(What, Mem2),
+ true = R1 == R2;
+cmp_memory(What, Mem1, Mem2, RelDiff) ->
+ %% We allow a relative difference of RelDiff
+ R1 = mem_get(What, Mem1),
+ R2 = mem_get(What, Mem2),
+ case R1 == R2 of
+ true ->
+ ok;
+ false ->
+ case R1 > R2 of
+ true ->
+ true = R2*RelDiff > R1;
+ false ->
+ true = R1*RelDiff > R2
+ end
+ end.
+
+pos_int(Val) when Val >= 0 ->
+ Val;
+pos_int(Val) ->
+ exit({not_pos_int, Val}).
+
+check_sane_memory(Mem) ->
+ Tot = pos_int(mem_get(total, Mem)),
+ Proc = pos_int(mem_get(processes, Mem)),
+ ProcUsed = pos_int(mem_get(processes_used, Mem)),
+ Sys = pos_int(mem_get(system, Mem)),
+ Atom = pos_int(mem_get(atom, Mem)),
+ AtomUsed = pos_int(mem_get(atom_used, Mem)),
+ Bin = pos_int(mem_get(binary, Mem)),
+ Code = pos_int(mem_get(code, Mem)),
+ Ets = pos_int(mem_get(ets, Mem)),
+
+ Tot = Proc + Sys,
+ true = Sys > Atom + Bin + Code + Ets,
+ true = Proc >= ProcUsed,
+ true = Atom >= AtomUsed,
+
+ case mem_get(maximum, Mem) of
+ false -> ok;
+ Max -> true = pos_int(Max) >= Tot
+ end,
+ ok.
+
+cmp_memory(MWs, Str) ->
+ erlang:display(Str),
+ lists:foreach(fun (MW) -> garbage_collect(MW) end, MWs),
+ garbage_collect(),
+ erts_debug:set_internal_state(wait, deallocations),
+
+ EDM = erts_debug:get_internal_state(memory),
+ EM = erlang:memory(),
+
+ io:format("~s:~n"
+ "erlang:memory() = ~p~n"
+ "crash dump memory = ~p~n",
+ [Str, EM, EDM]),
+
+ ?line check_sane_memory(EM),
+ ?line check_sane_memory(EDM),
+
+ %% We expect these to always give us exactly the same result
+
+ ?line cmp_memory(atom, EM, EDM, 1),
+ ?line cmp_memory(atom_used, EM, EDM, 1),
+ ?line cmp_memory(binary, EM, EDM, 1),
+ ?line cmp_memory(code, EM, EDM, 1),
+ ?line cmp_memory(ets, EM, EDM, 1),
+
+ %% Total, processes, processes_used, and system will seldom
+ %% give us exactly the same result since the two readings
+ %% aren't taken atomically.
+
+ ?line cmp_memory(total, EM, EDM, 1.05),
+ ?line cmp_memory(processes, EM, EDM, 1.05),
+ ?line cmp_memory(processes_used, EM, EDM, 1.05),
+ ?line cmp_memory(system, EM, EDM, 1.05),
+
+ ok.
+
+mapn(_Fun, 0) ->
+ [];
+mapn(Fun, N) ->
+ [Fun(N) | mapn(Fun, N-1)].
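
A minimal sketch, not part of the patch, of the comparison idea described in
memory/1 above: read both implementations and require the entries that the
test expects to be identical to match exactly.

    compare_memory() ->
        erts_debug:set_internal_state(available_internal_state, true),
        EM  = erlang:memory(),                        %% erlang.erl implementation
        EDM = erts_debug:get_internal_state(memory),  %% crash-dump code path
        lists:foreach(
          fun (Key) ->
                  {Key, Val} = lists:keyfind(Key, 1, EM),
                  {Key, Val} = lists:keyfind(Key, 1, EDM)
          end,
          [atom, atom_used, binary, code, ets]).
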
diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl
index 091e960610..32e2a98e3c 100644
--- a/erts/emulator/test/trace_local_SUITE.erl
+++ b/erts/emulator/test/trace_local_SUITE.erl
@@ -767,8 +767,8 @@ exception_test(Opts, Func0, Args0) ->
end,
?line R1 = exc_slave(ExcOpts, Func, Args),
- ?line Stack2 = [{?MODULE,exc_top,3},{?MODULE,slave,2}],
- ?line Stack3 = [{?MODULE,exc,2}|Stack2],
+ ?line Stack2 = [{?MODULE,exc_top,3,[]},{?MODULE,slave,2,[]}],
+ ?line Stack3 = [{?MODULE,exc,2,[]}|Stack2],
?line Rs =
case x_exc_top(ExcOpts, Func, Args) of % Emulation
{crash,{Reason,Stack}}=R when is_list(Stack) ->
@@ -789,21 +789,29 @@ exception_test(Opts, Func0, Args0) ->
end,
?line expect({nm}).
-exception_validate(R1, [R2|Rs]) ->
+exception_validate(R0, Rs0) ->
+ R = clean_location(R0),
+ Rs = [clean_location(E) || E <- Rs0],
+ exception_validate_1(R, Rs).
+
+exception_validate_1(R1, [R2|Rs]) ->
case [R1|R2] of
[R|R] ->
ok;
- [{crash,{badarg,[{lists,reverse,[L1a,L1b]}|T]}}|
- {crash,{badarg,[{lists,reverse,[L2a,L2b]}|T]}}] ->
+ [{crash,{badarg,[{lists,reverse,[L1a,L1b],_}|T]}}|
+ {crash,{badarg,[{lists,reverse,[L2a,L2b],_}|T]}}] ->
same({crash,{badarg,[{lists,reverse,
- [lists:reverse(L1b, L1a),[]]}|T]}},
+ [lists:reverse(L1b, L1a),[]],[]}|T]}},
{crash,{badarg,[{lists,reverse,
- [lists:reverse(L2b, L2a),[]]}|T]}});
+ [lists:reverse(L2b, L2a),[]],[]}|T]}});
_ when is_list(Rs), Rs =/= [] ->
exception_validate(R1, Rs)
end.
-
+clean_location({crash,{Reason,Stk0}}) ->
+ Stk = [{M,F,A,[]} || {M,F,A,_} <- Stk0],
+ {crash,{Reason,Stk}};
+clean_location(Term) -> Term.
%%% Tracee target functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
@@ -1057,10 +1065,10 @@ x_exc_exception(_Rtt, M, F, _, Arity, CR) ->
x_exc_stacktrace() ->
x_exc_stacktrace(erlang:get_stacktrace()).
%% Truncate stacktrace to below exc/2
-x_exc_stacktrace([{?MODULE,x_exc,4}|_]) -> [];
-x_exc_stacktrace([{?MODULE,x_exc_func,4}|_]) -> [];
-x_exc_stacktrace([{?MODULE,x_exc_body,4}|_]) -> [];
-x_exc_stacktrace([{?MODULE,exc,2}|_]) -> [];
+x_exc_stacktrace([{?MODULE,x_exc,4,_}|_]) -> [];
+x_exc_stacktrace([{?MODULE,x_exc_func,4,_}|_]) -> [];
+x_exc_stacktrace([{?MODULE,x_exc_body,4,_}|_]) -> [];
+x_exc_stacktrace([{?MODULE,exc,2,_}|_]) -> [];
x_exc_stacktrace([H|T]) ->
[H|x_exc_stacktrace(T)].
diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops
index e7c57142c0..58c36c3bdc 100755
--- a/erts/emulator/utils/beam_makeops
+++ b/erts/emulator/utils/beam_makeops
@@ -2,7 +2,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1998-2010. All Rights Reserved.
+# Copyright Ericsson AB 1998-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -67,6 +67,10 @@ my $max_gen_operands = 8;
# Must be even. The beam_load.c file must be updated, too.
my $max_spec_operands = 6;
+# The maximum number of primitive genop_types.
+
+my $max_genop_types = 16;
+
my %gen_opnum;
my %num_specific;
my %gen_to_spec;
@@ -101,12 +105,14 @@ my %match_engine_ops; # All opcodes for the match engine.
my %gen_transform_offset;
my @transformations;
my @call_table;
+my %call_table;
my @pred_table;
+my %pred_table;
# Operand types for generic instructions.
my $compiler_types = "uiaxyfhz";
-my $loader_types = "nprvlq";
+my $loader_types = "nprvlqo";
my $genop_types = $compiler_types . $loader_types;
#
@@ -142,34 +148,67 @@ my %arg_size = ('r' => 0, # x(0) - x register zero
my %type_bit;
my @tag_type;
+sub define_type_bit {
+ my($tag,$val) = @_;
+ defined $type_bit{$tag} and
+ sanity("the tag '$tag' has already been defined with the value ",
+ $type_bit{$tag});
+ $type_bit{$tag} = $val;
+}
+
{
my($bit) = 1;
my(%bit);
foreach (split('', $genop_types)) {
push(@tag_type, $_);
- $type_bit{$_} = $bit;
+ define_type_bit($_, $bit);
$bit{$_} = $bit;
$bit *= 2;
}
# Composed types.
- $type_bit{'d'} = $type_bit{'x'} | $type_bit{'y'} | $type_bit{'r'};
- $type_bit{'c'} = $type_bit{'i'} | $type_bit{'a'} | $type_bit{'n'} | $type_bit{'q'};
- $type_bit{'s'} = $type_bit{'d'} | $type_bit{'i'} | $type_bit{'a'} | $type_bit{'n'};
- $type_bit{'j'} = $type_bit{'f'} | $type_bit{'p'};
+ define_type_bit('d', $type_bit{'x'} | $type_bit{'y'} | $type_bit{'r'});
+ define_type_bit('c', $type_bit{'i'} | $type_bit{'a'} |
+ $type_bit{'n'} | $type_bit{'q'});
+ define_type_bit('s', $type_bit{'d'} | $type_bit{'i'} |
+ $type_bit{'a'} | $type_bit{'n'});
+ define_type_bit('j', $type_bit{'f'} | $type_bit{'p'});
# Aliases (for matching purposes).
- $type_bit{'I'} = $type_bit{'u'};
- $type_bit{'t'} = $type_bit{'u'};
- $type_bit{'A'} = $type_bit{'u'};
- $type_bit{'L'} = $type_bit{'u'};
- $type_bit{'b'} = $type_bit{'u'};
- $type_bit{'N'} = $type_bit{'u'};
- $type_bit{'U'} = $type_bit{'u'};
- $type_bit{'e'} = $type_bit{'u'};
- $type_bit{'P'} = $type_bit{'u'};
- $type_bit{'Q'} = $type_bit{'u'};
+ define_type_bit('I', $type_bit{'u'});
+ define_type_bit('t', $type_bit{'u'});
+ define_type_bit('A', $type_bit{'u'});
+ define_type_bit('L', $type_bit{'u'});
+ define_type_bit('b', $type_bit{'u'});
+ define_type_bit('N', $type_bit{'u'});
+ define_type_bit('U', $type_bit{'u'});
+ define_type_bit('e', $type_bit{'u'});
+ define_type_bit('P', $type_bit{'u'});
+ define_type_bit('Q', $type_bit{'u'});
+}
+
+#
+# Pre-define the 'fail' instruction. It is used internally
+# by the 'try_me_else_fail' instruction.
+#
+$match_engine_ops{'TOP_fail'} = 1;
+
+#
+# Sanity checks.
+#
+
+{
+ if (@tag_type > $max_genop_types) {
+ sanity("\$max_genop_types is $max_genop_types, ",
+ "but there are ", scalar(@tag_type),
+ " primitive tags defined\n");
+ }
+
+ foreach my $tag (@tag_type) {
+ sanity("tag '$tag': primitive tags must be named with lowercase letters")
+ unless $tag =~ /^[a-z]$/;
+ }
}
#
@@ -436,12 +475,12 @@ sub emulator_output {
#
my(@bits) = (0) x ($max_spec_operands/2);
- my($shift) = 16;
my($i);
for ($i = 0; $i < $max_spec_operands && defined $args[$i]; $i++) {
my $t = $args[$i];
if (defined $type_bit{$t}) {
- $bits[int($i/2)] |= $type_bit{$t} << (16*($i%2));
+ my $shift = $max_genop_types * ($i % 2);
+ $bits[int($i/2)] |= $type_bit{$t} << $shift;
}
}
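The rewritten packing above derives the shift from $max_genop_types (16), so each operand owns a 16-bit slice of the mask and two operands share one word. A hedged C sketch of the same packing, with illustrative names only:

#include <stdint.h>

#define MAX_GENOP_TYPES 16            /* mirrors $max_genop_types */

/* One 16-bit type mask per operand, two operands packed per 32-bit word. */
static void pack_type_bits(uint32_t *bits, const uint16_t *mask, int nops)
{
    int i;
    for (i = 0; i < nops; i++) {
        int shift = MAX_GENOP_TYPES * (i % 2);
        bits[i / 2] |= (uint32_t) mask[i] << shift;
    }
}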
@@ -753,6 +792,10 @@ sub error {
die $where, @message, "\n";
}
+sub sanity {
+ die "internal error: ", @_, "\n";
+}
+
sub comment {
my($lang, @comments) = @_;
my($prefix);
@@ -1269,7 +1312,8 @@ sub tr_gen {
foreach $ref (@g) {
my($line, $orig_transform, $from_ref, $to_ref) = @$ref;
- my $so_far = tr_gen_from($line, @$from_ref);
+ my $used_ref = used_vars($from_ref, $to_ref);
+ my $so_far = tr_gen_from($line, $used_ref, @$from_ref);
tr_gen_to($line, $orig_transform, $so_far, @$to_ref);
}
@@ -1278,9 +1322,22 @@ sub tr_gen {
#
my($offset) = 0;
print "Uint op_transform[] = {\n";
- foreach $key (keys %gen_transform) {
+ foreach $key (sort keys %gen_transform) {
$gen_transform_offset{$key} = $offset;
- foreach $instr (@{$gen_transform{$key}}) {
+ my @instr = @{$gen_transform{$key}};
+
+ #
+ # If the last instruction is 'fail', remove it and
+ # convert the previous 'try_me_else' to 'try_me_else_fail'.
+ #
+ if (is_instr($instr[$#instr], 'fail')) {
+ pop(@instr);
+ my $i = $#instr;
+ $i-- while !is_instr($instr[$i], 'try_me_else');
+ $instr[$i] = make_op('', 'try_me_else_fail');
+ }
+
+ foreach $instr (@instr) {
my($size, $instr_ref, $comment) = @$instr;
my($op, @args) = @$instr_ref;
print " ";
@@ -1307,8 +1364,48 @@ sub tr_gen {
print "};\n\n";
}
+sub used_vars {
+ my($from_ref,$to_ref) = @_;
+ my %used;
+ my %seen;
+
+ foreach my $ref (@$from_ref) {
+ my($name,$arity,@ops) = @$ref;
+ if ($name =~ /^[.]/) {
+ foreach my $var (@ops) {
+ $used{$var} = 1;
+ }
+ } else {
+ # Any variable that is used at least twice on the
+ # left-hand side is used. (E.g. "move R R".)
+ foreach my $op (@ops) {
+ my($var, $type, $type_val) = @$op;
+ next if $var eq '';
+ $used{$var} = 1 if $seen{$var};
+ $seen{$var} = 1;
+ }
+ }
+ }
+
+ foreach my $ref (@$to_ref) {
+ my($name, $arity, @ops) = @$ref;
+ if ($name =~ /^[.]/) {
+ foreach my $var (@ops) {
+ $used{$var} = 1;
+ }
+ } else {
+ foreach my $op (@ops) {
+ my($var, $type, $type_val) = @$op;
+ next if $var eq '';
+ $used{$var} = 1;
+ }
+ }
+ }
+ \%used;
+}
+
sub tr_gen_from {
- my($line, @tr) = @_;
+ my($line,$used_ref,@tr) = @_;
my(%var) = ();
my(%var_type);
my($var_num) = 0;
@@ -1318,25 +1415,30 @@ sub tr_gen_from {
my(@fix_pred_funcs);
my($op, $ref); # Loop variables.
my $where = "left side of transformation in line $line: ";
+ my %var_used = %$used_ref;
+ my $may_fail = 0;
+ my $is_first = 1;
foreach $ref (@tr) {
my($name, $arity, @ops) = @$ref;
my($key) = "$name/$arity";
my($opnum);
+ $may_fail = 1 unless $is_first;
+ $is_first = 0;
+
#
# A name starting with a period is a C pred function to be called.
#
if ($name =~ /^\.(\w+)/) {
$name = $1;
+ $may_fail = 1;
my $var;
my(@args);
- my $next_instr = pop(@code); # Get rid of 'next_instr'
push(@fix_pred_funcs, scalar(@code));
push(@code, [$name, @ops]);
- push(@code, $next_instr);
next;
}
@@ -1348,17 +1450,21 @@ sub tr_gen_from {
unless defined $gen_opnum{$name,$arity};
$opnum = $gen_opnum{$name,$arity};
- push(@code, &make_op("$name/$arity", 'is_op', $opnum));
+ push(@code, make_op("$name/$arity", 'next_instr', $opnum));
$min_window++;
foreach $op (@ops) {
my($var, $type, $type_val, $cond, $val) = @$op;
+ my $ignored_var = "$var (ignored)";
if ($type ne '' && $type ne '*') {
+ $may_fail = 1;
+
#
# The is_bif, is_not_bif, and is_func instructions have
# their own built-in type test and don't need to
# be guarded with a type test instruction.
#
+ $ignored_var = '';
unless ($cond eq 'is_bif' or
$cond eq 'is_not_bif' or
$cond eq 'is_func') {
@@ -1372,7 +1478,7 @@ sub tr_gen_from {
push(@code, &make_op($types, 'is_type', $type_mask));
} else {
$cond = '';
- push(@code, &make_op($types, 'is_type_eq',
+ push(@code, &make_op("$types== $val", 'is_type_eq',
$type_mask, $val));
}
}
@@ -1380,46 +1486,55 @@ sub tr_gen_from {
if ($cond eq 'is_func') {
my($m, $f, $a) = split(/:/, $val);
+ $ignored_var = '';
+ $may_fail = 1;
push(@code, &make_op('', "$cond", "am_$m",
"am_$f", $a));
} elsif ($cond ne '') {
+ $ignored_var = '';
+ $may_fail = 1;
push(@code, &make_op('', "$cond", $val));
}
if ($var ne '') {
if (defined $var{$var}) {
+ $ignored_var = '';
+ $may_fail = 1;
push(@code, &make_op($var, 'is_same_var', $var{$var}));
} elsif ($type eq '*') {
#
# Reserve a hole for a 'rest_args' instruction.
#
+ $ignored_var = '';
push(@fix_rest_args, scalar(@code));
push(@code, $var);
- } else {
+ } elsif ($var_used{$var}) {
+ $ignored_var = '';
$var_type{$var} = 'scalar';
$var{$var} = $var_num;
$var_num++;
push(@code, &make_op($var, 'set_var', $var{$var}));
}
}
- if (is_set_var_instr($code[$#code])) {
+ if (is_instr($code[$#code], 'set_var')) {
my $ref = pop @code;
my $comment = $ref->[2];
my $var = $ref->[1][1];
push(@code, make_op($comment, 'set_var_next_arg', $var));
} else {
- push(@code, &make_op('', 'next_arg'));
+ push(@code, &make_op($ignored_var, 'next_arg'));
}
}
- push(@code, &make_op('', 'next_instr'));
- pop(@code) if $code[$#code]->[1][0] eq 'next_arg';
+
+ # Remove redundant 'next_arg' instructions before the end
+ # of the instruction.
+ pop(@code) while is_instr($code[$#code], 'next_arg');
}
#
# Insert the commit operation.
#
- pop(@code); # Get rid of 'next_instr'
- push(@code, &make_op('', 'commit'));
+ push(@code, make_op($may_fail ? '' : 'always reached', 'commit'));
#
# If there is a rest_args instruction, we must insert its correct
@@ -1449,9 +1564,8 @@ sub tr_gen_from {
push(@args, "var+$var{$var}");
}
}
- splice(@code, $index, 1, &make_op("$name()",
- 'pred', scalar(@pred_table)));
- push(@pred_table, [$name, @args]);
+ my $pi = tr_next_index(\@pred_table, \%pred_table, $name, @args);
+ splice(@code, $index, 1, make_op("$name()", 'pred', $pi));
}
$te_max_vars = $var_num
@@ -1468,6 +1582,10 @@ sub tr_gen_to {
my($op, $ref); # Loop variables.
my($where) = "right side of transformation in line $line: ";
+ my $last_instr = $code[$#code];
+ my $cannot_fail = is_instr($last_instr, 'commit') &&
+ (get_comment($last_instr) =~ /^always/);
+
foreach $ref (@tr) {
my($name, $arity, @ops) = @$ref;
@@ -1489,9 +1607,10 @@ sub tr_gen_to {
push(@args, "var+$var{$var}");
}
}
- pop(@code); # Get rid of 'next_instr'
- push(@code, &make_op("$name()", 'call', scalar(@call_table)));
- push(@call_table, [$name, @args]);
+ pop(@code); # Get rid of 'commit' instruction
+ my $index = tr_next_index(\@call_table, \%call_table,
+ $name, @args);
+ push(@code, make_op("$name()", 'call_end', $index));
last;
}
@@ -1508,27 +1627,27 @@ sub tr_gen_to {
# Create code to build the generic instruction.
#
- push(@code, &make_op('', 'new_instr'));
- push(@code, &make_op("$name/$arity", 'store_op', $opnum, $arity));
+ push(@code, make_op("$name/$arity", 'new_instr', $opnum));
foreach $op (@ops) {
my($var, $type, $type_val) = @$op;
if ($var ne '') {
&error($where, "variable '$var' unbound")
unless defined $var{$var};
- push(@code, &make_op($var, 'store_var', $var{$var}));
+ push(@code, &make_op($var, 'store_var_next_arg', $var{$var}));
} elsif ($type ne '') {
push(@code, &make_op('', 'store_type', "TAG_$type"));
if ($type_val) {
push(@code, &make_op('', 'store_val', $type_val));
}
+ push(@code, make_op('', 'next_arg'));
}
- push(@code, &make_op('', 'next_arg'));
}
- pop(@code) if $code[$#code]->[1][0] eq 'next_arg';
+ pop(@code) if is_instr($code[$#code], 'next_arg');
}
- push(@code, &make_op('', 'end'));
+ push(@code, make_op('', 'end'))
+ unless is_instr($code[$#code], 'call_end');
#
# Chain together all codes segments having the same first operation.
@@ -1540,11 +1659,20 @@ sub tr_gen_to {
$min_window{$key} = $min_window
if $min_window{$key} > $min_window;
- pop(@{$gen_transform{$key}})
+ my $prev_last;
+ $prev_last = pop(@{$gen_transform{$key}})
if defined @{$gen_transform{$key}}; # Fail
- my(@prefix) = (&make_op($comment), &make_op('', 'try_me_else', &tr_code_len(@code)));
- unshift(@code, @prefix);
- push(@{$gen_transform{$key}}, @code, &make_op('', 'fail'));
+
+ if ($prev_last && !is_instr($prev_last, 'fail')) {
+ error("Line $line: A previous transformation shadows '$orig_transform'");
+ }
+ unless ($cannot_fail) {
+ unshift(@code, make_op('', 'try_me_else',
+ tr_code_len(@code)));
+ push(@code, make_op(""), make_op("$key", 'fail'));
+ }
+ unshift(@code, make_op($comment));
+ push(@{$gen_transform{$key}}, @code);
}
sub tr_code_len {
@@ -1562,21 +1690,38 @@ sub make_op {
[scalar(@op), [@op], $comment];
}
-sub is_set_var_instr {
- my($ref) = @_;
+sub is_instr {
+ my($ref,$op) = @_;
return 0 unless ref($ref) eq 'ARRAY';
- $ref->[1][0] eq 'set_var';
+ $ref->[1][0] eq $op;
+}
+
+sub get_comment {
+ my($ref,$op) = @_;
+ return '' unless ref($ref) eq 'ARRAY';
+ $ref->[2];
+}
+
+sub tr_next_index {
+ my($lref,$href,$name,@args) = @_;
+ my $code = "RVAL = $name(" . join(', ', 'st', @args) . "); break;\n";
+ my $index;
+
+ if (defined $$href{$code}) {
+ $index = $$href{$code};
+ } else {
+ $index = scalar(@$lref);
+ push(@$lref, $code);
+ $$href{$code} = $index;
+ }
+ $index;
}
sub tr_gen_call {
my(@call_table) = @_;
my($i);
- print "\n";
for ($i = 0; $i < @call_table; $i++) {
- my $ref = $call_table[$i];
- my($name, @args) = @$ref;
- print "case $i: RVAL = $name(", join(', ', 'st', @args), "); break;\n";
+ print "case $i: $call_table[$i]";
}
- print "\n";
}
diff --git a/erts/emulator/utils/make_preload b/erts/emulator/utils/make_preload
index d0671e998d..d22f08f993 100755
--- a/erts/emulator/utils/make_preload
+++ b/erts/emulator/utils/make_preload
@@ -88,6 +88,7 @@ foreach $file (@ARGV) {
print "unsigned char preloaded_$module", "[] = {\n";
for ($i = 0; $i < length($_); $i++) {
if ($i % 8 == 0 && $comment ne '') {
+ $comment =~ s@/\*@..@g; # Comment start -- avoid warning.
$comment =~ s@\*/@..@g; # Comment terminator.
print " /* $comment */\n ";
$comment = '';
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index 918ef62094..91efb4c023 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -181,8 +181,7 @@ for ($i = 0; $i < @bif; $i++) {
print "\n";
for ($i = 0; $i < @bif; $i++) {
- my $arity = $bif[$i]->[2];
- my $args = join(', ', 'Process*', ('Eterm') x $arity);
+ my $args = join(', ', 'Process*', 'Eterm*');
print "Eterm $bif[$i]->[3]($args);\n";
print "Eterm wrap_$bif[$i]->[3]($args, UWord *I);\n";
}
@@ -219,28 +218,10 @@ for ($i = 0; $i < @bif; $i++) {
next if $bif[$i]->[3] eq $bif[$i]->[4]; # Skip unwrapped bifs
my $arity = $bif[$i]->[2];
my $func = $bif[$i]->[3];
- my $arg;
print "Eterm\n";
- print "wrap_$func(Process* p";
- for ($arg = 1; $arg <= $arity; $arg++) {
- print ", Eterm arg$arg";
- }
- print ", UWord *I)\n";
+ print "wrap_$func(Process* p, Eterm* args, UWord* I)\n";
print "{\n";
- print " return erts_bif_trace($i, p";
- for ($arg = 1; $arg <= 3; $arg++) {
- if ($arg <= $arity) {
- print ", arg$arg";
- #} elsif ($arg == ($arity + 1)) {
- # # Place I in correct position
- # print ", (Eterm) I";
- } else {
- print ", 0";
- }
- }
- # I is always last, as well as in the correct position
- # Note that "last" and "correct position" may be the same...
- print ", I);\n";
+ print " return erts_bif_trace($i, p, args, I);\n";
print "}\n\n";
}
@@ -261,19 +242,9 @@ for ($i = 0; $i < @bif; $i++) {
my $orig_func = $1;
$orig_func = $implementation[$i] if $implementation[$i];
print "Eterm\n";
- print "$func(Process* p";
- for ($arg = 1; $arg <= $arity; $arg++) {
- print ", Eterm arg$arg";
- }
- print ")\n";
+ print "$func(Process* p, Eterm* BIF__ARGS)\n";
print "{\n";
- print " return $orig_func(p";
- for ($arg = 1; $arg <= 3; $arg++) {
- if ($arg <= $arity) {
- print ", arg$arg";
- }
- }
- print ");\n";
+ print " return $orig_func(p, BIF__ARGS);\n";
print "}\n\n";
}
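With the loops above, every generated BIF entry point and trace wrapper now receives its arguments through a single Eterm array instead of positional Eterm parameters. A hedged sketch of roughly what the emitted C looks like, with stand-in types and an illustrative BIF name (none of these identifiers are taken from the real generated file):

/* Stand-ins so the sketch compiles on its own; the real definitions
 * live in the emulator headers. */
typedef unsigned long Eterm;
typedef unsigned long UWord;
typedef struct process Process;
extern Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, UWord* I);
extern Eterm member_2(Process* p, Eterm* BIF__ARGS);   /* implementation */

/* Old shape: Eterm lists_member_2(Process* p, Eterm arg1, Eterm arg2); */

Eterm lists_member_2(Process* p, Eterm* BIF__ARGS)
{
    return member_2(p, BIF__ARGS);
}

Eterm wrap_lists_member_2(Process* p, Eterm* args, UWord* I)
{
    return erts_bif_trace(17, p, args, I);   /* 17: illustrative table index */
}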
diff --git a/erts/emulator/valgrind/suppress.patched.3.6.0 b/erts/emulator/valgrind/suppress.patched.3.6.0
new file mode 100644
index 0000000000..2647949672
--- /dev/null
+++ b/erts/emulator/valgrind/suppress.patched.3.6.0
@@ -0,0 +1,307 @@
+# Valgrind suppression file updated to support the patched
+# Valgrind used in daily builds on ahmed.
+
+{
+ libc internal error
+ Memcheck:Addr8
+ obj:/lib64/ld-2.3.5.so
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:_dl_start
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr4
+ fun:__sigjmp_save
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__sigsetjmp
+ fun:__libc_start_main
+ obj:*
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:chkio_drv_timeout
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:io_ready_exit_drv_control
+ fun:erts_port_control
+ fun:port_control_3
+ fun:process_main
+}
+{
+ Leak in libc putenv
+ Memcheck:Leak
+ fun:malloc
+ fun:realloc
+ fun:__add_to_environ
+ fun:putenv
+ fun:erts_sys_putenv
+ fun:os_putenv_2
+ fun:process_main
+}
+{
+Leak in libc putenv
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_sys_putenv
+fun:os_putenv_2
+fun:process_main
+}
+{
+ erroneous warning
+ Memcheck:Leak
+ fun:malloc
+ fun:erts_sys_alloc
+ ...
+ fun:fix_core_alloc
+ fun:erts_init_fix_alloc
+ fun:erts_alloc_init
+ fun:early_init
+ fun:erl_start
+}
+{
+ pthread internal error
+ Memcheck:Param
+ futex(utime)
+ fun:__lll_mutex_unlock_wake
+}
+{
+ libc internal error
+ Memcheck:Param
+ socketcall.sendto(msg)
+ ...
+ fun:getifaddrs
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+PossiblyLost
+fun:realloc
+fun:erts_sys_realloc
+...
+fun:erts_realloc_fnf
+fun:erts_bin_realloc_fnf
+fun:driver_realloc_binary
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:erts_bin_drv_alloc_fnf
+fun:driver_alloc_binary
+}
+{
+pthread leak or erroneous valgrind warning
+Memcheck:Leak
+fun:calloc
+fun:allocate_dtv
+fun:_dl_allocate_tls
+fun:pthread_create@@GLIBC_2.2.5
+}
+{
+pthread leak or erroneous valgrind warning
+Memcheck:Leak
+fun:calloc
+fun:_dl_allocate_tls
+fun:pthread_create@@GLIBC_2.2.5
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_slow
+fun:deflate
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_fast
+fun:deflate
+}
+{
+zlib; ok according to zlib (this one popped up with valgrind-3.6.0)
+Memcheck:Cond
+fun:deflate_slow
+fun:deflate
+fun:zlib_deflate
+fun:zlib_ctl
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_init_scheduling
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:init_db
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; sometimes pointer into block
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:driver_alloc
+fun:get_bufstk
+fun:alloc_buffer
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_nnmod
+ fun:BN_mod_inverse
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+}
+{
+ Crypto internal...
+ Memcheck:Value8
+ fun:BN_mod_exp_mont_consttime
+ fun:generate_key
+ fun:dh_generate_key_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+ fun:start_thread
+ fun:clone
+}
+
+{
+erts_bits_init_state; Why is this needed?
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_bits_init_state
+fun:erts_init_scheduling
+fun:erl_init
+fun:erl_start
+fun:main
+}
+
+{
+Prebuilt constant terms in os_info_init
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:os_info_init
+fun:erts_bif_info_init
+fun:erl_init
+fun:erl_start
+fun:main
+}
diff --git a/erts/emulator/valgrind/suppress.standard b/erts/emulator/valgrind/suppress.standard
new file mode 100644
index 0000000000..d759038c97
--- /dev/null
+++ b/erts/emulator/valgrind/suppress.standard
@@ -0,0 +1,268 @@
+{
+ libc internal error
+ Memcheck:Addr8
+ obj:/lib64/ld-2.3.5.so
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:_dl_start
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr4
+ fun:__sigjmp_save
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__sigsetjmp
+ fun:__libc_start_main
+ obj:*
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:chkio_drv_timeout
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:io_ready_exit_drv_control
+ fun:erts_port_control
+ fun:port_control_3
+ fun:process_main
+}
+{
+ Leak in libc putenv
+ Memcheck:Leak
+ fun:malloc
+ fun:realloc
+ fun:__add_to_environ
+ fun:putenv
+ fun:erts_sys_putenv
+ fun:os_putenv_2
+ fun:process_main
+}
+{
+Leak in libc putenv
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_sys_putenv
+fun:os_putenv_2
+fun:process_main
+}
+{
+ erroneous warning
+ Memcheck:Leak
+ fun:malloc
+ fun:erts_sys_alloc
+ fun:fix_core_alloc
+ fun:erts_init_fix_alloc
+ fun:erts_alloc_init
+ fun:early_init
+ fun:erl_start
+}
+{
+ pthread internal error
+ Memcheck:Param
+ futex(utime)
+ fun:__lll_mutex_unlock_wake
+}
+{
+ libc internal error
+ Memcheck:Param
+ socketcall.sendto(msg)
+ ...
+ fun:getifaddrs
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+fun:realloc
+fun:erts_sys_realloc
+...
+fun:erts_realloc_fnf
+fun:erts_bin_realloc_fnf
+fun:driver_realloc_binary
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:erts_bin_drv_alloc_fnf
+fun:driver_alloc_binary
+}
+{
+pthread leak or erroneous valgrind warning
+Memcheck:Leak
+fun:calloc
+fun:allocate_dtv
+fun:_dl_allocate_tls
+fun:pthread_create@@GLIBC_2.2.5
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_slow
+fun:deflate
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_fast
+fun:deflate
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_init_scheduling
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:init_db
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; sometimes pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:driver_alloc
+fun:get_bufstk
+fun:alloc_buffer
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_nnmod
+ fun:BN_mod_inverse
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+}
+{
+ Crypto internal...
+ Memcheck:Value8
+ fun:BN_mod_exp_mont_consttime
+ fun:generate_key
+ fun:dh_generate_key_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+ fun:start_thread
+ fun:clone
+}
+
+{
+Prebuilt constant terms in os_info_init (PossiblyLost)
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:os_info_init
+fun:erts_bif_info_init
+fun:erl_init
+fun:erl_start
+fun:main
+}
diff --git a/erts/epmd/src/epmd.c b/erts/epmd/src/epmd.c
index 08576d923f..2267f9b12b 100644
--- a/erts/epmd/src/epmd.c
+++ b/erts/epmd/src/epmd.c
@@ -324,7 +324,11 @@ static void run_daemon(EpmdVars *g)
}
/* move cwd to root to make sure we are not on a mounted filesystem */
- chdir("/");
+ if (chdir("/") < 0)
+ {
+ dbg_perror(g,"epmd: chdir() failed");
+ epmd_cleanup_exit(g,1);
+ }
umask(0);
diff --git a/erts/epmd/src/epmd_cli.c b/erts/epmd/src/epmd_cli.c
index ac55ba6bb6..74408e3ebe 100644
--- a/erts/epmd/src/epmd_cli.c
+++ b/erts/epmd/src/epmd_cli.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -104,7 +104,10 @@ void epmd_call(EpmdVars *g,int what)
fd = conn_to_epmd(g);
put_int16(1,buf);
buf[2] = what;
- write(fd,buf,3);
+ if (write(fd, buf, 3) != 3) {
+ printf("epmd: Can't write to epmd\n");
+ epmd_cleanup_exit(g,1);
+ }
if (read(fd,(char *)&i,4) != 4) {
if (!g->silent)
printf("epmd: no response from local epmd\n");
diff --git a/erts/epmd/src/epmd_int.h b/erts/epmd/src/epmd_int.h
index a2d7559f9d..14d05c3f19 100644
--- a/erts/epmd/src/epmd_int.h
+++ b/erts/epmd/src/epmd_int.h
@@ -2,7 +2,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
diff --git a/erts/epmd/src/epmd_srv.c b/erts/epmd/src/epmd_srv.c
index 5debae26b6..da575affa1 100644
--- a/erts/epmd/src/epmd_srv.c
+++ b/erts/epmd/src/epmd_srv.c
@@ -102,7 +102,8 @@ void run(EpmdVars *g)
dbg_printf(g,2,"try to initiate listening port %d", g->port);
- if (g->addresses != NULL)
+ if (g->addresses != NULL && /* String contains non-separator characters if: */
+ g->addresses[strspn(g->addresses," ,")] != '\000')
{
char *tmp;
char *token;
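The added strspn() guard above makes epmd ignore an address list that consists only of separators (spaces and commas). The same idiom in a small self-contained C example:

#include <stdio.h>
#include <string.h>

/* Non-zero when the string contains at least one character that is
 * neither a space nor a comma. */
static int has_real_content(const char *s)
{
    return s != NULL && s[strspn(s, " ,")] != '\0';
}

int main(void)
{
    printf("%d\n", has_real_content(" , ,"));         /* 0 */
    printf("%d\n", has_real_content("127.0.0.1, "));  /* 1 */
    return 0;
}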
diff --git a/erts/etc/common/erlc.c b/erts/etc/common/erlc.c
index 35c360a99d..23f009ff4d 100644
--- a/erts/etc/common/erlc.c
+++ b/erts/etc/common/erlc.c
@@ -185,6 +185,7 @@ main(int argc, char** argv)
* Push initial arguments.
*/
+ PUSH("+sbtu");
PUSH("-noinput");
PUSH2("-mode", "minimal");
PUSH2("-boot", "start_clean");
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index 2bd576d8e8..19b3bb82ef 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -67,6 +67,7 @@ static const char plusM_au_allocs[]= {
'C', /* sbmbc_alloc */
'D', /* std_alloc */
'E', /* ets_alloc */
+ 'F', /* fix_alloc */
'H', /* eheap_alloc */
'L', /* ll_alloc */
'R', /* driver_alloc */
@@ -110,8 +111,6 @@ static char *plusM_other_switches[] = {
"Mamcbf",
"Mrmcbf",
"Mmcs",
- "Mcci",
- "Fe",
"Ye",
"Ym",
"Ytp",
@@ -122,6 +121,7 @@ static char *plusM_other_switches[] = {
/* +s arguments with values */
static char *pluss_val_switches[] = {
"bt",
+ "cl",
"ct",
"wt",
"ss",
diff --git a/erts/etc/win32/erlsrv/erlsrv_interactive.c b/erts/etc/win32/erlsrv/erlsrv_interactive.c
index 13e029b364..736eabac79 100644
--- a/erts/etc/win32/erlsrv/erlsrv_interactive.c
+++ b/erts/etc/win32/erlsrv/erlsrv_interactive.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -135,7 +135,12 @@ void print_last_error(void){
fprintf(stderr,"Error: %s",mes);
LocalFree(mes);
}
-
+
+static int get_last_error(void)
+{
+ return (last_error) ? last_error : GetLastError();
+}
+
static BOOL install_service(void){
SC_HANDLE scm;
SC_HANDLE service;
@@ -508,7 +513,7 @@ int do_usage(char *arg0){
"\t[{-sn[ame] | -n[ame]} [<nodename>]]\n"
"\t[-d[ebugtype] [{new|reuse|console}]]\n"
"\t[-ar[gs] [<limited erl arguments>]]\n\n"
- "%s {start | stop | disable | enable} <servicename>\n\n"
+ "%s {start | start_disabled | stop | disable | enable} <servicename>\n\n"
"%s remove <servicename>\n\n"
"%s rename <servicename> <servicename>\n\n"
"%s list [<servicename>]\n\n"
@@ -561,6 +566,45 @@ int do_manage(int argc,char **argv){
return 0;
}
}
+ if(!_stricmp(action,"start_disabled")){
+ if(!enable_service()){
+ fprintf(stderr,"%s: Failed to enable service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ return 1;
+ }
+ if(!start_service() && get_last_error() != ERROR_SERVICE_ALREADY_RUNNING){
+ fprintf(stderr,"%s: Failed to start service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ goto failure_starting;
+ }
+
+ if(!wait_service_trans(SERVICE_STOPPED, SERVICE_START_PENDING,
+ SERVICE_RUNNING, 60)){
+ fprintf(stderr,"%s: Failed to start service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ goto failure_starting;
+ }
+
+ if(!disable_service()){
+ fprintf(stderr,"%s: Failed to disable service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ return 1;
+ }
+ printf("%s: Service %s started.\n",
+ argv[0],service_name);
+ return 0;
+ failure_starting:
+ if(!disable_service()){
+ fprintf(stderr,"%s: Failed to disable service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ }
+ return 1;
+ }
if(!_stricmp(action,"stop")){
if(!stop_service()){
fprintf(stderr,"%s: Failed to stop service %s.\n",
@@ -841,6 +885,7 @@ int do_add_or_set(int argc, char **argv){
argv[0], service_name);
return 0;
}
+
int do_rename(int argc, char **argv){
RegEntry *current = empty_reg_tab();
RegEntry *dummy = empty_reg_tab();
@@ -1129,35 +1174,131 @@ void read_arguments(int *pargc, char ***pargv){
*pargc = argc;
*pargv = argv;
}
+
+/* Create a free-for-all ACL to set on the semaphore */
+PACL get_acl(PSECURITY_DESCRIPTOR secdescp)
+{
+ DWORD acl_length = 0;
+ PSID auth_users_sidp = NULL;
+ PACL aclp = NULL;
+ SID_IDENTIFIER_AUTHORITY ntauth = SECURITY_NT_AUTHORITY;
+
+ if(!InitializeSecurityDescriptor(secdescp, SECURITY_DESCRIPTOR_REVISION)) {
+ return NULL;
+ }
+
+ if(!AllocateAndInitializeSid(&ntauth,
+ 1,
+ SECURITY_AUTHENTICATED_USER_RID,
+ 0, 0, 0, 0, 0, 0, 0,
+ &auth_users_sidp)) {
+ return NULL;
+ }
+
+ acl_length = sizeof(ACL) +
+ sizeof(ACCESS_ALLOWED_ACE) - sizeof(DWORD) +
+ GetLengthSid(auth_users_sidp);
+
+ if((aclp = (PACL) HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, acl_length)) == NULL) {
+ FreeSid(auth_users_sidp);
+ return NULL;
+ }
+
+ if(!InitializeAcl(aclp, acl_length, ACL_REVISION)) {
+ FreeSid(auth_users_sidp);
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return NULL;
+ }
+
+ if(!AddAccessAllowedAce(aclp, ACL_REVISION, SEMAPHORE_ALL_ACCESS, auth_users_sidp)) {
+ FreeSid(auth_users_sidp);
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return NULL;
+ }
+
+ if(!SetSecurityDescriptorDacl(secdescp, TRUE, aclp, FALSE)) {
+ FreeSid(auth_users_sidp);
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return NULL;
+ }
+ return aclp;
+}
+
+static HANDLE lock_semaphore = NULL;
+
+int take_lock(void) {
+ SECURITY_ATTRIBUTES attr;
+ PACL aclp;
+ SECURITY_DESCRIPTOR secdesc;
+
+ if ((aclp = get_acl(&secdesc)) == NULL) {
+ return -1;
+ }
+
+ memset(&attr,0,sizeof(attr));
+ attr.nLength = sizeof(attr);
+ attr.lpSecurityDescriptor = &secdesc;
+ attr.bInheritHandle = FALSE;
+
+ if ((lock_semaphore = CreateSemaphore(&attr, 1, 1, ERLSRV_INTERACTIVE_GLOBAL_SEMAPHORE)) == NULL) {
+ return -1;
+ }
+
+ if (WaitForSingleObject(lock_semaphore,INFINITE) != WAIT_OBJECT_0) {
+ return -1;
+ }
+
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return 0;
+}
+
+void release_lock(void) {
+ ReleaseSemaphore(lock_semaphore,1,NULL);
+}
+
int interactive_main(int argc, char **argv){
char *action = argv[1];
-
+ int res;
+
+ if (take_lock() != 0) {
+ fprintf(stderr,"%s: unable to acquire global lock (%s).\n",argv[0],
+ ERLSRV_INTERACTIVE_GLOBAL_SEMAPHORE);
+ return 1;
+ }
+
if(!_stricmp(action,"readargs")){
- read_arguments(&argc,&argv);
- action = argv[1];
+ read_arguments(&argc,&argv);
+ action = argv[1];
}
if(!_stricmp(action,"set") || !_stricmp(action,"add"))
- return do_add_or_set(argc,argv);
- if(!_stricmp(action,"rename"))
- return do_rename(argc,argv);
- if(!_stricmp(action,"remove"))
- return do_remove(argc,argv);
- if(!_stricmp(action,"list"))
- return do_list(argc,argv);
- if(!_stricmp(action,"start") ||
- !_stricmp(action,"stop") ||
- !_stricmp(action,"enable") ||
- !_stricmp(action,"disable"))
- return do_manage(argc,argv);
- if(_stricmp(action,"?") &&
- _stricmp(action,"/?") &&
- _stricmp(action,"-?") &&
- *action != 'h' &&
- *action != 'H')
+ res = do_add_or_set(argc,argv);
+ else if(!_stricmp(action,"rename"))
+ res = do_rename(argc,argv);
+ else if(!_stricmp(action,"remove"))
+ res = do_remove(argc,argv);
+ else if(!_stricmp(action,"list"))
+ res = do_list(argc,argv);
+ else if(!_stricmp(action,"start") ||
+ !_stricmp(action,"start_disabled") ||
+ !_stricmp(action,"stop") ||
+ !_stricmp(action,"enable") ||
+ !_stricmp(action,"disable"))
+ res = do_manage(argc,argv);
+ else if(_stricmp(action,"?") &&
+ _stricmp(action,"/?") &&
+ _stricmp(action,"-?") &&
+ *action != 'h' &&
+ *action != 'H') {
fprintf(stderr,"%s: action %s not implemented.\n",argv[0],action);
- do_usage(argv[0]);
- return 1;
+ do_usage(argv[0]);
+ res = 1;
+ } else {
+ do_usage(argv[0]);
+ res = 0;
+ }
+ release_lock();
+ return res;
}
diff --git a/erts/etc/win32/erlsrv/erlsrv_interactive.h b/erts/etc/win32/erlsrv/erlsrv_interactive.h
index deacf81899..23e69e508d 100644
--- a/erts/etc/win32/erlsrv/erlsrv_interactive.h
+++ b/erts/etc/win32/erlsrv/erlsrv_interactive.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -19,6 +19,8 @@
#ifndef _ERLSRV_INTERACTIVE_H
#define _ERLSRV_INTERACTIVE_H
+#define ERLSRV_INTERACTIVE_GLOBAL_SEMAPHORE "{468d6954-e355-415f-968f-d257cb0feef4}"
+
int interactive_main(int argc, char **argv);
#endif /* _ERLSRV_INTERACTIVE_H */
diff --git a/erts/etc/win32/nsis/erlang_uninst.ico b/erts/etc/win32/nsis/erlang_uninst.ico
index edbd8a6f2c..edbd8a6f2c 100755..100644
--- a/erts/etc/win32/nsis/erlang_uninst.ico
+++ b/erts/etc/win32/nsis/erlang_uninst.ico
Binary files differ
diff --git a/erts/etc/win32/start_erl.c b/erts/etc/win32/start_erl.c
index dcf8c8b281..28c8e55bd3 100644
--- a/erts/etc/win32/start_erl.c
+++ b/erts/etc/win32/start_erl.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -44,6 +44,8 @@ char *progname;
#endif
#define RELEASE_SUBDIR "\\releases"
+#define ERTS_SUBDIR_PREFIX "\\erts-"
+#define BIN_SUBDIR "\\bin"
#define REGISTRY_BASE "Software\\Ericsson\\Erlang\\"
#define DEFAULT_DATAFILE "start_erl.data"
@@ -101,7 +103,8 @@ void exit_help(char *err)
printf("Usage:\n%s\n"
" [<erlang options>] ++\n"
" [-data <datafile>]\n"
- " [-reldir <releasedir>]\n"
+ " {-rootdir <erlang root directory> | \n"
+ " -reldir <releasedir>}\n"
" [-bootflags <bootflagsfile>]\n"
" [-noconfig]\n", progname);
@@ -177,8 +180,9 @@ void split_commandline(void)
*/
char * unquote_optionarg(char *str, char **strp)
{
- char *newstr = (char *)malloc(strlen(str)+1); /* This one is realloc:ed later */
- int i=0, inquote=0;
+ char *newstr = (char *)malloc(strlen(str)+1); /* This one is
+ realloc:ed later */
+ int i = 0, inquote = 0;
assert(newstr);
assert(str);
@@ -223,8 +227,8 @@ char * unquote_optionarg(char *str, char **strp)
/*
- * Parses MyCommandLine and tries to fill in all the required option variables
- * (one way or another).
+ * Parses MyCommandLine and tries to fill in all the required option
+ * variables (in one way or another).
*/
void parse_commandline(void)
{
@@ -237,6 +241,11 @@ void parse_commandline(void)
*cmdline++;
if( strnicmp(cmdline, "data", 4) == 0) {
DataFileName = unquote_optionarg(cmdline+4, &cmdline);
+ } else if( strnicmp(cmdline, "rootdir", 7) == 0) {
+ RootDir = unquote_optionarg(cmdline+7, &cmdline);
+#ifdef _DEBUG
+ fprintf(stderr, "RootDir: '%s'\n", RootDir);
+#endif
} else if( strnicmp(cmdline, "reldir", 6) == 0) {
RelDir = unquote_optionarg(cmdline+6, &cmdline);
#ifdef _DEBUG
@@ -266,8 +275,8 @@ void parse_commandline(void)
* Read the data file specified and get the version and release number
* from it.
*
- * This function also construct the correct RegistryKey from the version information
- * retrieved.
+ * This function also constructs the correct RegistryKey from the version
+ * information retrieved.
*/
void read_datafile(void)
{
@@ -325,88 +334,6 @@ void read_datafile(void)
/*
- * Read the registry keys we need
- */
-void read_registry_keys(void)
-{
- HKEY hReg;
- ULONG lLen;
-
- /* Create the RegistryKey name */
- RegistryKey = (char *) malloc(strlen(REGISTRY_BASE) +
- strlen(Version) + 1);
- assert(RegistryKey);
- sprintf(RegistryKey, REGISTRY_BASE "%s", Version);
-
- /* We always need to find BinDir */
- if( (RegOpenKeyEx(HKEY_LOCAL_MACHINE,
- RegistryKey,
- 0,
- KEY_READ,
- &hReg)) != ERROR_SUCCESS ) {
- exit_help("Could not open registry key.");
- }
-
- /* First query size of data */
- if( (RegQueryValueEx(hReg,
- "Bindir",
- NULL,
- NULL,
- NULL,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query BinDir of release.\n");
- }
-
- /* Allocate enough space */
- BinDir = (char *)malloc(lLen+1);
- assert(BinDir);
- /* Retrieve the value */
- if( (RegQueryValueEx(hReg,
- "Bindir",
- NULL,
- NULL,
- (unsigned char *) BinDir,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query BinDir of release (2).\n");
- }
-
-#ifdef _DEBUG
- fprintf(stderr, "Bindir: '%s'\n", BinDir);
-#endif
-
- /* We also need the rootdir, in case we need to build RelDir later */
-
- /* First query size of data */
- if( (RegQueryValueEx(hReg,
- "Rootdir",
- NULL,
- NULL,
- NULL,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query RootDir of release.\n");
- }
-
- /* Allocate enough space */
- RootDir = (char *) malloc(lLen+1);
- assert(RootDir);
- /* Retrieve the value */
- if( (RegQueryValueEx(hReg,
- "Rootdir",
- NULL,
- NULL,
- (unsigned char *) RootDir,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query RootDir of release (2).\n");
- }
-
-#ifdef _DEBUG
- fprintf(stderr, "Rootdir: '%s'\n", RootDir);
-#endif
-
- RegCloseKey(hReg);
-}
-
-/*
* Read the bootflags. This file contains extra command line options to erl.exe
*/
void read_bootflags(void)
@@ -424,7 +351,8 @@ void read_bootflags(void)
exit_help("Need -reldir when -bootflags "
"filename has relative path.");
} else {
- newname = (char *)malloc(strlen(BootFlagsFile)+strlen(RelDir)+strlen(Release)+3);
+ newname = (char *)malloc(strlen(BootFlagsFile)+
+ strlen(RelDir)+strlen(Release)+3);
assert(newname);
sprintf(newname, "%s\\%s\\%s", RelDir, Release, BootFlagsFile);
free(BootFlagsFile);
@@ -436,8 +364,6 @@ void read_bootflags(void)
fprintf(stderr, "BootFlagsFile: '%s'\n", BootFlagsFile);
#endif
-
-
if( (fp=fopen(BootFlagsFile, "rb")) == NULL) {
exit_help("Could not open BootFlags file.");
}
@@ -605,32 +531,49 @@ void complete_options(void)
sz = nsz;
}
if (RelDir == NULL) {
- if(DataFileName){
- /* Needs to be absolute for this to work, but we
- can try... */
- read_datafile();
- read_registry_keys();
- } else {
- /* Impossible to find all data... */
- exit_help("Need either Release directory or an absolute "
- "datafile name.");
- }
- /* Ok, construct our own RelDir from RootDir */
- RelDir = (char *) malloc(strlen(RootDir)+strlen(RELEASE_SUBDIR)+1);
- assert(RelDir);
- sprintf(RelDir, "%s" RELEASE_SUBDIR, RootDir);
+ if (!RootDir) {
+ /* Impossible to find all data... */
+ exit_help("Need either Root directory or Release directory.");
+ }
+ /* Ok, construct our own RelDir from RootDir */
+ RelDir = (char *) malloc(strlen(RootDir)+strlen(RELEASE_SUBDIR)+1);
+ assert(RelDir);
+ sprintf(RelDir, "%s" RELEASE_SUBDIR, RootDir);
+ read_datafile();
} else {
read_datafile();
- read_registry_keys();
}
} else {
read_datafile();
- read_registry_keys();
}
+ if( !RootDir ) {
+ /* Try to construct RootDir from RelDir */
+ char *p;
+ RootDir = malloc(strlen(RelDir)+1);
+ strcpy(RootDir,RelDir);
+ p = RootDir+strlen(RootDir)-1;
+ if (p >= RootDir && (*p == '/' || *p == '\\'))
+ --p;
+ while (p >= RootDir && *p != '/' && *p != '\\')
+ --p;
+ if (p <= RootDir) { /* Empty RootDir is also an error */
+ exit_help("Cannot determine Root directory from "
+ "Release directory.");
+ }
+ *p = '\0';
+ }
+
+
+ BinDir = (char *) malloc(strlen(RootDir)+strlen(ERTS_SUBDIR_PREFIX)+
+ strlen(Version)+strlen(BIN_SUBDIR)+1);
+ assert(BinDir);
+ sprintf(BinDir, "%s" ERTS_SUBDIR_PREFIX "%s" BIN_SUBDIR, RootDir, Version);
+
read_bootflags();
#ifdef _DEBUG
fprintf(stderr, "RelDir: '%s'\n", RelDir);
+ fprintf(stderr, "BinDir: '%s'\n", BinDir);
#endif
}
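The new RootDir fallback above walks backwards over the release directory string to chop off its last path component, treating an empty result as an error. A self-contained sketch of that walk (parent_dir is a hypothetical helper, not the patched function itself):

#include <stdio.h>
#include <string.h>

/* Strip the last path component in place; returns 0 on success and -1
 * when no parent directory can be determined (the error case the patch
 * reports via exit_help()). */
static int parent_dir(char *path)
{
    char *p;
    if (*path == '\0')
        return -1;
    p = path + strlen(path) - 1;
    if (*p == '/' || *p == '\\')
        --p;                          /* ignore one trailing separator */
    while (p >= path && *p != '/' && *p != '\\')
        --p;
    if (p <= path)                    /* empty root is an error too */
        return -1;
    *p = '\0';
    return 0;
}

int main(void)
{
    char reldir[] = "C:\\Program Files\\erl5.9\\releases";
    if (parent_dir(reldir) == 0)
        printf("%s\n", reldir);       /* prints C:\Program Files\erl5.9 */
    return 0;
}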
diff --git a/erts/example/matrix_nif.c b/erts/example/matrix_nif.c
index c5e01dade5..404329e36c 100644
--- a/erts/example/matrix_nif.c
+++ b/erts/example/matrix_nif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,7 +31,19 @@ typedef struct
unsigned nrows;
unsigned ncols;
double* data;
-}Matrix;
+} Matrix;
+
+/*
+ * Use a union for pointer type conversion to avoid compiler warnings
+ * about strict-aliasing violations with gcc-4.1. gcc >= 4.2 does not
+ * emit the warning.
+ * TODO: Reconsider use of union once gcc-4.1 is obsolete?
+ */
+typedef union
+{
+ void* vp;
+ Matrix* p;
+} mx_t;
#define POS(MX, ROW, COL) ((MX)->data[(ROW)* (MX)->ncols + (COL)])
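The mx_t union above replaces casts of Matrix** to void**, which is what triggered gcc-4.1's strict-aliasing warnings. A generic, self-contained sketch of the idiom (all names here are hypothetical, not the NIF API):

#include <stdio.h>

struct matrix { unsigned nrows, ncols; };

/* An API that returns an opaque object through a void** out-parameter,
 * the way enif_get_resource() does. */
static void get_object(void **out, struct matrix *m) { *out = m; }

typedef union {
    void *vp;
    struct matrix *p;
} mx_t;

int main(void)
{
    struct matrix m = { 3, 4 };
    mx_t mx;

    get_object(&mx.vp, &m);     /* no (void**) cast of a struct matrix** */
    printf("%u x %u\n", mx.p->nrows, mx.p->ncols);
    return 0;
}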
@@ -44,8 +56,9 @@ static ErlNifResourceType* resource_type = NULL;
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
- ErlNifResourceType* rt = enif_open_resource_type(env, "matrix_nif_example",
- matrix_dtor,
+ ErlNifResourceType* rt = enif_open_resource_type(env, NULL,
+ "matrix_nif_example",
+ matrix_dtor,
ERL_NIF_RT_CREATE, NULL);
if (rt == NULL) {
return -1;
@@ -90,12 +103,12 @@ static ERL_NIF_TERM create(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
}
ret = enif_make_resource(env, mx);
- enif_release_resource(env, mx);
+ enif_release_resource(mx);
return ret;
badarg:
if (mx != NULL) {
- enif_release_resource(env,mx);
+ enif_release_resource(mx);
}
return enif_make_badarg(env);
}
@@ -104,14 +117,14 @@ badarg:
static ERL_NIF_TERM pos(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
/* pos(Matrix, Row, Column) -> float() */
- Matrix* mx;
+ mx_t mx;
unsigned i, j;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mx) ||
- !enif_get_uint(env, argv[1], &i) || (--i >= mx->nrows) ||
- !enif_get_uint(env, argv[2], &j) || (--j >= mx->ncols)) {
+ if (!enif_get_resource(env, argv[0], resource_type, &mx.vp) ||
+ !enif_get_uint(env, argv[1], &i) || (--i >= mx.p->nrows) ||
+ !enif_get_uint(env, argv[2], &j) || (--j >= mx.p->ncols)) {
return enif_make_badarg(env);
}
- return enif_make_double(env, POS(mx, i,j));
+ return enif_make_double(env, POS(mx.p, i,j));
}
static ERL_NIF_TERM add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -119,37 +132,38 @@ static ERL_NIF_TERM add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
/* add(Matrix_A, Matrix_B) -> Matrix_Sum */
unsigned i, j;
ERL_NIF_TERM ret;
- Matrix* mxA = NULL;
- Matrix* mxB = NULL;
- Matrix* mxS = NULL;
+ mx_t mxA, mxB, mxS;
+ mxA.p = NULL;
+ mxB.p = NULL;
+ mxS.p = NULL;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mxA) ||
- !enif_get_resource(env, argv[1], resource_type, (void**)&mxB) ||
- mxA->nrows != mxB->nrows ||
- mxB->ncols != mxB->ncols) {
+ if (!enif_get_resource(env, argv[0], resource_type, &mxA.vp) ||
+ !enif_get_resource(env, argv[1], resource_type, &mxB.vp) ||
+ mxA.p->nrows != mxB.p->nrows ||
+ mxB.p->ncols != mxB.p->ncols) {
return enif_make_badarg(env);
}
- mxS = alloc_matrix(env, mxA->nrows, mxA->ncols);
- for (i = 0; i < mxA->nrows; i++) {
- for (j = 0; j < mxA->ncols; j++) {
- POS(mxS, i, j) = POS(mxA, i, j) + POS(mxB, i, j);
+ mxS.p = alloc_matrix(env, mxA.p->nrows, mxA.p->ncols);
+ for (i = 0; i < mxA.p->nrows; i++) {
+ for (j = 0; j < mxA.p->ncols; j++) {
+ POS(mxS.p, i, j) = POS(mxA.p, i, j) + POS(mxB.p, i, j);
}
}
- ret = enif_make_resource(env, mxS);
- enif_release_resource(env, mxS);
+ ret = enif_make_resource(env, mxS.p);
+ enif_release_resource(mxS.p);
return ret;
}
static ERL_NIF_TERM size_of(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
/* size(Matrix) -> {Nrows, Ncols} */
- Matrix* mx;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mx)) {
+ mx_t mx;
+ if (!enif_get_resource(env, argv[0], resource_type, &mx.vp)) {
return enif_make_badarg(env);
}
- return enif_make_tuple2(env, enif_make_uint(env, mx->nrows),
- enif_make_uint(env, mx->ncols));
+ return enif_make_tuple2(env, enif_make_uint(env, mx.p->nrows),
+ enif_make_uint(env, mx.p->ncols));
}
static ERL_NIF_TERM to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -157,16 +171,17 @@ static ERL_NIF_TERM to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
/* to_term(Matrix) -> [[first row], [second row], ...,[last row]] */
unsigned i, j;
ERL_NIF_TERM res;
- Matrix* mx = NULL;
+ mx_t mx;
+ mx.p = NULL;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mx)) {
+ if (!enif_get_resource(env, argv[0], resource_type, &mx.vp)) {
return enif_make_badarg(env);
}
res = enif_make_list(env, 0);
- for (i = mx->nrows; i-- > 0; ) {
+ for (i = mx.p->nrows; i-- > 0; ) {
ERL_NIF_TERM row = enif_make_list(env, 0);
- for (j = mx->ncols; j-- > 0; ) {
- row = enif_make_list_cell(env, enif_make_double(env, POS(mx,i,j)),
+ for (j = mx.p->ncols; j-- > 0; ) {
+ row = enif_make_list_cell(env, enif_make_double(env, POS(mx.p,i,j)),
row);
}
res = enif_make_list_cell(env, row, res);
@@ -183,17 +198,17 @@ static int get_number(ErlNifEnv* env, ERL_NIF_TERM term, double* dp)
static Matrix* alloc_matrix(ErlNifEnv* env, unsigned nrows, unsigned ncols)
{
- Matrix* mx = enif_alloc_resource(env, resource_type, sizeof(Matrix));
+ Matrix* mx = enif_alloc_resource(resource_type, sizeof(Matrix));
mx->nrows = nrows;
mx->ncols = ncols;
- mx->data = enif_alloc(env, nrows*ncols*sizeof(double));
+ mx->data = enif_alloc(nrows*ncols*sizeof(double));
return mx;
}
static void matrix_dtor(ErlNifEnv* env, void* obj)
{
Matrix* mx = (Matrix*) obj;
- enif_free(env, mx->data);
+ enif_free(mx->data);
mx->data = NULL;
}
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index a0685ea3c0..86a1e9fbdf 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -22,6 +22,23 @@
* Author: Rickard Green
*/
+/*
+ * IMPORTANT note about ethr_cond_signal() and ethr_cond_broadcast()
+ *
+ * POSIX allows a call to `pthread_cond_signal' or `pthread_cond_broadcast'
+ * even though the associated mutex/mutexes isn't/aren't locked by the
+ * caller. We do not allow that by default in order to avoid a performance
+ * penalty on some platforms.
+ *
+ * Mutexes and condition variables can, however, be initialized as POSIX
+ * compliant. When initialized as such, ethr_cond_signal() and
+ * ethr_cond_broadcast() are allowed to be called even though the associated
+ * mutexes aren't locked. This will, however, incur a performance penalty on
+ * some platforms.
+ *
+ * POSIX compliant mutexes and condition variables *need* to be used together.
+ */
+
#ifndef ETHR_MUTEX_H__
#define ETHR_MUTEX_H__
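The note above means the default (non POSIX compliant) ethread objects require the classic "signal while holding the mutex" discipline; only objects initialized as POSIX compliant tolerate an unlocked signal, at a cost. For reference, a minimal plain-pthreads sketch of the always-valid pattern:

#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cnd = PTHREAD_COND_INITIALIZER;
static int ready = 0;

void producer(void)
{
    pthread_mutex_lock(&mtx);
    ready = 1;
    pthread_cond_signal(&cnd);   /* signalled with the mutex held */
    pthread_mutex_unlock(&mtx);
}

void consumer(void)
{
    pthread_mutex_lock(&mtx);
    while (!ready)               /* guard against spurious wakeups */
        pthread_cond_wait(&cnd, &mtx);
    pthread_mutex_unlock(&mtx);
}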
@@ -40,6 +57,14 @@
#endif
#endif
+/* #define ETHR_DBG_WIN_MTX_WITH_PTHREADS */
+#ifdef ETHR_DBG_WIN_MTX_WITH_PTHREADS
+typedef pthread_mutex_t CRITICAL_SECTION;
+int TryEnterCriticalSection(CRITICAL_SECTION *);
+void EnterCriticalSection(CRITICAL_SECTION *);
+void LeaveCriticalSection(CRITICAL_SECTION *);
+#endif
+
#ifdef ETHR_MTX_HARD_DEBUG
# ifdef __GNUC__
# warning ETHR_MTX_HARD_DEBUG
@@ -140,13 +165,19 @@ struct ethr_mutex_base_ {
typedef struct {
int main_spincount;
int aux_spincount;
+ int posix_compliant;
} ethr_mutex_opt;
+#define ETHR_MUTEX_OPT_DEFAULT_INITER {-1, -1, 0}
+
typedef struct {
int main_spincount;
int aux_spincount;
+ int posix_compliant;
} ethr_cond_opt;
+#define ETHR_COND_OPT_DEFAULT_INITER {-1, -1, 0}
+
#ifdef ETHR_USE_OWN_MTX_IMPL__
typedef struct ethr_mutex_ ethr_mutex;
@@ -179,7 +210,7 @@ struct ethr_cond_ {
#endif
};
-#else /* pthread */
+#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
typedef struct ethr_mutex_ ethr_mutex;
struct ethr_mutex_ {
@@ -197,7 +228,36 @@ struct ethr_cond_ {
#endif
};
-#endif /* pthread */
+#elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+# define ETHR_WIN_MUTEX__
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ int posix_compliant;
+ CRITICAL_SECTION cs;
+ ethr_ts_event *wakeups;
+ ethr_atomic32_t have_wakeups; /* only when posix compliant */
+ ethr_atomic32_t locked; /* only when posix compliant */
+ ethr_spinlock_t lock; /* only when posix compliant */
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ int posix_compliant;
+ CRITICAL_SECTION cs;
+ ethr_ts_event *waiters;
+ int spincount;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else
+# error "no mutex implementation"
+#endif
int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
int ethr_mutex_init(ethr_mutex *);
@@ -573,7 +633,7 @@ ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#else /* pthread_mutex */
+#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
@@ -605,7 +665,54 @@ ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* pthread_mutex */
+#elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ if (!TryEnterCriticalSection(&mtx->cs))
+ return EBUSY;
+ if (mtx->posix_compliant)
+ ethr_atomic32_set(&mtx->locked, 1);
+ return 0;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ EnterCriticalSection(&mtx->cs);
+ if (mtx->posix_compliant)
+ ethr_atomic32_set(&mtx->locked, 1);
+}
+
+void ethr_mutex_cond_wakeup__(ethr_mutex *mtx);
+
+static ETHR_INLINE void
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ if (mtx->posix_compliant) {
+ ethr_atomic32_set_mb(&mtx->locked, 0);
+ if (ethr_atomic32_read_acqb(&mtx->have_wakeups))
+ goto cond_wakeup;
+ else
+ goto leave_cs;
+ }
+
+ if (mtx->wakeups) {
+ cond_wakeup:
+ ethr_mutex_cond_wakeup__(mtx);
+ }
+ else {
+ leave_cs:
+ LeaveCriticalSection(&mtx->cs);
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif
#ifdef ETHR_USE_OWN_RWMTX_IMPL__
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 8ad0ded144..142c26c0ca 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -191,7 +191,6 @@ typedef DWORD ethr_tsd_key;
#undef ETHR_HAVE_ETHR_SIG_FUNCS
#define ETHR_USE_OWN_RWMTX_IMPL__
-#define ETHR_USE_OWN_MTX_IMPL__
#define ETHR_YIELD() (Sleep(0), 0)
diff --git a/erts/lib_src/common/erl_misc_utils.c b/erts/lib_src/common/erl_misc_utils.c
index 5dbf98c7d1..5e94ff19db 100644
--- a/erts/lib_src/common/erl_misc_utils.c
+++ b/erts/lib_src/common/erl_misc_utils.c
@@ -55,6 +55,12 @@
# ifdef HAVE_UNISTD_H
# include <unistd.h>
# endif
+# if defined(_SC_NPROC_CONF) && !defined(_SC_NPROCESSORS_CONF)
+# define _SC_NPROCESSORS_CONF _SC_NPROC_CONF
+# endif
+# if defined(_SC_NPROC_ONLN) && !defined(_SC_NPROCESSORS_ONLN)
+# define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN
+# endif
# if (defined(NO_SYSCONF) || !defined(_SC_NPROCESSORS_CONF))
# ifdef HAVE_SYS_SYSCTL_H
# include <sys/sysctl.h>
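The two fallback defines above let the rest of the file use the common _SC_NPROCESSORS_* names even on platforms whose headers only provide _SC_NPROC_CONF/_SC_NPROC_ONLN. A minimal sysconf() example of the call this enables:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
#ifdef _SC_NPROCESSORS_ONLN
    long online = sysconf(_SC_NPROCESSORS_ONLN);
    long configured = sysconf(_SC_NPROCESSORS_CONF);
    printf("%ld of %ld configured processors online\n", online, configured);
#endif
    return 0;
}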
@@ -1511,7 +1517,7 @@ const char* parse_topology_spec_group(erts_cpu_info_t *cpuinfo, const char* xml,
}
}
- if (cacheLevel == 0) {
+ if (parentCacheLevel == 0) {
*core_p = 0;
*processor_p = (*processor_p)++;
} else {
diff --git a/erts/lib_src/common/erl_printf.c b/erts/lib_src/common/erl_printf.c
index 72d18ab6f1..108a8bb531 100644
--- a/erts/lib_src/common/erl_printf.c
+++ b/erts/lib_src/common/erl_printf.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -108,7 +108,7 @@ write_f_add_cr(void *vfp, char* buf, size_t len)
if (PUTC(buf[i], (FILE *) vfp) == EOF)
return get_error_result();
}
- return 0;
+ return len;
}
static int
@@ -126,13 +126,14 @@ write_f(void *vfp, char* buf, size_t len)
#endif
if (FWRITE((void *) buf, sizeof(char), len, (FILE *) vfp) != len)
return get_error_result();
- return 0;
+ return len;
}
static int
write_fd(void *vfdp, char* buf, size_t len)
{
ssize_t size;
+ size_t res = len;
ASSERT(vfdp);
while (len) {
@@ -149,7 +150,7 @@ write_fd(void *vfdp, char* buf, size_t len)
len -= size;
}
- return 0;
+ return res;
}
static int
@@ -160,7 +161,7 @@ write_s(void *vwbufpp, char* bufp, size_t len)
ASSERT(len > 0);
memcpy((void *) *wbufpp, (void *) bufp, len);
*wbufpp += len;
- return 0;
+ return len;
}
@@ -182,6 +183,7 @@ write_sn(void *vwsnap, char* buf, size_t len)
memcpy((void *) wsnap->buf, (void *) buf, sz);
wsnap->buf += sz;
wsnap->len -= sz;
+ return sz;
}
return 0;
}
@@ -201,7 +203,7 @@ write_ds(void *vdsbufp, char* buf, size_t len)
}
memcpy((void *) (dsbufp->str + dsbufp->str_len), (void *) buf, len);
dsbufp->str_len += len;
- return 0;
+ return len;
}
int
diff --git a/erts/lib_src/common/erl_printf_format.c b/erts/lib_src/common/erl_printf_format.c
index fba3fd723c..473791dce4 100644
--- a/erts/lib_src/common/erl_printf_format.c
+++ b/erts/lib_src/common/erl_printf_format.c
@@ -388,7 +388,7 @@ static int fmt_double(fmtfn_t fn,void*arg,double val,
max_size++;
if (precision)
max_size += precision;
- else if (fmt && FMTF_alt)
+ else if (fmt & FMTF_alt)
max_size++;
break;
case FMTC_E:
@@ -402,7 +402,7 @@ static int fmt_double(fmtfn_t fn,void*arg,double val,
max_size += 4;
if (precision)
max_size += precision;
- else if (fmt && FMTF_alt)
+ else if (fmt & FMTF_alt)
max_size++;
aexp = exp >= 0 ? exp : -exp;
if (aexp < 100)
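The change from `fmt && FMTF_alt` to `fmt & FMTF_alt` fixes a logical-vs-bitwise mix-up: with &&, any non-zero flag word made the test true, so the alternate-form branch was taken whether or not that particular flag was set. A tiny illustration (the flag value below is illustrative, not the real one):

#include <stdio.h>

#define FMTF_alt 0x04   /* illustrative flag value */

int main(void)
{
    unsigned fmt = 0x01;                        /* some other flag is set */
    printf("&&: %d\n", fmt && FMTF_alt);        /* 1 -- wrong, alt not requested */
    printf(" &: %d\n", (fmt & FMTF_alt) != 0);  /* 0 -- correct */
    return 0;
}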
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
index 81fd6af80a..e363279f2e 100644
--- a/erts/lib_src/common/ethr_mutex.c
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -223,9 +223,59 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
int try_write_lock);
#endif
+/* -- Utilities used by multiple implementations -- */
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__) \
+ || defined(ETHR_WIN32_THREADS)
+
+static ETHR_INLINE void
+enqueue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (!*queue) {
+ *queue = tse_start;
+ tse_start->prev = tse_end;
+ tse_end->next = tse_start;
+ }
+ else {
+ tse_end->next = *queue;
+ tse_start->prev = (*queue)->prev;
+ (*queue)->prev->next = tse_start;
+ (*queue)->prev = tse_end;
+ }
+}
+
+
+static ETHR_INLINE void
+dequeue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (tse_start->prev == tse_end) {
+ ETHR_ASSERT(*queue == tse_start && tse_end->next == tse_start);
+ *queue = NULL;
+ }
+ else {
+ if (*queue == tse_start)
+ *queue = tse_end->next;
+ tse_end->next->prev = tse_start->prev;
+ tse_start->prev->next = tse_end->next;
+ }
+}
+
+#endif
+
#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
-/* -- Utilities operating both on ordinary mutexes and read write mutexes -- */
+static ETHR_INLINE void
+insert(ethr_ts_event *tse_pred, ethr_ts_event *tse)
+{
+ tse->next = tse_pred->next;
+ tse->prev = tse_pred;
+ tse_pred->next->prev = tse;
+ tse_pred->next = tse;
+}
static ETHR_INLINE void
rwmutex_freqread_wtng_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
@@ -355,51 +405,6 @@ rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
return res;
}
-
-static ETHR_INLINE void
-enqueue(ethr_ts_event **queue,
- ethr_ts_event *tse_start,
- ethr_ts_event *tse_end)
-{
- if (!*queue) {
- *queue = tse_start;
- tse_start->prev = tse_end;
- tse_end->next = tse_start;
- }
- else {
- tse_end->next = *queue;
- tse_start->prev = (*queue)->prev;
- (*queue)->prev->next = tse_start;
- (*queue)->prev = tse_end;
- }
-}
-
-static ETHR_INLINE void
-insert(ethr_ts_event *tse_pred, ethr_ts_event *tse)
-{
- tse->next = tse_pred->next;
- tse->prev = tse_pred;
- tse_pred->next->prev = tse;
- tse_pred->next = tse;
-}
-
-static ETHR_INLINE void
-dequeue(ethr_ts_event **queue,
- ethr_ts_event *tse_start,
- ethr_ts_event *tse_end)
-{
- if (tse_start->prev == tse_end) {
- ETHR_ASSERT(*queue == tse_start && tse_end->next == tse_start);
- *queue = NULL;
- }
- else {
- if (*queue == tse_start)
- *queue = tse_end->next;
- tse_end->next->prev = tse_start->prev;
- tse_start->prev->next = tse_end->next;
- }
-}
-
static void
event_wait(struct ethr_mutex_base_ *mtxb,
ethr_ts_event *tse,
@@ -1244,7 +1249,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
return 0;
}
-#else
+#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
/* -- pthread mutex and condition variables -------------------------------- */
int
@@ -1261,6 +1266,12 @@ ethr_mutex_init(ethr_mutex *mtx)
}
int
+ethr_mutex_init_opt(ethr_mutex *mtx, ethr_mutex_opt *opt)
+{
+ return ethr_mutex_init(mtx);
+}
+
+int
ethr_mutex_destroy(ethr_mutex *mtx)
{
#if ETHR_XCHK
@@ -1293,6 +1304,12 @@ ethr_cond_init(ethr_cond *cnd)
}
int
+ethr_cond_init_opt(ethr_cond *cnd, ethr_cond_opt *opt)
+{
+ return ethr_cond_init(cnd);
+}
+
+int
ethr_cond_destroy(ethr_cond *cnd)
{
#if ETHR_XCHK
@@ -1354,7 +1371,388 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
return res;
}
-#endif /* pthread_mutex */
+#elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+
+/*
+ * As of Vista/Server 2008, Windows has condition variables that can be
+ * used with critical sections. However, we need to be able to run on
+ * older Windows versions too, so we need to implement condition variables
+ * ourselves.
+ */
+
+#ifdef ETHR_DBG_WIN_MTX_WITH_PTHREADS
+/*
+ * For debugging of this implementation on POSIX platforms...
+ */
+
+#define ethr_win_get_errno__() EINVAL
+#if defined(__GNUC__)
+#define __forceinline __inline__
+#else
+#define __forceinline
+#endif
+
+static int
+InitializeCriticalSectionAndSpinCount(CRITICAL_SECTION *cs, int sc)
+{
+ return 0 == pthread_mutex_init((pthread_mutex_t *) cs, NULL);
+}
+
+static void DeleteCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res = pthread_mutex_destroy((pthread_mutex_t *) cs);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+int TryEnterCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res;
+ res = pthread_mutex_trylock((pthread_mutex_t *) cs);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res == 0;
+}
+
+void EnterCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res = pthread_mutex_lock((pthread_mutex_t *) cs);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+void LeaveCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res = pthread_mutex_unlock((pthread_mutex_t *) cs);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif
+
+#define ETHR_CND_WAIT__ ((ethr_sint32_t) 0x11dead11)
+#define ETHR_CND_WAKEUP__ ((ethr_sint32_t) 0x11beef11)
+
+static __forceinline void
+cond_wakeup(ethr_ts_event *tse)
+{
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_CND_WAIT__);
+
+ ethr_atomic32_set_relb(&tse->uaflgs, ETHR_CND_WAKEUP__);
+ ethr_event_set(&tse->event);
+}
+
+void
+ethr_mutex_cond_wakeup__(ethr_mutex *mtx)
+{
+ /*
+ * Called by ethr_mutex_unlock() when we have
+ * cond signal/broadcast wakeups waiting to
+ * be completed.
+ */
+ ethr_ts_event *tse;
+
+ if (!mtx->posix_compliant) {
+ tse = mtx->wakeups;
+ dequeue(&mtx->wakeups, tse, tse);
+ }
+ else {
+ ethr_spin_lock(&mtx->lock);
+ tse = mtx->wakeups;
+ if (tse)
+ dequeue(&mtx->wakeups, tse, tse);
+ if (!mtx->wakeups)
+ ethr_atomic32_set_relb(&mtx->have_wakeups, 0);
+ ethr_spin_unlock(&mtx->lock);
+ }
+
+ LeaveCriticalSection(&mtx->cs);
+
+ ETHR_ASSERT(tse || mtx->posix_compliant);
+
+ /*
+ * We delay actual condition variable wakeup until
+ * this point when we have left the critical section.
+     * This is in order to avoid the other thread being
+     * woken only to go right back to sleep waiting for
+     * the critical section that we are in.
+ *
+ * We also only wake one thread at a time even if
+ * there are multiple threads waiting to be woken.
+     * Otherwise, all but one would be woken and then
+     * immediately go back to sleep on the critical section.
+ * Since each wakeup is guaranteed to generate at
+ * least one lock/unlock sequence on this mutex, all
+ * threads will eventually be woken.
+ */
+
+ if (tse)
+ cond_wakeup(tse);
+}
+
+int
+ethr_mutex_init_opt(ethr_mutex *mtx, ethr_mutex_opt *opt)
+{
+ int spincount;
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+
+ spincount = opt ? opt->aux_spincount : 0;
+ if (spincount < 0)
+ spincount = 0;
+
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->cs, spincount)) {
+#if ETHR_XCHK
+ mtx->initialized = 0;
+#endif
+ return ethr_win_get_errno__();
+ }
+
+ mtx->posix_compliant = opt ? opt->posix_compliant : 0;
+ mtx->wakeups = NULL;
+ if (mtx->posix_compliant) {
+ ethr_atomic32_init(&mtx->locked, 0);
+ ethr_atomic32_init(&mtx->have_wakeups, 0);
+ ethr_spinlock_init(&mtx->lock);
+ }
+ return 0;
+}
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+ return ethr_mutex_init_opt(mtx, NULL);
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+ DeleteCriticalSection(&mtx->cs);
+ if (mtx->posix_compliant)
+ return ethr_spinlock_destroy(&mtx->lock);
+ else
+ return 0;
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ void *udata;
+ ethr_ts_event *tse = ethr_get_ts_event();
+ int spincount;
+
+ udata = tse->udata;
+ tse->udata = (void *) mtx;
+ ethr_atomic32_set_relb(&tse->uaflgs, ETHR_CND_WAIT__);
+
+ EnterCriticalSection(&cnd->cs);
+ enqueue(&cnd->waiters, tse, tse);
+ LeaveCriticalSection(&cnd->cs);
+
+ ethr_mutex_unlock(mtx);
+
+ spincount = cnd->spincount;
+
+ while (ethr_atomic32_read_acqb(&tse->uaflgs) != ETHR_CND_WAKEUP__) {
+ ethr_event_reset(&tse->event);
+ if (ethr_atomic32_read_acqb(&tse->uaflgs) == ETHR_CND_WAKEUP__)
+ break;
+ ethr_event_swait(&tse->event, spincount);
+ spincount = 0;
+ }
+
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+
+ ethr_mutex_lock(mtx);
+
+ return 0;
+}
+
+static __forceinline void
+posix_compliant_mtx_enqueue(ethr_mutex *mtx,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ ethr_ts_event *tse_wakeup = NULL; /* Avoid erroneous compiler warning... */
+ /*
+ * The associated mutex might not be locked, so we need to
+ * check if it is. If locked, enqueue for wakeup at unlock;
+     * otherwise, wake up the first one now and enqueue the rest.
+ */
+ if (tse_start == tse_end && !ethr_atomic32_read(&mtx->locked)) {
+ tse_wakeup = tse_start;
+ wakeup:
+ cond_wakeup(tse_wakeup);
+ }
+ else {
+ int need_wakeup;
+ ethr_spin_lock(&mtx->lock);
+ if (!mtx->wakeups)
+ ethr_atomic32_set_mb(&mtx->have_wakeups, 1);
+ need_wakeup = !ethr_atomic32_read(&mtx->locked);
+ if (need_wakeup) {
+ if (tse_start == tse_end) {
+ if (!mtx->wakeups)
+ ethr_atomic32_set_relb(&mtx->have_wakeups, 0);
+ ethr_spin_unlock(&mtx->lock);
+ tse_wakeup = tse_start;
+ goto wakeup;
+ }
+ tse_wakeup = tse_start;
+ tse_start = tse_start->next;
+ }
+ enqueue(&mtx->wakeups, tse_start, tse_end);
+ ethr_spin_unlock(&mtx->lock);
+ if (need_wakeup)
+ goto wakeup;
+ }
+}
+
+static __forceinline void
+enqueue_cond_wakeups(ethr_ts_event *queue, int posix_compliant)
+{
+ if (queue) {
+ int more;
+ ethr_ts_event *q = queue;
+
+ /*
+ * Waiters may be using different mutexes...
+ */
+
+ do {
+ ethr_mutex *mtx;
+ ethr_ts_event *tse, *tse_start, *tse_end;
+
+ more = 0;
+ tse_start = q;
+ mtx = (ethr_mutex *) tse_start->udata;
+
+ ETHR_ASSERT(posix_compliant
+ ? mtx->posix_compliant
+ : !mtx->posix_compliant);
+
+ ETHR_ASSERT(ethr_atomic32_read(&tse_start->uaflgs)
+ == ETHR_CND_WAIT__);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ tse_end = tse_start->prev;
+
+ for (tse = tse_start->next; tse != tse_start; tse = tse->next) {
+
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs)
+ == ETHR_CND_WAIT__);
+
+ if (mtx != (ethr_mutex *) tse->udata) {
+ tse_end = tse->prev;
+ dequeue(&q, tse_start, tse_end);
+ more = 1;
+ break;
+ }
+ }
+
+ if (posix_compliant)
+ posix_compliant_mtx_enqueue(mtx, tse_start, tse_end);
+ else
+ enqueue(&mtx->wakeups, tse_start, tse_end);
+
+ } while (more);
+ }
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ ethr_ts_event *waiters;
+
+ EnterCriticalSection(&cnd->cs);
+ waiters = cnd->waiters;
+ cnd->waiters = NULL;
+ LeaveCriticalSection(&cnd->cs);
+
+ if (cnd->posix_compliant)
+ enqueue_cond_wakeups(waiters, 1);
+ else
+ enqueue_cond_wakeups(waiters, 0);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ ethr_mutex *mtx;
+ ethr_ts_event *tse;
+
+ EnterCriticalSection(&cnd->cs);
+ tse = cnd->waiters;
+ if (tse)
+ dequeue(&cnd->waiters, tse, tse);
+ LeaveCriticalSection(&cnd->cs);
+
+ if (tse) {
+ mtx = (ethr_mutex *) tse->udata;
+
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_CND_WAIT__);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+ ETHR_ASSERT(cnd->posix_compliant
+ ? mtx->posix_compliant
+ : !mtx->posix_compliant);
+
+ if (cnd->posix_compliant)
+ posix_compliant_mtx_enqueue(mtx, tse, tse);
+ else
+ enqueue(&mtx->wakeups, tse, tse);
+ }
+}
+
+int
+ethr_cond_init_opt(ethr_cond *cnd, ethr_cond_opt *opt)
+{
+ int spincount;
+
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+
+ spincount = opt ? opt->aux_spincount : 0;
+ if (spincount < 0)
+ spincount = 0;
+
+ if (!InitializeCriticalSectionAndSpinCount(&cnd->cs, spincount)) {
+#if ETHR_XCHK
+ cnd->initialized = 0;
+#endif
+ return ethr_win_get_errno__();
+ }
+
+ cnd->posix_compliant = opt ? opt->posix_compliant : 0;
+ cnd->waiters = NULL;
+ cnd->spincount = spincount;
+ return 0;
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+ return ethr_cond_init_opt(cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+ DeleteCriticalSection(&cnd->cs);
+ return 0;
+}
+
+#endif
/* -- Exported symbols of inline functions --------------------------------- */
@@ -1969,7 +2367,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
- imask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ imask = ETHR_RWMTX_R_PEND_UNLCK_MASK__|ETHR_RWMTX_R_ABRT_UNLCK_FLG__;
else {
#ifdef ETHR_RLOCK_WITH_INC_DEC
imask = ETHR_RWMTX_RS_MASK__;
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index f08620b128..fe91a604b5 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index f704135ce8..dda24d4405 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 4adb3dedf2..37fd8bb832 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/otp_ring0.beam b/erts/preloaded/ebin/otp_ring0.beam
index 09216fd9d8..e255cc803f 100644
--- a/erts/preloaded/ebin/otp_ring0.beam
+++ b/erts/preloaded/ebin/otp_ring0.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 7ac18352c4..6227b562a1 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index 78481e2b00..d44bbbbd27 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_zip.beam b/erts/preloaded/ebin/prim_zip.beam
index cad777b272..7e1a5d1fdb 100644
--- a/erts/preloaded/ebin/prim_zip.beam
+++ b/erts/preloaded/ebin/prim_zip.beam
Binary files differ
diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam
index 9b62607c04..ebf9f8e6d6 100644
--- a/erts/preloaded/ebin/zlib.beam
+++ b/erts/preloaded/ebin/zlib.beam
Binary files differ
diff --git a/erts/preloaded/src/erl_prim_loader.erl b/erts/preloaded/src/erl_prim_loader.erl
index ccfa7978c8..35defde692 100644
--- a/erts/preloaded/src/erl_prim_loader.erl
+++ b/erts/preloaded/src/erl_prim_loader.erl
@@ -396,7 +396,7 @@ handle_timeout(State = #state{loader = inet}, Parent) ->
inet_timeout_handler(State, Parent).
%%% --------------------------------------------------------
-%%% Functions which handles efile as prim_loader (default).
+%%% Functions which handle efile as prim_loader (default).
%%% --------------------------------------------------------
%%% Reading many files in parallel is an optimization.
@@ -470,7 +470,7 @@ efile_get_file_from_port2(#state{prim_state = PS} = State, File) ->
end.
efile_get_file_from_port3(State, File, [P | Paths]) ->
- case efile_get_file_from_port2(State, concat([P,"/",File])) of
+ case efile_get_file_from_port2(State, join(P, File)) of
{{error,Reason},State1} when Reason =/= emfile ->
case Paths of
[] -> % return last error
@@ -523,7 +523,7 @@ efile_timeout_handler(#state{n_timeouts = N} = State, _Parent) ->
end.
%%% --------------------------------------------------------
-%%% Functions which handles inet prim_loader
+%%% Functions which handle inet prim_loader
%%% --------------------------------------------------------
%%
@@ -644,7 +644,7 @@ inet_get_file_from_port(State, File, Paths) ->
end.
inet_get_file_from_port1(File, [P | Paths], State) ->
- File1 = concat([P,"/",File]),
+ File1 = join(P, File),
case inet_send_and_rcv({get,File1}, File1, State) of
{{error,Reason},State1} ->
case Paths of
@@ -729,7 +729,7 @@ udp_options() ->
%% INET version IPv4 addresses
%%
ll_tcp_connect(LocalPort, IP, RemotePort) ->
- case ll_open_set_bind(tcp, ?INET_FAMILY, tcp_options(),
+ case ll_open_set_bind(tcp, ?INET_FAMILY, stream, tcp_options(),
?INET_ADDRESS, LocalPort) of
{ok,S} ->
case prim_inet:connect(S, IP, RemotePort, tcp_timeout()) of
@@ -743,11 +743,11 @@ ll_tcp_connect(LocalPort, IP, RemotePort) ->
%% Open and initialize an udp port for broadcast
%%
ll_udp_open(P) ->
- ll_open_set_bind(udp, ?INET_FAMILY, udp_options(), ?INET_ADDRESS, P).
+ ll_open_set_bind(udp, ?INET_FAMILY, dgram, udp_options(), ?INET_ADDRESS, P).
-ll_open_set_bind(Protocol, Family, SOpts, IP, Port) ->
- case prim_inet:open(Protocol, Family) of
+ll_open_set_bind(Protocol, Family, Type, SOpts, IP, Port) ->
+ case prim_inet:open(Protocol, Family, Type) of
{ok, S} ->
case prim_inet:setopts(S, SOpts) of
ok ->
@@ -1152,14 +1152,8 @@ send_all(U, [IP | AL], Cmd) ->
send_all(U, AL, Cmd);
send_all(_U, [], _) -> ok.
-%%concat([A|T]) when is_atom(A) -> %Atom
-%% atom_to_list(A) ++ concat(T);
-concat([C|T]) when C >= 0, C =< 255 ->
- [C|concat(T)];
-concat([S|T]) -> %String
- S ++ concat(T);
-concat([]) ->
- [].
+join(P, F) ->
+ P ++ "/" ++ F.
member(X, [X|_]) -> true;
member(X, [_|Y]) -> member(X, Y);
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index ff3633a3cc..e9a59a7aaf 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -40,6 +40,8 @@
-export([flush_monitor_message/2]).
-export([set_cpu_topology/1, format_cpu_topology/1]).
-export([await_proc_exit/3]).
+-export([memory/0, memory/1]).
+-export([alloc_info/1, alloc_sizes/1]).
-deprecated([hash/2]).
@@ -800,3 +802,405 @@ min(A, _) -> A.
Maximum :: term().
max(A, B) when A < B -> B;
max(A, _) -> A.
+
+
+%%
+%% erlang:memory/[0,1]
+%%
+%% NOTE! When updating these functions, make sure to also update
+%% erts_memory() in $ERL_TOP/erts/emulator/beam/erl_alloc.c
+%%
+
+-type memory_type() :: 'total' | 'processes' | 'processes_used' | 'system' | 'atom' | 'atom_used' | 'binary' | 'code' | 'ets' | 'low' | 'maximum'.
+
+-define(CARRIER_ALLOCS, [mseg_alloc, sbmbc_alloc, sbmbc_low_alloc]).
+-define(LOW_ALLOCS, [sbmbc_low_alloc, ll_low_alloc, std_low_alloc]).
+-define(ALL_NEEDED_ALLOCS, (erlang:system_info(alloc_util_allocators)
+ -- ?CARRIER_ALLOCS)).
+
+-record(memory, {total = 0,
+ processes = 0,
+ processes_used = 0,
+ system = 0,
+ atom = 0,
+ atom_used = 0,
+ binary = 0,
+ code = 0,
+ ets = 0,
+ low = 0,
+ maximum = 0}).
+
+-spec memory() -> [{memory_type(), non_neg_integer()}].
+memory() ->
+ case aa_mem_data(au_mem_data(?ALL_NEEDED_ALLOCS)) of
+ notsup ->
+ erlang:error(notsup);
+ Mem ->
+ InstrTail = case Mem#memory.maximum of
+ 0 -> [];
+ _ -> [{maximum, Mem#memory.maximum}]
+ end,
+ Tail = case Mem#memory.low of
+ 0 -> InstrTail;
+ _ -> [{low, Mem#memory.low} | InstrTail]
+ end,
+ [{total, Mem#memory.total},
+ {processes, Mem#memory.processes},
+ {processes_used, Mem#memory.processes_used},
+ {system, Mem#memory.system},
+ {atom, Mem#memory.atom},
+ {atom_used, Mem#memory.atom_used},
+ {binary, Mem#memory.binary},
+ {code, Mem#memory.code},
+ {ets, Mem#memory.ets} | Tail]
+ end.
+
+-spec memory(memory_type()|[memory_type()]) -> non_neg_integer() | [{memory_type(), non_neg_integer()}].
+memory(Type) when is_atom(Type) ->
+ {AA, ALCU, ChkSup, BadArgZero} = need_mem_info(Type),
+ case get_mem_data(ChkSup, ALCU, AA) of
+ notsup ->
+ erlang:error(notsup, [Type]);
+ Mem ->
+ Value = get_memval(Type, Mem),
+ case {BadArgZero, Value} of
+ {true, 0} -> erlang:error(badarg, [Type]);
+ _ -> Value
+ end
+ end;
+memory(Types) when is_list(Types) ->
+ {AA, ALCU, ChkSup, BadArgZeroList} = need_mem_info_list(Types),
+ case get_mem_data(ChkSup, ALCU, AA) of
+ notsup ->
+ erlang:error(notsup, [Types]);
+ Mem ->
+ case memory_result_list(Types, BadArgZeroList, Mem) of
+ badarg -> erlang:error(badarg, [Types]);
+ Result -> Result
+ end
+ end.
+
+memory_result_list([], [], _Mem) ->
+ [];
+memory_result_list([T|Ts], [BAZ|BAZs], Mem) ->
+ case memory_result_list(Ts, BAZs, Mem) of
+ badarg -> badarg;
+ TVs ->
+ V = get_memval(T, Mem),
+ case {BAZ, V} of
+ {true, 0} -> badarg;
+ _ -> [{T, V}| TVs]
+ end
+ end.
+
+get_mem_data(true, AlcUAllocs, NeedAllocatedAreas) ->
+ case memory_is_supported() of
+ false -> notsup;
+ true -> get_mem_data(false, AlcUAllocs, NeedAllocatedAreas)
+ end;
+get_mem_data(false, AlcUAllocs, NeedAllocatedAreas) ->
+ AlcUMem = case AlcUAllocs of
+ [] -> #memory{};
+ _ ->
+ au_mem_data(AlcUAllocs)
+ end,
+ case NeedAllocatedAreas of
+ true -> aa_mem_data(AlcUMem);
+ false -> AlcUMem
+ end.
+
+need_mem_info_list([]) ->
+ {false, [], false, []};
+need_mem_info_list([T|Ts]) ->
+ {MAA, MALCU, MChkSup, MBadArgZero} = need_mem_info_list(Ts),
+ {AA, ALCU, ChkSup, BadArgZero} = need_mem_info(T),
+ {case AA of
+ true -> true;
+ _ -> MAA
+ end,
+ ALCU ++ (MALCU -- ALCU),
+ case ChkSup of
+ true -> true;
+ _ -> MChkSup
+ end,
+ [BadArgZero|MBadArgZero]}.
+
+need_mem_info(Type) when Type == total;
+ Type == system ->
+ {true, ?ALL_NEEDED_ALLOCS, false, false};
+need_mem_info(Type) when Type == processes;
+ Type == processes_used ->
+ {true, [eheap_alloc, fix_alloc], true, false};
+need_mem_info(Type) when Type == atom;
+ Type == atom_used;
+ Type == code ->
+ {true, [], true, false};
+need_mem_info(binary) ->
+ {false, [binary_alloc], true, false};
+need_mem_info(ets) ->
+ {true, [ets_alloc], true, false};
+need_mem_info(low) ->
+ LowAllocs = ?LOW_ALLOCS -- ?CARRIER_ALLOCS,
+ {_, _, FeatureList, _} = erlang:system_info(allocator),
+ AlcUAllocs = case LowAllocs -- FeatureList of
+ [] -> LowAllocs;
+ _ -> []
+ end,
+ {false, AlcUAllocs, true, true};
+need_mem_info(maximum) ->
+ {true, [], true, true};
+need_mem_info(_) ->
+ {false, [], false, true}.
+
+get_memval(total, #memory{total = V}) -> V;
+get_memval(processes, #memory{processes = V}) -> V;
+get_memval(processes_used, #memory{processes_used = V}) -> V;
+get_memval(system, #memory{system = V}) -> V;
+get_memval(atom, #memory{atom = V}) -> V;
+get_memval(atom_used, #memory{atom_used = V}) -> V;
+get_memval(binary, #memory{binary = V}) -> V;
+get_memval(code, #memory{code = V}) -> V;
+get_memval(ets, #memory{ets = V}) -> V;
+get_memval(low, #memory{low = V}) -> V;
+get_memval(maximum, #memory{maximum = V}) -> V;
+get_memval(_, #memory{}) -> 0.
+
+memory_is_supported() ->
+ {_, _, FeatureList, _} = erlang:system_info(allocator),
+ case ((erlang:system_info(alloc_util_allocators)
+ -- ?CARRIER_ALLOCS)
+ -- FeatureList) of
+ [] -> true;
+ _ -> false
+ end.
+
+get_blocks_size([{blocks_size, Sz, _, _} | Rest], Acc) ->
+ get_blocks_size(Rest, Acc+Sz);
+get_blocks_size([{_, _, _, _} | Rest], Acc) ->
+ get_blocks_size(Rest, Acc);
+get_blocks_size([], Acc) ->
+ Acc.
+
+blocks_size([{Carriers, SizeList} | Rest], Acc) when Carriers == mbcs;
+ Carriers == sbcs;
+ Carriers == sbmbcs ->
+ blocks_size(Rest, get_blocks_size(SizeList, Acc));
+blocks_size([_ | Rest], Acc) ->
+ blocks_size(Rest, Acc);
+blocks_size([], Acc) ->
+ Acc.
+
+get_fix_proc([{ProcType, A1, U1}| Rest], {A0, U0}) when ProcType == proc;
+ ProcType == monitor_sh;
+ ProcType == nlink_sh;
+ ProcType == msg_ref ->
+ get_fix_proc(Rest, {A0+A1, U0+U1});
+get_fix_proc([_|Rest], Acc) ->
+ get_fix_proc(Rest, Acc);
+get_fix_proc([], Acc) ->
+ Acc.
+
+fix_proc([{fix_types, SizeList} | _Rest], Acc) ->
+ get_fix_proc(SizeList, Acc);
+fix_proc([_ | Rest], Acc) ->
+ fix_proc(Rest, Acc);
+fix_proc([], Acc) ->
+ Acc.
+
+is_low_alloc(_A, []) ->
+ false;
+is_low_alloc(A, [A|_As]) ->
+ true;
+is_low_alloc(A, [_A|As]) ->
+ is_low_alloc(A, As).
+
+is_low_alloc(A) ->
+ is_low_alloc(A, ?LOW_ALLOCS).
+
+au_mem_data(notsup, _) ->
+ notsup;
+au_mem_data(_, [{_, false} | _]) ->
+ notsup;
+au_mem_data(#memory{total = Tot,
+ processes = Proc,
+ processes_used = ProcU} = Mem,
+ [{eheap_alloc, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ processes = Proc+Sz,
+ processes_used = ProcU+Sz},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ system = Sys,
+ ets = Ets} = Mem,
+ [{ets_alloc, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ system = Sys+Sz,
+ ets = Ets+Sz},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ system = Sys,
+ binary = Bin} = Mem,
+ [{binary_alloc, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ system = Sys+Sz,
+ binary = Bin+Sz},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ processes = Proc,
+ processes_used = ProcU,
+ system = Sys} = Mem,
+ [{fix_alloc, _, Data} | Rest]) ->
+ {A, U} = fix_proc(Data, {0, 0}),
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ processes = Proc+A,
+ processes_used = ProcU+U,
+ system = Sys+Sz-A},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ system = Sys,
+ low = Low} = Mem,
+ [{A, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ system = Sys+Sz,
+ low = case is_low_alloc(A) of
+ true -> Low+Sz;
+ false -> Low
+ end},
+ Rest);
+au_mem_data(EMD, []) ->
+ EMD.
+
+au_mem_data(Allocs) ->
+ Ref = make_ref(),
+ erlang:system_info({allocator_sizes, Ref, Allocs}),
+ receive_emd(Ref).
+
+receive_emd(_Ref, EMD, 0) ->
+ EMD;
+receive_emd(Ref, EMD, N) ->
+ receive
+ {Ref, _, Data} ->
+ receive_emd(Ref, au_mem_data(EMD, Data), N-1)
+ end.
+
+receive_emd(Ref) ->
+ receive_emd(Ref, #memory{}, erlang:system_info(schedulers)).
+
+aa_mem_data(notsup, _) ->
+ notsup;
+aa_mem_data(#memory{} = Mem,
+ [{maximum, Max} | Rest]) ->
+ aa_mem_data(Mem#memory{maximum = Max},
+ Rest);
+aa_mem_data(#memory{} = Mem,
+ [{total, Tot} | Rest]) ->
+ aa_mem_data(Mem#memory{total = Tot,
+ system = 0}, % system will be adjusted later
+ Rest);
+aa_mem_data(#memory{atom = Atom,
+ atom_used = AtomU} = Mem,
+ [{atom_space, Alloced, Used} | Rest]) ->
+ aa_mem_data(Mem#memory{atom = Atom+Alloced,
+ atom_used = AtomU+Used},
+ Rest);
+aa_mem_data(#memory{atom = Atom,
+ atom_used = AtomU} = Mem,
+ [{atom_table, Sz} | Rest]) ->
+ aa_mem_data(Mem#memory{atom = Atom+Sz,
+ atom_used = AtomU+Sz},
+ Rest);
+aa_mem_data(#memory{ets = Ets} = Mem,
+ [{ets_misc, Sz} | Rest]) ->
+ aa_mem_data(Mem#memory{ets = Ets+Sz},
+ Rest);
+aa_mem_data(#memory{processes = Proc,
+ processes_used = ProcU,
+ system = Sys} = Mem,
+ [{ProcData, Sz} | Rest]) when ProcData == bif_timer;
+ ProcData == link_lh;
+ ProcData == process_table ->
+ aa_mem_data(Mem#memory{processes = Proc+Sz,
+ processes_used = ProcU+Sz,
+ system = Sys-Sz},
+ Rest);
+aa_mem_data(#memory{code = Code} = Mem,
+ [{CodeData, Sz} | Rest]) when CodeData == module_table;
+ CodeData == export_table;
+ CodeData == export_list;
+ CodeData == fun_table;
+ CodeData == module_refs;
+ CodeData == loaded_code ->
+ aa_mem_data(Mem#memory{code = Code+Sz},
+ Rest);
+aa_mem_data(EMD, [{_, _} | Rest]) ->
+ aa_mem_data(EMD, Rest);
+aa_mem_data(#memory{total = Tot,
+ processes = Proc,
+ system = Sys} = Mem,
+ []) when Sys =< 0 ->
+ %% Instrumented runtime system -> Sys = Tot - Proc
+ Mem#memory{system = Tot - Proc};
+aa_mem_data(EMD, []) ->
+ EMD.
+
+aa_mem_data(notsup) ->
+ notsup;
+aa_mem_data(EMD) ->
+ aa_mem_data(EMD, erlang:system_info(allocated_areas)).
+
+%%
+%% alloc_info/1 and alloc_sizes/1 are for internal use only (used by
+%% erlang:system_info({allocator|allocator_sizes, _})).
+%%
+
+alloc_info(Allocs) ->
+ get_alloc_info(allocator, Allocs).
+
+alloc_sizes(Allocs) ->
+ get_alloc_info(allocator_sizes, Allocs).
+
+get_alloc_info(Type, AAtom) when is_atom(AAtom) ->
+ [{AAtom, Result}] = get_alloc_info(Type, [AAtom]),
+ Result;
+get_alloc_info(Type, AList) when is_list(AList) ->
+ Ref = make_ref(),
+ erlang:system_info({Type, Ref, AList}),
+ receive_allocator(Ref,
+ erlang:system_info(schedulers),
+ mk_res_list(AList)).
+
+mk_res_list([]) ->
+ [];
+mk_res_list([Alloc | Rest]) ->
+ [{Alloc, []} | mk_res_list(Rest)].
+
+insert_instance(I, N, []) ->
+ [{instance, N, I}];
+insert_instance(I, N, [{instance, M, _}|_] = Rest) when N < M ->
+ [{instance, N, I} | Rest];
+insert_instance(I, N, [Prev|Rest]) ->
+ [Prev | insert_instance(I, N, Rest)].
+
+insert_info([], Ys) ->
+ Ys;
+insert_info([{A, false}|Xs], [{A, _IList}|Ys]) ->
+ insert_info(Xs, [{A, false}|Ys]);
+insert_info([{A, N, I}|Xs], [{A, IList}|Ys]) ->
+ insert_info(Xs, [{A, insert_instance(I, N, IList)}|Ys]);
+insert_info([{A1, _}|_] = Xs, [{A2, _} = Y | Ys]) when A1 /= A2 ->
+ [Y | insert_info(Xs, Ys)];
+insert_info([{A1, _, _}|_] = Xs, [{A2, _} = Y | Ys]) when A1 /= A2 ->
+ [Y | insert_info(Xs, Ys)].
+
+receive_allocator(_Ref, 0, Acc) ->
+ Acc;
+receive_allocator(Ref, N, Acc) ->
+ receive
+ {Ref, _, InfoList} ->
+ receive_allocator(Ref, N-1, insert_info(InfoList, Acc))
+ end.
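A minimal usage sketch of the memory API introduced above (illustrative only, not part of this diff; it assumes an emulator where the alloc_util allocators are enabled, so that memory/1 does not fail with notsup):

    %% Single value, in bytes.
    Total = erlang:memory(total),
    %% Several values at once; returns the {Type, Bytes} pairs in the order asked for.
    [{processes, P}, {ets, E}] = erlang:memory([processes, ets]),
    io:format("total=~p processes=~p ets=~p~n", [Total, P, E]).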
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index e52c813029..c9c434dea0 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -345,7 +345,7 @@ notify(Pids) ->
lists:foreach(fun(Pid) -> Pid ! {init,started} end, Pids).
%% Garbage collect all info about initially loaded modules.
-%% This information is temporary stored until the code_server
+%% This information is temporarily stored until the code_server
%% is started.
%% We force the garbage collection as the init process holds
%% this information during the initialisation of the system and
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index b18a57bfef..30b7a5246a 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -375,7 +375,7 @@ read(#file_descriptor{module = ?MODULE, data = {Port, _}}, Size)
{ok, Data};
{error, enomem} ->
%% Garbage collecting here might help if
- %% the current processes has some old binaries left.
+ %% the current processes have some old binaries left.
erlang:garbage_collect(),
case drv_command(Port, <<?FILE_READ, Size:64>>) of
{ok, {0, _Data}} when Size =/= 0 ->
@@ -825,7 +825,7 @@ list_dir_int(Port, Dir) ->
%% Opens a driver port and converts any problems into {error, emfile}.
-%% Returns {ok, Port} when succesful.
+%% Returns {ok, Port} when successful.
drv_open(Driver, Portopts) ->
try erlang:open_port({spawn, Driver}, Portopts) of
@@ -945,7 +945,7 @@ append([I | Is], R) when is_list(R) -> append(Is, [I | R]);
append([], R) -> R.
-%% Converts a list of mode atoms into an mode word for the driver.
+%% Converts a list of mode atoms into a mode word for the driver.
%% Returns {Mode, Portopts, Setopts} where Portopts is a list of
%% options for erlang:open_port/2 and Setopts is a list of
%% setopt commands to send to the port, or error Reason upon failure.
diff --git a/erts/preloaded/src/prim_inet.erl b/erts/preloaded/src/prim_inet.erl
index 8f2e845b4f..f144f73d68 100644
--- a/erts/preloaded/src/prim_inet.erl
+++ b/erts/preloaded/src/prim_inet.erl
@@ -25,8 +25,8 @@
%% Primitive inet_drv interface
--export([open/1, open/2, fdopen/2, fdopen/3, close/1]).
--export([bind/3, listen/1, listen/2]).
+-export([open/3, fdopen/4, close/1]).
+-export([bind/3, listen/1, listen/2, peeloff/2]).
-export([connect/3, connect/4, async_connect/4]).
-export([accept/1, accept/2, async_accept/2]).
-export([shutdown/2]).
@@ -56,58 +56,46 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
-%% OPEN(tcp | udp | sctp, inet | inet6) ->
+%% OPEN(tcp | udp | sctp, inet | inet6, stream | dgram | seqpacket) ->
%% {ok, insock()} |
%% {error, Reason}
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-open(Protocol) -> open1(Protocol, ?INET_AF_INET).
+open(Protocol, Family, Type) ->
+ open(Protocol, Family, Type, ?INET_REQ_OPEN, []).
-open(Protocol, inet) -> open1(Protocol, ?INET_AF_INET);
-open(Protocol, inet6) -> open1(Protocol, ?INET_AF_INET6);
-open(_, _) -> {error, einval}.
+fdopen(Protocol, Family, Type, Fd) when is_integer(Fd) ->
+ open(Protocol, Family, Type, ?INET_REQ_FDOPEN, ?int32(Fd)).
-fdopen(Protocol, Fd) -> fdopen1(Protocol, ?INET_AF_INET, Fd).
-
-fdopen(Protocol, Fd, inet) -> fdopen1(Protocol, ?INET_AF_INET, Fd);
-fdopen(Protocol, Fd, inet6) -> fdopen1(Protocol, ?INET_AF_INET6, Fd);
-fdopen(_, _, _) -> {error, einval}.
-
-open1(Protocol, Family) ->
- case open0(Protocol) of
- {ok, S} ->
- case ctl_cmd(S, ?INET_REQ_OPEN, [Family]) of
- {ok, _} ->
- {ok,S};
- Error ->
- close(S), Error
- end;
- Error -> Error
+open(Protocol, Family, Type, Req, Data) ->
+ Drv = protocol2drv(Protocol),
+ AF = enc_family(Family),
+ T = enc_type(Type),
+ try erlang:open_port({spawn_driver,Drv}, [binary]) of
+ S ->
+ case ctl_cmd(S, Req, [AF,T,Data]) of
+ {ok,_} -> {ok,S};
+ {error,_}=Error ->
+ close(S),
+ Error
+ end
+ catch
+ %% The only (?) way to get here is to try to open
+ %% the sctp driver when it does not exist
+ error:badarg -> {error,eprotonosupport}
end.
-fdopen1(Protocol, Family, Fd) when is_integer(Fd) ->
- case open0(Protocol) of
- {ok, S} ->
- case ctl_cmd(S,?INET_REQ_FDOPEN,[Family,?int32(Fd)]) of
- {ok, _} -> {ok,S};
- Error -> close(S), Error
- end;
- Error -> Error
- end.
+enc_family(inet) -> ?INET_AF_INET;
+enc_family(inet6) -> ?INET_AF_INET6.
-open0(Protocol) ->
- try erlang:open_port({spawn_driver,protocol2drv(Protocol)}, [binary]) of
- Port -> {ok,Port}
- catch
- error:Reason -> {error,Reason}
- end.
+enc_type(stream) -> ?INET_TYPE_STREAM;
+enc_type(dgram) -> ?INET_TYPE_DGRAM;
+enc_type(seqpacket) -> ?INET_TYPE_SEQPACKET.
protocol2drv(tcp) -> "tcp_inet";
protocol2drv(udp) -> "udp_inet";
-protocol2drv(sctp) -> "sctp_inet";
-protocol2drv(_) ->
- erlang:error(eprotonosupport).
+protocol2drv(sctp) -> "sctp_inet".
drv2protocol("tcp_inet") -> tcp;
drv2protocol("udp_inet") -> udp;
@@ -139,7 +127,7 @@ shutdown_1(S, How) ->
shutdown_2(S, How) ->
case ctl_cmd(S, ?TCP_REQ_SHUTDOWN, [How]) of
{ok, []} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
shutdown_pend_loop(S, N0) ->
@@ -195,7 +183,7 @@ close_pend_loop(S, N) ->
bind(S,IP,Port) when is_port(S), is_integer(Port), Port >= 0, Port =< 65535 ->
case ctl_cmd(S,?INET_REQ_BIND,[?int16(Port),ip_to_bytes(IP)]) of
{ok, [P1,P0]} -> {ok, ?u16(P1, P0)};
- Error -> Error
+ {error,_}=Error -> Error
end;
%% Multi-homed "bind": sctp_bindx(). The Op is 'add' or 'remove'.
@@ -222,7 +210,7 @@ bindx(S, AddFlag, Addrs) ->
{IP, Port} <- Addrs]],
case ctl_cmd(S, ?SCTP_REQ_BINDX, Args) of
{ok,_} -> {ok, S};
- Error -> Error
+ {error,_}=Error -> Error
end;
_ -> {error, einval}
end.
@@ -265,7 +253,7 @@ async_connect(S, IP, Port, Time) ->
case ctl_cmd(S, ?INET_REQ_CONNECT,
[enc_time(Time),?int16(Port),ip_to_bytes(IP)]) of
{ok, [R1,R0]} -> {ok, S, ?u16(R1,R0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -318,9 +306,9 @@ accept_opts(L, S) ->
end.
async_accept(L, Time) ->
- case ctl_cmd(L,?TCP_REQ_ACCEPT, [enc_time(Time)]) of
+ case ctl_cmd(L,?INET_REQ_ACCEPT, [enc_time(Time)]) of
{ok, [R1,R0]} -> {ok, ?u16(R1,R0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -334,16 +322,30 @@ async_accept(L, Time) ->
%% listening) is also accepted:
listen(S) -> listen(S, ?LISTEN_BACKLOG).
-
+
+listen(S, true) -> listen(S, ?LISTEN_BACKLOG);
+listen(S, false) -> listen(S, 0);
listen(S, BackLog) when is_port(S), is_integer(BackLog) ->
- case ctl_cmd(S, ?TCP_REQ_LISTEN, [?int16(BackLog)]) of
+ case ctl_cmd(S, ?INET_REQ_LISTEN, [?int16(BackLog)]) of
{ok, _} -> ok;
- Error -> Error
- end;
-listen(S, Flag) when is_port(S), is_boolean(Flag) ->
- case ctl_cmd(S, ?SCTP_REQ_LISTEN, enc_value(set, bool8, Flag)) of
- {ok,_} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% PEELOFF(insock(), AssocId) -> {ok,outsock()} | {error, Reason}
+%%
+%% SCTP: Peel off one association into a socket of type stream
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+peeloff(S, AssocId) ->
+ case ctl_cmd(S, ?SCTP_REQ_PEELOFF, [?int32(AssocId)]) of
+ inet_reply ->
+ receive
+ {inet_reply,S,Res} -> Res
+ end;
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -395,12 +397,12 @@ sendto(S, IP, Port, Data) when is_port(S), Port >= 0, Port =< 65535 ->
true ->
receive
{inet_reply,S,Reply} ->
- ?DBG_FORMAT("prim_inet:send() -> ~p~n", [Reply]),
+ ?DBG_FORMAT("prim_inet:sendto() -> ~p~n", [Reply]),
Reply
end
catch
error:_ ->
- ?DBG_FORMAT("prim_inet:send() -> {error,einval}~n", []),
+ ?DBG_FORMAT("prim_inet:sendto() -> {error,einval}~n", []),
{error,einval}
end.
@@ -455,7 +457,7 @@ recv0(S, Length, Time) when is_port(S), is_integer(Length), Length >= 0 ->
async_recv(S, Length, Time) ->
case ctl_cmd(S, ?TCP_REQ_RECV, [enc_time(Time), ?int32(Length)]) of
{ok,[R1,R0]} -> {ok, ?u16(R1,R0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -501,7 +503,7 @@ recvfrom0(S, Length, Time)
{inet_async, S, Ref, Error={error, _}} ->
Error
end;
- Error ->
+ {error,_}=Error ->
Error % Front-end error
end;
recvfrom0(_, _, _) -> {error,einval}.
@@ -517,18 +519,18 @@ peername(S) when is_port(S) ->
{ok, [F, P1,P0 | Addr]} ->
{IP, _} = get_ip(F, Addr),
{ok, { IP, ?u16(P1, P0) }};
- Error -> Error
+ {error,_}=Error -> Error
end.
setpeername(S, {IP,Port}) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETPEER, [?int16(Port),ip_to_bytes(IP)]) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
setpeername(S, undefined) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETPEER, []) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -542,18 +544,18 @@ sockname(S) when is_port(S) ->
{ok, [F, P1, P0 | Addr]} ->
{IP, _} = get_ip(F, Addr),
{ok, { IP, ?u16(P1, P0) }};
- Error -> Error
+ {error,_}=Error -> Error
end.
setsockname(S, {IP,Port}) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETNAME, [?int16(Port),ip_to_bytes(IP)]) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
setsockname(S, undefined) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETNAME, []) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -573,7 +575,7 @@ setopts(S, Opts) when is_port(S) ->
{ok, Buf} ->
case ctl_cmd(S, ?INET_REQ_SETOPTS, Buf) of
{ok, _} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -599,12 +601,12 @@ getopts(S, Opts) when is_port(S), is_list(Opts) ->
{ok,Rep} ->
%% Non-SCTP: "Rep" contains the encoded option vals:
decode_opt_val(Rep);
- {error,sctp_reply} ->
+ inet_reply ->
%% SCTP: Need to receive the full value:
receive
{inet_reply,S,Res} -> Res
end;
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -733,7 +735,7 @@ getifaddrs_ifget(S, IFs, IF, FlagsVals, Opts) ->
getiflist(S) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_GETIFLIST, []) of
{ok, Data} -> {ok, build_iflist(Data)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -751,7 +753,7 @@ ifget(S, Name, Opts) ->
{ok, Buf2} ->
case ctl_cmd(S, ?INET_REQ_IFGET, [Buf1,Buf2]) of
{ok, Data} -> decode_ifopts(Data,[]);
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end;
@@ -773,7 +775,7 @@ ifset(S, Name, Opts) ->
{ok, Buf2} ->
case ctl_cmd(S, ?INET_REQ_IFSET, [Buf1,Buf2]) of
{ok, _} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end;
@@ -801,7 +803,7 @@ subscribe(S, Sub) when is_port(S), is_list(Sub) ->
{ok, Bytes} ->
case ctl_cmd(S, ?INET_REQ_SUBSCRIBE, Bytes) of
{ok, Data} -> decode_subs(Data);
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -819,7 +821,7 @@ getstat(S, Stats) when is_port(S), is_list(Stats) ->
{ok, Bytes} ->
case ctl_cmd(S, ?INET_REQ_GETSTAT, Bytes) of
{ok, Data} -> decode_stats(Data);
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -835,7 +837,7 @@ getstat(S, Stats) when is_port(S), is_list(Stats) ->
getfd(S) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_GETFD, []) of
{ok, [S3,S2,S1,S0]} -> {ok, ?u32(S3,S2,S1,S0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -873,7 +875,7 @@ gettype(S) when is_port(S) ->
_ -> undefined
end,
{ok, {Family, Type}};
- Error -> Error
+ {error,_}=Error -> Error
end.
getprotocol(S) when is_port(S) ->
@@ -901,7 +903,7 @@ getstatus(S) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_GETSTATUS, []) of
{ok, [S3,S2,S1,S0]} ->
{ok, dec_status(?u32(S3,S2,S1,S0))};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -943,7 +945,7 @@ getservbyname1(S,Name,Proto) ->
case ctl_cmd(S, ?INET_REQ_GETSERVBYNAME, [L1,Name,L2,Proto]) of
{ok, [P1,P0]} ->
{ok, ?u16(P1,P0)};
- Error ->
+ {error,_}=Error ->
Error
end
end.
@@ -971,7 +973,7 @@ getservbyport1(S,Port,Proto) ->
true ->
case ctl_cmd(S, ?INET_REQ_GETSERVBYPORT, [?int16(Port),L,Proto]) of
{ok, Name} -> {ok, Name};
- Error -> Error
+ {error,_}=Error -> Error
end
end.
@@ -985,7 +987,7 @@ getservbyport1(S,Port,Proto) ->
unrecv(S, Data) ->
case ctl_cmd(S, ?TCP_REQ_UNRECV, Data) of
{ok, _} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2163,7 +2165,7 @@ ctl_cmd(Port, Cmd, Args) ->
Result =
try erlang:port_control(Port, Cmd, Args) of
[?INET_REP_OK|Reply] -> {ok,Reply};
- [?INET_REP_SCTP] -> {error,sctp_reply};
+ [?INET_REP] -> inet_reply;
[?INET_REP_ERROR|Err] -> {error,list_to_atom(Err)}
catch
error:_ -> {error,einval}
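A minimal sketch of the reworked low-level open (illustrative only, not part of this diff; prim_inet is an internal interface, and the explicit socket type argument is the one introduced above):

    %% Open a TCP stream socket on IPv4, then close it again.
    {ok, S} = prim_inet:open(tcp, inet, stream),
    ok = prim_inet:close(S).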
diff --git a/erts/preloaded/src/prim_zip.erl b/erts/preloaded/src/prim_zip.erl
index 6a9856fdad..392a9feb45 100644
--- a/erts/preloaded/src/prim_zip.erl
+++ b/erts/preloaded/src/prim_zip.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -21,7 +21,7 @@
-module(prim_zip).
-%% unzipping piecemal
+%% unzipping piecemeal
-export([
open/1,
open/3,
diff --git a/erts/preloaded/src/zlib.erl b/erts/preloaded/src/zlib.erl
index 6cc7b27114..210532edac 100644
--- a/erts/preloaded/src/zlib.erl
+++ b/erts/preloaded/src/zlib.erl
@@ -173,7 +173,7 @@ deflateInit(Z, Level, Method, WindowBits, MemLevel, Strategy) ->
-spec deflateSetDictionary(Z, Dictionary) -> Adler32 when
Z :: zstream(),
- Dictionary :: binary(),
+ Dictionary :: iodata(),
Adler32 :: integer().
deflateSetDictionary(Z, Dictionary) ->
call(Z, ?DEFLATE_SETDICT, Dictionary).
@@ -232,7 +232,7 @@ inflateInit(Z, WindowBits) ->
-spec inflateSetDictionary(Z, Dictionary) -> 'ok' when
Z :: zstream(),
- Dictionary :: binary().
+ Dictionary :: iodata().
inflateSetDictionary(Z, Dictionary) ->
call(Z, ?INFLATE_SETDICT, Dictionary).
@@ -283,38 +283,36 @@ getBufSize(Z) ->
crc32(Z) ->
call(Z, ?CRC32_0, []).
--spec crc32(Z, Binary) -> CRC when
+-spec crc32(Z, Data) -> CRC when
Z :: zstream(),
- Binary :: binary(),
+ Data :: iodata(),
CRC :: integer().
-crc32(Z, Binary) ->
- call(Z, ?CRC32_1, Binary).
+crc32(Z, Data) ->
+ call(Z, ?CRC32_1, Data).
--spec crc32(Z, PrevCRC, Binary) -> CRC when
+-spec crc32(Z, PrevCRC, Data) -> CRC when
Z :: zstream(),
PrevCRC :: integer(),
- Binary :: binary(),
+ Data :: iodata(),
CRC :: integer().
-crc32(Z, CRC, Binary) when is_binary(Binary), is_integer(CRC) ->
- call(Z, ?CRC32_2, <<CRC:32, Binary/binary>>);
-crc32(_Z, _CRC, _Binary) ->
- erlang:error(badarg).
+crc32(Z, CRC, Data) ->
+ call(Z, ?CRC32_2, [<<CRC:32>>, Data]).
--spec adler32(Z, Binary) -> CheckSum when
+-spec adler32(Z, Data) -> CheckSum when
Z :: zstream(),
- Binary :: binary(),
+ Data :: iodata(),
CheckSum :: integer().
-adler32(Z, Binary) ->
- call(Z, ?ADLER32_1, Binary).
+adler32(Z, Data) ->
+ call(Z, ?ADLER32_1, Data).
--spec adler32(Z, PrevAdler, Binary) -> CheckSum when
+-spec adler32(Z, PrevAdler, Data) -> CheckSum when
Z :: zstream(),
PrevAdler :: integer(),
- Binary :: binary(),
+ Data :: iodata(),
CheckSum :: integer().
-adler32(Z, Adler, Binary) when is_binary(Binary), is_integer(Adler) ->
- call(Z, ?ADLER32_2, <<Adler:32, Binary/binary>>);
-adler32(_Z, _Adler, _Binary) ->
+adler32(Z, Adler, Data) when is_integer(Adler) ->
+ call(Z, ?ADLER32_2, [<<Adler:32>>, Data]);
+adler32(_Z, _Adler, _Data) ->
erlang:error(badarg).
-spec crc32_combine(Z, CRC1, CRC2, Size2) -> CRC when
@@ -346,76 +344,83 @@ getQSize(Z) ->
call(Z, ?GET_QSIZE, []).
%% compress/uncompress zlib with header
--spec compress(Binary) -> Compressed when
- Binary :: binary(),
+-spec compress(Data) -> Compressed when
+ Data :: iodata(),
Compressed :: binary().
-compress(Binary) ->
+compress(Data) ->
Z = open(),
deflateInit(Z, default),
- Bs = deflate(Z, Binary,finish),
+ Bs = deflate(Z, Data, finish),
deflateEnd(Z),
close(Z),
- list_to_binary(Bs).
+ iolist_to_binary(Bs).
--spec uncompress(Binary) -> Decompressed when
- Binary :: binary(),
+-spec uncompress(Data) -> Decompressed when
+ Data :: iodata(),
Decompressed :: binary().
-uncompress(Binary) when byte_size(Binary) >= 8 ->
- Z = open(),
- inflateInit(Z),
- Bs = inflate(Z, Binary),
- inflateEnd(Z),
- close(Z),
- list_to_binary(Bs);
-uncompress(Binary) when is_binary(Binary) -> erlang:error(data_error);
-uncompress(_) -> erlang:error(badarg).
+uncompress(Data) ->
+ try iolist_size(Data) of
+ Size ->
+ if
+ Size >= 8 ->
+ Z = open(),
+ inflateInit(Z),
+ Bs = inflate(Z, Data),
+ inflateEnd(Z),
+ close(Z),
+ iolist_to_binary(Bs);
+ true ->
+ erlang:error(data_error)
+ end
+ catch
+ _:_ ->
+ erlang:error(badarg)
+ end.
%% unzip/zip zlib without header (zip members)
--spec zip(Binary) -> Compressed when
- Binary :: binary(),
+-spec zip(Data) -> Compressed when
+ Data :: iodata(),
Compressed :: binary().
-zip(Binary) ->
+zip(Data) ->
Z = open(),
deflateInit(Z, default, deflated, -?MAX_WBITS, 8, default),
- Bs = deflate(Z, Binary, finish),
+ Bs = deflate(Z, Data, finish),
deflateEnd(Z),
close(Z),
- list_to_binary(Bs).
+ iolist_to_binary(Bs).
--spec unzip(Binary) -> Decompressed when
- Binary :: binary(),
+-spec unzip(Data) -> Decompressed when
+ Data :: iodata(),
Decompressed :: binary().
-unzip(Binary) ->
+unzip(Data) ->
Z = open(),
inflateInit(Z, -?MAX_WBITS),
- Bs = inflate(Z, Binary),
+ Bs = inflate(Z, Data),
inflateEnd(Z),
close(Z),
- list_to_binary(Bs).
+ iolist_to_binary(Bs).
-spec gzip(Data) -> Compressed when
Data :: iodata(),
Compressed :: binary().
-gzip(Data) when is_binary(Data); is_list(Data) ->
+gzip(Data) ->
Z = open(),
deflateInit(Z, default, deflated, 16+?MAX_WBITS, 8, default),
Bs = deflate(Z, Data, finish),
deflateEnd(Z),
close(Z),
- iolist_to_binary(Bs);
-gzip(_) -> erlang:error(badarg).
+ iolist_to_binary(Bs).
--spec gunzip(Binary) -> Decompressed when
- Binary :: binary(),
+-spec gunzip(Data) -> Decompressed when
+ Data :: iodata(),
Decompressed :: binary().
-gunzip(Data) when is_binary(Data); is_list(Data) ->
+gunzip(Data) ->
Z = open(),
inflateInit(Z, 16+?MAX_WBITS),
Bs = inflate(Z, Data),
inflateEnd(Z),
close(Z),
- iolist_to_binary(Bs);
-gunzip(_) -> erlang:error(badarg).
+ iolist_to_binary(Bs).
-spec collect(zstream()) -> iolist().
collect(Z) ->
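A minimal sketch of the effect of the relaxed zlib specs above (illustrative only, not part of this diff): the one-shot functions now accept any iodata(), not just binaries, and still return binaries.

    %% Round trips with mixed iolists as input.
    <<"hello zlib">> = zlib:uncompress(zlib:compress(["hello ", <<"zlib">>])),
    <<"hello zlib">> = zlib:gunzip(zlib:gzip([<<"hello ">>, "zlib"])).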
diff --git a/erts/test/autoimport_SUITE.erl b/erts/test/autoimport_SUITE.erl
index 0e4708e046..71ed5204b1 100644
--- a/erts/test/autoimport_SUITE.erl
+++ b/erts/test/autoimport_SUITE.erl
@@ -87,10 +87,21 @@ autoimports(Config) when is_list(Config) ->
xml(XMLFile) ->
{ok,File} = file:open(XMLFile,[read]),
+ xskip_to_funcs(file:read_line(File),File),
DocData = xloop(file:read_line(File),File),
+ true = DocData =/= [],
file:close(File),
analyze(DocData).
+%% Skip lines up to and including the <funcs> tag.
+xskip_to_funcs({ok,Line},File) ->
+ case re:run(Line,"\\<funcs\\>",[{capture,none}]) of
+ nomatch ->
+ xskip_to_funcs(file:read_line(File),File);
+ match ->
+ ok
+ end.
+
xloop({ok,Line},File) ->
case re:run(Line,"\\<name\\>",[{capture,none}]) of
nomatch ->
diff --git a/erts/test/erlc_SUITE.erl b/erts/test/erlc_SUITE.erl
index 62e0e6813d..a9e28672e3 100644
--- a/erts/test/erlc_SUITE.erl
+++ b/erts/test/erlc_SUITE.erl
@@ -79,7 +79,7 @@ compile_erl(Config) when is_list(Config) ->
?line run(Config, Cmd, FileName, "-Werror",
["compile: warnings being treated as errors\$",
- "Warning: function foo/0 is unused\$",
+ "function foo/0 is unused\$",
"_ERROR_"]),
%% Check a bad file.
@@ -213,13 +213,34 @@ deep_cwd_1(PrivDir) ->
arg_overflow(Config) when is_list(Config) ->
?line {SrcDir, _OutDir, Cmd} = get_cmd(Config),
?line FileName = filename:join(SrcDir, "erl_test_ok.erl"),
- ?line Args = lists:flatten([ ["-D", integer_to_list(N), "=1 "] ||
- N <- lists:seq(1,10000) ]),
+ %% Each -D option will be expanded to three arguments when
+ %% invoking 'erl'.
+ ?line NumDOptions = num_d_options(),
+ ?line Args = lists:flatten([ ["-D", integer_to_list(N, 36), "=1 "] ||
+ N <- lists:seq(1, NumDOptions) ]),
?line run(Config, Cmd, FileName, Args,
["Warning: function foo/0 is unused\$",
"_OK_"]),
ok.
+num_d_options() ->
+ case {os:type(),os:version()} of
+ {{win32,_},_} ->
+ %% The maximum size of a command line in the command
+ %% shell on Windows is 8191 characters.
+ %% Each -D option is expanded to "@dv NN 1", i.e.
+ %% 8 characters. (Numbers up to 1295 can be expressed
+ %% as two 36-base digits.)
+ 1000;
+ {{unix,linux},Version} when Version < {2,6,23} ->
+ %% On some older 64-bit versions of Linux, the maximum number
+ %% of arguments is 16383.
+ %% See: http://www.in-ulm.de/~mascheck/various/argmax/
+ 5440;
+ {_,_} ->
+ 12000
+ end.
+
erlc() ->
case os:find_executable("erlc") of
false ->
diff --git a/erts/test/nt_SUITE.erl b/erts/test/nt_SUITE.erl
index 7d6da28ad6..f9bd15a0ce 100644
--- a/erts/test/nt_SUITE.erl
+++ b/erts/test/nt_SUITE.erl
@@ -490,12 +490,12 @@ middleman(Waitfor) ->
match_event(_X, []) ->
nomatch;
match_event({Time,Cat,Fac,Sev,Mes},[{Pid,Ref,{Cat,Fac,Sev,MesRE}} | Tail]) ->
- case regexp:match(Mes,MesRE) of
- {match,_,_} ->
+ case re:run(Mes,MesRE,[{capture,none}]) of
+ match ->
%%io:format("Match!~n"),
{ok,{Pid,Ref,Time,Mes},Tail};
- _Z ->
- %%io:format("No match (~p)~n",[_Z]),
+ nomatch ->
+ %%io:format("No match~n"),
case match_event({Time,Cat,Fac,Sev,Mes},Tail) of
{ok,X,Rest} ->
{ok,X,[{Pid,Ref,{Cat,Fac,Sev,MesRE}} | Rest]};
diff --git a/erts/test/z_SUITE.erl b/erts/test/z_SUITE.erl
index 8fceab32a6..482ecb8fba 100644
--- a/erts/test/z_SUITE.erl
+++ b/erts/test/z_SUITE.erl
@@ -166,9 +166,12 @@ core_search_conf(RunByTS, DBTop, XDir) ->
file_inspect(#core_search_conf{file = File}, Core) ->
FRes0 = os:cmd(File ++ " " ++ Core),
- FRes = case regexp:match(FRes0, Core) of
- {match, S, E} ->
+ FRes = case string:str(FRes0, Core) of
+ 0 ->
+ FRes0;
+ S ->
L = length(FRes0),
+ E = length(Core),
case S of
1 ->
lists:sublist(FRes0, E+1, L+1);
@@ -178,19 +181,13 @@ file_inspect(#core_search_conf{file = File}, Core) ->
" "
++
lists:sublist(FRes0, E+1, L+1)
- end;
- _ -> FRes0
+ end
end,
- case regexp:match(FRes, "[Tt][Ee][Xx][Tt]") of
+ case re:run(FRes, "text|ascii", [caseless,{capture,none}]) of
+ match ->
+ not_a_core;
nomatch ->
- case regexp:match(FRes, "[Aa][Ss][Cc][Ii][Ii]") of
- nomatch ->
- probably_a_core;
- _ ->
- not_a_core
- end;
- _ ->
- not_a_core
+ probably_a_core
end.
mk_readable(F) ->