Diffstat (limited to 'erts')
-rw-r--r--erts/Makefile.in13
-rw-r--r--erts/aclocal.m4308
-rwxr-xr-xerts/autoconf/win32.config.cache.static (renamed from erts/autoconf/win32.config.cache)0
-rw-r--r--erts/configure.in151
-rw-r--r--erts/doc/src/Makefile11
-rw-r--r--erts/doc/src/alt_dist.xml8
-rw-r--r--erts/doc/src/driver.xml2
-rw-r--r--erts/doc/src/erl.xml58
-rw-r--r--erts/doc/src/erl_dist_protocol.xml2
-rw-r--r--erts/doc/src/erl_driver.xml2
-rw-r--r--erts/doc/src/erl_nif.xml494
-rw-r--r--erts/doc/src/erlang.xml319
-rw-r--r--erts/doc/src/escript.xml217
-rw-r--r--erts/doc/src/match_spec.xml54
-rw-r--r--erts/doc/src/notes.xml494
-rw-r--r--erts/emulator/Makefile.in166
-rw-r--r--erts/emulator/beam/atom.c10
-rw-r--r--erts/emulator/beam/atom.names12
-rw-r--r--erts/emulator/beam/beam_bif_load.c82
-rw-r--r--erts/emulator/beam/beam_bp.c969
-rw-r--r--erts/emulator/beam/beam_bp.h209
-rw-r--r--erts/emulator/beam/beam_catches.c18
-rw-r--r--erts/emulator/beam/beam_catches.h16
-rw-r--r--erts/emulator/beam/beam_debug.c154
-rw-r--r--erts/emulator/beam/beam_emu.c815
-rw-r--r--erts/emulator/beam/beam_load.c535
-rw-r--r--erts/emulator/beam/beam_load.h22
-rw-r--r--erts/emulator/beam/bif.c99
-rw-r--r--erts/emulator/beam/bif.h31
-rw-r--r--erts/emulator/beam/bif.tab39
-rw-r--r--erts/emulator/beam/big.c168
-rw-r--r--erts/emulator/beam/big.h28
-rw-r--r--erts/emulator/beam/binary.c81
-rw-r--r--erts/emulator/beam/break.c52
-rw-r--r--erts/emulator/beam/copy.c208
-rw-r--r--erts/emulator/beam/decl.h55
-rw-r--r--erts/emulator/beam/dist.c199
-rw-r--r--erts/emulator/beam/dist.h11
-rw-r--r--erts/emulator/beam/elib_malloc.c2334
-rw-r--r--erts/emulator/beam/elib_stat.h45
-rw-r--r--erts/emulator/beam/erl_alloc.c313
-rw-r--r--erts/emulator/beam/erl_alloc.h18
-rw-r--r--erts/emulator/beam/erl_alloc.types23
-rw-r--r--erts/emulator/beam/erl_alloc_util.c175
-rw-r--r--erts/emulator/beam/erl_alloc_util.h60
-rw-r--r--erts/emulator/beam/erl_arith.c107
-rw-r--r--erts/emulator/beam/erl_async.c12
-rw-r--r--erts/emulator/beam/erl_bif_binary.c2931
-rw-r--r--erts/emulator/beam/erl_bif_chksum.c14
-rw-r--r--erts/emulator/beam/erl_bif_ddll.c5
-rw-r--r--erts/emulator/beam/erl_bif_guard.c37
-rw-r--r--erts/emulator/beam/erl_bif_info.c267
-rw-r--r--erts/emulator/beam/erl_bif_lists.c15
-rw-r--r--erts/emulator/beam/erl_bif_op.c12
-rw-r--r--erts/emulator/beam/erl_bif_port.c48
-rw-r--r--erts/emulator/beam/erl_bif_re.c8
-rw-r--r--erts/emulator/beam/erl_bif_timer.c22
-rw-r--r--erts/emulator/beam/erl_bif_trace.c168
-rw-r--r--erts/emulator/beam/erl_binary.h27
-rw-r--r--erts/emulator/beam/erl_bits.c26
-rw-r--r--erts/emulator/beam/erl_bits.h12
-rw-r--r--erts/emulator/beam/erl_db.c454
-rw-r--r--erts/emulator/beam/erl_db_hash.c36
-rw-r--r--erts/emulator/beam/erl_db_tree.c43
-rw-r--r--erts/emulator/beam/erl_db_util.c383
-rw-r--r--erts/emulator/beam/erl_db_util.h23
-rw-r--r--erts/emulator/beam/erl_debug.c38
-rw-r--r--erts/emulator/beam/erl_debug.h18
-rw-r--r--erts/emulator/beam/erl_driver.h9
-rw-r--r--erts/emulator/beam/erl_drv_thread.c92
-rw-r--r--erts/emulator/beam/erl_fun.c40
-rw-r--r--erts/emulator/beam/erl_fun.h22
-rw-r--r--erts/emulator/beam/erl_gc.c603
-rw-r--r--erts/emulator/beam/erl_gc.h39
-rw-r--r--erts/emulator/beam/erl_goodfit_alloc.c58
-rw-r--r--erts/emulator/beam/erl_goodfit_alloc.h30
-rw-r--r--erts/emulator/beam/erl_init.c201
-rw-r--r--erts/emulator/beam/erl_instrument.c84
-rw-r--r--erts/emulator/beam/erl_lock_check.c114
-rw-r--r--erts/emulator/beam/erl_lock_check.h1
-rw-r--r--erts/emulator/beam/erl_lock_count.c58
-rw-r--r--erts/emulator/beam/erl_lock_count.h15
-rw-r--r--erts/emulator/beam/erl_message.c286
-rw-r--r--erts/emulator/beam/erl_message.h52
-rw-r--r--erts/emulator/beam/erl_mtrace.c38
-rw-r--r--erts/emulator/beam/erl_nif.c565
-rw-r--r--erts/emulator/beam/erl_nif.h79
-rw-r--r--erts/emulator/beam/erl_nif_api_funcs.h87
-rw-r--r--erts/emulator/beam/erl_node_container_utils.h12
-rw-r--r--erts/emulator/beam/erl_node_tables.c138
-rw-r--r--erts/emulator/beam/erl_node_tables.h12
-rw-r--r--erts/emulator/beam/erl_obsolete.c186
-rw-r--r--erts/emulator/beam/erl_port_task.c15
-rw-r--r--erts/emulator/beam/erl_printf_term.c16
-rw-r--r--erts/emulator/beam/erl_process.c2100
-rw-r--r--erts/emulator/beam/erl_process.h154
-rw-r--r--erts/emulator/beam/erl_process_dump.c32
-rw-r--r--erts/emulator/beam/erl_process_lock.c350
-rw-r--r--erts/emulator/beam/erl_process_lock.h46
-rw-r--r--erts/emulator/beam/erl_smp.h222
-rw-r--r--erts/emulator/beam/erl_term.c28
-rw-r--r--erts/emulator/beam/erl_term.h98
-rw-r--r--erts/emulator/beam/erl_threads.h527
-rw-r--r--erts/emulator/beam/erl_time_sup.c20
-rw-r--r--erts/emulator/beam/erl_trace.c277
-rw-r--r--erts/emulator/beam/erl_unicode.c30
-rw-r--r--erts/emulator/beam/erl_vm.h17
-rw-r--r--erts/emulator/beam/error.h16
-rw-r--r--erts/emulator/beam/export.c28
-rw-r--r--erts/emulator/beam/export.h16
-rw-r--r--erts/emulator/beam/external.c312
-rw-r--r--erts/emulator/beam/external.h4
-rw-r--r--erts/emulator/beam/global.h199
-rw-r--r--erts/emulator/beam/io.c290
-rw-r--r--erts/emulator/beam/module.h4
-rw-r--r--erts/emulator/beam/ops.tab78
-rw-r--r--erts/emulator/beam/packet_parser.c2
-rw-r--r--erts/emulator/beam/register.c44
-rw-r--r--erts/emulator/beam/sys.h158
-rw-r--r--erts/emulator/beam/utils.c219
-rw-r--r--erts/emulator/drivers/common/efile_drv.c85
-rw-r--r--erts/emulator/drivers/common/erl_efile.h16
-rw-r--r--erts/emulator/drivers/common/inet_drv.c169
-rw-r--r--erts/emulator/drivers/common/ram_file_drv.c17
-rw-r--r--erts/emulator/drivers/common/zlib_drv.c12
-rw-r--r--erts/emulator/drivers/unix/mem_drv.c145
-rw-r--r--erts/emulator/drivers/unix/ttsl_drv.c6
-rw-r--r--erts/emulator/drivers/unix/unix_efile.c109
-rw-r--r--erts/emulator/drivers/win32/mem_drv.c141
-rw-r--r--erts/emulator/drivers/win32/win_efile.c34
-rw-r--r--erts/emulator/hipe/hipe_bif0.c5
-rw-r--r--erts/emulator/hipe/hipe_gc.c18
-rw-r--r--erts/emulator/hipe/hipe_mkliterals.c4
-rw-r--r--erts/emulator/obsolete/driver.h263
-rw-r--r--erts/emulator/pcre/pcre_compile.c9
-rw-r--r--erts/emulator/pcre/pcre_exec.c1
-rw-r--r--erts/emulator/sys/common/erl_mseg.c457
-rw-r--r--erts/emulator/sys/common/erl_mseg.h14
-rw-r--r--erts/emulator/sys/common/erl_poll.c98
-rw-r--r--erts/emulator/sys/unix/erl9_start.c130
-rw-r--r--erts/emulator/sys/unix/sys.c45
-rw-r--r--erts/emulator/sys/unix/sys_float.c17
-rw-r--r--erts/emulator/sys/vxworks/sys.c18
-rw-r--r--erts/emulator/sys/win32/erl_poll.c65
-rw-r--r--erts/emulator/sys/win32/sys.c184
-rw-r--r--erts/emulator/sys/win32/sys_env.c16
-rw-r--r--erts/emulator/test/Makefile18
-rw-r--r--erts/emulator/test/beam_SUITE.erl35
-rw-r--r--erts/emulator/test/bif_SUITE.erl92
-rw-r--r--erts/emulator/test/binary_SUITE.erl42
-rw-r--r--erts/emulator/test/busy_port_SUITE.erl15
-rw-r--r--erts/emulator/test/decode_packet_SUITE.erl39
-rw-r--r--erts/emulator/test/distribution_SUITE.erl11
-rw-r--r--erts/emulator/test/driver_SUITE_data/chkio_drv.c2
-rw-r--r--erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c2
-rw-r--r--erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c2
-rw-r--r--erts/emulator/test/driver_SUITE_data/missing_callback_drv.c2
-rw-r--r--erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c2
-rw-r--r--erts/emulator/test/float_SUITE.erl29
-rw-r--r--erts/emulator/test/fun_r12_SUITE.erl (renamed from erts/emulator/test/fun_r11_SUITE.erl)34
-rw-r--r--erts/emulator/test/guard_SUITE.erl179
-rw-r--r--erts/emulator/test/hash_SUITE.erl8
-rw-r--r--erts/emulator/test/nif_SUITE.erl415
-rw-r--r--erts/emulator/test/nif_SUITE_data/nif_SUITE.c895
-rw-r--r--erts/emulator/test/nif_SUITE_data/nif_mod.c98
-rw-r--r--erts/emulator/test/nif_SUITE_data/nif_mod.h12
-rw-r--r--erts/emulator/test/obsolete_SUITE.erl123
-rw-r--r--erts/emulator/test/obsolete_SUITE_data/Makefile.src33
-rw-r--r--erts/emulator/test/obsolete_SUITE_data/erl_threads.c302
-rw-r--r--erts/emulator/test/obsolete_SUITE_data/testcase_driver.c262
-rw-r--r--erts/emulator/test/obsolete_SUITE_data/testcase_driver.h57
-rw-r--r--erts/emulator/test/port_SUITE.erl25
-rw-r--r--erts/emulator/test/receive_SUITE.erl113
-rw-r--r--erts/emulator/test/scheduler_SUITE.erl372
-rw-r--r--erts/emulator/test/send_term_SUITE.erl67
-rw-r--r--erts/emulator/test/send_term_SUITE_data/send_term_drv.c218
-rw-r--r--erts/emulator/test/system_info_SUITE.erl24
-rw-r--r--erts/emulator/test/time_SUITE.erl37
-rw-r--r--erts/emulator/test/trace_call_time_SUITE.erl614
-rw-r--r--erts/emulator/test/trace_call_time_SUITE_data/Makefile.src6
-rw-r--r--erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c37
-rwxr-xr-xerts/emulator/utils/beam_makeops22
-rwxr-xr-xerts/emulator/utils/make_tables20
-rw-r--r--erts/epmd/src/epmd.c113
-rw-r--r--erts/epmd/src/epmd_cli.c36
-rw-r--r--erts/epmd/src/epmd_int.h44
-rw-r--r--erts/epmd/src/epmd_srv.c44
-rw-r--r--erts/etc/common/Makefile.in22
-rw-r--r--erts/etc/common/erlexec.c30
-rw-r--r--erts/etc/common/inet_gethost.c18
-rw-r--r--erts/etc/common/run_test.c514
-rw-r--r--erts/etc/unix/Install.src1
-rw-r--r--erts/etc/unix/cerl.src20
-rw-r--r--erts/etc/win32/Install.c14
-rwxr-xr-xerts/etc/win32/cygwin_tools/vc/ld.sh5
-rwxr-xr-xerts/etc/win32/cygwin_tools/vc/rc.sh12
-rw-r--r--erts/include/erl_int_sizes_config.h.in13
-rw-r--r--erts/include/internal/erl_misc_utils.h9
-rw-r--r--erts/include/internal/ethr_internal.h67
-rw-r--r--erts/include/internal/ethr_mutex.h510
-rw-r--r--erts/include/internal/ethr_optimized_fallbacks.h185
-rw-r--r--erts/include/internal/ethread.h1699
-rw-r--r--erts/include/internal/ethread_header_config.h.in80
-rw-r--r--erts/include/internal/gcc/ethr_atomic.h183
-rw-r--r--erts/include/internal/gcc/ethread.h30
-rw-r--r--erts/include/internal/i386/atomic.h43
-rw-r--r--erts/include/internal/i386/ethread.h3
-rw-r--r--erts/include/internal/i386/rwlock.h12
-rw-r--r--erts/include/internal/i386/spinlock.h14
-rw-r--r--erts/include/internal/libatomic_ops/ethr_atomic.h292
-rw-r--r--erts/include/internal/libatomic_ops/ethread.h30
-rw-r--r--erts/include/internal/ppc32/atomic.h33
-rw-r--r--erts/include/internal/ppc32/ethread.h3
-rw-r--r--erts/include/internal/ppc32/rwlock.h12
-rw-r--r--erts/include/internal/ppc32/spinlock.h12
-rw-r--r--erts/include/internal/pthread/ethr_event.h151
-rw-r--r--erts/include/internal/sparc32/atomic.h53
-rw-r--r--erts/include/internal/sparc32/ethread.h3
-rw-r--r--erts/include/internal/sparc32/rwlock.h12
-rw-r--r--erts/include/internal/sparc32/spinlock.h12
-rw-r--r--erts/include/internal/tile/atomic.h69
-rw-r--r--erts/include/internal/win/ethr_atomic.h244
-rw-r--r--erts/include/internal/win/ethr_event.h62
-rw-r--r--erts/include/internal/win/ethread.h31
-rw-r--r--erts/lib_src/Makefile.in24
-rw-r--r--erts/lib_src/common/erl_misc_utils.c768
-rw-r--r--erts/lib_src/common/ethr_aux.c762
-rw-r--r--erts/lib_src/common/ethr_cbf.c36
-rw-r--r--erts/lib_src/common/ethr_mutex.c2758
-rw-r--r--erts/lib_src/common/ethread.c3346
-rw-r--r--erts/lib_src/pthread/ethr_event.c219
-rw-r--r--erts/lib_src/pthread/ethread.c477
-rw-r--r--erts/lib_src/win/ethr_event.c120
-rw-r--r--erts/lib_src/win/ethread.c625
-rw-r--r--erts/preloaded/ebin/erl_prim_loader.beambin50808 -> 50392 bytes
-rw-r--r--erts/preloaded/ebin/erlang.beambin23800 -> 24324 bytes
-rw-r--r--erts/preloaded/ebin/init.beambin44488 -> 44352 bytes
-rw-r--r--erts/preloaded/ebin/otp_ring0.beambin1436 -> 1436 bytes
-rw-r--r--erts/preloaded/ebin/prim_file.beambin29480 -> 30544 bytes
-rw-r--r--erts/preloaded/ebin/prim_inet.beambin57308 -> 57272 bytes
-rw-r--r--erts/preloaded/ebin/prim_zip.beambin21756 -> 22440 bytes
-rw-r--r--erts/preloaded/ebin/zlib.beambin10612 -> 10620 bytes
-rw-r--r--erts/preloaded/src/erl_prim_loader.erl5
-rw-r--r--erts/preloaded/src/erlang.erl13
-rw-r--r--erts/preloaded/src/prim_file.erl57
-rw-r--r--erts/preloaded/src/prim_zip.erl89
-rw-r--r--erts/test/erlexec_SUITE_data/erlexec_tests.c12
-rw-r--r--erts/test/ethread_SUITE.erl54
-rw-r--r--erts/test/ethread_SUITE_data/ethread_tests.c1198
-rw-r--r--erts/vsn.mk4
250 files changed, 27404 insertions, 16154 deletions
diff --git a/erts/Makefile.in b/erts/Makefile.in
index fabf86db7c..dc8edcd928 100644
--- a/erts/Makefile.in
+++ b/erts/Makefile.in
@@ -1,19 +1,19 @@
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2006-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 2006-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
include $(ERL_TOP)/make/target.mk
@@ -92,10 +92,12 @@ local_setup:
$(ERL_TOP)/bin/escript $(ERL_TOP)/bin/escript.exe \
$(ERL_TOP)/bin/dialyzer $(ERL_TOP)/bin/dialyzer.exe \
$(ERL_TOP)/bin/typer $(ERL_TOP)/bin/typer.exe \
+ $(ERL_TOP)/bin/run_test $(ERL_TOP)/bin/run_test.exe \
$(ERL_TOP)/bin/start*.boot $(ERL_TOP)/bin/start*.script
@if [ "X$(TARGET)" = "Xwin32" ]; then \
cp $(ERL_TOP)/bin/$(TARGET)/dialyzer.exe $(ERL_TOP)/bin/dialyzer.exe; \
cp $(ERL_TOP)/bin/$(TARGET)/typer.exe $(ERL_TOP)/bin/typer.exe; \
+ cp $(ERL_TOP)/bin/$(TARGET)/run_test.exe $(ERL_TOP)/bin/run_test.exe; \
cp $(ERL_TOP)/bin/$(TARGET)/erlc.exe $(ERL_TOP)/bin/erlc.exe; \
cp $(ERL_TOP)/bin/$(TARGET)/erl.exe $(ERL_TOP)/bin/erl.exe; \
cp $(ERL_TOP)/bin/$(TARGET)/werl.exe $(ERL_TOP)/bin/werl.exe; \
@@ -115,6 +117,7 @@ local_setup:
$(ERL_TOP)/erts/etc/unix/cerl.src > $(ERL_TOP)/bin/cerl; \
cp $(ERL_TOP)/bin/$(TARGET)/dialyzer $(ERL_TOP)/bin/dialyzer; \
cp $(ERL_TOP)/bin/$(TARGET)/typer $(ERL_TOP)/bin/typer; \
+ cp $(ERL_TOP)/bin/$(TARGET)/run_test $(ERL_TOP)/bin/run_test; \
cp $(ERL_TOP)/bin/$(TARGET)/erlc $(ERL_TOP)/bin/erlc; \
cp $(ERL_TOP)/bin/$(TARGET)/escript $(ERL_TOP)/bin/escript; \
chmod 755 $(ERL_TOP)/bin/erl $(ERL_TOP)/bin/erlc \
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 3d935b7295..3b1edd7605 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -512,6 +512,8 @@ dnl
AC_DEFUN(LM_CHECK_THR_LIB,
[
+NEED_NPTL_PTHREAD_H=no
+
dnl win32?
AC_MSG_CHECKING([for native win32 threads])
if test "X$host_os" = "Xwin32"; then
@@ -519,11 +521,13 @@ if test "X$host_os" = "Xwin32"; then
THR_DEFS="-DWIN32_THREADS"
THR_LIBS=
THR_LIB_NAME=win32_threads
+ THR_LIB_TYPE=win32_threads
else
AC_MSG_RESULT(no)
THR_DEFS=
THR_LIBS=
THR_LIB_NAME=
+ THR_LIB_TYPE=posix_unknown
dnl Try to find POSIX threads
@@ -584,8 +588,11 @@ dnl On ofs1 the '-pthread' switch should be used
AC_MSG_WARN([result yes guessed because of cross compilation])
fi
if test $nptl = yes; then
+ THR_LIB_TYPE=posix_nptl
need_nptl_incldir=no
- AC_CHECK_HEADER(nptl/pthread.h, need_nptl_incldir=yes)
+ AC_CHECK_HEADER(nptl/pthread.h,
+ [need_nptl_incldir=yes
+ NEED_NPTL_PTHREAD_H=yes])
if test $need_nptl_incldir = yes; then
# Ahh...
nptl_path="$C_INCLUDE_PATH:$CPATH"
@@ -649,6 +656,19 @@ fi
])
+AC_DEFUN(ERL_INTERNAL_LIBS,
+[
+
+ERTS_INTERNAL_X_LIBS=
+
+AC_CHECK_LIB(kstat, kstat_open,
+[AC_DEFINE(HAVE_KSTAT, 1, [Define if you have kstat])
+ERTS_INTERNAL_X_LIBS="$ERTS_INTERNAL_X_LIBS -lkstat"])
+
+AC_SUBST(ERTS_INTERNAL_X_LIBS)
+
+])
+
dnl ----------------------------------------------------------------------
dnl
dnl ERL_FIND_ETHR_LIB
@@ -672,10 +692,14 @@ AC_DEFUN(ERL_FIND_ETHR_LIB,
[
LM_CHECK_THR_LIB
+ERL_INTERNAL_LIBS
+ethr_have_native_atomics=no
+ethr_have_native_spinlock=no
ETHR_THR_LIB_BASE="$THR_LIB_NAME"
+ETHR_THR_LIB_BASE_TYPE="$THR_LIB_TYPE"
ETHR_DEFS="$THR_DEFS"
-ETHR_X_LIBS="$THR_LIBS"
+ETHR_X_LIBS="$THR_LIBS $ERTS_INTERNAL_X_LIBS"
ETHR_LIBS=
ETHR_LIB_NAME=
@@ -687,6 +711,7 @@ ethr_lib_name=ethread
case "$THR_LIB_NAME" in
win32_threads)
+ ETHR_THR_LIB_BASE_DIR=win
# * _WIN32_WINNT >= 0x0400 is needed for
# TryEnterCriticalSection
# * _WIN32_WINNT >= 0x0403 is needed for
@@ -712,10 +737,13 @@ case "$THR_LIB_NAME" in
if test $found_win32_winnt = no; then
AC_MSG_ERROR([-D_WIN32_WINNT missing in CPPFLAGS])
fi
+ ethr_have_native_atomics=yes
+ ethr_have_native_spinlock=yes
AC_DEFINE(ETHR_WIN32_THREADS, 1, [Define if you have win32 threads])
;;
pthread)
+ ETHR_THR_LIB_BASE_DIR=pthread
AC_DEFINE(ETHR_PTHREADS, 1, [Define if you have pthreads])
case $host_os in
openbsd*)
@@ -771,9 +799,7 @@ case "$THR_LIB_NAME" in
if test $usable_sigaltstack = no; then
ETHR_DEFS="$ETHR_DEFS -DETHR_UNUSABLE_SIGALTSTACK"
fi
-
- AC_DEFINE(ETHR_INIT_MUTEX_IN_CHILD_AT_FORK, 1, \
-[Define if mutexes should be reinitialized (instead of unlocked) in child at fork.]) ;;
+ ;;
*) ;;
esac
@@ -799,6 +825,15 @@ case "$THR_LIB_NAME" in
AC_DEFINE(ETHR_HAVE_MIT_PTHREAD_H, 1, \
[Define if the pthread.h header file is in pthread/mit directory.]))
+ if test $NEED_NPTL_PTHREAD_H = yes; then
+ AC_DEFINE(ETHR_NEED_NPTL_PTHREAD_H, 1, \
+[Define if you need the <nptl/pthread.h> header file.])
+ fi
+
+ AC_CHECK_HEADER(sched.h, \
+ AC_DEFINE(ETHR_HAVE_SCHED_H, 1, \
+[Define if you have the <sched.h> header file.]))
+
AC_CHECK_HEADER(sys/time.h, \
AC_DEFINE(ETHR_HAVE_SYS_TIME_H, 1, \
[Define if you have the <sys/time.h> header file.]))
@@ -814,39 +849,212 @@ case "$THR_LIB_NAME" in
dnl Check for functions
dnl
- AC_CHECK_FUNC(pthread_atfork, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_ATFORK, 1, \
-[Define if you have the pthread_atfork function.]))
- AC_CHECK_FUNC(pthread_mutexattr_settype, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE, 1, \
-[Define if you have the pthread_mutexattr_settype function.]))
- AC_CHECK_FUNC(pthread_mutexattr_setkind_np, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP, 1, \
-[Define if you have the pthread_mutexattr_setkind_np function.]))
AC_CHECK_FUNC(pthread_spin_lock, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_SPIN_LOCK, 1, \
-[Define if you have the pthread_spin_lock function.]))
- test "$force_linux_pthread_rwlocks" = "yes" || {
- force_linux_pthread_rwlocks=no
- }
- case "$force_linux_pthread_rwlocks-$host_os" in
- no-linux*) # Writers may get starved
- # TODO: write a test that tests the implementation
- ;;
- *)
- AC_CHECK_FUNC(pthread_rwlock_init, \
- AC_DEFINE(ETHR_HAVE_PTHREAD_RWLOCK_INIT, 1, \
-[Define if you have a pthread_rwlock implementation that can be used.]))
- ;;
- esac
+ [ethr_have_native_spinlock=yes \
+ AC_DEFINE(ETHR_HAVE_PTHREAD_SPIN_LOCK, 1, \
+[Define if you have the pthread_spin_lock function.])])
+
+ have_sched_yield=no
+ have_librt_sched_yield=no
+ AC_CHECK_FUNC(sched_yield, [have_sched_yield=yes])
+ if test $have_sched_yield = no; then
+ AC_CHECK_LIB(rt, sched_yield,
+ [have_librt_sched_yield=yes
+ ETHR_X_LIBS="$ETHR_X_LIBS -lrt"])
+ fi
+ if test $have_sched_yield = yes || test $have_librt_sched_yield = yes; then
+ AC_DEFINE(ETHR_HAVE_SCHED_YIELD, 1, [Define if you have the sched_yield() function.])
+ AC_MSG_CHECKING([whether sched_yield() returns an int])
+ sched_yield_ret_int=no
+ AC_TRY_COMPILE([
+ #ifdef ETHR_HAVE_SCHED_H
+ #include <sched.h>
+ #endif
+ ],
+ [int sched_yield();],
+ [sched_yield_ret_int=yes])
+ AC_MSG_RESULT([$sched_yield_ret_int])
+ if test $sched_yield_ret_int = yes; then
+ AC_DEFINE(ETHR_SCHED_YIELD_RET_INT, 1, [Define if sched_yield() returns an int.])
+ fi
+ fi
+
+ have_pthread_yield=no
+ AC_CHECK_FUNC(pthread_yield, [have_pthread_yield=yes])
+ if test $have_pthread_yield = yes; then
+ AC_DEFINE(ETHR_HAVE_PTHREAD_YIELD, 1, [Define if you have the pthread_yield() function.])
+ AC_MSG_CHECKING([whether pthread_yield() returns an int])
+ pthread_yield_ret_int=no
+ AC_TRY_COMPILE([
+ #if defined(ETHR_NEED_NPTL_PTHREAD_H)
+ #include <nptl/pthread.h>
+ #elif defined(ETHR_HAVE_MIT_PTHREAD_H)
+ #include <pthread/mit/pthread.h>
+ #elif defined(ETHR_HAVE_PTHREAD_H)
+ #include <pthread.h>
+ #endif
+ ],
+ [int pthread_yield();],
+ [pthread_yield_ret_int=yes])
+ AC_MSG_RESULT([$pthread_yield_ret_int])
+ if test $pthread_yield_ret_int = yes; then
+ AC_DEFINE(ETHR_PTHREAD_YIELD_RET_INT, 1, [Define if pthread_yield() returns an int.])
+ fi
+ fi
+
+ have_pthread_rwlock_init=no
+ AC_CHECK_FUNC(pthread_rwlock_init, [have_pthread_rwlock_init=yes])
+ if test $have_pthread_rwlock_init = yes; then
+
+ ethr_have_pthread_rwlockattr_setkind_np=no
+ AC_CHECK_FUNC(pthread_rwlockattr_setkind_np,
+ [ethr_have_pthread_rwlockattr_setkind_np=yes])
+
+ if test $ethr_have_pthread_rwlockattr_setkind_np = yes; then
+ AC_DEFINE(ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP, 1, \
+[Define if you have the pthread_rwlockattr_setkind_np() function.])
+
+ AC_MSG_CHECKING([for PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP])
+ ethr_pthread_rwlock_writer_nonrecursive_initializer_np=no
+ AC_TRY_LINK([
+ #if defined(ETHR_NEED_NPTL_PTHREAD_H)
+ #include <nptl/pthread.h>
+ #elif defined(ETHR_HAVE_MIT_PTHREAD_H)
+ #include <pthread/mit/pthread.h>
+ #elif defined(ETHR_HAVE_PTHREAD_H)
+ #include <pthread.h>
+ #endif
+ ],
+ [
+ pthread_rwlockattr_t *attr;
+ return pthread_rwlockattr_setkind_np(attr,
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+ ],
+ [ethr_pthread_rwlock_writer_nonrecursive_initializer_np=yes])
+ AC_MSG_RESULT([$ethr_pthread_rwlock_writer_nonrecursive_initializer_np])
+ if test $ethr_pthread_rwlock_writer_nonrecursive_initializer_np = yes; then
+ AC_DEFINE(ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, 1, \
+[Define if you have the PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock attribute.])
+ fi
+ fi
+ fi
+
+ if test "$force_pthread_rwlocks" = "yes"; then
+
+ AC_DEFINE(ETHR_FORCE_PTHREAD_RWLOCK, 1, \
+[Define if you want to force usage of pthread rwlocks])
+
+ if test $have_pthread_rwlock_init = yes; then
+ AC_MSG_WARN([Forced usage of pthread rwlocks. Note that this implementation may suffer from starvation issues.])
+ else
+ AC_MSG_ERROR([User forced usage of pthread rwlock, but no such implementation was found])
+ fi
+ fi
+
AC_CHECK_FUNC(pthread_attr_setguardsize, \
AC_DEFINE(ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE, 1, \
[Define if you have the pthread_attr_setguardsize function.]))
+ linux_futex=no
+ AC_MSG_CHECKING([for Linux futexes])
+ AC_TRY_LINK([
+ #include <sys/syscall.h>
+ #include <unistd.h>
+ #include <linux/futex.h>
+ #include <sys/time.h>
+ ],
+ [
+ int i = 1;
+ syscall(__NR_futex, (void *) &i, FUTEX_WAKE, 1,
+ (void*)0,(void*)0, 0);
+ syscall(__NR_futex, (void *) &i, FUTEX_WAIT, 0,
+ (void*)0,(void*)0, 0);
+ return 0;
+ ],
+ linux_futex=yes)
+ AC_MSG_RESULT([$linux_futex])
+ test $linux_futex = yes && AC_DEFINE(ETHR_HAVE_LINUX_FUTEX, 1, [Define if you have a linux futex implementation.])
+
+ AC_MSG_CHECKING([for GCC atomic operations])
+ ethr_have_gcc_atomic_ops=no
+ AC_TRY_LINK([],
+ [
+ long res;
+ volatile long val;
+ res = __sync_val_compare_and_swap(&val, (long) 1, (long) 0);
+ res = __sync_add_and_fetch(&val, (long) 1);
+ res = __sync_sub_and_fetch(&val, (long) 1);
+ res = __sync_fetch_and_and(&val, (long) 1);
+ res = __sync_fetch_and_or(&val, (long) 1);
+ ],
+ [ethr_have_native_atomics=yes
+ ethr_have_gcc_atomic_ops=yes])
+ AC_MSG_RESULT([$ethr_have_gcc_atomic_ops])
+ test $ethr_have_gcc_atomic_ops = yes && AC_DEFINE(ETHR_HAVE_GCC_ATOMIC_OPS, 1, [Define if you have gcc atomic operations])
+
+ case "$host_cpu" in
+ sun4u | sparc64 | sun4v)
+ ethr_have_native_atomics=yes;;
+ i86pc | i386 | i486 | i586 | i686 | x86_64 | amd64)
+ ethr_have_native_atomics=yes;;
+ macppc | ppc | "Power Macintosh")
+ ethr_have_native_atomics=yes;;
+ tile)
+ ethr_have_native_atomics=yes;;
+ *)
+ ;;
+ esac
+
+ AC_MSG_CHECKING([for a usable libatomic_ops implementation])
+ case "x$with_libatomic_ops" in
+ xno | xyes | x)
+ libatomic_ops_include=
+ ;;
+ *)
+ if test -d "${with_libatomic_ops}/include"; then
+ libatomic_ops_include="-I$with_libatomic_ops/include"
+ CPPFLAGS="$CPPFLAGS $libatomic_ops_include"
+ else
+ AC_MSG_ERROR([libatomic_ops include directory $with_libatomic_ops/include not found])
+ fi;;
+ esac
+ ethr_have_libatomic_ops=no
+ AC_TRY_LINK([#include "atomic_ops.h"],
+ [
+ volatile AO_t x;
+ AO_t y;
+ int z;
+
+ AO_nop_full();
+ AO_store(&x, (AO_t) 0);
+ z = AO_load(&x);
+ z = AO_compare_and_swap(&x, (AO_t) 0, (AO_t) 1);
+ ],
+ [ethr_have_native_atomics=yes
+ ethr_have_libatomic_ops=yes])
+ AC_MSG_RESULT([$ethr_have_libatomic_ops])
+ if test $ethr_have_libatomic_ops = yes; then
+ AC_CHECK_SIZEOF(AO_t, ,
+ [
+ #include <stdio.h>
+ #include "atomic_ops.h"
+ ])
+ AC_DEFINE_UNQUOTED(ETHR_SIZEOF_AO_T, $ac_cv_sizeof_AO_t, [Define to the size of AO_t if libatomic_ops is used])
+
+ AC_DEFINE(ETHR_HAVE_LIBATOMIC_OPS, 1, [Define if you have libatomic_ops atomic operations])
+ if test "x$with_libatomic_ops" != "xno" && test "x$with_libatomic_ops" != "x"; then
+ AC_DEFINE(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS, 1, [Define if you prefer libatomic_ops native ethread implementations])
+ fi
+ ETHR_DEFS="$ETHR_DEFS $libatomic_ops_include"
+ elif test "x$with_libatomic_ops" != "xno" && test "x$with_libatomic_ops" != "x"; then
+ AC_MSG_ERROR([No usable libatomic_ops implementation found])
+ fi
+
dnl Restore LIBS
LIBS=$saved_libs
dnl restore CPPFLAGS
CPPFLAGS=$saved_cppflags
+
;;
*)
;;
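
    Annotation (not part of the patch): the hunk above adds a link test for the
    GCC __sync builtins (ETHR_HAVE_GCC_ATOMIC_OPS). Purely as an illustration of
    what a successful probe provides, the fragment below exercises the same
    builtins as an atomic counter behind a minimal compare-and-swap spinlock;
    all names are invented.

        /* Illustration only: the GCC builtins the probe above links against. */
        #include <stdio.h>

        static volatile long lock_word = 0;   /* 0 = unlocked, 1 = locked */
        static volatile long counter   = 0;

        static void spin_lock(void)
        {
            /* Atomically swap 0 -> 1; spin until this thread wins. */
            while (__sync_val_compare_and_swap(&lock_word, 0L, 1L) != 0L)
                ;
        }

        static void spin_unlock(void)
        {
            /* Full barrier plus reset of the lock word to 0. */
            (void) __sync_fetch_and_and(&lock_word, 0L);
        }

        int main(void)
        {
            spin_lock();
            (void) __sync_add_and_fetch(&counter, 1L);  /* atomic increment */
            (void) __sync_sub_and_fetch(&counter, 0L);  /* atomic read-modify-write */
            spin_unlock();
            printf("counter = %ld\n", (long) counter);
            return 0;
        }
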
@@ -862,16 +1070,49 @@ fi
if test "x$ETHR_THR_LIB_BASE" != "x"; then
ETHR_DEFS="-DUSE_THREADS $ETHR_DEFS"
- ETHR_LIBS="-l$ethr_lib_name $ETHR_X_LIBS"
+ ETHR_LIBS="-l$ethr_lib_name -lerts_internal_r $ETHR_X_LIBS"
ETHR_LIB_NAME=$ethr_lib_name
fi
AC_CHECK_SIZEOF(void *)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF_PTR, $ac_cv_sizeof_void_p, [Define to the size of pointers])
-if test "X$disable_native_ethr_impls" = "Xyes"; then
- AC_DEFINE(ETHR_DISABLE_NATIVE_IMPLS, 1, [Define if you want to disable native ethread implementations])
-fi
+AC_ARG_ENABLE(native-ethr-impls,
+ AS_HELP_STRING([--disable-native-ethr-impls],
+ [disable native ethread implementations]),
+[ case "$enableval" in
+ no) disable_native_ethr_impls=yes ;;
+ *) disable_native_ethr_impls=no ;;
+ esac ], disable_native_ethr_impls=no)
+
+test "X$disable_native_ethr_impls" = "Xyes" &&
+ AC_DEFINE(ETHR_DISABLE_NATIVE_IMPLS, 1, [Define if you want to disable native ethread implementations])
+
+AC_ARG_ENABLE(prefer-gcc-native-ethr-impls,
+ AS_HELP_STRING([--enable-prefer-gcc-native-ethr-impls],
+ [enable prefer gcc native ethread implementations]),
+[ case "$enableval" in
+ yes) enable_prefer_gcc_native_ethr_impls=yes ;;
+ *) enable_prefer_gcc_native_ethr_impls=no ;;
+ esac ], enable_prefer_gcc_native_ethr_impls=no)
+
+test $enable_prefer_gcc_native_ethr_impls = yes &&
+ AC_DEFINE(ETHR_PREFER_GCC_NATIVE_IMPLS, 1, [Define if you prefer gcc native ethread implementations])
+
+AC_ARG_ENABLE(ethread-pre-pentium4-compatibility,
+ AS_HELP_STRING([--enable-ethread-pre-pentium4-compatibility],
+ [enable compatibility with x86 processors before pentium 4 (back to 486) in the ethread library]),
+[ case "$enableval" in
+ yes) enable_ethread_pre_pentium4_compatibility=yes ;;
+ *) enable_ethread_pre_pentium4_compatibility=no ;;
+ esac ], enable_ethread_pre_pentium4_compatibility=no)
+
+test $enable_ethread_pre_pentium4_compatibility = yes &&
+ AC_DEFINE(ETHR_PRE_PENTIUM4_COMPAT, 1, [Define if you want compatibility with x86 processors before pentium4.])
+
+AC_ARG_WITH(libatomic_ops,
+ AS_HELP_STRING([--with-libatomic_ops=PATH],
+ [use libatomic_ops with the ethread library]))
AC_DEFINE(ETHR_HAVE_ETHREAD_DEFINES, 1, \
[Define if you have all ethread defines])
@@ -881,6 +1122,7 @@ AC_SUBST(ETHR_LIBS)
AC_SUBST(ETHR_LIB_NAME)
AC_SUBST(ETHR_DEFS)
AC_SUBST(ETHR_THR_LIB_BASE)
+AC_SUBST(ETHR_THR_LIB_BASE_DIR)
])
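
    Annotation (not part of the patch): the aclocal.m4 additions above also probe
    for the Linux futex syscall (ETHR_HAVE_LINUX_FUTEX). The stand-alone sketch
    below is not ethread code; it only wraps the raw wait/wake calls that the
    probe links against, with invented helper names and no error handling.

        /* Illustration only: trivial wrappers around the futex syscall. */
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <linux/futex.h>
        #include <sys/time.h>

        static long futex_wait(int *uaddr, int expected)
        {
            /* Blocks only while *uaddr still equals 'expected'. */
            return syscall(__NR_futex, uaddr, FUTEX_WAIT, expected,
                           (void *) 0, (void *) 0, 0);
        }

        static long futex_wake(int *uaddr, int nwaiters)
        {
            /* Wakes up to 'nwaiters' threads blocked on uaddr. */
            return syscall(__NR_futex, uaddr, FUTEX_WAKE, nwaiters,
                           (void *) 0, (void *) 0, 0);
        }

        int main(void)
        {
            int word = 1;
            futex_wake(&word, 1);   /* no waiters: returns immediately */
            futex_wait(&word, 0);   /* word != 0: returns without blocking */
            return 0;
        }
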
diff --git a/erts/autoconf/win32.config.cache b/erts/autoconf/win32.config.cache.static
index 31dfe510cd..31dfe510cd 100755
--- a/erts/autoconf/win32.config.cache
+++ b/erts/autoconf/win32.config.cache.static
diff --git a/erts/configure.in b/erts/configure.in
index 5fa1245b13..6b494ef127 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -61,6 +61,9 @@ if test x"${ERL_TOP}/erts" != x"$srcdir"; then
fi
erl_top=${ERL_TOP}
+# Remove old configuration information
+/bin/rm -f "$ERL_TOP/erts/CONF_INFO"
+
# echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# echo X
# echo "X srcdir = $srcdir"
@@ -87,6 +90,13 @@ else
host_os=$host
fi
+if test "$cross_compiling" = "yes"; then
+ CROSS_COMPILING=yes
+else
+ CROSS_COMPILING=no
+fi
+AC_SUBST(CROSS_COMPILING)
+
ERL_XCOMP_SYSROOT_INIT
AC_ISC_POSIX
@@ -123,6 +133,14 @@ AC_ARG_ENABLE(threads,
*) enable_threads=yes ;;
esac ], enable_threads=unknown)
+AC_ARG_ENABLE(halfword-emulator,
+[ --enable-halfword-emulator enable halfword emulator (only for 64bit builds)
+ --disable-halfword-emulator disable halfword emulator (only for 64bit builds)],
+[ case "$enableval" in
+ no) enable_halfword_emulator=no ;;
+ *) enable_halfword_emulator=yes ;;
+ esac ], enable_halfword_emulator=unknown)
+
AC_ARG_ENABLE(smp-support,
[ --enable-smp-support enable smp support
--disable-smp-support disable smp support],
@@ -190,9 +208,6 @@ AC_ARG_ENABLE(native-libs,
AC_ARG_ENABLE(tsp,
[ --enable-tsp compile tsp app])
-AC_ARG_ENABLE(elib-malloc,
-[ --enable-elib-malloc use elib_malloc instead of normal malloc])
-
AC_ARG_ENABLE(fp-exceptions,
[ --enable-fp-exceptions Use hardware floating point exceptions (default if hipe enabled)],
[ case "$enableval" in
@@ -269,20 +284,6 @@ AC_ARG_ENABLE(clock-gettime,
*) clock_gettime_correction=yes ;;
esac ], clock_gettime_correction=unknown)
-AC_ARG_ENABLE(native-ethr-impls,
-[ --enable-native-ethr-impls enable native ethread implementations
- --disable-native-ethr-impls disable native ethread implementations],
-[ case "$enableval" in
- no) disable_native_ethr_impls=yes ;;
- *) disable_native_ethr_impls=no ;;
- esac ], disable_native_ethr_impls=no)
-
-dnl Defined in libraries/megaco/configure.in but we need it here
-dnl also in order to show it to the "top user"
-
-AC_ARG_ENABLE(megaco_flex_scanner_lineno,
-[ --disable-megaco-flex-scanner-lineno disable megaco flex scanner lineno])
-
dnl Magic test for clearcase.
OTP_RELEASE=
if test "${ERLANG_COMMERCIAL_BUILD}" != ""; then
@@ -749,6 +750,25 @@ esac
AC_SUBST(LIBCARBON)
+dnl Check if we should/can build a halfword emulator
+
+AC_MSG_CHECKING(if we are building a halfword emulator (32bit heap on 64bit machine))
+if test "$enable_halfword_emulator" = "yes"; then
+ if test "$ARCH" = "amd64"; then
+ AC_DEFINE(HALFWORD_HEAP_EMULATOR, [1],
+ [Define if building a halfword-heap 64bit emulator])
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_ERROR(no; halfword emulator not supported on this architecture)
+ fi
+else
+ AC_MSG_RESULT([no])
+fi
+
+
+
+
+
dnl some tests below will call this if we haven't already - and autoconf
dnl can't handle those tests being done conditionally at runtime
AC_PROG_CPP
@@ -794,13 +814,14 @@ fi
AC_CHECK_PROGS(XSLTPROC, xsltproc)
if test -z "$XSLTPROC"; then
echo "xsltproc" >> doc/CONF_INFO
- AC_MSG_WARN([No 'xsltproc' command found: the documentation can not be built])
+ AC_MSG_WARN([No 'xsltproc' command found: the documentation cannot be built])
fi
AC_CHECK_PROGS(FOP, fop)
if test -z "$FOP"; then
+ FOP="$ERL_TOP/make/fakefop"
echo "fop" >> doc/CONF_INFO
- AC_MSG_WARN([No 'fop' command found: the documentation can not be built])
+ AC_MSG_WARN([No 'fop' command found: going to generate placeholder PDF files])
fi
dnl
@@ -1021,11 +1042,48 @@ if test $ERTS_BUILD_SMP_EMU = yes; then
fi
AC_DEFINE(ERTS_HAVE_SMP_EMU, 1, [Define if the smp emulator is built])
+
+ case "$ethr_have_native_atomics-$ethr_have_native_spinlock" in
+ yes-*)
+ ;;
+
+ no-yes)
+
+ test -f "$ERL_TOP/erts/CONF_INFO" ||
+ echo "" > "$ERL_TOP/erts/CONF_INFO"
+ cat >> $ERL_TOP/erts/CONF_INFO <<EOF
+
+ No native atomic implementation available.
+ Fallbacks implemented using spinlocks will be
+ used. Note that the performance of the SMP
+ runtime system will suffer due to this.
+
+EOF
+ ;;
+
+ no-no)
+
+ test -f "$ERL_TOP/erts/CONF_INFO" ||
+ echo "" > "$ERL_TOP/erts/CONF_INFO"
+ cat >> "$ERL_TOP/erts/CONF_INFO" <<EOF
+
+ No native atomic implementation and no native
+ spinlock implementation available. Fallbacks
+ implemented using mutexes will be used. Note
+ that the performance of the SMP runtime system
+ will suffer much due to this.
+
+EOF
+ ;;
+
+ esac
+
enable_threads=force
fi
AC_SUBST(ERTS_BUILD_SMP_EMU)
+AC_CHECK_FUNCS([posix_fadvise])
#
@@ -1120,17 +1178,15 @@ else
enable_child_waiter_thread=yes
;;
linux*)
- AC_DEFINE(USE_RECURSIVE_MALLOC_MUTEX,[1],
- [Define if malloc should use a recursive mutex])
AC_MSG_CHECKING([whether dlopen() needs to be called before first call to dlerror()])
- if test "x$ETHR_THR_LIB_BASE_NAME" != "xnptl"; then
+ if test "x$ETHR_THR_LIB_BASE_TYPE" != "xposix_nptl"; then
AC_DEFINE(ERTS_NEED_DLOPEN_BEFORE_DLERROR,[1],
[Define if dlopen() needs to be called before first call to dlerror()])
AC_MSG_RESULT(yes)
else
AC_MSG_RESULT(no)
fi
- if test "x$ETHR_THR_LIB_BASE_NAME" != "xnptl"; then
+ if test "x$ETHR_THR_LIB_BASE_TYPE" != "xposix_nptl"; then
# Child waiter thread cannot be enabled
disable_child_waiter_thread=yes
enable_child_waiter_thread=no
@@ -1192,13 +1248,7 @@ fi
AC_SUBST(EMU_LOCK_CHECKING)
-ERTS_INTERNAL_X_LIBS=
-
-AC_CHECK_LIB(kstat, kstat_open,
-[AC_DEFINE(HAVE_KSTAT, 1, [Define if you have kstat])
-ERTS_INTERNAL_X_LIBS="$ERTS_INTERNAL_X_LIBS -lkstat"])
-
-AC_SUBST(ERTS_INTERNAL_X_LIBS)
+ERL_INTERNAL_LIBS
dnl THR_LIBS and THR_DEFS are only used by odbc
THR_LIBS=$ETHR_X_LIBS
@@ -1666,9 +1716,12 @@ AC_CHECK_FUNCS([getnameinfo getipnodebyname getipnodebyaddr gethostbyname2])
AC_CHECK_FUNCS([ieee_handler fpsetmask finite isnan isinf res_gethostbyname dlopen \
pread pwrite writev memmove strerror strerror_r strncasecmp \
- gethrtime localtime_r gmtime_r mremap memcpy mallopt \
+ gethrtime localtime_r gmtime_r mmap mremap memcpy mallopt \
sbrk _sbrk __sbrk brk _brk __brk \
flockfile fstat strlcpy strlcat setsid posix2time setlocale nl_langinfo poll])
+
+AC_CHECK_DECLS([posix2time],,,[#include <time.h>])
+
if test "X$host" = "Xwin32"; then
ac_cv_func_setvbuf_reversed=yes
fi
@@ -1698,7 +1751,6 @@ if test $disable_vfork = true; then
fi
AC_FUNC_VPRINTF
-AC_FUNC_MMAP
dnl The AC_DEFINEs are necessary for autoheader to work. :-(
dnl for gzio
@@ -1730,6 +1782,12 @@ fi
dnl Need by run_erl.
AC_CHECK_FUNCS([openpty])
+dnl fdatasync syscall (Unix only)
+AC_CHECK_FUNCS([fdatasync])
+
+dnl Find which C libraries are required to use fdatasync
+AC_SEARCH_LIBS(fdatasync, [rt])
+
dnl ----------------------------------------------------------------------
dnl Checks for features/quirks in the system that affects Erlang.
dnl ----------------------------------------------------------------------
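
    Annotation (not part of the patch): configure.in now checks for fdatasync()
    and searches librt for it. As a rough sketch of the pattern such a check
    enables, and not the actual driver code from this patch, a data-sync helper
    guarded by the HAVE_FDATASYNC macro that AC_CHECK_FUNCS defines could look
    like this:

        /* Illustration only: flush file data with fdatasync() when configure
         * found it, falling back to fsync() otherwise. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        static int sync_file_data(int fd)
        {
        #ifdef HAVE_FDATASYNC
            return fdatasync(fd);  /* data only; may skip pure metadata updates */
        #else
            return fsync(fd);      /* portable fallback: data and metadata */
        #endif
        }

        int main(void)
        {
            int fd = open("testfile.tmp", O_CREAT | O_WRONLY | O_TRUNC, 0644);
            if (fd < 0) {
                perror("open");
                return 1;
            }
            if (write(fd, "hello\n", 6) != 6)
                perror("write");
            if (sync_file_data(fd) != 0)
                perror("sync");
            close(fd);
            return 0;
        }
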
@@ -2883,7 +2941,7 @@ case $enable_hybrid_heap-$host_os in
no-*)
AC_MSG_RESULT([no; disabled by user])
ERTS_BUILD_HYBRID_EMU=no;;
- *-win32|*-vxworks|*-ose) # vxworks and ose have their own "configure scripts"...
+ *-win32|*-vxworks) # vxworks have their own "configure scripts"...
AC_MSG_RESULT([no; default on this platform])
ERTS_BUILD_HYBRID_EMU=no;;
*)
@@ -2946,17 +3004,6 @@ if test "x$HIPE_ENABLED" = "xyes" ; then
fi
#
-# Check if we should use elib_malloc.
-#
-
-if test X${enable_elib_malloc} = Xyes; then
- AC_DEFINE(ENABLE_ELIB_MALLOC,[],[Define to enable use of elib_malloc (a malloc() replacement)])
- AC_DEFINE(ELIB_HEAP_SBRK,[],[Elib sbrk])
- AC_DEFINE(ELIB_ALLOC_IS_CLIB,[],[Use elib malloc as clib])
- AC_DEFINE(ELIB_SORTED_BLOCKS,[],[Define to enable the use of sorted blocks when using elib_malloc])
-fi
-
-#
# Check for working poll().
#
AC_MSG_CHECKING([for working poll()])
@@ -3568,10 +3615,10 @@ dnl Should one use EXEEXT or ac_exeext?
if test -f "$erl_xcomp_sysroot$SSL_BINDIR/openssl$EXEEXT"; then
if test "$cross_compiling" = "yes"; then
dnl Cannot test it; hope it is working...
- OPENSSL_CMD="$SSL_BINDIR/openssl"
+ OPENSSL_CMD="$erl_xcomp_sysroot$SSL_BINDIR/openssl$EXEEXT"
else
- if "$SSL_BINDIR/openssl" version > /dev/null 2>&1; then
- OPENSSL_CMD="$SSL_BINDIR/openssl"
+ if "$SSL_BINDIR/openssl$EXEEXT" version > /dev/null 2>&1; then
+ OPENSSL_CMD="$SSL_BINDIR/openssl$EXEEXT"
else
is_real_ssl=no
fi
@@ -3580,6 +3627,9 @@ dnl Should one use EXEEXT or ac_exeext?
is_real_ssl=no
fi
if test "x$is_real_ssl" = "xyes" ; then
+ if test "$MIXED_CYGWIN" = "yes"; then
+ OPENSSL_CMD=`cygpath -s -m "$OPENSSL_CMD"` 2> /dev/null
+ fi
SSL_INCLUDE="-I$dir/include"
old_CPPFLAGS=$CPPFLAGS
CPPFLAGS=$SSL_INCLUDE
@@ -3702,13 +3752,6 @@ dnl so it is - be adoptable
fi
;;
*)
- if test "$cross_compiling" = "yes"; then
- case "$with_ssl" in
- "$erl_xcomp_sysroot"*) ;;
- *) AC_MSG_ERROR([Invalid path to option --with-ssl=PATH (not a subdirectory to cross system root)]);;
- esac
- fi
-
# Option given with PATH to package
if test ! -d "$with_ssl" ; then
AC_MSG_ERROR(Invalid path to option --with-ssl=PATH)
diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile
index 3dfefa2001..6578923fe1 100644
--- a/erts/doc/src/Makefile
+++ b/erts/doc/src/Makefile
@@ -1,19 +1,19 @@
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 1997-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
include $(ERL_TOP)/make/target.mk
@@ -45,6 +45,7 @@ XML_REF1_FILES = epmd.xml \
XML_REF3_FILES = \
driver_entry.xml \
+ erl_nif.xml \
erl_set_memory_block.xml \
erl_driver.xml \
erl_prim_loader.xml \
diff --git a/erts/doc/src/alt_dist.xml b/erts/doc/src/alt_dist.xml
index a929aec97f..36d83a685b 100644
--- a/erts/doc/src/alt_dist.xml
+++ b/erts/doc/src/alt_dist.xml
@@ -434,7 +434,7 @@
(13) Word received; /* Bytes received */
(14) struct uds_data *partner; /* The partner in an accept/listen pair */
(15) struct uds_data *next; /* Next structure in list */
-(16) /* The input buffer and it's data */
+(16) /* The input buffer and its data */
(17) int buffer_size; /* The allocated size of the input buffer */
(18) int buffer_pos; /* Current position in input buffer */
(19) int header_pos; /* Where the current header is in the
@@ -825,7 +825,7 @@
I/O vector itself. One can use this to allocate the binaries
for the queue "manually" in the driver, but we'll just fill
the binary array with NULL values (line 7) , which will make
- the runtime system allocate it's own buffers when we call
+ the runtime system allocate its own buffers when we call
<c><![CDATA[driver_enqv]]></c> (line 37).</p>
<p></p>
<p>The routine builds an I/O vector containing the header bytes
@@ -942,7 +942,7 @@
between invocations of Erlang nodes with the same name.</item>
</list>
<p>The control interface gets a buffer to return its value in,
- but is free to allocate it's own buffer is the provided one is
+ but is free to allocate its own buffer if the provided one is
to small. Here is the code for <c><![CDATA[uds_control]]></c>:</p>
<code type="none"><![CDATA[
( 1) static int uds_control(ErlDrvData handle, unsigned int command,
@@ -1042,7 +1042,7 @@
<c><![CDATA[net_kernel:start/1]]></c> function, which is useful as it starts
the distribution on a running system, where tracing/debugging
can be performed. The <c><![CDATA[net_kernel:start/1]]></c> routine takes a
- list as it's single argument. The lists first element should be
+ list as its single argument. The list's first element should be
the node name (without the "@hostname") as an atom, and the second (and
last) element should be one of the atoms <c><![CDATA[shortnames]]></c> or
<c><![CDATA[longnames]]></c>. In the example case <c><![CDATA[shortnames]]></c> is
diff --git a/erts/doc/src/driver.xml b/erts/doc/src/driver.xml
index 12c79aee90..006a6160de 100644
--- a/erts/doc/src/driver.xml
+++ b/erts/doc/src/driver.xml
@@ -413,7 +413,7 @@ select(Port, Query) ->
<title>Sample asynchronous driver</title>
<p>Sometimes database queries can take long time to
complete, in our <c><![CDATA[pg_sync]]></c> driver, the emulator
- halts while the driver is doing it's job. This is
+ halts while the driver is doing its job. This is
often not acceptable, since no other Erlang processes
gets a chance to do anything. To improve on our
postgres driver, we reimplement it using the asynchronous
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index bb741c7836..f477280a6f 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -41,6 +41,26 @@
to scroll back to text which has scrolled off the screen.
The <c><![CDATA[erl]]></c> program must be used, however, in pipelines or if
you want to redirect standard input or output.</p>
+ <note><p>As of ERTS version 5.8 (OTP-R14A) the runtime system will by
+ default bind schedulers to logical processors using the
+ <c>default_bind</c> bind type if the amount of schedulers is
+ at least equal to the amount of logical processors configured,
+ binding of schedulers is supported, and a CPU topology is
+ available at startup.
+ </p><p>
+ If the Erlang runtime system is the only operating system
+ process that binds threads to logical processors, this
+ improves the performance of the runtime system. However,
+ if other operating system processes (as for example
+ another Erlang runtime system) also bind threads to
+ logical processors, there might be a performance penalty
+ instead. If this is the case, you are advised to
+ unbind the schedulers using the
+ <seealso marker="#+sbt">+sbtu</seealso> command line argument,
+ or by invoking
+ <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type,
+ unbound)</seealso>.</p>
+ </note>
</description>
<funcs>
<func>
@@ -583,6 +603,24 @@
<item>
<p>Force ets memory block to be moved on realloc.</p>
</item>
+ <tag><marker id="+rg"><c><![CDATA[+rg ReaderGroupsLimit]]></c></marker></tag>
+ <item>
+ <p>Limits the amount of reader groups used by read/write locks
+ optimized for read operations in the Erlang runtime system. By
+ default the reader groups limit equals 8.</p>
+ <p>When the amount of schedulers is less than or equal to the reader
+ groups limit, each scheduler has its own reader group. When the
+ amount of schedulers is larger than the reader groups limit,
+ schedulers share reader groups. Shared reader groups degrade
+ read lock and read unlock performance while a large amount of
+ reader groups degrades write lock performance, so the limit is a
+ tradeoff between performance for read operations and performance
+ for write operations. Each reader group currently consumes 64 bytes
+ in each read/write lock. Also note that a runtime system using
+ shared reader groups benefits from <seealso marker="#+sbt">binding
+ schedulers to logical processors</seealso>, since the reader groups
+ are distributed better between schedulers.</p>
+ </item>
<tag><marker id="+S"><c><![CDATA[+S Schedulers:SchedulerOnline]]></c></marker></tag>
<item>
<p>Sets the amount of scheduler threads to create and scheduler
@@ -647,8 +685,8 @@
<seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, default_bind)</seealso>.
</p></item>
</taglist>
- <p>Binding of schedulers are currently only supported on newer
- Linux and Solaris systems.</p>
+ <p>Binding of schedulers is currently only supported on newer
+ Linux, Solaris, and Windows systems.</p>
<p>If no CPU topology is available when the <c>+sbt</c> flag
is processed and <c>BindType</c> is any other type than
<c>u</c>, the runtime system will fail to start. CPU
@@ -657,6 +695,22 @@
that the <c>+sct</c> flag may have to be passed before the
<c>+sbt</c> flag on the command line (in case no CPU topology
has been automatically detected).</p>
+ <p>The runtime system will by default bind schedulers to logical
+ processors using the <c>default_bind</c> bind type if the amount
+ of schedulers is at least equal to the amount of logical
+ processors configured, binding of schedulers is supported,
+ and a CPU topology is available at startup.
+ </p>
+ <p><em>NOTE:</em> If the Erlang runtime system is the only operating
+ system process that binds threads to logical processors, this
+ improves the performance of the runtime system. However, if other
+ operating system processes (as for example another Erlang runtime
+ system) also bind threads to logical processors, there might be a
+ performance penalty instead. If this is the case, you are advised
+ to unbind the schedulers using the <c>+sbtu</c> command line
+ argument, or by invoking
+ <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type,
+ unbound)</seealso>.</p>
<p>For more information, see
<seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, SchedulerBindType)</seealso>.
</p>
diff --git a/erts/doc/src/erl_dist_protocol.xml b/erts/doc/src/erl_dist_protocol.xml
index 5978af178a..1fe7ac7ecd 100644
--- a/erts/doc/src/erl_dist_protocol.xml
+++ b/erts/doc/src/erl_dist_protocol.xml
@@ -206,7 +206,7 @@ By default EPMD listens on port 4369.
<section>
<title>Unregister a node from the EPMD</title>
<p>
- A node unregister itself from the EPMD by simply closing the
+ A node unregisters itself from the EPMD by simply closing the
TCP connection towards EPMD established when the node was registered.
</p>
</section>
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml
index 5061230a33..497a2fa01d 100644
--- a/erts/doc/src/erl_driver.xml
+++ b/erts/doc/src/erl_driver.xml
@@ -781,7 +781,7 @@ typedef struct ErlIOVec {
<marker id="driver_get_now"></marker>
<p>This function reads a timestamp into the memory pointed to by
the parameter <c>now</c>. See the description of <seealso marker="#ErlDrvNowData">ErlDrvNowData</seealso> for
- specification of it's fields. </p>
+ specification of its fields. </p>
<p>The return value is 0 unless the <c>now</c> pointer is not
valid, in which case it is &lt; 0. </p>
</desc>
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index 0dd46a951a..8e4d8130f5 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -34,18 +34,29 @@
<lib>erl_nif</lib>
<libsummary>API functions for an Erlang NIF library</libsummary>
<description>
- <warning><p>The NIF concept was introduced in R13B03 as an
- EXPERIMENTAL feature. The interfaces may be changed in any way
- in coming releases. The plan is however to lift the experimental label and
- maintain interface backward compatibility from R14B.</p>
- <p>Incompatible changes in <em>R13B04</em>:</p>
+ <note><p>The NIF concept is officially supported from R14B. NIF source code
+ written for earlier experimental versions might need adaptation to run on R14B.</p>
+ <p>No incompatible changes between <em>R14B</em> and R14A.</p>
+ <p>Incompatible changes between <em>R14A</em> and R13B04:</p>
+ <list>
+ <item>Environment argument removed for <c>enif_alloc</c>,
+ <c>enif_realloc</c>, <c>enif_free</c>, <c>enif_alloc_binary</c>,
+ <c>enif_realloc_binary</c>, <c>enif_release_binary</c>,
+ <c>enif_alloc_resource</c>, <c>enif_release_resource</c>,
+ <c>enif_is_identical</c> and <c>enif_compare</c>.</item>
+ <item>Character encoding argument added to <c>enif_get_atom</c>
+ and <c>enif_make_existing_atom</c>.</item>
+ <item>Module argument added to <c>enif_open_resource_type</c>
+ while changing name spaces of resource types from global to module local.</item>
+ </list>
+ <p>Incompatible changes between <em>R13B04</em> and R13B03:</p>
<list>
<item>The function prototypes of the NIFs have changed to expect <c>argc</c> and <c>argv</c>
arguments. The arity of a NIF is by that no longer limited to 3.</item>
<item><c>enif_get_data</c> renamed as <c>enif_priv_data</c>.</item>
<item><c>enif_make_string</c> got a third argument for character encoding.</item>
</list>
- </warning>
+ </note>
<p>A NIF library contains native implementation of some functions
of an Erlang module. The native implemented functions (NIFs) are
@@ -109,9 +120,9 @@ ok
the new directive <seealso
marker="doc/reference_manual:code_loading#on_load">on_load</seealso> to automatically
load the NIF library when the module is loaded.</p>
- <note><p>A NIF must be exported or used locally by the module (or both).
- An unused local stub function will be optimized away by the compiler
- causing loading of the NIF library to fail.</p>
+ <note><p>A NIF does not have to be exported, it can be local to the module.
+ Note however that unused local stub functions will be optimized
+ away by the compiler causing loading of the NIF library to fail.</p>
</note>
<p>A loaded NIF library is tied to the Erlang module code version
that loaded it. If the module is upgraded with a new version, the
@@ -122,7 +133,7 @@ ok
will be shared as well. To avoid unintentionally shared static
data, each Erlang module code can keep its own private data. This
private data can be set when the NIF library is loaded and
- then retrieved by calling <seealso marker="#enif_priv_data">enif_priv_data()</seealso>.</p>
+ then retrieved by calling <seealso marker="#enif_priv_data">enif_priv_data</seealso>.</p>
<p>There is no way to explicitly unload a NIF library. A library will be
automatically unloaded when the module code that it belongs to is purged
by the code server. A NIF library will also be unloaded if it is replaced
@@ -137,14 +148,20 @@ ok
<taglist>
<tag>Read and write Erlang terms</tag>
<item><p>Any Erlang terms can be passed to a NIF as function arguments and
- be returned as function return values. The terms are of C-type <c>ERL_NIF_TERM</c>
+ be returned as function return values. The terms are of C-type
+ <seealso marker="#ERL_NIF_TERM">ERL_NIF_TERM</seealso>
and can only be read or written using API functions. Most functions to read
the content of a term are prefixed <c>enif_get_</c> and usually return
true (or false) if the term was of the expected type (or not).
The functions to write terms are all prefixed <c>enif_make_</c> and usually
return the created <c>ERL_NIF_TERM</c>. There are also some functions
to query terms, like <c>enif_is_atom</c>, <c>enif_is_identical</c> and
- <c>enif_compare</c>.</p></item>
+ <c>enif_compare</c>.</p>
+ <p>All terms of type <c>ERL_NIF_TERM</c> belong to an environment of type
+ <seealso marker="#ErlNifEnv">ErlNifEnv</seealso>. The lifetime of a term is
+ controlled by the lifetime of its environment object. All API functions that read
+ or write terms has the environment, that the term belongs to, as the first
+ function argument.</p></item>
<tag>Binaries</tag>
<item><p>Terms of type binary are accessed with the help of the struct type
<seealso marker="#ErlNifBinary">ErlNifBinary</seealso>
@@ -161,8 +178,10 @@ ok
<seealso marker="#enif_release_binary">enif_release_binary</seealso>
or made read-only by transferring it to an Erlang term with
<seealso marker="#enif_make_binary">enif_make_binary</seealso>.
- But it does not have do happen in the same NIF call. Read-only binaries
- does not have to be released.</p>
+ But it does not have to happen in the same NIF call. Read-only binaries
+ do not have to be released.</p>
+ <p><seealso marker="#enif_make_new_binary">enif_make_new_binary</seealso>
+ can be used as a shortcut to allocate and return a binary in the same NIF call.</p>
<p>Binaries are sequences of whole bytes. Bitstrings with an arbitrary
bit length have no support yet.</p>
</item>
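
    Annotation (not part of the patch): the paragraph above introduces
    enif_make_new_binary as a shortcut for allocating and returning a binary in
    one NIF call. A hypothetical NIF using it might look like the sketch below;
    the module and function names are invented for illustration.

        /* Illustration only: a NIF that returns the binary <<"hello">>. */
        #include <string.h>
        #include "erl_nif.h"

        static ERL_NIF_TERM hello_bin(ErlNifEnv* env, int argc,
                                      const ERL_NIF_TERM argv[])
        {
            ERL_NIF_TERM bin_term;
            /* Allocates a 5-byte binary owned by 'env' and returns a writable
             * pointer to its data. */
            unsigned char* data = enif_make_new_binary(env, 5, &bin_term);
            memcpy(data, "hello", 5);
            return bin_term;
        }

        static ErlNifFunc nif_funcs[] = {
            {"hello_bin", 0, hello_bin}
        };

        ERL_NIF_INIT(my_nif_demo, nif_funcs, NULL, NULL, NULL, NULL)
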
@@ -170,28 +189,29 @@ ok
<item><p>The use of resource objects is a way to return pointers to
native data structures from a NIF in a safe way. A resource object is
just a block of memory allocated with
- <seealso marker="#enif_alloc_resource">enif_alloc_resource()</seealso>.
+ <seealso marker="#enif_alloc_resource">enif_alloc_resource</seealso>.
A handle ("safe pointer") to this memory block can then be returned to Erlang by the use of
- <seealso marker="#enif_make_resource">enif_make_resource()</seealso>.
+ <seealso marker="#enif_make_resource">enif_make_resource</seealso>.
The term returned by <c>enif_make_resource</c>
is totally opaque in nature. It can be stored and passed between processses
on the same node, but the only real end usage is to pass it back as argument to a NIF.
- The NIF can then do <seealso marker="#enif_get_resource">enif_get_resource()</seealso>
+ The NIF can then do <seealso marker="#enif_get_resource">enif_get_resource</seealso>
and get back a pointer to the memory block that is guaranteed to still be
valid. A resource object will not be deallocated until the last handle term
has been garbage collected by the VM and the resource has been
- released with <seealso marker="#enif_release_resource">enif_release_resource()</seealso>
+ released with <seealso marker="#enif_release_resource">enif_release_resource</seealso>
(not necessarily in that order).</p>
<p>All resource objects are created as instances of some <em>resource type</em>.
This makes resources from different modules to be distinguishable.
A resource type is created by calling
- <seealso marker="#enif_open_resource_type">enif_open_resource_type()</seealso>
+ <seealso marker="#enif_open_resource_type">enif_open_resource_type</seealso>
when a library is loaded. Objects of that resource type can then later be allocated
and <c>enif_get_resource</c> verifies that the resource is of the expected type.
A resource type can have a user supplied destructor function that is
automatically called when resources of that type are released (by either
the garbage collector or <c>enif_release_resource</c>). Resource types
- are uniquely identified by a supplied name string.</p>
+ are uniquely identified by a supplied name string and the name of the
+ implementing module.</p>
<p>Resource types support upgrade in runtime by allowing a loaded NIF
library to takeover an already existing resource type and thereby
"inherit" all existing objects of that type. The destructor of the new
@@ -199,14 +219,14 @@ ok
library with the old destructor function can be safely unloaded. Existing
resource objects, of a module that is upgraded, must either be deleted
or taken over by the new NIF library. The unloading of a library will be
- postponed as long as it exists resource objects with a destructor
+ postponed as long as there exist resource objects with a destructor
function in the library.
</p>
<p>Here is a template example of how to create and return a resource object.</p>
<p/>
<code type="none">
ERL_NIF_TERM term;
- MyStruct* ptr = enif_alloc_resource(env, my_resource_type, sizeof(MyStruct));
+ MyStruct* ptr = enif_alloc_resource(my_resource_type, sizeof(MyStruct));
/* initialize struct ... */
@@ -216,21 +236,31 @@ ok
/* store 'ptr' in static variable, private data or other resource object */
}
else {
- enif_release_resource(env, obj);
+ enif_release_resource(obj);
/* resource now only owned by "Erlang" */
}
return term;
}
</code>
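
    Annotation (not part of the patch): the template above only shows the
    allocation side. As a sketch of the pieces it presupposes, using the same
    MyStruct and my_resource_type names but otherwise invented, the resource
    type could be opened in the library's load callback together with a
    destructor:

        /* Illustration only: the load()/destructor side assumed above. */
        #include "erl_nif.h"

        typedef struct { int fd; } MyStruct;

        static ErlNifResourceType* my_resource_type;

        static void my_resource_dtor(ErlNifEnv* env, void* obj)
        {
            /* Called by the VM once the last handle term has been garbage
             * collected and the resource has been released. */
            MyStruct* ptr = (MyStruct*) obj;
            (void) ptr;  /* close ptr->fd, free nested data, and so on */
        }

        /* Passed as the load callback in ERL_NIF_INIT(...). */
        static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
        {
            my_resource_type =
                enif_open_resource_type(env, NULL, "my_resource",
                                        my_resource_dtor, ERL_NIF_RT_CREATE,
                                        NULL);
            return my_resource_type == NULL;  /* 0 means successful load */
        }
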
-
+ <p>Another usage of resource objects is to create binary terms with
+ user defined memory management.
+ <seealso marker="#enif_make_resource_binary">enif_make_resource_binary</seealso>
+ will create a binary term that is connected to a resource object. The
+ destructor of the resource will be called when the binary is garbage
+ collected, at which time the binary data can be released. An example of
+ this can be a binary term consisting of data from a <c>mmap</c>'ed file.
+ The destructor can then do <c>munmap</c> to release the memory
+ region.</p>
</item>
<tag>Threads and concurrency</tag>
<item><p>A NIF is thread-safe without any explicit synchronization as
long as it acts as a pure function and only reads the supplied
arguments. As soon as you write towards a shared state either through
static variables or <seealso marker="#enif_priv_data">enif_priv_data</seealso>
- you need to supply your own explicit synchronization. Resource objects
- will also require synchronization if you treat them as mutable.</p>
+ you need to supply your own explicit synchronization. This includes terms
+ in process independent environments that are shared between threads.
+ Resource objects will also require synchronization if you treat them as
+ mutable.</p>
<p>The library initialization callbacks <c>load</c>, <c>reload</c> and
<c>upgrade</c> are all thread-safe even for shared state data.</p>
<p>Avoid doing lengthy work in NIF calls as that may degrade the
@@ -262,8 +292,8 @@ ok
<item><p><c>load</c> is called when the NIF library is loaded
and there is no previously loaded library for this module.</p>
<p><c>*priv_data</c> can be set to point to some private data
- that the library needs in able to keep a state between NIF
- calls. <c>enif_priv_data()</c> will return this pointer.
+ that the library needs in order to keep a state between NIF
+ calls. <c>enif_priv_data</c> will return this pointer.
<c>*priv_data</c> will be initialized to NULL when <c>load</c> is
called.</p>
<p><c>load_info</c> is the second argument to <seealso
@@ -315,19 +345,37 @@ ok
<item>
<p>Variables of type <c>ERL_NIF_TERM</c> can refer to any Erlang term.
This is an opaque type and values of it can only be used either as
- arguments to API functions or as return values from NIFs. A variable of
- type <c>ERL_NIF_TERM</c> is only valid until the NIF call, where it was
- obtained, returns.</p>
+      arguments to API functions or as return values from NIFs. All terms
+      of type <c>ERL_NIF_TERM</c> belong to an environment
+      (<seealso marker="#ErlNifEnv">ErlNifEnv</seealso>). A term cannot be
+      destructed individually; it is valid until its environment is destructed.</p>
</item>
<tag><marker id="ErlNifEnv"/>ErlNifEnv</tag>
<item>
- <p><c>ErlNifEnv</c> contains information about the context in
- which a NIF call is made. This pointer should not be
- dereferenced in any way, but only passed on to API
- functions. An <c>ErlNifEnv</c> pointer is only valid until
- the function, where is what supplied as argument,
- returns. There is thus useless and dangerous to store <c>ErlNifEnv</c>
- pointers in between NIF calls.</p>
+ <p><c>ErlNifEnv</c> represents an environment that can host Erlang terms.
+ All terms in an environment are valid as long as the environment is valid.
+ <c>ErlNifEnv</c> is an opaque type and pointers to it can only be passed
+      on to API functions. There are two types of environments: process
+ bound and process independent.</p>
+ <p>A <em>process bound environment</em> is passed as the first argument to all NIFs.
+ All function arguments passed to a NIF will belong to that environment.
+ The return value from a NIF must also be a term belonging to the same
+ environment.
+      In addition, a process bound environment contains transient information
+ about the calling Erlang process. The environment is only valid in the
+ thread where it was supplied as argument until the NIF returns. It is
+ thus useless and dangerous to store pointers to process bound
+      environments between NIF calls.</p>
+ <p>A <em>process independent environment</em> is created by calling
+ <seealso marker="#enif_alloc_env">enif_alloc_env</seealso>. It can be
+      used to store terms between NIF calls and to send terms with
+ <seealso marker="#enif_send">enif_send</seealso>. A process
+ independent environment with all its terms is valid until you explicitly
+      invalidate it with <seealso marker="#enif_free_env">enif_free_env</seealso>
+ or <c>enif_send</c>.</p>
+ <p>All elements of a list/tuple must belong to the same environment as the
+ list/tuple itself. Terms can be copied between environments with
+ <seealso marker="#enif_make_copy">enif_make_copy</seealso>.</p>
</item>
<tag><marker id="ErlNifFunc"/>ErlNifFunc</tag>
<item>
@@ -361,7 +409,18 @@ typedef struct {
<p><c>ErlNifBinary</c> contains transient information about an
inspected binary term. <c>data</c> is a pointer to a buffer
of <c>size</c> bytes with the raw content of the binary.</p>
+ <p>Note that <c>ErlNifBinary</c> is a semi-opaque type and you are
+ only allowed to read fields <c>size</c> and <c>data</c>.</p>
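+      <p>A minimal usage sketch (assuming the binary arrives as <c>argv[0]</c>):</p>
+      <code type="none">
+ ErlNifBinary bin;
+
+ if (!enif_inspect_binary(env, argv[0], &amp;bin))
+     return enif_make_badarg(env);
+ /* bin.data points to bin.size bytes of read-only binary content */
+ return enif_make_ulong(env, bin.size);
+      </code>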
</item>
+ <tag><marker id="ErlNifPid"/>ErlNifPid</tag>
+ <item>
+ <p><c>ErlNifPid</c> is a process identifier (pid). In contrast to
+      pid terms (instances of <c>ERL_NIF_TERM</c>), <c>ErlNifPid</c> values
+      are self-contained and not bound to any
+ <seealso marker="#ErlNifEnv">environment</seealso>. <c>ErlNifPid</c>
+ is an opaque type.</p>
+ </item>
+
<tag><marker id="ErlNifResourceType"/>ErlNifResourceType</tag>
<item>
<p>Each instance of <c>ErlNifResourceType</c> represents a class of
@@ -386,9 +445,9 @@ typedef enum {
ERL_NIF_LATIN1
}ErlNifCharEncoding;
</code>
- <p>The character encoding used in strings. The only supported
- encoding is currently <c>ERL_NIF_LATIN1</c> for iso-latin-1
- (8-bit ascii).</p>
+ <p>The character encoding used in strings and atoms. The only
+ supported encoding is currently <c>ERL_NIF_LATIN1</c> for
+ iso-latin-1 (8-bit ascii).</p>
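+      <p>For example, the text of an atom argument could be fetched like this
+      (a sketch assuming the atom arrives as <c>argv[0]</c> and fits in the
+      buffer):</p>
+      <code type="none">
+ char buf[256];
+
+ if (!enif_get_atom(env, argv[0], buf, sizeof(buf), ERL_NIF_LATIN1))
+     return enif_make_badarg(env);
+ /* buf now holds the null-terminated atom text in iso-latin-1 */
+      </code>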
</item>
<tag><marker id="ErlNifSysInfo"/>ErlNifSysInfo</tag>
<item>
@@ -396,6 +455,10 @@ typedef enum {
to return information about the runtime system. Contains currently
the exact same content as <seealso marker="erl_driver#ErlDrvSysInfo">ErlDrvSysInfo</seealso>.</p>
</item>
+ <tag><marker id="ErlNifSInt64"/>ErlNifSInt64</tag>
+ <item><p>A native signed 64-bit integer type.</p></item>
+ <tag><marker id="ErlNifUInt64"/>ErlNifUInt64</tag>
+ <item><p>A native unsigned 64-bit integer type.</p></item>
</taglist>
</section>
@@ -405,24 +468,40 @@ typedef enum {
<fsummary>Allocate dynamic memory.</fsummary>
<desc><p>Allocate memory of <c>size</c> bytes. Return NULL if allocation failed.</p></desc>
</func>
- <func><name><ret>int</ret><nametext>enif_alloc_binary(ErlNifEnv* env, unsigned size, ErlNifBinary* bin)</nametext></name>
+ <func><name><ret>int</ret><nametext>enif_alloc_binary(size_t size, ErlNifBinary* bin)</nametext></name>
<fsummary>Create a new binary.</fsummary>
- <desc><p>Allocate a new binary of size of <c>size</c>
+ <desc><p>Allocate a new binary of size <c>size</c>
bytes. Initialize the structure pointed to by <c>bin</c> to
refer to the allocated binary. The binary must either be released by
- <seealso marker="#enif_release_binary">enif_release_binary()</seealso>
+ <seealso marker="#enif_release_binary">enif_release_binary</seealso>
or ownership transferred to an Erlang term with
- <seealso marker="#enif_make_binary">enif_make_binary()</seealso>.
+ <seealso marker="#enif_make_binary">enif_make_binary</seealso>.
An allocated (and owned) <c>ErlNifBinary</c> can be kept between NIF
calls.</p>
- <p>Return false if allocation failed.</p>
+ <p>Return true on success or false if allocation failed.</p>
</desc>
</func>
- <func><name><ret>void*</ret><nametext>enif_alloc_resource(ErlNifEnv* env, ErlNifResourceType* type, unsigned size)</nametext></name>
+ <func><name><ret>ErlNifEnv*</ret><nametext>enif_alloc_env()</nametext></name>
+ <fsummary>Create a new environment</fsummary>
+ <desc><p>Allocate a new process independent environment. The environment can
+      be used to hold terms that are not bound to any process. Such terms can
+ later be copied to a process environment with
+ <seealso marker="#enif_make_copy">enif_make_copy</seealso>
+ or be sent to a process as a message with <seealso marker="#enif_send">enif_send</seealso>.</p>
+ <p>Return pointer to the new environment.</p>
+ </desc>
+ </func>
+ <func><name><ret>void*</ret><nametext>enif_alloc_resource(ErlNifResourceType* type, unsigned size)</nametext></name>
<fsummary>Allocate a memory managed resource object</fsummary>
<desc><p>Allocate a memory managed resource object of type <c>type</c> and size <c>size</c> bytes.</p></desc>
</func>
- <func><name><ret>int</ret><nametext>enif_compare(ErlNifEnv* env, ERL_NIF_TERM lhs, ERL_NIF_TERM rhs)</nametext></name>
+ <func><name><ret>void</ret><nametext>enif_clear_env(ErlNifEnv* env)</nametext></name>
+ <fsummary>Clear an environment for reuse.</fsummary>
+ <desc><p>Free all terms in an environment and clear it for reuse. The environment must
+ have been allocated with <seealso marker="#enif_alloc_env">enif_alloc_env</seealso>.
+ </p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_compare(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs)</nametext></name>
<fsummary>Compare two terms</fsummary>
<desc><p>Return an integer less than, equal to, or greater than
zero if <c>lhs</c> is found, respectively, to be less than,
@@ -432,77 +511,104 @@ typedef enum {
</func>
<func><name><ret>void</ret><nametext>enif_cond_broadcast(ErlNifCond *cnd)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_broadcast">erl_drv_cond_broadcast()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_broadcast">erl_drv_cond_broadcast</seealso>.
</p></desc>
</func>
<func><name><ret>ErlNifCond*</ret><nametext>enif_cond_create(char *name)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_create">erl_drv_cond_create()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_create">erl_drv_cond_create</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_cond_destroy(ErlNifCond *cnd)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_destroy">erl_drv_cond_destroy()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_destroy">erl_drv_cond_destroy</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_cond_signal(ErlNifCond *cnd)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_signal">erl_drv_cond_signal()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_signal">erl_drv_cond_signal</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_cond_wait(ErlNifCond *cnd, ErlNifMutex *mtx)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_wait">erl_drv_cond_wait()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_cond_wait">erl_drv_cond_wait</seealso>.
</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_equal_tids">erl_drv_equal_tids()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_equal_tids">erl_drv_equal_tids</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_free(ErlNifEnv* env, void* ptr)</nametext></name>
<fsummary>Free dynamic memory</fsummary>
<desc><p>Free memory allocated by <c>enif_alloc</c>.</p></desc>
</func>
- <func><name><ret>int</ret><nametext>enif_get_atom(ErlNifEnv* env,
- ERL_NIF_TERM term, char* buf, unsigned size)
- </nametext></name>
+ <func><name><ret>void</ret><nametext>enif_free_env(ErlNifEnv* env)</nametext></name>
+ <fsummary>Free an environment allocated with enif_alloc_env</fsummary>
+ <desc><p>Free an environment allocated with <seealso marker="#enif_alloc_env">enif_alloc_env</seealso>.
+ All terms created in the environment will be freed as well.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_get_atom(ErlNifEnv* env, ERL_NIF_TERM term, char* buf, unsigned size, ErlNifCharEncoding encode)</nametext></name>
<fsummary>Get the text representation of an atom term</fsummary>
<desc><p>Write a null-terminated string, in the buffer pointed to by
<c>buf</c> of size <c>size</c>, consisting of the string
- representation of the atom <c>term</c>. Return the number of bytes
- written (including terminating null character) or 0 if
+ representation of the atom <c>term</c> with encoding
+ <seealso marker="#ErlNifCharEncoding">encode</seealso>. Return
+      the number of bytes written (including the terminating null character), or 0 if
<c>term</c> is not an atom or if the atom text (including the terminating
null character) does not fit in <c>size</c> bytes.</p></desc>
</func>
+ <func><name><ret>int</ret><nametext>enif_get_atom_length(ErlNifEnv* env, ERL_NIF_TERM term, unsigned* len, ErlNifCharEncoding encode)</nametext></name>
+ <fsummary>Get the length of atom <c>term</c>.</fsummary>
+ <desc><p>Set <c>*len</c> to the length (number of bytes excluding
+ terminating null character) of the atom <c>term</c> with encoding
+ <c>encode</c>. Return true on success or false if <c>term</c> is not an
+ atom.</p></desc>
+ </func>
<func><name><ret>int</ret><nametext>enif_get_double(ErlNifEnv* env, ERL_NIF_TERM term, double* dp)</nametext></name>
<fsummary>Read a floating-point number term.</fsummary>
<desc><p>Set <c>*dp</c> to the floating point value of
- <c>term</c> or return false if <c>term</c> is not a float.</p></desc>
+ <c>term</c>. Return true on success or false if <c>term</c> is not a float.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_get_int(ErlNifEnv* env, ERL_NIF_TERM term, int* ip)</nametext></name>
- <fsummary>Read an integer term.</fsummary>
+ <fsummary>Read an integer term</fsummary>
<desc><p>Set <c>*ip</c> to the integer value of
- <c>term</c> or return false if <c>term</c> is not an integer or is
- outside the bounds of type <c>int</c></p></desc>
+ <c>term</c>. Return true on success or false if <c>term</c> is not an
+ integer or is outside the bounds of type <c>int</c>.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_get_int64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifSInt64* ip)</nametext></name>
+ <fsummary>Read a 64-bit integer term</fsummary>
+ <desc><p>Set <c>*ip</c> to the integer value of
+ <c>term</c>. Return true on success or false if <c>term</c> is not an
+ integer or is outside the bounds of a signed 64-bit integer.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)</nametext></name>
+      <fsummary>Read a local pid term</fsummary>
+ <desc><p>If <c>term</c> is the pid of a node local process, initialize the
+ pid variable <c>*pid</c> from it and return true. Otherwise return false.
+      No check is done to determine whether the process is alive.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_get_list_cell(ErlNifEnv* env, ERL_NIF_TERM list, ERL_NIF_TERM* head, ERL_NIF_TERM* tail)</nametext></name>
<fsummary>Get head and tail from a list</fsummary>
<desc><p>Set <c>*head</c> and <c>*tail</c> from
- <c>list</c> or return false if <c>list</c> is not a non-empty
- list.</p></desc>
+ <c>list</c> and return true, or return false if <c>list</c> is not a
+ non-empty list.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_get_list_length(ErlNifEnv* env, ERL_NIF_TERM term, unsigned* len)</nametext></name>
+ <fsummary>Get the length of list <c>term</c>.</fsummary>
+ <desc><p>Set <c>*len</c> to the length of list <c>term</c> and return true,
+ or return false if <c>term</c> is not a list.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_get_long(ErlNifEnv* env, ERL_NIF_TERM term, long int* ip)</nametext></name>
<fsummary>Read a long integer term.</fsummary>
- <desc><p>Set <c>*ip</c> to the long integer value of
- <c>term</c> or return false if <c>term</c> is not an integer or is
+ <desc><p>Set <c>*ip</c> to the long integer value of <c>term</c> and
+ return true, or return false if <c>term</c> is not an integer or is
outside the bounds of type <c>long int</c>.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type, void** objp)</nametext></name>
<fsummary>Get the pointer to a resource object</fsummary>
- <desc><p>Set <c>*objp</c> to point to the resource object referred to by <c>term</c>.
- The pointer is valid until the calling NIF returns and should not be released.</p>
- <p>Return false if <c>term</c> is not a handle to a resource object
+ <desc><p>Set <c>*objp</c> to point to the resource object referred to by <c>term</c>.</p>
+ <p>Return true on success or false if <c>term</c> is not a handle to a resource object
of type <c>type</c>.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_get_string(ErlNifEnv* env,
@@ -527,27 +633,32 @@ typedef enum {
<c>*arity</c> to the number of elements. Note that the array
is read-only and <c>(*array)[N-1]</c> will be the Nth element of
the tuple. <c>*array</c> is undefined if the arity of the tuple
- is zero.</p><p>Return false if <c>term</c> is not a
+ is zero.</p><p>Return true on success or false if <c>term</c> is not a
tuple.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_get_uint(ErlNifEnv* env, ERL_NIF_TERM term, unsigned int* ip)</nametext></name>
<fsummary>Read an unsigned integer term.</fsummary>
- <desc><p>Set <c>*ip</c> to the unsigned integer value of
- <c>term</c> or return false if <c>term</c> is not an unsigned integer or is
- outside the bounds of type <c>unsigned int</c></p></desc>
+ <desc><p>Set <c>*ip</c> to the unsigned integer value of <c>term</c> and
+ return true, or return false if <c>term</c> is not an unsigned integer or
+ is outside the bounds of type <c>unsigned int</c>.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_get_uint64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifUInt64* ip)</nametext></name>
+ <fsummary>Read an unsigned 64-bit integer term.</fsummary>
+ <desc><p>Set <c>*ip</c> to the unsigned integer value of <c>term</c> and
+ return true, or return false if <c>term</c> is not an unsigned integer or
+ is outside the bounds of an unsigned 64-bit integer.</p></desc>
</func>
-
<func><name><ret>int</ret><nametext>enif_get_ulong(ErlNifEnv* env, ERL_NIF_TERM term, unsigned long* ip)</nametext></name>
<fsummary>Read an unsigned integer term.</fsummary>
- <desc><p>Set <c>*ip</c> to the unsigned long integer value of
- <c>term</c> or return false if <c>term</c> is not an unsigned integer or is
- outside the bounds of type <c>unsigned long</c></p></desc>
+ <desc><p>Set <c>*ip</c> to the unsigned long integer value of <c>term</c>
+ and return true, or return false if <c>term</c> is not an unsigned integer or is
+ outside the bounds of type <c>unsigned long</c>.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_inspect_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term, ErlNifBinary* bin)</nametext></name>
<fsummary>Inspect the content of a binary</fsummary>
<desc><p>Initialize the structure pointed to by <c>bin</c> with
information about the binary term
- <c>bin_term</c>. Return false if <c>bin_term</c> is not a binary.</p></desc>
+ <c>bin_term</c>. Return true on success or false if <c>bin_term</c> is not a binary.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_inspect_iolist_as_binary(ErlNifEnv*
env, ERL_NIF_TERM term, ErlNifBinary* bin)
@@ -556,7 +667,7 @@ typedef enum {
<desc><p>Initialize the structure pointed to by <c>bin</c> with one
continuous buffer with the same byte content as <c>iolist</c>. As with
<c>enif_inspect_binary</c>, the data pointed to by <c>bin</c> is transient and does
- not need to be released. Return false if <c>iolist</c> is not an
+ not need to be released. Return true on success or false if <c>iolist</c> is not an
iolist.</p>
</desc>
</func>
@@ -576,7 +687,7 @@ typedef enum {
<fsummary>Determine if a term is a fun</fsummary>
<desc><p>Return true if <c>term</c> is a fun.</p></desc>
</func>
- <func><name><ret>int</ret><nametext>enif_is_identical(ErlNifEnv* env, ERL_NIF_TERM lhs, ERL_NIF_TERM rhs)</nametext></name>
+ <func><name><ret>int</ret><nametext>enif_is_identical(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs)</nametext></name>
<fsummary>Erlang operator =:=</fsummary>
<desc><p>Return true if the two terms are identical. Corresponds to the
Erlang operators <c>=:=</c> and
@@ -590,15 +701,35 @@ typedef enum {
<fsummary>Determine if a term is a port</fsummary>
<desc><p>Return true if <c>term</c> is a port.</p></desc>
</func>
-
<func><name><ret>int</ret><nametext>enif_is_ref(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
<fsummary>Determine if a term is a reference</fsummary>
<desc><p>Return true if <c>term</c> is a reference.</p></desc>
</func>
+ <func><name><ret>int</ret><nametext>enif_is_tuple(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
+ <fsummary>Determine if a term is a tuple</fsummary>
+ <desc><p>Return true if <c>term</c> is a tuple.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_is_list(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
+ <fsummary>Determine if a term is a list</fsummary>
+ <desc><p>Return true if <c>term</c> is a list.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_keep_resource(void* obj)</nametext></name>
+ <fsummary>Add a reference to a resource object</fsummary>
+ <desc><p>Add a reference to resource object <c>obj</c> obtained from
+ <seealso marker="#enif_alloc_resource">enif_alloc_resource</seealso>.
+ Each call to <c>enif_keep_resource</c> for an object must be balanced by
+ a call to <seealso marker="#enif_release_resource">enif_release_resource</seealso>
+ before the object will be destructed.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_atom(ErlNifEnv* env, const char* name)</nametext></name>
<fsummary>Create an atom term</fsummary>
- <desc><p>Create an atom term from the C-string <c>name</c>. Unlike other terms, atom
- terms may be saved and used between NIF calls.</p></desc>
+ <desc><p>Create an atom term from the null-terminated C-string <c>name</c>
+ with iso-latin-1 encoding.</p></desc>
+ </func>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_atom_len(ErlNifEnv* env, const char* name, size_t len)</nametext></name>
+ <fsummary>Create an atom term</fsummary>
+ <desc><p>Create an atom term from the string <c>name</c> with length <c>len</c>.
+ Null-characters are treated as any other characters.</p></desc>
</func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_badarg(ErlNifEnv* env)</nametext></name>
<fsummary>Make a badarg exception.</fsummary>
@@ -611,21 +742,40 @@ typedef enum {
<c>bin</c> should be considered read-only for the rest of the NIF
call and then as released.</p></desc>
</func>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)</nametext></name>
+ <fsummary>Make a copy of a term.</fsummary>
+ <desc><p>Make a copy of term <c>src_term</c>. The copy will be created in
+ environment <c>dst_env</c>. The source term may be located in any
+ environment.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_double(ErlNifEnv* env, double d)</nametext></name>
- <fsummary>Create an floating-point term</fsummary>
- <desc><p>Create an floating-point term from a <c>double</c>.</p></desc>
+ <fsummary>Create a floating-point term</fsummary>
+ <desc><p>Create a floating-point term from a <c>double</c>.</p></desc>
</func>
- <func><name><ret>int</ret><nametext>enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom)</nametext></name>
+ <func><name><ret>int</ret><nametext>enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom, ErlNifCharEncoding encode)</nametext></name>
<fsummary>Create an existing atom term</fsummary>
<desc><p>Try to create the term of an already existing atom from
- the C-string <c>name</c>. If the atom already exist store the
- term in <c>*atom</c> and return true, otherwise return
- false.</p></desc>
+ the null-terminated C-string <c>name</c> with encoding
+ <seealso marker="#ErlNifCharEncoding">encode</seealso>. If the atom
+      already exists, store the term in <c>*atom</c> and return true; otherwise
+ return false.</p></desc>
+ </func>
+ <func><name><ret>int</ret><nametext>enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len, ERL_NIF_TERM* atom, ErlNifCharEncoding encoding)</nametext></name>
+ <fsummary>Create an existing atom term</fsummary>
+ <desc><p>Try to create the term of an already existing atom from the
+ string <c>name</c> with length <c>len</c> and encoding
+      <seealso marker="#ErlNifCharEncoding">encoding</seealso>. Null-characters
+      are treated as any other characters. If the atom already exists, store the term
+ in <c>*atom</c> and return true, otherwise return false.</p></desc>
</func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_int(ErlNifEnv* env, int i)</nametext></name>
<fsummary>Create an integer term</fsummary>
<desc><p>Create an integer term.</p></desc>
</func>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_int64(ErlNifEnv* env, ErlNifSInt64 i)</nametext></name>
+ <fsummary>Create an integer term</fsummary>
+ <desc><p>Create an integer term from a signed 64-bit integer.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_list(ErlNifEnv* env, unsigned cnt, ...)</nametext></name>
<fsummary>Create a list term.</fsummary>
<desc><p>Create an ordinary list term of length <c>cnt</c>. Expects
@@ -644,7 +794,7 @@ typedef enum {
<fsummary>Create a list term.</fsummary>
<desc><p>Create an ordinary list term with length indicated by the
function name. Prefer these functions (macros) over the variadic
- <c>enif_make_list</c> to get compile time error if the number of
+ <c>enif_make_list</c> to get a compile time error if the number of
arguments does not match.</p></desc>
</func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_list_cell(ErlNifEnv* env, ERL_NIF_TERM head, ERL_NIF_TERM tail)</nametext></name>
@@ -660,6 +810,20 @@ typedef enum {
<fsummary>Create an integer term from a long int</fsummary>
<desc><p>Create an integer term from a <c>long int</c>.</p></desc>
</func>
+ <func><name><ret>unsigned char*</ret><nametext>enif_make_new_binary(ErlNifEnv* env, size_t size, ERL_NIF_TERM* termp)</nametext></name>
+ <fsummary>Allocate and create a new binary term</fsummary>
+ <desc><p>Allocate a binary of size <c>size</c> bytes and create an owning
+ term. The binary data is mutable until the calling NIF returns. This is a
+ quick way to create a new binary without having to use
+ <seealso marker="#ErlNifBinary">ErlNifBinary</seealso>. The drawbacks are
+      that the binary cannot be kept between NIF calls and it cannot be
+ reallocated.</p><p>Return a pointer to the raw binary data and set
+ <c>*termp</c> to the binary term.</p></desc>
+ </func>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_pid(ErlNifEnv* env, const ErlNifPid* pid)</nametext></name>
+ <fsummary>Make a pid term</fsummary>
+ <desc><p>Make a pid term from <c>*pid</c>.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_ref(ErlNifEnv* env)</nametext></name>
<fsummary>Create a reference.</fsummary>
<desc><p>Create a reference like <seealso marker="erlang#make_ref-0">erlang:make_ref/0</seealso>.</p></desc>
@@ -670,18 +834,47 @@ typedef enum {
obtained by <seealso marker="#enif_alloc_resource">enif_alloc_resource</seealso>.
No ownership transfer is done, the resource object still needs to be released by
<seealso marker="#enif_release_resource">enif_release_resource</seealso>.</p>
- <p>Note that the only defined behaviour when using of a resource term in
+ <p>Note that the only defined behaviour of using a resource term in
an Erlang program is to store it and send it between processes on the
same node. Other operations such as matching or <c>term_to_binary</c>
will have unpredictable (but harmless) results.</p></desc>
</func>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_resource_binary(ErlNifEnv* env, void* obj, const void* data, size_t size)</nametext></name>
+ <fsummary>Create a custom binary term</fsummary>
+ <desc><p>Create a binary term that is memory managed by a resource object
+ <c>obj</c> obtained by <seealso marker="#enif_alloc_resource">enif_alloc_resource</seealso>.
+ The returned binary term will consist of <c>size</c> bytes pointed to
+ by <c>data</c>. This raw binary data must be kept readable and unchanged
+ until the destructor of the resource is called. The binary data may be
+      stored outside the resource object, in which case it is the responsibility
+ of the destructor to release the data.</p>
+ <p>Several binary terms may be managed by the same resource object. The
+ destructor will not be called until the last binary is garbage collected.
+ This can be useful as a way to return different parts of a larger binary
+ buffer.</p>
+ <p>As with <seealso marker="#enif_make_resource">enif_make_resource</seealso>,
+ no ownership transfer is done. The resource still needs to be released with
+ <seealso marker="#enif_release_resource">enif_release_resource</seealso>.</p>
+ </desc>
+ </func>
+ <func><name><ret>ErlNifPid*</ret><nametext>enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)</nametext></name>
+ <fsummary>Get the pid of the calling process.</fsummary>
+ <desc><p>Initialize the pid variable <c>*pid</c> to represent the
+ calling process. Return <c>pid</c>.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_string(ErlNifEnv* env, const char* string, ErlNifCharEncoding encoding)</nametext></name>
<fsummary>Create a string.</fsummary>
<desc><p>Create a list containing the characters of the
null-terminated string <c>string</c> with encoding <seealso marker="#ErlNifCharEncoding">encoding</seealso>.</p></desc>
</func>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_string_len(ErlNifEnv* env, const char* string, size_t len, ErlNifCharEncoding encoding)</nametext></name>
+ <fsummary>Create a string.</fsummary>
+ <desc><p>Create a list containing the characters of the string <c>string</c> with
+ length <c>len</c> and encoding <seealso marker="#ErlNifCharEncoding">encoding</seealso>.
+ Null-characters are treated as any other characters.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_sub_binary(ErlNifEnv*
- env, ERL_NIF_TERM bin_term, unsigned pos, unsigned size)</nametext></name>
+ env, ERL_NIF_TERM bin_term, size_t pos, size_t size)</nametext></name>
<fsummary>Make a subbinary term.</fsummary>
<desc><p>Make a subbinary of binary <c>bin_term</c>, starting at
zero-based position <c>pos</c> with a length of <c>size</c> bytes.
@@ -707,7 +900,7 @@ typedef enum {
<fsummary>Create a tuple term.</fsummary>
<desc><p>Create a tuple term with length indicated by the
function name. Prefer these functions (macros) over the variadic
- <c>enif_make_tuple</c> to get compile time error if the number of
+ <c>enif_make_tuple</c> to get a compile time error if the number of
arguments does not match.</p></desc>
</func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)</nametext></name>
@@ -719,36 +912,41 @@ typedef enum {
<fsummary>Create an unsigned integer term</fsummary>
<desc><p>Create an integer term from an <c>unsigned int</c>.</p></desc>
</func>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i)</nametext></name>
+ <fsummary>Create an unsigned integer term</fsummary>
+ <desc><p>Create an integer term from an unsigned 64-bit integer.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_ulong(ErlNifEnv* env, unsigned long i)</nametext></name>
<fsummary>Create an integer term from an unsigned long int</fsummary>
<desc><p>Create an integer term from an <c>unsigned long int</c>.</p></desc>
</func>
<func><name><ret>ErlNifMutex*</ret><nametext>enif_mutex_create(char *name)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_create">erl_drv_mutex_create()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_create">erl_drv_mutex_create</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_mutex_destroy(ErlNifMutex *mtx)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_destroy">erl_drv_mutex_destroy()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_destroy">erl_drv_mutex_destroy</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_mutex_lock(ErlNifMutex *mtx)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_lock">erl_drv_mutex_lock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_lock">erl_drv_mutex_lock</seealso>.
</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_mutex_trylock(ErlNifMutex *mtx)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_trylock">erl_drv_mutex_trylock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_trylock">erl_drv_mutex_trylock</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_mutex_unlock(ErlNifMutex *mtx)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_unlock">erl_drv_mutex_unlock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_mutex_unlock">erl_drv_mutex_unlock</seealso>.
</p></desc>
</func>
- <func><name><ret>ErlNifResourceType*</ret><nametext>enif_open_resource_type(ErlNifEnv* env, const char* name,
+ <func><name><ret>ErlNifResourceType*</ret><nametext>enif_open_resource_type(ErlNifEnv* env,
+ const char* module_str, const char* name,
ErlNifResourceDtor* dtor, ErlNifResourceFlags flags, ErlNifResourceFlags* tried)</nametext></name>
<fsummary>Create or takeover a resource type</fsummary>
<desc><p>Create or takeover a resource type identified by the string
@@ -762,10 +960,10 @@ typedef enum {
The supplied destructor <c>dtor</c> will be called both for existing instances
as well as new instances not yet created by the calling NIF library.</item>
</taglist>
- <p>The two flag values can be combined with bitwise-or. To avoid unintentionally
- name clashes a good practice is to include the module name as part of the
- type <c>name</c>. The <c>dtor</c> may be <c>NULL</c> in case no destructor
- is needed.</p>
+ <p>The two flag values can be combined with bitwise-or. The name of the
+ resource type is local to the calling module. Argument <c>module_str</c>
+ is not (yet) used and must be NULL. The <c>dtor</c> may be <c>NULL</c>
+ in case no destructor is needed.</p>
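+      <p>A resource type is typically opened from the <c>load</c> callback. A
+      minimal sketch (the names <c>my_rt</c> and <c>my_dtor</c> are
+      placeholders):</p>
+      <code type="none">
+ static ErlNifResourceType* my_rt;
+
+ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+ {
+     ErlNifResourceFlags tried;
+     my_rt = enif_open_resource_type(env, NULL, "my_resource",
+                                     my_dtor,   /* or NULL if no destructor is needed */
+                                     ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER,
+                                     &amp;tried);
+     return my_rt == NULL ? -1 : 0;   /* non-zero makes the load fail */
+ }
+      </code>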
<p>On success, return a pointer to the resource type and <c>*tried</c>
will be set to either <c>ERL_NIF_RT_CREATE</c> or
<c>ERL_NIF_RT_TAKEOVER</c> to indicate what was actually done.
@@ -782,123 +980,151 @@ typedef enum {
<c>reload</c> or <c>upgrade</c>.</p>
<p>Was previously named <c>enif_get_data</c>.</p></desc>
</func>
- <func><name><ret>void</ret><nametext>enif_realloc_binary(ErlNifEnv* env, ErlNifBinary* bin, unsigned size)</nametext></name>
+ <func><name><ret>void</ret><nametext>enif_realloc_binary(ErlNifBinary* bin, size_t size)</nametext></name>
<fsummary>Change the size of a binary.</fsummary>
<desc><p>Change the size of a binary <c>bin</c>. The source binary
may be read-only, in which case it will be left untouched and
a mutable copy is allocated and assigned to <c>*bin</c>.</p></desc>
</func>
- <func><name><ret>void</ret><nametext>enif_release_binary(ErlNifEnv* env, ErlNifBinary* bin)</nametext></name>
+ <func><name><ret>void</ret><nametext>enif_release_binary(ErlNifBinary* bin)</nametext></name>
<fsummary>Release a binary.</fsummary>
- <desc><p>Release a binary obtained
- from <c>enif_alloc_binary</c>.</p></desc>
+ <desc><p>Release a binary obtained from <c>enif_alloc_binary</c>.</p></desc>
</func>
- <func><name><ret>void</ret><nametext>enif_release_resource(ErlNifEnv* env, void* obj)</nametext></name>
+ <func><name><ret>void</ret><nametext>enif_release_resource(void* obj)</nametext></name>
<fsummary>Release a resource object.</fsummary>
- <desc><p>Release a resource objects obtained from <c>enif_alloc_resource</c>.
- The object may still be alive if it is referred to by Erlang terms. Each call to
- <c>enif_release_resource</c> must correspond to a previous call to <c>enif_alloc_resource</c>.
- References made by <c>enif_make_resource</c> can only be released by the garbage collector.</p></desc>
+      <desc><p>Remove a reference to resource object <c>obj</c> obtained from
+ <seealso marker="#enif_alloc_resource">enif_alloc_resource</seealso>.
+ The resource object will be destructed when the last reference is removed.
+ Each call to <c>enif_release_resource</c> must correspond to a previous
+ call to <c>enif_alloc_resource</c> or
+ <seealso marker="#enif_keep_resource">enif_keep_resource</seealso>.
+ References made by <seealso marker="#enif_make_resource">enif_make_resource</seealso>
+ can only be removed by the garbage collector.</p></desc>
</func>
<func><name><ret>ErlNifRWLock*</ret><nametext>enif_rwlock_create(char *name)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_create">erl_drv_rwlock_create()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_create">erl_drv_rwlock_create</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_rwlock_destroy(ErlNifRWLock *rwlck)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_destroy">erl_drv_rwlock_destroy()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_destroy">erl_drv_rwlock_destroy</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_rwlock_rlock(ErlNifRWLock *rwlck)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_rlock">erl_drv_rwlock_rlock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_rlock">erl_drv_rwlock_rlock</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_rwlock_runlock(ErlNifRWLock *rwlck)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_runlock">erl_drv_rwlock_runlock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_runlock">erl_drv_rwlock_runlock</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_rwlock_rwlock(ErlNifRWLock *rwlck)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_rwlock">erl_drv_rwlock_rwlock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_rwlock">erl_drv_rwlock_rwlock</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_rwlock_rwunlock(ErlNifRWLock *rwlck)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_rwunlock">erl_drv_rwlock_rwunlock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_rwunlock">erl_drv_rwlock_rwunlock</seealso>.
</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_rwlock_tryrlock(ErlNifRWLock *rwlck)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_tryrlock">erl_drv_rwlock_tryrlock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_tryrlock">erl_drv_rwlock_tryrlock</seealso>.
</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_rwlock_tryrwlock(ErlNifRWLock *rwlck)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_tryrwlock">erl_drv_rwlock_tryrwlock()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_tryrwlock">erl_drv_rwlock_tryrwlock</seealso>.
</p></desc>
</func>
- <func><name><ret>unsigned</ret><nametext>enif_sizeof_resource(ErlNifEnv* env, void* obj)</nametext></name>
+ <func><name><ret>unsigned</ret><nametext>enif_send(ErlNifEnv* env, ErlNifPid* to_pid, ErlNifEnv* msg_env, ERL_NIF_TERM msg)</nametext></name>
+ <fsummary>Send a message to a process.</fsummary>
+ <desc><p>Send a message to a process.</p>
+ <taglist>
+ <tag><c>env</c></tag>
+ <item>The environment of the calling process. Must be NULL if and
+ only if calling from a created thread.</item>
+ <tag><c>*to_pid</c></tag>
+ <item>The pid of the receiving process. The pid should refer to a process on the local node.</item>
+ <tag><c>msg_env</c></tag>
+ <item>The environment of the message term. Must be a process
+ independent environment allocated with
+ <seealso marker="#enif_alloc_env">enif_alloc_env</seealso>.</item>
+ <tag><c>msg</c></tag>
+ <item>The message term to send.</item>
+ </taglist>
+ <p>Return true on success, or false if <c>*to_pid</c> does not refer to an alive local process.</p>
+ <p>The message environment <c>msg_env</c> with all its terms (including
+ <c>msg</c>) will be invalidated by a successful call to <c>enif_send</c>. The environment
+ should either be freed with <seealso marker="#enif_free_env">enif_free_env</seealso>
+      or cleared for reuse with <seealso marker="#enif_clear_env">enif_clear_env</seealso>.</p>
+ <p>This function is only thread-safe when the emulator with SMP support is used.
+ It can only be used in a non-SMP emulator from a NIF-calling thread.</p>
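+      <p>A minimal sketch of sending a message back to the calling process
+      (error handling omitted for brevity):</p>
+      <code type="none">
+ ErlNifPid to;
+ ErlNifEnv* msg_env = enif_alloc_env();
+ ERL_NIF_TERM msg = enif_make_atom(msg_env, "hello");
+
+ enif_self(env, &amp;to);              /* send to the calling process itself */
+ enif_send(env, &amp;to, msg_env, msg);
+ enif_free_env(msg_env);            /* msg_env and msg may not be used after this */
+      </code>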
+ </desc>
+ </func>
+ <func><name><ret>unsigned</ret><nametext>enif_sizeof_resource(void* obj)</nametext></name>
<fsummary>Get the byte size of a resource object</fsummary>
<desc><p>Get the byte size of a resource object <c>obj</c> obtained by
- <c>enif_alloc_resource</c>.</p></desc>
+ <seealso marker="#enif_alloc_resource">enif_alloc_resource</seealso>.</p></desc>
</func>
<func>
<name><ret>void</ret><nametext>enif_system_info(ErlNifSysInfo *sys_info_ptr, size_t size)</nametext></name>
<fsummary>Get information about the Erlang runtime system</fsummary>
- <desc><p>Same as <seealso marker="erl_driver#driver_system_info">driver_system_info()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#driver_system_info">driver_system_info</seealso>.
</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_thread_create(char *name,ErlNifTid *tid,void * (*func)(void *),void *args,ErlNifThreadOpts *opts)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_create">erl_drv_thread_create()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_create">erl_drv_thread_create</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_thread_exit(void *resp)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_exit">erl_drv_thread_exit()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_exit">erl_drv_thread_exit</seealso>.
</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_thread_join(ErlNifTid, void **respp)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_join">erl_drv_thread_join ()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_join">erl_drv_thread_join </seealso>.
</p></desc>
</func>
<func><name><ret>ErlNifThreadOpts*</ret><nametext>enif_thread_opts_create(char *name)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_opts_create">erl_drv_thread_opts_create()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_opts_create">erl_drv_thread_opts_create</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_thread_opts_destroy(ErlNifThreadOpts *opts)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_opts_destroy">erl_drv_thread_opts_destroy()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_opts_destroy">erl_drv_thread_opts_destroy</seealso>.
</p></desc>
</func>
<func><name><ret>ErlNifTid</ret><nametext>enif_thread_self(void)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_self">erl_drv_thread_self()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_thread_self">erl_drv_thread_self</seealso>.
</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_tsd_key_create(char *name, ErlNifTSDKey *key)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_key_create">erl_drv_tsd_key_create()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_key_create">erl_drv_tsd_key_create</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_tsd_key_destroy(ErlNifTSDKey key)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_key_destroy">erl_drv_tsd_key_destroy()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_key_destroy">erl_drv_tsd_key_destroy</seealso>.
</p></desc>
</func>
<func><name><ret>void*</ret><nametext>enif_tsd_get(ErlNifTSDKey key)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_get">erl_drv_tsd_get()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_get">erl_drv_tsd_get</seealso>.
</p></desc>
</func>
<func><name><ret>void</ret><nametext>enif_tsd_set(ErlNifTSDKey key, void *data)</nametext></name>
<fsummary></fsummary>
- <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_set">erl_drv_tsd_set()</seealso>.
+ <desc><p>Same as <seealso marker="erl_driver#erl_drv_tsd_set">erl_drv_tsd_set</seealso>.
</p></desc>
</func>
</funcs>
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 46f8df4683..1eec45e0f3 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -253,6 +253,54 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
+ <name>binary_part(Subject, PosLen) -> binary()</name>
+ <fsummary>Extracts a part of a binary</fsummary>
+ <type>
+ <v>Subject = binary()</v>
+ <v>PosLen = {Start,Length}</v>
+ <v>Start = int()</v>
+ <v>Length = int()</v>
+ </type>
+ <desc>
+ <p>Extracts the part of the binary described by <c>PosLen</c>.</p>
+
+ <p>Negative length can be used to extract bytes at the end of a binary:</p>
+
+<code>
+1> Bin = &lt;&lt;1,2,3,4,5,6,7,8,9,10&gt;&gt;.
+2> binary_part(Bin,{byte_size(Bin), -5}).
+&lt;&lt;6,7,8,9,10&gt;&gt;
+</code>
+
+      <p>If <c>PosLen</c> in any way references a position outside the binary, a <c>badarg</c> exception is raised.</p>
+
+ <p><c>Start</c> is zero-based, i.e:</p>
+<code>
+1> Bin = &lt;&lt;1,2,3&gt;&gt;.
+2> binary_part(Bin,{0,2}).
+&lt;&lt;1,2&gt;&gt;
+</code>
+
+ <p>See the STDLIB module <c>binary</c> for details about the <c>PosLen</c> semantics.</p>
+
+ <p>Allowed in guard tests.</p>
+ </desc>
+ </func>
+ <func>
+ <name>binary_part(Subject, Start, Length) -> binary()</name>
+ <fsummary>Extracts a part of a binary</fsummary>
+ <type>
+ <v>Subject = binary()</v>
+ <v>Start = int()</v>
+ <v>Length = int()</v>
+ </type>
+ <desc>
+      <p>The same as <c>binary_part(Subject, {Start, Length})</c>.</p>
+
+ <p>Allowed in guard tests.</p>
+ </desc>
+ </func>
+ <func>
<name>binary_to_atom(Binary, Encoding) -> atom()</name>
<fsummary>Convert from text representation to an atom</fsummary>
<type>
@@ -318,6 +366,11 @@ iolist() = [char() | binary() | iolist()]
corresponding to the bytes from position <c>Start</c> to
position <c>Stop</c> in <c>Binary</c>. Positions in the
binary are numbered starting from 1.</p>
+
+ <note><p>This function's indexing style of using one-based indices for
+ binaries is deprecated. New code should use the functions in
+      the STDLIB module <c>binary</c> instead. They consistently
+      use zero-based indexing.</p></note>
</desc>
</func>
<func>
@@ -354,7 +407,7 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>erlang:binary_to_term(Binary, Opts) -> term()</name>
+ <name>binary_to_term(Binary, Opts) -> term()</name>
<fsummary>Decode an Erlang external term format binary</fsummary>
<type>
<v>Opts = [safe]</v>
@@ -446,7 +499,7 @@ iolist() = [char() | binary() | iolist()]
<name>erlang:cancel_timer(TimerRef) -> Time | false</name>
<fsummary>Cancel a timer</fsummary>
<type>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
<v>Time = int()</v>
</type>
<desc>
@@ -710,7 +763,7 @@ false</pre>
<name>erlang:demonitor(MonitorRef) -> true</name>
<fsummary>Stop monitoring</fsummary>
<type>
- <v>MonitorRef = ref()</v>
+ <v>MonitorRef = reference()</v>
</type>
<desc>
<p>If <c>MonitorRef</c> is a reference which the calling process
@@ -750,7 +803,7 @@ false</pre>
<name>erlang:demonitor(MonitorRef, OptionList) -> true|false</name>
<fsummary>Stop monitoring</fsummary>
<type>
- <v>MonitorRef = ref()</v>
+ <v>MonitorRef = reference()</v>
<v>OptionList = [Option]</v>
<v>Option = flush</v>
<v>Option = info</v>
@@ -2107,7 +2160,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>make_ref() -> ref()</name>
+ <name>make_ref() -> reference()</name>
<fsummary>Return an almost unique reference</fsummary>
<desc>
<p>Returns an almost unique reference.</p>
@@ -2156,7 +2209,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>erlang:max(Term1, Term2) -> Maximum</name>
+ <name>max(Term1, Term2) -> Maximum</name>
<fsummary>Return the largest of two term</fsummary>
<type>
<v>Term1 = Term2 = Maximum = term()</v>
@@ -2405,7 +2458,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>erlang:min(Term1, Term2) -> Minimum</name>
+ <name>min(Term1, Term2) -> Minimum</name>
<fsummary>Return the smallest of two term</fsummary>
<type>
<v>Term1 = Term2 = Minimum = term()</v>
@@ -2591,6 +2644,37 @@ os_prompt%</pre>
</desc>
</func>
<func>
+ <name>erlang:nif_error(Reason)</name>
+ <fsummary>Stop execution with a given reason</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>Works exactly like
+ <seealso marker="#error/1">erlang:error/1</seealso>,
+ but Dialyzer thinks that this BIF will return an arbitrary term.
+ When used in a stub function for a NIF to generate an
+ exception when the NIF library is not loaded, Dialyzer
+ will not generate false warnings.</p>
+ </desc>
+ </func>
+ <func>
+ <name>erlang:nif_error(Reason, Args)</name>
+ <fsummary>Stop execution with a given reason</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ <v>Args = [term()]</v>
+ </type>
+ <desc>
+ <p>Works exactly like
+ <seealso marker="#error/2">erlang:error/2</seealso>,
+ but Dialyzer thinks that this BIF will return an arbitrary term.
+ When used in a stub function for a NIF to generate an
+ exception when the NIF library is not loaded, Dialyzer
+ will not generate false warnings.</p>
+ </desc>
+ </func>
+ <func>
<name>node() -> Node</name>
<fsummary>Name of the local node</fsummary>
<type>
@@ -2606,7 +2690,7 @@ os_prompt%</pre>
<name>node(Arg) -> Node</name>
<fsummary>At which node is a pid, port or reference located</fsummary>
<type>
- <v>Arg = pid() | port() | ref()</v>
+ <v>Arg = pid() | port() | reference()</v>
<v>Node = node()</v>
</type>
<desc>
@@ -2683,8 +2767,10 @@ os_prompt%</pre>
Otherwise, some other point in time is chosen. It is also
guaranteed that subsequent calls to this BIF returns
continuously increasing values. Hence, the return value from
- <c>now()</c> can be used to generate unique time-stamps. It
- can only be used to check the local time of day if
+ <c>now()</c> can be used to generate unique time-stamps,
+      and if it is called in a tight loop on a fast machine,
+ the time of the node can become skewed.</p>
+ <p>It can only be used to check the local time of day if
the time-zone info of the underlying operating system is
properly configured.</p>
</desc>
@@ -2749,7 +2835,7 @@ os_prompt%</pre>
<item>
<p>Works like <c>{spawn, Command}</c>, but only runs
- external executables. The <c>Command</c> in it's whole
+      external executables. The <c>Command</c> in its entirety
is used as the name of the executable, including any
spaces. If arguments are to be passed, the
<c>args</c> and <c>arg0</c> <c>PortSettings</c> can be used.</p>
@@ -2926,7 +3012,7 @@ os_prompt%</pre>
The standard input and standard output handles of the port program
will, if this option is supplied, be opened with the flag
FILE_FLAG_OVERLAPPED, so that the port program can (and has to) do
- overlapped I/O on it's standard handles. This is not normally
+ overlapped I/O on its standard handles. This is not normally
the case for simple port programs, but an option of value for the
experienced Windows programmer. <em>On all other platforms, this
option is silently discarded</em>.</p>
@@ -3147,7 +3233,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>erlang:port_command(Port, Data, OptionList) -> true|false</name>
+ <name>port_command(Port, Data, OptionList) -> true|false</name>
<fsummary>Send data to a port</fsummary>
<type>
<v>Port = port() | atom()</v>
@@ -3183,10 +3269,6 @@ os_prompt%</pre>
<note>
<p>More options may be added in the future.</p>
</note>
- <note>
- <p><c>erlang:port_command/3</c> is currently not auto imported, but
- it is planned to be auto imported in OTP R14.</p>
- </note>
<p>Failures:</p>
<taglist>
<tag><c>badarg</c></tag>
@@ -4016,7 +4098,7 @@ os_prompt%</pre>
<name>erlang:read_timer(TimerRef) -> int() | false</name>
<fsummary>Number of milliseconds remaining for a timer</fsummary>
<type>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
</type>
<desc>
<p><c>TimerRef</c> is a timer reference returned by
@@ -4039,7 +4121,7 @@ os_prompt%</pre>
<name>erlang:ref_to_list(Ref) -> string()</name>
<fsummary>Text representation of a reference</fsummary>
<type>
- <v>Ref = ref()</v>
+ <v>Ref = reference()</v>
</type>
<desc>
<p>Returns a string which corresponds to the text
@@ -4218,7 +4300,7 @@ true</pre>
<v>Dest = pid() | RegName </v>
<v>&nbsp;LocalPid = pid() (of a process, alive or dead, on the local node)</v>
<v>Msg = term()</v>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
</type>
<desc>
<p>Starts a timer which will send the message <c>Msg</c>
@@ -4431,7 +4513,7 @@ true</pre>
</desc>
</func>
<func>
- <name>spawn(Node, Module, Function, ArgumentList) -> pid()</name>
+ <name>spawn(Node, Module, Function, Args) -> pid()</name>
<fsummary>Create a new process with a function as entry point on a given node</fsummary>
<type>
<v>Node = node()</v>
@@ -4747,7 +4829,7 @@ true</pre>
<v>&nbsp;LocalPid = pid() (of a process, alive or dead, on the local node)</v>
<v>&nbsp;RegName = atom()</v>
<v>Msg = term()</v>
- <v>TimerRef = ref()</v>
+ <v>TimerRef = reference()</v>
</type>
<desc>
<p>Starts a timer which will send the message
@@ -5093,9 +5175,9 @@ true</pre>
schedulers actually have bound as requested, call
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
</p>
- <p>Schedulers can currently only be bound on newer Linux
- and Solaris systems, but more systems will be supported
- in the future.
+ <p>Schedulers can currently only be bound on newer Linux,
+ Solaris, and Windows systems, but more systems will be
+ supported in the future.
</p>
<p>In order for the runtime system to be able to bind schedulers,
the CPU topology needs to be known. If the runtime system fails
@@ -5103,10 +5185,21 @@ true</pre>
For more information on how to define the CPU topology, see
<seealso marker="#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>.
</p>
- <p><em>NOTE:</em> If other programs on the system have bound
- to processors, e.g. another Erlang runtime system, you
- may loose performance when binding schedulers. Therefore,
- schedulers are by default not bound.</p>
+ <p>The runtime system will by default bind schedulers to logical
+      processors using the <c>default_bind</c> bind type if the number
+      of schedulers is at least equal to the number of logical
+ processors configured, binding of schedulers is supported,
+ and a CPU topology is available at startup.
+ </p>
+ <p><em>NOTE:</em> If the Erlang runtime system is the only
+ operating system process that binds threads to logical processors,
+ this improves the performance of the runtime system. However,
+      if other operating system processes (for example, another Erlang
+ runtime system) also bind threads to logical processors, there
+      might be a performance penalty instead. If this is the case, you
+      are advised to unbind the schedulers using the
+ <seealso marker="erl#+sbt">+sbtu</seealso> command line argument,
+ or <c>erlang:system_flag(scheduler_bind_type, unbound)</c>.</p>
<p>Schedulers can be bound in different ways. The <c>How</c>
argument determines how schedulers are bound. <c>How</c> can
currently be one of:</p>
@@ -5271,7 +5364,7 @@ true</pre>
<p>Returns <c>{Allocator, Version, Features, Settings}.</c></p>
<p>Types:</p>
<list type="bulleted">
- <item><c>Allocator = undefined | elib_malloc | glibc</c></item>
+ <item><c>Allocator = undefined | glibc</c></item>
<item><c>Version = [int()]</c></item>
<item><c>Features = [atom()]</c></item>
<item><c>Settings = [{Subsystem, [{Parameter, Value}]}]</c></item>
@@ -5286,7 +5379,7 @@ true</pre>
implementation used. If <c>Allocator</c> equals
<c>undefined</c>, the <c>malloc()</c> implementation
used could not be identified. Currently
- <c>elib_malloc</c> and <c>glibc</c> can be identified.</p>
+ <c>glibc</c> can be identified.</p>
</item>
<item>
<p><c>Version</c> is a list of integers (but not a
@@ -5440,7 +5533,7 @@ true</pre>
<c>CpuTopology</c> type to change.
</p>
</item>
- <tag><c>{cpu_topology, defined}</c></tag>
+ <tag><marker id="system_info_cpu_topology_defined"><c>{cpu_topology, defined}</c></marker></tag>
<item>
<p>Returns the user defined <c>CpuTopology</c>. For more
information see the documentation of
@@ -5450,12 +5543,14 @@ true</pre>
argument.
</p>
</item>
- <tag><c>{cpu_topology, detected}</c></tag>
+ <tag><marker id="system_info_cpu_topology_detected"><c>{cpu_topology, detected}</c></marker></tag>
<item>
<p>Returns the automatically detected <c>CpuTopology</c>. The
emulator currently only detects the CPU topology on some newer
- linux and solaris systems. For more information see the
- documentation of the
+      Linux, Solaris, and Windows systems. On Windows systems with
+      more than 32 logical processors, the CPU topology is not detected.
+ </p>
+ <p>For more information see the documentation of the
<seealso marker="#system_info_cpu_topology">cpu_topology</seealso>
argument.
</p>
@@ -5513,52 +5608,9 @@ true</pre>
</item>
<tag><c>elib_malloc</c></tag>
<item>
- <p>If the emulator uses the <c>elib_malloc</c> memory
- allocator, a list of two-element tuples containing status
- information is returned; otherwise, <c>false</c> is
- returned. The list currently contains the following
- two-element tuples (all sizes are presented in bytes):</p>
- <taglist>
- <tag><c>{heap_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the current heap size.</p>
- </item>
- <tag><c>{max_alloced_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the maximum amount of memory
- allocated on the heap since the emulator started.</p>
- </item>
- <tag><c>{alloced_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the current amount of memory
- allocated on the heap.</p>
- </item>
- <tag><c>{free_size, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the current amount of free
- memory on the heap.</p>
- </item>
- <tag><c>{no_alloced_blocks, No}</c></tag>
- <item>
- <p>Where <c>No</c> is the current number of allocated
- blocks on the heap.</p>
- </item>
- <tag><c>{no_free_blocks, No}</c></tag>
- <item>
- <p>Where <c>No</c> is the current number of free blocks
- on the heap.</p>
- </item>
- <tag><c>{smallest_alloced_block, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the size of the smallest
- allocated block on the heap.</p>
- </item>
- <tag><c>{largest_free_block, Size}</c></tag>
- <item>
- <p>Where <c>Size</c> is the size of the largest free
- block on the heap.</p>
- </item>
- </taglist>
+ <p>This option will be removed in a future release.
+ The return value will always be <c>false</c> since
+ the elib_malloc allocator has been removed.</p>
</item>
<tag><c>fullsweep_after</c></tag>
<item>
@@ -5634,11 +5686,34 @@ true</pre>
information see the <seealso marker="erts:crash_dump">"How to interpret the Erlang crash dumps"</seealso> chapter
in the ERTS User's Guide.</p>
</item>
- <tag><c>logical_processors</c></tag>
+ <tag><marker id="logical_processors"><c>logical_processors</c></marker></tag>
+ <item>
+ <p>Returns the detected number of logical processors configured
+ on the system. The return value is either an integer, or
+            the atom <c>unknown</c> if the emulator was not able to
+            detect the number of logical processors configured.
+ </p>
+ </item>
+ <tag><marker id="logical_processors_available"><c>logical_processors_available</c></marker></tag>
+ <item>
+ <p>Returns the detected number of logical processors available to
+ the Erlang runtime system. The return value is either an
+            integer, or the atom <c>unknown</c> if the emulator was not
+            able to detect the number of logical processors available. The number
+ of logical processors available is less than or equal to
+ the number of <seealso marker="#logical_processors_online">logical
+ processors online</seealso>.
+ </p>
+ </item>
+ <tag><marker id="logical_processors_online"><c>logical_processors_online</c></marker></tag>
<item>
- <p>Returns the number of logical processors detected on the
- system as an integer or the atom <c>unknown</c> if the
- emulator wasn't able to detect any.
+ <p>Returns the detected number of logical processors online on
+ the system. The return value is either an integer,
+          or the atom <c>unknown</c> if the emulator was not able to
+          detect the number of logical processors online. The number of logical
+ processors online is less than or equal to the number of
+ <seealso marker="#logical_processors">logical processors
+ configured</seealso>.
</p>
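+          <p>As an illustration only (the actual values depend on the
+          hardware and on any restrictions placed on the runtime system,
+          for example by a cpuset), the three values relate as follows:</p>
+          <pre>
+> erlang:system_info(logical_processors).
+8
+> erlang:system_info(logical_processors_online).
+8
+> erlang:system_info(logical_processors_available).
+4</pre>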
</item>
<tag><c>machine</c></tag>
@@ -5843,6 +5918,26 @@ true</pre>
<c>get_tcw</c> in "Match Specifications in Erlang",
<seealso marker="erts:match_spec#get_tcw">ERTS User's Guide</seealso>.</p>
</item>
+ <tag><marker id="update_cpu_info"><c>update_cpu_info</c></marker></tag>
+ <item>
+ <p>The runtime system rereads the CPU information available and
+ updates its internally stored information about the
+ <seealso marker="#system_info_cpu_topology_detected">detected CPU
+ topology</seealso> and the amount of logical processors
+ <seealso marker="#logical_processors">configured</seealso>,
+ <seealso marker="#logical_processors_online">online</seealso>, and
+ <seealso marker="#logical_processors_available">available</seealso>.
+ If the CPU information has changed since the last time it was read,
+ the atom <c>changed</c> is returned; otherwise, the atom
+            <c>unchanged</c> is returned. If the CPU information has changed,
+            you probably want to
+            <seealso marker="#system_flag_schedulers_online">adjust the number
+ of schedulers online</seealso>. You typically want to have as
+ many schedulers online as
+ <seealso marker="#logical_processors_available">logical processors
+ available</seealso>.
+ </p>
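+            <p>A minimal sketch of this pattern, assuming that the number
+            of logical processors available is not <c>unknown</c> (the
+            returned values are only illustrative):</p>
+            <pre>
+> erlang:system_info(update_cpu_info).
+changed
+> erlang:system_flag(schedulers_online,
+                     erlang:system_info(logical_processors_available)).
+8</pre>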
+ </item>
<tag><marker id="system_info_version"><c>version</c></marker></tag>
<item>
<p>Returns a string containing the version number of the
@@ -5850,9 +5945,23 @@ true</pre>
</item>
<tag><c>wordsize</c></tag>
<item>
- <p>Returns the word size in bytes as an integer, i.e. on a
- 32-bit architecture 4 is returned, and on a 64-bit
- architecture 8 is returned.</p>
+            <p>Same as <c>{wordsize, internal}</c>.</p>
+ </item>
+ <tag><c>{wordsize, internal}</c></tag>
+ <item>
+ <p>Returns the size of Erlang term words in bytes as an
+ integer, i.e. on a 32-bit architecture 4 is returned,
+ and on a pure 64-bit architecture 8 is returned. On a
+ halfword 64-bit emulator, 4 is returned, as the Erlang
+ terms are stored using a virtual wordsize of half the
+              system's wordsize.</p>
+ </item>
+ <tag><c>{wordsize, external}</c></tag>
+ <item>
+ <p>Returns the true wordsize of the emulator, i.e. the size
+ of a pointer, in bytes as an integer. On a pure 32-bit
+              architecture 4 is returned; on both halfword and pure
+              64-bit architectures, 8 is returned.</p>
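+            <p>For example, on a halfword 64-bit emulator the two values
+              differ:</p>
+            <pre>
+> erlang:system_info({wordsize, internal}).
+4
+> erlang:system_info({wordsize, external}).
+8</pre>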
</item>
</taglist>
<note>
@@ -6645,6 +6754,17 @@ true</pre>
See also
<seealso marker="#trace_pattern/3">erlang:trace_pattern/3</seealso>.</p>
</item>
+ <tag><c>call_time</c></tag>
+ <item>
+ <p>Return the call time values for this function or
+ <c>true</c> for the pseudo function <c>on_load</c> if call
+ time tracing is active. Returns <c>false</c> otherwise.
+ The call time values returned, <c>[{Pid, Count, S, Us}]</c>,
+          form a list with one entry for each process that has executed the function, together with that process's counters.
+ See also
+ <seealso marker="#trace_pattern/3">erlang:trace_pattern/3</seealso>.</p>
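+          <p>For example, assuming call time tracing is active for a
+          (hypothetical) function <c>foo:bar/1</c> that has been called
+          three times by a single process, the result could look like this
+          (the pid and times are only illustrative):</p>
+          <pre>
+> erlang:trace_info({foo,bar,1}, call_time).
+{call_time,[{&lt;0.39.0&gt;,3,0,97}]}</pre>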
+ </item>
+
<tag><c>all</c></tag>
<item>
<p>Return a list containing the <c>{Item, Value}</c> tuples
@@ -6747,13 +6867,13 @@ true</pre>
</item>
<tag><c>restart</c></tag>
<item>
- <p>For the <c>FlagList</c> option <c>call_count</c>:
+	  <p>For the <c>FlagList</c> options <c>call_count</c> and <c>call_time</c>:
restart the existing counters. The behaviour is undefined
for other <c>FlagList</c> options.</p>
</item>
<tag><c>pause</c></tag>
<item>
- <p>For the <c>FlagList</c> option <c>call_count</c>: pause
+	  <p>For the <c>FlagList</c> options <c>call_count</c> and <c>call_time</c>: pause
the existing counters. The behaviour is undefined for
other <c>FlagList</c> options.</p>
</item>
@@ -6808,6 +6928,23 @@ true</pre>
<p>The counter value can be read with
<seealso marker="#trace_info/2">erlang:trace_info/2</seealso>.</p>
</item>
+ <tag><c>call_time</c></tag>
+ <item>
+ <p>Starts (<c>MatchSpec == true</c>) or stops
+ (<c>MatchSpec == false</c>) call time tracing for all
+ types of function calls. For every function a counter is
+ incremented when the function is called. Time spent in the function
+          is accumulated in two other counters, seconds and microseconds.
+          The counters are stored for each call-traced process.</p>
+          <p>If call time tracing is started while already running,
+          the count and time are restarted from zero. Running counters can be
+ paused with <c>MatchSpec == pause</c>. Paused and running
+ counters can be restarted from zero with
+ <c>MatchSpec == restart</c>.</p>
+ <p>The counter value can be read with
+ <seealso marker="#trace_info/2">erlang:trace_info/2</seealso>.</p>
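+          <p>A minimal sketch, using a (hypothetical) function
+          <c>foo:bar/1</c>; the returned numbers are only
+          illustrative:</p>
+          <pre>
+> erlang:trace_pattern({foo,bar,1}, true, [call_time]).
+1
+> erlang:trace(new, true, [call]).
+0
+> spawn(foo, bar, [42]).
+&lt;0.42.0&gt;
+> erlang:trace_info({foo,bar,1}, call_time).
+{call_time,[{&lt;0.42.0&gt;,1,0,11}]}</pre>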
+ </item>
+
</taglist>
<p>The <c>global</c> and <c>local</c> options are mutually
exclusive and <c>global</c> is the default (if no options are
@@ -6815,7 +6952,7 @@ true</pre>
perform a kind of local tracing, and can also not be combined
with <c>global</c>. A function can be either globally or
locally traced. If global tracing is specified for a
- specified set of functions; local, meta and call count
+ specified set of functions; local, meta, call time and call count
tracing for the matching set of local functions will be
disabled, and vice versa.</p>
<p>When disabling trace, the option must match the type of trace
diff --git a/erts/doc/src/escript.xml b/erts/doc/src/escript.xml
index a89449df23..44c9a5ac68 100644
--- a/erts/doc/src/escript.xml
+++ b/erts/doc/src/escript.xml
@@ -31,7 +31,7 @@
<com>escript</com>
<comsummary>Erlang scripting support</comsummary>
<description>
- <p><c><![CDATA[escript]]></c> provides support for running short Erlang programs
+ <p><c>escript</c> provides support for running short Erlang programs
without having to compile them first and an easy way to retrieve the
command line arguments.</p>
</description>
@@ -41,10 +41,10 @@
<name>escript escript-flags script-name script-arg1 script-arg2...</name>
<fsummary>Run a script written in Erlang</fsummary>
<desc>
- <p><c><![CDATA[escript]]></c> runs a script written in Erlang.</p>
+ <p><c>escript</c> runs a script written in Erlang.</p>
<p>Here follows an example.</p>
<pre>
-$ <input>cat factorial</input>
+$ <input>cat factorial</input>
#!/usr/bin/env escript
%% -*- erlang -*-
%%! -smp enable -sname factorial -mnesia debug verbose
@@ -59,11 +59,11 @@ main([String]) ->
end;
main(_) ->
usage().
-
+
usage() ->
io:format("usage: factorial integer\n"),
halt(1).
-
+
fac(0) -> 1;
fac(N) -> N * fac(N-1).
$ <input>factorial 5</input>
@@ -74,9 +74,8 @@ $ <input>factorial five</input>
usage: factorial integer </pre>
<p>The header of the Erlang script in the example differs from
a normal Erlang module. The first line is intended to be the
- interpreter line, which invokes
- <c><![CDATA[escript]]></c>. However if you invoke the
- <c><![CDATA[escript]]></c> like this</p>
+          interpreter line, which invokes <c>escript</c>. However, if you
+          invoke <c>escript</c> like this</p>
<pre>
$ <input>escript factorial 5</input> </pre>
<p>the contents of the first line does not matter, but it
@@ -93,13 +92,13 @@ $ <input>escript factorial 5</input> </pre>
%%! -smp enable -sname factorial -mnesia debug verbose</pre>
<p>Such an argument line must start with <c>%%!</c> and the
rest of the line will interpreted as arguments to the emulator.</p>
- <p>If you know the location of the <c><![CDATA[escript]]></c> executable, the first
- line can directly give the path to <c><![CDATA[escript]]></c>. For instance:</p>
+ <p>If you know the location of the <c>escript</c> executable, the first
+ line can directly give the path to <c>escript</c>. For instance:</p>
<pre>
#!/usr/local/bin/escript </pre>
<p>As any other kind of scripts, Erlang scripts will not work on
Unix platforms if the execution bit for the script file is not set.
- (Use <c><![CDATA[chmod +x script-name]]></c> to turn on the execution bit.)
+ (Use <c>chmod +x script-name</c> to turn on the execution bit.)
</p>
<p>The rest of the Erlang script file may either contain
@@ -108,33 +107,33 @@ $ <input>escript factorial 5</input> </pre>
<p>An Erlang script file must always contain the function
<em>main/1</em>. When the script is run, the
- <c><![CDATA[main/1]]></c> function will be called with a list
+ <c>main/1</c> function will be called with a list
of strings representing the arguments given to the script (not
changed or interpreted in any way).</p>
- <p>If the <c><![CDATA[main/1]]></c> function in the script returns successfully,
+ <p>If the <c>main/1</c> function in the script returns successfully,
the exit status for the script will be 0. If an exception is generated
during execution, a short message will be printed and the script terminated
with exit status 127.</p>
- <p>To return your own non-zero exit code, call <c><![CDATA[halt(ExitCode)]]></c>;
+ <p>To return your own non-zero exit code, call <c>halt(ExitCode)</c>;
for instance:</p>
<pre>
halt(1).</pre>
- <p>Call <c><![CDATA[escript:script_name/0]]></c> from your to
- script to retrieve the pathname of the script (the pathname
- is usually, but not always, absolute).</p>
+ <p>Call <seealso marker="#script_name_0">escript:script_name()</seealso>
+          from your script to retrieve the pathname of the script
+ (the pathname is usually, but not always, absolute).</p>
<p>If the file contains source code (as in the example above),
it will be processed by the preprocessor <c>epp</c>. This
means that you for example may use pre-defined macros (such as
- <c><![CDATA[?MODULE]]></c>) as well as include directives like
- the <c><![CDATA[-include_lib]]></c> directive. For instance, use</p>
+ <c>?MODULE</c>) as well as include directives like
+ the <c>-include_lib</c> directive. For instance, use</p>
<pre>
--include_lib("kernel/include/file.hrl"). </pre>
+-include_lib("kernel/include/file.hrl").</pre>
<p>to include the record definitions for the records used by the
- <c><![CDATA[file:read_link_info/1]]></c> function.</p>
+ <c>file:read_link_info/1</c> function.</p>
<p>The script will be checked for syntactic and semantic
correctness before being run. If there are warnings (such as
@@ -144,7 +143,7 @@ halt(1).</pre>
127.</p>
<p>Both the module declaration and the export declaration of
- the <c><![CDATA[main/1]]></c> function are optional.</p>
+ the <c>main/1</c> function are optional.</p>
<p>By default, the script will be interpreted. You can force
it to be compiled by including the following line somewhere
@@ -198,6 +197,180 @@ factorial 5 = 120
</pre>
</desc>
</func>
+ <func>
+ <name>escript:create(FileOrBin, Sections) -> ok | {ok, binary()} | {error, term()}</name>
+ <fsummary>Create an escript</fsummary>
+ <type>
+ <v>FileOrBin = filename() | 'binary'</v>
+ <v>Sections = [Header] Body | Body</v>
+ <v>Header = shebang | {shebang, Shebang}
+ | comment | {comment, Comment}
+ | {emu_args, EmuArgs}</v>
+ <v>Shebang = string() | 'default' | 'undefined'</v>
+ <v>Comment = string() | 'default' | 'undefined'</v>
+ <v>EmuArgs = string() | 'undefined'</v>
+ <v>Body = {source, SourceCode}
+ | {beam, BeamCode}
+ | {archive, ZipArchive}</v>
+ <v>SourceCode = BeamCode = ZipArchive = binary()</v>
+ </type>
+ <desc>
+ <p>The <marker id="create_2"></marker> <c>create/2</c>
+ function creates an escript from a list of sections. The
+ sections can be given in any order. An escript begins with an
+ optional <c>Header</c> followed by a mandatory <c>Body</c>. If
+ the header is present, it does always begin with a
+        the header is present, it always begins with a
+ <c>emu_args</c>. The <c>shebang</c> defaults to
+ <c>"/usr/bin/env escript"</c>. The comment defaults to
+ <c>"This is an -*- erlang -*- file"</c>. The created escript
+        can either be returned as a binary or written to a file.</p>
+
+ <p>As an example of how the function can be used, we create an
+        interpreted escript which uses <c>emu_args</c> to set an emulator
+        flag. In this case it happens to disable SMP support. We
+        also extract the different sections from the newly created
+ script:</p>
+ <pre>
+&gt; <input>Source = "%% Demo\nmain(_Args) ->\n io:format(erlang:system_info(smp_support)).\n".</input>
+"%% Demo\nmain(_Args) ->\n io:format(erlang:system_info(smp_support)).\n"
+&gt; <input>io:format("~s\n", [Source]).</input>
+%% Demo
+main(_Args) ->
+ io:format(erlang:system_info(smp_support)).
+
+ok
+&gt; <input>{ok, Bin} = escript:create(binary, [shebang, comment, {emu_args, "-smp disable"},
+ {source, list_to_binary(Source)}]).</input>
+{ok,&lt;&lt;"#!/usr/bin/env escript\n%% This is an -*- erlang -*- file\n%%!-smp disabl"...&gt;&gt;}
+&gt; <input>file:write_file("demo.escript", Bin).</input>
+ok
+&gt; <input>os:cmd("escript demo.escript").</input>
+"false"
+&gt; <input>escript:extract("demo.escript", []).</input>
+{ok,[{shebang,default}, {comment,default}, {emu_args,"-smp disable"},
+ {source,&lt;&lt;"%% Demo\nmain(_Args) ->\n io:format(erlang:system_info(smp_su"...&gt;&gt;}]}
+ </pre>
+
+ <p>An escript without header can be created like this:</p>
+<pre>
+&gt; <input>file:write_file("demo.erl",
+ ["%% demo.erl\n-module(demo).\n-export([main/1]).\n\n", Source]).</input>
+ok
+&gt; <input>{ok, _, BeamCode} = compile:file("demo.erl", [binary, debug_info]).</input>
+{ok,demo,
+ &lt;&lt;70,79,82,49,0,0,2,208,66,69,65,77,65,116,111,109,0,0,0,
+ 79,0,0,0,9,4,100,...&gt;&gt;}
+&gt; <input>escript:create("demo.beam", [{beam, BeamCode}]).</input>
+ok
+&gt; <input>escript:extract("demo.beam", []).</input>
+{ok,[{shebang,undefined}, {comment,undefined}, {emu_args,undefined},
+ {beam,&lt;&lt;70,79,82,49,0,0,3,68,66,69,65,77,65,116,
+ 111,109,0,0,0,83,0,0,0,9,...&gt;&gt;}]}
+&gt; <input>os:cmd("escript demo.beam").</input>
+"true"
+</pre>
+ <p>Here we create an archive script containing both Erlang
+        source code and beam code. Then we iterate over all files in
+        the archive and collect their contents and some information about
+ them.
+ </p>
+<pre>
+&gt; <input>{ok, SourceCode} = file:read_file("demo.erl").</input>
+{ok,&lt;&lt;"%% demo.erl\n-module(demo).\n-export([main/1]).\n\n%% Demo\nmain(_Arg"...&gt;&gt;}
+&gt; <input>escript:create("demo.escript",
+ [shebang,
+ {archive, [{"demo.erl", SourceCode},
+ {"demo.beam", BeamCode}], []}]).</input>
+ok
+&gt; <input>{ok, [{shebang,default}, {comment,undefined}, {emu_args,undefined},
+ {archive, ArchiveBin}]} = escript:extract("demo.escript", []).</input>
+{ok,[{shebang,default}, {comment,undefined}, {emu_args,undefined},
+     {archive,&lt;&lt;80,75,3,4,20,0,0,0,8,0,118,7,98,60,105,
+ 152,61,93,107,0,0,0,118,0,...&gt;&gt;}]}
+&gt; <input>file:write_file("demo.zip", ArchiveBin).</input>
+ok
+&gt; <input>zip:foldl(fun(N, I, B, A) -> [{N, I(), B()} | A] end, [], "demo.zip").</input>
+{ok,[{"demo.beam",
+ {file_info,748,regular,read_write,
+ {{2010,3,2},{0,59,22}},
+ {{2010,3,2},{0,59,22}},
+ {{2010,3,2},{0,59,22}},
+ 54,1,0,0,0,0,0},
+ &lt;&lt;70,79,82,49,0,0,2,228,66,69,65,77,65,116,111,109,0,0,0,
+ 83,0,0,...&gt;&gt;},
+ {"demo.erl",
+ {file_info,118,regular,read_write,
+ {{2010,3,2},{0,59,22}},
+ {{2010,3,2},{0,59,22}},
+ {{2010,3,2},{0,59,22}},
+ 54,1,0,0,0,0,0},
+ &lt;&lt;"%% demo.erl\n-module(demo).\n-export([main/1]).\n\n%% Demo\nmain(_Arg"...&gt;&gt;}]}</pre>
+ </desc>
+ </func>
+ <func>
+ <name>escript:extract(File, Options) -> {ok, Sections} | {error, term()}</name>
+ <fsummary>Parses an escript and extracts its sections</fsummary>
+ <type>
+ <v>File = filename()</v>
+ <v>Options = [] | [compile_source]</v>
+ <v>Sections = Headers Body</v>
+ <v>Headers = {shebang, Shebang}
+ {comment, Comment}
+ {emu_args, EmuArgs}</v>
+ <v>Shebang = string() | 'default' | 'undefined'</v>
+ <v>Comment = string() | 'default' | 'undefined'</v>
+ <v>EmuArgs = string() | 'undefined'</v>
+ <v>Body = {source, SourceCode}
+ | {source, BeamCode}
+ | {beam, BeamCode}
+ | {archive, ZipArchive}</v>
+ <v>SourceCode = BeamCode = ZipArchive = binary()</v>
+ </type>
+ <desc>
+ <p>The <marker id="extract_2"></marker> <c>extract/2</c>
+ function parses an escript and extracts its sections. This is
+ the reverse of <c>create/2</c>.</p>
+
+ <p>All sections are returned even if they do not exist in the
+ escript. If a particular section happens to have the same
+ value as the default value, the extracted value is set to the
+ atom <c>default</c>. If a section is missing, the extracted
+ value is set to the atom <c>undefined</c>. </p>
+
+ <p>The <c>compile_source</c> option only affects the result if
+ the escript contains <c>source</c> code. In that case the
+ Erlang code is automatically compiled and <c>{source,
+ BeamCode}</c> is returned instead of <c>{source,
+ SourceCode}</c>.</p>
+
+ <pre>
+&gt; <input>escript:create("demo.escript",
+ [shebang, {archive, [{"demo.erl", SourceCode},
+ {"demo.beam", BeamCode}], []}]).</input>
+ok
+&gt; <input>{ok, [{shebang,default}, {comment,undefined}, {emu_args,undefined},
+ {archive, ArchiveBin}]} =
+ escript:extract("demo.escript", []).</input>
+{ok,[{shebang,default}, {comment,undefined}, {emu_args,undefined},
+     {archive,&lt;&lt;80,75,3,4,20,0,0,0,8,0,118,7,98,60,105,
+               152,61,93,107,0,0,0,118,0,...&gt;&gt;}]}
+ </pre>
+ </desc>
+ </func>
+ <func>
+ <name>escript:script_name() -> File</name>
+ <fsummary>Returns the name of an escript</fsummary>
+ <type>
+ <v>File = filename()</v>
+ </type>
+ <desc>
+ <p>The <marker id="script_name_0"></marker>
+ <c>script_name/0</c> function returns the name of the escript
+ being executed. If the function is invoked outside the context
+ of an escript, the behavior is undefined.</p>
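+          <p>A minimal sketch of its use inside a script:</p>
+<pre>
+#!/usr/bin/env escript
+main(_Args) ->
+    io:format("running ~s\n", [escript:script_name()]).</pre>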
+ </desc>
+ </func>
</funcs>
<section>
diff --git a/erts/doc/src/match_spec.xml b/erts/doc/src/match_spec.xml
index b9f955e4db..f0390c9db8 100644
--- a/erts/doc/src/match_spec.xml
+++ b/erts/doc/src/match_spec.xml
@@ -60,7 +60,7 @@
<section>
<title>Grammar</title>
- <p>A match_spec can be described in this <em>informal</em> grammar:</p>
+ <p>A match_spec used in tracing can be described in this <em>informal</em> grammar:</p>
<list type="bulleted">
<item>MatchExpression ::= [ MatchFunction, ... ]
</item>
@@ -117,6 +117,52 @@
<c><![CDATA[display]]></c> | <c><![CDATA[caller]]></c> | <c><![CDATA[set_tcw]]></c> |
<c><![CDATA[silent]]></c></item>
</list>
+
+  <p>A match_spec used in ETS can be described in this <em>informal</em> grammar (a small example follows the grammar):</p>
+ <list type="bulleted">
+ <item>MatchExpression ::= [ MatchFunction, ... ]
+ </item>
+ <item>MatchFunction ::= { MatchHead, MatchConditions, MatchBody }
+ </item>
+ <item>MatchHead ::= MatchVariable | <c><![CDATA['_']]></c> | { MatchHeadPart, ... }
+ </item>
+ <item>MatchHeadPart ::= term() | MatchVariable | <c><![CDATA['_']]></c></item>
+ <item>MatchVariable ::= '$&lt;number&gt;'
+ </item>
+ <item>MatchConditions ::= [ MatchCondition, ...] | <c><![CDATA[[]]]></c></item>
+ <item>MatchCondition ::= { GuardFunction } |
+ { GuardFunction, ConditionExpression, ... }
+ </item>
+ <item>BoolFunction ::= <c><![CDATA[is_atom]]></c> | <c><![CDATA[is_constant]]></c> |
+ <c><![CDATA[is_float]]></c> | <c><![CDATA[is_integer]]></c> | <c><![CDATA[is_list]]></c> |
+ <c><![CDATA[is_number]]></c> | <c><![CDATA[is_pid]]></c> | <c><![CDATA[is_port]]></c> |
+ <c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> | <c><![CDATA[is_binary]]></c> |
+ <c><![CDATA[is_function]]></c> | <c><![CDATA[is_record]]></c> | <c><![CDATA[is_seq_trace]]></c> |
+ <c><![CDATA['and']]></c> | <c><![CDATA['or']]></c> | <c><![CDATA['not']]></c> | <c><![CDATA['xor']]></c> |
+ <c><![CDATA[andalso]]></c> | <c><![CDATA[orelse]]></c></item>
+ <item>ConditionExpression ::= ExprMatchVariable | { GuardFunction } |
+ { GuardFunction, ConditionExpression, ... } | TermConstruct
+ </item>
+ <item>ExprMatchVariable ::= MatchVariable (bound in the MatchHead) |
+ <c><![CDATA['$_']]></c> | <c><![CDATA['$$']]></c></item>
+ <item>TermConstruct = {{}} | {{ ConditionExpression, ... }} |
+ <c><![CDATA[[]]]></c> | [ConditionExpression, ...] | NonCompositeTerm | Constant
+ </item>
+ <item>NonCompositeTerm ::= term() (not list or tuple)
+ </item>
+ <item>Constant ::= {<c><![CDATA[const]]></c>, term()}
+ </item>
+ <item>GuardFunction ::= BoolFunction | <c><![CDATA[abs]]></c> |
+ <c><![CDATA[element]]></c> | <c><![CDATA[hd]]></c> | <c><![CDATA[length]]></c> | <c><![CDATA[node]]></c> |
+ <c><![CDATA[round]]></c> | <c><![CDATA[size]]></c> | <c><![CDATA[tl]]></c> | <c><![CDATA[trunc]]></c> |
+ <c><![CDATA['+']]></c> | <c><![CDATA['-']]></c> | <c><![CDATA['*']]></c> | <c><![CDATA['div']]></c> |
+ <c><![CDATA['rem']]></c> | <c><![CDATA['band']]></c> | <c><![CDATA['bor']]></c> | <c><![CDATA['bxor']]></c> |
+ <c><![CDATA['bnot']]></c> | <c><![CDATA['bsl']]></c> | <c><![CDATA['bsr']]></c> | <c><![CDATA['>']]></c> |
+ <c><![CDATA['>=']]></c> | <c><![CDATA['<']]></c> | <c><![CDATA['=<']]></c> | <c><![CDATA['=:=']]></c> |
+ <c><![CDATA['==']]></c> | <c><![CDATA['=/=']]></c> | <c><![CDATA['/=']]></c> | <c><![CDATA[self]]></c> |
+ <c><![CDATA[get_tcw]]></c></item>
+ <item>MatchBody ::= [ ConditionExpression, ... ]</item>
+ </list>
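+  <p>As a small (hypothetical) example of the ETS dialect, assume a table
+  <c>Tab</c> whose objects are <c>{Name, Count}</c> tuples. The following
+  call returns the names of all objects with a count greater than 10:</p>
+<code type="none"><![CDATA[
+ets:select(Tab, [{{'$1','$2'},
+                  [{'>','$2',10}],
+                  ['$1']}]).
+]]></code>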
</section>
<section>
@@ -453,8 +499,8 @@
<section>
<title>Differences between match specifications in ETS and tracing</title>
<p>ETS match specifications are there to produce a return
- value. Usually the expression contains one single
- <c><![CDATA[ActionTerm]]></c> which defines the return value without having
+ value. Usually the <c><![CDATA[MatchBody]]></c> contains one single
+ <c><![CDATA[ConditionExpression]]></c> which defines the return value without having
any side effects. Calls with side effects are not allowed in the
ETS context.</p>
<p>When tracing there is no return value to produce, the
@@ -530,7 +576,7 @@
the atom 'strider' and the tuple arity is 3 and return the whole
object.</p>
<code type="none"><![CDATA[
-[{{strider,'_'.'_'},
+[{{strider,'_','_'},
[],
['$_']}]
]]></code>
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 824ad6d94e..6174917807 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -30,6 +30,496 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 5.8</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Driver threads, such as async threads, using <seealso
+ marker="erl_driver#ErlDrvPDL">port data locks</seealso>
+ peeked at the port status field without proper locking
+ when looking up the driver queue.</p>
+ <p>
+ Own Id: OTP-8475</p>
+ </item>
+ <item>
+ <p>
+ The use of <c>mmap()</c> was unnecessarily disabled when
+ cross compiling.</p>
+ <p>
+            The <c>configure</c> arguments <c>--with-ssl</c> and
+            <c>--with-odbc</c> needlessly refused to accept libraries
+            outside of <c>$erl_xcomp_sysroot</c> when cross compiling.</p>
+ <p>
+ The <c>configure</c> argument <c>--with-odbc</c> didn't
+            handle the value <c>yes</c> correctly.</p>
+ <p>
+ The <c>configure</c> arguments <c>--with-odbc</c>, and
+ <c>--without-odbc</c> have also been added to the
+ configure help.</p>
+ <p>
+ (Thanks to Steve Vinoski for reporting these issues)</p>
+ <p>
+ Own Id: OTP-8484</p>
+ </item>
+ <item>
+ <p>
+ A call to the BIF <c>unregister(RegName)</c> when a port
+ had the name <c>RegName</c> registered in the runtime
+ system without SMP support caused a runtime system crash.
+ (Thanks to Per Hedeland for the bugfix and test case.)</p>
+ <p>
+ Own Id: OTP-8487</p>
+ </item>
+ <item>
+ <p>
+ The runtime system crashed if fewer logical processors
+            were found than reported by
+            <c>sysconf(_SC_NPROCESSORS_CONF)</c>.</p>
+ <p>
+ Own Id: OTP-8549</p>
+ </item>
+ <item>
+ <p>
+            Fixed a memory management bug causing a crash of the non-SMP
+            emulator with async threads enabled. The bug first
+            appeared in R13B03.</p>
+ <p>
+ Own Id: OTP-8591 Aux Id: seq11554 </p>
+ </item>
+ <item>
+ <p>
+ Port locks could be prematurely destroyed.</p>
+ <p>
+ Own Id: OTP-8612</p>
+ </item>
+ <item>
+          <p>The <c>epmd</c> program could loop and consume 100%
+            CPU time if an unexpected error occurred in
+ <c>listen()</c> or <c>accept()</c>. Now <c>epmd</c> will
+ terminate if a non-recoverable error occurs. (Thanks to
+ Michael Santos.)</p>
+ <p>
+ Own Id: OTP-8618</p>
+ </item>
+ <item>
+ <p>
+ When kernel poll has been enabled, a livelock could in
+ rare circumstances occur. Problem reported by Chetan
+ Ahuja, fix by Mikael Pettersson.</p>
+ <p>
+ Own Id: OTP-8632</p>
+ </item>
+ <item>
+ <p>
+ Windows: Closing port of program that stalled without
+ reading all data could deadlock scheduler thread.</p>
+ <p>
+ Own Id: OTP-8641</p>
+ </item>
+ <item>
+ <p>
+            On some combinations of MontaVista Linux on Cavium Octeon
+            processors, some socket-related system calls returned
+            values other than -1 on errors. This caused a core dump
+ in inet_drv.c. Now the code works around this problem.</p>
+ <p>
+ Own Id: OTP-8654</p>
+ </item>
+ <item>
+ <p>
+ Missing memory barriers in <c>erts_poll()</c> could cause
+ the runtime system to hang indefinitely.</p>
+ <p>
+ Own Id: OTP-8658</p>
+ </item>
+ <item>
+ <p>
+ <c>ethr_rwmutex_tryrlock()</c> acquired and refused to
+ acquire a lock with inverted logic. The lock was however
+ never acquired in a thread unsafe manner. (Thanks to JR
+ Zhang for noting this issue)</p>
+ <p>
+ Own Id: OTP-8662</p>
+ </item>
+ <item>
+ <p>
+ Extreme combinations of register/unregister in a highly
+            parallel SMP application could crash the VM. The error
+ is corrected.</p>
+ <p>
+ Own Id: OTP-8663</p>
+ </item>
+ <item>
+ <p>
+ On Windows, files are now opened with FILE_SHARE_DELETE
+ to get closer to Unix semantics.</p>
+ <p>
+ Own Id: OTP-8667</p>
+ </item>
+ <item>
+ <p>
+ <c>erlang:system_info(multi_scheduling)</c> sometimes
+ erroneously returned <c>enabled</c> when it should have
+ returned <c>blocked</c>.</p>
+ <p>
+ Own Id: OTP-8675</p>
+ </item>
+ <item>
+ <p>
+ Fix bug causing <c>erlang:decode_packet</c> and
+ <c>enif_make_string</c> to generate faulty strings with
+            negative character values for ASCII values larger than
+ 127. (Thanks to Paul Guyot)</p>
+ <p>
+ Own Id: OTP-8685</p>
+ </item>
+ <item>
+ <p>
+ <c>open_port/2</c> with the <c>spawn</c> and
+ <c>spawn_executable</c> options can include an
+ <c>{env,Env}</c> option. In some cases unsetting
+ variables would not work on Unix (typically if more
+ variables were unset than were actually present in the
+ environment).</p>
+ <p>
+ Own Id: OTP-8701</p>
+ </item>
+ <item>
+ <p>
+ A user defined CPU topology set via a call to <seealso
+ marker="erlang#system_flag_cpu_topology">erlang:system_flag(cpu_topology,
+ CPUTopology)</seealso> was not properly verified, and
+ could in worst case cause an emulator crash. The emulator
+ crash could only occur when a user defined CPU topology
+ already existed and was redefined.</p>
+ <p>
+ Own Id: OTP-8710</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ The grammar for match specifications in ERTS users guide
+ only described the tracing dialect of match
+ specifications. An informal grammar for the ETS dialect
+ is added.</p>
+ <p>
+ Own Id: OTP-8086 Aux Id: seq11333 </p>
+ </item>
+ <item>
+ <p>
+ The module binary from EEP31 (and EEP9) is implemented.</p>
+ <p>
+ Own Id: OTP-8217</p>
+ </item>
+ <item>
+ <p>
+ New NIF API function <c>enif_make_new_binary</c></p>
+ <p>
+ Own Id: OTP-8474</p>
+ </item>
+ <item>
+ <p>
+ The guard BIF <c>is_boolean/1</c> (introduced in R10B)
+ has now been included in the lists of BIFs allowed in
+ guards in the Reference Manual.</p>
+ <p>
+ Own Id: OTP-8477</p>
+ </item>
+ <item>
+ <p>
+ Added function <c>zip:foldl/3</c> to iterate over zip
+ archives.</p>
+ <p>
+ Added functions to create and extract escripts. See
+ <c>escript:create/2</c> and <c>escript:extract/2</c>.</p>
+ <p>
+ The undocumented function <c>escript:foldl/3</c> has been
+ removed. The same functionality can be achieved with the
+ more flexible functions <c>escript:extract/2</c> and
+ <c>zip:foldl/3</c>.</p>
+ <p>
+            Record fields have been annotated with type info. Source
+            files have been adapted to fit within 80 characters and trailing
+            whitespace has been removed.</p>
+ <p>
+ Own Id: OTP-8521</p>
+ </item>
+ <item>
+          <p>A regular expression with many levels of parentheses
+ could cause a buffer overflow. That has been corrected.
+ (Thanks to Michael Santos.)</p>
+ <p>
+ Own Id: OTP-8539</p>
+ </item>
+ <item>
+ <p>
+ <c>erlang:decode_packet(httph_bin,..)</c> could return
+ corrupt header strings or even crash the VM. This has
+            been fixed. It only happened on a 32-bit VM if the header
+            name was unknown and between 16 and 20 characters long.
+            Sockets with a similar <c>packet</c> option did not suffer
+ from this bug.</p>
+ <p>
+ Own Id: OTP-8548</p>
+ </item>
+ <item>
+ <p>New NIF features:</p> <list><item> Send messages from
+ a NIF, or from thread created by NIF, to any local
+ process (<c>enif_send</c>) </item><item> Store terms
+ between NIF calls (<c>enif_alloc_env</c>,
+ <c>enif_make_copy</c>) </item><item> Create binary terms
+ with user defined memory management
+ (<c>enif_make_resource_binary</c>) </item></list> <p>And
+ some incompatible changes made to the API. For more
+ information see the warning text in <seealso
+ marker="erl_nif">erl_nif(3)</seealso>.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-8555</p>
+ </item>
+ <item>
+ <p>If the '<c>fop</c>' program (needed for building PDF
+          files) cannot be found, it is now possible to build
+ the HTML and man pages anyway (there will also be dummy
+ PDF files with no real content created). (Thanks to
+ Tuncer Ayaz.)</p>
+ <p>
+ Own Id: OTP-8559</p>
+ </item>
+ <item>
+          <p>When defining macros, the closing right parenthesis
+ before the dot is now mandatory.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-8562</p>
+ </item>
+ <item>
+ <p>Local and imported functions now override the
+ auto-imported BIFs when the names clash. The pre R14
+ behaviour was that auto-imported BIFs would override
+ local functions. To avoid that old programs change
+ behaviour, the following will generate an error:</p>
+ <list><item><p>Doing a call without explicit module name
+ to a local function having a name clashing with the name
+ of an auto-imported BIF that was present (and
+ auto-imported) before OTP R14A</p></item>
+ <item><p>Explicitly importing a function having a name
+ clashing with the name of an autoimported BIF that was
+ present (and autoimported) before OTP R14A</p></item>
+ <item><p>Using any form of the old compiler directive
+ <c>nowarn_bif_clash</c></p></item> </list> <p>If the BIF
+ was added or auto-imported in OTP R14A or later,
+ overriding it with an import or a local function will
+          only result in a warning.</p> <p>To resolve clashes, you
+ can either use the explicit module name <c>erlang</c> to
+ call the BIF, or you can remove the auto-import of that
+ specific BIF by using the new compiler directive
+ <c>-compile({no_auto_import,[F/A]}).</c>, which makes all
+ calls to the local or imported function without explicit
+ module name pass without warnings or errors.</p> <p>The
+ change makes it possible to add auto-imported BIFs
+ without breaking or silently changing old code in the
+          future. However, some current code ingeniously utilizing
+          the old behaviour or the <c>nowarn_bif_clash</c> compiler
+          directive might need changing to be accepted by the
+ compiler.</p>
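+          <p>A minimal sketch of such a resolution, for a module that
+          deliberately defines its own <c>size/1</c>:</p>
+          <code type="none"><![CDATA[
+-module(my_mod).
+-export([size/1]).
+-compile({no_auto_import,[size/1]}).
+
+%% Local size/1; the BIF is still reachable as erlang:size/1.
+size(Thing) ->
+    erlang:size(Thing).
+]]></code>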
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-8579</p>
+ </item>
+ <item>
+ <p>
+ A bug in re that could cause certain regular expression
+ matches never to terminate is corrected. (Thanks to
+ Michael Santos and Gordon Guthrie.)</p>
+ <p>
+ Own Id: OTP-8589</p>
+ </item>
+ <item>
+ <p>The erlang:open_port spawn and spawn_executable
+ directives can include an <c>{env,Env}</c> directive to
+ set up environment variables for the spawned process. A
+ bug prevented applications from using <c>{env,Env}</c> to
+ set an environment variable whose value ended with a
+ '<c>=</c>' (equal sign) character; the trailing equal
+          sign was mistaken for an indication that an environment
+ variable was to be cleared from the environment of the
+ spawned process. (Thanks to Steve Vinoski.)</p>
+ <p>
+ Own Id: OTP-8614</p>
+ </item>
+ <item>
+ <p><c>receive</c> statements that can only read out a
+ newly created reference are now specially optimized so
+          that they will execute in constant time regardless of the
+ number of messages in the receive queue for the process.
+ That optimization will benefit calls to
+ <c>gen_server:call()</c>. (See <c>gen:do_call/4</c> for
+ an example of a receive statement that will be
+ optimized.)</p>
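+          <p>A minimal sketch of a receive that benefits from the
+          optimization (the reference is created immediately before the
+          <c>receive</c>, and every clause matches on it):</p>
+          <code type="none"><![CDATA[
+call(Server, Request) ->
+    Ref = erlang:monitor(process, Server),
+    Server ! {request, self(), Ref, Request},
+    receive
+        {reply, Ref, Reply} ->
+            erlang:demonitor(Ref, [flush]),
+            Reply;
+        {'DOWN', Ref, process, _, Reason} ->
+            exit(Reason)
+    end.
+]]></code>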
+ <p>
+ Own Id: OTP-8623</p>
+ </item>
+ <item>
+ <p>
+ The functions file:advise/4 and file:datasync/1 have been
+ added. (Thanks to Filipe David Manana.)</p>
+ <p>
+ Own Id: OTP-8637</p>
+ </item>
+ <item>
+ <p>
+ New NIF API functions: <c>enif_make_atom_len</c>,
+ <c>enif_make_existing_atom_len</c>,
+ <c>enif_make_string_len</c>, <c>enif_get_atom_length</c>,
+ <c>enif_get_list_length</c>, <c>enif_is_list</c>,
+ <c>enif_is_tuple</c> (by Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-8640</p>
+ </item>
+ <item>
+ <p>
+ Support for using gcc's built-in functions for atomic
+ memory access has been added. This functionallity will be
+            memory access has been added. This functionality will be
+ implementation in ERTS is available.</p>
+ <p>
+ Own Id: OTP-8659</p>
+ </item>
+ <item>
+ <p>
+ The number of spinlocks used when implementing atomic
+ fall-backs when no native atomic implementation is
+ available has been increased from 16 to 1024.</p>
+ <p>
+ Own Id: OTP-8660</p>
+ </item>
+ <item>
+ <p>
+            Writer-preferred pthread read/write locks have been
+ enabled on Linux.</p>
+ <p>
+ Own Id: OTP-8661</p>
+ </item>
+ <item>
+ <p>
+ The runtime system will by default bind schedulers to
+ logical processors using the <c>default_bind</c> bind
+      type if the number of schedulers is at least equal to
+      the number of logical processors configured, binding of
+ schedulers is supported, and a CPU topology is available
+ at startup. </p>
+ <p>
+ <em>NOTE:</em> If the Erlang runtime system is the only
+ operating system process that binds threads to logical
+ processors, this improves the performance of the runtime
+      system. However, if other operating system processes (for
+      example, another Erlang runtime system) also bind
+      threads to logical processors, there might be a
+      performance penalty instead. If this is the case, you are
+      advised to unbind the schedulers using the <seealso
+ marker="erl#+sbt">+sbtu</seealso> command line argument,
+ or by invoking <seealso
+ marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type,
+ unbound)</seealso>.</p>
+ <p>
+ Own Id: OTP-8666</p>
+ </item>
+ <item>
+ <p>
+ The recently added BIFs erlang:min/2, erlang:max/2 and
+ erlang:port_command/3 are now auto-imported (as they were
+ originally intended to be). Due to the recent compiler
+ change (OTP-8579), the only impact on old code defining
+      its own min/2, max/2 or port_command/3 functions will be
+      a warning; the local functions will still be used. The
+ warning can be removed by using
+ -compile({no_auto_import,[min/2,max/2,port_command/3]}).
+ in the source file.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-8669 Aux Id: OTP-8579 </p>
+ </item>
+ <item>
+ <p>
+ There is a new option 'exclusive' to file:open/2 that
+ uses the OS O_EXCL flag where supported to open the file
+ in exclusive mode.</p>
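+      <p>For example (the second call fails because the file already
+      exists; the io device in the first result is only illustrative):</p>
+      <code type="none"><![CDATA[
+1> file:open("lock.file", [write, exclusive]).
+{ok,<0.43.0>}
+2> file:open("lock.file", [write, exclusive]).
+{error,eexist}
+]]></code>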
+ <p>
+ Own Id: OTP-8670</p>
+ </item>
+ <item>
+ <p>
+ Now, binary_to_term/2 is auto-imported. This will cause a
+      compile warning if and only if a module has a local
+ function with that name.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-8671</p>
+ </item>
+ <item>
+ <p>
+      The alignment of scheduler data and run queues was adjusted.</p>
+ <p>
+ Own Id: OTP-8673</p>
+ </item>
+ <item>
+ <p>Call time breakpoint tracing</p> <list><item>Introduce
+ a <c>call_time</c> option to
+      <c>erlang:trace_pattern/3</c>. This option enables call
+ time breakpoint tracing on code that is executed by
+ processes with call tracing enabled. Call time tracing
+      stores the number of calls and the time spent in each
+      function with this trace pattern enabled. The information
+      can be retrieved with <c>erlang:trace_info/2</c>.</item>
+ <item>Add a scheduler array for BpData. To solve the
+ issue of multiple schedulers constantly updating the head
+ pointer to the bp data wheel, each scheduler now has its
+      own entry point to the wheel. This head pointer can be
+      updated without a lock being taken.</item></list>
+ <p>Teach call count tracing to use atomics</p>
+ <list><item>Call count previously used a global lock for
+ accessing and writing its counter in the breakpoint. This
+ is now changed to atomics instead.</item> <item>The
+      change will let call count tracing and cprof scale
+ better when increasing the number of
+ schedulers.</item></list>
+ <p>
+ Own Id: OTP-8677</p>
+ </item>
+ <item>
+ <p><c>eprof</c> has been reimplemented with support in
+ the Erlang virtual machine and is now both faster (i.e.
+ slows down the code being measured less) and scales much
+ better. In measurements we saw speed-ups compared to the
+ old eprof ranging from 6 times (for sequential code that
+ only uses one scheduler/core) up to 84 times (for
+ parallel code that uses 8 cores).</p>
+ <p>Note: The API for the <c>eprof</c> has been cleaned up
+ and extended. See the documentation.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-8706</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 5.7.5.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -144,7 +634,7 @@
<item>
<p>A number of bugs concerning re and unicode are
corrected:</p>
- <p>re:compile no longer looses unicode option, which also
+ <p>re:compile no longer loses unicode option, which also
fixes bug in re:split.</p>
<p>re:replace now handles unicode charlist replacement
argument</p>
@@ -4716,7 +5206,7 @@
</item>
<item>
<p>The runtime system with SMP support did not slowly adjust
- it's view of time when the system time suddenly changed.</p>
+ its view of time when the system time suddenly changed.</p>
<p>Timeouts could sometimes timeout too early on the runtime
system with SMP support.</p>
<p>Own Id: OTP-6202</p>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index a81ab28847..ed0d1b3fa6 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -61,11 +61,7 @@ ifeq ($(TYPE),quantify)
PURIFY = quantify $(QUANTIFY_BUILD_OPTIONS)
TYPEMARKER = .quantify
ENABLE_ALLOC_TYPE_VARS += quantify
-ifeq ($(findstring ose,$(TARGET)),ose)
- TYPE_FLAGS = @CFLAGS@ -DQUANTIFY
-else
TYPE_FLAGS = @CFLAGS@ -g -O2 -DQUANTIFY -DNO_JUMP_TABLE
-endif
else
ifeq ($(TYPE),purecov)
@@ -203,6 +199,14 @@ MKDIR = @MKDIR@
USING_MINGW=@MIXED_CYGWIN_MINGW@
+ifeq ($(TARGET),win32)
+LIB_PREFIX=
+LIB_SUFFIX=.lib
+else
+LIB_PREFIX=lib
+LIB_SUFFIX=.a
+endif
+
OMIT_OMIT_FP=no
ifeq (@EMU_LOCK_CHECKING@,yes)
@@ -283,16 +287,11 @@ endif
ifeq ($(TARGET),win32)
LIBS += -L$(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE) -lepcre
-DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/epcre.lib
else
-LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a
-DEPLIBS += \
- $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/libepcre.a \
- $(ERL_TOP)/erts/lib/internal/$(TARGET)/liberts_internal.a
-# rem liberts_internal.a
+LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
endif
-ELIB_FLAGS = -DENABLE_ELIB_MALLOC -DELIB_ALLOC_IS_CLIB -DELIB_HEAP_SBRK
+DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
PERFCTR_PATH=@PERFCTR_PATH@
USE_PERFCTR=@USE_PERFCTR@
@@ -309,7 +308,19 @@ LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
THR_LIB_NAME=@EMU_THR_LIB_NAME@
-THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER),$(ORG_THR_LIBS))
+ifneq ($(strip $(THR_LIB_NAME)),)
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal_r$(TYPEMARKER)$(LIB_SUFFIX) \
+ $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)ethread$(TYPEMARKER)$(LIB_SUFFIX)
+else
+DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal$(TYPEMARKER)$(LIB_SUFFIX)
+endif
+
+THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER), \
+ $(subst -lerts_internal_r,-lerts_internal_r$(TYPEMARKER),$(ORG_THR_LIBS)))
+
+LIBS += $(THR_LIBS)
+
+ifneq ($(findstring erts_internal_r, $(THR_LIBS)),erts_internal_r)
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
ERTS_INTERNAL_LIB=erts_internal
@@ -321,7 +332,9 @@ ERTS_INTERNAL_LIB=erts_internal
endif
endif
-LIBS += $(THR_LIBS) -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
+
+endif # erts_internal_r
LIBS += @LIBRT@
@@ -345,7 +358,6 @@ ERLANG_OSTYPE = @ERLANG_OSTYPE@
ENABLE_ALLOC_TYPE_VARS += @ERLANG_OSTYPE@
-EMULATOR_EXECUTABLE_SAE = beam_evm$(TF_MARKER)
EMULATOR_EXECUTABLE_ELIB = beam.elib$(TF_MARKER)
ifeq ($(TARGET), win32)
EMULATOR_EXECUTABLE = beam$(TF_MARKER).dll
@@ -410,7 +422,7 @@ endif
@set -e ; cd zlib && $(MAKE) clean
@set -e ; cd pcre && $(MAKE) clean
-.PHONY: all sae zlib pcre clean
+.PHONY: all zlib pcre clean
docs:
@@ -423,12 +435,6 @@ RELEASE_INCLUDES = beam/erl_driver.h sys/$(ERLANG_OSTYPE)/driver_int.h beam/erl_
ifeq ($(TARGET),win32)
RELEASE_INCLUDES += sys/$(ERLANG_OSTYPE)/erl_win_dyn_driver.h
endif
-ifeq ($(findstring ose,$(TARGET)),ose)
-RELEASE_INCLUDES += sys/$(ERLANG_OSTYPE)/erl_port_signals.sig \
- sys/$(ERLANG_OSTYPE)/ose_erl_port_prog.h \
- drivers/$(ERLANG_OSTYPE)/ose_erl_driver.h
-
-endif
ifeq ($(TYPE)-@HAVE_VALGRIND@,valgrind-no)
release_spec:
@@ -452,8 +458,6 @@ release_spec: all
ifeq ($(ERLANG_OSTYPE), unix)
$(INSTALL_PROGRAM) $(BINDIR)/$(CS_EXECUTABLE) $(RELSYSDIR)/bin
endif
- $(INSTALL_DIR) $(RELEASE_PATH)/usr/include/obsolete
- $(INSTALL_DATA) obsolete/driver.h $(RELEASE_PATH)/usr/include/obsolete
endif
endif
@@ -480,8 +484,6 @@ endif
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
else
-ifeq ($(findstring ose,$(TARGET)),ose)
-else
ifdef HIPE_ENABLED
GENERATE += $(TTF_DIR)/hipe_x86_asm.h \
$(TTF_DIR)/hipe_amd64_asm.h \
@@ -492,7 +494,6 @@ GENERATE += $(TTF_DIR)/hipe_x86_asm.h \
$(BINDIR)/hipe_mkliterals$(TF_MARKER)
endif
endif
-endif
ifeq ($(FLAVOR)-@ERTS_BUILD_SMP_EMU@,smp-no)
GENERATE=
@@ -592,7 +593,6 @@ ifeq ($(findstring vxworks,$(TARGET)),vxworks)
INCLUDES += -I$(ERL_TOP)/erts/etc/vxworks
endif
-ifneq ($(findstring ose,$(TARGET)),ose)
ifeq ($(TARGET),win32)
# Usually the same as the default rule, but certain platforms (i.e. win32) mix
# different compilers
@@ -618,33 +618,6 @@ endif
$(OBJDIR)/%.o: beam/%.c
$(CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
-else
-
-INCLUDES += -Idrivers/ose
-
-ifeq ($(TYPE),debug)
-$(OBJDIR)/%.o: beam/%.c
- $(CC) $(CFLAGS) -DNO_JUMP_TABLE $(INCLUDES) -c $< -o $@
-else
-
-VXCC=@VXCC@
-VXCFLAGS=@VXCFLAGS@
-CFLAGS_NOOPT=@CFLAGS_NOOPT@ @DEFS@ $(WFLAGS) $(THR_DEFS) $(ARCHCFLAGS)
-
-# we want to use jump table
-$(OBJDIR)/beam_emu.o: beam/beam_emu.c
- $(VXCC) $(VXCFLAGS) $(INCLUDES) -c $< -o $@
-
-# erl_process does not work properly with DIAB's -XO option,
-# we'll compile it with gcc instead
-$(OBJDIR)/erl_process.o: beam/erl_process.c
- $(VXCC) $(VXCFLAGS) $(INCLUDES) -c $< -o $@
-
-$(OBJDIR)/%.o: beam/%.c
- $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
-endif
-endif
-
$(OBJDIR)/%.o: $(TARGET)/%.c
$(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -c $< -o $@
@@ -663,15 +636,11 @@ $(OBJDIR)/%.o: drivers/common/%.c
$(OBJDIR)/%.o: drivers/$(ERLANG_OSTYPE)/%.c
$(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -I../etc/$(ERLANG_OSTYPE) -c $< -o $@
-# VxWorks and OSE uses unix drivers too...
+# VxWorks uses unix drivers too...
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
$(OBJDIR)/%.o: drivers/unix/%.c
$(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -c $< -o $@
endif
-ifeq ($(findstring ose,$(TARGET)),ose)
-$(OBJDIR)/%.o: drivers/unix/%.c
- $(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -c $< -o $@
-endif
# ----------------------------------------------------------------------
# Specials
@@ -682,13 +651,6 @@ $(BINDIR)/$(CS_EXECUTABLE): $(CS_SRC)
$(CS_PURIFY) $(CC) $(CS_LDFLAGS) -o $(BINDIR)/$(CS_EXECUTABLE) \
$(CS_CFLAGS) $(COMMON_INCLUDES) $(CS_SRC) $(CS_LIBS)
-$(OBJDIR)/%.elib.o: beam/%.c
- $(CC) $(ELIB_FLAGS) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
-
-# Disable vfork() for sae (then we don't need the child_setup program)
-$(OBJDIR)/sys_sae.o: sys/$(ERLANG_OSTYPE)/sys.c
- $(CC) -DDISABLE_VFORK=1 $(CFLAGS) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/%.kp.o: sys/common/%.c
$(CC) -DERTS_KERNEL_POLL_VERSION $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
@@ -697,9 +659,6 @@ $(OBJDIR)/%.nkp.o: sys/common/%.c
ifeq ($(GCC),yes)
-$(OBJDIR)/erl_obsolete.o: beam/erl_obsolete.c
- $(CC) $(subst -Wstrict-prototypes, , $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS))) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/erl_goodfit_alloc.o: beam/erl_goodfit_alloc.c
$(CC) $(subst -O2, $(GEN_OPT_FLGS) $(UNROLL_FLG), $(CFLAGS)) $(INCLUDES) -c $< -o $@
endif
@@ -734,8 +693,6 @@ endif
INIT_OBJS = $(OBJDIR)/erl_main.o $(PRELOAD)
-INIT_OBJS_SAE = $(OBJDIR)/erl9_start.o
-
EMU_OBJS = \
$(OBJDIR)/beam_emu.o $(OBJDIR)/beam_opcodes.o \
$(OBJDIR)/beam_load.o $(OBJDIR)/beam_bif_load.o \
@@ -776,11 +733,12 @@ RUN_OBJS = \
$(OBJDIR)/erl_fun.o $(OBJDIR)/erl_bif_port.o \
$(OBJDIR)/erl_term.o $(OBJDIR)/erl_node_tables.o \
$(OBJDIR)/erl_monitors.o $(OBJDIR)/erl_process_dump.o \
- $(OBJDIR)/erl_obsolete.o $(OBJDIR)/erl_bif_timer.o \
+ $(OBJDIR)/erl_bif_timer.o \
$(OBJDIR)/erl_drv_thread.o $(OBJDIR)/erl_bif_chksum.o \
$(OBJDIR)/erl_bif_re.o $(OBJDIR)/erl_unicode.o \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
- $(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o
+ $(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o \
+ $(OBJDIR)/erl_bif_binary.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
@@ -799,29 +757,21 @@ OS_OBJS = \
$(OBJDIR)/sys_time.o \
$(OBJDIR)/sys_interrupt.o \
$(OBJDIR)/sys_env.o \
- $(OBJDIR)/dosmap.o \
- $(OBJDIR)/elib_malloc.o
+ $(OBJDIR)/dosmap.o
else
OS_OBJS = \
$(OBJDIR)/sys.o \
$(OBJDIR)/driver_tab.o \
$(OBJDIR)/unix_efile.o \
$(OBJDIR)/gzio.o \
- $(OBJDIR)/elib_malloc.o \
$(OBJDIR)/elib_memmove.o
-ifeq ($(findstring ose,$(TARGET)),ose)
- OS_OBJS += $(OBJDIR)/erl_port_init.o \
- $(OBJDIR)/ose_inet_sock_select.o \
- $(OBJDIR)/ose_sfp.o
-else
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
OS_OBJS += $(OBJDIR)/int64.o
else
OS_OBJS += $(OBJDIR)/sys_float.o \
$(OBJDIR)/sys_time.o
endif
-endif
DRV_OBJS = \
$(OBJDIR)/efile_drv.o \
$(OBJDIR)/inet_drv.o \
@@ -830,11 +780,7 @@ DRV_OBJS = \
endif
ifneq ($(findstring vxworks,$(TARGET)),vxworks)
- ifeq ($(findstring ose,$(TARGET)),ose)
- DRV_OBJS += $(OBJDIR)/ose_inet_drv.o
- else
DRV_OBJS += $(OBJDIR)/ttsl_drv.o
- endif
endif
ifeq ($(ERTS_ENABLE_KERNEL_POLL),yes)
@@ -880,17 +826,6 @@ endif
BASE_OBJS = $(RUN_OBJS) $(EMU_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
OBJS = $(BASE_OBJS) $(DRV_OBJS)
-OBJS_SAE = $(subst sys.o,sys_sae.o,$(OBJS))
-
-ELIB_C_FILES = beam/elib_malloc.c \
- beam/elib_memmove.c \
- beam/erl_bif_info.c \
- beam/utils.c \
- beam/erl_alloc.c
-
-MOD_OBJS_ELIB = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(ELIB_C_FILES)))
-OBJS_ELIB = $(patsubst %.o,%.elib.o,$(MOD_OBJS_ELIB)) \
- $(filter-out $(MOD_OBJS_ELIB),$(OBJS))
########################################
# HiPE section
@@ -968,33 +903,6 @@ $(TARGET)/int64.c:
endif
-ifeq ($(findstring ose,$(TARGET)),ose)
-# Extract soft float functions from libgcc.a (for beam_emu)
-VXCC=@VXCC@
-VXCFLAGS=@VXCFLAGS@
-VXLD=@VXLD@
-VXLDFLAGS=@VXLDFLAGS@
-VXCCLIBFLAGS=@VXCCLIBFLAGS@
-STRIP=@STRIP@
-SYMPREFIX=@SYMPREFIX@
-
-NEEDFUNCTIONS=__floatsidf __adddf3 __negdf2 __muldf3 __divdf3 __subdf3
-KEEPSYMS=$(NEEDFUNCTIONS:%=-K $(SYMPREFIX)%)
-
-$(OBJDIR)/ose_sfp.o: $(TARGET)/ose_sfp.c
- $(VXCC) $(VXCFLAGS) -o $(OBJDIR)/ose_sfp_tmp.o -c $(TARGET)/ose_sfp.c
- $(VXLD) -o $(OBJDIR)/ose_sfp.o $(OBJDIR)/ose_sfp_tmp.o $(VXLDFLAGS) $(VXCCLIBFLAGS)
- $(STRIP) $(KEEPSYMS) $(OBJDIR)/ose_sfp.o
-
-$(TARGET)/ose_sfp.c:
- echo 'void dummy(void); void dummy(void) {' > $(TARGET)/ose_sfp.c
- for x in $(NEEDFUNCTIONS); do echo 'extern void '$$x'();' \
- >> $(TARGET)/ose_sfp.c; done
- for x in $(NEEDFUNCTIONS); do echo $$x'();' >> $(TARGET)/ose_sfp.c; done
- echo '}' >> $(TARGET)/ose_sfp.c
-
-endif
-
# ----------------------------------------------------------------------
# The emulator itself
@@ -1010,14 +918,6 @@ $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
$(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \
$(HIPEBEAMLDFLAGS) $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) $(LIBS)
-$(BINDIR)/$(EMULATOR_EXECUTABLE_ELIB): $(INIT_OBJS) $(OBJS_ELIB) $(DEPLIBS)
- $(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE_ELIB) \
- $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS_ELIB) $(LIBS)
-
-$(BINDIR)/$(EMULATOR_EXECUTABLE_SAE): $(INIT_OBJS_SAE) $(OBJS_SAE) $(DEPLIBS)
- $(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE_SAE) \
- $(LDFLAGS) $(DEXPORT) $(INIT_OBJS_SAE) $(OBJS_SAE) $(LIBS)
-
endif
#
@@ -1112,7 +1012,7 @@ depend:
$(DEP_CC) $(DEP_FLAGS) $(TARGET_SRC) \
| $(SED_DEPEND) >> $(TARGET)/depend.mk
ifneq ($(TARGET),win32)
- $(DEP_CC) $(DEP_FLAGS) $(ELIB_FLAGS) $(ELIB_C_FILES) \
+ $(DEP_CC) $(DEP_FLAGS) $(ELIB_C_FILES) \
| $(SED_ELIB_DEPEND) >> $(TARGET)/depend.mk
endif
ifdef HIPE_ENABLED
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index e2a79d6e4f..6b3c106a97 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -41,8 +41,7 @@ static erts_smp_rwmtx_t atom_table_lock;
#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
-#define atom_init_lock() erts_smp_rwmtx_init(&atom_table_lock, \
- "atom_tab")
+
#if 0
#define ERTS_ATOM_PUT_OPS_STAT
#endif
@@ -304,12 +303,17 @@ init_atom_table(void)
HashFunctions f;
int i;
Atom a;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_smp_atomic_init(&atom_put_ops, 0);
#endif
- atom_init_lock();
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
f.alloc = (HALLOC_FUN) atom_alloc;
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 57c8b08223..0815cdbc7f 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -65,6 +65,7 @@ atom EXIT='EXIT'
atom aborted
atom abs_path
atom absoluteURI
+atom ac
atom active
atom all
atom all_but_first
@@ -100,8 +101,15 @@ atom band
atom big
atom bif_return_trap
atom binary
+atom binary_bin_to_list_trap
+atom binary_copy_trap
+atom binary_longest_prefix_trap
+atom binary_longest_suffix_trap
+atom binary_match_trap
+atom binary_matches_trap
atom block
atom blocked
+atom bm
atom bnot
atom bor
atom bxor
@@ -115,6 +123,7 @@ atom busy_dist_port
atom busy_port
atom call
atom call_count
+atom call_time
atom caller
atom capture
atom case_clause
@@ -256,6 +265,7 @@ atom info
atom info_msg
atom initial_call
atom input
+atom internal
atom internal_error
atom internal_status
atom instruction_counts
@@ -422,6 +432,7 @@ atom raw
atom re
atom re_pattern
atom re_run_trap
+atom read_concurrency
atom ready_input
atom ready_output
atom ready_async
@@ -453,6 +464,7 @@ atom scheduler
atom scheduler_id
atom schedulers_online
atom scheme
+atom scope
atom sensitive
atom sequential_tracer
atom sequential_trace_token
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index b1feec7074..4fc271d41c 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -39,10 +39,10 @@ static Eterm check_process_code(Process* rp, Module* modp);
static void delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp);
static void delete_export_references(Eterm module);
static int purge_module(int module);
-static int is_native(Eterm* code);
+static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
-static void remove_from_address_table(Eterm* code);
+static void remove_from_address_table(BeamInstr* code);
Eterm
load_module_2(BIF_ALIST_2)
@@ -344,8 +344,8 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0;
set_default_trace_pattern(BIF_ARG_1);
} else if (BIF_ARG_2 == am_false) {
- Eterm* code;
- Eterm* end;
+ BeamInstr* code;
+ BeamInstr* end;
/*
* The on_load function failed. Remove the loaded code.
@@ -354,7 +354,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
*/
erts_total_code_size -= modp->code_length;
code = modp->code;
- end = (Eterm *)((char *)code + modp->code_length);
+ end = (BeamInstr *)((char *)code + modp->code_length);
erts_cleanup_funs_on_purge(code, end);
beam_catches_delmod(modp->catches, code, modp->code_length);
erts_free(ERTS_ALC_T_CODE, (void *) code);
@@ -368,7 +368,6 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
BIF_RET(am_true);
}
-
static void
set_default_trace_pattern(Eterm module)
{
@@ -397,13 +396,13 @@ set_default_trace_pattern(Eterm module)
static Eterm
check_process_code(Process* rp, Module* modp)
{
- Eterm* start;
+ BeamInstr* start;
char* mod_start;
Uint mod_size;
- Eterm* end;
+ BeamInstr* end;
Eterm* sp;
#ifndef HYBRID /* FIND ME! */
- ErlFunThing* funp;
+ struct erl_off_heap_header* oh;
int done_gc = 0;
#endif
@@ -418,7 +417,7 @@ check_process_code(Process* rp, Module* modp)
* Pick up limits for the module.
*/
start = modp->old_code;
- end = (Eterm *)((char *)start + modp->old_code_length);
+ end = (BeamInstr *)((char *)start + modp->old_code_length);
mod_start = (char *) start;
mod_size = modp->old_code_length;
@@ -471,27 +470,30 @@ check_process_code(Process* rp, Module* modp)
#ifndef HYBRID /* FIND ME! */
rescan:
- for (funp = MSO(rp).funs; funp; funp = funp->next) {
- Eterm* fun_code;
-
- fun_code = funp->fe->address;
-
- if (INSIDE((Eterm *) funp->fe->address)) {
- if (done_gc) {
- return am_true;
- } else {
- /*
- * Try to get rid of this fun by garbage collecting.
- * Clear both fvalue and ftrace to make sure they
- * don't hold any funs.
- */
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- done_gc = 1;
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- goto rescan;
+ for (oh = MSO(rp).first; oh; oh = oh->next) {
+ if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
+ ErlFunThing* funp = (ErlFunThing*) oh;
+ BeamInstr* fun_code;
+
+ fun_code = funp->fe->address;
+
+ if (INSIDE((BeamInstr *) funp->fe->address)) {
+ if (done_gc) {
+ return am_true;
+ } else {
+ /*
+ * Try to get rid of this fun by garbage collecting.
+ * Clear both fvalue and ftrace to make sure they
+ * don't hold any funs.
+ */
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ done_gc = 1;
+ FLAGS(rp) |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ goto rescan;
+ }
}
}
}
@@ -576,7 +578,7 @@ any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
case TAG_PRIMARY_LIST:
- if (in_area(val, mod_start, mod_size)) {
+ if (in_area(EXPAND_POINTER(val), mod_start, mod_size)) {
return 1;
}
break;
@@ -596,7 +598,7 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
case TAG_PRIMARY_LIST:
- if (in_area(val, mod_start, mod_size)) {
+ if (in_area(EXPAND_POINTER(val), mod_start, mod_size)) {
return 1;
}
break;
@@ -617,8 +619,8 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
static int
purge_module(int module)
{
- Eterm* code;
- Eterm* end;
+ BeamInstr* code;
+ BeamInstr* end;
Module* modp;
/*
@@ -653,7 +655,7 @@ purge_module(int module)
ASSERT(erts_total_code_size >= modp->old_code_length);
erts_total_code_size -= modp->old_code_length;
code = modp->old_code;
- end = (Eterm *)((char *)code + modp->old_code_length);
+ end = (BeamInstr *)((char *)code + modp->old_code_length);
erts_cleanup_funs_on_purge(code, end);
beam_catches_delmod(modp->old_catches, code, modp->old_code_length);
erts_free(ERTS_ALC_T_CODE, (void *) code);
@@ -665,7 +667,7 @@ purge_module(int module)
}
static void
-remove_from_address_table(Eterm* code)
+remove_from_address_table(BeamInstr* code)
{
int i;
@@ -738,11 +740,11 @@ delete_export_references(Eterm module)
Export *ep = export_list(i);
if (ep != NULL && (ep->code[0] == module)) {
if (ep->address == ep->code+3 &&
- (ep->code[3] == (Eterm) em_apply_bif)) {
+ (ep->code[3] == (BeamInstr) em_apply_bif)) {
continue;
}
ep->address = ep->code+3;
- ep->code[3] = (Uint) em_call_error_handler;
+ ep->code[3] = (BeamInstr) em_call_error_handler;
ep->code[4] = 0;
MatchSetUnref(ep->match_prog_set);
ep->match_prog_set = NULL;
@@ -774,7 +776,7 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
}
static int
-is_native(Eterm* code)
+is_native(BeamInstr* code)
{
return ((Eterm *)code[MI_FUNCTIONS])[1] != 0;
}
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 1abf1dc10c..682f31b83f 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -30,6 +30,7 @@
#include "error.h"
#include "erl_binary.h"
#include "beam_bp.h"
+#include "erl_term.h"
/* *************************************************************************
** Macros
@@ -100,6 +101,11 @@ do { \
(b)->prev = (a); \
} while (0)
+
+#define BREAK_IS_BIF (1)
+#define BREAK_IS_ERL (0)
+
+
/* *************************************************************************
** Local prototypes
*/
@@ -109,24 +115,42 @@ do { \
*/
static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, Uint break_op,
+ Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid);
static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, Uint break_op,
+ Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid);
-static int set_function_break(Module *modp, Uint *pc,
- Binary *match_spec, Uint break_op,
+static int set_function_break(Module *modp, BeamInstr *pc, int bif,
+ Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid);
static int clear_break(Eterm mfa[3], int specified,
- Uint break_op);
+ BeamInstr break_op);
static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
- Uint break_op);
-static int clear_function_break(Module *modp, Uint *pc,
- Uint break_op);
-
-static BpData *is_break(Uint *pc, Uint break_op);
-
+ BeamInstr break_op);
+static int clear_function_break(Module *modp, BeamInstr *pc, int bif,
+ BeamInstr break_op);
+
+static BpData *is_break(BeamInstr *pc, BeamInstr break_op);
+static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op);
+
+/* bp_hash */
+#define BP_TIME_ADD(pi0, pi1) \
+ do { \
+ Uint r; \
+ (pi0)->count += (pi1)->count; \
+ (pi0)->s_time += (pi1)->s_time; \
+ (pi0)->us_time += (pi1)->us_time; \
+ r = (pi0)->us_time / 1000000; \
+ (pi0)->s_time += r; \
+ (pi0)->us_time = (pi0)->us_time % 1000000; \
+ } while(0)
+
+static void bp_hash_init(bp_time_hash_t *hash, Uint n);
+static void bp_hash_rehash(bp_time_hash_t *hash, Uint n);
+static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
+static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
+static void bp_hash_delete(bp_time_hash_t *hash);
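The BP_TIME_ADD macro above folds one call-time item into another and then normalizes the microsecond field into whole seconds. A minimal standalone sketch of that accumulate-and-carry step (not part of the patch; the struct is a hypothetical stand-in for bp_data_time_item_t):

    #include <stdio.h>

    /* Hypothetical stand-in for the patch's bp_data_time_item_t. */
    struct time_item {
        unsigned long count;
        unsigned long s_time;    /* whole seconds */
        unsigned long us_time;   /* microseconds, kept below 1000000 */
    };

    /* Same idea as BP_TIME_ADD: add all fields, then carry microseconds
     * over into seconds so us_time stays normalized. */
    static void time_add(struct time_item *dst, const struct time_item *src)
    {
        unsigned long carry;

        dst->count   += src->count;
        dst->s_time  += src->s_time;
        dst->us_time += src->us_time;

        carry         = dst->us_time / 1000000;
        dst->s_time  += carry;
        dst->us_time %= 1000000;
    }

    int main(void)
    {
        struct time_item a = { 1, 2, 900000 };
        struct time_item b = { 1, 0, 300000 };

        time_add(&a, &b);
        /* prints: 2 calls, 3.200000 s */
        printf("%lu calls, %lu.%06lu s\n", a.count, a.s_time, a.us_time);
        return 0;
    }
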
/* *************************************************************************
@@ -145,7 +169,7 @@ erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
Eterm tracer_pid) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return set_break(mfa, specified, match_spec,
- (Uint) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
+ (BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
}
int
@@ -153,87 +177,84 @@ erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
Eterm tracer_pid) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return set_break(mfa, specified, match_spec,
- (Uint) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+ (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
}
+/* set breakpoint data on an exported bif entry */
+
void
-erts_set_mtrace_bif(Uint *pc, Binary *match_spec, Eterm tracer_pid) {
- BpDataTrace *bdt;
+erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+}
- bdt = (BpDataTrace *) pc[-4];
- if (bdt) {
- MatchSetUnref(bdt->match_spec);
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- } else {
- bdt = Alloc(sizeof(BpDataTrace));
- BpInit((BpData *) bdt, 0);
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- pc[-4] = (Uint) bdt;
- }
+void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) {
+ set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+}
+
+void erts_clear_time_trace_bif(BeamInstr *pc) {
+ clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint));
}
int
erts_set_debug_break(Eterm mfa[3], int specified) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return set_break(mfa, specified, NULL,
- (Uint) BeamOp(op_i_debug_breakpoint), 0, NIL);
+ (BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL);
}
int
erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return set_break(mfa, specified, NULL,
- (Uint) BeamOp(op_i_count_breakpoint), count_op, NIL);
+ (BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
}
-
+int
+erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return set_break(mfa, specified, NULL,
+ (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+}
int
erts_clear_trace_break(Eterm mfa[3], int specified) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return clear_break(mfa, specified,
- (Uint) BeamOp(op_i_trace_breakpoint));
+ (BeamInstr) BeamOp(op_i_trace_breakpoint));
}
int
erts_clear_mtrace_break(Eterm mfa[3], int specified) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return clear_break(mfa, specified,
- (Uint) BeamOp(op_i_mtrace_breakpoint));
+ (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
}
void
-erts_clear_mtrace_bif(Uint *pc) {
- BpDataTrace *bdt;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
-
- bdt = (BpDataTrace *) pc[-4];
- if (bdt) {
- if (bdt->match_spec) {
- MatchSetUnref(bdt->match_spec);
- }
- Free(bdt);
- }
- pc[-4] = (Uint) NULL;
+erts_clear_mtrace_bif(BeamInstr *pc) {
+ clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
}
int
erts_clear_debug_break(Eterm mfa[3], int specified) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return clear_break(mfa, specified,
- (Uint) BeamOp(op_i_debug_breakpoint));
+ (BeamInstr) BeamOp(op_i_debug_breakpoint));
}
int
erts_clear_count_break(Eterm mfa[3], int specified) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return clear_break(mfa, specified,
- (Uint) BeamOp(op_i_count_breakpoint));
+ (BeamInstr) BeamOp(op_i_count_breakpoint));
+}
+
+int
+erts_clear_time_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified,
+ (BeamInstr) BeamOp(op_i_time_breakpoint));
}
int
@@ -250,10 +271,10 @@ erts_clear_module_break(Module *modp) {
}
int
-erts_clear_function_break(Module *modp, Uint *pc) {
+erts_clear_function_break(Module *modp, BeamInstr *pc) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
ASSERT(modp);
- return clear_function_break(modp, pc, 0);
+ return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
}
@@ -261,13 +282,16 @@ erts_clear_function_break(Module *modp, Uint *pc) {
/*
* SMP NOTE: Process p may have become exiting on return!
*/
-Uint
-erts_trace_break(Process *p, Uint *pc, Eterm *args,
+BeamInstr
+erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid) {
Eterm tpid1, tpid2;
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
-
- ASSERT(pc[-5] == (Uint) BeamOp(op_i_func_info_IaaI));
+ BpData **bds = (BpData **) (pc)[-4];
+ BpDataTrace *bdt = NULL;
+
+ ASSERT(bds);
+ ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)];
ASSERT(bdt);
bdt = (BpDataTrace *) bdt->next;
ASSERT(bdt);
@@ -286,7 +310,7 @@ erts_trace_break(Process *p, Uint *pc, Eterm *args,
bdt->tracer_pid = tpid2;
ErtsSmpBPUnlock(bdt);
}
- pc[-4] = (Uint) bdt;
+ bds[bp_sched2ix_proc(p)] = (BpData *) bdt;
return bdt->orig_instr;
}
@@ -296,14 +320,17 @@ erts_trace_break(Process *p, Uint *pc, Eterm *args,
* SMP NOTE: Process p may have become exiting on return!
*/
Uint32
-erts_bif_mtrace(Process *p, Uint *pc, Eterm *args, int local,
+erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
Eterm *tracer_pid) {
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
-
+ BpData **bds = (BpData **) (pc)[-4];
+ BpDataTrace *bdt = NULL;
+
+
ASSERT(tracer_pid);
- if (bdt) {
+ if (bds) {
Eterm tpid1, tpid2;
Uint32 flags;
+ bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)];
ErtsSmpBPLock(bdt);
tpid1 = tpid2 = bdt->tracer_pid;
@@ -326,9 +353,9 @@ erts_bif_mtrace(Process *p, Uint *pc, Eterm *args, int local,
int
-erts_is_trace_break(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
+erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (Uint) BeamOp(op_i_trace_breakpoint));
+ (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_trace_breakpoint));
if (bdt) {
if (match_spec_ret) {
@@ -345,9 +372,9 @@ erts_is_trace_break(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
}
int
-erts_is_mtrace_break(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
+erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (Uint) BeamOp(op_i_mtrace_breakpoint));
+ (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
if (bdt) {
if (match_spec_ret) {
@@ -364,74 +391,455 @@ erts_is_mtrace_break(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
}
int
-erts_is_mtrace_bif(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
-
- if (bdt) {
- if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
- }
- if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
- }
- return !0;
- }
- return 0;
-}
-
-int
-erts_is_native_break(Uint *pc) {
+erts_is_native_break(BeamInstr *pc) {
#ifdef HIPE
- ASSERT(pc[-5] == (Uint) BeamOp(op_i_func_info_IaaI));
- return pc[0] == (Uint) BeamOp(op_hipe_trap_call)
- || pc[0] == (Uint) BeamOp(op_hipe_trap_call_closure);
+ ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ return pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call)
+ || pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure);
#else
return 0;
#endif
}
int
-erts_is_count_break(Uint *pc, Sint *count_ret) {
+erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
BpDataCount *bdc =
- (BpDataCount *) is_break(pc, (Uint) BeamOp(op_i_count_breakpoint));
+ (BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint));
if (bdc) {
if (count_ret) {
- ErtsSmpBPLock(bdc);
- *count_ret = bdc->count;
- ErtsSmpBPUnlock(bdc);
+ *count_ret = (Sint) erts_smp_atomic_read(&bdc->acount);
}
return !0;
}
return 0;
}
-Uint *
+int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
+ Uint i, ix;
+ bp_time_hash_t hash;
+ Uint size;
+ Eterm *hp, t;
+ bp_data_time_item_t *item = NULL;
+ BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ if (bdt) {
+ if (retval) {
+	    /* collect all per-scheduler hashes into one hash */
+	    bp_hash_init(&hash, 64);
+	    /* foreach thread-specific hash */
+ for (i = 0; i < bdt->n; i++) {
+ bp_data_time_item_t *sitem;
+
+		/* foreach hash bucket that is not NIL */
+ for(ix = 0; ix < bdt->hash[i].n; ix++) {
+ item = &(bdt->hash[i].item[ix]);
+ if (item->pid != NIL) {
+ sitem = bp_hash_get(&hash, item);
+ if (sitem) {
+ BP_TIME_ADD(sitem, item);
+ } else {
+ bp_hash_put(&hash, item);
+ }
+ }
+ }
+ }
+	    /* *retval should be NIL or a term from a previous bif in the export entry */
+
+ if (hash.used > 0) {
+ size = (5 + 2)*hash.used;
+ hp = HAlloc(p, size);
+
+ for(ix = 0; ix < hash.n; ix++) {
+ item = &(hash.item[ix]);
+ if (item->pid != NIL) {
+ t = TUPLE4(hp, item->pid,
+ make_small(item->count),
+ make_small(item->s_time),
+ make_small(item->us_time));
+ hp += 5;
+ *retval = CONS(hp, t, *retval); hp += 2;
+ }
+ }
+ }
+ bp_hash_delete(&hash);
+ }
+ return !0;
+ }
+
+ return 0;
+}
+
+
+BeamInstr *
erts_find_local_func(Eterm mfa[3]) {
Module *modp;
- Uint** code_base;
- Uint* code_ptr;
+ BeamInstr** code_base;
+ BeamInstr* code_ptr;
Uint i,n;
if ((modp = erts_get_module(mfa[0])) == NULL)
return NULL;
- if ((code_base = (Uint **) modp->code) == NULL)
+ if ((code_base = (BeamInstr **) modp->code) == NULL)
return NULL;
- n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; ++i) {
code_ptr = code_base[MI_FUNCTIONS+i];
- ASSERT(((Uint) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
+ ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
ASSERT(mfa[0] == ((Eterm) code_ptr[2]));
if (mfa[1] == ((Eterm) code_ptr[3]) &&
- ((Uint) mfa[2]) == code_ptr[4]) {
+ ((BeamInstr) mfa[2]) == code_ptr[4]) {
return code_ptr + 5;
}
}
return NULL;
}
+/* bp_hash */
+ERTS_INLINE Uint bp_sched2ix() {
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esdp;
+ esdp = erts_get_scheduler_data();
+ return esdp->no - 1;
+#else
+ return 0;
+#endif
+}
+static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
+ Uint size = sizeof(bp_data_time_item_t)*n;
+ Uint i;
+
+ hash->n = n;
+ hash->used = 0;
+
+ hash->item = (bp_data_time_item_t *)Alloc(size);
+ sys_memzero(hash->item, size);
+
+ for(i = 0; i < n; ++i) {
+ hash->item[i].pid = NIL;
+ }
+}
+
+static void bp_hash_rehash(bp_time_hash_t *hash, Uint n) {
+ bp_data_time_item_t *item = NULL;
+ Uint size = sizeof(bp_data_time_item_t)*n;
+ Uint ix;
+ Uint hval;
+
+ item = (bp_data_time_item_t *)Alloc(size);
+ sys_memzero(item, size);
+
+ for( ix = 0; ix < n; ++ix) {
+ item[ix].pid = NIL;
+ }
+
+ /* rehash, old hash -> new hash */
+
+ for( ix = 0; ix < hash->n; ix++) {
+ if (hash->item[ix].pid != NIL) {
+
+ hval = ((hash->item[ix].pid) >> 4) % n; /* new n */
+
+ while (item[hval].pid != NIL) {
+ hval = (hval + 1) % n;
+ }
+ item[hval].pid = hash->item[ix].pid;
+ item[hval].count = hash->item[ix].count;
+ item[hval].s_time = hash->item[ix].s_time;
+ item[hval].us_time = hash->item[ix].us_time;
+ }
+ }
+
+ Free(hash->item);
+ hash->n = n;
+ hash->item = item;
+}
+static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem) {
+ Eterm pid = sitem->pid;
+ Uint hval = (pid >> 4) % hash->n;
+ bp_data_time_item_t *item = NULL;
+
+ item = hash->item;
+
+ while (item[hval].pid != pid) {
+ if (item[hval].pid == NIL) return NULL;
+ hval = (hval + 1) % hash->n;
+ }
+
+ return &(item[hval]);
+}
+
+static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t* sitem) {
+ Uint hval;
+ float r = 0.0;
+ bp_data_time_item_t *item;
+
+ /* make sure that the hash is not saturated */
+ /* if saturated, rehash it */
+
+ r = hash->used / (float) hash->n;
+
+ if (r > 0.7f) {
+ bp_hash_rehash(hash, hash->n * 2);
+ }
+    /* compute hval after the possible rehash, since hash->n may have changed */
+ hval = (sitem->pid >> 4) % hash->n;
+
+ /* find free slot */
+ item = hash->item;
+
+ while (item[hval].pid != NIL) {
+ hval = (hval + 1) % hash->n;
+ }
+ item = &(hash->item[hval]);
+
+ item->pid = sitem->pid;
+ item->s_time = sitem->s_time;
+ item->us_time = sitem->us_time;
+ item->count = sitem->count;
+ hash->used++;
+
+ return item;
+}
+
+static void bp_hash_delete(bp_time_hash_t *hash) {
+ hash->n = 0;
+ hash->used = 0;
+ Free(hash->item);
+ hash->item = NULL;
+}
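The bp_hash_* functions above implement a small open-addressing table: slots are keyed on the tagged pid hashed as pid >> 4, collisions are resolved by linear probing, and the table is doubled once the load factor passes 0.7. A standalone sketch of the same probing and rehash scheme, assuming plain integer keys with 0 as the empty marker instead of NIL:

    #include <stdlib.h>

    #define EMPTY 0UL                 /* stands in for NIL in the patch */

    struct slot  { unsigned long key; unsigned long count; };
    struct table { size_t n, used; struct slot *item; };

    static void tab_rehash(struct table *t, size_t n);

    static void tab_init(struct table *t, size_t n)
    {
        t->n    = n;
        t->used = 0;
        t->item = calloc(n, sizeof(*t->item));   /* zeroed, so every slot is EMPTY */
    }

    /* Linear probing, as in bp_hash_get: stop at a matching key or an empty slot. */
    static struct slot *tab_lookup(struct table *t, unsigned long key)
    {
        size_t ix = (key >> 4) % t->n;           /* same hash as the patch: pid >> 4 */

        while (t->item[ix].key != key) {
            if (t->item[ix].key == EMPTY)
                return NULL;
            ix = (ix + 1) % t->n;
        }
        return &t->item[ix];
    }

    /* As in bp_hash_put: grow first if the load factor passes 0.7, then probe
     * for a free slot. Callers look the key up first, so duplicates never occur. */
    static struct slot *tab_insert(struct table *t, unsigned long key)
    {
        size_t ix;

        if ((double) t->used / (double) t->n > 0.7)
            tab_rehash(t, t->n * 2);

        ix = (key >> 4) % t->n;                  /* recompute after a possible rehash */
        while (t->item[ix].key != EMPTY)
            ix = (ix + 1) % t->n;

        t->item[ix].key = key;
        t->used++;
        return &t->item[ix];
    }

    /* As in bp_hash_rehash: allocate a larger table and re-place every live slot. */
    static void tab_rehash(struct table *t, size_t n)
    {
        struct slot *old = t->item;
        size_t old_n = t->n, ix;

        tab_init(t, n);
        for (ix = 0; ix < old_n; ix++)
            if (old[ix].key != EMPTY)
                *tab_insert(t, old[ix].key) = old[ix];
        free(old);
    }
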
+
+static void bp_time_diff(bp_data_time_item_t *item, /* out */
+ process_breakpoint_time_t *pbt, /* in */
+ Uint ms, Uint s, Uint us) {
+ int dms,ds,dus;
+
+ dms = ms - pbt->ms;
+ ds = s - pbt->s;
+ dus = us - pbt->us;
+
+    /* get_sys_now may return a zero difftime;
+     * this is ok.
+ */
+
+ ASSERT(dms >= 0 || ds >= 0 || dus >= 0);
+
+ if (dus < 0) {
+ dus += 1000000;
+ ds -= 1;
+ }
+ if (ds < 0) {
+ ds += 1000000;
+ }
+
+ item->s_time = ds;
+ item->us_time = dus;
+}
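bp_time_diff subtracts a stored erlang:now()-style {MegaSecs, Secs, MicroSecs} timestamp from the current one, borrowing a second when the microsecond delta goes negative. A sketch of the same borrow logic with hypothetical field names (the patch simply drops the megasecond delta, since it is never read afterwards):

    /* Hypothetical now()-style timestamp: {megaseconds, seconds, microseconds}. */
    struct ts { int mega, sec, usec; };

    static void ts_diff(const struct ts *now, const struct ts *then,
                        unsigned int *dsec, unsigned int *dusec)
    {
        int dmega = now->mega - then->mega;
        int ds    = now->sec  - then->sec;
        int dus   = now->usec - then->usec;

        if (dus < 0) {            /* borrow one second */
            dus += 1000000;
            ds  -= 1;
        }
        if (ds < 0) {             /* borrow one megasecond (1000000 seconds) */
            ds    += 1000000;
            dmega -= 1;
        }
        (void) dmega;             /* discarded, as in the patch */

        *dsec  = (unsigned int) ds;
        *dusec = (unsigned int) dus;
    }
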
+
+void erts_schedule_time_break(Process *p, Uint schedule) {
+ Uint ms, s, us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+
+ if (pbt) {
+
+ switch(schedule) {
+ case ERTS_BP_CALL_TIME_SCHEDULE_EXITING :
+ break;
+ case ERTS_BP_CALL_TIME_SCHEDULE_OUT :
+ /* When a process is scheduled _out_,
+ * timestamp it and add its delta to
+ * the previous breakpoint.
+ */
+
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ if (pbdt) {
+ get_sys_now(&ms,&s,&us);
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+ break;
+ case ERTS_BP_CALL_TIME_SCHEDULE_IN :
+ /* When a process is scheduled _in_,
+ * timestamp it and remove the previous
+ * timestamp in the psd.
+ */
+ get_sys_now(&ms,&s,&us);
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ break;
+ default :
+ ASSERT(0);
+ /* will never happen */
+ break;
+ }
+ } /* pbt */
+}
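erts_schedule_time_break charges the time a process has been running to the breakpoint it is currently "inside" when the process is scheduled out, and merely restarts the clock when it is scheduled in. A sketch of that split, with a hypothetical per-process record standing in for the psd entry and the per-scheduler hash:

    /* Hypothetical per-process record: a stand-in for the psd entry plus the
     * accumulator of the breakpoint the process is currently "inside". */
    struct proc_clock {
        unsigned long last_s, last_us;       /* stamped at call or schedule-in */
        unsigned long *acc_s, *acc_us;       /* current breakpoint's accumulator */
    };

    /* Schedule-out: charge the elapsed time to the current breakpoint. */
    static void on_schedule_out(struct proc_clock *pc_rec,
                                unsigned long now_s, unsigned long now_us)
    {
        unsigned long ds = now_s - pc_rec->last_s;
        long dus = (long) now_us - (long) pc_rec->last_us;

        if (dus < 0) {                       /* borrow a second */
            dus += 1000000;
            ds  -= 1;
        }
        *pc_rec->acc_s  += ds;
        *pc_rec->acc_us += (unsigned long) dus;
    }

    /* Schedule-in: only restart the clock; nothing is charged. */
    static void on_schedule_in(struct proc_clock *pc_rec,
                               unsigned long now_s, unsigned long now_us)
    {
        pc_rec->last_s  = now_s;
        pc_rec->last_us = now_us;
    }
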
+
+/* call_time breakpoint
+ * Accumulated times are added to the previous bp,
+ * not the current one. The current one is saved
+ * for future reference.
+ * The previous breakpoint is stored in the process itself, in the psd.
+ * We do not need to store it in a stack frame.
+ * There is no need for locking; each thread has its own
+ * area in each bp to save data.
+ * Since we need to differentiate between processes for each bp,
+ * every bp has a hash (per thread) of per-process statistics.
+ * - egil
+ */
+
+void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) {
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+ ASSERT(p->status == P_RUNNING);
+
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+ get_sys_now(&ms,&s,&us);
+
+ switch(type) {
+ /* get pbt
+ * timestamp = t0
+ * lookup bdt from code
+ * set ts0 to pbt
+ * add call count here?
+ */
+ case ERTS_BP_CALL_TIME_CALL:
+ case ERTS_BP_CALL_TIME_TAIL_CALL:
+
+ if (pbt) {
+ ASSERT(pbt->pc);
+ /* add time to previous code */
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ /* if null then the breakpoint was removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ } else {
+ /* first call of process to instrumented function */
+ pbt = Alloc(sizeof(process_breakpoint_time_t));
+ (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
+ }
+ /* add count to this code */
+ sitem.pid = p->id;
+ sitem.count = 1;
+ sitem.s_time = 0;
+ sitem.us_time = 0;
+
+ /* this breakpoint */
+ ASSERT(bdt);
+ h = &(bdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ break;
+
+ case ERTS_BP_CALL_TIME_RETURN:
+ /* get pbt
+ * lookup bdt from code
+ * timestamp = t1
+ * get ts0 from pbt
+ * get item from bdt->hash[bp_hash(p->id)]
+	 * add diff (t1, t0) to item
+ */
+
+ if(pbt) {
+ /* might have been removed due to
+ * trace_pattern(false)
+ */
+ ASSERT(pbt->pc);
+
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ /* beware, the trace_pattern might have been removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ }
+ break;
+ default :
+ ASSERT(0);
+ /* will never happen */
+ break;
+ }
+}
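As the comment before erts_trace_time_break explains, each call_time event charges the elapsed time to the *previous* breakpoint and only counts the call on the current one, then re-stamps the process record. A compact model of that accounting (illustrative names only, not the patch's API):

    /* Illustrative model only: one accumulator per traced function, one record
     * per process. Every event charges the previous breakpoint, then re-stamps. */
    struct bp_acc   { unsigned long calls; unsigned long us; };
    struct proc_rec { struct bp_acc *prev_bp; unsigned long t0_us; };

    static void call_time_event(struct proc_rec *pr, struct bp_acc *entered_bp,
                                unsigned long now_us, int is_call)
    {
        if (pr->prev_bp != NULL)        /* charge elapsed time to the previous bp */
            pr->prev_bp->us += now_us - pr->t0_us;

        if (is_call && entered_bp != NULL)
            entered_bp->calls += 1;     /* only the call count goes to the current bp */

        if (entered_bp != NULL)
            pr->prev_bp = entered_bp;   /* the current bp becomes the "previous" one */
        pr->t0_us = now_us;
    }
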
/* *************************************************************************
@@ -440,7 +848,7 @@ erts_find_local_func(Eterm mfa[3]) {
static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, Eterm break_op,
+ Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid)
{
Module *modp;
@@ -470,45 +878,54 @@ static int set_break(Eterm mfa[3], int specified,
}
static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, Uint break_op,
+ Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid) {
- Uint** code_base;
- Uint* code_ptr;
+ BeamInstr** code_base;
+ BeamInstr* code_ptr;
int num_processed = 0;
Uint i,n;
ASSERT(break_op);
ASSERT(modp);
- code_base = (Uint **) modp->code;
+ code_base = (BeamInstr **) modp->code;
if (code_base == NULL) {
return 0;
}
- n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; ++i) {
code_ptr = code_base[MI_FUNCTIONS+i];
- ASSERT(code_ptr[0] == (Uint) BeamOp(op_i_func_info_IaaI));
+ ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
(specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- Uint *pc = code_ptr+5;
+ BeamInstr *pc = code_ptr+5;
num_processed +=
- set_function_break(modp, pc, match_spec,
+ set_function_break(modp, pc, BREAK_IS_ERL, match_spec,
break_op, count_op, tracer_pid);
}
}
return num_processed;
}
-static int set_function_break(Module *modp, Uint *pc,
- Binary *match_spec, Uint break_op,
+static int set_function_break(Module *modp, BeamInstr *pc, int bif,
+ Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid) {
- BpData *bd, **r;
+
+ BeamInstr **code_base = NULL;
+ BpData *bd, **r, ***rs;
size_t size;
- Uint **code_base = (Uint **)modp->code;
+ Uint ix = 0;
- ASSERT(code_base);
- ASSERT(code_base <= (Uint **)pc);
- ASSERT((Uint **)pc < code_base + (modp->code_length/sizeof(Uint *)));
+ if (bif == BREAK_IS_ERL) {
+ code_base = (BeamInstr **)modp->code;
+ ASSERT(code_base);
+ ASSERT(code_base <= (BeamInstr **)pc);
+ ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *)));
+ } else {
+ ASSERT(*pc == (BeamInstr) em_apply_bif);
+ ASSERT(modp == NULL);
+ }
+
/*
* Currently no trace support for native code.
*/
@@ -517,8 +934,9 @@ static int set_function_break(Module *modp, Uint *pc,
}
/* Do not allow two breakpoints of the same kind */
if ( (bd = is_break(pc, break_op))) {
- if (break_op == (Uint) BeamOp(op_i_trace_breakpoint)
- || break_op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+ if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint)
+ || break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
+
BpDataTrace *bdt = (BpDataTrace *) bd;
Binary *old_match_spec;
@@ -531,71 +949,115 @@ static int set_function_break(Module *modp, Uint *pc,
ErtsSmpBPUnlock(bdt);
MatchSetUnref(old_match_spec);
} else {
+ BpDataCount *bdc = (BpDataCount *) bd;
+ long count = 0;
+ long res = 0;
+
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
- if (break_op == (Uint) BeamOp(op_i_count_breakpoint)) {
- BpDataCount *bdc = (BpDataCount *) bd;
- ErtsSmpBPLock(bdc);
+ if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
if (count_op == erts_break_stop) {
- if (bdc->count >= 0) {
- bdc->count = -bdc->count-1; /* Stop call counter */
+ count = erts_smp_atomic_read(&bdc->acount);
+ if (count >= 0) {
+ while(1) {
+ res = erts_smp_atomic_cmpxchg(&bdc->acount, -count - 1, count);
+ if ((res == count) || count < 0) break;
+ count = res;
+ }
}
} else {
- bdc->count = 0; /* Reset call counter */
+ /* Reset call counter */
+ erts_smp_atomic_set(&bdc->acount, 0);
}
- ErtsSmpBPUnlock(bdc);
+
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+
+ if (count_op == erts_break_stop) {
+ bdt->pause = 1;
+ } else {
+ bdt->pause = 0;
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_delete(&(bdt->hash[i]));
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
+ }
+
} else {
ASSERT (! count_op);
}
}
return 1;
}
- if (break_op == (Uint) BeamOp(op_i_trace_breakpoint) ||
- break_op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+ if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
+ break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
size = sizeof(BpDataTrace);
} else {
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
- if (break_op == (Uint) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_reset
- || count_op == erts_break_stop) {
+ if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
/* Do not insert a new breakpoint */
return 1;
}
size = sizeof(BpDataCount);
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return 1;
+ }
+ size = sizeof(BpDataTime);
} else {
ASSERT(! count_op);
- ASSERT(break_op == (Uint) BeamOp(op_i_debug_breakpoint));
+ ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint));
size = sizeof(BpDataDebug);
}
}
- r = (BpData **) (pc-4);
+ rs = (BpData ***) (pc-4);
+ if (! *rs) {
+ size_t ssize = sizeof(BeamInstr) * erts_no_schedulers;
+ *rs = (BpData **) Alloc(ssize);
+ sys_memzero(*rs, ssize);
+ }
+
+ r = &((*rs)[0]);
+
if (! *r) {
- ASSERT(*pc != (Uint) BeamOp(op_i_trace_breakpoint));
- ASSERT(*pc != (Uint) BeamOp(op_i_mtrace_breakpoint));
- ASSERT(*pc != (Uint) BeamOp(op_i_debug_breakpoint));
- ASSERT(*pc != (Uint) BeamOp(op_i_count_breakpoint));
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint));
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint));
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint));
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint));
/* First breakpoint; create singleton ring */
bd = Alloc(size);
BpInit(bd, *pc);
- *pc = break_op;
*r = bd;
+ if (bif == BREAK_IS_ERL) {
+ *pc = break_op;
+ }
} else {
- ASSERT(*pc == (Uint) BeamOp(op_i_trace_breakpoint) ||
- *pc == (Uint) BeamOp(op_i_mtrace_breakpoint) ||
- *pc == (Uint) BeamOp(op_i_debug_breakpoint) ||
- *pc == (Uint) BeamOp(op_i_count_breakpoint));
- if (*pc == (Uint) BeamOp(op_i_debug_breakpoint)) {
+ ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
+ *pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) ||
+ *pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) ||
+ *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) ||
+ *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) ||
+ *pc == (BeamInstr) em_apply_bif);
+ if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
/* Debug bp must be last, so if it is also first;
* it must be singleton. */
- ASSERT(BpSingleton(*r));
+ ASSERT(BpSingleton(*r));
/* Insert new bp first in the ring, i.e second to last. */
bd = Alloc(size);
BpInitAndSpliceNext(bd, *pc, *r);
- *pc = break_op;
- } else if ((*r)->prev->orig_instr
- == (Uint) BeamOp(op_i_debug_breakpoint)) {
+ if (bif == BREAK_IS_ERL) {
+ *pc = break_op;
+ }
+ } else if ((*r)->prev->orig_instr
+ == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
/* Debug bp last in the ring; insert new second to last. */
bd = Alloc(size);
BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r);
@@ -608,25 +1070,43 @@ static int set_function_break(Module *modp, Uint *pc,
*r = bd;
}
}
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ (*rs)[ix] = (*rs)[0];
+ }
+
+ bd->this_instr = break_op;
/* Init the bp type specific data */
- if (break_op == (Uint) BeamOp(op_i_trace_breakpoint) ||
- break_op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+ if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
+ break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
BpDataTrace *bdt = (BpDataTrace *) bd;
MatchSetRef(match_spec);
bdt->match_spec = match_spec;
bdt->tracer_pid = tracer_pid;
- } else if (break_op == (Uint) BeamOp(op_i_count_breakpoint)) {
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+
+ bdt->pause = 0;
+ bdt->n = erts_no_schedulers;
+ bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
+
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
+ } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
BpDataCount *bdc = (BpDataCount *) bd;
+ erts_smp_atomic_init(&bdc->acount, 0);
+ }
- bdc->count = 0;
+ if (bif == BREAK_IS_ERL) {
+ ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
}
- ++(*(Uint*)&code_base[MI_NUM_BREAKPOINTS]);
return 1;
}
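set_function_break now stores at pc[-4] a pointer to an array with one BpData slot per scheduler, all initially aliased to the same ring element, so each scheduler can rotate its own slot without locking. A sketch of that layout (a plain node type stands in for BpData, and the scheduler count is a parameter):

    #include <stdlib.h>

    /* A plain node stands in for BpData; slots[] stands in for the BpData**
     * that the patch stores at pc[-4], one slot per scheduler. */
    struct node { struct node *next; };

    static struct node **make_sched_slots(struct node *ring, unsigned int nscheds)
    {
        struct node **slots = malloc(nscheds * sizeof *slots);
        unsigned int ix;

        for (ix = 0; ix < nscheds; ix++)
            slots[ix] = ring;           /* every scheduler starts at the same element */
        return slots;
    }

    /* What a breakpoint instruction does on scheduler ix: rotate that scheduler's
     * slot one step around the shared ring, without touching the other slots. */
    static struct node *advance(struct node **slots, unsigned int ix)
    {
        slots[ix] = slots[ix]->next;
        return slots[ix];
    }
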
-static int clear_break(Eterm mfa[3], int specified, Uint break_op)
+static int clear_break(Eterm mfa[3], int specified, BeamInstr break_op)
{
int num_processed = 0;
Module *modp;
@@ -652,52 +1132,71 @@ static int clear_break(Eterm mfa[3], int specified, Uint break_op)
}
static int clear_module_break(Module *m, Eterm mfa[3], int specified,
- Uint break_op) {
- Uint** code_base;
- Uint* code_ptr;
+ BeamInstr break_op) {
+ BeamInstr** code_base;
+ BeamInstr* code_ptr;
int num_processed = 0;
- Uint i,n;
+ Uint i;
+ BeamInstr n;
ASSERT(m);
- code_base = (Uint **) m->code;
+ code_base = (BeamInstr **) m->code;
if (code_base == NULL) {
return 0;
}
- n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; ++i) {
code_ptr = code_base[MI_FUNCTIONS+i];
if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
(specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- Uint *pc = code_ptr + 5;
+ BeamInstr *pc = code_ptr + 5;
num_processed +=
- clear_function_break(m, pc, break_op);
+ clear_function_break(m, pc, BREAK_IS_ERL, break_op);
}
}
return num_processed;
}
-static int clear_function_break(Module *m, Uint *pc, Uint break_op) {
+static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) {
BpData *bd;
- Uint **code_base = (Uint **)m->code;
-
- ASSERT(code_base);
- ASSERT(code_base <= (Uint **)pc);
- ASSERT((Uint **)pc < code_base + (m->code_length/sizeof(Uint *)));
+ Uint ix = 0;
+ BeamInstr **code_base = NULL;
+
+ if (bif == BREAK_IS_ERL) {
+ code_base = (BeamInstr **)m->code;
+ ASSERT(code_base);
+ ASSERT(code_base <= (BeamInstr **)pc);
+ ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *)));
+ } else {
+ ASSERT(*pc == (BeamInstr) em_apply_bif);
+ ASSERT(m == NULL);
+ }
+
/*
* Currently no trace support for native code.
*/
if (erts_is_native_break(pc)) {
return 0;
}
+
while ( (bd = is_break(pc, break_op))) {
/* Remove all breakpoints of this type.
* There should be only one of each type,
* but break_op may be 0 which matches any type.
*/
- Uint op;
- BpData **r = (BpData **) (pc-4);
+ BeamInstr op;
+ BpData ***rs = (BpData ***) (pc - 4);
+ BpData **r = NULL;
+
+#ifdef DEBUG
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ ASSERT((*rs)[ix] == (*rs)[0]);
+ }
+#endif
+ r = &((*rs)[0]);
+
ASSERT(*r);
/* Find opcode for this breakpoint */
if (break_op) {
@@ -713,8 +1212,11 @@ static int clear_function_break(Module *m, Uint *pc, Uint break_op) {
if (BpSingleton(bd)) {
ASSERT(*r == bd);
/* Only one breakpoint to remove */
- *r = NULL;
- *pc = bd->orig_instr;
+ if (bif == BREAK_IS_ERL) {
+ *pc = bd->orig_instr;
+ }
+ Free(*rs);
+ *rs = NULL;
} else {
BpData *bd_prev = bd->prev;
@@ -726,22 +1228,64 @@ static int clear_function_break(Module *m, Uint *pc, Uint break_op) {
bd_prev->orig_instr = bd->orig_instr;
} else if (bd_prev == *r) {
/* We removed the first breakpoint in the ring */
- *pc = bd->orig_instr;
+ if (bif == BREAK_IS_ERL) {
+ *pc = bd->orig_instr;
+ }
} else {
bd_prev->orig_instr = bd->orig_instr;
}
}
- if (op == (Uint) BeamOp(op_i_trace_breakpoint) ||
- op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+ if (op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
+ op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
BpDataTrace *bdt = (BpDataTrace *) bd;
-
MatchSetUnref(bdt->match_spec);
}
+ if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+ Uint j = 0;
+ Process *h_p = NULL;
+ bp_data_time_item_t *item = NULL;
+ process_breakpoint_time_t *pbt = NULL;
+
+ /* remove all psd associated with the hash
+ * and then delete the hash.
+ * ... sigh ...
+ */
+
+ for( i = 0; i < bdt->n; ++i) {
+ if (bdt->hash[i].used) {
+ for (j = 0; j < bdt->hash[i].n; ++j) {
+ item = &(bdt->hash[i].item[j]);
+ if (item->pid != NIL) {
+ h_p = process_tab[internal_pid_index(item->pid)];
+ if (h_p) {
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL);
+ if (pbt) {
+ Free(pbt);
+ }
+ }
+ }
+ }
+ }
+ bp_hash_delete(&(bdt->hash[i]));
+ }
+ Free(bdt->hash);
+ bdt->hash = NULL;
+ bdt->n = 0;
+ }
Free(bd);
- ASSERT(((Uint) code_base[MI_NUM_BREAKPOINTS]) > 0);
- --(*(Uint*)&code_base[MI_NUM_BREAKPOINTS]);
- }
+ if (bif == BREAK_IS_ERL) {
+ ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0);
+ --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
+ }
+ if (*rs) {
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ (*rs)[ix] = (*rs)[0];
+ }
+ }
+ } /* while bd != NULL */
return 1;
}
@@ -754,32 +1298,63 @@ static int clear_function_break(Module *m, Uint *pc, Uint break_op) {
** returned. The program counter must point to the first executable
** (breakpoint) instruction of the function.
*/
-static BpData *is_break(Uint *pc, Uint break_op) {
- ASSERT(pc[-5] == (Uint) BeamOp(op_i_func_info_IaaI));
+
+BpData *erts_get_time_break(Process *p, BeamInstr *pc) {
+ return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+}
+
+static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
+ ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if (! erts_is_native_break(pc)) {
- BpData *bd = (BpData *) pc[-4];
-
- if (break_op == 0) {
- return bd;
- }
- if (*pc == break_op) {
- ASSERT(bd);
- return bd->next;
- }
- if (! bd){
+ BpData **rs = (BpData **) pc[-4];
+ BpData *bd = NULL, *ebd = NULL;
+
+ if (! rs) {
return NULL;
}
+
+ bd = ebd = rs[bp_sched2ix_proc(p)];
+ ASSERT(bd);
+ if (bd->this_instr == break_op) {
+ return bd;
+ }
+
bd = bd->next;
- while (bd != (BpData *) pc[-4]) {
+ while (bd != ebd) {
ASSERT(bd);
- if (bd->orig_instr == break_op) {
- bd = bd->next;
+ if (bd->this_instr == break_op) {
ASSERT(bd);
return bd;
- } else {
- bd = bd->next;
}
+ bd = bd->next;
}
}
return NULL;
}
+
+static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
+ BpData **rs = (BpData **) pc[-4];
+ BpData *bd = NULL, *ebd = NULL;
+ ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+
+ if (! rs) {
+ return NULL;
+ }
+
+ bd = ebd = rs[bp_sched2ix()];
+ ASSERT(bd);
+ if ( (break_op == 0) || (bd->this_instr == break_op)) {
+ return bd;
+ }
+
+ bd = bd->next;
+ while (bd != ebd) {
+ ASSERT(bd);
+ if (bd->this_instr == break_op) {
+ ASSERT(bd);
+ return bd;
+ }
+ bd = bd->next;
+ }
+ return NULL;
+}
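Both get_break and is_break above walk the breakpoint ring starting from the current scheduler's slot and match on the this_instr key rather than on ring position. A standalone sketch of that keyed ring walk:

    /* A node in the circular breakpoint ring; key stands in for this_instr. */
    struct ring_node {
        struct ring_node *next;
        unsigned long key;
    };

    /* Walk the ring from this scheduler's slot until a node carrying the wanted
     * key is found, or we are back at the starting node. A key of 0 matches the
     * first node, like break_op == 0 in the patch. */
    static struct ring_node *ring_find(struct ring_node *start, unsigned long key)
    {
        struct ring_node *n = start;

        do {
            if (key == 0 || n->key == key)
                return n;
            n = n->next;
        } while (n != start);

        return NULL;
    }
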
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 44e6b294d8..ebc171078d 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -27,28 +27,46 @@
-/*
-** Common struct to all bp_data_*
-**
-** Two gotchas:
-**
-** 1) The type of bp_data structure in the ring is deduced from the
-** orig_instr field of the structure _before_ in the ring, except for
-** the first structure in the ring that has its instruction in
-** pc[0] of the code to execute.
-**
-** 2) pc[-4] points to the _last_ structure in the ring before the
-** breakpoints are being executed.
-**
-** So, as an example, when a breakpointed function starts to execute,
-** the first instruction that is a breakpoint instruction at pc[0] finds
-** its data at ((BpData *) pc[-4])->next and has to cast that pointer
-** to the correct bp_data type.
+/* A couple of gotchas:
+ *
+ * The breakpoint structure as seen from beam_emu, where the instruction
+ * counter pointer, I (or pc), points to the *current* instruction.
+ * At that time, if the instruction is a breakpoint instruction, the
+ * memory around pc looks like the following,
+ *
+ * I[-5] | op_i_func_info_IaaI | scheduler specific entries
+ * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN |
+ * I[-3] | Tagged Module | | |
+ * I[-2] | Tagged Function | V V
+ * I[-1] | Arity | BpData -> BpData -> BpData -> BpData
+ * I[0] | The bp instruction | ^ * the bp wheel * |
+ * |------------------------------
+ *
+ * Common struct to all bp_data_*
+ *
+ * 1) The type of bp_data structure in the ring is deduced from the
+ * orig_instr field of the structure _before_ in the ring, except for
+ * the first structure in the ring that has its instruction in
+ * pc[0] of the code to execute.
+ *    This is valid as long as you don't search for the function while it is
+ *    being executed by something else, or while it is in the middle of its
+ *    rotation for any other reason.
+ *    For this reason a key, the bp beam instruction, is included in each entry.
+ *
+ * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the
+ * breakpoints are being executed.
+ *
+ * So, as an example, when a breakpointed function starts to execute,
+ * the first instruction that is a breakpoint instruction at pc[0] finds
+ * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer
+ * to the correct bp_data type.
*/
+
typedef struct bp_data {
struct bp_data *next; /* Doubly linked ring pointers */
struct bp_data *prev; /* -"- */
- Uint orig_instr; /* The original instruction to execute */
+ BeamInstr orig_instr; /* The original instruction to execute */
+ BeamInstr this_instr; /* key */
} BpData;
/*
** All the following bp_data_.. structs must begin the same way
@@ -57,7 +75,8 @@ typedef struct bp_data {
typedef struct bp_data_trace {
struct bp_data *next;
struct bp_data *prev;
- Uint orig_instr;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
Binary *match_spec;
Eterm tracer_pid;
} BpDataTrace;
@@ -65,18 +84,58 @@ typedef struct bp_data_trace {
typedef struct bp_data_debug {
struct bp_data *next;
struct bp_data *prev;
- Uint orig_instr;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
} BpDataDebug;
-typedef struct bp_data_count { /* Call count */
+typedef struct bp_data_count { /* Call count */
struct bp_data *next;
struct bp_data *prev;
- Uint orig_instr;
- Sint count;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
+ erts_smp_atomic_t acount;
} BpDataCount;
+typedef struct {
+ Eterm pid;
+ Sint count;
+ Uint s_time;
+ Uint us_time;
+} bp_data_time_item_t;
+
+typedef struct {
+ Uint n;
+ Uint used;
+ bp_data_time_item_t *item;
+} bp_time_hash_t;
+
+typedef struct bp_data_time { /* Call time */
+ struct bp_data *next;
+ struct bp_data *prev;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
+ Uint pause;
+ Uint n;
+ bp_time_hash_t *hash;
+} BpDataTime;
+
+typedef struct {
+ Uint ms;
+ Uint s;
+ Uint us;
+ BeamInstr *pc;
+} process_breakpoint_time_t; /* used within psd */
+
extern erts_smp_spinlock_t erts_bp_lock;
+#define ERTS_BP_CALL_TIME_SCHEDULE_IN (0)
+#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
+#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
+
+#define ERTS_BP_CALL_TIME_CALL (0)
+#define ERTS_BP_CALL_TIME_RETURN (1)
+#define ERTS_BP_CALL_TIME_TAIL_CALL (2)
+
#ifdef ERTS_SMP
#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
@@ -85,31 +144,46 @@ extern erts_smp_spinlock_t erts_bp_lock;
#define ErtsSmpBPUnlock(BDC)
#endif
-#define ErtsCountBreak(pc,instr_result) \
-do { \
- BpDataCount *bdc = (BpDataCount *) (pc)[-4]; \
- \
- ASSERT((pc)[-5] == (Uint) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bdc); \
- bdc = (BpDataCount *) bdc->next; \
- ASSERT(bdc); \
- (pc)[-4] = (Uint) bdc; \
- ErtsSmpBPLock(bdc); \
- if (bdc->count >= 0) bdc->count++; \
- ErtsSmpBPUnlock(bdc); \
- *(instr_result) = bdc->orig_instr; \
+ERTS_INLINE Uint bp_sched2ix(void);
+
+#ifdef ERTS_SMP
+#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
+#else
+#define bp_sched2ix_proc(p) (0)
+#endif
+
+#define ErtsCountBreak(p, pc,instr_result) \
+do { \
+ BpData **bds = (BpData **) (pc)[-4]; \
+ BpDataCount *bdc = NULL; \
+ Uint ix = bp_sched2ix_proc( (p) ); \
+ long count = 0; \
+ \
+ ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
+ ASSERT(bds); \
+ bdc = (BpDataCount *) bds[ix]; \
+ bdc = (BpDataCount *) bdc->next; \
+ ASSERT(bdc); \
+ bds[ix] = (BpData *) bdc; \
+ count = erts_smp_atomic_read(&bdc->acount); \
+ if (count >= 0) erts_smp_atomic_inc(&bdc->acount); \
+ *(instr_result) = bdc->orig_instr; \
} while (0)
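ErtsCountBreak only increments the counter while its value is non-negative; pausing a counter is done elsewhere (set_function_break) by atomically swapping count for -count-1, which freezes it without losing the accumulated value. A sketch of that encoding using C11 atomics (the patch uses erts_smp_atomic_* instead):

    #include <stdatomic.h>

    /* A paused counter is stored as -count-1: it still holds the accumulated
     * value, but reads as negative so the hit path skips the increment. */
    static void count_hit(atomic_long *acount)
    {
        /* check-then-increment is not one atomic step; the macro above
         * has the same window */
        if (atomic_load(acount) >= 0)
            atomic_fetch_add(acount, 1);
    }

    static void count_pause(atomic_long *acount)
    {
        long cur = atomic_load(acount);

        /* retry until we manage to flip a non-negative value, or someone
         * else has already paused the counter */
        while (cur >= 0 &&
               !atomic_compare_exchange_weak(acount, &cur, -cur - 1))
            ;
    }

    static long count_value(atomic_long *acount)
    {
        long v = atomic_load(acount);
        return v >= 0 ? v : -v - 1;    /* decode a paused counter */
    }
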
-#define ErtsBreakSkip(pc,instr_result) \
-do { \
- BpData *bd = (BpData *) (pc)[-4]; \
- \
- ASSERT((pc)[-5] == (Uint) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bd); \
- bd = bd->next; \
- ASSERT(bd); \
- (pc)[-4] = (Uint) bd; \
- *(instr_result) = bd->orig_instr; \
+#define ErtsBreakSkip(p, pc,instr_result) \
+do { \
+ BpData **bds = (BpData **) (pc)[-4]; \
+ BpData *bd = NULL; \
+ Uint ix = bp_sched2ix_proc( (p) ); \
+ \
+ ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
+ ASSERT(bds); \
+ bd = bds[ix]; \
+ ASSERT(bd); \
+ bd = bd->next; \
+ ASSERT(bd); \
+ bds[ix] = bd; \
+ *(instr_result) = bd->orig_instr; \
} while (0)
enum erts_break_op{
@@ -133,9 +207,9 @@ int erts_clear_trace_break(Eterm mfa[3], int specified);
int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
Eterm tracer_pid);
int erts_clear_mtrace_break(Eterm mfa[3], int specified);
-void erts_set_mtrace_bif(Uint *pc, Binary *match_spec,
+void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
Eterm tracer_pid);
-void erts_clear_mtrace_bif(Uint *pc);
+void erts_clear_mtrace_bif(BeamInstr *pc);
int erts_set_debug_break(Eterm mfa[3], int specified);
int erts_clear_debug_break(Eterm mfa[3], int specified);
int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op);
@@ -144,22 +218,33 @@ int erts_clear_count_break(Eterm mfa[3], int specified);
int erts_clear_break(Eterm mfa[3], int specified);
int erts_clear_module_break(Module *modp);
-int erts_clear_function_break(Module *modp, Uint *pc);
+int erts_clear_function_break(Module *modp, BeamInstr *pc);
-Uint erts_trace_break(Process *p, Uint *pc, Eterm *args,
+BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid);
-Uint32 erts_bif_mtrace(Process *p, Uint *pc, Eterm *args,
+Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args,
int local, Eterm *tracer_pid);
-int erts_is_trace_break(Uint *pc, Binary **match_spec_ret,
+int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
-int erts_is_mtrace_break(Uint *pc, Binary **match_spec_ret,
+int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_rte);
-int erts_is_mtrace_bif(Uint *pc, Binary **match_spec_ret,
+int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
-int erts_is_native_break(Uint *pc);
-int erts_is_count_break(Uint *pc, Sint *count_ret);
+int erts_is_native_break(BeamInstr *pc);
+int erts_is_count_break(BeamInstr *pc, Sint *count_ret);
+int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
+
+void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type);
+void erts_schedule_time_break(Process *p, Uint out);
+int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op);
+int erts_clear_time_break(Eterm mfa[3], int specified);
+
+int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
+void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
+void erts_clear_time_trace_bif(BeamInstr *pc);
+BpData *erts_get_time_break(Process *p, BeamInstr *pc);
-Uint *erts_find_local_func(Eterm mfa[3]);
+BeamInstr *erts_find_local_func(Eterm mfa[3]);
#endif /* _BEAM_BP_H */
diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c
index d5cef1cad2..e795b4efbd 100644
--- a/erts/emulator/beam/beam_catches.c
+++ b/erts/emulator/beam/beam_catches.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -26,7 +26,7 @@
/* XXX: should use dynamic reallocation */
#define TABSIZ (16*1024)
static struct {
- Eterm *cp;
+ BeamInstr *cp;
unsigned cdr;
} beam_catches[TABSIZ];
@@ -39,7 +39,7 @@ void beam_catches_init(void)
high_mark = 0;
}
-unsigned beam_catches_cons(Eterm *cp, unsigned cdr)
+unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
{
int i;
@@ -65,7 +65,7 @@ unsigned beam_catches_cons(Eterm *cp, unsigned cdr)
return i;
}
-Eterm *beam_catches_car(unsigned i)
+BeamInstr *beam_catches_car(unsigned i)
{
if( i >= TABSIZ ) {
fprintf(stderr,
@@ -75,7 +75,7 @@ Eterm *beam_catches_car(unsigned i)
return beam_catches[i].cp;
}
-void beam_catches_delmod(unsigned head, Eterm *code, unsigned code_bytes)
+void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes)
{
unsigned i, cdr;
diff --git a/erts/emulator/beam/beam_catches.h b/erts/emulator/beam/beam_catches.h
index ccf33d5e86..6223427f0d 100644
--- a/erts/emulator/beam/beam_catches.h
+++ b/erts/emulator/beam/beam_catches.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -23,9 +23,9 @@
#define BEAM_CATCHES_NIL (-1)
void beam_catches_init(void);
-unsigned beam_catches_cons(Eterm* cp, unsigned cdr);
-Eterm *beam_catches_car(unsigned i);
-void beam_catches_delmod(unsigned head, Eterm* code, unsigned code_bytes);
+unsigned beam_catches_cons(BeamInstr* cp, unsigned cdr);
+BeamInstr *beam_catches_car(unsigned i);
+void beam_catches_delmod(unsigned head, BeamInstr* code, unsigned code_bytes);
#define catch_pc(x) beam_catches_car(catch_val((x)))
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 4242a4161e..b0bf14b94f 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -43,12 +43,13 @@
#else
# define HEXF "%08bpX"
#endif
+#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
void dbg_bt(Process* p, Eterm* sp);
-void dbg_where(Eterm* addr, Eterm x0, Eterm* reg);
+void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
static void print_big(int to, void *to_arg, Eterm* addr);
-static int print_op(int to, void *to_arg, int op, int size, Eterm* addr);
+static int print_op(int to, void *to_arg, int op, int size, BeamInstr* addr);
Eterm
erts_debug_same_2(Process* p, Eterm term1, Eterm term2)
{
@@ -124,6 +125,38 @@ erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
BIF_ERROR(p, BADARG);
}
+#if 0 /* Kept for convenience when doing hard debugging. */
+void debug_dump_code(BeamInstr *I, int num)
+{
+ BeamInstr *code_ptr = I;
+ BeamInstr *end = code_ptr + num;
+ erts_dsprintf_buf_t *dsbufp;
+ BeamInstr instr;
+ int i;
+
+ dsbufp = erts_create_tmp_dsbuf(0);
+ while (code_ptr < end) {
+ erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
+ instr = (BeamInstr) code_ptr[0];
+ for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
+ if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') {
+ code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
+ i, opc[i].sz-1, code_ptr+1) + 1;
+ break;
+ }
+ }
+ if (i >= NUM_SPECIFIC_OPS) {
+ erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp,
+ "unknown " HEXF "\n", instr);
+ code_ptr++;
+ }
+ }
+ dsbufp->str[dsbufp->str_len] = 0;
+ erts_fprintf(stderr,"%s", dsbufp->str);
+ erts_destroy_tmp_dsbuf(dsbufp);
+}
+#endif
+
Eterm
erts_debug_disassemble_1(Process* p, Eterm addr)
{
@@ -132,16 +165,16 @@ erts_debug_disassemble_1(Process* p, Eterm addr)
Eterm* tp;
Eterm bin;
Eterm mfa;
- Eterm* funcinfo = NULL; /* Initialized to eliminate warning. */
- Uint* code_base;
- Uint* code_ptr = NULL; /* Initialized to eliminate warning. */
- Uint instr;
- Uint uaddr;
+ BeamInstr* funcinfo = NULL; /* Initialized to eliminate warning. */
+ BeamInstr* code_base;
+ BeamInstr* code_ptr = NULL; /* Initialized to eliminate warning. */
+ BeamInstr instr;
+ BeamInstr uaddr;
Uint hsz;
int i;
- if (term_to_Uint(addr, &uaddr)) {
- code_ptr = (Uint *) uaddr;
+ if (term_to_UWord(addr, &uaddr)) {
+ code_ptr = (BeamInstr *) uaddr;
if ((funcinfo = find_function_from_pc(code_ptr)) == NULL) {
BIF_RET(am_false);
}
@@ -180,14 +213,14 @@ erts_debug_disassemble_1(Process* p, Eterm addr)
* But this code_ptr will point to the start of the Export,
* not the function's func_info instruction. BOOM !?
*/
- code_ptr = ((Eterm *) ep->address) - 5;
+ code_ptr = ((BeamInstr *) ep->address) - 5;
funcinfo = code_ptr+2;
} else if (modp == NULL || (code_base = modp->code) == NULL) {
BIF_RET(am_undef);
} else {
n = code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; i++) {
- code_ptr = (Uint *) code_base[MI_FUNCTIONS+i];
+ code_ptr = (BeamInstr *) code_base[MI_FUNCTIONS+i];
if (code_ptr[3] == name && code_ptr[4] == arity) {
funcinfo = code_ptr+2;
break;
@@ -203,9 +236,9 @@ erts_debug_disassemble_1(Process* p, Eterm addr)
dsbufp = erts_create_tmp_dsbuf(0);
erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
- instr = (Uint) code_ptr[0];
+ instr = (BeamInstr) code_ptr[0];
for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
- if (instr == (Uint) BeamOp(i) && opc[i].name[0] != '\0') {
+ if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') {
code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
i, opc[i].sz-1, code_ptr+1) + 1;
break;
@@ -219,12 +252,12 @@ erts_debug_disassemble_1(Process* p, Eterm addr)
bin = new_binary(p, (byte *) dsbufp->str, (int) dsbufp->str_len);
erts_destroy_tmp_dsbuf(dsbufp);
hsz = 4+4;
- (void) erts_bld_uint(NULL, &hsz, (Uint) code_ptr);
+ (void) erts_bld_uword(NULL, &hsz, (BeamInstr) code_ptr);
hp = HAlloc(p, hsz);
- addr = erts_bld_uint(&hp, NULL, (Uint) code_ptr);
+ addr = erts_bld_uword(&hp, NULL, (BeamInstr) code_ptr);
ASSERT(is_atom(funcinfo[0]));
ASSERT(is_atom(funcinfo[1]));
- mfa = TUPLE3(hp, funcinfo[0], funcinfo[1], make_small(funcinfo[2]));
+ mfa = TUPLE3(hp, (Eterm) funcinfo[0], (Eterm) funcinfo[1], make_small((Eterm) funcinfo[2]));
hp += 4;
return TUPLE3(hp, addr, bin, mfa);
}
@@ -236,20 +269,20 @@ dbg_bt(Process* p, Eterm* sp)
while (sp < stack) {
if (is_CP(*sp)) {
- Eterm* addr = find_function_from_pc(cp_val(*sp));
+ BeamInstr* addr = find_function_from_pc(cp_val(*sp));
if (addr)
erts_fprintf(stderr,
HEXF ": %T:%T/%bpu\n",
- addr, addr[0], addr[1], addr[2]);
+ addr, (Eterm) addr[0], (Eterm) addr[1], (Uint) addr[2]);
}
sp++;
}
}
void
-dbg_where(Eterm* addr, Eterm x0, Eterm* reg)
+dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
{
- Eterm* f = find_function_from_pc(addr);
+ BeamInstr* f = find_function_from_pc(addr);
if (f == NULL) {
erts_fprintf(stderr, "???\n");
@@ -259,7 +292,7 @@ dbg_where(Eterm* addr, Eterm x0, Eterm* reg)
addr = f;
arity = addr[2];
- erts_fprintf(stderr, HEXF ": %T:%T(", addr, addr[0], addr[1]);
+ erts_fprintf(stderr, HEXF ": %T:%T(", addr, (Eterm) addr[0], (Eterm) addr[1]);
for (i = 0; i < arity; i++)
erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
erts_fprintf(stderr, ")\n");
@@ -267,18 +300,18 @@ dbg_where(Eterm* addr, Eterm x0, Eterm* reg)
}
static int
-print_op(int to, void *to_arg, int op, int size, Eterm* addr)
+print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
{
int i;
- Uint tag;
+ BeamInstr tag;
char* sign;
char* start_prog; /* Start of program for packer. */
char* prog; /* Current position in packer program. */
- Uint stack[8]; /* Stack for packer. */
- Uint* sp = stack; /* Points to next free position. */
- Uint packed = 0; /* Accumulator for packed operations. */
- Uint args[8]; /* Arguments for this instruction. */
- Uint* ap; /* Pointer to arguments. */
+ BeamInstr stack[8]; /* Stack for packer. */
+ BeamInstr* sp = stack; /* Points to next free position. */
+ BeamInstr packed = 0; /* Accumulator for packed operations. */
+ BeamInstr args[8]; /* Arguments for this instruction. */
+ BeamInstr* ap; /* Pointer to arguments. */
start_prog = opc[op].pack;
@@ -288,7 +321,7 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
* Avoid copying because instructions containing bignum operands
* are bigger than actually declared.
*/
- ap = (Uint *) addr;
+ ap = (BeamInstr *) addr;
} else {
/*
* Copy all arguments to a local buffer for the unpacking.
@@ -324,8 +357,8 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
packed >>= BEAM_TIGHT_SHIFT;
break;
case '6': /* Shift 16 steps */
- *ap++ = packed & 0xffff;
- packed >>= 16;
+ *ap++ = packed & BEAM_LOOSE_MASK;
+ packed >>= BEAM_LOOSE_SHIFT;
break;
case 'p':
*sp++ = *--ap;
@@ -390,11 +423,11 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
case 'i': /* Tagged integer */
case 'c': /* Tagged constant */
case 'q': /* Tagged literal */
- erts_print(to, to_arg, "%T", *ap);
+ erts_print(to, to_arg, "%T", (Eterm) *ap);
ap++;
break;
case 'A':
- erts_print(to, to_arg, "%d", arityval(ap[0]));
+ erts_print(to, to_arg, "%d", arityval( (Eterm) ap[0]));
ap++;
break;
case 'd': /* Destination (x(0), x(N), y(N)) */
@@ -421,30 +454,36 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
ap++;
break;
case 'f': /* Destination label */
- erts_print(to, to_arg, "f(%X)", *ap);
- ap++;
+ {
+ BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
+ if (f+3 != (BeamInstr *) *ap) {
+ erts_print(to, to_arg, "f(" HEXF ")", *ap);
+ } else {
+ erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], (Eterm) f[2]);
+ }
+ ap++;
+ }
break;
case 'p': /* Pointer (to label) */
{
- Eterm* f = find_function_from_pc((Eterm *)*ap);
-
- if (f+3 != (Eterm *) *ap) {
- erts_print(to, to_arg, "p(%X)", *ap);
+ BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
+ if (f+3 != (BeamInstr *) *ap) {
+ erts_print(to, to_arg, "p(" HEXF ")", *ap);
} else {
- erts_print(to, to_arg, "%T:%T/%bpu", f[0], f[1], f[2]);
+ erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], (Eterm) f[2]);
}
ap++;
}
break;
case 'j': /* Pointer (to label) */
- erts_print(to, to_arg, "j(%X)", *ap);
+ erts_print(to, to_arg, "j(" HEXF ")", *ap);
ap++;
break;
case 'e': /* Export entry */
{
Export* ex = (Export *) *ap;
erts_print(to, to_arg,
- "%T:%T/%bpu", ex->code[0], ex->code[1], ex->code[2]);
+ "%T:%T/%bpu", (Eterm) ex->code[0], (Eterm) ex->code[1], (Uint) ex->code[2]);
ap++;
}
break;
@@ -467,7 +506,7 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
ap++;
break;
case 'P': /* Byte offset into tuple (see beam_load.c) */
- erts_print(to, to_arg, "%d", (*ap / sizeof(Eterm*)) - 1);
+ erts_print(to, to_arg, "%d", (*ap / sizeof(Eterm)) - 1);
ap++;
break;
case 'l': /* fr(N) */
@@ -494,7 +533,7 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
int n = ap[-1];
while (n > 0) {
- erts_print(to, to_arg, "%T f(%X) ", ap[0], ap[1]);
+ erts_print(to, to_arg, "%T f(" HEXF ") ", (Eterm) ap[0], ap[1]);
ap += 2;
size += 2;
n--;
@@ -505,7 +544,7 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
{
int n;
for (n = ap[-2]; n > 0; n--) {
- erts_print(to, to_arg, "f(%X) ", ap[0]);
+ erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
ap++;
size++;
}
@@ -513,11 +552,12 @@ print_op(int to, void *to_arg, int op, int size, Eterm* addr)
break;
case op_i_select_big_sf:
while (ap[0]) {
- int arity = thing_arityval(ap[0]);
- print_big(to, to_arg, ap);
- size += arity+1;
- ap += arity+1;
- erts_print(to, to_arg, " f(%X) ", ap[0]);
+ Eterm *bigp = (Eterm *) ap;
+ int arity = thing_arityval(*bigp);
+ print_big(to, to_arg, bigp);
+ size += TermWords(arity+1);
+ ap += TermWords(arity+1);
+ erts_print(to, to_arg, " f(" HEXF ") ", ap[0]);
ap++;
size++;
}
@@ -541,8 +581,8 @@ print_big(int to, void *to_arg, Eterm* addr)
erts_print(to, to_arg, "-#integer(%d) = {", i);
else
erts_print(to, to_arg, "#integer(%d) = {", i);
- erts_print(to, to_arg, "%d", BIG_DIGIT(addr, 0));
+ erts_print(to, to_arg, "0x%x", BIG_DIGIT(addr, 0));
for (k = 1; k < i; k++)
- erts_print(to, to_arg, ",%d", BIG_DIGIT(addr, k));
+ erts_print(to, to_arg, ",0x%x", BIG_DIGIT(addr, k));
erts_print(to, to_arg, "}");
}
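
The new TermWords() macro above converts a count of Eterm-sized term words into the number of BeamInstr-sized code words needed to hold them, rounding up; that is what the op_i_select_big_sf printer now steps by. A minimal stand-alone check of the arithmetic, assuming a 64-bit halfword build where BeamInstr is twice the size of Eterm (the typedefs are stand-ins, not the emulator's own):

    #include <assert.h>

    typedef unsigned long BeamInstr;   /* 8 bytes on a 64-bit build (assumed)   */
    typedef unsigned int  Eterm;       /* 4 bytes on a halfword build (assumed) */

    #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + \
                          !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))

    int main(void)
    {
        /* A bignum with three digits is thing word + 3 digits = 4 Eterms,
         * which pack into 2 BeamInstr-sized code words...                 */
        assert(TermWords(4) == 2);
        /* ...and an odd Eterm count still rounds up to a whole code word,
         * which is why op_i_select_big_sf steps by TermWords(arity+1).    */
        assert(TermWords(3) == 2);
        return 0;
    }
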
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 2f7f48193d..8a0e12dd4f 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -117,6 +117,7 @@ do { \
#endif
#define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
+#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
/*
@@ -138,8 +139,8 @@ do { \
#define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
#else
#define VALID_INSTR(IP) \
- ((Sint)LabelAddr(emulator_loop) <= (Sint)(IP) && \
- (Sint)(IP) < (Sint)LabelAddr(end_emulator_loop))
+ ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
+ (SWord)(IP) < (SWord)LabelAddr(end_emulator_loop))
#endif /* NO_JUMP_TABLE */
#define SET_CP(p, ip) \
@@ -181,11 +182,11 @@ do { \
#define StoreBifResult(Dst, Result) \
do { \
- Eterm* stb_next; \
+ BeamInstr* stb_next; \
Eterm stb_reg; \
stb_reg = Arg(Dst); \
I += (Dst) + 2; \
- stb_next = (Eterm *) *I; \
+ stb_next = (BeamInstr *) *I; \
CHECK_TERM(Result); \
switch (beam_reg_tag(stb_reg)) { \
case R_REG_DEF: \
@@ -205,7 +206,7 @@ do { \
c_p->cp = 0; \
} while(0)
-#define RESTORE_CP(X) SET_CP(c_p, cp_val(*(X)))
+#define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X)))
#define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
@@ -213,13 +214,13 @@ do { \
* Special Beam instructions.
*/
-Eterm beam_apply[2];
-Eterm beam_exit[1];
-Eterm beam_continue_exit[1];
+BeamInstr beam_apply[2];
+BeamInstr beam_exit[1];
+BeamInstr beam_continue_exit[1];
-Eterm* em_call_error_handler;
-Eterm* em_apply_bif;
-Eterm* em_call_traced_function;
+BeamInstr* em_call_error_handler;
+BeamInstr* em_apply_bif;
+BeamInstr* em_call_traced_function;
/* NOTE These should be the only variables containing trace instructions.
@@ -227,9 +228,10 @@ Eterm* em_call_traced_function;
** for the referring variable (one of these), and rogue references
** will most likely cause chaos.
*/
-Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
-Eterm beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
+BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
+BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
+BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/*
* All Beam instructions in numerical order.
@@ -522,8 +524,8 @@ extern int count_instructions;
#define DispatchMacro() \
do { \
- Eterm* dis_next; \
- dis_next = (Eterm *) *I; \
+ BeamInstr* dis_next; \
+ dis_next = (BeamInstr *) *I; \
CHECK_ARGS(I); \
if (FCALLS > 0 || FCALLS > neg_o_reds) { \
FCALLS--; \
@@ -535,8 +537,8 @@ extern int count_instructions;
#define DispatchMacroFun() \
do { \
- Eterm* dis_next; \
- dis_next = (Eterm *) *I; \
+ BeamInstr* dis_next; \
+ dis_next = (BeamInstr *) *I; \
CHECK_ARGS(I); \
if (FCALLS > 0 || FCALLS > neg_o_reds) { \
FCALLS--; \
@@ -590,7 +592,7 @@ extern int count_instructions;
ASSERT(VALID_INSTR(*I)); \
Goto(*I)
-#define PreFetch(N, Dst) do { Dst = (Eterm *) *(I + N + 1); } while (0)
+#define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0)
#define NextPF(N, Dst) \
I += N + 1; \
ASSERT(VALID_INSTR(Dst)); \
@@ -644,7 +646,7 @@ extern int count_instructions;
#define DeallocateReturn(Deallocate) \
do { \
int words_to_pop = (Deallocate); \
- SET_I(cp_val(*E)); \
+ SET_I((BeamInstr *) cp_val(*E)); \
E = ADD_BYTE_OFFSET(E, words_to_pop); \
CHECK_TERM(r(0)); \
Goto(*I); \
@@ -657,19 +659,19 @@ extern int count_instructions;
#define MoveCall(Src, Dest, CallDest, Size) \
(Dest) = (Src); \
SET_CP(c_p, I+Size+1); \
- SET_I((Eterm *) CallDest); \
+ SET_I((BeamInstr *) CallDest); \
Dispatch();
#define MoveCallLast(Src, Dest, CallDest, Deallocate) \
(Dest) = (Src); \
RESTORE_CP(E); \
E = ADD_BYTE_OFFSET(E, (Deallocate)); \
- SET_I((Eterm *) CallDest); \
+ SET_I((BeamInstr *) CallDest); \
Dispatch();
#define MoveCallOnly(Src, Dest, CallDest) \
(Dest) = (Src); \
- SET_I((Eterm *) CallDest); \
+ SET_I((BeamInstr *) CallDest); \
Dispatch();
#define GetList(Src, H, T) do { \
@@ -677,47 +679,48 @@ extern int count_instructions;
H = CAR(tmp_ptr); \
T = CDR(tmp_ptr); } while (0)
-#define GetTupleElement(Src, Element, Dest) \
- do { \
- tmp_arg1 = (Eterm) (((unsigned char *) tuple_val(Src)) + (Element)); \
- (Dest) = (*(Eterm *)tmp_arg1); \
+#define GetTupleElement(Src, Element, Dest) \
+ do { \
+ tmp_arg1 = (Eterm) COMPRESS_POINTER(((unsigned char *) tuple_val(Src)) + \
+ (Element)); \
+ (Dest) = (*(Eterm *) EXPAND_POINTER(tmp_arg1)); \
} while (0)
-#define ExtractNextElement(Dest) \
- tmp_arg1 += sizeof(Eterm); \
- (Dest) = (* (Eterm *) (((unsigned char *) tmp_arg1)))
+#define ExtractNextElement(Dest) \
+ tmp_arg1 += sizeof(Eterm); \
+ (Dest) = (* (Eterm *) (((unsigned char *) EXPAND_POINTER(tmp_arg1))))
-#define ExtractNextElement2(Dest) \
- do { \
- Eterm* ene_dstp = &(Dest); \
- ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
- ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
- tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
+#define ExtractNextElement2(Dest) \
+ do { \
+ Eterm* ene_dstp = &(Dest); \
+ ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
+ ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
+ tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
} while (0)
#define ExtractNextElement3(Dest) \
do { \
Eterm* ene_dstp = &(Dest); \
- ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
- ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
- ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
+ ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
+ ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
+ ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
tmp_arg1 += 3*sizeof(Eterm); \
} while (0)
#define ExtractNextElement4(Dest) \
do { \
Eterm* ene_dstp = &(Dest); \
- ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
- ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
- ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
- ene_dstp[3] = ((Eterm *) tmp_arg1)[4]; \
+ ene_dstp[0] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[1]; \
+ ene_dstp[1] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[2]; \
+ ene_dstp[2] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[3]; \
+ ene_dstp[3] = ((Eterm *) EXPAND_POINTER(tmp_arg1))[4]; \
tmp_arg1 += 4*sizeof(Eterm); \
} while (0)
#define ExtractElement(Element, Dest) \
do { \
tmp_arg1 += (Element); \
- (Dest) = (* (Eterm *) tmp_arg1); \
+ (Dest) = (* (Eterm *) EXPAND_POINTER(tmp_arg1)); \
} while (0)
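
COMPRESS_POINTER and EXPAND_POINTER are not defined in this hunk; the sketch below only illustrates the assumed intent, namely that a halfword build keeps the heap low enough in the address space for a pointer to round-trip through a 32-bit Eterm, while a plain build passes values through unchanged. The real macros live elsewhere in the emulator headers and may differ:

    #include <stdint.h>

    typedef uintptr_t UWord;

    #if HALFWORD_HEAP
    typedef uint32_t Eterm;                 /* half a machine word */
    /* The heap is assumed to live below 4 GB, so a heap pointer survives
     * a round trip through a 32-bit Eterm.                              */
    #  define COMPRESS_POINTER(p)  ((Eterm) (UWord) (p))
    #  define EXPAND_POINTER(e)    ((UWord) (e))
    #else
    typedef uintptr_t Eterm;                /* already pointer sized */
    #  define COMPRESS_POINTER(p)  (p)      /* both are no-ops on a plain build */
    #  define EXPAND_POINTER(e)    (e)
    #endif
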
#define PutTuple(Arity, Src, Dest) \
@@ -759,8 +762,13 @@ extern int count_instructions;
#define IsTuple(X, Action) if (is_not_tuple(X)) Action
-#define IsArity(Pointer, Arity, Fail) \
- if (*(Eterm *)(tmp_arg1 = (Eterm)tuple_val(Pointer)) != (Arity)) { Fail; }
+#define IsArity(Pointer, Arity, Fail) \
+ if (*(Eterm *) \
+ EXPAND_POINTER(tmp_arg1 = (Eterm) \
+ COMPRESS_POINTER(tuple_val(Pointer))) != (Arity)) \
+ { \
+ Fail; \
+ }
#define IsFunction(X, Action) \
do { \
@@ -776,11 +784,14 @@ extern int count_instructions;
} \
} while (0)
-#define IsTupleOfArity(Src, Arity, Fail) \
- do { \
- if (is_not_tuple(Src) || *(Eterm *)(tmp_arg1 = (Eterm) tuple_val(Src)) != Arity) { \
- Fail; \
- } \
+#define IsTupleOfArity(Src, Arity, Fail) \
+ do { \
+ if (is_not_tuple(Src) || \
+ *(Eterm *) \
+ EXPAND_POINTER(tmp_arg1 = \
+ (Eterm) COMPRESS_POINTER(tuple_val(Src))) != Arity) { \
+ Fail; \
+ } \
} while (0)
#define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
@@ -791,7 +802,7 @@ extern int count_instructions;
#define IsBitstring(Src, Fail) \
if (is_not_binary(Src)) { Fail; }
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define BsSafeMul(A, B, Fail, Target) \
do { Uint64 _res = (A) * (B); \
if (_res / B != A) { Fail; } \
@@ -974,33 +985,33 @@ extern int count_instructions;
#define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
static BifFunction translate_gc_bif(void* gcf);
-static Eterm* handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf);
-static Eterm* next_catch(Process* c_p, Eterm *reg);
+static BeamInstr* handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf);
+static BeamInstr* next_catch(Process* c_p, Eterm *reg);
static void terminate_proc(Process* c_p, Eterm Value);
static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
-static void save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg,
+static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
BifFunction bf, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
-static Eterm call_error_handler(Process* p, Eterm* ip, Eterm* reg);
-static Eterm call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg);
-static Uint* fixed_apply(Process* p, Eterm* reg, Uint arity);
-static Eterm* apply(Process* p, Eterm module, Eterm function,
+static Eterm call_error_handler(Process* p, BeamInstr* ip, Eterm* reg);
+static Eterm call_breakpoint_handler(Process* p, BeamInstr* fi, Eterm* reg);
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity);
+static BeamInstr* apply(Process* p, Eterm module, Eterm function,
Eterm args, Eterm* reg);
static int hibernate(Process* c_p, Eterm module, Eterm function,
Eterm args, Eterm* reg);
-static Eterm* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
-static Eterm* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
+static BeamInstr* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
+static BeamInstr* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free);
-#if defined(_OSE_) || defined(VXWORKS)
+#if defined(VXWORKS)
static int init_done;
#endif
void
init_emulator(void)
{
-#if defined(_OSE_) || defined(VXWORKS)
+#if defined(VXWORKS)
init_done = 0;
#endif
process_main();
@@ -1039,7 +1050,7 @@ init_emulator(void)
*/
void process_main(void)
{
-#if !defined(_OSE_) && !defined(VXWORKS)
+#if !defined(VXWORKS)
static int init_done = 0;
#endif
Process* c_p = NULL;
@@ -1078,7 +1089,7 @@ void process_main(void)
/*
* Pointer to next threaded instruction.
*/
- register Eterm *I REG_I = NULL;
+ register BeamInstr *I REG_I = NULL;
/* Number of reductions left. This function
* returns to the scheduler when FCALLS reaches zero.
@@ -1090,9 +1101,14 @@ void process_main(void)
*/
register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
- Eterm tmp_big[2]; /* Temporary buffer for small bignums. */
+#if HEAP_ON_C_STACK
+ Eterm tmp_big[2]; /* Temporary buffer for small bignums if HEAP_ON_C_STACK. */
+#else
+ Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
+#endif
#ifndef ERTS_SMP
+#if !HALFWORD_HEAP
static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
/* X registers -- not used directly, but
* through 'reg', because using it directly
@@ -1100,7 +1116,7 @@ void process_main(void)
* while using it through reg needs only
* one.
*/
-
+#endif
/*
* Floating point registers.
*/
@@ -1141,13 +1157,17 @@ void process_main(void)
* Note: c_p->arity must be set to reflect the number of useful terms in
* c_p->arg_reg before calling the scheduler.
*/
-
if (!init_done) {
init_done = 1;
goto init_emulator;
}
#ifndef ERTS_SMP
+#if !HALFWORD_HEAP
reg = save_reg; /* XXX: probably wastes a register on x86 */
+#else
+ /* Registers need to be heap allocated (correct memory range) for tracing to work */
+ reg = erts_alloc(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * sizeof(Eterm));
+#endif
#endif
c_p = NULL;
reds_used = 0;
@@ -1168,11 +1188,14 @@ void process_main(void)
reg = c_p->scheduler_data->save_reg;
freg = c_p->scheduler_data->freg;
#endif
+#if !HEAP_ON_C_STACK
+ tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
+#endif
ERL_BITS_RELOAD_STATEP(c_p);
{
int reds;
Eterm* argp;
- Eterm* next;
+ BeamInstr *next;
int i;
argp = c_p->arg_reg;
@@ -1199,7 +1222,7 @@ void process_main(void)
FCALLS = REDS_IN(c_p) = reds;
}
- next = (Eterm *) *I;
+ next = (BeamInstr *) *I;
r(0) = c_p->arg_reg[0];
#ifdef HARDDEBUG
if (c_p->arity > 0) {
@@ -1291,7 +1314,7 @@ void process_main(void)
}
/* FALL THROUGH */
OpCase(i_call_only_f): {
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Dispatch();
}
@@ -1302,7 +1325,7 @@ void process_main(void)
OpCase(i_call_last_fP): {
RESTORE_CP(E);
E = ADD_BYTE_OFFSET(E, Arg(1));
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Dispatch();
}
@@ -1313,7 +1336,7 @@ void process_main(void)
/* FALL THROUGH */
OpCase(i_call_f): {
SET_CP(c_p, I+2);
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Dispatch();
}
@@ -1349,7 +1372,7 @@ void process_main(void)
Dispatchx();
OpCase(init_y): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(1, next);
make_blank(yb(Arg(0)));
@@ -1357,7 +1380,7 @@ void process_main(void)
}
OpCase(i_trim_I): {
- Eterm* next;
+ BeamInstr *next;
Uint words;
Uint cp;
@@ -1383,7 +1406,7 @@ void process_main(void)
}
OpCase(test_heap_1_put_list_Iy): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(2, next);
TestHeap(Arg(0), 1);
@@ -1392,20 +1415,6 @@ void process_main(void)
NextPF(2, next);
}
- OpCase(put_string_IId):
- {
- unsigned char* s;
- int len;
- Eterm result;
-
- len = Arg(0); /* Length. */
- result = NIL;
- for (s = (unsigned char *) Arg(1); len > 0; s--, len--) {
- PutList(make_small(*s), result, result, StoreSimpleDest);
- }
- StoreBifResult(2, result);
- }
-
/*
* Send is almost a standard call-BIF with two arguments, except for:
* 1) It cannot be traced.
@@ -1414,7 +1423,7 @@ void process_main(void)
*/
OpCase(send): {
- Eterm* next;
+ BeamInstr *next;
Eterm result;
PRE_BIF_SWAPOUT(c_p);
@@ -1429,7 +1438,7 @@ void process_main(void)
NextPF(0, next);
} else if (c_p->freason == TRAP) {
SET_CP(c_p, I+1);
- SET_I((Eterm *) c_p->def_arg_reg[3]);
+ SET_I(*((BeamInstr **) (BeamInstr) ((c_p)->def_arg_reg + 3)));
SWAPIN;
r(0) = c_p->def_arg_reg[0];
x(1) = c_p->def_arg_reg[1];
@@ -1531,6 +1540,10 @@ void process_main(void)
/*
* Skeleton for receive statement:
*
+ * recv_mark L1 Optional
+ * call make_ref/monitor Optional
+ * ...
+ * recv_set L1 Optional
* L1: <-------------------+
* <-----------+ |
* | |
@@ -1549,13 +1562,41 @@ void process_main(void)
*
*/
+ OpCase(recv_mark_f): {
+ /*
+	 * Save the current position in the message buffer and the
+	 * label for the loop_rec/2 instruction for the
+	 * receive statement.
+ */
+ c_p->msg.mark = (BeamInstr *) Arg(0);
+ c_p->msg.saved_last = c_p->msg.last;
+ Next(1);
+ }
+
+ OpCase(i_recv_set): {
+ /*
+ * If the mark is valid (points to the loop_rec/2
+ * instruction that follows), we know that the saved
+ * position points to the first message that could
+ * possibly be matched out.
+ *
+ * If the mark is invalid, we do nothing, meaning that
+ * we will look through all messages in the message queue.
+ */
+ if (c_p->msg.mark == (BeamInstr *) (I+1)) {
+ c_p->msg.save = c_p->msg.saved_last;
+ }
+ I++;
+ /* Fall through to the loop_rec/2 instruction */
+ }
+
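+
A compact sketch of the bookkeeping performed by the two new receive instructions above, using simplified stand-in types; the field names (last, save, saved_last, mark) follow the code in the hunk, everything else is illustrative:

    typedef unsigned long BeamInstr;        /* stand-in for the real type */
    typedef struct erl_message ErlMessage;  /* opaque here                */

    struct msg_queue_sketch {
        ErlMessage **last;         /* tail of the in-queue                   */
        ErlMessage **save;         /* where the next receive starts scanning */
        ErlMessage **saved_last;   /* snapshot taken by recv_mark            */
        BeamInstr   *mark;         /* loop_rec label the snapshot belongs to */
    };

    /* recv_mark L1: remember where the queue ends right before the ref or
     * monitor is created; messages already queued here cannot contain it. */
    static void recv_mark(struct msg_queue_sketch *q, BeamInstr *label)
    {
        q->mark = label;
        q->saved_last = q->last;
    }

    /* i_recv_set, falling through to loop_rec at `label`: only trust the
     * snapshot if the mark really was taken for this receive.            */
    static void recv_set(struct msg_queue_sketch *q, BeamInstr *label)
    {
        if (q->mark == label)
            q->save = q->saved_last;
    }
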
/*
* Pick up the next message and place it in x(0).
* If no message, jump to a wait or wait_timeout instruction.
*/
OpCase(i_loop_rec_fr):
{
- Eterm* next;
+ BeamInstr *next;
ErlMessage* msgp;
loop_rec__:
@@ -1579,7 +1620,7 @@ void process_main(void)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
else {
#endif
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Goto(*I); /* Jump to a wait or wait_timeout instruction */
#ifdef ERTS_SMP
}
@@ -1615,7 +1656,7 @@ void process_main(void)
* Remove a (matched) message from the message queue.
*/
OpCase(remove_message): {
- Eterm* next;
+ BeamInstr *next;
ErlMessage* msgp;
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -1660,7 +1701,7 @@ void process_main(void)
* message didn't match), then jump to the loop_rec instruction.
*/
OpCase(loop_rec_end_f): {
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
SAVE_MESSAGE(c_p);
goto loop_rec__;
}
@@ -1690,12 +1731,12 @@ void process_main(void)
}
GetArg1(1, timeout_value);
if (timeout_value != make_small(0)) {
-#if !defined(ARCH_64)
+#if !defined(ARCH_64) || HALFWORD_HEAP
Uint time_val;
#endif
if (is_small(timeout_value) && signed_val(timeout_value) > 0 &&
-#if defined(ARCH_64)
+#if defined(ARCH_64) && !HALFWORD_HEAP
((unsigned_val(timeout_value) >> 32) == 0)
#else
1
@@ -1706,14 +1747,16 @@ void process_main(void)
* c_p->def_arg_reg[0]. Note that it is safe to use this
* location because there are no living x registers in
* a receive statement.
+	     * Note that for the halfword emulator, the first two elements
+	     * of the array are used.
*/
- c_p->def_arg_reg[0] = (Eterm) (I+3);
+ *((BeamInstr **) (UWord) c_p->def_arg_reg) = I+3;
set_timer(c_p, unsigned_val(timeout_value));
} else if (timeout_value == am_infinity) {
c_p->flags |= F_TIMO;
-#if !defined(ARCH_64)
+#if !defined(ARCH_64) || HALFWORD_HEAP
} else if (term_to_Uint(timeout_value, &time_val)) {
- c_p->def_arg_reg[0] = (Eterm) (I+3);
+ *((BeamInstr **) (UWord) c_p->def_arg_reg) = I+3;
set_timer(c_p, time_val);
#endif
} else { /* Wrong time */
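
The cast through (BeamInstr **) is what lets wait_timeout park a code pointer in def_arg_reg even though that array holds Eterm-sized slots; on a 64-bit halfword build the pointer covers the first two slots, which is what the note above refers to. A small stand-alone illustration of the idiom, with assumed typedefs (the real def_arg_reg is a Process field, not a local array):

    #include <assert.h>
    #include <stdalign.h>
    #include <stdint.h>

    typedef uint32_t Eterm;        /* half word on a halfword build */
    typedef uint64_t BeamInstr;    /* always a full machine word    */

    int main(void)
    {
        alignas(BeamInstr *) Eterm def_arg_reg[6] = {0};
        BeamInstr code[8] = {0};
        BeamInstr *I = code;

        /* One code pointer spans two Eterm slots on this configuration. */
        assert(sizeof(BeamInstr *) == 2 * sizeof(Eterm));

        /* Same idiom as wait_timeout above: park the continuation...    */
        *((BeamInstr **) (uintptr_t) def_arg_reg) = I + 3;
        /* ...and it comes back intact when read through the same cast.  */
        assert(*((BeamInstr **) (uintptr_t) def_arg_reg) == I + 3);
        return 0;
    }
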
@@ -1742,7 +1785,7 @@ void process_main(void)
wait2: {
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- c_p->i = (Eterm *) Arg(0); /* L1 */
+ c_p->i = (BeamInstr *) Arg(0); /* L1 */
SWAPOUT;
c_p->arity = 0;
c_p->status = P_WAITING;
@@ -1770,7 +1813,7 @@ void process_main(void)
* we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
*/
if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
- c_p->def_arg_reg[0] = (Eterm) (I+3);
+ *((BeamInstr **) (UWord) c_p->def_arg_reg) = I+3;
set_timer(c_p, Arg(1));
}
goto wait2;
@@ -1785,7 +1828,7 @@ void process_main(void)
}
OpCase(timeout): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(0, next);
if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
@@ -1805,8 +1848,8 @@ void process_main(void)
do_binary_search:
{
struct Pairs {
- Eterm val;
- Eterm* addr;
+ BeamInstr val;
+ BeamInstr* addr;
};
struct Pairs* low;
struct Pairs* high;
@@ -1846,7 +1889,7 @@ void process_main(void)
Goto(*I);
}
}
- SET_I((Eterm *) Arg(1));
+ SET_I((BeamInstr *) Arg(1));
Goto(*I);
}
@@ -1858,11 +1901,11 @@ void process_main(void)
if (is_small(index)) {
index = signed_val(index);
if (index < Arg(2)) {
- SET_I((Eterm *) (&Arg(3))[index]);
+ SET_I((BeamInstr *) (&Arg(3))[index]);
Goto(*I);
}
}
- SET_I((Eterm *) Arg(1));
+ SET_I((BeamInstr *) Arg(1));
Goto(*I);
}
@@ -1874,11 +1917,11 @@ void process_main(void)
if (is_small(index)) {
index = (Uint) (signed_val(index) - Arg(3));
if (index < Arg(2)) {
- SET_I((Eterm *) (&Arg(4))[index]);
+ SET_I((BeamInstr *) (&Arg(4))[index]);
Goto(*I);
}
}
- SET_I((Eterm *) Arg(1));
+ SET_I((BeamInstr *) Arg(1));
Goto(*I);
}
@@ -1915,7 +1958,7 @@ void process_main(void)
if (is_value(result)) {
StoreBifResult(3, result);
}
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Goto(*I);
}
@@ -1955,7 +1998,7 @@ void process_main(void)
GcBifFunction bf;
Eterm arg;
Eterm result;
- Uint live = Arg(3);
+ Uint live = (Uint) Arg(3);
GetArg1(2, arg);
reg[0] = r(0);
@@ -1976,7 +2019,7 @@ void process_main(void)
StoreBifResult(4, result);
}
if (Arg(0) != 0) {
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Goto(*I);
}
reg[0] = arg;
@@ -1984,6 +2027,81 @@ void process_main(void)
goto post_error_handling;
}
+ OpCase(i_gc_bif2_jIId): /* Note, one less parameter than the i_gc_bif1
+ and i_gc_bif3 */
+ {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm result;
+ Uint live = (Uint) Arg(2);
+
+ reg[0] = r(0);
+ reg[live++] = tmp_arg1;
+ reg[live] = tmp_arg2;
+ bf = (GcBifFunction) Arg(1);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ r(0) = reg[0];
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ StoreBifResult(3, result);
+ }
+ if (Arg(0) != 0) {
+ SET_I((BeamInstr *) Arg(0));
+ Goto(*I);
+ }
+ reg[0] = tmp_arg1;
+ reg[1] = tmp_arg2;
+ I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ goto post_error_handling;
+ }
+
+ OpCase(i_gc_bif3_jIsId):
+ {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm arg;
+ Eterm result;
+ Uint live = (Uint) Arg(3);
+
+ GetArg1(2, arg);
+ reg[0] = r(0);
+ reg[live++] = arg;
+ reg[live++] = tmp_arg1;
+ reg[live] = tmp_arg2;
+ bf = (GcBifFunction) Arg(1);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ r(0) = reg[0];
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ StoreBifResult(4, result);
+ }
+ if (Arg(0) != 0) {
+ SET_I((BeamInstr *) Arg(0));
+ Goto(*I);
+ }
+ reg[0] = arg;
+ reg[1] = tmp_arg1;
+ reg[2] = tmp_arg2;
+ I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ goto post_error_handling;
+ }
+
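+
The two new cases above reuse the calling convention of i_gc_bif1: the operands are appended to the X register array and the BIF is handed the array plus a live count, so it can garbage collect without losing them. A hedged sketch of the callee side; the function name and body are illustrative only, not the actual erts_gc_binary_part implementation:

    typedef unsigned long Eterm;
    typedef unsigned long Uint;
    struct process;                        /* opaque stand-in for Process */

    /* The shape the dispatcher above assumes for a GC-capable BIF: the
     * whole X register array and a live count travel with the call.     */
    typedef Eterm (*GcBifFunction)(struct process *p, Eterm *reg, Uint live);

    static Eterm example_gc_bif_2(struct process *p, Eterm *reg, Uint live)
    {
        /* i_gc_bif2 stored its two operands just before the call, so from
         * the callee's point of view they sit at reg[live-1] and reg[live]. */
        Eterm a = reg[live - 1];
        Eterm b = reg[live];
        (void) p;
        return a > b ? a : b;              /* placeholder result */
    }
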
/*
* Guards bifs and, or, xor in guards.
*/
@@ -2004,7 +2122,7 @@ void process_main(void)
if (is_value(result)) {
StoreBifResult(2, result);
}
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Goto(*I);
}
@@ -2040,7 +2158,7 @@ void process_main(void)
*/
OpCase(call_bif0_e):
{
- Eterm (*bf)(Process*, Uint*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm (*bf)(Process*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
PRE_BIF_SWAPOUT(c_p);
c_p->fcalls = FCALLS - 1;
@@ -2073,9 +2191,9 @@ void process_main(void)
OpCase(call_bif1_e):
{
- Eterm (*bf)(Process*, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm (*bf)(Process*, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
Eterm result;
- Eterm* next;
+ BeamInstr *next;
c_p->fcalls = FCALLS - 1;
if (FCALLS <= 0) {
@@ -2108,9 +2226,9 @@ void process_main(void)
OpCase(call_bif2_e):
{
- Eterm (*bf)(Process*, Eterm, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
Eterm result;
- Eterm* next;
+ BeamInstr *next;
PRE_BIF_SWAPOUT(c_p);
c_p->fcalls = FCALLS - 1;
@@ -2145,9 +2263,9 @@ void process_main(void)
OpCase(call_bif3_e):
{
- Eterm (*bf)(Process*, Eterm, Eterm, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
Eterm result;
- Eterm* next;
+ BeamInstr *next;
PRE_BIF_SWAPOUT(c_p);
c_p->fcalls = FCALLS - 1;
@@ -2168,7 +2286,7 @@ void process_main(void)
} else if (c_p->freason == TRAP) {
call_bif_trap3:
SET_CP(c_p, I+2);
- SET_I((Eterm *)c_p->def_arg_reg[3]);
+ SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
SWAPIN;
r(0) = c_p->def_arg_reg[0];
x(1) = c_p->def_arg_reg[1];
@@ -2276,7 +2394,7 @@ void process_main(void)
lb_Cl_error: {
if (Arg(0) != 0) {
OpCase(jump_f): {
- SET_I((Eterm *) Arg(0));
+ SET_I((BeamInstr *) Arg(0));
Goto(*I);
}
}
@@ -2468,7 +2586,7 @@ void process_main(void)
goto lb_Cl_error;
OpCase(i_apply): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
next = apply(c_p, r(0), x(1), x(2), reg);
SWAPIN;
@@ -2483,13 +2601,13 @@ void process_main(void)
}
OpCase(i_apply_last_P): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
next = apply(c_p, r(0), x(1), x(2), reg);
SWAPIN;
if (next != NULL) {
r(0) = reg[0];
- SET_CP(c_p, (Eterm *) E[0]);
+ SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
E = ADD_BYTE_OFFSET(E, Arg(0));
SET_I(next);
Dispatch();
@@ -2499,7 +2617,7 @@ void process_main(void)
}
OpCase(i_apply_only): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
next = apply(c_p, r(0), x(1), x(2), reg);
SWAPIN;
@@ -2513,7 +2631,7 @@ void process_main(void)
}
OpCase(apply_I): {
- Eterm* next;
+ BeamInstr *next;
reg[0] = r(0);
SWAPOUT;
@@ -2530,7 +2648,7 @@ void process_main(void)
}
OpCase(apply_last_IP): {
- Eterm* next;
+ BeamInstr *next;
reg[0] = r(0);
SWAPOUT;
@@ -2538,7 +2656,7 @@ void process_main(void)
SWAPIN;
if (next != NULL) {
r(0) = reg[0];
- SET_CP(c_p, (Eterm *) E[0]);
+ SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
E = ADD_BYTE_OFFSET(E, Arg(1));
SET_I(next);
Dispatch();
@@ -2548,7 +2666,7 @@ void process_main(void)
}
OpCase(i_apply_fun): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
next = apply_fun(c_p, r(0), x(1), reg);
@@ -2563,14 +2681,14 @@ void process_main(void)
}
OpCase(i_apply_fun_last_P): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
next = apply_fun(c_p, r(0), x(1), reg);
SWAPIN;
if (next != NULL) {
r(0) = reg[0];
- SET_CP(c_p, (Eterm *) E[0]);
+ SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
E = ADD_BYTE_OFFSET(E, Arg(0));
SET_I(next);
Dispatchfun();
@@ -2579,7 +2697,7 @@ void process_main(void)
}
OpCase(i_apply_fun_only): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
next = apply_fun(c_p, r(0), x(1), reg);
@@ -2593,10 +2711,11 @@ void process_main(void)
}
OpCase(i_call_fun_I): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
reg[0] = r(0);
+
next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
SWAPIN;
if (next != NULL) {
@@ -2609,7 +2728,7 @@ void process_main(void)
}
OpCase(i_call_fun_last_IP): {
- Eterm* next;
+ BeamInstr *next;
SWAPOUT;
reg[0] = r(0);
@@ -2617,7 +2736,7 @@ void process_main(void)
SWAPIN;
if (next != NULL) {
r(0) = reg[0];
- SET_CP(c_p, (Eterm *) E[0]);
+ SET_CP(c_p, (BeamInstr *) EXPAND_POINTER(E[0]));
E = ADD_BYTE_OFFSET(E, Arg(1));
SET_I(next);
Dispatchfun();
@@ -2722,7 +2841,7 @@ void process_main(void)
tmp_arg1 = *tuple_val(tmp_arg1);
goto do_binary_search;
}
- SET_I((Eterm *) Arg(1));
+ SET_I((BeamInstr *) Arg(1));
Goto(*I);
}
@@ -2748,16 +2867,18 @@ void process_main(void)
given = big_val(tmp_arg1);
given_arity = given[0];
given_size = thing_arityval(given_arity);
- bigp = &Arg(2);
+ bigp = (Eterm *) &Arg(2);
while ((arity = bigp[0]) > given_arity) {
- bigp += thing_arityval(arity) + 2;
+ bigp += (TermWords(thing_arityval(arity) + 1) + 1) * (sizeof(BeamInstr)/sizeof(Eterm));
}
while (bigp[0] == given_arity) {
if (memcmp(bigp+1, given+1, sizeof(Eterm)*given_size) == 0) {
- SET_I((Eterm *) bigp[given_size+1]);
+ BeamInstr *tmp =
+ ((BeamInstr *) (UWord) bigp) + TermWords(given_size + 1);
+ SET_I((BeamInstr *) *tmp);
Goto(*I);
}
- bigp += thing_arityval(arity) + 2;
+ bigp += (TermWords(thing_arityval(arity) + 1) + 1) * (sizeof(BeamInstr)/sizeof(Eterm));
}
}
@@ -2765,18 +2886,18 @@ void process_main(void)
* Failed.
*/
- SET_I((Eterm *) Arg(1));
+ SET_I((BeamInstr *) Arg(1));
Goto(*I);
}
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
OpCase(i_select_float_sfI):
{
Uint f;
int n;
struct ValLabel {
Uint f;
- Eterm* addr;
+ BeamInstr* addr;
};
struct ValLabel* ptr;
@@ -2804,7 +2925,7 @@ void process_main(void)
struct ValLabel {
Uint fpart1;
Uint fpart2;
- Eterm* addr;
+ BeamInstr* addr;
};
struct ValLabel* ptr;
@@ -2822,7 +2943,7 @@ void process_main(void)
}
ptr++;
}
- SET_I((Eterm *) Arg(1));
+ SET_I((BeamInstr *) Arg(1));
Goto(*I);
}
#endif
@@ -2830,7 +2951,7 @@ void process_main(void)
OpCase(set_tuple_element_sdP): {
Eterm element;
Eterm tuple;
- Eterm* next;
+ BeamInstr *next;
Eterm* p;
PreFetch(3, next);
@@ -3025,7 +3146,7 @@ void process_main(void)
switch (tmp_arg2) {
case 3:
{
- Eterm (*bf)(Process*, Eterm, Eterm, Eterm, Uint*) = vbf;
+ Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = vbf;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
tmp_arg1 = (*bf)(c_p, r(0), x(1), x(2), I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
@@ -3034,7 +3155,7 @@ void process_main(void)
break;
case 2:
{
- Eterm (*bf)(Process*, Eterm, Eterm, Uint*) = vbf;
+ Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = vbf;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
tmp_arg1 = (*bf)(c_p, r(0), x(1), I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
@@ -3043,7 +3164,7 @@ void process_main(void)
break;
case 1:
{
- Eterm (*bf)(Process*, Eterm, Uint*) = vbf;
+ Eterm (*bf)(Process*, Eterm, BeamInstr*) = vbf;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
tmp_arg1 = (*bf)(c_p, r(0), I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
@@ -3052,7 +3173,7 @@ void process_main(void)
break;
case 0:
{
- Eterm (*bf)(Process*, Uint*) = vbf;
+ Eterm (*bf)(Process*, BeamInstr*) = vbf;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
tmp_arg1 = (*bf)(c_p, I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
@@ -3076,7 +3197,7 @@ apply_bif_or_nif_epilogue:
SET_I(c_p->cp);
Goto(*I);
} else if (c_p->freason == TRAP) {
- SET_I((Eterm *)c_p->def_arg_reg[3]);
+ SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
r(0) = c_p->def_arg_reg[0];
x(1) = c_p->def_arg_reg[1];
x(2) = c_p->def_arg_reg[2];
@@ -3097,12 +3218,6 @@ apply_bif_or_nif_epilogue:
StoreBifResult(1, result);
}
- OpCase(i_put_tuple_only_Ad): {
- tmp_arg1 = make_tuple(HTOP);
- *HTOP++ = Arg(0);
- StoreBifResult(1, tmp_arg1);
- }
-
OpCase(case_end_s):
GetArg1(0, tmp_arg1);
c_p->fvalue = tmp_arg1;
@@ -3270,12 +3385,12 @@ apply_bif_or_nif_epilogue:
HTOP += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = num_bytes;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
- MSO(c_p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
new_binary = make_binary(pb);
goto do_bits_sub_bin;
}
@@ -3371,13 +3486,13 @@ apply_bif_or_nif_epilogue:
HTOP += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = tmp_arg1;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
- MSO(c_p).overhead += tmp_arg1 / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(c_p)), tmp_arg1 / sizeof(Eterm));
StoreBifResult(2, make_binary(pb));
}
@@ -3413,42 +3528,6 @@ apply_bif_or_nif_epilogue:
}
}
- OpCase(i_bs_bits_to_bytes_rjd): {
- tmp_arg1 = r(0);
- goto do_bits_to_bytes;
- }
-
- OpCase(i_bs_bits_to_bytes_yjd): {
- tmp_arg1 = yb(Arg(0));
- I++;
- goto do_bits_to_bytes;
-
- OpCase(i_bs_bits_to_bytes_xjd): {
- tmp_arg1 = xb(Arg(0));
- I++;
- }
-
- do_bits_to_bytes:
- {
- if (is_valid_bit_size(tmp_arg1)) {
- tmp_arg1 = make_small(unsigned_val(tmp_arg1) >> 3);
- } else {
- Uint bytes;
- if (!term_to_Uint(tmp_arg1, &bytes)) {
- goto badarg;
- }
- tmp_arg1 = bytes;
- if ((tmp_arg1 & 0x07) != 0) {
- goto badarg;
- }
- SWAPOUT;
- tmp_arg1 = erts_make_integer(tmp_arg1 >> 3, c_p);
- HTOP = HEAP_TOP(c_p);
- }
- StoreBifResult(1, tmp_arg1);
- }
- }
-
OpCase(i_bs_add_jId): {
Uint Unit = Arg(1);
if (is_both_small(tmp_arg1, tmp_arg2)) {
@@ -3486,7 +3565,7 @@ apply_bif_or_nif_epilogue:
/*
* Now we know that one of the arguments is
- * not at small. We must convert both arguments
+ * not a small. We must convert both arguments
* to Uints and check for errors at the same time.
*
* Error checking is tricky.
@@ -3534,7 +3613,7 @@ apply_bif_or_nif_epilogue:
OpCase(bs_put_string_II):
{
- Eterm* next;
+ BeamInstr *next;
PreFetch(2, next);
erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0)));
NextPF(2, next);
@@ -3705,7 +3784,7 @@ apply_bif_or_nif_epilogue:
{
Eterm header;
- Eterm* next;
+ BeamInstr *next;
Uint slots;
OpCase(i_bs_start_match2_rfIId): {
@@ -3771,7 +3850,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(bs_test_zero_tail2_fr): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(1, next);
@@ -3783,7 +3862,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(bs_test_zero_tail2_fx): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(2, next);
@@ -3795,7 +3874,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(bs_test_tail_imm2_frI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(2, next);
_mb = ms_matchbuffer(r(0));
@@ -3805,7 +3884,7 @@ apply_bif_or_nif_epilogue:
NextPF(2, next);
}
OpCase(bs_test_tail_imm2_fxI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(3, next);
_mb = ms_matchbuffer(xb(Arg(1)));
@@ -3816,7 +3895,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(bs_test_unit_frI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(2, next);
_mb = ms_matchbuffer(r(0));
@@ -3826,7 +3905,7 @@ apply_bif_or_nif_epilogue:
NextPF(2, next);
}
OpCase(bs_test_unit_fxI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(3, next);
_mb = ms_matchbuffer(xb(Arg(1)));
@@ -3837,7 +3916,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(bs_test_unit8_fr): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(1, next);
_mb = ms_matchbuffer(r(0));
@@ -3847,7 +3926,7 @@ apply_bif_or_nif_epilogue:
NextPF(1, next);
}
OpCase(bs_test_unit8_fx): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchBuffer *_mb;
PreFetch(2, next);
_mb = ms_matchbuffer(xb(Arg(1)));
@@ -3931,11 +4010,11 @@ apply_bif_or_nif_epilogue:
_integer = get_int32(_mb->base + _mb->offset/8);
}
_mb->offset += 32;
-#ifndef ARCH_64
+#if !defined(ARCH_64) || HALFWORD_HEAP
if (IS_USMALL(0, _integer)) {
#endif
_result = make_small(_integer);
-#ifndef ARCH_64
+#if !defined(ARCH_64) || HALFWORD_HEAP
} else {
TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
_result = uint_to_big((Uint) _integer, HTOP);
@@ -4172,7 +4251,7 @@ apply_bif_or_nif_epilogue:
do_bs_match_string:
{
- Eterm* next;
+ BeamInstr *next;
byte* bytes;
Uint bits;
ErlBinMatchBuffer* mb;
@@ -4199,7 +4278,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(i_bs_save2_rI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchState *_ms;
PreFetch(1, next);
_ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
@@ -4207,7 +4286,7 @@ apply_bif_or_nif_epilogue:
NextPF(1, next);
}
OpCase(i_bs_save2_xI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchState *_ms;
PreFetch(2, next);
_ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
@@ -4216,7 +4295,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(i_bs_restore2_rI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchState *_ms;
PreFetch(1, next);
_ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
@@ -4224,7 +4303,7 @@ apply_bif_or_nif_epilogue:
NextPF(1, next);
}
OpCase(i_bs_restore2_xI): {
- Eterm* next;
+ BeamInstr *next;
ErlBinMatchState *_ms;
PreFetch(2, next);
_ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
@@ -4241,7 +4320,7 @@ apply_bif_or_nif_epilogue:
* deallocate not followed by a return, and that should work.
*/
OpCase(deallocate_I): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(1, next);
D(Arg(0));
@@ -4264,7 +4343,7 @@ apply_bif_or_nif_epilogue:
*/
OpCase(call_traced_function): {
if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- unsigned offset = offsetof(Export, code) + 3*sizeof(Eterm);
+ unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
Export* ep = (Export *) (((char *)I)-offset);
Uint32 flags;
@@ -4291,26 +4370,25 @@ apply_bif_or_nif_epilogue:
}
E -= 3;
ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm)(ep->code)));
+ ASSERT(is_CP((BeamInstr)(ep->code)));
ASSERT(is_internal_pid(c_p->tracer_proc) ||
is_internal_port(c_p->tracer_proc));
- E[2] = make_cp(c_p->cp);
+ E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
E[1] = am_true; /* Process tracer */
E[0] = make_cp(ep->code);
- c_p->cp = (Eterm*)
- make_cp(flags & MATCH_SET_EXCEPTION_TRACE
- ? beam_exception_trace : beam_return_trace);
+ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
+ ? beam_exception_trace : beam_return_trace;
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
c_p->trace_flags |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
- SET_I((Uint *) Arg(0));
+ SET_I((BeamInstr *)Arg(0));
Dispatch();
}
OpCase(return_trace): {
- Uint* code = (Uint *) E[0];
+ BeamInstr* code = (BeamInstr *) (UWord) E[0];
SWAPOUT; /* Needed for shared heap */
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
@@ -4318,44 +4396,117 @@ apply_bif_or_nif_epilogue:
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
SWAPIN;
c_p->cp = NULL;
- SET_I((Eterm *) E[2]);
+ SET_I((BeamInstr *) cp_val(E[2]));
E += 3;
Goto(*I);
}
OpCase(i_count_breakpoint): {
- Uint real_I;
+ BeamInstr real_I;
- ErtsCountBreak((Uint *) I, &real_I);
+ ErtsCountBreak(c_p, (BeamInstr *) I, &real_I);
+ ASSERT(VALID_INSTR(real_I));
+ Goto(real_I);
+ }
+
+    /* Need to send the MFA instead of the bdt pointer;
+     * the pointer might be deallocated.
+     */
+
+ OpCase(i_time_breakpoint): {
+ BeamInstr real_I;
+ BpData **bds = (BpData **) (I)[-4];
+ BpDataTime *bdt = NULL;
+ Uint ix = 0;
+#ifdef ERTS_SMP
+ ix = c_p->scheduler_data->no - 1;
+#else
+ ix = 0;
+#endif
+ bdt = (BpDataTime *)bds[ix];
+
+ ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ ASSERT(bdt);
+ bdt = (BpDataTime *) bdt->next;
+ ASSERT(bdt);
+ bds[ix] = (BpData *) bdt;
+ real_I = bdt->orig_instr;
ASSERT(VALID_INSTR(real_I));
+
+ if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) {
+ if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) ||
+ (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) ||
+ (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) {
+ /* This _IS_ a tail recursive call */
+ SWAPOUT;
+ erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL);
+ SWAPIN;
+ } else {
+ SWAPOUT;
+ erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL);
+
+		/* The r register needs to be copied to the register
+		 * array for the garbage collector.
+		 */
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 2 < HTOP) {
+ reg[0] = r(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ r(0) = reg[0];
+ }
+ SWAPIN;
+
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+
+ E -= 2;
+ E[0] = make_cp(I);
+ E[1] = make_cp(c_p->cp); /* original return address */
+ c_p->cp = beam_return_time_trace;
+ }
+ }
+
Goto(real_I);
}
+ OpCase(i_return_time_trace): {
+ BeamInstr *pc = (BeamInstr *) (UWord) E[0];
+ SWAPOUT;
+ erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN);
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[1]));
+ E += 2;
+ Goto(*I);
+ }
+
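+
For a non-tail call, i_time_breakpoint above pushes a two-word pseudo frame and redirects cp to beam_return_time_trace; i_return_time_trace later consumes it. A purely descriptive restatement of that frame layout (the struct is illustrative, the emulator itself just uses E[0] and E[1]):

    typedef unsigned long Eterm;   /* stand-in, for the field comments only */

    /* The two-word frame that i_time_breakpoint pushes for a non-tail
     * call and i_return_time_trace pops again (E += 2).                 */
    struct return_time_trace_frame {
        Eterm call_site;   /* E[0] = make_cp(I): where the sample is charged */
        Eterm saved_cp;    /* E[1] = make_cp(c_p->cp): real return address   */
    };
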
OpCase(i_trace_breakpoint):
if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- Uint real_I;
+ BeamInstr real_I;
- ErtsBreakSkip((Uint *) I, &real_I);
+ ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I);
Goto(real_I);
}
/* Fall through to next case */
OpCase(i_mtrace_breakpoint): {
- Uint real_I;
+ BeamInstr real_I;
Uint32 flags;
Eterm tracer_pid;
- Uint *cpp;
+ Uint* cpp;
int return_to_trace = 0, need = 0;
flags = 0;
SWAPOUT;
reg[0] = r(0);
- if (*cp_val((Eterm)c_p->cp)
- == (Uint) OpCode(return_trace)) {
- cpp = (Uint*)&E[2];
- } else if (*cp_val((Eterm)c_p->cp)
- == (Uint) OpCode(i_return_to_trace)) {
+ if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
+ cpp = &E[2];
+ } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
- cpp = (Uint*)&E[0];
+ cpp = &E[0];
+ } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
+ return_to_trace = !0;
+ cpp = &E[0];
} else {
cpp = NULL;
}
@@ -4364,19 +4515,21 @@ apply_bif_or_nif_epilogue:
* return_trace and/or i_return_to_trace stackframes
* on the stack, they are not intermixed with y registers
*/
- Eterm *cp_save = c_p->cp;
+ BeamInstr *cp_save = c_p->cp;
for (;;) {
ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (Uint) OpCode(return_trace)) {
+ if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
cpp += 3;
- } else if (*cp_val(*cpp) == (Uint) OpCode(i_return_to_trace)) {
+ } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
cpp += 1;
+ } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) {
+ cpp += 2;
} else
break;
}
- c_p->cp = (Eterm *) *cpp;
- ASSERT(is_CP((Eterm)c_p->cp));
+ c_p->cp = (BeamInstr *) cp_val(*cpp);
+ ASSERT(is_CP(*cpp));
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
@@ -4412,12 +4565,12 @@ apply_bif_or_nif_epilogue:
E -= 1;
ASSERT(c_p->htop <= E && E <= c_p->hend);
E[0] = make_cp(c_p->cp);
- c_p->cp = (Eterm *) make_cp(beam_return_to_trace);
+ c_p->cp = (BeamInstr *) beam_return_to_trace;
}
if (flags & MATCH_SET_RX_TRACE) {
E -= 3;
ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (I - 3)));
+ ASSERT(is_CP((Eterm) (UWord) (I - 3)));
ASSERT(am_true == tracer_pid ||
is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
E[2] = make_cp(c_p->cp);
@@ -4425,9 +4578,9 @@ apply_bif_or_nif_epilogue:
E[0] = make_cp(I - 3); /* We ARE at the beginning of an
instruction,
the funcinfo is above i. */
- c_p->cp = (Eterm*)
- make_cp(flags & MATCH_SET_EXCEPTION_TRACE
- ? beam_exception_trace : beam_return_trace);
+ c_p->cp =
+ (flags & MATCH_SET_EXCEPTION_TRACE)
+ ? beam_exception_trace : beam_return_trace;
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
c_p->trace_flags |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -4440,10 +4593,10 @@ apply_bif_or_nif_epilogue:
Uint *cpp = (Uint*) E;
for(;;) {
ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (Uint) OpCode(return_trace)) {
+ if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
do ++cpp; while(is_not_CP(*cpp));
cpp += 2;
- } else if (*cp_val(*cpp) == (Uint) OpCode(i_return_to_trace)) {
+ } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
do ++cpp; while(is_not_CP(*cpp));
} else break;
}
@@ -4454,7 +4607,7 @@ apply_bif_or_nif_epilogue:
SWAPIN;
}
c_p->cp = NULL;
- SET_I((Eterm *) E[0]);
+ SET_I((BeamInstr *) cp_val(E[0]));
E += 1;
Goto(*I);
}
@@ -4465,7 +4618,7 @@ apply_bif_or_nif_epilogue:
OpCase(i_global_cons):
{
- Eterm *next;
+ BeamInstr *next;
#ifdef HYBRID
Eterm *hp;
@@ -4487,7 +4640,7 @@ apply_bif_or_nif_epilogue:
OpCase(i_global_tuple):
{
- Eterm *next;
+ BeamInstr *next;
int len;
#ifdef HYBRID
Eterm list;
@@ -4522,7 +4675,7 @@ apply_bif_or_nif_epilogue:
OpCase(i_global_copy):
{
- Eterm *next;
+ BeamInstr *next;
PreFetch(0,next);
#ifdef HYBRID
if (!IS_CONST(r(0)))
@@ -4551,7 +4704,7 @@ apply_bif_or_nif_epilogue:
OpCase(fmove_ql): {
Eterm fr = Arg(1);
- Eterm* next;
+ BeamInstr *next;
PreFetch(2, next);
GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
@@ -4561,7 +4714,7 @@ apply_bif_or_nif_epilogue:
OpCase(fmove_dl): {
Eterm targ1;
Eterm fr = Arg(1);
- Eterm* next;
+ BeamInstr *next;
PreFetch(2, next);
GetR(0, targ1);
@@ -4582,7 +4735,7 @@ apply_bif_or_nif_epilogue:
OpCase(fconv_dl): {
Eterm targ1;
Eterm fr = Arg(1);
- Eterm* next;
+ BeamInstr *next;
GetR(0, targ1);
PreFetch(2, next);
@@ -4611,7 +4764,7 @@ apply_bif_or_nif_epilogue:
erl_exit(1, "fclearerror/i_fcheckerror without fpe signals (beam_emu)");
#else
OpCase(fclearerror): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(0, next);
ERTS_FP_CHECK_INIT(c_p);
@@ -4619,7 +4772,7 @@ apply_bif_or_nif_epilogue:
}
OpCase(i_fcheckerror): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(0, next);
ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith);
@@ -4633,7 +4786,7 @@ apply_bif_or_nif_epilogue:
OpCase(i_fadd_lll): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(3, next);
ERTS_FP_CHECK_INIT(c_p);
@@ -4642,7 +4795,7 @@ apply_bif_or_nif_epilogue:
NextPF(3, next);
}
OpCase(i_fsub_lll): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(3, next);
ERTS_FP_CHECK_INIT(c_p);
@@ -4651,7 +4804,7 @@ apply_bif_or_nif_epilogue:
NextPF(3, next);
}
OpCase(i_fmul_lll): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(3, next);
ERTS_FP_CHECK_INIT(c_p);
@@ -4660,7 +4813,7 @@ apply_bif_or_nif_epilogue:
NextPF(3, next);
}
OpCase(i_fdiv_lll): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(3, next);
ERTS_FP_CHECK_INIT(c_p);
@@ -4669,7 +4822,7 @@ apply_bif_or_nif_epilogue:
NextPF(3, next);
}
OpCase(i_fnegate_ll): {
- Eterm* next;
+ BeamInstr *next;
PreFetch(2, next);
ERTS_FP_CHECK_INIT(c_p);
@@ -4736,7 +4889,7 @@ apply_bif_or_nif_epilogue:
neg_o_reds = -c_p->def_arg_reg[4];
FCALLS = c_p->fcalls;
SWAPIN;
- switch( c_p->def_arg_reg[3] ) {
+	switch( c_p->def_arg_reg[3] ) { /* Halfword won't work with HiPE yet! */
case HIPE_MODE_SWITCH_RES_RETURN:
ASSERT(is_value(reg[0]));
MoveReturn(reg[0], r(0));
@@ -4748,7 +4901,7 @@ apply_bif_or_nif_epilogue:
/* This can be used to call any function value, but currently it's
only used to call closures referring to unloaded modules. */
{
- Eterm *next;
+ BeamInstr *next;
next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE);
SWAPIN;
@@ -4881,13 +5034,15 @@ apply_bif_or_nif_epilogue:
em_call_error_handler = OpCode(call_error_handler);
em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
- beam_apply[0] = (Eterm) OpCode(i_apply);
- beam_apply[1] = (Eterm) OpCode(normal_exit);
- beam_exit[0] = (Eterm) OpCode(error_action_code);
- beam_continue_exit[0] = (Eterm) OpCode(continue_exit);
- beam_return_to_trace[0] = (Eterm) OpCode(i_return_to_trace);
- beam_return_trace[0] = (Eterm) OpCode(return_trace);
- beam_exception_trace[0] = (Eterm) OpCode(return_trace); /* UGLY */
+
+ beam_apply[0] = (BeamInstr) OpCode(i_apply);
+ beam_apply[1] = (BeamInstr) OpCode(normal_exit);
+ beam_exit[0] = (BeamInstr) OpCode(error_action_code);
+ beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
+ beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
+ beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
+ beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
+ beam_return_time_trace[0] = (BeamInstr) OpCode(i_return_time_trace);
/*
* Enter all BIFs into the export table.
@@ -4897,8 +5052,10 @@ apply_bif_or_nif_epilogue:
bif_table[i].name,
bif_table[i].arity);
bif_export[i] = ep;
- ep->code[3] = (Eterm) OpCode(apply_bif);
- ep->code[4] = (Eterm) bif_table[i].f;
+ ep->code[3] = (BeamInstr) OpCode(apply_bif);
+ ep->code[4] = (BeamInstr) bif_table[i].f;
+ /* XXX: set func info for bifs */
+ ((BeamInstr*)ep->code + 3)[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
}
return;
@@ -4943,6 +5100,10 @@ translate_gc_bif(void* gcf)
return round_1;
} else if (gcf == erts_gc_trunc_1) {
return round_1;
+ } else if (gcf == erts_gc_binary_part_2) {
+ return binary_part_2;
+ } else if (gcf == erts_gc_binary_part_3) {
+ return binary_part_3;
} else {
erl_exit(1, "bad gc bif");
}
@@ -5001,8 +5162,8 @@ Eterm error_atom[NUMBER_EXIT_CODES] = {
* at the point of the original exception.
*/
-static Eterm*
-handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf)
+static BeamInstr*
+handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
{
Eterm* hp;
Eterm Value = c_p->fvalue;
@@ -5056,7 +5217,7 @@ handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf)
/* Find a handler or die */
if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
&& !(c_p->freason & EXF_PANIC)) {
- Eterm *new_pc;
+ BeamInstr *new_pc;
/* The Beam handler code (catch_end or try_end) checks reg[0]
for THE_NON_VALUE to see if the previous code finished
abnormally. If so, reg[1], reg[2] and reg[3] should hold the
@@ -5082,30 +5243,37 @@ handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf)
/*
* Find the nearest catch handler
*/
-static Eterm*
+static BeamInstr*
next_catch(Process* c_p, Eterm *reg) {
int active_catches = c_p->catches > 0;
int have_return_to_trace = 0;
Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
- Uint i_return_trace = beam_return_trace[0];
- Uint i_return_to_trace = beam_return_to_trace[0];
+
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+
ptr = prev = c_p->stop;
ASSERT(is_CP(*ptr));
ASSERT(ptr <= STACK_START(c_p));
if (ptr == STACK_START(c_p)) return NULL;
if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
- *cp_val(*ptr) != i_return_to_trace))
+ *cp_val(*ptr) != i_return_to_trace &&
+ *cp_val(*ptr) != i_return_time_trace ))
&& c_p->cp) {
/* Can not follow cp here - code may be unloaded */
- Uint *cpp = cp_val((Eterm) c_p->cp);
+ BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace) {
- erts_trace_exception(c_p, (Eterm*) ptr[0],
+ erts_trace_exception(c_p, cp_val(ptr[0]),
reg[1], reg[2], ptr+1);
/* Skip return_trace parameters */
ptr += 2;
} else if (cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ } else if (cpp == beam_return_time_trace) {
+	    /* Skip the return_time_trace parameter */
+ ptr += 1;
} else if (cpp == beam_return_to_trace) {
have_return_to_trace = !0; /* Record next cp */
}
@@ -5123,7 +5291,7 @@ next_catch(Process* c_p, Eterm *reg) {
if (is_catch(*ptr) && active_catches) goto found_catch;
}
if (cp_val(*prev) == beam_exception_trace) {
- erts_trace_exception(c_p, (Eterm*) ptr[0],
+ erts_trace_exception(c_p, cp_val(ptr[0]),
reg[1], reg[2], ptr+1);
}
/* Skip return_trace parameters */
@@ -5135,6 +5303,13 @@ next_catch(Process* c_p, Eterm *reg) {
}
have_return_to_trace = !0; /* Record next cp */
return_to_trace_ptr = NULL;
+ } else if (*cp_val(*prev) == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
+ if (is_catch(*ptr) && active_catches) goto found_catch;
+ }
+	    /* Skip the return_time_trace parameter */
+ ptr += 1;
} else {
if (have_return_to_trace) {
/* Record this cp as possible return_to trace cp */
@@ -5252,7 +5427,7 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
*/
static void
-save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
+save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
Eterm args) {
struct StackTrace* s;
int sz;
@@ -5263,7 +5438,7 @@ save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
}
/* Create a container for the exception data */
- sz = (offsetof(struct StackTrace, trace) + sizeof(Eterm)*depth
+ sz = (offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth
+ sizeof(Eterm) - 1) / sizeof(Eterm);
s = (struct StackTrace *) HAlloc(c_p, 1 + sz);
/* The following fields are inside the bignum */
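
The revised sz computation sizes the trace buffer for BeamInstr pointers rather than Eterms and then rounds the byte count up to whole Eterm heap words; the extra word passed to HAlloc is the bignum header that wraps the container. A worked check of that ceiling division, with an assumed struct shape and halfword-build sizes:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t Eterm;        /* halfword-build sizes assumed */
    typedef uint64_t BeamInstr;

    struct StackTraceSketch {      /* shape only; the real struct has more fields */
        Eterm header;
        Eterm freason;
        int depth;
        BeamInstr *trace[1];       /* `depth` saved continuation pointers follow */
    };

    int main(void)
    {
        int depth = 8;
        size_t sz = (offsetof(struct StackTraceSketch, trace)
                     + sizeof(BeamInstr *) * depth
                     + sizeof(Eterm) - 1) / sizeof(Eterm);
        /* Ceiling division: sz Eterm-sized words are enough for the header
         * fields plus the pointer array; HAlloc then gets 1 + sz words,
         * the extra one being the bignum header around the container.    */
        assert(sz * sizeof(Eterm)
               >= offsetof(struct StackTraceSketch, trace) + sizeof(BeamInstr *) * depth);
        return 0;
    }
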
@@ -5298,7 +5473,6 @@ save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
ASSERT(c_p->current);
s->current = c_p->current;
a = s->current[2];
- ASSERT(s->current[2] <= 3);
}
/* Save first stack entry */
ASSERT(pc);
@@ -5350,9 +5524,10 @@ save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
/* Save the actual stack trace */
if (depth > 0) {
- Eterm *ptr, *prev = s->depth ? s->trace[s->depth-1] : NULL;
- Uint i_return_trace = beam_return_trace[0];
- Uint i_return_to_trace = beam_return_to_trace[0];
+ Eterm *ptr;
+ BeamInstr *prev = s->depth ? s->trace[s->depth-1] : NULL;
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
/*
* Traverse the stack backwards and add all unique continuation
* pointers to the buffer, up to the maximum stack trace size.
@@ -5365,7 +5540,7 @@ save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
*cp_val(*ptr) != i_return_to_trace))
&& c_p->cp) {
/* Can not follow cp here - code may be unloaded */
- Uint *cpp = cp_val((Eterm) c_p->cp);
+ BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
@@ -5385,7 +5560,7 @@ save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
/* Skip stack frame variables */
do ++ptr; while (is_not_CP(*ptr));
} else {
- Eterm *cp = (Eterm *)(*ptr);
+ BeamInstr *cp = cp_val(*ptr);
if (cp != prev) {
/* Record non-duplicates only */
prev = cp;
@@ -5457,9 +5632,12 @@ build_stacktrace(Process* c_p, Eterm exc) {
struct StackTrace* s;
Eterm args;
int depth;
- Eterm* current;
+ BeamInstr* current;
+#if HALFWORD_HEAP
+ BeamInstr current_buff[3];
+#endif
Eterm Where = NIL;
- Eterm* next_p = &Where;
+ Eterm *next_p = &Where;
if (! (s = get_trace_from_exc(exc))) {
return NIL;
@@ -5487,7 +5665,14 @@ build_stacktrace(Process* c_p, Eterm exc) {
* (e.g. spawn_link(erlang, abs, [1])).
*/
if (current == NULL) {
+#if HALFWORD_HEAP
+ current = current_buff;
+ current[0] = (BeamInstr) c_p->initial[0];
+ current[1] = (BeamInstr) c_p->initial[1];
+ current[2] = (BeamInstr) c_p->initial[2];
+#else
current = c_p->initial;
+#endif
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -5523,7 +5708,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
* Finally, we go through the saved continuation pointers.
*/
for (i = 0; i < depth; i++) {
- Eterm *fi = find_function_from_pc((Eterm *) s->trace[i]);
+ BeamInstr *fi = find_function_from_pc((BeamInstr *) s->trace[i]);
if (fi == NULL) continue;
mfa = TUPLE3(hp, fi[0], fi[1], make_small(fi[2]));
hp += 4;
@@ -5540,7 +5725,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
static Eterm
-call_error_handler(Process* p, Eterm* fi, Eterm* reg)
+call_error_handler(Process* p, BeamInstr* fi, Eterm* reg)
{
Eterm* hp;
Export* ep;
@@ -5588,7 +5773,7 @@ call_error_handler(Process* p, Eterm* fi, Eterm* reg)
}
static Eterm
-call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg)
+call_breakpoint_handler(Process* p, BeamInstr* fi, Eterm* reg)
{
Eterm* hp;
Export* ep;
@@ -5681,7 +5866,7 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
return ep;
}
-static Uint*
+static BeamInstr*
apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
{
int arity;
@@ -5763,7 +5948,7 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return ep->address;
}
-static Uint*
+static BeamInstr*
fixed_apply(Process* p, Eterm* reg, Uint arity)
{
Export* ep;
@@ -5869,7 +6054,7 @@ hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
c_p->stop = STACK_START(c_p);
c_p->catches = 0;
c_p->i = beam_apply;
- c_p->cp = (Eterm *) beam_apply+1;
+ c_p->cp = (BeamInstr *) beam_apply+1;
/*
* If there are no waiting messages, garbage collect and
@@ -5899,7 +6084,7 @@ hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return 1;
}
-static Uint*
+static BeamInstr*
call_fun(Process* p, /* Current process. */
int arity, /* Number of arguments for Fun. */
Eterm* reg, /* Contents of registers. */
@@ -5919,7 +6104,7 @@ call_fun(Process* p, /* Current process. */
if (is_fun_header(hdr)) {
ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
ErlFunEntry* fe;
- Eterm* code_ptr;
+ BeamInstr* code_ptr;
Eterm* var_ptr;
int actual_arity;
unsigned num_free;
@@ -6014,8 +6199,12 @@ call_fun(Process* p, /* Current process. */
}
}
} else if (is_export_header(hdr)) {
- Export* ep = (Export *) (export_val(fun))[1];
- int actual_arity = (int) ep->code[2];
+ Export *ep;
+ int actual_arity;
+
+ ep = *((Export **) (export_val(fun) + 1));
+ actual_arity = (int) ep->code[2];
+
if (arity == actual_arity) {
return ep->address;
} else {
@@ -6082,7 +6271,7 @@ call_fun(Process* p, /* Current process. */
}
}
-static Eterm*
+static BeamInstr*
apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
{
int arity;
@@ -6134,8 +6323,8 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
erts_refc_inc(&fe->refc, 2);
funp->thing_word = HEADER_FUN;
#ifndef HYBRID /* FIND ME! */
- funp->next = MSO(p).funs;
- MSO(p).funs = funp;
+ funp->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*) funp;
#endif
funp->fe = fe;
funp->num_free = num_free;
@@ -6176,7 +6365,7 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
if ((ep = export_get(&e)) == NULL) {
return 0;
}
- return ep->address == ep->code+3 && (ep->code[3] == (Uint) em_apply_bif);
+ return ep->address == ep->code+3 && (ep->code[3] == (BeamInstr) em_apply_bif);
}
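
The check above relies on the export-entry layout visible throughout this diff: code[0..2] hold module, function and arity, code[3] holds the first instruction word, and for a BIF stub that word is em_apply_bif with the C function pointer kept in code[4] (see load_import_table and erts_init_bif further down). A minimal stand-alone sketch of reading such an entry, with local stand-in types rather than the erts ones:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t InstrSketch;            /* stand-in for BeamInstr        */
    typedef int (*BifFnSketch)(void);         /* illustrative signature only   */

    struct ExportSketch {                     /* only the fields used here     */
        InstrSketch *address;
        InstrSketch  code[5];                 /* [0..2]=M,F,A [3]=op [4]=bif   */
    };

    /* Returns the BIF's C function if the entry is a BIF stub, else NULL. */
    static BifFnSketch bif_of(struct ExportSketch *ep, InstrSketch apply_bif_op)
    {
        if (ep->address == ep->code + 3 && ep->code[3] == apply_bif_op)
            return (BifFnSketch) ep->code[4];
        return NULL;
    }

    static int dummy_bif(void) { return 42; }

    int main(void)
    {
        struct ExportSketch e;
        InstrSketch apply_bif_op = 0x1234;    /* made-up opcode word           */
        e.code[3] = apply_bif_op;
        e.code[4] = (InstrSketch) dummy_bif;
        e.address = e.code + 3;
        return bif_of(&e, apply_bif_op) == dummy_bif ? 0 : 1;
    }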
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 99fab28dce..74777e4c26 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -51,9 +51,9 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int);
#define EXPORTED 2
#ifdef NO_JUMP_TABLE
-# define BeamOpCode(Op) ((Uint)(Op))
+# define BeamOpCode(Op) ((BeamInstr)(Op))
#else
-# define BeamOpCode(Op) ((Eterm)beam_ops[Op])
+# define BeamOpCode(Op) ((BeamInstr)beam_ops[Op])
#endif
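
BeamOpCode is what makes loaded code directly threaded when a jump table is available: instead of the generic opcode number, each instruction slot receives the address of its handler from beam_ops[], while the NO_JUMP_TABLE fallback keeps the plain number for a switch-based loop. A minimal stand-alone sketch of the threaded style with invented opcodes, using the GCC/Clang labels-as-values extension:

    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t InstrSketch;

    enum { OP_INC, OP_PRINT, OP_HALT, NUM_OPS };

    int main(void)
    {
        void *ops[NUM_OPS];                   /* plays the role of beam_ops[]  */
        InstrSketch code[4];                  /* the "loaded" code area        */
        const int generic[4] = { OP_INC, OP_INC, OP_PRINT, OP_HALT };
        const InstrSketch *pc;
        long acc = 0;
        int i;

        ops[OP_INC]   = &&lb_inc;
        ops[OP_PRINT] = &&lb_print;
        ops[OP_HALT]  = &&lb_halt;

        /* Loading: BeamOpCode(op) turns a generic opcode into a handler address. */
        for (i = 0; i < 4; i++)
            code[i] = (InstrSketch) ops[generic[i]];

        pc = code;
        goto *(void *) *pc;                   /* enter the threaded loop       */

    lb_inc:   acc++;                 pc++; goto *(void *) *pc;
    lb_print: printf("%ld\n", acc);  pc++; goto *(void *) *pc;
    lb_halt:  return 0;
    }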
#if defined(WORDS_BIGENDIAN)
@@ -94,7 +94,7 @@ typedef struct {
typedef struct {
unsigned type; /* Type of operand. */
- Uint val; /* Value of operand. */
+ BeamInstr val; /* Value of operand. */
Uint bigarity; /* Arity for bignumbers (only). */
} GenOpArg;
@@ -142,7 +142,7 @@ typedef struct {
typedef struct {
Eterm function; /* Tagged atom for function. */
int arity; /* Arity. */
- Eterm* address; /* Address to function in code. */
+ BeamInstr* address; /* Address to function in code. */
} ExportEntry;
#define MakeIffId(a, b, c, d) \
@@ -274,13 +274,12 @@ typedef struct {
int num_functions; /* Number of functions in module. */
int num_labels; /* Number of labels. */
int code_buffer_size; /* Size of code buffer in words. */
- Eterm* code; /* Loaded code. */
+ BeamInstr* code; /* Loaded code. */
int ci; /* Current index into loaded code. */
Label* labels;
- Uint put_strings; /* Linked list of put_string instructions. */
- Uint new_bs_put_strings; /* Linked list of i_new_bs_put_string instructions. */
+ BeamInstr new_bs_put_strings; /* Linked list of i_new_bs_put_string instructions. */
StringPatch* string_patches; /* Linked list of position into string table to patch. */
- Uint catches; /* Linked list of catch_yf instructions. */
+ BeamInstr catches; /* Linked list of catch_yf instructions. */
unsigned loaded_size; /* Final size of code when loaded. */
byte mod_md5[16]; /* MD5 for module code. */
int may_load_nif; /* true if NIFs may later be loaded for this module */
@@ -341,7 +340,7 @@ typedef struct {
#define GetTagAndValue(Stp, Tag, Val) \
do { \
- Uint __w; \
+ BeamInstr __w; \
GetByte(Stp, __w); \
Tag = __w & 0x07; \
if ((__w & 0x08) == 0) { \
@@ -388,7 +387,7 @@ typedef struct {
goto load_error; \
} else { \
int __n = (N); \
- Uint __result = 0; \
+ BeamInstr __result = 0; \
Stp->file_left -= (unsigned) __n; \
while (__n-- > 0) { \
__result = __result << 8 | *Stp->file_p++; \
@@ -465,7 +464,7 @@ static int bin_load(Process *c_p, ErtsProcLocks c_p_locks,
static void init_state(LoaderState* stp);
static int insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
- Eterm* code, Uint size, Uint catches);
+ BeamInstr* code, Uint size, BeamInstr catches);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
Uint num_types, Uint num_mandatory);
static int load_atom_table(LoaderState* stp);
@@ -487,9 +486,6 @@ static GenOp* const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest);
static GenOp* gen_func_info(LoaderState* stp, GenOpArg mod, GenOpArg Func,
GenOpArg arity, GenOpArg label);
-static GenOp*
-gen_guard_bif(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
- GenOpArg Src, GenOpArg Dst);
static int freeze_code(LoaderState* stp);
@@ -499,8 +495,8 @@ static void load_printf(int line, LoaderState* context, char *fmt, ...);
static int transform_engine(LoaderState* st);
static void id_to_string(Uint id, char* s);
static void new_genop(LoaderState* stp);
-static int get_int_val(LoaderState* stp, Uint len_code, Uint* result);
-static int get_erlang_integer(LoaderState* stp, Uint len_code, Uint* result);
+static int get_int_val(LoaderState* stp, Uint len_code, BeamInstr* result);
+static int get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result);
static int new_label(LoaderState* stp);
static void new_literal_patch(LoaderState* stp, int pos);
static void new_string_patch(LoaderState* stp, int pos);
@@ -513,7 +509,7 @@ static Eterm compilation_info_for_module(Process* p, Eterm mod);
static Eterm native_addresses(Process* p, Eterm mod);
int patch_funentries(Eterm Patchlist);
int patch(Eterm Addresses, Uint fe);
-static int safe_mul(Uint a, Uint b, Uint* resp);
+static int safe_mul(UWord a, UWord b, UWord* resp);
static int must_swap_floats;
@@ -591,7 +587,18 @@ erts_load_module(Process *c_p,
}
return result;
}
-
+/* #define LOAD_MEMORY_HARD_DEBUG 1*/
+
+#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
+/* Requires that ERTS_ALLOC_UTIL_HARD_DEBUG is also set in erl_alloc_util.h */
+extern void check_allocators(void);
+extern void check_allocated_block(Uint type, void *blk);
+#define CHKALLOC() check_allocators()
+#define CHKBLK(TYPE,BLK) if ((BLK) != NULL) check_allocated_block((TYPE),(BLK))
+#else
+#define CHKALLOC() /* nothing */
+#define CHKBLK(TYPE,BLK) /* nothing */
+#endif
static int
bin_load(Process *c_p, ErtsProcLocks c_p_locks,
@@ -608,6 +615,12 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Scan the IFF file.
*/
+#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
+ erts_fprintf(stderr,"Loading a module\n");
+#endif
+
+ CHKALLOC();
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
state.file_name = "IFF header for Beam file";
state.file_p = bytes;
state.file_left = unloaded_size;
@@ -619,6 +632,7 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the header for the code chunk.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
define_file(&state, "code chunk header", CODE_CHUNK);
if (!read_code_header(&state)) {
goto load_error;
@@ -628,6 +642,7 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the atom table.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
define_file(&state, "atom table", ATOM_CHUNK);
if (!load_atom_table(&state)) {
goto load_error;
@@ -637,6 +652,7 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the import table.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
define_file(&state, "import table", IMP_CHUNK);
if (!load_import_table(&state)) {
goto load_error;
@@ -646,6 +662,7 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the lambda (fun) table.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
if (state.chunks[LAMBDA_CHUNK].size > 0) {
define_file(&state, "lambda (fun) table", LAMBDA_CHUNK);
if (!read_lambda_table(&state)) {
@@ -657,6 +674,7 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the literal table.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
if (state.chunks[LITERAL_CHUNK].size > 0) {
define_file(&state, "literals table (constant pool)", LITERAL_CHUNK);
if (!read_literal_table(&state)) {
@@ -668,18 +686,25 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Load the code chunk.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
state.file_name = "code chunk";
state.file_p = state.code_start;
state.file_left = state.code_size;
- if (!load_code(&state) || !freeze_code(&state)) {
+ if (!load_code(&state)) {
+ goto load_error;
+ }
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
+ if (!freeze_code(&state)) {
goto load_error;
}
+
/*
* Read and validate the export table. (This must be done after
* loading the code, because it contains labels.)
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
define_file(&state, "export table", EXP_CHUNK);
if (!read_export_table(&state)) {
goto load_error;
@@ -690,16 +715,25 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* exported and imported functions. This can't fail.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
rval = insert_new_code(c_p, c_p_locks, state.group_leader, state.module,
state.code, state.loaded_size, state.catches);
if (rval < 0) {
goto load_error;
}
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
final_touch(&state);
/*
 * Loading succeeded.
*/
+ CHKBLK(ERTS_ALC_T_CODE,state.code);
+#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
+ erts_fprintf(stderr,"Loaded %T\n",*modp);
+#if 0
+ debug_dump_code(state.code,state.ci);
+#endif
+#endif
rval = 0;
state.code = NULL; /* Prevent code from being freed. */
*modp = state.module;
@@ -791,7 +825,7 @@ init_state(LoaderState* stp)
static int
insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module, Eterm* code, Uint size, Uint catches)
+ Eterm group_leader, Eterm module, BeamInstr* code, Uint size, BeamInstr catches)
{
Module* modp;
int rval;
@@ -833,7 +867,7 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modules[i] = modules[i-1];
}
modules[i].start = code;
- modules[i].end = (Eterm *) (((byte *)code) + size);
+ modules[i].end = (BeamInstr *) (((byte *)code) + size);
num_loaded_modules++;
mid_module = &modules[num_loaded_modules/2];
return 0;
@@ -1083,7 +1117,7 @@ load_import_table(LoaderState* stp)
* the BIF function.
*/
if ((e = erts_find_export_entry(mod, func, arity)) != NULL) {
- if (e->code[3] == (Uint) em_apply_bif) {
+ if (e->code[3] == (BeamInstr) em_apply_bif) {
stp->import[i].bf = (BifFunction) e->code[4];
if (func == am_load_nif && mod == am_erlang && arity == 2) {
stp->may_load_nif = 1;
@@ -1151,7 +1185,7 @@ read_export_table(LoaderState* stp)
* redefine).
*/
if ((e = erts_find_export_entry(stp->module, func, arity)) != NULL) {
- if (e->code[3] == (Uint) em_apply_bif) {
+ if (e->code[3] == (BeamInstr) em_apply_bif) {
int j;
for (j = 0; j < sizeof(allow_redef)/sizeof(allow_redef[0]); j++) {
@@ -1220,7 +1254,7 @@ static int
read_literal_table(LoaderState* stp)
{
int i;
- Uint uncompressed_sz;
+ BeamInstr uncompressed_sz;
byte* uncompressed = 0;
GetInt(stp, 4, uncompressed_sz);
@@ -1338,8 +1372,8 @@ read_code_header(LoaderState* stp)
* Initialize code area.
*/
stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
- stp->code = (Eterm*) erts_alloc(ERTS_ALC_T_CODE,
- sizeof(Eterm) * stp->code_buffer_size);
+ stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
+ sizeof(BeamInstr) * stp->code_buffer_size);
stp->code[MI_NUM_FUNCTIONS] = stp->num_functions;
stp->ci = MI_FUNCTIONS + stp->num_functions + 1;
@@ -1350,7 +1384,6 @@ read_code_header(LoaderState* stp)
stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
stp->code[MI_NUM_BREAKPOINTS] = 0;
- stp->put_strings = 0;
stp->new_bs_put_strings = 0;
stp->catches = 0;
return 1;
@@ -1365,16 +1398,18 @@ read_code_header(LoaderState* stp)
LoadError2(Stp, "bad tag %d; expected %d", Actual, Expected); \
} else {}
-#define Need(w) \
- ASSERT(ci <= code_buffer_size); \
- if (code_buffer_size < ci+(w)) { \
- code_buffer_size = erts_next_heap_size(ci+(w), 0); \
- stp->code = code \
- = (Eterm *) erts_realloc(ERTS_ALC_T_CODE, \
- (void *) code, \
- code_buffer_size * sizeof(Eterm)); \
- }
+#define CodeNeed(w) do { \
+ ASSERT(ci <= code_buffer_size); \
+ if (code_buffer_size < ci+(w)) { \
+ code_buffer_size = erts_next_heap_size(ci+(w), 0); \
+ stp->code = code \
+ = (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \
+ (void *) code, \
+ code_buffer_size * sizeof(BeamInstr)); \
+ } \
+} while (0)
+#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
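
TermWords converts a size counted in Eterm words into the number of BeamInstr words needed to hold it, rounding up: on a halfword build a BeamInstr is twice as wide as an Eterm, so this is a ceiling division by two, while on the other builds the two widths coincide and TermWords(t) is simply t. A stand-alone sketch of the arithmetic with stand-in 32- and 64-bit types:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t EtermSketch;             /* term word on a halfword build */
    typedef uint64_t InstrSketch;             /* code word on a halfword build */

    #define TERM_WORDS(t) \
        (((t) / (sizeof(InstrSketch)/sizeof(EtermSketch))) + \
         !!((t) % (sizeof(InstrSketch)/sizeof(EtermSketch))))

    int main(void)
    {
        /* A bignum header plus three digits is 4 term words and fits in 2
         * code words; 5 term words need 3 (rounded up). */
        printf("%zu %zu\n", (size_t) TERM_WORDS(4), (size_t) TERM_WORDS(5));
        return 0;                             /* prints "2 3" on this model    */
    }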
static int
@@ -1387,7 +1422,7 @@ load_code(LoaderState* stp)
char* sign;
int arg; /* Number of current argument. */
int num_specific; /* Number of specific ops for current. */
- Eterm* code;
+ BeamInstr* code;
int code_buffer_size;
int specific;
Uint last_label = 0; /* Number of last label. */
@@ -1446,7 +1481,7 @@ load_code(LoaderState* stp)
if (((First) & 0x08) == 0) { \
Val = (First) >> 4; \
} else if (((First) & 0x10) == 0) { \
- Uint __w; \
+ BeamInstr __w; \
GetByte(Stp, __w); \
Val = (((First) >> 5) << 8) | __w; \
} else { \
@@ -1455,7 +1490,7 @@ load_code(LoaderState* stp)
} while (0)
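
The macro above decodes the small cases of the BEAM compact term format: the low three bits of the first byte are the tag; when bit 3 is clear the value is the top four bits of the same byte, and when bit 3 is set but bit 4 is clear one continuation byte follows, giving an 11-bit value (the longer forms are handled further down). A stand-alone decode sketch with invented byte values:

    #include <stdio.h>
    #include <stdint.h>

    /* Handles the one- and two-byte encodings only; longer forms (bit 4 set)
     * are left out, as they are in the macro above. */
    static int decode_small(const uint8_t *p, unsigned *tag, uint64_t *val)
    {
        uint8_t first = p[0];
        *tag = first & 0x07;                  /* low three bits: the tag       */
        if ((first & 0x08) == 0) {            /* 4-bit value in the same byte  */
            *val = first >> 4;
            return 1;
        } else if ((first & 0x10) == 0) {     /* 11-bit value, one extra byte  */
            *val = ((uint64_t)(first >> 5) << 8) | p[1];
            return 2;
        }
        *val = 0;
        return 0;
    }

    int main(void)
    {
        unsigned tag; uint64_t val;
        const uint8_t one[1] = { 0x50 };      /* tag 0, value 5                */
        const uint8_t two[2] = { 0x28, 0x23 };/* tag 0, value 0x123            */

        decode_small(one, &tag, &val);
        printf("tag=%u val=%llu\n", tag, (unsigned long long) val);
        decode_small(two, &tag, &val);
        printf("tag=%u val=%llu\n", tag, (unsigned long long) val);
        return 0;
    }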
for (arg = 0; arg < arity; arg++) {
- Uint first;
+ BeamInstr first;
GetByte(stp, first);
last_op->a[arg].type = first & 0x07;
@@ -1464,7 +1499,7 @@ load_code(LoaderState* stp)
if ((first & 0x08) == 0) {
last_op->a[arg].val = first >> 4;
} else if ((first & 0x10) == 0) {
- Uint w;
+ BeamInstr w;
GetByte(stp, w);
ASSERT(first < 0x800);
last_op->a[arg].val = ((first >> 5) << 8) | w;
@@ -1523,7 +1558,7 @@ load_code(LoaderState* stp)
break;
case TAG_z:
{
- Uint ext_tag;
+ BeamInstr ext_tag;
unsigned tag;
GetValue(stp, first, ext_tag);
@@ -1531,14 +1566,15 @@ load_code(LoaderState* stp)
case 0: /* Floating point number */
{
Eterm* hp;
-# ifndef ARCH_64
+/* XXX:PaN - Halfword should use ARCH_64 variant instead */
+#if !defined(ARCH_64) || HALFWORD_HEAP
Uint high, low;
# endif
last_op->a[arg].val = new_literal(stp, &hp,
FLOAT_SIZE_OBJECT);
hp[0] = HEADER_FLONUM;
last_op->a[arg].type = TAG_q;
-# ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
GetInt(stp, 8, hp[1]);
# else
GetInt(stp, 4, high);
@@ -1575,10 +1611,10 @@ load_code(LoaderState* stp)
break;
case 3: /* Allocation list. */
{
- Uint n;
- Uint type;
- Uint val;
- Uint words = 0;
+ BeamInstr n;
+ BeamInstr type;
+ BeamInstr val;
+ BeamInstr words = 0;
stp->new_float_instructions = 1;
GetTagAndValue(stp, tag, n);
@@ -1607,7 +1643,7 @@ load_code(LoaderState* stp)
}
case 4: /* Literal. */
{
- Uint val;
+ BeamInstr val;
GetTagAndValue(stp, tag, val);
VerifyTag(stp, tag, TAG_u);
@@ -1734,7 +1770,7 @@ load_code(LoaderState* stp)
}
stp->specific_op = specific;
- Need(opc[stp->specific_op].sz+2); /* Extra margin for packing */
+ CodeNeed(opc[stp->specific_op].sz+2); /* Extra margin for packing */
code[ci++] = BeamOpCode(stp->specific_op);
}
@@ -1772,7 +1808,7 @@ load_code(LoaderState* stp)
case 'c': /* Tagged constant */
switch (tag) {
case TAG_i:
- code[ci++] = make_small(tmp_op->a[arg].val);
+ code[ci++] = (BeamInstr) make_small((Uint) tmp_op->a[arg].val);
break;
case TAG_a:
code[ci++] = tmp_op->a[arg].val;
@@ -1802,7 +1838,7 @@ load_code(LoaderState* stp)
code[ci++] = make_yreg(tmp_op->a[arg].val);
break;
case TAG_i:
- code[ci++] = make_small(tmp_op->a[arg].val);
+ code[ci++] = (BeamInstr) make_small((Uint)tmp_op->a[arg].val);
break;
case TAG_a:
code[ci++] = tmp_op->a[arg].val;
@@ -1896,12 +1932,12 @@ load_code(LoaderState* stp)
if (stp->import[i].bf == NULL) {
LoadError1(stp, "not a BIF: import table index %d", i);
}
- code[ci++] = (Eterm) stp->import[i].bf;
+ code[ci++] = (BeamInstr) stp->import[i].bf;
break;
case 'P': /* Byte offset into tuple */
VerifyTag(stp, tag, TAG_u);
tmp = tmp_op->a[arg].val;
- code[ci++] = (Eterm) ((tmp_op->a[arg].val+1) * sizeof(Eterm *));
+ code[ci++] = (BeamInstr) ((tmp_op->a[arg].val+1) * sizeof(Eterm));
break;
case 'l': /* Floating point register. */
VerifyTag(stp, tag_to_letter[tag], *sign);
@@ -1925,17 +1961,17 @@ load_code(LoaderState* stp)
for ( ; arg < tmp_op->arity; arg++) {
switch (tmp_op->a[arg].type) {
case TAG_i:
- Need(1);
+ CodeNeed(1);
code[ci++] = make_small(tmp_op->a[arg].val);
break;
case TAG_u:
case TAG_a:
case TAG_v:
- Need(1);
+ CodeNeed(1);
code[ci++] = tmp_op->a[arg].val;
break;
case TAG_f:
- Need(1);
+ CodeNeed(1);
code[ci] = stp->labels[tmp_op->a[arg].val].patches;
stp->labels[tmp_op->a[arg].val].patches = ci;
ci++;
@@ -1947,24 +1983,41 @@ load_code(LoaderState* stp)
lit = stp->literals[tmp_op->a[arg].val].term;
if (is_big(lit)) {
Eterm* bigp;
+ Eterm *tmp;
Uint size;
+ Uint term_size;
bigp = big_val(lit);
- size = bignum_header_arity(*bigp);
- Need(size+1);
- code[ci++] = *bigp++;
- while (size-- > 0) {
- code[ci++] = *bigp++;
+ term_size = bignum_header_arity(*bigp);
+ size = TermWords(term_size + 1);
+ CodeNeed(size);
+ tmp = (Eterm *) (code + ci);
+ *tmp++ = *bigp++;
+ while (term_size-- > 0) {
+ *tmp++ = *bigp++;
}
+ ci += size;
} else if (is_float(lit)) {
-#ifdef ARCH_64
- Need(1);
+#if defined(ARCH_64) && !HALFWORD_HEAP
+ CodeNeed(1);
code[ci++] = float_val(stp->literals[tmp_op->a[arg].val].term)[1];
+#elif HALFWORD_HEAP
+ Eterm* fptr;
+ Uint size;
+ Eterm *tmp;
+
+ fptr = float_val(stp->literals[tmp_op->a[arg].val].term)+1;
+ size = TermWords(2);
+ CodeNeed(size);
+ tmp = (Eterm *) (code + ci);
+ *tmp++ = *fptr++;
+ *tmp = *fptr;
+ ci += size;
#else
Eterm* fptr;
fptr = float_val(stp->literals[tmp_op->a[arg].val].term)+1;
- Need(2);
+ CodeNeed(2);
code[ci++] = *fptr++;
code[ci++] = *fptr;
#endif
@@ -1984,10 +2037,10 @@ load_code(LoaderState* stp)
*/
if (opc[stp->specific_op].pack[0]) {
char* prog; /* Program for packing engine. */
- Uint stack[8]; /* Stack. */
- Uint* sp = stack; /* Points to next free position. */
- Uint packed = 0; /* Accumulator for packed operations. */
-
+ BeamInstr stack[8]; /* Stack. */
+ BeamInstr* sp = stack; /* Points to next free position. */
+ BeamInstr packed = 0; /* Accumulator for packed operations. */
+
for (prog = opc[stp->specific_op].pack; *prog; prog++) {
switch (*prog) {
case 'g': /* Get instruction; push on stack. */
@@ -2000,7 +2053,7 @@ load_code(LoaderState* stp)
packed = (packed << BEAM_TIGHT_SHIFT) | code[--ci];
break;
case '6': /* Shift 16 steps */
- packed = (packed << 16) | code[--ci];
+ packed = (packed << BEAM_LOOSE_SHIFT) | code[--ci];
break;
case 'p': /* Put instruction (from stack). */
code[ci++] = *--sp;
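
The packing engine folds several already-emitted operand words back into one code word: 'g' pushes an instruction word, '2' and '6' shift the accumulator by BEAM_TIGHT_SHIFT and (after this change) BEAM_LOOSE_SHIFT rather than a hard-coded 16, and 'p' writes a packed word back out. A stand-alone sketch of packing two small operands into one word and unpacking them again; the 16-bit field width is only a stand-in for the BEAM_* constants, whose values are not visible in this hunk:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t InstrSketch;

    enum { SHIFT = 16, MASK = (1 << SHIFT) - 1 };

    int main(void)
    {
        InstrSketch a = 3, b = 12;                 /* two operand words        */
        InstrSketch packed = (a << SHIFT) | b;     /* loader-side packing      */

        /* emulator-side unpacking, low field first */
        InstrSketch low  = packed & MASK;
        InstrSketch high = packed >> SHIFT;
        printf("%llu %llu\n", (unsigned long long) high,
                              (unsigned long long) low);   /* prints "3 12"    */
        return 0;
    }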
@@ -2037,9 +2090,9 @@ load_code(LoaderState* stp)
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
- Need(pad);
- sys_memmove(&code[finfo_ix+pad], &code[finfo_ix], FINFO_SZ*sizeof(Eterm));
- sys_memset(&code[finfo_ix], 0, pad*sizeof(Eterm));
+ CodeNeed(pad);
+ sys_memmove(&code[finfo_ix+pad], &code[finfo_ix], FINFO_SZ*sizeof(BeamInstr));
+ sys_memset(&code[finfo_ix], 0, pad*sizeof(BeamInstr));
ci += pad;
stp->labels[last_label].value += pad;
}
@@ -2050,6 +2103,7 @@ load_code(LoaderState* stp)
*/
stp->function = code[ci-2];
stp->arity = code[ci-1];
+
ASSERT(stp->labels[last_label].value == ci - FINFO_SZ);
offset = MI_FUNCTIONS + function_number;
code[offset] = stp->labels[last_label].patches;
@@ -2072,34 +2126,6 @@ load_code(LoaderState* stp)
/* Remember offset for the on_load function. */
stp->on_load = ci;
break;
- case op_put_string_IId:
- {
- /*
- * At entry:
- *
- * code[ci-4] &&lb_put_string_IId
- * code[ci-3] length of string
- * code[ci-2] offset into string table
- * code[ci-1] destination register
- *
- * Since we don't know the address of the string table yet,
- * just check the offset and length for validity, and use
- * the instruction field as a link field to link all put_string
- * instructions into a single linked list. At exit:
- *
- * code[ci-4] pointer to next put_string instruction (or 0
- * if this is the last)
- */
- Uint offset = code[ci-2];
- Uint len = code[ci-3];
- unsigned strtab_size = stp->chunks[STR_CHUNK].size;
- if (offset > strtab_size || offset + len > strtab_size) {
- LoadError2(stp, "invalid string reference %d, size %d", offset, len);
- }
- code[ci-4] = stp->put_strings;
- stp->put_strings = ci - 4;
- }
- break;
case op_bs_put_string_II:
{
/*
@@ -2161,7 +2187,6 @@ load_code(LoaderState* stp)
}
}
-#undef Need
load_error:
return 0;
@@ -2373,7 +2398,7 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
GenOpArg Flags, GenOpArg Dst)
{
GenOp* op;
- Uint bits;
+ UWord bits;
NEW_GENOP(stp, op);
@@ -2838,14 +2863,14 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
op->a[1].type = TAG_u;
if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
(timeout >> 32) == 0
#else
1
#endif
) {
op->a[1].val = timeout;
-#if !defined(ARCH_64)
+#if !defined(ARCH_64) || HALFWORD_HEAP
} else if (Time.type == TAG_q) {
Eterm big;
@@ -2856,11 +2881,13 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
if (big_arity(big) > 1 || big_sign(big)) {
goto error;
} else {
- (void) term_to_Uint(big, &op->a[1].val);
+ Uint u;
+ (void) term_to_Uint(big, &u);
+ op->a[1].val = (BeamInstr) u;
}
#endif
} else {
-#if !defined(ARCH_64)
+#if !defined(ARCH_64) || HALFWORD_HEAP
error:
#endif
op->op = genop_i_wait_error_0;
@@ -2883,14 +2910,14 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
op->a[1].type = TAG_u;
if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
(timeout >> 32) == 0
#else
1
#endif
) {
op->a[1].val = timeout;
-#ifndef ARCH_64
+#if !defined(ARCH_64) || HALFWORD_HEAP
} else if (Time.type == TAG_q) {
Eterm big;
@@ -2901,11 +2928,13 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
if (big_arity(big) > 1 || big_sign(big)) {
goto error;
} else {
- (void) term_to_Uint(big, &op->a[1].val);
+ Uint u;
+ (void) term_to_Uint(big, &u);
+ op->a[1].val = (BeamInstr) u;
}
#endif
} else {
-#ifndef ARCH_64
+#if !defined(ARCH_64) || HALFWORD_HEAP
error:
#endif
op->op = genop_i_wait_error_locked_0;
@@ -3321,15 +3350,21 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx)
op->op = genop_i_make_fun_2;
op->arity = 2;
op->a[0].type = TAG_u;
- op->a[0].val = (Uint) fe;
+ op->a[0].val = (BeamInstr) fe;
op->a[1].type = TAG_u;
op->a[1].val = stp->lambdas[idx.val].num_free;
op->next = NULL;
return op;
}
-
+/*
+ * Rewrite gc_bifs with one parameter (the common case). Utilized
+ * in ops.tab to rewrite instructions calling BIFs in guards
+ * to use a garbage collecting implementation. The instructions
+ * are sometimes once again rewritten to handle literals (putting the
+ * parameter in the mostly unused r[0] before the instruction is executed).
+ */
static GenOp*
-gen_guard_bif(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
+gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg Src, GenOpArg Dst)
{
GenOp* op;
@@ -3341,22 +3376,24 @@ gen_guard_bif(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
op->a[0] = Fail;
op->a[1].type = TAG_u;
bf = stp->import[Bif.val].bf;
+ /* The translations here need to have a reverse counterpart in
+ beam_emu.c:translate_gc_bif for error handling to work properly. */
if (bf == length_1) {
- op->a[1].val = (Uint) (void *) erts_gc_length_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_length_1;
} else if (bf == size_1) {
- op->a[1].val = (Uint) (void *) erts_gc_size_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_size_1;
} else if (bf == bit_size_1) {
- op->a[1].val = (Uint) (void *) erts_gc_bit_size_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1;
} else if (bf == byte_size_1) {
- op->a[1].val = (Uint) (void *) erts_gc_byte_size_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1;
} else if (bf == abs_1) {
- op->a[1].val = (Uint) (void *) erts_gc_abs_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1;
} else if (bf == float_1) {
- op->a[1].val = (Uint) (void *) erts_gc_float_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_float_1;
} else if (bf == round_1) {
- op->a[1].val = (Uint) (void *) erts_gc_round_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_round_1;
} else if (bf == trunc_1) {
- op->a[1].val = (Uint) (void *) erts_gc_trunc_1;
+ op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1;
} else {
abort();
}
@@ -3367,6 +3404,77 @@ gen_guard_bif(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
return op;
}
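
gen_guard_bif1 maps each guard BIF to its garbage-collecting variant with the if/else chain above, and the comment warns that translate_gc_bif in beam_emu.c must hold the reverse mapping for error handling. Purely as an illustration of that coupling (this is not how the loader is written, and translate_gc_bif itself is not part of this hunk), a single two-column table scanned in either direction is one way to keep the two mappings from drifting apart; every name below is a placeholder:

    #include <stddef.h>

    typedef void (*FnSketch)(void);

    struct gc_bif_map { FnSketch plain; FnSketch gc_version; };

    static void plain_length(void) {}
    static void gc_length(void)    {}
    static void plain_size(void)   {}
    static void gc_size(void)      {}

    static const struct gc_bif_map gc_bif_table[] = {
        { plain_length, gc_length },
        { plain_size,   gc_size   },
    };

    static FnSketch to_gc(FnSketch plain)          /* loader direction         */
    {
        size_t i;
        for (i = 0; i < sizeof(gc_bif_table)/sizeof(gc_bif_table[0]); i++)
            if (gc_bif_table[i].plain == plain)
                return gc_bif_table[i].gc_version;
        return NULL;
    }

    static FnSketch from_gc(FnSketch gc)           /* error-handler direction  */
    {
        size_t i;
        for (i = 0; i < sizeof(gc_bif_table)/sizeof(gc_bif_table[0]); i++)
            if (gc_bif_table[i].gc_version == gc)
                return gc_bif_table[i].plain;
        return NULL;
    }

    int main(void)
    {
        return (to_gc(plain_size) == gc_size &&
                from_gc(gc_length) == plain_length) ? 0 : 1;
    }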
+/*
+ * This is used by the ops.tab rule that rewrites gc_bifs with two parameters.
+ * The instruction returned is then again rewritten to an i_load instruction
+ * followed by i_gc_bif2_jIId, to handle literals properly.
+ * As opposed to i_gc_bif1_jIsId, the instruction i_gc_bif2_jIId is
+ * always rewritten, regardless of whether there actually are any literals.
+ */
+static GenOp*
+gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
+ GenOpArg S1, GenOpArg S2, GenOpArg Dst)
+{
+ GenOp* op;
+ BifFunction bf;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_ii_gc_bif2_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ bf = stp->import[Bif.val].bf;
+ /* The translations here need to have a reverse counterpart in
+ beam_emu.c:translate_gc_bif for error handling to work properly. */
+ if (bf == binary_part_2) {
+ op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2;
+ } else {
+ abort();
+ }
+ op->a[2] = S1;
+ op->a[3] = S2;
+ op->a[4] = Live;
+ op->a[5] = Dst;
+ op->next = NULL;
+ return op;
+}
+
+/*
+ * This is used by the ops.tab rule that rewrites gc_bifs with three parameters.
+ * The instruction returned is then again rewritten to a move instruction that
+ * uses r[0] for temporary storage, followed by an i_load instruction,
+ * followed by i_gc_bif3_jIsId, to handle literals properly. Rewriting
+ * always occurs, as with the gc_bif2 counterpart.
+ */
+static GenOp*
+gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
+ GenOpArg S1, GenOpArg S2, GenOpArg S3, GenOpArg Dst)
+{
+ GenOp* op;
+ BifFunction bf;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_ii_gc_bif3_7;
+ op->arity = 7;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ bf = stp->import[Bif.val].bf;
+ /* The translations here need to have a reverse counterpart in
+ beam_emu.c:translate_gc_bif for error handling to work properly. */
+ if (bf == binary_part_3) {
+ op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3;
+ } else {
+ abort();
+ }
+ op->a[2] = S1;
+ op->a[3] = S2;
+ op->a[4] = S3;
+ op->a[5] = Live;
+ op->a[6] = Dst;
+ op->next = NULL;
+ return op;
+}
+
/*
* Freeze the code in memory, move the string table into place,
@@ -3376,7 +3484,8 @@ gen_guard_bif(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
static int
freeze_code(LoaderState* stp)
{
- Eterm* code = stp->code;
+ BeamInstr* code = stp->code;
+ Uint *literal_end = NULL;
Uint index;
int i;
byte* str_table;
@@ -3401,46 +3510,49 @@ freeze_code(LoaderState* stp)
* Calculate the final size of the code.
*/
- size = (stp->ci + stp->total_literal_size) * sizeof(Eterm) +
+ size = (stp->ci * sizeof(BeamInstr)) + (stp->total_literal_size * sizeof(Eterm)) +
strtab_size + attr_size + compile_size;
/*
* Move the code to its final location.
*/
- code = (Eterm *) erts_realloc(ERTS_ALC_T_CODE, (void *) code, size);
-
+ code = (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, (void *) code, size);
+ CHKBLK(ERTS_ALC_T_CODE,code);
/*
* Place a pointer to the op_int_code_end instruction in the
* function table in the beginning of the file.
*/
- code[MI_FUNCTIONS+stp->num_functions] = (Eterm) (code + stp->ci - 1);
+ code[MI_FUNCTIONS+stp->num_functions] = (BeamInstr) (code + stp->ci - 1);
+ CHKBLK(ERTS_ALC_T_CODE,code);
/*
* Store the pointer to the on_load function.
*/
if (stp->on_load) {
- code[MI_ON_LOAD_FUNCTION_PTR] = (Eterm) (code + stp->on_load);
+ code[MI_ON_LOAD_FUNCTION_PTR] = (BeamInstr) (code + stp->on_load);
} else {
code[MI_ON_LOAD_FUNCTION_PTR] = 0;
}
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ literal_end = (Uint *) (code+stp->ci);
/*
* Place the literal heap directly after the code and fix up all
- * put_literal instructions that refer to it.
+ * instructions that refer to it.
*/
{
- Eterm* ptr;
- Eterm* low;
- Eterm* high;
+ Uint* ptr;
+ Uint* low;
+ Uint* high;
LiteralPatch* lp;
- low = code+stp->ci;
+ low = (Uint *) (code+stp->ci);
high = low + stp->total_literal_size;
- code[MI_LITERALS_START] = (Eterm) low;
- code[MI_LITERALS_END] = (Eterm) high;
+ code[MI_LITERALS_START] = (BeamInstr) low;
+ code[MI_LITERALS_END] = (BeamInstr) high;
ptr = low;
for (i = 0; i < stp->num_literals; i++) {
Uint offset;
@@ -3472,7 +3584,7 @@ freeze_code(LoaderState* stp)
}
lp = stp->literal_patches;
while (lp != 0) {
- Uint* op_ptr;
+ BeamInstr* op_ptr;
Uint literal;
Literal* lit;
@@ -3485,53 +3597,48 @@ freeze_code(LoaderState* stp)
op_ptr[0] = literal;
lp = lp->next;
}
- stp->ci += stp->total_literal_size;
+ literal_end += stp->total_literal_size;
}
/*
* Place the string table and, optionally, attributes, after the literal heap.
*/
+ CHKBLK(ERTS_ALC_T_CODE,code);
- sys_memcpy(code+stp->ci, stp->chunks[STR_CHUNK].start, strtab_size);
- str_table = (byte *) (code+stp->ci);
+ sys_memcpy(literal_end, stp->chunks[STR_CHUNK].start, strtab_size);
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ str_table = (byte *) literal_end;
if (attr_size) {
byte* attr = str_table + strtab_size;
sys_memcpy(attr, stp->chunks[ATTR_CHUNK].start, stp->chunks[ATTR_CHUNK].size);
- code[MI_ATTR_PTR] = (Eterm) attr;
- code[MI_ATTR_SIZE] = (Eterm) stp->chunks[ATTR_CHUNK].size;
+ code[MI_ATTR_PTR] = (BeamInstr) attr;
+ code[MI_ATTR_SIZE] = (BeamInstr) stp->chunks[ATTR_CHUNK].size;
decoded_size = erts_decode_ext_size(attr, attr_size, 0);
if (decoded_size < 0) {
LoadError0(stp, "bad external term representation of module attributes");
}
code[MI_ATTR_SIZE_ON_HEAP] = decoded_size;
}
+ CHKBLK(ERTS_ALC_T_CODE,code);
if (compile_size) {
byte* compile_info = str_table + strtab_size + attr_size;
+ CHKBLK(ERTS_ALC_T_CODE,code);
sys_memcpy(compile_info, stp->chunks[COMPILE_CHUNK].start,
stp->chunks[COMPILE_CHUNK].size);
- code[MI_COMPILE_PTR] = (Eterm) compile_info;
- code[MI_COMPILE_SIZE] = (Eterm) stp->chunks[COMPILE_CHUNK].size;
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ code[MI_COMPILE_PTR] = (BeamInstr) compile_info;
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ code[MI_COMPILE_SIZE] = (BeamInstr) stp->chunks[COMPILE_CHUNK].size;
+ CHKBLK(ERTS_ALC_T_CODE,code);
decoded_size = erts_decode_ext_size(compile_info, compile_size, 0);
+ CHKBLK(ERTS_ALC_T_CODE,code);
if (decoded_size < 0) {
LoadError0(stp, "bad external term representation of compilation information");
}
+ CHKBLK(ERTS_ALC_T_CODE,code);
code[MI_COMPILE_SIZE_ON_HEAP] = decoded_size;
}
-
-
- /*
- * Go through all put_strings instructions, restore the pointer to
- * the instruction and convert string offsets to pointers (to the
- * LAST character).
- */
-
- index = stp->put_strings;
- while (index != 0) {
- Uint next = code[index];
- code[index] = BeamOpCode(op_put_string_IId);
- code[index+2] = (Uint) (str_table + code[index+2] + code[index+1] - 1);
- index = next;
- }
+ CHKBLK(ERTS_ALC_T_CODE,code);
/*
* Go through all i_new_bs_put_strings instructions, restore the pointer to
@@ -3543,23 +3650,25 @@ freeze_code(LoaderState* stp)
while (index != 0) {
Uint next = code[index];
code[index] = BeamOpCode(op_bs_put_string_II);
- code[index+2] = (Uint) (str_table + code[index+2]);
+ code[index+2] = (BeamInstr) (str_table + code[index+2]);
index = next;
}
+ CHKBLK(ERTS_ALC_T_CODE,code);
{
StringPatch* sp = stp->string_patches;
while (sp != 0) {
- Uint* op_ptr;
+ BeamInstr* op_ptr;
byte* strp;
op_ptr = code + sp->pos;
strp = str_table + op_ptr[0];
- op_ptr[0] = (Eterm) strp;
+ op_ptr[0] = (BeamInstr) strp;
sp = sp->next;
}
}
+ CHKBLK(ERTS_ALC_T_CODE,code);
/*
* Resolve all labels.
@@ -3579,10 +3688,11 @@ freeze_code(LoaderState* stp)
ASSERT(this_patch < stp->ci);
next_patch = code[this_patch];
ASSERT(next_patch < stp->ci);
- code[this_patch] = (Uint) (code + value);
+ code[this_patch] = (BeamInstr) (code + value);
this_patch = next_patch;
}
}
+ CHKBLK(ERTS_ALC_T_CODE,code);
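
Label resolution above walks the patch chains built during load_code: every unresolved reference to a label stores the position of the previous reference in its own slot (0 terminates the chain), and once the label's value is known the loop rewrites every slot with the absolute address. The catches, imports and bs_put_string fixups in this file use the same threading trick. A stand-alone sketch of that backpatching pattern with invented slot numbers:

    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t InstrSketch;

    int main(void)
    {
        InstrSketch code[8] = {0};
        InstrSketch chain = 0;        /* head of the patch chain (0 = empty)   */
        InstrSketch target = 6;       /* label position, learned later         */
        InstrSketch pos;

        /* Two forward references to the same label, at slots 2 and 5: each
         * slot stores the previous head, then becomes the new head. */
        code[2] = chain; chain = 2;
        code[5] = chain; chain = 5;

        /* Resolution pass: walk the chain, writing the real address. */
        pos = chain;
        while (pos != 0) {
            InstrSketch next = code[pos];
            code[pos] = (InstrSketch) &code[target];
            pos = next;
        }

        printf("%d\n", code[2] == code[5] &&
                       code[2] == (InstrSketch) &code[target]);  /* prints 1   */
        return 0;
    }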
/*
* Fix all catch_yf instructions.
@@ -3590,13 +3700,14 @@ freeze_code(LoaderState* stp)
index = stp->catches;
catches = BEAM_CATCHES_NIL;
while (index != 0) {
- Uint next = code[index];
+ BeamInstr next = code[index];
code[index] = BeamOpCode(op_catch_yf);
- catches = beam_catches_cons((Uint*)code[index+2], catches);
+ catches = beam_catches_cons((BeamInstr *)code[index+2], catches);
code[index+2] = make_catch(catches);
index = next;
}
stp->catches = catches;
+ CHKBLK(ERTS_ALC_T_CODE,code);
/*
* Save the updated code pointer and code size.
@@ -3605,6 +3716,7 @@ freeze_code(LoaderState* stp)
stp->code = code;
stp->loaded_size = size;
+ CHKBLK(ERTS_ALC_T_CODE,code);
return 1;
load_error:
@@ -3638,7 +3750,7 @@ final_touch(LoaderState* stp)
* callable yet.
*/
ep->address = ep->code+3;
- ep->code[4] = (Eterm) stp->export[i].address;
+ ep->code[4] = (BeamInstr) stp->export[i].address;
}
}
@@ -3650,14 +3762,14 @@ final_touch(LoaderState* stp)
Eterm mod;
Eterm func;
Uint arity;
- Uint import;
+ BeamInstr import;
Uint current;
Uint next;
mod = stp->import[i].module;
func = stp->import[i].function;
arity = stp->import[i].arity;
- import = (Uint) erts_export_put(mod, func, arity);
+ import = (BeamInstr) erts_export_put(mod, func, arity);
current = stp->import[i].patches;
while (current != 0) {
ASSERT(current < stp->ci);
@@ -3675,7 +3787,7 @@ final_touch(LoaderState* stp)
for (i = 0; i < stp->num_lambdas; i++) {
unsigned entry_label = stp->lambdas[i].label;
ErlFunEntry* fe = stp->lambdas[i].fe;
- Eterm* code_ptr = (Eterm *) (stp->code + stp->labels[entry_label].value);
+ BeamInstr* code_ptr = (BeamInstr *) (stp->code + stp->labels[entry_label].value);
if (fe->address[0] != 0) {
/*
@@ -3807,7 +3919,7 @@ transform_engine(LoaderState* st)
if (i >= st->num_imports || st->import[i].bf == NULL)
goto restart;
if (bif_number != -1 &&
- bif_export[bif_number]->code[4] != (Uint) st->import[i].bf) {
+ bif_export[bif_number]->code[4] != (BeamInstr) st->import[i].bf) {
goto restart;
}
}
@@ -4071,7 +4183,7 @@ load_printf(int line, LoaderState* context, char *fmt,...)
static int
-get_int_val(LoaderState* stp, Uint len_code, Uint* result)
+get_int_val(LoaderState* stp, Uint len_code, BeamInstr* result)
{
Uint count;
Uint val;
@@ -4103,7 +4215,7 @@ get_int_val(LoaderState* stp, Uint len_code, Uint* result)
static int
-get_erlang_integer(LoaderState* stp, Uint len_code, Uint* result)
+get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result)
{
Uint count;
Sint val;
@@ -4124,11 +4236,12 @@ get_erlang_integer(LoaderState* stp, Uint len_code, Uint* result)
count = len_code + 2;
} else {
Uint tag;
+ UWord len_word;
ASSERT(len_code == 7);
- GetTagAndValue(stp, tag, len_code);
+ GetTagAndValue(stp, tag, len_word);
VerifyTag(stp, TAG_u, tag);
- count = len_code + 9;
+ count = len_word + 9;
}
/*
@@ -4376,7 +4489,7 @@ functions_in_module(Process* p, /* Process whose heap to use. */
Eterm mod) /* Tagged atom for module. */
{
Module* modp;
- Eterm* code;
+ BeamInstr* code;
int i;
Uint num_functions;
Eterm* hp;
@@ -4394,9 +4507,9 @@ functions_in_module(Process* p, /* Process whose heap to use. */
num_functions = code[MI_NUM_FUNCTIONS];
hp = HAlloc(p, 5*num_functions);
for (i = num_functions-1; i >= 0 ; i--) {
- Eterm* func_info = (Eterm *) code[MI_FUNCTIONS+i];
- Eterm name = func_info[3];
- int arity = func_info[4];
+ BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
+ Eterm name = (Eterm) func_info[3];
+ int arity = (int) func_info[4];
Eterm tuple;
ASSERT(is_atom(name));
@@ -4419,7 +4532,7 @@ static Eterm
native_addresses(Process* p, Eterm mod)
{
Module* modp;
- Eterm* code;
+ BeamInstr* code;
int i;
Eterm* hp;
Uint num_functions;
@@ -4442,9 +4555,9 @@ native_addresses(Process* p, Eterm mod)
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- Eterm* func_info = (Eterm *) code[MI_FUNCTIONS+i];
- Eterm name = func_info[3];
- int arity = func_info[4];
+ BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
+ Eterm name = (Eterm) func_info[3];
+ int arity = (int) func_info[4];
Eterm tuple;
ASSERT(is_atom(name));
@@ -4486,7 +4599,7 @@ exported_from_module(Process* p, /* Process whose heap to use. */
Eterm tuple;
if (ep->address == ep->code+3 &&
- ep->code[3] == (Eterm) em_call_error_handler) {
+ ep->code[3] == (BeamInstr) em_call_error_handler) {
/* There is a call to the function, but it does not exist. */
continue;
}
@@ -4519,7 +4632,7 @@ attributes_for_module(Process* p, /* Process whose heap to use. */
{
Module* modp;
- Eterm* code;
+ BeamInstr* code;
Eterm* hp;
byte* ext;
Eterm result = NIL;
@@ -4559,7 +4672,7 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
Eterm mod) /* Tagged atom for module. */
{
Module* modp;
- Eterm* code;
+ BeamInstr* code;
Eterm* hp;
byte* ext;
Eterm result = NIL;
@@ -4591,8 +4704,8 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
/*
* Returns a pointer to {module, function, arity}, or NULL if not found.
*/
-Eterm*
-find_function_from_pc(Eterm* pc)
+BeamInstr *
+find_function_from_pc(BeamInstr* pc)
{
Range* low = modules;
Range* high = low + num_loaded_modules;
@@ -4604,9 +4717,9 @@ find_function_from_pc(Eterm* pc)
} else if (pc > mid->end) {
low = mid + 1;
} else {
- Eterm** low1 = (Eterm **) (mid->start + MI_FUNCTIONS);
- Eterm** high1 = low1 + mid->start[MI_NUM_FUNCTIONS];
- Eterm** mid1;
+ BeamInstr** low1 = (BeamInstr **) (mid->start + MI_FUNCTIONS);
+ BeamInstr** high1 = low1 + mid->start[MI_NUM_FUNCTIONS];
+ BeamInstr** mid1;
while (low1 < high1) {
mid1 = low1 + (high1-low1) / 2;
@@ -4719,10 +4832,10 @@ code_module_md5_1(Process* p, Eterm Bin)
#define WORDS_PER_FUNCTION 6
-static Eterm*
-make_stub(Eterm* fp, Eterm mod, Eterm func, Uint arity, Uint native, Eterm OpCode)
+static BeamInstr*
+make_stub(BeamInstr* fp, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
{
- fp[0] = (Eterm) BeamOp(op_i_func_info_IaaI);
+ fp[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
fp[1] = native;
fp[2] = mod;
fp[3] = func;
@@ -4741,14 +4854,14 @@ static byte*
stub_copy_info(LoaderState* stp,
int chunk, /* Chunk: ATTR_CHUNK or COMPILE_CHUNK */
byte* info, /* Where to store info. */
- Eterm* ptr_word, /* Where to store pointer into info. */
- Eterm* size_word) /* Where to store size of info. */
+ BeamInstr* ptr_word, /* Where to store pointer into info. */
+ BeamInstr* size_word) /* Where to store size of info. */
{
Sint decoded_size;
Uint size = stp->chunks[chunk].size;
if (size != 0) {
memcpy(info, stp->chunks[chunk].start, size);
- *ptr_word = (Eterm) info;
+ *ptr_word = (BeamInstr) info;
decoded_size = erts_decode_ext_size(info, size, 0);
if (decoded_size < 0) {
return 0;
@@ -4791,7 +4904,7 @@ stub_read_export_table(LoaderState* stp)
}
static void
-stub_final_touch(LoaderState* stp, Eterm* fp)
+stub_final_touch(LoaderState* stp, BeamInstr* fp)
{
int i;
int n = stp->num_exps;
@@ -4978,12 +5091,12 @@ Eterm
erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
{
LoaderState state;
- Eterm Funcs;
- Eterm Patchlist;
+ BeamInstr Funcs;
+ BeamInstr Patchlist;
Eterm* tp;
- Eterm* code = NULL;
- Eterm* ptrs;
- Eterm* fp;
+ BeamInstr* code = NULL;
+ BeamInstr* ptrs;
+ BeamInstr* fp;
byte* info;
Uint ci;
int n;
@@ -5072,7 +5185,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Allocate memory for the stub module.
*/
- code_size = ((WORDS_PER_FUNCTION+1)*n + MI_FUNCTIONS + 2) * sizeof(Eterm);
+ code_size = ((WORDS_PER_FUNCTION+1)*n + MI_FUNCTIONS + 2) * sizeof(BeamInstr);
code_size += state.chunks[ATTR_CHUNK].size;
code_size += state.chunks[COMPILE_CHUNK].size;
code = erts_alloc_fnf(ERTS_ALC_T_CODE, code_size);
@@ -5141,7 +5254,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Set the pointer and make the stub. Put a return instruction
* as the body until we know what kind of trap we should put there.
*/
- ptrs[i] = (Eterm) fp;
+ ptrs[i] = (BeamInstr) fp;
#ifdef HIPE
op = (Eterm) BeamOpCode(op_hipe_trap_call); /* Might be changed later. */
#else
@@ -5154,8 +5267,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Insert the last pointer and the int_code_end instruction.
*/
- ptrs[i] = (Eterm) fp;
- *fp++ = (Eterm) BeamOp(op_int_code_end);
+ ptrs[i] = (BeamInstr) fp;
+ *fp++ = (BeamInstr) BeamOp(op_int_code_end);
/*
* Copy attributes and compilation information.
@@ -5222,9 +5335,9 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
#undef WORDS_PER_FUNCTION
-static int safe_mul(Uint a, Uint b, Uint* resp)
+static int safe_mul(UWord a, UWord b, UWord* resp)
{
- Uint res = a * b;
+ Uint res = a * b; /* XXX:Pan - used in bit syntax, the multiplication has to be stored in Uint */
*resp = res;
if (b == 0) {
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index c17844a553..26e3054c4b 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -44,13 +44,13 @@ extern void** beam_ops;
#endif
-extern Eterm beam_debug_apply[];
-extern Eterm* em_call_error_handler;
-extern Eterm* em_apply_bif;
-extern Eterm* em_call_traced_function;
+extern BeamInstr beam_debug_apply[];
+extern BeamInstr* em_call_error_handler;
+extern BeamInstr* em_apply_bif;
+extern BeamInstr* em_call_traced_function;
typedef struct {
- Eterm* start; /* Pointer to start of module. */
- Eterm* end; /* Points one word beyond last function in module. */
+ BeamInstr* start; /* Pointer to start of module. */
+ BeamInstr* end; /* Points one word beyond last function in module. */
} Range;
/*
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 1b670585a7..6e9755ad48 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -616,13 +616,15 @@ local_name_monitor(Process *p, Eterm target_name)
rp = erts_whereis_process(p, p_locks, target_name, ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
if (!rp) {
- Eterm lhp[3];
+ DeclareTmpHeap(lhp,3,p);
Eterm item;
+ UseTmpHeap(3,p);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
p_locks &= ~ERTS_PROC_LOCK_LINK;
item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname);
erts_queue_monitor_message(p, &p_locks,
mon_ref, am_process, item, am_noproc);
+ UnUseTmpHeap(3,p);
}
else if (rp != p) {
erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, rp->id,
@@ -1130,6 +1132,34 @@ BIF_RETTYPE error_2(Process* p, Eterm value, Eterm args)
}
/**********************************************************************/
+/*
+ * This is exactly like error/1. The only difference is
+ * that Dialyzer thinks that it will return an arbitrary term.
+ * It is useful in stub functions for NIFs.
+ */
+
+BIF_RETTYPE nif_error_1(Process* p, Eterm term)
+{
+ p->fvalue = term;
+ BIF_ERROR(p, EXC_ERROR);
+}
+
+/**********************************************************************/
+/*
+ * This is exactly like error/2. The only difference is
+ * that Dialyzer thinks that it will return an arbitrary term.
+ * It is useful in stub functions for NIFs.
+ */
+
+BIF_RETTYPE nif_error_2(Process* p, Eterm value, Eterm args)
+{
+ Eterm* hp = HAlloc(p, 3);
+
+ p->fvalue = TUPLE2(hp, value, args);
+ BIF_ERROR(p, EXC_ERROR_2);
+}
+
+/**********************************************************************/
/* this is like throw/1 except that we set freason to EXC_EXIT */
BIF_RETTYPE exit_1(BIF_ALIST_1)
@@ -3466,9 +3496,16 @@ BIF_RETTYPE make_fun_3(BIF_ALIST_3)
if (arity < 0) {
goto error;
}
+#if HALFWORD_HEAP
+ hp = HAlloc(BIF_P, 3);
+ hp[0] = HEADER_EXPORT;
+ /* Yes, this may be misaligned, but x86_64 will fix it... */
+ *((Export **) (hp+1)) = erts_export_get_or_make_stub(BIF_ARG_1, BIF_ARG_2, (Uint) arity);
+#else
hp = HAlloc(BIF_P, 2);
hp[0] = HEADER_EXPORT;
hp[1] = (Eterm) erts_export_get_or_make_stub(BIF_ARG_1, BIF_ARG_2, (Uint) arity);
+#endif
BIF_RET(make_export(hp));
}
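
On a halfword build an Eterm slot is 32 bits while an Export* is a full native pointer, so the HALFWORD_HEAP branch above takes three heap words and writes the pointer across slots 1 and 2 through an Export** cast; call_fun in beam_emu.c reads it back with the matching *((Export **) (export_val(fun) + 1)). A stand-alone sketch of that write/read pair with local stand-in types; memcpy is used here instead of the possibly misaligned direct store that the comment shrugs off:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    typedef uint32_t EtermSketch;             /* 32-bit term slot              */

    int main(void)
    {
        static int the_export;                /* stands in for an Export       */
        EtermSketch hp[3];                    /* header + two value slots      */
        int *wr = &the_export, *rd;

        hp[0] = 0;                            /* header word (tag omitted)     */
        memcpy(&hp[1], &wr, sizeof wr);       /* writer side (make_fun_3)      */
        memcpy(&rd, &hp[1], sizeof rd);       /* reader side (call_fun)        */

        printf("%d\n", rd == &the_export);    /* prints "1"                    */
        return 0;
    }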
@@ -3574,11 +3611,11 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
etp->header = make_external_pid_header(1);
- etp->next = MSO(BIF_P).externals;
+ etp->next = MSO(BIF_P).first;
etp->node = enp;
etp->data.ui[0] = make_pid_data(c, b);
- MSO(BIF_P).externals = etp;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
erts_deref_dist_entry(dep);
BIF_RET(make_external_pid(etp));
}
@@ -3884,7 +3921,7 @@ BIF_RETTYPE hash_2(BIF_ALIST_2)
if ((range = signed_val(BIF_ARG_2)) <= 0) { /* [1..MAX_SMALL] */
BIF_ERROR(BIF_P, BADARG);
}
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
if (range > ((1L << 27) - 1))
BIF_ERROR(BIF_P, BADARG);
#endif
@@ -3956,7 +3993,7 @@ BIF_RETTYPE phash2_2(BIF_ALIST_2)
/*
* Return either a small or a big. Use the heap for bigs if there is room.
*/
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
BIF_RET(make_small(final_hash));
#else
if (IS_USMALL(0, final_hash)) {
@@ -4118,8 +4155,8 @@ void erts_init_bif(void)
#else
bif_return_trap_export.code[2] = 1;
#endif
- bif_return_trap_export.code[3] = (Eterm) em_apply_bif;
- bif_return_trap_export.code[4] = (Eterm) &bif_return_trap;
+ bif_return_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ bif_return_trap_export.code[4] = (BeamInstr) &bif_return_trap;
flush_monitor_message_trap = erts_export_put(am_erlang,
am_flush_monitor_message,
@@ -4134,54 +4171,6 @@ void erts_init_bif(void)
await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3);
}
-BIF_RETTYPE blocking_read_file_1(BIF_ALIST_1)
-{
- Eterm bin;
- Eterm* hp;
- byte *buff;
- int i, buff_size;
- FILE *file;
- struct stat file_info;
- char *filename = NULL;
- size_t size;
-
- i = list_length(BIF_ARG_1);
- if (i < 0) {
- BIF_ERROR(BIF_P, BADARG);
- }
- filename = erts_alloc(ERTS_ALC_T_TMP, i + 1);
- if (intlist_to_buf(BIF_ARG_1, filename, i) != i)
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- filename[i] = '\0';
-
- hp = HAlloc(BIF_P, 3);
-
- file = fopen(filename, "r");
- if(file == NULL){
- erts_free(ERTS_ALC_T_TMP, (void *) filename);
- BIF_RET(TUPLE2(hp, am_error, am_nofile));
- }
-
- stat(filename, &file_info);
- erts_free(ERTS_ALC_T_TMP, (void *) filename);
-
- buff_size = file_info.st_size;
- buff = (byte *) erts_alloc_fnf(ERTS_ALC_T_TMP, buff_size);
- if (!buff) {
- fclose(file);
- BIF_RET(TUPLE2(hp, am_error, am_allocator));
- }
- size = fread(buff, 1, buff_size, file);
- fclose(file);
- if (size < 0)
- size = 0;
- else if (size > buff_size)
- size = (size_t) buff_size;
- bin = new_binary(BIF_P, buff, (int) size);
- erts_free(ERTS_ALC_T_TMP, (void *) buff);
-
- BIF_RET(TUPLE2(hp, am_ok, bin));
-}
#ifdef HARDDEBUG
/*
You'll need this line in bif.tab to be able to use this debug bif
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 05e9b78c28..50f5f4fbd6 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -125,7 +125,7 @@ do { \
#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
do { \
(Proc)->arity = 0; \
- (Proc)->def_arg_reg[3] = (Eterm) (Trap->address); \
+ *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -134,7 +134,8 @@ do { \
do { \
(Proc)->arity = 1; \
(Proc)->def_arg_reg[0] = (Eterm) (A0); \
- (Proc)->def_arg_reg[3] = (Eterm) ((Trap)->address); \
+ *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
+ (Proc)->def_arg_reg[3] = (UWord) ((Trap)->address); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -144,7 +145,8 @@ do { \
(Proc)->arity = 2; \
(Proc)->def_arg_reg[0] = (Eterm) (A0); \
(Proc)->def_arg_reg[1] = (Eterm) (A1); \
- (Proc)->def_arg_reg[3] = (Eterm) ((Trap)->address); \
+ *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
+ (Proc)->def_arg_reg[3] = (UWord) ((Trap)->address); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -155,14 +157,15 @@ do { \
(Proc)->def_arg_reg[0] = (Eterm) (A0); \
(Proc)->def_arg_reg[1] = (Eterm) (A1); \
(Proc)->def_arg_reg[2] = (Eterm) (A2); \
- (Proc)->def_arg_reg[3] = (Eterm) ((Trap)->address); \
+ *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
+ (Proc)->def_arg_reg[3] = (UWord) ((Trap)->address); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
#define BIF_TRAP0(p, Trap_) do { \
(p)->arity = 0; \
- (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -170,7 +173,7 @@ do { \
#define BIF_TRAP1(Trap_, p, A0) do { \
(p)->arity = 1; \
(p)->def_arg_reg[0] = (A0); \
- (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -179,7 +182,7 @@ do { \
(p)->arity = 2; \
(p)->def_arg_reg[0] = (A0); \
(p)->def_arg_reg[1] = (A1); \
- (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -189,14 +192,14 @@ do { \
(p)->def_arg_reg[0] = (A0); \
(p)->def_arg_reg[1] = (A1); \
(p)->def_arg_reg[2] = (A2); \
- (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
(p)->arity = 0; \
- (p)->def_arg_reg[3] = (Eterm) (Code_); \
+ *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) (Code_); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index b6fa06354a..0674aae77f 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -686,8 +686,6 @@ bif 'erl.system.code':make_stub_module/3 ebif_code_make_stub_module_3
bif code:is_module_native/1
bif 'erl.system.code':is_native/1 ebif_code_is_native_1 code_is_module_native_1
-bif erlang:blocking_read_file/1
-
#
# New Bifs in R9C.
#
@@ -760,6 +758,43 @@ bif erlang:finish_after_on_load/2
bif erlang:binary_to_term/2
#
+# The binary match bifs (New in R14A - EEP9)
+#
+
+#
+# The searching/splitting/substituting thingies
+#
+ubif erlang:binary_part/2
+ubif erlang:binary_part/3
+
+bif binary:compile_pattern/1
+bif binary:match/2
+bif binary:match/3
+bif binary:matches/2
+bif binary:matches/3
+bif binary:longest_common_prefix/1
+bif binary:longest_common_suffix/1
+bif binary:first/1
+bif binary:last/1
+bif binary:at/2
+bif binary:part/2 binary_binary_part_2
+bif binary:part/3 binary_binary_part_3
+bif binary:bin_to_list/1
+bif binary:bin_to_list/2
+bif binary:bin_to_list/3
+bif binary:list_to_bin/1
+bif binary:copy/1
+bif binary:copy/2
+bif binary:referenced_byte_size/1
+bif binary:encode_unsigned/1
+bif binary:encode_unsigned/2
+bif binary:decode_unsigned/1
+bif binary:decode_unsigned/2
+
+bif erlang:nif_error/1
+bif erlang:nif_error/2
+
+#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 03c88da8c6..ff15d834ab 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -1459,7 +1459,31 @@ Eterm uint_to_big(Uint x, Eterm *y)
BIG_DIGIT(y, 0) = x;
return make_big(y);
}
+/*
+** convert UWord to bigint
+** (must only be used if x is too big to be stored as a small)
+** Allocation is tricky, the heap space needed has to be calculated
+** with the macro BIG_UWORD_HEAP_SIZE(x)
+*/
+Eterm uword_to_big(UWord x, Eterm *y)
+{
+#if HALFWORD_HEAP
+ Uint upper = x >> 32;
+ Uint lower = x & 0xFFFFFFFFUL;
+ if (upper == 0) {
+ *y = make_pos_bignum_header(1);
+ } else {
+ *y = make_pos_bignum_header(2);
+ BIG_DIGIT(y, 1) = upper;
+ }
+ BIG_DIGIT(y, 0) = lower;
+#else
+ *y = make_pos_bignum_header(1);
+ BIG_DIGIT(y, 0) = x;
+#endif
+ return make_big(y);
+}
/*
** convert signed int to bigint
@@ -1480,19 +1504,19 @@ Eterm small_to_big(Sint x, Eterm *y)
Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
-#ifdef ARCH_32
+#if defined(ARCH_32) || HALFWORD_HEAP
if (x >= (((Uint64) 1) << 32)) {
*hp = make_pos_bignum_header(2);
BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
- *hpp += 2;
+ *hpp += 3;
}
else
#endif
{
*hp = make_pos_bignum_header(1);
BIG_DIGIT(hp, 0) = (Uint) x;
- *hpp += 1;
+ *hpp += 2;
}
return make_big(hp);
}
@@ -1507,7 +1531,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
neg = 1;
x = -x;
}
-#ifdef ARCH_32
+#if defined(ARCH_32) || HALFWORD_HEAP
if (x >= (((Uint64) 1) << 32)) {
if (neg)
*hp = make_neg_bignum_header(2);
@@ -1515,7 +1539,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_pos_bignum_header(2);
BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
- *hpp += 2;
+ *hpp += 3;
}
else
#endif
@@ -1525,7 +1549,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
else
*hp = make_pos_bignum_header(1);
BIG_DIGIT(hp, 0) = (Uint) x;
- *hpp += 1;
+ *hpp += 2;
}
return make_big(hp);
}
@@ -1854,6 +1878,87 @@ term_to_Uint(Eterm term, Uint *up)
}
}
+int
+term_to_UWord(Eterm term, UWord *up)
+{
+#if SIZEOF_VOID_P == ERTS_SIZEOF_ETERM
+ return term_to_Uint(term,up);
+#else
+ if (is_small(term)) {
+ Sint i = signed_val(term);
+ if (i < 0) {
+ *up = BADARG;
+ return 0;
+ }
+ *up = (UWord) i;
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ UWord uval = 0;
+ int n = 0;
+
+ if (big_sign(term)) {
+ *up = BADARG;
+ return 0;
+ } else if (xl*D_EXP > sizeof(UWord)*8) {
+ *up = SYSTEM_LIMIT;
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((UWord)(*xr++)) << n;
+ n += D_EXP;
+ }
+ *up = uval;
+ return 1;
+ } else {
+ *up = BADARG;
+ return 0;
+ }
+#endif
+}
+
+int
+term_to_Uint64(Eterm term, Uint64 *up)
+{
+#if SIZEOF_VOID_P == 8
+ return term_to_UWord(term,up);
+#else
+ if (is_small(term)) {
+ Sint i = signed_val(term);
+ if (i < 0) {
+ *up = BADARG;
+ return 0;
+ }
+ *up = (Uint64) i;
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ Uint64 uval = 0;
+ int n = 0;
+
+ if (big_sign(term)) {
+ *up = BADARG;
+ return 0;
+ } else if (xl*D_EXP > sizeof(Uint64)*8) {
+ *up = SYSTEM_LIMIT;
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((Uint64)(*xr++)) << n;
+ n += D_EXP;
+ }
+ *up = uval;
+ return 1;
+ } else {
+ *up = BADARG;
+ return 0;
+ }
+#endif
+}
+
+
int term_to_Sint(Eterm term, Sint *sp)
{
if (is_small(term)) {
@@ -1888,6 +1993,47 @@ int term_to_Sint(Eterm term, Sint *sp)
}
}
+#if HAVE_INT64
+int term_to_Sint64(Eterm term, Sint64 *sp)
+{
+#if ERTS_SIZEOF_ETERM == 8
+ return term_to_Sint(term, sp);
+#else
+ if (is_small(term)) {
+ *sp = signed_val(term);
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ int sign = big_sign(term);
+ Uint64 uval = 0;
+ int n = 0;
+
+ if (xl*D_EXP > sizeof(Uint64)*8) {
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((Uint64)(*xr++)) << n;
+ n += D_EXP;
+ }
+ if (sign) {
+ uval = -uval;
+ if ((Sint64)uval > 0)
+ return 0;
+ } else {
+ if ((Sint64)uval < 0)
+ return 0;
+ }
+ *sp = uval;
+ return 1;
+ } else {
+ return 0;
+ }
+#endif
+}
+#endif /* HAVE_INT64 */
+
+
/*
** Add and subtract
*/
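
The new term_to_UWord, term_to_Uint64 and term_to_Sint64 functions above share one core idea: walk the bignum's digit vector and OR each digit into the destination at an increasing shift of D_EXP bits, rejecting negative values and values wider than the destination. The fragment below is a standalone sketch of that accumulation loop with assumed 32-bit digits; the digit width and the simplified error handling are illustration-only and do not mirror the BADARG/SYSTEM_LIMIT reporting in the patch.

#include <stdint.h>
#include <stdio.h>

#define D_EXP 32  /* assumed digit width in bits for this sketch */

/* Accumulate 'nd' little-endian 32-bit digits into a 64-bit unsigned value.
 * Returns 1 on success, 0 if the value does not fit the destination. */
static int digits_to_u64(const uint32_t *digits, unsigned nd, uint64_t *out)
{
    uint64_t val = 0;
    unsigned shift = 0;

    if ((uint64_t) nd * D_EXP > 64)
        return 0;                      /* too wide for the destination */
    while (nd-- > 0) {
        val |= ((uint64_t) *digits++) << shift;
        shift += D_EXP;
    }
    *out = val;
    return 1;
}

int main(void)
{
    /* 0x0000000100000002 stored as two little-endian 32-bit digits. */
    uint32_t digits[2] = { 0x00000002u, 0x00000001u };
    uint64_t v;

    if (digits_to_u64(digits, 2, &v))
        printf("value = 0x%016llx\n", (unsigned long long) v);
    return 0;
}
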
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index b8e38d482c..25466cd3c2 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@
typedef Uint ErtsDigit;
-#if (SIZEOF_VOID_P == 4) && defined(SIZEOF_LONG_LONG) && (SIZEOF_LONG_LONG == 8)
+#if ((SIZEOF_VOID_P == 4) || HALFWORD_HEAP) && defined(SIZEOF_LONG_LONG) && (SIZEOF_LONG_LONG == 8)
/* Assume 32-bit machine with long long support */
typedef Uint64 ErtsDoubleDigit;
typedef Uint16 ErtsHalfDigit;
@@ -58,7 +58,7 @@ typedef Uint32 ErtsHalfDigit;
typedef Uint dsize_t; /* Vector size type */
-#define D_EXP (SIZEOF_VOID_P*8)
+#define D_EXP (ERTS_SIZEOF_ETERM*8)
#define D_MASK ((ErtsDigit)(-1)) /* D_BASE-1 */
/* macros for bignum objects */
@@ -88,7 +88,13 @@ typedef Uint dsize_t; /* Vector size type */
#define BIG_UINT_HEAP_SIZE (1 + 1) /* always, since sizeof(Uint) <= sizeof(Eterm) */
-#ifdef ARCH_32
+#if HALFWORD_HEAP
+#define BIG_UWORD_HEAP_SIZE(UW) (((UW) >> (sizeof(Uint) * 8)) ? 3 : 2)
+#else
+#define BIG_UWORD_HEAP_SIZE(UW) BIG_UINT_HEAP_SIZE
+#endif
+
+#if defined(ARCH_32) || HALFWORD_HEAP
#define ERTS_UINT64_BIG_HEAP_SIZE__(X) \
((X) >= (((Uint64) 1) << 32) ? (1 + 2) : (1 + 1))
@@ -136,6 +142,7 @@ int big_ucomp (Eterm, Eterm);
int big_to_double(Eterm x, double* resp);
Eterm small_to_big(Sint, Eterm*);
Eterm uint_to_big(Uint, Eterm*);
+Eterm uword_to_big(UWord, Eterm*);
Eterm erts_make_integer(Uint, Process *);
dsize_t big_bytes(Eterm);
@@ -143,7 +150,12 @@ Eterm bytes_to_big(byte*, dsize_t, int, Eterm*);
byte* big_to_bytes(Eterm, byte*);
int term_to_Uint(Eterm, Uint*);
+int term_to_UWord(Eterm, UWord*);
int term_to_Sint(Eterm, Sint*);
+#if HAVE_INT64
+int term_to_Uint64(Eterm, Uint64*);
+int term_to_Sint64(Eterm, Sint64*);
+#endif
Uint32 big_to_uint32(Eterm b);
int term_equals_2pow32(Eterm);
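
The new BIG_UWORD_HEAP_SIZE(UW) macro in big.h decides, on halfword builds, whether a UWord needs a one-digit or a two-digit bignum: one header word plus one digit when the upper half is zero, a header plus two digits otherwise. A standalone sketch of that size calculation, assuming 32-bit digits:

#include <stdint.h>
#include <stdio.h>

/* Words needed on the heap for an unsigned word stored as a bignum:
 * 1 header word + 1 digit, or 1 header + 2 digits when the value does
 * not fit in a single 32-bit digit (sketch of the halfword case only). */
static unsigned big_uword_heap_size(uint64_t uw)
{
    return (uw >> 32) ? 3u : 2u;
}

int main(void)
{
    printf("%u heap words for 0x1\n",         big_uword_heap_size(0x1ull));
    printf("%u heap words for 0x100000000\n", big_uword_heap_size(0x100000000ull));
    return 0;
}
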
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 08c64610a2..8ee8fbcb29 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -42,7 +42,7 @@ void
erts_init_binary(void)
{
/* Verify Binary alignment... */
- if ((((Uint) &((Binary *) 0)->orig_bytes[0]) % ((Uint) 8)) != 0) {
+ if ((((UWord) &((Binary *) 0)->orig_bytes[0]) % ((UWord) 8)) != 0) {
/* I assume that any compiler should be able to optimize this
away. If not, this test is not very expensive... */
erl_exit(ERTS_ABORT_EXIT,
@@ -88,8 +88,8 @@ new_binary(Process *p, byte *buf, int len)
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -97,7 +97,7 @@ new_binary(Process *p, byte *buf, int len)
/*
* Miscellanous updates. Return the tagged binary.
*/
- MSO(p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
}
@@ -127,8 +127,8 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
@@ -136,7 +136,7 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
/*
* Miscellanous updates. Return the tagged binary.
*/
- MSO(p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
return make_binary(pb);
}
@@ -180,7 +180,7 @@ erts_realloc_binary(Eterm bin, size_t size)
}
byte*
-erts_get_aligned_binary_bytes_extra(Eterm bin, byte** base_ptr, unsigned extra)
+erts_get_aligned_binary_bytes_extra(Eterm bin, byte** base_ptr, ErtsAlcType_t allocator, unsigned extra)
{
byte* bytes;
Eterm* real_bin;
@@ -208,7 +208,7 @@ erts_get_aligned_binary_bytes_extra(Eterm bin, byte** base_ptr, unsigned extra)
bytes = (byte *)(&(((ErlHeapBin *) real_bin)->data)) + offs;
}
if (bit_offs) {
- byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, byte_size + extra);
+ byte* buf = (byte *) erts_alloc(allocator, byte_size + extra);
*base_ptr = buf;
buf += extra;
erts_copy_bits(bytes, bit_offs, 1, buf, 0, 1, byte_size*8);
@@ -346,29 +346,40 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
/* Turn a possibly deep list of ints (and binaries) into */
/* One large binary object */
-BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
+/*
+ * This bif also exists in the binary module, under the name
+ * binary:list_to_bin/1, which is why it is divided into an interface and
+ * an implementation. It is also the backend for iolist_to_binary_1.
+ */
+
+BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
{
Eterm bin;
int i;
int offset;
byte* bytes;
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(new_binary(BIF_P,(byte*)"",0));
+ if (is_nil(arg)) {
+ BIF_RET(new_binary(p,(byte*)"",0));
}
- if (is_not_list(BIF_ARG_1)) {
+ if (is_not_list(arg)) {
goto error;
}
- if ((i = io_list_len(BIF_ARG_1)) < 0) {
+ if ((i = io_list_len(arg)) < 0) {
goto error;
}
- bin = new_binary(BIF_P, (byte *)NULL, i);
+ bin = new_binary(p, (byte *)NULL, i);
bytes = binary_bytes(bin);
- offset = io_list_to_buf(BIF_ARG_1, (char*) bytes, i);
+ offset = io_list_to_buf(arg, (char*) bytes, i);
ASSERT(offset == 0);
BIF_RET(bin);
- error:
- BIF_ERROR(BIF_P, BADARG);
+ error:
+ BIF_ERROR(p, BADARG);
+}
+
+BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
+{
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
}
/* Turn a possibly deep list of ints (and binaries) into */
@@ -376,31 +387,10 @@ BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
{
- Eterm bin;
- int i;
- int offset;
- byte* bytes;
-
if (is_binary(BIF_ARG_1)) {
BIF_RET(BIF_ARG_1);
}
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(new_binary(BIF_P,(byte*)"",0));
- }
- if (is_not_list(BIF_ARG_1)) {
- goto error;
- }
- if ((i = io_list_len(BIF_ARG_1)) < 0) {
- goto error;
- }
- bin = new_binary(BIF_P, (byte *)NULL, i);
- bytes = binary_bytes(bin);
- offset = io_list_to_buf(BIF_ARG_1, (char*) bytes, i);
- ASSERT(offset == 0);
- BIF_RET(bin);
-
- error:
- BIF_ERROR(BIF_P, BADARG);
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
}
BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1)
@@ -497,16 +487,6 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-void
-erts_cleanup_mso(ProcBin* pb)
-{
- while (pb != NULL) {
- ProcBin* next = pb->next;
- if (erts_refc_dectest(&pb->val->refc, 0) == 0)
- erts_bin_free(pb->val);
- pb = next;
- }
-}
/*
* Local functions.
@@ -675,3 +655,4 @@ bitstr_list_len(Eterm obj)
DESTROY_ESTACK(s);
return (Sint) -1;
}
+
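
Throughout these hunks the separate mso, funs and externals chains of ErlOffHeap are replaced by a single "first" list of struct erl_off_heap_header, pushed onto with MSO(p).first = ... and accounted with OH_OVERHEAD(...); consumers such as bin_check() in break.c below then walk one list and discriminate on the header's thing_word. The sketch below shows the general pattern of an intrusive, tag-discriminated singly linked list; the struct layout and tag values are invented for illustration and are not the real ERTS definitions.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical tags standing in for proc-bin, fun and external headers. */
enum { TAG_PROC_BIN = 1, TAG_FUN = 2, TAG_EXTERNAL = 3 };

/* Every off-heap thing starts with a tag word and an intrusive next link. */
struct off_heap_header {
    unsigned thing_word;
    struct off_heap_header *next;
};

struct off_heap_list {
    struct off_heap_header *first;
    size_t overhead;                 /* cf. the OH_OVERHEAD accounting */
};

struct proc_bin {                    /* simplified stand-in for ProcBin */
    struct off_heap_header hdr;
    size_t size;
};

static void oh_push(struct off_heap_list *oh, struct off_heap_header *h,
                    size_t overhead)
{
    h->next = oh->first;             /* same push pattern as MSO(p).first = pb */
    oh->first = h;
    oh->overhead += overhead;
}

int main(void)
{
    struct off_heap_list oh = { NULL, 0 };
    struct proc_bin pb1 = { { TAG_PROC_BIN, NULL }, 100 };
    struct off_heap_header fun = { TAG_FUN, NULL };

    oh_push(&oh, &pb1.hdr, pb1.size / sizeof(void *));
    oh_push(&oh, &fun, 0);

    /* Walk one unified list and filter on the tag, as bin_check() does. */
    for (struct off_heap_header *h = oh.first; h != NULL; h = h->next)
        if (h->thing_word == TAG_PROC_BIN)
            printf("proc_bin of size %zu\n", ((struct proc_bin *) h)->size);
    return 0;
}
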
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index cc69977b79..f339e19761 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/* This File contains functions which are called if a user hits ^C */
@@ -38,10 +38,6 @@
#include "erl_instrument.h"
#include "erl_bif_timer.h"
-#ifdef _OSE_
-#include "time.h"
-#endif
-
/* Forward declarations -- should really appear somewhere else */
static void process_killer(void);
void do_break(void);
@@ -262,12 +258,10 @@ print_process_info(int to, void *to_arg, Process *p)
}
{
- long s = 0;
int frags = 0;
ErlHeapFragment *m = p->mbuf;
while (m != NULL) {
frags++;
- s += m->size;
m = m->next;
}
erts_print(to, to_arg, "Number of heap fragments: %d\n", frags);
@@ -327,7 +321,7 @@ print_process_info(int to, void *to_arg, Process *p)
(unsigned)(OLD_HEND(p) - OLD_HEAP(p)) );
erts_print(to, to_arg, "Heap unused: %bpu\n", (p->hend - p->htop));
erts_print(to, to_arg, "OldHeap unused: %bpu\n",
- (OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HEAP(p)) );
+ (OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HTOP(p)) );
if (garbing) {
print_garb_info(to, to_arg, p);
@@ -381,7 +375,7 @@ loaded(int to, void *to_arg)
int i;
int old = 0;
int cur = 0;
- Eterm* code;
+ BeamInstr* code;
/*
* Calculate and print totals.
@@ -617,29 +611,29 @@ static void
bin_check(void)
{
Process *rp;
- ProcBin *bp;
- int i, printed;
+ struct erl_off_heap_header* hdr;
+ int i, printed = 0;
for (i=0; i < erts_max_processes; i++) {
if ((rp = process_tab[i]) == NULL)
continue;
- if (!(bp = rp->off_heap.mso))
- continue;
- printed = 0;
- while (bp) {
- if (printed == 0) {
- erts_printf("Process %T holding binary data \n", rp->id);
- printed = 1;
+ for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) {
+ if (hdr->thing_word == HEADER_PROC_BIN) {
+ ProcBin *bp = (ProcBin*) hdr;
+ if (!printed) {
+ erts_printf("Process %T holding binary data \n", rp->id);
+ printed = 1;
+ }
+ erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
+ (unsigned long)bp->val,
+ (long)bp->val->orig_size,
+ erts_smp_atomic_read(&bp->val->refc));
}
- erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
- (unsigned long)bp->val,
- (long)bp->val->orig_size,
- erts_smp_atomic_read(&bp->val->refc));
-
- bp = bp->next;
}
- if (printed == 1)
+ if (printed) {
erts_printf("--------------------------------------\n");
+ printed = 0;
+ }
}
/* db_bin_check() has to be rewritten for the AVL trees... */
/*db_bin_check();*/
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 0a5050b1fe..8bee47232e 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -37,6 +37,8 @@ MA_STACK_DECLARE(dst);
MA_STACK_DECLARE(offset);
#endif
+static void move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap*);
+
void
init_copy(void)
{
@@ -86,7 +88,7 @@ size_object(Eterm obj)
obj = *ptr++;
if (!IS_CONST(obj)) {
ESTACK_PUSH(s, obj);
- }
+ }
obj = *ptr;
break;
case TAG_PRIMARY_BOXED:
@@ -99,7 +101,7 @@ size_object(Eterm obj)
arity = header_arity(hdr);
sum += arity + 1;
if (arity == 0) { /* Empty tuple -- unusual. */
- goto size_common;
+ goto pop_next;
}
while (arity-- > 1) {
obj = *++ptr;
@@ -115,7 +117,6 @@ size_object(Eterm obj)
ErlFunThing* funp = (ErlFunThing *) bptr;
unsigned eterms = 1 /* creator */ + funp->num_free;
unsigned sz = thing_arityval(hdr);
-
sum += 1 /* header */ + sz + eterms;
bptr += 1 /* header */ + sz;
while (eterms-- > 1) {
@@ -151,7 +152,7 @@ size_object(Eterm obj)
} else {
sum += heap_bin_size(binary_size(obj)+extra_bytes);
}
- goto size_common;
+ goto pop_next;
}
break;
case BIN_MATCHSTATE_SUBTAG:
@@ -159,18 +160,12 @@ size_object(Eterm obj)
"size_object: matchstate term not allowed");
default:
sum += thing_arityval(hdr) + 1;
- /* Fall through */
- size_common:
- if (ESTACK_ISEMPTY(s)) {
- DESTROY_ESTACK(s);
- return sum;
- }
- obj = ESTACK_POP(s);
- break;
+ goto pop_next;
}
}
break;
case TAG_PRIMARY_IMMED1:
+ pop_next:
if (ESTACK_ISEMPTY(s)) {
DESTROY_ESTACK(s);
return sum;
@@ -319,10 +314,10 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*argp = make_binary(hbot);
pb = (ProcBin*) hbot;
erts_refc_inc(&pb->val->refc, 2);
- pb->next = off_heap->mso;
+ pb->next = off_heap->first;
pb->flags = 0;
- off_heap->mso = pb;
- off_heap->overhead += pb->size / sizeof(Eterm);
+ off_heap->first = (struct erl_off_heap_header*) pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
break;
case SUB_BINARY_SUBTAG:
@@ -368,10 +363,10 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
to->val = from->val;
erts_refc_inc(&to->val->refc, 2);
to->bytes = from->bytes + offset;
- to->next = off_heap->mso;
+ to->next = off_heap->first;
to->flags = 0;
- off_heap->mso = to;
- off_heap->overhead += to->size / sizeof(Eterm);
+ off_heap->first = (struct erl_off_heap_header*) to;
+ OH_OVERHEAD(off_heap, to->size / sizeof(Eterm));
}
*argp = make_binary(hbot);
if (extra_bytes != 0) {
@@ -401,8 +396,8 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
#ifndef HYBRID /* FIND ME! */
funp = (ErlFunThing *) tp;
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*) funp;
erts_refc_inc(&funp->fe->refc, 2);
#endif
*argp = make_fun(tp);
@@ -421,8 +416,8 @@ copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*htop++ = *objp++;
}
- etp->next = off_heap->externals;
- off_heap->externals = etp;
+ etp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)etp;
erts_refc_inc(&etp->node->refc, 2);
*argp = make_external(tp);
@@ -655,9 +650,9 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
*hp++ = *objp++;
}
erts_refc_inc(&pb->val->refc, 2);
- pb->next = erts_global_offheap.mso;
- erts_global_offheap.mso = pb;
- erts_global_offheap.overhead += pb->size / sizeof(Eterm);
+ pb->next = erts_global_offheap.first;
+ erts_global_offheap.first = pb;
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
continue;
}
@@ -677,9 +672,9 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
while (i--) {
*hp++ = *objp++;
}
-#ifndef HYBRID // FIND ME!
- funp->next = erts_global_offheap.funs;
- erts_global_offheap.funs = funp;
+#ifndef HYBRID /* FIND ME! */
+ funp->next = erts_global_offheap.first;
+ erts_global_offheap.first = funp;
erts_refc_inc(&funp->fe->refc, 2);
#endif
for (i = k; i < j; i++) {
@@ -723,8 +718,8 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
*hp++ = *objp++;
}
- etp->next = erts_global_offheap.externals;
- erts_global_offheap.externals = etp;
+ etp->next = erts_global_offheap.first;
+ erts_global_offheap.first = etp;
erts_refc_inc(&etp->node->refc, 2);
continue;
}
@@ -780,9 +775,9 @@ Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
to_bin->size = real_size;
to_bin->val = from_bin->val;
to_bin->bytes = from_bin->bytes + sub_offset;
- to_bin->next = erts_global_offheap.mso;
- erts_global_offheap.mso = to_bin;
- erts_global_offheap.overhead += to_bin->size / sizeof(Eterm);
+ to_bin->next = erts_global_offheap.first;
+ erts_global_offheap.first = to_bin;
+ OH_OVERHEAD(&erts_global_offheap, to_bin->size / sizeof(Eterm));
res_binary=make_binary(to_bin);
hp += PROC_BIN_SIZE;
}
@@ -915,57 +910,43 @@ copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
break;
case REFC_BINARY_SUBTAG:
{
- ProcBin* pb = (ProcBin *) (hp-1);
- int tari = thing_arityval(val);
-
- sz -= tari;
- while (tari--) {
- *hp++ = *tp++;
- }
+ ProcBin* pb = (ProcBin *) (tp-1);
erts_refc_inc(&pb->val->refc, 2);
- pb->next = off_heap->mso;
- off_heap->mso = pb;
- off_heap->overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
- break;
+ goto off_heap_common;
+
case FUN_SUBTAG:
{
-#ifndef HYBRID /* FIND ME! */
- ErlFunThing* funp = (ErlFunThing *) (hp-1);
-#endif
- int tari = thing_arityval(val);
-
- sz -= tari;
- while (tari--) {
- *hp++ = *tp++;
- }
-#ifndef HYBRID /* FIND ME! */
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ ErlFunThing* funp = (ErlFunThing *) (tp-1);
erts_refc_inc(&funp->fe->refc, 2);
-#endif
}
- break;
+ goto off_heap_common;
+
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
{
- ExternalThing* etp = (ExternalThing *) (hp-1);
+ ExternalThing* etp = (ExternalThing *) (tp-1);
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ off_heap_common:
+ {
+ struct erl_off_heap_header* ohh = (struct erl_off_heap_header*)(hp-1);
int tari = thing_arityval(val);
-
+
sz -= tari;
while (tari--) {
*hp++ = *tp++;
}
- etp->next = off_heap->externals;
- off_heap->externals = etp;
- erts_refc_inc(&etp->node->refc, 2);
+ ohh->next = off_heap->first;
+ off_heap->first = ohh;
}
break;
default:
{
int tari = header_arity(val);
-
+
sz -= tari;
while (tari--) {
*hp++ = *tp++;
@@ -979,3 +960,92 @@ copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
*hpp = hp;
return make_tuple(ptr + offs);
}
+
+/* Move all terms in heap fragments into heap. The terms must be guaranteed to
+ * be contained within the fragments. The source terms are destructed with
+ * move markers.
+ * Typically used to copy a multi-fragmented message (from NIF).
+ */
+void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
+ Eterm* refs, unsigned nrefs)
+{
+ ErlHeapFragment* bp;
+ Eterm* hp_start = *hpp;
+ Eterm* hp_end;
+ Eterm* hp;
+ unsigned i;
+
+ for (bp=first; bp!=NULL; bp=bp->next) {
+ move_one_frag(hpp, bp->mem, bp->used_size, off_heap);
+ OH_OVERHEAD(off_heap, bp->off_heap.overhead);
+ }
+ hp_end = *hpp;
+ for (hp=hp_start; hp<hp_end; ++hp) {
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval = *hp;
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED_BOXED(val)) {
+ ASSERT(is_boxed(val));
+ *hp = val;
+ }
+ break;
+ case TAG_PRIMARY_LIST:
+ ptr = list_val(gval);
+ val = *ptr;
+ if (IS_MOVED_CONS(val)) {
+ *hp = ptr[1];
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (header_is_thing(gval)) {
+ hp += thing_arityval(gval);
+ }
+ break;
+ }
+ }
+ for (i=0; i<nrefs; ++i) {
+ refs[i] = follow_moved(refs[i]);
+ }
+}
+
+static void
+move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap* off_heap)
+{
+ Eterm* ptr = src;
+ Eterm* end = ptr + src_sz;
+ Eterm dummy_ref;
+ Eterm* hp = *hpp;
+
+ while (ptr != end) {
+ Eterm val;
+ ASSERT(ptr < end);
+ val = *ptr;
+ ASSERT(val != ERTS_HOLE_MARKER);
+ if (is_header(val)) {
+ struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
+ ASSERT(ptr + header_arity(val) < end);
+ MOVE_BOXED(ptr, val, hp, &dummy_ref);
+ switch (val & _HEADER_SUBTAG_MASK) {
+ case REFC_BINARY_SUBTAG:
+ case FUN_SUBTAG:
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ hdr->next = off_heap->first;
+ off_heap->first = hdr;
+ break;
+ }
+ }
+ else { /* must be a cons cell */
+ ASSERT(ptr+1 < end);
+ MOVE_CONS(ptr, val, hp, &dummy_ref);
+ ptr += 2;
+ }
+ }
+ *hpp = hp;
+}
+
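
The new move_multi_frags() copies every term out of a chain of heap fragments and leaves move markers behind: each moved cell is overwritten with a forwarding reference, so a second pass (and follow_moved() on the root refs) can patch any pointer that still targets the old location. The sketch below shows the forwarding idea on a toy two-field cell; real ERTS move markers are encoded in the tag bits (IS_MOVED_BOXED / IS_MOVED_CONS), which this illustration replaces with an explicit flag.

#include <stdio.h>

/* Toy cell: either live data or a forwarding marker to its new home. */
struct cell {
    int moved;            /* stand-in for the move-marker tag bit */
    union {
        int value;        /* live payload */
        struct cell *fwd; /* where the payload now lives */
    } u;
};

/* Copy a cell into 'to' and leave a forwarding marker in the source,
 * mirroring what MOVE_BOXED / MOVE_CONS do with tagged markers. */
static struct cell *move_cell(struct cell *src, struct cell *to)
{
    to->moved = 0;
    to->u.value = src->u.value;
    src->moved = 1;
    src->u.fwd = to;
    return to;
}

/* Second pass: follow a possibly-stale reference, cf. follow_moved(). */
static struct cell *follow(struct cell *ref)
{
    return ref->moved ? ref->u.fwd : ref;
}

int main(void)
{
    struct cell fragment[1] = { { 0, { 42 } } };   /* "heap fragment" */
    struct cell heap[1];                           /* destination heap */
    struct cell *ref = &fragment[0];               /* root still points at the fragment */

    move_cell(&fragment[0], &heap[0]);
    ref = follow(ref);                             /* patch the root afterwards */

    printf("value %d now lives %s\n", ref->u.value,
           ref == &heap[0] ? "on the heap" : "in the fragment");
    return 0;
}
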
diff --git a/erts/emulator/beam/decl.h b/erts/emulator/beam/decl.h
deleted file mode 100644
index da1be29d53..0000000000
--- a/erts/emulator/beam/decl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifndef __DECL_H__
-#define __DECL_H__
-
-#if defined(__STDC__) || defined(_MSC_VER)
-#define EXTERN_FUNCTION(t, f, x) extern t f x
-#define FUNCTION(t, f, x) t f x
-#define _DOTS_ ...
-#define _VOID_ void
-#elif defined(__cplusplus)
-#define EXTERN_FUNCTION(f, x) extern "C" { f x }
-#define FUNCTION(t, f, x) t f x
-#define _DOTS_ ...
-#define _VOID_ void
-#else
-#define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
-#define FUNCTION(t, f, x) t f (/*x*/)
-#define _DOTS_
-#define _VOID_
-#endif
-
-/*
-** Example of declarations
-**
-** EXTERN_FUNCTION(void, foo, (int, int, char));
-** FUNCTION(void, bar, (int, char));
-**
-** struct funcs {
-** FUNCTION(int*, (*f1), (int, int));
-** FUNCTION(void, (*f2), (int, char));
-** FUNCTION(void, (*f3), (_VOID_));
-** FUNCTION(int, (*f4), (char*, _DOTS_));
-** };
-**
-*/
-
-#endif
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index e3094404e2..16b6aeac3f 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -228,6 +228,7 @@ int is_node_name_atom(Eterm a)
typedef struct {
DistEntry *dep;
+ Eterm *lhp;
} NetExitsContext;
/*
@@ -253,8 +254,9 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
erts_destroy_monitor(rmon);
}
} else {
- Eterm lhp[3];
+ DeclareTmpHeapNoproc(lhp,3);
Eterm watched;
+ UseTmpHeapNoproc(3);
ASSERT(mon->type == MON_TARGET);
rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
/* ASSERT(rmon != NULL); can happen during process exit */
@@ -271,6 +273,7 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
watched, am_noconnection);
erts_destroy_monitor(rmon);
}
+ UnUseTmpHeapNoproc(3);
}
erts_smp_proc_unlock(rp, rp_locks);
done:
@@ -632,19 +635,27 @@ static void clear_dist_entry(DistEntry *dep)
int
erts_dsig_send_link(ErtsDSigData *dsdp, Eterm local, Eterm remote)
{
- Eterm ctl_heap[4];
+ DeclareTmpHeapNoproc(ctl_heap,4);
Eterm ctl = TUPLE3(&ctl_heap[0], make_small(DOP_LINK), local, remote);
+ int res;
+ UseTmpHeapNoproc(4);
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ UnUseTmpHeapNoproc(4);
+ return res;
}
int
erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
{
- Eterm ctl_heap[4];
+ DeclareTmpHeapNoproc(ctl_heap,4);
Eterm ctl = TUPLE3(&ctl_heap[0], make_small(DOP_UNLINK), local, remote);
+ int res;
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ UseTmpHeapNoproc(4);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ UnUseTmpHeapNoproc(4);
+ return res;
}
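
The dist.c changes mechanically wrap every on-stack "Eterm ctl_heap[N]" in the DeclareTmpHeapNoproc / UseTmpHeapNoproc / UnUseTmpHeapNoproc trio, which also forces each early "return dsig_send(...)" to go through a local res so the unuse call always runs. The sketch below imitates that discipline with simplified stand-in macros that only reserve a local array; the real macros' expansion (and whatever bookkeeping they perform on halfword or debug builds) is not shown here and should be treated as an assumption.

#include <stdio.h>

/* Simplified stand-ins for the real macros; here they only declare a
 * local array and do no bookkeeping (an assumption for this sketch). */
#define DeclareTmpHeapNoproc(name, size) unsigned long name[size]
#define UseTmpHeapNoproc(size)   ((void) 0)
#define UnUseTmpHeapNoproc(size) ((void) 0)

/* Stand-in for dsig_send(): pretend to emit a control term. */
static int fake_send(const unsigned long *ctl, int n)
{
    printf("sending %d-word control term starting with %lu\n", n, ctl[0]);
    return 0;
}

/* Pattern used throughout dist.c after the change: declare, use, build,
 * send into a local 'res', unuse, then return exactly once. */
static int send_link(unsigned long local, unsigned long remote)
{
    DeclareTmpHeapNoproc(ctl_heap, 4);
    int res;

    UseTmpHeapNoproc(4);
    ctl_heap[0] = 3;            /* tuple arity, in place of TUPLE3(...) */
    ctl_heap[1] = 1;            /* DOP_LINK stand-in */
    ctl_heap[2] = local;
    ctl_heap[3] = remote;
    res = fake_send(ctl_heap, 4);
    UnUseTmpHeapNoproc(4);
    return res;
}

int main(void)
{
    return send_link(100, 200);
}
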
@@ -656,7 +667,10 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
Eterm ref, Eterm reason)
{
Eterm ctl;
- Eterm ctl_heap[6];
+ DeclareTmpHeapNoproc(ctl_heap,6);
+ int res;
+
+ UseTmpHeapNoproc(6);
ctl = TUPLE5(&ctl_heap[0], make_small(DOP_MONITOR_P_EXIT),
watched, watcher, ref, reason);
@@ -667,7 +681,9 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
erts_smp_de_links_unlock(dsdp->dep);
#endif
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ UnUseTmpHeapNoproc(6);
+ return res;
}
/* We want to monitor a process (named or unnamed) on another node, we send:
@@ -678,13 +694,17 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
Eterm ref)
{
Eterm ctl;
- Eterm ctl_heap[5];
+ DeclareTmpHeapNoproc(ctl_heap,5);
+ int res;
+ UseTmpHeapNoproc(5);
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_MONITOR_P),
watcher, watched, ref);
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ UnUseTmpHeapNoproc(5);
+ return res;
}
/* A local process monitoring a remote one wants to stop monitoring, either
@@ -696,23 +716,29 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
Eterm watched, Eterm ref, int force)
{
Eterm ctl;
- Eterm ctl_heap[5];
+ DeclareTmpHeapNoproc(ctl_heap,5);
+ int res;
+ UseTmpHeapNoproc(5);
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_DEMONITOR_P),
watcher, watched, ref);
- return dsig_send(dsdp, ctl, THE_NON_VALUE, force);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, force);
+ UnUseTmpHeapNoproc(5);
+ return res;
}
int
erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
{
Eterm ctl;
- Eterm ctl_heap[5];
+ DeclareTmpHeapNoproc(ctl_heap,5);
Eterm token = NIL;
Process *sender = dsdp->proc;
+ int res;
+ UseTmpHeapNoproc(5);
if (SEQ_TRACE_TOKEN(sender) != NIL) {
seq_trace_update_send(sender);
token = SEQ_TRACE_TOKEN(sender);
@@ -724,17 +750,21 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
make_small(DOP_SEND_TT), am_Cookie, remote, token);
else
ctl = TUPLE3(&ctl_heap[0], make_small(DOP_SEND), am_Cookie, remote);
- return dsig_send(dsdp, ctl, message, 0);
+ res = dsig_send(dsdp, ctl, message, 0);
+ UnUseTmpHeapNoproc(5);
+ return res;
}
int
erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
{
Eterm ctl;
- Eterm ctl_heap[6];
+ DeclareTmpHeapNoproc(ctl_heap,6);
Eterm token = NIL;
Process *sender = dsdp->proc;
+ int res;
+ UseTmpHeapNoproc(6);
if (SEQ_TRACE_TOKEN(sender) != NIL) {
seq_trace_update_send(sender);
token = SEQ_TRACE_TOKEN(sender);
@@ -747,7 +777,9 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
else
ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND),
sender->id, am_Cookie, remote_name);
- return dsig_send(dsdp, ctl, message, 0);
+ res = dsig_send(dsdp, ctl, message, 0);
+ UnUseTmpHeapNoproc(6);
+ return res;
}
/* local has died, deliver the exit signal to remote */
@@ -756,8 +788,10 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
Eterm reason, Eterm token)
{
Eterm ctl;
- Eterm ctl_heap[6];
+ DeclareTmpHeapNoproc(ctl_heap,6);
+ int res;
+ UseTmpHeapNoproc(6);
if (token != NIL) {
seq_trace_update_send(dsdp->proc);
seq_trace_output_exit(token, reason, SEQ_TRACE_SEND, remote, local);
@@ -767,38 +801,58 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
ctl = TUPLE4(&ctl_heap[0], make_small(DOP_EXIT), local, remote, reason);
}
/* forced, i.e ignore busy */
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ UnUseTmpHeapNoproc(6);
+ return res;
}
int
erts_dsig_send_exit(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
{
- Eterm ctl_heap[5];
- Eterm ctl = TUPLE4(&ctl_heap[0],
- make_small(DOP_EXIT), local, remote, reason);
+ DeclareTmpHeapNoproc(ctl_heap,5);
+ int res;
+ Eterm ctl;
+
+ UseTmpHeapNoproc(5);
+ ctl = TUPLE4(&ctl_heap[0],
+ make_small(DOP_EXIT), local, remote, reason);
/* forced, i.e ignore busy */
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+ UnUseTmpHeapNoproc(5);
+ return res;
}
int
erts_dsig_send_exit2(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
{
- Eterm ctl_heap[5];
- Eterm ctl = TUPLE4(&ctl_heap[0],
- make_small(DOP_EXIT2), local, remote, reason);
+ DeclareTmpHeapNoproc(ctl_heap,5);
+ int res;
+ Eterm ctl;
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ UseTmpHeapNoproc(5);
+ ctl = TUPLE4(&ctl_heap[0],
+ make_small(DOP_EXIT2), local, remote, reason);
+
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ UnUseTmpHeapNoproc(5);
+ return res;
}
int
erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
{
- Eterm ctl_heap[4];
- Eterm ctl = TUPLE3(&ctl_heap[0],
- make_small(DOP_GROUP_LEADER), leader, remote);
+ DeclareTmpHeapNoproc(ctl_heap,4);
+ int res;
+ Eterm ctl;
+
+ UseTmpHeapNoproc(4);
+ ctl = TUPLE3(&ctl_heap[0],
+ make_small(DOP_GROUP_LEADER), leader, remote);
- return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ res = dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+ UnUseTmpHeapNoproc(4);
+ return res;
}
#if defined(PURIFY)
@@ -832,6 +886,7 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
**
** assert hlen == 0 !!!
*/
+
int erts_net_message(Port *prt,
DistEntry *dep,
byte *hbuf,
@@ -839,6 +894,7 @@ int erts_net_message(Port *prt,
byte *buf,
int len)
{
+#define DIST_CTL_DEFAULT_SIZE 64
ErtsDistExternal ede;
byte *t;
Sint ctl_len;
@@ -850,7 +906,7 @@ int erts_net_message(Port *prt,
Eterm *tuple;
Eterm reason;
Process* rp;
- Eterm ctl_default[64];
+ DeclareTmpHeapNoproc(ctl_default,DIST_CTL_DEFAULT_SIZE);
Eterm* ctl = ctl_default;
ErlOffHeap off_heap;
Eterm* hp;
@@ -864,24 +920,25 @@ int erts_net_message(Port *prt,
int orig_len = len;
#endif
+ UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
/* Thanks to Luke Gorrie */
- off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- off_heap.funs = NULL;
-#endif
+ off_heap.first = NULL;
off_heap.overhead = 0;
- off_heap.externals = NULL;
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (!erts_is_alive)
+ if (!erts_is_alive) {
+ UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
return 0;
+ }
if (hlen > 0)
goto data_error;
- if (len == 0) /* HANDLE TICK !!! */
+ if (len == 0) { /* HANDLE TICK !!! */
+ UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
return 0;
+ }
#ifdef ERTS_RAW_DIST_MSG_DBG
erts_fprintf(stderr, "<< ");
@@ -922,7 +979,8 @@ int erts_net_message(Port *prt,
goto data_error;
}
orig_ctl_len = ctl_len;
- if (ctl_len > sizeof(ctl_default)/sizeof(ctl_default[0])) {
+
+ if (ctl_len > DIST_CTL_DEFAULT_SIZE) {
ctl = erts_alloc(ERTS_ALC_T_DCTRL_BUF, ctl_len * sizeof(Eterm));
}
hp = ctl;
@@ -1202,7 +1260,7 @@ int erts_net_message(Port *prt,
{DOP_MONITOR_P_EXIT, Remote pid or name, Local pid, ref, reason} */
- Eterm lhp[3];
+ DeclareTmpHeapNoproc(lhp,3);
Eterm sysname;
ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_MSG_SEND|ERTS_PROC_LOCK_LINK;
@@ -1237,6 +1295,7 @@ int erts_net_message(Port *prt,
erts_smp_proc_unlock(rp, rp_locks);
break;
}
+ UseTmpHeapNoproc(3);
watched = (is_not_nil(mon->name)
? TUPLE2(&lhp[0], mon->name, sysname)
@@ -1246,6 +1305,7 @@ int erts_net_message(Port *prt,
ref, am_process, watched, reason);
erts_smp_proc_unlock(rp, rp_locks);
erts_destroy_monitor(mon);
+ UnUseTmpHeapNoproc(3);
break;
}
@@ -1370,38 +1430,24 @@ int erts_net_message(Port *prt,
}
}
- if (off_heap.mso) {
- erts_cleanup_mso(off_heap.mso);
- }
- if (off_heap.externals) {
- erts_cleanup_externals(off_heap.externals);
- }
+ erts_cleanup_offheap(&off_heap);
#ifndef HYBRID /* FIND ME! */
- if (off_heap.funs) {
- erts_cleanup_funs(off_heap.funs);
- }
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
#endif
+ UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
ERTS_SMP_CHK_NO_PROC_LOCKS;
return 0;
data_error:
- if (off_heap.mso) {
- erts_cleanup_mso(off_heap.mso);
- }
- if (off_heap.externals) {
- erts_cleanup_externals(off_heap.externals);
- }
+ erts_cleanup_offheap(&off_heap);
#ifndef HYBRID /* FIND ME! */
- if (off_heap.funs) {
- erts_cleanup_funs(off_heap.funs);
- }
if (ctl != ctl_default) {
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
#endif
+ UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
erts_do_exit_port(prt, dep->cid, am_killed);
ERTS_SMP_CHK_NO_PROC_LOCKS;
return -1;
@@ -1554,9 +1600,9 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
*/
data_size >>= (10-4);
-#if defined(ARCH_64)
+#if defined(ARCH_64) && !HALFWORD_HEAP
data_size &= 0x003fffffffffffff;
-#elif defined(ARCH_32)
+#elif defined(ARCH_32) || HALFWORD_HEAP
data_size &= 0x003fffff;
#else
# error "Ohh come on ... !?!"
@@ -1640,9 +1686,9 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
}
-#if defined(ARCH_64)
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define ERTS_PORT_REDS_MASK__ 0x003fffffffffffffL
-#elif defined(ARCH_32)
+#elif defined(ARCH_32) || HALFWORD_HEAP
#define ERTS_PORT_REDS_MASK__ 0x003fffff
#else
# error "Ohh come on ... !?!"
@@ -2547,12 +2593,15 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
int visible = 0;
int hidden = 0;
int this = 0;
- Uint buf[2]; /* For one cons-cell */
+ DeclareTmpHeap(buf,2,BIF_P); /* For one cons-cell */
DistEntry *dep;
Eterm arg_list = BIF_ARG_1;
#ifdef DEBUG
Eterm* endp;
#endif
+
+ UseTmpHeap(2,BIF_P);
+
if (is_atom(BIF_ARG_1))
arg_list = CONS(buf, BIF_ARG_1, NIL);
@@ -2563,13 +2612,14 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
case am_known: visible = hidden = not_connected = this = 1; break;
case am_this: this = 1; break;
case am_connected: visible = hidden = 1; break;
- default: BIF_ERROR(BIF_P, BADARG); break;
+ default: goto error; break;
}
arg_list = CDR(list_val(arg_list));
}
- if (is_not_nil(arg_list))
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_nil(arg_list)) {
+ goto error;
+ }
length = 0;
@@ -2591,7 +2641,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
if (length == 0) {
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
- BIF_RET(result);
+ goto done;
}
hp = HAlloc(BIF_P, 2*length);
@@ -2620,7 +2670,14 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
}
ASSERT(endp == hp);
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+
+done:
+ UnUseTmpHeap(2,BIF_P);
BIF_RET(result);
+
+error:
+ UnUseTmpHeap(2,BIF_P);
+ BIF_ERROR(BIF_P,BADARG);
}
/**********************************************************************/
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index ea1abcaeed..fa19c7fb45 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -287,4 +287,5 @@ extern void erts_kill_dist_connection(DistEntry *dep, Uint32);
extern Uint erts_dist_cache_size(void);
+
#endif
diff --git a/erts/emulator/beam/elib_malloc.c b/erts/emulator/beam/elib_malloc.c
deleted file mode 100644
index b18c48d8d6..0000000000
--- a/erts/emulator/beam/elib_malloc.c
+++ /dev/null
@@ -1,2334 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
-** Description: Faster malloc().
-*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-
-#ifdef ENABLE_ELIB_MALLOC
-
-#undef THREAD_SAFE_ELIB_MALLOC
-#ifdef USE_THREADS
-#define THREAD_SAFE_ELIB_MALLOC 1
-#else
-#define THREAD_SAFE_ELIB_MALLOC 0
-#endif
-
-#include "erl_driver.h"
-#include "erl_threads.h"
-#include "elib_stat.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-/* To avoid clobbering of names becaure of reclaim on VxWorks,
- we undefine all possible malloc, calloc etc. */
-#undef malloc
-#undef calloc
-#undef free
-#undef realloc
-
-#define ELIB_INLINE /* inline all possible functions */
-
-#ifndef ELIB_ALIGN
-#define ELIB_ALIGN sizeof(double)
-#endif
-
-#ifndef ELIB_HEAP_SIZE
-#define ELIB_HEAP_SIZE (64*1024) /* Default 64K */
-#endif
-
-#ifndef ELIB_HEAP_INCREAMENT
-#define ELIB_HEAP_INCREAMENT (32*1024) /* Default 32K */
-#endif
-
-#ifndef ELIB_FAILURE
-#define ELIB_FAILURE abort()
-#endif
-
-#undef ASSERT
-#ifdef DEBUG
-#define ASSERT(B) \
- ((void) ((B) ? 1 : (fprintf(stderr, "%s:%d: Assertion failed: %s\n", \
- __FILE__, __LINE__, #B), abort(), 0)))
-#else
-#define ASSERT(B) ((void) 1)
-#endif
-
-#ifndef USE_RECURSIVE_MALLOC_MUTEX
-#define USE_RECURSIVE_MALLOC_MUTEX 0
-#endif
-
-#if USE_RECURSIVE_MALLOC_MUTEX
-static erts_mtx_t malloc_mutex = ERTS_REC_MTX_INITER;
-#else /* #if USE_RECURSIVE_MALLOC_MUTEX */
-static erts_mtx_t malloc_mutex = ERTS_MTX_INITER;
-#if THREAD_SAFE_ELIB_MALLOC
-static erts_cnd_t malloc_cond = ERTS_CND_INITER;
-#endif
-#endif /* #if USE_RECURSIVE_MALLOC_MUTEX */
-
-typedef unsigned long EWord; /* Assume 32-bit in this implementation */
-typedef unsigned short EHalfWord; /* Assume 16-bit in this implementation */
-typedef unsigned char EByte; /* Assume 8-bit byte */
-
-
-#define elib_printf fprintf
-#define elib_putc fputc
-
-
-#if defined(__STDC__) || defined(__WIN32__)
-#define CONCAT(x,y) x##y
-#else
-#define CONCAT(x,y) x/**/y
-#endif
-
-
-#ifdef ELIB_DEBUG
-#define ELIB_PREFIX(fun, args) CONCAT(elib__,fun) args
-#else
-#define ELIB_PREFIX(fun, args) CONCAT(elib_,fun) args
-#endif
-
-#if defined(__STDC__)
-void *ELIB_PREFIX(malloc, (size_t));
-void *ELIB_PREFIX(calloc, (size_t, size_t));
-void ELIB_PREFIX(cfree, (EWord *));
-void ELIB_PREFIX(free, (EWord *));
-void *ELIB_PREFIX(realloc, (EWord *, size_t));
-void* ELIB_PREFIX(memresize, (EWord *, int));
-void* ELIB_PREFIX(memalign, (int, int));
-void* ELIB_PREFIX(valloc, (int));
-void* ELIB_PREFIX(pvalloc, (int));
-int ELIB_PREFIX(memsize, (EWord *));
-/* Extern interfaces used by VxWorks */
-size_t elib_sizeof(void *);
-void elib_init(EWord *, EWord);
-void elib_force_init(EWord *, EWord);
-#endif
-
-#if defined(__STDC__)
-/* define prototypes for missing */
-void* memalign(size_t a, size_t s);
-void* pvalloc(size_t nb);
-void* memresize(void *p, int nb);
-int memsize(void *p);
-#endif
-
-/* bytes to pages */
-#define PAGES(x) (((x)+page_size-1) / page_size)
-#define PAGE_ALIGN(p) ((char*)((((EWord)(p))+page_size-1)&~(page_size-1)))
-
-/* bytes to words */
-#define WORDS(x) (((x)+sizeof(EWord)-1) / sizeof(EWord))
-
-/* Align an address */
-#define ALIGN(p) ((EWord*)((((EWord)(p)+ELIB_ALIGN-1)&~(ELIB_ALIGN-1))))
-
-/* Calculate the size needed to keep alignment */
-
-#define ALIGN_BSZ(nb) ((nb+sizeof(EWord)+ELIB_ALIGN-1) & ~(ELIB_ALIGN-1))
-
-#define ALIGN_WSZ(nb) WORDS(ALIGN_BSZ(nb))
-
-#define ALIGN_SIZE(nb) (ALIGN_WSZ(nb) - 1)
-
-
-/* PARAMETERS */
-
-#if defined(ELIB_HEAP_SBRK)
-
-#undef PAGE_SIZE
-
-/* Get the system page size (NEED MORE DEFINES HERE) */
-#ifdef _SC_PAGESIZE
-#define PAGE_SIZE sysconf(_SC_PAGESIZE)
-#elif defined(_MSC_VER)
-# ifdef _M_ALPHA
-# define PAGE_SIZE 0x2000
-# else
-# define PAGE_SIZE 0x1000
-# endif
-#else
-#define PAGE_SIZE getpagesize()
-#endif
-
-#define ELIB_EXPAND(need) expand_sbrk(need)
-static FUNCTION(int, expand_sbrk, (EWord));
-
-#elif defined(ELIB_HEAP_FIXED)
-
-#define PAGE_SIZE 1024
-#define ELIB_EXPAND(need) -1
-static EWord fix_heap[WORDS(ELIB_HEAP_SIZE)];
-
-#elif defined(ELIB_HEAP_USER)
-
-#define PAGE_SIZE 1024
-#define ELIB_EXPAND(need) -1
-
-#else
-
-#error "ELIB HEAP TYPE NOT SET"
-
-#endif
-
-
-#define STAT_ALLOCED_BLOCK(SZ) \
-do { \
- tot_allocated += (SZ); \
- if (max_allocated < tot_allocated) \
- max_allocated = tot_allocated; \
-} while (0)
-
-#define STAT_FREED_BLOCK(SZ) \
-do { \
- tot_allocated -= (SZ); \
-} while (0)
-
-static int max_allocated = 0;
-static int tot_allocated = 0;
-static EWord* eheap; /* Align heap start */
-static EWord* eheap_top; /* Point to end of heap */
-EWord page_size = 0; /* Set by elib_init */
-
-#if defined(ELIB_DEBUG) || defined(DEBUG)
-#define ALIGN_CHECK(a, p) \
- do { \
- if ((EWord)(p) & (a-1)) { \
- elib_printf(stderr, \
- "RUNTIME ERROR: bad alignment (0x%lx:%d:%d)\n", \
- (unsigned long) (p), (int) a, __LINE__); \
- ELIB_FAILURE; \
- } \
- } while(0)
-#define ELIB_ALIGN_CHECK(p) ALIGN_CHECK(ELIB_ALIGN, p)
-#else
-#define ALIGN_CHECK(a, p)
-#define ELIB_ALIGN_CHECK(p)
-#endif
-
-#define DYNAMIC 32
-
-/*
-** Free block layout
-** 1 1 30
-** +--------------------------+
-** |F|P| Size |
-** +--------------------------+
-**
-** Where F is the free bit
-** P is the free above bit
-** Size is messured in words and does not include the hdr word
-**
-** If block is on the free list the size is also stored last in the block.
-**
-*/
-typedef struct _free_block FreeBlock;
-struct _free_block {
- EWord hdr;
- Uint flags;
- FreeBlock* parent;
- FreeBlock* left;
- FreeBlock* right;
- EWord v[1];
-};
-
-typedef struct _allocated_block {
- EWord hdr;
- EWord v[5];
-} AllocatedBlock;
-
-
-/*
- * Interface to tree routines.
- */
-typedef Uint Block_t;
-
-static Block_t* get_free_block(Uint);
-static void link_free_block(Block_t *);
-static void unlink_free_block(Block_t *del);
-
-#define FREE_BIT 0x80000000
-#define FREE_ABOVE_BIT 0x40000000
-#define SIZE_MASK 0x3fffffff /* 2^30 words = 2^32 bytes */
-
-/* Work on both FreeBlock and AllocatedBlock */
-#define SIZEOF(p) ((p)->hdr & SIZE_MASK)
-#define IS_FREE(p) (((p)->hdr & FREE_BIT) != 0)
-#define IS_FREE_ABOVE(p) (((p)->hdr & FREE_ABOVE_BIT) != 0)
-
-/* Given that we have a free block above find its size */
-#define SIZEOF_ABOVE(p) *(((EWord*) (p)) - 1)
-
-#define MIN_BLOCK_SIZE (sizeof(FreeBlock)/sizeof(EWord))
-#define MIN_WORD_SIZE (MIN_BLOCK_SIZE-1)
-#define MIN_BYTE_SIZE (sizeof(FreeBlock)-sizeof(EWord))
-
-#define MIN_ALIGN_SIZE ALIGN_SIZE(MIN_BYTE_SIZE)
-
-
-static AllocatedBlock* heap_head = 0;
-static AllocatedBlock* heap_tail = 0;
-static EWord eheap_size = 0;
-
-static int heap_locked;
-
-static int elib_need_init = 1;
-#if THREAD_SAFE_ELIB_MALLOC
-static int elib_is_initing = 0;
-#endif
-
-typedef FreeBlock RBTree_t;
-
-static RBTree_t* root = NULL;
-
-
-static FUNCTION(void, deallocate, (AllocatedBlock*, int));
-
-/*
- * Unlink a free block
- */
-
-#define mark_allocated(p, szp) do { \
- (p)->hdr = ((p)->hdr & FREE_ABOVE_BIT) | (szp); \
- (p)->v[szp] &= ~FREE_ABOVE_BIT; \
- } while(0)
-
-#define mark_free(p, szp) do { \
- (p)->hdr = FREE_BIT | (szp); \
- ((FreeBlock *)p)->v[szp-sizeof(FreeBlock)/sizeof(EWord)+1] = (szp); \
- } while(0)
-
-#if 0
-/* Help macros to log2 */
-#define LOG_1(x) (((x) > 1) ? 1 : 0)
-#define LOG_2(x) (((x) > 3) ? 2+LOG_1((x) >> 2) : LOG_1(x))
-#define LOG_4(x) (((x) > 15) ? 4+LOG_2((x) >> 4) : LOG_2(x))
-#define LOG_8(x) (((x) > 255) ? 8+LOG_4((x)>>8) : LOG_4(x))
-#define LOG_16(x) (((x) > 65535) ? 16+LOG_8((x)>>16) : LOG_8(x))
-
-#define log2(x) LOG_16(x)
-#endif
-
-/*
- * Split a block to be allocated.
- * Mark block as ALLOCATED and clear
- * FREE_ABOVE_BIT on next block
- *
- * nw is SIZE aligned and szp is SIZE aligned + 1
- */
-static void
-split_block(FreeBlock* p, EWord nw, EWord szp)
-{
- EWord szq;
- FreeBlock* q;
-
- szq = szp - nw;
- /* Preserve FREE_ABOVE bit in p->hdr !!! */
-
- if (szq >= MIN_ALIGN_SIZE+1) {
- szq--;
- p->hdr = (p->hdr & FREE_ABOVE_BIT) | nw;
-
- q = (FreeBlock*) (((EWord*) p) + nw + 1);
- mark_free(q, szq);
- link_free_block((Block_t *) q);
-
- q = (FreeBlock*) (((EWord*) q) + szq + 1);
- q->hdr |= FREE_ABOVE_BIT;
- }
- else {
- mark_allocated((AllocatedBlock*)p, szp);
- }
-}
-
-/*
- * Find a free block
- */
-static FreeBlock*
-alloc_block(EWord nw)
-{
- for (;;) {
- FreeBlock* p = (FreeBlock *) get_free_block(nw);
-
- if (p != NULL) {
- return p;
- } else if (ELIB_EXPAND(nw+MIN_WORD_SIZE)) {
- return 0;
- }
- }
-}
-
-
-size_t elib_sizeof(void *p)
-{
- AllocatedBlock* pp;
-
- if (p != 0) {
- pp = (AllocatedBlock*) (((char *)p)-1);
- return SIZEOF(pp);
- }
- return 0;
-}
-
-static void locked_elib_init(EWord*, EWord);
-static void init_elib_malloc(EWord*, EWord);
-
-/*
-** Initialize the elib
-** The addr and sz is only used when compiled with EXPAND_ADDR
-*/
-/* Not static, this is used by VxWorks */
-void elib_init(EWord* addr, EWord sz)
-{
- if (!elib_need_init)
- return;
- erts_mtx_lock(&malloc_mutex);
- locked_elib_init(addr, sz);
- erts_mtx_unlock(&malloc_mutex);
-}
-
-static void locked_elib_init(EWord* addr, EWord sz)
-{
- if (!elib_need_init)
- return;
-
-#if THREAD_SAFE_ELIB_MALLOC
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- {
- static erts_tid_t initer_tid;
-
- if(elib_is_initing) {
-
- if(erts_equal_tids(initer_tid, erts_thr_self()))
- return;
-
- /* Wait until initializing thread is done with initialization */
-
- while(elib_need_init)
- erts_cnd_wait(&malloc_cond, &malloc_mutex);
-
- return;
- }
- else {
- initer_tid = erts_thr_self();
- elib_is_initing = 1;
- }
- }
-#else
- if(elib_is_initing)
- return;
- elib_is_initing = 1;
-#endif
-
-#endif /* #if THREAD_SAFE_ELIB_MALLOC */
-
- /* Do the actual initialization of the malloc implementation */
- init_elib_malloc(addr, sz);
-
-#if THREAD_SAFE_ELIB_MALLOC
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- erts_mtx_unlock(&malloc_mutex);
-#endif
-
- /* Recursive calls to malloc are allowed here... */
- erts_mtx_set_forksafe(&malloc_mutex);
-
-#if !USE_RECURSIVE_MALLOC_MUTEX
- erts_mtx_lock(&malloc_mutex);
- elib_is_initing = 0;
-#endif
-
-#endif /* #if THREAD_SAFE_ELIB_MALLOC */
-
- elib_need_init = 0;
-
-#if THREAD_SAFE_ELIB_MALLOC && !USE_RECURSIVE_MALLOC_MUTEX
- erts_cnd_broadcast(&malloc_cond);
-#endif
-
-}
-
-static void init_elib_malloc(EWord* addr, EWord sz)
-{
- int i;
- FreeBlock* freep;
- EWord tmp_sz;
-#ifdef ELIB_HEAP_SBRK
- char* top;
- EWord n;
-#endif
-
- max_allocated = 0;
- tot_allocated = 0;
- root = NULL;
-
- /* Get the page size (may involve system call!!!) */
- page_size = PAGE_SIZE;
-
-#if defined(ELIB_HEAP_SBRK)
- sz = PAGES(ELIB_HEAP_SIZE)*page_size;
-
- if ((top = (char*) sbrk(0)) == (char*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(0)");
- ELIB_FAILURE;
- }
- n = PAGE_ALIGN(top) - top;
- if ((top = (char*) sbrk(n)) == (char*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(n)");
- ELIB_FAILURE;
- }
- if ((eheap = (EWord*) sbrk(sz)) == (EWord*)-1) {
- elib_printf(stderr, "could not initialize elib, sbrk(SIZE)");
- ELIB_FAILURE;
- }
- sz = WORDS(ELIB_HEAP_SIZE);
-#elif defined(ELIB_HEAP_FIXED)
- eheap = fix_heap;
- sz = WORDS(ELIB_HEAP_SIZE);
-#elif defined(ELIB_HEAP_USER)
- eheap = addr;
- sz = WORDS(sz);
-#else
- return -1;
-#endif
- eheap_size = 0;
-
- /* Make sure that the first word of the heap_head is aligned */
- addr = ALIGN(eheap+1);
- sz -= ((addr - 1) - eheap); /* Subtract unusable size */
- eheap_top = eheap = addr - 1; /* Set new aligned heap start */
-
- eheap_top[sz-1] = 0; /* Heap stop mark */
-
- addr = eheap;
- heap_head = (AllocatedBlock*) addr;
- heap_head->hdr = MIN_ALIGN_SIZE;
- for (i = 0; i < MIN_ALIGN_SIZE; i++)
- heap_head->v[i] = 0;
-
- addr += (MIN_ALIGN_SIZE+1);
- freep = (FreeBlock*) addr;
- tmp_sz = sz - (((MIN_ALIGN_SIZE+1) + MIN_BLOCK_SIZE) + 1 + 1);
- mark_free(freep, tmp_sz);
- link_free_block((Block_t *) freep);
-
- /* No need to align heap tail */
- heap_tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
- heap_tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
- heap_tail->v[0] = 0;
- heap_tail->v[1] = 0;
- heap_tail->v[2] = 0;
-
- eheap_top += sz;
- eheap_size += sz;
-
- heap_locked = 0;
-}
-
-#ifdef ELIB_HEAP_USER
-void elib_force_init(EWord* addr, EWord sz)
-{
- elib_need_init = 1;
- elib_init(addr,sz);
-}
-#endif
-
-#ifdef ELIB_HEAP_SBRK
-
-/*
-** need in number of words (should include head and tail words)
-*/
-static int expand_sbrk(EWord sz)
-{
- EWord* p;
- EWord bytes = sz * sizeof(EWord);
- EWord size;
- AllocatedBlock* tail;
-
- if (bytes < ELIB_HEAP_SIZE)
- size = PAGES(ELIB_HEAP_INCREAMENT)*page_size;
- else
- size = PAGES(bytes)*page_size;
-
- if ((p = (EWord*) sbrk(size)) == ((EWord*) -1))
- return -1;
-
- if (p != eheap_top) {
- elib_printf(stderr, "panic: sbrk moved\n");
- ELIB_FAILURE;
- }
-
- sz = WORDS(size);
-
- /* Set new endof heap marker and a new heap tail */
- eheap_top[sz-1] = 0;
-
- tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
- tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
- tail->v[0] = 0;
- tail->v[1] = 0;
- tail->v[2] = 0;
-
- /* Patch old tail with new appended size */
- heap_tail->hdr = (heap_tail->hdr & FREE_ABOVE_BIT) |
- (MIN_WORD_SIZE+1+(sz-MIN_BLOCK_SIZE-1));
- deallocate(heap_tail, 0);
-
- heap_tail = tail;
-
- eheap_size += sz;
- eheap_top += sz;
-
- return 0;
-}
-
-#endif /* ELIB_HEAP_SBRK */
-
-
-/*
-** Scan heap and check for corrupted heap
-*/
-int elib_check_heap(void)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- if (heap_locked) {
- elib_printf(stderr, "heap is locked no info avaiable\n");
- return 0;
- }
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- if (p->v[sz-1] != sz) {
- elib_printf(stderr, "panic: heap corrupted\r\n");
- ELIB_FAILURE;
- }
- p = (AllocatedBlock*) (p->v + sz);
- if (!IS_FREE_ABOVE(p)) {
- elib_printf(stderr, "panic: heap corrupted\r\n");
- ELIB_FAILURE;
- }
- }
- else
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 1;
-}
-
-/*
-** Load the byte vector pointed to by v of length vsz
-** with a heap image
-** The scale is defined by vsz and the current heap size
-** free = 0, full = 255
-**
-**
-*/
-int elib_heap_map(EByte* v, int vsz)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
- int gsz = eheap_size / vsz; /* The granuality used */
- int fsz = 0;
- int usz = 0;
-
- if (gsz == 0)
- return -1; /* too good reolution */
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- fsz += sz;
- if ((fsz + usz) > gsz) {
- *v++ = (255*usz)/gsz;
- fsz -= (gsz - usz);
- usz = 0;
- while(fsz >= gsz) {
- *v++ = 0;
- fsz -= gsz;
- }
- }
- }
- else {
- usz += sz;
- if ((fsz + usz) > gsz) {
- *v++ = 255 - (255*fsz)/gsz;
- usz -= (gsz - fsz);
- fsz = 0;
- while(usz >= gsz) {
- *v++ = 255;
- usz -= gsz;
- }
- }
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Generate a histogram of free/allocated blocks
-** Count granuality of 10 gives
-** (0-10],(10-100],(100-1000],(1000-10000] ...
-** (0-2], (2-4], (4-8], (8-16], ....
-*/
-static int i_logb(EWord size, int base)
-{
- int lg = 0;
- while(size >= base) {
- size /= base;
- lg++;
- }
- return lg;
-}
-
-int elib_histo(EWord* vf, EWord* va, int vsz, int base)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
- int i;
- int linear;
-
- if ((vsz <= 1) || (vf == 0 && va == 0))
- return -1;
-
- if (base < 0) {
- linear = 1;
- base = -base;
- }
- else
- linear = 0;
-
- if (base <= 1)
- return -1;
-
- if (vf != 0) {
- for (i = 0; i < vsz; i++)
- vf[i] = 0;
- }
- if (va != 0) {
- for (i = 0; i < vsz; i++)
- va[i] = 0;
- }
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- if (vf != 0) {
- int val;
- if (linear)
- val = sz / base;
- else
- val = i_logb(sz, base);
- if (val >= vsz)
- vf[vsz-1]++;
- else
- vf[val]++;
- }
- }
- else {
- if (va != 0) {
- int val;
- if (linear)
- val = sz / base;
- else
- val = i_logb(sz, base);
- if (val >= vsz)
- va[vsz-1]++;
- else
- va[val]++;
- }
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Fill the info structure with actual values
-** Total
-** Allocated
-** Free
-** maxMaxFree
-*/
-void elib_stat(struct elib_stat* info)
-{
- EWord blks = 0;
- EWord sz_free = 0;
- EWord sz_alloc = 0;
- EWord sz_max_free = 0;
- EWord sz_min_used = 0x7fffffff;
- EWord sz;
- EWord num_free = 0;
- AllocatedBlock* p = heap_head;
-
- info->mem_total = eheap_size;
-
- p = (AllocatedBlock*) (p->v + SIZEOF(p));
-
- while((sz = SIZEOF(p)) != 0) {
- blks++;
- if (IS_FREE(p)) {
- if (sz > sz_max_free)
- sz_max_free = sz;
- sz_free += sz;
- ++num_free;
- }
- else {
- if (sz < sz_min_used)
- sz_min_used = sz;
- sz_alloc += sz;
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
- info->mem_blocks = blks;
- info->free_blocks = num_free;
- info->mem_alloc = sz_alloc;
- info->mem_free = sz_free;
- info->min_used = sz_min_used;
- info->max_free = sz_max_free;
- info->mem_max_alloc = max_allocated;
- ASSERT(sz_alloc == tot_allocated);
-}
-
-/*
-** Dump the heap
-*/
-void elib_heap_dump(char* label)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- elib_printf(stderr, "HEAP DUMP (%s)\n", label);
- if (!elib_check_heap())
- return;
-
- while((sz = SIZEOF(p)) != 0) {
- if (IS_FREE(p)) {
- elib_printf(stderr, "%p: FREE, size = %d\n", p, (int) sz);
- }
- else {
- elib_printf(stderr, "%p: USED, size = %d %s\n", p, (int) sz,
- IS_FREE_ABOVE(p)?"(FREE ABOVE)":"");
- }
- p = (AllocatedBlock*) (p->v + sz);
- }
-}
-
-/*
-** Scan heaps and count:
-** free_size, allocated_size, max_free_block
-*/
-void elib_statistics(void* to)
-{
- struct elib_stat info;
- EWord frag;
-
- if (!elib_check_heap())
- return;
-
- elib_stat(&info);
-
- frag = 1000 - ((1000 * info.max_free) / info.mem_free);
-
- elib_printf(to, "Heap Statistics: total(%d), blocks(%d), frag(%d.%d%%)\n",
- info.mem_total, info.mem_blocks,
- (int) frag/10, (int) frag % 10);
-
- elib_printf(to, " allocated(%d), free(%d), "
- "free_blocks(%d)\n",
- info.mem_alloc, info.mem_free,info.free_blocks);
- elib_printf(to, " max_free(%d), min_used(%d)\n",
- info.max_free, info.min_used);
-}
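A small sketch of how the per-mille fragmentation figure printed by elib_statistics() can be reproduced through the public elib_stat() call (the struct mirrors the declaration in elib_stat.h; the helper name and the guard against an empty free list are additions for illustration):

#include <stdio.h>

struct elib_stat {                     /* field order as declared in elib_stat.h */
    int mem_total, mem_blocks, mem_alloc, mem_free;
    int min_used, max_free, free_blocks, mem_max_alloc;
};
extern void elib_stat(struct elib_stat*);

/* 0 per mille = one big free block; values near 1000 = a badly split-up heap. */
void report_fragmentation(void)
{
    struct elib_stat info;
    int frag;

    elib_stat(&info);
    if (info.mem_free == 0)
        return;                        /* nothing free, nothing to measure */
    frag = 1000 - (1000 * info.max_free) / info.mem_free;
    printf("fragmentation: %d.%d%%\n", frag / 10, frag % 10);
}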
-
-/*
-** Allocate at least nb bytes with alignment a
-** Algorithm:
-** 1) Try to locate an exactly matching block via direct index.
-** 2) Try to use a fixed-size block of a larger size.
-** 3) Try to locate a block by searching lists where block sizes
-** X satisfy 2^i < X <= 2^(i+1)
-**
-** Reset memory to zero if clear is true
-*/
-static AllocatedBlock* allocate(EWord nb, EWord a, int clear)
-{
- FreeBlock* p;
- EWord nw;
-
- if (a == ELIB_ALIGN) {
- /*
- * Common case: Called by malloc(), realloc(), calloc().
- */
- nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
-
- if ((p = alloc_block(nw)) == 0)
- return NULL;
- } else {
- /*
- * Special case: Called by memalign().
- */
- EWord asz, szp, szq, tmpsz;
- FreeBlock *q;
-
- if ((p = alloc_block((1+MIN_ALIGN_SIZE)*sizeof(EWord)+a-1+nb)) == 0)
- return NULL;
-
- asz = a - ((EWord) ((AllocatedBlock *)p)->v) % a;
-
- if (asz != a) {
- /* Enforce the alignment requirement by cutting off a free
- block at the beginning of the block. */
-
- if (asz < (1+MIN_ALIGN_SIZE)*sizeof(EWord) && !IS_FREE_ABOVE(p)) {
- /* Not enough room to cut off a free block;
- increase the alignment size */
- asz += (((1+MIN_ALIGN_SIZE)*sizeof(EWord) + a - 1)/a)*a;
- }
-
- szq = ALIGN_SIZE(asz - sizeof(EWord));
- szp = SIZEOF(p) - szq - 1;
-
- q = p;
- p = (FreeBlock*) (((EWord*) q) + szq + 1);
- p->hdr = FREE_ABOVE_BIT | FREE_BIT | szp;
-
- if (IS_FREE_ABOVE(q)) { /* This should not be possible I think,
- but just in case... */
- tmpsz = SIZEOF_ABOVE(q) + 1;
- szq += tmpsz;
- q = (FreeBlock*) (((EWord*) q) - tmpsz);
- unlink_free_block((Block_t *) q);
- q->hdr = (q->hdr & FREE_ABOVE_BIT) | FREE_BIT | szq;
- }
- mark_free(q, szq);
- link_free_block((Block_t *) q);
-
- } /* else already had the correct alignment */
-
- nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
- }
-
- split_block(p, nw, SIZEOF(p));
-
- STAT_ALLOCED_BLOCK(SIZEOF(p));
-
- if (clear) {
- EWord* pp = ((AllocatedBlock*)p)->v;
-
- while(nw--)
- *pp++ = 0;
- }
-
- return (AllocatedBlock*) p;
-}
-
-
-/*
-** Deallocate memory pointed to by p
-** 1. Merge with the block above if it is free
-** 2. Merge with the block below if it is free
-** Link the block to the correct free list
-**
-** p points to the block header!
-*/
-static void deallocate(AllocatedBlock* p, int stat_count)
-{
- FreeBlock* q;
- EWord szq;
- EWord szp;
-
- szp = SIZEOF(p);
-
- if (stat_count)
- STAT_FREED_BLOCK(SIZEOF(p));
-
- if (IS_FREE_ABOVE(p)) {
- szq = SIZEOF_ABOVE(p);
- q = (FreeBlock*) ( ((EWord*) p) - szq - 1);
- unlink_free_block((Block_t *) q);
-
- p = (AllocatedBlock*) q;
- szp += (szq + 1);
- }
- q = (FreeBlock*) (p->v + szp);
- if (IS_FREE(q)) {
- szq = SIZEOF(q);
- unlink_free_block((Block_t *) q);
- szp += (szq + 1);
- }
- else
- q->hdr |= FREE_ABOVE_BIT;
-
- /* The block above p can NEVER be free !!! */
- p->hdr = FREE_BIT | szp;
- p->v[szp-1] = szp;
-
- link_free_block((Block_t *) p);
-}
-
-/*
-** Reallocate memory
-** If preserve is true then data is moved if necessary
-*/
-static AllocatedBlock* reallocate(AllocatedBlock* p, EWord nb, int preserve)
-{
- EWord szp;
- EWord szq;
- EWord sz;
- EWord nw;
- FreeBlock* q;
-
- if (nb < MIN_BYTE_SIZE)
- nw = MIN_ALIGN_SIZE;
- else
- nw = ALIGN_SIZE(nb);
-
- sz = szp = SIZEOF(p);
-
- STAT_FREED_BLOCK(szp);
-
- /* Merge with block below */
- q = (FreeBlock*) (p->v + szp);
- if (IS_FREE(q)) {
- szq = SIZEOF(q);
- unlink_free_block((Block_t *) q);
- szp += (szq + 1);
- }
-
- if (nw <= szp) {
- split_block((FreeBlock *) p, nw, szp);
- STAT_ALLOCED_BLOCK(SIZEOF(p));
- return p;
- }
- else {
- EWord* dp = p->v;
- AllocatedBlock* npp;
-
- if (IS_FREE_ABOVE(p)) {
- szq = SIZEOF_ABOVE(p);
- if (szq + szp + 1 >= nw) {
- q = (FreeBlock*) (((EWord*) p) - szq - 1);
- unlink_free_block((Block_t * )q);
- szp += (szq + 1);
- p = (AllocatedBlock*) q;
-
- if (preserve) {
- EWord* pp = p->v;
- while(sz--)
- *pp++ = *dp++;
- }
- split_block((FreeBlock *) p, nw, szp);
- STAT_ALLOCED_BLOCK(SIZEOF(p));
- return p;
- }
- }
-
- /*
- * Update p so that allocate() and deallocate() works.
- * (Note that allocate() may call expand_sbrk(), which in
- * in turn calls deallocate().)
- */
-
- p->hdr = (p->hdr & FREE_ABOVE_BIT) | szp;
- p->v[szp] &= ~FREE_ABOVE_BIT;
-
- npp = allocate(nb, ELIB_ALIGN, 0);
- if(npp == NULL)
- return NULL;
- if (preserve) {
- EWord* pp = npp->v;
- while(sz--)
- *pp++ = *dp++;
- }
- deallocate(p, 0);
- return npp;
- }
-}
-
-/*
-** What malloc() and friends should do (and return) when the heap is
-** exhausted. [sverkerw]
-*/
-static void* heap_exhausted(void)
-{
- /* Choose behaviour */
-#if 0
- /* Crash-and-burn --- leave a usable corpse (hopefully) */
- abort();
-#endif
- /* The usual ANSI-compliant behaviour */
- return NULL;
-}
-
-/*
-** Allocate size bytes of memory
-*/
-void* ELIB_PREFIX(malloc, (size_t nb))
-{
- void *res;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (nb == 0)
- res = NULL;
- else if ((p = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-
-void* ELIB_PREFIX(calloc, (size_t nelem, size_t size))
-{
- void *res;
- int nb;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if ((nb = nelem * size) == 0)
- res = NULL;
- else if ((p = allocate(nb, ELIB_ALIGN, 1)) != 0) {
- ELIB_ALIGN_CHECK(p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-/*
-** Free memory allocated by malloc
-*/
-
-void ELIB_PREFIX(free, (EWord* p))
-{
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0)
- deallocate((AllocatedBlock*)(p-1), 1);
-
- erts_mtx_unlock(&malloc_mutex);
-}
-
-void ELIB_PREFIX(cfree, (EWord* p))
-{
- ELIB_PREFIX(free, (p));
-}
-
-
-/*
-** Realloc the memory allocated in p to nb number of bytes
-**
-*/
-
-void* ELIB_PREFIX(realloc, (EWord* p, size_t nb))
-{
- void *res = NULL;
- AllocatedBlock* pp;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0) {
- pp = (AllocatedBlock*) (p-1);
- if (nb > 0) {
- if ((pp = reallocate(pp, nb, 1)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- }
- else
- deallocate(pp, 1);
- }
- else if (nb > 0) {
- if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- else
- res = heap_exhausted();
- }
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-/*
-** Resize the memory area pointed to by p with nb number of bytes
-*/
-void* ELIB_PREFIX(memresize, (EWord* p, int nb))
-{
- void *res = NULL;
- AllocatedBlock* pp;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (p != 0) {
- pp = (AllocatedBlock*) (p-1);
- if (nb > 0) {
- if ((pp = reallocate(pp, nb, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- }
- else
- deallocate(pp, 1);
- }
- else if (nb > 0) {
- if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
- ELIB_ALIGN_CHECK(pp->v);
- res = pp->v;
- }
- else
- res = heap_exhausted();
- }
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-
-/* Create aligned memory; a must be a power of 2! */
-
-void* ELIB_PREFIX(memalign, (int a, int nb))
-{
- void *res;
- AllocatedBlock* p;
-
- erts_mtx_lock(&malloc_mutex);
- if (elib_need_init)
- locked_elib_init(NULL,(EWord)0);
-
- if (nb == 0 || a <= 0)
- res = NULL;
- else if ((p = allocate(nb, a, 0)) != 0) {
- ALIGN_CHECK(a, p->v);
- res = p->v;
- }
- else
- res = heap_exhausted();
-
- erts_mtx_unlock(&malloc_mutex);
-
- return res;
-}
-
-void* ELIB_PREFIX(valloc, (int nb))
-{
- return ELIB_PREFIX(memalign, (page_size, nb));
-}
-
-
-void* ELIB_PREFIX(pvalloc, (int nb))
-{
- return ELIB_PREFIX(memalign, (page_size, PAGES(nb)*page_size));
-}
-/* Return memory size for pointer p in bytes */
-
-int ELIB_PREFIX(memsize, (p))
-EWord* p;
-{
- return SIZEOF((AllocatedBlock*)(p-1))*4;
-}
-
-
-/*
-** --------------------------------------------------------------------------
-** DEBUG LIBRARY
-** --------------------------------------------------------------------------
-*/
-
-#ifdef ELIB_DEBUG
-
-#define IN_HEAP(p) (((p) >= (char*) eheap) && (p) < (char*) eheap_top)
-/*
-** ptr_to_block: return the pointer to heap block pointed into by ptr
-** Returns 0 if not pointing into a block
-*/
-
-static EWord* ptr_to_block(char* ptr)
-{
- AllocatedBlock* p = heap_head;
- EWord sz;
-
- while((sz = SIZEOF(p)) != 0) {
- if ((ptr >= (char*) p->v) && (ptr < (char*)(p->v+sz)))
- return p->v;
- p = (AllocatedBlock*) (p->v + sz);
- }
- return 0;
-}
-
-/*
-** Validate a pointer
-** returns:
-** 0 - if it points into an existing heap block
-** 1 - if it points into the heap but not into any block
-** -1 - if it points outside the heap
-**
-*/
-static int check_pointer(char* ptr)
-{
- if (IN_HEAP(ptr)) {
- if (ptr_to_block(ptr) == 0)
- return 1;
- return 0;
- }
- return -1;
-}
-
-/*
-** Validate a memory area
-** returns:
-** 0 - if area is included in a block
-** -1 - if the area overlaps a heap block
-** 1 - if area is outside heap
-*/
-static int check_area(char* ptr, int n)
-{
- if (IN_HEAP(ptr)) {
- if (IN_HEAP(ptr+n-1)) {
- EWord* p1 = ptr_to_block(ptr);
- EWord* p2 = ptr_to_block(ptr+n-1);
-
- if (p1 == p2)
- return (p1 == 0) ? -1 : 0;
- return -1;
- }
- }
- else if (IN_HEAP(ptr+n-1))
- return -1;
- return 1;
-}
-
-/*
-** Check if a block write will overwrite a heap block
-*/
-static void check_write(char* ptr, int n, char* file, int line, char* fun)
-{
- if (check_area(ptr, n) == -1) {
- elib_printf(stderr, "RUNTIME ERROR: %s heap overwrite\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-}
-
-/*
-** Check if a pointer is an allocated object
-*/
-static void check_allocated_block(char* ptr, char* file, int line, char* fun)
-{
- EWord* q;
-
- if (!IN_HEAP(ptr) || ((q=ptr_to_block(ptr)) == 0) || (ptr != (char*) q)) {
- elib_printf(stderr, "RUNTIME ERROR: %s non heap pointer\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-
- if (IS_FREE((AllocatedBlock*)(q-1))) {
- elib_printf(stderr, "RUNTIME ERROR: %s free pointer\n", fun);
- elib_printf(stderr, "File: %s Line: %d\n", file, line);
- ELIB_FAILURE;
- }
-
-}
-
-/*
-** --------------------------------------------------------------------------
-** DEBUG VERSIONS (COMPILED WITH THE ELIB.H)
-** --------------------------------------------------------------------------
-*/
-
-void* elib_dbg_malloc(int n, char* file, int line)
-{
- return elib__malloc(n);
-}
-
-void* elib_dbg_calloc(int n, int s, char* file, int line)
-{
- return elib__calloc(n, s);
-}
-
-void* elib_dbg_realloc(EWord* p, int n, char* file, int line)
-{
- if (p == 0)
- return elib__malloc(n);
- check_allocated_block(p, file, line, "elib_realloc");
- return elib__realloc(p, n);
-}
-
-void elib_dbg_free(EWord* p, char* file, int line)
-{
- if (p == 0)
- return;
- check_allocated_block(p, file, line, "elib_free");
- elib__free(p);
-}
-
-void elib_dbg_cfree(EWord* p, char* file, int line)
-{
- if (p == 0)
- return;
- check_allocated_block(p, file, line, "elib_free");
- elib__cfree(p);
-}
-
-void* elib_dbg_memalign(int a, int n, char* file, int line)
-{
- return elib__memalign(a, n);
-}
-
-void* elib_dbg_valloc(int n, char* file, int line)
-{
- return elib__valloc(n);
-}
-
-void* elib_dbg_pvalloc(int n, char* file, int line)
-{
- return elib__pvalloc(n);
-}
-
-void* elib_dbg_memresize(EWord* p, int n, char* file, int line)
-{
- if (p == 0)
- return elib__malloc(n);
- check_allocated_block(p, file, line, "elib_memresize");
- return elib__memresize(p, n);
-}
-
-int elib_dbg_memsize(void* p, char* file, int line)
-{
- check_allocated_block(p, file, line, "elib_memsize");
- return elib__memsize(p);
-}
-
-/*
-** --------------------------------------------------------------------------
-** LINK TIME FUNCTIONS (NOT COMPILED CALLS)
-** --------------------------------------------------------------------------
-*/
-
-void* elib_malloc(int n)
-{
- return elib_dbg_malloc(n, "", -1);
-}
-
-void* elib_calloc(int n, int s)
-{
- return elib_dbg_calloc(n, s, "", -1);
-}
-
-void* elib_realloc(EWord* p, int n)
-{
- return elib_dbg_realloc(p, n, "", -1);
-}
-
-void elib_free(EWord* p)
-{
- elib_dbg_free(p, "", -1);
-}
-
-void elib_cfree(EWord* p)
-{
- elib_dbg_cfree(p, "", -1);
-}
-
-void* elib_memalign(int a, int n)
-{
- return elib_dbg_memalign(a, n, "", -1);
-}
-
-void* elib_valloc(int n)
-{
- return elib_dbg_valloc(n, "", -1);
-}
-
-void* elib_pvalloc(int n)
-{
- return elib_dbg_pvalloc(n, "", -1);
-}
-
-void* elib_memresize(EWord* p, int n)
-{
- return elib_dbg_memresize(p, n, "", -1);
-}
-
-
-int elib_memsize(EWord* p)
-{
- return elib_dbg_memsize(p, "", -1);
-}
-
-#endif /* ELIB_DEBUG */
-
-/*
-** --------------------------------------------------------------------------
-** Map C library functions to elib
-** --------------------------------------------------------------------------
-*/
-
-#if defined(ELIB_ALLOC_IS_CLIB)
-void* malloc(size_t nb)
-{
- return elib_malloc(nb);
-}
-
-void* calloc(size_t nelem, size_t size)
-{
- return elib_calloc(nelem, size);
-}
-
-
-void free(void *p)
-{
- elib_free(p);
-}
-
-void cfree(void *p)
-{
- elib_cfree(p);
-}
-
-void* realloc(void* p, size_t nb)
-{
- return elib_realloc(p, nb);
-}
-
-
-void* memalign(size_t a, size_t s)
-{
- return elib_memalign(a, s);
-}
-
-void* valloc(size_t nb)
-{
- return elib_valloc(nb);
-}
-
-void* pvalloc(size_t nb)
-{
- return elib_pvalloc(nb);
-}
-
-#if 0
-void* memresize(void* p, int nb)
-{
- return elib_memresize(p, nb);
-}
-
-int memsize(void* p)
-{
- return elib_memsize(p);
-}
-#endif
-#endif /* ELIB_ALLOC_IS_CLIB */
-
-#endif /* ENABLE_ELIB_MALLOC */
-
-void elib_ensure_initialized(void)
-{
-#ifdef ENABLE_ELIB_MALLOC
-#ifndef ELIB_DONT_INITIALIZE
- elib_init(NULL, 0);
-#endif
-#endif
-}
-
-#ifdef ENABLE_ELIB_MALLOC
-/**
- ** A slightly modified version of the "address order best fit" algorithm
- ** used in erl_bestfit_alloc.c. Comments refer to that implementation.
- **/
-
-/*
- * Description: A combined "address order best fit"/"best fit" allocator
- * based on a Red-Black (binary search) Tree. The search,
- * insert, and delete operations are all O(log n) operations
- * on a Red-Black Tree. In the "address order best fit" case
- * n equals number of free blocks, and in the "best fit" case
- * n equals number of distinct sizes of free blocks. Red-Black
- * Trees are described in "Introduction to Algorithms", by
- * Thomas H. Cormen, Charles E. Leiserson, and
- * Ronald L. Rivest.
- *
- * This module is a callback-module for erl_alloc_util.c
- *
- * Author: Rickard Green
- */
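The ordering this tree maintains can be summarized with a small comparator sketch (illustrative only; the typedef stands in for Uint and this function does not exist in the source): free blocks are ranked first by size, then by address, so the leftmost fitting node is the lowest-addressed smallest block that still satisfies a request.

typedef unsigned long word_t;   /* assumed stand-in for Uint */

/* Mirrors the test "blk_sz < size || (blk_sz == size && blk < x)"
 * used in link_free_block(); returns <0, 0 or >0 like strcmp(). */
int addr_order_best_fit_cmp(word_t sz_a, const void *a,
                            word_t sz_b, const void *b)
{
    if (sz_a != sz_b)
        return sz_a < sz_b ? -1 : 1;
    if (a != b)
        return (const char *) a < (const char *) b ? -1 : 1;
    return 0;
}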
-
-#ifdef DEBUG
-#if 0
-#define HARD_DEBUG
-#endif
-#else
-#undef HARD_DEBUG
-#endif
-
-#define SZ_MASK SIZE_MASK
-#define FLG_MASK (~(SZ_MASK))
-
-#define BLK_SZ(B) (*((Block_t *) (B)) & SZ_MASK)
-
-#define TREE_NODE_FLG (((Uint) 1) << 0)
-#define RED_FLG (((Uint) 1) << 1)
-#ifdef HARD_DEBUG
-# define LEFT_VISITED_FLG (((Uint) 1) << 2)
-# define RIGHT_VISITED_FLG (((Uint) 1) << 3)
-#endif
-
-#define IS_TREE_NODE(N) (((RBTree_t *) (N))->flags & TREE_NODE_FLG)
-#define IS_LIST_ELEM(N) (!IS_TREE_NODE(((RBTree_t *) (N))))
-
-#define SET_TREE_NODE(N) (((RBTree_t *) (N))->flags |= TREE_NODE_FLG)
-#define SET_LIST_ELEM(N) (((RBTree_t *) (N))->flags &= ~TREE_NODE_FLG)
-
-#define IS_RED(N) (((RBTree_t *) (N)) \
- && ((RBTree_t *) (N))->flags & RED_FLG)
-#define IS_BLACK(N) (!IS_RED(((RBTree_t *) (N))))
-
-#define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG)
-#define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG)
-
-#undef ASSERT
-#define ASSERT ASSERT_EXPR
-
-#if 1
-#define RBT_ASSERT ASSERT
-#else
-#define RBT_ASSERT(x)
-#endif
-
-
-#ifdef HARD_DEBUG
-static RBTree_t * check_tree(Uint);
-#endif
-
-#ifdef ERTS_INLINE
-# ifndef ERTS_CAN_INLINE
-# define ERTS_CAN_INLINE 1
-# endif
-#else
-# if defined(__GNUC__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline__
-# elif defined(__WIN32__)
-# define ERTS_CAN_INLINE 1
-# define ERTS_INLINE __inline
-# else
-# define ERTS_CAN_INLINE 0
-# define ERTS_INLINE
-# endif
-#endif
-
-/* Types... */
-#if 0
-typedef struct RBTree_t_ RBTree_t;
-
-struct RBTree_t_ {
- Block_t hdr;
- Uint flags;
- RBTree_t *parent;
- RBTree_t *left;
- RBTree_t *right;
-};
-#endif
-
-#if 0
-typedef struct {
- RBTree_t t;
- RBTree_t *next;
-} RBTreeList_t;
-
-#define LIST_NEXT(N) (((RBTreeList_t *) (N))->next)
-#define LIST_PREV(N) (((RBTreeList_t *) (N))->t.parent)
-#endif
-
-#ifdef DEBUG
-
-/* Destroy all tree fields */
-#define DESTROY_TREE_NODE(N) \
- sys_memset((void *) (((Block_t *) (N)) + 1), \
- 0xff, \
- (sizeof(RBTree_t) - sizeof(Block_t)))
-
-/* Destroy all tree and list fields */
-#define DESTROY_LIST_ELEM(N) \
- sys_memset((void *) (((Block_t *) (N)) + 1), \
- 0xff, \
- (sizeof(RBTreeList_t) - sizeof(Block_t)))
-
-#else
-
-#define DESTROY_TREE_NODE(N)
-#define DESTROY_LIST_ELEM(N)
-
-#endif
-
-
-/*
- * Red-Black Tree operations needed
- */
-
-static ERTS_INLINE void
-left_rotate(RBTree_t **root, RBTree_t *x)
-{
- RBTree_t *y = x->right;
- x->right = y->left;
- if (y->left)
- y->left->parent = x;
- y->parent = x->parent;
- if (!y->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->left)
- x->parent->left = y;
- else {
- RBT_ASSERT(x == x->parent->right);
- x->parent->right = y;
- }
- y->left = x;
- x->parent = y;
-}
-
-static ERTS_INLINE void
-right_rotate(RBTree_t **root, RBTree_t *x)
-{
- RBTree_t *y = x->left;
- x->left = y->right;
- if (y->right)
- y->right->parent = x;
- y->parent = x->parent;
- if (!y->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->right)
- x->parent->right = y;
- else {
- RBT_ASSERT(x == x->parent->left);
- x->parent->left = y;
- }
- y->right = x;
- x->parent = y;
-}
-
-
-/*
- * Replace node x with node y
- * NOTE: block header of y is not changed
- */
-static ERTS_INLINE void
-replace(RBTree_t **root, RBTree_t *x, RBTree_t *y)
-{
-
- if (!x->parent) {
- RBT_ASSERT(*root == x);
- *root = y;
- }
- else if (x == x->parent->left)
- x->parent->left = y;
- else {
- RBT_ASSERT(x == x->parent->right);
- x->parent->right = y;
- }
- if (x->left) {
- RBT_ASSERT(x->left->parent == x);
- x->left->parent = y;
- }
- if (x->right) {
- RBT_ASSERT(x->right->parent == x);
- x->right->parent = y;
- }
-
- y->flags = x->flags;
- y->parent = x->parent;
- y->right = x->right;
- y->left = x->left;
-
- DESTROY_TREE_NODE(x);
-
-}
-
-static void
-tree_insert_fixup(RBTree_t *blk)
-{
- RBTree_t *x = blk, *y;
-
- /*
- * Rearrange the tree so that it satisfies the Red-Black Tree properties
- */
-
- RBT_ASSERT(x != root && IS_RED(x->parent));
- do {
-
- /*
- * x and its parent are both red. Move the red pair up the tree
- * until we get to the root or until we can separate them.
- */
-
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(x->parent->parent);
-
- if (x->parent == x->parent->parent->left) {
- y = x->parent->parent->right;
- if (IS_RED(y)) {
- SET_BLACK(y);
- x = x->parent;
- SET_BLACK(x);
- x = x->parent;
- SET_RED(x);
- }
- else {
-
- if (x == x->parent->right) {
- x = x->parent;
- left_rotate(&root, x);
- }
-
- RBT_ASSERT(x == x->parent->parent->left->left);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(IS_BLACK(y));
-
- SET_BLACK(x->parent);
- SET_RED(x->parent->parent);
- right_rotate(&root, x->parent->parent);
-
- RBT_ASSERT(x == x->parent->left);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent->right));
- RBT_ASSERT(IS_BLACK(x->parent));
- break;
- }
- }
- else {
- RBT_ASSERT(x->parent == x->parent->parent->right);
- y = x->parent->parent->left;
- if (IS_RED(y)) {
- SET_BLACK(y);
- x = x->parent;
- SET_BLACK(x);
- x = x->parent;
- SET_RED(x);
- }
- else {
-
- if (x == x->parent->left) {
- x = x->parent;
- right_rotate(&root, x);
- }
-
- RBT_ASSERT(x == x->parent->parent->right->right);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent));
- RBT_ASSERT(IS_BLACK(x->parent->parent));
- RBT_ASSERT(IS_BLACK(y));
-
- SET_BLACK(x->parent);
- SET_RED(x->parent->parent);
- left_rotate(&root, x->parent->parent);
-
- RBT_ASSERT(x == x->parent->right);
- RBT_ASSERT(IS_RED(x));
- RBT_ASSERT(IS_RED(x->parent->left));
- RBT_ASSERT(IS_BLACK(x->parent));
- break;
- }
- }
- } while (x != root && IS_RED(x->parent));
-
- SET_BLACK(root);
-}
-
-static void
-unlink_free_block(Block_t *del)
-{
- Uint spliced_is_black;
- RBTree_t *x, *y, *z = (RBTree_t *) del;
- RBTree_t null_x; /* null_x is used to get the fixup started when we
- splice out a node without children. */
-
- null_x.parent = NULL;
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-
- /* Remove node from tree... */
-
- /* Find node to splice out */
- if (!z->left || !z->right)
- y = z;
- else
- /* Set y to z's successor */
- for(y = z->right; y->left; y = y->left);
- /* splice out y */
- x = y->left ? y->left : y->right;
- spliced_is_black = IS_BLACK(y);
- if (x) {
- x->parent = y->parent;
- }
- else if (!x && spliced_is_black) {
- x = &null_x;
- x->flags = 0;
- SET_BLACK(x);
- x->right = x->left = NULL;
- x->parent = y->parent;
- y->left = x;
- }
-
- if (!y->parent) {
- RBT_ASSERT(root == y);
- root = x;
- }
- else if (y == y->parent->left)
- y->parent->left = x;
- else {
- RBT_ASSERT(y == y->parent->right);
- y->parent->right = x;
- }
- if (y != z) {
- /* We spliced out the successor of z; replace z by the successor */
- replace(&root, z, y);
- }
-
- if (spliced_is_black) {
- /* We removed a black node which makes the resulting tree
- violate the Red-Black Tree properties. Fixup tree... */
-
- while (IS_BLACK(x) && x->parent) {
-
- /*
- * x has an "extra black" which we move up the tree
- * until we reach the root or until we can get rid of it.
- *
- * y is the sibling of x
- */
-
- if (x == x->parent->left) {
- y = x->parent->right;
- RBT_ASSERT(y);
- if (IS_RED(y)) {
- RBT_ASSERT(y->right);
- RBT_ASSERT(y->left);
- SET_BLACK(y);
- RBT_ASSERT(IS_BLACK(x->parent));
- SET_RED(x->parent);
- left_rotate(&root, x->parent);
- y = x->parent->right;
- }
- RBT_ASSERT(y);
- RBT_ASSERT(IS_BLACK(y));
- if (IS_BLACK(y->left) && IS_BLACK(y->right)) {
- SET_RED(y);
- x = x->parent;
- }
- else {
- if (IS_BLACK(y->right)) {
- SET_BLACK(y->left);
- SET_RED(y);
- right_rotate(&root, y);
- y = x->parent->right;
- }
- RBT_ASSERT(y);
- if (IS_RED(x->parent)) {
-
- SET_BLACK(x->parent);
- SET_RED(y);
- }
- RBT_ASSERT(y->right);
- SET_BLACK(y->right);
- left_rotate(&root, x->parent);
- x = root;
- break;
- }
- }
- else {
- RBT_ASSERT(x == x->parent->right);
- y = x->parent->left;
- RBT_ASSERT(y);
- if (IS_RED(y)) {
- RBT_ASSERT(y->right);
- RBT_ASSERT(y->left);
- SET_BLACK(y);
- RBT_ASSERT(IS_BLACK(x->parent));
- SET_RED(x->parent);
- right_rotate(&root, x->parent);
- y = x->parent->left;
- }
- RBT_ASSERT(y);
- RBT_ASSERT(IS_BLACK(y));
- if (IS_BLACK(y->right) && IS_BLACK(y->left)) {
- SET_RED(y);
- x = x->parent;
- }
- else {
- if (IS_BLACK(y->left)) {
- SET_BLACK(y->right);
- SET_RED(y);
- left_rotate(&root, y);
- y = x->parent->left;
- }
- RBT_ASSERT(y);
- if (IS_RED(x->parent)) {
- SET_BLACK(x->parent);
- SET_RED(y);
- }
- RBT_ASSERT(y->left);
- SET_BLACK(y->left);
- right_rotate(&root, x->parent);
- x = root;
- break;
- }
- }
- }
- SET_BLACK(x);
-
- if (null_x.parent) {
- if (null_x.parent->left == &null_x)
- null_x.parent->left = NULL;
- else {
- RBT_ASSERT(null_x.parent->right == &null_x);
- null_x.parent->right = NULL;
- }
- RBT_ASSERT(!null_x.left);
- RBT_ASSERT(!null_x.right);
- }
- else if (root == &null_x) {
- root = NULL;
- RBT_ASSERT(!null_x.left);
- RBT_ASSERT(!null_x.right);
- }
- }
-
-
- DESTROY_TREE_NODE(del);
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-
-}
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * "Address order best fit" specific callbacks. *
-\* */
-
-static void
-link_free_block(Block_t *block)
-{
- RBTree_t *blk = (RBTree_t *) block;
- Uint blk_sz = BLK_SZ(blk);
-
- blk->flags = 0;
- blk->left = NULL;
- blk->right = NULL;
-
- if (!root) {
- blk->parent = NULL;
- SET_BLACK(blk);
- root = blk;
- } else {
- RBTree_t *x = root;
- while (1) {
- Uint size;
-
- size = BLK_SZ(x);
-
- if (blk_sz < size || (blk_sz == size && blk < x)) {
- if (!x->left) {
- blk->parent = x;
- x->left = blk;
- break;
- }
- x = x->left;
- }
- else {
- if (!x->right) {
- blk->parent = x;
- x->right = blk;
- break;
- }
- x = x->right;
- }
-
- }
-
- /* Insert block into size tree */
- RBT_ASSERT(blk->parent);
-
- SET_RED(blk);
- if (IS_RED(blk->parent)) {
- tree_insert_fixup(blk);
- }
- }
-
-#ifdef HARD_DEBUG
- check_tree(0);
-#endif
-}
-
-
-static Block_t *
-get_free_block(Uint size)
-{
- RBTree_t *x = root;
- RBTree_t *blk = NULL;
- Uint blk_sz;
-
- while (x) {
- blk_sz = BLK_SZ(x);
- if (blk_sz < size) {
- x = x->right;
- }
- else {
- blk = x;
- x = x->left;
- }
- }
-
- if (!blk)
- return NULL;
-
-#ifdef HARD_DEBUG
- ASSERT(blk == check_tree(size));
-#endif
-
- unlink_free_block((Block_t *) blk);
-
- return (Block_t *) blk;
-}
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Debug functions *
-\* */
-
-
-#ifdef HARD_DEBUG
-
-#define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG)
-#define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG)
-
-#define SET_LEFT_VISITED(FB) ((FB)->flags |= LEFT_VISITED_FLG)
-#define SET_RIGHT_VISITED(FB) ((FB)->flags |= RIGHT_VISITED_FLG)
-
-#define UNSET_LEFT_VISITED(FB) ((FB)->flags &= ~LEFT_VISITED_FLG)
-#define UNSET_RIGHT_VISITED(FB) ((FB)->flags &= ~RIGHT_VISITED_FLG)
-
-
-#if 0
-# define PRINT_TREE
-#else
-# undef PRINT_TREE
-#endif
-
-#ifdef PRINT_TREE
-static void print_tree(void);
-#endif
-
-/*
- * Checks that the order between parents and children is correct,
- * and that the Red-Black Tree properties are satisfied. If size > 0,
- * check_tree() returns a node that satisfies "best fit" or
- * "address order best fit", respectively.
- *
- * The Red-Black Tree properties are:
- * 1. Every node is either red or black.
- * 2. Every leaf (NIL) is black.
- * 3. If a node is red, then both its children are black.
- * 4. Every simple path from a node to a descendant leaf
- * contains the same number of black nodes.
- */
-
-static RBTree_t *
-check_tree(Uint size)
-{
- RBTree_t *res = NULL;
- Sint blacks;
- Sint curr_blacks;
- RBTree_t *x;
-
-#ifdef PRINT_TREE
- print_tree();
-#endif
-
- if (!root)
- return res;
-
- x = root;
- ASSERT(IS_BLACK(x));
- ASSERT(!x->parent);
- curr_blacks = 1;
- blacks = -1;
-
- while (x) {
- if (!IS_LEFT_VISITED(x)) {
- SET_LEFT_VISITED(x);
- if (x->left) {
- x = x->left;
- if (IS_BLACK(x))
- curr_blacks++;
- continue;
- }
- else {
- if (blacks < 0)
- blacks = curr_blacks;
- ASSERT(blacks == curr_blacks);
- }
- }
-
- if (!IS_RIGHT_VISITED(x)) {
- SET_RIGHT_VISITED(x);
- if (x->right) {
- x = x->right;
- if (IS_BLACK(x))
- curr_blacks++;
- continue;
- }
- else {
- if (blacks < 0)
- blacks = curr_blacks;
- ASSERT(blacks == curr_blacks);
- }
- }
-
-
- if (IS_RED(x)) {
- ASSERT(IS_BLACK(x->right));
- ASSERT(IS_BLACK(x->left));
- }
-
- ASSERT(x->parent || x == root);
-
- if (x->left) {
- ASSERT(x->left->parent == x);
- ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
- || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
- }
-
- if (x->right) {
- ASSERT(x->right->parent == x);
- ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
- || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
- }
-
- if (size && BLK_SZ(x) >= size) {
- if (!res
- || BLK_SZ(x) < BLK_SZ(res)
- || (BLK_SZ(x) == BLK_SZ(res) && x < res))
- res = x;
- }
-
- UNSET_LEFT_VISITED(x);
- UNSET_RIGHT_VISITED(x);
- if (IS_BLACK(x))
- curr_blacks--;
- x = x->parent;
-
- }
-
- ASSERT(curr_blacks == 0);
-
- UNSET_LEFT_VISITED(root);
- UNSET_RIGHT_VISITED(root);
-
- return res;
-
-}
-
-
-#ifdef PRINT_TREE
-#define INDENT_STEP 2
-
-#include <stdio.h>
-
-static void
-print_tree_aux(RBTree_t *x, int indent)
-{
- int i;
-
- if (!x) {
- for (i = 0; i < indent; i++) {
- putc(' ', stderr);
- }
- fprintf(stderr, "BLACK: nil\r\n");
- }
- else {
- print_tree_aux(x->right, indent + INDENT_STEP);
- for (i = 0; i < indent; i++) {
- putc(' ', stderr);
- }
- fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n",
- IS_BLACK(x) ? "BLACK" : "RED",
- BLK_SZ(x),
- (Uint) x);
- print_tree_aux(x->left, indent + INDENT_STEP);
- }
-}
-
-
-static void
-print_tree(void)
-{
- fprintf(stderr, " --- Size-Adress tree begin ---\r\n");
- print_tree_aux(root, 0);
- fprintf(stderr, " --- Size-Adress tree end ---\r\n");
-}
-
-#endif
-
-#endif
-
-#endif /* ENABLE_ELIB_MALLOC */
diff --git a/erts/emulator/beam/elib_stat.h b/erts/emulator/beam/elib_stat.h
deleted file mode 100644
index d8c7f31737..0000000000
--- a/erts/emulator/beam/elib_stat.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
-** Interface to elib statistics
-**
-*/
-#ifndef __ELIB_STAT_H__
-#define __ELIB_STAT_H__
-
-struct elib_stat {
- int mem_total; /* Number of heap words */
- int mem_blocks; /* Number of blocks */
- int mem_alloc; /* Number of words in use */
- int mem_free; /* Number of words free */
- int min_used; /* Size of the smallest block used */
- int max_free; /* Size of the largest free block */
- int free_blocks; /* Number of fragments in free list */
- int mem_max_alloc;/* Max number of words in use */
-};
-
-EXTERN_FUNCTION(void, elib_statistics, (void*));
-EXTERN_FUNCTION(int, elib_check_heap, (_VOID_));
-EXTERN_FUNCTION(void, elib_heap_dump, (char*));
-EXTERN_FUNCTION(void, elib_stat, (struct elib_stat*));
-EXTERN_FUNCTION(int, elib_heap_map, (unsigned char*, int));
-EXTERN_FUNCTION(int, elib_histo, (unsigned long*, unsigned long*, int, int));
-
-#endif
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index b853ec0f01..07b4167b27 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -38,9 +38,6 @@
#include "erl_bits.h"
#include "erl_instrument.h"
#include "erl_mseg.h"
-#ifdef ELIB_ALLOC_IS_CLIB
-#include "erl_version.h"
-#endif
#include "erl_monitors.h"
#include "erl_bif_timer.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
@@ -64,8 +61,15 @@
#ifdef DEBUG
static Uint install_debug_functions(void);
+#if 0
+#define HARD_DEBUG
+#ifdef __GNUC__
+#warning "* * * * * * * * * * * * * *"
+#warning "* HARD DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * *"
+#endif
+#endif
#endif
-extern void elib_ensure_initialized(void);
ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
@@ -220,7 +224,7 @@ set_default_ll_alloc_opts(struct au_init *ip)
ip->init.util.ramv = 0;
ip->init.util.mmsbc = 0;
ip->init.util.mmmbc = 0;
- ip->init.util.sbct = ~((Uint) 0);
+ ip->init.util.sbct = ~((UWord) 0);
ip->init.util.name_prefix = "ll_";
ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
#ifndef SMALL_MEMORY
@@ -391,10 +395,14 @@ refuse_af_strategy(struct au_init *init)
static void init_thr_ix(int static_ixs);
+#ifdef HARD_DEBUG
+static void hdbg_init(void);
+#endif
+
void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
- Uint extra_block_size = 0;
+ UWord extra_block_size = 0;
int i;
erts_alc_hndl_args_init_t init = {
0,
@@ -406,6 +414,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
ERTS_DEFAULT_ALCU_INIT
};
+#ifdef HARD_DEBUG
+ hdbg_init();
+#endif
+
erts_sys_alloc_init();
init_thr_ix(erts_no_schedulers);
erts_init_utils_mem();
@@ -542,7 +554,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
-
if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
erts_fix_core_allocator_ix = ERTS_FIX_CORE_ALLOCATOR;
else
@@ -719,8 +730,8 @@ start_au_allocator(ErtsAlcType_t alctr_n,
init->init.util.name_prefix);
tspec->allctr = (Allctr_t **) states;
states = ((char *) states) + sizeof(Allctr_t *) * (tspec->size + 1);
- states = ((((Uint) states) & ERTS_CACHE_LINE_MASK)
- ? (void *) ((((Uint) states) & ~ERTS_CACHE_LINE_MASK)
+ states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
+ ? (void *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE)
: (void *) states);
tspec->allctr[0] = init->thr_spec > 0 ? (Allctr_t *) state : (Allctr_t *) NULL;
@@ -1605,12 +1616,13 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
}
else {
- Eterm tmp_heap[2];
+ DeclareTmpHeapNoproc(tmp_heap,2);
Eterm wanted_list;
if (is_nil(earg))
return NIL;
+ UseTmpHeapNoproc(2);
if (is_not_atom(earg))
wanted_list = earg;
else {
@@ -1690,15 +1702,18 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
atoms[length] = am_maximum;
uintps[length++] = &size.maximum;
}
- }
- else
+ } else {
+ UnUseTmpHeapNoproc(2);
return am_badarg;
+ }
break;
default:
+ UnUseTmpHeapNoproc(2);
return am_badarg;
}
wanted_list = CDR(list_val(wanted_list));
}
+ UnUseTmpHeapNoproc(2);
if (is_not_nil(wanted_list))
return am_badarg;
}
@@ -2285,8 +2300,8 @@ erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
SysAllocStat sas;
Eterm opts_am;
Eterm opts;
- Eterm as[4];
- Eterm ts[4];
+ Eterm as[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
+ Eterm ts[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
int l;
if (only_sz)
@@ -2302,13 +2317,8 @@ erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
l = 0;
as[l] = am_atom_put("e", 1);
ts[l++] = am_true;
-#ifdef ELIB_ALLOC_IS_CLIB
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("elib", 4);
-#else
as[l] = am_atom_put("m", 1);
ts[l++] = am_atom_put("libc", 4);
-#endif
if(sas.trim_threshold >= 0) {
as[l] = am_atom_put("tt", 2);
ts[l++] = erts_bld_uint(hpp, szp,
@@ -2462,11 +2472,7 @@ erts_allocator_info(int to, void *arg)
case ERTS_ALC_A_SYSTEM: {
SysAllocStat sas;
erts_print(to, arg, "option e: true\n");
-#ifdef ELIB_ALLOC_IS_CLIB
- erts_print(to, arg, "option m: elib\n");
-#else
erts_print(to, arg, "option m: libc\n");
-#endif
sys_alloc_stat(&sas);
if(sas.trim_threshold >= 0)
erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
@@ -2570,13 +2576,8 @@ erts_allocator_options(void *proc)
switch (a) {
case ERTS_ALC_A_SYSTEM:
-#ifdef ELIB_ALLOC_IS_CLIB
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("elib", 4);
-#else
as[l] = am_atom_put("m", 1);
ts[l++] = am_atom_put("libc", 4);
-#endif
if(sas.trim_threshold >= 0) {
as[l] = am_atom_put("tt", 2);
ts[l++] = erts_bld_uint(hpp, szp,
@@ -2647,23 +2648,7 @@ erts_allocator_options(void *proc)
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
-#if defined(ELIB_ALLOC_IS_CLIB)
- {
- Eterm version;
- int i;
- int ver[5];
- i = sscanf(ERLANG_VERSION,
- "%d.%d.%d.%d.%d",
- &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]);
-
- version = NIL;
- for(i--; i >= 0; i--)
- version = erts_bld_cons(hpp, szp, make_small(ver[i]), version);
-
- res = erts_bld_tuple(hpp, szp, 4,
- am_elib_malloc, version, features, settings);
- }
-#elif defined(__GLIBC__)
+#if defined(__GLIBC__)
{
Eterm AM_glibc = am_atom_put("glibc", 5);
Eterm version;
@@ -2859,12 +2844,10 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0a:
- if (ethr_mutex_lock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_lock((ethr_mutex *) a1);
break;
case 0xf0b:
- if (ethr_mutex_unlock((ethr_mutex *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_mutex_unlock((ethr_mutex *) a1);
break;
case 0xf0c: {
ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
@@ -2880,31 +2863,21 @@ unsigned long erts_alc_test(unsigned long op,
break;
}
case 0xf0e:
- if (ethr_cond_broadcast((ethr_cond *) a1) != 0)
- ERTS_ALC_TEST_ABORT;
+ ethr_cond_broadcast((ethr_cond *) a1);
break;
case 0xf0f: {
int res;
do {
res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
} while (res == EINTR);
- if (res != 0)
- ERTS_ALC_TEST_ABORT;
break;
}
case 0xf10: {
ethr_tid *tid = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_tid));
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (erts_lcnt_thr_create(tid,
- (void * (*)(void *)) a1,
- (void *) a2,
- NULL) != 0)
-#else
if (ethr_thr_create(tid,
(void * (*)(void *)) a1,
(void *) a2,
NULL) != 0)
-#endif
ERTS_ALC_TEST_ABORT;
return (unsigned long) tid;
}
@@ -2943,10 +2916,13 @@ unsigned long erts_alc_test(unsigned long op,
#undef PRINT_OPS
#endif
+#ifdef HARD_DEBUG
+#define FENCE_SZ (4*sizeof(UWord))
+#else
+#define FENCE_SZ (3*sizeof(UWord))
+#endif
-#define FENCE_SZ (3*sizeof(Uint))
-
-#ifdef ARCH_64
+#if defined(ARCH_64)
#define FENCE_PATTERN 0xABCDEF97ABCDEF97
#else
#define FENCE_PATTERN 0xABCDEF97
@@ -2956,7 +2932,7 @@ unsigned long erts_alc_test(unsigned long op,
#define TYPE_PATTERN_SHIFT 16
#define FIXED_FENCE_PATTERN_MASK \
- (~((Uint) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
+ (~((UWord) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
#define FIXED_FENCE_PATTERN \
(FENCE_PATTERN & FIXED_FENCE_PATTERN_MASK)
@@ -2966,22 +2942,170 @@ unsigned long erts_alc_test(unsigned long op,
#define GET_TYPE_OF_PATTERN(P) \
(((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
+#ifdef HARD_DEBUG
+
+#define ERL_ALC_HDBG_MAX_MBLK 100000
+#define ERTS_ALC_O_CHECK -1
+
+typedef struct hdbg_mblk_ hdbg_mblk;
+struct hdbg_mblk_ {
+ hdbg_mblk *next;
+ hdbg_mblk *prev;
+ void *p;
+ Uint s;
+ ErtsAlcType_t n;
+};
+
+static hdbg_mblk hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK];
+
+static hdbg_mblk *free_hdbg_mblks;
+static hdbg_mblk *used_hdbg_mblks;
+static erts_mtx_t hdbg_mblk_mtx;
+
+static void
+hdbg_init(void)
+{
+ int i;
+ for (i = 0; i < ERL_ALC_HDBG_MAX_MBLK-1; i++)
+ hdbg_mblks[i].next = &hdbg_mblks[i+1];
+ hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
+ free_hdbg_mblks = &hdbg_mblks[0];
+ used_hdbg_mblks = NULL;
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+}
+
+static void *check_memory_fence(void *ptr,
+ Uint *size,
+ ErtsAlcType_t n,
+ int func);
+void erts_hdbg_chk_blks(void);
+
+void
+erts_hdbg_chk_blks(void)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ for (mblk = used_hdbg_mblks; mblk; mblk = mblk->next) {
+ Uint sz;
+ check_memory_fence(mblk->p, &sz, mblk->n, ERTS_ALC_O_CHECK);
+ ASSERT(sz == mblk->s);
+ }
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+static hdbg_mblk *
+hdbg_alloc(void *p, Uint s, ErtsAlcType_t n)
+{
+ hdbg_mblk *mblk;
+
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ mblk = free_hdbg_mblks;
+ if (!mblk) {
+ erts_fprintf(stderr,
+ "Ran out of debug blocks; please increase "
+ "ERL_ALC_HDBG_MAX_MBLK=%d and recompile!\n",
+ ERL_ALC_HDBG_MAX_MBLK);
+ abort();
+ }
+ free_hdbg_mblks = mblk->next;
+
+ mblk->p = p;
+ mblk->s = s;
+ mblk->n = n;
+
+ mblk->next = used_hdbg_mblks;
+ mblk->prev = NULL;
+ if (used_hdbg_mblks)
+ used_hdbg_mblks->prev = mblk;
+ used_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+ return (void *) mblk;
+}
+
+static void
+hdbg_free(hdbg_mblk *mblk)
+{
+ erts_mtx_lock(&hdbg_mblk_mtx);
+ if (mblk->next)
+ mblk->next->prev = mblk->prev;
+ if (mblk->prev)
+ mblk->prev->next = mblk->next;
+ else
+ used_hdbg_mblks = mblk->next;
+
+ mblk->next = free_hdbg_mblks;
+ free_hdbg_mblks = mblk;
+ erts_mtx_unlock(&hdbg_mblk_mtx);
+}
+
+#endif
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+static void *check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func);
+
+void check_allocated_block( Uint type, void *blk)
+{
+ Uint dummy;
+ check_memory_fence(blk, &dummy, ERTS_ALC_T2N(type), ERTS_ALC_O_FREE);
+}
+
+void check_allocators(void)
+{
+ int i;
+ if (!erts_initialized)
+ return;
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; ++i) {
+ if (erts_allctrs_info[i].alloc_util) {
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
+ Allctr_t *allctr = real_af->extra;
+ Carrier_t *ct;
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+
+ if (allctr->check_mbc) {
+ for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
+ fprintf(stderr,"Checking allocator %d\r\n",i);
+ allctr->check_mbc(allctr,ct);
+ }
+ }
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+ }
+ }
+}
+#endif
static void *
set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
{
- Uint *ui_ptr;
- Uint pattern;
+ UWord *ui_ptr;
+ UWord pattern;
+#ifdef HARD_DEBUG
+ hdbg_mblk **mblkpp;
+#endif
if (!ptr)
return NULL;
- ui_ptr = (Uint *) ptr;
+ ui_ptr = (UWord *) ptr;
pattern = MK_PATTERN(n);
-
+
+#ifdef HARD_DEBUG
+ mblkpp = (hdbg_mblk **) ui_ptr++;
+#endif
+
*(ui_ptr++) = sz;
*(ui_ptr++) = pattern;
- memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(Uint));
+ memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
+
+#ifdef HARD_DEBUG
+ *mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
+#endif
return (void *) ui_ptr;
}
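The words that debug_alloc() places around each user block can be pictured with an illustrative struct (an assumption read off set_memory_fence()/check_memory_fence() above, not a type that exists in the source; the hdbg pointer is present only under HARD_DEBUG):

typedef unsigned long uword_t;       /* stand-in for UWord */

struct debug_fence_prefix {
    void    *hdbg_mblk;              /* HARD_DEBUG only: record checked by erts_hdbg_chk_blks() */
    uword_t  size;                   /* requested user size in bytes */
    uword_t  pre_pattern;            /* MK_PATTERN(type) */
    /* 'size' bytes of user data follow; a UWord copy of the pattern is
     * memcpy'd directly after the data as the (unaligned) post fence */
};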
@@ -2991,16 +3115,22 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
{
Uint sz;
Uint found_type;
- Uint pre_pattern;
- Uint post_pattern;
- Uint *ui_ptr;
+ UWord pre_pattern;
+ UWord post_pattern;
+ UWord *ui_ptr;
+#ifdef HARD_DEBUG
+ hdbg_mblk *mblk;
+#endif
if (!ptr)
return NULL;
- ui_ptr = (Uint *) ptr;
+ ui_ptr = (UWord *) ptr;
pre_pattern = *(--ui_ptr);
*size = sz = *(--ui_ptr);
+#ifdef HARD_DEBUG
+ mblk = (hdbg_mblk *) *(--ui_ptr);
+#endif
found_type = GET_TYPE_OF_PATTERN(pre_pattern);
if (pre_pattern != MK_PATTERN(n)) {
@@ -3011,7 +3141,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
(unsigned long) ptr);
}
- memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(Uint));
+ memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));
if (post_pattern != MK_PATTERN(n)
|| pre_pattern != post_pattern) {
@@ -3056,6 +3186,17 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
(unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
}
+#ifdef HARD_DEBUG
+ switch (func) {
+ case ERTS_ALC_O_REALLOC:
+ case ERTS_ALC_O_FREE:
+ hdbg_free(mblk);
+ break;
+ default:
+ break;
+ }
+#endif
+
return (void *) ui_ptr;
}
@@ -3068,6 +3209,10 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
Uint dsize;
void *res;
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dsize = size + FENCE_SZ;
res = (*real_af->alloc)(n, real_af->extra, dsize);
@@ -3097,13 +3242,17 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
dsize = size + FENCE_SZ;
dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
if (old_size > size)
sys_memset((void *) (((char *) ptr) + size),
0xf,
sizeof(Uint) + old_size - size);
res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
-
+
res = set_memory_fence(res, size, n);
#ifdef PRINT_OPS
@@ -3133,6 +3282,10 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
#endif
+#ifdef HARD_DEBUG
+ erts_hdbg_chk_blks();
+#endif
+
}
static Uint
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index e7a203002f..3e96c76dbf 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -238,7 +238,7 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
#ifndef ERTS_CACHE_LINE_SIZE
/* Assume a cache line size of 64 bytes */
-# define ERTS_CACHE_LINE_SIZE ((Uint) 64)
+# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
# define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1)
#endif
@@ -486,9 +486,9 @@ init_##NAME##_alloc(void) \
qa_data_##NAME##__ = erts_alloc(ERTS_ALC_T_PRE_ALLOC_DATA,tot_size);\
chunk_start = (((char *) qa_data_##NAME##__) \
+ sizeof(erts_sched_pref_quick_alloc_data_t)); \
- if ((((Uint) chunk_start) & ERTS_CACHE_LINE_MASK) != ((Uint) 0)) \
+ if ((((UWord) chunk_start) & ERTS_CACHE_LINE_MASK) != ((UWord) 0)) \
chunk_start = ((char *) \
- ((((Uint) chunk_start) & ~ERTS_CACHE_LINE_MASK) \
+ ((((UWord) chunk_start) & ~ERTS_CACHE_LINE_MASK) \
+ ERTS_CACHE_LINE_SIZE)); \
qa_data_##NAME##__->chunks_mem_size = chunk_mem_size; \
qa_data_##NAME##__->start = (void *) chunk_start; \
@@ -553,7 +553,7 @@ NAME##_free(TYPE *p) \
}
#ifdef DEBUG
-#define ERTS_ALC_DBG_BLK_SZ(PTR) (*(((Uint *) (PTR)) - 2))
+#define ERTS_ALC_DBG_BLK_SZ(PTR) (*(((UWord *) (PTR)) - 2))
#endif /* #ifdef DEBUG */
#undef ERTS_ALC_INLINE
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index f701f71c7d..7df9f19af0 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -1,19 +1,19 @@
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2003-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 2003-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
@@ -138,6 +138,7 @@ type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
type PROC_LIST SHORT_LIVED PROCESSES proc_list
type FUN_ENTRY FIXED_SIZE CODE fun_entry
type ATOM_TXT LONG_LIVED ATOM atom_text
+type BEAM_REGISTER EHEAP PROCESSES beam_register
type HEAP EHEAP PROCESSES heap
type OLD_HEAP EHEAP PROCESSES old_heap
type HEAP_FRAG EHEAP PROCESSES heap_frag
@@ -211,7 +212,8 @@ type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
# INFO_DSBUF have to use the SYSTEM allocator; otherwise, a deadlock might occur
-type SCHDLR_DATA LONG_LIVED PROCESSES scheduler_data
+type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
+type SCHDLR_SLP_INFO LONG_LIVED SYSTEM scheduler_sleep_info
type RUNQS LONG_LIVED SYSTEM run_queues
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
type DDLL_HANDLE STANDARD SYSTEM ddll_handle
@@ -231,6 +233,7 @@ type RE_SUBJECT SHORT_LIVED SYSTEM re_subject
type RE_HEAP STANDARD SYSTEM re_heap
type RE_STACK SHORT_LIVED SYSTEM re_stack
type UNICODE_BUFFER SHORT_LIVED SYSTEM unicode_buffer
+type BINARY_BUFFER SHORT_LIVED SYSTEM binary_buffer
type PRE_ALLOC_DATA LONG_LIVED SYSTEM pre_alloc_data
type DRV_THR_OPTS DRIVER SYSTEM driver_thread_opts
type DRV_TID DRIVER SYSTEM driver_tid
@@ -244,6 +247,7 @@ type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type ZLIB STANDARD SYSTEM zlib
+type RDR_GRPS_MAP LONG_LIVED SYSTEM reader_groups_map
+if smp
type ASYNC SHORT_LIVED SYSTEM async
@@ -267,7 +271,9 @@ type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
+if threads
-type ETHR_INTERNAL SYSTEM SYSTEM ethread_internal
+type ETHR_STD STANDARD SYSTEM ethread_standard
+type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
+type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
+ifnot smp
@@ -322,6 +328,9 @@ type SSB SHORT_LIVED PROCESSES ssb
# Types used by system specific code
#
+type TEMP_TERM TEMPORARY SYSTEM temp_term
+type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
+
type DRV_TAB LONG_LIVED SYSTEM drv_tab
type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 9b7bc24c1c..8b184899c9 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -69,14 +69,14 @@ static int initialized = 0;
#if HAVE_ERTS_MSEG
-#define INV_MSEG_UNIT_MASK ((Uint) (mseg_unit_size - 1))
+#define INV_MSEG_UNIT_MASK ((UWord) (mseg_unit_size - 1))
#define MSEG_UNIT_MASK (~INV_MSEG_UNIT_MASK)
#define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK)
#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + INV_MSEG_UNIT_MASK)
#endif
-#define INV_SYS_ALLOC_CARRIER_MASK ((Uint) (sys_alloc_carrier_size - 1))
+#define INV_SYS_ALLOC_CARRIER_MASK ((UWord) (sys_alloc_carrier_size - 1))
#define SYS_ALLOC_CARRIER_MASK (~INV_SYS_ALLOC_CARRIER_MASK)
#define SYS_ALLOC_CARRIER_FLOOR(X) ((X) & SYS_ALLOC_CARRIER_MASK)
#define SYS_ALLOC_CARRIER_CEILING(X) \
@@ -85,7 +85,7 @@ static int initialized = 0;
#undef ASSERT
#define ASSERT ASSERT_EXPR
-#define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE ((Uint) 1)
+#define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE ((UWord) 1)
#if 0
/* Can be useful for debugging */
@@ -114,12 +114,12 @@ static Uint mseg_unit_size;
/* Blocks ... */
-#define SBC_BLK_FTR_FLG (((Uint) 1) << 0)
-#define UNUSED1_BLK_FTR_FLG (((Uint) 1) << 1)
-#define UNUSED2_BLK_FTR_FLG (((Uint) 1) << 2)
+#define SBC_BLK_FTR_FLG (((UWord) 1) << 0)
+#define UNUSED1_BLK_FTR_FLG (((UWord) 1) << 1)
+#define UNUSED2_BLK_FTR_FLG (((UWord) 1) << 2)
#define ABLK_HDR_SZ (sizeof(Block_t))
-#define FBLK_FTR_SZ (sizeof(Uint))
+#define FBLK_FTR_SZ (sizeof(UWord))
#define UMEMSZ2BLKSZ(AP, SZ) \
(ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \
@@ -130,14 +130,14 @@ static Uint mseg_unit_size;
#define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
#define PREV_BLK_SZ(B) \
- ((Uint) (*(((Uint *) (B)) - 1) & SZ_MASK))
+ ((UWord) (*(((UWord *) (B)) - 1) & SZ_MASK))
#define SET_BLK_SZ_FTR(B, SZ) \
- (*((Uint *) (((char *) (B)) + (SZ) - sizeof(Uint))) = (SZ))
+ (*((UWord *) (((char *) (B)) + (SZ) - sizeof(UWord))) = (SZ))
-#define THIS_FREE_BLK_HDR_FLG (((Uint) 1) << 0)
-#define PREV_FREE_BLK_HDR_FLG (((Uint) 1) << 1)
-#define LAST_BLK_HDR_FLG (((Uint) 1) << 2)
+#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
+#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
+#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
#define SET_BLK_SZ(B, SZ) \
(ASSERT(((SZ) & FLG_MASK) == 0), \
@@ -156,11 +156,11 @@ static Uint mseg_unit_size;
(*((Block_t *) (B)) &= ~LAST_BLK_HDR_FLG)
#define SBH_THIS_FREE THIS_FREE_BLK_HDR_FLG
-#define SBH_THIS_ALLOCED ((Uint) 0)
+#define SBH_THIS_ALLOCED ((UWord) 0)
#define SBH_PREV_FREE PREV_FREE_BLK_HDR_FLG
-#define SBH_PREV_ALLOCED ((Uint) 0)
+#define SBH_PREV_ALLOCED ((UWord) 0)
#define SBH_LAST_BLK LAST_BLK_HDR_FLG
-#define SBH_NOT_LAST_BLK ((Uint) 0)
+#define SBH_NOT_LAST_BLK ((UWord) 0)
#define SET_BLK_HDR(B, Sz, F) \
(ASSERT(((Sz) & FLG_MASK) == 0), *((Block_t *) (B)) = ((Sz) | (F)))
@@ -200,7 +200,7 @@ static Uint mseg_unit_size;
((FTR) = 0)
#define IS_SBC_BLK(B) \
- (IS_PREV_BLK_FREE((B)) && (((Uint *) (B))[-1] & SBC_BLK_FTR_FLG))
+ (IS_PREV_BLK_FREE((B)) && (((UWord *) (B))[-1] & SBC_BLK_FTR_FLG))
#define IS_MBC_BLK(B) \
(!IS_SBC_BLK((B)))
@@ -211,8 +211,8 @@ static Uint mseg_unit_size;
/* Carriers ... */
-#define MSEG_CARRIER_HDR_FLAG (((Uint) 1) << 0)
-#define SBC_CARRIER_HDR_FLAG (((Uint) 1) << 1)
+#define MSEG_CARRIER_HDR_FLAG (((UWord) 1) << 0)
+#define SBC_CARRIER_HDR_FLAG (((UWord) 1) << 1)
#define SCH_SYS_ALLOC 0
#define SCH_MSEG MSEG_CARRIER_HDR_FLAG
@@ -407,18 +407,18 @@ do { \
/* Debug stuff... */
#ifdef DEBUG
-static Uint carrier_alignment;
+static UWord carrier_alignment;
#define DEBUG_SAVE_ALIGNMENT(C) \
do { \
- Uint algnmnt__ = sizeof(Unit_t) - (((Uint) (C)) % sizeof(Unit_t)); \
+ UWord algnmnt__ = sizeof(Unit_t) - (((UWord) (C)) % sizeof(Unit_t)); \
carrier_alignment = MIN(carrier_alignment, algnmnt__); \
- ASSERT(((Uint) (C)) % sizeof(Uint) == 0); \
+ ASSERT(((UWord) (C)) % sizeof(UWord) == 0); \
} while (0)
#define DEBUG_CHECK_ALIGNMENT(P) \
do { \
- ASSERT(sizeof(Unit_t) - (((Uint) (P)) % sizeof(Unit_t)) \
+ ASSERT(sizeof(Unit_t) - (((UWord) (P)) % sizeof(Unit_t)) \
>= carrier_alignment); \
- ASSERT(((Uint) (P)) % sizeof(Uint) == 0); \
+ ASSERT(((UWord) (P)) % sizeof(UWord) == 0); \
} while (0)
#else
@@ -610,7 +610,7 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
}
-static Block_t *create_carrier(Allctr_t *, Uint, Uint);
+static Block_t *create_carrier(Allctr_t *, Uint, UWord);
static void destroy_carrier(Allctr_t *, Block_t *);
/* Multi block carrier alloc/realloc/free ... */
@@ -630,6 +630,11 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
blk = (*allctr->get_free_block)(allctr, *blk_szp, NULL, 0);
+#if HALFWORD_HEAP
+ if (!blk) {
+ blk = create_carrier(allctr, *blk_szp, CFLG_MBC|CFLG_FORCE_MSEG);
+ }
+#else
if (!blk) {
blk = create_carrier(allctr, *blk_szp, CFLG_MBC);
if (!blk) {
@@ -640,6 +645,7 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
CFLG_SBC|CFLG_FORCE_SIZE|CFLG_FORCE_SYS_ALLOC);
}
}
+#endif
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
if (IS_MBC_BLK(blk)) {
@@ -656,14 +662,14 @@ static ERTS_INLINE void
mbc_alloc_finalize(Allctr_t *allctr,
Block_t *blk,
Uint org_blk_sz,
- Uint flags,
+ UWord flags,
Uint want_blk_sz,
int valid_blk_info)
{
Uint blk_sz;
Uint nxt_blk_sz;
Block_t *nxt_blk;
- Uint prev_free_flg = flags & PREV_FREE_BLK_HDR_FLG;
+ UWord prev_free_flg = flags & PREV_FREE_BLK_HDR_FLG;
ASSERT(org_blk_sz >= want_blk_sz);
ASSERT(blk);
@@ -853,7 +859,7 @@ mbc_free(Allctr_t *allctr, void *p)
}
static void *
-mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint flgs)
+mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
{
void *new_p;
Uint old_blk_sz;
@@ -1144,7 +1150,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint flgs)
}
else {
Uint new_blk_sz;
- Uint new_blk_flgs;
+ UWord new_blk_flgs;
Uint prev_blk_sz;
Uint blk_cpy_sz;
@@ -1239,7 +1245,7 @@ do { \
static Block_t *
-create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
+create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
{
Block_t *blk;
Carrier_t *crr;
@@ -1283,8 +1289,8 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
if (crr_sz < allctr->mbc_header_size + blk_sz)
crr_sz = allctr->mbc_header_size + blk_sz;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(Uint))
- crr_sz += sizeof(Uint);
+ if (sizeof(Unit_t) == sizeof(UWord))
+ crr_sz += sizeof(UWord);
#endif
}
crr_sz = MSEG_UNIT_CEILING(crr_sz);
@@ -1324,8 +1330,8 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
&& bcrr_sz < allctr->smallest_mbc_size)
bcrr_sz = allctr->smallest_mbc_size;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(Uint))
- bcrr_sz += sizeof(Uint);
+ if (sizeof(Unit_t) == sizeof(UWord))
+ bcrr_sz += sizeof(UWord);
#endif
}
@@ -1360,7 +1366,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
blk = SBC2BLK(allctr, crr);
- SET_SBC_BLK_FTR(((Uint *) blk)[-1]);
+ SET_SBC_BLK_FTR(((UWord *) blk)[-1]);
SET_BLK_HDR(blk, blk_sz, SBH_THIS_ALLOCED|SBH_PREV_FREE|SBH_LAST_BLK);
link_carrier(&allctr->sbc_list, crr);
@@ -1379,13 +1385,13 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
blk = MBC2FBLK(allctr, crr);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(Uint))
- crr_sz -= sizeof(Uint);
+ if (sizeof(Unit_t) == sizeof(UWord))
+ crr_sz -= sizeof(UWord);
#endif
blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size);
- SET_MBC_BLK_FTR(((Uint *) blk)[-1]);
+ SET_MBC_BLK_FTR(((UWord *) blk)[-1]);
SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -1400,13 +1406,13 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
link_carrier(&allctr->mbc_list, crr);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(Uint))
- crr_sz += sizeof(Uint);
+ if (sizeof(Unit_t) == sizeof(UWord))
+ crr_sz += sizeof(UWord);
#endif
CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(Uint))
- crr_sz -= sizeof(Uint);
+ if (sizeof(Unit_t) == sizeof(UWord))
+ crr_sz -= sizeof(UWord);
#endif
if (allctr->creating_mbc)
(*allctr->creating_mbc)(allctr, crr);
@@ -1418,11 +1424,12 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
}
static Block_t *
-resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, Uint flags)
+resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
{
Block_t *new_blk;
Carrier_t *new_crr, *old_crr;
- Uint create_flags, old_crr_sz, old_blk_sz, new_blk_sz, new_crr_sz;
+ UWord create_flags;
+ Uint old_crr_sz, old_blk_sz, new_blk_sz, new_crr_sz;
Uint new_bcrr_sz;
if (flags & CFLG_MBC) {
@@ -2522,7 +2529,12 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
INC_CC(allctr->calls.this_alloc);
if (size >= allctr->sbc_threshold) {
+#if HALFWORD_HEAP
+ Block_t *blk = create_carrier(allctr, size,
+ CFLG_SBC | CFLG_FORCE_MSEG);
+#else
Block_t *blk = create_carrier(allctr, size, CFLG_SBC);
+#endif
res = blk ? BLK2UMEM(blk) : NULL;
}
else
@@ -2594,16 +2606,16 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
Allctr_t *allctr;
void *res;
- ASSERT(sizeof(Uint) == sizeof(Allctr_t *));
+ ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
ASSERT(ix > 0);
if (ix >= tspec->size)
ix = (ix % (tspec->size - 1)) + 1;
allctr = tspec->allctr[ix];
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_alloc(type, allctr, size + sizeof(Uint));
+ res = do_erts_alcu_alloc(type, allctr, size + sizeof(UWord));
if (res) {
*((Allctr_t **) res) = allctr;
- res = (void *) (((char *) res) + sizeof(Uint));
+ res = (void *) (((char *) res) + sizeof(UWord));
}
erts_mtx_unlock(&allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
@@ -2681,7 +2693,7 @@ void
erts_alcu_free_thr_pref(ErtsAlcType_t type, void *unused, void *p)
{
if (p) {
- void *ptr = (void *) (((char *) p) - sizeof(Uint));
+ void *ptr = (void *) (((char *) p) - sizeof(UWord));
Allctr_t *allctr = *((Allctr_t **) ptr);
erts_mtx_lock(&allctr->mutex);
do_erts_alcu_free(type, allctr, ptr);
@@ -2698,7 +2710,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
void *extra,
void *p,
Uint size,
- Uint flgs)
+ UWord flgs)
{
Allctr_t *allctr = (Allctr_t *) extra;
Block_t *blk;
@@ -2780,13 +2792,21 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
Block_t *new_blk;
if(IS_SBC_BLK(blk)) {
do_carrier_resize:
+#if HALFWORD_HEAP
+ new_blk = resize_carrier(allctr, blk, size, CFLG_SBC | CFLG_FORCE_MSEG);
+#else
new_blk = resize_carrier(allctr, blk, size, CFLG_SBC);
+#endif
res = new_blk ? BLK2UMEM(new_blk) : NULL;
}
else if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
else {
+#if HALFWORD_HEAP
+ new_blk = create_carrier(allctr, size, CFLG_SBC | CFLG_FORCE_MSEG);
+#else
new_blk = create_carrier(allctr, size, CFLG_SBC);
+#endif
if (new_blk) {
res = BLK2UMEM(new_blk);
sys_memcpy((void *) res,
@@ -2962,7 +2982,7 @@ erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
- ptr = (void *) (((char *) p) - sizeof(Uint));
+ ptr = (void *) (((char *) p) - sizeof(UWord));
used_allctr = *((Allctr_t **) ptr);
ix = erts_alc_get_thr_ix();
@@ -2976,32 +2996,32 @@ erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
res = do_erts_alcu_realloc(type,
used_allctr,
ptr,
- size + sizeof(Uint),
+ size + sizeof(UWord),
(pref_allctr != used_allctr
? ERTS_ALCU_FLG_FAIL_REALLOC_MOVE
: 0));
erts_mtx_unlock(&used_allctr->mutex);
if (res) {
ASSERT(used_allctr == *((Allctr_t **) res));
- res = (void *) (((char *) res) + sizeof(Uint));
+ res = (void *) (((char *) res) + sizeof(UWord));
DEBUG_CHECK_ALIGNMENT(res);
}
else {
erts_mtx_lock(&pref_allctr->mutex);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(Uint));
+ res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
erts_mtx_unlock(&pref_allctr->mutex);
if (res) {
Block_t *blk;
size_t cpy_size;
*((Allctr_t **) res) = pref_allctr;
- res = (void *) (((char *) res) + sizeof(Uint));
+ res = (void *) (((char *) res) + sizeof(UWord));
DEBUG_CHECK_ALIGNMENT(res);
erts_mtx_lock(&used_allctr->mutex);
blk = UMEM2BLK(ptr);
- cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(Uint);
+ cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(UWord);
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
@@ -3026,7 +3046,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
- ptr = (void *) (((char *) p) - sizeof(Uint));
+ ptr = (void *) (((char *) p) - sizeof(UWord));
used_allctr = *((Allctr_t **) ptr);
ix = erts_alc_get_thr_ix();
@@ -3037,7 +3057,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
ASSERT(used_allctr && pref_allctr);
erts_mtx_lock(&pref_allctr->mutex);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(Uint));
+ res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
if (!res) {
erts_mtx_unlock(&pref_allctr->mutex);
res = erts_alcu_realloc_thr_pref(type, extra, p, size);
@@ -3048,7 +3068,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
Allctr_t *allctr;
*((Allctr_t **) res) = pref_allctr;
- res = (void *) (((char *) res) + sizeof(Uint));
+ res = (void *) (((char *) res) + sizeof(UWord));
DEBUG_CHECK_ALIGNMENT(res);
@@ -3061,7 +3081,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
}
blk = UMEM2BLK(ptr);
- cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(Uint);
+ cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(UWord);
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
@@ -3138,11 +3158,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (allctr->min_block_size < ABLK_HDR_SZ)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
- + sizeof(Uint));
+ + sizeof(UWord));
#if HAVE_ERTS_MSEG
- if (allctr->mseg_opt.abs_shrink_th > ~((Uint) 0) / 100)
- allctr->mseg_opt.abs_shrink_th = ~((Uint) 0) / 100;
+ if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100)
+ allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100;
#endif
#ifdef USE_THREADS
@@ -3182,15 +3202,15 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ FBLK_FTR_SZ
+ ABLK_HDR_SZ
- + sizeof(Uint))
+ + sizeof(UWord))
- ABLK_HDR_SZ
- - sizeof(Uint));
+ - sizeof(UWord));
allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t)
+ FBLK_FTR_SZ
+ ABLK_HDR_SZ
- + sizeof(Uint))
+ + sizeof(UWord))
- ABLK_HDR_SZ
- - sizeof(Uint));
+ - sizeof(UWord));
}
else
#endif
@@ -3208,12 +3228,21 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (allctr->main_carrier_size) {
Block_t *blk;
+#if HALFWORD_HEAP
+ blk = create_carrier(allctr,
+ allctr->main_carrier_size,
+ CFLG_MBC
+ | CFLG_FORCE_SIZE
+ | CFLG_FORCE_MSEG
+ | CFLG_MAIN_CARRIER);
+#else
blk = create_carrier(allctr,
allctr->main_carrier_size,
CFLG_MBC
| CFLG_FORCE_SIZE
| CFLG_FORCE_SYS_ALLOC
| CFLG_MAIN_CARRIER);
+#endif
if (!blk)
goto error;
@@ -3409,7 +3438,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
is_free_blk = (int) IS_FREE_BLK(blk);
if(is_free_blk) {
if (IS_NOT_LAST_BLK(blk))
- ASSERT(*((Uint *) (((char *) blk)+blk_sz-sizeof(Uint)))
+ ASSERT(*((UWord *) (((char *) blk)+blk_sz-sizeof(UWord)))
== blk_sz);
}
@@ -3417,7 +3446,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
(*allctr->check_block)(allctr, blk, (int) is_free_blk);
if (IS_LAST_BLK(blk)) {
- carrier_end = ((char *) NXT_BLK(blk)) + sizeof(Uint);
+ carrier_end = ((char *) NXT_BLK(blk)) + sizeof(UWord);
mbc = *((Carrier_t **) NXT_BLK(blk));
prev_blk = NULL;
blk = MBC2FBLK(allctr, mbc);
@@ -3433,7 +3462,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
ASSERT((((char *) mbc)
+ allctr->mbc_header_size
+ tot_blk_sz
- + sizeof(Uint)) == carrier_end);
+ + sizeof(UWord)) == carrier_end);
ASSERT(((char *) mbc) + CARRIER_SZ(mbc) == carrier_end);
if (allctr->check_mbc)
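
The thr_pref allocation path above hides a word-sized prefix in front of each user block and stores the owning Allctr_t * there, which is why the prefix grows from sizeof(Uint) to sizeof(UWord): the ASSERT now requires the prefix to be exactly pointer-sized, so the stashed pointer fits even on builds where Uint is narrower than a pointer (the halfword case this diff also handles). A minimal standalone sketch of the same owner-pointer-prefix idiom; pool_t and the helper names are invented for illustration and are not ERTS API:

    /* Owner-pointer prefix in front of each user block, as in
     * erts_alcu_alloc_thr_pref()/erts_alcu_free_thr_pref() above. */
    #include <stdlib.h>
    #include <stdint.h>

    typedef struct pool { const char *name; } pool_t;

    static void *pool_alloc(pool_t *owner, size_t size)
    {
        /* one extra pointer-sized word in front of the user area */
        void *raw = malloc(size + sizeof(uintptr_t));
        if (!raw)
            return NULL;
        *((pool_t **) raw) = owner;                          /* stash the owner   */
        return (void *) ((char *) raw + sizeof(uintptr_t));  /* hand out the rest */
    }

    static void pool_free(void *p)
    {
        if (p) {
            void *raw = (void *) ((char *) p - sizeof(uintptr_t));
            pool_t *owner = *((pool_t **) raw);              /* recover the owner */
            (void) owner;            /* a real allocator would lock owner here */
            free(raw);
        }
    }

    int main(void)
    {
        pool_t pool = { "demo" };
        void *p = pool_alloc(&pool, 128);
        pool_free(p);
        return 0;
    }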
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 10b11661e6..f2b951bca6 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -27,8 +27,8 @@
typedef struct Allctr_t_ Allctr_t;
typedef struct {
- Uint ycs;
- Uint mmc;
+ UWord ycs;
+ UWord mmc;
} AlcUInit_t;
typedef struct {
@@ -38,22 +38,22 @@ typedef struct {
int tspec;
int tpref;
int ramv;
- Uint sbct;
- Uint asbcst;
- Uint rsbcst;
- Uint rsbcmt;
- Uint rmbcmt;
- Uint mmbcs;
- Uint mmsbc;
- Uint mmmbc;
- Uint lmbcs;
- Uint smbcs;
- Uint mbcgs;
+ UWord sbct;
+ UWord asbcst;
+ UWord rsbcst;
+ UWord rsbcmt;
+ UWord rmbcmt;
+ UWord mmbcs;
+ UWord mmsbc;
+ UWord mmmbc;
+ UWord lmbcs;
+ UWord smbcs;
+ UWord mbcgs;
} AllctrInit_t;
typedef struct {
- Uint blocks;
- Uint carriers;
+ UWord blocks;
+ UWord carriers;
} AllctrSize_t;
#ifndef SMALL_MEMORY
@@ -163,19 +163,19 @@ void erts_alcu_current_size(Allctr_t *, AllctrSize_t *);
#define CEILING(X, I) ((((X) - 1)/(I) + 1)*(I))
#undef WORD_MASK
-#define INV_WORD_MASK ((Uint) (sizeof(Uint) - 1))
+#define INV_WORD_MASK ((UWord) (sizeof(UWord) - 1))
#define WORD_MASK (~INV_WORD_MASK)
#define WORD_FLOOR(X) ((X) & WORD_MASK)
#define WORD_CEILING(X) WORD_FLOOR((X) + INV_WORD_MASK)
#undef UNIT_MASK
-#define INV_UNIT_MASK ((Uint) (sizeof(Unit_t) - 1))
+#define INV_UNIT_MASK ((UWord) (sizeof(Unit_t) - 1))
#define UNIT_MASK (~INV_UNIT_MASK)
#define UNIT_FLOOR(X) ((X) & UNIT_MASK)
#define UNIT_CEILING(X) UNIT_FLOOR((X) + INV_UNIT_MASK)
-#define SZ_MASK (~((Uint) 0) << 3)
+#define SZ_MASK (~((UWord) 0) << 3)
#define FLG_MASK (~(SZ_MASK))
@@ -189,7 +189,7 @@ typedef union {char c[8]; long l; double d;} Unit_t;
typedef struct Carrier_t_ Carrier_t;
struct Carrier_t_ {
- Uint chdr;
+ UWord chdr;
Carrier_t *next;
Carrier_t *prev;
};
@@ -199,17 +199,17 @@ typedef struct {
Carrier_t *last;
} CarrierList_t;
-typedef Uint Block_t;
-typedef Uint FreeBlkFtr_t;
+typedef UWord Block_t;
+typedef UWord FreeBlkFtr_t;
typedef struct {
- Uint giga_no;
- Uint no;
+ UWord giga_no;
+ UWord no;
} CallCounter_t;
typedef struct {
- Uint no;
- Uint size;
+ UWord no;
+ UWord size;
} StatValues_t;
typedef struct {
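
The Block_t header that this diff widens to UWord packs the block size and three flag bits into one word: SZ_MASK (~0 << 3) selects the size bits and FLG_MASK the low three bits used for THIS_FREE / PREV_FREE / LAST_BLK, as SET_BLK_HDR in the .c diff shows. A small standalone illustration of that packing; the constants are redeclared locally so the example compiles on its own:

    /* Header-word packing implied by SZ_MASK/FLG_MASK above. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t word_t;                    /* stand-in for UWord */

    #define SZ_MASK   (~((word_t) 0) << 3)
    #define FLG_MASK  (~SZ_MASK)

    #define THIS_FREE ((word_t) 1 << 0)
    #define PREV_FREE ((word_t) 1 << 1)
    #define LAST_BLK  ((word_t) 1 << 2)

    int main(void)
    {
        /* block sizes are unit-aligned, so the low three bits are free */
        word_t hdr = (word_t) 512 | PREV_FREE | LAST_BLK;

        printf("size  = %lu\n", (unsigned long) (hdr & SZ_MASK));  /* 512 */
        printf("flags = %lu\n", (unsigned long) (hdr & FLG_MASK)); /* 6   */
        return 0;
    }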
diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c
index 126ec7cc73..64fad9fe0e 100644
--- a/erts/emulator/beam/erl_arith.c
+++ b/erts/emulator/beam/erl_arith.c
@@ -41,6 +41,16 @@
# define MAX(x, y) (((x) > (y)) ? (x) : (y))
#endif
+#if !HEAP_ON_C_STACK
+# define DECLARE_TMP(VariableName,N,P) \
+ Eterm *VariableName = ((ERTS_PROC_GET_SCHDATA(P)->erl_arith_tmp_heap) + (2 * N))
+#else
+# define DECLARE_TMP(VariableName,N,P) \
+ Eterm VariableName[2]
+#endif
+# define ARG_IS_NOT_TMP(Arg,Tmp) ((Arg) != make_big((Tmp)))
+
+
static Eterm shift(Process* p, Eterm arg1, Eterm arg2, int right);
static ERTS_INLINE void maybe_shrink(Process* p, Eterm* hp, Eterm res, Uint alloc)
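
DECLARE_TMP above places the two-word temporary bignum buffers either on the C stack or, when the emulator is built without heap-on-C-stack, in what appears to be a preallocated per-scheduler scratch area (erl_arith_tmp_heap), and ARG_IS_NOT_TMP tells a collected argument apart from such a buffer. A standalone sketch of the same stack-versus-scratch-slot idiom; context_t and all names here are invented, not ERTS code:

    /* Choose at compile time between a stack-local scratch buffer and a
     * slot in a per-context scratch area. */
    #include <stdio.h>

    #define SCRATCH_ON_STACK 0      /* flip to 1 to mimic HEAP_ON_C_STACK */

    typedef struct context { long tmp_heap[4]; } context_t;

    #if SCRATCH_ON_STACK
    #  define DECLARE_SCRATCH(Var, N, Ctx) long Var[2]
    #else
    #  define DECLARE_SCRATCH(Var, N, Ctx) long *Var = &(Ctx)->tmp_heap[2 * (N)]
    #endif

    static long demo(context_t *ctx, long a, long b)
    {
        DECLARE_SCRATCH(t1, 0, ctx);    /* slot 0: words 0..1 */
        DECLARE_SCRATCH(t2, 1, ctx);    /* slot 1: words 2..3 */
        t1[0] = a;
        t2[0] = b;
        return t1[0] + t2[0];
    }

    int main(void)
    {
        context_t ctx;
        printf("%ld\n", demo(&ctx, 20, 22));    /* prints 42 */
        return 0;
    }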
@@ -169,7 +179,7 @@ shift(Process* p, Eterm arg1, Eterm arg2, int right)
{
Sint i;
Sint ires;
- Eterm tmp_big1[2];
+ DECLARE_TMP(tmp_big1,0,p);
Eterm* bigp;
Uint need;
@@ -312,8 +322,8 @@ BIF_RETTYPE bnot_1(BIF_ALIST_1)
Eterm
erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm res;
Eterm hdr;
FloatDef f1, f2;
@@ -458,8 +468,8 @@ erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
Eterm
erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm hdr;
Eterm res;
FloatDef f1, f2;
@@ -602,8 +612,8 @@ erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
Eterm
erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm hdr;
Eterm res;
FloatDef f1, f2;
@@ -627,8 +637,8 @@ erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
} else if (arg2 == SMALL_ONE) {
return(arg1);
} else {
- Eterm big_res[3];
-
+ DeclareTmpHeap(big_res,3,p);
+ UseTmpHeap(3,p);
/*
* The following code is optimized for the case that
* result is small (which should be the most common case
@@ -636,6 +646,7 @@ erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
*/
res = small_times(signed_val(arg1), signed_val(arg2), big_res);
if (is_small(res)) {
+ UnUseTmpHeap(3,p);
return res;
} else {
/*
@@ -657,6 +668,7 @@ erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
if (arity > 1) {
*hp = big_res[2];
}
+ UnUseTmpHeap(3,p);
return res;
}
}
@@ -915,8 +927,8 @@ erts_mixed_div(Process* p, Eterm arg1, Eterm arg2)
Eterm
erts_int_div(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
int ires;
switch (NUMBER_CODE(arg1, arg2)) {
@@ -967,8 +979,8 @@ erts_int_div(Process* p, Eterm arg1, Eterm arg2)
Eterm
erts_int_rem(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
int ires;
switch (NUMBER_CODE(arg1, arg2)) {
@@ -979,7 +991,8 @@ erts_int_rem(Process* p, Eterm arg1, Eterm arg2)
if (arg1 != make_small(MIN_SMALL)) {
return arg1;
} else {
- Eterm tmp = small_to_big(signed_val(arg1), tmp_big1);
+ Eterm tmp;
+ tmp = small_to_big(signed_val(arg1), tmp_big1);
if ((ires = big_ucomp(tmp, arg2)) == 0) {
return SMALL_ZERO;
} else {
@@ -1013,8 +1026,8 @@ erts_int_rem(Process* p, Eterm arg1, Eterm arg2)
Eterm erts_band(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm* hp;
int need;
@@ -1041,8 +1054,8 @@ Eterm erts_band(Process* p, Eterm arg1, Eterm arg2)
Eterm erts_bor(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm* hp;
int need;
@@ -1069,8 +1082,8 @@ Eterm erts_bor(Process* p, Eterm arg1, Eterm arg2)
Eterm erts_bxor(Process* p, Eterm arg1, Eterm arg2)
{
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm* hp;
int need;
@@ -1148,8 +1161,8 @@ erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live)
{
Eterm arg1;
Eterm arg2;
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm res;
Eterm hdr;
FloatDef f1, f2;
@@ -1237,10 +1250,10 @@ erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live)
need_heap = BIG_NEED_SIZE(sz);
if (ERTS_NEED_GC(p, need_heap)) {
erts_garbage_collect(p, need_heap, reg, live+2);
- if (arg1 != make_big(tmp_big1)) {
+ if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
arg1 = reg[live];
}
- if (arg2 != make_big(tmp_big2)) {
+ if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
arg2 = reg[live+1];
}
}
@@ -1316,8 +1329,8 @@ erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live)
{
Eterm arg1;
Eterm arg2;
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm hdr;
Eterm res;
FloatDef f1, f2;
@@ -1394,10 +1407,10 @@ erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live)
need_heap = BIG_NEED_SIZE(sz);
if (ERTS_NEED_GC(p, need_heap)) {
erts_garbage_collect(p, need_heap, reg, live+2);
- if (arg1 != make_big(tmp_big1)) {
+ if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
arg1 = reg[live];
}
- if (arg2 != make_big(tmp_big2)) {
+ if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
arg2 = reg[live+1];
}
}
@@ -1482,8 +1495,8 @@ erts_gc_mixed_times(Process* p, Eterm* reg, Uint live)
{
Eterm arg1;
Eterm arg2;
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
Eterm hdr;
Eterm res;
FloatDef f1, f2;
@@ -1509,7 +1522,8 @@ erts_gc_mixed_times(Process* p, Eterm* reg, Uint live)
} else if (arg2 == SMALL_ONE) {
return(arg1);
} else {
- Eterm big_res[3];
+ DeclareTmpHeap(big_res,3,p);
+ UseTmpHeap(3,p);
/*
* The following code is optimized for the case that
@@ -1519,6 +1533,7 @@ erts_gc_mixed_times(Process* p, Eterm* reg, Uint live)
res = small_times(signed_val(arg1), signed_val(arg2),
big_res);
if (is_small(res)) {
+ UnUseTmpHeap(3,p);
return res;
} else {
/*
@@ -1546,6 +1561,7 @@ erts_gc_mixed_times(Process* p, Eterm* reg, Uint live)
if (arity > 1) {
*hp = big_res[2];
}
+ UnUseTmpHeap(3,p);
return res;
}
}
@@ -1609,17 +1625,17 @@ erts_gc_mixed_times(Process* p, Eterm* reg, Uint live)
need_heap = BIG_NEED_SIZE(sz);
if (ERTS_NEED_GC(p, need_heap)) {
erts_garbage_collect(p, need_heap, reg, live+2);
- if (arg1 != make_big(tmp_big1)) {
+ if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
arg1 = reg[live];
}
- if (arg2 != make_big(tmp_big2)) {
+ if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
arg2 = reg[live+1];
}
}
hp = p->htop;
p->htop += need_heap;
res = big_times(arg1, arg2, hp);
- trim_heap(p, hp, res);
+ trim_heap(p, hp, res);
/*
* Note that the result must be big in this case, since
@@ -1828,8 +1844,8 @@ erts_gc_int_div(Process* p, Eterm* reg, Uint live)
{
Eterm arg1;
Eterm arg2;
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
int ires;
arg1 = reg[live];
@@ -1866,10 +1882,10 @@ erts_gc_int_div(Process* p, Eterm* reg, Uint live)
need = BIG_NEED_SIZE(i-ires+1) + BIG_NEED_SIZE(i);
if (ERTS_NEED_GC(p, need)) {
erts_garbage_collect(p, need, reg, live+2);
- if (arg1 != make_big(tmp_big1)) {
+ if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
arg1 = reg[live];
}
- if (arg2 != make_big(tmp_big2)) {
+ if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
arg2 = reg[live+1];
}
}
@@ -1894,8 +1910,8 @@ erts_gc_int_rem(Process* p, Eterm* reg, Uint live)
{
Eterm arg1;
Eterm arg2;
- Eterm tmp_big1[2];
- Eterm tmp_big2[2];
+ DECLARE_TMP(tmp_big1,0,p);
+ DECLARE_TMP(tmp_big2,1,p);
int ires;
arg1 = reg[live];
@@ -1908,7 +1924,8 @@ erts_gc_int_rem(Process* p, Eterm* reg, Uint live)
if (arg1 != make_small(MIN_SMALL)) {
return arg1;
} else {
- Eterm tmp = small_to_big(signed_val(arg1), tmp_big1);
+ Eterm tmp;
+ tmp = small_to_big(signed_val(arg1), tmp_big1);
if ((ires = big_ucomp(tmp, arg2)) == 0) {
return SMALL_ZERO;
} else {
@@ -1928,10 +1945,10 @@ erts_gc_int_rem(Process* p, Eterm* reg, Uint live)
if (ERTS_NEED_GC(p, need)) {
erts_garbage_collect(p, need, reg, live+2);
- if (arg1 != make_big(tmp_big1)) {
+ if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
arg1 = reg[live];
}
- if (arg2 != make_big(tmp_big2)) {
+ if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
arg2 = reg[live+1];
}
}
@@ -1956,8 +1973,8 @@ Eterm erts_gc_##func(Process* p, Eterm* reg, Uint live) \
{ \
Eterm arg1; \
Eterm arg2; \
- Eterm tmp_big1[2]; \
- Eterm tmp_big2[2]; \
+ DECLARE_TMP(tmp_big1,0,p); \
+ DECLARE_TMP(tmp_big2,1,p); \
Eterm* hp; \
int need; \
\
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index b090564649..12c7631448 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
#ifdef HAVE_CONFIG_H
@@ -70,7 +70,6 @@ static ErlAsync* async_ready_list = NULL;
/* Detach from driver */
static void async_detach(DE_Handle* dh)
{
- /* XXX:PaN what should happen here? we want to unload the driver or??? */
return;
}
@@ -176,7 +175,6 @@ int exit_async()
static void async_add(ErlAsync* a, AsyncQueue* q)
{
- /* XXX:PaN Is this still necessary when ports lock drivers? */
if (is_internal_port(a->port)) {
ERTS_LC_ASSERT(erts_drvportid2port(a->port));
/* make sure the driver will stay around */
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
new file mode 100644
index 0000000000..024ff2a684
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -0,0 +1,2931 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * NOTE: This file contains the BIFs for the *module* binary in stdlib.
+ * Other BIFs concerning binaries are in binary.c.
+ */
+
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+
+
+/*
+ * The native implementation functions for the module binary.
+ * Searching is implemented using either Boyer-Moore or Aho-Corasick,
+ * depending on the number of search strings (BM if one, AC if more than one).
+ * The native implementation is mostly for efficiency; nothing
+ * (except binary:referenced_byte_size) really *needs* to be implemented
+ * in native code.
+ */
+
+/* #define HARDDEBUG */
+
+/* Init and local variables */
+
+static Export binary_match_trap_export;
+static BIF_RETTYPE binary_match_trap(BIF_ALIST_3);
+static Export binary_matches_trap_export;
+static BIF_RETTYPE binary_matches_trap(BIF_ALIST_3);
+static Export binary_longest_prefix_trap_export;
+static BIF_RETTYPE binary_longest_prefix_trap(BIF_ALIST_3);
+static Export binary_longest_suffix_trap_export;
+static BIF_RETTYPE binary_longest_suffix_trap(BIF_ALIST_3);
+static Export binary_bin_to_list_trap_export;
+static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3);
+static Export binary_copy_trap_export;
+static BIF_RETTYPE binary_copy_trap(BIF_ALIST_2);
+static Uint max_loop_limit;
+
+
+void erts_init_bif_binary(void)
+{
+ sys_memset((void *) &binary_match_trap_export, 0, sizeof(Export));
+ binary_match_trap_export.address = &binary_match_trap_export.code[3];
+ binary_match_trap_export.code[0] = am_erlang;
+ binary_match_trap_export.code[1] = am_binary_match_trap;
+ binary_match_trap_export.code[2] = 3;
+ binary_match_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ binary_match_trap_export.code[4] = (BeamInstr) &binary_match_trap;
+
+ sys_memset((void *) &binary_matches_trap_export, 0, sizeof(Export));
+ binary_matches_trap_export.address = &binary_matches_trap_export.code[3];
+ binary_matches_trap_export.code[0] = am_erlang;
+ binary_matches_trap_export.code[1] = am_binary_matches_trap;
+ binary_matches_trap_export.code[2] = 3;
+ binary_matches_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ binary_matches_trap_export.code[4] = (BeamInstr) &binary_matches_trap;
+
+ sys_memset((void *) &binary_longest_prefix_trap_export, 0, sizeof(Export));
+ binary_longest_prefix_trap_export.address = &binary_longest_prefix_trap_export.code[3];
+ binary_longest_prefix_trap_export.code[0] = am_erlang;
+ binary_longest_prefix_trap_export.code[1] = am_binary_longest_prefix_trap;
+ binary_longest_prefix_trap_export.code[2] = 3;
+ binary_longest_prefix_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ binary_longest_prefix_trap_export.code[4] = (BeamInstr) &binary_longest_prefix_trap;
+
+ sys_memset((void *) &binary_longest_suffix_trap_export, 0, sizeof(Export));
+ binary_longest_suffix_trap_export.address = &binary_longest_suffix_trap_export.code[3];
+ binary_longest_suffix_trap_export.code[0] = am_erlang;
+ binary_longest_suffix_trap_export.code[1] = am_binary_longest_suffix_trap;
+ binary_longest_suffix_trap_export.code[2] = 3;
+ binary_longest_suffix_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ binary_longest_suffix_trap_export.code[4] = (BeamInstr) &binary_longest_suffix_trap;
+
+ sys_memset((void *) &binary_bin_to_list_trap_export, 0, sizeof(Export));
+ binary_bin_to_list_trap_export.address = &binary_bin_to_list_trap_export.code[3];
+ binary_bin_to_list_trap_export.code[0] = am_erlang;
+ binary_bin_to_list_trap_export.code[1] = am_binary_bin_to_list_trap;
+ binary_bin_to_list_trap_export.code[2] = 3;
+ binary_bin_to_list_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ binary_bin_to_list_trap_export.code[4] = (BeamInstr) &binary_bin_to_list_trap;
+ sys_memset((void *) &binary_copy_trap_export, 0, sizeof(Export));
+ binary_copy_trap_export.address = &binary_copy_trap_export.code[3];
+ binary_copy_trap_export.code[0] = am_erlang;
+ binary_copy_trap_export.code[1] = am_binary_copy_trap;
+ binary_copy_trap_export.code[2] = 2;
+ binary_copy_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ binary_copy_trap_export.code[4] = (BeamInstr) &binary_copy_trap;
+
+ max_loop_limit = 0;
+ return;
+}
+
+/*
+ * Setting the loop_limit for searches for debugging
+ */
+Sint erts_binary_set_loop_limit(Sint limit)
+{
+ Sint save = (Sint) max_loop_limit;
+ if (limit <= 0) {
+ max_loop_limit = 0;
+ } else {
+ max_loop_limit = (Uint) limit;
+ }
+
+ return save;
+}
+
+static Uint get_reds(Process *p, int loop_factor)
+{
+ Uint reds = ERTS_BIF_REDS_LEFT(p) * loop_factor;
+ Uint tmp = max_loop_limit;
+ if (tmp != 0 && tmp < reds) {
+ return tmp;
+ }
+ if (!reds) {
+ reds = 1;
+ }
+ return reds;
+}
+
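
The get_reds() helper above turns the process's remaining reductions (times a loop factor) into a loop budget; the search loops further down decrement that budget and, when it hits zero, save their cursor and return a *_RESTART code so the calling BIF can trap and be rescheduled. A standalone sketch of that budget-and-restart pattern; the types and names are invented and none of the ERTS trap machinery is shown:

    /* Do at most 'budget' steps, then save the cursor and yield. */
    #include <stdio.h>
    #include <string.h>

    #define SCAN_DONE     0
    #define SCAN_RESTART -2

    typedef struct { size_t pos; long count; } scan_state;

    static int scan_some(scan_state *st, const char *hay, size_t len,
                         unsigned budget)
    {
        size_t i = st->pos;
        while (i < len) {
            if (budget-- == 0) {      /* out of budget: save state, yield */
                st->pos = i;
                return SCAN_RESTART;
            }
            if (hay[i] == 'x')
                st->count++;
            ++i;
        }
        st->pos = i;
        return SCAN_DONE;
    }

    int main(void)
    {
        scan_state st = { 0, 0 };
        const char *hay = "xxabxxcxdx";
        while (scan_some(&st, hay, strlen(hay), 3) == SCAN_RESTART)
            ;                         /* a real BIF would trap here */
        printf("found %ld\n", st.count);   /* found 6 */
        return 0;
    }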
+/*
+ * A micro allocator used when building search structures; just a convenience
+ * for building structures inside a pre-allocated magic binary using a
+ * conventional malloc-like interface.
+ */
+
+#define MYALIGN(Size) (SIZEOF_VOID_P * (((Size) / SIZEOF_VOID_P) + \
+ !!(((Size) % SIZEOF_VOID_P))))
+
+#ifdef DEBUG
+#define CHECK_ALLOCATOR(My) ASSERT((My).current <= ((My).mem + (My).size))
+#else
+#define CHECK_ALLOCATOR(My) /* nothing */
+#endif
+
+typedef struct _my_allocator {
+ Uint size;
+ byte *current;
+ byte *mem;
+} MyAllocator;
+
+static void init_my_allocator(MyAllocator *my, Uint siz, byte *array)
+{
+ ASSERT((siz % SIZEOF_VOID_P) == 0);
+ my->size = siz;
+ my->mem = array;
+ my->current = my->mem;
+}
+
+static void *my_alloc(MyAllocator *my, Uint size)
+{
+ void *ptr = my->current;
+ my->current += MYALIGN(size);
+ return ptr;
+}
+
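
init_my_allocator() and my_alloc() above form a tiny bump allocator: everything is carved, pointer-aligned, out of one buffer (the magic binary's data area in the real code), and nothing is ever freed individually. A simplified standalone copy of the idea; bump_t and the helper names are invented for this sketch:

    /* Bump allocator over a fixed, suitably aligned buffer. */
    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN_UP(sz) (sizeof(void *) * (((sz) / sizeof(void *)) + \
                          !!((sz) % sizeof(void *))))

    typedef struct {
        size_t size;
        unsigned char *current;
        unsigned char *mem;
    } bump_t;

    static void bump_init(bump_t *b, unsigned char *buf, size_t siz)
    {
        b->size = siz;
        b->mem = b->current = buf;
    }

    static void *bump_alloc(bump_t *b, size_t size)
    {
        void *p = b->current;
        b->current += ALIGN_UP(size);   /* caller sized the buffer up front */
        return p;
    }

    int main(void)
    {
        union { unsigned char bytes[64]; void *align_; } buf;
        bump_t b;
        int *n;
        char *s;

        bump_init(&b, buf.bytes, sizeof(buf.bytes));
        n = bump_alloc(&b, sizeof(int));
        s = bump_alloc(&b, 6);
        *n = 7;
        s[0] = 'o'; s[1] = 'k'; s[2] = '\0';
        printf("%d %s, used %zu bytes\n", *n, s, (size_t) (b.current - b.mem));
        return 0;
    }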
+/*
+ * The search functionality.
+ *
+ * The search is byte oriented, which works nicely for UTF-8 as well as
+ * latin1 data
+ */
+
+#define ALPHABET_SIZE 256
+
+typedef struct _ac_node {
+#ifdef HARDDEBUG
+ Uint32 id; /* To identify h pointer targets when
+ dumping */
+#endif
+ Uint32 d; /* Depth in trie, also represents the
+ length (-1) of the matched string if
+ in final set */
+ Sint32 final; /* Members in final set represent
+ * matches.
+ * The set representation is scattered
+ * among the nodes in this way:
+ * >0 -> this represents a member of
+ * the final set, <0 -> member of
+ * final set somewhere in the failure
+ * chain,
+ * 0 -> not member of the final set */
+ struct _ac_node *h; /* h(Node) is the failure function */
+ struct _ac_node *g[ALPHABET_SIZE]; /* g(Node,Character) is the
+ transition function */
+} ACNode;
+
+typedef struct _ac_trie {
+#ifdef HARDDEBUG
+ Uint32 idc;
+#endif
+ Uint32 counter; /* Number of added patterns */
+ ACNode *root; /* pointer to the root state */
+} ACTrie;
+
+typedef struct _bm_data {
+ byte *x;
+ Sint len;
+ Sint *goodshift;
+ Sint badshift[ALPHABET_SIZE];
+} BMData;
+
+#ifdef HARDDEBUG
+static void dump_bm_data(BMData *bm);
+static void dump_ac_trie(ACTrie *act);
+static void dump_ac_node(ACNode *node, int indent, int ch);
+#endif
+
+/*
+ * The needed size of binary data for a search structure - given the
+ * accumulated string lengths.
+ */
+#define BM_SIZE(StrLen) /* StrLen: length of searchstring */ \
+((MYALIGN(sizeof(Sint) * (StrLen))) + /* goodshift array */ \
+ MYALIGN(StrLen) + /* searchstring saved */ \
+ (MYALIGN(sizeof(BMData)))) /* Structure */
+
+#define AC_SIZE(StrLens) /* StrLens: sum of all searchstring lengths */ \
+((MYALIGN(sizeof(ACNode)) * \
+((StrLens)+1)) + /* The actual nodes (including rootnode) */ \
+ MYALIGN(sizeof(ACTrie))) /* Structure */
+
+
+#ifndef MAX
+#define MAX(A,B) (((A) > (B)) ? (A) : (B))
+#endif
+
+#ifndef MIN
+#define MIN(A,B) (((A) > (B)) ? (B) : (A))
+#endif
+/*
+ * Callback for the magic binary
+ */
+static void cleanup_my_data_ac(Binary *bp)
+{
+ return;
+}
+static void cleanup_my_data_bm(Binary *bp)
+{
+ return;
+}
+
+/*
+ * Initialize an (allocated) micro allocator and fill in the base
+ * for an Aho-Corasick search trie, given the accumulated length of the search
+ * strings.
+ */
+static ACTrie *create_acdata(MyAllocator *my, Uint len,
+ ACNode ***qbuff /* out */,
+ Binary **the_bin /* out */)
+{
+ Uint datasize = AC_SIZE(len);
+ ACTrie *act;
+ ACNode *acn;
+ Binary *mb = erts_create_magic_binary(datasize,cleanup_my_data_ac);
+ byte *data = ERTS_MAGIC_BIN_DATA(mb);
+
+ init_my_allocator(my, datasize, data);
+ act = my_alloc(my, sizeof(ACTrie)); /* Important that this is the first
+ allocation */
+ act->counter = 0;
+ act->root = acn = my_alloc(my, sizeof(ACNode));
+ acn->d = 0;
+ acn->final = 0;
+ acn->h = NULL;
+ memset(acn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE);
+#ifdef HARDDEBUG
+ act->idc = 0;
+ acn->id = 0;
+#endif
+ *qbuff = erts_alloc(ERTS_ALC_T_TMP, sizeof(ACNode *) * len);
+ *the_bin = mb;
+ return act;
+}
+
+/*
+ * The same initialization of allocator and basic data for Boyer-Moore.
+ */
+static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len,
+ Binary **the_bin /* out */)
+{
+ Uint datasize = BM_SIZE(len);
+ BMData *bmd;
+ Binary *mb = erts_create_magic_binary(datasize,cleanup_my_data_bm);
+ byte *data = ERTS_MAGIC_BIN_DATA(mb);
+ init_my_allocator(my, datasize, data);
+ bmd = my_alloc(my, sizeof(BMData));
+ bmd->x = my_alloc(my,len);
+ memcpy(bmd->x,x,len);
+ bmd->len = len;
+ bmd->goodshift = my_alloc(my,sizeof(Uint) * len);
+ *the_bin = mb;
+ return bmd;
+}
+
+/*
+ * Compilation of search structures
+ */
+
+/*
+ * Aho-Corasick - Build a trie and fill in the failure functions
+ * when all strings are added.
+ * The algorithm is nicely described by Dieter Bühler of University of
+ * Tübingen:
+ * http://www-sr.informatik.uni-tuebingen.de/~buehler/AC/AC.html
+ */
+
+/*
+ * Helper called once for each search pattern
+ */
+static void ac_add_one_pattern(MyAllocator *my, ACTrie *act, byte *x, Uint len)
+{
+ ACNode *acn = act->root;
+ Uint32 n = ++act->counter; /* Always increase counter, even if it's a
+ duplicate as this may identify the pattern
+ in the final set (not in current interface
+ though) */
+ Uint i = 0;
+
+ while(i < len) {
+ if (acn->g[x[i]] != NULL) {
+ /* node exists, continue */
+ acn = acn->g[x[i]];
+ ++i;
+ } else {
+ /* allocate a new node */
+ ACNode *nn = my_alloc(my,sizeof(ACNode));
+#ifdef HARDDEBUG
+ nn->id = ++(act->idc);
+#endif
+ nn->d = i+1;
+ nn->h = act->root;
+ nn->final = 0;
+ memset(nn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE);
+ acn->g[x[i]] = nn;
+ ++i;
+ acn = nn;
+ }
+ }
+ if (acn->final == 0) { /* New pattern, add to final set */
+ acn->final = n;
+ }
+}
+
+/*
+ * Called when all search patterns are added.
+ */
+static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff)
+{
+ ACNode *root = act->root;
+ ACNode *parent;
+ int i;
+ int qh = 0,qt = 0;
+ ACNode *child, *r;
+
+ /* Set all children of the root to have the root as failure function */
+ for (i = 0; i < ALPHABET_SIZE; ++i) {
+ if (root->g[i] != NULL) {
+ root->g[i]->h = root;
+ /* Add to queue for later traversal */
+ qbuff[qt++] = root->g[i];
+ }
+ }
+
+ /* Now that we've handled the children of the root state, traverse the
+ rest of the trie breadth-first... */
+ while (qh < qt) {
+ parent = qbuff[qh++];
+ for (i = 0; i < ALPHABET_SIZE; ++ i) {
+ if ((child = parent->g[i]) != NULL) {
+ /* Visit this node too */
+ qbuff[qt++] = child;
+ /* Search for the correct failure function: follow the parent's
+ failure chain until a node with a transition on this
+ character is found */
+ r = parent->h;
+ while (r != NULL && r->g[i] == NULL) {
+ r = r->h;
+ }
+ if (r == NULL) {
+ /* Replace NULL failures with the root as we go */
+ child->h = (root->g[i] == NULL) ? root : root->g[i];
+ } else {
+ child->h = r->g[i];
+ /*
+ * The "final" set is scattered among the nodes. When
+ * the failure function points to a member of the final
+ * set, we have a match, but we might not see it in the
+ * current node unless we mark it as a special type of
+ * final, i.e. follow the failure function and you will
+ * find a real member of the final set. This is marked with
+ * a negative string id and only done if this node does
+ * not represent a member of the final set.
+ */
+ if (!(child->final) && (child->h->final)) {
+ child->final = -1;
+ }
+ }
+ }
+ }
+ }
+ /* Finally the failure function of the root should point to itself */
+ root->h = root;
+}
+
+
+/*
+ * The actual searching for needles in the haystack...
+ * Find the first match using the Aho-Corasick trie: fill in mpos + mlen
+ * and return AC_OK if found, otherwise return AC_NOT_FOUND (or AC_RESTART
+ * when the reduction budget runs out).
+ * Return the matching pattern that *starts* first and ends
+ * last (a difference only when matches overlap), hence the candidate handling.
+ * Basic AC finds the first end before the first start...
+ *
+ */
+typedef struct {
+ ACNode *q;
+ Uint pos;
+ Uint len;
+ ACNode *candidate;
+ Uint candidate_start;
+} ACFindFirstState;
+
+
+static void ac_init_find_first_match(ACFindFirstState *state, ACTrie *act, Sint startpos, Uint len)
+{
+ state->q = act->root;
+ state->pos = startpos;
+ state->len = len;
+ state->candidate = NULL;
+ state->candidate_start = 0;
+}
+#define AC_OK 0
+#define AC_NOT_FOUND -1
+#define AC_RESTART -2
+
+#define AC_LOOP_FACTOR 10
+
+static int ac_find_first_match(ACFindFirstState *state, byte *haystack,
+ Uint *mpos, Uint *mlen, Uint *reductions)
+{
+ ACNode *q = state->q;
+ Uint i = state->pos;
+ ACNode *candidate = state->candidate, *r;
+ Uint len = state->len;
+ Uint candidate_start = state->candidate_start;
+ Uint rstart;
+ register Uint reds = *reductions;
+
+ while (i < len) {
+ if (--reds == 0) {
+ state->q = q;
+ state->pos = i;
+ state->len = len;
+ state->candidate = candidate;
+ state->candidate_start = candidate_start;
+ return AC_RESTART;
+ }
+
+ while (q->g[haystack[i]] == NULL && q->h != q) {
+ q = q->h;
+ }
+ if (q->g[haystack[i]] != NULL) {
+ q = q->g[haystack[i]];
+ }
+#ifdef HARDDEBUG
+ erts_printf("ch = %c, Current: %u\n", (int) haystack[i], (unsigned) q->id);
+#endif
+ ++i;
+ if (candidate != NULL && (i - q->d) > candidate_start) {
+ break;
+ }
+ if (q->final) {
+ r = q;
+ while (r->final < 0)
+ r = r->h;
+ rstart = i - r->d;
+ if (candidate == NULL || rstart < candidate_start ||
+ (rstart == candidate_start && candidate->d < q->d)) {
+ candidate_start = rstart;
+ candidate = r;
+ }
+ }
+ }
+ *reductions = reds;
+ if (!candidate) {
+ return AC_NOT_FOUND;
+ }
+#ifdef HARDDEBUG
+ dump_ac_node(candidate,0,'?');
+#endif
+ *mpos = candidate_start;
+ *mlen = candidate->d;
+ return AC_OK;
+}
+
+typedef struct _findall_data {
+ Uint pos;
+ Uint len;
+#ifdef HARDDEBUG
+ Uint id;
+#endif
+ Eterm epos;
+ Eterm elen;
+} FindallData;
+
+typedef struct {
+ ACNode *q;
+ Uint pos;
+ Uint len;
+ Uint m;
+ Uint allocated;
+ FindallData *out;
+} ACFindAllState;
+
+static void ac_init_find_all(ACFindAllState *state, ACTrie *act, Sint startpos, Uint len)
+{
+ state->q = act->root;
+ state->pos = startpos;
+ state->len = len;
+ state->m = 0;
+ state->allocated = 0;
+ state->out = NULL;
+}
+
+static void ac_restore_find_all(ACFindAllState *state, char *buff)
+{
+ memcpy(state,buff,sizeof(ACFindAllState));
+ state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * (state->allocated));
+ memcpy(state->out,buff+sizeof(ACFindAllState),sizeof(FindallData)*state->m);
+}
+
+static void ac_serialize_find_all(ACFindAllState *state, char *buff)
+{
+ memcpy(buff,state,sizeof(ACFindAllState));
+ memcpy(buff+sizeof(ACFindAllState),state->out,sizeof(FindallData)*state->m);
+}
+
+static void ac_clean_find_all(ACFindAllState *state)
+{
+ if (state->out != NULL) {
+ erts_free(ERTS_ALC_T_TMP, state->out);
+ }
+#ifdef HARDDEBUG
+ state->out = NULL;
+ state->allocated = 0;
+#endif
+}
+
+#define SIZEOF_AC_SERIALIZED_FIND_ALL_STATE(S) \
+ (sizeof(ACFindAllState)+(sizeof(FindallData)*(S).m))
+
+/*
+ * Differs from the find_first function in that it stores all matches; the
+ * values are returned only in the state.
+ */
+static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
+ Uint *reductions)
+{
+ ACNode *q = state->q;
+ Uint i = state->pos;
+ Uint rstart;
+ ACNode *r;
+ Uint len = state->len;
+ Uint m = state->m, save_m;
+ Uint allocated = state->allocated;
+ FindallData *out = state->out;
+ register Uint reds = *reductions;
+
+
+ while (i < len) {
+ if (--reds == 0) {
+ state->q = q;
+ state->pos = i;
+ state->len = len;
+ state->m = m;
+ state->allocated = allocated;
+ state->out = out;
+ return AC_RESTART;
+ }
+ while (q->g[haystack[i]] == NULL && q->h != q) {
+ q = q->h;
+ }
+ if (q->g[haystack[i]] != NULL) {
+ q = q->g[haystack[i]];
+ }
+ ++i;
+ if (q->final) {
+ r = q;
+ while (r->final) {
+ while (r->final < 0)
+ r = r->h;
+#ifdef HARDDEBUG
+ erts_printf("Trying to add %u\n",(unsigned) r->final);
+#endif
+ rstart = i - r->d;
+ save_m = m;
+ while (m > 0 && (out[m-1].pos > rstart ||
+ (out[m-1].pos == rstart &&
+ out[m-1].len < r->d))) {
+#ifdef HARDDEBUG
+ erts_printf("Popping %u\n",(unsigned) out[m-1].id);
+#endif
+ --m;
+ }
+#ifdef HARDDEBUG
+ if (m > 0) {
+ erts_printf("Pos %u\n",out[m-1].pos);
+ erts_printf("Len %u\n",out[m-1].len);
+ }
+ erts_printf("Rstart %u\n",rstart);
+#endif
+ if (m == 0 || out[m-1].pos + out[m-1].len <= rstart) {
+ if (m >= allocated) {
+ if (!allocated) {
+ allocated = 10;
+ out = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(FindallData) * allocated);
+ } else {
+ allocated *= 2;
+ out = erts_realloc(ERTS_ALC_T_TMP, out,
+ sizeof(FindallData) *
+ allocated);
+ }
+ }
+ out[m].pos = rstart;
+ out[m].len = r->d;
+#ifdef HARDDEBUG
+ out[m].id = r->final;
+#endif
+ ++m;
+#ifdef HARDDEBUG
+ erts_printf("Pushing %u\n",(unsigned) out[m-1].id);
+#endif
+ } else {
+#ifdef HARDDEBUG
+ erts_printf("Backtracking %d steps\n",save_m - m);
+#endif
+ m = save_m;
+ }
+ r = r->h;
+ }
+ }
+ }
+ *reductions = reds;
+ state->m = m;
+ state->out = out;
+ return (m == 0) ? AC_NOT_FOUND : AC_OK;
+}
+
+/*
+ * Boyer-Moore - implemented more or less exactly as
+ * Christian Charras and Thierry Lecroq describe it in "Handbook of
+ * Exact String-Matching Algorithms"
+ * http://www-igm.univ-mlv.fr/~lecroq/string/
+ */
+
+/*
+ * Call this to compute the badshift array
+ */
+static void compute_badshifts(BMData *bmd)
+{
+ Sint i;
+ Sint m = bmd->len;
+
+ for (i = 0; i < ALPHABET_SIZE; ++i) {
+ bmd->badshift[i] = m;
+ }
+ for (i = 0; i < m - 1; ++i) {
+ bmd->badshift[bmd->x[i]] = m - i - 1;
+ }
+}
+
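
For a concrete picture of what compute_badshifts() builds: with the pattern "abcab" (m = 5) every byte starts at shift 5, and the loop then records the distance from each pattern position (except the last) to the end, leaving a = 1, b = 3, c = 2. A standalone version of exactly that computation, for illustration only:

    /* Bad-character table for the pattern "abcab", mirroring
     * compute_badshifts() above. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const unsigned char pat[] = "abcab";
        long badshift[256];
        long m = (long) strlen((const char *) pat);
        long i;

        for (i = 0; i < 256; ++i)
            badshift[i] = m;               /* default: shift past the pattern */
        for (i = 0; i < m - 1; ++i)
            badshift[pat[i]] = m - i - 1;  /* distance to the last position   */

        printf("a=%ld b=%ld c=%ld other=%ld\n",
               badshift['a'], badshift['b'], badshift['c'], badshift['x']);
        /* prints: a=1 b=3 c=2 other=5 */
        return 0;
    }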
+/* Helper for "compute_goodshifts" */
+static void compute_suffixes(byte *x, Sint m, Sint *suffixes)
+{
+ int f,g,i;
+
+ suffixes[m - 1] = m;
+
+ f = 0; /* To avoid use before set warning */
+
+ g = m - 1;
+
+ for (i = m - 2; i >= 0; --i) {
+ if (i > g && suffixes[i + m - 1 - f] < i - g) {
+ suffixes[i] = suffixes[i + m - 1 - f];
+ } else {
+ if (i < g) {
+ g = i;
+ }
+ f = i;
+ while ( g >= 0 && x[g] == x[g + m - 1 - f] ) {
+ --g;
+ }
+ suffixes[i] = f - g;
+ }
+ }
+}
+
+/*
+ * Call this to compute the goodshift array
+ */
+static void compute_goodshifts(BMData *bmd)
+{
+ Sint m = bmd->len;
+ byte *x = bmd->x;
+ Sint i, j;
+ Sint *suffixes = erts_alloc(ERTS_ALC_T_TMP, m * sizeof(Sint));
+
+ compute_suffixes(x, m, suffixes);
+
+ for (i = 0; i < m; ++i) {
+ bmd->goodshift[i] = m;
+ }
+
+ j = 0;
+
+ for (i = m - 1; i >= -1; --i) {
+ if (i == -1 || suffixes[i] == i + 1) {
+ while (j < m - 1 - i) {
+ if (bmd->goodshift[j] == m) {
+ bmd->goodshift[j] = m - 1 - i;
+ }
+ ++j;
+ }
+ }
+ }
+ for (i = 0; i <= m - 2; ++i) {
+ bmd->goodshift[m - 1 - suffixes[i]] = m - 1 - i;
+ }
+ erts_free(ERTS_ALC_T_TMP, suffixes);
+}
+
+typedef struct {
+ Sint pos;
+ Sint len;
+} BMFindFirstState;
+
+#define BM_OK 0 /* used only for find_all */
+#define BM_NOT_FOUND -1
+#define BM_RESTART -2
+#define BM_LOOP_FACTOR 10 /* Should we have a higher value? */
+
+static void bm_init_find_first_match(BMFindFirstState *state, Sint startpos,
+ Uint len)
+{
+ state->pos = startpos;
+ state->len = (Sint) len;
+}
+
+
+static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd,
+ byte *haystack, Uint *reductions)
+{
+ Sint blen = bmd->len;
+ Sint len = state->len;
+ Sint *gs = bmd->goodshift;
+ Sint *bs = bmd->badshift;
+ byte *needle = bmd->x;
+ Sint i;
+ Sint j = state->pos;
+ register Uint reds = *reductions;
+
+ while (j <= len - blen) {
+ if (--reds == 0) {
+ state->pos = j;
+ return BM_RESTART;
+ }
+ for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
+ ;
+ if (i < 0) { /* found */
+ *reductions = reds;
+ return j;
+ }
+ j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);
+ }
+ *reductions = reds;
+ return BM_NOT_FOUND;
+}
+
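
To see how the shift tables drive the scan in bm_find_first_match() above, here is a simplified Horspool-style variant: it compares right to left like the routine above, but shifts only on the bad-character table keyed by the last window byte, and it omits the good-suffix table and the reduction budget. Standalone illustration only, not the ERTS routine:

    /* Simplified Horspool-style search using only a bad-character table. */
    #include <stdio.h>
    #include <string.h>

    static long find(const unsigned char *pat, long m,
                     const unsigned char *hay, long n)
    {
        long shift[256];
        long i, j;

        for (i = 0; i < 256; ++i)
            shift[i] = m;
        for (i = 0; i < m - 1; ++i)
            shift[pat[i]] = m - i - 1;

        j = 0;
        while (j <= n - m) {
            for (i = m - 1; i >= 0 && pat[i] == hay[i + j]; --i)
                ;
            if (i < 0)
                return j;                      /* match at offset j */
            j += shift[hay[j + m - 1]];        /* shift on the last window byte */
        }
        return -1;
    }

    int main(void)
    {
        const unsigned char *hay = (const unsigned char *) "say abcabcab here";
        const unsigned char *pat = (const unsigned char *) "abcab";
        printf("%ld\n", find(pat, 5, hay, (long) strlen((const char *) hay)));
        /* prints 4 */
        return 0;
    }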
+typedef struct {
+ Sint pos;
+ Sint len;
+ Uint m;
+ Uint allocated;
+ FindallData *out;
+} BMFindAllState;
+
+static void bm_init_find_all(BMFindAllState *state, Sint startpos, Uint len)
+{
+ state->pos = startpos;
+ state->len = (Sint) len;
+ state->m = 0;
+ state->allocated = 0;
+ state->out = NULL;
+}
+
+static void bm_restore_find_all(BMFindAllState *state, char *buff)
+{
+ memcpy(state,buff,sizeof(BMFindAllState));
+ state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) *
+ (state->allocated));
+ memcpy(state->out,buff+sizeof(BMFindAllState),
+ sizeof(FindallData)*state->m);
+}
+
+static void bm_serialize_find_all(BMFindAllState *state, char *buff)
+{
+ memcpy(buff,state,sizeof(BMFindAllState));
+ memcpy(buff+sizeof(BMFindAllState),state->out,
+ sizeof(FindallData)*state->m);
+}
+
+static void bm_clean_find_all(BMFindAllState *state)
+{
+ if (state->out != NULL) {
+ erts_free(ERTS_ALC_T_TMP, state->out);
+ }
+#ifdef HARDDEBUG
+ state->out = NULL;
+ state->allocated = 0;
+#endif
+}
+
+#define SIZEOF_BM_SERIALIZED_FIND_ALL_STATE(S) \
+ (sizeof(BMFindAllState)+(sizeof(FindallData)*(S).m))
+
+/*
+ * Differs from the find_first function in that it stores all matches; the
+ * values are returned only in the state.
+ */
+static Sint bm_find_all_non_overlapping(BMFindAllState *state,
+ BMData *bmd, byte *haystack,
+ Uint *reductions)
+{
+ Sint blen = bmd->len;
+ Sint len = state->len;
+ Sint *gs = bmd->goodshift;
+ Sint *bs = bmd->badshift;
+ byte *needle = bmd->x;
+ Sint i;
+ Sint j = state->pos;
+ Uint m = state->m;
+ Uint allocated = state->allocated;
+ FindallData *out = state->out;
+ register Uint reds = *reductions;
+
+ while (j <= len - blen) {
+ if (--reds == 0) {
+ state->pos = j;
+ state->m = m;
+ state->allocated = allocated;
+ state->out = out;
+ return BM_RESTART;
+ }
+ for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
+ ;
+ if (i < 0) { /* found */
+ if (m >= allocated) {
+ if (!allocated) {
+ allocated = 10;
+ out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * allocated);
+ } else {
+ allocated *= 2;
+ out = erts_realloc(ERTS_ALC_T_TMP, out,
+ sizeof(FindallData) * allocated);
+ }
+ }
+ out[m].pos = j;
+ out[m].len = blen;
+ ++m;
+ j += blen;
+ } else {
+ j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);
+ }
+ }
+ state->m = m;
+ state->out = out;
+ *reductions = reds;
+ return (m == 0) ? BM_NOT_FOUND : BM_OK;
+}
+
+/*
+ * Interface functions (i.e. "bif's")
+ */
+
+/*
+ * Search functionality interfaces
+ */
+
+static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)
+{
+ Eterm t, b, comp_term = NIL;
+ Uint characters;
+ Uint words;
+
+ characters = 0;
+ words = 0;
+
+ if (is_list(argument)) {
+ t = argument;
+ while (is_list(t)) {
+ b = CAR(list_val(t));
+ t = CDR(list_val(t));
+ if (!is_binary(b)) {
+ goto badarg;
+ }
+ if (binary_bitsize(b) != 0) {
+ goto badarg;
+ }
+ ++words;
+ characters += binary_size(b);
+ }
+ if (is_not_nil(t)) {
+ goto badarg;
+ }
+ if (words > 1) {
+ comp_term = argument;
+ } else {
+ comp_term = CAR(list_val(argument));
+ }
+ } else if (is_binary(argument)) {
+ if (binary_bitsize(argument) != 0) {
+ goto badarg;
+ }
+ words = 1;
+ comp_term = argument;
+ characters = binary_size(argument);
+ }
+
+ if (characters == 0) {
+ goto badarg;
+ }
+ ASSERT(words > 0);
+
+ if (words == 1) {
+ byte *bytes;
+ Uint bitoffs, bitsize;
+ byte *temp_alloc = NULL;
+ MyAllocator my;
+ BMData *bmd;
+ Binary *bin;
+
+ ERTS_GET_BINARY_BYTES(comp_term, bytes, bitoffs, bitsize);
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(comp_term, &temp_alloc);
+ }
+ bmd = create_bmdata(&my, bytes, characters, &bin);
+ compute_badshifts(bmd);
+ compute_goodshifts(bmd);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ CHECK_ALLOCATOR(my);
+ *tag = am_bm;
+ *binp = bin;
+ return 0;
+ } else {
+ ACTrie *act;
+ MyAllocator my;
+ ACNode **qbuff;
+ Binary *bin;
+
+ act = create_acdata(&my, characters, &qbuff, &bin);
+ t = comp_term;
+ while (is_list(t)) {
+ byte *bytes;
+ Uint bitoffs, bitsize;
+ byte *temp_alloc = NULL;
+ b = CAR(list_val(t));
+ t = CDR(list_val(t));
+ ERTS_GET_BINARY_BYTES(b, bytes, bitoffs, bitsize);
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(b, &temp_alloc);
+ }
+ ac_add_one_pattern(&my,act,bytes,binary_size(b));
+ erts_free_aligned_binary_bytes(temp_alloc);
+ }
+ ac_compute_failure_functions(act,qbuff);
+ CHECK_ALLOCATOR(my);
+ erts_free(ERTS_ALC_T_TMP,qbuff);
+ *tag = am_ac;
+ *binp = bin;
+ return 0;
+ }
+ badarg:
+ return -1;
+}
+
+BIF_RETTYPE binary_compile_pattern_1(BIF_ALIST_1)
+{
+ Binary *bin;
+ Eterm tag, ret;
+ Eterm *hp;
+
+ if (do_binary_match_compile(BIF_ARG_1,&tag,&bin)) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE+3);
+ ret = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ ret = TUPLE2(hp, tag, ret);
+ BIF_RET(ret);
+}
+
+#define DO_BIN_MATCH_OK 0
+#define DO_BIN_MATCH_BADARG -1
+#define DO_BIN_MATCH_RESTART -2
+
+static int do_binary_match(Process *p, Eterm subject, Uint hsstart, Uint hsend,
+ Eterm type, Binary *bin, Eterm state_term,
+ Eterm *res_term)
+{
+ byte *bytes;
+ Uint bitoffs, bitsize;
+ byte *temp_alloc = NULL;
+
+ ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto badarg;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc);
+ }
+ if (state_term != NIL) {
+ Eterm *ptr = big_val(state_term);
+ type = ptr[1];
+ }
+
+ if (type == am_bm) {
+ BMData *bm;
+ Sint pos;
+ Eterm ret;
+ Eterm *hp;
+ BMFindFirstState state;
+ Uint reds = get_reds(p, BM_LOOP_FACTOR);
+ Uint save_reds = reds;
+
+ bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin);
+#ifdef HARDDEBUG
+ dump_bm_data(bm);
+#endif
+ if (state_term == NIL) {
+ bm_init_find_first_match(&state, hsstart, hsend);
+ } else {
+ Eterm *ptr = big_val(state_term);
+ memcpy(&state,ptr+2,sizeof(state));
+ }
+#ifdef HARDDEBUG
+ erts_printf("(bm) state->pos = %ld, state->len = %lu\n",state.pos,
+ state.len);
+#endif
+ pos = bm_find_first_match(&state, bm, bytes, &reds);
+ if (pos == BM_NOT_FOUND) {
+ ret = am_nomatch;
+ } else if (pos == BM_RESTART) {
+ int x = (sizeof(BMFindFirstState) / sizeof(Eterm)) +
+ !!(sizeof(BMFindFirstState) % sizeof(Eterm));
+#ifdef HARDDEBUG
+ erts_printf("Trap bm!\n");
+#endif
+ hp = HAlloc(p,x+2);
+ hp[0] = make_pos_bignum_header(x+1);
+ hp[1] = type;
+ memcpy(hp+2,&state,sizeof(state));
+ *res_term = make_big(hp);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return DO_BIN_MATCH_RESTART;
+ } else {
+ Eterm erlen = erts_make_integer((Uint) bm->len, p);
+ ret = erts_make_integer(pos,p);
+ hp = HAlloc(p,3);
+ ret = TUPLE2(hp, ret, erlen);
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR);
+ *res_term = ret;
+ return DO_BIN_MATCH_OK;
+ } else if (type == am_ac) {
+ ACTrie *act;
+ Uint pos, rlen;
+ int acr;
+ ACFindFirstState state;
+ Eterm ret;
+ Eterm *hp;
+ Uint reds = get_reds(p, AC_LOOP_FACTOR);
+ Uint save_reds = reds;
+
+ act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin);
+#ifdef HARDDEBUG
+ dump_ac_trie(act);
+#endif
+ if (state_term == NIL) {
+ ac_init_find_first_match(&state, act, hsstart, hsend);
+ } else {
+ Eterm *ptr = big_val(state_term);
+ memcpy(&state,ptr+2,sizeof(state));
+ }
+ acr = ac_find_first_match(&state, bytes, &pos, &rlen, &reds);
+ if (acr == AC_NOT_FOUND) {
+ ret = am_nomatch;
+ } else if (acr == AC_RESTART) {
+ int x = (sizeof(state) / sizeof(Eterm)) +
+ !!(sizeof(state) % sizeof(Eterm));
+#ifdef HARDDEBUG
+ erts_printf("Trap ac!\n");
+#endif
+ hp = HAlloc(p,x+2);
+ hp[0] = make_pos_bignum_header(x+1);
+ hp[1] = type;
+ memcpy(hp+2,&state,sizeof(state));
+ *res_term = make_big(hp);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return DO_BIN_MATCH_RESTART;
+ } else {
+ Eterm epos = erts_make_integer(pos+hsstart,p);
+ Eterm erlen = erts_make_integer(rlen,p);
+ hp = HAlloc(p,3);
+ ret = TUPLE2(hp, epos, erlen);
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR);
+ *res_term = ret;
+ return DO_BIN_MATCH_OK;
+ }
+ badarg:
+ return DO_BIN_MATCH_BADARG;
+}
+
+static int do_binary_matches(Process *p, Eterm subject, Uint hsstart,
+ Uint hsend, Eterm type, Binary *bin,
+ Eterm state_term, Eterm *res_term)
+{
+ byte *bytes;
+ Uint bitoffs, bitsize;
+ byte *temp_alloc = NULL;
+
+ ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto badarg;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc);
+ }
+ if (state_term != NIL) {
+ Eterm *ptr = big_val(state_term);
+ type = ptr[1];
+ }
+
+ if (type == am_bm) {
+ BMData *bm;
+ Sint pos;
+ Eterm ret,tpl;
+ Eterm *hp;
+ BMFindAllState state;
+ Uint reds = get_reds(p, BM_LOOP_FACTOR);
+ Uint save_reds = reds;
+
+ bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin);
+#ifdef HARDDEBUG
+ dump_bm_data(bm);
+#endif
+ if (state_term == NIL) {
+ bm_init_find_all(&state, hsstart, hsend);
+ } else {
+ Eterm *ptr = big_val(state_term);
+ bm_restore_find_all(&state,(char *) (ptr+2));
+ }
+
+ pos = bm_find_all_non_overlapping(&state, bm, bytes, &reds);
+ if (pos == BM_NOT_FOUND) {
+ ret = NIL;
+ } else if (pos == BM_RESTART) {
+ int x =
+ (SIZEOF_BM_SERIALIZED_FIND_ALL_STATE(state) / sizeof(Eterm)) +
+ !!(SIZEOF_BM_SERIALIZED_FIND_ALL_STATE(state) % sizeof(Eterm));
+#ifdef HARDDEBUG
+ erts_printf("Trap bm!\n");
+#endif
+ hp = HAlloc(p,x+2);
+ hp[0] = make_pos_bignum_header(x+1);
+ hp[1] = type;
+ bm_serialize_find_all(&state, (char *) (hp+2));
+ *res_term = make_big(hp);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ bm_clean_find_all(&state);
+ return DO_BIN_MATCH_RESTART;
+ } else {
+ FindallData *fad = state.out;
+ int i;
+ for (i = 0; i < state.m; ++i) {
+ fad[i].epos = erts_make_integer(fad[i].pos,p);
+ fad[i].elen = erts_make_integer(fad[i].len,p);
+ }
+ hp = HAlloc(p,state.m * (3 + 2));
+ ret = NIL;
+ for (i = state.m - 1; i >= 0; --i) {
+ tpl = TUPLE2(hp, fad[i].epos, fad[i].elen);
+ hp +=3;
+ ret = CONS(hp,tpl,ret);
+ hp += 2;
+ }
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ bm_clean_find_all(&state);
+ BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR);
+ *res_term = ret;
+ return DO_BIN_MATCH_OK;
+ } else if (type == am_ac) {
+ ACTrie *act;
+ int acr;
+ ACFindAllState state;
+ Eterm ret,tpl;
+ Eterm *hp;
+ Uint reds = get_reds(p, AC_LOOP_FACTOR);
+ Uint save_reds = reds;
+
+ act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin);
+#ifdef HARDDEBUG
+ dump_ac_trie(act);
+#endif
+ if (state_term == NIL) {
+ ac_init_find_all(&state, act, hsstart, hsend);
+ } else {
+ Eterm *ptr = big_val(state_term);
+ ac_restore_find_all(&state,(char *) (ptr+2));
+ }
+ acr = ac_find_all_non_overlapping(&state, bytes, &reds);
+ if (acr == AC_NOT_FOUND) {
+ ret = NIL;
+ } else if (acr == AC_RESTART) {
+ int x =
+ (SIZEOF_AC_SERIALIZED_FIND_ALL_STATE(state) / sizeof(Eterm)) +
+ !!(SIZEOF_AC_SERIALIZED_FIND_ALL_STATE(state) % sizeof(Eterm));
+#ifdef HARDDEBUG
+ erts_printf("Trap ac!\n");
+#endif
+ hp = HAlloc(p,x+2);
+ hp[0] = make_pos_bignum_header(x+1);
+ hp[1] = type;
+ ac_serialize_find_all(&state, (char *) (hp+2));
+ *res_term = make_big(hp);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ ac_clean_find_all(&state);
+ return DO_BIN_MATCH_RESTART;
+ } else {
+ FindallData *fad = state.out;
+ int i;
+ for (i = 0; i < state.m; ++i) {
+ fad[i].epos = erts_make_integer(fad[i].pos,p);
+ fad[i].elen = erts_make_integer(fad[i].len,p);
+ }
+ hp = HAlloc(p,state.m * (3 + 2));
+ ret = NIL;
+ for (i = state.m - 1; i >= 0; --i) {
+ tpl = TUPLE2(hp, fad[i].epos, fad[i].elen);
+ hp +=3;
+ ret = CONS(hp,tpl,ret);
+ hp += 2;
+ }
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ ac_clean_find_all(&state);
+ BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR);
+ *res_term = ret;
+ return DO_BIN_MATCH_OK;
+ }
+ badarg:
+ return DO_BIN_MATCH_BADARG;
+}
+
+static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
+{
+ Eterm *tp;
+ Uint pos;
+ Sint len;
+ if (l == ((Eterm) 0) || l == NIL) {
+ /* Invalid term or NIL, we're called from binary_match(es)_2 or
+ have no options */
+ *posp = 0;
+ *endp = binary_size(bin);
+ return 0;
+ } else if (is_list(l)) {
+ while(is_list(l)) {
+ Eterm t = CAR(list_val(l));
+ Uint orig_size;
+ if (!is_tuple(t)) {
+ goto badarg;
+ }
+ tp = tuple_val(t);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+ if (tp[1] != am_scope || is_not_tuple(tp[2])) {
+ goto badarg;
+ }
+ tp = tuple_val(tp[2]);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+ if (!term_to_Uint(tp[1], &pos)) {
+ goto badarg;
+ }
+ if (!term_to_Sint(tp[2], &len)) {
+ goto badarg;
+ }
+ if (len < 0) {
+ Sint lentmp = -len;
+ /* overflow */
+ if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ goto badarg;
+ }
+ len = lentmp;
+ pos -= len;
+ }
+ /* overflow */
+ if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) {
+ goto badarg;
+ }
+ *endp = len + pos;
+ *posp = pos;
+ if ((orig_size = binary_size(bin)) < pos ||
+ orig_size < (*endp)) {
+ goto badarg;
+ }
+ l = CDR(list_val(l));
+ }
+ return 0;
+ } else {
+ badarg:
+ return 1;
+ }
+}
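
The negative-length handling above (a negative Len selects Len bytes ending at Pos) recurs throughout this file, for example in erts_binary_part and binary_bin_to_list_common further down. Below is a minimal standalone sketch of the same normalization and overflow checks; normalize_range, size_t and ptrdiff_t are illustrative stand-ins, not emulator types.

    #include <assert.h>
    #include <stddef.h>

    /* Sketch of the pos/len normalization used above: a negative len means
     * "len bytes ending at pos". Returns 0 on success, 1 on bad arguments. */
    static int normalize_range(size_t bin_size, size_t *pos, ptrdiff_t *len)
    {
        if (*len < 0) {
            ptrdiff_t lentmp = -*len;
            if (lentmp == *len || lentmp < 0)   /* negating len overflowed */
                return 1;
            *len = lentmp;
            if ((size_t) *len > *pos)           /* range would start before 0 */
                return 1;
            *pos -= (size_t) *len;
        }
        if (*pos + (size_t) *len < *pos)        /* pos + len wrapped around */
            return 1;
        if (bin_size < *pos || bin_size < *pos + (size_t) *len)
            return 1;
        return 0;
    }

    int main(void)
    {
        size_t pos = 2; ptrdiff_t len = -2;
        assert(normalize_range(10, &pos, &len) == 0 && pos == 0 && len == 2);
        pos = 1; len = -2;                      /* would start before offset 0 */
        assert(normalize_range(10, &pos, &len) == 1);
        return 0;
    }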
+
+static BIF_RETTYPE binary_match_trap(BIF_ALIST_3)
+{
+ int runres;
+ Eterm result;
+ Binary *bin = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ runres = do_binary_match(BIF_P,BIF_ARG_1,0,0,NIL,bin,BIF_ARG_2,&result);
+ if (runres == DO_BIN_MATCH_OK) {
+ BIF_RET(result);
+ } else {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP3(&binary_match_trap_export, BIF_P, BIF_ARG_1, result,
+ BIF_ARG_3);
+ }
+}
+
+static BIF_RETTYPE binary_matches_trap(BIF_ALIST_3)
+{
+ int runres;
+ Eterm result;
+ Binary *bin = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ runres = do_binary_matches(BIF_P,BIF_ARG_1,0,0,NIL,bin,BIF_ARG_2,&result);
+ if (runres == DO_BIN_MATCH_OK) {
+ BIF_RET(result);
+ } else {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP3(&binary_matches_trap_export, BIF_P, BIF_ARG_1, result,
+ BIF_ARG_3);
+ }
+}
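
do_binary_match/do_binary_matches together with the two trap BIFs above follow the emulator's reduction-bounded restart pattern: perform at most get_reds(p, LOOP_FACTOR) units of work, and if the search is unfinished, hand a serialized state back and BIF_TRAP so other processes can run. A self-contained sketch of that control flow follows; run_some_work, State, BUDGET and the trap counter are hypothetical names, not emulator API.

    #include <stdio.h>

    #define BUDGET 4                 /* pretend get_reds() returned this */

    typedef struct { int pos; int end; } State;

    enum { WORK_OK, WORK_RESTART };

    /* Do at most *redsp units of work; on exhaustion report RESTART so the
     * caller can "trap" and continue later, analogous to do_binary_match
     * returning DO_BIN_MATCH_RESTART with a serialized state term. */
    static int run_some_work(State *st, int *redsp)
    {
        while (st->pos < st->end) {
            if (*redsp == 0)
                return WORK_RESTART;
            st->pos++;               /* one unit of work */
            (*redsp)--;
        }
        return WORK_OK;
    }

    int main(void)
    {
        State st = { 0, 10 };
        int traps = 0;
        for (;;) {
            int reds = BUDGET;       /* fresh budget on every (re)schedule */
            if (run_some_work(&st, &reds) == WORK_OK)
                break;
            traps++;                 /* would be BIF_TRAPn(...) in the emulator */
        }
        printf("finished after %d traps\n", traps);  /* prints 2 */
        return 0;
    }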
+
+BIF_RETTYPE binary_match_3(BIF_ALIST_3)
+{
+ Uint hsstart;
+ Uint hsend;
+ Eterm *tp;
+ Eterm type;
+ Binary *bin;
+ Eterm bin_term = NIL;
+ int runres;
+ Eterm result;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto badarg;
+ }
+ if (parse_match_opts_list(BIF_ARG_3,BIF_ARG_1,&hsstart,&hsend)) {
+ goto badarg;
+ }
+ if (hsend == 0) {
+ BIF_RET(am_nomatch);
+ }
+ if (is_tuple(BIF_ARG_2)) {
+ tp = tuple_val(BIF_ARG_2);
+ if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
+ goto badarg;
+ }
+ if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
+ !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
+ goto badarg;
+ }
+ type = tp[1];
+ bin = ((ProcBin *) binary_val(tp[2]))->val;
+ if (type == am_bm &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
+ goto badarg;
+ }
+ if (type == am_ac &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) {
+ goto badarg;
+ }
+ bin_term = tp[2];
+ } else if (do_binary_match_compile(BIF_ARG_2,&type,&bin)) {
+ goto badarg;
+ }
+ runres = do_binary_match(BIF_P,BIF_ARG_1,hsstart,hsend,type,bin,NIL,&result);
+ if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
+ Eterm *hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ bin_term = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ } else if (bin_term == NIL) {
+ erts_bin_free(bin);
+ }
+ switch (runres) {
+ case DO_BIN_MATCH_OK:
+ BIF_RET(result);
+ case DO_BIN_MATCH_RESTART:
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP3(&binary_match_trap_export, BIF_P, BIF_ARG_1, result, bin_term);
+ default:
+ goto badarg;
+ }
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
+{
+ Uint hsstart, hsend;
+ Eterm *tp;
+ Eterm type;
+ Binary *bin;
+ Eterm bin_term = NIL;
+ int runres;
+ Eterm result;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto badarg;
+ }
+ if (parse_match_opts_list(BIF_ARG_3,BIF_ARG_1,&hsstart,&hsend)) {
+ goto badarg;
+ }
+ if (hsend == 0) {
+ BIF_RET(am_nomatch);
+ }
+ if (is_tuple(BIF_ARG_2)) {
+ tp = tuple_val(BIF_ARG_2);
+ if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
+ goto badarg;
+ }
+ if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
+ !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
+ goto badarg;
+ }
+ type = tp[1];
+ bin = ((ProcBin *) binary_val(tp[2]))->val;
+ if (type == am_bm &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
+ goto badarg;
+ }
+ if (type == am_ac &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) {
+ goto badarg;
+ }
+ bin_term = tp[2];
+ } else if (do_binary_match_compile(BIF_ARG_2,&type,&bin)) {
+ goto badarg;
+ }
+ runres = do_binary_matches(BIF_P,BIF_ARG_1,hsstart,hsend,type,bin,
+ NIL,&result);
+ if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
+ Eterm *hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ bin_term = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ } else if (bin_term == NIL) {
+ erts_bin_free(bin);
+ }
+ switch (runres) {
+ case DO_BIN_MATCH_OK:
+ BIF_RET(result);
+ case DO_BIN_MATCH_RESTART:
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP3(&binary_matches_trap_export, BIF_P, BIF_ARG_1, result,
+ bin_term);
+ default:
+ goto badarg;
+ }
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+
+BIF_RETTYPE binary_match_2(BIF_ALIST_2)
+{
+ return binary_match_3(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+}
+
+
+BIF_RETTYPE binary_matches_2(BIF_ALIST_2)
+{
+ return binary_matches_3(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+}
+
+
+BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
+{
+ Uint pos;
+ Sint len;
+ size_t orig_size;
+ Eterm orig;
+ Uint offset;
+ Uint bit_offset;
+ Uint bit_size;
+ Eterm* hp;
+ ErlSubBin* sb;
+
+ if (is_not_binary(binary)) {
+ goto badarg;
+ }
+ if (!term_to_Uint(epos, &pos)) {
+ goto badarg;
+ }
+ if (!term_to_Sint(elen, &len)) {
+ goto badarg;
+ }
+ if (len < 0) {
+ Sint lentmp = -len;
+ /* overflow */
+ if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ goto badarg;
+ }
+ len = lentmp;
+ if (len > pos) {
+ goto badarg;
+ }
+ pos -= len;
+ }
+ /* overflow */
+ if ((pos + len) < pos || (len > 0 && (pos + len) == pos)){
+ goto badarg;
+ }
+ if ((orig_size = binary_size(binary)) < pos ||
+ orig_size < (pos + len)) {
+ goto badarg;
+ }
+
+ hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+
+ ERTS_GET_REAL_BIN(binary, orig, offset, bit_offset, bit_size);
+ sb = (ErlSubBin *) hp;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = len;
+ sb->offs = offset + pos;
+ sb->orig = orig;
+ sb->bitoffs = bit_offset;
+ sb->bitsize = 0;
+ sb->is_writable = 0;
+
+ BIF_RET(make_binary(sb));
+
+ badarg:
+ BIF_ERROR(p, BADARG);
+}
+
+#define ERTS_NEED_GC(p, need) ((HEAP_LIMIT((p)) - HEAP_TOP((p))) <= (need))
+
+BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple)
+{
+ Uint pos;
+ Sint len;
+ size_t orig_size;
+ Eterm orig;
+ Uint offset;
+ Uint bit_offset;
+ Uint bit_size;
+ Eterm* hp;
+ ErlSubBin* sb;
+ Eterm binary;
+ Eterm *tp;
+ Eterm epos, elen;
+ int extra_args;
+
+
+ if (range_is_tuple) {
+ Eterm tpl = reg[live];
+ extra_args = 1;
+ if (is_not_tuple(tpl)) {
+ goto badarg;
+ }
+ tp = tuple_val(tpl);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+
+ epos = tp[1];
+ elen = tp[2];
+ } else {
+ extra_args = 2;
+ epos = reg[live-1];
+ elen = reg[live];
+ }
+ binary = reg[live-extra_args];
+
+ if (is_not_binary(binary)) {
+ goto badarg;
+ }
+ if (!term_to_Uint(epos, &pos)) {
+ goto badarg;
+ }
+ if (!term_to_Sint(elen, &len)) {
+ goto badarg;
+ }
+ if (len < 0) {
+ Sint lentmp = -len;
+ /* overflow */
+ if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ goto badarg;
+ }
+ len = lentmp;
+ if (len > pos) {
+ goto badarg;
+ }
+ pos -= len;
+ }
+ /* overflow */
+ if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) {
+ goto badarg;
+ }
+ if ((orig_size = binary_size(binary)) < pos ||
+ orig_size < (pos + len)) {
+ goto badarg;
+ }
+
+ if (ERTS_NEED_GC(p, ERL_SUB_BIN_SIZE)) {
+ /* The tuple or indices are not needed any more */
+ erts_garbage_collect(p, ERL_SUB_BIN_SIZE, reg, live+1-extra_args);
+ binary = reg[live-extra_args];
+ }
+
+ hp = p->htop;
+ p->htop += ERL_SUB_BIN_SIZE;
+
+ ERTS_GET_REAL_BIN(binary, orig, offset, bit_offset, bit_size);
+
+ sb = (ErlSubBin *) hp;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = len;
+ sb->offs = offset + pos;
+ sb->orig = orig;
+ sb->bitoffs = bit_offset;
+ sb->bitsize = 0;
+ sb->is_writable = 0;
+
+ BIF_RET(make_binary(sb));
+
+ badarg:
+ BIF_ERROR(p, BADARG);
+}
+/*************************************************************
+ * The actual guard BIFs are in erl_bif_guard.c,
+ * but the implementations of both the non-gc and the gc
+ * variants are here. Note that the functions are named so
+ * that they do not clash with the guard BIFs
+ * erlang:binary_part/2,3.
+ *************************************************************/
+
+BIF_RETTYPE binary_binary_part_3(BIF_ALIST_3)
+{
+ return erts_binary_part(BIF_P,BIF_ARG_1,BIF_ARG_2, BIF_ARG_3);
+}
+
+BIF_RETTYPE binary_binary_part_2(BIF_ALIST_2)
+{
+ Eterm *tp;
+ if (is_not_tuple(BIF_ARG_2)) {
+ goto badarg;
+ }
+ tp = tuple_val(BIF_ARG_2);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+ return erts_binary_part(BIF_P,BIF_ARG_1,tp[1], tp[2]);
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+typedef struct {
+ int type; /* CL_TYPE_XXX */
+ byte *temp_alloc; /* Used for erts_get/free_aligned, i.e. CL_TYPE_ALIGNED */
+ unsigned char *buff; /* Used for all types, malloced if CL_TYPE_HEAP */
+ Uint bufflen; /* The length (in bytes) of buffer */
+} CommonData;
+
+#define COMMON_LOOP_FACTOR 10
+
+#define DIRECTION_PREFIX 0
+#define DIRECTION_SUFFIX 1
+
+#define CL_OK 0
+#define CL_RESTART 1
+
+/* The type field in the above structure */
+#define CL_TYPE_EMPTY 0 /* End of array */
+#define CL_TYPE_HEAP 1
+#define CL_TYPE_ALIGNED 2
+#define CL_TYPE_COMMON 3 /* emasculated */
+#define CL_TYPE_HEAP_NOALLOC 4 /* Will need allocating when trapping */
+
+
+static int do_search_forward(CommonData *cd, Uint *posp, Uint *redsp)
+{
+ Uint pos = *posp;
+ Sint reds = (Sint) *redsp;
+ int i;
+ unsigned char current = 0;
+
+ for(;;) {
+ for(i = 0; cd[i].type != CL_TYPE_EMPTY; ++i) {
+ if (pos >= cd[i].bufflen) {
+ *posp = pos;
+ if (reds > 0) {
+ *redsp = (Uint) reds;
+ } else {
+ *redsp = 0;
+ }
+ return CL_OK;
+ }
+ if (i == 0) {
+ current = cd[i].buff[pos];
+ } else {
+ if (cd[i].buff[pos] != current) {
+ *posp = pos;
+ if (reds > 0) {
+ *redsp = (Uint) reds;
+ } else {
+ *redsp = 0;
+ }
+ return CL_OK;
+ }
+ }
+ --reds;
+ }
+ ++pos;
+ if (reds <= 0) {
+ *posp = pos;
+ *redsp = 0;
+ return CL_RESTART;
+ }
+ }
+}
+
+static int do_search_backward(CommonData *cd, Uint *posp, Uint *redsp)
+{
+ Uint pos = *posp;
+ Sint reds = (Sint) *redsp;
+ int i;
+ unsigned char current = 0;
+
+ for(;;) {
+ for(i = 0; cd[i].type != CL_TYPE_EMPTY; ++i) {
+ if (pos >= cd[i].bufflen) {
+ *posp = pos;
+ if (reds > 0) {
+ *redsp = (Uint) reds;
+ } else {
+ *redsp = 0;
+ }
+ return CL_OK;
+ }
+ if (i == 0) {
+ current = cd[i].buff[cd[i].bufflen - 1 - pos];
+ } else {
+ if (cd[i].buff[cd[i].bufflen - 1 - pos] != current) {
+ *posp = pos;
+ if (reds > 0) {
+ *redsp = (Uint) reds;
+ } else {
+ *redsp = 0;
+ }
+ return CL_OK;
+ }
+ }
+ --reds;
+ }
+ ++pos;
+ if (reds <= 0) {
+ *posp = pos;
+ *redsp = 0;
+ return CL_RESTART;
+ }
+ }
+}
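
do_search_forward walks all buffers column by column, comparing each byte against buffer 0 and stopping at the first difference or at the end of the shortest buffer; do_search_backward does the same from the tails. Below is a minimal sketch of the forward scan over plain byte arrays, without the reduction accounting; longest_common_prefix and its arguments are illustrative names only.

    #include <assert.h>
    #include <stddef.h>

    /* Length of the longest common prefix of n byte buffers, each of length
     * lens[i]; mirrors the column-by-column comparison in do_search_forward. */
    static size_t longest_common_prefix(const unsigned char **bufs,
                                        const size_t *lens, int n)
    {
        size_t pos = 0;
        for (;;) {
            int i;
            for (i = 0; i < n; i++) {
                if (pos >= lens[i] || bufs[i][pos] != bufs[0][pos])
                    return pos;
            }
            pos++;
        }
    }

    int main(void)
    {
        const unsigned char *bufs[] = { (const unsigned char *)"erlang",
                                        (const unsigned char *)"erts",
                                        (const unsigned char *)"error" };
        size_t lens[] = { 6, 4, 5 };
        assert(longest_common_prefix(bufs, lens, 3) == 2);  /* "er" */
        return 0;
    }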
+
+static void cleanup_common_data(Binary *bp)
+{
+ int i;
+ CommonData *cd;
+ cd = (CommonData *) ERTS_MAGIC_BIN_DATA(bp);
+ for (i=0;cd[i].type != CL_TYPE_EMPTY;++i) {
+ switch (cd[i].type) {
+ case CL_TYPE_HEAP:
+ erts_free(ERTS_ALC_T_BINARY_BUFFER,cd[i].buff);
+ break;
+ case CL_TYPE_ALIGNED:
+ erts_free_aligned_binary_bytes_extra(cd[i].temp_alloc, ERTS_ALC_T_BINARY_BUFFER);
+ break;
+ default:
+ break;
+ }
+ }
+ return;
+}
+
+static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
+{
+ Eterm l = list;
+ int n = 0;
+ Binary *mb;
+ CommonData *cd;
+ int i = 0;
+ Uint reds = get_reds(p, COMMON_LOOP_FACTOR);
+ Uint save_reds = reds;
+ int res;
+ Export *trapper;
+ Uint pos;
+ Eterm epos;
+ Eterm *hp;
+ Eterm bin_term;
+ Eterm b;
+
+ /* First just count the number of binaries */
+ while (is_list(l)) {
+ b = CAR(list_val(l));
+ if (!is_binary(b)) {
+ goto badarg;
+ }
+ ++n;
+ l = CDR(list_val(l));
+ }
+ if (l != NIL || n == 0) {
+ goto badarg;
+ }
+
+ /* Now create a buffer of the right size; creating a magic binary
+ right away is not too costly. */
+ mb = erts_create_magic_binary((n+1)*sizeof(CommonData),cleanup_common_data);
+ cd = (CommonData *) ERTS_MAGIC_BIN_DATA(mb);
+ l = list;
+ while (is_list(l)) {
+ Uint bitoffs;
+ Uint bitsize;
+ Uint offset;
+ Eterm real_bin;
+ ProcBin* pb;
+
+ cd[i].type = CL_TYPE_EMPTY;
+ b = CAR(list_val(l));
+ ERTS_GET_REAL_BIN(b, real_bin, offset, bitoffs, bitsize);
+ if (bitsize != 0) {
+ erts_bin_free(mb);
+ goto badarg;
+ }
+ cd[i].bufflen = binary_size(b);
+ cd[i].temp_alloc = NULL;
+ if (*(binary_val(real_bin)) == HEADER_PROC_BIN) {
+ pb = (ProcBin *) binary_val(real_bin);
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ cd[i].buff = erts_get_aligned_binary_bytes_extra(b, &(cd[i].temp_alloc),
+ ERTS_ALC_T_BINARY_BUFFER,0);
+ cd[i].type = (cd[i].temp_alloc != NULL) ? CL_TYPE_ALIGNED : CL_TYPE_COMMON;
+ } else { /* Heap binary */
+ cd[i].buff = erts_get_aligned_binary_bytes_extra(b, &(cd[i].temp_alloc),
+ ERTS_ALC_T_BINARY_BUFFER,0);
+ /* CL_TYPE_HEAP_NOALLOC means you have to copy if trapping */
+ cd[i].type = (cd[i].temp_alloc != NULL) ? CL_TYPE_ALIGNED : CL_TYPE_HEAP_NOALLOC;
+ }
+ ++i;
+ l = CDR(list_val(l));
+ }
+ cd[i].type = CL_TYPE_EMPTY;
+#if defined(DEBUG) || defined(VALGRIND)
+ cd[i].temp_alloc = NULL;
+ cd[i].buff = NULL;
+ cd[i].bufflen = 0;
+#endif
+
+ pos = 0;
+ if (direction == DIRECTION_PREFIX) {
+ trapper = &binary_longest_prefix_trap_export;
+ res = do_search_forward(cd,&pos,&reds);
+ } else {
+ ASSERT(direction == DIRECTION_SUFFIX);
+ trapper = &binary_longest_suffix_trap_export;
+ res = do_search_backward(cd,&pos,&reds);
+ }
+ epos = erts_make_integer(pos,p);
+ if (res == CL_OK) {
+ erts_bin_free(mb);
+ BUMP_REDS(p, (save_reds - reds) / COMMON_LOOP_FACTOR);
+ BIF_RET(epos);
+ } else {
+ ASSERT(res == CL_RESTART);
+ /* Copy all heap binaries that are not already copied (aligned) */
+ for(i = 0; i < n; ++i) {
+ if (cd[i].type == CL_TYPE_HEAP_NOALLOC) {
+ unsigned char *tmp = cd[i].buff;
+ cd[i].buff = erts_alloc(ERTS_ALC_T_BINARY_BUFFER, cd[i].bufflen);
+ memcpy(cd[i].buff,tmp,cd[i].bufflen);
+ cd[i].type = CL_TYPE_HEAP;
+ }
+ }
+ hp = HAlloc(p, PROC_BIN_SIZE);
+ bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(trapper, p, bin_term, epos,list);
+ }
+ badarg:
+ BIF_ERROR(p,BADARG);
+}
+
+static BIF_RETTYPE do_longest_common_trap(Process *p, Eterm bin_term, Eterm current_pos,
+ Eterm orig_list, int direction)
+{
+ Uint reds = get_reds(p, COMMON_LOOP_FACTOR);
+ Uint save_reds = reds;
+ Uint pos;
+ Binary *bin;
+ CommonData *cd;
+ int res;
+ Eterm epos;
+ Export *trapper;
+
+#ifdef DEBUG
+ int r;
+ r = term_to_Uint(current_pos, &pos);
+ ASSERT(r != 0);
+#else
+ term_to_Uint(current_pos, &pos);
+#endif
+ ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin_term));
+ bin = ((ProcBin *) binary_val(bin_term))->val;
+ cd = (CommonData *) ERTS_MAGIC_BIN_DATA(bin);
+ if (direction == DIRECTION_PREFIX) {
+ trapper = &binary_longest_prefix_trap_export;
+ res = do_search_forward(cd,&pos,&reds);
+ } else {
+ ASSERT(direction == DIRECTION_SUFFIX);
+ trapper = &binary_longest_suffix_trap_export;
+ res = do_search_backward(cd,&pos,&reds);
+ }
+ epos = erts_make_integer(pos,p);
+ if (res == CL_OK) {
+ BUMP_REDS(p, (save_reds - reds) / COMMON_LOOP_FACTOR);
+ BIF_RET(epos);
+ } else {
+ ASSERT(res == CL_RESTART);
+ /* Copy all heap binaries that are not already copied (aligned) */
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(trapper, p, bin_term, epos, orig_list);
+ }
+}
+
+static BIF_RETTYPE binary_longest_prefix_trap(BIF_ALIST_3)
+{
+ return do_longest_common_trap(BIF_P,BIF_ARG_1,BIF_ARG_2,BIF_ARG_3,DIRECTION_PREFIX);
+}
+
+static BIF_RETTYPE binary_longest_suffix_trap(BIF_ALIST_3)
+{
+ return do_longest_common_trap(BIF_P,BIF_ARG_1,BIF_ARG_2,BIF_ARG_3,DIRECTION_SUFFIX);
+}
+
+BIF_RETTYPE binary_longest_common_prefix_1(BIF_ALIST_1)
+{
+ return do_longest_common(BIF_P,BIF_ARG_1,DIRECTION_PREFIX);
+}
+
+BIF_RETTYPE binary_longest_common_suffix_1(BIF_ALIST_1)
+{
+ return do_longest_common(BIF_P,BIF_ARG_1,DIRECTION_SUFFIX);
+}
+
+BIF_RETTYPE binary_first_1(BIF_ALIST_1)
+{
+ byte* bytes;
+ Uint byte_size;
+ Uint bit_offs;
+ Uint bit_size;
+ Uint res;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto badarg;
+ }
+ byte_size = binary_size(BIF_ARG_1);
+ if (!byte_size) {
+ goto badarg;
+ }
+ ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
+ if (bit_size) {
+ goto badarg;
+ }
+ if (bit_offs) {
+ res = ((((Uint) bytes[0]) << bit_offs) | (((Uint) bytes[1]) >> (8-bit_offs))) & 0xFF;
+ } else {
+ res = bytes[0];
+ }
+ BIF_RET(make_small(res));
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+BIF_RETTYPE binary_last_1(BIF_ALIST_1)
+{
+ byte* bytes;
+ Uint byte_size;
+ Uint bit_offs;
+ Uint bit_size;
+ Uint res;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto badarg;
+ }
+ byte_size = binary_size(BIF_ARG_1);
+ if (!byte_size) {
+ goto badarg;
+ }
+ ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
+ if (bit_size) {
+ goto badarg;
+ }
+ if (bit_offs) {
+ res = ((((Uint) bytes[byte_size-1]) << bit_offs) |
+ (((Uint) bytes[byte_size]) >> (8-bit_offs))) & 0xFF;
+ } else {
+ res = bytes[byte_size-1];
+ }
+ BIF_RET(make_small(res));
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+BIF_RETTYPE binary_at_2(BIF_ALIST_2)
+{
+ byte* bytes;
+ Uint byte_size;
+ Uint bit_offs;
+ Uint bit_size;
+ Uint res;
+ Uint index;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto badarg;
+ }
+ byte_size = binary_size(BIF_ARG_1);
+ if (!byte_size) {
+ goto badarg;
+ }
+ if (!term_to_Uint(BIF_ARG_2, &index)) {
+ goto badarg;
+ }
+ if (index >= byte_size) {
+ goto badarg;
+ }
+ ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
+ if (bit_size) {
+ goto badarg;
+ }
+ if (bit_offs) {
+ res = ((((Uint) bytes[index]) << bit_offs) |
+ (((Uint) bytes[index+1]) >> (8-bit_offs))) & 0xFF;
+ } else {
+ res = bytes[index];
+ }
+ BIF_RET(make_small(res));
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+#define BIN_TO_LIST_OK 0
+#define BIN_TO_LIST_TRAP 1
+/* No badarg, checked before call */
+
+#define BIN_TO_LIST_LOOP_FACTOR 10
+
+static int do_bin_to_list(Process *p, byte *bytes, Uint bit_offs,
+ Uint start, Sint *lenp, Eterm *termp)
+{
+ Uint reds = get_reds(p, BIN_TO_LIST_LOOP_FACTOR); /* reds can never be 0 */
+ Uint len = *lenp;
+ Uint loops;
+ Eterm *hp;
+ Eterm term = *termp;
+ Uint n;
+
+ ASSERT(reds > 0);
+
+ loops = MIN(reds,len);
+
+ BUMP_REDS(p, loops / BIN_TO_LIST_LOOP_FACTOR);
+
+ hp = HAlloc(p,2*loops);
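+ /* Build the list back to front: bytes are taken from the end of the
+    remaining range and consed onto the result, so a partially built
+    list can be carried through a trap as the "sofar" argument. */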
+ while (loops--) {
+ --len;
+ if (bit_offs) {
+ n = ((((Uint) bytes[start+len]) << bit_offs) |
+ (((Uint) bytes[start+len+1]) >> (8-bit_offs))) & 0xFF;
+ } else {
+ n = bytes[start+len];
+ }
+
+ term = CONS(hp,make_small(n),term);
+ hp +=2;
+ }
+ *termp = term;
+ *lenp = len;
+ if (len) {
+ BUMP_ALL_REDS(p);
+ return BIN_TO_LIST_TRAP;
+ }
+ return BIN_TO_LIST_OK;
+}
+
+
+static BIF_RETTYPE do_trap_bin_to_list(Process *p, Eterm binary,
+ Uint start, Sint len, Eterm sofar)
+{
+ Eterm *hp;
+ Eterm blob;
+
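+ /* Pack the continuation (start, len) into a term shaped like a positive
+    bignum; binary_bin_to_list_trap recovers it with big_val(). */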
+ hp = HAlloc(p,3);
+ hp[0] = make_pos_bignum_header(2);
+ hp[1] = start;
+ hp[2] = (Uint) len;
+ blob = make_big(hp);
+ BIF_TRAP3(&binary_bin_to_list_trap_export, p, binary, blob, sofar);
+}
+
+static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3)
+{
+ Eterm *ptr;
+ Uint start;
+ Sint len;
+ byte *bytes;
+ Uint bit_offs;
+ Uint bit_size;
+ Eterm res = BIF_ARG_3;
+
+ ptr = big_val(BIF_ARG_2);
+ start = ptr[1];
+ len = (Sint) ptr[2];
+
+ ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
+ if (do_bin_to_list(BIF_P, bytes, bit_offs, start, &len, &res) ==
+ BIN_TO_LIST_OK) {
+ BIF_RET(res);
+ }
+ return do_trap_bin_to_list(BIF_P,BIF_ARG_1,start,len,res);
+}
+
+static BIF_RETTYPE binary_bin_to_list_common(Process *p,
+ Eterm bin,
+ Eterm epos,
+ Eterm elen)
+{
+ Uint pos;
+ Sint len;
+ size_t sz;
+ byte *bytes;
+ Uint bit_offs;
+ Uint bit_size;
+ Eterm res = NIL;
+
+ if (is_not_binary(bin)) {
+ goto badarg;
+ }
+ if (!term_to_Uint(epos, &pos)) {
+ goto badarg;
+ }
+ if (!term_to_Sint(elen, &len)) {
+ goto badarg;
+ }
+ if (len < 0) {
+ Sint lentmp = -len;
+ /* overflow */
+ if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ goto badarg;
+ }
+ len = lentmp;
+ if (len > pos) {
+ goto badarg;
+ }
+ pos -= len;
+ }
+ /* overflow */
+ if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) {
+ goto badarg;
+ }
+ sz = binary_size(bin);
+
+ if (pos+len > sz) {
+ goto badarg;
+ }
+ ERTS_GET_BINARY_BYTES(bin,bytes,bit_offs,bit_size);
+ if (bit_size != 0) {
+ goto badarg;
+ }
+ if(do_bin_to_list(p, bytes, bit_offs, pos, &len, &res) ==
+ BIN_TO_LIST_OK) {
+ BIF_RET(res);
+ }
+ return do_trap_bin_to_list(p,bin,pos,len,res);
+
+ badarg:
+ BIF_ERROR(p,BADARG);
+}
+
+BIF_RETTYPE binary_bin_to_list_3(BIF_ALIST_3)
+{
+ return binary_bin_to_list_common(BIF_P,BIF_ARG_1,BIF_ARG_2,BIF_ARG_3);
+}
+
+BIF_RETTYPE binary_bin_to_list_2(BIF_ALIST_2)
+{
+ Eterm *tp;
+
+ if (is_not_tuple(BIF_ARG_2)) {
+ goto badarg;
+ }
+ tp = tuple_val(BIF_ARG_2);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+ return binary_bin_to_list_common(BIF_P,BIF_ARG_1,tp[1],tp[2]);
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1)
+{
+ Uint pos = 0;
+ Sint len;
+ byte *bytes;
+ Uint bit_offs;
+ Uint bit_size;
+ Eterm res = NIL;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto badarg;
+ }
+ len = binary_size(BIF_ARG_1);
+ ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
+ if (bit_size != 0) {
+ goto badarg;
+ }
+ if(do_bin_to_list(BIF_P, bytes, bit_offs, pos, &len, &res) ==
+ BIN_TO_LIST_OK) {
+ BIF_RET(res);
+ }
+ return do_trap_bin_to_list(BIF_P,BIF_ARG_1,pos,len,res);
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+/*
+ * erlang:list_to_binary does not interrupt, and we really don't want
+ * an alternative implementation of the exact same thing, which is why
+ * we have decided to use the old non-restarting implementation for now.
+ * In practice there are seldom many iterations involved, so the problem
+ * of long-running BIFs is not that serious in this case. So, for now,
+ * the binary module also uses the old implementation.
+ */
+
+BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1)
+{
+ return erts_list_to_binary_bif(BIF_P, BIF_ARG_1);
+}
+
+typedef struct {
+ Uint times_left;
+ Uint source_size;
+ int source_type;
+ byte *source;
+ byte *temp_alloc;
+ Uint result_pos;
+ Binary *result;
+} CopyBinState;
+
+#define BC_TYPE_EMPTY 0
+#define BC_TYPE_HEAP 1
+#define BC_TYPE_ALIGNED 2 /* May or may not point to (emasculated) binary, temp_alloc field is set
+ so that erts_free_aligned_binary_bytes_extra can handle either */
+
+
+#define BINARY_COPY_LOOP_FACTOR 100
+
+static void cleanup_copy_bin_state(Binary *bp)
+{
+ CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(bp);
+ if (cbs->result != NULL) {
+ erts_bin_free(cbs->result);
+ cbs->result = NULL;
+ }
+ switch (cbs->source_type) {
+ case BC_TYPE_HEAP:
+ erts_free(ERTS_ALC_T_BINARY_BUFFER,cbs->source);
+ break;
+ case BC_TYPE_ALIGNED:
+ erts_free_aligned_binary_bytes_extra(cbs->temp_alloc,
+ ERTS_ALC_T_BINARY_BUFFER);
+ break;
+ default:
+ /* otherwise do nothing */
+ break;
+ }
+ cbs->source_type = BC_TYPE_EMPTY;
+}
+
+/*
+ * Binary *erts_bin_nrml_alloc(Uint size);
+ * Binary *erts_bin_realloc(Binary *bp, Uint size);
+ * void erts_bin_free(Binary *bp);
+ */
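
The comment above lists the reference-counted, off-heap Binary allocator that do_binary_copy and binary_copy_trap below rely on. As a rough illustration of that lifecycle only, here is a hypothetical stand-in; RefcBin, bin_alloc and bin_release are invented names, and the real code additionally sets result->flags and result->orig_size, calls erts_refc_init, and wraps the Binary in a ProcBin as shown further down.

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for an off-heap, reference-counted binary:
     * roughly the lifecycle managed by erts_bin_nrml_alloc()/erts_bin_free(),
     * minus all emulator specifics (allocators, atomics, ProcBin wrapping). */
    typedef struct {
        long refc;
        size_t orig_size;
        unsigned char orig_bytes[1];   /* data follows the header */
    } RefcBin;

    static RefcBin *bin_alloc(size_t size)
    {
        RefcBin *bp = malloc(offsetof(RefcBin, orig_bytes) + size);
        assert(bp != NULL);
        bp->refc = 1;                  /* creator holds the first reference */
        bp->orig_size = size;
        return bp;
    }

    static void bin_release(RefcBin *bp)
    {
        if (--bp->refc == 0)           /* last reference frees the data */
            free(bp);
    }

    int main(void)
    {
        RefcBin *bp = bin_alloc(5);
        memcpy(bp->orig_bytes, "hello", 5);
        bp->refc++;                    /* e.g. a second owner now points at it */
        bin_release(bp);               /* first owner done; data survives */
        assert(memcmp(bp->orig_bytes, "hello", 5) == 0);
        bin_release(bp);               /* last owner done; data freed */
        return 0;
    }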
+static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
+{
+ Uint n;
+ byte *bytes;
+ Uint bit_offs;
+ Uint bit_size;
+ size_t size;
+ Uint reds = get_reds(p, BINARY_COPY_LOOP_FACTOR);
+ Uint target_size;
+ byte *t;
+ Uint pos;
+
+
+ if (is_not_binary(bin)) {
+ goto badarg;
+ }
+ if (!term_to_Uint(en, &n)) {
+ goto badarg;
+ }
+ if (!n) {
+ Eterm res_term = erts_new_heap_binary(p,NULL,0,&bytes);
+ BIF_RET(res_term);
+ }
+ ERTS_GET_BINARY_BYTES(bin,bytes,bit_offs,bit_size);
+ if (bit_size != 0) {
+ goto badarg;
+ }
+
+ size = binary_size(bin);
+ target_size = size * n;
+
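+ /* If the copy cannot finish within the reduction budget, build the
+    result off-heap and trap; otherwise do the whole copy in one go. */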
+ if ((target_size - size) >= reds) {
+ Eterm orig;
+ Uint offset;
+ Uint bit_offset;
+ Uint bit_size;
+ CopyBinState *cbs;
+ Eterm *hp;
+ Eterm trap_term;
+ int i;
+
+ /* We will trap, set up the structure for trapping right away */
+ Binary *mb = erts_create_magic_binary(sizeof(CopyBinState),
+ cleanup_copy_bin_state);
+ cbs = ERTS_MAGIC_BIN_DATA(mb);
+
+ cbs->temp_alloc = NULL;
+ cbs->source = NULL;
+
+ ERTS_GET_REAL_BIN(bin, orig, offset, bit_offset, bit_size);
+ if (*(binary_val(orig)) == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin *) binary_val(orig);
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ cbs->source =
+ erts_get_aligned_binary_bytes_extra(bin,
+ &(cbs->temp_alloc),
+ ERTS_ALC_T_BINARY_BUFFER,
+ 0);
+ cbs->source_type = BC_TYPE_ALIGNED;
+ } else { /* Heap binary */
+ cbs->source =
+ erts_get_aligned_binary_bytes_extra(bin,
+ &(cbs->temp_alloc),
+ ERTS_ALC_T_BINARY_BUFFER,
+ 0);
+ if (!(cbs->temp_alloc)) { /* alignment not needed, need to copy */
+ byte *tmp = erts_alloc(ERTS_ALC_T_BINARY_BUFFER,size);
+ memcpy(tmp,cbs->source,size);
+ cbs->source = tmp;
+ cbs->source_type = BC_TYPE_HEAP;
+ } else {
+ cbs->source_type = BC_TYPE_ALIGNED;
+ }
+ }
+ cbs->result = erts_bin_nrml_alloc(target_size); /* Always offheap
+ if trapping */
+ cbs->result->flags = 0;
+ cbs->result->orig_size = target_size;
+ erts_refc_init(&(cbs->result->refc), 1);
+ t = (byte *) cbs->result->orig_bytes; /* No offset or anything */
+ pos = 0;
+ i = 0;
+ while (pos < reds) {
+ memcpy(t+pos,cbs->source, size);
+ pos += size;
+ ++i;
+ }
+ cbs->source_size = size;
+ cbs->result_pos = pos;
+ cbs->times_left = n-i;
+ hp = HAlloc(p,PROC_BIN_SIZE);
+ trap_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ BUMP_ALL_REDS(p);
+ BIF_TRAP2(&binary_copy_trap_export, p, bin, trap_term);
+ } else {
+ Eterm res_term;
+ byte *temp_alloc = NULL;
+ byte *source =
+ erts_get_aligned_binary_bytes(bin,
+ &temp_alloc);
+ if (target_size <= ERL_ONHEAP_BIN_LIMIT) {
+ res_term = erts_new_heap_binary(p,NULL,target_size,&t);
+ } else {
+ res_term = erts_new_mso_binary(p,NULL,target_size);
+ t = ((ProcBin *) binary_val(res_term))->bytes;
+ }
+ pos = 0;
+ while (pos < target_size) {
+ memcpy(t+pos,source, size);
+ pos += size;
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BUMP_REDS(p,pos / BINARY_COPY_LOOP_FACTOR);
+ BIF_RET(res_term);
+ }
+ badarg:
+ BIF_ERROR(p,BADARG);
+}
+
+BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
+{
+ Uint n;
+ size_t size;
+ Uint reds = get_reds(BIF_P, BINARY_COPY_LOOP_FACTOR);
+ byte *t;
+ Uint pos;
+ Binary *mb = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(mb);
+ Uint opos;
+
+ /* swapout... */
+ n = cbs->times_left;
+ size = cbs->source_size;
+ opos = pos = cbs->result_pos;
+ t = (byte *) cbs->result->orig_bytes; /* "well behaved" binary */
+ if ((n-1) * size >= reds) {
+ Uint i = 0;
+ while ((pos - opos) < reds) {
+ memcpy(t+pos,cbs->source, size);
+ pos += size;
+ ++i;
+ }
+ cbs->result_pos = pos;
+ cbs->times_left -= i;
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&binary_copy_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ } else {
+ Binary *save;
+ ProcBin* pb;
+ Uint target_size = cbs->result->orig_size;
+ while (pos < target_size) {
+ memcpy(t+pos,cbs->source, size);
+ pos += size;
+ }
+ save = cbs->result;
+ cbs->result = NULL;
+ cleanup_copy_bin_state(mb); /* now cbs is dead */
+ pb = (ProcBin *) HAlloc(BIF_P, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = target_size;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
+ pb->val = save;
+ pb->bytes = t;
+ pb->flags = 0;
+
+ OH_OVERHEAD(&(MSO(BIF_P)), target_size / sizeof(Eterm));
+ BUMP_REDS(BIF_P,(pos - opos) / BINARY_COPY_LOOP_FACTOR);
+
+ BIF_RET(make_binary(pb));
+ }
+}
+
+
+BIF_RETTYPE binary_copy_1(BIF_ALIST_1)
+{
+ return do_binary_copy(BIF_P,BIF_ARG_1,make_small(1));
+}
+
+BIF_RETTYPE binary_copy_2(BIF_ALIST_2)
+{
+ return do_binary_copy(BIF_P,BIF_ARG_1,BIF_ARG_2);
+}
+
+BIF_RETTYPE binary_referenced_byte_size_1(BIF_ALIST_1)
+{
+ ErlSubBin *sb;
+ ProcBin *pb;
+ Eterm res;
+ Eterm bin = BIF_ARG_1;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ sb = (ErlSubBin *) binary_val(bin);
+ if (sb->thing_word == HEADER_SUB_BIN) {
+ bin = sb->orig;
+ }
+ pb = (ProcBin *) binary_val(bin);
+ if (pb->thing_word == HEADER_PROC_BIN) {
+ /* XXX:PaN - Halfword - orig_size is a long, we should handle that */
+ res = erts_make_integer((Uint) pb->val->orig_size, BIF_P);
+ } else { /* heap binary */
+ res = erts_make_integer((Uint) ((ErlHeapBin *) pb)->size, BIF_P);
+ }
+ BIF_RET(res);
+}
+
+#define END_BIG 0
+#define END_SMALL 1
+
+#ifdef WORDS_BIGENDIAN
+#define END_NATIVE END_BIG
+#else
+#define END_NATIVE END_SMALL
+#endif
+
+static int get_need(Uint u) {
+#if defined(ARCH_64) && !HALFWORD_HEAP
+ if (u > 0xFFFFFFFFUL) {
+ if (u > 0xFFFFFFFFFFFFUL) {
+ if (u > 0xFFFFFFFFFFFFFFUL) {
+ return 8;
+ }
+ return 7;
+ }
+ if (u > 0xFFFFFFFFFFUL) {
+ return 6;
+ }
+ return 5;
+ }
+#endif
+ if (u > 0xFFFFUL) {
+ if (u > 0xFFFFFFUL) {
+ return 4;
+ }
+ return 3;
+ }
+ if (u > 0xFFUL) {
+ return 2;
+ }
+ return 1;
+}
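
get_need returns the minimal number of bytes needed to represent an unsigned word, which do_encode_unsigned then emits in big- or little-endian byte order. A small sketch that checks the same computation with a loop instead of the unrolled comparisons (the 64-bit branch is left out; need_bytes is an illustrative name):

    #include <assert.h>

    /* Minimal byte count needed for an unsigned value: a loop-based
     * equivalent of the unrolled comparisons in get_need(). */
    static int need_bytes(unsigned long u)
    {
        int n = 1;
        while (u > 0xFFUL) {
            u >>= 8;
            n++;
        }
        return n;
    }

    int main(void)
    {
        assert(need_bytes(0x00UL) == 1);
        assert(need_bytes(0xFFUL) == 1);
        assert(need_bytes(0x100UL) == 2);
        assert(need_bytes(0xFFFFFFUL) == 3);
        assert(need_bytes(0x1000000UL) == 4);
        return 0;
    }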
+
+static BIF_RETTYPE do_encode_unsigned(Process *p, Eterm uns, Eterm endianess)
+{
+ Eterm res;
+ if ((is_not_small(uns) && is_not_big(uns)) || is_not_atom(endianess) ||
+ (endianess != am_big && endianess != am_little)) {
+ goto badarg;
+ }
+ if (is_small(uns)) {
+ Sint x = signed_val(uns);
+ Uint u;
+ int n,i;
+ byte *b;
+
+ if (x < 0) {
+ goto badarg;
+ }
+
+ u = (Uint) x;
+ n = get_need(u);
+ ASSERT(n <= ERL_ONHEAP_BIN_LIMIT);
+ res = erts_new_heap_binary(p, NULL, n, &b);
+ if (endianess == am_big) {
+ for(i=n-1;i>=0;--i) {
+ b[i] = u & 0xFF;
+ u >>= 8;
+ }
+ } else {
+ for(i=0;i<n;++i) {
+ b[i] = u & 0xFF;
+ u >>= 8;
+ }
+ }
+ BIF_RET(res);
+ } else {
+ /* Big */
+ Eterm *bigp = big_val(uns);
+ Uint n;
+ dsize_t num_parts = BIG_SIZE(bigp);
+ Eterm res;
+ byte *b;
+ ErtsDigit d;
+
+ if(BIG_SIGN(bigp)) {
+ goto badarg;
+ }
+ n = (num_parts-1)*sizeof(ErtsDigit)+get_need(BIG_DIGIT(bigp,(num_parts-1)));
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
+ res = erts_new_heap_binary(p,NULL,n,&b);
+ } else {
+ res = erts_new_mso_binary(p,NULL,n);
+ b = ((ProcBin *) binary_val(res))->bytes;
+ }
+
+ if (endianess == am_big) {
+ Sint i,j;
+ j = 0;
+ d = BIG_DIGIT(bigp,0);
+ for (i=n-1;i>=0;--i) {
+ b[i] = d & 0xFF;
+ if (!((++j) % sizeof(ErtsDigit))) {
+ d = BIG_DIGIT(bigp,j / sizeof(ErtsDigit));
+ } else {
+ d >>= 8;
+ }
+ }
+ } else {
+ Sint i,j;
+ j = 0;
+ d = BIG_DIGIT(bigp,0);
+ for (i=0;i<n;++i) {
+ b[i] = d & 0xFF;
+ if (!((++j) % sizeof(ErtsDigit))) {
+ d = BIG_DIGIT(bigp,j / sizeof(ErtsDigit));
+ } else {
+ d >>= 8;
+ }
+ }
+
+ }
+ BIF_RET(res);
+ }
+ badarg:
+ BIF_ERROR(p,BADARG);
+}
+
+static BIF_RETTYPE do_decode_unsigned(Process *p, Eterm uns, Eterm endianess)
+{
+ byte *bytes;
+ Uint bitoffs, bitsize;
+ Uint size;
+ Eterm res;
+
+ if (is_not_binary(uns) || is_not_atom(endianess) ||
+ (endianess != am_big && endianess != am_little)) {
+ goto badarg;
+ }
+ ERTS_GET_BINARY_BYTES(uns, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto badarg;
+ }
+ /* align while rolling */
+ size = binary_size(uns);
+ if (bitoffs) {
+ if (endianess == am_big) {
+ while (size && (((((Uint) bytes[0]) << bitoffs) |
+ (((Uint) bytes[1]) >> (8-bitoffs))) & 0xFF) == 0) {
+ ++bytes;
+ --size;
+ }
+ } else {
+ while(size &&
+ (((((Uint) bytes[size-1]) << bitoffs) |
+ (((Uint) bytes[size]) >> (8-bitoffs))) & 0xFF) == 0) {
+ --size;
+ }
+ }
+ } else {
+ if (endianess == am_big) {
+ while (size && *bytes == 0) {
+ ++bytes;
+ --size;
+ }
+ } else {
+ while(size && bytes[size-1] == 0) {
+ --size;
+ }
+ }
+ }
+ if (!size) {
+ BIF_RET(make_small(0));
+ }
+
+ if (size <= sizeof(Uint)) {
+ Uint u = 0;
+ Sint i;
+
+ if (endianess == am_big) {
+ if (bitoffs) {
+ for(i=0;i<size;++i) {
+ u <<=8;
+ u |= (((((Uint) bytes[i]) << bitoffs) |
+ (((Uint) bytes[i+1]) >> (8-bitoffs))) & 0xFF);
+ }
+ } else {
+ for(i=0;i<size;++i) {
+ u <<=8;
+ u |= bytes[i];
+ }
+ }
+ } else {
+
+ if (bitoffs) {
+ for(i=size-1;i>=0;--i) {
+ u <<=8;
+ u |= (((((Uint) bytes[i]) << bitoffs) |
+ (((Uint) bytes[i+1]) >> (8-bitoffs))) & 0xFF);
+ }
+ } else {
+ for(i=size-1;i>=0;--i) {
+ u <<=8;
+ u |= bytes[i];
+ }
+ }
+ }
+ res = erts_make_integer(u,p);
+ BIF_RET(res);
+ } else {
+ /* Assume big, as we stripped away all zeroes from the MSB part of the binary */
+ dsize_t num_parts = size / sizeof(ErtsDigit) + !!(size % sizeof(ErtsDigit));
+ Eterm *bigp;
+
+ bigp = HAlloc(p, BIG_NEED_SIZE(num_parts));
+ *bigp = make_pos_bignum_header(num_parts);
+ res = make_big(bigp);
+
+ if (endianess == am_big) {
+ Sint i,j;
+ ErtsDigit *d;
+ j = size;
+ d = &(BIG_DIGIT(bigp,num_parts - 1));
+ *d = 0;
+ i = 0;
+ if(bitoffs) {
+ for (;;){
+ (*d) <<= 8;
+ (*d) |= (((((Uint) bytes[i]) << bitoffs) |
+ (((Uint) bytes[i+1]) >> (8-bitoffs))) & 0xFF);
+ if (++i >= size) {
+ break;
+ }
+ if (!(--j % sizeof(ErtsDigit))) {
+ --d;
+ *d = 0;
+ }
+ }
+ } else {
+ for (;;){
+ (*d) <<= 8;
+ (*d) |= bytes[i];
+ if (++i >= size) {
+ break;
+ }
+ if (!(--j % sizeof(ErtsDigit))) {
+ --d;
+ *d = 0;
+ }
+ }
+ }
+ } else {
+ Sint i,j;
+ ErtsDigit *d;
+ j = size;
+ d = &(BIG_DIGIT(bigp,num_parts - 1));
+ *d = 0;
+ i = size-1;
+ if (bitoffs) {
+ for (;;){
+ (*d) <<= 8;
+ (*d) |= (((((Uint) bytes[i]) << bitoffs) |
+ (((Uint) bytes[i+1]) >> (8-bitoffs))) & 0xFF);
+ if (--i < 0) {
+ break;
+ }
+ if (!(--j % sizeof(ErtsDigit))) {
+ --d;
+ *d = 0;
+ }
+ }
+ } else {
+ for (;;){
+ (*d) <<= 8;
+ (*d) |= bytes[i];
+ if (--i < 0) {
+ break;
+ }
+ if (!(--j % sizeof(ErtsDigit))) {
+ --d;
+ *d = 0;
+ }
+ }
+ }
+ }
+ BIF_RET(res);
+ }
+ badarg:
+ BIF_ERROR(p,BADARG);
+}
+
+BIF_RETTYPE binary_encode_unsigned_1(BIF_ALIST_1)
+{
+ return do_encode_unsigned(BIF_P,BIF_ARG_1,am_big);
+}
+
+BIF_RETTYPE binary_encode_unsigned_2(BIF_ALIST_2)
+{
+ return do_encode_unsigned(BIF_P,BIF_ARG_1,BIF_ARG_2);
+}
+
+BIF_RETTYPE binary_decode_unsigned_1(BIF_ALIST_1)
+{
+ return do_decode_unsigned(BIF_P,BIF_ARG_1,am_big);
+}
+
+BIF_RETTYPE binary_decode_unsigned_2(BIF_ALIST_2)
+{
+ return do_decode_unsigned(BIF_P,BIF_ARG_1,BIF_ARG_2);
+}
+
+/*
+ * Hard debug functions (dump) for the search structures
+ */
+
+#ifdef HARDDEBUG
+static void dump_bm_data(BMData *bm)
+{
+ int i,j;
+ erts_printf("Dumping Boyer-Moore structure.\n");
+ erts_printf("=============================\n");
+ erts_printf("Searchstring [%ld]:\n", bm->len);
+ erts_printf("<<");
+ for (i = 0; i < bm->len; ++i) {
+ if (i > 0) {
+ erts_printf(", ");
+ }
+ erts_printf("%d", (int) bm->x[i]);
+ if (bm->x[i] >= 'A') {
+ erts_printf(" ($%c)",(char) bm->x[i]);
+ }
+ }
+ erts_printf(">>\n");
+ erts_printf("GoodShift array:\n");
+ for (i = 0; i < bm->len; ++i) {
+ erts_printf("GoodShift[%d]: %ld\n", i, bm->goodshift[i]);
+ }
+ erts_printf("BadShift array:\n");
+ j = 0;
+ for (i = 0; i < ALPHABET_SIZE; i += j) {
+ for (j = 0; i + j < ALPHABET_SIZE && j < 6; ++j) {
+ erts_printf("BS[%03d]:%02ld, ", i+j, bm->badshift[i+j]);
+ }
+ erts_printf("\n");
+ }
+}
+
+static void dump_ac_node(ACNode *node, int indent, int ch) {
+ int i;
+ char *spaces = erts_alloc(ERTS_ALC_T_TMP, 10 * indent + 1);
+ memset(spaces,' ',10*indent);
+ spaces[10*indent] = '\0';
+ erts_printf("%s-> %c\n",spaces,ch);
+ erts_printf("%sId: %u\n",spaces,(unsigned) node->id);
+ erts_printf("%sD: %u\n",spaces,(unsigned)node->d);
+ erts_printf("%sFinal: %d\n",spaces,(int)node->final);
+ erts_printf("%sFail: %u\n",spaces,(unsigned)node->h->id);
+ erts_free(ERTS_ALC_T_TMP,spaces);
+ for(i=0;i<ALPHABET_SIZE;++i) {
+ if (node->g[i] != NULL && node->g[i] != node) {
+ dump_ac_node(node->g[i],indent+1,i);
+ }
+ }
+}
+
+
+static void dump_ac_trie(ACTrie *act)
+{
+ erts_printf("Aho Corasick Trie dump.\n");
+ erts_printf("=======================\n");
+ erts_printf("Node counter: %u\n", (unsigned) act->idc);
+ erts_printf("Searchstring counter: %u\n", (unsigned) act->counter);
+ erts_printf("Trie:\n");
+ dump_ac_node(act->root, 0, '0');
+ return;
+}
+#endif
diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c
index 445ba00ca7..06b7ffdf32 100644
--- a/erts/emulator/beam/erl_bif_chksum.c
+++ b/erts/emulator/beam/erl_bif_chksum.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2008-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2008-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -49,9 +49,9 @@ void erts_init_bif_chksum(void)
chksum_md5_2_exp.code[1] = am_atom_put("md5_trap",8);
chksum_md5_2_exp.code[2] = 2;
chksum_md5_2_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
chksum_md5_2_exp.code[4] =
- (Eterm) &md5_2;
+ (BeamInstr) &md5_2;
}
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 9d5f0d9c02..2c2e283f65 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1646,7 +1646,8 @@ static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
if (save_name != NULL) {
*save_name = mkatom(q->name);
}
- /* XXX:PaN Future locking problems? Don't dare to let go of the diver_list lock here!*/
+ /* Future locking problems? Don't dare to let go of the
+ driver_list lock here! */
if (q->finish) {
int fpe_was_unmasked = erts_block_fpe();
(*(q->finish))();
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index 440b0b4f14..01e6977a2c 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -314,6 +314,30 @@ double_to_integer(Process* p, double x)
return res;
}
+/********************************************************************************
+ * binary_part guards. The actual implementation is in erl_bif_binary.c
+ ********************************************************************************/
+BIF_RETTYPE binary_part_3(BIF_ALIST_3)
+{
+ return erts_binary_part(BIF_P,BIF_ARG_1,BIF_ARG_2, BIF_ARG_3);
+}
+
+BIF_RETTYPE binary_part_2(BIF_ALIST_2)
+{
+ Eterm *tp;
+ if (is_not_tuple(BIF_ARG_2)) {
+ goto badarg;
+ }
+ tp = tuple_val(BIF_ARG_2);
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
+ return erts_binary_part(BIF_P,BIF_ARG_1,tp[1], tp[2]);
+ badarg:
+ BIF_ERROR(BIF_P,BADARG);
+}
+
+
/*
* The following code is used when a guard that may build on the
* heap is called directly. They must not use HAlloc(), but must
@@ -630,3 +654,16 @@ gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live)
}
return res;
}
+
+/********************************************************************************
+ * binary_part guards. The actual implementation is in erl_bif_binary.c
+ ********************************************************************************/
+Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live)
+{
+ return erts_gc_binary_part(p,reg,live,0);
+}
+
+Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live)
+{
+ return erts_gc_binary_part(p,reg,live,1);
+}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index a34d400ed8..dace5b9297 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -38,9 +38,6 @@
#include "erl_instrument.h"
#include "dist.h"
#include "erl_gc.h"
-#ifdef ELIB_ALLOC_IS_CLIB
-#include "elib_stat.h"
-#endif
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -59,12 +56,19 @@
/* Keep erts_system_version as a global variable for easy access from a core */
static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
" (erts-" ERLANG_VERSION ")"
+#if !HEAP_ON_C_STACK && !HALFWORD_HEAP
+ " [no-c-stack-objects]"
+#endif
#ifndef OTP_RELEASE
" [source]"
#endif
#ifdef ARCH_64
+#if HALFWORD_HEAP
+ " [64-bit halfword]"
+#else
" [64-bit]"
#endif
+#endif
#ifdef ERTS_SMP
" [smp:%bpu:%bpu]"
#endif
@@ -115,22 +119,26 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#endif
static Eterm
-bld_bin_list(Uint **hpp, Uint *szp, ProcBin* pb)
+bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
+ struct erl_off_heap_header* ohh;
Eterm res = NIL;
Eterm tuple;
- for (; pb; pb = pb->next) {
- Eterm val = erts_bld_uint(hpp, szp, (Uint) pb->val);
- Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
-
- if (szp)
- *szp += 4+2;
- if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
- tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
- res = CONS(*hpp + 4, tuple, res);
- *hpp += 4+2;
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (ohh->thing_word == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin*) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) pb->val);
+ Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
}
}
return res;
@@ -169,10 +177,10 @@ static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
Eterm tup;
Eterm r = (IS_CONST(mon->ref)
? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->ref));
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
Eterm p = (IS_CONST(mon->pid)
? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->pid));
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
pmlc->hp += 6;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
@@ -233,7 +241,7 @@ static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
Eterm old_res, targets = NIL;
Eterm p = (IS_CONST(lnk->pid)
? lnk->pid
- : STORE_NC(&(pllc->hp), &MSO(pllc->p).externals, lnk->pid));
+ : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
if (lnk->type == LINK_NODE) {
targets = make_small(ERTS_LINK_REFC(lnk));
} else if (ERTS_LINK_ROOT(lnk) != NULL) {
@@ -624,12 +632,18 @@ static Eterm pi_1_keys[] = {
#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))
static Eterm pi_1_keys_list;
-static Uint pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
+#if HEAP_ON_C_STACK
+static Eterm pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
+#endif
static void
process_info_init(void)
{
+#if HEAP_ON_C_STACK
Eterm *hp = &pi_1_keys_list_heap[0];
+#else
+ Eterm *hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM,sizeof(Eterm)*2*ERTS_PI_1_NO_OF_KEYS);
+#endif
int i;
pi_1_keys_list = NIL;
@@ -998,7 +1012,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3);
res = am_undefined;
} else {
- Eterm* current;
+ BeamInstr* current;
if (rp->current[0] == am_erlang &&
rp->current[1] == am_process_info &&
@@ -1127,9 +1141,9 @@ process_info_aux(Process *BIF_P,
}
else {
/* Make our copy of the message */
- ASSERT(size_object(msg) == hfp->size);
+ ASSERT(size_object(msg) == hfp->used_size);
msg = copy_struct(msg,
- hfp->size,
+ hfp->used_size,
&hp,
&MSO(BIF_P));
}
@@ -1212,7 +1226,7 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1245,9 +1259,7 @@ process_info_aux(Process *BIF_P,
else {
/* Monitor by pid. Build {process, Pid} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp,
- &MSO(BIF_P).externals,
- mic.mi[i].entity);
+ Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
t = TUPLE2(hp, am_process, pid);
hp += 3;
res = CONS(hp, t, res);
@@ -1269,7 +1281,7 @@ process_info_aux(Process *BIF_P,
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -1478,7 +1490,7 @@ process_info_aux(Process *BIF_P,
case am_group_leader: {
int sz = NC_HEAP_SIZE(rp->group_leader);
hp = HAlloc(BIF_P, 3 + sz);
- res = STORE_NC(&hp, &MSO(BIF_P).externals, rp->group_leader);
+ res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
break;
}
@@ -1503,9 +1515,9 @@ process_info_aux(Process *BIF_P,
case am_binary: {
Uint sz = 3;
- (void) bld_bin_list(NULL, &sz, MSO(rp).mso);
+ (void) bld_bin_list(NULL, &sz, &MSO(rp));
hp = HAlloc(BIF_P, sz);
- res = bld_bin_list(&hp, NULL, MSO(rp).mso);
+ res = bld_bin_list(&hp, NULL, &MSO(rp));
break;
}
@@ -1622,6 +1634,14 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
if (sel == am_allocator_sizes && arity == 2) {
return erts_allocator_info_term(BIF_P, *tp, 1);
+ } else if (sel == am_wordsize && arity == 2) {
+ if (tp[0] == am_internal) {
+ return make_small(sizeof(Eterm));
+ }
+ if (tp[0] == am_external) {
+ return make_small(sizeof(UWord));
+ }
+ goto badarg;
} else if (sel == am_allocated) {
if (arity == 2) {
Eterm res = THE_NON_VALUE;
@@ -1871,6 +1891,37 @@ c_compiler_used(Eterm **hpp, Uint *szp)
}
+static int is_snif_term(Eterm module_atom) {
+ int i;
+ Atom *a = atom_tab(atom_val(module_atom));
+ char *aname = (char *) a->name;
+
+ /* if a->name has a '.' then the bif (snif) is bogus, i.e. a package */
+ for (i = 0; i < a->len; i++) {
+ if (aname[i] == '.')
+ return 0;
+ }
+
+ return 1;
+}
+
+static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
+ Eterm tup;
+ tup = erts_bld_tuple(hpp, szp, 3, bif_table[ix].module, bif_table[ix].name, make_small(bif_table[ix].arity));
+ res = erts_bld_cons( hpp, szp, tup, res);
+ return res;
+}
+
+static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
+ int i;
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (is_snif_term(bif_table[i].module)) {
+ res = build_snif_term(hpp, szp, i, res);
+ }
+ }
+ return res;
+}
+
BIF_RETTYPE system_info_1(BIF_ALIST_1)
{
Eterm res;
@@ -1919,6 +1970,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(db_get_trace_control_word_0(BIF_P));
} else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
+ } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
+ Uint size = 0;
+ Uint *szp;
+
+ szp = &size;
+ build_snifs_term(NULL, szp, NIL);
+ hp = HAlloc(BIF_P, size);
+ res = build_snifs_term(&hp, NULL, NIL);
+ BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
val = erts_get_system_seq_tracer();
ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false)
@@ -2063,86 +2123,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
}
else if (BIF_ARG_1 == am_elib_malloc) {
-#ifdef ELIB_ALLOC_IS_CLIB
- struct elib_stat stat;
- DECL_AM(heap_size);
- DECL_AM(max_alloced_size);
- DECL_AM(alloced_size);
- DECL_AM(free_size);
- DECL_AM(no_alloced_blocks);
- DECL_AM(no_free_blocks);
- DECL_AM(smallest_alloced_block);
- DECL_AM(largest_free_block);
- Eterm atoms[8];
- Eterm ints[8];
- Uint **hpp;
- Uint sz;
- Uint *szp;
- int length;
-#ifdef DEBUG
- Uint *endp;
-#endif
-
- elib_stat(&stat);
-
- /* First find out the heap size needed ... */
- hpp = NULL;
- szp = &sz;
- sz = 0;
-
- build_elib_malloc_term:
- length = 0;
- atoms[length] = AM_heap_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_total*sizeof(Uint));
- atoms[length] = AM_max_alloced_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_max_alloc*sizeof(Uint));
- atoms[length] = AM_alloced_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_alloc*sizeof(Uint));
- atoms[length] = AM_free_size;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.mem_free*sizeof(Uint));
- atoms[length] = AM_no_alloced_blocks;
- ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.mem_blocks);
- atoms[length] = AM_no_free_blocks;
- ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.free_blocks);
- atoms[length] = AM_smallest_alloced_block;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.min_used*sizeof(Uint));
- atoms[length] = AM_largest_free_block;
- ints[length++] = erts_bld_uint(hpp, szp,
- (Uint) stat.max_free*sizeof(Uint));
-
-
-
- ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
- ASSERT(length <= sizeof(ints)/sizeof(Eterm));
-
- res = erts_bld_2tup_list(hpp, szp, length, atoms, ints);
-
- if (szp) {
- /* ... and then build the term */
- hp = HAlloc(BIF_P, sz);
-#ifdef DEBUG
- endp = hp + sz;
-#endif
-
- szp = NULL;
- hpp = &hp;
- goto build_elib_malloc_term;
- }
-
-#ifdef DEBUG
- ASSERT(endp == hp);
-#endif
-
-#else /* #ifdef ELIB_ALLOC_IS_CLIB */
- res = am_false;
-#endif /* #ifdef ELIB_ALLOC_IS_CLIB */
-
- BIF_RET(res);
+ /* To be removed in R15 */
+ BIF_RET(am_false);
}
else if (BIF_ARG_1 == am_os_version) {
int major, minor, build;
@@ -2253,6 +2235,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
res = erts_get_cpu_topology_term(BIF_P, am_used);
BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
+ } else if (ERTS_IS_ATOM_STR("update_cpu_info", BIF_ARG_1)) {
+ if (erts_update_cpu_info()) {
+ ERTS_DECL_AM(changed);
+ BIF_RET(AM_changed);
+ }
+ else {
+ ERTS_DECL_AM(unchanged);
+ BIF_RET(AM_unchanged);
+ }
#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
} else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
register unsigned high asm("%l0");
@@ -2324,7 +2315,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
/* Arguments that are unusual follow ... */
else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
- int no = erts_get_cpu_configured(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_configured(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2333,7 +2327,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
}
else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
- int no = erts_get_cpu_online(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_online(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2342,7 +2339,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
}
}
else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
- int no = erts_get_cpu_available(erts_cpuinfo);
+ int no;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ no = erts_get_cpu_available(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
if (no > 0)
BIF_RET(make_small((Uint) no));
else {
@@ -2502,6 +2502,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_sched_stat_term(BIF_P, 1));
} else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
BIF_RET(erts_nif_taints(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
+ BIF_RET(erts_get_reader_groups_map(BIF_P));
}
BIF_ERROR(BIF_P, BADARG);
@@ -2617,7 +2619,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -2637,7 +2639,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
t = TUPLE2(hp, am_process, item);
hp += 3;
res = CONS(hp, t, res);
@@ -2694,7 +2696,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
erts_doforall_links(prt->nlinks, &one_link_size, &size);
for (bp = prt->bp; bp; bp = bp->next)
- size += sizeof(ErlHeapFragment) + (bp->size - 1)*sizeof(Eterm);
+ size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm);
if (prt->linebuf)
size += sizeof(LineBuf) + prt->linebuf->ovsiz;
@@ -2817,7 +2819,7 @@ fun_info_2(Process* p, Eterm fun, Eterm what)
goto error;
}
} else if (is_export(fun)) {
- Export* exp = (Export *) (export_val(fun))[1];
+ Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
switch (what) {
case am_type:
hp = HAlloc(p, 3);
@@ -3010,11 +3012,11 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = erts_run_queues_len(NULL);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
- Uint w1, w2;
+ UWord w1, w2;
Eterm b1, b2;
wall_clock_elapsed_time_both(&w1, &w2);
- b1 = erts_make_integer(w1,BIF_P);
- b2 = erts_make_integer(w2,BIF_P);
+ b1 = erts_make_integer((Uint) w1,BIF_P);
+ b2 = erts_make_integer((Uint) w2,BIF_P);
hp = HAlloc(BIF_P,3);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
@@ -3365,6 +3367,16 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
return erts_fake_scheduler_bindings(BIF_P, tp[2]);
}
+ else if (ERTS_IS_ATOM_STR("reader_groups_map", tp[1])) {
+ Sint groups;
+ if (is_not_small(tp[2]))
+ BIF_ERROR(BIF_P, BADARG);
+ groups = signed_val(tp[2]);
+ if (groups < (Sint) 1 || groups > (Sint) INT_MAX)
+ BIF_ERROR(BIF_P, BADARG);
+
+ BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
+ }
break;
}
default:
@@ -3546,6 +3558,17 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
}
}
+ else if (ERTS_IS_ATOM_STR("binary_loop_limit", BIF_ARG_1)) {
+ /* Used by binary_module_SUITE (stdlib) */
+ Uint max_loops;
+ if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
+ max_loops = erts_binary_set_loop_limit(-1);
+ BIF_RET(make_small(max_loops));
+ } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
+ max_loops = erts_binary_set_loop_limit(max_loops);
+ BIF_RET(make_small(max_loops));
+ }
+ }
else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
/* Used by re_SUITE (stdlib) */
Uint max_loops;
@@ -3658,8 +3681,8 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
* [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
*/
- ethr_atomic_read(&stats->tries, (long *)&tries);
- ethr_atomic_read(&stats->colls, (long *)&colls);
+ tries = (unsigned long) ethr_atomic_read(&stats->tries);
+ colls = (unsigned long) ethr_atomic_read(&stats->colls);
line = stats->line;
timer_s = stats->timer.s;
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index a9e8dd86f7..ce13469801 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -89,13 +89,14 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
BIF_RET(copy);
}
+#define SMALL_VEC_SIZE 10
BIF_RETTYPE subtract_2(BIF_ALIST_2)
{
Eterm list;
Eterm* hp;
Uint need;
Eterm res;
- Eterm small_vec[10]; /* Preallocated memory for small lists */
+ Eterm small_vec[SMALL_VEC_SIZE]; /* Preallocated memory for small lists */
Eterm* vec_p;
Eterm* vp;
int i;
@@ -115,7 +116,7 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
BIF_RET(BIF_ARG_1);
/* allocate element vector */
- if (n <= sizeof(small_vec)/sizeof(small_vec[0]))
+ if (n <= SMALL_VEC_SIZE)
vec_p = small_vec;
else
vec_p = (Eterm*) erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index 6da72dcef9..deda7adc1f 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -251,7 +251,7 @@ BIF_RETTYPE is_function_2(BIF_ALIST_2)
BIF_RET(am_true);
}
} else if (is_export(BIF_ARG_1)) {
- Export* exp = (Export *) (export_val(BIF_ARG_1))[1];
+ Export* exp = (Export *) EXPAND_POINTER((export_val(BIF_ARG_1))[1]);
if (exp->code[2] == (Uint) arity) {
BIF_RET(am_true);
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index f454f2e12d..378c5e73fd 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2001-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -21,10 +21,6 @@
# include "config.h"
#endif
-#ifdef _OSE_
-# include "ose.h"
-#endif
-
#include <ctype.h>
#define ERTS_WANT_EXTERNAL_TAGS
@@ -583,8 +579,8 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
if (prt->bp == NULL) { /* MUST be CONST! */
res = prt->data;
} else {
- Eterm* hp = HAlloc(BIF_P, prt->bp->size);
- res = copy_struct(prt->data, prt->bp->size, &hp, &MSO(BIF_P));
+ Eterm* hp = HAlloc(BIF_P, prt->bp->used_size);
+ res = copy_struct(prt->data, prt->bp->used_size, &hp, &MSO(BIF_P));
}
erts_smp_port_unlock(prt);
BIF_RET(res);
@@ -716,15 +712,17 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
}
} else if (option == am_cd) {
Eterm iolist;
- Eterm heap[4];
+ DeclareTmpHeap(heap,4,p);
int r;
+ UseTmpHeap(4,p);
heap[0] = *tp;
heap[1] = make_list(heap+2);
heap[2] = make_small(0);
heap[3] = NIL;
iolist = make_list(heap);
r = io_list_to_buf(iolist, (char*) dir, MAXPATHLEN);
+ UnUseTmpHeap(4,p);
if (r < 0) {
goto badarg;
}
@@ -1077,27 +1075,33 @@ struct packet_callback_args
Eterm res; /* Out */
int string_as_bin; /* return strings as binaries (http_bin): */
byte* aligned_ptr;
+ Uint bin_sz;
Eterm orig;
Uint bin_offs;
byte bin_bitoffs;
};
+#define in_area(ptr,start,nbytes) \
+ ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+
static Eterm
http_bld_string(struct packet_callback_args* pca, Uint **hpp, Uint *szp,
const char *str, Sint len)
{
Eterm res = THE_NON_VALUE;
Uint size;
+ int make_subbin;
if (pca->string_as_bin) {
size = heap_bin_size(len);
-
+ make_subbin = (size > ERL_SUB_BIN_SIZE
+ && in_area(str, pca->aligned_ptr, pca->bin_sz));
if (szp) {
- *szp += (size > ERL_SUB_BIN_SIZE) ? ERL_SUB_BIN_SIZE : size;
+ *szp += make_subbin ? ERL_SUB_BIN_SIZE : size;
}
if (hpp) {
res = make_binary(*hpp);
- if (size > ERL_SUB_BIN_SIZE) {
+ if (make_subbin) {
ErlSubBin* bin = (ErlSubBin*) *hpp;
bin->thing_word = HEADER_SUB_BIN;
bin->size = len;
@@ -1328,7 +1332,7 @@ BIF_RETTYPE decode_packet_3(BIF_ALIST_3)
int packet_sz; /*-------Binaries involved: ------------------*/
byte* bin_ptr; /*| orig: original binary */
byte bin_bitsz; /*| bin: BIF_ARG_2, may be sub-binary of orig */
- Uint bin_sz; /*| packet: prefix of bin */
+ /*| packet: prefix of bin */
char* body_ptr; /*| body: part of packet to return */
int body_sz; /*| rest: bin without packet */
struct packet_callback_args pca;
@@ -1389,18 +1393,18 @@ BIF_RETTYPE decode_packet_3(BIF_ALIST_3)
}
- bin_sz = binary_size(BIF_ARG_2);
+ pca.bin_sz = binary_size(BIF_ARG_2);
ERTS_GET_BINARY_BYTES(BIF_ARG_2, bin_ptr, pca.bin_bitoffs, bin_bitsz);
if (pca.bin_bitoffs != 0) {
- pca.aligned_ptr = erts_alloc(ERTS_ALC_T_TMP, bin_sz);
- erts_copy_bits(bin_ptr, pca.bin_bitoffs, 1, pca.aligned_ptr, 0, 1, bin_sz*8);
+ pca.aligned_ptr = erts_alloc(ERTS_ALC_T_TMP, pca.bin_sz);
+ erts_copy_bits(bin_ptr, pca.bin_bitoffs, 1, pca.aligned_ptr, 0, 1, pca.bin_sz*8);
}
else {
pca.aligned_ptr = bin_ptr;
}
- packet_sz = packet_get_length(type, (char*)pca.aligned_ptr, bin_sz,
+ packet_sz = packet_get_length(type, (char*)pca.aligned_ptr, pca.bin_sz,
max_plen, trunc_len, &http_state);
- if (!(packet_sz > 0 && packet_sz <= bin_sz)) {
+ if (!(packet_sz > 0 && packet_sz <= pca.bin_sz)) {
if (packet_sz < 0) {
goto error;
}
@@ -1456,7 +1460,7 @@ error:
rest = (ErlSubBin *) hp;
rest->thing_word = HEADER_SUB_BIN;
- rest->size = bin_sz - packet_sz;
+ rest->size = pca.bin_sz - packet_sz;
rest->offs = pca.bin_offs + packet_sz;
rest->orig = pca.orig;
rest->bitoffs = pca.bin_bitoffs;
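
Several files in this diff (erl_bif_port.c here, erl_bif_timer.c, erl_bif_trace.c and erl_db.c further down) replace plain on-stack Eterm arrays with the DeclareTmpHeap/UseTmpHeap/UnUseTmpHeap trio, and the delicate part is that every exit path, error exits included, must release what it acquired. The macros themselves are not shown in this diff, so the sketch below uses hypothetical scratch_acquire/scratch_release stand-ins purely to illustrate that balanced discipline:

    #include <stddef.h>

    /* Trivial stand-ins for the UseTmpHeap/UnUseTmpHeap pairing; they
     * only mark where the real macros would register and unregister
     * the temporary buffer. */
    static void scratch_acquire(unsigned long *buf, size_t n) { (void)buf; (void)n; }
    static void scratch_release(unsigned long *buf, size_t n) { (void)buf; (void)n; }

    static int fill(unsigned long *buf, size_t n, int arg)
    {
        size_t i;
        for (i = 0; i < n; i++)
            buf[i] = (unsigned long)arg + i;
        return arg >= 0 ? 0 : -1;
    }

    int build_and_send(int arg)
    {
        unsigned long tmp[4];            /* cf. DeclareTmpHeap(heap,4,p) */
        int rc;

        scratch_acquire(tmp, 4);         /* cf. UseTmpHeap(4,p) */
        rc = fill(tmp, 4, arg);
        if (rc < 0) {
            scratch_release(tmp, 4);     /* released on the error path too,
                                            as the badarg exits above do */
            return -1;
        }
        scratch_release(tmp, 4);         /* cf. UnUseTmpHeap(4,p) */
        return 0;
    }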
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index c027cd5984..d4a8a3aaa7 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -76,8 +76,8 @@ void erts_init_bif_re(void)
re_exec_trap_export.code[0] = am_erlang;
re_exec_trap_export.code[1] = am_re_run_trap;
re_exec_trap_export.code[2] = 3;
- re_exec_trap_export.code[3] = (Eterm) em_apply_bif;
- re_exec_trap_export.code[4] = (Eterm) &re_exec_trap;
+ re_exec_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ re_exec_trap_export.code[4] = (BeamInstr) &re_exec_trap;
grun_trap_exportp = erts_export_put(am_re,am_grun,3);
urun_trap_exportp = erts_export_put(am_re,am_urun,3);
@@ -103,7 +103,7 @@ Sint erts_re_set_loop_limit(Sint limit)
static int term_to_int(Eterm term, int *sp)
{
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
if (is_small(term)) {
Uint x = signed_val(term);
@@ -154,7 +154,7 @@ static int term_to_int(Eterm term, int *sp)
static Eterm make_signed_integer(int x, Process *p)
{
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
return make_small(x);
#else
Eterm* hp;
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index 172bb37952..4ae2f6ebf4 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -155,7 +155,7 @@ create_ref(Uint *hp, Uint32 *ref_numbers, Uint32 len)
erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
}
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
hp[0] = make_ref_thing_header(len/2 + 1);
datap = (Uint32 *) &hp[1];
*(datap++) = len;
@@ -173,13 +173,13 @@ create_ref(Uint *hp, Uint32 *ref_numbers, Uint32 len)
static int
eq_non_standard_ref_numbers(Uint32 *rn1, Uint32 len1, Uint32 *rn2, Uint32 len2)
{
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define MAX_REF_HEAP_SZ (1+(ERTS_MAX_REF_NUMBERS/2+1))
#else
#define MAX_REF_HEAP_SZ (1+ERTS_MAX_REF_NUMBERS)
#endif
- Uint r1_hp[MAX_REF_HEAP_SZ];
- Uint r2_hp[MAX_REF_HEAP_SZ];
+ DeclareTmpHeapNoproc(r1_hp,(MAX_REF_HEAP_SZ * 2));
+ Eterm *r2_hp = r1_hp +MAX_REF_HEAP_SZ;
return eq(create_ref(r1_hp, rn1, len1), create_ref(r2_hp, rn2, len2));
#undef MAX_REF_HEAP_SZ
@@ -357,7 +357,7 @@ bif_timer_timeout(ErtsBifTimer* btm)
rp,
&rp_locks);
} else {
- Eterm old_size = bp->size;
+ Eterm old_size = bp->used_size;
bp = erts_resize_message_buffer(bp, old_size + wrap_size,
&message, 1);
hp = &bp->mem[0] + old_size;
@@ -398,7 +398,7 @@ setup_bif_timer(Uint32 xflags,
if (!term_to_Uint(time, &timeout))
return THE_NON_VALUE;
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
if ((timeout >> 32) != 0)
return THE_NON_VALUE;
#endif
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 7dff5e0eeb..0509e51a6f 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -40,8 +40,7 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
-static erts_smp_mtx_t trace_pattern_mutex;
-const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0};
+const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
@@ -65,7 +64,6 @@ static void clear_trace_bif(int bif_index);
void
erts_bif_trace_init(void)
{
- erts_smp_mtx_init(&trace_pattern_mutex, "trace_pattern");
erts_default_trace_pattern_is_on = 0;
erts_default_match_spec = NULL;
erts_default_meta_match_spec = NULL;
@@ -86,7 +84,7 @@ trace_pattern_2(Process* p, Eterm MFA, Eterm Pattern)
Eterm
trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
- Eterm mfa[3];
+ DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
int matches = 0;
int specified = 0;
@@ -101,6 +99,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_block_system(0);
+ UseTmpHeap(3,p);
/*
* Check and compile the match specification.
*/
@@ -185,6 +184,14 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
flags.breakpoint = 1;
flags.call_count = 1;
break;
+ case am_call_time:
+ if (is_global) {
+ goto error;
+ }
+ flags.breakpoint = 1;
+ flags.call_time = 1;
+ break;
+
default:
goto error;
}
@@ -194,8 +201,8 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
goto error;
}
- if (match_prog_set && !flags.local && !flags.meta && flags.call_count) {
- /* A match prog is not allowed with just call_count */
+ if (match_prog_set && !flags.local && !flags.meta && (flags.call_count || flags.call_time)) {
+ /* A match prog is not allowed with just call_count or call_time */
goto error;
}
@@ -234,6 +241,8 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
|= flags.meta;
erts_default_trace_pattern_flags.call_count
|= (on == 1) ? flags.call_count : 0;
+ erts_default_trace_pattern_flags.call_time
+ |= (on == 1) ? flags.call_time : 0;
} else {
erts_default_trace_pattern_flags.local
&= ~flags.local;
@@ -241,10 +250,13 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
&= ~flags.meta;
erts_default_trace_pattern_flags.call_count
&= ~flags.call_count;
+ erts_default_trace_pattern_flags.call_time
+ &= ~flags.call_time;
if (! (erts_default_trace_pattern_flags.breakpoint =
erts_default_trace_pattern_flags.local |
erts_default_trace_pattern_flags.meta |
- erts_default_trace_pattern_flags.call_count)) {
+ erts_default_trace_pattern_flags.call_count |
+ erts_default_trace_pattern_flags.call_time)) {
erts_default_trace_pattern_is_on = !!on; /* i.e off */
}
}
@@ -266,8 +278,9 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
if (on) {
if (on != 1) {
flags.call_count = 0;
+ flags.call_time = 0;
}
- flags.breakpoint = flags.local | flags.meta | flags.call_count;
+ flags.breakpoint = flags.local | flags.meta | flags.call_count | flags.call_time;
erts_default_trace_pattern_flags = flags; /* Struct copy */
erts_default_trace_pattern_is_on = !!flags.breakpoint;
}
@@ -312,7 +325,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetUnref(match_prog_set);
done:
-
+ UnUseTmpHeap(3,p);
erts_smp_release_system();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
@@ -322,6 +335,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetUnref(match_prog_set);
+ UnUseTmpHeap(3,p);
erts_smp_release_system();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(p, BADARG);
@@ -334,7 +348,6 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
- erts_smp_mtx_lock(&trace_pattern_mutex);
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -345,12 +358,10 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*trace_pattern_flags = erts_default_trace_pattern_flags;
if (meta_tracer_pid)
*meta_tracer_pid = erts_default_meta_tracer_pid;
- erts_smp_mtx_unlock(&trace_pattern_mutex);
}
-
Uint
erts_trace_flag2bit(Eterm flag)
{
@@ -378,7 +389,7 @@ erts_trace_flag2bit(Eterm flag)
default: return 0;
}
}
-
+
/* Scan the argument list and sort out the trace flags.
**
** Returns !0 on success, 0 on failure.
@@ -929,6 +940,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
#define FUNC_TRACE_LOCAL_TRACE (1<<2)
#define FUNC_TRACE_META_TRACE (1<<3)
#define FUNC_TRACE_COUNT_TRACE (1<<4)
+#define FUNC_TRACE_TIME_TRACE (1<<5)
/*
* Returns either FUNC_TRACE_NOEXIST, FUNC_TRACE_UNTRACED,
* FUNC_TRACE_GLOBAL_TRACE, or,
@@ -943,16 +955,18 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
*
* If the return value contains FUNC_TRACE_COUNT_TRACE, *count is set.
*/
-static int function_is_traced(Eterm mfa[3],
- Binary **ms, /* out */
- Binary **ms_meta, /* out */
+static int function_is_traced(Process *p,
+ Eterm mfa[3],
+ Binary **ms, /* out */
+ Binary **ms_meta, /* out */
Eterm *tracer_pid_meta, /* out */
- Sint *count) /* out */
+ Sint *count, /* out */
+ Eterm *call_time) /* out */
{
Export e;
Export* ep;
int i;
- Uint *code;
+ BeamInstr *code;
/* First look for an export entry */
e.code[0] = mfa[0];
@@ -960,12 +974,12 @@ static int function_is_traced(Eterm mfa[3],
e.code[2] = mfa[2];
if ((ep = export_get(&e)) != NULL) {
if (ep->address == ep->code+3 &&
- ep->code[3] != (Uint) em_call_error_handler) {
- if (ep->code[3] == (Uint) em_call_traced_function) {
+ ep->code[3] != (BeamInstr) em_call_error_handler) {
+ if (ep->code[3] == (BeamInstr) em_call_traced_function) {
*ms = ep->match_prog_set;
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (ep->code[3] == (Uint) em_apply_bif) {
+ if (ep->code[3] == (BeamInstr) em_apply_bif) {
for (i = 0; i < BIF_SIZE; ++i) {
if (bif_export[i] == ep) {
int r = 0;
@@ -978,10 +992,13 @@ static int function_is_traced(Eterm mfa[3],
r |= FUNC_TRACE_LOCAL_TRACE;
*ms = ep->match_prog_set;
}
- if (erts_is_mtrace_bif(ep->code+3, ms_meta,
+ if (erts_is_mtrace_break(ep->code+3, ms_meta,
tracer_pid_meta)) {
r |= FUNC_TRACE_META_TRACE;
}
+ if (erts_is_time_break(p, ep->code+3, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
+ }
}
return r ? r : FUNC_TRACE_UNTRACED;
}
@@ -999,7 +1016,9 @@ static int function_is_traced(Eterm mfa[3],
| (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
| (erts_is_count_break(code, count)
- ? FUNC_TRACE_COUNT_TRACE : 0);
+ ? FUNC_TRACE_COUNT_TRACE : 0)
+ | (erts_is_time_break(p, code, call_time)
+ ? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
}
@@ -1011,15 +1030,19 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
{
Eterm* tp;
Eterm* hp;
- Eterm mfa[3];
+ DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
Binary *ms = NULL, *ms_meta = NULL;
Sint count = 0;
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
Eterm meta = am_false;
+ Eterm call_time = NIL;
int r;
+
+ UseTmpHeap(3,p);
+
if (!is_tuple(func_spec)) {
goto error;
}
@@ -1034,12 +1057,29 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
mfa[1] = tp[2];
mfa[2] = signed_val(tp[3]);
- r = function_is_traced(mfa, &ms, &ms_meta, &meta, &count);
+#ifdef ERTS_SMP
+ if ( (key == am_call_time) || (key == am_all)) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ }
+#endif
+
+ r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
+
+#ifdef ERTS_SMP
+ if ( (key == am_call_time) || (key == am_all)) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+
switch (r) {
case FUNC_TRACE_NOEXIST:
+ UnUseTmpHeap(3,p);
hp = HAlloc(p, 3);
return TUPLE2(hp, key, am_undefined);
case FUNC_TRACE_UNTRACED:
+ UnUseTmpHeap(3,p);
hp = HAlloc(p, 3);
return TUPLE2(hp, key, am_false);
case FUNC_TRACE_GLOBAL_TRACE:
@@ -1085,8 +1125,13 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_make_integer(count, p);
}
break;
+ case am_call_time:
+ if (r & FUNC_TRACE_TIME_TRACE) {
+ retval = call_time;
+ }
+ break;
case am_all: {
- Eterm match_spec_meta = am_false, c = am_false, t;
+ Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false;
if (ms) {
match_spec = MatchSetGetSource(ms);
@@ -1104,10 +1149,15 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_make_integer(-count-1, p) :
erts_make_integer(count, p);
}
- hp = HAlloc(p, (3+2)*5);
+ if (r & FUNC_TRACE_TIME_TRACE) {
+ ct = call_time;
+ }
+ hp = HAlloc(p, (3+2)*6);
retval = NIL;
t = TUPLE2(hp, am_call_count, c); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
+ t = TUPLE2(hp, am_call_time, ct); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta, meta); hp += 3;
@@ -1120,10 +1170,12 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
default:
goto error;
}
+ UnUseTmpHeap(3,p);
hp = HAlloc(p, 3);
return TUPLE2(hp, key, retval);
error:
+ UnUseTmpHeap(3,p);
BIF_ERROR(p, BADARG);
}
@@ -1201,6 +1253,13 @@ trace_info_on_load(Process* p, Eterm key)
} else {
return TUPLE2(hp, key, am_false);
}
+ case am_call_time:
+ hp = HAlloc(p, 3);
+ if (erts_default_trace_pattern_flags.call_time) {
+ return TUPLE2(hp, key, am_true);
+ } else {
+ return TUPLE2(hp, key, am_false);
+ }
case am_all:
{
Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t;
@@ -1275,6 +1334,7 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
/* Empty loop body */
}
+
if (j == specified) {
if (on) {
if (! flags.breakpoint)
@@ -1312,7 +1372,7 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
ASSERT(ExportIsBuiltIn(bif_export[i]));
erts_clear_mtrace_bif
- ((Uint *)bif_export[i]->code + 3);
+ ((BeamInstr *)bif_export[i]->code + 3);
erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
}
set_trace_bif(i, match_prog_set);
@@ -1341,12 +1401,18 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
}
if (flags.meta) {
erts_set_mtrace_bif
- ((Uint *)bif_export[i]->code + 3,
+ ((BeamInstr *)bif_export[i]->code + 3,
meta_match_prog_set, meta_tracer_pid);
erts_bif_trace_flags[i] |= BIF_TRACE_AS_META;
erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
m = 1;
}
+ if (flags.call_time) {
+ erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ /* I don't want to remove any other tracers */
+ erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
+ m = 1;
+ }
if (erts_bif_trace_flags[i]) {
setup_bif_trace(i);
}
@@ -1361,11 +1427,16 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (flags.meta) {
if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
erts_clear_mtrace_bif
- ((Uint *)bif_export[i]->code + 3);
+ ((BeamInstr *)bif_export[i]->code + 3);
erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
}
m = 1;
}
+ if (flags.call_time) {
+ erts_clear_time_trace_bif(bif_export[i]->code + 3);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ m = 1;
+ }
if (! erts_bif_trace_flags[i]) {
reset_bif_trace(i);
}
@@ -1383,6 +1454,7 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
erts_clear_trace_break(mfa, specified);
erts_clear_mtrace_break(mfa, specified);
erts_clear_count_break(mfa, specified);
+ erts_clear_time_break(mfa, specified);
} else {
int m = 0;
if (flags.local) {
@@ -1396,6 +1468,9 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (flags.call_count) {
m = erts_set_count_break(mfa, specified, on);
}
+ if (flags.call_time) {
+ m = erts_set_time_break(mfa, specified, on);
+ }
/* All assignments to 'm' above should give the same value,
* so just use the last */
matches += m;
@@ -1411,6 +1486,9 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (flags.call_count) {
m = erts_clear_count_break(mfa, specified);
}
+ if (flags.call_time) {
+ m = erts_clear_time_break(mfa, specified);
+ }
/* All assignments to 'm' above should give the same value,
* so just use the last */
matches += m;
@@ -1430,9 +1508,9 @@ static int
setup_func_trace(Export* ep, void* match_prog)
{
if (ep->address == ep->code+3) {
- if (ep->code[3] == (Uint) em_call_error_handler) {
+ if (ep->code[3] == (BeamInstr) em_call_error_handler) {
return 0;
- } else if (ep->code[3] == (Uint) em_call_traced_function) {
+ } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
MatchSetUnref(ep->match_prog_set);
ep->match_prog_set = match_prog;
MatchSetRef(ep->match_prog_set);
@@ -1452,8 +1530,8 @@ setup_func_trace(Export* ep, void* match_prog)
return 0;
}
- ep->code[3] = (Uint) em_call_traced_function;
- ep->code[4] = (Uint) ep->address;
+ ep->code[3] = (BeamInstr) em_call_traced_function;
+ ep->code[4] = (BeamInstr) ep->address;
ep->address = ep->code+3;
ep->match_prog_set = match_prog;
MatchSetRef(ep->match_prog_set);
@@ -1465,7 +1543,7 @@ static void setup_bif_trace(int bif_index) {
ASSERT(ExportIsBuiltIn(ep));
ASSERT(ep->code[4]);
- ep->code[4] = (Uint) bif_table[bif_index].traced;
+ ep->code[4] = (BeamInstr) bif_table[bif_index].traced;
}
static void set_trace_bif(int bif_index, void* match_prog) {
@@ -1492,9 +1570,9 @@ static int
reset_func_trace(Export* ep)
{
if (ep->address == ep->code+3) {
- if (ep->code[3] == (Uint) em_call_error_handler) {
+ if (ep->code[3] == (BeamInstr) em_call_error_handler) {
return 0;
- } else if (ep->code[3] == (Uint) em_call_traced_function) {
+ } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
ep->address = (Uint *) ep->code[4];
MatchSetUnref(ep->match_prog_set);
ep->match_prog_set = NULL;
@@ -1527,8 +1605,8 @@ static void reset_bif_trace(int bif_index) {
ASSERT(ExportIsBuiltIn(ep));
ASSERT(ep->code[4]);
ASSERT(! ep->match_prog_set);
- ASSERT(! erts_is_mtrace_bif((Uint *)ep->code+3, NULL, NULL));
- ep->code[4] = (Uint) bif_table[bif_index].f;
+ ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL));
+ ep->code[4] = (BeamInstr) bif_table[bif_index].f;
}
static void clear_trace_bif(int bif_index) {
@@ -2083,7 +2161,7 @@ trace_delivered_1(BIF_ALIST_1)
#ifdef ERTS_SMP
bp = new_message_buffer(REF_THING_SIZE + 4);
hp = &bp->mem[0];
- msg_ref = STORE_NC(&hp, &bp->off_heap.externals, ref);
+ msg_ref = STORE_NC(&hp, &bp->off_heap, ref);
#else
hp = HAlloc(BIF_P, 4);
msg_ref = ref;
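
The erl_bif_trace.c changes add call_time as a fourth breakpoint-based trace kind alongside local, meta and call_count, and every place that derives the summary breakpoint flag now ORs the new bit in as well. A compact sketch of that bookkeeping with an illustrative struct (not the emulator's trace_pattern_flags definition):

    /* 'breakpoint' must stay the OR of every breakpoint-based trace
     * kind, so each newly added kind has to be folded into that OR. */
    struct trace_flags {
        unsigned breakpoint : 1;
        unsigned local      : 1;
        unsigned meta       : 1;
        unsigned call_count : 1;
        unsigned call_time  : 1;   /* the kind added by this patch */
    };

    static void recompute_breakpoint(struct trace_flags *f)
    {
        f->breakpoint = f->local | f->meta | f->call_count | f->call_time;
    }

    static void clear_call_time(struct trace_flags *f)
    {
        f->call_time = 0;
        recompute_breakpoint(f);   /* forgetting this is exactly what the
                                      extended OR expressions above avoid */
    }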
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 1f948a9684..a569fe2e85 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -21,6 +21,7 @@
#define __ERL_BINARY_H
#include "erl_threads.h"
+#include "bif.h"
/*
* Maximum number of bytes to place in a heap binary.
@@ -150,7 +151,16 @@ do { \
void erts_init_binary(void);
-byte* erts_get_aligned_binary_bytes_extra(Eterm, byte**, unsigned extra);
+byte* erts_get_aligned_binary_bytes_extra(Eterm, byte**, ErtsAlcType_t, unsigned extra);
+
+/*
+ * Common implementation for erlang:list_to_binary/1 and binary:list_to_bin/1
+ */
+
+BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg);
+BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple);
+BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
+
#if defined(__i386__) || !defined(__GNUC__)
/*
@@ -164,10 +174,11 @@ byte* erts_get_aligned_binary_bytes_extra(Eterm, byte**, unsigned extra);
#endif
#define ERTS_CHK_BIN_ALIGNMENT(B) \
- do { ASSERT(!(B) || (((Uint) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((Uint) 0)) } while(0)
+ do { ASSERT(!(B) || (((UWord) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((UWord) 0)) } while(0)
ERTS_GLB_INLINE byte* erts_get_aligned_binary_bytes(Eterm bin, byte** base_ptr);
ERTS_GLB_INLINE void erts_free_aligned_binary_bytes(byte* buf);
+ERTS_GLB_INLINE void erts_free_aligned_binary_bytes_extra(byte* buf, ErtsAlcType_t);
ERTS_GLB_INLINE Binary *erts_bin_drv_alloc_fnf(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_drv_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
@@ -184,17 +195,23 @@ ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size,
ERTS_GLB_INLINE byte*
erts_get_aligned_binary_bytes(Eterm bin, byte** base_ptr)
{
- return erts_get_aligned_binary_bytes_extra(bin, base_ptr, 0);
+ return erts_get_aligned_binary_bytes_extra(bin, base_ptr, ERTS_ALC_T_TMP, 0);
}
ERTS_GLB_INLINE void
-erts_free_aligned_binary_bytes(byte* buf)
+erts_free_aligned_binary_bytes_extra(byte* buf, ErtsAlcType_t allocator)
{
if (buf) {
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ erts_free(allocator, (void *) buf);
}
}
+ERTS_GLB_INLINE void
+erts_free_aligned_binary_bytes(byte* buf)
+{
+ erts_free_aligned_binary_bytes_extra(buf,ERTS_ALC_T_TMP);
+}
+
/* Explicit extra bytes allocated to counter buggy drivers.
** These extra bytes where earlier (< R13B04) added by an alignment-bug
** in this code. Do we dare remove this in some major release (R14?) maybe?
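
The erl_binary.h hunk shows a common API-evolution pattern: erts_get_aligned_binary_bytes_extra gains an allocator-type argument while the old entry points become inline wrappers that pass the previous default, so existing callers compile unchanged. A generic sketch of the same pattern with made-up names (get_buffer, ALLOC_TMP), not the emulator's API:

    #include <stdlib.h>

    typedef enum { ALLOC_TMP, ALLOC_BINARY } alloc_type_t;

    /* The "_extra" variant carries the new parameter... */
    static unsigned char *get_buffer_extra(size_t size, alloc_type_t type)
    {
        (void)type;               /* a real allocator would dispatch on this */
        return malloc(size);
    }

    static void free_buffer_extra(unsigned char *buf, alloc_type_t type)
    {
        (void)type;
        free(buf);
    }

    /* ...and the original names become thin wrappers that keep the
     * old default, so no caller has to change. */
    static inline unsigned char *get_buffer(size_t size)
    {
        return get_buffer_extra(size, ALLOC_TMP);
    }

    static inline void free_buffer(unsigned char *buf)
    {
        free_buffer_extra(buf, ALLOC_TMP);
    }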
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index e4f5d50ddf..88d2c06246 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -255,7 +255,7 @@ erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuff
* Simply shift whole bytes into the result.
*/
switch (BYTE_OFFSET(n)) {
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
case 7: w = (w << 8) | *bp++;
case 6: w = (w << 8) | *bp++;
case 5: w = (w << 8) | *bp++;
@@ -360,7 +360,7 @@ erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuff
case 3:
v32 = LSB[0] + (LSB[1]<<8) + (LSB[2]<<16);
goto big_small;
-#if !defined(ARCH_64)
+#if !defined(ARCH_64) || HALFWORD_HEAP
case 4:
v32 = (LSB[0] + (LSB[1]<<8) + (LSB[2]<<16) + (LSB[3]<<24));
if (!IS_USMALL(sgn, v32)) {
@@ -1335,12 +1335,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = used_size_in_bytes;
- pb->next = MSO(c_p).mso;
- MSO(c_p).mso = pb;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
- MSO(c_p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
/*
* Now allocate the sub binary and set its size to include the
@@ -1506,12 +1506,12 @@ erts_bs_init_writable(Process* p, Eterm sz)
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = 0;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
- MSO(p).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
/*
* Now allocate the sub binary.
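
The erl_bits.c hunks move off-heap binaries from the old mso field to a generic first list of erl_off_heap_header entries and fold each object's size into an overhead counter via OH_OVERHEAD. Stripped of the emulator's types, that is an intrusive singly linked list prepend plus a running cost total, roughly:

    #include <stddef.h>

    /* Illustrative types only; the real erl_off_heap_header and
     * off-heap bookkeeping differ in detail. */
    struct off_heap_header {
        struct off_heap_header *next;
        size_t size;
    };

    struct off_heap {
        struct off_heap_header *first;   /* head of the intrusive list */
        size_t overhead;                 /* accumulated GC pressure    */
    };

    static void off_heap_attach(struct off_heap *oh,
                                struct off_heap_header *hdr)
    {
        hdr->next = oh->first;           /* prepend: O(1), no allocation */
        oh->first = hdr;
        oh->overhead += hdr->size / sizeof(void *);
    }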
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index e3f8e0b679..0f67733fa4 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -63,7 +63,7 @@ typedef struct erl_bin_match_struct{
#define HEADER_NUM_SLOTS(hdr) (header_arity(hdr)-sizeof(ErlBinMatchState)/sizeof(Eterm)+1)
#define make_matchstate(_Ms) make_boxed((Eterm*)(_Ms))
-#define ms_matchbuffer(_Ms) &(((ErlBinMatchState*)(_Ms - TAG_PRIMARY_BOXED))->mb)
+#define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb)
#if defined(ERTS_SMP)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 15b1c6bb56..b0369a402b 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -78,16 +78,24 @@ enum DbIterSafety {
** The main meta table, containing all ets tables.
*/
#ifdef ERTS_SMP
-# define META_MAIN_TAB_LOCK_CNT 16
-static union {
- erts_smp_spinlock_t lck;
- byte _cache_line_alignment[64];
-}meta_main_tab_locks[META_MAIN_TAB_LOCK_CNT];
+
+#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
+#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
+#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+
+typedef union {
+ erts_smp_rwmtx_t rwmtx;
+ byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(erts_smp_rwmtx_t))];
+} erts_meta_main_tab_lock_t;
+
+static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+
#endif
static struct {
union {
DbTable *tb; /* Only directly readable if slot is ALIVE */
- Uint next_free; /* (index<<2)|1 if slot is FREE */
+ UWord next_free; /* (index<<2)|1 if slot is FREE */
}u;
} *meta_main_tab;
@@ -104,17 +112,13 @@ static struct {
#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
-static ERTS_INLINE void meta_main_tab_lock(unsigned slot)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_lock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
-#endif
-}
-
-static ERTS_INLINE void meta_main_tab_unlock(unsigned slot)
+static ERTS_INLINE erts_smp_rwmtx_t *
+get_meta_main_tab_lock(unsigned slot)
{
#ifdef ERTS_SMP
- erts_smp_spin_unlock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
+ return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+#else
+ return NULL;
#endif
}
@@ -166,7 +170,8 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3 /* record write access */
+ LCK_WRITE_REC=3, /* record write access */
+ LCK_NONE=4
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -187,7 +192,7 @@ static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
static void fix_table_locked(Process* p, DbTable* tb);
static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind);
-static void set_heir(Process* me, DbTable* tb, Eterm heir, Eterm heir_data);
+static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data);
static void free_heir_data(DbTable*);
static void free_fixations_locked(DbTable *tb);
@@ -214,17 +219,17 @@ Export ets_select_continue_exp;
*/
static Export ets_delete_continue_exp;
-static ERTS_INLINE DbTable* db_ref(DbTable* tb)
+static ERTS_INLINE DbTable* db_ref(DbTable* tb, db_lock_kind_t kind)
{
- if (tb != NULL) {
+ if (tb != NULL && kind != LCK_READ) {
erts_refc_inc(&tb->common.ref, 2);
}
return tb;
}
-static ERTS_INLINE DbTable* db_unref(DbTable* tb)
+static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
{
- if (!erts_refc_dectest(&tb->common.ref, 0)) {
+ if (kind != LCK_READ && !erts_refc_dectest(&tb->common.ref, 0)) {
#ifdef HARDDEBUG
if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
@@ -256,12 +261,19 @@ static ERTS_INLINE DbTable* db_unref(DbTable* tb)
return tb;
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
+ char *rwname, char* fixname)
{
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ if (use_frequent_read_lock)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+#endif
erts_refc_init(&tb->common.ref, 1);
erts_refc_init(&tb->common.fixref, 0);
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
+ rwname, tb->common.the_name);
erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
@@ -297,7 +309,7 @@ static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
- (void) db_ref(tb);
+ (void) db_ref(tb, kind);
#ifdef ERTS_SMP
db_lock_take_over_ref(tb, kind);
#endif
@@ -331,7 +343,7 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
}
}
#endif
- (void) db_unref(tb); /* May delete table... */
+ (void) db_unref(tb, kind); /* May delete table... */
}
@@ -349,32 +361,49 @@ static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
}
static ERTS_INLINE
-DbTable* db_get_table(Process *p,
- Eterm id,
- int what,
- db_lock_kind_t kind)
+DbTable* db_get_table_aux(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind,
+ int meta_already_locked)
{
DbTable *tb = NULL;
+ erts_smp_rwmtx_t *mtl = NULL;
if (is_small(id)) {
Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- meta_main_tab_lock(slot);
+ if (!meta_already_locked) {
+ mtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rlock(mtl);
+ }
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ else {
+ erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
+ || erts_lc_rwmtx_is_rwlocked(test_mtl));
+ }
+#endif
if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
/* SMP: inc to prevent race, between unlock of meta_main_tab_lock
* and the table locking outside the meta_main_tab_lock
*/
- tb = db_ref(meta_main_tab[slot].u.tb);
+ tb = db_ref(meta_main_tab[slot].u.tb, kind);
}
- meta_main_tab_unlock(slot);
}
else if (is_atom(id)) {
- erts_smp_rwmtx_t* rwlock;
- struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&rwlock);
- erts_smp_rwmtx_rlock(rwlock);
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
+ if (!meta_already_locked)
+ erts_smp_rwmtx_rlock(mtl);
+ else{
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ || erts_lc_rwmtx_is_rwlocked(mtl));
+ mtl = NULL;
+ }
+
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id) {
- tb = db_ref(bucket->pu.tb);
+ tb = db_ref(bucket->pu.tb, kind);
}
}
else { /* multi */
@@ -382,23 +411,33 @@ DbTable* db_get_table(Process *p,
Uint i;
for (i=0; i<cnt; i++) {
if (bucket->pu.mvec[i].u.name_atom == id) {
- tb = db_ref(bucket->pu.mvec[i].pu.tb);
+ tb = db_ref(bucket->pu.mvec[i].pu.tb, kind);
break;
}
}
}
}
- erts_smp_rwmtx_runlock(rwlock);
}
if (tb) {
db_lock_take_over_ref(tb, kind);
- if (tb->common.id == id && ((tb->common.status & what) != 0 ||
- p->id == tb->common.owner)) {
- return tb;
+ if (tb->common.id != id
+ || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+ db_unlock(tb, kind);
+ tb = NULL;
}
- db_unlock(tb, kind);
}
- return NULL;
+ if (mtl)
+ erts_smp_rwmtx_runlock(mtl);
+ return tb;
+}
+
+static ERTS_INLINE
+DbTable* db_get_table(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind)
+{
+ return db_get_table_aux(p, id, what, kind, 0);
}
/* Requires meta_main_tab_locks[slot] locked.
@@ -413,15 +452,15 @@ static ERTS_INLINE void free_slot(int slot)
erts_smp_spin_unlock(&meta_main_tab_main_lock);
}
-static int insert_named_tab(Eterm name_atom, DbTable* tb)
+static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
-
- erts_smp_rwmtx_rwlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -468,17 +507,32 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
-static int remove_named_tab(Eterm name_atom)
+static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
+ Eterm name_atom = tb->common.id;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
- erts_smp_rwmtx_rwlock(rwlock);
+#ifdef ERTS_SMP
+ if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(rwlock);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
+
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+
if (bucket->pu.tb == NULL) {
goto done;
}
@@ -529,7 +583,8 @@ static int remove_named_tab(Eterm name_atom)
ret = 1; /* Ok */
done:
- erts_smp_rwmtx_rwunlock(rwlock);
+ if (!have_lock)
+ erts_smp_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -704,12 +759,13 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
int cret = DB_ERROR_BADITEM;
Eterm list;
Eterm iter;
- Eterm cell[2];
+ DeclareTmpHeap(cell,2,BIF_P);
DbUpdateHandle handle;
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
BIF_ERROR(BIF_P, BADARG);
}
+ UseTmpHeap(2,BIF_P);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
goto bail_out;
}
@@ -762,6 +818,7 @@ finalize:
tb->common.meth->db_finalize_dbterm(&handle);
bail_out:
+ UnUseTmpHeap(2,BIF_P);
db_unlock(tb, LCK_WRITE_REC);
switch (cret) {
@@ -794,8 +851,8 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
Eterm* ret_list_currp = NULL;
Eterm* ret_list_prevp = NULL;
Eterm iter;
- Eterm cell[2];
- Eterm tuple[3];
+ DeclareTmpHeap(cell,5,BIF_P);
+ Eterm *tuple = cell+2;
DbUpdateHandle handle;
Uint halloc_size = 0; /* overestimated heap usage */
Eterm* htop; /* actual heap usage */
@@ -805,6 +862,9 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
BIF_ERROR(BIF_P, BADARG);
}
+
+ UseTmpHeap(5,BIF_P);
+
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
goto bail_out;
}
@@ -951,6 +1011,7 @@ finalize:
tb->common.meth->db_finalize_dbterm(&handle);
bail_out:
+ UnUseTmpHeap(5,BIF_P);
db_unlock(tb, LCK_WRITE_REC);
switch (cret) {
@@ -1127,6 +1188,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
+ erts_smp_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1135,34 +1197,65 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+
+ if (is_not_atom(BIF_ARG_2)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (is_not_atom(BIF_ARG_2)) {
- goto badarg;
+ (void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
+
+ if (is_small(BIF_ARG_1)) {
+ Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
+ lck2 = get_meta_main_tab_lock(slot);
}
+ else if (is_atom(BIF_ARG_1)) {
+ (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (lck1 == lck2)
+ lck2 = NULL;
+ else if (lck1 > lck2) {
+ erts_smp_rwmtx_t *tmp = lck1;
+ lck1 = lck2;
+ lck2 = tmp;
+ }
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ erts_smp_rwmtx_rwlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwlock(lck2);
+
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ if (!tb)
+ goto badarg;
if (is_not_atom(tb->common.id)) { /* Not a named table */
tb->common.the_name = BIF_ARG_2;
goto done;
}
- if (!insert_named_tab(BIF_ARG_2,tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
goto badarg;
- }
- if (!remove_named_tab(tb->common.id)) {
+
+ if (!remove_named_tab(tb, 1))
erl_exit(1,"Could not find named tab %s", tb->common.id);
- }
tb->common.id = tb->common.the_name = BIF_ARG_2;
done:
ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_RET(ret);
badarg:
- db_unlock(tb, LCK_WRITE);
+ if (tb)
+ db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwunlock(lck1);
+ if (lck2)
+ erts_smp_rwmtx_rwunlock(lck2);
BIF_ERROR(BIF_P, BADARG);
}
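
The rewritten ets_rename_2 above must hold the name-table lock for the new name and the lock for the old table id at the same time; it avoids deadlock by taking only one lock when both hash to the same rwlock (lck2 = NULL) and otherwise always locking the lower address first (the lck1 > lck2 swap). A standalone sketch of that address-ordering idiom, assuming plain pthread mutexes rather than the emulator's rwlocks:

    #include <pthread.h>
    #include <stdint.h>

    /* Lock two mutexes in a globally consistent order so that two
     * threads taking the same pair concurrently cannot deadlock. */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (a == b) {                    /* same lock: take it once */
            pthread_mutex_lock(a);
            return;
        }
        if ((uintptr_t)a > (uintptr_t)b) {   /* order by address */
            pthread_mutex_t *tmp = a; a = b; b = tmp;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    }

    static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (a == b) {
            pthread_mutex_unlock(a);
            return;
        }
        pthread_mutex_unlock(a);
        pthread_mutex_unlock(b);
    }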
@@ -1180,13 +1273,14 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Eterm val;
Eterm ret;
Eterm heir;
- Eterm heir_data;
+ UWord heir_data;
Uint32 status;
Sint keypos;
- int is_named, is_fine_locked;
+ int is_named, is_fine_locked, frequent_read;
int cret;
- Eterm meta_tuple[3];
+ DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
+ erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1199,8 +1293,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = 1;
is_named = 0;
is_fine_locked = 0;
+ frequent_read = 0;
heir = am_none;
- heir_data = am_undefined;
+ heir_data = (UWord) am_undefined;
list = BIF_ARG_2;
while(is_list(list)) {
@@ -1232,6 +1327,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
is_fine_locked = 0;
} else break;
}
+ else if (tp[1] == am_read_concurrency) {
+ if (tp[2] == am_true) {
+ frequent_read = 1;
+ } else if (tp[2] == am_false) {
+ frequent_read = 0;
+ } else break;
+ }
else if (tp[1] == am_heir && tp[2] == am_none) {
heir = am_none;
heir_data = am_undefined;
@@ -1280,6 +1382,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
+#ifdef ERTS_SMP
+ if (frequent_read && !(status & DB_PRIVATE))
+ status |= DB_FREQ_READ;
+#endif
+
/* we create table outside any table lock
* and take the unusal cost of destroy table if it
* fails to find a slot
@@ -1302,7 +1409,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- db_init_lock(tb, "db_tab", "db_tab_fix");
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
+ "db_tab", "db_tab_fix");
tb->common.keypos = keypos;
tb->common.owner = BIF_P->id;
set_heir(BIF_P, tb, heir, heir_data);
@@ -1345,15 +1453,17 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.id = ret;
tb->common.slot = slot; /* store slot for erase */
- meta_main_tab_lock(slot);
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
meta_main_tab[slot].u.tb = tb;
ASSERT(IS_SLOT_ALIVE(slot));
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
- if (is_named && !insert_named_tab(BIF_ARG_1, tb)) {
- meta_main_tab_lock(slot);
+ if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
+ mmtl = get_meta_main_tab_lock(slot);
+ erts_smp_rwmtx_rwlock(mmtl);
free_slot(slot);
- meta_main_tab_unlock(slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
db_lock_take_over_ref(tb,LCK_WRITE);
free_heir_data(tb);
@@ -1375,6 +1485,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size));
#endif
+ UseTmpHeap(3,BIF_P);
+
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
if (db_put_hash(meta_pid_to_tab,
TUPLE2(meta_tuple, BIF_P->id, make_small(slot)),
@@ -1383,6 +1495,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ UnUseTmpHeap(3,BIF_P);
+
BIF_RET(ret);
}
@@ -1489,6 +1603,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
int trap;
DbTable* tb;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1510,16 +1625,26 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
/* We must keep the slot, to be found by db_proc_dead() if process dies */
MARK_SLOT_DEAD(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
- if (is_atom(tb->common.id)) {
- remove_named_tab(tb->common.id);
- }
+ erts_smp_rwmtx_rwunlock(mmtl);
+ if (is_atom(tb->common.id))
+ remove_named_tab(tb, 0);
if (tb->common.owner != BIF_P->id) {
- Eterm meta_tuple[3];
+ DeclareTmpHeap(meta_tuple,3,BIF_P);
/*
* The table is being deleted by a process other than its owner.
@@ -1527,6 +1652,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
* current process will be killed (e.g. by an EXIT signal), we will
* now transfer the ownership to the current process.
*/
+ UseTmpHeap(3,BIF_P);
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
make_small(tb->common.slot));
@@ -1538,6 +1664,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
TUPLE2(meta_tuple,BIF_P->id,make_small(tb->common.slot)),
0);
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ UnUseTmpHeap(3,BIF_P);
}
/* disable inheritance */
free_heir_data(tb);
@@ -1554,9 +1681,15 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
* (it looks like an continuation pointer), but that is will crash the
* emulator if this BIF is call traced.
*/
+#if HALFWORD_HEAP
+ Eterm *hp = HAlloc(BIF_P, 3);
+ hp[0] = make_pos_bignum_header(2);
+ *((UWord *) (UWord) (hp+1)) = (UWord) tb;
+#else
Eterm *hp = HAlloc(BIF_P, 2);
hp[0] = make_pos_bignum_header(1);
hp[1] = (Eterm) tb;
+#endif
BIF_TRAP1(&ets_delete_continue_exp, BIF_P, make_big(hp));
}
else {
@@ -1571,7 +1704,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
{
Process* to_proc = NULL;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- Eterm buf[5];
+ DeclareTmpHeap(buf,5,BIF_P);
Eterm to_pid = BIF_ARG_2;
Eterm from_pid;
DbTable* tb = NULL;
@@ -1593,6 +1726,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
goto badarg; /* or should we be idempotent? return false maybe */
}
+ UseTmpHeap(5,BIF_P);
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
make_small(tb->common.slot));
@@ -1610,6 +1744,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, from_pid, BIF_ARG_3),
0);
erts_smp_proc_unlock(to_proc, to_locks);
+ UnUseTmpHeap(5,BIF_P);
BIF_RET(am_true);
badarg:
@@ -1624,11 +1759,12 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
Eterm* tp;
Eterm opt;
Eterm heir = THE_NON_VALUE;
- Eterm heir_data = THE_NON_VALUE;
+ UWord heir_data = (UWord) THE_NON_VALUE;
Uint32 protection = 0;
- Eterm fakelist[2];
+ DeclareTmpHeap(fakelist,2,BIF_P);
Eterm tail;
+ UseTmpHeap(2,BIF_P);
for (tail = is_tuple(BIF_ARG_2) ? CONS(fakelist, BIF_ARG_2, NIL) : BIF_ARG_2;
is_list(tail);
tail = CDR(list_val(tail))) {
@@ -1681,9 +1817,11 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
}
db_unlock (tb,LCK_WRITE);
+ UnUseTmpHeap(2,BIF_P);
BIF_RET(am_true);
badarg:
+ UnUseTmpHeap(2,BIF_P);
if (tb != NULL) {
db_unlock(tb,LCK_WRITE);
}
@@ -1896,14 +2034,15 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0)
previous = NIL;
j = 0;
for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) {
- meta_main_tab_lock(i);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(i)) {
j++;
tb = meta_main_tab[i].u.tb;
previous = CONS(hp, tb->common.id, previous);
hp += 2;
}
- meta_main_tab_unlock(i);
+ erts_smp_rwmtx_runlock(mmtl);
}
HRelease(BIF_P, hendp, hp);
BIF_RET(previous);
@@ -1949,29 +2088,37 @@ BIF_RETTYPE ets_match_1(BIF_ALIST_1)
BIF_RETTYPE ets_match_2(BIF_ALIST_2)
{
Eterm ms;
- Eterm buff[8];
+ DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
- /*hp = HAlloc(BIF_P, 8);*/
+ Eterm res;
+
+ UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- return ets_select_2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+ UnUseTmpHeap(8,BIF_P);
+ return res;
}
BIF_RETTYPE ets_match_3(BIF_ALIST_3)
{
Eterm ms;
- Eterm buff[8];
+ DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
- /*hp = HAlloc(BIF_P, 8);*/
+ Eterm res;
+
+ UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- return ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ UnUseTmpHeap(8,BIF_P);
+ return res;
}
@@ -2385,29 +2532,37 @@ BIF_RETTYPE ets_match_object_1(BIF_ALIST_1)
BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
{
Eterm ms;
- Eterm buff[8];
+ DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
- /*hp = HAlloc(BIF_P, 8);*/
+ Eterm res;
+
+ UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- return ets_select_2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+ UnUseTmpHeap(8,BIF_P);
+ return res;
}
BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
{
Eterm ms;
- Eterm buff[8];
+ DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
- /*hp = HAlloc(BIF_P, 8);*/
+ Eterm res;
+
+ UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- return ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ UnUseTmpHeap(8,BIF_P);
+ return res;
}
/*
@@ -2558,7 +2713,7 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
BIF_TRAP3(bif_export[BIF_ets_match_spec_run_r_3],
BIF_P,lst,BIF_ARG_2,ret);
}
- res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), 0, &dummy);
+ res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), NULL, 0, &dummy);
if (is_value(res)) {
sz = size_object(res);
hp = HAlloc(BIF_P, sz + 2);
@@ -2585,18 +2740,36 @@ void init_db(void)
{
DbTable init_tb;
int i;
- extern Eterm* em_apply_bif;
+ extern BeamInstr* em_apply_bif;
Eterm *hp;
unsigned bits;
size_t size;
#ifdef ERTS_SMP
- for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
- erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ meta_main_tab_locks = erts_alloc(ERTS_ALC_T_DB_TABLES,
+ (sizeof(erts_meta_main_tab_lock_t)
+ * (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE+1)));
+
+ if ((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) != 0)
+ meta_main_tab_locks = ((erts_meta_main_tab_lock_t *)
+ ((((Uint) meta_main_tab_locks)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+
+ ASSERT((((Uint) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) == 0);
+
+ for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
+ erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
+ "meta_main_tab_slot", make_small(i));
}
erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i));
}
#endif
@@ -2714,9 +2887,9 @@ void init_db(void)
ets_select_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
ets_select_delete_continue_exp.code[2] = 1;
ets_select_delete_continue_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
ets_select_delete_continue_exp.code[4] =
- (Eterm) &ets_select_delete_1;
+ (BeamInstr) &ets_select_delete_1;
/* Non visual BIF to trap to. */
memset(&ets_select_count_continue_exp, 0, sizeof(Export));
@@ -2726,9 +2899,9 @@ void init_db(void)
ets_select_count_continue_exp.code[1] = am_atom_put("count_trap",11);
ets_select_count_continue_exp.code[2] = 1;
ets_select_count_continue_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
ets_select_count_continue_exp.code[4] =
- (Eterm) &ets_select_count_1;
+ (BeamInstr) &ets_select_count_1;
/* Non visual BIF to trap to. */
memset(&ets_select_continue_exp, 0, sizeof(Export));
@@ -2738,9 +2911,9 @@ void init_db(void)
ets_select_continue_exp.code[1] = am_atom_put("select_trap",11);
ets_select_continue_exp.code[2] = 1;
ets_select_continue_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
ets_select_continue_exp.code[4] =
- (Eterm) &ets_select_trap_1;
+ (BeamInstr) &ets_select_trap_1;
/* Non visual BIF to trap to. */
memset(&ets_delete_continue_exp, 0, sizeof(Export));
@@ -2748,8 +2921,8 @@ void init_db(void)
ets_delete_continue_exp.code[0] = am_ets;
ets_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
ets_delete_continue_exp.code[2] = 1;
- ets_delete_continue_exp.code[3] = (Eterm) em_apply_bif;
- ets_delete_continue_exp.code[4] = (Eterm) &ets_delete_trap;
+ ets_delete_continue_exp.code[3] = (BeamInstr) em_apply_bif;
+ ets_delete_continue_exp.code[4] = (BeamInstr) &ets_delete_trap;
hp = ms_delete_all_buff;
ms_delete_all = CONS(hp, am_true, NIL);
@@ -2843,9 +3016,9 @@ static int give_away_to_heir(Process* p, DbTable* tb)
{
Process* to_proc;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- Eterm buf[5];
+ DeclareTmpHeap(buf,5,p);
Eterm to_pid;
- Eterm heir_data;
+ UWord heir_data;
ASSERT(tb->common.owner == p->id);
ASSERT(is_internal_pid(tb->common.heir));
@@ -2856,12 +3029,12 @@ retry:
to_pid, to_locks,
ERTS_P2P_FLG_TRY_LOCK);
if (to_proc == ERTS_PROC_LOCK_BUSY) {
- db_ref(tb); /* while unlocked */
+ db_ref(tb, LCK_NONE); /* while unlocked */
db_unlock(tb,LCK_WRITE);
to_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
to_pid, to_locks);
db_lock(tb,LCK_WRITE);
- tb = db_unref(tb);
+ tb = db_unref(tb, LCK_NONE);
ASSERT(tb != NULL);
if (tb->common.owner != p->id) {
@@ -2888,6 +3061,7 @@ retry:
erts_smp_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
+ UseTmpHeap(5,p);
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
make_small(tb->common.slot));
@@ -2899,7 +3073,7 @@ retry:
TUPLE2(buf,to_pid,make_small(tb->common.slot)),
0);
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
-
+ UnUseTmpHeap(5,p);
db_unlock(tb,LCK_WRITE);
heir_data = tb->common.heir_data;
if (!is_immed(heir_data)) {
@@ -2968,12 +3142,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (!IS_SLOT_FREE(ix)) {
- tb = db_ref(GET_ANY_SLOT_TAB(ix));
+ tb = db_ref(GET_ANY_SLOT_TAB(ix), LCK_WRITE);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int do_yield;
db_lock_take_over_ref(tb, LCK_WRITE);
@@ -3005,7 +3180,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
tb->common.status |= DB_DELETE;
if (is_atom(tb->common.id))
- remove_named_tab(tb->common.id);
+ remove_named_tab(tb, 0);
free_heir_data(tb);
free_fixations_locked(tb);
@@ -3055,12 +3230,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
while (state->slots.ix < state->slots.size) {
DbTable *tb = NULL;
Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- meta_main_tab_lock(ix);
+ erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
+ erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(ix)) {
- tb = db_ref(meta_main_tab[ix].u.tb);
+ tb = db_ref(meta_main_tab[ix].u.tb, LCK_WRITE_REC);
ASSERT(tb);
}
- meta_main_tab_unlock(ix);
+ erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int reds;
DbFixation** pp;
@@ -3145,7 +3321,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
- Eterm meta_tuple[3];
+ DeclareTmpHeap(meta_tuple,3,p);
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
@@ -3179,12 +3355,15 @@ static void fix_table_locked(Process* p, DbTable* tb)
erts_smp_mtx_unlock(&tb->common.fixlock);
#endif
p->flags |= F_USING_DB;
+ UseTmpHeap(3,p);
db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
if (db_put_hash(meta_pid_to_fixed_tab,
TUPLE2(meta_tuple, p->id, make_small(tb->common.slot)),
0) != DB_ERROR_NONE) {
+ UnUseTmpHeap(3,p);
erl_exit(1,"Could not insert ets metadata in safe_fixtable.");
}
+ UnUseTmpHeap(3,p);
db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
}
@@ -3231,6 +3410,7 @@ unlocked:
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
+ db_ref(tb, LCK_WRITE); /* LCK_WRITE needs it, but not LCK_READ */
erts_smp_rwmtx_runlock(&tb->common.rwlock);
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
@@ -3264,7 +3444,7 @@ static void free_fixations_locked(DbTable *tb)
tb->common.fixations = NULL;
}
-static void set_heir(Process* me, DbTable* tb, Eterm heir, Eterm heir_data)
+static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
{
tb->common.heir = heir;
if (heir == am_none) {
@@ -3285,10 +3465,13 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, Eterm heir_data)
}
if (!is_immed(heir_data)) {
- Eterm tmp[2];
+ DeclareTmpHeap(tmp,2,me);
+
+ UseTmpHeap(2,me);
/* Make a dummy 1-tuple around data to use db_get_term() */
- heir_data = (Eterm) db_get_term(&tb->common, NULL, 0,
+ heir_data = (UWord) db_get_term(&tb->common, NULL, 0,
TUPLE1(tmp,heir_data));
+ UnUseTmpHeap(2,me);
ASSERT(!is_immed(heir_data));
}
tb->common.heir_data = heir_data;
@@ -3311,10 +3494,13 @@ static BIF_RETTYPE ets_delete_trap(Process *p, Eterm cont)
{
int trap;
Eterm* ptr = big_val(cont);
- DbTable *tb = (DbTable *) ptr[1];
+ DbTable *tb = *((DbTable **) (UWord) (ptr + 1));
+#if HALFWORD_HEAP
+ ASSERT(*ptr == make_pos_bignum_header(2));
+#else
ASSERT(*ptr == make_pos_bignum_header(1));
-
+#endif
db_lock(tb, LCK_WRITE);
trap = free_table_cont(p, tb, 0, 1);
db_unlock(tb, LCK_WRITE);
@@ -3337,6 +3523,7 @@ static int free_table_cont(Process *p,
int clean_meta_tab)
{
Eterm result;
+ erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
if (!first) {
@@ -3362,9 +3549,20 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- meta_main_tab_lock(tb->common.slot);
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disappearing.
+ */
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(mmtl);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ }
+#endif
free_slot(tb->common.slot);
- meta_main_tab_unlock(tb->common.slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
if (clean_meta_tab) {
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
@@ -3372,7 +3570,7 @@ static int free_table_cont(Process *p,
make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
}
- db_unref(tb);
+ db_unref(tb, LCK_NONE);
BUMP_REDS(p, 100);
return 0;
}
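
The erl_db.c changes above switch the meta-table locks over to the option-based rwlock initializer so that reader-heavy locks can be flagged as frequent-read. A minimal sketch of that initialization pattern, assuming the option names shown elsewhere in this diff (ERTS_SMP_THR_OPTS_DEFAULT_INITER, ERTS_SMP_RWMTX_TYPE_FREQUENT_READ, ERTS_SMP_RWMTX_LONG_LIVED); the surrounding setup code is illustrative, not the full init_db():

    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;

    /* mostly-read lock: mark it frequent-read */
    rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
    /* set for long-lived locks, e.g. the fun table lock later in this diff */
    rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;

    erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
                              "meta_name_tab", make_small(i));
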
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 4141f9766b..5abd2e50fa 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -623,12 +623,16 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init(&tb->is_resizing, 0);
#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
int i;
+ if (tb->common.type & DB_FREQ_READ)
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", make_small(i));
+ erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", make_small(i));
}
/* This important property is needed to guarantee that the buckets
* involved in a grow/shrink operation is protected by the same lock:
@@ -1284,7 +1288,7 @@ static int db_select_continue_hash(Process *p,
(match_res =
db_prog_match(p,mp,
make_tuple(current->dbterm.tpl),
- 0,&dummy),
+ NULL,0,&dummy),
is_value(match_res))) {
if (all_objects) {
hp = HAlloc(p, current->dbterm.size + 2);
@@ -1462,7 +1466,7 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl,
if (current->hvalue != INVALID_HASH) {
match_res = db_prog_match(p,mpi.mp,
make_tuple(current->dbterm.tpl),
- 0,&dummy);
+ NULL,0,&dummy);
if (is_value(match_res)) {
if (mpi.all_objects) {
hp = HAlloc(p, current->dbterm.size + 2);
@@ -1641,7 +1645,7 @@ static int db_select_count_hash(Process *p,
if (current != NULL) {
if (current->hvalue != INVALID_HASH) {
if (db_prog_match(p, mpi.mp, make_tuple(current->dbterm.tpl),
- 0, &dummy) == am_true) {
+ NULL,0, &dummy) == am_true) {
++got;
}
--num_left;
@@ -1792,7 +1796,7 @@ static int db_select_delete_hash(Process *p,
int did_erase = 0;
if ((db_prog_match(p,mpi.mp,
make_tuple((*current)->dbterm.tpl),
- 0,&dummy)) == am_true) {
+ NULL,0,&dummy)) == am_true) {
if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
if (slot_ix != last_pseudo_delete) {
add_fixed_deletion(tb, slot_ix);
@@ -1904,7 +1908,7 @@ static int db_select_delete_continue_hash(Process *p,
else {
int did_erase = 0;
if ((db_prog_match(p,mp,make_tuple((*current)->dbterm.tpl),
- 0,&dummy)) == am_true) {
+ NULL,0,&dummy)) == am_true) {
if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
if (slot_ix != last_pseudo_delete) {
add_fixed_deletion(tb, slot_ix);
@@ -2005,7 +2009,7 @@ static int db_select_count_continue_hash(Process *p,
continue;
}
if (db_prog_match(p, mp, make_tuple(current->dbterm.tpl),
- 0,&dummy) == am_true) {
+ NULL,0,&dummy) == am_true) {
++got;
}
--num_left;
@@ -2741,6 +2745,7 @@ static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
ASSERT(&oldp->dbterm == handle->dbterm);
if (handle->mustResize) {
+ ErlOffHeap tmp_offheap;
Eterm* top;
Eterm copy;
DbTerm* newDbTerm;
@@ -2751,18 +2756,15 @@ static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
newDbTerm = &newp->dbterm;
newDbTerm->size = handle->new_size;
- newDbTerm->off_heap.mso = NULL;
- newDbTerm->off_heap.externals = NULL;
- #ifndef HYBRID /* FIND ME! */
- newDbTerm->off_heap.funs = NULL;
- #endif
- newDbTerm->off_heap.overhead = 0;
+ tmp_offheap.first = NULL;
+ tmp_offheap.overhead = 0;
/* make a flat copy */
top = DBTERM_BUF(newDbTerm);
copy = copy_struct(make_tuple(handle->dbterm->tpl),
handle->new_size,
- &top, &newDbTerm->off_heap);
+ &top, &tmp_offheap);
+ newDbTerm->first_oh = tmp_offheap.first;
DBTERM_SET_TPL(newDbTerm,tuple_val(copy));
WUNLOCK_HASH(lck);
@@ -2805,7 +2807,11 @@ void db_foreach_offheap_hash(DbTable *tbl,
for (i = 0; i < nactive; i++) {
list = BUCKET(tb,i);
while(list != 0) {
- (*func)(&(list->dbterm.off_heap), arg);
+ ErlOffHeap tmp_offheap;
+ tmp_offheap.first = list->dbterm.first_oh;
+ tmp_offheap.overhead = 0;
+ (*func)(&tmp_offheap, arg);
+ list->dbterm.first_oh = tmp_offheap.first;
list = list->next;
}
}
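
The DbTerm change visible here (and in erl_db_util.h further down) replaces the embedded ErlOffHeap with a single list head, first_oh. Call sites therefore wrap it in a temporary ErlOffHeap on the C stack whenever an off-heap aware routine runs; a minimal sketch of that pattern, mirroring db_foreach_offheap_hash() above:

    ErlOffHeap tmp_offheap;

    tmp_offheap.first = dbterm->first_oh;   /* resume the term's off-heap list    */
    tmp_offheap.overhead = 0;
    (*func)(&tmp_offheap, arg);             /* off-heap walking callback          */
    dbterm->first_oh = tmp_offheap.first;   /* write back the (possibly new) head */
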
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index b421da591b..5644e85f97 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -443,9 +443,9 @@ void db_initialize_tree(void)
ets_select_reverse_exp.code[1] = am_reverse;
ets_select_reverse_exp.code[2] = 3;
ets_select_reverse_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
ets_select_reverse_exp.code[4] =
- (Eterm) &ets_select_reverse;
+ (BeamInstr) &ets_select_reverse;
return;
};
@@ -1081,11 +1081,12 @@ static int db_select_continue_tree(Process *p,
static int db_select_tree(Process *p, DbTable *tbl,
Eterm pattern, int reverse, Eterm *ret)
{
+ /* Strategy: Traverse backwards to build resulting list from tail to head */
DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1293,7 +1294,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
DbTreeStack* stack;
struct select_count_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1395,7 +1396,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1636,7 +1637,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
DbTableTree *tb = &tbl->tree;
struct select_delete_context sc;
struct mp_info mpi;
- Eterm lastkey = NIL;
+ Eterm lastkey = THE_NON_VALUE;
Eterm key;
Eterm continuation;
unsigned sz;
@@ -1817,10 +1818,14 @@ do_db_tree_foreach_offheap(TreeDbTerm *tdbt,
void (*func)(ErlOffHeap *, void *),
void * arg)
{
+ ErlOffHeap tmp_offheap;
if(!tdbt)
return;
do_db_tree_foreach_offheap(tdbt->left, func, arg);
- (*func)(&(tdbt->dbterm.off_heap), arg);
+ tmp_offheap.first = tdbt->dbterm.first_oh;
+ tmp_offheap.overhead = 0;
+ (*func)(&tmp_offheap, arg);
+ tdbt->dbterm.first_oh = tmp_offheap.first;
do_db_tree_foreach_offheap(tdbt->right, func, arg);
}
@@ -2574,6 +2579,7 @@ static int db_lookup_dbterm_tree(DbTable *tbl, Eterm key, DbUpdateHandle* handle
static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
{
if (handle->mustResize) {
+ ErlOffHeap tmp_offheap;
Eterm* top;
Eterm copy;
DbTerm* newDbTerm;
@@ -2588,18 +2594,15 @@ static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
newDbTerm = &newp->dbterm;
newDbTerm->size = handle->new_size;
- newDbTerm->off_heap.mso = NULL;
- newDbTerm->off_heap.externals = NULL;
- #ifndef HYBRID /* FIND ME! */
- newDbTerm->off_heap.funs = NULL;
- #endif
- newDbTerm->off_heap.overhead = 0;
+ tmp_offheap.first = NULL;
+ tmp_offheap.overhead = 0;
/* make a flat copy */
top = DBTERM_BUF(newDbTerm);
copy = copy_struct(make_tuple(handle->dbterm->tpl),
handle->new_size,
- &top, &newDbTerm->off_heap);
+ &top, &tmp_offheap);
+ newDbTerm->first_oh = tmp_offheap.first;
DBTERM_SET_TPL(newDbTerm,tuple_val(copy));
db_free_term_data(handle->dbterm);
@@ -2628,7 +2631,7 @@ static void traverse_backwards(DbTableTree *tb,
{
TreeDbTerm *this, *next;
- if (lastkey == NIL) {
+ if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
if (( this = tb->root ) == NULL) {
return;
@@ -2666,7 +2669,7 @@ static void traverse_forward(DbTableTree *tb,
{
TreeDbTerm *this, *next;
- if (lastkey == NIL) {
+ if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
if (( this = tb->root ) == NULL) {
return;
@@ -3023,7 +3026,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
}
ret = db_prog_match(sc->p, sc->mp,
make_tuple(this->dbterm.tpl),
- 0, &dummy);
+ NULL,0, &dummy);
if (is_value(ret)) {
Uint sz;
Eterm *hp;
@@ -3072,7 +3075,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
}
ret = db_prog_match(sc->p, sc->mp,
make_tuple(this->dbterm.tpl),
- 0, &dummy);
+ NULL,0, &dummy);
if (ret == am_true) {
++(sc->got);
}
@@ -3105,7 +3108,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
ret = db_prog_match(sc->p, sc->mp,
make_tuple(this->dbterm.tpl),
- 0, &dummy);
+ NULL,0, &dummy);
if (is_value(ret)) {
Uint sz;
Eterm *hp;
@@ -3158,7 +3161,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 0;
ret = db_prog_match(sc->p, sc->mp,
make_tuple(this->dbterm.tpl),
- 0, &dummy);
+ NULL,0, &dummy);
if (ret == am_true) {
key = GETKEY(sc->tb, this->dbterm.tpl);
linkout_tree(sc->tb, key);
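
A recurring change in erl_db_tree.c is the traversal sentinel: NIL ([]) is a valid key in an ordered_set table, so it cannot safely mean "no previous key", whereas THE_NON_VALUE can never be a stored term. A sketch of how traverse_backwards()/traverse_forward() use the new sentinel (excerpt-style, names as in the diff):

    if (lastkey == THE_NON_VALUE) {
        /* first call of this traversal: start from one end of the tree */
        stack->pos = stack->slot = 0;
        if ((this = tb->root) == NULL)
            return;
    }
    /* otherwise continue from the position identified by lastkey */
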
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 8c373451fd..2f34561234 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -117,6 +117,10 @@ do { \
erts_free(ERTS_ALC_T_DB_MC_STK, (Name).data); \
} while (0)
+
+#define TermWords(t) (((t) / (sizeof(UWord)/sizeof(Eterm))) + !!((t) % (sizeof(UWord)/sizeof(Eterm))))
+
+
static ERTS_INLINE Process *
get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks)
{
@@ -281,7 +285,7 @@ typedef struct dmc_guard_bif {
*/
DMC_DECLARE_STACK_TYPE(Eterm);
-DMC_DECLARE_STACK_TYPE(Uint);
+DMC_DECLARE_STACK_TYPE(UWord);
DMC_DECLARE_STACK_TYPE(unsigned);
@@ -359,12 +363,7 @@ static ErtsMatchPseudoProcess *match_pseudo_process;
static ERTS_INLINE void
cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
{
- if (mpsp->process.mbuf
- || mpsp->process.off_heap.mso
-#ifndef HYBRID /* FIND ME! */
- || mpsp->process.off_heap.funs
-#endif
- || mpsp->process.off_heap.externals) {
+ if (mpsp->process.mbuf || mpsp->process.off_heap.first) {
erts_cleanup_empty_process(&mpsp->process);
}
#ifdef DEBUG
@@ -382,7 +381,7 @@ cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
else {
int i;
for (i = 0; i < ERTS_DEFAULT_MS_HEAP_SIZE; i++) {
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
mpsp->default_heap[i] = (Eterm) 0xdeadbeefdeadbeef;
#else
mpsp->default_heap[i] = (Eterm) 0xdeadbeef;
@@ -830,42 +829,42 @@ static Uint my_size_object(Eterm t);
static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap);
/* Guard compilation */
-static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(Uint) *text,
+static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
Eterm t);
static DMCRet dmc_list(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant);
static DMCRet dmc_tuple(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant);
static DMCRet dmc_variable(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant);
static DMCRet dmc_fun(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant);
static DMCRet dmc_expr(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant);
static DMCRet compile_guard_expr(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t);
/* match expression subroutine */
static DMCRet dmc_one_term(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(Eterm) *stack,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm c);
@@ -1185,7 +1184,7 @@ Eterm erts_match_set_run(Process *p, Binary *mpsp,
Eterm ret;
ret = db_prog_match(p, mpsp,
- (Eterm) args,
+ NIL, args,
num_args, return_flags);
#if defined(HARDDEBUG)
if (is_non_value(ret)) {
@@ -1204,6 +1203,32 @@ Eterm erts_match_set_run(Process *p, Binary *mpsp,
*/
}
+static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp,
+ Eterm args, int num_args,
+ Uint32 *return_flags)
+{
+ Eterm ret;
+
+ ret = db_prog_match(p, mpsp,
+ args, NULL,
+ num_args, return_flags);
+#if defined(HARDDEBUG)
+ if (is_non_value(ret)) {
+ erts_fprintf(stderr, "Failed\n");
+ } else {
+ erts_fprintf(stderr, "Returning : %T\n", ret);
+ }
+#endif
+ return ret;
+ /* Returns
+ * THE_NON_VALUE if no match
+ * am_false if {message,false} has been called,
+ * am_true if {message,_} has not been called or
+ * if {message,true} has been called,
+ * Msg if {message,Msg} has been called.
+ */
+}
+
/*
** API Used by other erl_db modules.
*/
@@ -1245,7 +1270,7 @@ Binary *db_match_compile(Eterm *matchexpr,
{
DMCHeap heap;
DMC_STACK_TYPE(Eterm) stack;
- DMC_STACK_TYPE(Uint) text;
+ DMC_STACK_TYPE(UWord) text;
DMCContext context;
MatchProg *ret = NULL;
Eterm t;
@@ -1380,7 +1405,7 @@ restart:
/*
** There is one single top variable in the match expression
- ** iff the text is tho Uint's and the single instruction
+ ** iff the text is two Uint's and the single instruction
** is 'matchBind' or it is only a skip.
*/
context.special =
@@ -1491,8 +1516,8 @@ restart:
** A special case is when the match expression is a single binding
** (i.e '$1'), then the field single_variable is set to 1.
*/
- bp = erts_create_magic_binary(((sizeof(MatchProg) - sizeof(Uint)) +
- (DMC_STACK_NUM(text) * sizeof(Uint))),
+ bp = erts_create_magic_binary(((sizeof(MatchProg) - sizeof(UWord)) +
+ (DMC_STACK_NUM(text) * sizeof(UWord))),
erts_db_match_prog_destructor);
ret = Binary2MatchProg(bp);
ret->saved_program_buf = NULL;
@@ -1501,7 +1526,7 @@ restart:
ret->num_bindings = heap.used;
ret->single_variable = context.special;
sys_memcpy(ret->text, DMC_STACK_DATA(text),
- DMC_STACK_NUM(text) * sizeof(Uint));
+ DMC_STACK_NUM(text) * sizeof(UWord));
ret->heap_size = ((heap.used * sizeof(Eterm)) +
(max_eheap_need * sizeof(Eterm)) +
(context.stack_need * sizeof(Eterm *)) +
@@ -1517,10 +1542,9 @@ restart:
*/
context.save = NULL;
error: /* Here is where we land when compilation failed. */
- while (context.save != NULL) {
- ErlHeapFragment *ll = context.save->next;
+ if (context.save != NULL) {
free_message_buffer(context.save);
- context.save = ll;
+ context.save = NULL;
}
DMC_FREE(stack);
DMC_FREE(text);
@@ -1537,15 +1561,11 @@ error: /* Here is where we land when compilation failed. */
void erts_db_match_prog_destructor(Binary *bprog)
{
MatchProg *prog;
- ErlHeapFragment *tmp, *ll;
if (bprog == NULL)
return;
prog = Binary2MatchProg(bprog);
- tmp = prog->term_save;
- while (tmp != NULL) {
- ll = tmp->next;
- free_message_buffer(tmp);
- tmp = ll;
+ if (prog->term_save != NULL) {
+ free_message_buffer(prog->term_save);
}
if (prog->saved_program_buf != NULL)
free_message_buffer(prog->saved_program_buf);
@@ -1591,6 +1611,7 @@ static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity)
** i.e. 'DCOMP_TRACE' was specified
*/
Eterm db_prog_match(Process *c_p, Binary *bprog, Eterm term,
+ Eterm *termp,
int arity,
Uint32 *return_flags)
{
@@ -1601,7 +1622,8 @@ Eterm db_prog_match(Process *c_p, Binary *bprog, Eterm term,
Eterm **sp;
Eterm *esp;
Eterm *hp;
- Uint *pc = prog->text;
+ BeamInstr *cp;
+ UWord *pc = prog->text;
Eterm *ehp;
Eterm ret;
Uint n = 0; /* To avoid warning. */
@@ -1616,9 +1638,9 @@ Eterm db_prog_match(Process *c_p, Binary *bprog, Eterm term,
int fail_label;
int atomic_trace;
#ifdef DMC_DEBUG
- unsigned long *heap_fence;
- unsigned long *eheap_fence;
- unsigned long *stack_fence;
+ Uint *heap_fence;
+ Uint *eheap_fence;
+ Uint *stack_fence;
Uint save_op;
#endif /* DMC_DEBUG */
@@ -1654,9 +1676,9 @@ Eterm db_prog_match(Process *c_p, Binary *bprog, Eterm term,
#ifdef DMC_DEBUG
save_op = 0;
- heap_fence = (unsigned long *) mpsp->heap + prog->eheap_offset - 1;
- eheap_fence = (unsigned long *) mpsp->heap + prog->stack_offset - 1;
- stack_fence = (unsigned long *) mpsp->heap + prog->heap_size - 1;
+ heap_fence = (Uint *) mpsp->heap + prog->eheap_offset - 1;
+ eheap_fence = (Uint *) mpsp->heap + prog->stack_offset - 1;
+ stack_fence = (Uint *) mpsp->heap + prog->heap_size - 1;
*heap_fence = FENCE_PATTERN;
*eheap_fence = FENCE_PATTERN;
*stack_fence = FENCE_PATTERN;
@@ -1709,11 +1731,11 @@ restart:
n = *pc++;
if ((int) n != arity)
FAIL();
- ep = (Eterm *) *ep;
+ ep = termp;
break;
case matchArrayBind: /* When the array size is unknown. */
n = *pc++;
- hp[n] = dpm_array_to_list(psp, (Eterm *) term, arity);
+ hp[n] = dpm_array_to_list(psp, termp, arity);
break;
case matchTuple: /* *ep is a tuple of arity n */
if (!is_tuple(*ep))
@@ -1770,29 +1792,34 @@ restart:
FAIL();
if (memcmp(float_val(*ep) + 1, pc, sizeof(double)))
FAIL();
- pc += 2;
+ pc += TermWords(2);
++ep;
break;
case matchEqRef:
if (!is_ref(*ep))
FAIL();
- if (!eq(*ep, make_internal_ref(pc)))
+ if (!eq(*ep, make_internal_ref((Uint *) pc)))
FAIL();
- i = thing_arityval(*pc);
- pc += i+1;
+ i = thing_arityval(*((Uint *) pc));
+ pc += TermWords(i+1);
++ep;
break;
case matchEqBig:
if (!is_big(*ep))
FAIL();
tp = big_val(*ep);
- if (*tp != *pc)
- FAIL();
- i = BIG_ARITY(pc);
- while(i--)
- if (*++tp != *++pc)
+ {
+ Eterm *epc = (Eterm *) pc;
+ if (*tp != *epc)
FAIL();
- ++pc;
+ i = BIG_ARITY(epc);
+ pc += TermWords(i+1);
+ while(i--) {
+ if (*++tp != *++epc) {
+ FAIL();
+ }
+ }
+ }
++ep;
break;
case matchEq:
@@ -1884,7 +1911,7 @@ restart:
break;
case matchPushArrayAsList:
n = arity; /* Only happens when 'term' is an array */
- tp = (Eterm *) term;
+ tp = termp;
*esp++ = make_list(ehp);
while (n--) {
*ehp++ = *tp++;
@@ -1897,7 +1924,7 @@ restart:
break;
case matchPushArrayAsListU:
/* This instruction is NOT efficient. */
- *esp++ = dpm_array_to_list(psp, (Eterm *) term, arity);
+ *esp++ = dpm_array_to_list(psp, termp, arity);
break;
case matchTrue:
if (*--esp != am_true)
@@ -2095,17 +2122,17 @@ restart:
}
break;
case matchCaller:
- if (!(c_p->cp) || !(hp = find_function_from_pc(c_p->cp))) {
+ if (!(c_p->cp) || !(cp = find_function_from_pc(c_p->cp))) {
*esp++ = am_undefined;
} else {
*esp++ = make_tuple(ehp);
- ehp[0] = make_arityval(3);
- ehp[1] = hp[0];
- ehp[2] = hp[1];
- ehp[3] = make_small(hp[2]);
- ehp += 4;
- }
- break;
+ ehp[0] = make_arityval(3);
+ ehp[1] = cp[0];
+ ehp[2] = cp[1];
+ ehp[3] = make_small((Uint) cp[2]);
+ ehp += 4;
+ }
+ break;
case matchSilent:
--esp;
if (*esp == am_true) {
@@ -2300,7 +2327,7 @@ void db_free_dmc_err_info(DMCErrInfo *ei){
*/
Eterm db_add_counter(Eterm** hpp, Eterm counter, Eterm incr)
{
- Eterm big_tmp[2];
+ DeclareTmpHeapNoproc(big_tmp,2);
Eterm res;
Sint ires;
Eterm arg1;
@@ -2318,6 +2345,7 @@ Eterm db_add_counter(Eterm** hpp, Eterm counter, Eterm incr)
}
}
else {
+ UseTmpHeapNoproc(2);
switch(NUMBER_CODE(counter, incr)) {
case SMALL_BIG:
arg1 = small_to_big(signed_val(counter), big_tmp);
@@ -2332,12 +2360,14 @@ Eterm db_add_counter(Eterm** hpp, Eterm counter, Eterm incr)
arg2 = counter;
break;
default:
+ UnUseTmpHeapNoproc(2);
return THE_NON_VALUE;
}
res = big_plus(arg1, arg2, *hpp);
if (is_big(res)) {
*hpp += BIG_NEED_SIZE(big_size(res));
}
+ UnUseTmpHeapNoproc(2);
return res;
}
}
@@ -2421,9 +2451,13 @@ void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
DbTerm* p;
Eterm copy;
Eterm *top;
+ ErlOffHeap tmp_offheap;
if (old != 0) {
- erts_cleanup_offheap(&old->off_heap);
+ tmp_offheap.first = old->first_oh;
+ tmp_offheap.overhead = 0;
+ erts_cleanup_offheap(&tmp_offheap);
+ old->first_oh = tmp_offheap.first;
if (size == old->size) {
p = old;
} else {
@@ -2459,15 +2493,12 @@ void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
p = (DbTerm*) ((void *)(((char *) structp) + offset));
}
p->size = size;
- p->off_heap.mso = NULL;
- p->off_heap.externals = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.overhead = 0;
+ tmp_offheap.first = NULL;
+ tmp_offheap.overhead = 0;
top = DBTERM_BUF(p);
- copy = copy_struct(obj, size, &top, &p->off_heap);
+ copy = copy_struct(obj, size, &top, &tmp_offheap);
+ p->first_oh = tmp_offheap.first;
DBTERM_SET_TPL(p,tuple_val(copy));
return structp;
@@ -2476,7 +2507,10 @@ void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
void db_free_term_data(DbTerm* p)
{
- erts_cleanup_offheap(&p->off_heap);
+ ErlOffHeap tmp_offheap;
+ tmp_offheap.first = p->first_oh;
+ tmp_offheap.overhead = 0;
+ erts_cleanup_offheap(&tmp_offheap);
}
@@ -2606,7 +2640,7 @@ static void add_dmc_err(DMCErrInfo *err_info,
static DMCRet dmc_one_term(DMCContext *context,
DMCHeap *heap,
DMC_STACK_TYPE(Eterm) *stack,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm c)
{
Sint n;
@@ -2704,27 +2738,80 @@ static DMCRet dmc_one_term(DMCContext *context,
DMC_PUSH(*stack, c);
break;
case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
- n = thing_arityval(*internal_ref_val(c));
DMC_PUSH(*text, matchEqRef);
+#if HALFWORD_HEAP
+ {
+ union {
+ UWord u;
+ Uint t[2];
+ } fiddle;
+ ASSERT(thing_arityval(*internal_ref_val(c)) == 3);
+ fiddle.t[0] = *internal_ref_val(c);
+ fiddle.t[1] = (Uint) internal_ref_val(c)[1];
+ DMC_PUSH(*text, fiddle.u);
+ fiddle.t[0] = (Uint) internal_ref_val(c)[2];
+ fiddle.t[1] = (Uint) internal_ref_val(c)[3];
+ DMC_PUSH(*text, fiddle.u);
+ }
+#else
+ n = thing_arityval(*internal_ref_val(c));
DMC_PUSH(*text, *internal_ref_val(c));
for (i = 1; i <= n; ++i) {
DMC_PUSH(*text, (Uint) internal_ref_val(c)[i]);
}
+#endif
break;
case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
n = thing_arityval(*big_val(c));
DMC_PUSH(*text, matchEqBig);
+#if HALFWORD_HEAP
+ {
+ union {
+ UWord u;
+ Uint t[2];
+ } fiddle;
+ ASSERT(n >= 1);
+ fiddle.t[0] = *big_val(c);
+ fiddle.t[1] = big_val(c)[1];
+ DMC_PUSH(*text, fiddle.u);
+ for (i = 2; i <= n; ++i) {
+ fiddle.t[0] = big_val(c)[i];
+ if (++i <= n) {
+ fiddle.t[1] = big_val(c)[i];
+ } else {
+ fiddle.t[1] = (Uint) 0;
+ }
+ DMC_PUSH(*text, fiddle.u);
+ }
+ }
+#else
DMC_PUSH(*text, *big_val(c));
for (i = 1; i <= n; ++i) {
DMC_PUSH(*text, (Uint) big_val(c)[i]);
}
+#endif
break;
case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
DMC_PUSH(*text,matchEqFloat);
+#if HALFWORD_HEAP
+ {
+ union {
+ UWord u;
+ Uint t[2];
+ } fiddle;
+ fiddle.t[0] = float_val(c)[1];
+ fiddle.t[1] = float_val(c)[2];
+ DMC_PUSH(*text, fiddle.u);
+ }
+#else
DMC_PUSH(*text, (Uint) float_val(c)[1]);
- /* XXX: this reads and pushes random junk on ARCH_64 */
+#ifdef ARCH_64
+ DMC_PUSH(*text, (Uint) 0);
+#else
DMC_PUSH(*text, (Uint) float_val(c)[2]);
+#endif
+#endif
break;
default: /* BINARY, FUN, VECTOR, or EXTERNAL */
/*
@@ -2753,7 +2840,7 @@ static DMCRet dmc_one_term(DMCContext *context,
** Match guard compilation
*/
-static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(Uint) *text,
+static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
Eterm t)
{
int sz;
@@ -2807,7 +2894,7 @@ add_dmc_err((ContextP)->err_info, String, -1, T, dmcWarning)
static DMCRet dmc_list(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -2843,11 +2930,11 @@ static DMCRet dmc_list(DMCContext *context,
static DMCRet dmc_tuple(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
- DMC_STACK_TYPE(Uint) instr_save;
+ DMC_STACK_TYPE(UWord) instr_save;
int all_constant = 1;
int textpos = DMC_STACK_NUM(*text);
Eterm *p = tuple_val(t);
@@ -2903,7 +2990,7 @@ static DMCRet dmc_tuple(DMCContext *context,
static DMCRet dmc_whole_expression(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -2931,7 +3018,7 @@ static DMCRet dmc_whole_expression(DMCContext *context,
static DMCRet dmc_variable(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -2952,7 +3039,7 @@ static DMCRet dmc_variable(DMCContext *context,
static DMCRet dmc_all_bindings(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -2979,7 +3066,7 @@ static DMCRet dmc_all_bindings(DMCContext *context,
static DMCRet dmc_const(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -2996,7 +3083,7 @@ static DMCRet dmc_const(DMCContext *context,
static DMCRet dmc_and(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3025,7 +3112,7 @@ static DMCRet dmc_and(DMCContext *context,
static DMCRet dmc_or(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3055,7 +3142,7 @@ static DMCRet dmc_or(DMCContext *context,
static DMCRet dmc_andalso(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3104,7 +3191,7 @@ static DMCRet dmc_andalso(DMCContext *context,
static DMCRet dmc_orelse(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3152,7 +3239,7 @@ static DMCRet dmc_orelse(DMCContext *context,
static DMCRet dmc_message(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3194,7 +3281,7 @@ static DMCRet dmc_message(DMCContext *context,
static DMCRet dmc_self(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3214,7 +3301,7 @@ static DMCRet dmc_self(DMCContext *context,
static DMCRet dmc_return_trace(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3244,7 +3331,7 @@ static DMCRet dmc_return_trace(DMCContext *context,
static DMCRet dmc_exception_trace(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3276,7 +3363,7 @@ static DMCRet dmc_exception_trace(DMCContext *context,
static DMCRet dmc_is_seq_trace(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3302,7 +3389,7 @@ static DMCRet dmc_is_seq_trace(DMCContext *context,
static DMCRet dmc_set_seq_token(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3351,7 +3438,7 @@ static DMCRet dmc_set_seq_token(DMCContext *context,
static DMCRet dmc_get_seq_token(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3388,7 +3475,7 @@ static DMCRet dmc_get_seq_token(DMCContext *context,
static DMCRet dmc_display(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3428,7 +3515,7 @@ static DMCRet dmc_display(DMCContext *context,
static DMCRet dmc_process_dump(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3458,7 +3545,7 @@ static DMCRet dmc_process_dump(DMCContext *context,
static DMCRet dmc_enable_trace(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3518,7 +3605,7 @@ static DMCRet dmc_enable_trace(DMCContext *context,
static DMCRet dmc_disable_trace(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3578,7 +3665,7 @@ static DMCRet dmc_disable_trace(DMCContext *context,
static DMCRet dmc_trace(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3652,7 +3739,7 @@ static DMCRet dmc_trace(DMCContext *context,
static DMCRet dmc_caller(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3685,7 +3772,7 @@ static DMCRet dmc_caller(DMCContext *context,
static DMCRet dmc_silent(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3727,7 +3814,7 @@ static DMCRet dmc_silent(DMCContext *context,
static DMCRet dmc_fun(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3844,7 +3931,7 @@ static DMCRet dmc_fun(DMCContext *context,
erl_exit(1,"ets:match() internal error, "
"guard with more than 3 arguments.");
}
- DMC_PUSH(*text, (Uint) b->biff);
+ DMC_PUSH(*text, (UWord) b->biff);
context->stack_used -= (((int) a) - 2);
if (context->stack_used > context->stack_need)
context->stack_need = context->stack_used;
@@ -3853,7 +3940,7 @@ static DMCRet dmc_fun(DMCContext *context,
static DMCRet dmc_expr(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm t,
int *constant)
{
@@ -3916,7 +4003,7 @@ static DMCRet dmc_expr(DMCContext *context,
static DMCRet compile_guard_expr(DMCContext *context,
DMCHeap *heap,
- DMC_STACK_TYPE(Uint) *text,
+ DMC_STACK_TYPE(UWord) *text,
Eterm l)
{
DMCRet ret;
@@ -4031,7 +4118,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
DMC_INIT_STACK(heap);
p = expr->mem;
- i = expr->size;
+ i = expr->used_size;
while (i--) {
if (is_thing(*p)) {
a = thing_arityval(*p);
@@ -4060,7 +4147,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
}
p = expr->mem;
- i = expr->size;
+ i = expr->used_size;
while (i--) {
if (is_thing(*p)) {
a = thing_arityval(*p);
@@ -4230,7 +4317,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
Eterm l;
Uint32 ret_flags;
Uint sz;
- Eterm *save_cp;
+ BeamInstr *save_cp;
if (trace && !(is_list(against) || against == NIL)) {
return THE_NON_VALUE;
@@ -4271,17 +4358,18 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
++n;
l = CDR(list_val(l));
}
+ save_cp = p->cp;
+ p->cp = NULL;
+ res = erts_match_set_run(p, mps, arr, n, &ret_flags);
+ p->cp = save_cp;
} else {
n = 0;
- arr = (Eterm *) against;
+ arr = NULL;
+ res = erts_match_set_run_ets(p, mps, against, n, &ret_flags);
}
/* We are in the context of a BIF,
{caller} should return 'undefined' */
- save_cp = p->cp;
- p->cp = NULL;
- res = erts_match_set_run(p, mps, arr, n, &ret_flags);
- p->cp = save_cp;
if (is_non_value(res)) {
res = am_false;
}
@@ -4324,7 +4412,7 @@ static Eterm seq_trace_fake(Process *p, Eterm arg1)
static void db_match_dis(Binary *bp)
{
MatchProg *prog = Binary2MatchProg(bp);
- Uint *t = prog->text;
+ UWord *t = prog->text;
Uint n;
Eterm p;
int first;
@@ -4390,41 +4478,48 @@ static void db_match_dis(Binary *bp)
break;
case matchEqRef:
++t;
- n = thing_arityval(*t);
- ++t;
- erts_printf("EqRef\t(%d) {", (int) n);
- first = 1;
- while (n--) {
- if (first)
- first = 0;
- else
- erts_printf(", ");
-#ifdef ARCH_64
- erts_printf("0x%016bpx", *t);
+ {
+ RefThing *rt = (RefThing *) t;
+ int ri;
+ n = thing_arityval(rt->header);
+ erts_printf("EqRef\t(%d) {", (int) n);
+ first = 1;
+ for (ri = 0; ri < n; ++ri) {
+ if (first)
+ first = 0;
+ else
+ erts_printf(", ");
+#if defined(ARCH_64) && !HALFWORD_HEAP
+ erts_printf("0x%016bpx", rt->data.ui[ri]);
#else
- erts_printf("0x%08bpx", *t);
+ erts_printf("0x%08bpx", rt->data.ui[ri]);
#endif
- ++t;
+ }
}
+ t += TermWords(REF_THING_SIZE);
erts_printf("}\n");
break;
case matchEqBig:
++t;
n = thing_arityval(*t);
- ++t;
- erts_printf("EqBig\t(%d) {", (int) n);
- first = 1;
- while (n--) {
- if (first)
- first = 0;
- else
- erts_printf(", ");
-#ifdef ARCH_64
- erts_printf("0x%016bpx", *t);
+ {
+ Eterm *et = (Eterm *) t;
+ t += TermWords(n+1);
+ erts_printf("EqBig\t(%d) {", (int) n);
+ first = 1;
+ ++n;
+ while (n--) {
+ if (first)
+ first = 0;
+ else
+ erts_printf(", ");
+#if defined(ARCH_64) && !HALFWORD_HEAP
+ erts_printf("0x%016bpx", *et);
#else
- erts_printf("0x%08bpx", *t);
+ erts_printf("0x%08bpx", *et);
#endif
- ++t;
+ ++et;
+ }
}
erts_printf("}\n");
break;
@@ -4432,8 +4527,8 @@ static void db_match_dis(Binary *bp)
++t;
{
double num;
- memcpy(&num,t, 2 * sizeof(*t));
- t += 2;
+ memcpy(&num,t,sizeof(double));
+ t += TermWords(2);
erts_printf("EqFloat\t%f\n", num);
}
break;
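
Throughout erl_db_util.c the match-program text is now stored in UWord units so that, on the halfword emulator, 32-bit Eterm constants can be packed into 64-bit program words. The TermWords() macro added above converts a count of Eterm-sized words into program words; a sketch of how the interpreter steps over embedded constants with it:

    #define TermWords(t) (((t) / (sizeof(UWord)/sizeof(Eterm))) + \
                          !!((t) % (sizeof(UWord)/sizeof(Eterm))))

    pc += TermWords(2);       /* skip an embedded float (two Eterm words) */
    pc += TermWords(i + 1);   /* skip a bignum or ref thing with arity i  */

On ordinary 32- and 64-bit builds sizeof(UWord) equals sizeof(Eterm), so TermWords(t) is simply t and the stepping is unchanged.
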
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 4fc7b4f52e..0f333e8b34 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -57,7 +57,7 @@
* A datatype for a database entry stored out of a process heap
*/
typedef struct db_term {
- ErlOffHeap off_heap; /* Off heap data for term. */
+ struct erl_off_heap_header* first_oh; /* Off heap data for term. */
Uint size; /* Size of term in "words" */
Eterm tpl[1]; /* Untagged "constant pointer" to top tuple */
/* (assumed to be first in buffer) */
@@ -212,7 +212,7 @@ typedef struct db_table_common {
#endif
Eterm owner; /* Pid of the creator */
Eterm heir; /* Pid of the heir */
- Eterm heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */
+ UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */
SysTimeval heir_started; /* To further identify the heir */
Eterm the_name; /* an atom */
Eterm id; /* atom | integer */
@@ -240,8 +240,9 @@ typedef struct db_table_common {
#define DB_DUPLICATE_BAG (1 << 8)
#define DB_ORDERED_SET (1 << 9)
#define DB_DELETE (1 << 10) /* table is being deleted */
+#define DB_FREQ_READ (1 << 11)
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED)
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
@@ -304,9 +305,9 @@ typedef struct match_prog {
Uint eheap_offset;
Uint stack_offset;
#ifdef DMC_DEBUG
- Uint* prog_end; /* End of program */
+ UWord* prog_end; /* End of program */
#endif
- Uint text[1]; /* Beginning of program */
+ UWord text[1]; /* Beginning of program */
} MatchProg;
/*
@@ -366,7 +367,7 @@ Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
Uint flags,
DMCErrInfo *err_info);
/* Returns newly allocated MatchProg binary with refc == 0*/
-Eterm db_prog_match(Process *p, Binary *prog, Eterm term, int arity,
+Eterm db_prog_match(Process *p, Binary *prog, Eterm term, Eterm *termp, int arity,
Uint32 *return_flags /* Zeroed on enter */);
/* returns DB_ERROR_NONE if matches, 1 if not matches and some db error on
error. */
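
The db_prog_match() prototype above now carries both a single term and an optional term array: trace match specs run against an argument array, while ETS match specs run against one bound tuple. A sketch of the two call styles as they appear in this diff (variable names illustrative):

    Uint32 dummy;
    Eterm ret;

    /* tracing: an array of call arguments, no single term */
    ret = db_prog_match(p, mpsp, NIL, args, num_args, &dummy);

    /* ETS: one tuple term, no argument array */
    ret = db_prog_match(p, mp, make_tuple(dbterm->tpl), NULL, 0, &dummy);
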
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index e5c3c76fdd..d7d6fcf0a2 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -207,11 +207,7 @@ pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
case FLOAT_DEF: {
FloatDef ff;
GET_DOUBLE(obj, ff);
-#ifdef _OSE_
- erts_print(to, to_arg, "%e", ff.fd);
-#else
erts_print(to, to_arg, "%.20e", ff.fd);
-#endif
}
break;
case BINARY_DEF:
@@ -235,9 +231,9 @@ pps(Process* p, Eterm* stop)
}
while(sp >= stop) {
- erts_print(to, to_arg, "%0*lx: ", PTR_SIZE, (Eterm) sp);
+ erts_print(to, to_arg, "%0*lx: ", PTR_SIZE, (UWord) sp);
if (is_catch(*sp)) {
- erts_print(to, to_arg, "catch %d", (Uint)catch_pc(*sp));
+ erts_print(to, to_arg, "catch %ld", (UWord)catch_pc(*sp));
} else {
paranoid_display(to, to_arg, p, *sp);
}
@@ -265,7 +261,7 @@ static int verify_eterm(Process *p,Eterm element)
return 1;
for (mbuf = p->mbuf; mbuf; mbuf = mbuf->next) {
- if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->size)) {
+ if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->used_size)) {
return 1;
}
}
@@ -312,7 +308,7 @@ void erts_check_stack(Process *p)
if (IN_HEAP(p, ptr))
continue;
for (mbuf = p->mbuf; mbuf; mbuf = mbuf->next)
- if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->size)) {
+ if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->used_size)) {
in_mbuf = 1;
break;
}
@@ -750,7 +746,7 @@ static void print_process_memory(Process *p)
PTR_SIZE, "heap fragments",
dashes, dashes, dashes, dashes);
while (bp) {
- print_untagged_memory(bp->mem,bp->mem + bp->size);
+ print_untagged_memory(bp->mem,bp->mem + bp->used_size);
bp = bp->next;
}
}
@@ -895,5 +891,29 @@ void print_memory_info(Process *p)
#endif
erts_printf("+-----------------%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes);
}
+#if !HEAP_ON_C_STACK && defined(DEBUG)
+Eterm *erts_debug_allocate_tmp_heap(int size, Process *p)
+{
+ ErtsSchedulerData *sd = ((p == NULL) ? erts_get_scheduler_data() : ERTS_PROC_GET_SCHDATA(p));
+ int offset = sd->num_tmp_heap_used;
+
+ ASSERT(offset+size <= TMP_HEAP_SIZE);
+ return (sd->tmp_heap)+offset;
+}
+void erts_debug_use_tmp_heap(int size, Process *p)
+{
+ ErtsSchedulerData *sd = ((p == NULL) ? erts_get_scheduler_data() : ERTS_PROC_GET_SCHDATA(p));
+
+ sd->num_tmp_heap_used += size;
+ ASSERT(sd->num_tmp_heap_used <= TMP_HEAP_SIZE);
+}
+void erts_debug_unuse_tmp_heap(int size, Process *p)
+{
+ ErtsSchedulerData *sd = ((p == NULL) ? erts_get_scheduler_data() : ERTS_PROC_GET_SCHDATA(p));
+
+ sd->num_tmp_heap_used -= size;
+ ASSERT(sd->num_tmp_heap_used >= 0);
+}
+#endif
#endif
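
The erts_debug_*_tmp_heap() helpers above back the DeclareTmpHeap/UseTmpHeap/UnUseTmpHeap macros in debug builds without HEAP_ON_C_STACK; elsewhere the macros expand to a plain C array. A sketch of how the callers touched by this commit pair them up (the macros themselves are defined elsewhere in the emulator):

    static void example(Process *p)
    {
        DeclareTmpHeap(buf, 5, p); /* 5 words, from the C stack or the
                                    * scheduler's temporary heap            */
        UseTmpHeap(5, p);          /* claim the words before building terms */
        /* ... e.g. TUPLE2(buf, to_pid, make_small(slot)) ... */
        UnUseTmpHeap(5, p);        /* release them on every exit path       */
    }
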
diff --git a/erts/emulator/beam/erl_debug.h b/erts/emulator/beam/erl_debug.h
index 74f4a00b63..bdfbaddbbf 100644
--- a/erts/emulator/beam/erl_debug.h
+++ b/erts/emulator/beam/erl_debug.h
@@ -1,26 +1,27 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
#ifndef _ERL_DEBUG_H_
#define _ERL_DEBUG_H_
-
#ifdef DEBUG
+#include "erl_term.h"
+
#ifdef HIPE
#include "hipe_debug.h"
#endif
@@ -92,6 +93,11 @@ extern void print_tagged_memory(Eterm *start, Eterm *end);
extern void print_untagged_memory(Eterm *start, Eterm *end);
extern void print_memory(Process *p);
extern void print_memory_info(Process *p);
+#if defined(DEBUG) && !HEAP_ON_C_STACK
+extern Eterm *erts_debug_allocate_tmp_heap(int, Process *);
+extern void erts_debug_use_tmp_heap(int, Process *);
+extern void erts_debug_unuse_tmp_heap(int, Process *);
+#endif
#ifdef HYBRID
extern void print_ma_info(void);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 489e74d960..9733c0e5b5 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -48,6 +48,10 @@
# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
# undef SIZEOF_LONG_LONG
#endif
+#ifdef HALFWORD_HEAP_EMULATOR
+# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
+# undef HALFWORD_HEAP_EMULATOR
+#endif
#include "erl_int_sizes_config.h"
#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
# error SIZEOF_CHAR mismatch
@@ -65,6 +69,11 @@
# error SIZEOF_LONG_LONG mismatch
#endif
+/* This is OK to override by the NIF/driver implementor */
+#if defined(HALFWORD_HEAP_EMULATOR_SAVED__) && !defined(HALFWORD_HEAP_EMULATOR)
+#define HALFWORD_HEAP_EMULATOR HALFWORD_HEAP_EMULATOR_SAVED__
+#endif
+
#include "erl_drv_nif.h"
#include <stdlib.h>
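
The save/restore dance around erl_int_sizes_config.h lets a HALFWORD_HEAP_EMULATOR definition supplied by the driver or NIF build survive the include when the installed config does not define the macro itself, as the comment in the hunk notes. A purely illustrative sketch of the implementor's side (an assumption about intended use, not an API requirement):

    /* defining the macro before the include preserves the implementor's value
     * unless erl_int_sizes_config.h defines it too */
    #define HALFWORD_HEAP_EMULATOR 1
    #include "erl_driver.h"
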
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 50d8c25c46..d42820ddf3 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -186,10 +186,9 @@ int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_trylock(&dmtx->mtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_mutex_trylock()");
- return res;
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_trylock()");
+ return ethr_mutex_trylock(&dmtx->mtx);
#else
return 0;
#endif
@@ -199,9 +198,9 @@ void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_lock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_lock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_lock()");
+ ethr_mutex_lock(&dmtx->mtx);
#endif
}
@@ -209,9 +208,9 @@ void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_unlock(&dmtx->mtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_mutex_unlock()");
+ if (!dmtx)
+ fatal_error(EINVAL, "erl_drv_mutex_unlock()");
+ ethr_mutex_unlock(&dmtx->mtx);
#endif
}
@@ -256,9 +255,9 @@ void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_signal(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_signal()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_signal()");
+ ethr_cond_signal(&dcnd->cnd);
#endif
}
@@ -266,9 +265,9 @@ void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
#ifdef USE_THREADS
- int res = dcnd ? ethr_cond_broadcast(&dcnd->cnd) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_cond_broadcast()");
+ if (!dcnd)
+ fatal_error(EINVAL, "erl_drv_cond_broadcast()");
+ ethr_cond_broadcast(&dcnd->cnd);
#endif
}
@@ -277,18 +276,13 @@ void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
#ifdef USE_THREADS
- int res;
if (!dcnd || !dmtx) {
- res = EINVAL;
- error:
- fatal_error(res, "erl_drv_cond_wait()");
+ fatal_error(EINVAL, "erl_drv_cond_wait()");
}
while (1) {
- res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
+ int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
if (res == 0)
break;
- if (res != EINTR)
- goto error;
}
#endif
}
@@ -333,10 +327,9 @@ int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
+ return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -346,9 +339,9 @@ void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+ ethr_rwmutex_rlock(&drwlck->rwmtx);
#endif
}
@@ -356,9 +349,9 @@ void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_runlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_runlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
+ ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
}
@@ -366,10 +359,9 @@ int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_tryrwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0 && res != EBUSY)
- fatal_error(res, "erl_drv_rwlock_tryrwlock()");
- return res;
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
+ return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
#else
return 0;
#endif
@@ -379,9 +371,9 @@ void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+ ethr_rwmutex_rwlock(&drwlck->rwmtx);
#endif
}
@@ -389,9 +381,9 @@ void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_rwunlock(&drwlck->rwmtx) : EINVAL;
- if (res != 0)
- fatal_error(res, "erl_drv_rwlock_rwunlock()");
+ if (!drwlck)
+ fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
+ ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
}
@@ -603,11 +595,7 @@ erl_drv_thread_create(char *name,
dtid->name = ((char *) dtid) + sizeof(struct ErlDrvTid_);
sys_strcpy(dtid->name, name);
}
-#ifdef ERTS_ENABLE_LOCK_COUNT
- res = erts_lcnt_thr_create(&dtid->tid, erl_drv_thread_wrapper, dtid, use_opts);
-#else
res = ethr_thr_create(&dtid->tid, erl_drv_thread_wrapper, dtid, use_opts);
-#endif
if (res != 0) {
erts_free(ERTS_ALC_T_DRV_TID, dtid);
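
After this change the erl_drv lock wrappers treat a NULL handle as a fatal error and otherwise forward straight to the ethread primitives, so the try-variants simply return the ethread result (0 on success, EBUSY when already held). A sketch of the caller-side pattern this preserves:

    if (erl_drv_mutex_trylock(mtx) == EBUSY) {
        /* contended: take the slow path, or retry later */
    } else {
        /* lock acquired */
        erl_drv_mutex_unlock(mtx);
    }
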
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 79e844b315..5dce5ad262 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -37,8 +37,6 @@ static erts_smp_rwmtx_t erts_fun_table_lock;
#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
-#define erts_fun_init_lock() erts_smp_rwmtx_init(&erts_fun_table_lock, \
- "fun_tab")
static HashValue fun_hash(ErlFunEntry* obj);
static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -50,15 +48,19 @@ static void fun_free(ErlFunEntry* obj);
* to unloaded_fun[]. The -1 in unloaded_fun[0] will be interpreted
* as an illegal arity when attempting to call a fun.
*/
-static Eterm unloaded_fun_code[3] = {NIL, -1, 0};
-static Eterm* unloaded_fun = unloaded_fun_code + 2;
+static BeamInstr unloaded_fun_code[3] = {NIL, -1, 0};
+static BeamInstr* unloaded_fun = unloaded_fun_code + 2;
void
erts_init_fun_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
- erts_fun_init_lock();
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
f.alloc = (HALLOC_FUN) fun_alloc;
@@ -192,22 +194,8 @@ erts_erase_fun_entry(ErlFunEntry* fe)
erts_fun_write_unlock();
}
-#ifndef HYBRID /* FIND ME! */
-void
-erts_cleanup_funs(ErlFunThing* funp)
-{
- while (funp) {
- ErlFunEntry* fe = funp->fe;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
- erts_erase_fun_entry(fe);
- }
- funp = funp->next;
- }
-}
-#endif
-
void
-erts_cleanup_funs_on_purge(Eterm* start, Eterm* end)
+erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end)
{
int limit;
HashBucket** bucket;
@@ -222,7 +210,7 @@ erts_cleanup_funs_on_purge(Eterm* start, Eterm* end)
while (b) {
ErlFunEntry* fe = (ErlFunEntry *) b;
- Eterm* addr = fe->address;
+ BeamInstr* addr = fe->address;
if (start <= addr && addr < end) {
fe->address = unloaded_fun;
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index fb5e75649b..2f165afa06 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -33,10 +33,10 @@ typedef struct erl_fun_entry {
int index; /* New style index. */
int old_uniq; /* Unique number (old_style) */
int old_index; /* Old style index */
- Eterm* address; /* Pointer to code for fun */
+ BeamInstr* address; /* Pointer to code for fun */
#ifdef HIPE
- Eterm* native_address; /* Native entry code for fun. */
+ UWord* native_address; /* Native entry code for fun. */
#endif
Uint arity; /* The arity of the fun. */
@@ -53,12 +53,12 @@ typedef struct erl_fun_entry {
typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
+ ErlFunEntry* fe; /* Pointer to fun entry. */
#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* next; /* Next fun in mso list. */
+ struct erl_off_heap_header* next;
#endif
- ErlFunEntry* fe; /* Pointer to fun entry. */
#ifdef HIPE
- Eterm* native_address; /* Native code for the fun. */
+ UWord* native_address; /* Native code for the fun. */
#endif
Uint arity; /* The arity of the fun. */
Uint num_free; /* Number of free variables (in env). */
@@ -86,7 +86,7 @@ void erts_erase_fun_entry(ErlFunEntry* fe);
#ifndef HYBRID /* FIND ME! */
void erts_cleanup_funs(ErlFunThing* funp);
#endif
-void erts_cleanup_funs_on_purge(Eterm* start, Eterm* end);
+void erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end);
void erts_dump_fun_entries(int, void *);
#endif
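
In erl_fun.h the fun's next pointer becomes a generic struct erl_off_heap_header*, so funs, binaries and external things can all live on one unified off-heap list. The offsetof() assertions added to erts_init_gc() below check that they share a common prefix; roughly the following shape (an assumption inferred from those assertions, not a definition taken from this diff):

    struct erl_off_heap_header {
        Eterm thing_word;                 /* boxed subtag header word     */
        Uint size;
        struct erl_off_heap_header* next; /* next object on the same list */
    };
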
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index e9bf37a173..0f4d2a2ef9 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -110,9 +110,7 @@ static Uint adjust_after_fullsweep(Process *p, int size_before,
int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
-static void sweep_proc_bins(Process *p, int fullsweep);
-static void sweep_proc_funs(Process *p, int fullsweep);
-static void sweep_proc_externals(Process *p, int fullsweep);
+static void sweep_off_heap(Process *p, int fullsweep);
static void offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
@@ -126,7 +124,7 @@ static void disallow_heap_frag_ref_in_old_heap(Process* p);
static void disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj);
#endif
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
# define MAX_HEAP_SIZES 154
#else
# define MAX_HEAP_SIZES 55
@@ -145,6 +143,16 @@ erts_init_gc(void)
{
int i = 0;
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
+ ASSERT(offsetof(ProcBin,thing_word) == offsetof(ExternalThing,header));
+ ASSERT(offsetof(ProcBin,size) == offsetof(struct erl_off_heap_header,size));
+ ASSERT(offsetof(ProcBin,size) == offsetof(ErlSubBin,size));
+ ASSERT(offsetof(ProcBin,size) == offsetof(ErlHeapBin,size));
+ ASSERT(offsetof(ProcBin,next) == offsetof(struct erl_off_heap_header,next));
+ ASSERT(offsetof(ProcBin,next) == offsetof(ErlFunThing,next));
+ ASSERT(offsetof(ProcBin,next) == offsetof(ExternalThing,next));
+
erts_smp_spinlock_init(&info_lck, "gc_info");
garbage_cols = 0;
reclaimed = 0;
@@ -286,25 +294,14 @@ erts_offset_heap_ptr(Eterm* hp, Uint sz, Sint offs,
offset_heap_ptr(hp, sz, offs, (char *) low, ((char *)high)-((char *)low));
}
+
#define ptr_within(ptr, low, high) ((ptr) < (high) && (ptr) >= (low))
void
erts_offset_off_heap(ErlOffHeap *ohp, Sint offs, Eterm* low, Eterm* high)
{
- if (ohp->mso && ptr_within((Eterm *)ohp->mso, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->mso;
- *uptr += offs;
- }
-
-#ifndef HYBRID /* FIND ME! */
- if (ohp->funs && ptr_within((Eterm *)ohp->funs, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->funs;
- *uptr += offs;
- }
-#endif
-
- if (ohp->externals && ptr_within((Eterm *)ohp->externals, low, high)) {
- Eterm** uptr = (Eterm**) (void *) &ohp->externals;
+ if (ohp->first && ptr_within((Eterm *)ohp->first, low, high)) {
+ Eterm** uptr = (Eterm**) (void *) &ohp->first;
*uptr += offs;
}
}
@@ -504,14 +501,8 @@ erts_garbage_collect_hibernate(Process* p)
cleanup_rootset(&rootset);
- if (MSO(p).mso) {
- sweep_proc_bins(p, 1);
- }
- if (MSO(p).funs) {
- sweep_proc_funs(p, 1);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 1);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 1);
}
/*
@@ -667,7 +658,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
case TAG_PRIMARY_BOXED:
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, area, area_size)) {
@@ -679,7 +670,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
case TAG_PRIMARY_LIST:
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) { /* Moved */
+ if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (in_area(ptr, area, area_size)) {
MOVE_CONS(ptr,val,old_htop,g_ptr++);
@@ -752,7 +743,10 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* is large enough.
*/
- if (OLD_HEAP(p) && mature <= OLD_HEND(p) - OLD_HTOP(p)) {
+ if (OLD_HEAP(p) &&
+ ((mature <= OLD_HEND(p) - OLD_HTOP(p)) &&
+ ((BIN_VHEAP_MATURE(p) < ( BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p)))) &&
+ ((BIN_OLD_VHEAP_SZ(p) > BIN_OLD_VHEAP(p))) ) ) {
ErlMessage *msgp;
Uint size_after;
Uint need_after;
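
Spelled out, the extended test above only permits a generational (minor) collection when both the mature heap data and the mature binary data fit in the old generation. A hypothetical restatement, assuming the ERTS process macros used in this hunk are in scope:

    /* Hypothetical helper, not part of the patch. */
    static int minor_gc_is_enough(Process *p, Uint mature)
    {
        return OLD_HEAP(p) != NULL
            && mature <= OLD_HEND(p) - OLD_HTOP(p)         /* mature terms fit          */
            && BIN_OLD_VHEAP_SZ(p) > BIN_OLD_VHEAP(p)      /* old binary vheap not full */
            && BIN_VHEAP_MATURE(p) <
                   BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p); /* mature binaries fit too   */
    }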
@@ -913,7 +907,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, heap, mature_size)) {
@@ -929,7 +923,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) { /* Moved */
+ if (IS_MOVED_CONS(val)) { /* Moved */
*g_ptr++ = ptr[1];
} else if (in_area(ptr, heap, mature_size)) {
MOVE_CONS(ptr,val,old_htop,g_ptr++);
@@ -972,7 +966,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (in_area(ptr, heap, mature_size)) {
@@ -987,7 +981,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (in_area(ptr, heap, mature_size)) {
MOVE_CONS(ptr,val,old_htop,n_hp++);
@@ -1008,7 +1002,7 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
Eterm* origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(val);
} else if (in_area(ptr, heap, mature_size)) {
@@ -1041,15 +1035,8 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
OLD_HTOP(p) = old_htop;
HIGH_WATER(p) = (HEAP_START(p) != HIGH_WATER(p)) ? n_heap : n_htop;
- if (MSO(p).mso) {
- sweep_proc_bins(p, 0);
- }
-
- if (MSO(p).funs) {
- sweep_proc_funs(p, 0);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 0);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 0);
}
#ifdef HARDDEBUG
@@ -1161,7 +1148,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
@@ -1175,7 +1162,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
MOVE_CONS(ptr,val,n_htop,g_ptr++);
@@ -1216,7 +1203,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
@@ -1229,7 +1216,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
MOVE_CONS(ptr,val,n_htop,n_hp++);
@@ -1249,7 +1236,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (in_area(ptr, src, src_size) ||
@@ -1271,17 +1258,11 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
}
}
- if (MSO(p).mso) {
- sweep_proc_bins(p, 1);
- }
- if (MSO(p).funs) {
- sweep_proc_funs(p, 1);
- }
- if (MSO(p).externals) {
- sweep_proc_externals(p, 1);
+ if (MSO(p).first) {
+ sweep_off_heap(p, 1);
}
- if (OLD_HEAP(p) != NULL) {
+ if (OLD_HEAP(p) != NULL) {
ERTS_HEAP_FREE(ERTS_ALC_T_OLD_HEAP,
OLD_HEAP(p),
(OLD_HEND(p) - OLD_HEAP(p)) * sizeof(Eterm));
@@ -1305,6 +1286,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
HIGH_WATER(p) = HEAP_TOP(p);
ErtsGcQuickSanityCheck(p);
+
/*
* Copy newly received message onto the end of the new heap.
*/
@@ -1392,17 +1374,12 @@ combined_message_size(Process* p)
static void
remove_message_buffers(Process* p)
{
- ErlHeapFragment* bp = MBUF(p);
-
- MBUF(p) = NULL;
- MBUF_SIZE(p) = 0;
- while (bp != NULL) {
- ErlHeapFragment* next_bp = bp->next;
- free_message_buffer(bp);
- bp = next_bp;
- }
+ if (MBUF(p) != NULL) {
+ free_message_buffer(MBUF(p));
+ MBUF(p) = NULL;
+ }
+ MBUF_SIZE(p) = 0;
}
-
#ifdef HARDDEBUG
/*
@@ -1433,12 +1410,12 @@ disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
case TAG_PRIMARY_BOXED: {
ptr = _unchecked_boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
objv++;
} else {
for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1450,11 +1427,11 @@ disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
case TAG_PRIMARY_LIST: {
ptr = _unchecked_list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
objv++;
} else {
for (qb = mbuf; qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1499,7 +1476,7 @@ disallow_heap_frag_ref_in_heap(Process* p)
ptr = _unchecked_boxed_val(val);
if (!in_area(ptr, heap, heap_size)) {
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1509,7 +1486,7 @@ disallow_heap_frag_ref_in_heap(Process* p)
ptr = _unchecked_list_val(val);
if (!in_area(ptr, heap, heap_size)) {
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1551,26 +1528,26 @@ disallow_heap_frag_ref_in_old_heap(Process* p)
val = *hp++;
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
- ptr = (Eterm *) val;
+ ptr = (Eterm *) EXPAND_POINTER(val);
if (!in_area(ptr, old_heap, old_heap_size)) {
if (in_area(ptr, new_heap, new_heap_size)) {
abort();
}
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
}
break;
case TAG_PRIMARY_LIST:
- ptr = (Eterm *) val;
+ ptr = (Eterm *) EXPAND_POINTER(val);
if (!in_area(ptr, old_heap, old_heap_size)) {
if (in_area(ptr, new_heap, new_heap_size)) {
abort();
}
for (qb = MBUF(p); qb != NULL; qb = qb->next) {
- if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ if (in_area(ptr, qb->mem, qb->alloc_size*sizeof(Eterm))) {
abort();
}
}
@@ -1610,7 +1587,7 @@ sweep_rootset(Rootset* rootset, Eterm* htop, char* src, Uint src_size)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*g_ptr++ = val;
} else if (in_area(ptr, src, src_size)) {
@@ -1623,7 +1600,7 @@ sweep_rootset(Rootset* rootset, Eterm* htop, char* src, Uint src_size)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) { /* Moved */
+ if (IS_MOVED_CONS(val)) {
*g_ptr++ = ptr[1];
} else if (in_area(ptr, src, src_size)) {
MOVE_CONS(ptr,val,htop,g_ptr++);
@@ -1657,7 +1634,7 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (in_area(ptr, src, src_size)) {
@@ -1670,7 +1647,7 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (in_area(ptr, src, src_size)) {
MOVE_CONS(ptr,val,n_htop,n_hp++);
@@ -1690,7 +1667,7 @@ sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (in_area(ptr, src, src_size)) {
@@ -1722,7 +1699,7 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
case TAG_PRIMARY_BOXED: {
ptr = boxed_val(gval);
val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*heap_ptr++ = val;
} else if (in_area(ptr, src, src_size)) {
@@ -1735,7 +1712,7 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*heap_ptr++ = ptr[1];
} else if (in_area(ptr, src, src_size)) {
MOVE_CONS(ptr,val,htop,heap_ptr++);
@@ -1830,28 +1807,6 @@ collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
return n_htop;
}
-#ifdef DEBUG
-static Eterm follow_moved(Eterm term)
-{
- Eterm* ptr;
- switch (primary_tag(term)) {
- case TAG_PRIMARY_IMMED1:
- break;
- case TAG_PRIMARY_BOXED:
- ptr = boxed_val(term);
- if (IS_MOVED(*ptr)) term = *ptr;
- break;
- case TAG_PRIMARY_LIST:
- ptr = list_val(term);
- if (is_non_value(ptr[0])) term = ptr[1];
- break;
- default:
- abort();
- }
- return term;
-}
-#endif
-
static Uint
setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
{
@@ -2030,8 +1985,8 @@ shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj)
HEAP_SIZE(p) = new_sz;
}
-static Uint
-do_next_vheap_size(Uint vheap, Uint vheap_sz) {
+static Uint64
+do_next_vheap_size(Uint64 vheap, Uint64 vheap_sz) {
/* grow
*
@@ -2048,131 +2003,53 @@ do_next_vheap_size(Uint vheap, Uint vheap_sz) {
* ----------------------
*/
- if (vheap > (Uint) (vheap_sz*3/4)) {
+ if ((Uint64) vheap/3 > (Uint64) (vheap_sz/4)) {
+ Uint64 new_vheap_sz = vheap_sz;
- while(vheap > (Uint) (vheap_sz*3/4)) {
- vheap_sz = vheap_sz*2;
+ while((Uint64) vheap/3 > (Uint64) (vheap_sz/4)) {
+ /* the golden ratio = 1.618 */
+ new_vheap_sz = (Uint64) vheap_sz * 1.618;
+ if (new_vheap_sz < vheap_sz ) {
+ return vheap_sz;
+ }
+ vheap_sz = new_vheap_sz;
}
- return erts_next_heap_size(vheap_sz, 0);
+ return vheap_sz;
}
- if (vheap < (Uint) (vheap_sz/4)) {
- return erts_next_heap_size((Uint) (vheap_sz / 2), 0);
+ if (vheap < (Uint64) (vheap_sz/4)) {
+ return (vheap_sz >> 1);
}
return vheap_sz;
}
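
In other words, the limit now grows by roughly the golden ratio while more than about three quarters of it is in use, instead of doubling, and is halved when usage falls below a quarter. A standalone sketch of the growth path with example numbers (illustration only, shrinking omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the growth rule above on plain uint64_t values. */
    static uint64_t grow_vheap_sz(uint64_t vheap, uint64_t vheap_sz)
    {
        while (vheap / 3 > vheap_sz / 4) {           /* i.e. usage above ~75% */
            uint64_t next = (uint64_t)(vheap_sz * 1.618);
            if (next < vheap_sz)                     /* overflow guard */
                return vheap_sz;
            vheap_sz = next;
        }
        return vheap_sz;
    }

    int main(void)
    {
        /* 800 live words against a limit of 1000: one step to 1618 suffices,
         * since 800/3 is no longer greater than 1618/4. */
        printf("%llu\n", (unsigned long long) grow_vheap_sz(800, 1000));
        return 0;
    }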
-static Uint
-next_vheap_size(Process* p, Uint vheap, Uint vheap_sz) {
- vheap_sz = do_next_vheap_size(vheap, vheap_sz);
- return vheap_sz < p->min_vheap_size ? p->min_vheap_size : vheap_sz;
-}
-
-static void
-sweep_proc_externals(Process *p, int fullsweep)
-{
- ExternalThing** prev;
- ExternalThing* ptr;
- char* oh = 0;
- Uint oh_size = 0;
-
- if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
- }
-
- prev = &MSO(p).externals;
- ptr = MSO(p).externals;
-
- while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED(*ppt)) { /* Object is alive */
- ExternalThing* ro = external_thing_ptr(*ppt);
-
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- prev = &ptr->next;
- ptr = ptr->next;
- } else { /* Object has not been moved - deref it */
- erts_deref_node_entry(ptr->node);
- *prev = ptr = ptr->next;
- }
- }
- ASSERT(*prev == NULL);
-}
-
-static void
-sweep_proc_funs(Process *p, int fullsweep)
-{
- ErlFunThing** prev;
- ErlFunThing* ptr;
- char* oh = 0;
- Uint oh_size = 0;
-
- if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
- }
-
- prev = &MSO(p).funs;
- ptr = MSO(p).funs;
-
- while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED(*ppt)) { /* Object is alive */
- ErlFunThing* ro = (ErlFunThing *) fun_val(*ppt);
-
- *prev = ro; /* Patch to moved pos */
- prev = &ro->next;
- ptr = ro->next;
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- prev = &ptr->next;
- ptr = ptr->next;
- } else { /* Object has not been moved - deref it */
- ErlFunEntry* fe = ptr->fe;
-
- *prev = ptr = ptr->next;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
- erts_erase_fun_entry(fe);
- }
- }
- }
- ASSERT(*prev == NULL);
+static Uint64
+next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz) {
+ Uint64 new_vheap_sz = do_next_vheap_size(vheap, vheap_sz);
+ return new_vheap_sz < p->min_vheap_size ? p->min_vheap_size : new_vheap_sz;
}
struct shrink_cand_data {
- ProcBin* new_candidates;
- ProcBin* new_candidates_end;
- ProcBin* old_candidates;
+ struct erl_off_heap_header* new_candidates;
+ struct erl_off_heap_header* new_candidates_end;
+ struct erl_off_heap_header* old_candidates;
Uint no_of_candidates;
Uint no_of_active;
};
static ERTS_INLINE void
link_live_proc_bin(struct shrink_cand_data *shrink,
- ProcBin ***prevppp,
- ProcBin **pbpp,
+ struct erl_off_heap_header*** prevppp,
+ struct erl_off_heap_header** currpp,
int new_heap)
{
- ProcBin *pbp = *pbpp;
-
- *pbpp = pbp->next;
+ ProcBin *pbp = (ProcBin*) *currpp;
+ ASSERT(**prevppp == *currpp);
+ *currpp = pbp->next;
if (pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE)) {
ASSERT(((pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
== (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
@@ -2189,15 +2066,16 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
/* Our allocators are 8 byte aligned, i.e., shrinking with
less than 8 bytes will have no real effect */
if (unused >= 8) { /* A shrink candidate; save in candidate list */
+ **prevppp = pbp->next;
if (new_heap) {
if (!shrink->new_candidates)
- shrink->new_candidates_end = pbp;
+ shrink->new_candidates_end = (struct erl_off_heap_header*)pbp;
pbp->next = shrink->new_candidates;
- shrink->new_candidates = pbp;
+ shrink->new_candidates = (struct erl_off_heap_header*)pbp;
}
else {
pbp->next = shrink->old_candidates;
- shrink->old_candidates = pbp;
+ shrink->old_candidates = (struct erl_off_heap_header*)pbp;
}
shrink->no_of_candidates++;
return;
@@ -2205,83 +2083,117 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
}
}
- /* Not a shrink candidate; keep in original mso list */
- **prevppp = pbp;
+ /* Not a shrink candidate; keep in original mso list */
*prevppp = &pbp->next;
-
}
-static void
-sweep_proc_bins(Process *p, int fullsweep)
+static void
+sweep_off_heap(Process *p, int fullsweep)
{
struct shrink_cand_data shrink = {0};
- ProcBin** prev;
- ProcBin* ptr;
- Binary* bptr;
- char* oh = NULL;
- Uint oh_size = 0;
- Uint bin_vheap = 0;
+ struct erl_off_heap_header* ptr;
+ struct erl_off_heap_header** prev;
+ char* oheap = NULL;
+ Uint oheap_sz = 0;
+ Uint64 bin_vheap = 0;
+#ifdef DEBUG
+ int seen_mature = 0;
+#endif
if (fullsweep == 0) {
- oh = (char *) OLD_HEAP(p);
- oh_size = (char *) OLD_HEND(p) - oh;
+ oheap = (char *) OLD_HEAP(p);
+ oheap_sz = (char *) OLD_HEND(p) - oheap;
}
BIN_OLD_VHEAP(p) = 0;
- prev = &MSO(p).mso;
- ptr = MSO(p).mso;
+ prev = &MSO(p).first;
+ ptr = MSO(p).first;
- /*
- * Note: In R7 we no longer force a fullsweep when we find binaries
- * on the old heap. The reason is that with the introduction of the
- * bit syntax we can expect binaries to be used a lot more. Note that
- * in earlier releases a brand new binary (or any other term) could
- * be put on the old heap during a gen-gc fullsweep, but this is
- * no longer the case in R7.
+ /* First part of the list will reside on the (old) new-heap.
REPLACED_BY_GR_REPLACE
+ * Keep if moved, otherwise deref.
*/
while (ptr) {
- Eterm* ppt = (Eterm *) ptr;
-
- if (IS_MOVED(*ppt)) { /* Object is alive */
- bin_vheap += ptr->size / sizeof(Eterm);
- ptr = (ProcBin*) binary_val(*ppt);
- link_live_proc_bin(&shrink,
- &prev,
- &ptr,
- !in_area(ptr, oh, oh_size));
- } else if (in_area(ppt, oh, oh_size)) {
- /*
- * Object resides on old heap, and we just did a
- * generational collection - keep object in list.
- */
- BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
- link_live_proc_bin(&shrink, &prev, &ptr, 0);
- } else { /* Object has not been moved - deref it */
-
- *prev = ptr->next;
- bptr = ptr->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0)
- erts_bin_free(bptr);
- ptr = *prev;
- }
+ if (IS_MOVED_BOXED(ptr->thing_word)) {
+ ASSERT(!in_area(ptr, oheap, oheap_sz));
+ *prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
+ ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ int to_new_heap = !in_area(ptr, oheap, oheap_sz);
+ ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
+ if (to_new_heap) {
+ bin_vheap += ptr->size / sizeof(Eterm);
+ } else {
+ BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
+ }
+ link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
+ }
+ else {
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
+ }
+ else if (!in_area(ptr, oheap, oheap_sz)) {
+ /* garbage */
+ switch (thing_subtag(ptr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ {
+ Binary* bptr = ((ProcBin*)ptr)->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ break;
+ }
+ case FUN_SUBTAG:
+ {
+ ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe;
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ erts_erase_fun_entry(fe);
+ }
+ break;
+ }
+ default:
+ ASSERT(is_external_header(ptr->thing_word));
+ erts_deref_node_entry(((ExternalThing*)ptr)->node);
+ }
+ *prev = ptr = ptr->next;
+ }
+ else break; /* and let old-heap loop continue */
}
- if (BIN_OLD_VHEAP(p) >= BIN_OLD_VHEAP_SZ(p)) {
- FLAGS(p) |= F_NEED_FULLSWEEP;
+ /* The rest of the list resides on old-heap, and we just did a
+ * generational collection - keep objects in list.
+ */
+ while (ptr) {
+ ASSERT(in_area(ptr, oheap, oheap_sz));
+ ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
+ link_live_proc_bin(&shrink, &prev, &ptr, 0);
+ }
+ else {
+ ASSERT(is_fun_header(ptr->thing_word) ||
+ is_external_header(ptr->thing_word));
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
}
- BIN_VHEAP_SZ(p) = next_vheap_size(p, bin_vheap, BIN_VHEAP_SZ(p));
- BIN_OLD_VHEAP_SZ(p) = next_vheap_size(p, BIN_OLD_VHEAP(p), BIN_OLD_VHEAP_SZ(p));
- MSO(p).overhead = bin_vheap;
+ if (fullsweep) {
+ BIN_OLD_VHEAP_SZ(p) = next_vheap_size(p, BIN_OLD_VHEAP(p) + MSO(p).overhead, BIN_OLD_VHEAP_SZ(p));
+ }
+ BIN_VHEAP_SZ(p) = next_vheap_size(p, bin_vheap, BIN_VHEAP_SZ(p));
+ MSO(p).overhead = bin_vheap;
+ BIN_VHEAP_MATURE(p) = bin_vheap;
/*
* If we got any shrink candidates, check them out.
*/
if (shrink.no_of_candidates) {
- ProcBin *candlist[] = {shrink.new_candidates, shrink.old_candidates};
+ ProcBin *candlist[] = { (ProcBin*)shrink.new_candidates,
+ (ProcBin*)shrink.old_candidates };
Uint leave_unused = 0;
int i;
@@ -2293,21 +2205,21 @@ sweep_proc_bins(Process *p, int fullsweep)
}
for (i = 0; i < sizeof(candlist)/sizeof(candlist[0]); i++) {
-
- for (ptr = candlist[i]; ptr; ptr = ptr->next) {
- Uint new_size = ptr->size;
+ ProcBin* pb;
+ for (pb = candlist[i]; pb; pb = (ProcBin*)pb->next) {
+ Uint new_size = pb->size;
if (leave_unused) {
new_size += (new_size * 100) / leave_unused;
/* Our allocators are 8 byte aligned, i.e., shrinking with
less than 8 bytes will have no real effect */
- if (new_size + 8 >= ptr->val->orig_size)
+ if (new_size + 8 >= pb->val->orig_size)
continue;
}
- ptr->val = erts_bin_realloc(ptr->val, new_size);
- ptr->val->orig_size = new_size;
- ptr->bytes = (byte *) ptr->val->orig_bytes;
+ pb->val = erts_bin_realloc(pb->val, new_size);
+ pb->val->orig_size = new_size;
+ pb->bytes = (byte *) pb->val->orig_bytes;
}
}
@@ -2316,21 +2228,20 @@ sweep_proc_bins(Process *p, int fullsweep)
* We now potentially have the mso list divided into three lists:
* - shrink candidates on new heap (inactive writable with unused data)
* - shrink candidates on old heap (inactive writable with unused data)
- * - other binaries (read only + active writable ...)
+ * - other binaries (read only + active writable ...) + funs and externals
*
* Put them back together: new candidates -> other -> old candidates
* This order will ensure that the list only refers from new
* generation to old and never from old to new *which is important*.
*/
if (shrink.new_candidates) {
- if (prev == &MSO(p).mso) /* empty other binaries list */
+ if (prev == &MSO(p).first) /* empty other binaries list */
prev = &shrink.new_candidates_end->next;
else
- shrink.new_candidates_end->next = MSO(p).mso;
- MSO(p).mso = shrink.new_candidates;
+ shrink.new_candidates_end->next = MSO(p).first;
+ MSO(p).first = shrink.new_candidates;
}
}
-
*prev = shrink.old_candidates;
}
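
A concrete instance of the ordering described in the comment above, as an illustration rather than patch text: with two shrink candidates N1 and N2 found on the new heap, one kept object K and one old-heap candidate O, the rebuilt list becomes

    /* MSO(p).first -> N1 -> N2 -> K -> O -> NULL
     *
     * New-heap candidates come first, untouched survivors in the middle,
     * old-heap candidates last, so the chain only ever points from the
     * young generation towards the old one. */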
@@ -2361,15 +2272,17 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
tari = thing_arityval(val);
switch (thing_subtag(val)) {
case REFC_BINARY_SUBTAG:
+ case FUN_SUBTAG:
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
{
- ProcBin* pb = (ProcBin*) hp;
- Eterm** uptr = (Eterm **) (void *) &pb->next;
+ struct erl_off_heap_header* oh = (struct erl_off_heap_header*) hp;
- if (*uptr && in_area((Eterm *)pb->next, area, area_size)) {
+ if (in_area(oh->next, area, area_size)) {
+ Eterm** uptr = (Eterm **) (void *) &oh->next;
*uptr += offs; /* Patch the mso chain */
}
- sz -= tari;
- hp += tari + 1;
}
break;
case BIN_MATCHSTATE_SUBTAG:
@@ -2380,40 +2293,11 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
mb->orig = offset_ptr(mb->orig, offs);
mb->base = binary_bytes(mb->orig);
}
- sz -= tari;
- hp += tari + 1;
}
break;
- case FUN_SUBTAG:
- {
- ErlFunThing* funp = (ErlFunThing *) hp;
- Eterm** uptr = (Eterm **) (void *) &funp->next;
-
- if (*uptr && in_area((Eterm *)funp->next, area, area_size)) {
- *uptr += offs;
- }
- sz -= tari;
- hp += tari + 1;
- }
- break;
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG:
- {
- ExternalThing* etp = (ExternalThing *) hp;
- Eterm** uptr = (Eterm **) (void *) &etp->next;
-
- if (*uptr && in_area((Eterm *)etp->next, area, area_size)) {
- *uptr += offs;
- }
- sz -= tari;
- hp += tari + 1;
- }
- break;
- default:
- sz -= tari;
- hp += tari + 1;
}
+ sz -= tari;
+ hp += tari + 1;
break;
}
default:
@@ -2450,18 +2334,8 @@ offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
static void
offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
{
- if (MSO(p).mso && in_area((Eterm *)MSO(p).mso, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).mso;
- *uptr += offs;
- }
-
- if (MSO(p).funs && in_area((Eterm *)MSO(p).funs, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).funs;
- *uptr += offs;
- }
-
- if (MSO(p).externals && in_area((Eterm *)MSO(p).externals, area, area_size)) {
- Eterm** uptr = (Eterm**) (void *) &MSO(p).externals;
+ if (MSO(p).first && in_area((Eterm *)MSO(p).first, area, area_size)) {
+ Eterm** uptr = (Eterm**) (void *) &MSO(p).first;
*uptr += offs;
}
}
@@ -2542,7 +2416,7 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
return 1;
}
while (bp != NULL) {
- if (bp->mem <= ptr && ptr < bp->mem + bp->size) {
+ if (bp->mem <= ptr && ptr < bp->mem + bp->used_size) {
return 1;
}
bp = bp->next;
@@ -2556,7 +2430,7 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
hfp = erts_dist_ext_trailer(mp->data.dist_ext);
else
hfp = NULL;
- if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->size)
+ if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->used_size)
return 1;
}
mp = mp->next;
@@ -2582,8 +2456,8 @@ do { \
__FILE__, __LINE__, #EXP); \
} while (0)
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
-# define ERTS_EXTERNAL_VISITED_BIT ((Eterm) 1 << 31)
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
+# define ERTS_OFFHEAP_VISITED_BIT ((Eterm) 1 << 31)
#endif
@@ -2593,62 +2467,45 @@ erts_check_off_heap2(Process *p, Eterm *htop)
Eterm *oheap = (Eterm *) OLD_HEAP(p);
Eterm *ohtop = (Eterm *) OLD_HTOP(p);
int old;
- ProcBin *pb;
- ErlFunThing *eft;
- ExternalThing *et;
+ union erl_off_heap_ptr u;
old = 0;
- for (pb = MSO(p).mso; pb; pb = pb->next) {
- Eterm *ptr = (Eterm *) pb;
- long refc = erts_refc_read(&pb->val->refc, 1);
+ for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next) {
+ long refc;
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ refc = erts_refc_read(&u.pb->val->refc, 1);
+ break;
+ case FUN_SUBTAG:
+ refc = erts_refc_read(&u.fun->fe->refc, 1);
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ refc = erts_refc_read(&u.ext->node->refc, 1);
+ break;
+ default:
+ ASSERT(!"erts_check_off_heap2: Invalid thing_word");
+ }
ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
+ ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_OFFHEAP_VISITED_BIT));
+ u.hdr->thing_word |= ERTS_OFFHEAP_VISITED_BIT;
+#endif
if (old) {
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
+ ERTS_CHK_OFFHEAP_ASSERT(oheap <= u.ep && u.ep < ohtop);
}
- else if (oheap <= ptr && ptr < ohtop)
+ else if (oheap <= u.ep && u.ep < ohtop)
old = 1;
else {
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
+ ERTS_CHK_OFFHEAP_ASSERT(within2(u.ep, p, htop));
}
}
- old = 0;
- for (eft = MSO(p).funs; eft; eft = eft->next) {
- Eterm *ptr = (Eterm *) eft;
- long refc = erts_refc_read(&eft->fe->refc, 1);
- ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
- if (old)
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
- else if (oheap <= ptr && ptr < ohtop)
- old = 1;
- else
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
- }
-
- old = 0;
- for (et = MSO(p).externals; et; et = et->next) {
- Eterm *ptr = (Eterm *) et;
- long refc = erts_refc_read(&et->node->refc, 1);
- ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- ERTS_CHK_OFFHEAP_ASSERT(!(et->header & ERTS_EXTERNAL_VISITED_BIT));
-#endif
- if (old)
- ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
- else if (oheap <= ptr && ptr < ohtop)
- old = 1;
- else
- ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
-#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- et->header |= ERTS_EXTERNAL_VISITED_BIT;
-#endif
- }
-
#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
- for (et = MSO(p).externals; et; et = et->next)
- et->header &= ~ERTS_EXTERNAL_VISITED_BIT;
+ for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next)
+ u.hdr->thing_word &= ~ERTS_OFFHEAP_VISITED_BIT;
#endif
-
}
void
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index af55b6363f..807ef8ae8d 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -22,11 +22,12 @@
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
-#ifdef DEBUG
+#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
# define HARDDEBUG 1
#endif
-#define IS_MOVED(x) (!is_header((x)))
+#define IS_MOVED_BOXED(x) (!is_header((x)))
+#define IS_MOVED_CONS(x) (is_non_value((x)))
#define MOVE_CONS(PTR,CAR,HTOP,ORIG) \
do { \
@@ -69,4 +70,28 @@ extern Uint erts_test_long_gc_sleep;
int within(Eterm *ptr, Process *p);
#endif
+ERTS_GLB_INLINE Eterm follow_moved(Eterm term);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Eterm follow_moved(Eterm term)
+{
+ Eterm* ptr;
+ switch (primary_tag(term)) {
+ case TAG_PRIMARY_IMMED1:
+ break;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(term);
+ if (IS_MOVED_BOXED(*ptr)) term = *ptr;
+ break;
+ case TAG_PRIMARY_LIST:
+ ptr = list_val(term);
+ if (IS_MOVED_CONS(ptr[0])) term = ptr[1];
+ break;
+ default:
+ ASSERT(!"strange tag in follow_moved");
+ }
+ return term;
+}
+#endif
+
#endif /* __ERL_GC_H__ */
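
follow_moved() was previously a DEBUG-only static in erl_gc.c; making it a globally inlinable helper lets other GC code resolve forwarding words through the new IS_MOVED_* macros. A hypothetical debug-style use, assuming the usual term macros are in scope and the evacuated area has not yet been released:

    /* Hypothetical check, not part of the patch: a term saved before the
     * copy phase must resolve, through its forwarding word, to the same
     * value the updated root now holds. */
    Eterm saved = objv[0];                  /* captured before objects were moved */
    /* ... evacuation writes forwarding words and updates the rootset ... */
    ASSERT(follow_moved(saved) == objv[0]);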
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index ea2ba4d55c..76b206d76f 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -49,30 +49,30 @@
#define MIN_MBC_FIRST_FREE_SZ (4*1024)
#define MAX_SUB_MASK_IX \
- ((((Uint)1) << (NO_OF_BKT_IX_BITS - SUB_MASK_IX_SHIFT)) - 1)
-#define MAX_SUB_BKT_IX ((((Uint)1) << SUB_MASK_IX_SHIFT) - 1)
+ ((((UWord)1) << (NO_OF_BKT_IX_BITS - SUB_MASK_IX_SHIFT)) - 1)
+#define MAX_SUB_BKT_IX ((((UWord)1) << SUB_MASK_IX_SHIFT) - 1)
#define MAX_BKT_IX (NO_OF_BKTS - 1)
-#define MIN_BLK_SZ UNIT_CEILING(sizeof(GFFreeBlock_t) + sizeof(Uint))
+#define MIN_BLK_SZ UNIT_CEILING(sizeof(GFFreeBlock_t) + sizeof(UWord))
-#define IX2SBIX(IX) ((IX) & (~(~((Uint)0) << SUB_MASK_IX_SHIFT)))
+#define IX2SBIX(IX) ((IX) & (~(~((UWord)0) << SUB_MASK_IX_SHIFT)))
#define IX2SMIX(IX) ((IX) >> SUB_MASK_IX_SHIFT)
#define MAKE_BKT_IX(SMIX, SBIX) \
- ((((Uint)(SMIX)) << SUB_MASK_IX_SHIFT) | ((Uint)(SBIX)))
+ ((((UWord)(SMIX)) << SUB_MASK_IX_SHIFT) | ((UWord)(SBIX)))
#define SET_BKT_MASK_IX(BM, IX) \
do { \
int sub_mask_ix__ = IX2SMIX((IX)); \
- (BM).main |= (((Uint) 1) << sub_mask_ix__); \
- (BM).sub[sub_mask_ix__] |= (((Uint)1) << IX2SBIX((IX))); \
+ (BM).main |= (((UWord) 1) << sub_mask_ix__); \
+ (BM).sub[sub_mask_ix__] |= (((UWord)1) << IX2SBIX((IX))); \
} while (0)
#define UNSET_BKT_MASK_IX(BM, IX) \
do { \
int sub_mask_ix__ = IX2SMIX((IX)); \
- (BM).sub[sub_mask_ix__] &= ~(((Uint)1) << IX2SBIX((IX))); \
+ (BM).sub[sub_mask_ix__] &= ~(((UWord)1) << IX2SBIX((IX))); \
if (!(BM).sub[sub_mask_ix__]) \
- (BM).main &= ~(((Uint)1) << sub_mask_ix__); \
+ (BM).main &= ~(((UWord)1) << sub_mask_ix__); \
} while (0)
/* Buckets ... */
@@ -263,8 +263,8 @@ find_bucket(BucketMask_t *bmask, int min_index)
while(max != min) { \
mid = ((max - min) >> 1) + min; \
if((BitMask) \
- & (~(~((Uint) 0) << (mid + 1))) \
- & (~((Uint) 0) << min)) \
+ & (~(~((UWord) 0) << (mid + 1))) \
+ & (~((UWord) 0) << min)) \
max = mid; \
else \
min = mid + 1; \
@@ -272,21 +272,21 @@ find_bucket(BucketMask_t *bmask, int min_index)
(MinBit) = min
- ASSERT(bmask->main < (((Uint) 1) << (MAX_SUB_MASK_IX+1)));
+ ASSERT(bmask->main < (((UWord) 1) << (MAX_SUB_MASK_IX+1)));
sub_mask_ix = IX2SMIX(min_index);
- if ((bmask->main & (~((Uint) 0) << sub_mask_ix)) == 0)
+ if ((bmask->main & (~((UWord) 0) << sub_mask_ix)) == 0)
return -1;
/* There exists a non empty bucket; find it... */
- if (bmask->main & (((Uint) 1) << sub_mask_ix)) {
+ if (bmask->main & (((UWord) 1) << sub_mask_ix)) {
sub_bkt_ix = IX2SBIX(min_index);
- if ((bmask->sub[sub_mask_ix] & (~((Uint) 0) << sub_bkt_ix)) == 0) {
+ if ((bmask->sub[sub_mask_ix] & (~((UWord) 0) << sub_bkt_ix)) == 0) {
sub_mask_ix++;
sub_bkt_ix = 0;
- if ((bmask->main & (~((Uint) 0)<< sub_mask_ix)) == 0)
+ if ((bmask->main & (~((UWord) 0)<< sub_mask_ix)) == 0)
return -1;
}
else
@@ -299,17 +299,17 @@ find_bucket(BucketMask_t *bmask, int min_index)
ASSERT(sub_mask_ix <= MAX_SUB_MASK_IX);
/* Has to be a bit > sub_mask_ix */
- ASSERT(bmask->main & (~((Uint) 0) << (sub_mask_ix)));
+ ASSERT(bmask->main & (~((UWord) 0) << (sub_mask_ix)));
GET_MIN_BIT(sub_mask_ix, bmask->main, sub_mask_ix, MAX_SUB_MASK_IX);
find_sub_bkt_ix:
ASSERT(sub_mask_ix <= MAX_SUB_MASK_IX);
ASSERT(sub_bkt_ix <= MAX_SUB_BKT_IX);
- if ((bmask->sub[sub_mask_ix] & (((Uint) 1) << sub_bkt_ix)) == 0) {
+ if ((bmask->sub[sub_mask_ix] & (((UWord) 1) << sub_bkt_ix)) == 0) {
ASSERT(sub_mask_ix + 1 <= MAX_SUB_BKT_IX);
/* Has to be a bit > sub_bkt_ix */
- ASSERT(bmask->sub[sub_mask_ix] & (~((Uint) 0) << sub_bkt_ix));
+ ASSERT(bmask->sub[sub_mask_ix] & (~((UWord) 0) << sub_bkt_ix));
GET_MIN_BIT(sub_bkt_ix,
bmask->sub[sub_mask_ix],
@@ -336,7 +336,7 @@ search_bucket(Allctr_t *allctr, int ix, Uint size)
Uint min_sz;
Uint blk_sz;
Uint cand_sz = 0;
- Uint max_blk_search;
+ UWord max_blk_search;
GFFreeBlock_t *blk;
GFFreeBlock_t *cand = NULL;
int blk_on_lambc;
@@ -615,9 +615,9 @@ check_block(Allctr_t *allctr, Block_t * blk, int free_block)
Uint blk_sz = BLK_SZ(blk);
bi = BKT_IX(gfallctr, blk_sz);
- ASSERT(gfallctr->bucket_mask.main & (((Uint) 1) << IX2SMIX(bi)));
+ ASSERT(gfallctr->bucket_mask.main & (((UWord) 1) << IX2SMIX(bi)));
ASSERT(gfallctr->bucket_mask.sub[IX2SMIX(bi)]
- & (((Uint) 1) << IX2SBIX(bi)));
+ & (((UWord) 1) << IX2SBIX(bi)));
found = 0;
for (fblk = gfallctr->buckets[bi]; fblk; fblk = fblk->next)
@@ -648,9 +648,9 @@ check_mbc(Allctr_t *allctr, Carrier_t *mbc)
int bi;
for(bi = 0; bi < NO_OF_BKTS; bi++) {
- if ((gfallctr->bucket_mask.main & (((Uint) 1) << IX2SMIX(bi)))
+ if ((gfallctr->bucket_mask.main & (((UWord) 1) << IX2SMIX(bi)))
&& (gfallctr->bucket_mask.sub[IX2SMIX(bi)]
- & (((Uint) 1) << IX2SBIX(bi)))) {
+ & (((UWord) 1) << IX2SBIX(bi)))) {
ASSERT(gfallctr->buckets[bi] != NULL);
}
else {
diff --git a/erts/emulator/beam/erl_goodfit_alloc.h b/erts/emulator/beam/erl_goodfit_alloc.h
index 3d1b8c01f6..a554a6f466 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.h
+++ b/erts/emulator/beam/erl_goodfit_alloc.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -28,7 +28,7 @@
typedef struct GFAllctr_t_ GFAllctr_t;
typedef struct {
- Uint mbsd;
+ UWord mbsd;
} GFAllctrInit_t;
#define ERTS_DEFAULT_GF_ALLCTR_INIT { \
@@ -49,18 +49,18 @@ Allctr_t *erts_gfalc_start(GFAllctr_t *, GFAllctrInit_t *, AllctrInit_t *);
#include "erl_alloc_util.h"
#define NO_OF_BKT_IX_BITS (8)
-#ifdef ARCH_64
+#if defined(ARCH_64)
# define SUB_MASK_IX_SHIFT (6)
#else
# define SUB_MASK_IX_SHIFT (5)
#endif
-#define NO_OF_BKTS (((Uint) 1) << NO_OF_BKT_IX_BITS)
-#define NO_OF_SUB_MASKS (NO_OF_BKTS/(((Uint) 1) << SUB_MASK_IX_SHIFT))
+#define NO_OF_BKTS (((UWord) 1) << NO_OF_BKT_IX_BITS)
+#define NO_OF_SUB_MASKS (NO_OF_BKTS/(((UWord) 1) << SUB_MASK_IX_SHIFT))
typedef struct {
- Uint main;
- Uint sub[NO_OF_SUB_MASKS];
-} BucketMask_t;
+ UWord main;
+ UWord sub[NO_OF_SUB_MASKS];
+} BucketMask_t;
typedef struct GFFreeBlock_t_ GFFreeBlock_t;
struct GFFreeBlock_t_ {
@@ -74,11 +74,11 @@ struct GFAllctr_t_ {
char * last_aux_mbc_start;
char * last_aux_mbc_end;
- Uint bkt_max_size_d;
- Uint bkt_intrvl_d;
+ UWord bkt_max_size_d;
+ UWord bkt_intrvl_d;
BucketMask_t bucket_mask;
GFFreeBlock_t * buckets[NO_OF_BKTS];
- Uint max_blk_search;
+ UWord max_blk_search;
};
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index e97ab328cd..14bd10b42c 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -78,6 +78,7 @@ static erts_tid_t main_thread;
erts_cpu_info_t *erts_cpuinfo;
+int erts_reader_groups;
int erts_use_sender_punish;
/*
@@ -110,6 +111,7 @@ int erts_compat_rel;
static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
+static int max_reader_groups;
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -281,6 +283,7 @@ erl_init(void)
init_load();
erts_init_bif();
erts_init_bif_chksum();
+ erts_init_bif_binary();
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2);
@@ -292,9 +295,6 @@ erl_init(void)
#ifdef HIPE
hipe_mode_switch_init(); /* Must be after init_load/beam_catches/init */
#endif
-#ifdef _OSE_
- erl_sys_init_final();
-#endif
packet_parser_init();
erl_nif_init();
}
@@ -338,59 +338,6 @@ init_shared_memory(int argc, char **argv)
#endif
}
-
-/*
- * Create the very first process.
- */
-
-void
-erts_first_process(Eterm modname, void* code, unsigned size, int argc, char** argv)
-{
- int i;
- Eterm args;
- Eterm pid;
- Eterm* hp;
- Process parent;
- Process* p;
- ErlSpawnOpts so;
-
- if (erts_find_function(modname, am_start, 1) == NULL) {
- char sbuf[256];
- Atom* ap;
-
- ap = atom_tab(atom_val(modname));
- memcpy(sbuf, ap->name, ap->len);
- sbuf[ap->len] = '\0';
- erl_exit(5, "No function %s:start/1\n", sbuf);
- }
-
- /*
- * We need a dummy parent process to be able to call erl_create_process().
- */
- erts_init_empty_process(&parent);
- hp = HAlloc(&parent, argc*2 + 4);
- args = NIL;
- for (i = argc-1; i >= 0; i--) {
- int len = sys_strlen(argv[i]);
- args = CONS(hp, new_binary(&parent, (byte*)argv[i], len), args);
- hp += 2;
- }
- args = CONS(hp, new_binary(&parent, code, size), args);
- hp += 2;
- args = CONS(hp, args, NIL);
-
- so.flags = 0;
- pid = erl_create_process(&parent, modname, am_start, args, &so);
- p = process_tab[internal_pid_index(pid)];
- p->group_leader = pid;
-
- erts_cleanup_empty_process(&parent);
-}
-
-/*
- * XXX Old way of starting. Hopefully soon obsolete.
- */
-
static void
erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
{
@@ -560,6 +507,7 @@ void erts_usage(void)
ERTS_MIN_COMPAT_REL, this_rel_num());
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
+ erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
@@ -593,6 +541,50 @@ void erts_usage(void)
erl_exit(-1, "");
}
+#ifdef USE_THREADS
+/*
+ * allocators for thread lib
+ */
+
+static void *ethr_std_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_STD, (Uint) size);
+}
+static void *ethr_std_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_STD, ptr, (Uint) size);
+}
+static void ethr_std_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_STD, ptr);
+}
+static void *ethr_sl_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_SL, (Uint) size);
+}
+static void *ethr_sl_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_SL, ptr, (Uint) size);
+}
+static void ethr_sl_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_SL, ptr);
+}
+static void *ethr_ll_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_LL, (Uint) size);
+}
+static void *ethr_ll_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_LL, ptr, (Uint) size);
+}
+static void ethr_ll_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_LL, ptr);
+}
+
+#endif
+
static void
early_init(int *argc, char **argv) /*
* Only put things here which are
@@ -670,9 +662,15 @@ early_init(int *argc, char **argv) /*
? ncpuavail
: (ncpuonln > 0 ? ncpuonln : no_schedulers));
+#ifdef ERTS_SMP
+ erts_max_main_threads = no_schedulers_online;
+#endif
+
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ max_reader_groups = ERTS_MAX_READER_GROUPS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -682,6 +680,24 @@ early_init(int *argc, char **argv) /*
}
if (argv[i][0] == '-') {
switch (argv[i][1]) {
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ char *arg = get_arg(sub_param+1, argv[i+1], &i);
+ if (sscanf(arg, "%d", &max_reader_groups) != 1) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %s\n", arg);
+ erts_usage();
+ }
+ if (max_reader_groups < 0) {
+ erts_fprintf(stderr,
+ "bad reader groups limit: %d\n",
+ max_reader_groups);
+ erts_usage();
+ }
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -754,9 +770,43 @@ early_init(int *argc, char **argv) /*
erts_early_init_scheduling(); /* Require allocators */
erts_init_utils(); /* Require allocators */
+#ifdef USE_THREADS
+ {
+ erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
+ elid.mem.std.alloc = ethr_std_alloc;
+ elid.mem.std.realloc = ethr_std_realloc;
+ elid.mem.std.free = ethr_std_free;
+ elid.mem.sl.alloc = ethr_sl_alloc;
+ elid.mem.sl.realloc = ethr_sl_realloc;
+ elid.mem.sl.free = ethr_sl_free;
+ elid.mem.ll.alloc = ethr_ll_alloc;
+ elid.mem.ll.realloc = ethr_ll_realloc;
+ elid.mem.ll.free = ethr_ll_free;
+
+#ifdef ERTS_SMP
+ elid.main_threads = erts_max_main_threads;
+#else
+ elid.main_threads = 1;
+#endif
+ elid.reader_groups = (elid.main_threads > 1
+ ? elid.main_threads
+ : 0);
+ if (max_reader_groups <= 1)
+ elid.reader_groups = 0;
+ if (elid.reader_groups > max_reader_groups)
+ elid.reader_groups = max_reader_groups;
+ erts_reader_groups = elid.reader_groups;
+
+ erts_thr_late_init(&elid);
+ }
+#endif
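
The clamping above defaults the number of reader groups to the number of main (scheduler) threads, disables grouping when there is at most one such thread or when the limit is 1 or less, and otherwise caps it at the -rg limit. Restated as a small helper with example values (illustration only, not patch code):

    /* Reader groups chosen for some hypothetical configurations:
     *   8 schedulers, limit 64   -> 8
     *   128 schedulers, limit 64 -> 64
     *   1 scheduler, any limit   -> 0 (no reader grouping) */
    static int clamp_reader_groups(int main_threads, int max_groups)
    {
        int groups = main_threads > 1 ? main_threads : 0;
        if (max_groups <= 1)
            groups = 0;
        if (groups > max_groups)
            groups = max_groups;
        return groups;
    }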
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_late_init();
+#endif
#if defined(HIPE)
hipe_signal_init(); /* must be done very early */
@@ -819,7 +869,13 @@ erl_start(int argc, char **argv)
if (erts_sys_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0) {
async_max_threads = atoi(envbuf);
}
-
+
+#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
+ /*
+ * The default stack size on MacOS X is too small for pcre.
+ */
+ erts_sched_thread_suggested_stack_size = 256;
+#endif
#ifdef DEBUG
verbose = DEBUG_DEFAULT;
@@ -1238,9 +1294,17 @@ erl_start(int argc, char **argv)
erts_async_thread_suggested_stack_size));
break;
- case 'r':
- erts_ets_realloc_always_moves = 1;
+ case 'r': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("g", sub_param)) {
+ get_arg(sub_param+1, argv[i+1], &i);
+ /* already handled */
+ }
+ else {
+ erts_ets_realloc_always_moves = 1;
+ }
break;
+ }
case 'n': /* XXX obsolete */
break;
case 'c':
@@ -1325,6 +1389,7 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
+ erts_thr_set_main_status(1, 1);
set_main_stack_size();
process_main();
#endif
@@ -1398,7 +1463,7 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
+#if defined(USE_THREADS)
exit_async();
#endif
#if HAVE_ERTS_MSEG
@@ -1447,13 +1512,7 @@ __decl_noreturn void erl_exit0(char *file, int line, int n, char *fmt,...)
if (fmt != NULL && *fmt != '\0')
erl_error(fmt, args); /* Print error message. */
va_end(args);
-#ifdef __WIN32__
- if(n > 0) ConWaitForExit();
- else ConNormalExit();
-#endif
-#if !defined(__WIN32__) && !defined(VXWORKS) && !defined(_OSE_)
- sys_tty_reset();
-#endif
+ sys_tty_reset(n);
if (n == ERTS_INTR_EXIT)
exit(0);
@@ -1493,13 +1552,7 @@ __decl_noreturn void erl_exit(int n, char *fmt,...)
if (fmt != NULL && *fmt != '\0')
erl_error(fmt, args); /* Print error message. */
va_end(args);
-#ifdef __WIN32__
- if(n > 0) ConWaitForExit();
- else ConNormalExit();
-#endif
-#if !defined(__WIN32__) && !defined(VXWORKS) && !defined(_OSE_)
- sys_tty_reset();
-#endif
+ sys_tty_reset(n);
if (n == ERTS_INTR_EXIT)
exit(0);
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index 3f022f92b8..f3f3c22933 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -540,18 +540,18 @@ static void dump_memory_map_to_stream(FILE *fp)
if (is_internal_pid(bp->pid))
fprintf(fp,
"{%lu, %lu, %lu, {%lu,%lu,%lu}}.\n",
- (Uint) bp->type_no,
- (Uint) bp->mem,
- (Uint) bp->size,
- (Uint) pid_channel_no(bp->pid),
- (Uint) pid_number(bp->pid),
- (Uint) pid_serial(bp->pid));
+ (UWord) bp->type_no,
+ (UWord) bp->mem,
+ (UWord) bp->size,
+ (UWord) pid_channel_no(bp->pid),
+ (UWord) pid_number(bp->pid),
+ (UWord) pid_serial(bp->pid));
else
fprintf(fp,
"{%lu, %lu, %lu, undefined}.\n",
- (Uint) bp->type_no,
- (Uint) bp->mem,
- (Uint) bp->size);
+ (UWord) bp->type_no,
+ (UWord) bp->mem,
+ (UWord) bp->size);
}
if (lock)
@@ -638,7 +638,7 @@ Eterm erts_instr_get_memory_map(Process *proc)
hsz += 4;
}
- if ((Uint) bp->mem > MAX_SMALL)
+ if ((UWord) bp->mem > MAX_SMALL)
hsz += BIG_UINT_HEAP_SIZE;
if (bp->size > MAX_SMALL)
hsz += BIG_UINT_HEAP_SIZE;
@@ -749,12 +749,12 @@ Eterm erts_instr_get_memory_map(Process *proc)
#endif
type = make_small(bp->type_no);
- if ((Uint) bp->mem > MAX_SMALL) {
- ptr = uint_to_big((Uint) bp->mem, hp);
+ if ((UWord) bp->mem > MAX_SMALL) {
+ ptr = uint_to_big((UWord) bp->mem, hp);
hp += BIG_UINT_HEAP_SIZE;
}
else
- ptr = make_small((Uint) bp->mem);
+ ptr = make_small((UWord) bp->mem);
if (bp->size > MAX_SMALL) {
size = uint_to_big(bp->size, hp);
@@ -962,12 +962,12 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
fprintf(fp,
"{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n",
- stats->tot.size,
- stats->tot.max_size,
- stats->tot.max_size_ever,
- stats->tot.blocks,
- stats->tot.max_blocks,
- stats->tot.max_blocks_ever);
+ (UWord) stats->tot.size,
+ (UWord) stats->tot.max_size,
+ (UWord) stats->tot.max_size_ever,
+ (UWord) stats->tot.blocks,
+ (UWord) stats->tot.max_blocks,
+ (UWord) stats->tot.max_blocks_ever);
a_max = 0;
a_min = ~0;
@@ -992,12 +992,12 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
i == a_min ? "{allocators,\n [" : " ",
ERTS_ALC_A2AD(i),
- stats->a[i].size,
- stats->a[i].max_size,
- stats->a[i].max_size_ever,
- stats->a[i].blocks,
- stats->a[i].max_blocks,
- stats->a[i].max_blocks_ever,
+ (UWord) stats->a[i].size,
+ (UWord) stats->a[i].max_size,
+ (UWord) stats->a[i].max_size_ever,
+ (UWord) stats->a[i].blocks,
+ (UWord) stats->a[i].max_blocks,
+ (UWord) stats->a[i].max_blocks_ever,
i == a_max ? "]}.\n" : ",\n");
}
}
@@ -1009,12 +1009,12 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
i == ERTS_ALC_C_MIN ? "{classes,\n [" : " ",
ERTS_ALC_C2CD(i),
- stats->c[i].size,
- stats->c[i].max_size,
- stats->c[i].max_size_ever,
- stats->c[i].blocks,
- stats->c[i].max_blocks,
- stats->c[i].max_blocks_ever,
+ (UWord) stats->c[i].size,
+ (UWord) stats->c[i].max_size,
+ (UWord) stats->c[i].max_size_ever,
+ (UWord) stats->c[i].blocks,
+ (UWord) stats->c[i].max_blocks,
+ (UWord) stats->c[i].max_blocks_ever,
i == ERTS_ALC_C_MAX ? "]}.\n" : ",\n" );
}
@@ -1025,12 +1025,12 @@ dump_stat_to_stream(FILE *fp, int begin_max_period)
"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
i == ERTS_ALC_N_MIN ? "{types,\n [" : " ",
ERTS_ALC_N2TD(i),
- stats->n[i].size,
- stats->n[i].max_size,
- stats->n[i].max_size_ever,
- stats->n[i].blocks,
- stats->n[i].max_blocks,
- stats->n[i].max_blocks_ever,
+ (UWord) stats->n[i].size,
+ (UWord) stats->n[i].max_size,
+ (UWord) stats->n[i].max_size_ever,
+ (UWord) stats->n[i].blocks,
+ (UWord) stats->n[i].max_blocks,
+ (UWord) stats->n[i].max_blocks_ever,
i == ERTS_ALC_N_MAX ? "]}.\n" : ",\n" );
}
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 074b08ea57..d6138fa4e4 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -96,16 +96,15 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_status", "pid" },
{ "proc_tab", NULL },
{ "ports_snapshot", NULL },
- { "db_tab", "address" },
- { "db_tab_fix", "address" },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
+ { "db_tab", "address" },
+ { "db_tab_fix", "address" },
{ "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
{ "dist_table", NULL },
{ "sys_tracers", NULL },
- { "trace_pattern", NULL },
{ "module_tab", NULL },
{ "export_tab", NULL },
{ "fun_tab", NULL },
@@ -120,9 +119,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "child_status", NULL },
#endif
#ifdef __WIN32__
- { "sys_driver_data_lock", NULL },
+ { "sys_driver_data_lock", NULL },
#endif
- { "drv_ev_state_grow", NULL, },
+ { "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
{ "pollset_rm_list", NULL },
@@ -154,7 +153,11 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "instr", NULL },
{ "fix_alloc", "index" },
{ "alcu_allocator", "index" },
+ { "alcu_delayed_free", "index" },
{ "mseg", NULL },
+#ifdef HALFWORD_HEAP
+ { "pmmap", NULL },
+#endif
#ifdef ERTS_SMP
{ "port_task_pre_alloc_lock", "address" },
{ "port_taskq_pre_alloc_lock", "address" },
@@ -175,17 +178,19 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
+ { "run_queue_sleep_list", "address" },
#endif
{ "alloc_thr_ix_lock", NULL },
#ifdef ERTS_SMP
- { "proc_lck_wtr_alloc", NULL },
+ { "proc_lck_qs_alloc", NULL },
#endif
#ifdef __WIN32__
#ifdef DEBUG
{ "save_ops_lock", NULL },
#endif
#endif
- { "mtrace_buf", NULL }
+ { "mtrace_buf", NULL },
+ { "erts_alloc_hard_debug", NULL }
};
#define ERTS_LOCK_ORDER_SIZE \
@@ -197,6 +202,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
& ERTS_LC_FLG_LT_ALL \
& ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+static __decl_noreturn void __noreturn lc_abort(void);
+
static char *
lock_type(Uint16 flags)
{
@@ -220,7 +227,7 @@ rw_op_str(Uint16 flags)
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
erts_fprintf(stderr, "\nInternal error\n");
- abort();
+ lc_abort();
default:
break;
}
@@ -269,28 +276,18 @@ static erts_lc_free_block_t *free_blocks;
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
static ethr_spinlock_t free_blocks_lock;
-#define ERTS_LC_LOCK ethr_spin_lock
-#define ERTS_LC_UNLOCK ethr_spin_unlock
-#else
-static ethr_mutex free_blocks_lock;
-#define ERTS_LC_LOCK ethr_mutex_lock
-#define ERTS_LC_UNLOCK ethr_mutex_unlock
-#endif
static ERTS_INLINE void
lc_lock(void)
{
- if (ERTS_LC_LOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_lock(&free_blocks_lock);
}
static ERTS_INLINE void
lc_unlock(void)
{
- if (ERTS_LC_UNLOCK(&free_blocks_lock) != 0)
- abort();
+ ethr_spin_unlock(&free_blocks_lock);
}
static ERTS_INLINE void lc_free(void *p)
@@ -311,7 +308,7 @@ static void *lc_core_alloc(void)
{
lc_unlock();
erts_fprintf(stderr, "Lock checker out of memory!\n");
- abort();
+ lc_abort();
}
#else
@@ -325,7 +322,7 @@ static void *lc_core_alloc(void)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- abort();
+ lc_abort();
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -365,11 +362,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- abort();
+ lc_abort();
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
l_lcks->tid = erts_thr_self();
l_lcks->required.first = NULL;
@@ -511,7 +508,7 @@ uninitialized_lock(void)
{
erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
static void
@@ -521,7 +518,7 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -531,7 +528,7 @@ unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -539,7 +536,7 @@ unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -548,7 +545,7 @@ lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
print_lock("Lock order violation occured when locking ", lck, "!\n");
print_curr_locks(l_lcks);
print_lock_order();
- abort();
+ lc_abort();
}
static void
@@ -559,7 +556,7 @@ type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
print_lock(op, lck, "!\n");
ASSERT(l_lcks);
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -611,7 +608,7 @@ lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
}
}
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -619,7 +616,7 @@ unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Unlocking required ", lck, " lock!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -627,7 +624,7 @@ unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *l
{
print_lock("Unrequire on ", lck, " lock not required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -635,7 +632,7 @@ require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Require on ", lck, " lock already required!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
static void
@@ -643,7 +640,7 @@ required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
{
print_lock("Required ", lck, " lock not locked!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
@@ -656,13 +653,23 @@ thread_exit_handler(void)
erts_fprintf(stderr,
"Thread exiting while having locked locks!\n");
print_curr_locks(l_lcks);
- abort();
+ lc_abort();
}
destroy_locked_locks(l_lcks);
/* erts_tsd_set(locks_key, NULL); */
}
}
+static __decl_noreturn void
+lc_abort(void)
+{
+#ifdef __WIN32__
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
void
erts_lc_set_thread_name(char *thread_name)
{
@@ -674,7 +681,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- abort();
+ lc_abort();
}
}
@@ -684,7 +691,7 @@ erts_lc_assert_failed(char *file, int line, char *assertion)
erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
file, line, assertion);
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
return 0;
}
@@ -697,7 +704,7 @@ void erts_lc_fail(char *fmt, ...)
va_end(args);
erts_fprintf(stderr, "\n");
print_curr_locks(get_my_locked_locks());
- abort();
+ lc_abort();
}
@@ -717,7 +724,7 @@ erts_lc_get_lock_order_id(char *name)
"(update erl_lock_check.c)\n",
name);
}
- abort();
+ lc_abort();
return (Sint16) -1;
}
@@ -893,6 +900,25 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
}
+void
+erts_lc_check_no_locked_of_type(Uint16 flags)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (l_lcks) {
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
+ if (l_lck->flags & flags) {
+ erts_fprintf(stderr,
+ "Locked lock of type %s found which isn't "
+ "allowed here!\n",
+ lock_type(l_lck->flags));
+ print_curr_locks(l_lcks);
+ lc_abort();
+ }
+ }
+ }
+}
+
int
erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
{
@@ -1231,6 +1257,7 @@ void
erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
{
lck->id = erts_lc_get_lock_order_id(name);
+
lck->extra = make_boxed(&lck->extra);
lck->flags = flags;
lck->inited = ERTS_LC_INITITALIZED;
@@ -1279,13 +1306,8 @@ erts_lc_init(void)
free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
-#ifdef ETHR_HAVE_NATIVE_LOCKS
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- abort();
-#else
- if (ethr_mutex_init(&free_blocks_lock) != 0)
- abort();
-#endif
+ lc_abort();
erts_tsd_key_create(&locks_key);
}
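/*
 * Illustrative sketch, not part of the patch above: every fatal stop in the
 * lock checker now goes through lc_abort(), which breaks into the debugger
 * on Windows instead of calling abort(). The standalone snippet below shows
 * the same pattern in isolation; it assumes only the common _WIN32 predefine
 * and <windows.h>/<stdlib.h>, not any ERTS headers.
 */
#include <stdlib.h>
#ifdef _WIN32
#include <windows.h>
#endif

static void fatal_stop(void)
{
#ifdef _WIN32
    DebugBreak();   /* halt in an attached debugger for post-mortem inspection */
#else
    abort();        /* terminate and dump core, as before, on other platforms */
#endif
}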
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index d5e2ede9ac..0372e6850d 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -77,6 +77,7 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
+void erts_lc_check_no_locked_of_type(Uint16 flags);
int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 0d7e1335c1..239773f366 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -166,8 +166,8 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
int i;
type = lcnt_lock_type(lock->flag);
- ethr_atomic_read(&lock->r_state, &r_state);
- ethr_atomic_read(&lock->w_state, &w_state);
+ r_state = ethr_atomic_read(&lock->r_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (lock->flag & flag) {
@@ -257,6 +257,10 @@ void erts_lcnt_init() {
erts_lcnt_clear_counters();
}
+void erts_lcnt_late_init() {
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+}
+
/* list operations */
/* BEGIN ASSUMPTION: lcnt_data_lock taken */
@@ -390,10 +394,10 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
ASSERT(eltd);
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
if (option & ERTS_LCNT_LO_WRITE) {
- ethr_atomic_read(&lock->r_state, &r_state);
+ r_state = ethr_atomic_read(&lock->r_state);
ethr_atomic_inc( &lock->w_state);
}
if (option & ERTS_LCNT_LO_READ) {
@@ -419,7 +423,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ethr_atomic_inc( &lock->w_state);
eltd = lcnt_get_thread_data();
@@ -474,7 +478,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
#ifdef DEBUG
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
}
@@ -518,12 +522,12 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
#ifdef DEBUG
/* flowstate */
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
ethr_atomic_dec( &lock->flowstate);
/* write state */
- ethr_atomic_read(&lock->w_state, &w_state);
+ w_state = ethr_atomic_read(&lock->w_state);
ASSERT(w_state > 0)
#endif
ethr_atomic_dec(&lock->w_state);
@@ -554,7 +558,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
if (res != EBUSY) {
#ifdef DEBUG
- ethr_atomic_read(&lock->flowstate, &flowstate);
+ flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
ethr_atomic_inc( &lock->flowstate);
#endif
@@ -570,36 +574,26 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
/* thread operations */
-static void *lcnt_thr_init(erts_lcnt_thread_data_t *eltd) {
- void *(*function)(void *);
- void *argument;
- void *res;
- function = eltd->function;
- argument = eltd->argument;
-
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-
- res = (void *)function(argument);
- free(eltd);
- return (void *)res;
-}
-
-
-
-int erts_lcnt_thr_create(ethr_tid *tid, void * (*function)(void *), void *arg, ethr_thr_opts *opts) {
+void erts_lcnt_thread_setup(void) {
erts_lcnt_thread_data_t *eltd;
-
+
lcnt_lock();
/* lock for thread id global update */
eltd = lcnt_thread_data_alloc();
lcnt_unlock();
-
- eltd->function = function;
- eltd->argument = arg;
-
- return ethr_thr_create(tid, (void *)lcnt_thr_init, (void *)eltd, opts);
+ ASSERT(eltd);
+ ethr_tsd_set(lcnt_thr_data_key, eltd);
}
+void erts_lcnt_thread_exit_handler() {
+ erts_lcnt_thread_data_t *eltd;
+
+ eltd = ethr_tsd_get(lcnt_thr_data_key);
+
+ if (eltd) {
+ free(eltd);
+ }
+}
/* bindings for bifs */
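/*
 * Illustrative sketch, not part of the patch above: the lock-count change
 * replaces the erts_lcnt_thr_create() wrapper with erts_lcnt_thread_setup(),
 * called by the new thread itself, plus an exit handler that frees the
 * per-thread data. The plain-pthreads program below shows the same shape;
 * it is a generic illustration, not OTP code, and malloc(64) stands in for
 * lcnt_thread_data_alloc().
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t data_key;

static void thread_exit_handler(void *p)
{
    free(p);                        /* reclaim per-thread data when the thread exits */
}

static void thread_setup(void)
{
    void *data = malloc(64);        /* per-thread bookkeeping, allocated by the thread itself */
    pthread_setspecific(data_key, data);
}

static void *thread_main(void *arg)
{
    (void)arg;
    thread_setup();                 /* analogous to calling erts_lcnt_thread_setup() at start */
    /* ... the thread's real work ... */
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_key_create(&data_key, thread_exit_handler);  /* analogous to erts_lcnt_late_init() */
    pthread_create(&tid, NULL, thread_main, NULL);
    pthread_join(tid, NULL);
    return 0;
}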
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index e3044c371f..6306580ae4 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -155,11 +155,6 @@ typedef struct {
erts_lcnt_time_t timer; /* timer */
int timer_set; /* bool */
int lock_in_conflict; /* bool */
-
- /* function pointer */
- void *(*function)(void *);
- void *argument;
-
} erts_lcnt_thread_data_t;
/* globals */
@@ -169,6 +164,11 @@ extern Uint16 erts_lcnt_rt_options;
/* function declarations */
void erts_lcnt_init(void);
+void erts_lcnt_late_init(void);
+
+/* thread operations */
+void erts_lcnt_thread_setup(void);
+void erts_lcnt_thread_exit_handler(void);
/* list operations (local) */
erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
@@ -194,12 +194,7 @@ void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
-/* thread operations */
-
-int erts_lcnt_thr_create(ethr_tid *tid, void * (*function)(void *), void *arg, ethr_thr_opts *opts);
-
/* bif interface */
-
Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
void erts_lcnt_clear_counters(void);
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index a056fce0c5..82f272d28a 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -30,6 +30,7 @@
#include "erl_message.h"
#include "erl_process.h"
#include "erl_nmgc.h"
+#include "erl_binary.h"
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
ErlMessage,
@@ -42,6 +43,15 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
#undef HARD_DEBUG
#endif
+
+
+
+static ERTS_INLINE int in_heapfrag(const Eterm* ptr, const ErlHeapFragment *bp)
+{
+ return ((unsigned)(ptr - bp->mem) < bp->used_size);
+}
+
+
void
init_message(void)
{
@@ -81,9 +91,12 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
#endif
ErlHeapFragment* nbp;
+ /* ToDo: Make use of 'used_size' to avoid realloc
+ when shrinking just a few words */
+
#ifdef DEBUG
{
- Uint off_sz = size < bp->size ? size : bp->size;
+ Uint off_sz = size < bp->used_size ? size : bp->used_size;
for (i = 0; i < brefs_size; i++) {
Eterm *ptr;
if (is_immed(brefs[i]))
@@ -95,12 +108,12 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
}
#endif
- if (size == bp->size)
+ if (size == bp->used_size)
return bp;
#ifdef HARD_DEBUG
dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
- dbg_bp = new_message_buffer(bp->size);
+ dbg_bp = new_message_buffer(bp->used_size);
dbg_hp = dbg_bp->mem;
dbg_tot_size = 0;
for (i = 0; i < brefs_size; i++) {
@@ -109,15 +122,15 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp,
&dbg_bp->off_heap);
}
- ASSERT(dbg_tot_size == (size < bp->size ? size : bp->size));
+ ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size));
#endif
nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG,
(void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size),
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size),
ERTS_HEAP_FRAG_SIZE(size));
if (bp != nbp) {
- Uint off_sz = size < nbp->size ? size : nbp->size;
+ Uint off_sz = size < nbp->used_size ? size : nbp->used_size;
Eterm *sp = &bp->mem[0];
Eterm *ep = sp + off_sz;
Sint offs = &nbp->mem[0] - sp;
@@ -135,7 +148,7 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
}
#endif
}
- nbp->size = size;
+ nbp->alloc_size = size;
nbp->used_size = size;
#ifdef HARD_DEBUG
@@ -152,26 +165,40 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
void
erts_cleanup_offheap(ErlOffHeap *offheap)
{
- if (offheap->mso) {
- erts_cleanup_mso(offheap->mso);
- }
-#ifndef HYBRID /* FIND ME! */
- if (offheap->funs) {
- erts_cleanup_funs(offheap->funs);
- }
-#endif
- if (offheap->externals) {
- erts_cleanup_externals(offheap->externals);
+ union erl_off_heap_ptr u;
+
+ for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
+ erts_bin_free(u.pb->val);
+ }
+ break;
+ case FUN_SUBTAG:
+ if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+ erts_erase_fun_entry(u.fun->fe);
+ }
+ break;
+ default:
+ ASSERT(is_external_header(u.hdr->thing_word));
+ erts_deref_node_entry(u.ext->node);
+ break;
+ }
}
}
void
free_message_buffer(ErlHeapFragment* bp)
{
- erts_cleanup_offheap(&bp->off_heap);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG,
- (void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size));
+ ASSERT(bp != NULL);
+ do {
+ ErlHeapFragment* next_bp = bp->next;
+
+ erts_cleanup_offheap(&bp->off_heap);
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
+ ERTS_HEAP_FRAG_SIZE(bp->size));
+ bp = next_bp;
+ } while (bp != NULL);
}
static ERTS_INLINE void
@@ -181,43 +208,19 @@ link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
/* Link the message buffer */
bp->next = MBUF(proc);
MBUF(proc) = bp;
- MBUF_SIZE(proc) += bp->size;
+ MBUF_SIZE(proc) += bp->used_size;
FLAGS(proc) |= F_FORCE_GC;
- /* Move any binaries into the process */
- if (bp->off_heap.mso != NULL) {
- ProcBin** next_p = &bp->off_heap.mso;
- while (*next_p != NULL) {
- next_p = &((*next_p)->next);
- }
- *next_p = MSO(proc).mso;
- MSO(proc).mso = bp->off_heap.mso;
- bp->off_heap.mso = NULL;
- MSO(proc).overhead += bp->off_heap.overhead;
- }
-
- /* Move any funs into the process */
-#ifndef HYBRID
- if (bp->off_heap.funs != NULL) {
- ErlFunThing** next_p = &bp->off_heap.funs;
+ /* Move any off_heap's into the process */
+ if (bp->off_heap.first != NULL) {
+ struct erl_off_heap_header** next_p = &bp->off_heap.first;
while (*next_p != NULL) {
next_p = &((*next_p)->next);
}
- *next_p = MSO(proc).funs;
- MSO(proc).funs = bp->off_heap.funs;
- bp->off_heap.funs = NULL;
- }
-#endif
-
- /* Move any external things into the process */
- if (bp->off_heap.externals != NULL) {
- ExternalThing** next_p = &bp->off_heap.externals;
- while (*next_p != NULL) {
- next_p = &((*next_p)->next);
- }
- *next_p = MSO(proc).externals;
- MSO(proc).externals = bp->off_heap.externals;
- bp->off_heap.externals = NULL;
+ *next_p = MSO(proc).first;
+ MSO(proc).first = bp->off_heap.first;
+ bp->off_heap.first = NULL;
+ OH_OVERHEAD(&(MSO(proc)), bp->off_heap.overhead);
}
}
}
@@ -242,7 +245,7 @@ erts_msg_distext2heap(Process *pp,
goto decode_error;
if (is_not_nil(*tokenp)) {
ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
- tok_sz = heap_frag->size;
+ tok_sz = heap_frag->used_size;
sz += tok_sz;
}
if (pp)
@@ -283,12 +286,13 @@ erts_msg_distext2heap(Process *pp,
erts_cleanup_offheap(&heap_frag->off_heap);
}
erts_free_dist_ext_copy(dist_extp);
- if (*bpp)
+ if (*bpp) {
free_message_buffer(*bpp);
+ *bpp = NULL;
+ }
else if (hp) {
HRelease(pp, hp_end, hp);
}
- *bpp = NULL;
return THE_NON_VALUE;
}
@@ -436,11 +440,10 @@ erts_queue_message(Process* receiver,
ERL_MESSAGE_TERM(mp) = message;
ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
mp->next = NULL;
+ mp->data.heap_frag = bp;
#ifdef ERTS_SMP
if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
- mp->data.heap_frag = bp;
-
/*
* We move 'in queue' to 'private queue' and place
* message at the end of 'private queue' in order
@@ -453,11 +456,9 @@ erts_queue_message(Process* receiver,
LINK_MESSAGE_PRIVQ(receiver, mp);
}
else {
- mp->data.heap_frag = bp;
LINK_MESSAGE(receiver, mp);
}
#else
- mp->data.heap_frag = bp;
LINK_MESSAGE(receiver, mp);
#endif
@@ -491,19 +492,7 @@ erts_link_mbuf_to_proc(struct process *proc, ErlHeapFragment *bp)
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
- /* Unions for typecasts avoids warnings about type-punned pointers and aliasing */
- union {
- Uint** upp;
- ProcBin **pbpp;
- ErlFunThing **efpp;
- ExternalThing **etpp;
- } oh_list_pp, oh_el_next_pp;
- union {
- Uint *up;
- ProcBin *pbp;
- ErlFunThing *efp;
- ExternalThing *etp;
- } oh_el_p;
+ struct erl_off_heap_header* oh;
Eterm term, token, *fhp, *hp;
Sint offs;
Uint sz;
@@ -530,40 +519,33 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#ifdef HARD_DEBUG
dbg_term_sz = size_object(term);
dbg_token_sz = size_object(token);
- ASSERT(bp->size == dbg_term_sz + dbg_token_sz);
-
- dbg_bp = new_message_buffer(bp->size);
+ /*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
+ Copied size may be smaller due to removed SubBins or garbage.
+ Copied size may be larger due to duplicated shared terms.
+ */
+ dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
dbg_hp = dbg_bp->mem;
dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
dbg_thp_start = *hpp;
#endif
- ASSERT(bp);
- msg->data.attached = NULL;
-
- off_heap->overhead += bp->off_heap.overhead;
- sz = bp->size;
-
-#ifdef DEBUG
- if (is_not_immed(term)) {
- ASSERT(bp->mem <= ptr_val(term));
- ASSERT(bp->mem + bp->size > ptr_val(term));
+ if (bp->next != NULL) {
+ move_multi_frags(hpp, off_heap, bp, msg->m, 2);
+ goto copy_done;
}
- if (is_not_immed(token)) {
- ASSERT(bp->mem <= ptr_val(token));
- ASSERT(bp->mem + bp->size > ptr_val(token));
- }
-#endif
+ OH_OVERHEAD(off_heap, bp->off_heap.overhead);
+ sz = bp->used_size;
+
+ ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
+ ASSERT(is_immed(token) || in_heapfrag(ptr_val(token),bp));
fhp = bp->mem;
hp = *hpp;
offs = hp - fhp;
- oh_list_pp.upp = NULL;
- oh_el_next_pp.upp = NULL; /* Shut up compiler warning */
- oh_el_p.up = NULL; /* Shut up compiler warning */
+ oh = NULL;
while (sz--) {
Uint cpy_sz;
Eterm val = *fhp++;
@@ -574,8 +556,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
break;
case TAG_PRIMARY_LIST:
case TAG_PRIMARY_BOXED:
- ASSERT(bp->mem <= ptr_val(val));
- ASSERT(bp->mem + bp->size > ptr_val(val));
+ ASSERT(in_heapfrag(ptr_val(val), bp));
*hp++ = offset_ptr(val, offs);
break;
case TAG_PRIMARY_HEADER:
@@ -584,31 +565,18 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
case ARITYVAL_SUBTAG:
break;
case REFC_BINARY_SUBTAG:
- oh_list_pp.pbpp = &off_heap->mso;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.pbpp = &(oh_el_p.pbp)->next;
- cpy_sz = thing_arityval(val);
- goto cpy_words;
case FUN_SUBTAG:
-#ifndef HYBRID
- oh_list_pp.efpp = &off_heap->funs;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.efpp = &(oh_el_p.efp)->next;
-#endif
- cpy_sz = thing_arityval(val);
- goto cpy_words;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
- oh_list_pp.etpp = &off_heap->externals;
- oh_el_p.up = (hp-1);
- oh_el_next_pp.etpp = &(oh_el_p.etp)->next;
+ oh = (struct erl_off_heap_header*) (hp-1);
cpy_sz = thing_arityval(val);
goto cpy_words;
default:
cpy_sz = header_arity(val);
cpy_words:
+ ASSERT(sz >= cpy_sz);
sz -= cpy_sz;
while (cpy_sz >= 8) {
cpy_sz -= 8;
@@ -631,44 +599,13 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
case 1: *hp++ = *fhp++;
default: break;
}
- if (oh_list_pp.upp) {
-#ifdef HARD_DEBUG
- Uint *dbg_old_oh_list_p = *oh_list_pp.upp;
-#endif
+ if (oh) {
/* Add to offheap list */
- *oh_el_next_pp.upp = *oh_list_pp.upp;
- *oh_list_pp.upp = oh_el_p.up;
- ASSERT(*hpp <= oh_el_p.up);
- ASSERT(hp > oh_el_p.up);
-#ifdef HARD_DEBUG
- switch (val & _HEADER_SUBTAG_MASK) {
- case REFC_BINARY_SUBTAG:
- ASSERT(off_heap->mso == *oh_list_pp.pbpp);
- ASSERT(off_heap->mso->next
- == (ProcBin *) dbg_old_oh_list_p);
- break;
-#ifndef HYBRID
- case FUN_SUBTAG:
- ASSERT(off_heap->funs == *oh_list_pp.efpp);
- ASSERT(off_heap->funs->next
- == (ErlFunThing *) dbg_old_oh_list_p);
- break;
-#endif
- case EXTERNAL_PID_SUBTAG:
- case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG:
- ASSERT(off_heap->externals
- == *oh_list_pp.etpp);
- ASSERT(off_heap->externals->next
- == (ExternalThing *) dbg_old_oh_list_p);
- break;
- default:
- ASSERT(0);
- }
-#endif
- oh_list_pp.upp = NULL;
-
-
+ oh->next = off_heap->first;
+ off_heap->first = oh;
+ ASSERT(*hpp <= (Eterm*)oh);
+ ASSERT(hp > (Eterm*)oh);
+ oh = NULL;
}
break;
}
@@ -676,12 +613,11 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
}
}
- ASSERT(bp->size == hp - *hpp);
+ ASSERT(bp->used_size == hp - *hpp);
*hpp = hp;
if (is_not_immed(token)) {
- ASSERT(bp->mem <= ptr_val(token));
- ASSERT(bp->mem + bp->size > ptr_val(token));
+ ASSERT(in_heapfrag(ptr_val(token), bp));
ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
@@ -690,8 +626,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
}
if (is_not_immed(term)) {
- ASSERT(bp->mem <= ptr_val(term));
- ASSERT(bp->mem + bp->size > ptr_val(term));
+ ASSERT(in_heapfrag(ptr_val(term),bp));
ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
@@ -699,10 +634,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#endif
}
+copy_done:
#ifdef HARD_DEBUG
{
int i, j;
+ ErlHeapFragment* frag;
{
ProcBin *mso = off_heap->mso;
i = j = 0;
@@ -710,10 +647,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
mso = mso->next;
i++;
}
- mso = bp->off_heap.mso;
- while (mso) {
- mso = mso->next;
- j++;
+ for (frag=bp; frag; frag=frag->next) {
+ mso = frag->off_heap.mso;
+ while (mso) {
+ mso = mso->next;
+ j++;
+ }
}
ASSERT(i == j);
}
@@ -724,10 +663,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
fun = fun->next;
i++;
}
- fun = bp->off_heap.funs;
- while (fun) {
- fun = fun->next;
- j++;
+ for (frag=bp; frag; frag=frag->next) {
+ fun = frag->off_heap.funs;
+ while (fun) {
+ fun = fun->next;
+ j++;
+ }
}
ASSERT(i == j);
}
@@ -738,10 +679,12 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
external = external->next;
i++;
}
- external = bp->off_heap.externals;
- while (external) {
- external = external->next;
- j++;
+ for (frag=bp; frag; frag=frag->next) {
+ external = frag->off_heap.externals;
+ while (external) {
+ external = external->next;
+ j++;
+ }
}
ASSERT(i == j);
}
@@ -749,12 +692,9 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#endif
- bp->off_heap.mso = NULL;
-#ifndef HYBRID
- bp->off_heap.funs = NULL;
-#endif
- bp->off_heap.externals = NULL;
+ bp->off_heap.first = NULL;
free_message_buffer(bp);
+ msg->data.heap_frag = NULL;
#ifdef HARD_DEBUG
ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
@@ -764,6 +704,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
}
+
Uint
erts_msg_attached_data_size_aux(ErlMessage *msg)
{
@@ -789,7 +730,7 @@ erts_msg_attached_data_size_aux(ErlMessage *msg)
if (is_not_nil(msg->m[1])) {
ErlHeapFragment *heap_frag;
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- sz += heap_frag->size;
+ sz += heap_frag->used_size;
}
return sz;
}
@@ -805,7 +746,7 @@ erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *ms
ErlHeapFragment *heap_frag;
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg),
- heap_frag->size,
+ heap_frag->used_size,
hpp,
ohp);
erts_cleanup_offheap(&heap_frag->off_heap);
@@ -1062,3 +1003,4 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
erts_queue_message(to, to_locksp, bp, save, NIL);
}
}
+
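/*
 * Illustrative sketch, not part of the patch above: erl_message.c now keeps
 * a single intrusive list of all off-heap objects (refc binaries, funs,
 * externals), linked through a common header and cleaned up with one
 * traversal that dispatches on the header tag. The types and tag values
 * below are simplified stand-ins, not the real ERTS ones, and the malloc/free
 * lifetime is only for the sake of a runnable example.
 */
#include <stdio.h>
#include <stdlib.h>

enum oh_tag { OH_BINARY, OH_FUN, OH_EXTERNAL };

struct oh_header {              /* compare struct erl_off_heap_header */
    enum oh_tag tag;
    struct oh_header *next;
};

struct off_heap {               /* compare ErlOffHeap: one list, one overhead counter */
    struct oh_header *first;
    unsigned long overhead;
};

static void cleanup_off_heap(struct off_heap *oh)
{
    struct oh_header *p = oh->first;
    while (p) {
        struct oh_header *next = p->next;
        switch (p->tag) {       /* compare the thing_subtag() switch in erts_cleanup_offheap() */
        case OH_BINARY:   puts("release binary");   break;
        case OH_FUN:      puts("release fun");      break;
        default:          puts("release external"); break;
        }
        free(p);
        p = next;
    }
    oh->first = NULL;
}

int main(void)
{
    struct off_heap oh = { NULL, 0 };
    struct oh_header *b = malloc(sizeof *b);
    b->tag = OH_BINARY;
    b->next = oh.first;
    oh.first = b;
    cleanup_off_heap(&oh);
    return 0;
}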
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 5cf7c209bd..5aca0db6fe 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -28,13 +28,22 @@ struct external_thing_;
* but is stored outside of any heap.
*/
-typedef struct erl_off_heap {
- struct proc_bin* mso; /* List of associated binaries. */
-#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* funs; /* List of funs. */
+struct erl_off_heap_header {
+ Eterm thing_word;
+ Uint size;
+#if HALFWORD_HEAP
+ void* dummy_ptr_padding__;
#endif
- struct external_thing_* externals; /* List of external things. */
- int overhead; /* Administrative overhead (used to force GC). */
+ struct erl_off_heap_header* next;
+};
+
+#define OH_OVERHEAD(oh, size) do { \
+ (oh)->overhead += size; \
+} while(0)
+
+typedef struct erl_off_heap {
+ struct erl_off_heap_header* first;
+ Uint64 overhead; /* Administrative overhead (used to force GC). */
} ErlOffHeap;
#include "external.h"
@@ -49,7 +58,7 @@ typedef struct erl_heap_fragment ErlHeapFragment;
struct erl_heap_fragment {
ErlHeapFragment* next; /* Next heap fragment */
ErlOffHeap off_heap; /* Offset heap data. */
- unsigned size; /* Size in words of mem */
+ unsigned alloc_size; /* Size in (half)words of mem */
unsigned used_size; /* With terms to be moved to heap by GC */
Eterm mem[1]; /* Data */
};
@@ -75,6 +84,13 @@ typedef struct {
ErlMessage** last; /* point to the last next pointer */
ErlMessage** save;
int len; /* queue length */
+
+ /*
+ * The following two fields are used by the recv_mark/1 and
+ * recv_set/1 instructions.
+ */
+ BeamInstr* mark; /* address to rec_loop/2 instruction */
+ ErlMessage** saved_last; /* saved last pointer */
} ErlMessageQueue;
#ifdef ERTS_SMP
@@ -137,6 +153,7 @@ do { \
(p)->msg.len--; \
if (__mp == NULL) \
(p)->msg.last = (p)->msg.save; \
+ (p)->msg.mark = 0; \
} while(0)
/* Reset message save point (after receive match) */
@@ -191,11 +208,9 @@ do { \
#define ERTS_INIT_HEAP_FRAG(HEAP_FRAG_P, DATA_WORDS) \
do { \
(HEAP_FRAG_P)->next = NULL; \
- (HEAP_FRAG_P)->size = (DATA_WORDS); \
+ (HEAP_FRAG_P)->alloc_size = (DATA_WORDS); \
(HEAP_FRAG_P)->used_size = (DATA_WORDS); \
- (HEAP_FRAG_P)->off_heap.mso = NULL; \
- (HEAP_FRAG_P)->off_heap.funs = NULL; \
- (HEAP_FRAG_P)->off_heap.externals = NULL; \
+ (HEAP_FRAG_P)->off_heap.first = NULL; \
(HEAP_FRAG_P)->off_heap.overhead = 0; \
} while (0)
@@ -219,14 +234,25 @@ void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *);
Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **,
Eterm *, ErtsDistExternal *);
+ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg)
+{
+ const ErlHeapFragment *bp;
+ Uint sz = 0;
+ for (bp = msg->data.heap_frag; bp!=NULL; bp=bp->next) {
+ sz += bp->used_size;
+ }
+ return sz;
+}
+
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg)
{
ASSERT(msg->data.attached);
if (is_value(ERL_MESSAGE_TERM(msg)))
- return msg->data.heap_frag->size;
+ return erts_msg_used_frag_sz(msg);
else if (msg->data.dist_ext->heap_size < 0)
return erts_msg_attached_data_size_aux(msg);
else {
@@ -234,7 +260,7 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg)
if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
ErlHeapFragment *heap_frag;
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- sz += heap_frag->size;
+ sz += heap_frag->used_size;
}
return sz;
}
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 8b8ac2ec80..b1478758a1 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -79,7 +79,7 @@ static erts_mtx_t mtrace_buf_mutex;
#define UI16_SZ (2)
#define UI32_SZ (4)
#define UI64_SZ (8)
-#ifdef ARCH_64
+#ifdef ARCH_64 /* XXX:PaN Halfword? (whole file...) */
# define UI_SZ UI64_SZ
#else
# define UI_SZ UI32_SZ
@@ -188,7 +188,7 @@ check_alloc_entry(byte *sp, byte *ep,
byte tag,
Uint16 ct_no, int ct_no_n,
Uint16 type, int type_n,
- Uint res, int res_n,
+ UWord res, int res_n,
Uint size, int size_n,
Uint32 ti,int ti_n);
void
@@ -196,8 +196,8 @@ check_realloc_entry(byte *sp, byte *ep,
byte tag,
Uint16 ct_no, int ct_no_n,
Uint16 type, int type_n,
- Uint res, int res_n,
- Uint ptr, int ptr_n,
+ UWord res, int res_n,
+ UWord ptr, int ptr_n,
Uint size, int size_n,
Uint32 ti,int ti_n);
void
@@ -205,7 +205,7 @@ check_free_entry(byte *sp, byte *ep,
byte tag,
Uint16 ct_no, int ct_no_n,
Uint16 t_no, int t_no_n,
- Uint ptr, int ptr_n,
+ UWord ptr, int ptr_n,
Uint32 ti,int ti_n);
void
check_time_inc_entry(byte *sp, byte *ep,
@@ -585,9 +585,7 @@ void erts_mtrace_init(char *receiver, char *nodename)
Uint16 port;
erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_set_forksafe(&mtrace_buf_mutex);
erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
- erts_mtx_set_forksafe(&mtrace_op_mutex);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
@@ -785,7 +783,7 @@ write_alloc_entry(byte tag,
tag,
ct_no, ct_no_n,
t_no, t_no_n,
- (Uint) res, res_n,
+ (UWord) res, res_n,
size, size_n,
ti, ti_n);
#endif
@@ -865,8 +863,8 @@ write_realloc_entry(byte tag,
tag,
ct_no, ct_no_n,
t_no, t_no_n,
- (Uint) res, res_n,
- (Uint) ptr, ptr_n,
+ (UWord) res, res_n,
+ (UWord) ptr, ptr_n,
size, size_n,
ti, ti_n);
#endif
@@ -934,7 +932,7 @@ write_free_entry(byte tag,
tag,
ct_no, ct_no_n,
t_no, t_no_n,
- (Uint) ptr, ptr_n,
+ (UWord) ptr, ptr_n,
ti, ti_n);
#endif
}
@@ -1135,7 +1133,7 @@ check_alloc_entry(byte *sp, byte *ep,
byte tag,
Uint16 ct_no, int ct_no_n,
Uint16 t_no, int t_no_n,
- Uint res, int res_n,
+ UWord res, int res_n,
Uint size, int size_n,
Uint32 ti,int ti_n)
{
@@ -1163,8 +1161,8 @@ check_realloc_entry(byte *sp, byte *ep,
byte tag,
Uint16 ct_no, int ct_no_n,
Uint16 t_no, int t_no_n,
- Uint res, int res_n,
- Uint ptr, int ptr_n,
+ UWord res, int res_n,
+ UWord ptr, int ptr_n,
Uint size, int size_n,
Uint32 ti,int ti_n)
{
@@ -1193,7 +1191,7 @@ check_free_entry(byte *sp, byte *ep,
byte tag,
Uint16 ct_no, int ct_no_n,
Uint16 t_no, int t_no_n,
- Uint ptr, int ptr_n,
+ UWord ptr, int ptr_n,
Uint32 ti,int ti_n)
{
byte *p = sp;
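/*
 * Illustrative sketch, not part of the patch above: the mtrace entries switch
 * their pointer-carrying parameters from Uint to UWord because, with the
 * halfword heap, an Eterm-sized integer can be narrower than a pointer. The
 * standard-C equivalent of that rule is to carry pointer values in uintptr_t,
 * as below; log_free is a made-up name for illustration only.
 */
#include <inttypes.h>
#include <stdio.h>

static void log_free(uintptr_t ptr)     /* compare the UWord 'ptr' parameter above */
{
    printf("free %" PRIxPTR "\n", ptr);
}

int main(void)
{
    int x;
    log_free((uintptr_t) &x);           /* never truncate a pointer into a narrower Uint */
    return 0;
}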
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 585a6c1fdf..1dd9c8bd4a 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -48,9 +48,23 @@ struct erl_module_nif {
struct enif_entry_t* entry;
erts_refc_t rt_cnt; /* number of resource types */
erts_refc_t rt_dtor_cnt; /* number of resource types with destructors */
- int is_orphan; /* if erlang module has been purged */
+ Module* mod; /* Can be NULL if orphan with dtor-resources left */
};
+#ifdef DEBUG
+# define READONLY_CHECK
+#endif
+#ifdef READONLY_CHECK
+# define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE)
+static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
+#else
+# define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
+#endif
+
+#ifdef DEBUG
+static int is_offheap(const ErlOffHeap* off_heap);
+#endif
+
#define MIN_HEAP_FRAG_SZ 200
static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp);
@@ -74,7 +88,8 @@ static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
HEAP_TOP(env->proc) = env->hp;
}
else {
- HRelease(env->proc, env->hp_end, env->hp);
+ env->heap_frag->used_size = hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
frag_sz = need + MIN_HEAP_FRAG_SZ;
hp = erts_heap_alloc(env->proc, frag_sz);
@@ -106,6 +121,13 @@ static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
env->tmp_obj_list = NULL;
}
+/* Temporary object header, auto-deallocated when NIF returns. */
+struct enif_tmp_obj_t {
+ struct enif_tmp_obj_t* next;
+ void (*dtor)(struct enif_tmp_obj_t*);
+ /*char data[];*/
+};
+
static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
{
while (env->tmp_obj_list != NULL) {
@@ -126,8 +148,9 @@ void erts_post_nif(ErlNifEnv* env)
}
else {
ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->size);
- HRelease(env->proc, env->hp_end, env->hp);
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
free_tmp_objs(env);
}
@@ -141,7 +164,7 @@ static void post_nif_noproc(ErlNifEnv* env)
/* Flush out our cached heap pointers to allow an ordinary HAlloc
*/
-static void enable_halloc(ErlNifEnv* env)
+static void flush_env(ErlNifEnv* env)
{
if (env->heap_frag == NULL) {
ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
@@ -151,14 +174,15 @@ static void enable_halloc(ErlNifEnv* env)
}
else {
ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->size);
- HRelease(env->proc, env->hp_end, env->hp);
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
}
}
-/* Restore cached heap pointers
+/* Restore cached heap pointers to allow alloc_heap again.
*/
-static void disable_halloc(ErlNifEnv* env)
+static void cache_env(ErlNifEnv* env)
{
if (env->heap_frag == NULL) {
ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
@@ -168,35 +192,190 @@ static void disable_halloc(ErlNifEnv* env)
}
else {
ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->size);
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
env->heap_frag = MBUF(env->proc);
ASSERT(env->heap_frag != NULL);
env->hp = env->heap_frag->mem + env->heap_frag->used_size;
- env->hp_end = env->heap_frag->mem + env->heap_frag->size;
+ env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
}
}
-
-
void* enif_priv_data(ErlNifEnv* env)
{
return env->mod_nif->priv_data;
}
-void* enif_alloc(ErlNifEnv* env, size_t size)
+void* enif_alloc(size_t size)
{
return erts_alloc_fnf(ERTS_ALC_T_NIF, (Uint) size);
}
-void* enif_realloc(ErlNifEnv* env, void* ptr, size_t size)
+void* enif_realloc(void* ptr, size_t size)
{
return erts_realloc_fnf(ERTS_ALC_T_NIF, ptr, size);
}
-void enif_free(ErlNifEnv* env, void* ptr)
+void enif_free(void* ptr)
{
erts_free(ERTS_ALC_T_NIF, ptr);
}
+struct enif_msg_environment_t
+{
+ ErlNifEnv env;
+ Process phony_proc;
+};
+
+ErlNifEnv* enif_alloc_env(void)
+{
+ struct enif_msg_environment_t* msg_env =
+ erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
+ Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */
+
+ msg_env->env.hp = phony_heap;
+ msg_env->env.hp_end = phony_heap;
+ msg_env->env.heap_frag = NULL;
+ msg_env->env.mod_nif = NULL;
+ msg_env->env.tmp_obj_list = (struct enif_tmp_obj_t*) 1; /* invalid non-NULL */
+ msg_env->env.proc = &msg_env->phony_proc;
+ memset(&msg_env->phony_proc, 0, sizeof(Process));
+ HEAP_START(&msg_env->phony_proc) = phony_heap;
+ HEAP_TOP(&msg_env->phony_proc) = phony_heap;
+ HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
+ HEAP_END(&msg_env->phony_proc) = phony_heap;
+ MBUF(&msg_env->phony_proc) = NULL;
+ msg_env->phony_proc.id = ERTS_INVALID_PID;
+#ifdef FORCE_HEAP_FRAGS
+ msg_env->phony_proc.space_verified = 0;
+ msg_env->phony_proc.space_verified_from = NULL;
+#endif
+ return &msg_env->env;
+}
+void enif_free_env(ErlNifEnv* env)
+{
+ enif_clear_env(env);
+ erts_free(ERTS_ALC_T_NIF, env);
+}
+
+static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
+{
+ oh->first = NULL;
+ oh->overhead = 0;
+}
+
+void enif_clear_env(ErlNifEnv* env)
+{
+ struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
+ Process* p = &menv->phony_proc;
+ ASSERT(p == menv->env.proc);
+ ASSERT(p->id == ERTS_INVALID_PID);
+ ASSERT(MBUF(p) == menv->env.heap_frag);
+ if (MBUF(p) != NULL) {
+ erts_cleanup_offheap(&MSO(p));
+ clear_offheap(&MSO(p));
+ free_message_buffer(MBUF(p));
+ MBUF(p) = NULL;
+ menv->env.heap_frag = NULL;
+ }
+ ASSERT(HEAP_TOP(p) == HEAP_END(p));
+ menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
+
+ ASSERT(!is_offheap(&MSO(p)));
+}
+int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+ ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+{
+ struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
+ ErtsProcLocks rp_locks = 0;
+ Process* rp;
+ Process* c_p;
+ ErlHeapFragment* frags;
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ ErtsProcLocks rp_had_locks;
+#endif
+ Eterm receiver = to_pid->pid;
+ int flush_me = 0;
+
+ if (env != NULL) {
+ c_p = env->proc;
+ if (receiver == c_p->id) {
+ rp_locks = ERTS_PROC_LOCK_MAIN;
+ flush_me = 1;
+ }
+ }
+ else {
+#ifdef ERTS_SMP
+ c_p = NULL;
+#else
+ erl_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
+#endif
+ }
+
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ rp_had_locks = rp_locks;
+#endif
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (rp == NULL) {
+ ASSERT(env == NULL || receiver != c_p->id);
+ return 0;
+ }
+ flush_env(msg_env);
+ frags = menv->env.heap_frag;
+ ASSERT(frags == MBUF(&menv->phony_proc));
+ if (frags != NULL) {
+ /* Move all offheap's from phony proc to the first fragment.
+ Quick and dirty, but erts_move_msg_mbuf_to_heap doesn't care. */
+ ASSERT(!is_offheap(&frags->off_heap));
+ frags->off_heap = MSO(&menv->phony_proc);
+ clear_offheap(&MSO(&menv->phony_proc));
+ menv->env.heap_frag = NULL;
+ MBUF(&menv->phony_proc) = NULL;
+ }
+ ASSERT(!is_offheap(&MSO(&menv->phony_proc)));
+
+ if (flush_me) {
+ flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+ }
+ erts_queue_message(rp, &rp_locks, frags, msg, am_undefined);
+ if (rp_locks) {
+ ERTS_SMP_LC_ASSERT(rp_locks == (rp_had_locks | (ERTS_PROC_LOCK_MSGQ |
+ ERTS_PROC_LOCK_STATUS)));
+ erts_smp_proc_unlock(rp, (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS));
+ }
+ erts_smp_proc_dec_refc(rp);
+ if (flush_me) {
+ cache_env(env);
+ }
+ return 1;
+}
+
+ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
+{
+ Uint sz;
+ Eterm* hp;
+ sz = size_object(src_term);
+ hp = alloc_heap(dst_env, sz);
+ return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
+}
+
+
+#ifdef DEBUG
+static int is_offheap(const ErlOffHeap* oh)
+{
+ return oh->first != NULL;
+}
+#endif
+
+ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
+{
+ pid->pid = caller_env->proc->id;
+ return pid;
+}
+int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
+{
+ return is_internal_pid(term) ? (pid->pid=term, 1) : 0;
+}
+
int enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term)
{
return is_atom(term);
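/*
 * Illustrative sketch, not part of the patch above: a minimal example of how
 * a NIF might use the process-independent environment API added in this hunk,
 * following the signatures introduced here (enif_alloc_env, enif_make_copy,
 * enif_send, enif_free_env, enif_get_local_pid). The module name example_nif
 * and the function send_copy are made up for the example.
 */
#include "erl_nif.h"

/* send_copy(Pid, Term) -> ok: copy Term into a fresh env and send it to Pid */
static ERL_NIF_TERM send_copy(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid to;
    ErlNifEnv* msg_env;
    ERL_NIF_TERM copy;

    if (!enif_get_local_pid(env, argv[0], &to)) {
        return enif_make_badarg(env);
    }
    msg_env = enif_alloc_env();               /* heap fragments owned by no process */
    copy = enif_make_copy(msg_env, argv[1]);  /* deep-copy the term into msg_env */
    enif_send(env, &to, msg_env, copy);       /* returns 0 if the pid is gone; ignored here */
    enif_free_env(msg_env);                   /* fragments were handed to the receiver */
    return enif_make_atom(env, "ok");
}

static ErlNifFunc nif_funcs[] = {
    {"send_copy", 2, send_copy}
};

ERL_NIF_INIT(example_nif, nif_funcs, NULL, NULL, NULL, NULL)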
@@ -232,9 +411,19 @@ int enif_is_ref(ErlNifEnv* env, ERL_NIF_TERM term)
return is_ref(term);
}
+int enif_is_tuple(ErlNifEnv* env, ERL_NIF_TERM term)
+{
+ return is_tuple(term);
+}
+
+int enif_is_list(ErlNifEnv* env, ERL_NIF_TERM term)
+{
+ return is_list(term) || is_nil(term);
+}
+
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
- erts_free_aligned_binary_bytes((byte*)obj);
+ erts_free_aligned_binary_bytes_extra((byte*)obj,ERTS_ALC_T_TMP);
}
int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
@@ -244,7 +433,7 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
byte* raw_ptr;
}u;
u.tmp = NULL;
- bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr,
+ bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, ERTS_ALC_T_TMP,
sizeof(struct enif_tmp_obj_t));
if (bin->data == NULL) {
return 0;
@@ -257,6 +446,7 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
bin->bin_term = bin_term;
bin->size = binary_size(bin_term);
bin->ref_bin = NULL;
+ ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
@@ -293,10 +483,11 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
io_list_to_buf(term, (char*) bin->data, sz);
+ ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
-int enif_alloc_binary(ErlNifEnv* env, unsigned size, ErlNifBinary* bin)
+int enif_alloc_binary(size_t size, ErlNifBinary* bin)
{
Binary* refbin;
@@ -315,7 +506,7 @@ int enif_alloc_binary(ErlNifEnv* env, unsigned size, ErlNifBinary* bin)
return 1;
}
-int enif_realloc_binary(ErlNifEnv* env, ErlNifBinary* bin, unsigned size)
+int enif_realloc_binary(ErlNifBinary* bin, size_t size)
{
if (bin->ref_bin != NULL) {
Binary* oldbin;
@@ -333,15 +524,15 @@ int enif_realloc_binary(ErlNifEnv* env, ErlNifBinary* bin, unsigned size)
}
else {
unsigned char* old_data = bin->data;
- unsigned cpy_sz = (size < bin->size ? size : bin->size);
- enif_alloc_binary(env, size, bin);
+ size_t cpy_sz = (size < bin->size ? size : bin->size);
+ enif_alloc_binary(size, bin);
sys_memcpy(bin->data, old_data, cpy_sz);
}
return 1;
}
-void enif_release_binary(ErlNifEnv* env, ErlNifBinary* bin)
+void enif_release_binary(ErlNifBinary* bin)
{
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
@@ -357,12 +548,21 @@ void enif_release_binary(ErlNifEnv* env, ErlNifBinary* bin)
#endif
}
-int enif_is_identical(ErlNifEnv* env, Eterm lhs, Eterm rhs)
+unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size,
+ ERL_NIF_TERM* termp)
+{
+ flush_env(env);
+ *termp = new_binary(env->proc, NULL, size);
+ cache_env(env);
+ return binary_bytes(*termp);
+}
+
+int enif_is_identical(Eterm lhs, Eterm rhs)
{
return EQ(lhs,rhs);
}
-int enif_compare(ErlNifEnv* env, Eterm lhs, Eterm rhs)
+int enif_compare(Eterm lhs, Eterm rhs)
{
return cmp(lhs,rhs);
}
@@ -425,13 +625,13 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = bptr->orig_size;
- pb->next = MSO(env->proc).mso;
- MSO(env->proc).mso = pb;
+ pb->next = MSO(env->proc).first;
+ MSO(env->proc).first = (struct erl_off_heap_header*) pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
- MSO(env->proc).overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
bin_term = make_binary(pb);
if (erts_refc_read(&bptr->refc, 1) == 1) {
/* Total ownership transfer */
@@ -441,15 +641,15 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
return bin_term;
}
else {
- enable_halloc(env);
+ flush_env(env);
bin->bin_term = new_binary(env->proc, bin->data, bin->size);
- disable_halloc(env);
+ cache_env(env);
return bin->bin_term;
}
}
Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
- unsigned pos, unsigned size)
+ size_t pos, size_t size)
{
ErlSubBin* sb;
Eterm orig;
@@ -479,9 +679,11 @@ Eterm enif_make_badarg(ErlNifEnv* env)
BIF_ERROR(env->proc, BADARG);
}
-int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len)
+int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len,
+ ErlNifCharEncoding encoding)
{
Atom* ap;
+ ASSERT(encoding == ERL_NIF_LATIN1);
if (is_not_atom(atom)) {
return 0;
}
@@ -496,9 +698,9 @@ int enif_get_atom(ErlNifEnv* env, Eterm atom, char* buf, unsigned len)
int enif_get_int(ErlNifEnv* env, Eterm term, int* ip)
{
-#if SIZEOF_INT == SIZEOF_VOID_P
+#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return term_to_Sint(term, (Sint*)ip);
-#elif SIZEOF_LONG == SIZEOF_VOID_P
+#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
Sint i;
if (!term_to_Sint(term, &i) || i < INT_MIN || i > INT_MAX) {
return 0;
@@ -512,9 +714,9 @@ int enif_get_int(ErlNifEnv* env, Eterm term, int* ip)
int enif_get_uint(ErlNifEnv* env, Eterm term, unsigned* ip)
{
-#if SIZEOF_INT == SIZEOF_VOID_P
+#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return term_to_Uint(term, (Uint*)ip);
-#elif SIZEOF_LONG == SIZEOF_VOID_P
+#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
Uint i;
if (!term_to_Uint(term, &i) || i > UINT_MAX) {
return 0;
@@ -526,8 +728,11 @@ int enif_get_uint(ErlNifEnv* env, Eterm term, unsigned* ip)
int enif_get_long(ErlNifEnv* env, Eterm term, long* ip)
{
-#if SIZEOF_LONG == SIZEOF_VOID_P
+#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
return term_to_Sint(term, ip);
+#elif SIZEOF_INT == ERTS_SIZEOF_ETERM
+ Sint i;
+ return term_to_Sint(term, &i) ? (*ip = (long) i, 1) : 0;
#else
# error Unknown long word size
#endif
@@ -535,14 +740,29 @@ int enif_get_long(ErlNifEnv* env, Eterm term, long* ip)
int enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip)
{
-#if SIZEOF_LONG == SIZEOF_VOID_P
+#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
return term_to_Uint(term, ip);
+#elif SIZEOF_INT == ERTS_SIZEOF_ETERM
+ Uint u;
+ return term_to_Uint(term, &u) ? (*ip = (unsigned long) u, 1) : 0;
#else
# error Unknown long word size
#endif
}
-int enif_get_double(ErlNifEnv* env, Eterm term, double* dp)
+#if HAVE_INT64 && SIZEOF_LONG != 8
+int enif_get_int64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifSInt64* ip)
+{
+ return term_to_Sint64(term, ip);
+}
+
+int enif_get_uint64(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifUInt64* ip)
+{
+ return term_to_Uint64(term, ip);
+}
+#endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
+
+int enif_get_double(ErlNifEnv* env, ERL_NIF_TERM term, double* dp)
{
FloatDef f;
if (is_not_float(term)) {
@@ -553,6 +773,17 @@ int enif_get_double(ErlNifEnv* env, Eterm term, double* dp)
return 1;
}
+int enif_get_atom_length(ErlNifEnv* env, Eterm atom, unsigned* len,
+ ErlNifCharEncoding enc)
+{
+ Atom* ap;
+ ASSERT(enc == ERL_NIF_LATIN1);
+ if (is_not_atom(atom)) return 0;
+ ap = atom_tab(atom_val(atom));
+ *len = ap->len;
+ return 1;
+}
+
int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
{
Eterm* val;
@@ -563,29 +794,33 @@ int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
return 1;
}
+int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
+{
+ if (is_not_list(term) && is_not_nil(term)) return 0;
+ *len = list_length(term);
+ return 1;
+}
+
ERL_NIF_TERM enif_make_int(ErlNifEnv* env, int i)
{
-#if SIZEOF_INT == SIZEOF_VOID_P
+#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return IS_SSMALL(i) ? make_small(i) : small_to_big(i,alloc_heap(env,2));
-#elif SIZEOF_LONG == SIZEOF_VOID_P
+#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
return make_small(i);
#endif
}
ERL_NIF_TERM enif_make_uint(ErlNifEnv* env, unsigned i)
{
-#if SIZEOF_INT == SIZEOF_VOID_P
+#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
-#elif SIZEOF_LONG == SIZEOF_VOID_P
+#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
return make_small(i);
#endif
}
ERL_NIF_TERM enif_make_long(ErlNifEnv* env, long i)
{
-#if SIZEOF_LONG != SIZEOF_VOID_P
-# error Unknown long word size
-#endif
return IS_SSMALL(i) ? make_small(i) : small_to_big(i, alloc_heap(env,2));
}
@@ -594,6 +829,26 @@ ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i)
return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
}
+#if HAVE_INT64 && SIZEOF_LONG != 8
+ERL_NIF_TERM enif_make_int64(ErlNifEnv* env, ErlNifSInt64 i)
+{
+ Uint* hp;
+ Uint need = 0;
+ erts_bld_sint64(NULL, &need, i);
+ hp = alloc_heap(env, need);
+ return erts_bld_sint64(&hp, NULL, i);
+}
+
+ERL_NIF_TERM enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i)
+{
+ Uint* hp;
+ Uint need = 0;
+ erts_bld_uint64(NULL, &need, i);
+ hp = alloc_heap(env, need);
+ return erts_bld_uint64(&hp, NULL, i);
+}
+#endif /* HAVE_INT64 && SIZEOF_LONG != 8 */
+
ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d)
{
Eterm* hp = alloc_heap(env,FLOAT_SIZE_OBJECT);
@@ -605,12 +860,25 @@ ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d)
ERL_NIF_TERM enif_make_atom(ErlNifEnv* env, const char* name)
{
- return am_atom_put(name, sys_strlen(name));
+ return enif_make_atom_len(env, name, sys_strlen(name));
}
-int enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom)
+ERL_NIF_TERM enif_make_atom_len(ErlNifEnv* env, const char* name, size_t len)
{
- return erts_atom_get(name, sys_strlen(name), atom);
+ return am_atom_put(name, len);
+}
+
+int enif_make_existing_atom(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom,
+ ErlNifCharEncoding enc)
+{
+ return enif_make_existing_atom_len(env, name, sys_strlen(name), atom, enc);
+}
+
+int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
+ ERL_NIF_TERM* atom, ErlNifCharEncoding encoding)
+{
+ ASSERT(encoding == ERL_NIF_LATIN1);
+ return erts_atom_get(name, len, atom);
}
ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
@@ -689,11 +957,16 @@ ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[],
ERL_NIF_TERM enif_make_string(ErlNifEnv* env, const char* string,
ErlNifCharEncoding encoding)
-{
- Sint n = sys_strlen(string);
- Eterm* hp = alloc_heap(env,n*2);
+{
+ return enif_make_string_len(env, string, sys_strlen(string), encoding);
+}
+
+ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
+ size_t len, ErlNifCharEncoding encoding)
+{
+ Eterm* hp = alloc_heap(env,len*2);
ASSERT(encoding == ERL_NIF_LATIN1);
- return erts_bld_string_n(&hp,NULL,string,n);
+ return erts_bld_string_n(&hp,NULL,string,len);
}
ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
@@ -764,7 +1037,8 @@ struct enif_resource_type_t
ErlNifResourceDtor* dtor; /* user destructor function */
erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
+1 for active erl_module_nif */
- char name[1];
+ Eterm module;
+ Eterm name;
};
/* dummy node in circular list */
@@ -782,14 +1056,14 @@ typedef struct enif_resource_t
#define SIZEOF_ErlNifResource(SIZE) (offsetof(ErlNifResource,data) + (SIZE))
#define DATA_TO_RESOURCE(PTR) ((ErlNifResource*)((char*)(PTR) - offsetof(ErlNifResource,data)))
-static ErlNifResourceType* find_resource_type(const char* name)
+static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
{
ErlNifResourceType* type;
for (type = resource_type_list.next;
type != &resource_type_list;
type = type->next) {
- if (sys_strcmp(type->name, name) == 0) {
+ if (type->module == module && type->name == name) {
return type;
}
}
@@ -822,33 +1096,42 @@ static void steal_resource_type(ErlNifResourceType* type)
if (type->dtor != NULL
&& erts_refc_dectest(&lib->rt_dtor_cnt, 0) == 0
- && lib->is_orphan) {
+ && lib->mod == NULL) {
/* last type with destructor gone, close orphan lib */
close_lib(lib);
}
if (erts_refc_dectest(&lib->rt_cnt, 0) == 0
- && lib->is_orphan) {
+ && lib->mod == NULL) {
erts_free(ERTS_ALC_T_NIF, lib);
}
}
ErlNifResourceType*
-enif_open_resource_type(ErlNifEnv* env, const char* type_name,
- ErlNifResourceDtor* dtor,
- enum ErlNifResourceFlags flags,
- enum ErlNifResourceFlags* tried)
+enif_open_resource_type(ErlNifEnv* env,
+ const char* module_str,
+ const char* name_str,
+ ErlNifResourceDtor* dtor,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
{
- ErlNifResourceType* type = find_resource_type(type_name);
- enum ErlNifResourceFlags op = flags;
+ ErlNifResourceType* type = NULL;
+ ErlNifResourceFlags op = flags;
+ Eterm module_am, name_am;
+
ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(module_str == NULL); /* for now... */
+ module_am = make_atom(env->mod_nif->mod->module);
+ name_am = enif_make_atom(env, name_str);
+
+ type = find_resource_type(module_am, name_am);
if (type == NULL) {
if (flags & ERL_NIF_RT_CREATE) {
type = erts_alloc(ERTS_ALC_T_NIF,
- sizeof(struct enif_resource_type_t)
- + sys_strlen(type_name));
+ sizeof(struct enif_resource_type_t));
type->dtor = dtor;
- sys_strcpy(type->name, type_name);
+ type->module = module_am;
+ type->name = name_am;
erts_refc_init(&type->refc, 1);
type->owner = env->mod_nif;
type->prev = &resource_type_list;
@@ -896,13 +1179,13 @@ static void nif_resource_dtor(Binary* bin)
if (erts_refc_dectest(&type->refc, 0) == 0) {
ASSERT(type->next == NULL);
ASSERT(type->owner != NULL);
- ASSERT(type->owner->is_orphan);
+ ASSERT(type->owner->mod == NULL);
steal_resource_type(type);
erts_free(ERTS_ALC_T_NIF, type);
}
}
-void* enif_alloc_resource(ErlNifEnv* env, ErlNifResourceType* type, unsigned size)
+void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
{
Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor);
ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin);
@@ -915,7 +1198,7 @@ void* enif_alloc_resource(ErlNifEnv* env, ErlNifResourceType* type, unsigned siz
return resource->data;
}
-void enif_release_resource(ErlNifEnv* env, void* obj)
+void enif_release_resource(void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
@@ -929,6 +1212,18 @@ void enif_release_resource(ErlNifEnv* env, void* obj)
}
}
+void enif_keep_resource(void* obj)
+{
+ ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+#ifdef DEBUG
+ erts_refc_inc(&resource->nif_refc, 1);
+#endif
+ erts_refc_inc(&bin->binary.refc, 2);
+}
+
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
@@ -937,15 +1232,30 @@ ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
}
+ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
+ const void* data, size_t size)
+{
+ Eterm bin = enif_make_resource(env, obj);
+ ProcBin* pb = (ProcBin*) binary_val(bin);
+ pb->bytes = (byte*) data;
+ pb->size = size;
+ return bin;
+}
+
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
void** objp)
{
+ ProcBin* pb;
Binary* mbin;
ErlNifResource* resource;
if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
return 0;
}
- mbin = ((ProcBin*) binary_val(term))->val;
+ pb = (ProcBin*) binary_val(term);
+ /*if (pb->size != 0) {
+ return 0; / * Or should we allow "resource binaries" as handles? * /
+ }*/
+ mbin = pb->val;
resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(mbin);
if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
|| resource->type != type) {
@@ -955,7 +1265,7 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
return 1;
}
-unsigned enif_sizeof_resource(ErlNifEnv* env, void* obj)
+size_t enif_sizeof_resource(void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
Binary* bin = &ERTS_MAGIC_BIN_FROM_DATA(resource)->binary;
@@ -967,37 +1277,22 @@ unsigned enif_sizeof_resource(ErlNifEnv* env, void* obj)
***************************************************************************/
-static Uint** get_func_pp(Eterm* mod_code, Eterm f_atom, unsigned arity)
+static BeamInstr** get_func_pp(BeamInstr* mod_code, Eterm f_atom, unsigned arity)
{
int n = (int) mod_code[MI_NUM_FUNCTIONS];
int j;
for (j = 0; j < n; ++j) {
- Uint* code_ptr = (Uint*) mod_code[MI_FUNCTIONS+j];
- ASSERT(code_ptr[0] == (Uint) BeamOp(op_i_func_info_IaaI));
+ BeamInstr* code_ptr = (BeamInstr*) mod_code[MI_FUNCTIONS+j];
+ ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if (f_atom == ((Eterm) code_ptr[3])
&& arity == ((unsigned) code_ptr[4])) {
- return (Uint**) &mod_code[MI_FUNCTIONS+j];
+ return (BeamInstr**) &mod_code[MI_FUNCTIONS+j];
}
}
return NULL;
}
-/*static void refresh_cached_nif_data(Eterm* mod_code,
- struct erl_module_nif* mod_nif)
-{
- int i;
- for (i=0; i < mod_nif->entry->num_of_funcs; i++) {
- Eterm f_atom;
- ErlNifFunc* func = &mod_nif->entry->funcs[i];
- Uint* code_ptr;
-
- erts_atom_get(func->name, sys_strlen(func->name), &f_atom);
- code_ptr = *get_func_pp(mod_code, f_atom, func->arity);
- code_ptr[5+2] = (Uint) mod_nif->priv_data;
- }
-}*/
-
static Eterm mkatom(const char *str)
{
return am_atom_put(str, sys_strlen(str));
@@ -1089,7 +1384,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
static const char bad_lib[] = "bad_lib";
static const char reload[] = "reload";
static const char upgrade[] = "upgrade";
- char lib_name[256]; /* BUGBUG: Max-length? */
+ char* lib_name = NULL;
void* handle = NULL;
void* init_func;
ErlNifEntry* entry = NULL;
@@ -1098,15 +1393,20 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
Module* mod;
Eterm mod_atom;
Eterm f_atom;
- Eterm* caller;
+ BeamInstr* caller;
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
Eterm ret = am_ok;
int veto;
struct erl_module_nif* lib = NULL;
- len = intlist_to_buf(BIF_ARG_1, lib_name, sizeof(lib_name)-1);
- if (len < 1) {
- /*erts_fprintf(stderr, "Invalid library path name '%T'\r\n", BIF_ARG_1);*/
+ len = list_length(BIF_ARG_1);
+ if (len < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1);
+
+ if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) {
+ erts_free(ERTS_ALC_T_TMP, lib_name);
BIF_ERROR(BIF_P, BADARG);
}
lib_name[len] = '\0';
@@ -1165,7 +1465,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
- Uint** code_pp;
+ BeamInstr** code_pp;
ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom)
|| (code_pp = get_func_pp(mod->code, f_atom, f->arity))==NULL) {
@@ -1195,7 +1495,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
lib->entry = entry;
erts_refc_init(&lib->rt_cnt, 0);
erts_refc_init(&lib->rt_dtor_cnt, 0);
- lib->is_orphan = 0;
+ lib->mod = mod;
env.mod_nif = lib;
if (mod->nif != NULL) { /* Reload */
int k;
@@ -1268,22 +1568,23 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
mod->nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
- Uint* code_ptr;
+ BeamInstr* code_ptr;
erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom);
code_ptr = *get_func_pp(mod->code, f_atom, entry->funcs[i].arity);
if (code_ptr[1] == 0) {
- code_ptr[5+0] = (Uint) BeamOp(op_call_nif);
+ code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- BpData* bp = (BpData*) code_ptr[1];
- bp->orig_instr = (Uint) BeamOp(op_call_nif);
+ BpData** bps = (BpData**) code_ptr[1];
+ BpData* bp = (BpData*) bps[bp_sched2ix()];
+ bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
- code_ptr[5+1] = (Uint) entry->funcs[i].fptr;
- code_ptr[5+2] = (Uint) lib;
+ code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
+ code_ptr[5+2] = (BeamInstr) lib;
}
}
- else {
+ else {
error:
ASSERT(ret != am_ok);
if (lib != NULL) {
@@ -1297,6 +1598,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_smp_release_system();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_free(ERTS_ALC_T_TMP, lib_name);
BIF_RET(ret);
}
@@ -1308,7 +1610,7 @@ erts_unload_nif(struct erl_module_nif* lib)
ErlNifResourceType* next;
ASSERT(erts_smp_is_system_blocked(0));
ASSERT(lib != NULL);
- ASSERT(!lib->is_orphan);
+ ASSERT(lib->mod != NULL);
for (rt = resource_type_list.next;
rt != &resource_type_list;
rt = next) {
@@ -1338,7 +1640,7 @@ erts_unload_nif(struct erl_module_nif* lib)
else {
ASSERT(erts_refc_read(&lib->rt_cnt, 1) > 0);
}
- lib->is_orphan = 1;
+ lib->mod = NULL; /* orphan lib */
}
void erl_nif_init()
@@ -1347,6 +1649,53 @@ void erl_nif_init()
resource_type_list.prev = &resource_type_list;
resource_type_list.dtor = NULL;
resource_type_list.owner = NULL;
- resource_type_list.name[0] = '\0';
+ resource_type_list.module = THE_NON_VALUE;
+ resource_type_list.name = THE_NON_VALUE;
}
+#ifdef READONLY_CHECK
+/* Use checksums to assert that NIFs do not write into inspected binaries
+*/
+static void readonly_check_dtor(struct enif_tmp_obj_t*);
+static unsigned calc_checksum(unsigned char* ptr, unsigned size);
+
+struct readonly_check_t
+{
+ struct enif_tmp_obj_t hdr;
+ unsigned char* ptr;
+ unsigned size;
+ unsigned checksum;
+};
+static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
+{
+ struct readonly_check_t* obj = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(struct readonly_check_t));
+ obj->hdr.next = env->tmp_obj_list;
+ env->tmp_obj_list = &obj->hdr;
+ obj->hdr.dtor = &readonly_check_dtor;
+ obj->ptr = ptr;
+ obj->size = sz;
+ obj->checksum = calc_checksum(ptr, sz);
+}
+static void readonly_check_dtor(struct enif_tmp_obj_t* o)
+{
+ struct readonly_check_t* obj = (struct readonly_check_t*) o;
+ unsigned chksum = calc_checksum(obj->ptr, obj->size);
+ if (chksum != obj->checksum) {
+ fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ"
+ " %x != %x\r\nABORTING\r\n", chksum, obj->checksum);
+ abort();
+ }
+ erts_free(ERTS_ALC_T_TMP, obj);
+}
+static unsigned calc_checksum(unsigned char* ptr, unsigned size)
+{
+ unsigned i, sum = 0;
+ for (i=0; i<size; i++) {
+ sum ^= ptr[i] << ((i % 4)*8);
+ }
+ return sum;
+}
+
+#endif /* READONLY_CHECK */
+
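The resource API reworked above is now keyed by (module, name) string pairs and no longer threads an ErlNifEnv* through allocation and release. A minimal sketch of a NIF using the new calls, not part of the patch; the module name "my_mod", resource name "my_res" and payload struct are hypothetical:

#include "erl_nif.h"

typedef struct { int fd; } my_res_t;              /* hypothetical payload */
static ErlNifResourceType* my_res_type;

static void my_res_dtor(ErlNifEnv* env, void* obj)
{
    /* close descriptors or free memory owned by the object here */
}

static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    ErlNifResourceFlags tried;
    /* (module, name) now identifies the type; no env in alloc/release */
    my_res_type = enif_open_resource_type(env, "my_mod", "my_res",
                                          my_res_dtor,
                                          ERL_NIF_RT_CREATE, &tried);
    return my_res_type == NULL;
}

static ERL_NIF_TERM new_resource(ErlNifEnv* env, int argc,
                                 const ERL_NIF_TERM argv[])
{
    my_res_t* r = enif_alloc_resource(my_res_type, sizeof(my_res_t));
    ERL_NIF_TERM term = enif_make_resource(env, r);
    enif_release_resource(r);   /* the term keeps the object alive */
    return term;
}
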
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 1ccf00293e..ee3a7cd5f4 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -23,18 +23,67 @@
#ifndef __ERL_NIF_H__
#define __ERL_NIF_H__
+
#include "erl_drv_nif.h"
/* Version history:
** 0.1: R13B03
** 1.0: R13B04
+** 2.0: R14A
*/
-#define ERL_NIF_MAJOR_VERSION 1
+#define ERL_NIF_MAJOR_VERSION 2
#define ERL_NIF_MINOR_VERSION 0
#include <stdlib.h>
+#ifdef SIZEOF_CHAR
+# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
+# undef SIZEOF_CHAR
+#endif
+#ifdef SIZEOF_SHORT
+# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
+# undef SIZEOF_SHORT
+#endif
+#ifdef SIZEOF_INT
+# define SIZEOF_INT_SAVED__ SIZEOF_INT
+# undef SIZEOF_INT
+#endif
+#ifdef SIZEOF_LONG
+# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
+# undef SIZEOF_LONG
+#endif
+#ifdef SIZEOF_LONG_LONG
+# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
+# undef SIZEOF_LONG_LONG
+#endif
+#ifdef HALFWORD_HEAP_EMULATOR
+# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
+# undef HALFWORD_HEAP_EMULATOR
+#endif
+#include "erl_int_sizes_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef unsigned __int64 ErlNifUInt64;
+typedef __int64 ErlNifSInt64;
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlNifUInt64;
+typedef long ErlNifSInt64;
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlNifUInt64;
+typedef long long ErlNifSInt64;
+#else
+#error No 64-bit integer type
+#endif
+
+#ifdef HALFWORD_HEAP_EMULATOR
+typedef unsigned int ERL_NIF_TERM;
+#else
typedef unsigned long ERL_NIF_TERM;
+#endif
struct enif_environment_t;
typedef struct enif_environment_t ErlNifEnv;
@@ -63,7 +112,7 @@ typedef struct enif_entry_t
typedef struct
{
- unsigned size;
+ size_t size;
unsigned char* data;
/* Internals (avert your eyes) */
@@ -73,17 +122,22 @@ typedef struct
typedef struct enif_resource_type_t ErlNifResourceType;
typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
-enum ErlNifResourceFlags
+typedef enum
{
ERL_NIF_RT_CREATE = 1,
ERL_NIF_RT_TAKEOVER = 2
-};
+}ErlNifResourceFlags;
typedef enum
{
ERL_NIF_LATIN1 = 1
}ErlNifCharEncoding;
+typedef struct
+{
+ ERL_NIF_TERM pid; /* internal, may change */
+}ErlNifPid;
+
typedef ErlDrvSysInfo ErlNifSysInfo;
typedef struct ErlDrvTid_ *ErlNifTid;
@@ -116,8 +170,6 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
#endif
-
-
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
# define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks;
# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
@@ -133,7 +185,18 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
#endif
+#ifdef __cplusplus
+}
+# define ERL_NIF_INIT_PROLOGUE extern "C" {
+# define ERL_NIF_INIT_EPILOGUE }
+#else
+# define ERL_NIF_INIT_PROLOGUE
+# define ERL_NIF_INIT_EPILOGUE
+#endif
+
+
#define ERL_NIF_INIT(NAME, FUNCS, LOAD, RELOAD, UPGRADE, UNLOAD) \
+ERL_NIF_INIT_PROLOGUE \
ERL_NIF_INIT_GLOB \
ERL_NIF_INIT_DECL(NAME) \
{ \
@@ -148,7 +211,9 @@ ERL_NIF_INIT_DECL(NAME) \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
-}
+} \
+ERL_NIF_INIT_EPILOGUE
+
#endif /* __ERL_NIF_H__ */
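Besides the SIZEOF_* bookkeeping and the fixed-width ErlNifSInt64/ErlNifUInt64 types, the header now wraps its declarations in extern "C" and gives ERL_NIF_INIT a prologue/epilogue, so the generated entry point keeps C linkage even when the file is compiled as C++. A minimal module skeleton against the 2.0 header, shown as an illustration only; "my_mod" and the exported function are placeholders:

#include "erl_nif.h"

static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_atom(env, "hello");
}

static ErlNifFunc nif_funcs[] = {
    {"hello", 0, hello}              /* {name, arity, C implementation} */
};

/* Expands to a nif_init() entry point; ERL_NIF_INIT_PROLOGUE/EPILOGUE
 * re-open extern "C" around it under a C++ compiler. */
ERL_NIF_INIT(my_mod, nif_funcs, NULL, NULL, NULL, NULL)
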
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index ec07a976b2..eca506593d 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -23,29 +23,29 @@
#ifdef ERL_NIF_API_FUNC_DECL
ERL_NIF_API_FUNC_DECL(void*,enif_priv_data,(ErlNifEnv*));
-ERL_NIF_API_FUNC_DECL(void*,enif_alloc,(ErlNifEnv*, size_t size));
-ERL_NIF_API_FUNC_DECL(void,enif_free,(ErlNifEnv*, void* ptr));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc,(size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_free,(void* ptr));
ERL_NIF_API_FUNC_DECL(int,enif_is_atom,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_binary,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_ref,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_inspect_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, ErlNifBinary* bin));
-ERL_NIF_API_FUNC_DECL(int,enif_alloc_binary,(ErlNifEnv*, unsigned size, ErlNifBinary* bin));
-ERL_NIF_API_FUNC_DECL(int,enif_realloc_binary,(ErlNifEnv*, ErlNifBinary* bin, unsigned size));
-ERL_NIF_API_FUNC_DECL(void,enif_release_binary,(ErlNifEnv*, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_alloc_binary,(size_t size, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_realloc_binary,(ErlNifBinary* bin, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_binary,(ErlNifBinary* bin));
ERL_NIF_API_FUNC_DECL(int,enif_get_int,(ErlNifEnv*, ERL_NIF_TERM term, int* ip));
ERL_NIF_API_FUNC_DECL(int,enif_get_ulong,(ErlNifEnv*, ERL_NIF_TERM term, unsigned long* ip));
ERL_NIF_API_FUNC_DECL(int,enif_get_double,(ErlNifEnv*, ERL_NIF_TERM term, double* dp));
ERL_NIF_API_FUNC_DECL(int,enif_get_list_cell,(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM* head, ERL_NIF_TERM* tail));
ERL_NIF_API_FUNC_DECL(int,enif_get_tuple,(ErlNifEnv* env, ERL_NIF_TERM tpl, int* arity, const ERL_NIF_TERM** array));
-ERL_NIF_API_FUNC_DECL(int,enif_is_identical,(ErlNifEnv* env, ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
-ERL_NIF_API_FUNC_DECL(int,enif_compare,(ErlNifEnv* env, ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(int,enif_is_identical,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
+ERL_NIF_API_FUNC_DECL(int,enif_compare,(ERL_NIF_TERM lhs, ERL_NIF_TERM rhs));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_binary,(ErlNifEnv* env, ErlNifBinary* bin));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_badarg,(ErlNifEnv* env));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int,(ErlNifEnv* env, int i));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_ulong,(ErlNifEnv* env, unsigned long i));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_double,(ErlNifEnv* env, double d));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_atom,(ErlNifEnv* env, const char* name));
-ERL_NIF_API_FUNC_DECL(int,enif_make_existing_atom,(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom));
+ERL_NIF_API_FUNC_DECL(int,enif_make_existing_atom,(ErlNifEnv* env, const char* name, ERL_NIF_TERM* atom, ErlNifCharEncoding));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple,(ErlNifEnv* env, unsigned cnt, ...));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list,(ErlNifEnv* env, unsigned cnt, ...));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_cell,(ErlNifEnv* env, ERL_NIF_TERM car, ERL_NIF_TERM cdr));
@@ -82,13 +82,13 @@ ERL_NIF_API_FUNC_DECL(int,enif_equal_tids,(ErlNifTid tid1, ErlNifTid tid2));
ERL_NIF_API_FUNC_DECL(void,enif_thread_exit,(void *resp));
ERL_NIF_API_FUNC_DECL(int,enif_thread_join,(ErlNifTid, void **respp));
-ERL_NIF_API_FUNC_DECL(void*,enif_realloc,(ErlNifEnv*, void* ptr, size_t size));
+ERL_NIF_API_FUNC_DECL(void*,enif_realloc,(void* ptr, size_t size));
ERL_NIF_API_FUNC_DECL(void,enif_system_info,(ErlNifSysInfo *sip, size_t si_size));
ERL_NIF_API_FUNC_DECL(int,enif_fprintf,(void/* FILE* */ *filep, const char *format, ...));
ERL_NIF_API_FUNC_DECL(int,enif_inspect_iolist_as_binary,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifBinary* bin));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_sub_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, unsigned pos, unsigned size));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_sub_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, size_t pos, size_t size));
ERL_NIF_API_FUNC_DECL(int,enif_get_string,(ErlNifEnv*, ERL_NIF_TERM list, char* buf, unsigned len, ErlNifCharEncoding));
-ERL_NIF_API_FUNC_DECL(int,enif_get_atom,(ErlNifEnv*, ERL_NIF_TERM atom, char* buf, unsigned len));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom,(ErlNifEnv*, ERL_NIF_TERM atom, char* buf, unsigned len, ErlNifCharEncoding));
ERL_NIF_API_FUNC_DECL(int,enif_is_fun,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_pid,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_is_port,(ErlNifEnv*, ERL_NIF_TERM term));
@@ -99,12 +99,36 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_long,(ErlNifEnv*, long i));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_from_array,(ErlNifEnv*, const ERL_NIF_TERM arr[], unsigned cnt));
ERL_NIF_API_FUNC_DECL(int,enif_is_empty_list,(ErlNifEnv*, ERL_NIF_TERM term));
-ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type,(ErlNifEnv*, const char* type_name, void (*dtor)(ErlNifEnv*,void *), enum ErlNifResourceFlags flags, enum ErlNifResourceFlags* tried));
-ERL_NIF_API_FUNC_DECL(void*,enif_alloc_resource,(ErlNifEnv*, ErlNifResourceType* type, unsigned size));
-ERL_NIF_API_FUNC_DECL(void,enif_release_resource,(ErlNifEnv*, void* obj));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type,(ErlNifEnv*, const char* module_str, const char* name_str, void (*dtor)(ErlNifEnv*,void *), ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc_resource,(ErlNifResourceType* type, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_release_resource,(void* obj));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource,(ErlNifEnv*, void* obj));
ERL_NIF_API_FUNC_DECL(int,enif_get_resource,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifResourceType* type, void** objp));
-ERL_NIF_API_FUNC_DECL(unsigned,enif_sizeof_resource,(ErlNifEnv*, void* obj));
+ERL_NIF_API_FUNC_DECL(size_t,enif_sizeof_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(unsigned char*,enif_make_new_binary,(ErlNifEnv*,size_t size,ERL_NIF_TERM* termp));
+ERL_NIF_API_FUNC_DECL(int,enif_is_list,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_is_tuple,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_get_atom_length,(ErlNifEnv*, ERL_NIF_TERM atom, unsigned* len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(int,enif_get_list_length,(ErlNifEnv* env, ERL_NIF_TERM term, unsigned* len));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_atom_len,(ErlNifEnv* env, const char* name, size_t len));
+ERL_NIF_API_FUNC_DECL(int, enif_make_existing_atom_len,(ErlNifEnv* env, const char* name, size_t len, ERL_NIF_TERM* atom, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string_len,(ErlNifEnv* env, const char* string, size_t len, ErlNifCharEncoding));
+ERL_NIF_API_FUNC_DECL(ErlNifEnv*,enif_alloc_env,(void));
+ERL_NIF_API_FUNC_DECL(void,enif_free_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(void,enif_clear_env,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(int,enif_send,(ErlNifEnv* env, const ErlNifPid* to_pid, ErlNifEnv* msg_env, ERL_NIF_TERM msg));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_copy,(ErlNifEnv* dst_env, ERL_NIF_TERM src_term));
+ERL_NIF_API_FUNC_DECL(ErlNifPid*,enif_self,(ErlNifEnv* caller_env, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(int,enif_get_local_pid,(ErlNifEnv* env, ERL_NIF_TERM, ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(void,enif_keep_resource,(void* obj));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_resource_binary,(ErlNifEnv*,void* obj,const void* data, size_t size));
+#if SIZEOF_LONG != 8
+ERL_NIF_API_FUNC_DECL(int,enif_get_int64,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifSInt64* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_uint64,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifUInt64* ip));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int64,(ErlNifEnv*, ErlNifSInt64));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64));
+#endif
+
/*
** Add last to keep compatibility on Windows!!!
*/
@@ -195,6 +219,29 @@ ERL_NIF_API_FUNC_DECL(unsigned,enif_sizeof_resource,(ErlNifEnv*, void* obj));
# define enif_make_resource ERL_NIF_API_FUNC_MACRO(enif_make_resource)
# define enif_get_resource ERL_NIF_API_FUNC_MACRO(enif_get_resource)
# define enif_sizeof_resource ERL_NIF_API_FUNC_MACRO(enif_sizeof_resource)
+# define enif_make_new_binary ERL_NIF_API_FUNC_MACRO(enif_make_new_binary)
+# define enif_is_list ERL_NIF_API_FUNC_MACRO(enif_is_list)
+# define enif_is_tuple ERL_NIF_API_FUNC_MACRO(enif_is_tuple)
+# define enif_get_atom_length ERL_NIF_API_FUNC_MACRO(enif_get_atom_length)
+# define enif_get_list_length ERL_NIF_API_FUNC_MACRO(enif_get_list_length)
+# define enif_make_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_atom_len)
+# define enif_make_existing_atom_len ERL_NIF_API_FUNC_MACRO(enif_make_existing_atom_len)
+# define enif_make_string_len ERL_NIF_API_FUNC_MACRO(enif_make_string_len)
+# define enif_alloc_env ERL_NIF_API_FUNC_MACRO(enif_alloc_env)
+# define enif_free_env ERL_NIF_API_FUNC_MACRO(enif_free_env)
+# define enif_clear_env ERL_NIF_API_FUNC_MACRO(enif_clear_env)
+# define enif_send ERL_NIF_API_FUNC_MACRO(enif_send)
+# define enif_make_copy ERL_NIF_API_FUNC_MACRO(enif_make_copy)
+# define enif_self ERL_NIF_API_FUNC_MACRO(enif_self)
+# define enif_get_local_pid ERL_NIF_API_FUNC_MACRO(enif_get_local_pid)
+# define enif_keep_resource ERL_NIF_API_FUNC_MACRO(enif_keep_resource)
+# define enif_make_resource_binary ERL_NIF_API_FUNC_MACRO(enif_make_resource_binary)
+#if SIZEOF_LONG != 8
+# define enif_get_int64 ERL_NIF_API_FUNC_MACRO(enif_get_int64)
+# define enif_get_uint64 ERL_NIF_API_FUNC_MACRO(enif_get_uint64)
+# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
+# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
+#endif
#endif
@@ -217,9 +264,15 @@ ERL_NIF_API_FUNC_DECL(unsigned,enif_sizeof_resource,(ErlNifEnv*, void* obj));
# define enif_make_tuple7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_tuple(ENV,7,E1,E2,E3,E4,E5,E6,E7)
# define enif_make_tuple8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_tuple(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
# define enif_make_tuple9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_tuple(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
+
+# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
+
+#if SIZEOF_LONG == 8
+# define enif_get_int64 enif_get_long
+# define enif_get_uint64 enif_get_ulong
+# define enif_make_int64 enif_make_long
+# define enif_make_uint64 enif_make_ulong
#endif
-#ifndef enif_get_data
-# define enif_get_data enif_priv_data /* deprecated */
#endif
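The table above also shows how the 64-bit accessors are wired: real API entries when long is not 8 bytes, plain macros over the long variants otherwise. A NIF written against them compiles unchanged either way; a small hedged example (function name is made up):

#include "erl_nif.h"

static ERL_NIF_TERM sum64(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifSInt64 a, b;
    /* enif_get_int64/enif_make_int64 may be macros or API functions here */
    if (!enif_get_int64(env, argv[0], &a) || !enif_get_int64(env, argv[1], &b))
        return enif_make_badarg(env);
    return enif_make_int64(env, a + b);
}
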
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 87dbfc2a04..ae1316eba2 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2001-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -251,7 +251,7 @@ extern int erts_use_r9_pids_ports;
* Refs *
\* */
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define internal_ref_no_of_numbers(x) \
(internal_ref_data((x))[0])
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 42b28d987c..e430b4ad77 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2001-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -80,6 +80,8 @@ dist_table_alloc(void *dep_tmpl)
Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
return dep_tmpl;
@@ -92,7 +94,7 @@ dist_table_alloc(void *dep_tmpl)
dep->prev = NULL;
erts_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_x(&dep->rwmtx, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -580,6 +582,18 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
ErlNode ne;
ne.sysname = sysname;
ne.creation = creation;
+
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ res = hash_get(&erts_node_table, (void *) &ne);
+ if (res && res != erts_this_node) {
+ long refc = erts_refc_inctest(&res->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&res->refc, 1);
+ }
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ if (res)
+ return res;
+
erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
@@ -696,8 +710,12 @@ erts_set_this_node(Eterm sysname, Uint creation)
void erts_init_node_tables(void)
{
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
HashFunctions f;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
f.alloc = (HALLOC_FUN) dist_table_alloc;
@@ -719,9 +737,10 @@ void erts_init_node_tables(void)
erts_this_dist_entry->prev = NULL;
erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
- erts_smp_rwmtx_init_x(&erts_this_dist_entry->rwmtx,
- "dist_entry",
- make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_smp_rwmtx_init_opt_x(&erts_this_dist_entry->rwmtx,
+ &rwmtx_opt,
+ "dist_entry",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
erts_this_dist_entry->sysname = am_Noname;
erts_this_dist_entry->cid = NIL;
erts_this_dist_entry->connection_id = 0;
@@ -772,8 +791,8 @@ void erts_init_node_tables(void)
(void) hash_put(&erts_node_table, (void *) erts_this_node);
- erts_smp_rwmtx_init(&erts_node_table_rwmtx, "node_table");
- erts_smp_rwmtx_init(&erts_dist_table_rwmtx, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
references_atoms_need_init = 1;
}
@@ -1087,49 +1106,62 @@ insert_offheap2(ErlOffHeap *oh, void *arg)
static void
insert_offheap(ErlOffHeap *oh, int type, Eterm id)
{
- if(oh->externals) {
- ExternalThing *etp = oh->externals;
- while (etp) {
- insert_node(etp->node, type, id);
- etp = etp->next;
- }
- }
+ union erl_off_heap_ptr u;
+ struct insert_offheap2_arg a;
+ a.type = BIN_REF;
- if(oh->mso) {
- ProcBin *pb;
- struct insert_offheap2_arg a;
- a.type = BIN_REF;
- for(pb = oh->mso; pb; pb = pb->next) {
- if(IsMatchProgBinary(pb->val)) {
+ for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
+ switch (thing_subtag(u.hdr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ if(IsMatchProgBinary(u.pb->val)) {
InsertedBin *ib;
int insert_bin = 1;
for (ib = inserted_bins; ib; ib = ib->next)
- if(ib->bin_val == pb->val) {
+ if(ib->bin_val == u.pb->val) {
insert_bin = 0;
break;
}
if (insert_bin) {
- Uint id_heap[BIG_UINT_HEAP_SIZE];
+#if HALFWORD_HEAP
+ UWord val = (UWord) u.pb->val;
+ DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE*2); /* extra place allocated */
+#else
+ DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE);
+#endif
Uint *hp = &id_heap[0];
InsertedBin *nib;
- a.id = erts_bld_uint(&hp, NULL, (Uint) pb->val);
- erts_match_prog_foreach_offheap(pb->val,
+#if HALFWORD_HEAP
+ int actual_need = BIG_UWORD_HEAP_SIZE(val);
+ ASSERT(actual_need <= (BIG_UINT_HEAP_SIZE*2));
+ UseTmpHeapNoproc(actual_need);
+ a.id = erts_bld_uword(&hp, NULL, (UWord) val);
+#else
+ UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
+ a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
+#endif
+ erts_match_prog_foreach_offheap(u.pb->val,
insert_offheap2,
(void *) &a);
nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
- nib->bin_val = pb->val;
+ nib->bin_val = u.pb->val;
nib->next = inserted_bins;
inserted_bins = nib;
+#if HALFWORD_HEAP
+ UnUseTmpHeapNoproc(actual_need);
+#else
+ UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
+#endif
}
- }
+ }
+ break;
+ case FUN_SUBTAG:
+ break; /* No need to */
+ default:
+ ASSERT(is_external_header(u.hdr->thing_word));
+ insert_node(u.ext->node, type, id);
+ break;
}
}
-
-#if 0
- if(oh->funs) {
- /* No need to */
- }
-#endif
}
static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
@@ -1190,12 +1222,15 @@ static void
insert_bif_timer(Eterm receiver, Eterm msg, ErlHeapFragment *bp, void *arg)
{
if (bp) {
- Eterm heap[3];
+ DeclareTmpHeapNoproc(heap,3);
+
+ UseTmpHeapNoproc(3);
insert_offheap(&bp->off_heap,
TIMER_REF,
(is_internal_pid(receiver)
? receiver
: TUPLE2(&heap[0], AM_process, receiver)));
+ UnUseTmpHeapNoproc(3);
}
}
@@ -1230,7 +1265,7 @@ setup_reference_table(void)
DistEntry *dep;
HashInfo hi;
int i;
- Eterm heap[3];
+ DeclareTmpHeapNoproc(heap,3);
inserted_bins = NULL;
@@ -1251,6 +1286,7 @@ setup_reference_table(void)
/* Go through the hole system, and build a table of all references
to ErlNode and DistEntry structures */
+ UseTmpHeapNoproc(3);
insert_node(erts_this_node,
SYSTEM_REF,
TUPLE2(&heap[0], AM_system, am_undefined));
@@ -1261,11 +1297,13 @@ setup_reference_table(void)
HEAP_REF,
TUPLE2(&heap[0], AM_processes, am_undefined));
#endif
+ UnUseTmpHeapNoproc(3);
/* Insert all processes */
for (i = 0; i < erts_max_processes; i++)
if (process_tab[i]) {
ErlMessage *msg;
+
/* Insert Heap */
insert_offheap(&(process_tab[i]->off_heap),
HEAP_REF,
@@ -1352,21 +1390,22 @@ setup_reference_table(void)
{ /* Add binaries stored elsewhere ... */
ErlOffHeap oh;
- ProcBin pb[2] = {{0},{0}};
- ProcBin *mso = NULL;
+ ProcBin pb[2];
int i = 0;
Binary *default_match_spec;
Binary *default_meta_match_spec;
- /* Only the ProcBin members val and next will be inspected
+ oh.first = NULL;
+ /* Only the ProcBin members thing_word, val and next will be inspected
(by insert_offheap()) */
#undef ADD_BINARY
-#define ADD_BINARY(Bin) \
- if ((Bin)) { \
- pb[i].val = (Bin); \
- pb[i].next = mso; \
- mso = &pb[i]; \
- i++; \
+#define ADD_BINARY(Bin) \
+ if ((Bin)) { \
+ pb[i].thing_word = REFC_BINARY_SUBTAG; \
+ pb[i].val = (Bin); \
+ pb[i].next = oh.first; \
+ oh.first = (struct erl_off_heap_header*) &pb[i]; \
+ i++; \
}
erts_get_default_trace_pattern(NULL,
@@ -1378,11 +1417,6 @@ setup_reference_table(void)
ADD_BINARY(default_match_spec);
ADD_BINARY(default_meta_match_spec);
- oh.mso = mso;
- oh.externals = NULL;
-#ifndef HYBRID /* FIND ME! */
- oh.funs = NULL;
-#endif
insert_offheap(&oh, BIN_REF, AM_match_spec);
#undef ADD_BINARY
}
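The erts_find_or_insert_node() hunk above adds a read-locked fast path: the table is probed under the shared lock first, and the exclusive lock is taken only when an insert is actually needed. The same shape in a plain-pthreads sketch, with the reference-count repair left out; table_get/table_put are hypothetical helpers:

#include <pthread.h>

typedef struct node node_t;
node_t *table_get(const char *key);       /* hypothetical lookup */
node_t *table_put(const char *key);       /* hypothetical insert, idempotent */

static pthread_rwlock_t tab_lock = PTHREAD_RWLOCK_INITIALIZER;

node_t *find_or_insert(const char *key)
{
    node_t *res;

    pthread_rwlock_rdlock(&tab_lock);
    res = table_get(key);
    pthread_rwlock_unlock(&tab_lock);
    if (res)
        return res;                       /* common case: no write lock */

    pthread_rwlock_wrlock(&tab_lock);
    res = table_put(key);                 /* safe if another thread raced us */
    pthread_rwlock_unlock(&tab_lock);
    return res;
}
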
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index c48dac6219..eb759b87e9 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2001-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -61,7 +61,7 @@
#define ERTS_DE_QFLGS_ALL (ERTS_DE_QFLG_BUSY \
| ERTS_DE_QFLG_EXIT)
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713f713f713UL)
#else
#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713)
diff --git a/erts/emulator/beam/erl_obsolete.c b/erts/emulator/beam/erl_obsolete.c
deleted file mode 100644
index 9c5a7c7ff9..0000000000
--- a/erts/emulator/beam/erl_obsolete.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_driver.h"
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * *
- * ------------------------- OBSOLETE! DO NOT USE! ------------------------- *
- * *
-\* */
-
-/* cut from ../obsolete/driver.h (since it doesn't mix well with other
- * headers from the emulator).
- */
-#ifdef __WIN32__
-#ifdef CONST
-# undef CONST
-#endif
-#endif
-
-#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
-# define _USING_PROTOTYPES_ 1
-# define _ANSI_ARGS_(x) x
-# define CONST const
-#else
-# define _ANSI_ARGS_(x) ()
-# define CONST
-#endif
-
-typedef void* erl_mutex_t;
-typedef void* erl_cond_t;
-typedef void* erl_thread_t;
-
-EXTERN erl_mutex_t erts_mutex_create _ANSI_ARGS_((void));
-EXTERN int erts_mutex_destroy _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_lock _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_unlock _ANSI_ARGS_((erl_mutex_t));
-
-EXTERN erl_cond_t erts_cond_create _ANSI_ARGS_((void));
-EXTERN int erts_cond_destroy _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_signal _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_broadcast _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_wait _ANSI_ARGS_((erl_cond_t, erl_mutex_t));
-EXTERN int erts_cond_timedwait _ANSI_ARGS_((erl_cond_t, erl_mutex_t, long));
-
-EXTERN int erts_thread_create _ANSI_ARGS_((erl_thread_t*,
- void* (*func)(void*),
- void* arg,
- int detached));
-EXTERN erl_thread_t erts_thread_self _ANSI_ARGS_((void));
-EXTERN void erts_thread_exit _ANSI_ARGS_((void*));
-EXTERN int erts_thread_join _ANSI_ARGS_((erl_thread_t, void**));
-EXTERN int erts_thread_kill _ANSI_ARGS_((erl_thread_t));
-
-/*
- * These functions implement the thread interface in ../obsolete/driver.h.
- * Do *not* use this interface! Within the emulator, use the erl_threads.h,
- * erl_smp.h, or ethread.h interface. From a driver use the thread interface
- * in erl_driver.h.
- */
-
-erl_mutex_t
-erts_mutex_create(void)
-{
- return (erl_mutex_t) erl_drv_mutex_create(NULL);
-}
-
-int
-erts_mutex_destroy(erl_mutex_t mtx)
-{
- erl_drv_mutex_destroy((ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_mutex_lock(erl_mutex_t mtx)
-{
- erl_drv_mutex_lock((ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_mutex_unlock(erl_mutex_t mtx)
-{
- erl_drv_mutex_unlock((ErlDrvMutex *) mtx);
- return 0;
-}
-
-erl_cond_t
-erts_cond_create(void)
-{
- return (erl_cond_t) erl_drv_cond_create(NULL);
-}
-
-int
-erts_cond_destroy(erl_cond_t cnd)
-{
- erl_drv_cond_destroy((ErlDrvCond *) cnd);
- return 0;
-}
-
-
-int
-erts_cond_signal(erl_cond_t cnd)
-{
- erl_drv_cond_signal((ErlDrvCond *) cnd);
- return 0;
-}
-
-int
-erts_cond_broadcast(erl_cond_t cnd)
-{
- erl_drv_cond_broadcast((ErlDrvCond *) cnd);
- return 0;
-}
-
-
-int
-erts_cond_wait(erl_cond_t cnd, erl_mutex_t mtx)
-{
- erl_drv_cond_wait((ErlDrvCond *) cnd, (ErlDrvMutex *) mtx);
- return 0;
-}
-
-int
-erts_cond_timedwait(erl_cond_t cnd, erl_mutex_t mtx, long ms)
-{
- return ENOTSUP;
-}
-
-int
-erts_thread_create(erl_thread_t *tid,
- void* (*func)(void*),
- void* arg,
- int detached)
-{
- if (detached)
- return ENOTSUP;
- return erl_drv_thread_create(NULL, (ErlDrvTid *) tid, func, arg, NULL);
-}
-
-erl_thread_t
-erts_thread_self(void)
-{
- return (erl_thread_t) erl_drv_thread_self();
-}
-
-void
-erts_thread_exit(void *res)
-{
- erl_drv_thread_exit(res);
-}
-
-int
-erts_thread_join(erl_thread_t tid, void **respp)
-{
- return erl_drv_thread_join((ErlDrvTid) tid, respp);
-}
-
-int
-erts_thread_kill(erl_thread_t tid)
-{
- return ENOTSUP;
-}
-
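The deleted erl_obsolete.c only adapted the long-obsolete erts_mutex_*/erts_thread_* names onto the driver thread API; anything still depending on it can call erl_driver.h directly. A minimal sketch (the worker function is a placeholder):

#include <stddef.h>
#include "erl_driver.h"

static void *worker(void *arg)
{
    return arg;                            /* do the real work here */
}

int spawn_and_join(void)
{
    ErlDrvTid tid;
    void *res;
    if (erl_drv_thread_create("worker", &tid, worker, NULL, NULL) != 0)
        return -1;                         /* creation failed */
    return erl_drv_thread_join(tid, &res); /* 0 on success */
}
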
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 0b6bb0d8e9..c10724b951 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -625,6 +625,7 @@ erts_port_task_schedule(Eterm id,
if (!enq_port) {
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ erts_smp_runq_unlock(runq);
}
else {
enqueue_port(runq, pp);
@@ -634,9 +635,10 @@ erts_port_task_schedule(Eterm id,
profile_runnable_port(pp, am_active);
}
+ erts_smp_runq_unlock(runq);
+
erts_smp_notify_inc_runq(runq);
}
- erts_smp_runq_unlock(runq);
return 0;
}
@@ -944,8 +946,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
- erts_smp_notify_inc_runq(xrunq);
erts_smp_runq_unlock(xrunq);
+ erts_smp_notify_inc_runq(xrunq);
}
#endif
port_was_enqueued = 1;
@@ -969,11 +971,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_port_release(pp);
#else
{
- long refc = erts_smp_atomic_dectest(&pp->refc);
+ long refc;
+ erts_smp_mtx_unlock(pp->lock);
+ refc = erts_smp_atomic_dectest(&pp->refc);
ASSERT(refc >= 0);
- if (refc > 0)
- erts_smp_mtx_unlock(pp->lock);
- else {
+ if (refc == 0) {
erts_smp_runq_unlock(runq);
erts_port_cleanup(pp); /* Might aquire runq lock */
erts_smp_runq_lock(runq);
@@ -1112,7 +1114,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
dequeue_port(from_rq, prt);
erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
enqueue_port(to_rq, prt);
- erts_smp_notify_inc_runq(to_rq);
return ERTS_MIGRATE_SUCCESS;
}
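The erl_port_task.c hunks above consistently drop the run-queue lock before calling erts_smp_notify_inc_runq(), so a scheduler woken by the notification does not immediately block on the lock the notifier still holds. The same unlock-then-signal idea in a generic pthreads sketch; waiters must of course still recheck their predicate under the mutex:

#include <pthread.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cond = PTHREAD_COND_INITIALIZER;
static int q_len;

void enqueue_and_notify(void)
{
    pthread_mutex_lock(&q_lock);
    q_len++;                        /* enqueue under the lock */
    pthread_mutex_unlock(&q_lock);  /* release first...       */
    pthread_cond_signal(&q_cond);   /* ...then wake a waiter  */
}
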
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 7fe3f3bca5..d9f132f067 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -246,7 +246,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount)
if (is_CP(obj)) {
PRINT_STRING(res, fn, arg, "<cp/header:");
- PRINT_POINTER(res, fn, arg, obj);
+ PRINT_POINTER(res, fn, arg, cp_val(obj));
PRINT_CHAR(res, fn, arg, '>');
return res;
}
@@ -406,7 +406,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount)
break;
case EXPORT_DEF:
{
- Export* ep = (Export *) (export_val(obj))[1];
+ Export* ep = *((Export **) (export_val(obj) + 1));
Atom* module = atom_tab(atom_val(ep->code[0]));
Atom* name = atom_tab(atom_val(ep->code[1]));
@@ -438,7 +438,7 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount)
break;
default:
PRINT_STRING(res, fn, arg, "<unknown:");
- PRINT_POINTER(res, fn, arg, obj);
+ PRINT_POINTER(res, fn, arg, (UWord) obj);
PRINT_CHAR(res, fn, arg, '>');
break;
}
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 996806fc75..761096e9ad 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -38,6 +38,7 @@
#include "erl_instrument.h"
#include "erl_threads.h"
#include "erl_binary.h"
+#include "beam_bp.h"
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
@@ -45,7 +46,13 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
-#define ERTS_SCHED_SLEEP_SPINCOUNT 10000
+#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
+
+#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000
+#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \
+ (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT)
+#define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0
#define ERTS_WAKEUP_OTHER_LIMIT (100*CONTEXT_REDS/2)
#define ERTS_WAKEUP_OTHER_DEC 10
@@ -91,9 +98,9 @@ do { \
#define ERTS_EMPTY_RUNQ(RQ) \
((RQ)->len == 0 && (RQ)->misc.start == NULL)
-extern Eterm beam_apply[];
-extern Eterm beam_exit[];
-extern Eterm beam_continue_exit[];
+extern BeamInstr beam_apply[];
+extern BeamInstr beam_exit[];
+extern BeamInstr beam_continue_exit[];
static Sint p_last;
static Sint p_next;
@@ -105,6 +112,10 @@ Uint erts_no_schedulers;
Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
+#ifdef ERTS_SMP
+Uint erts_max_main_threads;
+#endif
+
int erts_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -115,16 +126,34 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
int erts_disable_proc_not_running_opt;
-#define ERTS_SCHED_CHANGING_ONLINE 1
-#define ERTS_SCHED_CHANGING_MULTI_SCHED 2
+#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((long) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_MSB (((long) 1) << 1)
+#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((long) 1) << 2)
+
+#ifndef DEBUG
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+ erts_smp_atomic_set(&schdlr_sspnd.changing, (VAL))
+
+#else
+
+#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
+do { \
+ long old_val__ = erts_smp_atomic_xchg(&schdlr_sspnd.changing, \
+ (VAL)); \
+ ASSERT(old_val__ == (OLD_VAL)); \
+} while (0)
+
+#endif
+
static struct {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- int changing;
int online;
int curr_online;
int wait_curr_online;
+ erts_smp_atomic_t changing;
erts_smp_atomic_t active;
struct {
erts_smp_atomic_t ongoing;
@@ -180,6 +209,7 @@ static ErtsCpuBindData *scheduler2cpu_map;
erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
typedef enum {
+ ERTS_CPU_BIND_UNDEFINED,
ERTS_CPU_BIND_SPREAD,
ERTS_CPU_BIND_PROCESSOR_SPREAD,
ERTS_CPU_BIND_THREAD_SPREAD,
@@ -190,6 +220,9 @@ typedef enum {
ERTS_CPU_BIND_NONE
} ErtsCpuBindOrder;
+#define ERTS_CPU_BIND_DEFAULT_BIND \
+ ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD
+
ErtsCpuBindOrder cpu_bind_order;
static erts_cpu_topology_t *user_cpudata;
@@ -226,6 +259,17 @@ typedef union {
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+#ifdef ERTS_SMP
+
+typedef union {
+ ErtsSchedulerSleepInfo ssi;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
+} ErtsAlignedSchedulerSleepInfo;
+
+static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
+
+#endif
+
#ifndef BM_COUNTERS
static int processes_busy;
#endif
@@ -283,8 +327,15 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
-#define ERTS_RUNQ_IX(IX) (&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_SCHEDULER_IX(IX) (&erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_RUNQ_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_run_queues), \
+ &erts_aligned_run_queues[(IX)].runq)
+#define ERTS_SCHEDULER_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &erts_aligned_scheduler_data[(IX)].esd)
+#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
+ &aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -334,7 +385,7 @@ do { \
static void init_processes_bif(void);
static void save_terminating_process(Process *p);
static void exec_misc_ops(ErtsRunQueue *);
-static void print_function_from_pc(int to, void *to_arg, Eterm* x);
+static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
int yreg);
#ifdef ERTS_SMP
@@ -348,6 +399,11 @@ static void signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size
#endif
+static int reader_group_lookup(int logical);
+static void create_tmp_cpu_topology_copy(erts_cpu_topology_t **cpudata,
+ int *cpudata_size);
+static void destroy_tmp_cpu_topology_copy(erts_cpu_topology_t *cpudata);
+
static void early_cpu_bind_init(void);
static void late_cpu_bind_init(void);
@@ -388,7 +444,12 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
= ERTS_PSD_DIST_ENTRY_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
- = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
+ = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].get_locks
+ = ERTS_PSD_CALL_TIME_BP_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks
+ = ERTS_PSD_CALL_TIME_BP_SET_LOCKS;
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -572,6 +633,76 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
#ifdef ERTS_SMP
+void
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
+{
+ switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
+ case ERTS_SSI_FLG_POLL_SLEEPING:
+ erts_sys_schedule_interrupt(1);
+ break;
+ case ERTS_SSI_FLG_TSE_SLEEPING:
+ erts_tse_set(ssi->event);
+ break;
+ case 0:
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n",
+ __FILE__, __LINE__);
+ break;
+ }
+}
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+void
+erts_smp_notify_check_children_needed(void)
+{
+ int i;
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ long aux_work;
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
+ aux_work = erts_smp_atomic_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
+ erts_sched_poke(ssi);
+ }
+}
+#endif
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+blockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
+ aux_work = erts_smp_atomic_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+ erts_check_children();
+ }
+#endif
+ }
+ return aux_work;
+}
+
+#endif
+
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+static ERTS_INLINE long
+nonblockable_aux_work(ErtsSchedulerData *esdp,
+ ErtsSchedulerSleepInfo *ssi,
+ long aux_work)
+{
+ if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
+
+ }
+}
+#endif
+
static void
prepare_for_block(void *vrq)
{
@@ -624,7 +755,31 @@ erts_active_schedulers(void)
return as;
}
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
#ifdef ERTS_SMP
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ }
+ return 0;
+#else
+ return !erts_port_task_have_outstanding_io_tasks();
+#endif
+}
+
+#ifdef ERTS_SMP
+
+static ERTS_INLINE void
+sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq->waiting < 0);
+ rq->waiting *= -1;
+}
static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
@@ -666,7 +821,11 @@ empty_runq(ErtsRunQueue *rq)
if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
long empty = erts_smp_atomic_read(&no_empty_run_queues);
- ASSERT(0 <= empty && empty < erts_no_run_queues);
+ /*
+ * For a short period of time no_empty_run_queues may have
+ * been increased twice for a specific run queue.
+ */
+ ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
erts_smp_atomic_inc(&no_empty_run_queues);
}
@@ -679,107 +838,314 @@ non_empty_runq(ErtsRunQueue *rq)
if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
long empty = erts_smp_atomic_read(&no_empty_run_queues);
- ASSERT(0 < empty && empty <= erts_no_run_queues);
+ /*
+ * For a short period of time no_empty_run_queues may have
+ * been increased twice for a specific run queue.
+ */
+ ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
erts_smp_atomic_dec(&no_empty_run_queues);
}
}
-static ERTS_INLINE int
-sched_spin_wake(ErtsRunQueue *rq)
+static long
+sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = 0;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_atomic_inc(&rq->spin_wake);
- return 1;
- }
- return 0;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (!(oflgs & ERTS_SSI_FLG_SUSPENDED));
+ return oflgs;
}
-static ERTS_INLINE int
-sched_spin_wake_all(ErtsRunQueue *rq)
+static long
+sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- return 0;
-#else
- long val;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ long xflgs = ERTS_SSI_FLG_WAITING;
- val = erts_smp_atomic_read(&rq->spin_waiter);
- ASSERT(val >= 0);
- if (val != 0)
- erts_smp_atomic_add(&rq->spin_wake, val);
- return val;
-#endif
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ } while (oflgs & ERTS_SSI_FLG_WAITING);
+ return oflgs;
}
+static long
+sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ long until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
+
+static long
+sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
+{
+ long oflgs;
+ long nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+
+ if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
+ != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ }
+}
+
+#define ERTS_SCHED_WAIT_WOKEN(FLGS) \
+ (((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
+ != ERTS_SSI_FLG_WAITING)
+
static void
-sched_sys_wait(Uint no, ErtsRunQueue *rq)
+scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
- long dt;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ int spincount;
+ long flgs;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
+
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ erts_smp_spin_lock(&rq->sleepers.lock);
+ flgs = sched_prep_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+ /* Go suspend instead... */
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+ return;
+ }
+
+ ssi->prev = NULL;
+ ssi->next = rq->sleepers.list;
+ if (rq->sleepers.list)
+ rq->sleepers.list->prev = ssi;
+ rq->sleepers.list = ssi;
+ erts_smp_spin_unlock(&rq->sleepers.lock);
+
+ /*
+ * If all schedulers are waiting, one of them *should*
+ * be waiting in erl_sys_schedule()
+ */
+
+ if (!prepare_for_sys_schedule()) {
+
+ sched_waiting(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ tse_wait:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ tse_blockable_aux_work:
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- sched_waiting_sys(no, rq);
+ while (1) {
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
- erl_sys_schedule(1); /* Might give us something to do */
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT;
+
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto tse_blockable_aux_work;
+ }
+#endif
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_runq_unlock(rq);
}
- }
- erts_smp_runq_lock(rq);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
+ erts_smp_runq_lock(rq);
+ sched_active(esdp->no, rq);
+
}
else {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ long dt;
+
+ erts_smp_atomic_set(&function_calls, 0);
+ *fcalls = 0;
+
+ sched_waiting_sys(esdp->no, rq);
+
+ erts_smp_runq_unlock(rq);
+
+ spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT;
+
+ while (spincount-- > 0) {
+
+ sys_poll_aux_work:
+
+ erl_sys_schedule(1); /* Might give us something to do */
+
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+
+ sys_aux_work:
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ }
+
+ /*
+ * If we got new I/O tasks we aren't allowed to
+ * call erl_sys_schedule() until it is handled.
+ */
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+ * Got to check that we still got I/O tasks; otherwise
+ * we have to continue checking for I/O...
+ */
+ if (!prepare_for_sys_schedule()) {
+ spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
+ goto tse_wait;
+ }
+ }
+ }
+
+ erts_smp_runq_lock(rq);
+
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
*/
- if (!erts_port_task_have_outstanding_io_tasks()) {
-#endif
+ if (erts_port_task_have_outstanding_io_tasks()) {
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ /*
+ * Got to check that we still got I/O tasks; otherwise
+ * we have to wait in erl_sys_schedule() after all...
+ */
+ if (prepare_for_sys_schedule())
+ goto do_sys_schedule;
+
+ /*
+ * Not allowed to wait in erl_sys_schedule;
+ * do tse wait instead...
+ */
+ sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ erts_smp_runq_unlock(rq);
+ spincount = 0;
+ goto tse_wait;
+ }
+ else {
+ do_sys_schedule:
erts_sys_schedule_interrupt(0);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ if (!(flgs & ERTS_SSI_FLG_WAITING))
+ goto sys_locked_woken;
+ erts_smp_runq_unlock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ goto sys_poll_aux_work;
+ }
+
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+
erts_smp_runq_unlock(rq);
erl_sys_schedule(0);
@@ -787,134 +1153,103 @@ sched_sys_wait(Uint no, ErtsRunQueue *rq)
dt = do_time_read_and_reset();
if (dt) bump_timer(dt);
- erts_smp_runq_lock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_WAITING)
+ goto sys_aux_work;
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ sys_woken:
+ erts_smp_runq_lock(rq);
+ sys_locked_woken:
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ sched_active_sys(esdp->no, rq);
}
}
-#endif
-
- sched_active_sys(no, rq);
-}
-static void
-sched_cnd_wait(Uint no, ErtsRunQueue *rq)
-{
-#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
- int val;
- int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#endif
-
- sched_waiting(no, rq);
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
-#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
-#else
- erts_smp_atomic_inc(&rq->spin_waiter);
- erts_smp_mtx_unlock(&rq->mtx);
-
- while (spincount-- > 0) {
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0) {
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val != 0)
- goto woken;
- if (spincount == 0)
- goto sleep;
- erts_smp_mtx_unlock(&rq->mtx);
- }
- }
-
- erts_smp_mtx_lock(&rq->mtx);
- val = erts_smp_atomic_read(&rq->spin_wake);
- ASSERT(val >= 0);
- if (val == 0) {
- sleep:
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
- }
- else {
- woken:
- erts_smp_atomic_dec(&rq->spin_wake);
- ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
- erts_smp_atomic_dec(&rq->spin_waiter);
- ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
- }
-#endif
-
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- (void *) rq);
-
- sched_active(no, rq);
}
-static void
-wake_one_scheduler(void)
-{
- ASSERT(erts_common_run_queue);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(erts_common_run_queue));
- if (erts_common_run_queue->waiting) {
- if (!sched_spin_wake(erts_common_run_queue)) {
- if (erts_common_run_queue->waiting == -1) /* One scheduler waiting
- and doing so in
- sys_schedule */
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&erts_common_run_queue->cnd);
- }
+static ERTS_INLINE long
+ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ /* reset all flags but suspended */
+ long oflgs;
+ long nflgs = 0;
+ long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return oflgs;
+ nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ xflgs = oflgs;
}
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq)
+wake_scheduler(ErtsRunQueue *rq, int incq, int one)
{
- ASSERT(!erts_common_run_queue);
- ASSERT(-1 <= rq->waiting && rq->waiting <= 1);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- if (rq->waiting && !rq->woken) {
- if (!sched_spin_wake(rq)) {
- if (rq->waiting < 0)
- erts_sys_schedule_interrupt(1);
- else
- erts_smp_cnd_signal(&rq->cnd);
+ int res;
+ ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepList *sl;
+
+ /*
+ * Calling this with the run queue unlocked is not strictly
+ * necessary for thread safety or deadlock prevention. It will,
+ * however, cost us performance if the run queue is locked during
+ * wakeup of another scheduler, so all code *should* call this
+ * function without holding the run queue lock.
+ */
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
+
+ sl = &rq->sleepers;
+
+ erts_smp_spin_lock(&sl->lock);
+ ssi = sl->list;
+ if (!ssi)
+ erts_smp_spin_unlock(&sl->lock);
+ else if (one) {
+ long flgs;
+ if (ssi->prev)
+ ssi->prev->next = ssi->next;
+ else {
+ ASSERT(sl->list == ssi);
+ sl->list = ssi->next;
}
- rq->woken = 1;
- if (incq)
+ if (ssi->next)
+ ssi->next->prev = ssi->prev;
+
+ res = sl->list != NULL;
+ erts_smp_spin_unlock(&sl->lock);
+
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
+
+ if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
non_empty_runq(rq);
}
+ else {
+ sl->list = NULL;
+ erts_smp_spin_unlock(&sl->lock);
+ do {
+ ErtsSchedulerSleepInfo *wake_ssi = ssi;
+ ssi = ssi->next;
+ erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
+ } while (ssi);
+ }
}
static void
wake_all_schedulers(void)
{
- if (erts_common_run_queue) {
- erts_smp_runq_lock(erts_common_run_queue);
- if (erts_common_run_queue->waiting) {
- if (erts_common_run_queue->waiting < 0)
- erts_sys_schedule_interrupt(1);
- sched_spin_wake_all(erts_common_run_queue);
- erts_smp_cnd_broadcast(&erts_common_run_queue->cnd);
- }
- erts_smp_runq_unlock(erts_common_run_queue);
- }
+ if (erts_common_run_queue)
+ wake_scheduler(erts_common_run_queue, 0, 0);
else {
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- wake_scheduler(rq, 0);
- erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
}
}
}
@@ -929,14 +1264,14 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
wrq = ERTS_RUNQ_IX(ix);
iflgs = erts_smp_atomic_read(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
- erts_smp_xrunq_lock(crq, wrq);
if (activate) {
if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ erts_smp_xrunq_lock(crq, wrq);
wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
+ erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0);
- erts_smp_xrunq_unlock(crq, wrq);
+ wake_scheduler(wrq, 0, 1);
return 1;
}
return 0;
@@ -982,15 +1317,13 @@ static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- wake_one_scheduler();
- else
- wake_scheduler(runq, 1);
+ if (runq)
+ wake_scheduler(runq, 1, 1);
#endif
}
void
-erts_smp_notify_inc_runq__(ErtsRunQueue *runq)
+erts_smp_notify_inc_runq(ErtsRunQueue *runq)
{
smp_notify_inc_runq(runq);
}
@@ -1136,20 +1469,23 @@ static void
evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
{
Port *prt;
+ int notify_to_rq = 0;
int prio;
int prt_locked = 0;
int rq_locked = 0;
int evac_rq_locked = 1;
+ ErtsMigrateResult mres;
erts_smp_runq_lock(evac_rq);
+ erts_smp_atomic_bor(&evac_rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
-
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1177,9 +1513,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
/* Evacuate scheduled ports */
prt = evac_rq->ports.start;
while (prt) {
- (void) erts_port_migrate(prt, &prt_locked,
+ mres = erts_port_migrate(prt, &prt_locked,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (prt_locked)
erts_smp_port_unlock(prt);
if (!evac_rq_locked) {
@@ -1208,9 +1546,11 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
goto end_of_proc;
}
- (void) erts_proc_migrate(proc, &proc_locks,
+ mres = erts_proc_migrate(proc, &proc_locks,
evac_rq, &evac_rq_locked,
rq, &rq_locked);
+ if (mres == ERTS_MIGRATE_SUCCESS)
+ notify_to_rq = 1;
if (proc_locks)
erts_smp_proc_unlock(proc, proc_locks);
if (!evac_rq_locked) {
@@ -1242,10 +1582,13 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (rq_locked)
erts_smp_runq_unlock(rq);
- if (!evac_rq_locked)
- erts_smp_runq_lock(evac_rq);
- wake_scheduler(evac_rq, 0);
- erts_smp_runq_unlock(evac_rq);
+ if (evac_rq_locked)
+ erts_smp_runq_unlock(evac_rq);
+
+ if (notify_to_rq)
+ smp_notify_inc_runq(rq);
+
+ wake_scheduler(evac_rq, 0, 1);
}
static int
@@ -1473,31 +1816,6 @@ try_steal_task(ErtsRunQueue *rq)
return res;
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
-void
-erts_smp_notify_check_children_needed(void)
-{
- int i;
- for (i = 0; i < erts_no_schedulers; i++) {
- erts_smp_runq_lock(ERTS_SCHEDULER_IX(i)->run_queue);
- ERTS_SCHEDULER_IX(i)->check_children = 1;
- if (!erts_common_run_queue)
- wake_scheduler(ERTS_SCHEDULER_IX(i)->run_queue, 0);
- erts_smp_runq_unlock(ERTS_SCHEDULER_IX(i)->run_queue);
- }
- if (ongoing_multi_scheduling_block()) {
- /* Also blocked schedulers need to check children */
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- for (i = 0; i < erts_no_schedulers; i++)
- ERTS_SCHEDULER_IX(i)->blocked_check_children = 1;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
- if (erts_common_run_queue)
- wake_all_schedulers();
-}
-#endif
-
/* Run queue balancing */
typedef struct {
@@ -2078,16 +2396,20 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_aligned_run_queues = erts_alloc(ERTS_ALC_T_RUNQS,
(sizeof(ErtsAlignedRunQueue)*(n+1)));
- if ((((Uint) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) == 0)
+ if ((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) != 0)
erts_aligned_run_queues = ((ErtsAlignedRunQueue *)
- ((((Uint) erts_aligned_run_queues)
+ ((((UWord) erts_aligned_run_queues)
& ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE));
+ ASSERT((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) == 0);
+
#ifdef ERTS_SMP
erts_smp_atomic_init(&no_empty_run_queues, 0);
#endif
+ erts_no_run_queues = n;
+
for (ix = 0; ix < n; ix++) {
int pix, rix;
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
@@ -2102,8 +2424,10 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
- erts_smp_atomic_init(&rq->spin_waiter, 0);
- erts_smp_atomic_init(&rq->spin_wake, 0);
+#ifdef ERTS_SMP
+ erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
+ rq->sleepers.list = NULL;
+#endif
rq->waiting = 0;
rq->woken = 0;
@@ -2154,7 +2478,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
- erts_no_run_queues = n;
#ifdef ERTS_SMP
@@ -2169,23 +2492,59 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#endif
+ n = (int) no_schedulers;
+ erts_no_schedulers = n;
+
+#ifdef ERTS_SMP
+ /* Create and initialize scheduler sleep info */
+
+ aligned_sched_sleep_info = erts_alloc(ERTS_ALC_T_SCHDLR_SLP_INFO,
+ (sizeof(ErtsAlignedSchedulerSleepInfo)
+ *(n+1)));
+ if ((((UWord) aligned_sched_sleep_info) & ERTS_CACHE_LINE_MASK) != 0)
+ aligned_sched_sleep_info = ((ErtsAlignedSchedulerSleepInfo *)
+ ((((UWord) aligned_sched_sleep_info)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ for (ix = 0; ix < n; ix++) {
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+#if 0 /* no need to initialize these... */
+ ssi->next = NULL;
+ ssi->prev = NULL;
+#endif
+ erts_smp_atomic_init(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in sched_thread_func */
+ erts_smp_atomic_init(&ssi->aux_work, 0);
+ }
+#endif
+
/* Create and initialize scheduler specific data */
- n = (int) no_schedulers;
erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
(sizeof(ErtsAlignedSchedulerData)
*(n+1)));
- if ((((Uint) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) == 0)
+ if ((((UWord) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) != 0)
erts_aligned_scheduler_data = ((ErtsAlignedSchedulerData *)
- ((((Uint) erts_aligned_scheduler_data)
+ ((((UWord) erts_aligned_scheduler_data)
& ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE));
+
+ ASSERT((((UWord) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) == 0);
+
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
+ esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->free_process = NULL;
+#if HALFWORD_HEAP
+ /* Registers need to be heap allocated (correct memory range) for tracing to work */
+ esdp->save_reg = erts_alloc(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * sizeof(Eterm));
+#endif
+#endif
+#if !HEAP_ON_C_STACK
+ esdp->num_tmp_heap_used = 0;
#endif
esdp->no = (Uint) ix+1;
esdp->current_process = NULL;
@@ -2206,11 +2565,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->check_children = 0;
- esdp->blocked_check_children = 0;
-#endif
- erts_smp_atomic_init(&esdp->suspended, 0);
erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2219,7 +2573,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- schdlr_sspnd.changing = 0;
+ erts_smp_atomic_init(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
@@ -2242,7 +2596,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic_set(&(ERTS_SCHEDULER_IX(ix)->suspended), 1);
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2253,7 +2608,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
schdlr_sspnd.wait_curr_online = no_schedulers_online;
schdlr_sspnd.curr_online *= 2; /* Boot strapping... */
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
erts_smp_atomic_init(&doing_sys_schedule, 0);
@@ -2401,13 +2757,115 @@ susp_sched_resume_block(void *unused)
}
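+
+/*
+ * Reset the sleep flags of a suspended scheduler's sleep info and
+ * poke the scheduler awake. Gives up if the SUSPENDED flag has
+ * already been cleared by someone else.
+ */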
static void
+scheduler_ix_resume_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long oflgs;
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, 0, xflgs);
+ if (oflgs == xflgs) {
+ erts_sched_finish_poke(ssi, oflgs);
+ break;
+ }
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+}
+
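+/*
+ * Move the sleep info flags into the suspended sleep state
+ * (SLEEPING|WAITING|SUSPENDED). Returns the new flags on success,
+ * or the observed flags as soon as the SUSPENDED flag has been
+ * cleared by another thread.
+ */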
+static long
+sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = xpct;
+
+ do {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ xflgs = oflgs;
+ } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+
+ return oflgs;
+}
+
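+/*
+ * Spin (yielding now and then) for as long as the scheduler stays in
+ * the suspended sleep state and the spin count is not exhausted.
+ * Returns the flags last read.
+ */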
+static long
+sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
+{
+ int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int sc = spincount;
+ long flgs;
+
+ do {
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if ((flgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ break;
+ }
+ ERTS_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+ } while (--sc > 0);
+ return flgs;
+}
+
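+/*
+ * Reset the thread event and mark the suspended scheduler as
+ * TSE_SLEEPING so that it can be woken through its thread event.
+ * Bails out with the observed flags if the suspended sleep state
+ * has been left in the meantime.
+ */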
+static long
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+{
+ long oflgs;
+ long nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ long xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ if (oflgs == xflgs)
+ return nflgs;
+ if ((oflgs & (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED))
+ != (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ return oflgs;
+ }
+ xflgs = oflgs;
+ }
+}
+
+static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
+ long flgs;
+ int changing;
long no = (long) esdp->no;
ErtsRunQueue *rq = esdp->run_queue;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
int curr_online = 1;
int wake = 0;
+ int reset_read_group = 0;
+#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
+ long aux_work;
+#endif
/*
* Schedulers may be suspended in two different ways:
@@ -2429,113 +2887,151 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (scheduler2cpu_map[esdp->no].bound_id >= 0
&& erts_unbind_from_cpu(erts_cpuinfo) == 0) {
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
+ reset_read_group = 1;
}
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (reset_read_group)
+ erts_smp_rwmtx_set_reader_group(0);
+
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(0, 0);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
- ASSERT(active_schedulers >= 1);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED) {
- if (active_schedulers == schdlr_sspnd.msb.wait_active)
- wake = 1;
- if (active_schedulers == 1)
- schdlr_sspnd.changing = 0;
- }
+ flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- while (1) {
-
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children;
- erts_smp_runq_lock(esdp->run_queue);
- check_children = esdp->check_children;
- esdp->check_children = 0;
- erts_smp_runq_unlock(esdp->run_queue);
- if (check_children) {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_check_children();
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
+ if (active_schedulers == schdlr_sspnd.msb.wait_active)
+ wake = 1;
+ if (active_schedulers == 1) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
+ }
}
-#endif
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE) {
- int changed = 0;
- if (no > schdlr_sspnd.online && curr_online) {
- schdlr_sspnd.curr_online--;
- curr_online = 0;
- changed = 1;
+ while (1) {
+ if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
+ int changed = 0;
+ if (no > schdlr_sspnd.online && curr_online) {
+ schdlr_sspnd.curr_online--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= schdlr_sspnd.online && !curr_online) {
+ schdlr_sspnd.curr_online++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
+ wake = 1;
+ if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
+ changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ }
}
- else if (no <= schdlr_sspnd.online && !curr_online) {
- schdlr_sspnd.curr_online++;
- curr_online = 1;
- changed = 1;
+
+ if (wake) {
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ wake = 0;
}
- if (changed
- && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- wake = 1;
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online)
- schdlr_sspnd.changing = 0;
- }
- if (wake) {
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- wake = 0;
- }
+ flgs = erts_smp_atomic_read(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ|ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ blockable_aux_work:
+ blockable_aux_work(esdp, ssi, aux_work);
+#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (1) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ while (1) {
+ long flgs;
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+#endif
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->blocked_check_children)
- break;
+ flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+
+ flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED));
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
+ break;
+
+
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ goto blockable_aux_work;
+ }
#endif
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ }
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE)
- break;
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)))
- break;
- if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && !erts_smp_atomic_read(&esdp->suspended))
- break;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- esdp->blocked_check_children = 0;
-#endif
+ active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && schdlr_sspnd.online == active_schedulers) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- }
+ ASSERT(no <= schdlr_sspnd.online);
+ ASSERT(!erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
- active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
- if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED
- && schdlr_sspnd.online == active_schedulers) {
- schdlr_sspnd.changing = 0;
}
+
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ ASSERT(curr_online);
+
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_active);
+ if (esdp->no <= erts_max_main_threads)
+ erts_thr_set_main_status(1, (int) esdp->no);
+
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
@@ -2600,8 +3096,10 @@ erts_schedulers_state(Uint *total,
int yield_allowed)
{
int res;
+ long changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (yield_allowed && schdlr_sspnd.changing)
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
*active = *online = schdlr_sspnd.online;
@@ -2621,6 +3119,7 @@ erts_set_schedulers_online(Process *p,
Sint *old_no)
{
int ix, res, no, have_unlocked_plocks;
+ long changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -2630,7 +3129,8 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
else {
@@ -2639,17 +3139,19 @@ erts_set_schedulers_online(Process *p,
res = ERTS_SCHDLR_SSPND_DONE;
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
schdlr_sspnd.online = no;
if (no > online) {
int ix;
schdlr_sspnd.wait_curr_online = no;
- if (ongoing_multi_scheduling_block())
- /* No schedulers to resume */;
+ if (ongoing_multi_scheduling_block()) {
+ for (ix = online; ix < no; ix++)
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
else if (erts_common_run_queue) {
for (ix = online; ix < no; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 0);
+ scheduler_ix_resume_wake(ix);
}
else {
if (plocks) {
@@ -2663,6 +3165,7 @@ erts_set_schedulers_online(Process *p,
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/*
* Spread evacuation paths among all online
@@ -2677,7 +3180,6 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
res = ERTS_SCHDLR_SSPND_DONE;
}
else /* if (no < online) */ {
@@ -2694,12 +3196,17 @@ erts_set_schedulers_online(Process *p,
schdlr_sspnd.wait_curr_online = no+1;
}
- if (ongoing_multi_scheduling_block())
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
- else if (erts_common_run_queue) {
+ if (ongoing_multi_scheduling_block()) {
for (ix = no; ix < online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
- 1);
+ erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else if (erts_common_run_queue) {
+ for (ix = no; ix < online; ix++) {
+ ErtsSchedulerSleepInfo *ssi;
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ erts_smp_atomic_bor(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ }
wake_all_schedulers();
}
else {
@@ -2726,7 +3233,10 @@ erts_set_schedulers_online(Process *p,
erts_smp_atomic_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ERTS_FOREACH_OP_RUNQ(rq, wake_scheduler(rq, 0));
+ for (ix = no; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq, 0, 1);
+ }
}
}
@@ -2740,6 +3250,12 @@ erts_set_schedulers_online(Process *p,
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -2754,15 +3270,16 @@ ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
int ix, res, have_unlocked_plocks = 0;
+ long changing;
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-
- if (schdlr_sspnd.changing) {
+ changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
else if (on) { /* ------ BLOCK ------ */
- if (erts_is_multi_scheduling_blocked()) {
+ if (schdlr_sspnd.msb.procs) {
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
schdlr_sspnd.msb.procs = plp;
@@ -2772,19 +3289,22 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
else {
+ int online = schdlr_sspnd.online;
p->flags |= F_HAVE_BLCKD_MSCHED;
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
+ ASSERT(0 == erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
- if (schdlr_sspnd.online == 1) {
+ if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB
+ | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
if (p->scheduler_data->no == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 1;
@@ -2798,17 +3318,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 1);
+ for (ix = 1; ix < online; ix++)
+ erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
erts_smp_mtx_lock(&balance_info.update_mtx);
erts_smp_atomic_set(&balance_info.used_runqs, 1);
- for (ix = 0; ix < schdlr_sspnd.online; ix++) {
+ for (ix = 0; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
erts_smp_runq_unlock(rq);
}
@@ -2833,6 +3355,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
+ ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
+ ? (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ : (ERTS_SCHDLR_SSPND_CHNG_WAITER
+ == erts_smp_atomic_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -2876,7 +3405,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.msb.procs)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else {
- schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
#ifdef DEBUG
ERTS_FOREACH_RUNQ(rq,
{
@@ -2903,13 +3432,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
- schdlr_sspnd.changing = 0;
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 0);
+ erts_smp_atomic_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
else {
int online = schdlr_sspnd.online;
@@ -2926,6 +3455,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_lock(rq);
ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4);
erts_smp_runq_unlock(rq);
+ scheduler_ix_resume_wake(ix);
}
/* Spread evacuation paths among all online run queues */
@@ -2941,7 +3471,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
}
res = ERTS_SCHDLR_SSPND_DONE;
}
@@ -2968,8 +3497,11 @@ erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
int
erts_is_multi_scheduling_blocked(void)
{
- return (erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing)
- && erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ int res;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ res = schdlr_sspnd.msb.procs != NULL;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ return res;
}
Eterm
@@ -2978,7 +3510,7 @@ erts_multi_scheduling_blockers(Process *p)
Eterm res = NIL;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (erts_is_multi_scheduling_blocked()) {
+ if (schdlr_sspnd.msb.procs) {
Eterm *hp, *hp_end;
ErtsProcList *plp1, *plp2;
Uint max_size;
@@ -3010,18 +3542,34 @@ erts_multi_scheduling_blockers(Process *p)
static void *
sched_thread_func(void *vesdp)
{
+#ifdef ERTS_SMP
+ Uint no = ((ErtsSchedulerData *) vesdp)->no;
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
- Uint no = ((ErtsSchedulerData *) vesdp)->no;
erts_snprintf(&buf[0], 31, "scheduler %bpu", no);
erts_lc_set_thread_name(&buf[0]);
}
#endif
- erts_alloc_reg_scheduler_id(((ErtsSchedulerData *) vesdp)->no);
+ erts_alloc_reg_scheduler_id(no);
erts_tsd_set(sched_data_key, vesdp);
#ifdef ERTS_SMP
+
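+ /*
+ * The first erts_max_main_threads schedulers get "main thread"
+ * status and are assigned to reader groups round robin.
+ */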
+ if (no <= erts_max_main_threads) {
+ erts_thr_set_main_status(1, (int) no);
+ if (erts_reader_groups) {
+ int rg = (int) no;
+ if (rg > erts_reader_groups)
+ rg = (((int) no) - 1) % erts_reader_groups + 1;
+ erts_smp_rwmtx_set_reader_group(rg);
+ }
+ }
+
erts_proc_lock_prepare_proc_lock_waiter();
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+
+
#endif
erts_register_blockable_thread();
#ifdef HIPE
@@ -3030,30 +3578,30 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE);
+ ASSERT(erts_smp_atomic_read(&schdlr_sspnd.changing)
+ & ERTS_SCHDLR_SSPND_CHNG_ONLN);
- schdlr_sspnd.curr_online--;
+ if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
+ erts_smp_atomic_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (((ErtsSchedulerData *) vesdp)->no != 1)
+ erts_smp_cnd_signal(&schdlr_sspnd.cnd);
+ }
- if (((ErtsSchedulerData *) vesdp)->no != 1) {
- if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- schdlr_sspnd.changing = 0;
- erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ if (((ErtsSchedulerData *) vesdp)->no == 1) {
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
}
- }
- else if (schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
- schdlr_sspnd.changing = 0;
- else {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- ASSERT(!schdlr_sspnd.changing);
+ ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -3089,11 +3637,7 @@ erts_start_schedulers(void)
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
actual++;
ASSERT(actual == esdp->no);
-#ifdef ERTS_ENABLE_LOCK_COUNT
- res = erts_lcnt_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
-#else
res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
-#endif
if (res != 0) {
actual--;
break;
@@ -3407,6 +3951,7 @@ processor_order_cmp(const void *vx, const void *vy)
static void
check_cpu_bind(ErtsSchedulerData *esdp)
{
+ int rg = 0;
int res;
int cpu_id;
erts_smp_runq_unlock(esdp->run_queue);
@@ -3425,19 +3970,25 @@ check_cpu_bind(ErtsSchedulerData *esdp)
goto unbind;
}
}
- else if (cpu_id < 0 && scheduler2cpu_map[esdp->no].bound_id >= 0) {
+ else if (cpu_id < 0) {
unbind:
/* Get rid of old binding */
res = erts_unbind_from_cpu(erts_cpuinfo);
if (res == 0)
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
- else {
+ else if (res != -ENOTSUP) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Scheduler %d failed to unbind from cpu %d: %s\n",
(int) esdp->no, cpu_id, erl_errno_id(-res));
erts_send_error_to_logger_nogl(dsbufp);
}
}
+ if (erts_reader_groups) {
+ if (esdp->cpu_id >= 0)
+ rg = reader_group_lookup(esdp->cpu_id);
+ else
+ rg = (((int) esdp->no) - 1) % erts_reader_groups + 1;
+ }
erts_smp_runq_lock(esdp->run_queue);
#ifdef ERTS_SMP
if (erts_common_run_queue)
@@ -3448,6 +3999,8 @@ check_cpu_bind(ErtsSchedulerData *esdp)
#endif
erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ if (erts_reader_groups)
+ erts_smp_rwmtx_set_reader_group(rg);
}
static void
@@ -3476,11 +4029,13 @@ signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
wake_all_schedulers();
}
else {
- ERTS_FOREACH_RUNQ(rq,
- {
+ for (s_ix = 0; s_ix < erts_no_run_queues; s_ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(s_ix);
+ erts_smp_runq_lock(rq);
rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- wake_scheduler(rq, 0);
- });
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0, 1);
+ };
}
#else
check_cpu_bind(erts_get_scheduler_data());
@@ -3496,14 +4051,15 @@ erts_init_scheduler_bind_type(char *how)
if (!system_cpudata && !user_cpudata)
return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY;
- if (sys_strcmp(how, "s") == 0)
+ if (sys_strcmp(how, "db") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+ else if (sys_strcmp(how, "s") == 0)
cpu_bind_order = ERTS_CPU_BIND_SPREAD;
else if (sys_strcmp(how, "ps") == 0)
cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
else if (sys_strcmp(how, "ts") == 0)
cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
- else if (sys_strcmp(how, "db") == 0
- || sys_strcmp(how, "tnnps") == 0)
+ else if (sys_strcmp(how, "tnnps") == 0)
cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
else if (sys_strcmp(how, "nnps") == 0)
cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -3519,6 +4075,490 @@ erts_init_scheduler_bind_type(char *how)
return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS;
}
+/*
+ * reader groups map
+ */
+
+typedef struct {
+ int level[ERTS_TOPOLOGY_MAX_DEPTH+1];
+} erts_avail_cput;
+
+typedef struct {
+ int *map;
+ int size;
+ int groups;
+} erts_reader_groups_map_test;
+
+typedef struct {
+ int id;
+ int sub_levels;
+ int reader_groups;
+} erts_rg_count_t;
+
+typedef struct {
+ int logical;
+ int reader_group;
+} erts_reader_groups_map_t;
+
+typedef struct {
+ erts_reader_groups_map_t *map;
+ int map_size;
+ int logical_processors;
+ int groups;
+} erts_make_reader_groups_map_test;
+
+static int reader_groups_available_cpu_check;
+static int reader_groups_logical_processors;
+static int reader_groups_map_size;
+static erts_reader_groups_map_t *reader_groups_map;
+
+#define ERTS_TOPOLOGY_RG ERTS_TOPOLOGY_MAX_DEPTH
+
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test);
+
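+/*
+ * Build an Erlang list of {LogicalCpuId, ReaderGroup} tuples from a
+ * reader groups map, allocated on the heap of c_p.
+ */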
+static Eterm
+get_reader_groups_map(Process *c_p,
+ erts_reader_groups_map_t *map,
+ int map_size,
+ int logical_processors)
+{
+#ifdef DEBUG
+ Eterm *endp;
+#endif
+ Eterm res = NIL, tuple;
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(c_p, logical_processors*(2+3));
+#ifdef DEBUG
+ endp = hp + logical_processors*(2+3);
+#endif
+ for (i = map_size - 1; i >= 0; i--) {
+ if (map[i].logical >= 0) {
+ tuple = TUPLE2(hp,
+ make_small(map[i].logical),
+ make_small(map[i].reader_group));
+ hp += 3;
+ res = CONS(hp, tuple, res);
+ hp += 2;
+ }
+ }
+ ASSERT(hp == endp);
+ return res;
+}
+
+Eterm
+erts_debug_reader_groups_map(Process *c_p, int groups)
+{
+ Eterm res;
+ erts_make_reader_groups_map_test test;
+
+ test.groups = groups;
+ make_reader_groups_map(&test);
+ if (!test.map)
+ res = NIL;
+ else {
+ res = get_reader_groups_map(c_p,
+ test.map,
+ test.map_size,
+ test.logical_processors);
+ erts_free(ERTS_ALC_T_TMP, test.map);
+ }
+ return res;
+}
+
+
+Eterm
+erts_get_reader_groups_map(Process *c_p)
+{
+ Eterm res;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ res = get_reader_groups_map(c_p,
+ reader_groups_map,
+ reader_groups_map_size,
+ reader_groups_logical_processors);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return res;
+}
+
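+/*
+ * Build the 'avail' array holding one entry per available logical CPU
+ * (all CPUs when 'test' is set) and count the number of distinct
+ * entities at each topology level in 'no'.
+ */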
+static void
+make_available_cpu_topology(erts_avail_cput *no,
+ erts_avail_cput *avail,
+ erts_cpu_topology_t *cpudata,
+ int *size,
+ int test)
+{
+ int len = *size;
+ erts_cpu_topology_t last;
+ int a, i, j;
+
+ no->level[ERTS_TOPOLOGY_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR] = -1;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE] = -1;
+ no->level[ERTS_TOPOLOGY_CORE] = -1;
+ no->level[ERTS_TOPOLOGY_THREAD] = -1;
+ no->level[ERTS_TOPOLOGY_LOGICAL] = -1;
+
+ last.node = INT_MIN;
+ last.processor = INT_MIN;
+ last.processor_node = INT_MIN;
+ last.core = INT_MIN;
+ last.thread = INT_MIN;
+ last.logical = INT_MIN;
+
+ a = 0;
+
+ for (i = 0; i < len; i++) {
+
+ if (!test && !erts_is_cpu_available(erts_cpuinfo, cpudata[i].logical))
+ continue;
+
+ if (last.node != cpudata[i].node)
+ goto node;
+ if (last.processor != cpudata[i].processor)
+ goto processor;
+ if (last.processor_node != cpudata[i].processor_node)
+ goto processor_node;
+ if (last.core != cpudata[i].core)
+ goto core;
+ ASSERT(last.thread != cpudata[i].thread);
+ goto thread;
+
+ node:
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ processor:
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ processor_node:
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ core:
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ thread:
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ for (j = 0; j < ERTS_TOPOLOGY_LOGICAL; j++)
+ avail[a].level[j] = no->level[j];
+
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL] = cpudata[i].logical;
+ avail[a].level[ERTS_TOPOLOGY_RG] = 0;
+
+ ASSERT(last.logical != cpudata[a].logical);
+
+ last = cpudata[i];
+ a++;
+ }
+
+ no->level[ERTS_TOPOLOGY_NODE]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR]++;
+ no->level[ERTS_TOPOLOGY_PROCESSOR_NODE]++;
+ no->level[ERTS_TOPOLOGY_CORE]++;
+ no->level[ERTS_TOPOLOGY_THREAD]++;
+ no->level[ERTS_TOPOLOGY_LOGICAL]++;
+
+ *size = a;
+}
+
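+/*
+ * The reader groups map is an open addressed hash table keyed on
+ * logical CPU id and probed linearly. Lookup of an unknown id is a
+ * fatal error.
+ */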
+static int
+reader_group_lookup(int logical)
+{
+ int start = logical % reader_groups_map_size;
+ int ix = start;
+
+ do {
+ if (reader_groups_map[ix].logical == logical) {
+ ASSERT(reader_groups_map[ix].reader_group > 0);
+ return reader_groups_map[ix].reader_group;
+ }
+ ix++;
+ if (ix == reader_groups_map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Logical cpu id %d not found\n", logical);
+}
+
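+/* Insert into the first free slot found by linear probing. */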
+static void
+reader_group_insert(erts_reader_groups_map_t *map, int map_size,
+ int logical, int reader_group)
+{
+ int start = logical % map_size;
+ int ix = start;
+
+ do {
+ if (map[ix].logical < 0) {
+ map[ix].logical = logical;
+ map[ix].reader_group = reader_group;
+ return;
+ }
+ ix++;
+ if (ix == map_size)
+ ix = 0;
+ } while (ix != start);
+
+ erl_exit(ERTS_ABORT_EXIT, "Reader groups map full\n");
+}
+
+
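+/*
+ * Count the number of distinct sub level entities below the top level
+ * entity rgc->id, starting at avail index aix. Returns the index of
+ * the first entry belonging to the next top level entity.
+ */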
+static int
+sub_levels(erts_rg_count_t *rgc, int level, int aix, int avail_sz, erts_avail_cput *avail)
+{
+ int sub_level = level+1;
+ int last = -1;
+ rgc->sub_levels = 0;
+
+ do {
+ if (last != avail[aix].level[sub_level]) {
+ rgc->sub_levels++;
+ last = avail[aix].level[sub_level];
+ }
+ aix++;
+ }
+ while (aix < avail_sz && rgc->id == avail[aix].level[level]);
+ rgc->reader_groups = 0;
+ return aix;
+}
+
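+/*
+ * Assign reader group numbers to all avail entries below the top level
+ * entity rgcp->id, spreading the sub level entities as evenly as
+ * possible over rgcp->reader_groups groups (the highest numbered
+ * groups get the remainder). Updates *rgp to the last group used and
+ * returns the next avail index.
+ */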
+static int
+write_reader_groups(int *rgp, erts_rg_count_t *rgcp,
+ int level, int a,
+ int avail_sz, erts_avail_cput *avail)
+{
+ int rg = *rgp;
+ int sub_level = level+1;
+ int sl_per_gr = rgcp->sub_levels / rgcp->reader_groups;
+ int xsl = rgcp->sub_levels % rgcp->reader_groups;
+ int sls = 0;
+ int last = -1;
+ int xsl_rg_lim = (rgcp->reader_groups - xsl) + rg + 1;
+
+ ASSERT(level < 0 || avail[a].level[level] == rgcp->id)
+
+ do {
+ if (last != avail[a].level[sub_level]) {
+ if (!sls) {
+ sls = sl_per_gr;
+ rg++;
+ if (rg >= xsl_rg_lim)
+ sls++;
+ }
+ last = avail[a].level[sub_level];
+ sls--;
+ }
+ avail[a].level[ERTS_TOPOLOGY_RG] = rg;
+ a++;
+ } while (a < avail_sz && (level < 0
+ || avail[a].level[level] == rgcp->id));
+
+ ASSERT(rgcp->reader_groups == rg - *rgp);
+
+ *rgp = rg;
+
+ return a;
+}
+
+static int
+rg_count_sub_levels_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ if (x->sub_levels != y->sub_levels)
+ return y->sub_levels - x->sub_levels;
+ return x->id - y->id;
+}
+
+static int
+rg_count_id_compare(const void *vx, const void *vy)
+{
+ erts_rg_count_t *x = (erts_rg_count_t *) vx;
+ erts_rg_count_t *y = (erts_rg_count_t *) vy;
+ return x->id - y->id;
+}
+
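+/*
+ * Build the reader groups map from the current CPU topology: pick the
+ * first topology level with more entities than reader groups as spread
+ * level. If no such level exists, each spread level entity gets a
+ * group of its own; otherwise the groups are divided among the
+ * entities one level up (capped by their number of spread level
+ * entities) before being written out per logical CPU.
+ */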
+static void
+make_reader_groups_map(erts_make_reader_groups_map_test *test)
+{
+ int i, spread_level, avail_sz;
+ erts_avail_cput no, *avail;
+ erts_cpu_topology_t *cpudata;
+ erts_reader_groups_map_t *map;
+ int map_sz;
+ int groups = erts_reader_groups;
+
+ if (test) {
+ test->map = NULL;
+ test->map_size = 0;
+ groups = test->groups;
+ }
+
+ if (!groups)
+ return;
+
+ if (!test) {
+ if (reader_groups_map)
+ erts_free(ERTS_ALC_T_RDR_GRPS_MAP, reader_groups_map);
+
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &avail_sz);
+
+ if (!cpudata)
+ return;
+
+ cpu_bind_order_sort(cpudata,
+ avail_sz,
+ ERTS_CPU_BIND_NO_SPREAD,
+ 1);
+
+ avail = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(erts_avail_cput)*avail_sz);
+
+ make_available_cpu_topology(&no, avail, cpudata,
+ &avail_sz, test != NULL);
+
+ destroy_tmp_cpu_topology_copy(cpudata);
+
+ map_sz = avail_sz*2+1;
+
+ if (test) {
+ map = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ test->map = map;
+ test->map_size = map_sz;
+ test->logical_processors = avail_sz;
+ }
+ else {
+ map = erts_alloc(ERTS_ALC_T_RDR_GRPS_MAP,
+ (sizeof(erts_reader_groups_map_t)
+ * map_sz));
+ reader_groups_map = map;
+ reader_groups_logical_processors = avail_sz;
+ reader_groups_map_size = map_sz;
+
+ }
+
+ for (i = 0; i < map_sz; i++) {
+ map[i].logical = -1;
+ map[i].reader_group = 0;
+ }
+
+ spread_level = ERTS_TOPOLOGY_CORE;
+ for (i = ERTS_TOPOLOGY_NODE; i < ERTS_TOPOLOGY_THREAD; i++) {
+ if (no.level[i] > groups) {
+ spread_level = i;
+ break;
+ }
+ }
+
+ if (no.level[spread_level] <= groups) {
+ int a, rg, last = -1;
+ rg = 0;
+ ASSERT(spread_level == ERTS_TOPOLOGY_CORE);
+ for (a = 0; a < avail_sz; a++) {
+ if (last != avail[a].level[spread_level]) {
+ rg++;
+ last = avail[a].level[spread_level];
+ }
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ rg);
+ }
+ }
+ else { /* groups < no.level[spread_level] */
+ erts_rg_count_t *rg_count;
+ int a, rg, tl, toplevels;
+
+ tl = spread_level-1;
+
+ if (spread_level == ERTS_TOPOLOGY_NODE)
+ toplevels = 1;
+ else
+ toplevels = no.level[tl];
+
+ rg_count = erts_alloc(ERTS_ALC_T_TMP,
+ toplevels*sizeof(erts_rg_count_t));
+
+ if (toplevels == 1) {
+ rg_count[0].id = 0;
+ rg_count[0].sub_levels = no.level[spread_level];
+ rg_count[0].reader_groups = groups;
+ }
+ else {
+ int rgs_per_tl, rgs;
+ rgs = groups;
+ rgs_per_tl = rgs / toplevels;
+
+ a = 0;
+ for (i = 0; i < toplevels; i++) {
+ rg_count[i].id = avail[a].level[tl];
+ a = sub_levels(&rg_count[i], tl, a, avail_sz, avail);
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_sub_levels_compare);
+
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels < rgs_per_tl) {
+ rg_count[i].reader_groups = rg_count[i].sub_levels;
+ rgs -= rg_count[i].sub_levels;
+ }
+ else {
+ rg_count[i].reader_groups = rgs_per_tl;
+ rgs -= rgs_per_tl;
+ }
+ }
+
+ while (rgs > 0) {
+ for (i = 0; i < toplevels; i++) {
+ if (rg_count[i].sub_levels == rg_count[i].reader_groups)
+ break;
+ else {
+ rg_count[i].reader_groups++;
+ if (--rgs == 0)
+ break;
+ }
+ }
+ }
+
+ qsort(rg_count,
+ toplevels,
+ sizeof(erts_rg_count_t),
+ rg_count_id_compare);
+ }
+
+ a = i = rg = 0;
+ while (a < avail_sz) {
+ a = write_reader_groups(&rg, &rg_count[i], tl,
+ a, avail_sz, avail);
+ i++;
+ }
+
+ ASSERT(groups == rg);
+
+ for (a = 0; a < avail_sz; a++)
+ reader_group_insert(map,
+ map_sz,
+ avail[a].level[ERTS_TOPOLOGY_LOGICAL],
+ avail[a].level[ERTS_TOPOLOGY_RG]);
+
+ erts_free(ERTS_ALC_T_TMP, rg_count);
+ }
+
+ erts_free(ERTS_ALC_T_TMP, avail);
+}
+
+/*
+ * CPU topology
+ */
+
typedef struct {
int *id;
int used;
@@ -3765,11 +4805,11 @@ verify_topology(erts_cpu_topology_t *cpudata, int size)
/* Verify logical ids */
logical = erts_alloc(ERTS_ALC_T_TMP, sizeof(int)*size);
- for (i = 0; i < user_cpudata_size; i++)
- logical[i] = user_cpudata[i].logical;
+ for (i = 0; i < size; i++)
+ logical[i] = cpudata[i].logical;
- qsort(logical, user_cpudata_size, sizeof(int), int_cmp);
- for (i = 0; i < user_cpudata_size-1; i++) {
+ qsort(logical, size, sizeof(int), int_cmp);
+ for (i = 0; i < size-1; i++) {
if (logical[i] == logical[i+1]) {
erts_free(ERTS_ALC_T_TMP, logical);
return ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_LIDS;
@@ -3782,13 +4822,13 @@ verify_topology(erts_cpu_topology_t *cpudata, int size)
/* Verify unique entities */
- for (i = 1; i < user_cpudata_size; i++) {
- if (user_cpudata[i-1].processor == user_cpudata[i].processor
- && user_cpudata[i-1].node == user_cpudata[i].node
- && (user_cpudata[i-1].processor_node
- == user_cpudata[i].processor_node)
- && user_cpudata[i-1].core == user_cpudata[i].core
- && user_cpudata[i-1].thread == user_cpudata[i].thread) {
+ for (i = 1; i < size; i++) {
+ if (cpudata[i-1].processor == cpudata[i].processor
+ && cpudata[i-1].node == cpudata[i].node
+ && (cpudata[i-1].processor_node
+ == cpudata[i].processor_node)
+ && cpudata[i-1].core == cpudata[i].core
+ && cpudata[i-1].thread == cpudata[i].thread) {
return ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_ENTITIES;
}
}
@@ -4032,6 +5072,8 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
sizeof(erts_cpu_topology_t)*cpudata_size);
}
+ make_reader_groups_map(NULL);
+
signal_schedulers_bind_change(cpudata, cpudata_size);
done:
@@ -4146,14 +5188,15 @@ erts_bind_schedulers(Process *c_p, Eterm how)
old_cpu_bind_order = cpu_bind_order;
- if (ERTS_IS_ATOM_STR("spread", how))
+ if (ERTS_IS_ATOM_STR("default_bind", how))
+ cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+ else if (ERTS_IS_ATOM_STR("spread", how))
cpu_bind_order = ERTS_CPU_BIND_SPREAD;
else if (ERTS_IS_ATOM_STR("processor_spread", how))
cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("thread_spread", how))
cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
- else if (ERTS_IS_ATOM_STR("default_bind", how)
- || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+ else if (ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4199,14 +5242,15 @@ erts_fake_scheduler_bindings(Process *p, Eterm how)
int cpudata_size;
Eterm res;
- if (ERTS_IS_ATOM_STR("spread", how))
+ if (ERTS_IS_ATOM_STR("default_bind", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
+ else if (ERTS_IS_ATOM_STR("spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_SPREAD;
else if (ERTS_IS_ATOM_STR("processor_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("thread_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
- else if (ERTS_IS_ATOM_STR("default_bind", how)
- || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+ else if (ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
fake_cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
@@ -4431,7 +5475,12 @@ early_cpu_bind_init(void)
(sizeof(erts_cpu_topology_t)
* system_cpudata_size));
- cpu_bind_order = ERTS_CPU_BIND_NONE;
+ cpu_bind_order = ERTS_CPU_BIND_UNDEFINED;
+
+ reader_groups_available_cpu_check = 1;
+ reader_groups_logical_processors = 0;
+ reader_groups_map_size = 0;
+ reader_groups_map = NULL;
if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
|| ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
@@ -4457,6 +5506,19 @@ late_cpu_bind_init(void)
scheduler2cpu_map[ix].bound_id = -1;
}
+ if (cpu_bind_order == ERTS_CPU_BIND_UNDEFINED) {
+ int ncpus = erts_get_cpu_configured(erts_cpuinfo);
+ if (ncpus < 1 || erts_no_schedulers < ncpus)
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
+ else
+ cpu_bind_order = ((system_cpudata || user_cpudata)
+ && (erts_bind_to_cpu(erts_cpuinfo, -1) != -ENOTSUP)
+ ? ERTS_CPU_BIND_DEFAULT_BIND
+ : ERTS_CPU_BIND_NONE);
+ }
+
+ make_reader_groups_map(NULL);
+
if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
erts_cpu_topology_t *cpudata;
int cpudata_size;
@@ -4467,6 +5529,39 @@ late_cpu_bind_init(void)
}
}
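+
+/*
+ * Re-read the CPU topology from the system. If it has changed, rebuild
+ * the system topology data and signal all schedulers to re-check their
+ * CPU bindings. Returns whether anything changed.
+ */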
+int
+erts_update_cpu_info(void)
+{
+ int changed;
+ erts_smp_rwmtx_rwlock(&erts_cpu_bind_rwmtx);
+ changed = erts_cpu_info_update(erts_cpuinfo);
+ if (changed) {
+ erts_cpu_topology_t *cpudata;
+ int cpudata_size;
+ erts_free(ERTS_ALC_T_CPUDATA, system_cpudata);
+
+ system_cpudata_size = erts_get_cpu_topology_size(erts_cpuinfo);
+ system_cpudata = erts_alloc(ERTS_ALC_T_CPUDATA,
+ (sizeof(erts_cpu_topology_t)
+ * system_cpudata_size));
+
+ if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
+ || ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
+ system_cpudata_size)) {
+ erts_free(ERTS_ALC_T_CPUDATA, system_cpudata);
+ system_cpudata = NULL;
+ system_cpudata_size = 0;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
+ ASSERT(cpudata);
+ signal_schedulers_bind_change(cpudata, cpudata_size);
+ destroy_tmp_cpu_topology_copy(cpudata);
+ }
+ erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+ return changed;
+}
+
#ifdef ERTS_SMP
static void
@@ -4481,7 +5576,7 @@ add_pend_suspend(Process *suspendee,
sizeof(ErtsPendingSuspend));
psp->next = NULL;
#ifdef DEBUG
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead;
#else
psp->end = (ErtsPendingSuspend *) 0xdeaddead;
@@ -5322,7 +6417,7 @@ dequeue_process(ErtsRunQueue *runq, Process *p)
}
/* schedule a process */
-static ERTS_INLINE void
+static ERTS_INLINE ErtsRunQueue *
internal_add_to_runq(ErtsRunQueue *runq, Process *p)
{
Uint32 prev_status = p->status;
@@ -5333,12 +6428,12 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
- return;
+ return NULL;
else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
ASSERT(p->status != P_SUSPENDED);
ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- return;
+ return NULL;
}
ASSERT(!p->scheduler_data);
#endif
@@ -5377,20 +6472,23 @@ internal_add_to_runq(ErtsRunQueue *runq, Process *p)
profile_runnable_proc(p, am_active);
}
- smp_notify_inc_runq(add_runq);
-
if (add_runq != runq)
erts_smp_runq_unlock(add_runq);
+
+ return add_runq;
}
void
erts_add_to_runq(Process *p)
{
+ ErtsRunQueue *notify_runq;
ErtsRunQueue *runq = erts_get_runq_proc(p);
erts_smp_runq_lock(runq);
- internal_add_to_runq(runq, p);
+ notify_runq = internal_add_to_runq(runq, p);
erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(notify_runq);
+
}
/* Possibly remove a scheduled process we need to suspend */
@@ -5529,8 +6627,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
p->run_queue = to_rq;
enqueue_process(to_rq, p);
- smp_notify_inc_runq(to_rq);
-
return ERTS_MIGRATE_SUCCESS;
}
#endif /* ERTS_SMP */
@@ -5727,30 +6823,6 @@ erts_set_process_priority(Process *p, Eterm new_value)
return old_value;
}
-#ifdef ERTS_SMP
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- while (!erts_port_task_have_outstanding_io_tasks()
- && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- return 0;
-}
-
-#else
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(void)
-{
- return !erts_port_task_have_outstanding_io_tasks();
-}
-
-#endif
-
/* note that P_RUNNING is only set so that we don't try to remove
** running processes from the schedule queue if they exit - a running
** process not being in the schedule queue!!
@@ -5839,6 +6911,9 @@ Process *schedule(Process *p, int calls)
}
if (IS_TRACED(p)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && p->status != P_FREE) {
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
+ }
switch (p->status) {
case P_EXITING:
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
@@ -5882,8 +6957,11 @@ Process *schedule(Process *p, int calls)
p->status_flags &= ~ERTS_PROC_SFLG_RUNNING;
if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
+ ErtsRunQueue *notify_runq;
p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
+ if (notify_runq != rq)
+ smp_notify_inc_runq(notify_runq);
}
#endif
@@ -5951,7 +7029,10 @@ Process *schedule(Process *p, int calls)
| ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
@@ -5960,12 +7041,21 @@ Process *schedule(Process *p, int calls)
}
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (esdp->check_children) {
- esdp->check_children = 0;
- erts_smp_runq_unlock(rq);
- erts_check_children();
- erts_smp_runq_lock(rq);
+#if defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
+ {
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ long aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ if (aux_work) {
+ erts_smp_runq_unlock(rq);
+#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+ aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#endif
+#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
+ nonblockable_aux_work(esdp, ssi, aux_work);
+#endif
+ erts_smp_runq_lock(rq);
+ }
}
#endif
@@ -5997,7 +7087,10 @@ Process *schedule(Process *p, int calls)
if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || erts_smp_atomic_read(&esdp->suspended)) {
+ || (erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED)) {
+ ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
}
@@ -6014,17 +7107,7 @@ Process *schedule(Process *p, int calls)
}
}
- if (prepare_for_sys_schedule()) {
- erts_smp_atomic_set(&function_calls, 0);
- fcalls = 0;
- sched_sys_wait(esdp->no, rq);
- erts_smp_atomic_set(&doing_sys_schedule, 0);
- }
- else {
- /* If all schedulers are waiting, one of them *should*
- be waiting in erl_sys_schedule() */
- sched_cnd_wait(esdp->no, rq);
- }
+ scheduler_wait(&fcalls, esdp, rq);
non_empty_runq(rq);
@@ -6086,7 +7169,7 @@ Process *schedule(Process *p, int calls)
else {
if (erts_common_run_queue) {
if (erts_common_run_queue->waiting)
- wake_one_scheduler();
+ wake_scheduler(erts_common_run_queue, 0, 1);
}
else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
@@ -6231,10 +7314,10 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
if (erts_sched_stat.enabled) {
- Uint old = ERTS_PROC_SCHED_ID(p,
+ UWord old = ERTS_PROC_SCHED_ID(p,
(ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_STATUS),
- esdp->no);
+ (UWord) esdp->no);
int migrated = old && old != esdp->no;
erts_smp_spin_lock(&erts_sched_stat.lock);
@@ -6275,7 +7358,11 @@ Process *schedule(Process *p, int calls)
trace_virtual_sched(p, am_in);
break;
}
+ if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
+ }
}
+
if (p->status != P_EXITING)
p->status = P_RUNNING;
@@ -6397,8 +7484,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
else
rq->misc.start = molp;
rq->misc.end = molp;
- smp_notify_inc_runq(rq);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(rq);
}
static void
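
This hunk, like the spawn path further down, defers smp_notify_inc_runq() until after the run-queue lock has been released, so a scheduler woken by the notification does not immediately block on the mutex the notifier still holds. A hedged pthread model of the same ordering (queue layout and names invented for illustration):

    #include <pthread.h>

    typedef struct work { struct work *next; } work_t;

    static pthread_mutex_t rq_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  rq_cnd = PTHREAD_COND_INITIALIZER;
    static work_t *rq_head;

    /* Enqueue while holding the run-queue lock, but signal only after the
     * lock is released; the waiter can then take the lock without bouncing. */
    static void enqueue_and_notify(work_t *w)
    {
        pthread_mutex_lock(&rq_mtx);
        w->next = rq_head;
        rq_head = w;
        pthread_mutex_unlock(&rq_mtx);
        pthread_cond_signal(&rq_cnd);   /* plays the role of smp_notify_inc_runq(rq) */
    }

    int main(void)
    {
        static work_t w;
        enqueue_and_notify(&w);
        return 0;
    }
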
@@ -6640,7 +7727,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
- ErtsRunQueue *rq;
+ ErtsRunQueue *rq, *notify_runq;
Process *p;
Sint arity; /* Number of arguments. */
#ifndef HYBRID
@@ -6719,11 +7806,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/*
* Must initialize binary lists here before copying binaries to process.
*/
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
heap_need +=
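
Here the three separate off-heap chains (mso, funs, externals) collapse into a single off_heap.first list. A rough standalone model of such a unified list, where every off-heap object starts with a common header carrying a type tag and a next pointer (struct and enum names invented for illustration; this model heap-allocates the headers):

    #include <stddef.h>
    #include <stdlib.h>

    enum offheap_type { OH_BINARY, OH_FUN, OH_EXTERNAL };

    /* Common header shared by every off-heap thing, so one list and one
     * sweep loop can replace three type-specific lists. */
    struct offheap_header {
        enum offheap_type type;
        struct offheap_header *next;
    };

    struct offheap {
        struct offheap_header *first;   /* plays the role of p->off_heap.first */
        size_t overhead;
    };

    static void offheap_link(struct offheap *oh, struct offheap_header *hdr)
    {
        hdr->next = oh->first;
        oh->first = hdr;
    }

    static void offheap_sweep(struct offheap *oh)
    {
        struct offheap_header *hdr = oh->first;
        while (hdr) {
            struct offheap_header *next = hdr->next;
            switch (hdr->type) {        /* dispatch replaces per-list loops */
            case OH_BINARY:   /* drop binary refcount here    */ break;
            case OH_FUN:      /* drop fun-entry refcount here */ break;
            case OH_EXTERNAL: /* drop node reference here     */ break;
            }
            free(hdr);
            hdr = next;
        }
        oh->first = NULL;
        oh->overhead = 0;
    }

    int main(void)
    {
        struct offheap oh = { NULL, 0 };
        struct offheap_header *bin = malloc(sizeof *bin);
        if (!bin)
            return 1;
        bin->type = OH_BINARY;
        offheap_link(&oh, bin);
        offheap_sweep(&oh);
        return 0;
    }
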
@@ -6757,13 +7840,14 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->bin_vheap_sz = p->min_vheap_size;
p->bin_old_vheap_sz = p->min_vheap_size;
p->bin_old_vheap = 0;
+ p->bin_vheap_mature = 0;
/* No need to initialize p->fcalls. */
p->current = p->initial+INITIAL_MOD;
- p->i = (Eterm *) beam_apply;
- p->cp = (Eterm *) beam_apply+1;
+ p->i = (BeamInstr *) beam_apply;
+ p->cp = (BeamInstr *) beam_apply+1;
p->arg_reg = p->def_arg_reg;
p->max_arg_reg = sizeof(p->def_arg_reg)/sizeof(p->def_arg_reg[0]);
@@ -6813,7 +7897,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->group_leader =
IS_CONST(parent->group_leader)
? parent->group_leader
- : STORE_NC(&p->htop, &p->off_heap.externals, parent->group_leader);
+ : STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
erts_get_default_tracing(&p->trace_flags, &p->tracer_proc);
@@ -6957,10 +8041,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
p->status = P_WAITING;
- internal_add_to_runq(rq, p);
+ notify_runq = internal_add_to_runq(rq, p);
erts_smp_runq_unlock(rq);
+ smp_notify_inc_runq(notify_runq);
+
res = p->id;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
@@ -7007,6 +8093,7 @@ void erts_init_empty_process(Process *p)
p->bin_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap = 0;
+ p->bin_vheap_mature = 0;
#ifdef ERTS_SMP
p->u.ptimer = NULL;
p->bound_runq = NULL;
@@ -7014,11 +8101,7 @@ void erts_init_empty_process(Process *p)
memset(&(p->u.tm), 0, sizeof(ErlTimer));
#endif
p->next = NULL;
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
p->reg = NULL;
p->heap_sz = 0;
@@ -7165,11 +8248,7 @@ erts_debug_verify_clean_empty_process(Process* p)
/* Thing that erts_cleanup_empty_process() cleans up */
- ASSERT(p->off_heap.mso == NULL);
-#ifndef HYBRID /* FIND ME! */
- ASSERT(p->off_heap.funs == NULL);
-#endif
- ASSERT(p->off_heap.externals == NULL);
+ ASSERT(p->off_heap.first == NULL);
ASSERT(p->off_heap.overhead == 0);
ASSERT(p->mbuf == NULL);
@@ -7180,25 +8259,16 @@ erts_debug_verify_clean_empty_process(Process* p)
void
erts_cleanup_empty_process(Process* p)
{
- ErlHeapFragment* mbufp;
-
/* We only check fields that are known to be used... */
erts_cleanup_offheap(&p->off_heap);
- p->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- p->off_heap.funs = NULL;
-#endif
- p->off_heap.externals = NULL;
+ p->off_heap.first = NULL;
p->off_heap.overhead = 0;
- mbufp = p->mbuf;
- while (mbufp) {
- ErlHeapFragment *next = mbufp->next;
- free_message_buffer(mbufp);
- mbufp = next;
+ if (p->mbuf != NULL) {
+ free_message_buffer(p->mbuf);
+ p->mbuf = NULL;
}
- p->mbuf = NULL;
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
erts_lcnt_proc_lock_destroy(p);
#endif
@@ -7214,7 +8284,6 @@ static void
delete_process(Process* p)
{
ErlMessage* mp;
- ErlHeapFragment* bp;
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->id));
@@ -7230,7 +8299,7 @@ delete_process(Process* p)
* The mso list should not be used anymore, but if it is, make sure that
* we'll notice.
*/
- p->off_heap.mso = (void *) 0x8DEFFACD;
+ p->off_heap.first = (void *) 0x8DEFFACD;
if (p->arg_reg != p->def_arg_reg) {
erts_free(ERTS_ALC_T_ARG_REG, p->arg_reg);
@@ -7264,11 +8333,8 @@ delete_process(Process* p)
/*
* Free all pending message buffers.
*/
- bp = p->mbuf;
- while (bp != NULL) {
- ErlHeapFragment* next_bp = bp->next;
- free_message_buffer(bp);
- bp = next_bp;
+ if (p->mbuf != NULL) {
+ free_message_buffer(p->mbuf);
}
erts_erase_dicts(p);
@@ -7348,7 +8414,7 @@ set_proc_exiting(Process *p, Eterm reason, ErlHeapFragment *bp)
p->freason = EXTAG_EXIT;
KILL_CATCHES(p);
cancel_timer(p);
- p->i = (Eterm *) beam_exit;
+ p->i = (BeamInstr *) beam_exit;
}
@@ -7778,9 +8844,10 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_port_release(prt);
} else if (is_internal_pid(mon->pid)) {/* local by name or pid */
Eterm watched;
- Eterm lhp[3];
+ DeclareTmpHeapNoproc(lhp,3);
ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
| ERTS_PROC_LOCKS_MSG_SEND);
+ UseTmpHeapNoproc(3);
rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
if (rp == NULL) {
goto done;
@@ -7795,6 +8862,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
watched, pcontext->reason);
}
+ UnUseTmpHeapNoproc(3);
/* else: demonitor while we exited, i.e. do nothing... */
erts_smp_proc_unlock(rp, rp_locks);
} else { /* external by pid or name */
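
The DeclareTmpHeapNoproc/UseTmpHeapNoproc pair used above replaces a plain C array so that, when HEAP_ON_C_STACK is 0 (the halfword build adds per-scheduler tmp_heap storage later in this patch), temporary term words can be carved out of a dedicated buffer instead of the C stack. A hedged illustration of how such a macro pair could expand; this is not the actual ERTS definition, and the buffer bookkeeping names are invented:

    #include <assert.h>

    typedef unsigned long Eterm;          /* stand-in for the real Eterm */
    #define TMP_HEAP_SIZE    128
    #define HEAP_ON_C_STACK  0            /* pretend this is the halfword build */

    #if HEAP_ON_C_STACK
    /* Simple build: the temporary heap is an ordinary stack array. */
    #  define DeclareTmpHeapNoproc(Name, Size) Eterm Name[Size]
    #  define UseTmpHeapNoproc(Size)           ((void) 0)
    #  define UnUseTmpHeapNoproc(Size)         ((void) 0)
    #else
    /* Halfword-style build: words come from a separate, reusable buffer. */
    static Eterm  tmp_heap_buf[TMP_HEAP_SIZE];
    static int    tmp_heap_used;
    #  define DeclareTmpHeapNoproc(Name, Size) \
          Eterm *Name = tmp_heap_buf + tmp_heap_used
    #  define UseTmpHeapNoproc(Size) \
          (assert(tmp_heap_used + (Size) <= TMP_HEAP_SIZE), \
           tmp_heap_used += (Size))
    #  define UnUseTmpHeapNoproc(Size)         (tmp_heap_used -= (Size))
    #endif

    int main(void)
    {
        DeclareTmpHeapNoproc(lhp, 3);     /* same shape as in the hunk above */
        UseTmpHeapNoproc(3);
        lhp[0] = 1; lhp[1] = 2; lhp[2] = 3;
        UnUseTmpHeapNoproc(3);
        return (int) lhp[0] - 1;
    }
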
@@ -8025,8 +9093,13 @@ erts_do_exit_process(Process* p, Eterm reason)
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
#endif
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
- trace_proc(p, p, am_exit, reason);
+ if (IS_TRACED(p)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
+
+ if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ trace_proc(p, p, am_exit, reason);
+ }
erts_trace_check_exiting(p->id);
@@ -8075,6 +9148,8 @@ continue_exit_process(Process *p
Eterm reason = p->fvalue;
DistEntry *dep;
struct saved_calls *scb;
+ process_breakpoint_time_t *pbt;
+
#ifdef DEBUG
int yield_allowed = 1;
#endif
@@ -8214,6 +9289,7 @@ continue_exit_process(Process *p
? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
: NULL);
scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
+ pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
processes_busy--;
@@ -8228,11 +9304,12 @@ continue_exit_process(Process *p
* Pre-build the EXIT tuple if there are any links.
*/
if (lnk) {
- Eterm tmp_heap[4];
+ DeclareTmpHeap(tmp_heap,4,p);
Eterm exit_tuple;
Uint exit_tuple_sz;
Eterm* hp;
+ UseTmpHeap(4,p);
hp = &tmp_heap[0];
exit_tuple = TUPLE3(hp, am_EXIT, p->id, reason);
@@ -8243,16 +9320,21 @@ continue_exit_process(Process *p
ExitLinkContext context = {p, reason, exit_tuple, exit_tuple_sz};
erts_sweep_links(lnk, &doit_exit_link, &context);
}
+ UnUseTmpHeap(4,p);
}
{
ExitMonitorContext context = {reason, p};
- erts_sweep_monitors(mon,&doit_exit_monitor,&context);
+ erts_sweep_monitors(mon,&doit_exit_monitor,&context); /* Allocates TmpHeap, but we
+ have none here */
}
if (scb)
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ if (pbt)
+ erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+
delete_process(p);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
@@ -8271,7 +9353,7 @@ continue_exit_process(Process *p
ASSERT(p->status == P_EXITING);
- p->i = (Eterm *) beam_continue_exit;
+ p->i = (BeamInstr *) beam_continue_exit;
if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) {
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
@@ -8291,7 +9373,7 @@ continue_exit_process(Process *p
static void
timeout_proc(Process* p)
{
- p->i = (Eterm *) p->def_arg_reg[0];
+ p->i = *((BeamInstr **) (UWord) p->def_arg_reg);
p->flags |= F_TIMO;
p->flags &= ~F_INSLPQUEUE;
@@ -8390,9 +9472,9 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
}
static void
-print_function_from_pc(int to, void *to_arg, Eterm* x)
+print_function_from_pc(int to, void *to_arg, BeamInstr* x)
{
- Eterm* addr = find_function_from_pc(x);
+ BeamInstr* addr = find_function_from_pc(x);
if (addr == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
@@ -8426,7 +9508,7 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
if (is_CP(x)) {
- erts_print(to, to_arg, "Return addr %p (", (Eterm *) x);
+ erts_print(to, to_arg, "Return addr %p (", (Eterm *) EXPAND_POINTER(x));
print_function_from_pc(to, to_arg, cp_val(x));
erts_print(to, to_arg, ")\n");
yreg = 0;
@@ -9255,8 +10337,8 @@ init_processes_bif(void)
processes_trap_export.code[0] = am_erlang;
processes_trap_export.code[1] = am_processes_trap;
processes_trap_export.code[2] = 2;
- processes_trap_export.code[3] = (Eterm) em_apply_bif;
- processes_trap_export.code[4] = (Eterm) &processes_trap;
+ processes_trap_export.code[3] = (BeamInstr) em_apply_bif;
+ processes_trap_export.code[4] = (BeamInstr) &processes_trap;
#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
erts_get_emu_time(&debug_tv_start);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index f58b6932b3..e49710a7ed 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -89,6 +89,7 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
#ifdef ERTS_SMP
+extern Uint erts_max_main_threads;
#include "erl_bits.h"
#endif
@@ -179,7 +180,7 @@ extern int erts_sched_thread_suggested_stack_size;
#ifdef DEBUG
-# ifdef ARCH_64
+# if defined(ARCH_64) && !HALFWORD_HEAP
# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
(*((char **) &(RQP)) = (char *) (0xdeadbeefdead0003 | ((N) << 4)))
# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
@@ -194,8 +195,8 @@ do { \
# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
do { \
ASSERT((RQP) != NULL); \
- ASSERT(((((Uint) (RQP)) & ((Uint) 1))) == ((Uint) 0)); \
- ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdead0000)); \
+ ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \
+ ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \
} while (0)
# endif
#else
@@ -219,6 +220,51 @@ typedef enum {
ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
} ErtsMigrateResult;
+#define ERTS_SSI_FLG_SLEEPING (((long) 1) << 0)
+#define ERTS_SSI_FLG_POLL_SLEEPING (((long) 1) << 1)
+#define ERTS_SSI_FLG_TSE_SLEEPING (((long) 1) << 2)
+#define ERTS_SSI_FLG_WAITING (((long) 1) << 3)
+#define ERTS_SSI_FLG_SUSPENDED (((long) 1) << 4)
+
+#define ERTS_SSI_FLGS_SLEEP_TYPE \
+ (ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
+
+#define ERTS_SSI_FLGS_SLEEP \
+ (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLGS_SLEEP_TYPE)
+
+#define ERTS_SSI_FLGS_ALL \
+ (ERTS_SSI_FLGS_SLEEP \
+ | ERTS_SSI_FLG_WAITING \
+ | ERTS_SSI_FLG_SUSPENDED)
+
+
+#if !defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
+ && defined(ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN)
+#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+#endif
+
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((long) 1) << 0)
+
+#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
+ (ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
+#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
+ (0)
+
+typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
+
+typedef struct {
+ erts_smp_spinlock_t lock;
+ ErtsSchedulerSleepInfo *list;
+} ErtsSchedulerSleepList;
+
+struct ErtsSchedulerSleepInfo_ {
+ ErtsSchedulerSleepInfo *next;
+ ErtsSchedulerSleepInfo *prev;
+ erts_smp_atomic_t flags;
+ erts_tse_t *event;
+ erts_smp_atomic_t aux_work;
+};
+
/* times to reschedule low prio process before running */
#define RESCHEDULE_LOW 8
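
The ERTS_SSI_* bits above suggest a small sleep-state machine: before blocking, a scheduler publishes SLEEPING plus a sleep-type bit and WAITING, then re-reads the word, so a poke that strips the sleep bits in between is never lost. A standalone sketch of the waiter side with C11 atomics; the function below illustrates the idea and is not code from the patch:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define SSI_FLG_SLEEPING      (1L << 0)
    #define SSI_FLG_TSE_SLEEPING  (1L << 2)
    #define SSI_FLG_WAITING       (1L << 3)
    #define SSI_FLGS_SLEEP        (SSI_FLG_SLEEPING | SSI_FLG_TSE_SLEEPING)

    struct sleep_info { atomic_long flags; };

    /* Returns true if the caller may go on to block on its event object;
     * false means a concurrent poke already cleared the sleep bits. */
    static bool prepare_to_sleep(struct sleep_info *ssi)
    {
        atomic_fetch_or(&ssi->flags,
                        SSI_FLG_SLEEPING | SSI_FLG_TSE_SLEEPING | SSI_FLG_WAITING);
        return (atomic_load(&ssi->flags) & SSI_FLGS_SLEEP) == SSI_FLGS_SLEEP;
    }
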
@@ -271,8 +317,9 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
- erts_smp_atomic_t spin_waiter;
- erts_smp_atomic_t spin_wake;
+#ifdef ERTS_SMP
+ ErtsSchedulerSleepList sleepers;
+#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
@@ -344,13 +391,25 @@ struct ErtsSchedulerData_ {
* numbered registers as possible in the same cache
* line).
*/
+#if !HALFWORD_HEAP
Eterm save_reg[ERTS_X_REGS_ALLOCATED]; /* X registers */
+#else
+ Eterm *save_reg;
+#endif
FloatDef freg[MAX_REG]; /* Floating point registers. */
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
+ ErtsSchedulerSleepInfo *ssi;
Process *free_process;
#endif
+#if !HEAP_ON_C_STACK
+ Eterm tmp_heap[TMP_HEAP_SIZE];
+ int num_tmp_heap_used;
+ Eterm beam_emu_tmp_heap[BEAM_EMU_TMP_HEAP_SIZE];
+ Eterm cmp_tmp_heap[CMP_TMP_HEAP_SIZE];
+ Eterm erl_arith_tmp_heap[ERL_ARITH_TMP_HEAP_SIZE];
+#endif
Process *current_process;
Uint no; /* Scheduler number */
@@ -363,11 +422,6 @@ struct ErtsSchedulerData_ {
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- int check_children; /* run queue mutex */
- int blocked_check_children; /* schdlr_sspnd mutex */
-#endif
- erts_smp_atomic_t suspended; /* Only used when common run queue */
erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
#endif
};
@@ -386,8 +440,9 @@ extern ErtsSchedulerData *erts_scheduler_data;
#define ERTS_PSD_SAVED_CALLS_BUF 1
#define ERTS_PSD_SCHED_ID 2
#define ERTS_PSD_DIST_ENTRY 3
+#define ERTS_PSD_CALL_TIME_BP 4
-#define ERTS_PSD_SIZE 4
+#define ERTS_PSD_SIZE 5
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -408,6 +463,9 @@ typedef struct {
#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_CALL_TIME_BP_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_CALL_TIME_BP_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
typedef struct {
ErtsProcLocks get_locks;
ErtsProcLocks set_locks;
@@ -479,6 +537,7 @@ struct ErtsPendingSuspend_ {
# define MIN_VHEAP_SIZE(p) (p)->min_vheap_size
# define BIN_VHEAP_SZ(p) (p)->bin_vheap_sz
+# define BIN_VHEAP_MATURE(p) (p)->bin_vheap_mature
# define BIN_OLD_VHEAP_SZ(p) (p)->bin_old_vheap_sz
# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
@@ -518,8 +577,8 @@ struct process {
unsigned max_arg_reg; /* Maximum number of argument registers available. */
Eterm def_arg_reg[6]; /* Default array for argument registers. */
- Eterm* cp; /* Continuation pointer (for threaded code). */
- Eterm* i; /* Program counter for threaded code. */
+ BeamInstr* cp; /* (untagged) Continuation pointer (for threaded code). */
+ BeamInstr* i; /* Program counter for threaded code. */
Sint catches; /* Number of catches on stack */
Sint fcalls; /*
* Number of reductions left to execute.
@@ -566,11 +625,12 @@ struct process {
Uint seq_trace_lastcnt;
Eterm seq_trace_token; /* Sequential trace token (tuple size 5 see below) */
- Eterm initial[3]; /* Initial module(0), function(1), arity(2) */
- Eterm* current; /* Current Erlang function:
+ BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
+ of pointer to funcinfo instruction, hence the BeamInstr datatype */
+ BeamInstr* current; /* Current Erlang function, part of the funcinfo:
* module(0), function(1), arity(2)
* (module and functions are tagged atoms;
- * arity an untagged integer).
+ * arity an untagged integer). BeamInstr * because it references code
*/
/*
@@ -595,9 +655,10 @@ struct process {
Uint mbuf_sz; /* Size of all message buffers */
ErtsPSD *psd; /* Rarely used process specific data */
- Uint bin_vheap_sz; /* Virtual heap block size for binaries */
- Uint bin_old_vheap_sz; /* Virtual old heap block size for binaries */
- Uint bin_old_vheap; /* Virtual old heap size for binaries */
+ Uint64 bin_vheap_sz; /* Virtual heap block size for binaries */
+ Uint64 bin_vheap_mature; /* Virtual heap block size for binaries */
+ Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
+ Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
union {
#ifdef ERTS_SMP
@@ -753,7 +814,7 @@ ERTS_GLB_INLINE void erts_heap_frag_shrink(Process* p, Eterm* hp)
{
ErlHeapFragment* hf = MBUF(p);
- ASSERT(hf!=NULL && (hp - hf->mem < (unsigned long)hf->size));
+ ASSERT(hf!=NULL && (hp - hf->mem < (unsigned long)hf->alloc_size));
hf->used_size = hp - hf->mem;
}
@@ -812,7 +873,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_INSLPQUEUE (1 << 1) /* Set if in timer queue */
#define F_TIMO (1 << 2) /* Set if timeout */
#define F_HEAP_GROW (1 << 3)
-#define F_NEED_FULLSWEEP (1 << 4) /* If process has old binaries & funs. */
+#define F_NEED_FULLSWEEP (1 << 4)
#define F_USING_DB (1 << 5) /* If have created tables */
#define F_DISTRIBUTION (1 << 6) /* Process used in distribution */
#define F_USING_DDLL (1 << 7) /* Process has used the DDLL interface */
@@ -966,6 +1027,7 @@ int erts_init_scheduler_bind_type(char *how);
#define ERTS_INIT_CPU_TOPOLOGY_MISSING 9
int erts_init_cpu_topology(char *topology_str);
+int erts_update_cpu_info(void);
void erts_pre_init_process(void);
void erts_late_init_process(void);
@@ -1069,6 +1131,9 @@ void erts_handle_pending_exit(Process *, ErtsProcLocks);
void erts_deep_process_dump(int, void *);
+Eterm erts_get_reader_groups_map(Process *c_p);
+Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
+
Sint erts_test_next_pid(int, Uint);
Eterm erts_debug_processes(Process *c_p);
Eterm erts_debug_processes_bif_info(Process *c_p);
@@ -1184,7 +1249,7 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#endif
#define ERTS_PROC_SCHED_ID(P, L, ID) \
- ((Uint) erts_psd_set((P), (L), ERTS_PSD_SCHED_ID, (void *) (ID)))
+ ((UWord) erts_psd_set((P), (L), ERTS_PSD_SCHED_ID, (void *) (ID)))
#define ERTS_PROC_GET_DIST_ENTRY(P) \
((DistEntry *) erts_psd_get((P), ERTS_PSD_DIST_ENTRY))
@@ -1196,6 +1261,12 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_SAVED_CALLS_BUF(P, L, SCB) \
((struct saved_calls *) erts_psd_set((P), (L), ERTS_PSD_SAVED_CALLS_BUF, (void *) (SCB)))
+#define ERTS_PROC_GET_CALL_TIME(P) \
+ ((process_breakpoint_time_t *) erts_psd_get((P), ERTS_PSD_CALL_TIME_BP))
+#define ERTS_PROC_SET_CALL_TIME(P, L, PBT) \
+ ((process_breakpoint_time_t *) erts_psd_set((P), (L), ERTS_PSD_CALL_TIME_BP, (void *) (PBT)))
+
+
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p,
ErtsProcLocks plocks,
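
ERTS_PSD_CALL_TIME_BP is just a new index into the small per-process psd array, using the same get/set-returns-the-old-value idiom as the existing slots; that is what lets continue_exit_process() detach the call-time breakpoint data and free it afterwards. A generic model of the idiom (struct and helper names invented here):

    #include <stddef.h>

    #define PSD_ERROR_HANDLER 0
    #define PSD_SAVED_CALLS   1
    #define PSD_SCHED_ID      2
    #define PSD_DIST_ENTRY    3
    #define PSD_CALL_TIME_BP  4          /* the slot this patch adds */
    #define PSD_SIZE          5

    struct psd { void *data[PSD_SIZE]; };

    static void *psd_get(struct psd *psd, int ix)
    {
        return psd ? psd->data[ix] : NULL;
    }

    /* Returns the previous value so the caller can detach and free it. */
    static void *psd_set(struct psd *psd, int ix, void *val)
    {
        void *old = psd->data[ix];
        psd->data[ix] = val;
        return old;
    }
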
@@ -1209,8 +1280,8 @@ erts_proc_get_error_handler(Process *p)
if (!val)
return am_error_handler;
else {
- ASSERT(is_atom(((Eterm) val)));
- return (Eterm) val;
+ ASSERT(is_atom(((Eterm) (UWord) val)));
+ return (Eterm) (UWord) val;
}
}
@@ -1220,13 +1291,13 @@ erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler)
void *old_val;
void *new_val;
ASSERT(is_atom(handler));
- new_val = handler == am_error_handler ? NULL : (void *) handler;
+ new_val = (handler == am_error_handler) ? NULL : (void *) (UWord) handler;
old_val = erts_psd_set(p, plocks, ERTS_PSD_ERROR_HANDLER, new_val);
if (!old_val)
return am_error_handler;
else {
- ASSERT(is_atom(((Eterm) old_val)));
- return (Eterm) old_val;
+ ASSERT(is_atom(((Eterm) (UWord) old_val)));
+ return (Eterm) (UWord) old_val;
}
}
@@ -1487,29 +1558,30 @@ extern int erts_disable_proc_not_running_opt;
#define ERTS_MIN_PROCESSES 16
#endif
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-ERTS_GLB_INLINE void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-void erts_smp_notify_inc_runq__(ErtsRunQueue *runq);
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#ifdef ERTS_SMP
+void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, long);
+ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- if (runq->waiting)
- erts_smp_notify_inc_runq__(runq);
-#endif
+ long flags = erts_smp_atomic_read(&ssi->flags);
+ ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
+ || (flags & ERTS_SSI_FLG_WAITING));
+ if (flags & ERTS_SSI_FLG_SLEEPING) {
+ flags = erts_smp_atomic_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ erts_sched_finish_poke(ssi, flags);
+ }
}
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
-
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* #ifdef ERTS_SMP */
+
#include "erl_process_lock.h"
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
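
erts_sched_poke() above is the wake-up half of the sleep-flag protocol: read the flags, and only if the sleep bit is set do an atomic and-mask that strips all sleep bits; erts_sched_finish_poke() then receives the previous flags, presumably to act on the recorded sleep type. A small C11 sketch of that shape; the event and poll primitives below are assumptions, not the ethread API:

    #include <stdatomic.h>

    #define SSI_FLG_SLEEPING       (1L << 0)
    #define SSI_FLG_POLL_SLEEPING  (1L << 1)
    #define SSI_FLG_TSE_SLEEPING   (1L << 2)
    #define SSI_FLGS_SLEEP \
        (SSI_FLG_SLEEPING | SSI_FLG_POLL_SLEEPING | SSI_FLG_TSE_SLEEPING)

    struct event;                         /* opaque stand-in for erts_tse_t */
    void event_set(struct event *e);      /* assumed wake primitive */
    void poll_interrupt(void);            /* assumed poll-set interrupt */

    struct sleep_info { atomic_long flags; struct event *event; };

    static void sched_poke(struct sleep_info *ssi)
    {
        long flags = atomic_load(&ssi->flags);
        if (flags & SSI_FLG_SLEEPING) {
            /* fetch_and returns the previous flags, i.e. the sleep type. */
            flags = atomic_fetch_and(&ssi->flags, ~SSI_FLGS_SLEEP);
            if (flags & SSI_FLG_TSE_SLEEPING)
                event_set(ssi->event);
            else if (flags & SSI_FLG_POLL_SLEEPING)
                poll_interrupt();
        }
    }
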
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 1666509c72..7a7042abe4 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -45,16 +45,16 @@ static void dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep);
static void dump_element_nl(int to, void *to_arg, Eterm x);
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
int yreg);
-static void print_function_from_pc(int to, void *to_arg, Eterm* x);
+static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static void heap_dump(int to, void *to_arg, Eterm x);
static void dump_binaries(int to, void *to_arg, Binary* root);
static void dump_externally(int to, void *to_arg, Eterm term);
static Binary* all_binaries;
-extern Eterm beam_apply[];
-extern Eterm beam_exit[];
-extern Eterm beam_continue_exit[];
+extern BeamInstr beam_apply[];
+extern BeamInstr beam_exit[];
+extern BeamInstr beam_continue_exit[];
void
@@ -223,7 +223,7 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
if (is_CP(x)) {
- erts_print(to, to_arg, "SReturn addr 0x%X (", (Eterm *) x);
+ erts_print(to, to_arg, "SReturn addr 0x%X (", cp_val(x));
print_function_from_pc(to, to_arg, cp_val(x));
erts_print(to, to_arg, ")\n");
yreg = 0;
@@ -239,9 +239,9 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
}
static void
-print_function_from_pc(int to, void *to_arg, Eterm* x)
+print_function_from_pc(int to, void *to_arg, BeamInstr* x)
{
- Eterm* addr = find_function_from_pc(x);
+ BeamInstr* addr = find_function_from_pc(x);
if (addr == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
@@ -273,7 +273,7 @@ heap_dump(int to, void *to_arg, Eterm x)
if (x == OUR_NIL) { /* We are done. */
return;
} if (is_CP(x)) {
- next = (Eterm *) x;
+ next = (Eterm *) EXPAND_POINTER(x);
} else if (is_list(x)) {
ptr = list_val(x);
if (ptr[0] != OUR_NIL) {
@@ -286,7 +286,7 @@ heap_dump(int to, void *to_arg, Eterm x)
ptr[1] = make_small(0);
}
x = ptr[0];
- ptr[0] = (Eterm) next;
+ ptr[0] = (Eterm) COMPRESS_POINTER(next);
next = ptr + 1;
goto again;
}
@@ -316,7 +316,7 @@ heap_dump(int to, void *to_arg, Eterm x)
ptr[0] = OUR_NIL;
} else {
x = ptr[arity];
- ptr[0] = (Eterm) next;
+ ptr[0] = (Eterm) COMPRESS_POINTER(next);
next = ptr + arity - 1;
goto again;
}
@@ -351,7 +351,7 @@ heap_dump(int to, void *to_arg, Eterm x)
Binary* val = pb->val;
if (erts_smp_atomic_xchg(&val->refc, 0) != 0) {
- val->flags = (Uint) all_binaries;
+ val->flags = (UWord) all_binaries;
all_binaries = val;
}
erts_print(to, to_arg, "Yc%X:%X:%X", val,
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 52440fb635..a4d12139e9 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -71,9 +71,12 @@ const Process erts_proc_lock_busy;
#ifdef ERTS_SMP
-/*#define ERTS_PROC_LOCK_SPIN_ON_GATE*/
-#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 16000
+#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000
+#define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32
#define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
+#define ERTS_PROC_LOCK_AUX_SPIN_COUNT 50
+
+#define ERTS_PROC_LOCK_SPIN_UNTIL_YIELD 25
#ifdef ERTS_PROC_LOCK_DEBUG
#define ERTS_PROC_LOCK_HARD_DEBUG
@@ -83,32 +86,19 @@ const Process erts_proc_lock_busy;
static void check_queue(erts_proc_lock_t *lck);
#endif
-
-typedef struct erts_proc_lock_waiter_t_ erts_proc_lock_waiter_t;
-struct erts_proc_lock_waiter_t_ {
- erts_proc_lock_waiter_t *next;
- erts_proc_lock_waiter_t *prev;
- ErtsProcLocks wait_locks;
- erts_smp_gate_t gate;
- erts_proc_lock_queues_t *queues;
-};
+#if SIZEOF_INT < 4
+#error "The size of the 'uflgs' field of the erts_tse_t type is too small"
+#endif
struct erts_proc_lock_queues_t_ {
erts_proc_lock_queues_t *next;
- erts_proc_lock_waiter_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-};
-
-struct erts_proc_lock_thr_spec_data_t_ {
- erts_proc_lock_queues_t *qs;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
};
static erts_proc_lock_queues_t zeroqs = {0};
-static erts_smp_spinlock_t wtr_lock;
-static erts_proc_lock_waiter_t *waiter_free_list;
+static erts_smp_spinlock_t qs_lock;
static erts_proc_lock_queues_t *queue_free_list;
-static erts_tsd_key_t waiter_key;
#ifdef ERTS_ENABLE_LOCK_CHECK
static struct {
@@ -122,35 +112,26 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
static int proc_lock_spin_count;
-static int proc_lock_trans_spin_cost;
+static int aux_thr_proc_lock_spin_count;
-static void cleanup_waiter(void);
+static void cleanup_tse(void);
void
erts_init_proc_lock(void)
{
int i;
int cpus;
- erts_smp_spinlock_init(&wtr_lock, "proc_lck_wtr_alloc");
+ erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_mtx_init_x(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i));
-#else
- erts_smp_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
-#else
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck, "pix_lock", make_small(i));
+ erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
+ "pix_lock", make_small(i));
#else
erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
#endif
-#endif
}
- waiter_free_list = NULL;
queue_free_list = NULL;
- erts_tsd_key_create(&waiter_key);
- erts_thr_install_exit_handler(cleanup_waiter);
+ erts_thr_install_exit_handler(cleanup_tse);
#ifdef ERTS_ENABLE_LOCK_CHECK
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
@@ -158,86 +139,106 @@ erts_init_proc_lock(void)
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
#endif
cpus = erts_get_cpu_configured(erts_cpuinfo);
- if (cpus > 1)
- proc_lock_spin_count = (ERTS_PROC_LOCK_SPIN_COUNT_BASE
- * ((int) erts_no_schedulers));
- else if (cpus == 1)
- proc_lock_spin_count = 0;
- else /* No of cpus unknown. Assume multi proc, but be conservative. */
+ if (cpus > 1) {
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
- if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
- proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
- proc_lock_trans_spin_cost = proc_lock_spin_count/20;
-}
-
-static ERTS_INLINE erts_proc_lock_waiter_t *
-alloc_wtr(void)
-{
- erts_proc_lock_waiter_t *wtr;
- erts_smp_spin_lock(&wtr_lock);
- wtr = waiter_free_list;
- if (wtr) {
- waiter_free_list = wtr->next;
- ERTS_LC_ASSERT(queue_free_list);
- wtr->queues = queue_free_list;
- queue_free_list = wtr->queues->next;
- erts_smp_spin_unlock(&wtr_lock);
+ proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC
+ * ((int) erts_no_schedulers));
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT;
}
- else {
- erts_smp_spin_unlock(&wtr_lock);
- wtr = erts_alloc(ERTS_ALC_T_PROC_LCK_WTR,
- sizeof(erts_proc_lock_waiter_t));
- erts_smp_gate_init(&wtr->gate);
- wtr->wait_locks = (ErtsProcLocks) 0;
- wtr->queues = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
- sizeof(erts_proc_lock_queues_t));
- sys_memcpy((void *) wtr->queues,
- (void *) &zeroqs,
- sizeof(erts_proc_lock_queues_t));
+ else if (cpus == 1) {
+ proc_lock_spin_count = 0;
+ aux_thr_proc_lock_spin_count = 0;
}
- return wtr;
+ else { /* No of cpus unknown. Assume multi proc, but be conservative. */
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE/2;
+ aux_thr_proc_lock_spin_count = ERTS_PROC_LOCK_AUX_SPIN_COUNT/2;
+ }
+ if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
}
#ifdef ERTS_ENABLE_LOCK_CHECK
static void
-check_unused_waiter(erts_proc_lock_waiter_t *wtr)
+check_unused_tse(erts_tse_t *wtr)
{
int i;
- ERTS_LC_ASSERT(wtr->wait_locks == 0);
+ erts_proc_lock_queues_t *queues = wtr->udata;
+ ERTS_LC_ASSERT(wtr->uflgs == 0);
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- ERTS_LC_ASSERT(!wtr->queues->queue[i]);
+ ERTS_LC_ASSERT(!queues->queue[i]);
}
-#define CHECK_UNUSED_WAITER(W) check_unused_waiter((W))
+#define CHECK_UNUSED_TSE(W) check_unused_tse((W))
#else
-#define CHECK_UNUSED_WAITER(W)
+#define CHECK_UNUSED_TSE(W)
#endif
+static ERTS_INLINE erts_tse_t *
+tse_fetch(erts_pix_lock_t *pix_lock)
+{
+ erts_tse_t *tse = erts_tse_fetch();
+ if (!tse->udata) {
+ erts_proc_lock_queues_t *qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_unlock(pix_lock);
+#endif
+ erts_smp_spin_lock(&qs_lock);
+ qs = queue_free_list;
+ if (qs) {
+ queue_free_list = queue_free_list->next;
+ erts_smp_spin_unlock(&qs_lock);
+ }
+ else {
+ erts_smp_spin_unlock(&qs_lock);
+ qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
+ sizeof(erts_proc_lock_queues_t));
+ sys_memcpy((void *) qs,
+ (void *) &zeroqs,
+ sizeof(erts_proc_lock_queues_t));
+ }
+ tse->udata = qs;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ if (pix_lock)
+ erts_pix_lock(pix_lock);
+#endif
+ }
+ tse->uflgs = 0;
+ return tse;
+}
static ERTS_INLINE void
-free_wtr(erts_proc_lock_waiter_t *wtr)
+tse_return(erts_tse_t *tse, int force_free_q)
{
- CHECK_UNUSED_WAITER(wtr);
- erts_smp_spin_lock(&wtr_lock);
- wtr->next = waiter_free_list;
- waiter_free_list = wtr;
- wtr->queues->next = queue_free_list;
- queue_free_list = wtr->queues;
- erts_smp_spin_unlock(&wtr_lock);
+ CHECK_UNUSED_TSE(tse);
+ if (force_free_q || erts_tse_is_tmp(tse)) {
+ erts_proc_lock_queues_t *qs = tse->udata;
+ ASSERT(qs);
+ erts_smp_spin_lock(&qs_lock);
+ qs->next = queue_free_list;
+ queue_free_list = qs;
+ erts_smp_spin_unlock(&qs_lock);
+ tse->udata = NULL;
+ }
+ erts_tse_return(tse);
}
void
erts_proc_lock_prepare_proc_lock_waiter(void)
{
- erts_tsd_set(waiter_key, (void *) alloc_wtr());
+ tse_return(tse_fetch(NULL), 0);
}
static void
-cleanup_waiter(void)
+cleanup_tse(void)
{
- erts_proc_lock_waiter_t *wtr = erts_tsd_get(waiter_key);
- if (wtr)
- free_wtr(wtr);
+ erts_tse_t *tse = erts_tse_fetch();
+ if (tse) {
+ if (tse->udata)
+ tse_return(tse, 1);
+ else
+ erts_tse_return(tse);
+ }
}
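
tse_fetch()/tse_return() replace the old waiter allocator: the thread-exclusive event object is reused across lock waits, a queue-set is lazily attached to it through udata, and the queue-set only goes back on the global free list when forced (temporary event or thread exit). A hedged single-threaded model of that caching pattern; the real code protects the free list with the qs_lock spinlock, omitted here:

    #include <stdlib.h>

    struct queue_set { struct queue_set *next; };
    struct tse { struct queue_set *udata; };        /* stand-in for erts_tse_t */

    static _Thread_local struct tse my_tse;         /* one event object per thread */
    static struct queue_set *queue_free_list;       /* spinlock-protected in ERTS */

    static struct tse *tse_fetch_model(void)
    {
        if (!my_tse.udata) {                        /* lazily attach a queue set */
            struct queue_set *qs = queue_free_list;
            if (qs)
                queue_free_list = qs->next;
            else
                qs = calloc(1, sizeof *qs);
            my_tse.udata = qs;
        }
        return &my_tse;
    }

    static void tse_return_model(struct tse *tse, int force_free_q)
    {
        if (force_free_q && tse->udata) {           /* e.g. the thread is exiting */
            tse->udata->next = queue_free_list;
            queue_free_list = tse->udata;
            tse->udata = NULL;
        }
    }
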
@@ -250,7 +251,7 @@ cleanup_waiter(void)
static ERTS_INLINE void
enqueue_waiter(erts_proc_lock_queues_t *qs,
int ix,
- erts_proc_lock_waiter_t *wtr)
+ erts_tse_t *wtr)
{
if (!qs->queue[ix]) {
qs->queue[ix] = wtr;
@@ -266,10 +267,10 @@ enqueue_waiter(erts_proc_lock_queues_t *qs,
}
}
-static erts_proc_lock_waiter_t *
+static erts_tse_t *
dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
{
- erts_proc_lock_waiter_t *wtr = qs->queue[ix];
+ erts_tse_t *wtr = qs->queue[ix];
ERTS_LC_ASSERT(qs->queue[ix]);
if (wtr->next == wtr) {
ERTS_LC_ASSERT(qs->queue[ix]->prev == wtr);
@@ -295,10 +296,10 @@ dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
* lock.
*/
static ERTS_INLINE void
-try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
+try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
{
ErtsProcLocks got_locks = (ErtsProcLocks) 0;
- ErtsProcLocks locks = wtr->wait_locks;
+ ErtsProcLocks locks = wtr->uflgs;
int lock_no;
ERTS_LC_ASSERT(lck->queues);
@@ -334,7 +335,7 @@ try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
}
}
- wtr->wait_locks &= ~got_locks;
+ wtr->uflgs &= ~got_locks;
}
/*
@@ -350,8 +351,8 @@ transfer_locks(Process *p,
int unlock)
{
int transferred = 0;
- erts_proc_lock_waiter_t *wake = NULL;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wake = NULL;
+ erts_tse_t *wtr;
ErtsProcLocks unset_waiter = 0;
ErtsProcLocks tlocks = trnsfr_lcks;
int lock_no;
@@ -377,11 +378,11 @@ transfer_locks(Process *p,
ERTS_LC_ASSERT(wtr);
if (!qs->queue[lock_no])
unset_waiter |= lock;
- ERTS_LC_ASSERT(wtr->wait_locks & lock);
- wtr->wait_locks &= ~lock;
- if (wtr->wait_locks)
+ ERTS_LC_ASSERT(wtr->uflgs & lock);
+ wtr->uflgs &= ~lock;
+ if (wtr->uflgs)
try_aquire(&p->lock, wtr);
- if (!wtr->wait_locks) {
+ if (!wtr->uflgs) {
/*
* The other thread got all locks it needs;
* need to wake it up.
@@ -412,9 +413,10 @@ transfer_locks(Process *p,
erts_pix_unlock(pix_lock);
do {
- erts_proc_lock_waiter_t *tmp = wake;
+ erts_tse_t *tmp = wake;
wake = wake->next;
- erts_smp_gate_let_through(&tmp->gate, 1);
+ erts_atomic_set(&tmp->uaflgs, 0);
+ erts_tse_set(tmp);
} while (wake);
if (!unlock)
@@ -462,26 +464,16 @@ wait_for_locks(Process *p,
ErtsProcLocks olflgs)
{
erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
- int tsd;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
+ erts_proc_lock_queues_t *qs;
/* Acquire a waiter object on which this thread can wait. */
- wtr = erts_tsd_get(waiter_key);
- if (wtr)
- tsd = 1;
- else {
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_unlock(pix_lock);
-#endif
- wtr = alloc_wtr();
- tsd = 0;
-#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_pix_lock(pix_lock);
-#endif
- }
+ wtr = tse_fetch(pix_lock);
/* Record which locks this waiter needs. */
- wtr->wait_locks = need_locks;
+ wtr->uflgs = need_locks;
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -489,14 +481,16 @@ wait_for_locks(Process *p,
ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
+ qs = wtr->udata;
+ ASSERT(qs);
/* Provide the process with waiter queues, if it doesn't have one. */
if (!p->lock.queues) {
- wtr->queues->next = NULL;
- p->lock.queues = wtr->queues;
+ qs->next = NULL;
+ p->lock.queues = qs;
}
else {
- wtr->queues->next = p->lock.queues->next;
- p->lock.queues->next = wtr->queues;
+ qs->next = p->lock.queues->next;
+ p->lock.queues->next = qs;
}
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
@@ -506,46 +500,59 @@ wait_for_locks(Process *p,
/* Try to aquire locks one at a time in lock order and set wait flag */
try_aquire(&p->lock, wtr);
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
#ifdef ERTS_PROC_LOCK_HARD_DEBUG
check_queue(&p->lock);
#endif
- if (wtr->wait_locks) { /* We didn't get them all; need to wait... */
- /* Got to wait for locks... */
+ if (wtr->uflgs) {
+ /* We didn't get them all; need to wait... */
+
+ ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
+
+ erts_atomic_set(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
- /*
- * Wait for needed locks. When we return all needed locks have
- * have been acquired by other threads and transfered to us.
- */
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- erts_smp_gate_swait(&wtr->gate, proc_lock_spin_count);
-#else
- erts_smp_gate_wait(&wtr->gate);
-#endif
+ while (1) {
+ int res;
+ erts_tse_reset(wtr);
+
+ if (erts_atomic_read(&wtr->uaflgs) == 0)
+ break;
+
+ /*
+ * Wait for needed locks. When we are woken, all needed locks
+ * have been acquired by other threads and transferred to us.
+ * However, we need to be prepared for spurious wakeups.
+ */
+ do {
+ res = erts_tse_wait(wtr); /* might return EINTR */
+ } while (res != 0);
+ }
erts_pix_lock(pix_lock);
+
+ ASSERT(wtr->uflgs == 0);
}
/* Recover some queues to store in the waiter. */
ERTS_LC_ASSERT(p->lock.queues);
if (p->lock.queues->next) {
- wtr->queues = p->lock.queues->next;
- p->lock.queues->next = wtr->queues->next;
+ qs = p->lock.queues->next;
+ p->lock.queues->next = qs->next;
}
else {
- wtr->queues = p->lock.queues;
+ qs = p->lock.queues;
p->lock.queues = NULL;
}
+ wtr->udata = qs;
erts_pix_unlock(pix_lock);
ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
- if (tsd)
- CHECK_UNUSED_WAITER(wtr);
- else
- free_wtr(wtr);
+ tse_return(wtr, 0);
}
/*
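
The wait loop above no longer trusts a single wake-up: the waiter sets uaflgs to 1 before publishing itself, and the thread that transfers the locks clears uaflgs and then sets the event, so the waiter keeps resetting the event, re-checking the flag, and waiting (retrying on EINTR) until the flag reads zero. A generic model of that reset, check, wait discipline; the event primitives are placeholders, not the ethread API:

    #include <stdatomic.h>

    struct event;                          /* placeholder for erts_tse_t */
    void event_reset(struct event *e);     /* assumed: arm the event */
    int  event_wait(struct event *e);      /* assumed: may return EINTR, may wake spuriously */

    /* 'pending' is 1 while the locks are still owed to us; the transferring
     * thread stores 0 and then signals the event. */
    static void wait_until_cleared(atomic_int *pending, struct event *e)
    {
        for (;;) {
            event_reset(e);
            if (atomic_load(pending) == 0)     /* re-check after arming */
                break;
            while (event_wait(e) != 0)         /* tolerate EINTR */
                ;
        }
    }
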
@@ -563,52 +570,57 @@ erts_proc_lock_failed(Process *p,
ErtsProcLocks locks,
ErtsProcLocks old_lflgs)
{
-#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
- int spin_count = 0;
-#else
- int spin_count = proc_lock_spin_count;
-#endif
-
+ int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ int thr_spin_count;
+ int spin_count;
ErtsProcLocks need_locks = locks;
ErtsProcLocks olflgs = old_lflgs;
- while (need_locks != 0)
- {
- ErtsProcLocks can_grab = in_order_locks(olflgs, need_locks);
+ if (erts_thr_get_main_status())
+ thr_spin_count = proc_lock_spin_count;
+ else
+ thr_spin_count = aux_thr_proc_lock_spin_count;
+
+ spin_count = thr_spin_count;
+
+ while (need_locks != 0) {
+ ErtsProcLocks can_grab;
+
+ can_grab = in_order_locks(olflgs, need_locks);
- if (can_grab == 0)
- {
+ if (can_grab == 0) {
/* Someone already has the lowest-numbered lock we want. */
- if (spin_count-- <= 0)
- {
+ if (spin_count-- <= 0) {
/* Too many retries, give up and sleep for the lock. */
wait_for_locks(p, pixlck, locks, need_locks, olflgs);
return;
}
+ ERTS_SPIN_BODY;
+
+ if (--until_yield == 0) {
+ until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
+ erts_thr_yield();
+ }
+
olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
}
- else
- {
+ else {
/* Try to grab all of the grabbable locks at once with cmpxchg. */
ErtsProcLocks grabbed = olflgs | can_grab;
ErtsProcLocks nflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, grabbed, olflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);
- if (nflgs == olflgs)
- {
+ if (nflgs == olflgs) {
/* Success! We grabbed the 'can_grab' locks. */
olflgs = grabbed;
need_locks &= ~can_grab;
-#ifndef ERTS_PROC_LOCK_SPIN_ON_GATE
/* Since we made progress, reset the spin count. */
- spin_count = proc_lock_spin_count;
-#endif
+ spin_count = thr_spin_count;
}
- else
- {
+ else {
/* Compare-and-exchange failed, try again. */
olflgs = nflgs;
}
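
The rewritten contention path spins for a bounded budget, yields the CPU every ERTS_PROC_LOCK_SPIN_UNTIL_YIELD iterations, and only falls back to sleeping when the budget runs out; the budget is smaller for non-scheduler (aux) threads. A compact sketch of that spin, yield, then block shape, with sched_yield() standing in for erts_thr_yield() and the two helpers below assumed rather than taken from the patch:

    #include <sched.h>
    #include <stdbool.h>

    #define SPIN_UNTIL_YIELD 25             /* mirrors ERTS_PROC_LOCK_SPIN_UNTIL_YIELD */

    extern bool try_grab_locks(void);       /* assumed: one cmpxchg attempt */
    extern void block_on_locks(void);       /* assumed: enqueue waiter and sleep */

    static void lock_contended(int spin_budget)
    {
        int until_yield = SPIN_UNTIL_YIELD;

        while (!try_grab_locks()) {
            if (spin_budget-- <= 0) {       /* budget exhausted: go to sleep */
                block_on_locks();
                return;
            }
            if (--until_yield == 0) {       /* be polite to other runnable threads */
                until_yield = SPIN_UNTIL_YIELD;
                sched_yield();
            }
        }
        /* Success: the locks were taken by spinning alone. */
    }
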
@@ -1407,7 +1419,7 @@ check_queue(erts_proc_lock_t *lck)
wtr = (((ErtsProcLocks) 1) << lock_no) << ERTS_PROC_LOCK_WAITER_SHIFT;
if (lflgs & wtr) {
int n;
- erts_proc_lock_waiter_t *wtr;
+ erts_tse_t *wtr;
ERTS_LC_ASSERT(lck->queues && lck->queues->queue[lock_no]);
wtr = lck->queues->queue[lock_no];
n = 0;
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index d71e5a0a6e..7cfc9893fa 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -255,11 +255,7 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
typedef struct {
union {
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_t mtx;
-#else
erts_smp_spinlock_t spnlck;
-#endif
char buf[64]; /* Try to get locks in different cache lines */
} u;
} erts_pix_lock_t;
@@ -277,9 +273,12 @@ typedef struct {
((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_acqb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg_relb(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
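
Splitting the single full-barrier cmpxchg into _ACQB_ and _RELB_ variants lets the lock word take an acquire barrier when locking and a release barrier when unlocking, which is all the mutual-exclusion protocol needs. The same idea expressed with portable C11 atomics (a model, not the erts_smp_atomic implementation):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_long lock_flags;

    /* Locking: success is an acquire, so reads of protected data cannot be
     * reordered before the lock bits are owned. */
    static bool try_set_locks(long expected, long wanted)
    {
        return atomic_compare_exchange_strong_explicit(
            &lock_flags, &expected, expected | wanted,
            memory_order_acquire, memory_order_relaxed);
    }

    /* Unlocking: success is a release, so earlier writes to protected data
     * are visible to the next owner of the lock bits. */
    static bool try_clear_locks(long expected, long dropped)
    {
        return atomic_compare_exchange_strong_explicit(
            &lock_flags, &expected, expected & ~dropped,
            memory_order_release, memory_order_relaxed);
    }
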
@@ -289,6 +288,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_band(erts_proc_lock_t *,
ErtsProcLocks);
ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_bor(erts_proc_lock_t *,
ErtsProcLocks);
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *,
+ ErtsProcLocks,
+ ErtsProcLocks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -322,7 +324,9 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
-#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
+ erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
#define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags)
@@ -348,9 +352,9 @@ ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
- erts_pix_lock_t *,
- ErtsProcLocks,
- char *file, unsigned int line);
+ erts_pix_lock_t *,
+ ErtsProcLocks,
+ char *file, unsigned int line);
#else
ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
erts_pix_lock_t *,
@@ -372,30 +376,18 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_lock(&pixlck->u.mtx);
-#else
erts_smp_spin_lock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- erts_smp_mtx_unlock(&pixlck->u.mtx);
-#else
erts_smp_spin_unlock(&pixlck->u.spnlck);
-#endif
}
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
{
-#if ERTS_PROC_LOCK_MUTEX_IMPL
- return erts_smp_lc_mtx_is_locked(&pixlck->u.mtx);
-#else
return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
-#endif
}
/*
@@ -417,9 +409,9 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
ErtsProcLocks expct_lflgs = 0;
while (1) {
- ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock,
- expct_lflgs | locks,
- expct_lflgs);
+ ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock,
+ expct_lflgs | locks,
+ expct_lflgs);
if (ERTS_LIKELY(lflgs == expct_lflgs)) {
/* We successfully grabbed all locks. */
return 0;
@@ -535,7 +527,7 @@ erts_smp_proc_unlock__(Process *p,
if (want_lflgs != old_lflgs) {
ErtsProcLocks new_lflgs =
- ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, want_lflgs, old_lflgs);
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(&p->lock, want_lflgs, old_lflgs);
if (new_lflgs != old_lflgs) {
/* cmpxchg failed, try again. */
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 03d2a586e3..b41fa70476 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -43,9 +43,17 @@ typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
typedef erts_tid_t erts_smp_tid_t;
typedef erts_mtx_t erts_smp_mtx_t;
typedef erts_cnd_t erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER
+#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED
+#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED
+typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef erts_gate_t erts_smp_gate_t;
typedef ethr_atomic_t erts_smp_atomic_t;
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
@@ -54,15 +62,27 @@ void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#else /* #ifdef ERTS_SMP */
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER 0
+#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
typedef int erts_smp_thr_opts_t;
typedef int erts_smp_thr_init_data_t;
typedef int erts_smp_tid_t;
typedef int erts_smp_mtx_t;
typedef int erts_smp_cnd_t;
+#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
+#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_SMP_RWMTX_LONG_LIVED 0
+#define ERTS_SMP_RWMTX_SHORT_LIVED 0
+#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef int erts_smp_gate_t;
typedef long erts_smp_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
@@ -103,8 +123,6 @@ ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
@@ -119,9 +137,17 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
erts_smp_mtx_t *mtx);
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
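
The new erts_smp_rwmtx_opt_t and the _init_opt() entry points let a caller ask for a reader-optimized rwlock (the FREQUENT_READ and EXTREMELY_FREQUENT_READ types, backed by the reader groups selected through erts_smp_rwmtx_set_reader_group()). A hedged usage sketch built only from the names declared in this header; the lock itself and the chosen options are made up:

    /* Inside an ERTS source file that already includes erl_smp.h: */
    static erts_smp_rwmtx_t table_lock;

    static void init_table_lock(void)
    {
        erts_smp_rwmtx_opt_t opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
        opt.type  = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;  /* mostly-read lock */
        opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;          /* lives as long as the VM */
        erts_smp_rwmtx_init_opt(&table_lock, &opt, "table_lock");
    }
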
@@ -155,6 +181,16 @@ ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -190,12 +226,6 @@ ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
-ERTS_GLB_INLINE void erts_smp_gate_init(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_destroy(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_close(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_smp_gate_wait(erts_smp_gate_t *gp);
-ERTS_GLB_INLINE void erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount);
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
@@ -331,22 +361,6 @@ erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_set_forksafe(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_unset_forksafe(mtx);
-#endif
-}
-
ERTS_GLB_INLINE int
erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
{
@@ -433,6 +447,25 @@ erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_set_reader_group(int no)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_set_reader_group(no);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
{
#ifdef ERTS_SMP
@@ -441,6 +474,16 @@ erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
+ erts_smp_rwmtx_opt_t *opt,
+ char *name)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_opt(rwmtx, opt, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
{
#ifdef ERTS_SMP
@@ -709,6 +752,73 @@ erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
#endif
}
+ERTS_GLB_INLINE long
+erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_dectest_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
ERTS_GLB_INLINE void
erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
{
@@ -919,54 +1029,6 @@ erts_smp_tsd_get(erts_smp_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_smp_gate_init(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_init((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_destroy(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_destroy((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_close(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_close((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no)
-{
-#ifdef ERTS_SMP
- erts_gate_let_through((erts_gate_t *) gp, no);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_wait(erts_smp_gate_t *gp)
-{
-#ifdef ERTS_SMP
- erts_gate_wait((erts_gate_t *) gp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount)
-{
-#ifdef ERTS_SMP
- erts_gate_swait((erts_gate_t *) gp, spincount);
-#endif
-}
-
#ifdef ERTS_THR_HAVE_SIG_FUNCS
#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index 2924abbd51..c6458a0e45 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -68,7 +68,9 @@ unsigned tag_val_def(Eterm x)
static char msg[32];
switch (x & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_LIST: return LIST_DEF;
+ case TAG_PRIMARY_LIST:
+ ET_ASSERT(_list_precond(x),file,line);
+ return LIST_DEF;
case TAG_PRIMARY_BOXED: {
Eterm hdr = *boxed_val(x);
ET_ASSERT(is_header(hdr),file,line);
@@ -103,7 +105,7 @@ unsigned tag_val_def(Eterm x)
break;
}
}
- sprintf(msg, "tag_val_def: %#lx", x);
+ sprintf(msg, "tag_val_def: %#lx", (unsigned long) x);
et_abort(msg, file, line);
#undef file
#undef line
@@ -121,12 +123,12 @@ FUNTY checked_##FUN(ARGTY x, const char *file, unsigned line) \
return _unchecked_##FUN(x); \
}
-ET_DEFINE_CHECKED(Eterm,make_boxed,Eterm*,_is_aligned);
+ET_DEFINE_CHECKED(Eterm,make_boxed,Eterm*,_is_taggable_pointer);
ET_DEFINE_CHECKED(int,is_boxed,Eterm,!is_header);
-ET_DEFINE_CHECKED(Eterm*,boxed_val,Eterm,is_boxed);
-ET_DEFINE_CHECKED(Eterm,make_list,Eterm*,_is_aligned);
+ET_DEFINE_CHECKED(Eterm*,boxed_val,Eterm,_boxed_precond);
+ET_DEFINE_CHECKED(Eterm,make_list,Eterm*,_is_taggable_pointer);
ET_DEFINE_CHECKED(int,is_not_list,Eterm,!is_header);
-ET_DEFINE_CHECKED(Eterm*,list_val,Eterm,is_list);
+ET_DEFINE_CHECKED(Eterm*,list_val,Eterm,_list_precond);
ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small);
ET_DEFINE_CHECKED(Sint,signed_val,Eterm,is_small);
ET_DEFINE_CHECKED(Uint,atom_val,Eterm,is_atom);
@@ -163,8 +165,8 @@ ET_DEFINE_CHECKED(Uint32*,external_ref_data,Eterm,is_external_ref);
ET_DEFINE_CHECKED(struct erl_node_*,external_ref_node,Eterm,is_external_ref);
ET_DEFINE_CHECKED(Eterm*,export_val,Eterm,is_export);
-ET_DEFINE_CHECKED(Eterm,make_cp,Uint*,_is_aligned);
-ET_DEFINE_CHECKED(Uint*,cp_val,Eterm,is_CP);
+ET_DEFINE_CHECKED(Eterm,make_cp,UWord *,_is_taggable_pointer);
+ET_DEFINE_CHECKED(UWord *,cp_val,Eterm,is_CP);
ET_DEFINE_CHECKED(Uint,catch_val,Eterm,is_catch);
ET_DEFINE_CHECKED(Uint,x_reg_offset,Uint,_is_xreg);
ET_DEFINE_CHECKED(Uint,y_reg_offset,Uint,_is_yreg);
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index a6596558fa..b8e4473141 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -20,6 +20,32 @@
#ifndef __ERL_TERM_H
#define __ERL_TERM_H
+#include "sys.h" /* defines HALFWORD_HEAP */
+
+#if HALFWORD_HEAP
+# define HEAP_ON_C_STACK 0
+# if HALFWORD_ASSERT
+# ifdef ET_DEBUG
+# undef ET_DEBUG
+# endif
+# define ET_DEBUG 1
+# endif
+# if 1
+# define CHECK_POINTER_MASK 0xFFFFFFFF00000000UL
+# define COMPRESS_POINTER(APointer) ((Eterm) (UWord) (APointer))
+# define EXPAND_POINTER(AnEterm) ((UWord) (AnEterm))
+# else
+# define CHECK_POINTER_MASK 0x0UL
+# define COMPRESS_POINTER(AnUint) (AnUint)
+# define EXPAND_POINTER(APointer) (APointer)
+# endif
+#else
+# define HEAP_ON_C_STACK 1
+# define CHECK_POINTER_MASK 0x0UL
+# define COMPRESS_POINTER(AnUint) (AnUint)
+# define EXPAND_POINTER(APointer) (APointer)
+#endif
+
struct erl_node_; /* Declared in erl_node_tables.h */
/*
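
The macros introduced here are the core of the halfword scheme: heap pointers are guaranteed to lie below the 4 GB boundary, so a 64-bit pointer can be stored in a 32-bit term and widened again on access, while CHECK_POINTER_MASK lets debug code verify that guarantee (the "#else" branch is the no-op fallback kept behind "#if 1"). A small self-contained sketch of the same idea, using invented type names rather than the real Eterm/UWord definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Eterm32;   /* halfword term: 32 bits               */
typedef uintptr_t UWord;    /* native machine word: 64 bits here    */

#define CHECK_POINTER_MASK 0xFFFFFFFF00000000UL
#define COMPRESS_POINTER(p)  ((Eterm32)(UWord)(p))
#define EXPAND_POINTER(t)    ((UWord)(t))

int main(void)
{
    /* A made-up address below the 4 GB boundary, standing in for a
     * heap pointer the allocator has promised to keep in low memory. */
    UWord fake_ptr = 0x00ABCD00UL;

    /* Compressible only if the upper 32 bits are zero and the pointer
     * is word aligned - the _is_taggable_pointer() condition. */
    assert((fake_ptr & (CHECK_POINTER_MASK | 0x3)) == 0);

    Eterm32 term = COMPRESS_POINTER(fake_ptr);   /* fits in 32 bits  */
    UWord back  = EXPAND_POINTER(term);          /* widen back again */

    printf("round-trip ok: %d\n", back == fake_ptr);
    return 0;
}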
@@ -158,9 +184,16 @@ struct erl_node_; /* Declared in erl_node_tables.h */
/* boxed object access methods */
+#if HALFWORD_HEAP
+#define _is_taggable_pointer(x) (((UWord)(x) & (CHECK_POINTER_MASK | 0x3)) == 0)
+#define _boxed_precond(x) (is_boxed(x))
+#else
+#define _is_taggable_pointer(x) (((Uint)(x) & 0x3) == 0)
+#define _boxed_precond(x) (is_boxed(x))
+#endif
#define _is_aligned(x) (((Uint)(x) & 0x3) == 0)
-#define _unchecked_make_boxed(x) ((Uint)(x) + TAG_PRIMARY_BOXED)
-_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*)
+#define _unchecked_make_boxed(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_BOXED)
+_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*);
#define make_boxed(x) _ET_APPLY(make_boxed,(x))
#if 1
#define _is_not_boxed(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_BOXED))
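
These declarations follow erl_term.h's checked/unchecked accessor pattern: every accessor has a raw _unchecked_ macro, and in an ET_DEBUG build _ET_APPLY routes the call through a generated function that validates a precondition (now _is_taggable_pointer rather than the old _is_aligned). The miniature below only reproduces the shape of that pattern with invented names; it is not the real _ET_DECLARE_CHECKED machinery:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t DemoEterm;
#define DEMO_TAG_BOXED 2u

#define demo_unchecked_make_boxed(p) ((DemoEterm)(p) + DEMO_TAG_BOXED)

#define DEMO_ET_DEBUG 1
#if DEMO_ET_DEBUG
static DemoEterm demo_checked_make_boxed(const DemoEterm *p)
{
    assert(((uintptr_t)p & 0x3) == 0);   /* _is_taggable_pointer()-style check */
    return demo_unchecked_make_boxed(p);
}
#define demo_make_boxed(p) demo_checked_make_boxed(p)
#else
#define demo_make_boxed(p) demo_unchecked_make_boxed(p)
#endif

int main(void)
{
    DemoEterm cell[2] = {0, 0};                 /* aligned storage   */
    DemoEterm boxed = demo_make_boxed(cell);    /* tag the pointer   */
    printf("tag bits: %lu\n", (unsigned long)(boxed & 0x3));
    return 0;
}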
@@ -170,13 +203,13 @@ _ET_DECLARE_CHECKED(int,is_boxed,Eterm)
#else
#define is_boxed(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_BOXED)
#endif
-#define _unchecked_boxed_val(x) ((Eterm*)((x) - TAG_PRIMARY_BOXED))
-_ET_DECLARE_CHECKED(Eterm*,boxed_val,Eterm)
+#define _unchecked_boxed_val(x) ((Eterm*) EXPAND_POINTER(((x) - TAG_PRIMARY_BOXED)))
+_ET_DECLARE_CHECKED(Eterm*,boxed_val,Eterm);
#define boxed_val(x) _ET_APPLY(boxed_val,(x))
/* cons cell ("list") access methods */
-#define _unchecked_make_list(x) ((Uint)(x) + TAG_PRIMARY_LIST)
-_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*)
+#define _unchecked_make_list(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_LIST)
+_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*);
#define make_list(x) _ET_APPLY(make_list,(x))
#if 1
#define _unchecked_is_not_list(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_LIST))
@@ -187,8 +220,13 @@ _ET_DECLARE_CHECKED(int,is_not_list,Eterm)
#define is_list(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_LIST)
#define is_not_list(x) (!is_list((x)))
#endif
-#define _unchecked_list_val(x) ((Eterm*)((x) - TAG_PRIMARY_LIST))
-_ET_DECLARE_CHECKED(Eterm*,list_val,Eterm)
+#if HALFWORD_HEAP
+#define _list_precond(x) (is_list(x))
+#else
+#define _list_precond(x) (is_list(x))
+#endif
+#define _unchecked_list_val(x) ((Eterm*) EXPAND_POINTER((x) - TAG_PRIMARY_LIST))
+_ET_DECLARE_CHECKED(Eterm*,list_val,Eterm);
#define list_val(x) _ET_APPLY(list_val,(x))
#define CONS(hp, car, cdr) \
@@ -198,13 +236,13 @@ _ET_DECLARE_CHECKED(Eterm*,list_val,Eterm)
#define CDR(x) ((x)[1])
/* generic tagged pointer (boxed or list) access methods */
-#define _unchecked_ptr_val(x) ((Eterm*)((x) & ~((Uint) 0x3)))
+#define _unchecked_ptr_val(x) ((Eterm*) EXPAND_POINTER((x) & ~((Uint) 0x3)))
#define ptr_val(x) _unchecked_ptr_val((x)) /*XXX*/
#define _unchecked_offset_ptr(x,offs) ((x)+((offs)*sizeof(Eterm)))
#define offset_ptr(x,offs) _unchecked_offset_ptr(x,offs) /*XXX*/
/* fixnum ("small") access methods */
-#if defined(ARCH_64)
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define SMALL_BITS (64-4)
#define SMALL_DIGITS (17)
#else
@@ -329,7 +367,11 @@ _ET_DECLARE_CHECKED(Eterm*,fun_val,Eterm)
_ET_DECLARE_CHECKED(Eterm*,export_val,Eterm)
#define export_val(x) _ET_APPLY(export_val,(x))
#define is_export_header(x) ((x) == HEADER_EXPORT)
+#if HALFWORD_HEAP
+#define HEADER_EXPORT _make_header(2,_TAG_HEADER_EXPORT)
+#else
#define HEADER_EXPORT _make_header(1,_TAG_HEADER_EXPORT)
+#endif
/* bignum access methods */
#define make_pos_bignum_header(sz) _make_header((sz),_TAG_HEADER_POS_BIG)
@@ -353,7 +395,7 @@ _ET_DECLARE_CHECKED(Eterm*,big_val,Eterm)
#define big_val(x) _ET_APPLY(big_val,(x))
/* flonum ("float") access methods */
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define HEADER_FLONUM _make_header(1,_TAG_HEADER_FLOAT)
#else
#define HEADER_FLONUM _make_header(2,_TAG_HEADER_FLOAT)
@@ -374,12 +416,12 @@ typedef union float_def
byte fb[sizeof(ieee754_8)];
Uint16 fs[sizeof(ieee754_8) / sizeof(Uint16)];
Uint32 fw[sizeof(ieee754_8) / sizeof(Uint32)];
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
Uint fdw;
#endif
} FloatDef;
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
#define GET_DOUBLE(x, f) (f).fdw = *(float_val(x)+1)
#define PUT_DOUBLE(f, x) *(x) = HEADER_FLONUM, \
@@ -679,7 +721,7 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm)
#define ERTS_MAX_REF_NUMBERS 3
#define ERTS_REF_NUMBERS ERTS_MAX_REF_NUMBERS
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
# define ERTS_REF_WORDS (ERTS_REF_NUMBERS/2 + 1)
# define ERTS_REF_32BIT_WORDS (ERTS_REF_NUMBERS+1)
#else
@@ -701,7 +743,7 @@ typedef struct {
#define make_ref_thing_header(DW) \
_make_header((DW)+REF_THING_HEAD_SIZE-1,_TAG_HEADER_REF)
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
/*
* Ref layout on a 64-bit little endian machine:
@@ -779,10 +821,10 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |A A A A A A A A A A A A A A A A A A A A A A A A A A|t t t t|0 0| Thing
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N| Next
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E| ErlNode
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N| Next
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X| Data 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* . . .
@@ -793,7 +835,7 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
* t : External pid thing tag (1100)
* t : External port thing tag (1101)
* t : External ref thing tag (1110)
- * N : Next (external thing) pointer
+ * N : Next (off_heap) pointer
* E : ErlNode pointer
* X : Type specific data
*
@@ -807,11 +849,19 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
*
*/
+/* XXX:PaN - this structure is not ideal for the halfword heap: it wastes
+ a lot of memory on padding, and the data array will not begin at the end of
+ the structure as otherwise expected. Be sure to access the data.ui32 array
+ and do not try to reach the actual data via pointer arithmetic on an Eterm *...
+
+ XXX:Sverk - The problem is made worse by the "one off-heap list" change, where
+ the 'next' pointer must align with 'next' in ProcBin, erl_fun_thing and erl_off_heap_header.
+*/
typedef struct external_thing_ {
/* ----+ */
Eterm header; /* | */
- struct external_thing_ *next; /* > External thing head */
- struct erl_node_ *node; /* | */
+ struct erl_node_* node; /* > External thing head */
+ struct erl_off_heap_header* next; /* | */
/* ----+ */
union {
Uint32 ui32[1];
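
The field reordering above is driven by the constraint spelled out in the comment: every off-heap object must keep its 'next' pointer at a common offset so that the garbage collector can walk a single generic off-heap list. The sketch below shows the kind of invariant involved, using invented miniature layouts rather than the real ProcBin/erl_fun_thing/external_thing_ definitions:

#include <assert.h>
#include <stddef.h>

/* Invented miniature layouts - not the real ERTS structs. */
struct demo_off_heap_header { unsigned long thing_word; void *reserved; struct demo_off_heap_header *next; };
struct demo_proc_bin        { unsigned long thing_word; unsigned long size; struct demo_proc_bin *next; };
struct demo_external_thing  { unsigned long header; void *node; struct demo_off_heap_header *next; };

int main(void)
{
    /* One generic sweep over "the off-heap list" only works if every
     * object type agrees on where its 'next' pointer lives. */
    assert(offsetof(struct demo_off_heap_header, next) ==
           offsetof(struct demo_proc_bin, next));
    assert(offsetof(struct demo_off_heap_header, next) ==
           offsetof(struct demo_external_thing, next));
    return 0;
}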
@@ -944,15 +994,15 @@ _ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#error "fix yer arch, like"
#endif
-#define _unchecked_make_cp(x) ((Eterm)(x))
-_ET_DECLARE_CHECKED(Eterm,make_cp,Uint*)
+#define _unchecked_make_cp(x) ((Eterm) COMPRESS_POINTER(x))
+_ET_DECLARE_CHECKED(Eterm,make_cp,BeamInstr*);
#define make_cp(x) _ET_APPLY(make_cp,(x))
#define is_not_CP(x) ((x) & _CPMASK)
#define is_CP(x) (!is_not_CP(x))
-#define _unchecked_cp_val(x) ((Uint*)(x))
-_ET_DECLARE_CHECKED(Uint*,cp_val,Eterm)
+#define _unchecked_cp_val(x) ((BeamInstr*) EXPAND_POINTER(x))
+_ET_DECLARE_CHECKED(BeamInstr*,cp_val,Eterm);
#define cp_val(x) _ET_APPLY(cp_val,(x))
#define make_catch(x) (((x) << _TAG_IMMED2_SIZE) | _TAG_IMMED2_CATCH)
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index d635916dd8..0b7269262e 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2001-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -25,6 +25,11 @@
#ifndef ERL_THREAD_H__
#define ERL_THREAD_H__
+#define ERTS_SPIN_BODY ETHR_SPIN_BODY
+
+#define ERTS_MAX_READER_GROUPS 8
+extern int erts_reader_groups;
+
#include "sys.h"
#ifdef USE_THREADS
@@ -34,6 +39,8 @@
#include "erl_lock_count.h"
#include "erl_term.h"
+#define ERTS_THR_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+
#ifdef ERTS_ENABLE_LOCK_COUNT
#define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__)
#define erts_spin_lock(L) erts_spin_lock_x(L, __FILE__, __LINE__)
@@ -46,6 +53,7 @@
#define ERTS_THR_OPTS_DEFAULT_INITER ETHR_THR_OPTS_DEFAULT_INITER
typedef ethr_thr_opts erts_thr_opts_t;
typedef ethr_init_data erts_thr_init_data_t;
+typedef ethr_late_init_data erts_thr_late_init_data_t;
typedef ethr_tid erts_tid_t;
/* mutex */
@@ -71,8 +79,19 @@ typedef struct {
erts_lcnt_lock_t lcnt;
#endif
} erts_rwmtx_t;
+
+#define ERTS_RWMTX_OPT_DEFAULT_INITER ETHR_RWMUTEX_OPT_DEFAULT_INITER
+#define ERTS_RWMTX_TYPE_NORMAL ETHR_RWMUTEX_TYPE_NORMAL
+#define ERTS_RWMTX_TYPE_FREQUENT_READ ETHR_RWMUTEX_TYPE_FREQUENT_READ
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+#define ERTS_RWMTX_LONG_LIVED ETHR_RWMUTEX_LONG_LIVED
+#define ERTS_RWMTX_SHORT_LIVED ETHR_RWMUTEX_SHORT_LIVED
+#define ERTS_RWMTX_UNKNOWN_LIVED ETHR_RWMUTEX_UNKNOWN_LIVED
+typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
+
typedef ethr_tsd_key erts_tsd_key_t;
-typedef ethr_gate erts_gate_t;
+typedef ethr_ts_event erts_tse_t;
typedef ethr_atomic_t erts_atomic_t;
/* spinlock */
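
erts_rwmtx_opt_t carries the new reader/writer mutex tuning knobs (lock type, expected lifetime, spin counts) down to the ethr layer; as the erts_rwmtx_init()/erts_rwmtx_init_opt() pair later in this file shows, passing NULL selects the defaults. A rough, self-contained sketch of that option-struct-with-NULL-default pattern, using a pthread rwlock as a stand-in backend and invented demo_* names:

#include <pthread.h>
#include <stddef.h>

typedef struct {
    char type;            /* NORMAL / FREQUENT_READ / EXTREMELY_FREQUENT_READ */
    char lived;           /* LONG_LIVED / SHORT_LIVED / UNKNOWN_LIVED         */
    int  main_spincount;  /* -1 means "use the library default"               */
    int  aux_spincount;
} demo_rwmtx_opt_t;

static const demo_rwmtx_opt_t demo_default_opt = {0, 0, -1, -1};

static int demo_rwmtx_init_opt(pthread_rwlock_t *l, const demo_rwmtx_opt_t *opt)
{
    if (opt == NULL)
        opt = &demo_default_opt;     /* NULL selects the defaults           */
    (void)opt;                       /* a real backend would honour the fields */
    return pthread_rwlock_init(l, NULL);
}

static int demo_rwmtx_init(pthread_rwlock_t *l)
{
    return demo_rwmtx_init_opt(l, NULL);   /* plain init == opt init with NULL */
}

int main(void)
{
    pthread_rwlock_t lock;
    demo_rwmtx_opt_t opt = demo_default_opt;
    opt.type = 1;                          /* e.g. ask for a frequent-read lock */

    if (demo_rwmtx_init_opt(&lock, &opt) != 0)
        return 1;
    pthread_rwlock_destroy(&lock);

    if (demo_rwmtx_init(&lock) != 0)
        return 1;
    pthread_rwlock_destroy(&lock);
    return 0;
}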
@@ -101,36 +120,41 @@ typedef ethr_timeval erts_thr_timeval_t;
__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
/* implemented in erl_init.c */
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#define ERTS_REC_MTX_INITER \
- {ETHR_REC_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1,THE_NON_VALUE,ERTS_LC_FLG_LT_MUTEX)}
-#define ERTS_MTX_INITER \
- {ETHR_MUTEX_INITER, \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_MUTEX)}
-#else
-#define ERTS_REC_MTX_INITER {ETHR_REC_MUTEX_INITER}
-#define ERTS_MTX_INITER {ETHR_MUTEX_INITER}
-#endif
-#define ERTS_CND_INITER ETHR_COND_INITER
#define ERTS_THR_INIT_DATA_DEF_INITER ETHR_INIT_DATA_DEFAULT_INITER
+#define ERTS_THR_LATE_INIT_DATA_DEF_INITER \
+ ETHR_LATE_INIT_DATA_DEFAULT_INITER
#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-
#else /* #ifdef USE_THREADS */
+#define ERTS_THR_MEMORY_BARRIER
+
#define ERTS_THR_OPTS_DEFAULT_INITER 0
typedef int erts_thr_opts_t;
typedef int erts_thr_init_data_t;
+typedef int erts_thr_late_init_data_t;
typedef int erts_tid_t;
typedef int erts_mtx_t;
typedef int erts_cnd_t;
+#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
+#define ERTS_RWMTX_TYPE_NORMAL 0
+#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
+#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
+#define ERTS_RWMTX_LONG_LIVED 0
+#define ERTS_RWMTX_SHORT_LIVED 0
+#define ERTS_RWMTX_UNKNOWN_LIVED 0
+typedef struct {
+ char type;
+ char lived;
+ int main_spincount;
+ int aux_spincount;
+} erts_rwmtx_opt_t;
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
-typedef int erts_gate_t;
+typedef int erts_tse_t;
typedef long erts_atomic_t;
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
@@ -144,7 +168,6 @@ typedef struct {
long tv_nsec;
} erts_thr_timeval_t;
-#define ERTS_REC_MTX_INITER 0
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
#define ERTS_THR_INIT_DATA_DEF_INITER 0
@@ -154,6 +177,7 @@ typedef struct {
#endif /* #ifdef USE_THREADS */
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
+ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
void *arg, erts_thr_opts_t *opts);
ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res);
@@ -162,9 +186,6 @@ ERTS_GLB_INLINE void erts_thr_exit(void *res);
ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void erts_rec_mtx_init(erts_mtx_t *mtx);
-#endif
ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
@@ -173,8 +194,6 @@ ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_set_forksafe(erts_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_mtx_unset_forksafe(erts_mtx_t *mtx);
ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
@@ -188,9 +207,17 @@ ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
char *name,
Eterm extra);
+ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
char *name);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
@@ -224,6 +251,20 @@ ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
long expected);
ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_atomic_read_acqb(erts_atomic_t *var);
+ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_dectest_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp);
+ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
+ char *name,
+ Eterm extra,
+ Uint16 opt);
ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
char *name,
Eterm extra);
@@ -259,12 +300,16 @@ ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
-ERTS_GLB_INLINE void erts_gate_init(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_destroy(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_close(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_let_through(erts_gate_t *gp, unsigned no);
-ERTS_GLB_INLINE void erts_gate_wait(erts_gate_t *gp);
-ERTS_GLB_INLINE void erts_gate_swait(erts_gate_t *gp, int spincount);
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void);
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep);
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount);
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep);
+ERTS_GLB_INLINE void erts_thr_set_main_status(int, int);
+ERTS_GLB_INLINE int erts_thr_get_main_status(void);
+ERTS_GLB_INLINE void erts_thr_yield(void);
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#define ERTS_THR_HAVE_SIG_FUNCS 1
@@ -286,15 +331,21 @@ erts_thr_init(erts_thr_init_data_t *id)
}
ERTS_GLB_INLINE void
+erts_thr_late_init(erts_thr_late_init_data_t *id)
+{
+#ifdef USE_THREADS
+ int res = ethr_late_init(id);
+ if (res)
+ erts_thr_fatal_error(res, "complete initialization of thread library");
+#endif
+}
+
+ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
#ifdef USE_THREADS
-#ifdef ERTS_ENABLE_LOCK_COUNT
- int res = erts_lcnt_thr_create(tid, func, arg, opts);
-#else
int res = ethr_thr_create(tid, func, arg, opts);
-#endif
if (res)
erts_thr_fatal_error(res, "create thread");
#endif
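
erts_thr_late_init() reflects a two-phase start-up: the thread library is initialised early, and settings that depend on parsed runtime configuration (such as the number of reader groups, bounded by ERTS_MAX_READER_GROUPS) are applied in a second, late step. The sketch below only illustrates that split with invented names; it is not the ethr_late_init() interface:

#include <stdio.h>

struct demo_late_init_data { int reader_groups; };

static int demo_early_init(void) { return 0; }

static int demo_late_init(const struct demo_late_init_data *d)
{
    printf("using %d reader groups\n", d->reader_groups);
    return 0;
}

int main(int argc, char **argv)
{
    struct demo_late_init_data late = { .reader_groups = 8 };  /* default */

    if (demo_early_init() != 0)
        return 1;
    /* ... parse emulator arguments here, possibly lowering the value ... */
    (void)argc; (void)argv;
    if (demo_late_init(&late) != 0)
        return 1;
    return 0;
}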
@@ -362,20 +413,6 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
#endif
}
-
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void
-erts_rec_mtx_init(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_rec_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize recursive mutex");
-#endif
-}
-#endif
-
-
ERTS_GLB_INLINE void
erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
{
@@ -422,9 +459,7 @@ erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -463,9 +498,7 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &mtx->lc);
#endif
@@ -492,26 +525,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
}
-ERTS_GLB_INLINE void
-erts_mtx_set_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_set_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "set mutex forksafe");
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_unset_forksafe(erts_mtx_t *mtx)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_unset_forksafe(&mtx->mtx);
- if (res != 0 && res != ENOTSUP)
- erts_thr_fatal_error(res, "unset mutex forksafe");
-#endif
-}
-
ERTS_GLB_INLINE int
erts_mtx_trylock(erts_mtx_t *mtx)
{
@@ -531,11 +544,7 @@ erts_mtx_trylock(erts_mtx_t *mtx)
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, res);
-#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try lock mutex");
-
+#endif
return res;
#else
return 0;
@@ -551,19 +560,16 @@ erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&mtx->lcnt);
#endif
- res = ethr_mutex_lock(&mtx->mtx);
+ ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "lock mutex");
#endif
}
@@ -571,16 +577,13 @@ ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&mtx->lcnt);
#endif
- res = ethr_mutex_unlock(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "unlock mutex");
+ ethr_mutex_unlock(&mtx->mtx);
#endif
}
@@ -648,9 +651,7 @@ ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_signal(cnd);
- if (res)
- erts_thr_fatal_error(res, "signal on condition variable");
+ ethr_cond_signal(cnd);
#endif
}
@@ -659,19 +660,34 @@ ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
- int res = ethr_cond_broadcast(cnd);
- if (res)
- erts_thr_fatal_error(res, "broadcast on condition variable");
+ ethr_cond_broadcast(cnd);
#endif
}
/* rwmutex */
ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_rwmtx_set_reader_group(int no)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+#endif
+ res = ethr_rwmutex_set_reader_group(no);
+ if (res != 0)
+ erts_thr_fatal_error(res, "set reader group");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -684,10 +700,20 @@ erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra)
+{
+ erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
+ erts_rwmtx_opt_t *opt,
+ char *name)
{
#ifdef USE_THREADS
- int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0)
erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -700,6 +726,12 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
}
ERTS_GLB_INLINE void
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+{
+ erts_rwmtx_init_opt(rwmtx, NULL, name);
+}
+
+ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
@@ -736,9 +768,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try read lock rwmutex");
return res;
#else
@@ -754,19 +783,16 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_rlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "read lock rwmutex");
#endif
}
@@ -774,16 +800,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_rwmutex_runlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "read unlock rwmutex");
+ ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
}
@@ -808,9 +831,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
#endif
-
- if (res != 0 && res != EBUSY)
- erts_thr_fatal_error(res, "try write lock rwmutex");
return res;
#else
@@ -826,19 +846,16 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwlock(&rwmtx->rwmtx);
+ ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
- if (res != 0)
- erts_thr_fatal_error(res, "write lock rwmutex");
#endif
}
@@ -846,16 +863,13 @@ ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
- if (res != 0)
- erts_thr_fatal_error(res, "write unlock rwmutex");
+ ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
}
@@ -917,9 +931,7 @@ ERTS_GLB_INLINE void
erts_atomic_init(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_init(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic init");
+ ethr_atomic_init(var, i);
#else
*var = i;
#endif
@@ -929,9 +941,7 @@ ERTS_GLB_INLINE void
erts_atomic_set(erts_atomic_t *var, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_set(var, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic set");
+ ethr_atomic_set(var, i);
#else
*var = i;
#endif
@@ -941,11 +951,7 @@ ERTS_GLB_INLINE long
erts_atomic_read(erts_atomic_t *var)
{
#ifdef USE_THREADS
- long i;
- int res = ethr_atomic_read(var, &i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic read");
- return i;
+ return ethr_atomic_read(var);
#else
return *var;
#endif
@@ -955,11 +961,7 @@ ERTS_GLB_INLINE long
erts_atomic_inctest(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_inctest(incp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment and test");
- return test;
+ return ethr_atomic_inc_read(incp);
#else
return ++(*incp);
#endif
@@ -969,11 +971,7 @@ ERTS_GLB_INLINE long
erts_atomic_dectest(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_dectest(decp, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement and test");
- return test;
+ return ethr_atomic_dec_read(decp);
#else
return --(*decp);
#endif
@@ -983,9 +981,7 @@ ERTS_GLB_INLINE void
erts_atomic_inc(erts_atomic_t *incp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_inc(incp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic increment");
+ ethr_atomic_inc(incp);
#else
++(*incp);
#endif
@@ -995,9 +991,7 @@ ERTS_GLB_INLINE void
erts_atomic_dec(erts_atomic_t *decp)
{
#ifdef USE_THREADS
- int res = ethr_atomic_dec(decp);
- if (res)
- erts_thr_fatal_error(res, "perform atomic decrement");
+ ethr_atomic_dec(decp);
#else
--(*decp);
#endif
@@ -1007,11 +1001,7 @@ ERTS_GLB_INLINE long
erts_atomic_addtest(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- long test;
- int res = ethr_atomic_addtest(addp, i, &test);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition and test");
- return test;
+ return ethr_atomic_add_read(addp, i);
#else
return *addp += i;
#endif
@@ -1021,9 +1011,7 @@ ERTS_GLB_INLINE void
erts_atomic_add(erts_atomic_t *addp, long i)
{
#ifdef USE_THREADS
- int res = ethr_atomic_add(addp, i);
- if (res)
- erts_thr_fatal_error(res, "perform atomic addition");
+ ethr_atomic_add(addp, i);
#else
*addp += i;
#endif
@@ -1034,9 +1022,7 @@ erts_atomic_xchg(erts_atomic_t *xchgp, long new)
{
long old;
#ifdef USE_THREADS
- int res = ethr_atomic_xchg(xchgp, new, &old);
- if (res)
- erts_thr_fatal_error(res, "perform atomic exchange");
+ return ethr_atomic_xchg(xchgp, new);
#else
old = *xchgp;
*xchgp = new;
@@ -1048,11 +1034,7 @@ ERTS_GLB_INLINE long
erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
{
#ifdef USE_THREADS
- long old;
- int res = ethr_atomic_cmpxchg(xchgp, new, expected, &old);
- if (ERTS_UNLIKELY(res != 0))
- erts_thr_fatal_error(res, "perform atomic exchange");
- return old;
+ return ethr_atomic_cmpxchg(xchgp, new, expected);
#else
long old = *xchgp;
if (old == expected)
@@ -1064,31 +1046,95 @@ erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
ERTS_GLB_INLINE long
erts_atomic_bor(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_or_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise or");
+ return ethr_atomic_read_bor(var, mask);
#else
+ long old;
old = *var;
*var |= mask;
-#endif
return old;
+#endif
}
ERTS_GLB_INLINE long
erts_atomic_band(erts_atomic_t *var, long mask)
{
- long old;
#ifdef USE_THREADS
- int res = ethr_atomic_and_old(var, mask, &old);
- if (res != 0)
- erts_thr_fatal_error(res, "perform atomic bitwise and");
+ return ethr_atomic_read_band(var, mask);
#else
+ long old;
old = *var;
*var &= mask;
+ return old;
#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_read_acqb(erts_atomic_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_set_relb(erts_atomic_t *var, long i)
+{
+#ifdef USE_THREADS
+ ethr_atomic_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_dec_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_dectest_relb(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_dec_read_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ long new,
+ long exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
+#else
+ long old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
return old;
+#endif
}
/* spinlock */
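
The new _acqb/_relb atomics give callers explicit acquire/release ordering instead of the stronger default barriers. The stand-alone example below expresses the same publish/consume idea with C11 <stdatomic.h> rather than the ethr API, purely as a conceptual analogue with invented names:

#include <stdatomic.h>
#include <stdio.h>

static int payload;            /* ordinary data being published        */
static atomic_long flag;       /* zero-initialised publication flag    */

static void producer(void)
{
    payload = 42;                                     /* write data first        */
    atomic_store_explicit(&flag, 1,                   /* ~ erts_atomic_set_relb  */
                          memory_order_release);
}

static void consumer(void)
{
    if (atomic_load_explicit(&flag,                   /* ~ erts_atomic_read_acqb */
                             memory_order_acquire) == 1)
        printf("payload = %d\n", payload);            /* guaranteed to see 42    */
}

int main(void) { producer(); consumer(); return 0; }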
@@ -1112,6 +1158,26 @@ erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
}
ERTS_GLB_INLINE void
+erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
+ Uint16 opt)
+{
+#ifdef USE_THREADS
+ int res = ethr_spinlock_init(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "init spinlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
erts_spinlock_init(erts_spinlock_t *lock, char *name)
{
#ifdef USE_THREADS
@@ -1152,16 +1218,13 @@ ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&lock->lcnt);
#endif
- res = ethr_spin_unlock(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "release spin lock");
+ ethr_spin_unlock(&lock->slck);
#else
(void)lock;
#endif
@@ -1175,19 +1238,16 @@ erts_spin_lock(erts_spinlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&lock->lcnt);
#endif
- res = ethr_spin_lock(&lock->slck);
+ ethr_spin_lock(&lock->slck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take spin lock");
#else
(void)lock;
#endif
@@ -1268,16 +1328,13 @@ ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release read lock");
+ ethr_read_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1291,19 +1348,16 @@ erts_read_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
#endif
- res = ethr_read_lock(&lock->rwlck);
+ ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take read lock");
#else
(void)lock;
#endif
@@ -1313,16 +1367,13 @@ ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_unlock(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "release write lock");
+ ethr_write_unlock(&lock->rwlck);
#else
(void)lock;
#endif
@@ -1336,19 +1387,16 @@ erts_write_lock(erts_rwlock_t *lock)
#endif
{
#ifdef USE_THREADS
- int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
#endif
- res = ethr_write_lock(&lock->rwlck);
+ ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
- if (res)
- erts_thr_fatal_error(res, "take write lock");
#else
(void)lock;
#endif
@@ -1432,66 +1480,95 @@ erts_tsd_get(erts_tsd_key_t key)
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_init(erts_gate_t *gp)
+ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_init((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize gate");
+ return (erts_tse_t *) ethr_get_ts_event();
+#else
+ return (erts_tse_t *) NULL;
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_destroy(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_destroy((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "destroy gate");
+ ethr_leave_ts_event(ep);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_close(erts_gate_t *gp)
+ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_close((ethr_gate *) gp);
- if (res != 0)
- erts_thr_fatal_error(res, "close gate");
+ ethr_event_set(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_let_through(erts_gate_t *gp, unsigned no)
+ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_let_through((ethr_gate *) gp, no);
- if (res != 0)
- erts_thr_fatal_error(res, "let through gate");
+ ethr_event_reset(&((ethr_ts_event *) ep)->event);
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_wait(erts_gate_t *gp)
+ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
{
#ifdef USE_THREADS
- int res = ethr_gate_wait((ethr_gate *) gp);
+ return ethr_event_wait(&((ethr_ts_event *) ep)->event);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
+{
+#ifdef USE_THREADS
+ return ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+#else
+ return ENOTSUP;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
+{
+#ifdef USE_THREADS
+ return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
+{
+#ifdef USE_THREADS
+ int res = ethr_set_main_thr_status(on, no);
if (res != 0)
- erts_thr_fatal_error(res, "wait on gate");
+ erts_thr_fatal_error(res, "set thread main status");
#endif
}
-ERTS_GLB_INLINE void
-erts_gate_swait(erts_gate_t *gp, int spincount)
+ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
#ifdef USE_THREADS
- int res = ethr_gate_swait((ethr_gate *) gp, spincount);
+ int main_status;
+ int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
- erts_thr_fatal_error(res, "swait on gate");
+ erts_thr_fatal_error(res, "get thread main status");
+ return main_status;
+#else
+ return 1;
#endif
}
+ERTS_GLB_INLINE void erts_thr_yield(void)
+{
+#ifdef USE_THREADS
+ int res = ETHR_YIELD();
+ if (res != 0)
+ erts_thr_fatal_error(res, "yield");
+#endif
+}
+
+
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
ERTS_GLB_INLINE void
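
The erts_tse_* wrappers replace the old gate primitive with a per-thread event that can be set, reset and waited on (with an optional spinning variant). The toy version below models just the set/reset/wait semantics on top of a pthread condition variable; the real ethr_ts_event additionally supports spinning and per-thread caching, which this sketch omits, and all demo_* names are invented:

#include <pthread.h>

typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t  cnd;
    int             signalled;
} demo_event;

static void demo_event_set(demo_event *e)      /* ~ erts_tse_set()   */
{
    pthread_mutex_lock(&e->mtx);
    e->signalled = 1;
    pthread_cond_signal(&e->cnd);
    pthread_mutex_unlock(&e->mtx);
}

static void demo_event_reset(demo_event *e)    /* ~ erts_tse_reset() */
{
    pthread_mutex_lock(&e->mtx);
    e->signalled = 0;
    pthread_mutex_unlock(&e->mtx);
}

static void demo_event_wait(demo_event *e)     /* ~ erts_tse_wait()  */
{
    pthread_mutex_lock(&e->mtx);
    while (!e->signalled)
        pthread_cond_wait(&e->cnd, &e->mtx);
    pthread_mutex_unlock(&e->mtx);
}

int main(void)
{
    demo_event e = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
    demo_event_set(&e);     /* signal first ...            */
    demo_event_wait(&e);    /* ... so this returns at once */
    demo_event_reset(&e);
    return 0;
}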
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index c15f85f8f1..7b8706ea13 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -650,6 +650,22 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
t.tm_sec = *second;
t.tm_isdst = isdst;
the_clock = mktime(&t);
+ if (the_clock == -1) {
+ if (isdst) {
+ /* If this is a timezone without DST and the OS (correctly)
+ refuses to give us a DST time, we simulate the Linux/Solaris
+ behaviour of giving the same data as if is_dst was not set. */
+ t.tm_isdst = 0;
+ the_clock = mktime(&t);
+ if (the_clock == -1) {
+ /* Failed anyway, something else is bad - will be a badarg */
+ return 0;
+ }
+ } else {
+ /* Something else is the matter, badarg. */
+ return 0;
+ }
+ }
#ifdef HAVE_GMTIME_R
gmtime_r(&the_clock, (tm = &tmbuf));
#else
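
The new branch retries mktime() with tm_isdst cleared when a DST-flagged local time is rejected (as some libcs do for zones without DST), mimicking the Linux/Solaris behaviour, and only then reports failure, which becomes a badarg. A stand-alone illustration of that fallback, with an invented helper name:

#include <stdio.h>
#include <time.h>

static int demo_local_to_epoch(struct tm t, int isdst, time_t *out)
{
    t.tm_isdst = isdst;
    *out = mktime(&t);
    if (*out == (time_t)-1) {
        if (!isdst)
            return 0;              /* genuinely invalid -> caller raises badarg */
        t.tm_isdst = 0;            /* retry as if DST was not requested         */
        *out = mktime(&t);
        if (*out == (time_t)-1)
            return 0;
    }
    return 1;
}

int main(void)
{
    struct tm t = {0};
    t.tm_year = 110; t.tm_mon = 3; t.tm_mday = 1;   /* 2010-04-01 00:00 local */
    time_t clk;
    if (demo_local_to_epoch(t, 1, &clk))
        printf("epoch: %ld\n", (long)clk);
    return 0;
}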
@@ -663,6 +679,10 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
*second = tm->tm_sec;
return 1;
}
+#if defined(HAVE_POSIX2TIME) && defined(HAVE_DECL_POSIX2TIME) && \
+ !HAVE_DECL_POSIX2TIME
+extern time_t posix2time(time_t);
+#endif
int
univ_to_local(Sint *year, Sint *month, Sint *day,
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 2842c2361a..3043bb1e8c 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -43,8 +43,9 @@
#undef DEBUG_PRINTOUTS
#endif
-extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
@@ -397,11 +398,13 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
*/
static void
do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
- Eterm local_heap[4+5+5];
+#define LOCAL_HEAP_SIZE (4+5+5)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
Eterm message;
Eterm *hp;
Eterm mfarity;
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
ASSERT(is_pid(pid));
ASSERT(is_tuple(timestamp));
ASSERT(*tuple_val(timestamp) == make_arityval(3));
@@ -426,6 +429,8 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
pid,
SYS_MSG_TYPE_UNDEFINED,
message);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
}
#endif
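
Throughout this file, fixed-size C-stack arrays of Eterm are replaced by the Declare/Use/UnUseTmpHeap(Noproc) macros so that a halfword build (HEAP_ON_C_STACK == 0) can keep scratch terms off the native stack, where their addresses would not fit in 32-bit terms. The macros below are an invented, self-contained stand-in that only shows the shape of the pattern; the real ones live in the emulator headers, take different arguments, and reuse per-scheduler buffers instead of calling malloc:

#include <stdint.h>
#include <stdlib.h>

typedef uintptr_t DemoEterm;

#define DEMO_HEAP_ON_C_STACK 0   /* pretend this is a halfword build */

#if DEMO_HEAP_ON_C_STACK
#define DemoDeclareTmpHeap(name, size) DemoEterm name[size]
#define DemoUseTmpHeap(name, size)     ((void)0)
#define DemoUnUseTmpHeap(name, size)   ((void)0)
#else
#define DemoDeclareTmpHeap(name, size) DemoEterm *name = NULL
#define DemoUseTmpHeap(name, size)     ((name) = malloc((size) * sizeof(DemoEterm)))
#define DemoUnUseTmpHeap(name, size)   free(name)
#endif

static void demo_trace_like_function(void)
{
    DemoDeclareTmpHeap(local_heap, 4 + 5 + 5);   /* mirrors LOCAL_HEAP_SIZE */
    DemoUseTmpHeap(local_heap, 4 + 5 + 5);
    /* ... build a few temporary terms in local_heap here ... */
    local_heap[0] = 0;
    DemoUnUseTmpHeap(local_heap, 4 + 5 + 5);
}

int main(void) { demo_trace_like_function(); return 0; }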
@@ -442,7 +447,9 @@ send_to_port(Process *c_p, Eterm message,
Eterm *tracer_pid, Uint *tracee_flags) {
Port* trace_port;
#ifndef ERTS_SMP
- Eterm ts, local_heap[4], *hp;
+#define LOCAL_HEAP_SIZE (4)
+ Eterm ts, *hp;
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
#endif
ASSERT(is_internal_port(*tracer_pid));
@@ -486,6 +493,8 @@ send_to_port(Process *c_p, Eterm message,
* (e.g. getting_linked) need not be the current process. That other
* process might not have timestamps enabled.
*/
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
if (*tracee_flags & F_TIMESTAMP) {
ASSERT(is_tuple(message));
hp = tuple_val(message);
@@ -522,6 +531,8 @@ send_to_port(Process *c_p, Eterm message,
*/
do_send_schedfix_to_port(trace_port, c_p->id, ts);
}
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
#endif
}
@@ -589,7 +600,10 @@ seq_trace_send_to_port(Process *c_p,
{
Port* trace_port;
#ifndef ERTS_SMP
- Eterm ts, local_heap[4], *hp;
+ Eterm ts, *hp;
+#define LOCAL_HEAP_SIZE (4)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#endif
ASSERT(is_internal_port(seq_tracer));
@@ -607,6 +621,9 @@ seq_trace_send_to_port(Process *c_p,
if (INVALID_TRACER_PORT(trace_port, seq_tracer)) {
invalid_tracer_port:
system_seq_tracer = am_false;
+#ifndef ERTS_SMP
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#endif
return;
}
@@ -620,6 +637,7 @@ seq_trace_send_to_port(Process *c_p,
message);
#ifndef ERTS_SMP
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
return;
}
/* Make a fake schedule only if the current process is traced
@@ -660,6 +678,8 @@ seq_trace_send_to_port(Process *c_p,
*/
do_send_schedfix_to_port(trace_port, c_p->id, ts);
}
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
#endif
}
@@ -719,7 +739,8 @@ send_to_tracer(Process *tracee,
static void
trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
{
- Eterm local_heap[5+4+1+TS_HEAP_WORDS];
+#define LOCAL_HEAP_SIZE (5+4+1+TS_HEAP_WORDS)
+ DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
Eterm tmp, mess, *hp;
ErlHeapFragment *bp = NULL;
ErlOffHeap *off_heap;
@@ -768,8 +789,10 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
curr_func = p->current != NULL;
}
+ UseTmpHeap(LOCAL_HEAP_SIZE,p);
+
if (to_port)
- hp = &local_heap[0];
+ hp = local_heap;
else {
Uint size = 5;
if (curr_func)
@@ -802,6 +825,8 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
}
send_to_tracer(p, tracer_ref, mess, &hp, bp, no_fake_sched);
+ UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
+#undef LOCAL_HEAP_SIZE
}
/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
@@ -848,7 +873,10 @@ trace_send(Process *p, Eterm to, Eterm msg)
}
if (is_internal_port(p->tracer_proc)) {
- Eterm local_heap[11];
+#define LOCAL_HEAP_SIZE (11)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
mess = TUPLE5(hp, am_trace, p->id, operation, msg, to);
hp += 6;
@@ -857,6 +885,8 @@ trace_send(Process *p, Eterm to, Eterm msg)
hp = patch_ts(mess, hp);
}
send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
Uint need;
@@ -908,7 +938,10 @@ trace_receive(Process *rp, Eterm msg)
Eterm* hp;
if (is_internal_port(rp->tracer_proc)) {
- Eterm local_heap[10];
+#define LOCAL_HEAP_SIZE (10)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
mess = TUPLE4(hp, am_trace, rp->id, am_receive, msg);
hp += 5;
@@ -917,6 +950,8 @@ trace_receive(Process *rp, Eterm msg)
hp = patch_ts(mess, hp);
}
send_to_port(rp, mess, &rp->tracer_proc, &rp->trace_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
Uint hsz;
@@ -1018,7 +1053,10 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
}
if (is_internal_port(seq_tracer)) {
- Eterm local_heap[64];
+#define LOCAL_HEAP_SIZE (64)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
label = SEQ_TRACE_T_LABEL(token);
lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
@@ -1043,6 +1081,8 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
mess = TUPLE4(hp, am_seq_trace, label, mess, ts);
seq_trace_send_to_port(process, seq_tracer, mess, ts);
}
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
#ifndef ERTS_SMP
@@ -1143,14 +1183,18 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
* or {trace, Pid, return_to, {Mod, Func, Arity}}
*/
void
-erts_trace_return_to(Process *p, Uint *pc)
+erts_trace_return_to(Process *p, BeamInstr *pc)
{
+#define LOCAL_HEAP_SIZE (4+5+5)
Eterm* hp;
Eterm mfa;
Eterm mess;
- Eterm local_heap[4+5+5];
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+
+ BeamInstr *code_ptr = find_function_from_pc(pc);
+
- Eterm *code_ptr = find_function_from_pc(pc);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
@@ -1196,6 +1240,8 @@ erts_trace_return_to(Process *p, Uint *pc)
mess = copy_struct(mess, size, &hp, off_heap);
ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
}
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -1204,7 +1250,7 @@ erts_trace_return_to(Process *p, Uint *pc)
* or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
*/
void
-erts_trace_return(Process* p, Eterm* fi, Eterm retval, Eterm *tracer_pid)
+erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
{
Eterm* hp;
Eterm mfa;
@@ -1262,7 +1308,9 @@ erts_trace_return(Process* p, Eterm* fi, Eterm retval, Eterm *tracer_pid)
arity = fi[2];
if (is_internal_port(*tracer_pid)) {
- Eterm local_heap[4+6+5];
+#define LOCAL_HEAP_SIZE (4+6+5)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
mfa = TUPLE3(hp, mod, name, make_small(arity));
hp += 4;
@@ -1273,6 +1321,8 @@ erts_trace_return(Process* p, Eterm* fi, Eterm retval, Eterm *tracer_pid)
hp = patch_ts(mess, hp);
}
send_to_port(p, mess, tracer_pid, tracee_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
ErlHeapFragment *bp;
@@ -1331,7 +1381,7 @@ erts_trace_return(Process* p, Eterm* fi, Eterm retval, Eterm *tracer_pid)
* Where Class is atomic but Value is any term.
*/
void
-erts_trace_exception(Process* p, Eterm mfa[3], Eterm class, Eterm value,
+erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
Eterm *tracer_pid)
{
Eterm* hp;
@@ -1385,21 +1435,26 @@ erts_trace_exception(Process* p, Eterm mfa[3], Eterm class, Eterm value,
}
if (is_internal_port(*tracer_pid)) {
- Eterm local_heap[4+3+6+5];
+#define LOCAL_HEAP_SIZE (4+3+6+5)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
- mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], make_small(mfa[2]));
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
hp += 4;
cv = TUPLE2(hp, class, value);
hp += 3;
mess = TUPLE5(hp, am_trace, p->id, am_exception_from, mfa_tuple, cv);
hp += 6;
- ASSERT((hp - local_heap)*sizeof(*hp) <= sizeof(local_heap));
+ ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE);
erts_smp_mtx_lock(&smq_mtx);
if (*tracee_flags & F_TIMESTAMP) {
hp = patch_ts(mess, hp); /* hp += 5 */
- ASSERT((hp - local_heap)*sizeof(*hp) == sizeof(local_heap));
+ ASSERT((hp - local_heap) == LOCAL_HEAP_SIZE);
}
send_to_port(p, mess, tracer_pid, tracee_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
ErlHeapFragment *bp;
@@ -1431,7 +1486,7 @@ erts_trace_exception(Process* p, Eterm mfa[3], Eterm class, Eterm value,
* Build the trace tuple and put it into receive queue of the tracer process.
*/
- mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], make_small(mfa[2]));
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm) mfa[2]));
hp += 4;
value = copy_struct(value, value_size, &hp, off_heap);
cv = TUPLE2(hp, class, value);
@@ -1468,7 +1523,7 @@ erts_trace_exception(Process* p, Eterm mfa[3], Eterm class, Eterm value,
* if it is a pid or port we do a meta trace.
*/
Uint32
-erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
+erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
Eterm* args, int local, Eterm *tracer_pid)
{
Eterm* hp;
@@ -1483,7 +1538,8 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
Eterm tracee;
#endif
Eterm transformed_args[MAX_ARG];
- ErlSubBin sub_bin_heap;
+ DeclareTmpHeap(sub_bin_heap_et,ERL_SUB_BIN_SIZE,p);
+ ErlSubBin *sub_bin_heap = (ErlSubBin *) sub_bin_heap_et;
ASSERT(tracer_pid);
if (*tracer_pid == am_true) {
@@ -1534,19 +1590,20 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
* such as size_object() and copy_struct(), we must make sure that we
* temporarily convert any match contexts to sub binaries.
*/
- arity = mfa[2];
+ arity = (Eterm) mfa[2];
+ UseTmpHeap(ERL_SUB_BIN_SIZE,p);
#ifdef DEBUG
- sub_bin_heap.thing_word = 0;
+ sub_bin_heap->thing_word = 0;
#endif
for (i = 0; i < arity; i++) {
Eterm arg = args[i];
if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(arg);
ErlBinMatchBuffer* mb = &ms->mb;
- ErlSubBin* sb = &sub_bin_heap;
+ ErlSubBin* sb = sub_bin_heap;
Uint bit_size;
- ASSERT(sub_bin_heap.thing_word == 0); /* At most one of match context */
+ ASSERT(sub_bin_heap->thing_word == 0); /* At most one of match context */
bit_size = mb->size - mb->offset;
sb->thing_word = HEADER_SUB_BIN;
@@ -1564,7 +1621,12 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
args = transformed_args;
if (is_internal_port(*tracer_pid)) {
+#if HEAP_ON_C_STACK
Eterm local_heap[64+MAX_ARG];
+#else
+ Eterm *local_heap = erts_alloc(ERTS_ALC_T_TEMP_TERM,
+ sizeof(Eterm)*(64+MAX_ARG));
+#endif
hp = local_heap;
if (!erts_is_valid_tracer_port(*tracer_pid)) {
@@ -1579,6 +1641,10 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
if (is_not_nil(tracee))
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
+#if !HEAP_ON_C_STACK
+ erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
+#endif
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return 0;
}
@@ -1605,6 +1671,10 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
&return_flags);
if (is_non_value(pam_result)) {
erts_match_set_release_result(p);
+#if !HEAP_ON_C_STACK
+ erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
+#endif
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return 0;
}
}
@@ -1612,16 +1682,28 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
/* Meta trace */
if (pam_result == am_false) {
erts_match_set_release_result(p);
+#if !HEAP_ON_C_STACK
+ erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
+#endif
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return return_flags;
}
} else {
/* Non-meta trace */
if (*tracee_flags & F_TRACE_SILENT) {
erts_match_set_release_result(p);
+#if !HEAP_ON_C_STACK
+ erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
+#endif
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return 0;
}
if (pam_result == am_false) {
erts_match_set_release_result(p);
+#if !HEAP_ON_C_STACK
+ erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
+#endif
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return return_flags;
}
if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
@@ -1644,7 +1726,7 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
hp += 2;
}
}
- mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], mfa_tuple);
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
hp += 4;
/*
@@ -1664,6 +1746,10 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
send_to_port(p, mess, tracer_pid, tracee_flags);
erts_smp_mtx_unlock(&smq_mtx);
erts_match_set_release_result(p);
+#if !HEAP_ON_C_STACK
+ erts_free(ERTS_ALC_T_TEMP_TERM,local_heap);
+#endif
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return *tracer_pid == NIL ? 0 : return_flags;
} else {
@@ -1706,6 +1792,7 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
if (is_not_nil(tracee))
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return 0;
}
@@ -1731,6 +1818,7 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
&return_flags);
if (is_non_value(pam_result)) {
erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return 0;
}
}
@@ -1738,16 +1826,19 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
/* Meta trace */
if (pam_result == am_false) {
erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return return_flags;
}
} else {
/* Non-meta trace */
if (*tracee_flags & F_TRACE_SILENT) {
erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return 0;
}
if (pam_result == am_false) {
erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return return_flags;
}
if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
@@ -1798,7 +1889,7 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
hp += 2;
}
}
- mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], mfa_tuple);
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
hp += 4;
/*
@@ -1831,6 +1922,7 @@ erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
ASSERT(hp == limit);
ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
return return_flags;
}
}
@@ -1850,8 +1942,13 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
Eterm* hp;
int need;
+ ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0) || erts_is_system_blocked(0));
if (is_internal_port(t_p->tracer_proc)) {
- Eterm local_heap[5+5];
+#define LOCAL_HEAP_SIZE (5+5)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
+
hp = local_heap;
mess = TUPLE4(hp, am_trace, t_p->id, what, data);
hp += 5;
@@ -1868,6 +1965,8 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
c_p,
#endif
mess, &t_p->tracer_proc, &t_p->trace_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
Eterm tmp;
@@ -1919,7 +2018,10 @@ trace_proc_spawn(Process *p, Eterm pid,
Eterm* hp;
if (is_internal_port(p->tracer_proc)) {
- Eterm local_heap[4+6+5];
+#define LOCAL_HEAP_SIZE (4+6+5)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
mfa = TUPLE3(hp, mod, func, args);
hp += 4;
@@ -1930,6 +2032,8 @@ trace_proc_spawn(Process *p, Eterm pid,
hp = patch_ts(mess, hp);
}
send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
Eterm tmp;
@@ -1991,7 +2095,7 @@ void save_calls(Process *p, Export *e)
*/
Eterm
erts_bif_trace(int bif_index, Process* p,
- Eterm arg1, Eterm arg2, Eterm arg3, Uint *I)
+ Eterm arg1, Eterm arg2, Eterm arg3, BeamInstr *I)
{
Eterm result;
int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
@@ -2005,28 +2109,22 @@ erts_bif_trace(int bif_index, Process* p,
* no tracing will occur. Doing the whole else branch will
* also do nothing, only slower.
*/
- Eterm (*func)(Process*, Eterm, Eterm, Eterm, Uint*) = bif_table[bif_index].f;
+ Eterm (*func)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = bif_table[bif_index].f;
result = func(p, arg1, arg2, arg3, I);
} else {
- Eterm (*func)(Process*, Eterm, Eterm, Eterm, Uint*);
+ Eterm (*func)(Process*, Eterm, Eterm, Eterm, BeamInstr*);
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
+ int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
Eterm meta_tracer_pid = NIL;
int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
* is actually in the
* export entry */
- Eterm *cp = p->cp;
+ BeamInstr *cp = p->cp;
-#ifndef _OSE_
Eterm args[3] = {arg1, arg2, arg3};
-#else
- Eterm args[3];
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
-#endif
/*
* Make continuation pointer OK, it is not during direct BIF calls,
@@ -2043,6 +2141,17 @@ erts_bif_trace(int bif_index, Process* p,
flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
&meta_tracer_pid);
}
+ if (time) {
+ BpDataTime *bdt = NULL;
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+
+ bdt = (BpDataTime *) erts_get_time_break(p, pc);
+ ASSERT(bdt);
+
+ if (!bdt->pause) {
+ erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
+ }
+ }
/* Restore original continuation pointer (if changed). */
p->cp = cp;
@@ -2051,17 +2160,21 @@ erts_bif_trace(int bif_index, Process* p,
result = func(p, arg1, arg2, arg3, I);
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- Uint i_return_trace = beam_return_trace[0];
- Uint i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
Eterm *cpp;
/* Maybe advance cp to skip trace stack frames */
for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
- ASSERT(is_CP((Eterm) cp));
- if (*cp_val((Eterm) cp) == i_return_trace) {
+ if (*cp == i_return_trace) {
/* Skip stack frame variables */
while (is_not_CP(*cpp)) cpp++;
cpp += 2; /* Skip return_trace parameters */
- } else if (*cp_val((Eterm) cp) == i_return_to_trace) {
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
+ } else if (*cp == i_return_to_trace) {
/* A return_to trace message is going to be generated
* by normal means, so we do not have to.
*/
@@ -2078,7 +2191,8 @@ erts_bif_trace(int bif_index, Process* p,
if (reason != TRAP) {
Eterm class;
Eterm value = p->fvalue;
- Eterm nocatch[3];
+ DeclareTmpHeapNoproc(nocatch,3);
+ UseTmpHeapNoproc(3);
/* Expand error value like in handle_error() */
if (reason & EXF_ARGLIST) {
Eterm *tp;
@@ -2126,6 +2240,7 @@ erts_bif_trace(int bif_index, Process* p,
}
}
}
+ UnUseTmpHeapNoproc(3);
if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
p->trace_flags |= F_EXCEPTION_TRACE;
@@ -2213,15 +2328,19 @@ trace_gc(Process *p, Eterm what)
BIN_OLD_VHEAP(p),
BIN_OLD_VHEAP_SZ(p)
};
- Eterm local_heap[(sizeof(values)/sizeof(Uint))
- *(2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE)
- + 5/*4-tuple */ + TS_HEAP_WORDS];
+#define LOCAL_HEAP_SIZE \
+ (sizeof(values)/sizeof(Eterm)) * \
+ (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE) + \
+ 5/*4-tuple */ + TS_HEAP_WORDS
+ DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
#ifdef DEBUG
Eterm* limit;
#endif
ASSERT(sizeof(values)/sizeof(Uint) == sizeof(tags)/sizeof(Eterm));
+ UseTmpHeap(LOCAL_HEAP_SIZE,p);
+
if (is_internal_port(p->tracer_proc)) {
hp = local_heap;
#ifdef DEBUG
@@ -2252,7 +2371,7 @@ trace_gc(Process *p, Eterm what)
#ifdef DEBUG
limit = hp + size;
- ASSERT(size <= sizeof(local_heap)/sizeof(Eterm));
+ ASSERT(size <= LOCAL_HEAP_SIZE);
#endif
msg = erts_bld_atom_uint_2tup_list(&hp,
@@ -2275,6 +2394,8 @@ trace_gc(Process *p, Eterm what)
else
ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, msg, bp);
erts_smp_mtx_unlock(&smq_mtx);
+ UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
+#undef LOCAL_HEAP_SIZE
}
@@ -2465,7 +2586,9 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
Uint Ms, s, us;
#ifndef ERTS_SMP
- Eterm local_heap[4 + 7];
+#define LOCAL_HEAP_SIZE (4 + 7)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
#else
ErlHeapFragment *bp;
@@ -2498,6 +2621,8 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
#ifndef ERTS_SMP
profile_send(msg);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
#endif
@@ -2510,7 +2635,10 @@ profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint M
Eterm *hp, msg, timestamp;
#ifndef ERTS_SMP
- Eterm local_heap[4 + 7];
+#define LOCAL_HEAP_SIZE (4 + 7)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
#else
ErlHeapFragment *bp;
@@ -2528,6 +2656,8 @@ profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint M
msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id, state, no_schedulers, timestamp); hp += 7;
#ifndef ERTS_SMP
profile_send(msg);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
#endif
@@ -2558,7 +2688,10 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
Eterm* hp;
if (is_internal_port(p->tracer_proc)) {
- Eterm local_heap[5+6];
+#define LOCAL_HEAP_SIZE (5+6)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
@@ -2569,6 +2702,8 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
}
/* No fake schedule */
send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
ErlHeapFragment *bp;
@@ -2612,8 +2747,13 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
Eterm mess;
Eterm* hp;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_is_system_blocked(0));
+
if (is_internal_port(t_p->tracer_proc)) {
- Eterm local_heap[5+5];
+#define LOCAL_HEAP_SIZE (5+5)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
mess = TUPLE4(hp, am_trace, t_p->id, what, data);
hp += 5;
@@ -2623,6 +2763,8 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
}
/* No fake schedule */
send_to_port(NULL, mess, &t_p->tracer_proc, &t_p->trace_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
ErlHeapFragment *bp;
@@ -2674,7 +2816,10 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
Eterm sched_id = am_undefined;
if (is_internal_port(p->tracer_proc)) {
- Eterm local_heap[5+6];
+#define LOCAL_HEAP_SIZE (5+6)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
@@ -2700,6 +2845,8 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
/* No fake scheduling */
send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
} else {
ErlHeapFragment *bp;
@@ -2750,7 +2897,11 @@ profile_runnable_port(Port *p, Eterm status) {
Eterm count = make_small(0);
#ifndef ERTS_SMP
- Eterm local_heap[4 + 6];
+#define LOCAL_HEAP_SIZE (4 + 6)
+
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
#else
@@ -2771,6 +2922,8 @@ profile_runnable_port(Port *p, Eterm status) {
#ifndef ERTS_SMP
profile_send(msg);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
#endif
@@ -2785,7 +2938,11 @@ profile_runnable_proc(Process *p, Eterm status){
Eterm where = am_undefined;
#ifndef ERTS_SMP
- Eterm local_heap[4 + 6 + 4];
+#define LOCAL_HEAP_SIZE (4 + 6 + 4)
+
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+
hp = local_heap;
#else
ErlHeapFragment *bp;
@@ -2818,6 +2975,8 @@ profile_runnable_proc(Process *p, Eterm status){
msg = TUPLE5(hp, am_profile, p->id, status, where, timestamp); hp += 6;
#ifndef ERTS_SMP
profile_send(msg);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
#endif
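
For reference, a minimal sketch of the temporary-heap pattern the trace hunks above switch to (illustrative only, not part of the patch; build_example_message is a hypothetical function and the tuple contents are placeholders, while DeclareTmpHeapNoproc/UseTmpHeapNoproc/UnUseTmpHeapNoproc and TUPLE3 are the macros defined in global.h and erl_term.h):

#define LOCAL_HEAP_SIZE 4                   /* one 3-tuple: header + 3 elements */
static void build_example_message(void)
{
    Eterm *hp, msg;
    DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);

    UseTmpHeapNoproc(LOCAL_HEAP_SIZE);      /* reserve the words (no-op when
                                               HEAP_ON_C_STACK)               */
    hp = local_heap;
    msg = TUPLE3(hp, am_profile, am_scheduler, make_small(0));
    hp += 4;
    (void) msg;                             /* ... send or enqueue msg ...    */
    UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);    /* release before returning       */
}
#undef LOCAL_HEAP_SIZE
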
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index ab5811c70f..d01a3661f9 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2008-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2008-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -90,9 +90,9 @@ void erts_init_unicode(void)
am_atom_put("characters_to_utf8_trap",23);
characters_to_utf8_trap_exp.code[2] = 3;
characters_to_utf8_trap_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
characters_to_utf8_trap_exp.code[4] =
- (Eterm) &characters_to_utf8_trap;
+ (BeamInstr) &characters_to_utf8_trap;
memset(&characters_to_list_trap_1_exp, 0, sizeof(Export));
characters_to_list_trap_1_exp.address =
@@ -102,9 +102,9 @@ void erts_init_unicode(void)
am_atom_put("characters_to_list_trap_1",25);
characters_to_list_trap_1_exp.code[2] = 3;
characters_to_list_trap_1_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
characters_to_list_trap_1_exp.code[4] =
- (Eterm) &characters_to_list_trap_1;
+ (BeamInstr) &characters_to_list_trap_1;
memset(&characters_to_list_trap_2_exp, 0, sizeof(Export));
characters_to_list_trap_2_exp.address =
@@ -114,9 +114,9 @@ void erts_init_unicode(void)
am_atom_put("characters_to_list_trap_2",25);
characters_to_list_trap_2_exp.code[2] = 3;
characters_to_list_trap_2_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
characters_to_list_trap_2_exp.code[4] =
- (Eterm) &characters_to_list_trap_2;
+ (BeamInstr) &characters_to_list_trap_2;
memset(&characters_to_list_trap_3_exp, 0, sizeof(Export));
@@ -127,9 +127,9 @@ void erts_init_unicode(void)
am_atom_put("characters_to_list_trap_3",25);
characters_to_list_trap_3_exp.code[2] = 3;
characters_to_list_trap_3_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
characters_to_list_trap_3_exp.code[4] =
- (Eterm) &characters_to_list_trap_3;
+ (BeamInstr) &characters_to_list_trap_3;
memset(&characters_to_list_trap_4_exp, 0, sizeof(Export));
characters_to_list_trap_4_exp.address =
@@ -139,9 +139,9 @@ void erts_init_unicode(void)
am_atom_put("characters_to_list_trap_4",25);
characters_to_list_trap_4_exp.code[2] = 1;
characters_to_list_trap_4_exp.code[3] =
- (Eterm) em_apply_bif;
+ (BeamInstr) em_apply_bif;
characters_to_list_trap_4_exp.code[4] =
- (Eterm) &characters_to_list_trap_4;
+ (BeamInstr) &characters_to_list_trap_4;
c_to_b_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_binary_int,2);
c_to_l_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_list_int,2);
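
A standalone sketch of the trap-export setup that the erl_unicode.c hunks above retype from Eterm to BeamInstr (illustrative; my_trap/my_trap_3 are hypothetical names, the rest follows the pattern shown):

static Export my_trap_exp;

static BIF_RETTYPE my_trap_3(BIF_ALIST_3)   /* hypothetical trap BIF */
{
    BIF_RET(am_ok);
}

static void init_my_trap(void)
{
    memset(&my_trap_exp, 0, sizeof(Export));
    my_trap_exp.address = &my_trap_exp.code[3];
    my_trap_exp.code[0] = am_erlang;                  /* module (tagged atom)   */
    my_trap_exp.code[1] = am_atom_put("my_trap", 7);  /* function (tagged atom) */
    my_trap_exp.code[2] = 3;                          /* arity                  */
    my_trap_exp.code[3] = (BeamInstr) em_apply_bif;   /* dispatch instruction   */
    my_trap_exp.code[4] = (BeamInstr) &my_trap_3;     /* C function to call     */
}
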
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index 50b3e5b61c..cd63401581 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -50,10 +50,20 @@
#define MAX_ARG 256 /* Max number of arguments allowed */
#define MAX_REG 1024 /* Max number of x(N) registers used */
+/* When !HEAP_ON_C_STACK, each scheduler keeps a buffer for temporary heap
+   data. The *TmpHeap* macros in global.h select whether temporary heap data
+   goes on the C stack or into those scheduler buffers. */
+#define TMP_HEAP_SIZE 128 /* Number of Eterm in the schedulers
+ small heap for transient heap data */
+#define CMP_TMP_HEAP_SIZE 2 /* cmp wants its own tmp-heap... */
+#define ERL_ARITH_TMP_HEAP_SIZE 4 /* as does erl_arith... */
+#define BEAM_EMU_TMP_HEAP_SIZE 2 /* and beam_emu... */
+
/*
* The new arithmetic operations need some extra X registers in the register array.
+ * So do the gc_bifs (i_gc_bif3 needs 3 extra).
*/
-#define ERTS_X_REGS_ALLOCATED (MAX_REG+2)
+#define ERTS_X_REGS_ALLOCATED (MAX_REG+3)
#define INPUT_REDUCTIONS (2 * CONTEXT_REDS)
@@ -74,6 +84,7 @@
#define ErtsHAllocLockCheck(P) \
ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks((P))) \
+ || ((P)->id == ERTS_INVALID_PID) \
|| ((P)->scheduler_data \
&& (P) == (P)->scheduler_data->match_pseudo_process) \
|| erts_is_system_blocked(0))
@@ -130,8 +141,12 @@
#define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p))
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
+#if HALFWORD_HEAP
+# define ERTS_HOLE_MARKER (0xaf5e78ccU)
+#else
# define ERTS_HOLE_MARKER (((0xaf5e78ccUL << 24) << 8) | 0xaf5e78ccUL)
#endif
+#endif
/*
* Allocate heap memory on the ordinary heap, NEVER in a heap
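
The ERTS_HOLE_MARKER split above is needed because a halfword-heap Eterm is 32 bits wide even on a 64-bit machine, so the old 64-bit marker no longer fits in one heap word. As a rough illustration of how such a marker is used in debug builds (fill_hole is a hypothetical helper, not ERTS API):

/* Fill an unused stretch of heap words with the hole marker so that a later
 * debug scan can detect stray writes into supposedly dead memory. */
static void fill_hole(Eterm *start, Eterm *end)
{
    while (start < end) {
        *start++ = ERTS_HOLE_MARKER;   /* 32-bit pattern on halfword heaps,
                                        * a 64-bit pattern otherwise        */
    }
}
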
diff --git a/erts/emulator/beam/error.h b/erts/emulator/beam/error.h
index 4930def4ed..ddc2c1396d 100644
--- a/erts/emulator/beam/error.h
+++ b/erts/emulator/beam/error.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -187,10 +187,10 @@ extern Eterm exception_tag[NUMBER_EXC_TAGS];
struct StackTrace {
Eterm header; /* bignum header - must be first in struct */
Eterm freason; /* original exception reason is saved in the struct */
- Eterm* pc;
- Eterm* current;
+ BeamInstr* pc;
+ BeamInstr* current;
int depth; /* number of saved pointers in trace[] */
- Eterm *trace[1]; /* varying size - must be last in struct */
+ BeamInstr *trace[1]; /* varying size - must be last in struct */
};
#endif /* __ERROR_H__ */
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 271b40cf0f..5e81a2d624 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -43,11 +43,9 @@ static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table.
#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
-#define export_init_lock() erts_smp_rwmtx_init(&export_table_lock, \
- "export_tab")
-extern Eterm* em_call_error_handler;
-extern Uint* em_call_traced_function;
+extern BeamInstr* em_call_error_handler;
+extern BeamInstr* em_call_traced_function;
void
export_info(int to, void *to_arg)
@@ -93,7 +91,7 @@ export_alloc(Export* tmpl)
obj->code[2] = tmpl->code[2];
obj->slot.index = -1;
obj->address = obj->code+3;
- obj->code[3] = (Eterm) em_call_error_handler;
+ obj->code[3] = (BeamInstr) em_call_error_handler;
obj->code[4] = 0;
obj->match_prog_set = NULL;
return obj;
@@ -111,8 +109,12 @@ void
init_export_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
- export_init_lock();
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
@@ -140,7 +142,7 @@ init_export_table(void)
Export*
erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
{
- HashValue hval = EXPORT_HASH(m, f, a);
+ HashValue hval = EXPORT_HASH((BeamInstr) m, (BeamInstr) f, (BeamInstr) a);
int ix;
HashBucket* b;
@@ -185,7 +187,7 @@ erts_find_function(Eterm m, Eterm f, unsigned int a)
ep = hash_get(&export_table.htable, (void*) &e);
if (ep != NULL && ep->address == ep->code+3 &&
- ep->code[3] != (Uint) em_call_traced_function) {
+ ep->code[3] != (BeamInstr) em_call_traced_function) {
ep = NULL;
}
return ep;
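
The init_export_table() change above replaces the plain rwlock with a reader-optimized one: the export table is read on essentially every external call but written only when code is loaded, so FREQUENT_READ trades more expensive write locking for cheaper, mostly uncontended read locking. A sketch of the same initialization for some other long-lived, read-mostly table (my_table_lock/"my_table" are hypothetical; the option struct and constants are used exactly as in the hunk above):

static erts_smp_rwmtx_t my_table_lock;

static void init_my_table_lock(void)
{
    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
    rwmtx_opt.type  = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;   /* read-mostly     */
    rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;            /* never destroyed */

    erts_smp_rwmtx_init_opt(&my_table_lock, &rwmtx_opt, "my_table");
}
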
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index cd6af6dd85..c604fdf7c3 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -37,7 +37,7 @@ typedef struct export
void* address; /* Pointer to code for function. */
struct binary* match_prog_set; /* Match program for tracing. */
- Eterm fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
* code[0]: Tagged atom for module.
* code[1]: Tagged atom for function.
@@ -52,7 +52,7 @@ typedef struct export
* on_load function that has not been run yet.
* Otherwise: 0.
*/
- Eterm code[5];
+ BeamInstr code[5];
} Export;
@@ -74,6 +74,6 @@ Export *export_get(Export*);
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
(((EntryPtr)->address == (EntryPtr)->code + 3) && \
- ((EntryPtr)->code[3] == (Uint) em_apply_bif))
+ ((EntryPtr)->code[3] == (BeamInstr) em_apply_bif))
#endif
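
For orientation, a sketch of how the retyped code[] words of an Export entry are interpreted (describe_export is hypothetical; the layout and the em_* comparisons follow export.h/export.c above):

static void describe_export(Export *ep)
{
    Eterm mod  = (Eterm) ep->code[0];   /* tagged module atom   */
    Eterm func = (Eterm) ep->code[1];   /* tagged function atom */
    Uint  ar   = (Uint)  ep->code[2];   /* untagged arity       */

    if (ep->address == ep->code + 3
        && ep->code[3] == (BeamInstr) em_apply_bif) {
        /* BIF stub: code[4] holds the C function pointer. */
    } else if (ep->address == ep->code + 3
               && ep->code[3] == (BeamInstr) em_call_error_handler) {
        /* No loaded code: calls are routed to the error handler. */
    }
    (void) mod; (void) func; (void) ar;
}
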
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 099eddd195..d7c8aa84e9 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -65,11 +65,9 @@
# endif
#endif
-/*
- * For backward compatibility reasons, only encode integers that
- * fit in 28 bits (signed) using INTEGER_EXT.
+/* Does Sint fit in Sint32?
*/
-#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
+#define IS_SSMALL32(x) (((Uint) (((x) >> (32-1)) + 1)) < 2)
/*
* Valid creations for nodes are 1, 2, or 3. 0 can also be sent
@@ -504,7 +502,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
ASSERT(edep->ext_endp >= edep->extp);
ext_sz = edep->ext_endp - edep->extp;
- align_sz = ERTS_WORD_ALIGN_PAD_SZ(dist_ext_sz + ext_sz);
+ align_sz = ERTS_EXTRA_DATA_ALIGN_SZ(dist_ext_sz + ext_sz);
new_edep = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA,
dist_ext_sz + ext_sz + align_sz + xsize);
@@ -1470,11 +1468,11 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
*hpp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_pid_header(1);
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
etp->data.ui[0] = data;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*) etp;
*objp = make_external_pid(etp);
}
return ep;
@@ -1489,20 +1487,28 @@ dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Ete
static byte*
enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
{
- DECLARE_ESTACK(s);
+ DECLARE_WSTACK(s);
Uint n;
Uint i;
Uint j;
Uint* ptr;
Eterm val;
FloatDef f;
+#if HALFWORD_HEAP
+ UWord wobj;
+#endif
+
goto L_jump_start;
outer_loop:
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- switch (val = ESTACK_POP(s)) {
+ while (!WSTACK_ISEMPTY(s)) {
+#if HALFWORD_HEAP
+ obj = (Eterm) (wobj = WSTACK_POP(s));
+#else
+ obj = WSTACK_POP(s);
+#endif
+ switch (val = WSTACK_POP(s)) {
case ENC_TERM:
break;
case ENC_ONE_CONS:
@@ -1513,29 +1519,40 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
obj = CAR(cons);
tl = CDR(cons);
- ESTACK_PUSH(s, is_list(tl) ? ENC_ONE_CONS : ENC_TERM);
- ESTACK_PUSH(s, tl);
+ WSTACK_PUSH(s, is_list(tl) ? ENC_ONE_CONS : ENC_TERM);
+ WSTACK_PUSH(s, tl);
}
break;
case ENC_PATCH_FUN_SIZE:
{
+#if HALFWORD_HEAP
+ byte* size_p = (byte *) wobj;
+#else
byte* size_p = (byte *) obj;
-
+#endif
put_int32(ep - size_p, size_p);
}
goto outer_loop;
case ENC_LAST_ARRAY_ELEMENT:
{
+#if HALFWORD_HEAP
+ Eterm* ptr = (Eterm *) wobj;
+#else
Eterm* ptr = (Eterm *) obj;
+#endif
obj = *ptr;
}
break;
default: /* ENC_LAST_ARRAY_ELEMENT+1 and upwards */
{
+#if HALFWORD_HEAP
+ Eterm* ptr = (Eterm *) wobj;
+#else
Eterm* ptr = (Eterm *) obj;
+#endif
obj = *ptr++;
- ESTACK_PUSH(s, val-1);
- ESTACK_PUSH(s, (Eterm) ptr);
+ WSTACK_PUSH(s, val-1);
+ WSTACK_PUSH(s, (UWord) ptr);
}
break;
}
@@ -1552,19 +1569,23 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
case SMALL_DEF:
{
+ /* From R14B we no longer restrict INTEGER_EXT to 28 bits,
+ * as done earlier for backward compatibility reasons. */
Sint val = signed_val(obj);
if ((Uint)val < 256) {
*ep++ = SMALL_INTEGER_EXT;
put_int8(val, ep);
ep++;
- } else if (sizeof(Sint) == 4 || IS_SSMALL28(val)) {
+ } else if (sizeof(Sint) == 4 || IS_SSMALL32(val)) {
*ep++ = INTEGER_EXT;
put_int32(val, ep);
ep += 4;
} else {
- Eterm tmp_big[2];
- Eterm big = small_to_big(val, tmp_big);
+ DeclareTmpHeapNoproc(tmp_big,2);
+ Eterm big;
+ UseTmpHeapNoproc(2);
+ big = small_to_big(val, tmp_big);
*ep++ = SMALL_BIG_EXT;
n = big_bytes(big);
ASSERT(n < 256);
@@ -1572,23 +1593,38 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
ep += 1;
*ep++ = big_sign(big);
ep = big_to_bytes(big, ep);
+ UnUseTmpHeapNoproc(2);
}
}
break;
case BIG_DEF:
- if ((n = big_bytes(obj)) < 256) {
- *ep++ = SMALL_BIG_EXT;
- put_int8(n, ep);
- ep += 1;
- }
- else {
- *ep++ = LARGE_BIG_EXT;
- put_int32(n, ep);
- ep += 4;
+ {
+ int sign = big_sign(obj);
+ n = big_bytes(obj);
+ if (sizeof(Sint)==4 && n<=4) {
+ Uint dig = big_digit(obj,0);
+ Sint val = sign ? -dig : dig;
+ if ((val<0) == sign) {
+ *ep++ = INTEGER_EXT;
+ put_int32(val, ep);
+ ep += 4;
+ break;
+ }
+ }
+ if (n < 256) {
+ *ep++ = SMALL_BIG_EXT;
+ put_int8(n, ep);
+ ep += 1;
+ }
+ else {
+ *ep++ = LARGE_BIG_EXT;
+ put_int32(n, ep);
+ ep += 4;
+ }
+ *ep++ = sign;
+ ep = big_to_bytes(obj, ep);
}
- *ep++ = big_sign(obj);
- ep = big_to_bytes(obj, ep);
break;
case PID_DEF:
@@ -1662,8 +1698,8 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
ep += 4;
}
if (i > 0) {
- ESTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1);
- ESTACK_PUSH(s, (Eterm) ptr);
+ WSTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1);
+ WSTACK_PUSH(s, (UWord) ptr);
}
break;
@@ -1741,7 +1777,7 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
}
case EXPORT_DEF:
{
- Export* exp = (Export *) (export_val(obj))[1];
+ Export* exp = *((Export **) (export_val(obj) + 1));
if ((dflags & DFLAG_EXPORT_PTR_TAG) != 0) {
*ep++ = EXPORT_EXT;
ep = enc_atom(acmp, exp->code[0], ep, dflags);
@@ -1770,8 +1806,8 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
int ei;
*ep++ = NEW_FUN_EXT;
- ESTACK_PUSH(s, ENC_PATCH_FUN_SIZE);
- ESTACK_PUSH(s, (Eterm) ep); /* Position for patching in size */
+ WSTACK_PUSH(s, ENC_PATCH_FUN_SIZE);
+ WSTACK_PUSH(s, (UWord) ep); /* Position for patching in size */
ep += 4;
*ep = funp->arity;
ep += 1;
@@ -1788,8 +1824,8 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
fun_env:
for (ei = funp->num_free-1; ei > 0; ei--) {
- ESTACK_PUSH(s, ENC_TERM);
- ESTACK_PUSH(s, funp->env[ei]);
+ WSTACK_PUSH(s, ENC_TERM);
+ WSTACK_PUSH(s, (UWord) funp->env[ei]);
}
if (funp->num_free != 0) {
obj = funp->env[0];
@@ -1832,7 +1868,7 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
break;
}
}
- DESTROY_ESTACK(s);
+ DESTROY_WSTACK(s);
return ep;
}
@@ -1874,69 +1910,30 @@ is_external_string(Eterm list, int* p_is_string)
return len;
}
-/* Assumes that the ones to undo are preluding the lists. */
+/* Assumes that the ones to undo precede the list. */
static void
undo_offheap_in_area(ErlOffHeap* off_heap, Eterm* start, Eterm* end)
{
const Uint area_sz = (end - start) * sizeof(Eterm);
- struct proc_bin* mso;
- struct proc_bin** mso_nextp = NULL;
-#ifndef HYBRID /* FIND ME! */
- struct erl_fun_thing* funs;
- struct erl_fun_thing** funs_nextp = NULL;
-#endif
- struct external_thing_* ext;
- struct external_thing_** ext_nextp = NULL;
-
- for (mso = off_heap->mso; ; mso=mso->next) {
- if (!in_area(mso, start, area_sz)) {
- if (mso_nextp != NULL) {
- *mso_nextp = NULL;
- erts_cleanup_mso(off_heap->mso);
- off_heap->mso = mso;
+ struct erl_off_heap_header* hdr;
+ struct erl_off_heap_header** hdr_nextp = NULL;
+
+ for (hdr = off_heap->first; ; hdr=hdr->next) {
+ if (!in_area(hdr, start, area_sz)) {
+ if (hdr_nextp != NULL) {
+ *hdr_nextp = NULL;
+ erts_cleanup_offheap(off_heap);
+ off_heap->first = hdr;
}
break;
}
- mso_nextp = &mso->next;
+ hdr_nextp = &hdr->next;
}
-#ifndef HYBRID /* FIND ME! */
- for (funs = off_heap->funs; ; funs=funs->next) {
- if (!in_area(funs, start, area_sz)) {
- if (funs_nextp != NULL) {
- *funs_nextp = NULL;
- erts_cleanup_funs(off_heap->funs);
- off_heap->funs = funs;
- }
- break;
- }
- funs_nextp = &funs->next;
- }
-#endif
- for (ext = off_heap->externals; ; ext=ext->next) {
- if (!in_area(ext, start, area_sz)) {
- if (ext_nextp != NULL) {
- *ext_nextp = NULL;
- erts_cleanup_externals(off_heap->externals);
- off_heap->externals = ext;
- }
- break;
- }
- ext_nextp = &ext->next;
- }
-
- /* Assert that the ones to undo were indeed preluding the lists. */
+ /* Assert that the ones to undo did indeed precede the list. */
#ifdef DEBUG
- for (mso = off_heap->mso; mso != NULL; mso=mso->next) {
- ASSERT(!in_area(mso, start, area_sz));
- }
-# ifndef HYBRID /* FIND ME! */
- for (funs = off_heap->funs; funs != NULL; funs=funs->next) {
- ASSERT(!in_area(funs, start, area_sz));
- }
-# endif
- for (ext = off_heap->externals; ext != NULL; ext=ext->next) {
- ASSERT(!in_area(ext, start, area_sz));
+ for (hdr = off_heap->first; hdr != NULL; hdr = hdr->next) {
+ ASSERT(!in_area(hdr, start, area_sz));
}
#endif /* DEBUG */
}
@@ -1952,11 +1949,11 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Et
register Eterm* hp = *hpp; /* Please don't take the address of hp */
Eterm* next = objp;
- *next = (Eterm) NULL;
+ *next = (Eterm) (UWord) NULL;
while (next != NULL) {
objp = next;
- next = (Eterm *) (*objp);
+ next = (Eterm *) EXPAND_POINTER(*objp);
switch (*ep++) {
case INTEGER_EXT:
@@ -1964,7 +1961,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Et
Sint sn = get_int32(ep);
ep += 4;
-#if defined(ARCH_64)
+#if defined(ARCH_64) && !HALFWORD_HEAP
*objp = make_small(sn);
#else
if (MY_IS_SSMALL(sn)) {
@@ -2061,7 +2058,7 @@ dec_term_atom_common:
hp += n;
objp = hp - 1;
while (n-- > 0) {
- objp[0] = (Eterm) next;
+ objp[0] = (Eterm) COMPRESS_POINTER(next);
next = objp;
objp--;
}
@@ -2079,12 +2076,12 @@ dec_term_atom_common:
*objp = make_list(hp);
hp += 2*n;
objp = hp - 2;
- objp[0] = (Eterm) (objp+1);
- objp[1] = (Eterm) next;
+ objp[0] = (Eterm) COMPRESS_POINTER((objp+1));
+ objp[1] = (Eterm) COMPRESS_POINTER(next);
next = objp;
objp -= 2;
while (--n > 0) {
- objp[0] = (Eterm) next;
+ objp[0] = (Eterm) COMPRESS_POINTER(next);
objp[1] = make_list(objp + 2);
next = objp;
objp -= 2;
@@ -2180,11 +2177,11 @@ dec_term_atom_common:
hp += EXTERNAL_THING_HEAD_SIZE + 1;
etp->header = make_external_port_header(1);
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
etp->data.ui[0] = num;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_port(etp);
}
@@ -2238,32 +2235,39 @@ dec_term_atom_common:
node = erts_find_or_insert_node(sysname, cre);
if(node == erts_this_node) {
RefThing *rtp = (RefThing *) hp;
- hp += REF_THING_HEAD_SIZE;
-#ifdef ARCH_64
+ ref_num = (Uint32 *) (hp + REF_THING_HEAD_SIZE);
+
+#if defined(ARCH_64) && !HALFWORD_HEAP
+ hp += REF_THING_HEAD_SIZE + ref_words/2 + 1;
rtp->header = make_ref_thing_header(ref_words/2 + 1);
#else
+ hp += REF_THING_HEAD_SIZE + ref_words;
rtp->header = make_ref_thing_header(ref_words);
#endif
*objp = make_internal_ref(rtp);
}
else {
ExternalThing *etp = (ExternalThing *) hp;
- hp += EXTERNAL_THING_HEAD_SIZE;
-
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
+ hp += EXTERNAL_THING_HEAD_SIZE + ref_words/2 + 1;
+#else
+ hp += EXTERNAL_THING_HEAD_SIZE + ref_words;
+#endif
+
+#if defined(ARCH_64) && !HALFWORD_HEAP
etp->header = make_external_ref_header(ref_words/2 + 1);
#else
etp->header = make_external_ref_header(ref_words);
#endif
- etp->next = off_heap->externals;
+ etp->next = off_heap->first;
etp->node = node;
- off_heap->externals = etp;
+ off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_ref(etp);
+ ref_num = &(etp->data.ui32[0]);
}
- ref_num = (Uint32 *) hp;
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
*(ref_num++) = ref_words /* 32-bit arity */;
#endif
ref_num[0] = r0;
@@ -2271,12 +2275,9 @@ dec_term_atom_common:
ref_num[i] = get_int32(ep);
ep += 4;
}
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
if ((1 + ref_words) % 2)
ref_num[ref_words] = 0;
- hp += ref_words/2 + 1;
-#else
- hp += ref_words;
#endif
break;
}
@@ -2304,8 +2305,8 @@ dec_term_atom_common:
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->mso;
- off_heap->mso = pb;
+ pb->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)pb;
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -2341,8 +2342,8 @@ dec_term_atom_common:
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
- pb->next = off_heap->mso;
- off_heap->mso = pb;
+ pb->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)pb;
pb->val = dbin;
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
@@ -2398,7 +2399,12 @@ dec_term_atom_common:
}
*objp = make_export(hp);
*hp++ = HEADER_EXPORT;
+#if HALFWORD_HEAP
+ *((UWord *) (UWord) hp) = (UWord) erts_export_get_or_make_stub(mod, name, arity);
+ hp += 2;
+#else
*hp++ = (Eterm) erts_export_get_or_make_stub(mod, name, arity);
+#endif
break;
}
break;
@@ -2457,8 +2463,8 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)funp;
#endif
funp->fe = erts_put_fun_entry2(module, old_uniq, old_index,
@@ -2474,11 +2480,11 @@ dec_term_atom_common:
/* Environment */
for (i = num_free-1; i >= 0; i--) {
- funp->env[i] = (Eterm) next;
+ funp->env[i] = (Eterm) COMPRESS_POINTER(next);
next = funp->env + i;
}
/* Creator */
- funp->creator = (Eterm) next;
+ funp->creator = (Eterm) COMPRESS_POINTER(next);
next = &(funp->creator);
break;
}
@@ -2535,8 +2541,8 @@ dec_term_atom_common:
* It is safe to link the fun into the fun list only when
* no more validity tests can fail.
*/
- funp->next = off_heap->funs;
- off_heap->funs = funp;
+ funp->next = off_heap->first;
+ off_heap->first = (struct erl_off_heap_header*)funp;
#endif
old_uniq = unsigned_val(temp);
@@ -2549,7 +2555,7 @@ dec_term_atom_common:
/* Environment */
for (i = num_free-1; i >= 0; i--) {
- funp->env[i] = (Eterm) next;
+ funp->env[i] = (Eterm) COMPRESS_POINTER(next);
next = funp->env + i;
}
break;
@@ -2580,26 +2586,35 @@ dec_term_atom_common:
static Uint
encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
{
- DECLARE_ESTACK(s);
+ DECLARE_WSTACK(s);
Uint m, i, arity;
Uint result = 0;
+#if HALFWORD_HEAP
+ UWord wobj = 0;
+#endif
goto L_jump_start;
outer_loop:
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
-
+ while (!WSTACK_ISEMPTY(s)) {
+#if HALFWORD_HEAP
+ obj = (Eterm) (wobj = WSTACK_POP(s));
+#else
+ obj = WSTACK_POP(s);
+#endif
handle_popped_obj:
- if (is_CP(obj)) {
+ if (is_CP(obj)) { /* Does not look for CP, looks for "no tag" */
+#if HALFWORD_HEAP
+ Eterm* ptr = (Eterm *) wobj;
+#else
Eterm* ptr = (Eterm *) obj;
-
+#endif
/*
* Pointer into a tuple.
*/
obj = *ptr--;
if (!is_header(obj)) {
- ESTACK_PUSH(s, (Eterm)ptr);
+ WSTACK_PUSH(s, (UWord)ptr);
} else {
/* Reached tuple header */
ASSERT(header_is_arityval(obj));
@@ -2611,7 +2626,7 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
tl = CDR(cons);
obj = CAR(cons);
- ESTACK_PUSH(s, tl);
+ WSTACK_PUSH(s, tl);
} else if (is_nil(obj)) {
result++;
goto outer_loop;
@@ -2647,17 +2662,22 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
if ((Uint)val < 256)
result += 1 + 1; /* SMALL_INTEGER_EXT */
- else if (sizeof(Sint) == 4 || IS_SSMALL28(val))
+ else if (sizeof(Sint) == 4 || IS_SSMALL32(val))
result += 1 + 4; /* INTEGER_EXT */
else {
- Eterm tmp_big[2];
+ DeclareTmpHeapNoproc(tmp_big,2);
+ UseTmpHeapNoproc(2);
i = big_bytes(small_to_big(val, tmp_big));
result += 1 + 1 + 1 + i; /* SMALL_BIG_EXT */
+ UnUseTmpHeapNoproc(2);
}
}
break;
case BIG_DEF:
- if ((i = big_bytes(obj)) < 256)
+ i = big_bytes(obj);
+ if (sizeof(Sint)==4 && i <= 4 && (big_digit(obj,0)-big_sign(obj)) < (1<<31))
+ result += 1 + 4; /* INTEGER_EXT */
+ else if (i < 256)
result += 1 + 1 + 1 + i; /* tag,size,sign,digits */
else
result += 1 + 4 + 1 + i; /* tag,size,sign,digits */
@@ -2698,7 +2718,11 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
result += 1 + 4;
}
ptr += arity;
+#if HALFWORD_HEAP
+ obj = (Eterm) (wobj = (UWord) ptr);
+#else
obj = (Eterm) ptr;
+#endif
goto handle_popped_obj;
}
break;
@@ -2740,14 +2764,14 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
if (is_not_list(obj)) {
/* Push any non-list terms on the stack */
- ESTACK_PUSH(s, obj);
+ WSTACK_PUSH(s, obj);
} else {
/* Lists must be handled specially. */
if ((m = is_string(obj)) && (m < MAX_STRING_LEN)) {
result += m + 2 + 1;
} else {
result += 5;
- ESTACK_PUSH(s, obj);
+ WSTACK_PUSH(s, obj);
}
}
}
@@ -2760,8 +2784,12 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
case EXPORT_DEF:
{
- Export* ep = (Export *) (export_val(obj))[1];
+ Export* ep = *((Export **) (export_val(obj) + 1));
+#if HALFWORD_HEAP
+ result += 2;
+#else
result += 1;
+#endif
result += encode_size_struct2(acmp, ep->code[0], dflags);
result += encode_size_struct2(acmp, ep->code[1], dflags);
result += encode_size_struct2(acmp, make_small(ep->code[2]), dflags);
@@ -2774,7 +2802,7 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
}
}
- DESTROY_ESTACK(s);
+ DESTROY_WSTACK(s);
return result;
}
@@ -2886,7 +2914,7 @@ decoded_size(byte *ep, byte* endp, int no_refc_bins)
ep += 2;
atom_extra_skip = 1 + 4*id_words;
/* In case it is an external ref */
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
heap_size += EXTERNAL_THING_HEAD_SIZE + id_words/2 + 1;
#else
heap_size += EXTERNAL_THING_HEAD_SIZE + id_words;
@@ -2961,7 +2989,11 @@ decoded_size(byte *ep, byte* endp, int no_refc_bins)
break;
case EXPORT_EXT:
terms += 3;
+#if HALFWORD_HEAP
+ heap_size += 3;
+#else
heap_size += 2;
+#endif
break;
case NEW_FUN_EXT:
{
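
The BIG_DEF branch above lets a 32-bit emulator emit INTEGER_EXT for bignums whose value still fits in a signed 32-bit word (on such emulators anything outside the 28-bit small range is a bignum internally). A standalone sketch of that decision, using the same helpers as the hunk (encode_big is a hypothetical wrapper):

static byte *encode_big(Eterm obj, byte *ep)
{
    int  sign = big_sign(obj);
    Uint n    = big_bytes(obj);

    if (sizeof(Sint) == 4 && n <= 4) {
        Uint dig = big_digit(obj, 0);
        Sint val = sign ? -dig : dig;
        if ((val < 0) == sign) {          /* value survives the conversion   */
            *ep++ = INTEGER_EXT;
            put_int32(val, ep);
            return ep + 4;
        }
    }
    if (n < 256) {                        /* SMALL_BIG_EXT: 1-byte length    */
        *ep++ = SMALL_BIG_EXT;
        put_int8(n, ep);
        ep += 1;
    } else {                              /* LARGE_BIG_EXT: 4-byte length    */
        *ep++ = LARGE_BIG_EXT;
        put_int32(n, ep);
        ep += 4;
    }
    *ep++ = sign;
    return big_to_bytes(obj, ep);
}
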
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index eada6d4f27..cee48bbeb0 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -211,8 +211,8 @@ ERTS_GLB_INLINE void *
erts_dist_ext_trailer(ErtsDistExternal *edep)
{
void *res = (void *) (edep->ext_endp
- + ERTS_WORD_ALIGN_PAD_SZ(edep->ext_endp));
- ASSERT((((Uint) res) % sizeof(Uint)) == 0);
+ + ERTS_EXTRA_DATA_ALIGN_SZ(edep->ext_endp));
+ ASSERT((((UWord) res) % sizeof(Uint)) == 0);
return res;
}
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index cab249a53f..280421952e 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -76,12 +76,6 @@ typedef struct line_buf { /* Buffer used in line oriented I/O */
The rest is the overflow buffer. */
} LineBuf;
-/* Temporary object header, auto-deallocated when NIF returns. */
-struct enif_tmp_obj_t {
- struct enif_tmp_obj_t* next;
- void (*dtor)(struct enif_tmp_obj_t*);
- /*char data[];*/
-};
struct enif_environment_t /* ErlNifEnv */
{
struct erl_module_nif* mod_nif;
@@ -403,7 +397,7 @@ extern Eterm erts_ddll_monitor_driver(Process *p,
/* Add fields in ERTS_INTERNAL_BINARY_FIELDS, otherwise the drivers crash */
#define ERTS_INTERNAL_BINARY_FIELDS \
- Uint flags; \
+ UWord flags; \
erts_refc_t refc; \
ERTS_BINARY_STRUCT_ALIGNMENT
@@ -470,7 +464,10 @@ typedef union {
typedef struct proc_bin {
Eterm thing_word; /* Subtag REFC_BINARY_SUBTAG. */
Uint size; /* Binary size in bytes. */
- struct proc_bin *next; /* Pointer to next ProcBin. */
+#if HALFWORD_HEAP
+ void* dummy_ptr_padding__;
+#endif
+ struct erl_off_heap_header *next;
Binary *val; /* Pointer to Binary structure. */
byte *bytes; /* Pointer to the actual data bytes. */
Uint flags; /* Flag word. */
@@ -500,8 +497,8 @@ erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
pb->thing_word = HEADER_PROC_BIN;
pb->size = 0;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*) pb;
pb->val = mbp;
pb->bytes = (byte *) mbp->orig_bytes;
pb->flags = 0;
@@ -518,6 +515,15 @@ erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
&& (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
&& (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
+
+union erl_off_heap_ptr {
+ struct erl_off_heap_header* hdr;
+ ProcBin *pb;
+ struct erl_fun_thing* fun;
+ struct external_thing_* ext;
+ Eterm* ep;
+};
+
/* arrays that get malloced at startup */
extern Port* erts_port;
extern erts_smp_atomic_t erts_ports_alive;
@@ -723,6 +729,60 @@ do { \
#define ESTACK_POP(s) (*(--ESTK_CONCAT(s,_sp)))
+void erl_grow_wstack(UWord** start, UWord** sp, UWord** end);
+#define WSTK_CONCAT(a,b) a##b
+#define WSTK_SUBSCRIPT(s,i) *((UWord *)((byte *)WSTK_CONCAT(s,_start) + (i)))
+#define DEF_WSTACK_SIZE (16)
+
+#define DECLARE_WSTACK(s) \
+ UWord WSTK_CONCAT(s,_default_stack)[DEF_WSTACK_SIZE]; \
+ UWord* WSTK_CONCAT(s,_start) = WSTK_CONCAT(s,_default_stack); \
+ UWord* WSTK_CONCAT(s,_sp) = WSTK_CONCAT(s,_start); \
+ UWord* WSTK_CONCAT(s,_end) = WSTK_CONCAT(s,_start) + DEF_WSTACK_SIZE
+
+#define DESTROY_WSTACK(s) \
+do { \
+ if (WSTK_CONCAT(s,_start) != WSTK_CONCAT(s,_default_stack)) { \
+ erts_free(ERTS_ALC_T_ESTACK, WSTK_CONCAT(s,_start)); \
+ } \
+} while(0)
+
+#define WSTACK_PUSH(s, x) \
+do { \
+ if (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_end)) { \
+ erl_grow_wstack(&WSTK_CONCAT(s,_start), &WSTK_CONCAT(s,_sp), \
+ &WSTK_CONCAT(s,_end)); \
+ } \
+ *WSTK_CONCAT(s,_sp)++ = (x); \
+} while(0)
+
+#define WSTACK_PUSH2(s, x, y) \
+do { \
+ if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 2) { \
+ erl_grow_wstack(&WSTK_CONCAT(s,_start), &WSTK_CONCAT(s,_sp), \
+ &WSTK_CONCAT(s,_end)); \
+ } \
+ *WSTK_CONCAT(s,_sp)++ = (x); \
+ *WSTK_CONCAT(s,_sp)++ = (y); \
+} while(0)
+
+#define WSTACK_PUSH3(s, x, y, z) \
+do { \
+ if (WSTK_CONCAT(s,_sp) > WSTK_CONCAT(s,_end) - 3) { \
+ erl_grow_wstack(&WSTK_CONCAT(s,_start), &WSTK_CONCAT(s,_sp), \
+ &WSTK_CONCAT(s,_end)); \
+ } \
+ *WSTK_CONCAT(s,_sp)++ = (x); \
+ *WSTK_CONCAT(s,_sp)++ = (y); \
+ *WSTK_CONCAT(s,_sp)++ = (z); \
+} while(0)
+
+#define WSTACK_COUNT(s) (WSTK_CONCAT(s,_sp) - WSTK_CONCAT(s,_start))
+
+#define WSTACK_ISEMPTY(s) (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_start))
+#define WSTACK_POP(s) (*(--WSTK_CONCAT(s,_sp)))
+
+
/* port status flags */
#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0))
@@ -775,7 +835,6 @@ Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap);
Eterm erts_new_mso_binary(Process*, byte*, int);
Eterm new_binary(Process*, byte*, int);
Eterm erts_realloc_binary(Eterm bin, size_t size);
-void erts_cleanup_mso(ProcBin* pb);
/* erl_bif_info.c */
@@ -802,7 +861,7 @@ void erts_system_profile_clear(Process *c_p);
int erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm* mod, byte* code, int size);
void init_load(void);
-Eterm* find_function_from_pc(Eterm* pc);
+BeamInstr* find_function_from_pc(BeamInstr* pc);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
@@ -833,6 +892,8 @@ Eterm copy_object(Eterm, Process*);
Uint size_object(Eterm);
Eterm copy_struct(Eterm, Uint, Eterm**, ErlOffHeap*);
Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
+void move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
+ Eterm* refs, unsigned nrefs);
#ifdef HYBRID
#define RRMA_DEFAULT_SIZE 256
@@ -965,7 +1026,7 @@ void print_pass_through(int, byte*, int);
/* beam_emu.c */
int catchlevel(Process*);
-void init_emulator(_VOID_);
+void init_emulator(void);
void process_main(void);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
@@ -1030,6 +1091,7 @@ Eterm erts_heap_sizes(Process* p);
void erts_offset_off_heap(ErlOffHeap *, Sint, Eterm*, Eterm*);
void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
+void erts_free_heap_frags(Process* p);
#ifdef HYBRID
int erts_global_garbage_collect(Process*, int, Eterm*, int);
@@ -1144,12 +1206,11 @@ erts_smp_port_unlock(Port *prt)
{
#ifdef ERTS_SMP
long refc;
+ erts_smp_mtx_unlock(prt->lock);
refc = erts_smp_atomic_dectest(&prt->refc);
ASSERT(refc >= 0);
if (refc == 0)
erts_port_cleanup(prt);
- else
- erts_smp_mtx_unlock(prt->lock);
#endif
}
@@ -1171,7 +1232,7 @@ erts_smp_port_unlock(Port *prt)
ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
#define ERTS_PORT_SCHED_ID(P, ID) \
- ((Uint) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (ID)))
+ ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID)))
#ifdef ERTS_SMP
Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
@@ -1449,7 +1510,6 @@ void p_slpq(_VOID_);
void erts_silence_warn_unused_result(long unused);
void erts_cleanup_offheap(ErlOffHeap *offheap);
-void erts_cleanup_externals(ExternalThing *);
Uint erts_fit_in_bits(Uint);
int list_length(Eterm);
@@ -1463,6 +1523,7 @@ Uint32 make_hash(Eterm);
Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
+Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
@@ -1481,7 +1542,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
Eterm atoms[], Uint uints1[], Uint uints2[]);
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
-Eterm store_external_or_ref_(Uint **, ExternalThing **, Eterm);
+Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
#define NC_HEAP_SIZE(NC) \
(ASSERT_EXPR(is_node_container((NC))), \
@@ -1515,7 +1576,9 @@ Sint cmp(Eterm, Eterm);
#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
+/* duplicates from big.h */
int term_to_Uint(Eterm term, Uint *up);
+int term_to_UWord(Eterm, UWord*);
#ifdef HAVE_ERTS_NOW_CPU
extern int erts_cpu_timestamp;
@@ -1525,6 +1588,10 @@ void erts_init_bif_chksum(void);
/* erl_bif_re.c */
void erts_init_bif_re(void);
Sint erts_re_set_loop_limit(Sint limit);
+/* erl_bif_binary.c */
+void erts_init_bif_binary(void);
+Sint erts_binary_set_loop_limit(Sint limit);
+
/* erl_unicode.c */
void erts_init_unicode(void);
Sint erts_unicode_set_loop_limit(Sint limit);
@@ -1554,12 +1621,12 @@ void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
void trace_send(Process*, Eterm, Eterm);
void trace_receive(Process*, Eterm);
-Uint32 erts_call_trace(Process *p, Eterm mfa[], Binary *match_spec, Eterm* args,
+Uint32 erts_call_trace(Process *p, BeamInstr mfa[], Binary *match_spec, Eterm* args,
int local, Eterm *tracer_pid);
-void erts_trace_return(Process* p, Eterm* fi, Eterm retval, Eterm *tracer_pid);
-void erts_trace_exception(Process* p, Eterm mfa[], Eterm class, Eterm value,
+void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
+void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
Eterm *tracer);
-void erts_trace_return_to(Process *p, Uint *pc);
+void erts_trace_return_to(Process *p, BeamInstr *pc);
void trace_sched(Process*, Eterm);
void trace_proc(Process*, Process*, Eterm, Eterm);
void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
@@ -1589,7 +1656,7 @@ Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
Eterm erts_bif_trace(int bif_index, Process* p,
- Eterm arg1, Eterm arg2, Eterm arg3, Uint *I);
+ Eterm arg1, Eterm arg2, Eterm arg3, BeamInstr *I);
#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
@@ -1606,7 +1673,7 @@ void bin_write(int, void*, byte*, int);
int intlist_to_buf(Eterm, char*, int); /* most callers pass plain char*'s */
struct Sint_buf {
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
char s[22];
#else
char s[12];
@@ -1619,7 +1686,7 @@ int io_list_to_buf(Eterm, char*, int);
int io_list_to_buf2(Eterm, char*, int);
int io_list_len(Eterm);
int is_string(Eterm);
-void erl_at_exit(FUNCTION(void,(*),(void*)), void*);
+void erl_at_exit(void (*) (void*), void*);
Eterm collect_memory(Process *);
void dump_memory_to_fd(int);
int dump_memory_data(const char *);
@@ -1654,6 +1721,8 @@ Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live);
Uint erts_current_reductions(Process* current, Process *p);
@@ -1663,7 +1732,6 @@ int erts_print_system_version(int to, void *arg, Process *c_p);
* Interface to erl_init
*/
void erl_init(void);
-void erts_first_process(Eterm modname, void* code, unsigned size, int argc, char** argv);
#define seq_trace_output(token, msg, type, receiver, process) \
seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
@@ -1683,8 +1751,10 @@ struct trace_pattern_flags {
unsigned int local : 1; /* Local call trace breakpoint */
unsigned int meta : 1; /* Metadata trace breakpoint */
unsigned int call_count : 1; /* Fast call count breakpoint */
+ unsigned int call_time : 1; /* Fast call time breakpoint */
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
+extern int erts_call_time_breakpoint_tracing;
int erts_set_trace_pattern(Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
@@ -1728,18 +1798,20 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
void (*)(ErlOffHeap *, void *),
void *);
-#define MATCH_SET_RETURN_TRACE 0x1 /* return trace requested */
-#define MATCH_SET_RETURN_TO_TRACE 0x2 /* Misleading name, it is not actually
- set by the match program, but by the
- breakpoint functions */
-#define MATCH_SET_EXCEPTION_TRACE 0x4 /* exception trace requested */
+#define MATCH_SET_RETURN_TRACE (0x1) /* return trace requested */
+#define MATCH_SET_RETURN_TO_TRACE (0x2) /* Misleading name, it is not actually
+ set by the match program, but by the
+ breakpoint functions */
+#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
/*
* Flag values when tracing bif
+ * Future note: flag field is 8 bits
*/
-#define BIF_TRACE_AS_LOCAL 0x1
-#define BIF_TRACE_AS_GLOBAL 0x2
-#define BIF_TRACE_AS_META 0x4
+#define BIF_TRACE_AS_LOCAL (0x1)
+#define BIF_TRACE_AS_GLOBAL (0x2)
+#define BIF_TRACE_AS_META (0x4)
+#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
@@ -1839,4 +1911,61 @@ erts_alloc_message_heap(Uint size,
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif
+#if !HEAP_ON_C_STACK
+# if defined(DEBUG)
+# define DeclareTmpHeap(VariableName,Size,Process) \
+ Eterm *VariableName = erts_debug_allocate_tmp_heap(Size,Process)
+# define DeclareTmpHeapNoproc(VariableName,Size) \
+ Eterm *VariableName = erts_debug_allocate_tmp_heap(Size,NULL)
+# define UseTmpHeap(Size,Proc) \
+ do { \
+ erts_debug_use_tmp_heap((Size),(Proc)); \
+ } while (0)
+# define UnUseTmpHeap(Size,Proc) \
+ do { \
+ erts_debug_unuse_tmp_heap((Size),(Proc)); \
+ } while (0)
+# define UseTmpHeapNoproc(Size) \
+ do { \
+ erts_debug_use_tmp_heap(Size,NULL); \
+ } while (0)
+# define UnUseTmpHeapNoproc(Size) \
+ do { \
+ erts_debug_unuse_tmp_heap(Size,NULL); \
+ } while (0)
+# else
+# define DeclareTmpHeap(VariableName,Size,Process) \
+ Eterm *VariableName = (ERTS_PROC_GET_SCHDATA(Process)->tmp_heap)+(ERTS_PROC_GET_SCHDATA(Process)->num_tmp_heap_used)
+# define DeclareTmpHeapNoproc(VariableName,Size) \
+ Eterm *VariableName = (erts_get_scheduler_data()->tmp_heap)+(erts_get_scheduler_data()->num_tmp_heap_used)
+# define UseTmpHeap(Size,Proc) \
+ do { \
+ ERTS_PROC_GET_SCHDATA(Proc)->num_tmp_heap_used += (Size); \
+ } while (0)
+# define UnUseTmpHeap(Size,Proc) \
+ do { \
+ ERTS_PROC_GET_SCHDATA(Proc)->num_tmp_heap_used -= (Size); \
+ } while (0)
+# define UseTmpHeapNoproc(Size) \
+ do { \
+ erts_get_scheduler_data()->num_tmp_heap_used += (Size); \
+ } while (0)
+# define UnUseTmpHeapNoproc(Size) \
+ do { \
+ erts_get_scheduler_data()->num_tmp_heap_used -= (Size); \
+ } while (0)
+
+
+# endif
+
+#else
+# define DeclareTmpHeap(VariableName,Size,Process) \
+ Eterm VariableName[Size]
+# define DeclareTmpHeapNoproc(VariableName,Size) \
+ Eterm VariableName[Size]
+# define UseTmpHeap(Size,Proc) /* Nothing */
+# define UnUseTmpHeap(Size,Proc) /* Nothing */
+# define UseTmpHeapNoproc(Size) /* Nothing */
+# define UnUseTmpHeapNoproc(Size) /* Nothing */
+#endif /* HEAP_ON_C_STACK */
+#endif /* !__GLOBAL_H__ */
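
A minimal usage sketch of the WSTACK macros introduced above, which external.c now uses in place of ESTACK so that raw pointers can be pushed as untagged UWords even on halfword builds (sum_pushed_words is a hypothetical function):

static UWord sum_pushed_words(UWord a, UWord b, UWord c)
{
    UWord acc = 0;
    DECLARE_WSTACK(s);             /* starts as a small buffer on the C stack */

    WSTACK_PUSH(s, a);
    WSTACK_PUSH2(s, b, c);         /* grows via erl_grow_wstack() when full   */

    while (!WSTACK_ISEMPTY(s)) {
        acc += WSTACK_POP(s);
    }
    DESTROY_WSTACK(s);             /* frees only if the stack ever grew       */
    return acc;
}
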
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 3309b77086..79022d5dd7 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -72,6 +72,15 @@ erts_driver_t fd_driver;
static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
static void terminate_port(Port *p);
static void pdl_init(void);
+#ifdef ERTS_SMP
+static void driver_monitor_lock_pdl(Port *p);
+static void driver_monitor_unlock_pdl(Port *p);
+#define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port)
+#define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port)
+#else
+#define DRV_MONITOR_LOCK_PDL(Port) /* nothing */
+#define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */
+#endif
static ERTS_INLINE ErlIOQueue*
drvport2ioq(ErlDrvPort drvport)
@@ -271,10 +280,36 @@ erts_test_next_port(int set, Uint next)
return res;
}
+
+static void port_cleanup(Port *prt);
+
+#ifdef ERTS_SMP
+
+static void
+sched_port_cleanup(void *vprt)
+{
+ Port *prt = (Port *) vprt;
+ erts_smp_mtx_lock(prt->lock);
+ port_cleanup(prt);
+}
+
+#endif
+
void
erts_port_cleanup(Port *prt)
{
#ifdef ERTS_SMP
+ if (erts_smp_mtx_trylock(prt->lock) == EBUSY)
+ erts_schedule_misc_op(sched_port_cleanup, (void *) prt);
+ else
+#endif
+ port_cleanup(prt);
+}
+
+void
+port_cleanup(Port *prt)
+{
+#ifdef ERTS_SMP
Uint32 port_specific;
erts_smp_mtx_t *mtx;
#endif
@@ -1540,14 +1575,14 @@ static void deliver_read_message(Port* prt, Eterm to,
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = len;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = bptr;
pb->bytes = (byte*) bptr->orig_bytes;
pb->flags = 0;
hp += PROC_BIN_SIZE;
- ohp->overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
listp = make_binary(pb);
}
@@ -1690,14 +1725,14 @@ deliver_vec_message(Port* prt, /* Port */
}
pb->thing_word = HEADER_PROC_BIN;
pb->size = iov->iov_len;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(b);
pb->bytes = base;
pb->flags = 0;
hp += PROC_BIN_SIZE;
- ohp->overhead += iov->iov_len / sizeof(Eterm);
+ OH_OVERHEAD(ohp, iov->iov_len / sizeof(Eterm));
if (listp == NIL) { /* compatible with deliver_bin_message */
listp = make_binary(pb);
@@ -1998,12 +2033,13 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason)
p->nlinks = NULL;
erts_sweep_links(lnk, &sweep_one_link, &sc);
}
+ DRV_MONITOR_LOCK_PDL(p);
{
ErtsMonitor *moni = p->monitors;
p->monitors = NULL;
erts_sweep_monitors(moni, &sweep_one_monitor, NULL);
}
-
+ DRV_MONITOR_UNLOCK_PDL(p);
if ((p->status & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
erts_do_net_exits(p->dist_entry, rreason);
@@ -2223,12 +2259,12 @@ erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
ProcBin* pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
pb->size = dbin->orig_size;
- pb->next = MSO(p).mso;
- MSO(p).mso = pb;
+ pb->next = MSO(p).first;
+ MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(dbin);
pb->bytes = (byte*) dbin->orig_bytes;
pb->flags = 0;
- MSO(p).overhead += dbin->orig_size / sizeof(Eterm);
+ OH_OVERHEAD(&(MSO(p)), dbin->orig_size / sizeof(Eterm));
return make_binary(pb);
}
port_resp = dbin->orig_bytes;
@@ -2997,14 +3033,14 @@ driver_deliver_term(ErlDrvPort port,
driver_binary_inc_refc(b); /* caller will free binary */
pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- pb->next = ohp->mso;
- ohp->mso = pb;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
pb->val = ErlDrvBinary2Binary(b);
pb->bytes = ((byte*) b->orig_bytes) + offset;
pb->flags = 0;
mess = make_binary(pb);
hp += PROC_BIN_SIZE;
- ohp->overhead += pb->size / sizeof(Eterm);
+ OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
}
ptr += 3;
break;
@@ -3036,12 +3072,12 @@ driver_deliver_term(ErlDrvPort port,
hp += PROC_BIN_SIZE;
pbp->thing_word = HEADER_PROC_BIN;
pbp->size = size;
- pbp->next = ohp->mso;
- ohp->mso = pbp;
+ pbp->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pbp;
pbp->val = bp;
pbp->bytes = (byte*) bp->orig_bytes;
pbp->flags = 0;
- ohp->overhead += (pbp->size / sizeof(Eterm));
+ OH_OVERHEAD(ohp, pbp->size / sizeof(Eterm));
mess = make_binary(pbp);
}
ptr += 2;
@@ -3536,6 +3572,32 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl);
}
+#ifdef ERTS_SMP
+
+static void driver_monitor_lock_pdl(Port *p) {
+ if (p->port_data_lock) {
+ driver_pdl_lock(p->port_data_lock);
+ }
+ /* Now we either have the port lock or the port_data_lock */
+ ERTS_LC_ASSERT(!p->port_data_lock
+ || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx)));
+ ERTS_SMP_LC_ASSERT(p->port_data_lock
+ || erts_lc_is_port_locked(p));
+}
+
+static void driver_monitor_unlock_pdl(Port *p) {
+ /* We should either have the port lock or the port_data_lock */
+ ERTS_LC_ASSERT(!p->port_data_lock
+ || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx)));
+ ERTS_SMP_LC_ASSERT(p->port_data_lock
+ || erts_lc_is_port_locked(p));
+ if (p->port_data_lock) {
+ driver_pdl_unlock(p->port_data_lock);
+ }
+}
+
+#endif
+
/*
* exported driver_pdl_* functions ...
*/
@@ -3994,7 +4056,7 @@ drv_cancel_timer(Port *prt)
erts_port_task_abort(prt->id, &prt->timeout_task);
}
-int driver_set_timer(ErlDrvPort ix, Uint t)
+int driver_set_timer(ErlDrvPort ix, UWord t)
{
Port* prt = erts_drvport2port(ix);
@@ -4053,12 +4115,16 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t)
int
driver_get_now(ErlDrvNowData *now_data)
{
+ Uint mega,secs,micro;
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (now_data == NULL) {
return -1;
}
- get_now(&(now_data->megasecs),&(now_data->secs),&(now_data->microsecs));
+ get_now(&mega,&secs,&micro);
+ now_data->megasecs = (unsigned long) mega;
+ now_data->secs = (unsigned long) secs;
+ now_data->microsecs = (unsigned long) micro;
return 0;
}
@@ -4072,14 +4138,15 @@ static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
memcpy(mon,refp,sizeof(RefThing));
}
-int driver_monitor_process(ErlDrvPort port,
- ErlDrvTermData process,
- ErlDrvMonitor *monitor)
+
+static int do_driver_monitor_process(Port *prt,
+ Eterm *buf,
+ ErlDrvTermData process,
+ ErlDrvMonitor *monitor)
{
- Port *prt = erts_drvport2port(port);
Process *rp;
Eterm ref;
- Eterm buf[REF_THING_SIZE];
+
if (prt->drv_ptr->process_exit == NULL) {
return -1;
}
@@ -4089,22 +4156,76 @@ int driver_monitor_process(ErlDrvPort port,
if (!rp) {
return 1;
}
+
ref = erts_make_ref_in_buffer(buf);
erts_add_monitor(&(prt->monitors), MON_ORIGIN, ref, rp->id, NIL);
erts_add_monitor(&(rp->monitors), MON_TARGET, ref, prt->id, NIL);
-
+
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
ref_to_driver_monitor(ref,monitor);
return 0;
}
-int driver_demonitor_process(ErlDrvPort port,
- const ErlDrvMonitor *monitor)
+/*
+ * This can be called from a non-scheduler thread iff a port_data_lock exists.
+ */
+int driver_monitor_process(ErlDrvPort port,
+ ErlDrvTermData process,
+ ErlDrvMonitor *monitor)
+{
+ Port *prt;
+ int ret;
+ Uint32 status;
+ ErtsSchedulerData *sched = erts_get_scheduler_data();
+ int ix = (int) port;
+ if (ix < 0 || erts_max_ports <= ix) {
+ return -1;
+ }
+ prt = &erts_port[ix];
+
+ DRV_MONITOR_LOCK_PDL(prt);
+
+ if (sched) {
+ status = erts_port[ix].status;
+ } else {
+ erts_smp_port_state_lock(prt);
+ status = erts_port[ix].status;
+ erts_smp_port_state_unlock(prt);
+ }
+
+ if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ DRV_MONITOR_UNLOCK_PDL(prt);
+ return -1;
+ }
+
+ /* Now (in SMP) we should have either the port lock (if we have a scheduler) or the port data lock
+ (if we're a driver thread) */
+ ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
+
+#if !HEAP_ON_C_STACK
+ if (!sched) {
+ /* Need a separate allocation for the ref :( */
+ Eterm *buf = erts_alloc(ERTS_ALC_T_TEMP_TERM,
+ sizeof(Eterm)*REF_THING_SIZE);
+ ret = do_driver_monitor_process(prt,buf,process,monitor);
+ erts_free(ERTS_ALC_T_TEMP_TERM,buf);
+ } else
+#endif
+ {
+ DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
+ UseTmpHeapNoproc(REF_THING_SIZE);
+ ret = do_driver_monitor_process(prt,buf,process,monitor);
+ UnUseTmpHeapNoproc(REF_THING_SIZE);
+ }
+ DRV_MONITOR_UNLOCK_PDL(prt);
+ return ret;
+}
+
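/* Editor's sketch (not part of the patch): a driver may call
 * driver_monitor_process() from one of its own threads only if a port data
 * lock was created (driver_pdl_create(port)) in the driver's start callback;
 * otherwise the call is only legal on a scheduler, i.e. from a driver
 * callback. The name my_drv_monitor below is hypothetical; the pid would
 * typically have been obtained earlier with driver_caller()/driver_connected()
 * inside a callback. */
static int my_drv_monitor(ErlDrvPort port, ErlDrvTermData pid,
                          ErlDrvMonitor *mon)
{
    int res = driver_monitor_process(port, pid, mon);
    /* res < 0: port/driver cannot monitor; res > 0: process not alive;
     * res == 0: monitor installed, the process_exit callback will fire. */
    return res;
}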
+static int do_driver_demonitor_process(Port *prt, Eterm *buf,
+ const ErlDrvMonitor *monitor)
{
- Port *prt = erts_drvport2port(port);
Process *rp;
Eterm ref;
- Eterm buf[REF_THING_SIZE];
ErtsMonitor *mon;
Eterm to;
@@ -4137,12 +4258,60 @@ int driver_demonitor_process(ErlDrvPort port,
return 0;
}
-ErlDrvTermData driver_get_monitored_process(ErlDrvPort port,
+int driver_demonitor_process(ErlDrvPort port,
+ const ErlDrvMonitor *monitor)
+{
+ Port *prt;
+ int ret;
+ Uint32 status;
+ ErtsSchedulerData *sched = erts_get_scheduler_data();
+ int ix = (int) port;
+ if (ix < 0 || erts_max_ports <= ix) {
+ return -1;
+ }
+ prt = &erts_port[ix];
+
+ DRV_MONITOR_LOCK_PDL(prt);
+
+ if (sched) {
+ status = erts_port[ix].status;
+ } else {
+ erts_smp_port_state_lock(prt);
+ status = erts_port[ix].status;
+ erts_smp_port_state_unlock(prt);
+ }
+
+ if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ DRV_MONITOR_UNLOCK_PDL(prt);
+ return -1;
+ }
+
+ /* Now we should have either the port lock (if we have a scheduler) or the port data lock
+ (if we're a driver thread) */
+ ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
+#if !HEAP_ON_C_STACK
+ if (!sched) {
+ /* Need a separate allocation for the ref :( */
+ Eterm *buf = erts_alloc(ERTS_ALC_T_TEMP_TERM,
+ sizeof(Eterm)*REF_THING_SIZE);
+ ret = do_driver_demonitor_process(prt,buf,monitor);
+ erts_free(ERTS_ALC_T_TEMP_TERM,buf);
+ } else
+#endif
+ {
+ DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
+ UseTmpHeapNoproc(REF_THING_SIZE);
+ ret = do_driver_demonitor_process(prt,buf,monitor);
+ UnUseTmpHeapNoproc(REF_THING_SIZE);
+ }
+ DRV_MONITOR_UNLOCK_PDL(prt);
+ return ret;
+}
+
+static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
const ErlDrvMonitor *monitor)
{
- Port *prt = erts_drvport2port(port);
Eterm ref;
- Eterm buf[REF_THING_SIZE];
ErtsMonitor *mon;
Eterm to;
@@ -4158,6 +4327,59 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort port,
return (ErlDrvTermData) to;
}
+
+ErlDrvTermData driver_get_monitored_process(ErlDrvPort port,
+ const ErlDrvMonitor *monitor)
+{
+ Port *prt;
+ ErlDrvTermData ret;
+ Uint32 status;
+ ErtsSchedulerData *sched = erts_get_scheduler_data();
+ int ix = (int) port;
+ if (ix < 0 || erts_max_ports <= ix) {
+ return driver_term_nil;
+ }
+ prt = &erts_port[ix];
+
+ DRV_MONITOR_LOCK_PDL(prt);
+
+ if (sched) {
+ status = erts_port[ix].status;
+ } else {
+ erts_smp_port_state_lock(prt);
+ status = erts_port[ix].status;
+ erts_smp_port_state_unlock(prt);
+ }
+
+ if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ DRV_MONITOR_UNLOCK_PDL(prt);
+ return driver_term_nil;
+ }
+
+ /* Now we should have either the port lock (if we have a scheduler) or the port data lock
+ (if we're a driver thread) */
+ ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
+
+#if !HEAP_ON_C_STACK
+ if (!sched) {
+ /* Need a separate allocation for the ref :( */
+ Eterm *buf = erts_alloc(ERTS_ALC_T_TEMP_TERM,
+ sizeof(Eterm)*REF_THING_SIZE);
+ ret = do_driver_get_monitored_process(prt,buf,monitor);
+ erts_free(ERTS_ALC_T_TEMP_TERM,buf);
+ } else
+#endif
+ {
+ DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
+ UseTmpHeapNoproc(REF_THING_SIZE);
+ ret = do_driver_get_monitored_process(prt,buf,monitor);
+ UnUseTmpHeapNoproc(REF_THING_SIZE);
+ }
+ DRV_MONITOR_UNLOCK_PDL(prt);
+ return ret;
+}
+
+
int driver_compare_monitors(const ErlDrvMonitor *monitor1,
const ErlDrvMonitor *monitor2)
{
@@ -4173,18 +4395,22 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(prt->drv_ptr != NULL);
-
+ DRV_MONITOR_LOCK_PDL(prt);
if (erts_lookup_monitor(prt->monitors,ref) == NULL) {
+ DRV_MONITOR_UNLOCK_PDL(prt);
return;
}
callback = prt->drv_ptr->process_exit;
ASSERT(callback != NULL);
ref_to_driver_monitor(ref,&drv_monitor);
+ DRV_MONITOR_UNLOCK_PDL(prt);
fpe_was_unmasked = erts_block_fpe();
(*callback)((ErlDrvData) (prt->drv_data), &drv_monitor);
erts_unblock_fpe(fpe_was_unmasked);
+ DRV_MONITOR_LOCK_PDL(prt);
/* remove monitor *after* callback */
rmon = erts_remove_monitor(&(prt->monitors),ref);
+ DRV_MONITOR_UNLOCK_PDL(prt);
if (rmon) {
erts_destroy_monitor(rmon);
}
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
index 87d13b3607..694e4ab72f 100644
--- a/erts/emulator/beam/module.h
+++ b/erts/emulator/beam/module.h
@@ -29,8 +29,8 @@ typedef struct erl_module {
IndexSlot slot; /* Must be located at top of struct! */
int module; /* Atom index for module (not tagged). */
- Eterm* code;
- Eterm* old_code;
+ BeamInstr* code;
+ BeamInstr* old_code;
int code_length; /* Length of loaded code in bytes. */
int old_code_length; /* Length of old loaded code in bytes */
unsigned catches, old_catches;
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index ce1df74f03..a2439d5582 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1,19 +1,19 @@
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 1997-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
@@ -60,12 +60,17 @@ func_info M=a a==am_module_info A=u==0 | label L | move n r => too_old_compiler
func_info M=a a==am_module_info A=u==1 | label L | move n r => too_old_compiler
# The undocumented and unsupported guard BIF is_constant/1 was removed
-# in R13. The is_constant/2 operation is marked as obosolete in genop.tab,
+# in R13. The is_constant/2 operation is marked as obsolete in genop.tab,
# so the loader will automatically generate a too_old_compiler message
# if it is used, but we need to handle the is_constant/1 BIF specially here.
bif1 Fail u$func:erlang:is_constant/1 Src Dst => too_old_compiler
+# Since the constant pool was introduced in R12B, empty tuples ({})
+# are literals. Therefore we no longer need to allow put_tuple/2
+# with a tuple size of zero.
+
+put_tuple u==0 d => too_old_compiler
#
# All the other instructions.
@@ -79,6 +84,8 @@ i_trace_breakpoint
i_mtrace_breakpoint
i_debug_breakpoint
i_count_breakpoint
+i_time_breakpoint
+i_return_time_trace
i_return_to_trace
i_yield
i_global_cons
@@ -115,12 +122,6 @@ init Y1 | init Y2 => init2 Y1 Y2
%macro: init2 Init2 -pack
%macro: init3 Init3 -pack
-#
-# Warning: The put_string instruction is specially treated in the loader.
-# Don't change the instruction format unless you change the loader too.
-#
-put_string I I d
-
# Selecting values
select_val S=q Fail=f Size=u Rest=* => const_select_val(S, Fail, Size, Rest)
@@ -335,11 +336,8 @@ i_is_eq_immed f y c
# Putting things.
#
-put_tuple u==0 Dst => i_put_tuple_only u Dst
put_tuple Arity Dst | put V => i_put_tuple Arity V Dst
-i_put_tuple_only A d
-
%macro: i_put_tuple PutTuple -pack
i_put_tuple A x x
i_put_tuple A y x
@@ -1184,12 +1182,6 @@ i_bs_init_bits_fail_heap I j I d
i_bs_init_bits I I d
i_bs_init_bits_heap I I I d
-bs_bits_to_bytes Fail Src Dst => i_bs_bits_to_bytes Src Fail Dst
-
-i_bs_bits_to_bytes r j d
-i_bs_bits_to_bytes x j d
-i_bs_bits_to_bytes y j d
-
bs_add Fail S1=i==0 S2 Unit=u==1 D => move S2 D
bs_add Fail S1 S2 Unit D => i_fetch S1 S2 | i_bs_add Fail Unit D
@@ -1311,7 +1303,7 @@ fmul p FR1 FR2 FR3 => i_fmul FR1 FR2 FR3
fdiv p FR1 FR2 FR3 => i_fdiv FR1 FR2 FR3
fnegate p FR1 FR2 => i_fnegate FR1 FR2
-fconv Int=iq Dst=l => move Int x | fconv x Dst
+fconv Arg=iqan Dst=l => move Arg x | fconv x Dst
fmove q l
fmove d l
@@ -1397,34 +1389,60 @@ bif1 Fail u$bif:erlang:trunc/1 s d => too_old_compiler
# Guard BIFs.
#
gc_bif1 Fail I Bif=u$bif:erlang:length/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
gc_bif1 Fail I Bif=u$bif:erlang:size/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
gc_bif1 Fail I Bif=u$bif:erlang:bit_size/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
gc_bif1 Fail I Bif=u$bif:erlang:byte_size/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
gc_bif1 Fail I Bif=u$bif:erlang:abs/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
gc_bif1 Fail I Bif=u$bif:erlang:float/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
gc_bif1 Fail I Bif=u$bif:erlang:round/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
gc_bif1 Fail I Bif=u$bif:erlang:trunc/1 Src Dst=d => \
- gen_guard_bif(Fail, I, Bif, Src, Dst)
+ gen_guard_bif1(Fail, I, Bif, Src, Dst)
+
+gc_bif2 Fail I Bif=u$bif:erlang:binary_part/2 S1 S2 Dst=d => \
+ gen_guard_bif2(Fail, I, Bif, S1, S2, Dst)
+
+gc_bif3 Fail I Bif=u$bif:erlang:binary_part/3 S1 S2 S3 Dst=d => \
+ gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst)
i_gc_bif1 Fail Bif V=q Live D => move V x | i_gc_bif1 Fail Bif x Live D
i_gc_bif1 j I s I d
+ii_gc_bif2/6
+
+ii_gc_bif2 Fail Bif S1 S2 Live D => i_fetch S1 S2 | i_gc_bif2 Fail Bif Live D
+
+i_gc_bif2 j I I d
+
+ii_gc_bif3/7
+
+ii_gc_bif3 Fail Bif S1 S2 S3 Live D => move S1 x | i_fetch S2 S3 | i_gc_bif3 Fail Bif x Live D
+
+i_gc_bif3 j I s I d
#
# R13B03
#
on_load
+
+#
+# R14A.
+#
+recv_mark f
+
+recv_set Fail | label Lbl | loop_rec Lf Reg => \
+ i_recv_set | label Lbl | loop_rec Lf Reg
+i_recv_set
diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c
index 8c8029d450..5bcd567b5f 100644
--- a/erts/emulator/beam/packet_parser.c
+++ b/erts/emulator/beam/packet_parser.c
@@ -679,7 +679,7 @@ int packet_parse_http(const char* buf, int len, int* statep,
while (n && SP(ptr)) {
ptr++; n--;
}
- if (ptr==p0) return -1;
+ if (ptr==p0 && n>0) return -1;
/* NOTE: the syntax allows empty reason phrases */
(*statep) = !0;
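/* Editor's note: with the added "n > 0" condition, a status line whose data
 * ends right after the status code (an empty reason phrase with no trailing
 * space) is now accepted; a non-space character immediately after the code
 * is still rejected. */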
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 964c10a380..c9bb7bbe91 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -39,8 +39,6 @@ static Hash process_reg;
static erts_smp_rwmtx_t regtab_rwmtx;
-#define reg_lock_init() erts_smp_rwmtx_init(&regtab_rwmtx, \
- "reg_tab")
#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
@@ -147,8 +145,11 @@ static void reg_free(RegProc *obj)
void init_register_table(void)
{
HashFunctions f;
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- reg_lock_init();
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
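/* Editor's note: presumably the rationale for the FREQUENT_READ/LONG_LIVED
 * configuration is that the registered-name table is read on every name
 * lookup (whereis) but written only when names are registered or
 * unregistered, so a reader-optimised, long-lived rwlock fits best. */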
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
@@ -476,8 +477,9 @@ int erts_unregister_name(Process *c_p,
* on c_prt.
*/
- if (!c_p)
+ if (!c_p) {
c_p_locks = 0;
+ }
current_c_p_locks = c_p_locks;
restart:
@@ -489,9 +491,15 @@ int erts_unregister_name(Process *c_p,
if (is_non_value(name)) {
/* Unregister current process name */
ASSERT(c_p);
- if (c_p->reg)
+#ifdef ERTS_SMP
+ if (current_c_p_locks != c_p_locks) {
+ erts_smp_proc_lock(c_p, c_p_locks);
+ current_c_p_locks = c_p_locks;
+ }
+#endif
+ if (c_p->reg) {
r.name = c_p->reg->name;
- else {
+ } else {
/* Name got unregistered while main lock was released */
res = 0;
goto done;
@@ -533,24 +541,25 @@ int erts_unregister_name(Process *c_p,
}
} else if (rp->p) {
- Process* p = rp->p;
+
#ifdef ERTS_SMP
erts_proc_safelock(c_p,
current_c_p_locks,
c_p_locks,
rp->p,
- 0,
+ (c_p == rp->p) ? current_c_p_locks : 0,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
#endif
- p->reg = NULL;
+ rp->p->reg = NULL;
+ if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
+ trace_proc(c_p, rp->p, am_unregister, r.name);
+ }
#ifdef ERTS_SMP
- if (rp->p != c_p)
+ if (rp->p != c_p) {
erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
-#endif
- if (IS_TRACED_FL(p, F_TRACE_PROCS)) {
- trace_proc(c_p, p, am_unregister, r.name);
}
+#endif
}
hash_erase(&process_reg, (void*) &r);
res = 1;
@@ -560,14 +569,17 @@ int erts_unregister_name(Process *c_p,
reg_write_unlock();
if (c_prt != port) {
- if (port)
+ if (port) {
erts_smp_port_unlock(port);
- if (c_prt)
+ }
+ if (c_prt) {
erts_smp_port_lock(c_prt);
+ }
}
#ifdef ERTS_SMP
- if (c_p && !current_c_p_locks)
+ if (c_p && !current_c_p_locks) {
erts_smp_proc_lock(c_p, c_p_locks);
+ }
#endif
return res;
}
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 4b949523fa..0031568af6 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -1,37 +1,30 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
#ifndef __SYS_H__
#define __SYS_H__
+
#if defined(VALGRIND) && !defined(NO_FPE_SIGNALS)
# define NO_FPE_SIGNALS
#endif
-/* Never use elib-malloc when purify-memory-tracing */
-#if defined(PURIFY)
-#undef ENABLE_ELIB_MALLOC
-#undef ELIB_HEAP_SBRK
-#undef ELIB_ALLOC_IS_CLIB
-#endif
-
-
/* xxxP __VXWORKS__ */
#ifdef VXWORKS
#include <vxWorks.h>
@@ -57,8 +50,6 @@
# include "erl_win_sys.h"
#elif defined (VXWORKS)
# include "erl_vxworks_sys.h"
-#elif defined (_OSE_)
-# include "erl_ose_sys.h"
#else
# include "erl_unix_sys.h"
#ifndef UNIX
@@ -172,23 +163,6 @@ void erl_assert_error(char* expr, char* file, int line);
#include <stdarg.h>
-#if defined(__STDC__) || defined(_MSC_VER)
-# define EXTERN_FUNCTION(t, f, x) extern t f x
-# define FUNCTION(t, f, x) t f x
-# define _DOTS_ ...
-# define _VOID_ void
-#elif defined(__cplusplus)
-# define EXTERN_FUNCTION(f, x) extern "C" { f x }
-# define FUNCTION(t, f, x) t f x
-# define _DOTS_ ...
-# define _VOID_ void
-#else
-# define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
-# define FUNCTION(t, f, x) t f (/*x*/)
-# define _DOTS_
-# define _VOID_
-#endif
-
/* This isn't sys-dependent, but putting it here benefits sys.c and drivers
- allow use of 'const' regardless of compiler */
@@ -198,7 +172,7 @@ void erl_assert_error(char* expr, char* file, int line);
#ifdef VXWORKS
/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */
-EXTERN_FUNCTION(int, real_printf, (const char *fmt, ...));
+int real_printf(const char *fmt, ...);
# define printf real_printf
#endif
@@ -230,9 +204,14 @@ EXTERN_FUNCTION(int, real_printf, (const char *fmt, ...));
** Data types:
**
** Eterm: A tagged erlang term (possibly 64 bits)
+** BeamInstr: A beam code instruction unit, possibly larger than Eterm, not smaller.
** UInt: An unsigned integer exactly as large as an Eterm.
** SInt: A signed integer exactly as large as an Eterm and therefore large
** enough to hold the return value of the signed_val() macro.
+** UWord: An unsigned integer at least as large as a void * and also as large
+** or larger than an Eterm
+** SWord: A signed integer at least as large as a void * and also as large
+** or larger than an Eterm
** Uint32: An unsigned integer of 32 bits exactly
** Sint32: A signed integer of 32 bits exactly
** Uint16: An unsigned integer of 16 bits exactly
@@ -253,11 +232,43 @@ EXTERN_FUNCTION(int, real_printf, (const char *fmt, ...));
#else
#error Neither 32 nor 64 bit architecture
#endif
+#ifdef ARCH_64
+# ifdef HALFWORD_HEAP_EMULATOR
+# define HALFWORD_HEAP 1
+# define HALFWORD_ASSERT 0
+# else
+# define HALFWORD_HEAP 0
+# define HALFWORD_ASSERT 0
+# endif
+#endif
#if SIZEOF_VOID_P != SIZEOF_SIZE_T
#error sizeof(void*) != sizeof(size_t)
#endif
+#if HALFWORD_HEAP
+
+#if SIZEOF_INT == 4
+typedef unsigned int Eterm;
+typedef unsigned int Uint;
+typedef int Sint;
+#define ERTS_SIZEOF_ETERM SIZEOF_INT
+#else
+#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
+#endif
+
+#if SIZEOF_VOID_P == SIZEOF_LONG
+typedef unsigned long UWord;
+typedef long SWord;
+#elif SIZEOF_VOID_P == SIZEOF_INT
+typedef unsigned int UWord;
+typedef int SWord;
+#else
+#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
+#endif
+
+#else /* !HALFWORD_HEAP */
+
#if SIZEOF_VOID_P == SIZEOF_LONG
typedef unsigned long Eterm;
typedef unsigned long Uint;
@@ -272,6 +283,13 @@ typedef int Sint;
#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
#endif
+typedef Uint UWord;
+typedef Sint SWord;
+
+#endif /* HALFWORD_HEAP */
+
+typedef UWord BeamInstr;
+
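/* Editor's sketch (not part of the patch): the size relations documented in
 * the comment block above could be spelled out as compile-time checks along
 * these lines; the typedef names are hypothetical and no ERTS assert helper
 * is assumed (a negative array size makes any violation a compile error). */
typedef char erts_chk_uword_vs_ptr[sizeof(UWord) >= sizeof(void *) ? 1 : -1];
typedef char erts_chk_uword_vs_eterm[sizeof(UWord) >= sizeof(Eterm) ? 1 : -1];
typedef char erts_chk_instr_vs_eterm[sizeof(BeamInstr) >= sizeof(Eterm) ? 1 : -1];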
#ifndef HAVE_INT64
#if SIZEOF_LONG == 8
#define HAVE_INT64 1
@@ -316,15 +334,8 @@ typedef unsigned char byte;
#error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found
#endif
-#if defined(ARCH_64)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
+# define ERTS_EXTRA_DATA_ALIGN_SZ(X) \
(((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
-#elif defined(ARCH_32)
-# define ERTS_WORD_ALIGN_PAD_SZ(X) \
- (((size_t) 4) - (((size_t) (X)) & ((size_t) 3)))
-#else
-#error "Not supported..."
-#endif
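/* Editor's note: ERTS_EXTRA_DATA_ALIGN_SZ(X) yields the number of bytes up
 * to the next 8-byte boundary and is always in the range 1..8, never 0:
 * e.g. ERTS_EXTRA_DATA_ALIGN_SZ(5) == 3 and ERTS_EXTRA_DATA_ALIGN_SZ(16) == 8. */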
#include "erl_lock_check.h"
#include "erl_smp.h"
@@ -413,13 +424,6 @@ extern volatile int erts_writing_erl_crash_dump;
in non-blocking mode - and ioctl FIONBIO on AIX *doesn't* work for
pipes or ttys (O_NONBLOCK does)!!! For now, we'll use FIONBIO for AIX. */
-# ifdef _OSE_
-static const int zero_value = 0, one_value = 1;
-# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, (char*)&zero_value)
-# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, (char*)&one_value)
-# define ERRNO_BLOCK EWOULDBLOCK
-# else
-
# ifdef __WIN32__
static unsigned long zero_value = 0, one_value = 1;
@@ -460,7 +464,6 @@ static const int zero_value = 0, one_value = 1;
# endif /* !NB_FIONBIO */
# endif /* _WXWORKS_ */
# endif /* !__WIN32__ */
-# endif /* _OSE_ */
#endif /* WANT_NONBLOCKING */
extern erts_cpu_info_t *erts_cpuinfo; /* erl_init.c */
@@ -538,8 +541,6 @@ typedef struct preload {
* None of the drivers use all of the fields.
*/
-/* OSE: Want process_type and priority in here as well! Needs updates in erl_bif_ports.c! */
-
typedef struct _SysDriverOpts {
int ifd; /* Input file descriptor (fd driver). */
int ofd; /* Outputfile descriptor (fd driver). */
@@ -556,12 +557,6 @@ typedef struct _SysDriverOpts {
char *wd; /* Working directory. */
unsigned spawn_type; /* Bitfield of ERTS_SPAWN_DRIVER |
ERTS_SPAWN_EXTERNAL | both*/
-
-#ifdef _OSE_
- enum PROCESS_TYPE process_type;
- OSPRIORITY priority;
-#endif /* _OSE_ */
-
} SysDriverOpts;
extern char *erts_default_arg0;
@@ -621,7 +616,7 @@ extern char *erts_sys_ddll_error(int code);
/*
- * System interfaces for startup/sae code (functions found in respective sys.c)
+ * System interfaces for startup.
*/
@@ -638,18 +633,14 @@ extern void erts_sys_pre_init(void);
extern void erl_sys_init(void);
extern void erl_sys_args(int *argc, char **argv);
extern void erl_sys_schedule(int);
-#ifdef _OSE_
-extern void erl_sys_init_final(void);
-#else
-void sys_tty_reset(void);
-#endif
+void sys_tty_reset(int);
-EXTERN_FUNCTION(int, sys_max_files, (_VOID_));
+int sys_max_files(void);
void sys_init_io(void);
Preload* sys_preloaded(void);
-EXTERN_FUNCTION(unsigned char*, sys_preload_begin, (Preload*));
-EXTERN_FUNCTION(void, sys_preload_end, (Preload*));
-EXTERN_FUNCTION(int, sys_get_key, (int));
+unsigned char* sys_preload_begin(Preload*);
+void sys_preload_end(Preload*);
+int sys_get_key(int);
void elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
unsigned long *ms_user_diff, unsigned long *ms_sys_diff);
void wall_clock_elapsed_time_both(unsigned long *ms_total,
@@ -666,7 +657,7 @@ int local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst);
void get_now(Uint*, Uint*, Uint*);
void get_sys_now(Uint*, Uint*, Uint*);
-EXTERN_FUNCTION(void, set_break_quit, (void (*)(void), void (*)(void)));
+void set_break_quit(void (*)(void), void (*)(void));
void os_flavor(char*, unsigned);
void os_version(int*, int*, int*);
@@ -706,7 +697,7 @@ int erts_write_env(char *key, char *value);
#define ERTS_DEFAULT_MMAP_THRESHOLD (128 * 1024)
#define ERTS_DEFAULT_MMAP_MAX 64
-EXTERN_FUNCTION(int, sys_alloc_opt, (int, int));
+int sys_alloc_opt(int, int);
typedef struct {
Sint trim_threshold;
@@ -715,7 +706,7 @@ typedef struct {
Sint mmap_max;
} SysAllocStat;
-EXTERN_FUNCTION(void, sys_alloc_stat, (SysAllocStat *));
+void sys_alloc_stat(SysAllocStat *);
/* Block the whole system... */
@@ -1104,13 +1095,10 @@ erts_refc_read(erts_refc_t *refcp, long min_val)
extern int erts_use_kernel_poll;
#endif
-void elib_ensure_initialized(void);
-
-
-#if (defined(VXWORKS) || defined(_OSE_))
+#if defined(VXWORKS)
/* NOTE! sys_calloc2 does not exist on other
platforms than VxWorks and OSE */
-EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
+void* sys_calloc2(Uint, Uint);
#endif /* VXWORKS || OSE */
@@ -1150,14 +1138,14 @@ EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
/* Standard set of integer macros .. */
-#define get_int64(s) ((((unsigned char*) (s))[0] << 56) | \
- (((unsigned char*) (s))[1] << 48) | \
- (((unsigned char*) (s))[2] << 40) | \
- (((unsigned char*) (s))[3] << 32) | \
- (((unsigned char*) (s))[4] << 24) | \
- (((unsigned char*) (s))[5] << 16) | \
- (((unsigned char*) (s))[6] << 8) | \
- (((unsigned char*) (s))[7]))
+#define get_int64(s) (((Uint64)(((unsigned char*) (s))[0]) << 56) | \
+ (((Uint64)((unsigned char*) (s))[1]) << 48) | \
+ (((Uint64)((unsigned char*) (s))[2]) << 40) | \
+ (((Uint64)((unsigned char*) (s))[3]) << 32) | \
+ (((Uint64)((unsigned char*) (s))[4]) << 24) | \
+ (((Uint64)((unsigned char*) (s))[5]) << 16) | \
+ (((Uint64)((unsigned char*) (s))[6]) << 8) | \
+ (((Uint64)((unsigned char*) (s))[7])))
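/* Editor's note: the Uint64 casts matter. Each byte is promoted only to int
 * before the shift, so the old "s[0] << 56" was undefined (and lost the high
 * bytes) wherever int is 32 bits; widening each byte to Uint64 first keeps
 * all 64 bits. For example, with a local buffer
 *   unsigned char b[8] = {0x01,0,0,0,0,0,0,0x02};
 * the new macro gives get_int64(b) == 0x0100000000000002ULL. */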
#define put_int64(i, s) do {((char*)(s))[0] = (char)((Sint64)(i) >> 56) & 0xff;\
((char*)(s))[1] = (char)((Sint64)(i) >> 48) & 0xff;\
@@ -1202,8 +1190,8 @@ EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
*/
#ifdef DEBUG
-EXTERN_FUNCTION(void, erl_debug, (char* format, ...));
-EXTERN_FUNCTION(void, erl_bin_write, (unsigned char *, int, int));
+void erl_debug(char* format, ...);
+void erl_bin_write(unsigned char *, int, int);
# define DEBUGF(x) erl_debug x
#else
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 31efddc0f2..ab5e8b5d4a 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -32,6 +32,7 @@
#include "erl_binary.h"
#include "erl_bits.h"
#include "packet_parser.h"
+#include "erl_gc.h"
#define ERTS_WANT_DB_INTERNAL__
#include "erl_db.h"
#include "erl_threads.h"
@@ -47,11 +48,11 @@
#undef M_MMAP_THRESHOLD
#undef M_MMAP_MAX
-#if !defined(ELIB_ALLOC_IS_CLIB) && defined(__GLIBC__) && defined(HAVE_MALLOC_H)
+#if defined(__GLIBC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#endif
-#if defined(ELIB_ALLOC_IS_CLIB) || !defined(HAVE_MALLOPT)
+#if !defined(HAVE_MALLOPT)
#undef HAVE_MALLOPT
#define HAVE_MALLOPT 0
#endif
@@ -125,7 +126,7 @@ erts_heap_alloc(Process* p, Uint need)
n = need;
bp = MBUF(p);
- if (bp != NULL && need <= (bp->size - bp->used_size)) {
+ if (bp != NULL && need <= (bp->alloc_size - bp->used_size)) {
Eterm* ret = bp->mem + bp->used_size;
bp->used_size += need;
return ret;
@@ -158,16 +159,11 @@ erts_heap_alloc(Process* p, Uint need)
bp->next = MBUF(p);
MBUF(p) = bp;
- bp->size = n;
+ bp->alloc_size = n;
bp->used_size = n;
MBUF_SIZE(p) += n;
- bp->off_heap.mso = NULL;
-#ifndef HYBRID /* FIND ME! */
- bp->off_heap.funs = NULL;
-#endif
- bp->off_heap.externals = NULL;
+ bp->off_heap.first = NULL;
bp->off_heap.overhead = 0;
-
return bp->mem;
}
@@ -204,6 +200,25 @@ erl_grow_stack(Eterm** start, Eterm** sp, Eterm** end)
*end = *start + new_size;
*sp = *start + sp_offs;
}
+/*
+ * Helper function for the WSTACK macros defined in global.h.
+ */
+void
+erl_grow_wstack(UWord** start, UWord** sp, UWord** end)
+{
+ Uint old_size = (*end - *start);
+ Uint new_size = old_size * 2;
+ Uint sp_offs = *sp - *start;
+ if (new_size > 2 * DEF_ESTACK_SIZE) {
+ *start = erts_realloc(ERTS_ALC_T_ESTACK, (void *) *start, new_size*sizeof(UWord));
+ } else {
+ UWord* new_ptr = erts_alloc(ERTS_ALC_T_ESTACK, new_size*sizeof(UWord));
+ sys_memcpy(new_ptr, *start, old_size*sizeof(UWord));
+ *start = new_ptr;
+ }
+ *end = *start + new_size;
+ *sp = *start + sp_offs;
+}
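/* Editor's sketch (not part of the patch): the WSTACK macros grown by this
 * helper mirror the ESTACK ones but hold untagged UWord values, as used by
 * make_hash()/eq()/cmp() below. Typical usage pattern: */
static UWord wstack_demo(void)
{
    UWord acc = 0;
    DECLARE_WSTACK(s);
    WSTACK_PUSH(s, (UWord) 1);
    WSTACK_PUSH2(s, (UWord) 2, (UWord) 3);
    while (!WSTACK_ISEMPTY(s))
        acc += WSTACK_POP(s);
    DESTROY_WSTACK(s);
    return acc; /* 1 + 2 + 3 == 6 */
}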
/* CTYPE macros */
@@ -354,6 +369,31 @@ erts_bld_uint(Uint **hpp, Uint *szp, Uint ui)
return res;
}
+/*
+ * Erts_bld_uword is more or less similar to erts_bld_uint, but a pointer
+ * can safely be passed.
+ */
+
+Eterm
+erts_bld_uword(Uint **hpp, Uint *szp, UWord uw)
+{
+ Eterm res = THE_NON_VALUE;
+ if (IS_USMALL(0, uw)) {
+ if (hpp)
+ res = make_small((Uint) uw);
+ }
+ else {
+ if (szp)
+ *szp += BIG_UWORD_HEAP_SIZE(uw);
+ if (hpp) {
+ res = uword_to_big(uw, *hpp);
+ *hpp += BIG_UWORD_HEAP_SIZE(uw);
+ }
+ }
+ return res;
+}
+
+
Eterm
erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
{
@@ -364,7 +404,7 @@ erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
}
else {
if (szp)
- *szp = ERTS_UINT64_HEAP_SIZE(ui64);
+ *szp += ERTS_UINT64_HEAP_SIZE(ui64);
if (hpp)
res = erts_uint64_to_big(ui64, hpp);
}
@@ -381,7 +421,7 @@ erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
}
else {
if (szp)
- *szp = ERTS_SINT64_HEAP_SIZE(si64);
+ *szp += ERTS_SINT64_HEAP_SIZE(si64);
if (hpp)
res = erts_sint64_to_big(si64, hpp);
}
@@ -465,7 +505,7 @@ erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len)
if (hpp) {
res = NIL;
while (--i >= 0) {
- res = CONS(*hpp, make_small(str[i]), res);
+ res = CONS(*hpp, make_small((byte) str[i]), res);
*hpp += 2;
}
}
@@ -711,7 +751,7 @@ hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
Uint32 make_hash(Eterm term_arg)
{
- DECLARE_ESTACK(stack);
+ DECLARE_WSTACK(stack);
Eterm term = term_arg;
Eterm hash = 0;
unsigned op;
@@ -770,7 +810,7 @@ tail_recur:
Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
UINT32_HASH_STEP(y2, FUNNY_NUMBER2);
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
if (y2 >> 32)
UINT32_HASH_STEP(y2 >> 32, FUNNY_NUMBER2);
#endif
@@ -787,7 +827,7 @@ tail_recur:
}
case EXPORT_DEF:
{
- Export* ep = (Export *) (export_val(term))[1];
+ Export* ep = *((Export **) (export_val(term) + 1));
hash = hash * FUNNY_NUMBER11 + ep->code[2];
hash = hash*FUNNY_NUMBER1 +
@@ -809,7 +849,7 @@ tail_recur:
hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
if (num_free > 0) {
if (num_free > 1) {
- ESTACK_PUSH3(stack, (Eterm) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
+ WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
}
term = funp->env[0];
goto tail_recur;
@@ -837,9 +877,9 @@ tail_recur:
}
case MAKE_HASH_CDR_PRE_OP:
- term = ESTACK_POP(stack);
+ term = (Eterm) WSTACK_POP(stack);
if (is_not_list(term)) {
- ESTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
+ WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
goto tail_recur;
}
/* fall through */
@@ -854,13 +894,13 @@ tail_recur:
hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);
if (is_not_list(CDR(list))) {
- ESTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
+ WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
term = CDR(list);
goto tail_recur;
}
list = list_val(CDR(list));
}
- ESTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
+ WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
term = CAR(list);
goto tail_recur;
}
@@ -888,7 +928,7 @@ tail_recur:
}
d = BIG_DIGIT(ptr, k);
k = sizeof(ErtsDigit);
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
if (!(d >> 32))
k /= 2;
#endif
@@ -904,21 +944,21 @@ tail_recur:
Eterm* ptr = tuple_val(term);
Uint arity = arityval(*ptr);
- ESTACK_PUSH3(stack, arity, (Eterm)(ptr+1), arity);
+ WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity);
op = MAKE_HASH_TUPLE_OP;
}/*fall through*/
case MAKE_HASH_TUPLE_OP:
case MAKE_HASH_FUN_OP:
{
- Uint i = ESTACK_POP(stack);
- Eterm* ptr = (Eterm*) ESTACK_POP(stack);
+ Uint i = (Uint) WSTACK_POP(stack);
+ Eterm* ptr = (Eterm*) WSTACK_POP(stack);
if (i != 0) {
term = *ptr;
- ESTACK_PUSH3(stack, (Eterm)(ptr+1), i-1, op);
+ WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
goto tail_recur;
}
if (op == MAKE_HASH_TUPLE_OP) {
- Uint32 arity = ESTACK_POP(stack);
+ Uint32 arity = (Uint32) WSTACK_POP(stack);
hash = hash*FUNNY_NUMBER9 + arity;
}
break;
@@ -928,10 +968,10 @@ tail_recur:
erl_exit(1, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
return 0;
}
- if (ESTACK_ISEMPTY(stack)) break;
- op = ESTACK_POP(stack);
+ if (WSTACK_ISEMPTY(stack)) break;
+ op = WSTACK_POP(stack);
}
- DESTROY_ESTACK(stack);
+ DESTROY_WSTACK(stack);
return hash;
#undef UINT32_HASH_STEP
@@ -1002,7 +1042,7 @@ Uint32
make_hash2(Eterm term)
{
Uint32 hash;
- Eterm tmp_big[2];
+ DeclareTmpHeapNoproc(tmp_big,2);
/* (HCONST * {2, ..., 14}) mod 2^32 */
#define HCONST_2 0x3c6ef372UL
@@ -1041,7 +1081,6 @@ make_hash2(Eterm term)
} while(0)
#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
-
/* Optimization. Simple cases before declaration of estack. */
if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
switch (term & _TAG_IMMED1_MASK) {
@@ -1070,6 +1109,7 @@ make_hash2(Eterm term)
Eterm tmp;
DECLARE_ESTACK(s);
+ UseTmpHeapNoproc(2);
hash = 0;
for (;;) {
switch (primary_tag(term)) {
@@ -1123,7 +1163,7 @@ make_hash2(Eterm term)
break;
case EXPORT_SUBTAG:
{
- Export* ep = (Export *) (export_val(term))[1];
+ Export* ep = *((Export **) (export_val(term) + 1));
UINT32_HASH_2
(ep->code[2],
@@ -1314,6 +1354,7 @@ make_hash2(Eterm term)
hash2_common:
if (ESTACK_ISEMPTY(s)) {
DESTROY_ESTACK(s);
+ UnUseTmpHeapNoproc(2);
return hash;
}
term = ESTACK_POP(s);
@@ -1332,7 +1373,7 @@ make_hash2(Eterm term)
Uint32 make_broken_hash(Eterm term)
{
Uint32 hash = 0;
- DECLARE_ESTACK(stack);
+ DECLARE_WSTACK(stack);
unsigned op;
tail_recur:
op = tag_val_def(term);
@@ -1346,7 +1387,7 @@ tail_recur:
(atom_tab(atom_val(term))->slot.bucket.hvalue);
break;
case SMALL_DEF:
-#ifdef ARCH_64
+#if defined(ARCH_64) && !HALFWORD_HEAP
{
Sint y1 = signed_val(term);
Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
@@ -1399,7 +1440,7 @@ tail_recur:
case EXPORT_DEF:
{
- Export* ep = (Export *) (export_val(term))[1];
+ Export* ep = *((Export **) (export_val(term) + 1));
hash = hash * FUNNY_NUMBER11 + ep->code[2];
hash = hash*FUNNY_NUMBER1 +
@@ -1421,7 +1462,7 @@ tail_recur:
hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
if (num_free > 0) {
if (num_free > 1) {
- ESTACK_PUSH3(stack, (Eterm) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
+ WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
}
term = funp->env[0];
goto tail_recur;
@@ -1456,16 +1497,17 @@ tail_recur:
break;
case MAKE_HASH_CDR_PRE_OP:
- term = ESTACK_POP(stack);
+ term = (Eterm) WSTACK_POP(stack);
if (is_not_list(term)) {
- ESTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
+ WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
goto tail_recur;
}
/*fall through*/
case LIST_DEF:
{
Eterm* list = list_val(term);
- ESTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
+ WSTACK_PUSH2(stack, (UWord) CDR(list),
+ (UWord) MAKE_HASH_CDR_PRE_OP);
term = CAR(list);
goto tail_recur;
}
@@ -1538,21 +1580,21 @@ tail_recur:
Eterm* ptr = tuple_val(term);
Uint arity = arityval(*ptr);
- ESTACK_PUSH3(stack, arity, (Eterm)(ptr+1), arity);
+ WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity);
op = MAKE_HASH_TUPLE_OP;
}/*fall through*/
case MAKE_HASH_TUPLE_OP:
case MAKE_HASH_FUN_OP:
{
- Uint i = ESTACK_POP(stack);
- Eterm* ptr = (Eterm*) ESTACK_POP(stack);
+ Uint i = (Uint) WSTACK_POP(stack);
+ Eterm* ptr = (Eterm*) WSTACK_POP(stack);
if (i != 0) {
term = *ptr;
- ESTACK_PUSH3(stack, (Eterm)(ptr+1), i-1, op);
+ WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
goto tail_recur;
}
if (op == MAKE_HASH_TUPLE_OP) {
- Uint32 arity = ESTACK_POP(stack);
+ Uint32 arity = (UWord) WSTACK_POP(stack);
hash = hash*FUNNY_NUMBER9 + arity;
}
break;
@@ -1562,11 +1604,11 @@ tail_recur:
erl_exit(1, "Invalid tag in make_broken_hash\n");
return 0;
}
- if (ESTACK_ISEMPTY(stack)) break;
- op = ESTACK_POP(stack);
+ if (WSTACK_ISEMPTY(stack)) break;
+ op = (Uint) WSTACK_POP(stack);
}
- DESTROY_ESTACK(stack);
+ DESTROY_WSTACK(stack);
return hash;
#undef MAKE_HASH_TUPLE_OP
@@ -1869,7 +1911,7 @@ erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp)
int eq(Eterm a, Eterm b)
{
- DECLARE_ESTACK(stack);
+ DECLARE_WSTACK(stack);
Sint sz;
Eterm* aa;
Eterm* bb;
@@ -1887,7 +1929,7 @@ tailrecur_ne:
Eterm atmp = CAR(aval);
Eterm btmp = CAR(bval);
if (atmp != btmp) {
- ESTACK_PUSH2(stack,CDR(bval),CDR(aval));
+ WSTACK_PUSH2(stack,(UWord) CDR(bval),(UWord) CDR(aval));
a = atmp;
b = btmp;
goto tailrecur_ne;
@@ -1957,8 +1999,8 @@ tailrecur_ne:
case EXPORT_SUBTAG:
{
if (is_export(b)) {
- Export* a_exp = (Export *) (export_val(a))[1];
- Export* b_exp = (Export *) (export_val(b))[1];
+ Export* a_exp = *((Export **) (export_val(a) + 1));
+ Export* b_exp = *((Export **) (export_val(b) + 1));
if (a_exp == b_exp) goto pop_next;
}
break; /* not equal */
@@ -2130,32 +2172,32 @@ term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
goto not_equal;
}
if (i > 1) { /* push the rest */
- ESTACK_PUSH3(stack, i-1, (Eterm)(bp+1),
- ((Eterm)(ap+1)) | TAG_PRIMARY_HEADER);
+ WSTACK_PUSH3(stack, i-1, (UWord)(bp+1),
+ ((UWord)(ap+1)) | TAG_PRIMARY_HEADER);
/* We (ab)use TAG_PRIMARY_HEADER to recognize a term_array */
}
goto tailrecur_ne;
}
pop_next:
- if (!ESTACK_ISEMPTY(stack)) {
- Eterm something = ESTACK_POP(stack);
- if (primary_tag(something) == TAG_PRIMARY_HEADER) { /* a term_array */
+ if (!WSTACK_ISEMPTY(stack)) {
+ UWord something = WSTACK_POP(stack);
+ if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
aa = (Eterm*) something;
- bb = (Eterm*) ESTACK_POP(stack);
- sz = ESTACK_POP(stack);
+ bb = (Eterm*) WSTACK_POP(stack);
+ sz = WSTACK_POP(stack);
goto term_array;
}
a = something;
- b = ESTACK_POP(stack);
+ b = WSTACK_POP(stack);
goto tailrecur;
}
- DESTROY_ESTACK(stack);
+ DESTROY_WSTACK(stack);
return 1;
not_equal:
- DESTROY_ESTACK(stack);
+ DESTROY_WSTACK(stack);
return 0;
}
@@ -2210,7 +2252,7 @@ static int cmp_atoms(Eterm a, Eterm b)
Sint cmp(Eterm a, Eterm b)
{
- DECLARE_ESTACK(stack);
+ DECLARE_WSTACK(stack);
Eterm* aa;
Eterm* bb;
int i;
@@ -2327,7 +2369,7 @@ tailrecur_ne:
Eterm atmp = CAR(aa);
Eterm btmp = CAR(bb);
if (atmp != btmp) {
- ESTACK_PUSH2(stack,CDR(bb),CDR(aa));
+ WSTACK_PUSH2(stack,(UWord) CDR(bb),(UWord) CDR(aa));
a = atmp;
b = btmp;
goto tailrecur_ne;
@@ -2392,8 +2434,8 @@ tailrecur_ne:
a_tag = EXPORT_DEF;
goto mixed_types;
} else {
- Export* a_exp = (Export *) (export_val(a))[1];
- Export* b_exp = (Export *) (export_val(b))[1];
+ Export* a_exp = *((Export **) (export_val(a) + 1));
+ Export* b_exp = *((Export **) (export_val(b) + 1));
if ((j = cmp_atoms(a_exp->code[0], b_exp->code[0])) != 0) {
RETURN_NEQ(j);
@@ -2581,7 +2623,11 @@ tailrecur_ne:
{
FloatDef f1, f2;
Eterm big;
- Eterm big_buf[2];
+#if HEAP_ON_C_STACK
+ Eterm big_buf[2]; /* If HEAP_ON_C_STACK */
+#else
+ Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap;
+#endif
switch(_NUMBER_CODE(a_tag, b_tag)) {
case SMALL_BIG:
@@ -2644,7 +2690,7 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */
}
} else {
/* (ab)Use TAG_PRIMARY_HEADER to recognize a term_array */
- ESTACK_PUSH3(stack, i, (Eterm)bb, (Eterm)aa | TAG_PRIMARY_HEADER);
+ WSTACK_PUSH3(stack, i, (UWord)bb, (UWord)aa | TAG_PRIMARY_HEADER);
goto tailrecur_ne;
}
}
@@ -2654,20 +2700,20 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */
goto tailrecur;
pop_next:
- if (!ESTACK_ISEMPTY(stack)) {
- Eterm something = ESTACK_POP(stack);
- if (primary_tag(something) == TAG_PRIMARY_HEADER) { /* a term_array */
+ if (!WSTACK_ISEMPTY(stack)) {
+ UWord something = WSTACK_POP(stack);
+ if (primary_tag((Eterm) something) == TAG_PRIMARY_HEADER) { /* a term_array */
aa = (Eterm*) something;
- bb = (Eterm*) ESTACK_POP(stack);
- i = ESTACK_POP(stack);
+ bb = (Eterm*) WSTACK_POP(stack);
+ i = WSTACK_POP(stack);
goto term_array;
}
- a = something;
- b = ESTACK_POP(stack);
+ a = (Eterm) something;
+ b = (Eterm) WSTACK_POP(stack);
goto tailrecur;
}
- DESTROY_ESTACK(stack);
+ DESTROY_WSTACK(stack);
return 0;
not_equal:
@@ -2678,21 +2724,8 @@ not_equal:
}
-void
-erts_cleanup_externals(ExternalThing *etp)
-{
- ExternalThing *tetp;
-
- tetp = etp;
-
- while(tetp) {
- erts_deref_node_entry(tetp->node);
- tetp = tetp->next;
- }
-}
-
Eterm
-store_external_or_ref_(Uint **hpp, ExternalThing **etpp, Eterm ns)
+store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
{
Uint i;
Uint size;
@@ -2711,8 +2744,8 @@ store_external_or_ref_(Uint **hpp, ExternalThing **etpp, Eterm ns)
erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
- ((ExternalThing *) to_hp)->next = *etpp;
- *etpp = (ExternalThing *) to_hp;
+ ((struct erl_off_heap_header*) to_hp)->next = oh->first;
+ oh->first = (struct erl_off_heap_header*) to_hp;
return make_external(to_hp);
}
@@ -2741,7 +2774,7 @@ store_external_or_ref_in_proc_(Process *proc, Eterm ns)
sz = NC_HEAP_SIZE(ns);
ASSERT(sz > 0);
hp = HAlloc(proc, sz);
- return store_external_or_ref_(&hp, &MSO(proc).externals, ns);
+ return store_external_or_ref_(&hp, &MSO(proc), ns);
}
void bin_write(int to, void *to_arg, byte* buf, int sz)
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 95510a16b2..c450f10f48 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -53,6 +53,8 @@
#define FILE_IPREAD 27
#define FILE_ALTNAME 28
#define FILE_READ_LINE 29
+#define FILE_FDATASYNC 30
+#define FILE_FADVISE 31
/* Return codes */
@@ -102,7 +104,7 @@
#include <ctype.h>
#include <sys/types.h>
-extern void erl_exit(int n, char *fmt, _DOTS_);
+void erl_exit(int n, char *fmt, ...);
static ErlDrvSysInfo sys_info;
@@ -196,9 +198,9 @@ enum e_timer {timer_idle, timer_again, timer_write};
struct t_data;
typedef struct {
- Sint fd;
+ SWord fd;
ErlDrvPort port;
- unsigned key; /* Async queue key */
+ unsigned int key; /* Async queue key */
unsigned flags; /* Original flags from FILE_OPEN. */
void (*invoke)(void *);
struct t_data *d;
@@ -306,7 +308,7 @@ struct t_data
int result_ok;
Efile_error errInfo;
int flags;
- Sint fd;
+ SWord fd;
/**/
Efile_info info;
EFILE_DIR_HANDLE dir_handle; /* Handle to open directory. */
@@ -357,6 +359,11 @@ struct t_data
struct t_readdir_buf *first_buf;
struct t_readdir_buf *last_buf;
} read_dir;
+ struct {
+ Sint64 offset;
+ Sint64 length;
+ int advise;
+ } fadvise;
} c;
char b[1];
};
@@ -605,7 +612,7 @@ file_start(ErlDrvPort port, char* command)
}
desc->fd = FILE_FD_INVALID;
desc->port = port;
- desc->key = (unsigned) (Uint) port;
+ desc->key = (unsigned int) (UWord) port;
desc->flags = 0;
desc->invoke = NULL;
desc->d = NULL;
@@ -630,7 +637,7 @@ static void free_data(void *data)
EF_FREE(data);
}
-static void do_close(int flags, Sint fd) {
+static void do_close(int flags, SWord fd) {
if (flags & EFILE_COMPRESSED) {
erts_gzclose((gzFile)(fd));
} else {
@@ -709,7 +716,7 @@ static void reply_Uint_posix_error(file_descriptor *desc, Uint num,
TRACE_C('N');
response[0] = FILE_RESP_NUMERR;
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_VOID_P == 4 || HALFWORD_HEAP
put_int32(0, response+1);
#else
put_int32(num>>32, response+1);
@@ -767,7 +774,7 @@ static int reply_Uint(file_descriptor *desc, Uint result) {
TRACE_C('R');
tmp[0] = FILE_RESP_NUMBER;
-#if SIZEOF_VOID_P == 4
+#if SIZEOF_VOID_P == 4 || HALFWORD_HEAP
put_int32(0, tmp+1);
#else
put_int32(result>>32, tmp+1);
@@ -883,6 +890,15 @@ static void invoke_chdir(void *data)
invoke_name(data, efile_chdir);
}
+static void invoke_fdatasync(void *data)
+{
+ struct t_data *d = (struct t_data *) data;
+ int fd = (int) d->fd;
+
+ d->again = 0;
+ d->result_ok = efile_fdatasync(&d->errInfo, fd);
+}
+
static void invoke_fsync(void *data)
{
struct t_data *d = (struct t_data *) data;
@@ -1620,7 +1636,7 @@ static void invoke_open(void *data)
status = efile_may_openfile(&d->errInfo, d->b);
if (status || (d->errInfo.posix_errno != EISDIR)) {
mode = (d->flags & EFILE_MODE_READ) ? "rb" : "wb";
- d->fd = (Sint) erts_gzopen(d->b, mode);
+ d->fd = (SWord) erts_gzopen(d->b, mode);
if ((gzFile)d->fd) {
status = 1;
} else {
@@ -1637,6 +1653,18 @@ static void invoke_open(void *data)
d->result_ok = status;
}
+static void invoke_fadvise(void *data)
+{
+ struct t_data *d = (struct t_data *) data;
+ int fd = (int) d->fd;
+ off_t offset = (off_t) d->c.fadvise.offset;
+ off_t length = (off_t) d->c.fadvise.length;
+ int advise = (int) d->c.fadvise.advise;
+
+ d->again = 0;
+ d->result_ok = efile_fadvise(&d->errInfo, fd, offset, length, advise);
+}
+
static void free_readdir(void *data)
{
struct t_data *d = (struct t_data *) data;
@@ -1919,12 +1947,14 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
case FILE_RMDIR:
case FILE_CHDIR:
case FILE_DELETE:
+ case FILE_FDATASYNC:
case FILE_FSYNC:
case FILE_TRUNCATE:
case FILE_LINK:
case FILE_SYMLINK:
case FILE_RENAME:
case FILE_WRITE_INFO:
+ case FILE_FADVISE:
reply(desc, d->result_ok, &d->errInfo);
free_data(data);
break;
@@ -2209,6 +2239,18 @@ file_output(ErlDrvData e, char* buf, int count)
goto done;
}
+ case FILE_FDATASYNC:
+ {
+ d = EF_SAFE_ALLOC(sizeof(struct t_data));
+
+ d->fd = fd;
+ d->command = command;
+ d->invoke = invoke_fdatasync;
+ d->free = free_data;
+ d->level = 2;
+ goto done;
+ }
+
case FILE_FSYNC:
{
d = EF_SAFE_ALLOC(sizeof(struct t_data));
@@ -2332,6 +2374,21 @@ file_output(ErlDrvData e, char* buf, int count)
goto done;
}
+ case FILE_FADVISE:
+ {
+ d = EF_SAFE_ALLOC(sizeof(struct t_data));
+
+ d->fd = fd;
+ d->command = command;
+ d->invoke = invoke_fadvise;
+ d->free = free_data;
+ d->level = 2;
+ d->c.fadvise.offset = get_int64((uchar*) buf);
+ d->c.fadvise.length = get_int64(((uchar*) buf) + sizeof(Sint64));
+ d->c.fadvise.advise = get_int32(((uchar*) buf) + 2 * sizeof(Sint64));
+ goto done;
+ }
+
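/* Editor's note: as decoded above, the FILE_FADVISE payload is three
 * big-endian fields laid out back to back — a 64-bit offset, a 64-bit
 * length and a 32-bit advise value — which is exactly what the
 * get_int64/get_int32 buffer offsets express. */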
}
/*
diff --git a/erts/emulator/drivers/common/erl_efile.h b/erts/emulator/drivers/common/erl_efile.h
index 9aa941e550..ac95c1f949 100644
--- a/erts/emulator/drivers/common/erl_efile.h
+++ b/erts/emulator/drivers/common/erl_efile.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -32,7 +32,8 @@
#define EFILE_MODE_READ_WRITE 3
#define EFILE_MODE_APPEND 4
#define EFILE_COMPRESSED 8
-#define EFILE_NO_TRUNCATE 16 /* Special for reopening on VxWorks */
+#define EFILE_MODE_EXCL 16
+#define EFILE_NO_TRUNCATE 32 /* Special for reopening on VxWorks */
/*
* Seek modes for efile_seek().
@@ -126,6 +127,7 @@ int efile_readdir(Efile_error* errInfo, char* name,
int efile_openfile(Efile_error* errInfo, char* name, int flags,
int* pfd, Sint64* pSize);
void efile_closefile(int fd);
+int efile_fdatasync(Efile_error* errInfo, int fd);
int efile_fsync(Efile_error* errInfo, int fd);
int efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
char *name, int info_for_link);
@@ -150,3 +152,5 @@ int efile_altname(Efile_error* errInfo, char *name,
int efile_link(Efile_error* errInfo, char* old, char* new);
int efile_symlink(Efile_error* errInfo, char* old, char* new);
int efile_may_openfile(Efile_error* errInfo, char *name);
+int efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length,
+ int advise);
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index c6e23ee647..059288d1cb 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -41,14 +41,12 @@
#define STRINGIFY_1(b) IDENTITY(#b)
#define STRINGIFY(a) STRINGIFY_1(a)
-#ifndef _OSE_
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif
-#endif
/* All platforms fail on malloc errors. */
@@ -57,6 +55,21 @@
#include "erl_driver.h"
+/* The IS_SOCKET_ERROR macro below is used for portability reasons. While
+ POSIX specifies that errors from socket-related system calls should be
+ indicated with a -1 return value, some users have experienced non-Windows
+ OS kernels that return negative values other than -1. While one can argue
+ that such kernels are technically broken, comparing against values less
+ than 0 covers their out-of-spec return values without imposing incorrect
+ semantics on systems that manage to correctly return -1 for errors, thus
+ increasing Erlang's portability.
+*/
+#ifdef __WIN32__
+#define IS_SOCKET_ERROR(val) ((val) == SOCKET_ERROR)
+#else
+#define IS_SOCKET_ERROR(val) ((val) < 0)
+#endif
+
#ifdef __WIN32__
#define STRNCASECMP strncasecmp
@@ -186,18 +199,8 @@ static unsigned long one_value = 1;
#include <netdb.h>
#endif
-#ifndef _OSE_
#include <sys/socket.h>
#include <netinet/in.h>
-#else
-/* datatypes and macros from Solaris socket.h */
-struct linger {
- int l_onoff; /* option on/off */
- int l_linger; /* linger time */
-};
-#define SO_OOBINLINE 0x0100 /* leave received OOB data in line */
-#define SO_LINGER 0x0080 /* linger on close if data present */
-#endif
#ifdef VXWORKS
#include <rpc/rpctypes.h>
@@ -206,12 +209,10 @@ struct linger {
#include <rpc/types.h>
#endif
-#ifndef _OSE_
#include <netinet/tcp.h>
#include <arpa/inet.h>
-#endif
-#if (!defined(VXWORKS) && !defined(_OSE_))
+#if (!defined(VXWORKS))
#include <sys/param.h>
#ifdef HAVE_ARPA_NAMESER_H
#include <arpa/nameser.h>
@@ -226,33 +227,11 @@ struct linger {
#include <sys/ioctl.h>
#endif
-#ifndef _OSE_
#include <net/if.h>
-#else
-#define IFF_MULTICAST 0x00000800
-#endif
-
-#ifdef _OSE_
-#include "inet.h"
-#include "ineterr.h"
-#include "ose_inet_drv.h"
-#include "nameser.h"
-#include "resolv.h"
-#define SET_ASYNC(s) setsockopt((s), SOL_SOCKET, SO_OSEEVENT, (&(s)), sizeof(int))
-
-extern void select_release(void);
-
-#endif /* _OSE_ */
-
-/* Solaris headers, only to be used with SFK */
-#ifdef _OSE_SFK_
-#include <ctype.h>
-#include <string.h>
-#endif
/* SCTP support -- currently for UNIX platforms only: */
#undef HAVE_SCTP
-#if (!defined(VXWORKS) && !defined(_OSE_) && !defined(__WIN32__) && defined(HAVE_SCTP_H))
+#if (!defined(VXWORKS) && !defined(__WIN32__) && defined(HAVE_SCTP_H))
#include <netinet/sctp.h>
@@ -315,7 +294,7 @@ static int (*p_sctp_bindx)(int sd, struct sockaddr *addrs,
#define DEBUGF(X) printf X
#endif
-#if !defined(__WIN32__) && !defined(HAVE_STRNCASECMP)
+#if !defined(HAVE_STRNCASECMP)
#define STRNCASECMP my_strncasecmp
static int my_strncasecmp(const char *s1, const char *s2, size_t n)
@@ -335,6 +314,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INVALID_SOCKET -1
#define INVALID_EVENT -1
#define SOCKET_ERROR -1
+
#define SOCKET int
#define HANDLE long int
#define FD_READ ERL_DRV_READ
@@ -362,20 +342,6 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define sock_htons(x) htons((x))
#define sock_htonl(x) htonl((x))
-#ifdef _OSE_
-#define sock_accept(s, addr, len) ose_inet_accept((s), (addr), (len))
-#define sock_send(s,buf,len,flag) ose_inet_send((s),(buf),(len),(flag))
-#define sock_sendto(s,buf,blen,flag,addr,alen) \
- ose_inet_sendto((s),(buf),(blen),(flag),(addr),(alen))
-#define sock_sendv(s, vec, size, np, flag) \
- (*(np) = ose_inet_sendv((s), (SysIOVec*)(vec), (size)))
-#define sock_open(af, type, proto) ose_inet_socket((af), (type), (proto))
-#define sock_close(s) ose_inet_close((s))
-#define sock_hostname(buf, len) ose_gethostname((buf), (len))
-#define sock_getservbyname(name,proto) ose_getservbyname((name), (proto))
-#define sock_getservbyport(port,proto) ose_getservbyport((port), (proto))
-
-#else
#define sock_accept(s, addr, len) accept((s), (addr), (len))
#define sock_send(s,buf,len,flag) send((s),(buf),(len),(flag))
#define sock_sendto(s,buf,blen,flag,addr,alen) \
@@ -391,7 +357,6 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define sock_hostname(buf, len) gethostname((buf), (len))
#define sock_getservbyname(name,proto) getservbyname((name), (proto))
#define sock_getservbyport(port,proto) getservbyport((port), (proto))
-#endif /* _OSE_ */
#define sock_recv(s,buf,len,flag) recv((s),(buf),(len),(flag))
#define sock_recvfrom(s,buf,blen,flag,addr,alen) \
@@ -402,13 +367,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define sock_create_event(d) ((d)->s) /* return file descriptor */
#define sock_close_event(e) /* do nothing */
-#ifdef _OSE_
-#define inet_driver_select(port, e, mode, on) \
- ose_inet_select(port, e, mode, on)
-#else
#define inet_driver_select(port, e, mode, on) \
driver_select(port, e, mode | (on?ERL_DRV_USE:0), on)
-#endif /* _OSE_ */
#define sock_select(d, flags, onoff) do { \
(d)->event_mask = (onoff) ? \
@@ -1085,7 +1045,7 @@ struct erl_drv_entry inet_driver_entry =
};
/* XXX: is this a driver interface function ??? */
-extern void erl_exit(int n, char*, _DOTS_);
+void erl_exit(int n, char*, ...);
/*
* Malloc wrapper,
@@ -3480,13 +3440,9 @@ static int inet_init()
INIT_ATOM(scheme);
/* add TCP, UDP and SCTP drivers */
-#ifdef _OSE_
- add_ose_tcp_drv_entry(&tcp_inet_driver_entry);
- add_ose_udp_drv_entry(&udp_inet_driver_entry);
-#else
add_driver_entry(&tcp_inet_driver_entry);
add_driver_entry(&udp_inet_driver_entry);
-# ifdef HAVE_SCTP
+#ifdef HAVE_SCTP
/* Check the size of SCTP AssocID -- currently both this driver and the
Erlang part require 32 bit: */
ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN);
@@ -3501,8 +3457,8 @@ static int inet_init()
add_driver_entry(&sctp_inet_driver_entry);
}
}
-# endif
-#endif /* _OSE_ */
+#endif
+
/* remove the dummy inet driver */
remove_driver_entry(&inet_driver_entry);
return 0;
@@ -3744,7 +3700,7 @@ static int inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
unsigned int sz = sizeof(name);
/* check that it is a socket and that the socket is bound */
- if (sock_name(s, (struct sockaddr*) &name, &sz) == SOCKET_ERROR)
+ if (IS_SOCKET_ERROR(sock_name(s, (struct sockaddr*) &name, &sz)))
return ctl_error(sock_errno(), rbuf, rsize);
desc->s = s;
if ((desc->event = sock_create_event(desc)) == INVALID_EVENT)
@@ -3756,7 +3712,7 @@ static int inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
desc->state = INET_STATE_BOUND; /* assume bound */
if (type == SOCK_STREAM) { /* check if connected */
sz = sizeof(name);
- if (sock_peer(s, (struct sockaddr*) &name, &sz) != SOCKET_ERROR)
+ if (!IS_SOCKET_ERROR(sock_peer(s, (struct sockaddr*) &name, &sz)))
desc->state = INET_STATE_CONNECTED;
}
@@ -5687,8 +5643,8 @@ static int inet_fill_opts(inet_descriptor* desc,
buf += arg_sz;
len -= arg_sz;
}
- if (sock_getopt(desc->s,proto,type,arg_ptr,&arg_sz) ==
- SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(sock_getopt(desc->s,proto,type,
+ arg_ptr,&arg_sz))) {
TRUNCATE_TO(0,ptr);
continue;
}
@@ -5705,7 +5661,7 @@ static int inet_fill_opts(inet_descriptor* desc,
RETURN_ERROR();
}
/* We have 5 bytes allocated to ptr */
- if (sock_getopt(desc->s,proto,type,arg_ptr,&arg_sz) == SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(sock_getopt(desc->s,proto,type,arg_ptr,&arg_sz))) {
TRUNCATE_TO(0,ptr);
continue;
}
@@ -6771,7 +6727,7 @@ static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
if (len != 0)
return ctl_error(EINVAL, rbuf, rsize);
- if (sock_hostname(tbuf, MAXHOSTNAMELEN) == SOCKET_ERROR)
+ if (IS_SOCKET_ERROR(sock_hostname(tbuf, MAXHOSTNAMELEN)))
return ctl_error(sock_errno(), rbuf, rsize);
return ctl_reply(INET_REP_OK, tbuf, strlen(tbuf), rbuf, rsize);
}
@@ -6788,7 +6744,7 @@ static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
return ctl_error(ENOTCONN, rbuf, rsize);
if ((ptr = desc->peer_ptr) == NULL) {
ptr = &peer;
- if (sock_peer(desc->s, (struct sockaddr*)ptr,&sz) == SOCKET_ERROR)
+ if (IS_SOCKET_ERROR(sock_peer(desc->s, (struct sockaddr*)ptr,&sz)))
return ctl_error(sock_errno(), rbuf, rsize);
}
if (inet_get_address(desc->sfamily, tbuf, ptr, &sz) < 0)
@@ -6825,7 +6781,7 @@ static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
if ((ptr = desc->name_ptr) == NULL) {
ptr = &name;
- if (sock_name(desc->s, (struct sockaddr*)ptr, &sz) == SOCKET_ERROR)
+ if (IS_SOCKET_ERROR(sock_name(desc->s, (struct sockaddr*)ptr, &sz)))
return ctl_error(sock_errno(), rbuf, rsize);
}
if (inet_get_address(desc->sfamily, tbuf, ptr, &sz) < 0)
@@ -6864,7 +6820,7 @@ static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
if (inet_set_address(desc->sfamily, &local, buf, &len) == NULL)
return ctl_error(EINVAL, rbuf, rsize);
- if (sock_bind(desc->s,(struct sockaddr*) &local, len) == SOCKET_ERROR)
+ if (IS_SOCKET_ERROR(sock_bind(desc->s,(struct sockaddr*) &local, len)))
return ctl_error(sock_errno(), rbuf, rsize);
desc->state = INET_STATE_BOUND;
@@ -7297,7 +7253,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (len != 2)
return ctl_error(EINVAL, rbuf, rsize);
backlog = get_int16(buf);
- if (sock_listen(desc->inet.s, backlog) == SOCKET_ERROR)
+ if (IS_SOCKET_ERROR(sock_listen(desc->inet.s, backlog)))
return ctl_error(sock_errno(), rbuf, rsize);
desc->inet.state = TCP_STATE_LISTEN;
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
@@ -7331,7 +7287,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
code = sock_connect(desc->inet.s,
(struct sockaddr*) &desc->inet.remote, len);
- if ((code == SOCKET_ERROR) &&
+ if (IS_SOCKET_ERROR(code) &&
((sock_errno() == ERRNO_BLOCK) || /* Winsock2 */
(sock_errno() == EINPROGRESS))) { /* Unix & OSE!! */
sock_select(INETP(desc), FD_CONNECT, 1);
@@ -7518,7 +7474,6 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
tcp_deliver(desc, 0);
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
-#ifndef _OSE_
case TCP_REQ_SHUTDOWN: {
int how;
DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port));
@@ -7535,7 +7490,6 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return ctl_error(sock_errno(), rbuf, rsize);
}
}
-#endif
default:
DEBUGF(("tcp_inet_ctl(%ld): %u\r\n", (long)desc->inet.port, cmd));
return inet_ctl(INETP(desc), cmd, buf, len, rbuf, rsize);
@@ -8009,7 +7963,7 @@ static int tcp_recv(tcp_descriptor* desc, int request_len)
n = sock_recv(desc->inet.s, desc->i_ptr, nread, 0);
- if (n == SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(n)) {
int err = sock_errno();
if (err == ECONNRESET) {
DEBUGF((" => detected close (connreset)\r\n"));
@@ -8511,8 +8465,8 @@ static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev)
(long)desc->inet.port, desc->inet.s, h_len, len));
if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
n = 0;
- } else if (sock_sendv(desc->inet.s, ev->iov, vsize, &n, 0)
- == SOCKET_ERROR) {
+ } else if (IS_SOCKET_ERROR(sock_sendv(desc->inet.s, ev->iov,
+ vsize, &n, 0))) {
if ((sock_errno() != ERRNO_BLOCK) && (sock_errno() != EINTR)) {
int err = sock_errno();
DEBUGF(("tcp_sendv(%ld): s=%d, "
@@ -8605,7 +8559,7 @@ static int tcp_send(tcp_descriptor* desc, char* ptr, int len)
if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
sock_send(desc->inet.s, buf, 0, 0);
n = 0;
- } else if (sock_sendv(desc->inet.s,iov,2,&n,0) == SOCKET_ERROR) {
+ } else if (IS_SOCKET_ERROR(sock_sendv(desc->inet.s,iov,2,&n,0))) {
if ((sock_errno() != ERRNO_BLOCK) && (sock_errno() != EINTR)) {
int err = sock_errno();
DEBUGF(("tcp_send(%ld): s=%d,sock_sendv(size=2) errno = %d\r\n",
@@ -8678,7 +8632,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
int code = sock_peer(desc->inet.s,
(struct sockaddr*) &desc->inet.remote, &sz);
- if (code == SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(code)) {
desc->inet.state = TCP_STATE_BOUND; /* restore state */
ret = async_error(INETP(desc), sock_errno());
goto done;
@@ -8719,7 +8673,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
vsize = vsize > MAX_VSIZE ? MAX_VSIZE : vsize;
DEBUGF(("tcp_inet_output(%ld): s=%d, About to send %d items\r\n",
(long)desc->inet.port, desc->inet.s, vsize));
- if (sock_sendv(desc->inet.s, iov, vsize, &n, 0)==SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(sock_sendv(desc->inet.s, iov, vsize, &n, 0))) {
if ((sock_errno() != ERRNO_BLOCK) && (sock_errno() != EINTR)) {
DEBUGF(("tcp_inet_output(%ld): sock_sendv(%d) errno = %d\r\n",
(long)desc->inet.port, vsize, sock_errno()));
@@ -8988,7 +8942,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
sock_select(desc, FD_CONNECT, 1);
code = sock_connect(desc->s, &remote.sa, len);
- if ((code == SOCKET_ERROR) && (sock_errno() == EINPROGRESS)) {
+ if (IS_SOCKET_ERROR(code) && (sock_errno() == EINPROGRESS)) {
/* XXX: Unix only -- WinSock would have a different cond! */
desc->state = SCTP_STATE_CONNECTING;
if (timeout != INET_INFINITY)
@@ -9028,7 +8982,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
code = sock_connect(desc->s,
(struct sockaddr*) &desc->remote, len);
- if (code == SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(code)) {
sock_connect(desc->s, (struct sockaddr*) NULL, 0);
desc->state &= ~INET_F_ACTIVE;
return ctl_error(sock_errno(), rbuf, rsize);
@@ -9062,7 +9016,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return ctl_error(EINVAL, rbuf, rsize);
flag = get_int8(buf);
- if (sock_listen(desc->s, flag) == SOCKET_ERROR)
+ if (IS_SOCKET_ERROR(sock_listen(desc->s, flag)))
return ctl_error(sock_errno(), rbuf, rsize);
desc->state = SCTP_STATE_LISTEN; /* XXX: not used? */
@@ -9267,7 +9221,7 @@ static void packet_inet_command(ErlDrvData e, char* buf, int len)
check_result_code:
/* "code" analysis is the same for both SCTP and UDP cases above: */
#endif
- if (code == SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(code)) {
int err = sock_errno();
inet_reply_error(desc, err);
}
@@ -9366,7 +9320,7 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
check_result:
#endif
/* Analyse the result: */
- if (n == SOCKET_ERROR
+ if (IS_SOCKET_ERROR(n)
#ifdef HAVE_SCTP
|| (short_recv = (IS_SCTP(desc) && !(mhdr.msg_flags & MSG_EOR)))
/* NB: here we check for EOR not being set -- this is an error as
@@ -9379,11 +9333,13 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
if (err != ERRNO_BLOCK) {
if (!desc->active) {
#ifdef HAVE_SCTP
- if (short_recv)
+ if (short_recv) {
async_error_am(desc, am_short_recv);
- else
-#else
+ } else {
async_error(desc, err);
+ }
+#else
+ async_error(desc, err);
#endif
driver_cancel_timer(desc->port);
sock_select(desc,FD_READ,0);
@@ -9481,7 +9437,7 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
int code = sock_peer(desc->s,
(struct sockaddr*) &desc->remote, &sz);
- if (code == SOCKET_ERROR) {
+ if (IS_SOCKET_ERROR(code)) {
desc->state = PACKET_STATE_BOUND; /* restore state */
ret = async_error(desc, sock_errno());
goto done;
@@ -9922,23 +9878,26 @@ int erts_sock_connect(erts_sock_t socket, byte *ip_addr, int len, Uint16 port)
if (!inet_set_address(AF_INET, &addr, buf, &blen))
return 0;
- if (SOCKET_ERROR == sock_connect(s,
+ if (IS_SOCKET_ERROR(sock_connect(s,
(struct sockaddr *) &addr,
- sizeof(struct sockaddr_in)))
+ sizeof(struct sockaddr_in))))
return 0;
return 1;
}
Sint erts_sock_send(erts_sock_t socket, const void *buf, Sint len)
{
- return (Sint) sock_send((SOCKET) socket, buf, (size_t) len, 0);
+ Sint result = (Sint) sock_send((SOCKET) socket, buf, (size_t) len, 0);
+ if (IS_SOCKET_ERROR(result))
+ return SOCKET_ERROR;
+ return result;
}
int erts_sock_gethostname(char *buf, int bufsz)
{
- if (sock_hostname(buf, bufsz) == SOCKET_ERROR)
- return -1;
+ if (IS_SOCKET_ERROR(sock_hostname(buf, bufsz)))
+ return SOCKET_ERROR;
return 0;
}
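The hunks above repeatedly replace direct comparisons against SOCKET_ERROR with an IS_SOCKET_ERROR() test. The macro definition is not part of this excerpt; a minimal sketch of what such a wrapper plausibly looks like, assuming the usual split between Winsock (where failures return the SOCKET_ERROR constant) and POSIX sockets (where any negative return means failure):

    /* Hedged sketch only -- the real definition lives elsewhere in inet_drv.c. */
    #ifdef __WIN32__
    #  define IS_SOCKET_ERROR(val) ((val) == SOCKET_ERROR)
    #else
    #  define IS_SOCKET_ERROR(val) ((val) < 0)
    #endif

    /* Used exactly as in the hunks above: */
    if (IS_SOCKET_ERROR(sock_listen(desc->inet.s, backlog)))
        return ctl_error(sock_errno(), rbuf, rsize);

Wrapping the test keeps the call sites identical on both platforms and avoids spreading the Winsock-specific constant through the driver.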
diff --git a/erts/emulator/drivers/common/ram_file_drv.c b/erts/emulator/drivers/common/ram_file_drv.c
index 4a39a156e6..abedcc933a 100644
--- a/erts/emulator/drivers/common/ram_file_drv.c
+++ b/erts/emulator/drivers/common/ram_file_drv.c
@@ -35,6 +35,7 @@
#define RAM_FILE_TRUNCATE 14
#define RAM_FILE_PREAD 17
#define RAM_FILE_PWRITE 18
+#define RAM_FILE_FDATASYNC 19
/* other operations */
#define RAM_FILE_GET 30
@@ -45,6 +46,8 @@
#define RAM_FILE_UUENCODE 35 /* uuencode file */
#define RAM_FILE_UUDECODE 36 /* uudecode file */
#define RAM_FILE_SIZE 37 /* get file size */
+#define RAM_FILE_ADVISE 38 /* predeclare the access
+ * pattern for file data */
/* possible new operations include:
DES_ENCRYPT
DES_DECRYPT
@@ -558,6 +561,13 @@ static void rfile_command(ErlDrvData e, char* buf, int count)
numeric_reply(f, 0); /* 0 is not used */
break;
+ case RAM_FILE_FDATASYNC:
+ if (f->flags == 0)
+ error_reply(f, EBADF);
+ else
+ reply(f, 1, 0);
+ break;
+
case RAM_FILE_FSYNC:
if (f->flags == 0)
error_reply(f, EBADF);
@@ -685,6 +695,13 @@ static void rfile_command(ErlDrvData e, char* buf, int count)
case RAM_FILE_UUDECODE: /* uudecode file */
ram_file_uudecode(f);
break;
+
+ case RAM_FILE_ADVISE:
+ if (f->flags == 0)
+ error_reply(f, EBADF);
+ else
+ reply(f, 1, 0);
+ break;
}
/*
* Ignore anything else -- let the caller hang.
diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c
index 723efeaa13..f50899a730 100644
--- a/erts/emulator/drivers/common/zlib_drv.c
+++ b/erts/emulator/drivers/common/zlib_drv.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -115,7 +115,7 @@ typedef struct {
static int zlib_inflate(ZLibData* d, int flush);
static int zlib_deflate(ZLibData* d, int flush);
-#if defined(_OSE_) || defined(__WIN32__)
+#if defined(__WIN32__)
static int i32(char* buf)
#else
static inline int i32(char* buf)
diff --git a/erts/emulator/drivers/unix/mem_drv.c b/erts/emulator/drivers/unix/mem_drv.c
deleted file mode 100644
index 1417ca1121..0000000000
--- a/erts/emulator/drivers/unix/mem_drv.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/* Purpose: Access to elib memory statistics */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_driver.h"
-#include "elib_stat.h"
-
-#define MAP_BUF_SIZE 1000 /* Max map size */
-#define HISTO_BUF_SIZE 100 /* Max histogram buckets */
-
-static ErlDrvData mem_start(ErlDrvPort);
-static int mem_init(void);
-static void mem_stop(ErlDrvData);
-static void mem_command(ErlDrvData, char*, int);
-
-const struct driver_entry mem_driver_entry = {
- mem_init,
- mem_start,
- mem_stop,
- mem_command,
- NULL,
- NULL,
- "mem_drv"
-};
-
-static int mem_init(void)
-{
- return 0;
-}
-
-static ErlDrvData mem_start(ErlDrvPort port, char* buf)
-{
- return (ErlDrvData)port;
-}
-
-static void mem_stop(ErlDrvData port)
-{
-}
-
-void putint32(p, v)
-byte* p; int v;
-{
- p[0] = (v >> 24) & 0xff;
- p[1] = (v >> 16) & 0xff;
- p[2] = (v >> 8) & 0xff;
- p[3] = (v) & 0xff;
-}
-
-int getint16(p)
-byte* p;
-{
- return (p[0] << 8) | p[1];
-}
-
-/*
-** Command:
-** m L1 L0 -> a heap map of length L1*256 + L0 is returned
-** s -> X3 X2 X1 X0 Y3 Y2 Y1 Y0 Z3 Z2 Z1 Z0
-** X == Total heap size bytes
-** Y == Total free bytes
-** Z == Size of largest free block in bytes
-**
-** h L1 L0 B0 -> Generate a logarithm historgram base B with L buckets
-** l L1 L0 S0 -> Generate a linear histogram with step S with L buckets
-*/
-unsigned char outbuf[HISTO_BUF_SIZE*2*4];
-
-static void mem_command(ErlDrvData port, char* buf, int count)
-{
- if ((count == 1) && buf[0] == 's') {
- struct elib_stat info;
- char v[3*4];
-
- elib_stat(&info);
-
- putint32(v, info.mem_total*4);
- putint32(v+4, info.mem_free*4);
- putint32(v+8, info.max_free*4);
- driver_output((ErlDrvPort)port, v, 12);
- return;
- }
- else if ((count == 3) && buf[0] == 'm') {
- char w[MAP_BUF_SIZE];
- int n = getint16(buf+1);
-
- if (n > MAP_BUF_SIZE)
- n = MAP_BUF_SIZE;
- elib_heap_map(w, n);
- driver_output((ErlDrvPort)port, w, n);
- return;
- }
- else if ((count == 4) && (buf[0] == 'h' || buf[0] == 'l')) {
- unsigned long vf[HISTO_BUF_SIZE];
- unsigned long va[HISTO_BUF_SIZE];
- int n = getint16(buf+1);
- int base = (unsigned char) buf[3];
-
- if (n >= HISTO_BUF_SIZE)
- n = HISTO_BUF_SIZE;
- if (buf[0] == 'l')
- base = -base;
- if (elib_histo(vf, va, n, base) < 0) {
- driver_failure((ErlDrvPort)port, -1);
- return;
- }
- else {
- char* p = outbuf;
- int i;
-
- for (i = 0; i < n; i++) {
- putint32(p, vf[i]);
- p += 4;
- }
- for (i = 0; i < n; i++) {
- putint32(p, va[i]);
- p += 4;
- }
- driver_output((ErlDrvPort)port, outbuf, n*8);
- }
- return;
- }
- driver_failure((ErlDrvPort)port, -1);
-}
diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c
index 4cd54c073f..d782b044a9 100644
--- a/erts/emulator/drivers/unix/ttsl_drv.c
+++ b/erts/emulator/drivers/unix/ttsl_drv.c
@@ -314,7 +314,7 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
sys_sigset(SIGCONT, cont);
sys_sigset(SIGWINCH, winch);
- driver_select(port, (ErlDrvEvent)(Uint)ttysl_fd, ERL_DRV_READ|ERL_DRV_USE, 1);
+ driver_select(port, (ErlDrvEvent)(UWord)ttysl_fd, ERL_DRV_READ|ERL_DRV_USE, 1);
ttysl_port = port;
/* we need to know this when we enter the break handler */
@@ -394,7 +394,7 @@ static void ttysl_stop(ErlDrvData ttysl_data)
stop_lbuf();
stop_termcap();
tty_reset(ttysl_fd);
- driver_select(ttysl_port, (ErlDrvEvent)(Uint)ttysl_fd, ERL_DRV_READ|ERL_DRV_USE, 0);
+ driver_select(ttysl_port, (ErlDrvEvent)(UWord)ttysl_fd, ERL_DRV_READ|ERL_DRV_USE, 0);
sys_sigset(SIGCONT, SIG_DFL);
sys_sigset(SIGWINCH, SIG_DFL);
}
@@ -685,7 +685,7 @@ static void ttysl_from_tty(ErlDrvData ttysl_data, ErlDrvEvent fd)
utf8buf_size = 0;
}
- if ((i = read((int)(Sint)fd, (char *) p, left)) >= 0) {
+ if ((i = read((int)(SWord)fd, (char *) p, left)) >= 0) {
if (p != b) {
i += (p - b);
}
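The ttsl_drv hunks change the intermediate casts from Uint/Sint to UWord/SWord when a file descriptor is passed through ErlDrvEvent. The diff does not spell out the reason; presumably ErlDrvEvent is pointer sized while Uint may be narrower than a pointer (notably with the halfword emulator introduced elsewhere in this change set), so the pointer-wide UWord/SWord types avoid truncation and width-mismatch warnings. A sketch of the pattern, with the type names assumed to match the driver sources:

    /* Sketch: pass an int fd through the pointer-sized ErlDrvEvent. */
    static void ttysl_select_read(ErlDrvPort port, int fd, int on)
    {
        /* (ErlDrvEvent)(UWord)fd -- UWord is assumed to be pointer wide,
           so the later round trip back via (int)(SWord) stays lossless. */
        driver_select(port, (ErlDrvEvent)(UWord)fd,
                      ERL_DRV_READ|ERL_DRV_USE, on);
    }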
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index d395b68691..b19f632f52 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -34,17 +34,6 @@
#include <sys/uio.h>
#endif
-#ifdef _OSE_
-#include "efs.h"
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#ifdef _OSE_SFK_
-#include <string.h>
-#endif
-#endif /* _OSE_ */
-
#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
#define DARWIN 1
#endif
@@ -88,23 +77,6 @@ extern STATUS copy(char *, char *);
* Macros for testing file types.
*/
-#ifdef _OSE_
-
-#define ISDIR(st) S_ISDIR(((st).st_mode))
-#define ISREG(st) S_ISREG(((st).st_mode))
-#define ISDEV(st) (S_ISCHR(((st).st_mode)) || S_ISBLK(((st).st_mode)))
-#define ISLNK(st) S_ISLNK(((st).st_mode))
-#ifdef NO_UMASK
-#define FILE_MODE (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)
-#define DIR_MODE (S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH)
-#else
-#define FILE_MODE (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)
-#define DIR_MODE (S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IWGRP | S_IXGRP | S_IROTH | \
- S_IWOTH | S_IXOTH)
-#endif
-
-#else /* !_OSE_ */
-
#define ISDIR(st) (((st).st_mode & S_IFMT) == S_IFDIR)
#define ISREG(st) (((st).st_mode & S_IFMT) == S_IFREG)
#define ISDEV(st) \
@@ -118,8 +90,6 @@ extern STATUS copy(char *, char *);
#define DIR_MODE 0777
#endif
-#endif /* _OSE_ */
-
#ifdef VXWORKS /* Currently only used on vxworks */
#define EF_ALLOC(S) driver_alloc((S))
@@ -128,7 +98,7 @@ extern STATUS copy(char *, char *);
#define EF_SAFE_REALLOC(P, S) ef_safe_realloc((P), (S))
#define EF_FREE(P) do { if((P)) driver_free((P)); } while(0)
-extern void erl_exit(int n, char *fmt, _DOTS_);
+void erl_exit(int n, char *fmt, ...);
static void *ef_safe_alloc(Uint s)
{
@@ -157,7 +127,7 @@ static void *ef_safe_realloc(void *op, Uint s)
(s[0] == '.' && (s[1] == '\0' || (s[1] == '.' && s[2] == '\0')))
#ifdef VXWORKS
-static FUNCTION(int, vxworks_to_posix, (int vx_errno));
+static int vxworks_to_posix(int vx_errno);
#endif
/*
@@ -176,7 +146,7 @@ static FUNCTION(int, vxworks_to_posix, (int vx_errno));
#define CHECK_PATHLEN(X,Y) /* Nothing */
#endif
-static FUNCTION(int, check_error, (int result, Efile_error* errInfo));
+static int check_error(int result, Efile_error* errInfo);
static int
check_error(int result, Efile_error *errInfo)
@@ -361,15 +331,6 @@ path_size(char *pathname)
#endif /* VXWORKS */
-#ifdef _OSE_
-static int
-ose_enotsup(Efile_error *errInfo)
-{
- errInfo->posix_errno = errInfo->os_errno = ENOTSUP;
- return 0;
-}
-#endif /* _OSE_ */
-
int
efile_mkdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to create. */
@@ -446,18 +407,12 @@ efile_delete_file(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of file to delete. */
{
CHECK_PATHLEN(name,errInfo);
-#ifdef _OSE_
- if (remove(name) == 0) {
- return 1;
- }
-#else
if (unlink(name) == 0) {
return 1;
}
if (errno == EISDIR) { /* Linux sets the wrong error code. */
errno = EPERM;
}
-#endif
return check_error(-1, errInfo);
}
@@ -524,7 +479,7 @@ efile_rename(Efile_error* errInfo, /* Where to return error codes. */
if (errno == ENOTEMPTY) {
errno = EEXIST;
}
-#if defined (sparc) && !defined(VXWORKS) && !defined(_OSE_)
+#if defined (sparc) && !defined(VXWORKS)
/*
* SunOS 4.1.4 reports overwriting a non-empty directory with a
* directory as EINVAL instead of EEXIST (first rule out the correct
@@ -751,6 +706,9 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
#endif
}
+ if (flags & EFILE_MODE_EXCL) {
+ mode |= O_EXCL;
+ }
#ifdef VXWORKS
if (*name != '/') {
@@ -819,6 +777,17 @@ efile_closefile(int fd)
}
int
+efile_fdatasync(Efile_error *errInfo, /* Where to return error codes. */
+ int fd) /* File descriptor for file to sync data. */
+{
+#ifdef HAVE_FDATASYNC
+ return check_error(fdatasync(fd), errInfo);
+#else
+ return efile_fsync(errInfo, fd);
+#endif
+}
+
+int
efile_fsync(Efile_error *errInfo, /* Where to return error codes. */
int fd) /* File descriptor for file to sync. */
{
@@ -855,7 +824,7 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
CHECK_PATHLEN(name, errInfo);
if (info_for_link) {
-#if (defined(VXWORKS) || defined(_OSE_))
+#if (defined(VXWORKS))
result = stat(name, &statbuf);
#else
result = lstat(name, &statbuf);
@@ -939,11 +908,7 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
pInfo->mode = statbuf.st_mode;
pInfo->links = statbuf.st_nlink;
pInfo->major_device = statbuf.st_dev;
-#ifdef _OSE_
- pInfo->minor_device = 0;
-#else
pInfo->minor_device = statbuf.st_rdev;
-#endif
pInfo->inode = statbuf.st_ino;
pInfo->uid = statbuf.st_uid;
pInfo->gid = statbuf.st_gid;
@@ -989,11 +954,9 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
* you don't try to chown a file to someone besides youself.
*/
-#ifndef _OSE_
if (chown(name, pInfo->uid, pInfo->gid) && errno != EPERM) {
return check_error(-1, errInfo);
}
-#endif
if (pInfo->mode != -1) {
mode_t newMode = pInfo->mode & (S_ISUID | S_ISGID |
@@ -1008,8 +971,6 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
#endif /* !VXWORKS */
-#ifndef _OSE_
-
if (pInfo->accessTime.year != -1 && pInfo->modifyTime.year != -1) {
struct utimbuf tval;
struct tm timebuf;
@@ -1041,7 +1002,6 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
return check_error(utime(name, &tval), errInfo);
#endif
}
-#endif /* !_OSE_ */
return 1;
}
@@ -1451,9 +1411,6 @@ efile_truncate_file(Efile_error* errInfo, int *fd, int flags)
int
efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
{
-#ifdef _OSE_
- return ose_enotsup(errInfo);
-#else
#ifdef VXWORKS
return vxworks_enotsup(errInfo);
#else
@@ -1466,7 +1423,6 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
buffer[len] = '\0';
return 1;
#endif
-#endif
}
int
@@ -1479,27 +1435,30 @@ efile_altname(Efile_error* errInfo, char* name, char* buffer, size_t size)
int
efile_link(Efile_error* errInfo, char* old, char* new)
{
-#ifdef _OSE_
- return ose_enotsup(errInfo);
-#else
#ifdef VXWORKS
return vxworks_enotsup(errInfo);
#else
return check_error(link(old, new), errInfo);
#endif
-#endif
}
int
efile_symlink(Efile_error* errInfo, char* old, char* new)
{
-#ifdef _OSE_
- return ose_enotsup(errInfo);
-#else
#ifdef VXWORKS
return vxworks_enotsup(errInfo);
#else
return check_error(symlink(old, new), errInfo);
#endif
+}
+
+int
+efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset,
+ Sint64 length, int advise)
+{
+#ifdef HAVE_POSIX_FADVISE
+ return check_error(posix_fadvise(fd, offset, length, advise), errInfo);
+#else
+ return check_error(0, errInfo);
#endif
}
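Two of the unix_efile.c additions deserve a note: EFILE_MODE_EXCL is now translated into O_EXCL, and efile_fdatasync() falls back to efile_fsync() when fdatasync() is not available. A minimal sketch of what the exclusive-create flag buys, using only standard POSIX calls (the helper name is made up for illustration):

    #include <sys/types.h>
    #include <fcntl.h>
    #include <errno.h>

    /* Sketch: with O_CREAT|O_EXCL the open fails with EEXIST instead of
       silently opening a file that already exists, which is what the new
       exclusive open mode relies on. */
    static int open_exclusive(const char *name, mode_t perms)
    {
        int fd = open(name, O_WRONLY | O_CREAT | O_EXCL, perms);
        if (fd < 0 && errno == EEXIST) {
            /* someone else created the file first; report the collision */
        }
        return fd;
    }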
diff --git a/erts/emulator/drivers/win32/mem_drv.c b/erts/emulator/drivers/win32/mem_drv.c
deleted file mode 100644
index fa7c46eca8..0000000000
--- a/erts/emulator/drivers/win32/mem_drv.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/* Purpose: Access to elib memory statistics */
-
-#include "sys.h"
-#include "erl_driver.h"
-#include "elib_stat.h"
-
-#define MAP_BUF_SIZE 1000 /* Max map size */
-#define HISTO_BUF_SIZE 100 /* Max histogram buckets */
-
-static ErlDrvData mem_start(ErlDrvPort, char*);
-static int mem_init(void);
-static void mem_stop(ErlDrvData);
-static void mem_command(ErlDrvData);
-
-ErlDrvEntry mem_driver_entry = {
- mem_init,
- mem_start,
- mem_stop,
- mem_command,
- NULL,
- NULL,
- "mem_drv"
-};
-
-static int mem_init(void)
-{
- return 0;
-}
-
-static ErlDrvData mem_start(ErlDrvPort port, char* buf)
-{
- return (ErlDrvData)port;
-}
-
-static void mem_stop(ErlDrvData port)
-{
-}
-
-void putint32(p, v)
-byte* p; int v;
-{
- p[0] = (v >> 24) & 0xff;
- p[1] = (v >> 16) & 0xff;
- p[2] = (v >> 8) & 0xff;
- p[3] = (v) & 0xff;
-}
-
-int getint16(p)
-byte* p;
-{
- return (p[0] << 8) | p[1];
-}
-
-/*
-** Command:
-** m L1 L0 -> a heap map of length L1*256 + L0 is returned
-** s -> X3 X2 X1 X0 Y3 Y2 Y1 Y0 Z3 Z2 Z1 Z0
-** X == Total heap size bytes
-** Y == Total free bytes
-** Z == Size of largest free block in bytes
-**
-** h L1 L0 B0 -> Generate a logarithm histogram base B with L buckets
-** l L1 L0 S0 -> Generate a linear histogram with step S with L buckets
-*/
-unsigned char outbuf[HISTO_BUF_SIZE*2*4];
-
-static void mem_command(ErlDrvData port, char* buf, int count)
-{
- if ((count == 1) && buf[0] == 's') {
- struct elib_stat info;
- char v[3*4];
-
- elib_stat(&info);
-
- putint32(v, info.mem_total*4);
- putint32(v+4, info.mem_free*4);
- putint32(v+8, info.max_free*4);
- driver_output((ErlDrvPort)port, v, 12);
- return;
- }
- else if ((count == 3) && buf[0] == 'm') {
- char w[MAP_BUF_SIZE];
- int n = getint16(buf+1);
-
- if (n > MAP_BUF_SIZE)
- n = MAP_BUF_SIZE;
- elib_heap_map(w, n);
- driver_output((ErlDrvPort)port, w, n);
- return;
- }
- else if ((count == 4) && (buf[0] == 'h' || buf[0] == 'l')) {
- unsigned long vf[HISTO_BUF_SIZE];
- unsigned long va[HISTO_BUF_SIZE];
- int n = getint16(buf+1);
- int base = (unsigned char) buf[3];
-
- if (n >= HISTO_BUF_SIZE)
- n = HISTO_BUF_SIZE;
- if (buf[0] == 'l')
- base = -base;
- if (elib_histo(vf, va, n, base) < 0) {
- driver_failure((ErlDrvPort)port, -1);
- return;
- }
- else {
- char* p = outbuf;
- int i;
-
- for (i = 0; i < n; i++) {
- putint32(p, vf[i]);
- p += 4;
- }
- for (i = 0; i < n; i++) {
- putint32(p, va[i]);
- p += 4;
- }
- driver_output((ErlDrvPort)port, outbuf, n*8);
- }
- return;
- }
- driver_failure((ErlDrvPort)port, -1);
-}
-
diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c
index 89aaad31da..04bd1139f5 100644
--- a/erts/emulator/drivers/win32/win_efile.c
+++ b/erts/emulator/drivers/win32/win_efile.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -689,7 +689,11 @@ Sint64* pSize; /* Where to store the size of the file. */
if (flags & EFILE_MODE_APPEND) {
crFlags = OPEN_ALWAYS;
}
- fd = CreateFile(name, access, FILE_SHARE_READ | FILE_SHARE_WRITE,
+ if (flags & EFILE_MODE_EXCL) {
+ crFlags = CREATE_NEW;
+ }
+ fd = CreateFile(name, access,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL, crFlags, FILE_ATTRIBUTE_NORMAL, NULL);
/*
@@ -764,6 +768,15 @@ int fd; /* File descriptor for file to close. */
}
int
+efile_fdatasync(errInfo, fd)
+Efile_error* errInfo; /* Where to return error codes. */
+int fd; /* File descriptor for file to sync. */
+{
+ /* fdatasync is not available on Windows; just call the regular fsync */
+ return efile_fsync(errInfo, fd);
+}
+
+int
efile_fsync(errInfo, fd)
Efile_error* errInfo; /* Where to return error codes. */
int fd; /* File descriptor for file to sync. */
@@ -1424,3 +1437,12 @@ efile_symlink(Efile_error* errInfo, char* old, char* new)
errno = ENOTSUP;
return check_error(-1, errInfo);
}
+
+int
+efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset,
+ Sint64 length, int advise)
+{
+ /* posix_fadvise is not available on Windows; do nothing */
+ errno = ERROR_SUCCESS;
+ return check_error(0, errInfo);
+}
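The Windows implementation mirrors the new exclusive-create mode: with EFILE_MODE_EXCL set, the creation disposition becomes CREATE_NEW, which makes CreateFile fail if the file already exists, and the share mode now also allows FILE_SHARE_DELETE. A hedged sketch of the disposition choice (standard Win32 API; the helper name is invented, ANSI build assumed as in the surrounding driver code):

    #include <windows.h>

    /* Sketch: CREATE_NEW fails with ERROR_FILE_EXISTS when the file is
       already present, whereas OPEN_ALWAYS opens or creates it either way. */
    static HANDLE open_for_write(const char *name, int exclusive)
    {
        DWORD disposition = exclusive ? CREATE_NEW : OPEN_ALWAYS;
        return CreateFile(name, GENERIC_WRITE,
                          FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                          NULL, disposition, FILE_ATTRIBUTE_NORMAL, NULL);
    }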
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index b0abfd2310..2a877d8ace 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -440,9 +440,12 @@ BIF_RETTYPE hipe_bifs_alloc_data_2(BIF_ALIST_2)
align != sizeof(long) && align != sizeof(double)))
BIF_ERROR(BIF_P, BADARG);
nrbytes = unsigned_val(BIF_ARG_2);
+ if (nrbytes == 0)
+ BIF_RET(make_small(0));
block = erts_alloc(ERTS_ALC_T_HIPE, nrbytes);
if ((unsigned long)block & (align-1))
- fprintf(stderr, "Yikes! erts_alloc() returned misaligned address %p\r\n", block);
+ fprintf(stderr, "%s: erts_alloc(%lu) returned %p which is not %lu-byte aligned\r\n",
+ __FUNCTION__, (unsigned long)nrbytes, block, (unsigned long)align);
BIF_RET(address_to_term(block, BIF_P));
}
diff --git a/erts/emulator/hipe/hipe_gc.c b/erts/emulator/hipe/hipe_gc.c
index e57e293547..6c9e1d9ba7 100644
--- a/erts/emulator/hipe/hipe_gc.c
+++ b/erts/emulator/hipe/hipe_gc.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/* $Id$
@@ -86,7 +86,7 @@ Eterm *fullsweep_nstack(Process *p, Eterm *n_htop)
if (is_boxed(gval)) {
Eterm *ptr = boxed_val(gval);
Eterm val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*nsp_i = val;
} else if (in_area(ptr, src, src_size) ||
@@ -96,7 +96,7 @@ Eterm *fullsweep_nstack(Process *p, Eterm *n_htop)
} else if (is_list(gval)) {
Eterm *ptr = list_val(gval);
Eterm val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*nsp_i = ptr[1];
} else if (in_area(ptr, src, src_size) ||
in_area(ptr, oh, oh_size)) {
@@ -193,7 +193,7 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop)
if (is_boxed(gval)) {
Eterm *ptr = boxed_val(gval);
Eterm val = *ptr;
- if (IS_MOVED(val)) {
+ if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
*nsp_i = val;
} else if (in_area(ptr, heap, mature_size)) {
@@ -205,7 +205,7 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop)
} else if (is_list(gval)) {
Eterm *ptr = list_val(gval);
Eterm val = *ptr;
- if (is_non_value(val)) {
+ if (IS_MOVED_CONS(val)) {
*nsp_i = ptr[1];
} else if (in_area(ptr, heap, mature_size)) {
MOVE_CONS(ptr, val, old_htop, nsp_i);
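The sweep loops above now go through IS_MOVED_BOXED() and IS_MOVED_CONS() instead of IS_MOVED() and a bare is_non_value() test. Their definitions are outside this diff; the usual convention -- stated here purely as an assumption -- is that a moved boxed term leaves a forwarding pointer (not a header word) behind, while a moved cons cell has its head overwritten with the non-value marker:

    /* Assumed shape of the macros; see the GC headers for the real ones. */
    #define IS_MOVED_BOXED(x)  (!is_header((x)))    /* forwarding pointer left behind */
    #define IS_MOVED_CONS(x)   (is_non_value((x)))  /* car replaced by THE_NON_VALUE  */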
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index a77aec7919..900dfc5a8a 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -261,7 +261,7 @@ static const struct literal {
/* Field offsets in a process struct */
{ "P_HP", offsetof(struct process, htop) },
{ "P_HP_LIMIT", offsetof(struct process, stop) },
- { "P_OFF_HEAP_MSO", offsetof(struct process, off_heap.mso) },
+ { "P_OFF_HEAP_FIRST", offsetof(struct process, off_heap.first) },
{ "P_MBUF", offsetof(struct process, mbuf) },
{ "P_ID", offsetof(struct process, id) },
{ "P_FLAGS", offsetof(struct process, flags) },
@@ -456,7 +456,7 @@ static const struct rts_param {
} rts_params[] = {
{ 1, "P_OFF_HEAP_FUNS",
#if !defined(HYBRID)
- 1, offsetof(struct process, off_heap.funs)
+ 1, offsetof(struct process, off_heap.first)
#endif
},
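Both P_OFF_HEAP_MSO and the P_OFF_HEAP_FUNS runtime parameter now resolve to offsetof(struct process, off_heap.first), which suggests the separate per-process off-heap lists (binaries, funs, externals) have been merged into one chain headed by a single field. A purely hypothetical sketch of such a unified layout, for illustration only -- the real ERTS definitions may differ:

    /* Hypothetical: one generic header type linking every off-heap term. */
    struct off_heap_header {
        Eterm thing_word;                 /* the term's boxed header word      */
        Uint size;
        struct off_heap_header *next;     /* next off-heap term of the process */
    };

    struct off_heap {
        struct off_heap_header *first;    /* single list replaces mso/funs/... */
        Uint64 overhead;                  /* assumed accounting field          */
    };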
diff --git a/erts/emulator/obsolete/driver.h b/erts/emulator/obsolete/driver.h
deleted file mode 100644
index 708fe68e1a..0000000000
--- a/erts/emulator/obsolete/driver.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/*
- * OLD, OBSOLETE include file for erlang driver writers.
- * New drivers should use erl_driver.h instead.
- */
-
-#ifndef __DRIVER_H__
-#define __DRIVER_H__
-
-#include <stdlib.h>
-#include "driver_int.h"
-
-#undef _ANSI_ARGS_
-#undef CONST
-
-#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
-# define _USING_PROTOTYPES_ 1
-# define _ANSI_ARGS_(x) x
-# define CONST const
-#else
-# define _ANSI_ARGS_(x) ()
-# define CONST
-#endif
-
-#ifdef __cplusplus
-# define EXTERN extern "C"
-#else
-# define EXTERN extern
-#endif
-
-/* Values for mode arg to driver_select() */
-
-#define DO_READ (1 << 0)
-#define DO_WRITE (1 << 1)
-
-/* Flags for set_port_control_flags() */
-#define PORT_CONTROL_FLAG_BINARY 1
-#define PORT_CONTROL_FLAG_HEAVY 2
-
-/* This macro is used to name a dynamic driver's init function in */
-/* a way that doesn't lead to conflicts. This is crucial when using */
-/* operating systems that has one namespace for all symbols */
-/* (e.g. VxWorks). Example: if you have an dynamic driver C source */
-/* file named echo_drv.c, you use the macro like this: */
-/* int DRIVER_INIT(echo_drv)(void *handle) */
-#if defined(VXWORKS)
-# define DRIVER_INIT(DRIVER_NAME) DRIVER_NAME ## _init
-#elif defined(__WIN32__)
-# define DRIVER_INIT(DRIVER_NAME) __declspec(dllexport) driver_init
-#else
-# define DRIVER_INIT(DRIVER_NAME) driver_init
-#endif
-
-typedef int (*F_PTR)(); /* a function pointer */
-typedef long (*L_PTR)(); /* pointer to a function returning long */
-
-extern int null_func();
-
-/* This structure MUST match Binary in global.h exactly!!! */
-typedef struct driver_binary {
- int orig_size; /* total length of binary */
- char orig_bytes[1]; /* the data (char instead of byte!) */
-} DriverBinary;
-
-typedef struct {
- int vsize; /* length of vectors */
- int size; /* total size in bytes */
- SysIOVec* iov;
- DriverBinary** binv;
-} ErlIOVec;
-
-/*
- * OLD, OBSOLETE driver entry structure.
- */
-
-typedef struct driver_entry {
- F_PTR init; /* called at system start up (no args) */
- L_PTR start; /* called when some one does an open_port
- args: port, command (nul-terminated),
- additional/alternate args for fd/vanilla/spawn driver.
- return value -1 means failure, other
- is saved and passed to the other funcs */
- F_PTR stop; /* called when port is closed, and when the
- emulator is halted - arg: start_return */
- F_PTR output; /* called when we have output from erlang to the port
- args: start_return, buf, buflen */
- F_PTR ready_input; /* called when we have input from one of the driver's
- file descriptors - args: start_return, fd */
- F_PTR ready_output; /* called when output is possible to one of the driver's
- file descriptors - args: start_return, fd */
- char *driver_name; /* name supplied as {driver,Name,Args} to open_port */
-
- F_PTR finish; /* called before unloading (DYNAMIC DRIVERS ONLY) */
- void *handle; /* file handle (DYNAMIC DRIVERS ONLY) */
- F_PTR control; /* "ioctl" for drivers (invoked by port_command/3) */
- F_PTR timeout; /* Reserved */
- F_PTR outputv; /* Reserved */
- F_PTR ready_async; /* Completion routine for driver_async */
- F_PTR padding1[3]; /* pad to match size of modern driver struct */
- int padding2[4]; /* more pad */
- F_PTR padding3[3]; /* even more padding */
-} DriverEntry;
-
-
-/* These are the kernel functions available for driver writers */
-
-EXTERN int driver_select _ANSI_ARGS_((int,int,int,int));
-
-EXTERN int driver_output _ANSI_ARGS_((int, char*, int));
-EXTERN int driver_output2 _ANSI_ARGS_((int, char*, int, char*, int));
-EXTERN int driver_output_binary _ANSI_ARGS_((int, char*, int,
- DriverBinary*, int, int));
-EXTERN int driver_outputv _ANSI_ARGS_((int, char*,int,ErlIOVec*,int));
-
-EXTERN int driver_vec_to_buf _ANSI_ARGS_((ErlIOVec*, char*, int));
-
-EXTERN int driver_set_timer _ANSI_ARGS_((int, unsigned long));
-EXTERN int driver_cancel_timer _ANSI_ARGS_((int));
-
-/*
- * The following functions are used to initiate a close of a port
- * from a driver.
- */
-EXTERN int driver_failure_eof _ANSI_ARGS_((int));
-EXTERN int driver_failure_atom _ANSI_ARGS_((int, char *));
-EXTERN int driver_failure_posix _ANSI_ARGS_((int, int));
-EXTERN int driver_failure _ANSI_ARGS_((int, int));
-EXTERN int driver_exit _ANSI_ARGS_ ((int, int));
-
-EXTERN char* erl_errno_id _ANSI_ARGS_((int error));
-EXTERN void set_busy_port _ANSI_ARGS_((int, int));
-EXTERN void add_driver_entry _ANSI_ARGS_((DriverEntry *));
-EXTERN int remove_driver_entry _ANSI_ARGS_((DriverEntry *));
-EXTERN void set_port_control_flags _ANSI_ARGS_((int, int));
-
-/* Binary interface */
-/* NOTE: DO NOT overwrite a binary with new data (if the data is delivered);
-** since the binary is a shared object it MUST be written once.
-*/
-
-EXTERN DriverBinary* driver_alloc_binary _ANSI_ARGS_((int));
-EXTERN DriverBinary* driver_realloc_binary _ANSI_ARGS_((DriverBinary*, int));
-EXTERN void driver_free_binary _ANSI_ARGS_((DriverBinary*));
-
-
-/* Queue interface */
-EXTERN int driver_enqv _ANSI_ARGS_((int, ErlIOVec*, int));
-EXTERN int driver_pushqv _ANSI_ARGS_((int, ErlIOVec*, int));
-EXTERN int driver_deq _ANSI_ARGS_((int, int));
-EXTERN SysIOVec* driver_peekq _ANSI_ARGS_((int, int*));
-EXTERN int driver_sizeq _ANSI_ARGS_((int));
-EXTERN int driver_enq_bin _ANSI_ARGS_((int, DriverBinary*, int, int));
-EXTERN int driver_enq _ANSI_ARGS_((int, char*, int));
-EXTERN int driver_pushq_bin _ANSI_ARGS_((int, DriverBinary*, int, int));
-EXTERN int driver_pushq _ANSI_ARGS_((int, char*, int));
-
-/* Memory management */
-EXTERN void *driver_alloc _ANSI_ARGS_((size_t));
-EXTERN void *driver_realloc _ANSI_ARGS_((void*, size_t));
-EXTERN void driver_free _ANSI_ARGS_((void*));
-
-/* Shared / dynamic link libraries */
-EXTERN void *driver_dl_open _ANSI_ARGS_((char *));
-EXTERN void *driver_dl_sym _ANSI_ARGS_((void *, char *));
-EXTERN int driver_dl_close _ANSI_ARGS_((void *));
-EXTERN char *driver_dl_error _ANSI_ARGS_((void));
-
-/* Async IO functions */
-EXTERN long driver_async _ANSI_ARGS_((int,
- unsigned int*,
- void (*)(void*),
- void *,
- void (*)(void*)));
-EXTERN int driver_async_cancel _ANSI_ARGS_((long));
-
-EXTERN int driver_lock_driver _ANSI_ARGS_((int));
-
-/* Threads */
-typedef void* erl_mutex_t;
-typedef void* erl_cond_t;
-typedef void* erl_thread_t;
-
-EXTERN erl_mutex_t erts_mutex_create _ANSI_ARGS_((void));
-EXTERN int erts_mutex_destroy _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_lock _ANSI_ARGS_((erl_mutex_t));
-EXTERN int erts_mutex_unlock _ANSI_ARGS_((erl_mutex_t));
-
-EXTERN erl_cond_t erts_cond_create _ANSI_ARGS_((void));
-EXTERN int erts_cond_destroy _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_signal _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_broadcast _ANSI_ARGS_((erl_cond_t));
-EXTERN int erts_cond_wait _ANSI_ARGS_((erl_cond_t, erl_mutex_t));
-EXTERN int erts_cond_timedwait _ANSI_ARGS_((erl_cond_t, erl_mutex_t, long));
-
-EXTERN int erts_thread_create _ANSI_ARGS_((erl_thread_t*,
- void* (*func)(void*),
- void* arg,
- int detached));
-EXTERN erl_thread_t erts_thread_self _ANSI_ARGS_((void));
-EXTERN void erts_thread_exit _ANSI_ARGS_((void*));
-EXTERN int erts_thread_join _ANSI_ARGS_((erl_thread_t, void**));
-EXTERN int erts_thread_kill _ANSI_ARGS_((erl_thread_t));
-
-
-typedef unsigned long DriverTermData;
-
-#define TERM_DATA(x) ((DriverTermData) (x))
-
-/* Possible types to send from driver Argument type */
-#define ERL_DRV_NIL ((DriverTermData) 1) /* None */
-#define ERL_DRV_ATOM ((DriverTermData) 2) /* driver_mk_atom(string) */
-#define ERL_DRV_INT ((DriverTermData) 3) /* int */
-#define ERL_DRV_PORT ((DriverTermData) 4) /* driver_mk_port(ix) */
-#define ERL_DRV_BINARY ((DriverTermData) 5) /* ErlDriverBinary*, int */
-#define ERL_DRV_STRING ((DriverTermData) 6) /* char*, int */
-#define ERL_DRV_TUPLE ((DriverTermData) 7) /* int */
-#define ERL_DRV_LIST ((DriverTermData) 8) /* int */
-#define ERL_DRV_STRING_CONS ((DriverTermData) 9) /* char*, int */
-#define ERL_DRV_PID ((DriverTermData) 10) /* driver_connected,... */
-
-/* DriverTermData is the type to use for casts when building
- * terms that should be sent to connected process,
- * for instance a tuple on the form {tcp, Port, [Tag|Binary]}
- *
- * DriverTermData spec[] = {
- * ERL_DRV_ATOM, driver_mk_atom("tcp"),
- * ERL_DRV_PORT, driver_mk_port(drv->ix),
- * ERL_DRV_INT, REPLY_TAG,
- * ERL_DRV_BIN, 50, TERM_DATA(buffer),
- * ERL_DRV_LIST, 2,
- * ERL_DRV_TUPLE, 3,
- * }
- *
- */
-
-EXTERN DriverTermData driver_mk_atom _ANSI_ARGS_ ((char*));
-EXTERN DriverTermData driver_mk_port _ANSI_ARGS_ ((int));
-EXTERN DriverTermData driver_connected _ANSI_ARGS_((int));
-EXTERN DriverTermData driver_caller _ANSI_ARGS_((int));
-
-EXTERN int driver_output_term _ANSI_ARGS_((int, DriverTermData *, int));
-EXTERN int driver_send_term _ANSI_ARGS_((int, DriverTermData, DriverTermData *, int));
-
-#endif
-
-
diff --git a/erts/emulator/pcre/pcre_compile.c b/erts/emulator/pcre/pcre_compile.c
index 29743362d4..9508c5a697 100644
--- a/erts/emulator/pcre/pcre_compile.c
+++ b/erts/emulator/pcre/pcre_compile.c
@@ -92,6 +92,11 @@ is 4 there is plenty of room. */
#define COMPILE_WORK_SIZE (4096)
+/* The overrun tests check for a slightly smaller size so that they detect the
+overrun before it actually does run off the end of the data block. */
+
+#define WORK_SIZE_CHECK (COMPILE_WORK_SIZE - 100)
+
/* Table for handling escaped characters in the range '0'-'z'. Positive returns
are simple data values; negative values are for special things like \d and so
@@ -2445,7 +2450,7 @@ for (;; ptr++)
#ifdef DEBUG
if (code > cd->hwm) cd->hwm = code; /* High water info */
#endif
- if (code > cd->start_workspace + COMPILE_WORK_SIZE) /* Check for overrun */
+ if (code > cd->start_workspace + WORK_SIZE_CHECK) /* Check for overrun */
{
*errorcodeptr = ERR52;
goto FAILED;
@@ -2494,7 +2499,7 @@ for (;; ptr++)
/* In the real compile phase, just check the workspace used by the forward
reference list. */
- else if (cd->hwm > cd->start_workspace + COMPILE_WORK_SIZE)
+ else if (cd->hwm > cd->start_workspace + WORK_SIZE_CHECK)
{
*errorcodeptr = ERR52;
goto FAILED;
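For reference, the arithmetic behind the new guard: with COMPILE_WORK_SIZE at 4096, WORK_SIZE_CHECK is 4096 - 100 = 3996 bytes, so both overrun checks above now trigger while roughly 100 bytes of workspace headroom remain, instead of only after the pointer has already run past the end of the block.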
diff --git a/erts/emulator/pcre/pcre_exec.c b/erts/emulator/pcre/pcre_exec.c
index 51625130c3..3fe13ca32e 100644
--- a/erts/emulator/pcre/pcre_exec.c
+++ b/erts/emulator/pcre/pcre_exec.c
@@ -5191,7 +5191,6 @@ for(;;)
EDEBUGF(("Loop limit break detected"));
return PCRE_ERROR_LOOP_LIMIT;
RESTART_INTERRUPTED:
- md->match_call_count = 0;
md->loop_limit = extra_data->loop_limit;
rc = match(NULL,NULL,NULL,0,md,0,NULL,0,0);
*extra_data->loop_counter_return =
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 5dfd66bd7c..1c4c37b01a 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -83,10 +83,20 @@ static int is_cache_check_scheduled;
static int is_cache_check_requested;
#endif
+#if HALFWORD_HEAP
+static int initialize_pmmap(void);
+static void *pmmap(size_t size);
+static int pmunmap(void *p, size_t size);
+static void *pmremap(void *old_address, size_t old_size,
+ size_t new_size);
+#endif
+
#if HAVE_MMAP
/* Mmap ... */
#define MMAP_PROT (PROT_READ|PROT_WRITE)
+
+
#ifdef MAP_ANON
# define MMAP_FLAGS (MAP_ANON|MAP_PRIVATE)
# define MMAP_FD (-1)
@@ -102,7 +112,11 @@ static int mmap_fd;
# define HAVE_MSEG_RECREATE 0
#endif
+#if HALFWORD_HEAP
+#define CAN_PARTLY_DESTROY 0
+#else
#define CAN_PARTLY_DESTROY 1
+#endif
#else /* #if HAVE_MMAP */
#define CAN_PARTLY_DESTROY 0
#error "Not supported"
@@ -232,6 +246,7 @@ static void thread_safe_init(void)
{
erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
erts_mtx_init(&mseg_mutex, "mseg");
+
#ifdef ERTS_THREADS_NO_SMP
main_tid = erts_thr_self();
#endif
@@ -302,10 +317,20 @@ mseg_create(Uint size)
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
#elif HAVE_MMAP
+#if HALFWORD_HEAP
+ seg = pmmap(size);
+#else
seg = (void *) mmap((void *) 0, (size_t) size,
MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
if (seg == (void *) MAP_FAILED)
seg = NULL;
+#endif
+#if HALFWORD_HEAP
+ if ((unsigned long) seg & CHECK_POINTER_MASK) {
+ erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
+ return NULL;
+ }
+#endif
#else
#error "Missing mseg_create() implementation"
#endif
@@ -325,9 +350,11 @@ mseg_destroy(void *seg, Uint size)
#ifdef DEBUG
int res =
#endif
-
+#if HALFWORD_HEAP
+ pmunmap((void *) seg, size);
+#else
munmap((void *) seg, size);
-
+#endif
ASSERT(size % page_size == 0);
ASSERT(res == 0);
#else
@@ -351,12 +378,18 @@ mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
#elif HAVE_MREMAP
+#if HALFWORD_HEAP
+ new_seg = (void *) pmremap((void *) old_seg,
+ (size_t) old_size,
+ (size_t) new_size);
+#else
new_seg = (void *) mremap((void *) old_seg,
(size_t) old_size,
(size_t) new_size,
MREMAP_MAYMOVE);
if (new_seg == (void *) MAP_FAILED)
new_seg = NULL;
+#endif
#else
#error "Missing mseg_recreate() implementation"
#endif
@@ -1324,6 +1357,10 @@ erts_mseg_init(ErtsMsegInit_t *init)
erl_exit(ERTS_ABORT_EXIT, "erts_mseg: unable to open /dev/zero\n");
#endif
+#if HAVE_MMAP && HALFWORD_HEAP
+ initialize_pmmap();
+#endif
+
page_size = GET_PAGE_SIZE;
page_shift = 1;
@@ -1446,3 +1483,419 @@ erts_mseg_test(unsigned long op,
}
+#if HALFWORD_HEAP
+/*
+ * Very simple page-oriented mmap replacement. Works in the lower
+ * 32-bit address range of a 64-bit program.
+ * Implements anonymous mmap, mremap and munmap with address-order first fit.
+ * The free list is expected to be very short...
+ * To be used for compressed pointers in the Erlang halfword emulator
+ * implementation. The Mac OS X version is more of a toy; it is not meant
+ * for production, as the halfword Erlang VM relies on Linux-specific memory
+ * mapping tricks.
+ */
+
+/*#define HARDDEBUG 1*/
+
+#ifdef __APPLE__
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#define INIT_LOCK() do {erts_mtx_init(&pmmap_mutex, "pmmap");} while(0)
+
+#define TAKE_LOCK() do {erts_mtx_lock(&pmmap_mutex);} while(0)
+
+#define RELEASE_LOCK() do {erts_mtx_unlock(&pmmap_mutex);} while(0)
+
+static erts_mtx_t pmmap_mutex; /* Also needed when !USE_THREADS */
+
+typedef struct _free_block {
+ unsigned long num; /*pages*/
+ struct _free_block *next;
+} FreeBlock;
+
+/* Assigned once and for all */
+static size_t pagsz;
+
+/* Protect with lock */
+static FreeBlock *first;
+
+static size_t round_up_to_pagesize(size_t size)
+{
+ size_t x = size / pagsz;
+
+ if ((size % pagsz)) {
+ ++x;
+ }
+
+ return pagsz * x;
+}
+
+static size_t round_down_to_pagesize(size_t size)
+{
+ size_t x = size / pagsz;
+
+ return pagsz * x;
+}
+
+static void *do_map(void *ptr, size_t sz)
+{
+ void *res;
+
+ if (round_up_to_pagesize(sz) != sz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "does not map complete pages\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return NULL;
+ }
+
+ if (((unsigned long) ptr) % pagsz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "is not page aligned\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return NULL;
+ }
+
+
+ res = mmap(ptr, sz,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE |
+ MAP_ANONYMOUS | MAP_FIXED,
+ -1 , 0);
+
+ if (res == MAP_FAILED) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return NULL;
+ }
+
+ return res;
+}
+
+static int do_unmap(void *ptr, size_t sz)
+{
+ void *res;
+
+ if (round_up_to_pagesize(sz) != sz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "does not map complete pages\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return 1;
+ }
+
+ if (((unsigned long) ptr) % pagsz) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld "
+ "is not page aligned\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return 1;
+ }
+
+
+ res = mmap(ptr, sz,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE
+ | MAP_FIXED,
+ -1 , 0);
+
+ if (res == MAP_FAILED) {
+#ifdef HARDDEBUG
+ fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
+ (void *) ptr, (unsigned long) sz);
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef __APPLE__
+/*
+ * The first 4 GB are protected on Mac OS X for 64-bit processes :(
+ * The range 0x1000000000 - 0x10FFFFFFFF is selected as an arbitrary,
+ * normally unused range... Real mmap()s will avoid it, and all 32-bit
+ * compressed pointers can live in that range...
+ * More expensive than on Linux, where expansion of compressed
+ * pointers involves no masking (as they are in the first 4 GB).
+ * It is also very uncertain whether the MAP_NORESERVE flag really has
+ * any effect on Mac OS X. Swap space may always be allocated...
+ */
+#define SET_RANGE_MIN() /* nothing */
+#define RANGE_MIN 0x1000000000UL
+#define RANGE_MAX 0x1100000000UL
+#define RANGE_MASK (RANGE_MIN)
+#define EXTRA_MAP_FLAGS (MAP_FIXED)
+#else
+static size_t range_min;
+#define SET_RANGE_MIN() do { range_min = (size_t) sbrk(0); } while (0)
+#define RANGE_MIN range_min
+#define RANGE_MAX 0x100000000UL
+#define RANGE_MASK 0UL
+#define EXTRA_MAP_FLAGS (0)
+#endif
+
+static int initialize_pmmap(void)
+{
+ char *p,*q,*rptr;
+ size_t rsz;
+ FreeBlock *initial;
+
+
+ pagsz = getpagesize();
+ SET_RANGE_MIN();
+ if (sizeof(void *) != 8) {
+ erl_exit(1,"Halfword emulator cannot be run in 32bit mode");
+ }
+
+ p = (char *) RANGE_MIN;
+ q = (char *) RANGE_MAX;
+
+ rsz = round_down_to_pagesize(q - p);
+
+ rptr = mmap((void *) p, rsz,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS |
+ MAP_NORESERVE | EXTRA_MAP_FLAGS,
+ -1 , 0);
+#ifdef HARDDEBUG
+ printf("rsz = %ld, pages = %ld, rptr = %p\r\n",
+ (unsigned long) rsz, (unsigned long) (rsz / pagsz),
+ (void *) rptr);
+#endif
+ if (!do_map(rptr,pagsz)) {
+ erl_exit(1,"Could not actually mmap first page for halfword emulator...\n");
+ }
+ initial = (FreeBlock *) rptr;
+ initial->num = (rsz / pagsz);
+ initial->next = NULL;
+ first = initial;
+ INIT_LOCK();
+ return 0;
+}
+
+#ifdef HARDDEBUG
+static void dump_freelist(void)
+{
+ FreeBlock *p = first;
+
+ while (p) {
+ printf("p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n",
+ (void *) p, (unsigned long) p->num, (void *) p->next);
+ p = p->next;
+ }
+}
+#endif
+
+
+static void *pmmap(size_t size)
+{
+ size_t real_size = round_up_to_pagesize(size);
+ size_t num_pages = real_size / pagsz;
+ FreeBlock **block;
+ FreeBlock *tail;
+ FreeBlock *res;
+ TAKE_LOCK();
+ for (block = &first;
+ *block != NULL && (*block)->num < num_pages;
+ block = &((*block)->next))
+ ;
+ if (!(*block)) {
+ RELEASE_LOCK();
+ return NULL;
+ }
+ if ((*block)->num == num_pages) {
+ /* nice, perfect fit */
+ res = *block;
+ *block = (*block)->next;
+ } else {
+ tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size);
+ if (!do_map(tail,pagsz)) {
+#ifdef HARDDEBUG
+ fprintf(stderr, "Could not actually allocate page at %p...\r\n",
+ (void *) tail);
+#endif
+ RELEASE_LOCK();
+ return NULL;
+ }
+ tail->num = (*block)->num - num_pages;
+ tail->next = (*block)->next;
+ res = *block;
+ *block = tail;
+ }
+ RELEASE_LOCK();
+ if (!do_map(res,real_size)) {
+#ifdef HARDDEBUG
+ fprintf(stderr, "Could not actually allocate %ld at %p...\r\n",
+ (unsigned long) real_size, (void *) res);
+#endif
+ return NULL;
+ }
+
+ return (void *) res;
+}
+
+static int pmunmap(void *p, size_t size)
+{
+ size_t real_size = round_up_to_pagesize(size);
+ size_t num_pages = real_size / pagsz;
+ FreeBlock *block;
+ FreeBlock *last;
+ FreeBlock *nb = (FreeBlock *) p;
+
+ if (real_size > pagsz) {
+ if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) {
+ return 1;
+ }
+ }
+
+ TAKE_LOCK();
+
+ last = NULL;
+ block = first;
+ while(block != NULL && ((void *) block) < p) {
+ last = block;
+ block = block->next;
+ }
+
+ if (block != NULL &&
+ ((void *) block) == ((void *) (((char *) p) + real_size))) {
+ /* Merge new free block with following */
+ nb->num = block->num + num_pages;
+ nb->next = block->next;
+ if (do_unmap(block,pagsz)) {
+ RELEASE_LOCK();
+ return 1;
+ }
+ } else {
+ /* just link in */
+ nb->num = num_pages;
+ nb->next = block;
+ }
+ if (last != NULL) {
+ if (p == ((void *) (((char *) last) + (last->num * pagsz)))) {
+ /* Merge with previous */
+ last->num += nb->num;
+ last->next = nb->next;
+ if (do_unmap(nb,pagsz)) {
+ RELEASE_LOCK();
+ return 1;
+ }
+ } else {
+ last->next = nb;
+ }
+ } else {
+ first = nb;
+ }
+ RELEASE_LOCK();
+ return 0;
+}
+
+static void *pmremap(void *old_address, size_t old_size,
+ size_t new_size)
+{
+ size_t new_real_size = round_up_to_pagesize(new_size);
+ size_t new_num_pages = new_real_size / pagsz;
+ size_t old_real_size = round_up_to_pagesize(old_size);
+ size_t old_num_pages = old_real_size / pagsz;
+ if (new_num_pages == old_num_pages) {
+ return old_address;
+ } else if (new_num_pages < old_num_pages) { /* Shrink */
+ size_t nfb_pages = old_num_pages - new_num_pages;
+ size_t nfb_real_size = old_real_size - new_real_size;
+ void *vnfb = (void *) (((char *)old_address) + new_real_size);
+ FreeBlock *nfb = (FreeBlock *) vnfb;
+ FreeBlock **block;
+ TAKE_LOCK();
+ for (block = &first;
+ *block != NULL && (*block) < nfb;
+ block = &((*block)->next))
+ ;
+ if (!(*block) ||
+ (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) {
+ /* Normal link in */
+ if (nfb_pages > 1) {
+ if (do_unmap((void *)(((char *) vnfb) + pagsz),
+ (nfb_pages - 1)*pagsz)) {
+ return NULL;
+ }
+ }
+ nfb->next = (*block);
+ nfb->num = nfb_pages;
+ (*block) = nfb;
+ } else { /* block merge */
+ nfb->next = (*block)->next;
+ nfb->num = nfb_pages + (*block)->num;
+ /* unmap also the first page of the next freeblock */
+ (*block) = nfb;
+ if (do_unmap((void *)(((char *) vnfb) + pagsz),
+ nfb_pages*pagsz)) {
+ return NULL;
+ }
+ }
+ RELEASE_LOCK();
+ return old_address;
+ } else { /* Enlarge */
+ FreeBlock **block;
+ void *old_end = (void *) (((char *)old_address) + old_real_size);
+ TAKE_LOCK();
+ for (block = &first;
+ *block != NULL && (*block) < (FreeBlock *) old_address;
+ block = &((*block)->next))
+ ;
+ if ((*block) == NULL || old_end > ((void *) RANGE_MAX) ||
+ (*block) != old_end ||
+ (*block)->num < (new_num_pages - old_num_pages)) {
+ /* cannot extend */
+ void *result;
+ RELEASE_LOCK();
+ result = pmmap(new_size);
+ if (result == NULL) {
+ return NULL;
+ }
+ memcpy(result,old_address,old_size);
+ if (pmunmap(old_address,old_size)) {
+ /* Oops: could not release the old region; back out the new mapping. */
+ pmunmap(result,new_size);
+ return NULL;
+ }
+ return result;
+ } else { /* extend */
+ size_t remaining_pages = (*block)->num -
+ (new_num_pages - old_num_pages);
+ if (!remaining_pages) {
+ void *p = (void *) (((char *) (*block)) + pagsz);
+ void *n = (*block)->next;
+ size_t x = ((*block)->num - 1) * pagsz;
+ if (x > 0) {
+ if (do_map(p,x) == NULL) {
+ RELEASE_LOCK();
+ return NULL;
+ }
+ }
+ (*block) = n;
+ } else {
+ FreeBlock *nfb = (FreeBlock *) ((void *)
+ (((char *) old_address) +
+ new_real_size));
+ void *p = (void *) (((char *) (*block)) + pagsz);
+ if (do_map(p,new_real_size - old_real_size) == NULL) {
+ RELEASE_LOCK();
+ return NULL;
+ }
+ nfb->num = remaining_pages;
+ nfb->next = (*block)->next;
+ (*block) = nfb;
+ }
+ RELEASE_LOCK();
+ return old_address;
+ }
+ }
+}
+
+#endif /* HALFWORD_HEAP */
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 1c5aa63e90..d8053eb0d9 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -58,8 +58,8 @@ typedef struct {
typedef struct {
int cache;
int preserv;
- Uint abs_shrink_th;
- Uint rel_shrink_th;
+ UWord abs_shrink_th;
+ UWord rel_shrink_th;
} ErtsMsegOpt_t;
#define ERTS_MSEG_DEFAULT_OPT_INITIALIZER \
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 5cca33d7eb..c17806d96c 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -130,13 +130,18 @@
#define ERTS_POLLSET_IS_POLLED(PS) \
((int) erts_smp_atomic_read(&(PS)->polled))
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->woken, (long) 1))
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
- erts_smp_atomic_set(&(PS)->woken, (long) 1)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
- erts_smp_atomic_set(&(PS)->woken, (long) 0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
+#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) set_poller_woken_chk((PS))
+#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
+do { \
+ ERTS_THR_MEMORY_BARRIER; \
+ erts_smp_atomic_set(&(PS)->woken, (long) 1); \
+} while (0)
+#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
+do { \
+ erts_smp_atomic_set(&(PS)->woken, (long) 0); \
+ ERTS_THR_MEMORY_BARRIER; \
+} while (0)
+#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
((int) erts_smp_atomic_read(&(PS)->woken))
#else
@@ -194,13 +199,18 @@
#else
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->interrupt, (long) 0))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 1)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
+#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
+#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
+do { \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 0); \
+ ERTS_THR_MEMORY_BARRIER; \
+} while (0)
+#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
+do { \
+ ERTS_THR_MEMORY_BARRIER; \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 1); \
+} while (0)
+#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
((int) erts_smp_atomic_read(&(PS)->interrupt))
#endif
@@ -276,7 +286,7 @@ struct ErtsPollSet_ {
ErtsPollSet next;
int internal_fd_limit;
ErtsFdStatus *fds_status;
- int no_of_user_fds;
+ erts_smp_atomic_t no_of_user_fds;
int fds_status_len;
#if ERTS_POLL_USE_KERNEL_POLL
int kp_fd;
@@ -336,16 +346,30 @@ struct ErtsPollSet_ {
#endif
};
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
-
static ERTS_INLINE int
unset_interrupted_chk(ErtsPollSet ps)
{
+ int res;
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
/* This operation isn't atomic, but we have no need at all for an
atomic operation here... */
- int res = ps->interrupt;
+ res = ps->interrupt;
ps->interrupt = 0;
+#else
+ res = (int) erts_smp_atomic_xchg(&ps->interrupt, (long) 0);
+ ERTS_THR_MEMORY_BARRIER;
+#endif
return res;
+
+}
+
+#ifdef ERTS_SMP
+
+static ERTS_INLINE int
+set_poller_woken_chk(ErtsPollSet ps)
+{
+ ERTS_THR_MEMORY_BARRIER;
+ return (int) erts_smp_atomic_xchg(&ps->woken, (long) 1);
}
#endif
@@ -828,7 +852,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
ASSERT(ps->fds_status[fd].used_events);
ps->fds_status[fd].used_events = 0;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
update_fallback_pollset(ps, fd);
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
break;
@@ -878,11 +902,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
if (!events) {
buf[buf_len].events = POLLREMOVE;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
buf[buf_len].events = events;
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
else {
if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
@@ -972,12 +996,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
}
if (used_events) {
if (!events) {
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
}
else {
if (events)
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
@@ -1051,7 +1075,7 @@ update_pollset(ErtsPollSet ps, int fd)
epe.data.fd = epe_templ.data.fd;
res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
} while (res != 0 && errno == EINTR);
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
ps->fds_status[fd].used_events = 0;
}
@@ -1059,11 +1083,11 @@ update_pollset(ErtsPollSet ps, int fd)
/* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9
need a non-NULL event pointer even though it is ignored... */
op = EPOLL_CTL_DEL;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
op = EPOLL_CTL_ADD;
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
}
else {
op = EPOLL_CTL_MOD;
@@ -1113,7 +1137,7 @@ update_pollset(ErtsPollSet ps, int fd)
/* Fall through ... */
case EPOLL_CTL_ADD: {
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
#if ERTS_POLL_USE_CONCURRENT_UPDATE
if (!*update_fallback) {
*update_fallback = 1;
@@ -1201,7 +1225,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
#if ERTS_POLL_USE_FALLBACK
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
#endif
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
last_pix = --ps->no_poll_fds;
if (pix != last_pix) {
/* Move last pix to this pix */
@@ -1228,7 +1252,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
|| fd == ps->kp_fd);
#endif
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
ps->fds_status[fd].pix = pix = ps->no_poll_fds++;
if (pix >= ps->poll_fds_len)
grow_poll_fds(ps, pix);
@@ -1279,7 +1303,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
if (!ps->fds_status[fd].used_events) {
ASSERT(events);
- ps->no_of_user_fds++;
+ erts_smp_atomic_inc(&ps->no_of_user_fds);
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds++;
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
@@ -1287,7 +1311,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
}
else if (!events) {
ASSERT(ps->fds_status[fd].used_events);
- ps->no_of_user_fds--;
+ erts_smp_atomic_dec(&ps->no_of_user_fds);
ps->fds_status[fd].events = events;
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds--;
@@ -1888,7 +1912,8 @@ static ERTS_INLINE int
check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
{
ASSERT(!*ps_locked);
- if (ps->no_of_user_fds == 0 && tv->tv_usec == 0 && tv->tv_sec == 0) {
+ if (erts_smp_atomic_read(&ps->no_of_user_fds) == 0
+ && tv->tv_usec == 0 && tv->tv_sec == 0) {
/* Nothing to poll and zero timeout; done... */
return 0;
}
@@ -1926,7 +1951,7 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* the maximum number of file descriptors in the poll set.
*/
struct dvpoll poll_res;
- int nfds = ps->no_of_user_fds;
+ int nfds = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
#ifdef ERTS_SMP
nfds++; /* Wakeup pipe */
#endif
@@ -2204,7 +2229,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = 0;
ps->fds_status = NULL;
ps->fds_status_len = 0;
- ps->no_of_user_fds = 0;
+ erts_smp_atomic_init(&ps->no_of_user_fds, 0);
#if ERTS_POLL_USE_KERNEL_POLL
ps->kp_fd = -1;
#if ERTS_POLL_USE_EPOLL
@@ -2302,7 +2327,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#if ERTS_POLL_USE_FALLBACK
ps->fallback_used = 0;
#endif
- ps->no_of_user_fds = 0; /* Don't count wakeup pipe and fallback fd */
+ erts_smp_atomic_set(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
erts_smp_spin_lock(&pollsets_lock);
ps->next = pollsets;
@@ -2405,6 +2430,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
while (urqbp) {
size += sizeof(ErtsPollSetUpdateRequestsBlock);
pending_updates += urqbp->len;
+ urqbp = urqbp->next;
}
}
#endif
@@ -2447,7 +2473,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->memory_size = size;
- pip->poll_set_size = ps->no_of_user_fds;
+ pip->poll_set_size = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
#ifdef ERTS_SMP
pip->poll_set_size++; /* Wakeup pipe */
#endif
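In the erl_poll.c hunks above, the woken/interrupt flags are now set and cleared through macros that pair the atomic store with an explicit memory barrier, and no_of_user_fds becomes an atomic counter so it can be read without holding the pollset lock. As a rough illustration only (not the ERTS implementation; the struct and function names below are invented), the intended ordering corresponds to something like this C11 code:

#include <stdatomic.h>
#include <stdio.h>

struct pollset {
    atomic_int  interrupt;
    atomic_long no_of_user_fds;   /* readable without the pollset lock */
};

static void set_interrupted(struct pollset *ps)
{
    atomic_thread_fence(memory_order_seq_cst);        /* barrier first ...   */
    atomic_store_explicit(&ps->interrupt, 1, memory_order_relaxed);
}

static void unset_interrupted(struct pollset *ps)
{
    atomic_store_explicit(&ps->interrupt, 0, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);        /* ... or barrier after */
}

static int unset_interrupted_chk(struct pollset *ps)
{
    int was = atomic_exchange(&ps->interrupt, 0);     /* xchg, then barrier   */
    atomic_thread_fence(memory_order_seq_cst);
    return was;
}

int main(void)
{
    struct pollset ps = { 0, 0 };
    atomic_fetch_add(&ps.no_of_user_fds, 1);          /* like erts_smp_atomic_inc */
    set_interrupted(&ps);
    int was = unset_interrupted_chk(&ps);
    unset_interrupted(&ps);
    printf("interrupted was %d, user fds %ld\n",
           was, atomic_load(&ps.no_of_user_fds));
    return 0;
}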
diff --git a/erts/emulator/sys/unix/erl9_start.c b/erts/emulator/sys/unix/erl9_start.c
deleted file mode 100644
index 578062d7e2..0000000000
--- a/erts/emulator/sys/unix/erl9_start.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-#include "sys.h"
-#include "erl_vm.h"
-#include "global.h"
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-/*
- * XXX This is a temporary dummy to make sys.c happy until we'll rewrite it.
- */
-unsigned preloaded_size_ring0 = 1;
-unsigned char preloaded_ring0[1] = {0};
-
-Preload pre_loaded[] = {
- {"ring0", 1, preloaded_ring0},
- {0, 0, 0}
-};
-
-int
-main(int argc, char** argv)
-{
- char sbuf[1024];
- struct {
- void* p;
- int sz;
- } bins[2];
- int bin_num = 0;
- FILE* fp;
- char* progname = argv[0];
- char* eq;
-
- argv++, argc--;
-
- if (argc > 0 && argv[0][0] == '-') {
- argv++, argc--;
- }
- if (argc < 1) {
- abort();
- }
- if ((fp = fopen(argv[0], "r")) == NULL) {
- abort();
- }
-
- /* Needs to be called before any memory allocation */
- erts_short_init();
-
- while (fgets(sbuf, sizeof sbuf, fp)) {
- if (sbuf[0] == '#') {
- continue; /* Comment */
- } else if (sbuf[0] == 'e' && strncmp("exec", sbuf, 4) == 0) {
- continue; /* Comment ;-) */
- } else if ((eq = strchr(sbuf, '=')) != NULL) {
- char* val;
- char* p = strchr(sbuf, '\n');
- if (p) {
- *p = '\0';
- }
- *eq = '\0';
- val = erts_read_env(sbuf);
- if (val == NULL) {
- *eq = '=';
- erts_sys_putenv(sbuf, eq - &sbuf[0]);
- }
- erts_free_read_env(val);
- } else if (sbuf[0] == ':' && '0' <= sbuf[1] && sbuf[1] <= '9') {
- int load_size = atoi(sbuf+1);
- void* bin;
-
- bin = malloc(load_size);
- if (fread(bin, 1, load_size, fp) != load_size) {
- abort();
- }
- bins[bin_num].p = bin;
- bins[bin_num].sz = load_size;
- bin_num++;
- } else if (strcmp(sbuf, "--end--\n") == 0) {
- int rval;
- Eterm mod = NIL;
- char *val;
-
- fclose(fp);
-
- if (bin_num != 2) {
- abort();
- }
-
- val = erts_read_env("ERLBREAKHANDLER");
- if (val) {
- init_break_handler();
- }
- erts_free_read_env(val);
-
- if ((rval = erts_load_module(NULL, 0, NIL, &mod, bins[0].p, bins[0].sz)) < 0) {
- fprintf(stderr, "%s: Load of initial module failed: %d\n",
- progname, rval);
- abort();
- }
- erts_first_process(mod, bins[1].p, bins[1].sz, argc, argv);
- free(bins[0].p);
- free(bins[1].p);
- process_main();
- abort();
- } else {
- fprintf(stderr, "%s: bad line: %s\n", progname, sbuf);
- abort();
- }
- }
- abort();
-}
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 31ab5d03de..af4ab693dc 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -123,8 +123,6 @@ static ErtsSysReportExit *report_exit_transit_list;
extern int check_async_ready(void);
extern int driver_interrupt(int, int);
-/*EXTERN_FUNCTION(void, increment_time, (int));*/
-/*EXTERN_FUNCTION(int, next_time, (_VOID_));*/
extern void do_break(void);
extern void erl_sys_args(int*, char**);
@@ -221,10 +219,10 @@ static struct fd_data {
} *fd_data; /* indexed by fd */
/* static FUNCTION(int, write_fill, (int, char*, int)); unused? */
-static FUNCTION(void, note_child_death, (int, int));
+static void note_child_death(int, int);
#if CHLDWTHR
-static FUNCTION(void *, child_waiter, (void *));
+static void* child_waiter(void *);
#endif
/********************* General functions ****************************/
@@ -367,7 +365,7 @@ erts_sys_misc_mem_sz(void)
/*
* reset the terminal to the original settings on exit
*/
-void sys_tty_reset(void)
+void sys_tty_reset(int exit_code)
{
if (using_oldshell && !replace_intr) {
SET_BLOCKING(0);
@@ -384,18 +382,6 @@ MALLOC_USE_HASH(1);
#endif
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
-{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
-}
#ifdef ERTS_THR_HAVE_SIG_FUNCS
/*
@@ -457,6 +443,10 @@ thr_create_prepare_child(void *vtcdp)
{
erts_thr_create_data_t *tcdp = (erts_thr_create_data_t *) vtcdp;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_thread_setup();
+#endif
+
#ifndef NO_FPE_SIGNALS
/*
* We do not want fp exeptions in other threads than the
@@ -484,9 +474,6 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
@@ -534,13 +521,14 @@ erts_sys_pre_init(void)
#endif
#endif /* USE_THREADS */
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
}
void
erl_sys_init(void)
{
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
+ {
int res;
char bindir[MAXPATHLEN];
size_t bindirsz = sizeof(bindir);
@@ -570,6 +558,7 @@ erl_sys_init(void)
bindir,
DIR_SEPARATOR_CHAR,
CHILD_SETUP_PROG_NAME);
+ }
#endif
#ifdef USE_SETLINEBUF
@@ -1324,9 +1313,18 @@ static char **build_unix_environment(char *block)
}
}
- for (j = 0; j < i; j++) {
- if (cpp[j][strlen(cpp[j])-1] == '=') {
+ for (j = 0; j < i; ) {
+ size_t last = strlen(cpp[j])-1;
+ if (cpp[j][last] == '=' && strchr(cpp[j], '=') == cpp[j]+last) {
cpp[j] = cpp[--len];
+ if (len < i) {
+ i--;
+ } else {
+ j++;
+ }
+ }
+ else {
+ j++;
}
}
@@ -2562,7 +2560,6 @@ extern Preload pre_loaded[];
void erts_sys_alloc_init(void)
{
- elib_ensure_initialized();
}
void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
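The build_unix_environment() hunk above fixes the loop that removes "unset" markers: an entry whose only '=' is its last character (i.e. "NAME=") means the variable should be dropped, and when such an entry is replaced by the last element the loop must re-examine the swapped-in entry instead of advancing. A minimal standalone sketch of that loop (not the patch code; names are invented, and non-empty strings are assumed):

#include <stdio.h>
#include <string.h>

/* True for "NAME=" entries, i.e. the first '=' is also the last character. */
static int is_unset_marker(const char *s)
{
    size_t last = strlen(s) - 1;               /* assumes a non-empty string */
    return s[last] == '=' && strchr(s, '=') == s + last;
}

static int filter_env(char **env, int len)
{
    int j = 0;
    while (j < len) {
        if (is_unset_marker(env[j]))
            env[j] = env[--len];   /* swap in last entry, re-check index j */
        else
            j++;
    }
    return len;                    /* new length */
}

int main(void)
{
    char *env[] = { "HOME=/root", "FOO=", "PATH=/bin", "BAR=" };
    int n = filter_env(env, 4);
    for (int i = 0; i < n; i++)
        puts(env[i]);              /* HOME=/root, PATH=/bin (order may differ) */
    return 0;
}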
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index c59c99f65e..5a682ab045 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -799,8 +799,17 @@ sys_chars_to_double(char* buf, double* fp)
}
#ifdef NO_FPE_SIGNALS
- if (errno == ERANGE && (*fp == 0.0 || *fp == HUGE_VAL || *fp == -HUGE_VAL)) {
- return -1;
+ if (errno == ERANGE) {
+ if (*fp == HUGE_VAL || *fp == -HUGE_VAL) {
+ /* overflow, should give error */
+ return -1;
+ } else if (t == s && *fp == 0.0) {
+ /* This should give 0.0 - OTP-7178 */
+ errno = 0;
+
+ } else if (*fp == 0.0) {
+ return -1;
+ }
}
#endif
return 0;
@@ -810,7 +819,9 @@ int
matherr(struct exception *exc)
{
#if !defined(NO_FPE_SIGNALS)
- set_current_fp_exception((unsigned long)__builtin_return_address(0));
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+ if (fpexnp != NULL)
+ *fpexnp = (unsigned long)__builtin_return_address(0);
#endif
return 1;
}
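The sys_chars_to_double() change above distinguishes the two cases in which strtod() reports ERANGE: overflow (the result is +/-HUGE_VAL and is still rejected) and underflow (the result collapses towards 0.0 and is now accepted as 0.0, per OTP-7178). A simplified sketch of that distinction, independent of the ERTS code and its extra bookkeeping:

#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static int chars_to_double(const char *buf, double *out)
{
    char *end;
    errno = 0;
    *out = strtod(buf, &end);
    if (end == buf || *end != '\0')
        return -1;                              /* not a full float literal */
    if (errno == ERANGE) {
        if (*out == HUGE_VAL || *out == -HUGE_VAL)
            return -1;                          /* overflow: still an error */
        if (*out == 0.0)
            errno = 0;                          /* underflow: accept as 0.0 */
    }
    return 0;
}

int main(void)
{
    double d;
    int r = chars_to_double("1.0e-325", &d);
    printf("1.0e-325      -> %d (d=%g)\n", r, d);       /* 0 (d=0) */
    printf("1.0e832910832 -> %d\n", chars_to_double("1.0e832910832", &d)); /* -1 */
    return 0;
}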
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
index abddc7e107..411b4b37cf 100644
--- a/erts/emulator/sys/vxworks/sys.c
+++ b/erts/emulator/sys/vxworks/sys.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -143,6 +143,14 @@ volatile int erts_break_requested;
/********************* General functions ****************************/
+/*
+ * Reset the terminal to the original settings on exit
+ * (nothing to do for VxWorks).
+ */
+void sys_tty_reset(int exit_code)
+{
+}
+
Uint
erts_sys_misc_mem_sz(void)
{
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index d816cc2c07..a766fe9575 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2007-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
#ifdef HAVE_CONFIG_H
@@ -304,24 +304,51 @@ struct ErtsPollSet_ {
erts_smp_atomic_set(&(PS)->polled, (long) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
((int) erts_smp_atomic_read(&(PS)->polled))
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->woken, (long) 1))
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
- erts_smp_atomic_set(&(PS)->woken, (long) 1)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
- erts_smp_atomic_set(&(PS)->woken, (long) 0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
+
+#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) set_poller_woken_chk((PS))
+#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
+do { \
+ ERTS_THR_MEMORY_BARRIER; \
+ erts_smp_atomic_set(&(PS)->woken, (long) 1); \
+} while (0)
+#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
+do { \
+ erts_smp_atomic_set(&(PS)->woken, (long) 0); \
+ ERTS_THR_MEMORY_BARRIER; \
+} while (0)
+#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
((int) erts_smp_atomic_read(&(PS)->woken))
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->interrupt, (long) 0))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 1)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
+#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
+#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
+do { \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 0); \
+ ERTS_THR_MEMORY_BARRIER; \
+} while (0)
+#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
+do { \
+ ERTS_THR_MEMORY_BARRIER; \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 1); \
+} while (0)
+#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
((int) erts_smp_atomic_read(&(PS)->interrupt))
+static ERTS_INLINE int
+unset_interrupted_chk(ErtsPollSet ps)
+{
+ int res = (int) erts_smp_atomic_xchg(&ps->interrupt, (long) 0);
+ ERTS_THR_MEMORY_BARRIER;
+ return res;
+
+}
+
+static ERTS_INLINE int
+set_poller_woken_chk(ErtsPollSet ps)
+{
+ ERTS_THR_MEMORY_BARRIER;
+ return (int) erts_smp_atomic_xchg(&ps->woken, (long) 1);
+}
+
#else
#define ERTS_POLLSET_LOCK(PS)
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 3194493ac8..15d4cd7361 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -33,10 +33,11 @@
#include "../../drivers/win32/win_con.h"
+
void erts_sys_init_float(void);
void erl_start(int, char**);
-void erl_exit(int n, char*, _DOTS_);
+void erl_exit(int n, char*, ...);
void erl_error(char*, va_list);
void erl_crash_dump(char*, int, char*, ...);
@@ -66,10 +67,10 @@ static void async_read_file(struct async_io* aio, LPVOID buf, DWORD numToRead);
static int async_write_file(struct async_io* aio, LPVOID buf, DWORD numToWrite);
static int get_overlapped_result(struct async_io* aio,
LPDWORD pBytesRead, BOOL wait);
-static FUNCTION(BOOL, CreateChildProcess, (char *, HANDLE, HANDLE,
- HANDLE, LPHANDLE, BOOL,
- LPVOID, LPTSTR, unsigned,
- char **, int *));
+static BOOL CreateChildProcess(char *, HANDLE, HANDLE,
+ HANDLE, LPHANDLE, BOOL,
+ LPVOID, LPTSTR, unsigned,
+ char **, int *);
static int create_pipe(LPHANDLE, LPHANDLE, BOOL, BOOL);
static int ApplicationType(const char* originalName, char fullPath[MAX_PATH],
BOOL search_in_path, BOOL handle_quotes,
@@ -92,9 +93,13 @@ static erts_smp_mtx_t sys_driver_data_lock;
#define APPL_WIN3X 2
#define APPL_WIN32 3
-static FUNCTION(int, driver_write, (long, HANDLE, byte*, int));
+static int driver_write(long, HANDLE, byte*, int);
static void common_stop(int);
static int create_file_thread(struct async_io* aio, int mode);
+#ifdef ERTS_SMP
+static void close_active_handles(ErlDrvPort, const HANDLE* handles, int cnt);
+static DWORD WINAPI threaded_handle_closer(LPVOID param);
+#endif
static DWORD WINAPI threaded_reader(LPVOID param);
static DWORD WINAPI threaded_writer(LPVOID param);
static DWORD WINAPI threaded_exiter(LPVOID param);
@@ -132,6 +137,9 @@ static BOOL win_console = FALSE;
static OSVERSIONINFO int_os_version; /* Version information for Win32. */
+#ifdef ERTS_SMP
+static BOOL (WINAPI *fpCancelIoEx)(HANDLE,LPOVERLAPPED);
+#endif
/* This is the system's main function (which may or may not be called "main")
- do general system-dependent initialization
@@ -187,6 +195,17 @@ erts_sys_misc_mem_sz(void)
return res;
}
+/*
+ * Reset the terminal to the original settings on exit
+ */
+void sys_tty_reset(int exit_code)
+{
+ if (exit_code > 0)
+ ConWaitForExit();
+ else
+ ConNormalExit();
+}
+
void erl_sys_args(int* argc, char** argv)
{
char *event_name;
@@ -665,25 +684,50 @@ release_driver_data(DriverData* dp)
erts_smp_mtx_lock(&sys_driver_data_lock);
#ifdef ERTS_SMP
- /* This is a workaround for the fact that CancelIo cant cancel
- requests issued by another thread and that we still cant use
- CancelIoEx as that's only availabele in Vista etc. */
- if(dp->in.async_io_active && dp->in.fd != INVALID_HANDLE_VALUE) {
- CloseHandle(dp->in.fd);
- dp->in.fd = INVALID_HANDLE_VALUE;
- DEBUGF(("Waiting for the in event thingie"));
- WaitForSingleObject(dp->in.ov.hEvent,INFINITE);
- DEBUGF(("...done\n"));
- }
- if(dp->out.async_io_active && dp->out.fd != INVALID_HANDLE_VALUE) {
- CloseHandle(dp->out.fd);
- dp->out.fd = INVALID_HANDLE_VALUE;
- DEBUGF(("Waiting for the out event thingie"));
- WaitForSingleObject(dp->out.ov.hEvent,INFINITE);
- DEBUGF(("...done\n"));
+ if (fpCancelIoEx != NULL) {
+ if (dp->in.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) {
+ (*fpCancelIoEx)(dp->in.fd, NULL);
+ }
+ if (dp->out.thread == (HANDLE) -1 && dp->out.fd != INVALID_HANDLE_VALUE) {
+ (*fpCancelIoEx)(dp->out.fd, NULL);
+ }
+ }
+ else {
+ /* This is a workaround for the fact that CancelIo can't cancel
+ requests issued by another thread and that we can't use
+ CancelIoEx as that's only available in Vista etc.
+ R14: Avoid scheduler deadlock by only waiting for 10ms, and then spawning
+ a thread that will keep waiting in order to close handles. */
+ HANDLE handles[2];
+ int i = 0;
+ int timeout = 10;
+ if(dp->in.async_io_active && dp->in.fd != INVALID_HANDLE_VALUE) {
+ CloseHandle(dp->in.fd);
+ dp->in.fd = INVALID_HANDLE_VALUE;
+ DEBUGF(("Waiting for the in event thingie"));
+ if (WaitForSingleObject(dp->in.ov.hEvent,timeout) == WAIT_TIMEOUT) {
+ handles[i++] = dp->in.ov.hEvent;
+ dp->in.ov.hEvent = NULL;
+ timeout = 0;
+ }
+ DEBUGF(("...done\n"));
+ }
+ if(dp->out.async_io_active && dp->out.fd != INVALID_HANDLE_VALUE) {
+ CloseHandle(dp->out.fd);
+ dp->out.fd = INVALID_HANDLE_VALUE;
+ DEBUGF(("Waiting for the out event thingie"));
+ if (WaitForSingleObject(dp->out.ov.hEvent,timeout) == WAIT_TIMEOUT) {
+ handles[i++] = dp->out.ov.hEvent;
+ dp->out.ov.hEvent = NULL;
+ }
+ DEBUGF(("...done\n"));
+ }
+ if (i > 0) {
+ close_active_handles(dp->port_num, handles, i);
+ }
}
#else
- if (dp->out.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) {
+ if (dp->in.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) {
CancelIo(dp->in.fd);
}
if (dp->out.thread == (HANDLE) -1 && dp->out.fd != INVALID_HANDLE_VALUE) {
@@ -726,6 +770,48 @@ release_driver_data(DriverData* dp)
erts_smp_mtx_unlock(&sys_driver_data_lock);
}
+#ifdef ERTS_SMP
+
+struct handles_to_be_closed
+{
+ int cnt;
+ HANDLE handles[2];
+};
+
+static void close_active_handles(ErlDrvPort port_num, const HANDLE* handles, int cnt)
+{
+ DWORD tid;
+ HANDLE thread;
+ int i;
+ struct handles_to_be_closed* htbc = erts_alloc(ERTS_ALC_T_DRV_TAB,
+ sizeof(struct handles_to_be_closed));
+ htbc->cnt = cnt;
+ for (i=0; i < cnt; ++i) {
+ htbc->handles[i] = handles[i];
+ (void) driver_select(port_num, (ErlDrvEvent)handles[i],
+ ERL_DRV_USE_NO_CALLBACK, 0);
+ }
+ thread = (HANDLE *) _beginthreadex(NULL, 0, threaded_handle_closer, htbc, 0, &tid);
+ CloseHandle(thread);
+}
+
+
+static DWORD WINAPI
+threaded_handle_closer(LPVOID param)
+{
+ struct handles_to_be_closed* htbc = (struct handles_to_be_closed*) param;
+ int i;
+ DEBUGF(("threaded_handle_closer waiting for %d handles\r\n",htbc->cnt));
+ WaitForMultipleObjects(htbc->cnt, htbc->handles, TRUE, INFINITE);
+ for (i=0; i < htbc->cnt; ++i) {
+ CloseHandle(htbc->handles[i]);
+ }
+ erts_free(ERTS_ALC_T_DRV_TAB, htbc);
+ DEBUGF(("threaded_handle_closer terminating\r\n"));
+ return 0;
+}
+#endif /* ERTS_SMP */
+
/*
* Stores input and output file descriptors in the DriverData structure,
* and calls driver_select().
@@ -1015,12 +1101,19 @@ static int
spawn_init()
{
int i;
-
+#ifdef ERTS_SMP
+ HMODULE module = GetModuleHandle("kernel32");
+ fpCancelIoEx = (module != NULL) ?
+ (BOOL (WINAPI *)(HANDLE,LPOVERLAPPED))
+ GetProcAddress(module,"CancelIoEx") : NULL;
+ DEBUGF(("fpCancelIoEx = %p\r\n", fpCancelIoEx));
+#endif
driver_data = (struct driver_data *)
erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
erts_smp_atomic_add(&sys_misc_mem_sz, max_files*sizeof(struct driver_data));
for (i = 0; i < max_files; i++)
driver_data[i].port_num = PORT_FREE;
+
return 0;
}
@@ -2534,7 +2627,6 @@ erts_sys_main_thread(void)
void erts_sys_alloc_init(void)
{
- elib_ensure_initialized();
}
void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
@@ -2881,19 +2973,14 @@ check_supported_os_version(void)
}
#ifdef USE_THREADS
-static void *ethr_internal_alloc(size_t size)
-{
- return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
-}
-static void *ethr_internal_realloc(void *ptr, size_t size)
-{
- return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
-}
-static void ethr_internal_free(void *ptr)
+#ifdef ERTS_ENABLE_LOCK_COUNT
+static void
+thr_create_prepare_child(void *vtcdp)
{
- erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
+ erts_lcnt_thread_setup();
}
-#endif
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+#endif /* USE_THREADS */
void
erts_sys_pre_init(void)
@@ -2904,9 +2991,9 @@ erts_sys_pre_init(void)
#ifdef USE_THREADS
{
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
- eid.alloc = ethr_internal_alloc;
- eid.realloc = ethr_internal_realloc;
- eid.free = ethr_internal_free;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ eid.thread_create_child_func = thr_create_prepare_child;
+#endif
erts_thr_init(&eid);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init();
@@ -2914,15 +3001,8 @@ erts_sys_pre_init(void)
}
#endif
erts_smp_atomic_init(&sys_misc_mem_sz, 0);
- erts_sys_env_init();
}
-/*
- * the last two only used for standalone erlang
- * they should are used by sae_main in beam dll to
- * enable standalone execution via erl_api-routines
- */
-
void noinherit_std_handle(DWORD type)
{
HANDLE h = GetStdHandle(type);
@@ -2936,6 +3016,8 @@ void erl_sys_init(void)
{
HANDLE handle;
+ erts_sys_env_init();
+
noinherit_std_handle(STD_OUTPUT_HANDLE);
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
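The win32 sys.c changes above look up CancelIoEx at runtime in spawn_init(), because the function only exists on Windows Vista and later; when the lookup fails, the old close-the-handle workaround (now with a 10 ms wait plus a closer thread) is used instead. A hedged sketch of just the dynamic lookup and dispatch, not the emulator's driver code:

#include <windows.h>
#include <stdio.h>

typedef BOOL (WINAPI *cancel_io_ex_t)(HANDLE, LPOVERLAPPED);

static cancel_io_ex_t fp_cancel_io_ex;

static void lookup_cancel_io_ex(void)
{
    HMODULE k32 = GetModuleHandleA("kernel32");
    fp_cancel_io_ex = (k32 != NULL)
        ? (cancel_io_ex_t) GetProcAddress(k32, "CancelIoEx")
        : NULL;
}

static void cancel_async_io(HANDLE fd)
{
    if (fp_cancel_io_ex != NULL)
        (*fp_cancel_io_ex)(fd, NULL);   /* cancel I/O issued by any thread */
    else
        CloseHandle(fd);                /* pre-Vista fallback: force completion */
}

int main(void)
{
    lookup_cancel_io_ex();
    printf("CancelIoEx %savailable\n", fp_cancel_io_ex ? "" : "not ");
    (void) cancel_async_io;             /* would be used on a real overlapped handle */
    return 0;
}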
diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c
index ac4be3f316..02c8433a10 100644
--- a/erts/emulator/sys/win32/sys_env.c
+++ b/erts/emulator/sys/win32/sys_env.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2002-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -145,15 +145,17 @@ merge_environment(char *old, char *add)
for(j = 0; a_arg[j] != NULL; ++j){
char **tmp;
char *current = a_arg[j];
+ char *eq_p = strchr(current,'=');
+ int unset = (eq_p!=NULL && eq_p[1]=='\0');
if ((tmp = find_arg(c_arg, current)) != NULL) {
- if (current[strlen(current)-1] != '=') {
+ if (!unset) {
*tmp = current;
} else {
*tmp = c_arg[--i];
c_arg[i] = NULL;
}
- } else if (current[strlen(current)-1] != '=') {
+ } else if (!unset) {
c_arg[i++] = current;
c_arg[i] = NULL;
}
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index b1374950b2..a4c02da626 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -1,19 +1,19 @@
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 1997-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
@@ -61,7 +61,7 @@ MODULES= \
exception_SUITE \
float_SUITE \
fun_SUITE \
- fun_r11_SUITE \
+ fun_r12_SUITE \
gc_SUITE \
guard_SUITE \
hash_SUITE \
@@ -75,12 +75,12 @@ MODULES= \
node_container_SUITE \
nofrag_SUITE \
num_bif_SUITE \
- obsolete_SUITE \
op_SUITE \
port_SUITE \
port_bif_SUITE \
process_SUITE \
pseudoknot_SUITE \
+ receive_SUITE \
ref_SUITE \
register_SUITE \
save_calls_SUITE \
@@ -100,6 +100,7 @@ MODULES= \
trace_local_SUITE \
trace_meta_SUITE \
trace_call_count_SUITE \
+ trace_call_time_SUITE \
scheduler_SUITE \
old_scheduler_SUITE \
z_SUITE \
@@ -117,7 +118,8 @@ NO_OPT= bs_bincomp \
bs_match_int \
bs_match_tail \
bs_match_misc \
- bs_utf
+ bs_utf \
+ guard
NO_OPT_MODULES= $(NO_OPT:%=%_no_opt_SUITE)
diff --git a/erts/emulator/test/beam_SUITE.erl b/erts/emulator/test/beam_SUITE.erl
index cc1626630b..228ff15341 100644
--- a/erts/emulator/test/beam_SUITE.erl
+++ b/erts/emulator/test/beam_SUITE.erl
@@ -1,26 +1,26 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(beam_SUITE).
-export([all/1, packed_registers/1, apply_last/1, apply_last_bif/1,
- buildo_mucho/1, heap_sizes/1, big_lists/1]).
+ buildo_mucho/1, heap_sizes/1, big_lists/1, fconv/1]).
-export([applied/2]).
@@ -279,3 +279,26 @@ b() ->
_} ->
ok
end.
+
+fconv(Config) when is_list(Config) ->
+ ?line do_fconv(atom),
+ ?line do_fconv(nil),
+ ?line do_fconv(tuple_literal),
+ ?line 3.0 = do_fconv(1.0, 2.0),
+ ok.
+
+do_fconv(Type) ->
+ try
+ do_fconv(Type, 1.0),
+ test_server:fail()
+ catch
+ error:badarith ->
+ ok
+ end.
+
+do_fconv(atom, Float) when is_float(Float) ->
+ Float + a;
+do_fconv(nil, Float) when is_float(Float) ->
+ Float + [];
+do_fconv(tuple_literal, Float) when is_float(Float) ->
+ Float + {a,b}.
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index cfbc5dfe81..b4ef0e6d5a 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -22,12 +22,13 @@
-include("test_server.hrl").
-export([all/1,init_per_testcase/2,fin_per_testcase/2,
+ types/1,
t_list_to_existing_atom/1,os_env/1,otp_7526/1,
binary_to_atom/1,binary_to_existing_atom/1,
atom_to_binary/1,min_max/1]).
all(suite) ->
- [t_list_to_existing_atom,os_env,otp_7526,
+ [types,t_list_to_existing_atom,os_env,otp_7526,
atom_to_binary,binary_to_atom,binary_to_existing_atom,
min_max].
@@ -39,6 +40,73 @@ fin_per_testcase(_Func, Config) ->
Dog=?config(watchdog, Config),
?t:timetrap_cancel(Dog).
+types(Config) when is_list(Config) ->
+ c:l(erl_bif_types),
+ case erlang:function_exported(erl_bif_types, module_info, 0) of
+ false ->
+ %% Fail cleanly.
+ ?line ?t:fail("erl_bif_types not compiled");
+ true ->
+ types_1()
+ end.
+
+types_1() ->
+ ?line List0 = erlang:system_info(snifs),
+
+ %% Ignore missing type information for hipe BIFs.
+ ?line List = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs],
+
+ case [MFA || MFA <- List, not known_types(MFA)] of
+ [] ->
+ types_2(List);
+ BadTypes ->
+ io:put_chars("No type information:\n"),
+ io:format("~p\n", [lists:sort(BadTypes)]),
+ ?line ?t:fail({length(BadTypes),bifs_without_types})
+ end.
+
+types_2(List) ->
+ BadArity = [MFA || {M,F,A}=MFA <- List,
+ begin
+ Types = erl_bif_types:arg_types(M, F, A),
+ length(Types) =/= A
+ end],
+ case BadArity of
+ [] ->
+ types_3(List);
+ [_|_] ->
+ io:put_chars("Bifs with bad arity\n"),
+ io:format("~p\n", [BadArity]),
+ ?line ?t:fail({length(BadArity),bad_arity})
+ end.
+
+types_3(List) ->
+ BadSmokeTest = [MFA || {M,F,A}=MFA <- List,
+ begin
+ try erl_bif_types:type(M, F, A) of
+ Type ->
+ %% Test that type is returned.
+ not erl_types:is_erl_type(Type)
+ catch
+ Class:Error ->
+ io:format("~p: ~p ~p\n",
+ [MFA,Class,Error]),
+ true
+ end
+ end],
+ case BadSmokeTest of
+ [] ->
+ ok;
+ [_|_] ->
+ io:put_chars("Bifs with failing calls to erlang_bif_types:type/3 "
+ "(or with bogus return values):\n"),
+ io:format("~p\n", [BadSmokeTest]),
+ ?line ?t:fail({length(BadSmokeTest),bad_smoke_test})
+ end.
+
+known_types({M,F,A}) ->
+ erl_bif_types:is_known(M, F, A).
+
t_list_to_existing_atom(Config) when is_list(Config) ->
?line all = list_to_existing_atom("all"),
?line ?MODULE = list_to_existing_atom(?MODULE_STRING),
@@ -308,6 +376,18 @@ min_max(Config) when is_list(Config) ->
?line 42.0 = erlang:min(42.0, 42),
?line 42.0 = erlang:max(42.0, 42),
+ %% And now (R14) they are also autoimported!
+ ?line a = min(id(a), a),
+ ?line a = min(id(a), b),
+ ?line a = min(id(b), a),
+ ?line b = min(id(b), b),
+ ?line a = max(id(a), a),
+ ?line b = max(id(a), b),
+ ?line b = max(id(b), a),
+ ?line b = max(id(b), b),
+
+ ?line 42.0 = min(42.0, 42),
+ ?line 42.0 = max(42.0, 42),
ok.
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index db2b3e10db..77d2579848 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -55,7 +55,6 @@
otp_5484/1,otp_5933/1,
ordering/1,unaligned_order/1,gc_test/1,
bit_sized_binary_sizes/1,
- bitlevel_roundtrip/1,
otp_6817/1,deep/1,obsolete_funs/1,robustness/1,otp_8117/1,
otp_8180/1]).
@@ -70,7 +69,7 @@ all(suite) ->
bad_binary_to_term_2,safe_binary_to_term2,
bad_binary_to_term, bad_terms, t_hash, bad_size, bad_term_to_binary,
more_bad_terms, otp_5484, otp_5933, ordering, unaligned_order,
- gc_test, bit_sized_binary_sizes, bitlevel_roundtrip, otp_6817, otp_8117,
+ gc_test, bit_sized_binary_sizes, otp_6817, otp_8117,
deep,obsolete_funs,robustness,otp_8180].
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
@@ -439,11 +438,11 @@ terms(Config) when is_list(Config) ->
ok
end,
Term = binary_to_term(Bin),
- Term = erlang:binary_to_term(Bin, [safe]),
+ Term = binary_to_term(Bin, [safe]),
Unaligned = make_unaligned_sub_binary(Bin),
Term = binary_to_term(Unaligned),
- Term = erlang:binary_to_term(Unaligned, []),
- Term = erlang:binary_to_term(Bin, [safe]),
+ Term = binary_to_term(Unaligned, []),
+ Term = binary_to_term(Bin, [safe]),
BinC = erlang:term_to_binary(Term, [compressed]),
Term = binary_to_term(BinC),
true = size(BinC) =< size(Bin),
@@ -543,7 +542,7 @@ bad_bin_to_term(BadBin) ->
{'EXIT',{badarg,_}} = (catch binary_to_term(BadBin)).
bad_bin_to_term(BadBin,Opts) ->
- {'EXIT',{badarg,_}} = (catch erlang:binary_to_term(BadBin,Opts)).
+ {'EXIT',{badarg,_}} = (catch binary_to_term(BadBin,Opts)).
safe_binary_to_term2(doc) -> "Test safety options for binary_to_term/2";
safe_binary_to_term2(Config) when is_list(Config) ->
@@ -554,7 +553,7 @@ safe_binary_to_term2(Config) when is_list(Config) ->
BadRef = <<131,114,0,3,BadHostAtom/binary,0,<<0,0,0,255>>/binary,
Empty/binary,Empty/binary>>,
?line bad_bin_to_term(BadRef, [safe]), % good ref, with a bad atom
- ?line fullsweep_after = erlang:binary_to_term(<<131,100,0,15,"fullsweep_after">>, [safe]), % should be a good atom
+ ?line fullsweep_after = binary_to_term(<<131,100,0,15,"fullsweep_after">>, [safe]), % should be a good atom
BadExtFun = <<131,113,100,0,4,98,108,117,101,100,0,4,109,111,111,110,97,3>>,
?line bad_bin_to_term(BadExtFun, [safe]),
ok.
@@ -1150,35 +1149,6 @@ bsbs_1(A) ->
Bin = binary_to_term(<<131,$M,5:32,A,0,0,0,0,0>>),
BinSize = bit_size(Bin).
-bitlevel_roundtrip(Config) when is_list(Config) ->
- case ?t:is_release_available("r11b") of
- true -> bitlevel_roundtrip_1();
- false -> {skip,"No R11B found"}
- end.
-
-bitlevel_roundtrip_1() ->
- Name = bitlevelroundtrip,
- ?line N = list_to_atom(atom_to_list(Name) ++ "@" ++ hostname()),
- ?line ?t:start_node(Name, slave, [{erl,[{release,"r11b"}]}]),
-
- ?line {<<128>>,1} = roundtrip(N, <<1:1>>),
- ?line {<<64>>,2} = roundtrip(N, <<1:2>>),
- ?line {<<16#E0>>,3} = roundtrip(N, <<7:3>>),
- ?line {<<16#70>>,4} = roundtrip(N, <<7:4>>),
- ?line {<<16#10>>,5} = roundtrip(N, <<2:5>>),
- ?line {<<16#8>>,6} = roundtrip(N, <<2:6>>),
- ?line {<<16#2>>,7} = roundtrip(N, <<1:7>>),
- ?line {<<8,128>>,1} = roundtrip(N, <<8,1:1>>),
- ?line {<<42,248>>,5} = roundtrip(N, <<42,31:5>>),
-
- ?line ?t:stop_node(N),
- ok.
-
-roundtrip(Node, Term) ->
- {badrpc,{'EXIT',Res}} = rpc:call(Node, erlang, exit, [Term]),
- io:format("<<~p bits>> => ~w", [bit_size(Term),Res]),
- Res.
-
deep(Config) when is_list(Config) ->
?line deep_roundtrip(lists:foldl(fun(E, A) ->
[E,A]
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 9b16170293..7350aef4ec 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -182,7 +182,7 @@ system_monitor(Config) when is_list(Config) ->
?line Master ! {Owner, {command, "u"}},
?line {Busy,beta} = rec(Void),
?line Void = rec(Void),
- ?line NewMonitor = erlang:system_monitor(OldMonitor),
+ ?line _NewMonitor = erlang:system_monitor(OldMonitor),
?line OldMonitor = erlang:system_monitor(),
?line OldMonitor = erlang:system_monitor(OldMonitor),
%%
@@ -361,7 +361,6 @@ soft_busy_driver(Config) when is_list(Config) ->
hs_test(Config, false).
hs_test(Config, HardBusy) when is_list(Config) ->
- ?line Me = self(),
?line DrvName = case HardBusy of
true -> 'hard_busy_drv';
false -> 'soft_busy_drv'
@@ -479,7 +478,7 @@ hs_busy_pcmd(Prt, Opts, StartFun, EndFun) ->
Tester ! {self(), doing_port_command},
Start = os:timestamp(),
Res = try {return,
- erlang:port_command(Prt, [], Opts)}
+ port_command(Prt, [], Opts)}
catch Exception:Error -> {Exception, Error}
end,
End = os:timestamp(),
diff --git a/erts/emulator/test/decode_packet_SUITE.erl b/erts/emulator/test/decode_packet_SUITE.erl
index 13f17e972c..d9e961be2f 100644
--- a/erts/emulator/test/decode_packet_SUITE.erl
+++ b/erts/emulator/test/decode_packet_SUITE.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
@@ -24,10 +24,10 @@
-include("test_server.hrl").
-export([all/1,init_per_testcase/2,fin_per_testcase/2,
- basic/1, packet_size/1, neg/1, http/1, line/1, ssl/1]).
+ basic/1, packet_size/1, neg/1, http/1, line/1, ssl/1, otp_8536/1]).
all(suite) ->
- [basic, packet_size, neg, http, line, ssl].
+ [basic, packet_size, neg, http, line, ssl, otp_8536].
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Seed = {S1,S2,S3} = now(),
@@ -304,6 +304,10 @@ http(Config) when is_list(Config) ->
{ok, {http_request, 'GET', ResB, {1,1}}, Rest} = decode_pkt(http_bin,Bin)
end,
lists:foreach(UriF, http_uri_variants()),
+
+ %% Response with empty phrase
+ ?line {ok,{http_response,{1,1},200,[]},<<>>} = decode_pkt(http, <<"HTTP/1.1 200\r\n">>, []),
+ ?line {ok,{http_response,{1,1},200,<<>>},<<>>} = decode_pkt(http_bin, <<"HTTP/1.1 200\r\n">>, []),
ok.
http_with_bin(http) ->
@@ -504,6 +508,27 @@ ssl(Config) when is_list(Config) ->
F(v2hello),
ok.
+otp_8536(doc) -> ["Corrupt sub-binary-strings from httph_bin"];
+otp_8536(Config) when is_list(Config) ->
+ lists:foreach(fun otp_8536_do/1, lists:seq(1,50)),
+ ok.
+
+otp_8536_do(N) ->
+ Data = <<"some data 123">>,
+ Letters = <<"bcdefghijklmnopqrstuvwxyzyxwvutsrqponmlkjihgfedcba">>,
+ <<HdrTail:N/binary,_/binary>> = Letters,
+ Hdr = <<$A, HdrTail/binary>>,
+ Bin = <<Hdr/binary, ": ", Data/binary, "\r\n\r\n">>,
+
+ io:format("Bin='~p'\n",[Bin]),
+ ?line {ok,{http_header,0,Hdr2,undefined,Data2},<<"\r\n">>} = decode_pkt(httph_bin, Bin, []),
+
+ %% Do something to trash the C-stack, how about another decode_packet:
+ decode_pkt(httph_bin,<<Letters/binary, ": ", Data/binary, "\r\n\r\n">>, []),
+
+ %% Now check that we got the expected binaries
+ {Hdr, Data} = {Hdr2, Data2}.
+
decode_pkt(Type,Bin) ->
decode_pkt(Type,Bin,[]).
decode_pkt(Type,Bin,Opts) ->
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index 8f48d8a992..7c19274696 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -1,23 +1,24 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(distribution_SUITE).
+-compile(r12).
%% Tests distribution and the tcp driver.
diff --git a/erts/emulator/test/driver_SUITE_data/chkio_drv.c b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
index 9e1e5e72c2..b571cb30e6 100644
--- a/erts/emulator/test/driver_SUITE_data/chkio_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
@@ -17,7 +17,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(_OSE_) && !defined(VXWORKS)
+#if !defined(__WIN32__) && !defined(VXWORKS)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
index 25d4b17001..6afa46b3a2 100644
--- a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
@@ -17,7 +17,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(_OSE_) && !defined(VXWORKS)
+#if !defined(__WIN32__) && !defined(VXWORKS)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
index c7a42aa687..e49de388b4 100644
--- a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
@@ -29,7 +29,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(_OSE_) && !defined(VXWORKS)
+#if !defined(__WIN32__) && !defined(VXWORKS)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c b/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c
index c80e492e3f..e7d9a294fa 100644
--- a/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c
@@ -17,7 +17,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(_OSE_) && !defined(VXWORKS)
+#if !defined(__WIN32__) && !defined(VXWORKS)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
index f429a5b51e..3a5b5af13a 100644
--- a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
@@ -28,7 +28,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(_OSE_) && !defined(VXWORKS)
+#if !defined(__WIN32__) && !defined(VXWORKS)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/float_SUITE.erl b/erts/emulator/test/float_SUITE.erl
index 102e472ea6..99e9457985 100644
--- a/erts/emulator/test/float_SUITE.erl
+++ b/erts/emulator/test/float_SUITE.erl
@@ -22,7 +22,10 @@
-include("test_server.hrl").
-export([all/1,init_per_testcase/2,fin_per_testcase/2,
- fpe/1,fp_drv/1,fp_drv_thread/1,denormalized/1,match/1,bad_float_unpack/1]).
+ fpe/1,fp_drv/1,fp_drv_thread/1,denormalized/1,match/1,
+ bad_float_unpack/1]).
+-export([otp_7178/1]).
+
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Dog = ?t:timetrap(?t:minutes(3)),
@@ -33,7 +36,29 @@ fin_per_testcase(_Func, Config) ->
?t:timetrap_cancel(Dog).
all(suite) ->
- [fpe,fp_drv,fp_drv_thread,denormalized,match,bad_float_unpack].
+ [fpe,
+ fp_drv,
+ fp_drv_thread,
+ otp_7178,
+ denormalized,
+ match,
+ bad_float_unpack].
+
+%%
+%% OTP-7178, list_to_float on very small numbers should give 0.0
+%% instead of an exception, i.e. ignore underflow.
+%%
+otp_7178(suite) ->
+ [];
+otp_7178(doc) ->
+ ["test that list_to_float on very small numbers give 0.0"];
+otp_7178(Config) when is_list(Config) ->
+ ?line X = list_to_float("1.0e-325"),
+ ?line true = (X < 0.00000001) and (X > -0.00000001),
+ ?line Y = list_to_float("1.0e-325325325"),
+ ?line true = (Y < 0.00000001) and (Y > -0.00000001),
+ ?line {'EXIT', {badarg,_}} = (catch list_to_float("1.0e83291083210")),
+ ok.
%% Forces floating point exceptions and tests that subsequent, legal,
%% operations are calculated correctly. Original version by Sebastian
diff --git a/erts/emulator/test/fun_r11_SUITE.erl b/erts/emulator/test/fun_r12_SUITE.erl
index 61ba816cc8..9262731dcb 100644
--- a/erts/emulator/test/fun_r11_SUITE.erl
+++ b/erts/emulator/test/fun_r12_SUITE.erl
@@ -1,24 +1,24 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
--module(fun_r11_SUITE).
--compile(r11).
+-module(fun_r12_SUITE).
+-compile(r12).
-export([all/1,init_per_testcase/2,fin_per_testcase/2,dist_old_release/1]).
@@ -37,36 +37,36 @@ fin_per_testcase(_Case, Config) ->
ok.
dist_old_release(Config) when is_list(Config) ->
- case ?t:is_release_available("r11b") of
+ case ?t:is_release_available("r12b") of
true -> do_dist_old(Config);
- false -> {skip,"No R11B found"}
+ false -> {skip,"No R12B found"}
end.
do_dist_old(Config) when is_list(Config) ->
?line Pa = filename:dirname(code:which(?MODULE)),
- Name = fun_dist_r11,
+ Name = fun_dist_r12,
?line {ok,Node} = ?t:start_node(Name, peer,
[{args,"-pa "++Pa},
- {erl,[{release,"r11b"}]}]),
+ {erl,[{release,"r12b"}]}]),
?line Pid = spawn_link(Node,
fun() ->
receive
Fun when is_function(Fun) ->
- R11BFun = fun(H) -> cons(H, [b,c]) end,
- Fun(Fun, R11BFun)
+ R12BFun = fun(H) -> cons(H, [b,c]) end,
+ Fun(Fun, R12BFun)
end
end),
Self = self(),
- Fun = fun(F, R11BFun) ->
+ Fun = fun(F, R12BFun) ->
{pid,Self} = erlang:fun_info(F, pid),
{module,?MODULE} = erlang:fun_info(F, module),
- Self ! {ok,F,R11BFun}
+ Self ! {ok,F,R12BFun}
end,
?line Pid ! Fun,
?line receive
- {ok,Fun,R11BFun} ->
- ?line [a,b,c] = R11BFun(a);
+ {ok,Fun,R12BFun} ->
+ ?line [a,b,c] = R12BFun(a);
Other ->
?line ?t:fail({bad_message,Other})
end,
diff --git a/erts/emulator/test/guard_SUITE.erl b/erts/emulator/test/guard_SUITE.erl
index 23482a20d7..8fef36dfaf 100644
--- a/erts/emulator/test/guard_SUITE.erl
+++ b/erts/emulator/test/guard_SUITE.erl
@@ -1,33 +1,34 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(guard_SUITE).
-export([all/1, bad_arith/1, bad_tuple/1, test_heap_guards/1, guard_bifs/1,
- type_tests/1]).
+ type_tests/1,guard_bif_binary_part/1]).
-include("test_server.hrl").
-export([init/3]).
-import(lists, [member/2]).
-all(suite) -> [bad_arith, bad_tuple, test_heap_guards, guard_bifs, type_tests].
+all(suite) -> [bad_arith, bad_tuple, test_heap_guards, guard_bifs,
+ type_tests, guard_bif_binary_part].
bad_arith(doc) -> "Test that a bad arithmetic operation in a guard works correctly.";
bad_arith(Config) when is_list(Config) ->
@@ -136,6 +137,170 @@ init(Fun, Args, Filler) ->
dummy(_) ->
ok.
+-define(MASK_ERROR(EXPR),mask_error((catch (EXPR)))).
+mask_error({'EXIT',{Err,_}}) ->
+ Err;
+mask_error(Else) ->
+ Else.
+
+guard_bif_binary_part(doc) ->
+ ["Test the binary_part/2,3 guard BIF's extensively"];
+guard_bif_binary_part(Config) when is_list(Config) ->
+ %% Overflow tests that need to be unoptimized
+ ?line badarg =
+ ?MASK_ERROR(
+ binary_part(<<1,2,3>>,{16#FFFFFFFFFFFFFFFF,
+ -16#7FFFFFFFFFFFFFFF-1})),
+ ?line badarg =
+ ?MASK_ERROR(
+ binary_part(<<1,2,3>>,{16#FFFFFFFFFFFFFFFF,
+ 16#7FFFFFFFFFFFFFFF})),
+ F = fun(X) ->
+ Master = self(),
+ {Pid,Ref} = spawn_monitor( fun() ->
+ A = lists:duplicate(X,a),
+ B = [do_binary_part_guard() | A],
+ Master ! {self(),hd(B)},
+ ok
+ end),
+ receive
+ {Pid,ok} ->
+ erlang:demonitor(Ref,[flush]),
+ ok;
+ Error ->
+ Error
+ end
+ end,
+ [ ok = F(N) || N <- lists:seq(1,10000) ],
+ ok.
+
+
+do_binary_part_guard() ->
+ ?line 1 = bptest(<<1,2,3>>),
+ ?line 2 = bptest(<<2,1,3>>),
+ ?line error = bptest(<<1>>),
+ ?line error = bptest(<<>>),
+ ?line error = bptest(apa),
+ ?line 3 = bptest(<<2,3,3>>),
+ % With one variable (pos)
+ ?line 1 = bptest(<<1,2,3>>,1),
+ ?line 2 = bptest(<<2,1,3>>,1),
+ ?line error = bptest(<<1>>,1),
+ ?line error = bptest(<<>>,1),
+ ?line error = bptest(apa,1),
+ ?line 3 = bptest(<<2,3,3>>,1),
+ % With one variable (length)
+ ?line 1 = bptesty(<<1,2,3>>,1),
+ ?line 2 = bptesty(<<2,1,3>>,1),
+ ?line error = bptesty(<<1>>,1),
+ ?line error = bptesty(<<>>,1),
+ ?line error = bptesty(apa,1),
+ ?line 3 = bptesty(<<2,3,3>>,2),
+ % With one variable (whole tuple)
+ ?line 1 = bptestx(<<1,2,3>>,{1,1}),
+ ?line 2 = bptestx(<<2,1,3>>,{1,1}),
+ ?line error = bptestx(<<1>>,{1,1}),
+ ?line error = bptestx(<<>>,{1,1}),
+ ?line error = bptestx(apa,{1,1}),
+ ?line 3 = bptestx(<<2,3,3>>,{1,2}),
+ % With two variables
+ ?line 1 = bptest(<<1,2,3>>,1,1),
+ ?line 2 = bptest(<<2,1,3>>,1,1),
+ ?line error = bptest(<<1>>,1,1),
+ ?line error = bptest(<<>>,1,1),
+ ?line error = bptest(apa,1,1),
+ ?line 3 = bptest(<<2,3,3>>,1,2),
+ % Direct (autoimported) call, these will be evaluated by the compiler...
+ ?line <<2>> = binary_part(<<1,2,3>>,1,1),
+ ?line <<1>> = binary_part(<<2,1,3>>,1,1),
+ % Compiler warnings due to constant evaluation expected (3)
+ ?line badarg = ?MASK_ERROR(binary_part(<<1>>,1,1)),
+ ?line badarg = ?MASK_ERROR(binary_part(<<>>,1,1)),
+ ?line badarg = ?MASK_ERROR(binary_part(apa,1,1)),
+ ?line <<3,3>> = binary_part(<<2,3,3>>,1,2),
+ % Direct call through apply
+ ?line <<2>> = apply(erlang,binary_part,[<<1,2,3>>,1,1]),
+ ?line <<1>> = apply(erlang,binary_part,[<<2,1,3>>,1,1]),
+ % Compiler warnings due to constant evaluation expected (3)
+ ?line badarg = ?MASK_ERROR(apply(erlang,binary_part,[<<1>>,1,1])),
+ ?line badarg = ?MASK_ERROR(apply(erlang,binary_part,[<<>>,1,1])),
+ ?line badarg = ?MASK_ERROR(apply(erlang,binary_part,[apa,1,1])),
+ ?line <<3,3>> = apply(erlang,binary_part,[<<2,3,3>>,1,2]),
+ % Constant propagation
+ ?line Bin = <<1,2,3>>,
+ ?line ok = if
+ binary_part(Bin,1,1) =:= <<2>> ->
+ ok;
+ %% Compiler warning, clause cannot match (expected)
+ true ->
+ error
+ end,
+ ?line ok = if
+ binary_part(Bin,{1,1}) =:= <<2>> ->
+ ok;
+ %% Compiler warning, clause cannot match (expected)
+ true ->
+ error
+ end,
+ ok.
+
+
+bptest(B) when length(B) =:= 1337 ->
+ 1;
+bptest(B) when binary_part(B,{1,1}) =:= <<2>> ->
+ 1;
+bptest(B) when erlang:binary_part(B,1,1) =:= <<1>> ->
+ 2;
+bptest(B) when erlang:binary_part(B,{1,2}) =:= <<3,3>> ->
+ 3;
+bptest(_) ->
+ error.
+
+bptest(B,A) when length(B) =:= A ->
+ 1;
+bptest(B,A) when binary_part(B,{A,1}) =:= <<2>> ->
+ 1;
+bptest(B,A) when erlang:binary_part(B,A,1) =:= <<1>> ->
+ 2;
+bptest(B,A) when erlang:binary_part(B,{A,2}) =:= <<3,3>> ->
+ 3;
+bptest(_,_) ->
+ error.
+
+bptestx(B,A) when length(B) =:= A ->
+ 1;
+bptestx(B,A) when binary_part(B,A) =:= <<2>> ->
+ 1;
+bptestx(B,A) when erlang:binary_part(B,A) =:= <<1>> ->
+ 2;
+bptestx(B,A) when erlang:binary_part(B,A) =:= <<3,3>> ->
+ 3;
+bptestx(_,_) ->
+ error.
+
+bptesty(B,A) when length(B) =:= A ->
+ 1;
+bptesty(B,A) when binary_part(B,{1,A}) =:= <<2>> ->
+ 1;
+bptesty(B,A) when erlang:binary_part(B,1,A) =:= <<1>> ->
+ 2;
+bptesty(B,A) when erlang:binary_part(B,{1,A}) =:= <<3,3>> ->
+ 3;
+bptesty(_,_) ->
+ error.
+
+bptest(B,A,_C) when length(B) =:= A ->
+ 1;
+bptest(B,A,C) when binary_part(B,{A,C}) =:= <<2>> ->
+ 1;
+bptest(B,A,C) when erlang:binary_part(B,A,C) =:= <<1>> ->
+ 2;
+bptest(B,A,C) when erlang:binary_part(B,{A,C}) =:= <<3,3>> ->
+ 3;
+bptest(_,_,_) ->
+ error.
+
+
guard_bifs(doc) -> "Test all guard bifs with nasty (but legal) arguments.";
guard_bifs(Config) when is_list(Config) ->
?line Big = -237849247829874297658726487367328971246284736473821617265433,
diff --git a/erts/emulator/test/hash_SUITE.erl b/erts/emulator/test/hash_SUITE.erl
index 85bdb8bff8..f5d1871bfb 100644
--- a/erts/emulator/test/hash_SUITE.erl
+++ b/erts/emulator/test/hash_SUITE.erl
@@ -480,14 +480,14 @@ otp_5292_test() ->
S2 = md5([md5(hash_int(S, E, PH)) || {Start, N, Sz} <- d(),
{S, E} <- int(Start, N, Sz)]),
?line Comment = case S1 of
- <<43,186,76,102,87,4,110,245,203,177,206,6,130,69,43,99>> ->
+ <<4,248,208,156,200,131,7,1,173,13,239,173,112,81,16,174>> ->
?line big = erlang:system_info(endian),
"Big endian machine";
- <<21,206,139,15,149,28,167,81,98,225,132,254,49,125,174,195>> ->
+ <<180,28,33,231,239,184,71,125,76,47,227,241,78,184,176,233>> ->
?line little = erlang:system_info(endian),
"Little endian machine"
end,
- ?line <<140,37,79,80,26,242,130,22,20,229,123,240,223,244,43,99>> = S2,
+ ?line <<124,81,198,121,174,233,19,137,10,83,33,80,226,111,238,99>> = S2,
?line 2 = erlang:hash(1, (1 bsl 27) -1),
?line {'EXIT', _} = (catch erlang:hash(1, (1 bsl 27))),
{comment, Comment}.
@@ -507,7 +507,7 @@ hash_int(Start, End, F) ->
{Start, End, md5(HL)}.
md5(T) ->
- erlang:md5(term_to_binary(T)).
+ erlang:md5(term_to_binary(T)).
bit_level_binaries() ->
?line [3511317,7022633,14044578,28087749,56173436,112344123,90467083|_] =
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 522caec8f1..f45cfa3e4a 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -20,23 +20,39 @@
-module(nif_SUITE).
%%-define(line_trace,true).
-%%-define(CHECK(Exp,Got), ?line check(Exp,Got,?LINE)).
--define(CHECK(Exp,Got), ?line Exp = Got).
+-define(CHECK(Exp,Got), check(Exp,Got,?LINE)).
+%%-define(CHECK(Exp,Got), ?line Exp = Got).
-include("test_server.hrl").
--export([all/1, fin_per_testcase/2, basic/1, reload/1, upgrade/1, heap_frag/1,
+-export([all/1,
+ %%init_per_testcase/2,
+ fin_per_testcase/2, basic/1, reload/1, upgrade/1, heap_frag/1,
types/1, many_args/1, binaries/1, get_string/1, get_atom/1, api_macros/1,
- from_array/1, iolist_as_binary/1, resource/1, resource_takeover/1,
- threading/1, neg/1]).
+ from_array/1, iolist_as_binary/1, resource/1, resource_binary/1, resource_takeover/1,
+ threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1, is_checks/1,
+ get_length/1, make_atom/1, make_string/1]).
-export([many_args_100/100]).
+
+
+%% -export([lib_version/0,call_history/0,hold_nif_mod_priv_data/1,nif_mod_call_history/0,
+%% list_seq/1,type_test/0,tuple_2_list/1,is_identical/2,compare/2,
+%% clone_bin/1,make_sub_bin/3,string_to_bin/2,atom_to_bin/2,macros/1,
+%% tuple_2_list_and_tuple/1,iolist_2_bin/1,get_resource_type/1,alloc_resource/2,
+%% make_resource/1,get_resource/2,release_resource/1,last_resource_dtor_call/0,
+%% make_new_resource/2,make_new_resource_binary/1,send_list_seq/2,send_new_blob/2,
+%% alloc_msgenv/0,clear_msgenv/1,grow_blob/2,send_blob/2,send_blob_thread/3,
+%% join_send_thread/1]).
+
+
-define(nif_stub,nif_stub_error(?LINE)).
all(suite) ->
[basic, reload, upgrade, heap_frag, types, many_args, binaries, get_string,
- get_atom, api_macros, from_array, iolist_as_binary, resource,
- resource_takeover, threading, neg].
+ get_atom, api_macros, from_array, iolist_as_binary, resource, resource_binary,
+ resource_takeover, threading, send, send2, send3, send_threaded, neg, is_checks,
+ get_length, make_atom, make_string].
%%init_per_testcase(_Case, Config) ->
%% ?line Dog = ?t:timetrap(?t:seconds(60*60*24)),
@@ -473,12 +489,51 @@ resource_new_do2(Type) ->
{{PtrA,BinA}, {ResB,PtrB,BinB}}.
resource_neg(TypeA) ->
+ resource_neg_do(TypeA),
+
+ catch exit(42), % dummy exception to purge saved stacktraces from the earlier exception
+ erlang:garbage_collect(),
+ ?line {_,_,2} = last_resource_dtor_call(),
+ ok.
+
+resource_neg_do(TypeA) ->
TypeB = get_resource_type(1),
- Aptr = alloc_resource(TypeA, <<"Arnold">>),
- Bptr = alloc_resource(TypeB, <<"Bobo">>),
- ?line {'EXIT',{badarg,_}} = (catch get_resource(TypeA, Bptr)),
- ?line {'EXIT',{badarg,_}} = (catch get_resource(TypeB, Aptr)),
+ ResA = make_new_resource(TypeA, <<"Arnold">>),
+ ResB = make_new_resource(TypeB, <<"Bobo">>),
+ ?line {'EXIT',{badarg,_}} = (catch get_resource(TypeA, ResB)),
+ ?line {'EXIT',{badarg,_}} = (catch get_resource(TypeB, ResA)),
ok.
+
+resource_binary(doc) -> ["Test enif_make_resource_binary"];
+resource_binary(suite) -> [];
+resource_binary(Config) when is_list(Config) ->
+ ?line ensure_lib_loaded(Config, 1),
+ ?line {Ptr,Bin} = resource_binary_do(),
+ erlang:garbage_collect(),
+ Last = last_resource_dtor_call(),
+ ?CHECK({Ptr,Bin,1}, Last),
+ ok.
+
+resource_binary_do() ->
+ Bin = <<"Hej Hopp i lingonskogen">>,
+ ?line {Ptr,ResBin1} = make_new_resource_binary(Bin),
+ ?line ResBin1 = Bin,
+ ?line ResInfo = {Ptr,_} = get_resource(binary_resource_type,ResBin1),
+
+ Papa = self(),
+ Forwarder = spawn_link(fun() -> forwarder(Papa) end),
+ io:format("sending to forwarder pid=~p\n",[Forwarder]),
+ Forwarder ! ResBin1,
+ ResBin2 = receive_any(),
+ ?line ResBin2 = ResBin1,
+ ?line ResInfo = get_resource(binary_resource_type,ResBin2),
+ Forwarder ! terminate,
+ ?line {Forwarder, 1} = receive_any(),
+ erlang:garbage_collect(),
+ ?line ResInfo = get_resource(binary_resource_type,ResBin1),
+ ?line ResInfo = get_resource(binary_resource_type,ResBin2),
+ ResInfo.
+
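The resource_binary test above depends on the enif_make_resource_binary contract: the returned binary term borrows its data from a resource object, and the resource destructor only runs once the last binary referring to it has been garbage collected, which is why the test calls erlang:garbage_collect() before checking last_resource_dtor_call(). A minimal sketch of such a NIF, assuming a resource type my_blob_type opened in load() (the type name and payload are illustrative, not part of this patch):

    #include <string.h>
    #include "erl_nif.h"

    /* Assumed to have been opened with enif_open_resource_type() in load(). */
    static ErlNifResourceType* my_blob_type;

    /* Return a binary term whose bytes live inside a resource object.
     * The resource stays alive as long as any such binary is reachable. */
    static ERL_NIF_TERM make_blob_binary(ErlNifEnv* env, int argc,
                                         const ERL_NIF_TERM argv[])
    {
        static const char payload[] = "illustrative payload";
        char* obj = enif_alloc_resource(my_blob_type, sizeof(payload));
        ERL_NIF_TERM bin;

        memcpy(obj, payload, sizeof(payload));
        bin = enif_make_resource_binary(env, obj, obj, sizeof(payload) - 1);
        enif_release_resource(obj); /* the binary term now holds the only reference */
        return bin;
    }

Releasing the creator's reference does not free the object; the destructor fires later, after the binary has become garbage, which is exactly what the {Ptr,Bin,1} match on last_resource_dtor_call() observes.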
-define(RT_CREATE,1).
-define(RT_TAKEOVER,2).
@@ -743,7 +798,282 @@ threading(Config) when is_list(Config) ->
?line ok = tester:load_nif_lib(Config, "tsd"),
?line ok = tester:run().
+
+send(doc) -> ["Test NIF message sending"];
+send(Config) when is_list(Config) ->
+ ensure_lib_loaded(Config),
+
+ N = 1500,
+ List = lists:seq(1,N),
+ ?line {ok,1} = send_list_seq(N, self),
+ ?line {ok,1} = send_list_seq(N, self()),
+ ?line List = receive_any(),
+ ?line List = receive_any(),
+ Papa = self(),
+ spawn_link(fun() -> ?line {ok,1} = send_list_seq(N, Papa) end),
+ ?line List = receive_any(),
+
+ ?line {ok, 1, BlobS} = send_new_blob(self(), other_term()),
+ ?line BlobR = receive_any(),
+ io:format("Sent ~p\nGot ~p\n", [BlobS, BlobR]),
+ ?line BlobR = BlobS,
+
+ %% send to dead pid
+ {DeadPid, DeadMon} = spawn_monitor(fun() -> void end),
+ ?line {'DOWN', DeadMon, process, DeadPid, normal} = receive_any(),
+ {ok,0} = send_list_seq(7, DeadPid),
+ ok.
+
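The send/1 test above builds each message in a separate process-independent environment on the C side and returns enif_send's result as the integer in {ok,N}, so the {ok,0} match for the dead pid checks that sending to a process that is no longer alive is reported as a plain failure. A minimal sketch of that pattern (the function and atom names are illustrative, not the suite's send_list_seq):

    #include "erl_nif.h"

    /* Sketch: build the message in its own environment, send it, free the env. */
    static ERL_NIF_TERM send_hello(ErlNifEnv* env, int argc,
                                   const ERL_NIF_TERM argv[])
    {
        ErlNifPid to;
        ErlNifEnv* msg_env;
        int res;

        if (!enif_get_local_pid(env, argv[0], &to))
            return enif_make_badarg(env);

        msg_env = enif_alloc_env();
        res = enif_send(env, &to, msg_env, enif_make_atom(msg_env, "hello"));
        enif_free_env(msg_env);
        /* res is 1 when the message was delivered and 0 when the receiver
         * is not alive, which is what {ok,0} for the dead pid relies on. */
        return enif_make_tuple2(env, enif_make_atom(env, "ok"),
                                enif_make_int(env, res));
    }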
+send2(doc) -> ["More NIF message sending"];
+send2(Config) when is_list(Config) ->
+ ensure_lib_loaded(Config),
+
+ send2_do1(fun send_blob_dbg/2),
+ ok.
+
+send_threaded(doc) -> ["Send msg from user thread"];
+send_threaded(Config) when is_list(Config) ->
+ case erlang:system_info(smp_support) of
+ true ->
+ send2_do1(fun(ME,To) -> send_blob_thread_dbg(ME,To,join) end),
+ send2_do1(fun(ME,To) -> send_blob_thread_and_join(ME,To) end),
+ ok;
+ false ->
+ {skipped,"No threaded send on non-SMP"}
+ end.
+
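send_threaded/1 only runs on SMP emulators because it sends from a thread created with enif_thread_create, where no process is executing; in that situation enif_send must be called with a NULL caller environment, as the threaded_sender function further down in this patch does. A minimal hedged sketch of that calling convention (the struct and its field names are illustrative):

    #include "erl_nif.h"

    struct thread_msg {
        ErlNifPid    to;      /* destination pid, captured before the thread starts */
        ErlNifEnv*   msg_env; /* process-independent env that owns the message */
        ERL_NIF_TERM msg;
        int          result;
    };

    /* Thread main: there is no calling process here, so the first argument
     * to enif_send must be NULL, and msg_env must not be used concurrently. */
    static void* sender_main(void* arg)
    {
        struct thread_msg* tm = arg;
        tm->result = enif_send(NULL, &tm->to, tm->msg_env, tm->msg);
        return NULL;
    }

The suite then joins the sender thread, either immediately (the join variant) or later via join_send_thread/1, before it inspects the result.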
+
+send2_do1(SendBlobF) ->
+ io:format("sending to self=~p\n",[self()]),
+ send2_do2(SendBlobF, self()),
+
+ Papa = self(),
+ Forwarder = spawn_link(fun() -> forwarder(Papa) end),
+ io:format("sending to forwarder pid=~p\n",[Forwarder]),
+ send2_do2(SendBlobF, Forwarder),
+ Forwarder ! terminate,
+ ?line {Forwarder, 4} = receive_any(),
+ ok.
+
+send2_do2(SendBlobF, To) ->
+ MsgEnv = alloc_msgenv(),
+ repeat(50, fun(_) -> grow_blob(MsgEnv,other_term()) end, []),
+ ?line {ok,1,Blob0} = SendBlobF(MsgEnv, To),
+ ?line Blob1 = receive_any(),
+ ?line Blob1 = Blob0,
+ clear_msgenv(MsgEnv),
+ repeat(50, fun(_) -> grow_blob(MsgEnv,other_term()) end, []),
+ ?line {ok,1,Blob2} = SendBlobF(MsgEnv, To),
+ ?line Blob3 = receive_any(),
+ ?line Blob3 = Blob2,
+
+ clear_msgenv(MsgEnv),
+ repeat(50, fun(_) -> grow_blob(MsgEnv,other_term()) end, []),
+
+ clear_msgenv(MsgEnv),
+ repeat(50, fun(_) -> grow_blob(MsgEnv,other_term()) end, []),
+ ?line {ok,1,Blob4} = SendBlobF(MsgEnv, To),
+ ?line Blob5 = receive_any(),
+ ?line Blob5 = Blob4,
+
+ clear_msgenv(MsgEnv),
+ clear_msgenv(MsgEnv),
+ repeat(50, fun(_) -> grow_blob(MsgEnv,other_term()) end, []),
+ ?line {ok,1,Blob6} = SendBlobF(MsgEnv, To),
+ ?line Blob7 = receive_any(),
+ ?line Blob7 = Blob6,
+
+ ok.
+
+
+send_blob_thread_and_join(MsgEnv, To) ->
+ ?line {ok,Blob} = send_blob_thread_dbg(MsgEnv, To, no_join),
+ ?line {ok,SendRes} = join_send_thread(MsgEnv),
+ {ok,SendRes,Blob}.
+
+send_blob_dbg(MsgEnv, To) ->
+ Ret = send_blob(MsgEnv, To),
+ %%io:format("send_blob to ~p returned ~p\n",[To,Ret]),
+ Ret.
+
+send_blob_thread_dbg(MsgEnv, To, Join) ->
+ Ret = send_blob_thread(MsgEnv, To, Join),
+ %%io:format("send_blob_thread to ~p Join=~p returned ~p\n",[To,Join,Ret]),
+ Ret.
+
+
+forwarder(To) ->
+ forwarder(To, 0).
+forwarder(To, N) ->
+ case receive_any() of
+ terminate ->
+ To ! {self(), N};
+ Msg ->
+ To ! Msg,
+ forwarder(To, N+1)
+ end.
+
+other_term() ->
+ {fun(X,Y) -> X*Y end, make_ref()}.
+
+send3(doc) -> ["Message sending stress test"];
+send3(Config) when is_list(Config) ->
+ %% Let a number of processes send random message blobs between each other
+ %% using enif_send. Kill and spawn new ones randomly to keep a ~constant
+ %% number of workers running.
+ Seed = now(),
+ io:format("seed: ~p\n",[Seed]),
+ random:seed(Seed),
+ ets:new(nif_SUITE,[named_table,public]),
+ ?line true = ets:insert(nif_SUITE,{send3,0,0,0,0}),
+ timer:send_after(10000, timeout), % Run for 10 seconds
+ SpawnCnt = send3_controller(0, [], [], 20),
+ ?line [{_,Rcv,SndOk,SndFail,Balance}] = ets:lookup(nif_SUITE,send3),
+ io:format("spawns=~p received=~p, sent=~p send-failure=~p balance=~p\n",
+ [SpawnCnt,Rcv,SndOk,SndFail,Balance]),
+ ets:delete(nif_SUITE).
+
+send3_controller(SpawnCnt, [], _, infinity) ->
+ SpawnCnt;
+send3_controller(SpawnCnt0, Mons0, Pids0, Tick) ->
+ receive
+ timeout ->
+ io:format("Timeout. Sending 'halt' to ~p\n",[Pids0]),
+ lists:foreach(fun(P) -> P ! {halt,self()} end, Pids0),
+ lists:foreach(fun(P) -> receive {halted,P} -> ok end end, Pids0),
+ QTot = lists:foldl(fun(P,QSum) ->
+ {message_queue_len,QLen} =
+ erlang:process_info(P,message_queue_len),
+ QSum + QLen
+ end, 0, Pids0),
+ io:format("Total queue length ~p\n",[QTot]),
+ lists:foreach(fun(P) -> P ! die end, Pids0),
+ send3_controller(SpawnCnt0, Mons0, [], infinity);
+ {'DOWN', MonRef, process, _Pid, _} ->
+ Mons1 = lists:delete(MonRef, Mons0),
+ %%io:format("Got DOWN from ~p. Monitors left: ~p\n",[Pid,Mons1]),
+ send3_controller(SpawnCnt0, Mons1, Pids0, Tick)
+ after Tick ->
+ Max = 20,
+ N = length(Pids0),
+ PidN = random:uniform(Max),
+ %%io:format("N=~p PidN=~p Pids0=~p\n", [N,PidN,Pids0]),
+ case PidN > N of
+ true ->
+ {NewPid,Mon} = spawn_opt(fun send3_proc/0, [link,monitor]),
+ lists:foreach(fun(P) -> P ! {is_born,NewPid} end, Pids0),
+ ?line Balance = ets:lookup_element(nif_SUITE,send3,5),
+ Inject = (Balance =< 0),
+ case Inject of
+ true -> ok;
+ false -> ets:update_element(nif_SUITE,send3,{5,-1})
+ end,
+ NewPid ! {pids,Pids0,Inject},
+ send3_controller(SpawnCnt0+1, [Mon|Mons0], [NewPid|Pids0], Tick);
+ false ->
+ KillPid = lists:nth(PidN,Pids0),
+ KillPid ! die,
+ Pids1 = lists:delete(KillPid, Pids0),
+ lists:foreach(fun(P) -> P ! {is_dead,KillPid} end, Pids1),
+ send3_controller(SpawnCnt0, Mons0, Pids1, Tick)
+ end
+ end.
+
+send3_proc() ->
+ %%io:format("Process ~p spawned\n",[self()]),
+ send3_proc([self()], {0,0,0}, {1,2,3,4,5}).
+send3_proc(Pids0, Counters={Rcv,SndOk,SndFail}, State0) ->
+ %%io:format("~p: Pids0=~p", [self(), Pids0]),
+ %%timer:sleep(10),
+ receive
+ {pids, Pids1, Inject} ->
+ %%io:format("~p: got ~p Inject=~p\n", [self(), Pids1, Inject]),
+ ?line Pids0 = [self()],
+ Pids2 = [self() | Pids1],
+ case Inject of
+ true -> send3_proc_send(Pids2, Counters, State0);
+ false -> send3_proc(Pids2, Counters, State0)
+ end;
+ {is_born, Pid} ->
+ %%io:format("~p: is_born ~p, got ~p\n", [self(), Pid, Pids0]),
+ send3_proc([Pid | Pids0], Counters, State0);
+ {is_dead, Pid} ->
+ Pids1 = lists:delete(Pid,Pids0),
+ %%io:format("~p: is_dead ~p, got ~p\n", [self(), Pid, Pids1]),
+ send3_proc(Pids1, Counters, State0);
+ {blob, Blob0} ->
+ %%io:format("~p: blob ~p\n", [self(), Blob0]),
+ State1 = send3_new_state(State0, Blob0),
+ send3_proc_send(Pids0, {Rcv+1,SndOk,SndFail}, State1);
+ die ->
+ %%io:format("Process ~p terminating, stats = ~p\n",[self(),Counters]),
+ {message_queue_len,Dropped} = erlang:process_info(self(),message_queue_len),
+ _R = ets:update_counter(nif_SUITE,send3,
+ [{2,Rcv},{3,SndOk},{4,SndFail},{5,1-Dropped}]),
+ %%io:format("~p: dies R=~p\n", [self(), R]),
+ ok;
+ {halt,Papa} ->
+ Papa ! {halted,self()},
+ io:format("~p halted\n",[self()]),
+ receive die -> ok end,
+ io:format("~p dying\n",[self()])
+ end.
+
+send3_proc_send(Pids, {Rcv,SndOk,SndFail}, State0) ->
+ To = lists:nth(random:uniform(length(Pids)),Pids),
+ Blob = send3_make_blob(),
+ State1 = send3_new_state(State0,Blob),
+ case send3_send(To, Blob) of
+ true ->
+ send3_proc(Pids, {Rcv,SndOk+1,SndFail}, State1);
+ false ->
+ send3_proc(Pids, {Rcv,SndOk,SndFail+1}, State1)
+ end.
+
+
+send3_make_blob() ->
+ case random:uniform(20)-1 of
+ 0 -> {term,[]};
+ N ->
+ MsgEnv = alloc_msgenv(),
+ repeat(N bsr 1,
+ fun(_) -> grow_blob(MsgEnv,other_term(),random:uniform(1 bsl 20))
+ end, void),
+ case (N band 1) of
+ 0 -> {term,copy_blob(MsgEnv)};
+ 1 -> {msgenv,MsgEnv}
+ end
+ end.
+
+send3_send(Pid, Msg) ->
+ %% 90% enif_send and 10% normal bang
+ case random:uniform(10) of
+ 1 -> send3_send_bang(Pid,Msg);
+ _ -> send3_send_nif(Pid,Msg)
+ end.
+send3_send_nif(Pid, {term,Blob}) ->
+ %%io:format("~p send term nif\n",[self()]),
+ send_term(Pid, {blob, Blob}) =:= 1;
+send3_send_nif(Pid, {msgenv,MsgEnv}) ->
+ %%io:format("~p send blob nif\n",[self()]),
+ send3_blob(MsgEnv, Pid, blob) =:= 1.
+
+send3_send_bang(Pid, {term,Blob}) ->
+ %%io:format("~p send term bang\n",[self()]),
+ Pid ! {blob, Blob},
+ true;
+send3_send_bang(Pid, {msgenv,MsgEnv}) ->
+ %%io:format("~p send blob bang\n",[self()]),
+ Pid ! {blob, copy_blob(MsgEnv)},
+ true.
+
+send3_new_state(State, Blob) ->
+ case random:uniform(5+2) of
+ N when N =< 5 -> setelement(N, State, Blob);
+ _ -> State % Don't store blob
+ end.
+
neg(doc) -> ["Negative testing of load_nif"];
neg(Config) when is_list(Config) ->
TmpMem = tmpmem(),
@@ -759,7 +1089,17 @@ neg(Config) when is_list(Config) ->
?line verify_tmpmem(TmpMem),
?line ok.
+is_checks(doc) -> ["Test all enif_is functions"];
+is_checks(Config) when is_list(Config) ->
+ ?line ensure_lib_loaded(Config, 1),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}).
+get_length(doc) -> ["Test the enif_get_atom_length and enif_get_list_length functions"];
+get_length(Config) when is_list(Config) ->
+ ?line ensure_lib_loaded(Config, 1),
+ ?line ok = length_test(hejsan, "hejsan", [], [], not_a_list).
ensure_lib_loaded(Config) ->
ensure_lib_loaded(Config, 1).
@@ -773,6 +1113,23 @@ ensure_lib_loaded(Config, Ver) ->
ok
end.
+make_atom(Config) when is_list(Config) ->
+ ?line ensure_lib_loaded(Config, 1),
+ An0Atom = an0atom,
+ An0Atom0 = 'an\000atom\000',
+ ?line Atoms = make_atoms(),
+ ?line 7 = size(Atoms),
+ ?line Atoms = {An0Atom,An0Atom,An0Atom,An0Atom0,An0Atom,An0Atom,An0Atom0}.
+
+make_string(Config) when is_list(Config) ->
+ ?line ensure_lib_loaded(Config, 1),
+ ?line Strings = make_strings(),
+ ?line 5 = size(Strings),
+ A0String = "a0string",
+ A0String0 = [$a,0,$s,$t,$r,$i,$n,$g,0],
+ AStringWithAccents = [$E,$r,$l,$a,$n,$g,$ ,16#e4,$r,$ ,$e,$t,$t,$ ,$g,$e,$n,$e,$r,$e,$l,$l,$t,$ ,$p,$r,$o,$g,$r,$a,$m,$s,$p,$r,16#e5,$k],
+ ?line Strings = {A0String,A0String,A0String,A0String0, AStringWithAccents}.
+
tmpmem() ->
case erlang:system_info({allocator,temp_alloc}) of
false -> undefined;
@@ -821,13 +1178,18 @@ call(Pid,Cmd) ->
receive_any() ->
receive M -> M end.
-%% check(Exp,Got,Line) ->
-%% case Got of
-%% Exp -> Exp;
-%% _ ->
-%% io:format("CHECK at ~p: Expected ~p but got ~p\n",[Line,Exp,Got]),
-%% Got
-%% end.
+repeat(0, _, Arg) ->
+ Arg;
+repeat(N, Fun, Arg0) ->
+ repeat(N-1, Fun, Fun(Arg0)).
+
+check(Exp,Got,Line) ->
+ case Got of
+ Exp -> Exp;
+ _ ->
+ io:format("CHECK at ~p: Expected ~p but got ~p\n",[Line,Exp,Got]),
+ Got
+ end.
%% The NIFs:
@@ -855,6 +1217,23 @@ get_resource(_,_) -> ?nif_stub.
release_resource(_) -> ?nif_stub.
last_resource_dtor_call() -> ?nif_stub.
make_new_resource(_,_) -> ?nif_stub.
+check_is(_,_,_,_,_,_,_,_,_,_) -> ?nif_stub.
+length_test(_,_,_,_,_) -> ?nif_stub.
+make_atoms() -> ?nif_stub.
+make_strings() -> ?nif_stub.
+make_new_resource_binary(_) -> ?nif_stub.
+send_list_seq(_,_) -> ?nif_stub.
+send_new_blob(_,_) -> ?nif_stub.
+alloc_msgenv() -> ?nif_stub.
+clear_msgenv(_) -> ?nif_stub.
+grow_blob(_,_) -> ?nif_stub.
+grow_blob(_,_,_) -> ?nif_stub.
+send_blob(_,_) -> ?nif_stub.
+send3_blob(_,_,_) -> ?nif_stub.
+send_blob_thread(_,_,_) -> ?nif_stub.
+join_send_thread(_) -> ?nif_stub.
+copy_blob(_) -> ?nif_stub.
+send_term(_,_) -> ?nif_stub.
nif_stub_error(Line) ->
exit({nif_not_loaded,module,?MODULE,line,Line}).
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 7d05a9a880..5384a32f21 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -1,3 +1,21 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
#include "erl_nif.h"
#include <stdio.h>
@@ -10,6 +28,12 @@
static int static_cntA; /* zero by default */
static int static_cntB = NIF_SUITE_LIB_VER * 100;
+static ERL_NIF_TERM atom_self;
+static ERL_NIF_TERM atom_ok;
+static ERL_NIF_TERM atom_join;
+static ERL_NIF_TERM atom_binary_resource_type;
+
+
typedef struct
{
int ref_cnt;
@@ -20,7 +44,7 @@ typedef struct
void add_call(ErlNifEnv* env, PrivData* data, const char* func_name)
{
- CallInfo* call = enif_alloc(env, sizeof(CallInfo)+strlen(func_name));
+ CallInfo* call = enif_alloc(sizeof(CallInfo)+strlen(func_name));
strcpy(call->func_name, func_name);
call->lib_ver = NIF_SUITE_LIB_VER;
call->next = data->call_history;
@@ -31,7 +55,7 @@ void add_call(ErlNifEnv* env, PrivData* data, const char* func_name)
call->arg_sz = 0;
}
-#define ADD_CALL(FUNC_NAME) add_call(env, enif_get_data(env),FUNC_NAME)
+#define ADD_CALL(FUNC_NAME) add_call(env, enif_priv_data(env),FUNC_NAME)
static void* resource_dtor_last = NULL;
static unsigned resource_dtor_last_sz = 0;
@@ -42,15 +66,24 @@ static void resource_dtor(ErlNifEnv* env, void* obj)
{
resource_dtor_last = obj;
resource_dtor_cnt++;
- resource_dtor_last_sz = enif_sizeof_resource(env, obj);
+ resource_dtor_last_sz = enif_sizeof_resource(obj);
assert(resource_dtor_last_sz <= sizeof(resource_dtor_last_data));
memcpy(resource_dtor_last_data, obj, resource_dtor_last_sz);
}
+static ErlNifResourceType* msgenv_resource_type;
+static void msgenv_dtor(ErlNifEnv* env, void* obj);
+
+static ErlNifResourceType* binary_resource_type;
+static void binary_resource_dtor(ErlNifEnv* env, void* obj);
+struct binary_resource {
+ unsigned char* data;
+ unsigned size;
+};
+
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
- /*ERL_NIF_TERM head, tail;*/
- PrivData* data = enif_alloc(env, sizeof(PrivData));
+ PrivData* data = enif_alloc(sizeof(PrivData));
assert(data != NULL);
data->ref_cnt = 1;
data->call_history = NULL;
@@ -58,41 +91,71 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
add_call(env, data, "load");
- /*
- head = load_info;
- data->rt_cnt = 0;
- for (head=load_info; enif_get_list_cell(env,load_info,&head,&tail);
- head=tail) {
- char buf[20];
- int n = enif_get_string(env,head,buf,sizeof(buf));
- assert(n > 0);
- assert(i < sizeof(data->rt_arr)/sizeof(*data->rt_arr));
- data->rt_arr[data->rt_cnt++].t = enif_create_resource_type(env,buf,resource_dtor,
- ERL_NIF_RT_CREATE,NULL);
- }
- assert(enif_is_empty_list(env,head));
- */
- data->rt_arr[0].t = enif_open_resource_type(env,"Gold",resource_dtor,
+ data->rt_arr[0].t = enif_open_resource_type(env,NULL,"Gold",resource_dtor,
ERL_NIF_RT_CREATE,NULL);
- data->rt_arr[1].t = enif_open_resource_type(env,"Silver",resource_dtor,
+ data->rt_arr[1].t = enif_open_resource_type(env,NULL,"Silver",resource_dtor,
ERL_NIF_RT_CREATE,NULL);
+ binary_resource_type = enif_open_resource_type(env,NULL,"nif_SUITE.binary",
+ binary_resource_dtor,
+ ERL_NIF_RT_CREATE, NULL);
+
+ msgenv_resource_type = enif_open_resource_type(env,NULL,"nif_SUITE.msgenv",
+ msgenv_dtor,
+ ERL_NIF_RT_CREATE, NULL);
+
+ atom_self = enif_make_atom(env,"self");
+ atom_ok = enif_make_atom(env,"ok");
+ atom_join = enif_make_atom(env,"join");
+ atom_binary_resource_type = enif_make_atom(env,"binary_resource_type");
+
*priv_data = data;
return 0;
}
+static void resource_takeover(ErlNifEnv* env, PrivData* priv)
+{
+ ErlNifResourceFlags tried;
+ ErlNifResourceType* rt;
+ rt = enif_open_resource_type(env, NULL, "Gold", resource_dtor,
+ ERL_NIF_RT_TAKEOVER, &tried);
+ assert(rt == priv->rt_arr[0].t);
+ assert(tried == ERL_NIF_RT_TAKEOVER);
+ rt = enif_open_resource_type(env, NULL, "Silver", resource_dtor,
+ ERL_NIF_RT_TAKEOVER, &tried);
+ assert(rt == priv->rt_arr[1].t);
+ assert(tried == ERL_NIF_RT_TAKEOVER);
+
+ rt = enif_open_resource_type(env, NULL, "nif_SUITE.binary", binary_resource_dtor,
+ ERL_NIF_RT_TAKEOVER, &tried);
+ assert(rt != NULL);
+ assert(tried == ERL_NIF_RT_TAKEOVER);
+ assert(binary_resource_type==NULL || binary_resource_type == rt);
+ binary_resource_type = rt;
+
+ rt = enif_open_resource_type(env, NULL, "nif_SUITE.msgenv", msgenv_dtor,
+ ERL_NIF_RT_TAKEOVER, &tried);
+ assert(rt != NULL);
+ assert(tried == ERL_NIF_RT_TAKEOVER);
+ assert(msgenv_resource_type==NULL || msgenv_resource_type == rt);
+ msgenv_resource_type = rt;
+}
+
static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
- add_call(env, *priv_data, "reload");
+ PrivData* priv = (PrivData*) *priv_data;
+ add_call(env, priv, "reload");
+ resource_takeover(env,priv);
return 0;
}
static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
{
- PrivData* data = *old_priv_data;
- add_call(env, data, "upgrade");
- data->ref_cnt++;
- *priv_data = *old_priv_data;
+ PrivData* priv = (PrivData*) *old_priv_data;
+ add_call(env, priv, "upgrade");
+ priv->ref_cnt++;
+ *priv_data = *old_priv_data;
+ resource_takeover(env,priv);
return 0;
}
@@ -101,7 +164,10 @@ static void unload(ErlNifEnv* env, void* priv_data)
PrivData* data = priv_data;
add_call(env, data, "unload");
if (--data->ref_cnt == 0) {
- enif_free(env, priv_data);
+ if (data->nif_mod != NULL) {
+ NifModPrivData_release(data->nif_mod);
+ }
+ enif_free(priv_data);
}
}
@@ -120,11 +186,10 @@ static ERL_NIF_TERM make_call_history(ErlNifEnv* env, CallInfo** headp)
ERL_NIF_TERM func_term = enif_make_atom(env,call->func_name);
ERL_NIF_TERM tpl;
if (call->arg != NULL) {
- ErlNifBinary arg_bin;
- enif_alloc_binary(env, call->arg_sz, &arg_bin);
- memcpy(arg_bin.data, call->arg, call->arg_sz);
- func_term = enif_make_tuple2(env, func_term,
- enif_make_binary(env, &arg_bin));
+ ERL_NIF_TERM arg_bin;
+ memcpy(enif_make_new_binary(env, call->arg_sz, &arg_bin),
+ call->arg, call->arg_sz);
+ func_term = enif_make_tuple2(env, func_term, arg_bin);
}
tpl = enif_make_tuple4(env, func_term,
enif_make_int(env,call->lib_ver),
@@ -132,28 +197,28 @@ static ERL_NIF_TERM make_call_history(ErlNifEnv* env, CallInfo** headp)
enif_make_int(env,call->static_cntB));
list = enif_make_list_cell(env, tpl, list);
*headp = call->next;
- enif_free(env,call);
+ enif_free(call);
}
return list;
}
static ERL_NIF_TERM call_history(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- PrivData* data = (PrivData*) enif_get_data(env);
+ PrivData* data = (PrivData*) enif_priv_data(env);
return make_call_history(env,&data->call_history);
}
static ERL_NIF_TERM hold_nif_mod_priv_data(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- PrivData* data = (PrivData*) enif_get_data(env);
+ PrivData* data = (PrivData*) enif_priv_data(env);
unsigned long ptr_as_ulong;
if (!enif_get_ulong(env,argv[0],&ptr_as_ulong)) {
return enif_make_badarg(env);
}
- if (data->nif_mod != NULL && --(data->nif_mod->ref_cnt) == 0) {
- enif_free(env,data->nif_mod);
+ if (data->nif_mod != NULL) {
+ NifModPrivData_release(data->nif_mod);
}
data->nif_mod = (NifModPrivData*) ptr_as_ulong;
return enif_make_int(env,++(data->nif_mod->ref_cnt));
@@ -161,7 +226,7 @@ static ERL_NIF_TERM hold_nif_mod_priv_data(ErlNifEnv* env, int argc, const ERL_N
static ERL_NIF_TERM nif_mod_call_history(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- PrivData* data = (PrivData*) enif_get_data(env);
+ PrivData* data = (PrivData*) enif_priv_data(env);
ERL_NIF_TERM ret;
if (data->nif_mod == NULL) {
return enif_make_string(env,"nif_mod pointer is NULL", ERL_NIF_LATIN1);
@@ -231,6 +296,30 @@ static int test_ulong(ErlNifEnv* env, unsigned long i1)
return 1;
}
+static int test_int64(ErlNifEnv* env, ErlNifSInt64 i1)
+{
+ ErlNifSInt64 i2 = 0;
+ ERL_NIF_TERM int_term = enif_make_int64(env, i1);
+ if (!enif_get_int64(env,int_term, &i2) || i1 != i2) {
+ fprintf(stderr, "test_int64(%ld) ...FAILED i2=%ld\r\n",
+ (long)i1, (long)i2);
+ return 0;
+ }
+ return 1;
+}
+
+static int test_uint64(ErlNifEnv* env, ErlNifUInt64 i1)
+{
+ ErlNifUInt64 i2 = 0;
+ ERL_NIF_TERM int_term = enif_make_uint64(env, i1);
+ if (!enif_get_uint64(env,int_term, &i2) || i1 != i2) {
+ fprintf(stderr, "test_ulong(%lu) ...FAILED i2=%lu\r\n",
+ (unsigned long)i1, (unsigned long)i2);
+ return 0;
+ }
+ return 1;
+}
+
static int test_double(ErlNifEnv* env, double d1)
{
double d2 = 0;
@@ -254,6 +343,8 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
unsigned uint;
long slong;
unsigned long ulong;
+ ErlNifSInt64 sint64;
+ ErlNifUInt64 uint64;
double d;
ERL_NIF_TERM atom, ref1, ref2;
@@ -287,11 +378,25 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
slong -= slong / 3 + 1;
} while (slong >= 0);
+ sint64 = ((ErlNifSInt64)1 << 63); /* INT64_MIN */
+ do {
+ if (!test_int64(env,sint64)) {
+ goto error;
+ }
+ sint64 += ~sint64 / 3 + 1;
+ } while (sint64 < 0);
+ sint64 = ((ErlNifUInt64)1 << 63) - 1; /* INT64_MAX */
+ do {
+ if (!test_int64(env,sint64)) {
+ goto error;
+ }
+ sint64 -= sint64 / 3 + 1;
+ } while (sint64 >= 0);
uint = UINT_MAX;
for (;;) {
if (!test_uint(env,uint)) {
-
+ goto error;
}
if (uint == 0) break;
uint -= uint / 3 + 1;
@@ -299,11 +404,19 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
ulong = ULONG_MAX;
for (;;) {
if (!test_ulong(env,ulong)) {
-
+ goto error;
}
if (ulong == 0) break;
ulong -= ulong / 3 + 1;
}
+ uint64 = (ErlNifUInt64)-1; /* UINT64_MAX */
+ for (;;) {
+ if (!test_uint64(env,uint64)) {
+ goto error;
+ }
+ if (uint64 == 0) break;
+ uint64 -= uint64 / 3 + 1;
+ }
if (MAX_SMALL < INT_MAX) { /* 32-bit */
for (i=-10 ; i <= 10; i++) {
@@ -326,24 +439,31 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
for (i=-10 ; i < 10; i++) {
if (!test_long(env,MAX_SMALL+i) || !test_ulong(env,MAX_SMALL+i) ||
- !test_long(env,MIN_SMALL+i)) {
+ !test_long(env,MIN_SMALL+i) ||
+ !test_int64(env,MAX_SMALL+i) || !test_uint64(env,MAX_SMALL+i) ||
+ !test_int64(env,MIN_SMALL+i)) {
goto error;
}
+ if (MAX_SMALL < INT_MAX) {
+ if (!test_int(env,MAX_SMALL+i) || !test_uint(env,MAX_SMALL+i) ||
+ !test_int(env,MIN_SMALL+i)) {
+ goto error;
+ }
+ }
}
-
for (d=3.141592e-100 ; d < 1e100 ; d *= 9.97) {
if (!test_double(env,d) || !test_double(env,-d)) {
goto error;
}
}
- if (!enif_make_existing_atom(env,"nif_SUITE", &atom)
- || !enif_is_identical(env,atom,enif_make_atom(env,"nif_SUITE"))) {
+ if (!enif_make_existing_atom(env,"nif_SUITE", &atom, ERL_NIF_LATIN1)
+ || !enif_is_identical(atom,enif_make_atom(env,"nif_SUITE"))) {
fprintf(stderr, "nif_SUITE not an atom?\r\n");
goto error;
}
for (i=2; i; i--) {
- if (enif_make_existing_atom(env,"nif_SUITE_pink_unicorn", &atom)) {
+ if (enif_make_existing_atom(env,"nif_SUITE_pink_unicorn", &atom, ERL_NIF_LATIN1)) {
fprintf(stderr, "pink unicorn exist?\r\n");
goto error;
}
@@ -351,7 +471,7 @@ static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
ref1 = enif_make_ref(env);
ref2 = enif_make_ref(env);
if (!enif_is_ref(env,ref1) || !enif_is_ref(env,ref2)
- || enif_is_identical(env,ref1,ref2) || enif_compare(env,ref1,ref2)==0) {
+ || enif_is_identical(ref1,ref2) || enif_compare(ref1,ref2)==0) {
fprintf(stderr, "strange refs?\r\n");
goto error;
}
@@ -381,7 +501,7 @@ static ERL_NIF_TERM is_identical(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
if (argc != 2) {
return enif_make_badarg(env);
}
- return enif_make_atom(env, (enif_is_identical(env,argv[0],argv[1]) ?
+ return enif_make_atom(env, (enif_is_identical(argv[0],argv[1]) ?
"true" : "false"));
}
@@ -390,7 +510,7 @@ static ERL_NIF_TERM compare(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
if (argc != 2) {
return enif_make_badarg(env);
}
- return enif_make_int(env, enif_compare(env,argv[0],argv[1]));
+ return enif_make_int(env, enif_compare(argv[0],argv[1]));
}
static ERL_NIF_TERM many_args_100(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -412,11 +532,10 @@ static ERL_NIF_TERM clone_bin(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
{
ErlNifBinary ibin;
if (enif_inspect_binary(env,argv[0],&ibin)) {
- ErlNifBinary obin;
- enif_alloc_binary(env,ibin.size,&obin);
- memcpy(obin.data,ibin.data,ibin.size);
- /*enif_release_binary(env,&ibin);*/
- return enif_make_binary(env,&obin);
+ ERL_NIF_TERM obin;
+ memcpy(enif_make_new_binary(env, ibin.size, &obin),
+ ibin.data, ibin.size);
+ return obin;
}
else {
return enif_make_badarg(env);
@@ -438,7 +557,7 @@ static ERL_NIF_TERM string_to_bin(ErlNifEnv* env, int argc, const ERL_NIF_TERM a
unsigned size;
int n;
if (!enif_get_int(env,argv[1],(int*)&size)
- || !enif_alloc_binary(env,size,&obin)) {
+ || !enif_alloc_binary(size,&obin)) {
return enif_make_badarg(env);
}
n = enif_get_string(env, argv[0], (char*)obin.data, size, ERL_NIF_LATIN1);
@@ -452,10 +571,10 @@ static ERL_NIF_TERM atom_to_bin(ErlNifEnv* env, int argc, const ERL_NIF_TERM arg
unsigned size;
int n;
if (!enif_get_int(env,argv[1],(int*)&size)
- || !enif_alloc_binary(env,size,&obin)) {
+ || !enif_alloc_binary(size,&obin)) {
return enif_make_badarg(env);
}
- n = enif_get_atom(env, argv[0], (char*)obin.data, size);
+ n = enif_get_atom(env, argv[0], (char*)obin.data, size, ERL_NIF_LATIN1);
return enif_make_tuple(env, 2, enif_make_int(env,n),
enif_make_binary(env,&obin));
}
@@ -515,14 +634,14 @@ static ERL_NIF_TERM iolist_2_bin(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
static ERL_NIF_TERM last_resource_dtor_call(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- ErlNifBinary bin;
ERL_NIF_TERM ret;
if (resource_dtor_last != NULL) {
- enif_alloc_binary(env, resource_dtor_last_sz, &bin);
- memcpy(bin.data, resource_dtor_last_data, resource_dtor_last_sz);
+ ERL_NIF_TERM bin;
+ memcpy(enif_make_new_binary(env, resource_dtor_last_sz, &bin),
+ resource_dtor_last_data, resource_dtor_last_sz);
ret = enif_make_tuple3(env,
enif_make_long(env, (long)resource_dtor_last),
- enif_make_binary(env, &bin),
+ bin,
enif_make_int(env, resource_dtor_cnt));
}
else {
@@ -536,7 +655,7 @@ static ERL_NIF_TERM last_resource_dtor_call(ErlNifEnv* env, int argc, const ERL_
static ERL_NIF_TERM get_resource_type(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- PrivData* data = (PrivData*) enif_get_data(env);
+ PrivData* data = (PrivData*) enif_priv_data(env);
int ix;
if (!enif_get_int(env, argv[0], &ix) || ix >= 2) {
@@ -552,7 +671,7 @@ static ERL_NIF_TERM alloc_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM
union { void* p; long l;} data;
if (!enif_get_long(env, argv[0], &type.l)
|| !enif_inspect_binary(env, argv[1], &data_bin)
- || (data.p = enif_alloc_resource(env, type.t, data_bin.size))==NULL) {
+ || (data.p = enif_alloc_resource(type.t, data_bin.size))==NULL) {
return enif_make_badarg(env);
}
@@ -577,28 +696,65 @@ static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TE
ERL_NIF_TERM ret;
if (!enif_get_long(env, argv[0], &type.l)
|| !enif_inspect_binary(env, argv[1], &data_bin)
- || (data = enif_alloc_resource(env, type.t, data_bin.size))==NULL) {
+ || (data = enif_alloc_resource(type.t, data_bin.size))==NULL) {
return enif_make_badarg(env);
}
ret = enif_make_resource(env, data);
memcpy(data, data_bin.data, data_bin.size);
- enif_release_resource(env, data);
+ enif_release_resource(data);
return ret;
}
+static ERL_NIF_TERM make_new_resource_binary(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifBinary data_bin;
+ union { struct binary_resource* p; void* vp; long l;} br;
+ void* buf;
+ ERL_NIF_TERM ret;
+ if (!enif_inspect_binary(env, argv[0], &data_bin)
+ || (br.vp = enif_alloc_resource(binary_resource_type,
+ sizeof(struct binary_resource)))==NULL
+ || (buf = enif_alloc(data_bin.size)) == NULL) {
+
+ return enif_make_badarg(env);
+ }
+ memset(br.vp,0xba,sizeof(struct binary_resource)); /* avoid valgrind warning */
+ br.p->data = buf;
+ br.p->size = data_bin.size;
+ memcpy(br.p->data, data_bin.data, data_bin.size);
+ ret = enif_make_resource_binary(env, br.vp, br.p->data, br.p->size);
+ enif_release_resource(br.p);
+ return enif_make_tuple2(env, enif_make_long(env,br.l), ret);
+}
+
+static void binary_resource_dtor(ErlNifEnv* env, void* obj)
+{
+ struct binary_resource* br = (struct binary_resource*) obj;
+ resource_dtor(env,obj);
+ assert(br->data != NULL);
+ enif_free(br->data);
+ br->data = NULL;
+}
+
static ERL_NIF_TERM get_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary data_bin;
union { ErlNifResourceType* t; long l; } type;
union { void* p; long l; } data;
- if (!enif_get_long(env, argv[0], &type.l)
+ type.t = NULL;
+ if (enif_is_identical(argv[0], atom_binary_resource_type)) {
+ type.t = binary_resource_type;
+ }
+ else {
+ enif_get_long(env, argv[0], &type.l);
+ }
+ if (type.t == NULL
|| !enif_get_resource(env, argv[1], type.t, &data.p)) {
return enif_make_badarg(env);
}
-
- enif_alloc_binary(env, enif_sizeof_resource(env,data.p), &data_bin);
+ enif_alloc_binary(enif_sizeof_resource(data.p), &data_bin);
memcpy(data_bin.data, data.p, data_bin.size);
return enif_make_tuple2(env, enif_make_long(env,data.l),
enif_make_binary(env, &data_bin));
@@ -610,10 +766,592 @@ static ERL_NIF_TERM release_resource(ErlNifEnv* env, int argc, const ERL_NIF_TER
if (!enif_get_long(env, argv[0], &data.l)) {
return enif_make_badarg(env);
}
- enif_release_resource(env, data.p);
+ enif_release_resource(data.p);
return enif_make_atom(env,"ok");
}
+/*
+ * argv[0] an atom
+ * argv[1] a binary
+ * argv[2] a ref
+ * argv[3] 'ok'
+ * argv[4] a fun
+ * argv[5] a pid
+ * argv[6] a port
+ * argv[7] an empty list
+ * argv[8] a non-empty list
+ * argv[9] a tuple
+ */
+static ERL_NIF_TERM check_is(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM ok_atom = enif_make_atom(env, "ok");
+
+ if (!enif_is_atom(env, argv[0])) return enif_make_badarg(env);
+ if (!enif_is_binary(env, argv[1])) return enif_make_badarg(env);
+ if (!enif_is_ref(env, argv[2])) return enif_make_badarg(env);
+ if (!enif_is_identical(argv[3], ok_atom)) return enif_make_badarg(env);
+ if (!enif_is_fun(env, argv[4])) return enif_make_badarg(env);
+ if (!enif_is_pid(env, argv[5])) return enif_make_badarg(env);
+ if (!enif_is_port(env, argv[6])) return enif_make_badarg(env);
+ if (!enif_is_empty_list(env, argv[7])) return enif_make_badarg(env);
+ if (!enif_is_list(env, argv[7])) return enif_make_badarg(env);
+ if (!enif_is_list(env, argv[8])) return enif_make_badarg(env);
+ if (!enif_is_tuple(env, argv[9])) return enif_make_badarg(env);
+
+ return ok_atom;
+}
+
+/*
+ * argv[0] atom with length of 6
+ * argv[1] list with length of 6
+ * argv[2] empty list
+ * argv[3] not an atom
+ * argv[4] not a list
+ */
+static ERL_NIF_TERM length_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ unsigned len;
+
+ if (!enif_get_atom_length(env, argv[0], &len, ERL_NIF_LATIN1) || len != 6)
+ return enif_make_badarg(env);
+
+ if (!enif_get_list_length(env, argv[1], &len) || len != 6)
+ return enif_make_badarg(env);
+
+ if (!enif_get_list_length(env, argv[2], &len) || len != 0)
+ return enif_make_badarg(env);
+
+ if (enif_get_atom_length(env, argv[3], &len, ERL_NIF_LATIN1))
+ return enif_make_badarg(env);
+
+ if (enif_get_list_length(env, argv[4], &len))
+ return enif_make_badarg(env);
+
+ return enif_make_atom(env, "ok");
+}
+
+static ERL_NIF_TERM make_atoms(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM arr[7];
+ ERL_NIF_TERM existingatom0a, existingatom0b;
+ ERL_NIF_TERM existing0atom0;
+ const char * const an0atom = "an0atom";
+ const char an0atom0[8] = {'a','n','\0','a','t','o','m',0};
+
+ arr[0] = enif_make_atom(env, "an0atom");
+ arr[1] = enif_make_atom_len(env, "an0atom", 7);
+ arr[2] = enif_make_atom_len(env, an0atom, 7);
+ arr[3] = enif_make_atom_len(env, an0atom0, 8);
+
+ if (!enif_make_existing_atom(env, "an0atom", &existingatom0a, ERL_NIF_LATIN1))
+ return enif_make_atom(env, "error");
+ arr[4] = existingatom0a;
+
+ if (!enif_make_existing_atom_len(env, an0atom, 7, &existingatom0b, ERL_NIF_LATIN1))
+ return enif_make_atom(env, "error");
+ arr[5] = existingatom0b;
+
+ if (!enif_make_existing_atom_len(env, an0atom0, 8, &existing0atom0, ERL_NIF_LATIN1))
+ return enif_make_atom(env, "error");
+ arr[6] = existing0atom0;
+
+ return enif_make_tuple7(env,
+ arr[0],arr[1],arr[2],arr[3],arr[4],arr[5],arr[6]);
+}
+
+static ERL_NIF_TERM make_strings(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ const char a0string[8] = {'a','0','s','t','r','i','n','g'};
+ const char a0string0[9] = {'a','\0','s','t','r','i','n','g',0};
+ const char astringwith8bits[37] = {'E','r','l','a','n','g',' ',0xE4 /* 'ä' */,'r',' ','e','t','t',' ','g','e','n','e','r','e','l','l','t',' ','p','r','o','g','r','a','m','s','p','r', 0xE5 /* 'å' */,'k',0};
+
+ return enif_make_tuple5(env,
+ enif_make_string(env, "a0string", ERL_NIF_LATIN1),
+ enif_make_string_len(env, "a0string", 8, ERL_NIF_LATIN1),
+ enif_make_string_len(env, a0string, 8, ERL_NIF_LATIN1),
+ enif_make_string_len(env, a0string0, 9, ERL_NIF_LATIN1),
+ enif_make_string(env, astringwith8bits, ERL_NIF_LATIN1));
+}
+static ERL_NIF_TERM send_list_seq(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifPid to;
+ ERL_NIF_TERM msg;
+ ErlNifEnv* msg_env;
+ int i, res;
+
+ if (!enif_get_int(env, argv[0], &i)) {
+ return enif_make_badarg(env);
+ }
+ if (argv[1] == atom_self) {
+ enif_self(env, &to);
+ }
+ else if (!enif_get_local_pid(env, argv[1], &to)) {
+ return enif_make_badarg(env);
+ }
+ msg_env = enif_alloc_env();
+ msg = enif_make_list(msg_env,0);
+ for ( ; i>0 ; i--) {
+ msg = enif_make_list_cell(msg_env, enif_make_int(msg_env, i), msg);
+ }
+ res = enif_send(env, &to, msg_env, msg);
+ enif_free_env(msg_env);
+ return enif_make_tuple2(env, atom_ok, enif_make_int(env,res));
+}
+
+static void fill(void* dst, unsigned bytes, int seed)
+{
+ unsigned char* ptr = dst;
+ int i;
+ for (i=bytes; i>0; i--) {
+ *ptr++ = seed;
+ seed += 7;
+ }
+}
+
+#define MAKE_TERM_REUSE_LEN 16
+struct make_term_info
+{
+ ErlNifEnv* caller_env;
+ ErlNifEnv* dst_env;
+ ERL_NIF_TERM reuse[MAKE_TERM_REUSE_LEN];
+ unsigned reuse_push;
+ unsigned reuse_pull;
+ ErlNifResourceType* resource_type;
+ ERL_NIF_TERM other_term;
+ ERL_NIF_TERM blob;
+ ErlNifPid to_pid;
+ ErlNifTid tid;
+ ErlNifCond* cond;
+ ErlNifMutex* mtx;
+ int send_it;
+ int send_res;
+ unsigned n;
+};
+
+
+static void push_term(struct make_term_info* mti, ERL_NIF_TERM term)
+{
+ unsigned ix = (mti->reuse_push++) % MAKE_TERM_REUSE_LEN;
+ mti->reuse[ix] = term;
+ //enif_fprintf(stderr, "push at %u: %T\r\n", ix, term);
+}
+static ERL_NIF_TERM pull_term(struct make_term_info* mti)
+{
+ unsigned ix;
+ if (mti->reuse_pull >= mti->reuse_push &&
+ mti->reuse_push < MAKE_TERM_REUSE_LEN) {
+ mti->reuse_pull = 0;
+ if (mti->reuse_push == 0) {
+ mti->reuse[0] = enif_make_list(mti->dst_env, 0);
+ }
+ }
+ ix = (mti->reuse_pull++) % MAKE_TERM_REUSE_LEN;
+ //enif_fprintf(stderr, "pull from %u: %T\r\n", ix, mti->reuse[ix]);
+ return mti->reuse[ix];
+}
+
+static int make_term_n(struct make_term_info* mti, int n, ERL_NIF_TERM* res);
+
+static ERL_NIF_TERM make_term_binary(struct make_term_info* mti, int n)
+{
+ ErlNifBinary bin;
+ enif_alloc_binary(100, &bin);
+ fill(bin.data, bin.size, n);
+ return enif_make_binary(mti->dst_env, &bin);
+}
+
+static ERL_NIF_TERM make_term_int(struct make_term_info* mti, int n)
+{
+ int i;
+ fill(&i, sizeof(i), n);
+ return enif_make_int(mti->dst_env, i);
+}
+
+static ERL_NIF_TERM make_term_ulong(struct make_term_info* mti, int n)
+{
+ unsigned long ul;
+ fill(&ul, sizeof(ul), n);
+ return enif_make_ulong(mti->dst_env, ul);
+}
+
+static ERL_NIF_TERM make_term_double(struct make_term_info* mti, int n)
+{
+ double d = 3.141592;
+ return enif_make_double(mti->dst_env, d);
+}
+static ERL_NIF_TERM make_term_atom(struct make_term_info* mti, int n)
+{
+ return enif_make_atom(mti->dst_env, "make_term_n");
+}
+static ERL_NIF_TERM make_term_existing_atom(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM res;
+ int exist = enif_make_existing_atom(mti->dst_env, "nif_SUITE", &res,
+ ERL_NIF_LATIN1);
+ assert(exist);
+ return res;
+}
+static ERL_NIF_TERM make_term_string(struct make_term_info* mti, int n)
+{
+ return enif_make_string(mti->dst_env, "Hello!", ERL_NIF_LATIN1);
+}
+static ERL_NIF_TERM make_term_ref(struct make_term_info* mti, int n)
+{
+ return enif_make_ref(mti->dst_env);
+}
+static ERL_NIF_TERM make_term_sub_binary(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM orig;
+ unsigned char* ptr = enif_make_new_binary(mti->dst_env, 10, &orig);
+ fill(ptr, 10, n);
+ return enif_make_sub_binary(mti->dst_env, orig, 3, 5);
+}
+static ERL_NIF_TERM make_term_uint(struct make_term_info* mti, int n)
+{
+ unsigned int ui;
+ fill(&ui, sizeof(ui), n);
+ return enif_make_uint(mti->dst_env, ui);
+}
+static ERL_NIF_TERM make_term_long(struct make_term_info* mti, int n)
+{
+ long l;
+ fill(&l, sizeof(l), n);
+ return enif_make_long(mti->dst_env, l);
+}
+static ERL_NIF_TERM make_term_tuple0(struct make_term_info* mti, int n)
+{
+ return enif_make_tuple(mti->dst_env, 0);
+}
+static ERL_NIF_TERM make_term_list0(struct make_term_info* mti, int n)
+{
+ return enif_make_list(mti->dst_env, 0);
+}
+static ERL_NIF_TERM make_term_resource(struct make_term_info* mti, int n)
+{
+ void* resource = enif_alloc_resource(mti->resource_type, 10);
+ fill(resource, 10, n);
+ return enif_make_resource(mti->dst_env, resource);
+}
+static ERL_NIF_TERM make_term_new_binary(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM res;
+ unsigned char* ptr = enif_make_new_binary(mti->dst_env,20,&res);
+ fill(ptr, 20, n);
+ return res;
+}
+static ERL_NIF_TERM make_term_caller_pid(struct make_term_info* mti, int n)
+{
+ ErlNifPid pid;
+ return enif_make_pid(mti->dst_env, enif_self(mti->caller_env, &pid));
+}
+
+static ERL_NIF_TERM make_term_tuple(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM t[3];
+ t[0] = pull_term(mti);
+ t[1] = pull_term(mti);
+ t[2] = pull_term(mti);
+ return enif_make_tuple3(mti->dst_env, t[0], t[1], t[2]);
+}
+static ERL_NIF_TERM make_term_list(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM t[3];
+ t[0] = pull_term(mti);
+ t[1] = pull_term(mti);
+ t[2] = pull_term(mti);
+ return enif_make_list3(mti->dst_env, t[0], t[1], t[2]);
+}
+static ERL_NIF_TERM make_term_list_cell(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM t[2];
+ t[0] = pull_term(mti);
+ t[1] = pull_term(mti);
+ return enif_make_list_cell(mti->dst_env, t[0], t[1]);
+}
+static ERL_NIF_TERM make_term_tuple_from_array(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM t[3];
+ t[0] = pull_term(mti);
+ t[1] = pull_term(mti);
+ t[2] = pull_term(mti);
+ return enif_make_tuple_from_array(mti->dst_env, t, 3);
+}
+static ERL_NIF_TERM make_term_list_from_array(struct make_term_info* mti, int n)
+{
+ ERL_NIF_TERM t[3];
+ t[0] = pull_term(mti);
+ t[1] = pull_term(mti);
+ t[2] = pull_term(mti);
+ return enif_make_list_from_array(mti->dst_env, t, 3);
+}
+static ERL_NIF_TERM make_term_garbage(struct make_term_info* mti, int n)
+{
+ (void) enif_make_string(mti->dst_env, "garbage string", ERL_NIF_LATIN1);
+ return pull_term(mti);
+}
+static ERL_NIF_TERM make_term_copy(struct make_term_info* mti, int n)
+{
+ return enif_make_copy(mti->dst_env, mti->other_term);
+}
+
+typedef ERL_NIF_TERM Make_term_Func(struct make_term_info*, int);
+static Make_term_Func* make_funcs[] = {
+ make_term_binary,
+ make_term_int,
+ make_term_ulong,
+ make_term_double,
+ make_term_atom,
+ make_term_existing_atom,
+ make_term_string,
+ //make_term_ref,
+ make_term_sub_binary,
+ make_term_uint,
+ make_term_long,
+ make_term_tuple0,
+ make_term_list0,
+ make_term_resource,
+ make_term_new_binary,
+ make_term_caller_pid,
+ make_term_tuple,
+ make_term_list,
+ make_term_list_cell,
+ make_term_tuple_from_array,
+ make_term_list_from_array,
+ make_term_garbage,
+ make_term_copy
+};
+static unsigned num_of_make_funcs()
+{
+ return sizeof(make_funcs)/sizeof(*make_funcs);
+}
+static int make_term_n(struct make_term_info* mti, int n, ERL_NIF_TERM* res)
+{
+ if (n < num_of_make_funcs()) {
+ *res = make_funcs[n](mti, n);
+ push_term(mti, *res);
+ return 1;
+ }
+ return 0;
+}
+
+static ERL_NIF_TERM make_blob(ErlNifEnv* caller_env, ErlNifEnv* dst_env,
+ ERL_NIF_TERM other_term)
+{
+ PrivData* priv = (PrivData*) enif_priv_data(caller_env);
+ ERL_NIF_TERM term, list;
+ int n = 0;
+ struct make_term_info mti;
+ mti.caller_env = caller_env;
+ mti.dst_env = dst_env;
+ mti.reuse_push = 0;
+ mti.reuse_pull = 0;
+ mti.resource_type = priv->rt_arr[0].t;
+ mti.other_term = other_term;
+
+ list = enif_make_list(dst_env, 0);
+ while (make_term_n(&mti, n++, &term)) {
+ list = enif_make_list_cell(dst_env, term, list);
+ }
+ return list;
+}
+
+static ERL_NIF_TERM send_new_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifPid to;
+ ERL_NIF_TERM msg, copy;
+ ErlNifEnv* msg_env;
+ int res;
+
+ if (!enif_get_local_pid(env, argv[0], &to)) {
+ return enif_make_badarg(env);
+ }
+ msg_env = enif_alloc_env();
+ msg = make_blob(env,msg_env, argv[1]);
+ copy = make_blob(env,env, argv[1]);
+ res = enif_send(env, &to, msg_env, msg);
+ enif_free_env(msg_env);
+ return enif_make_tuple3(env, atom_ok, enif_make_int(env,res), copy);
+}
+
+static ERL_NIF_TERM alloc_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ PrivData* priv = (PrivData*) enif_priv_data(env);
+ struct make_term_info* mti;
+ ERL_NIF_TERM ret;
+
+ mti = (struct make_term_info*) enif_alloc_resource(msgenv_resource_type,
+ sizeof(*mti));
+ mti->caller_env = NULL;
+ mti->dst_env = enif_alloc_env();
+ mti->reuse_push = 0;
+ mti->reuse_pull = 0;
+ mti->resource_type = priv->rt_arr[0].t;
+ mti->other_term = enif_make_list(mti->dst_env, 0);
+ mti->blob = enif_make_list(mti->dst_env, 0);
+ mti->mtx = enif_mutex_create("nif_SUITE:mtx");
+ mti->cond = enif_cond_create("nif_SUITE:cond");
+ mti->send_res = 0xcafebabe;
+ mti->n = 0;
+ ret = enif_make_resource(env, mti);
+ enif_release_resource(mti);
+ return ret;
+}
+
+static void msgenv_dtor(ErlNifEnv* env, void* obj)
+{
+ struct make_term_info* mti = (struct make_term_info*) obj;
+ if (mti->dst_env != NULL) {
+ enif_free_env(mti->dst_env);
+ }
+ enif_mutex_destroy(mti->mtx);
+ enif_cond_destroy(mti->cond);
+}
+
+static ERL_NIF_TERM clear_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union {
+ void* vp;
+ struct make_term_info* p;
+ }mti;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
+ return enif_make_badarg(env);
+ }
+ enif_clear_env(mti.p->dst_env);
+ mti.p->reuse_pull = 0;
+ mti.p->reuse_push = 0;
+ mti.p->blob = enif_make_list(mti.p->dst_env, 0);
+ return atom_ok;
+}
+
+static ERL_NIF_TERM grow_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ ERL_NIF_TERM term;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || (argc>2 && !enif_get_uint(env,argv[2], &mti.p->n))) {
+ return enif_make_badarg(env);
+ }
+ mti.p->caller_env = env;
+ mti.p->other_term = argv[1];
+ mti.p->n %= num_of_make_funcs();
+ make_term_n(mti.p, mti.p->n++, &term);
+ mti.p->blob = enif_make_list_cell(mti.p->dst_env, term, mti.p->blob);
+ return atom_ok;
+}
+
+static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ ErlNifPid to;
+ ERL_NIF_TERM copy;
+ int res;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || !enif_get_local_pid(env, argv[1], &to)) {
+ return enif_make_badarg(env);
+ }
+ copy = enif_make_copy(env, mti.p->blob);
+ res = enif_send(env, &to, mti.p->dst_env, mti.p->blob);
+ return enif_make_tuple3(env, atom_ok, enif_make_int(env,res), copy);
+}
+
+static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ ErlNifPid to;
+ ERL_NIF_TERM copy;
+ int res;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || !enif_get_local_pid(env, argv[1], &to)) {
+ return enif_make_badarg(env);
+ }
+ mti.p->blob = enif_make_tuple2(mti.p->dst_env,
+ enif_make_copy(mti.p->dst_env, argv[2]),
+ mti.p->blob);
+ res = enif_send(env, &to, mti.p->dst_env, mti.p->blob);
+ return enif_make_int(env,res);
+}
+
+void* threaded_sender(void *arg)
+{
+
+ union { void* vp; struct make_term_info* p; }mti;
+ mti.vp = arg;
+
+ enif_mutex_lock(mti.p->mtx);
+ while (!mti.p->send_it) {
+ enif_cond_wait(mti.p->cond, mti.p->mtx);
+ }
+ mti.p->send_it = 0;
+ enif_mutex_unlock(mti.p->mtx);
+ mti.p->send_res = enif_send(NULL, &mti.p->to_pid, mti.p->dst_env, mti.p->blob);
+ return NULL;
+}
+
+static ERL_NIF_TERM send_blob_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ ERL_NIF_TERM copy;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
+ || !enif_get_local_pid(env,argv[1], &mti.p->to_pid)) {
+ return enif_make_badarg(env);
+ }
+ copy = enif_make_copy(env, mti.p->blob);
+
+ mti.p->send_it = enif_is_identical(argv[2],atom_join);
+ if (enif_thread_create("nif_SUITE:send_from_thread", &mti.p->tid,
+ threaded_sender, mti.p, NULL) != 0) {
+ return enif_make_badarg(env);
+ }
+ if (enif_is_identical(argv[2],atom_join)) {
+ int err = enif_thread_join(mti.p->tid, NULL);
+ assert(err == 0);
+ return enif_make_tuple3(env, atom_ok, enif_make_int(env, mti.p->send_res), copy);
+ }
+ else {
+ enif_keep_resource(mti.vp);
+ return enif_make_tuple2(env, atom_ok, copy);
+ }
+}
+
+static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ int err;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
+ return enif_make_badarg(env);
+ }
+ enif_mutex_lock(mti.p->mtx);
+ mti.p->send_it = 1;
+ enif_cond_signal(mti.p->cond);
+ enif_mutex_unlock(mti.p->mtx);
+ err = enif_thread_join(mti.p->tid, NULL);
+ assert(err == 0);
+ enif_release_resource(mti.vp);
+ return enif_make_tuple2(env, atom_ok, enif_make_int(env, mti.p->send_res));
+}
+
+static ERL_NIF_TERM copy_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ union { void* vp; struct make_term_info* p; }mti;
+ if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
+ return enif_make_badarg(env);
+ }
+ return enif_make_copy(env, mti.p->blob);
+}
+
+static ERL_NIF_TERM send_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifEnv* menv;
+ ErlNifPid pid;
+ int ret;
+ if (!enif_get_local_pid(env, argv[0], &pid)) {
+ return enif_make_badarg(env);
+ }
+ menv = enif_alloc_env();
+ ret = enif_send(env, &pid, menv, enif_make_copy(menv, argv[1]));
+ enif_free_env(menv);
+ return enif_make_int(env, ret);
+}
static ErlNifFunc nif_funcs[] =
{
@@ -640,8 +1378,25 @@ static ErlNifFunc nif_funcs[] =
{"get_resource", 2, get_resource},
{"release_resource", 1, release_resource},
{"last_resource_dtor_call", 0, last_resource_dtor_call},
- {"make_new_resource", 2, make_new_resource}
-
+ {"make_new_resource", 2, make_new_resource},
+ {"check_is", 10, check_is},
+ {"length_test", 5, length_test},
+ {"make_atoms", 0, make_atoms},
+ {"make_strings", 0, make_strings},
+ {"make_new_resource", 2, make_new_resource},
+ {"make_new_resource_binary", 1, make_new_resource_binary},
+ {"send_list_seq", 2, send_list_seq},
+ {"send_new_blob", 2, send_new_blob},
+ {"alloc_msgenv", 0, alloc_msgenv},
+ {"clear_msgenv", 1, clear_msgenv},
+ {"grow_blob", 2, grow_blob},
+ {"grow_blob", 3, grow_blob},
+ {"send_blob", 2, send_blob},
+ {"send3_blob", 3, send3_blob},
+ {"send_blob_thread", 3, send_blob_thread},
+ {"join_send_thread", 1, join_send_thread},
+ {"copy_blob", 1, copy_blob},
+ {"send_term", 2, send_term}
};
ERL_NIF_INIT(nif_SUITE,nif_funcs,load,reload,upgrade,unload)
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.c b/erts/emulator/test/nif_SUITE_data/nif_mod.c
index c075b74c57..e32d10057c 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_mod.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.c
@@ -1,3 +1,21 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
#include "erl_nif.h"
#include <string.h>
#include <stdio.h>
@@ -24,6 +42,11 @@ static ERL_NIF_TERM am_resource_type;
static ERL_NIF_TERM am_resource_dtor_A;
static ERL_NIF_TERM am_resource_dtor_B;
+static NifModPrivData* priv_data(ErlNifEnv* env)
+{
+ return (NifModPrivData*) enif_priv_data(env);
+}
+
static void init(ErlNifEnv* env)
{
am_true = enif_make_atom(env, "true");
@@ -36,7 +59,7 @@ static void init(ErlNifEnv* env)
static void add_call_with_arg(ErlNifEnv* env, NifModPrivData* data, const char* func_name,
const char* arg, int arg_sz)
{
- CallInfo* call = enif_alloc(env, sizeof(CallInfo)+strlen(func_name) + arg_sz);
+ CallInfo* call = (CallInfo*)enif_alloc(sizeof(CallInfo)+strlen(func_name) + arg_sz);
strcpy(call->func_name, func_name);
call->lib_ver = NIF_LIB_VER;
call->static_cntA = ++static_cntA;
@@ -60,7 +83,7 @@ static void add_call(ErlNifEnv* env, NifModPrivData* data,const char* func_name)
add_call_with_arg(env, data, func_name, NULL, 0);
}
-#define ADD_CALL(FUNC_NAME) add_call(env, enif_priv_data(env),FUNC_NAME)
+#define ADD_CALL(FUNC_NAME) add_call(env, priv_data(env),FUNC_NAME)
#define STRINGIFY_(X) #X
#define STRINGIFY(X) STRINGIFY_(X)
@@ -69,56 +92,56 @@ static void resource_dtor_A(ErlNifEnv* env, void* a)
{
const char dtor_name[] = "resource_dtor_A_v" STRINGIFY(NIF_LIB_VER);
- add_call_with_arg(env, enif_priv_data(env), dtor_name,
- a, enif_sizeof_resource(env, a));
+ add_call_with_arg(env, priv_data(env), dtor_name, (const char*)a,
+ enif_sizeof_resource(a));
}
static void resource_dtor_B(ErlNifEnv* env, void* a)
{
- const char dtor_name[] = "resource_dtor_B_v" STRINGIFY(NIF_LIB_VER);
+ const char dtor_name[] = "resource_dtor_B_v" STRINGIFY(NIF_LIB_VER);
- add_call_with_arg(env, enif_priv_data(env), dtor_name,
- a, enif_sizeof_resource(env, a));
+ add_call_with_arg(env, priv_data(env), dtor_name, (const char*)a,
+ enif_sizeof_resource(a));
}
/* {resource_type, Ix|null, ErlNifResourceFlags in, "TypeName", dtor(A|B|null), ErlNifResourceFlags out}*/
static void open_resource_type(ErlNifEnv* env, ERL_NIF_TERM op_tpl)
{
- NifModPrivData* data = enif_priv_data(env);
+ NifModPrivData* data = priv_data(env);
const ERL_NIF_TERM* arr;
int arity;
char rt_name[30];
- union { enum ErlNifResourceFlags e; int i; } flags, exp_res, got_res;
+ union { ErlNifResourceFlags e; int i; } flags, exp_res, got_res;
unsigned ix;
ErlNifResourceDtor* dtor;
ErlNifResourceType* got_ptr;
CHECK(enif_get_tuple(env, op_tpl, &arity, &arr));
CHECK(arity == 6);
- CHECK(enif_is_identical(env, arr[0], am_resource_type));
+ CHECK(enif_is_identical(arr[0], am_resource_type));
CHECK(enif_get_int(env, arr[2], &flags.i));
CHECK(enif_get_string(env, arr[3], rt_name, sizeof(rt_name), ERL_NIF_LATIN1) > 0);
CHECK(enif_get_int(env, arr[5], &exp_res.i));
- if (enif_is_identical(env, arr[4], am_null)) {
+ if (enif_is_identical(arr[4], am_null)) {
dtor = NULL;
}
- else if (enif_is_identical(env, arr[4], am_resource_dtor_A)) {
+ else if (enif_is_identical(arr[4], am_resource_dtor_A)) {
dtor = resource_dtor_A;
}
else {
- CHECK(enif_is_identical(env, arr[4], am_resource_dtor_B));
+ CHECK(enif_is_identical(arr[4], am_resource_dtor_B));
dtor = resource_dtor_B;
}
- got_ptr = enif_open_resource_type(env, rt_name, dtor,
+ got_ptr = enif_open_resource_type(env, NULL, rt_name, dtor,
flags.e, &got_res.e);
if (enif_get_uint(env, arr[1], &ix) && ix < RT_MAX && got_ptr != NULL) {
data->rt_arr[ix] = got_ptr;
}
else {
- CHECK(enif_is_identical(env, arr[1], am_null));
+ CHECK(enif_is_identical(arr[1], am_null));
CHECK(got_ptr == NULL);
}
CHECK(got_res.e == exp_res.e);
@@ -126,7 +149,7 @@ static void open_resource_type(ErlNifEnv* env, ERL_NIF_TERM op_tpl)
static void do_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info)
{
- NifModPrivData* data = enif_priv_data(env);
+ NifModPrivData* data = priv_data(env);
ERL_NIF_TERM head, tail;
unsigned ix;
for (ix=0; ix<RT_MAX; ix++) {
@@ -140,17 +163,18 @@ static void do_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info)
CHECK(enif_is_empty_list(env, head));
}
-static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info)
{
NifModPrivData* data;
init(env);
- data = enif_alloc(env, sizeof(NifModPrivData));
+ data = (NifModPrivData*) enif_alloc(sizeof(NifModPrivData));
CHECK(data != NULL);
- *priv_data = data;
+ *priv = data;
data->mtx = enif_mutex_create("nif_mod_priv_data");
data->ref_cnt = 1;
data->call_history = NULL;
+
add_call(env, data, "load");
do_load_info(env, load_info);
@@ -158,39 +182,35 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
return 0;
}
-static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+static int reload(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info)
{
+ NifModPrivData* data = (NifModPrivData*) *priv;
init(env);
- add_call(env, *priv_data, "reload");
+ add_call(env, data, "reload");
do_load_info(env, load_info);
return 0;
}
-static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+static int upgrade(ErlNifEnv* env, void** priv, void** old_priv_data, ERL_NIF_TERM load_info)
{
- NifModPrivData* data = *old_priv_data;
+ NifModPrivData* data = (NifModPrivData*) *old_priv_data;
init(env);
add_call(env, data, "upgrade");
data->ref_cnt++;
- *priv_data = *old_priv_data;
+ *priv = *old_priv_data;
do_load_info(env, load_info);
return 0;
}
-static void unload(ErlNifEnv* env, void* priv_data)
+static void unload(ErlNifEnv* env, void* priv)
{
- NifModPrivData* data = priv_data;
+ NifModPrivData* data = (NifModPrivData*) priv;
+ int is_last;
add_call(env, data, "unload");
- enif_mutex_lock(data->mtx);
- if (--data->ref_cnt == 0) {
- enif_mutex_unlock(data->mtx);
- enif_mutex_destroy(data->mtx);
- enif_free(env, data);
- }
- enif_mutex_unlock(data->mtx);
+ NifModPrivData_release(data);
}
static ERL_NIF_TERM lib_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -202,12 +222,12 @@ static ERL_NIF_TERM lib_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM arg
static ERL_NIF_TERM get_priv_data_ptr(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ADD_CALL("get_priv_data_ptr");
- return enif_make_ulong(env, (unsigned long)enif_priv_data(env));
+ return enif_make_ulong(env, (unsigned long)priv_data(env));
}
static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- NifModPrivData* data = (NifModPrivData*) enif_priv_data(env);
+ NifModPrivData* data = priv_data(env);
ErlNifBinary ibin;
char* a;
ERL_NIF_TERM ret;
@@ -216,22 +236,22 @@ static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TE
|| !enif_inspect_binary(env, argv[1], &ibin)) {
return enif_make_badarg(env);
}
- a = enif_alloc_resource(env, data->rt_arr[ix], ibin.size);
+ a = (char*) enif_alloc_resource(data->rt_arr[ix], ibin.size);
memcpy(a, ibin.data, ibin.size);
ret = enif_make_resource(env, a);
- enif_release_resource(env, a);
+ enif_release_resource(a);
return ret;
}
static ERL_NIF_TERM get_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- NifModPrivData* data = (NifModPrivData*) enif_priv_data(env);
+ NifModPrivData* data = priv_data(env);
ErlNifBinary obin;
unsigned ix;
void* a;
if (!enif_get_uint(env, argv[0], &ix) || ix >= RT_MAX
|| !enif_get_resource(env, argv[1], data->rt_arr[ix], &a)
- || !enif_alloc_binary(env, enif_sizeof_resource(env, a), &obin)) {
+ || !enif_alloc_binary(enif_sizeof_resource(a), &obin)) {
return enif_make_badarg(env);
}
memcpy(obin.data, a, obin.size);
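Note: the nif_mod.c hunks above track the reworked resource API: enif_open_resource_type() gains a module argument (passed as NULL here) while enif_alloc_resource, enif_release_resource and enif_sizeof_resource drop the ErlNifEnv* argument. A condensed sketch of the same calls outside the test suite, assuming erl_nif.h and string.h are included (blob_rt, load_blob and make_blob are illustrative names, not from the patch):

static ErlNifResourceType* blob_rt;

static int load_blob(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
{
    blob_rt = enif_open_resource_type(env, NULL, "blob", NULL,
                                      ERL_NIF_RT_CREATE, NULL);
    return blob_rt == NULL;                       /* 0 means successful load */
}

static ERL_NIF_TERM make_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifBinary bin;
    void* obj;
    ERL_NIF_TERM res;
    if (!enif_inspect_binary(env, argv[0], &bin))
        return enif_make_badarg(env);
    obj = enif_alloc_resource(blob_rt, bin.size); /* no env argument anymore      */
    memcpy(obj, bin.data, bin.size);
    res = enif_make_resource(env, obj);
    enif_release_resource(obj);                   /* the term keeps the resource alive */
    return res;
}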
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.h b/erts/emulator/test/nif_SUITE_data/nif_mod.h
index 0eaf91d6e1..cd0ecf4b54 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_mod.h
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.h
@@ -20,3 +20,15 @@ typedef struct
ErlNifResourceType* rt_arr[RT_MAX];
}NifModPrivData;
+#define NifModPrivData_release(NMPD) \
+ do { \
+ int is_last; \
+ enif_mutex_lock((NMPD)->mtx); \
+ is_last = (--(NMPD)->ref_cnt == 0); \
+ enif_mutex_unlock((NMPD)->mtx); \
+ if (is_last) { \
+ enif_mutex_destroy((NMPD)->mtx); \
+ enif_free((NMPD)); \
+ } \
+ }while (0)
+
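Note: NifModPrivData_release replaces the open-coded unlock/destroy sequence removed from unload() in nif_mod.c above; the reference count lets either the unload callback or a late resource destructor perform the final cleanup. The same pattern written as a plain function, assuming the NifModPrivData fields shown above (the function name is illustrative):

static void nif_mod_priv_release(NifModPrivData* data)
{
    int is_last;
    enif_mutex_lock(data->mtx);
    is_last = (--data->ref_cnt == 0);             /* last user cleans up */
    enif_mutex_unlock(data->mtx);
    if (is_last) {
        enif_mutex_destroy(data->mtx);
        enif_free(data);
    }
}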
diff --git a/erts/emulator/test/obsolete_SUITE.erl b/erts/emulator/test/obsolete_SUITE.erl
deleted file mode 100644
index 33c4726699..0000000000
--- a/erts/emulator/test/obsolete_SUITE.erl
+++ /dev/null
@@ -1,123 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-
-
--module(obsolete_SUITE).
--author('[email protected]').
--compile(nowarn_obsolete_guard).
-
--export([all/1]).
-
--export([erl_threads/1]).
-
--include("test_server.hrl").
-
--define(DEFAULT_TIMETRAP_SECS, 240).
-
-all(doc) -> [];
-all(suite) ->
- case catch erlang:system_info(wordsize) of
- 4 -> [erl_threads];
- _ -> {skip, "Only expected to work on 32-bit architectures"}
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% %%
-%% Testcases %%
-%% %%
-
-erl_threads(suite) -> [];
-erl_threads(doc) -> [];
-erl_threads(Cfg) ->
- ?line case erlang:system_info(threads) of
- true ->
- ?line drv_case(Cfg, erl_threads);
- false ->
- ?line {skip, "Emulator not compiled with threads support"}
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% %%
-%% Internal functions %%
-%% %%
-
-drv_case(Config, CaseName) ->
- drv_case(Config, CaseName, "").
-
-drv_case(Config, CaseName, TimeTrap) when integer(TimeTrap) ->
- drv_case(Config, CaseName, "", TimeTrap);
-drv_case(Config, CaseName, Command) when list(Command) ->
- drv_case(Config, CaseName, Command, ?DEFAULT_TIMETRAP_SECS).
-
-drv_case(Config, CaseName, TimeTrap, Command) when list(Command),
- integer(TimeTrap) ->
- drv_case(Config, CaseName, Command, TimeTrap);
-drv_case(Config, CaseName, Command, TimeTrap) when list(Config),
- atom(CaseName),
- list(Command),
- integer(TimeTrap) ->
- case ?t:os_type() of
- {Family, _} when Family == unix; Family == win32 ->
- ?line run_drv_case(Config, CaseName, Command, TimeTrap);
- SkipOs ->
- ?line {skipped,
- lists:flatten(["Not run on "
- | io_lib:format("~p",[SkipOs])])}
- end.
-
-run_drv_case(Config, CaseName, Command, TimeTrap) ->
- ?line Dog = test_server:timetrap(test_server:seconds(TimeTrap)),
- ?line DataDir = ?config(data_dir,Config),
- case erl_ddll:load_driver(DataDir, CaseName) of
- ok -> ok;
- {error, Error} ->
- io:format("~s\n", [erl_ddll:format_error(Error)]),
- ?line ?t:fail()
- end,
- ?line Port = open_port({spawn, atom_to_list(CaseName)}, []),
- ?line true = is_port(Port),
- ?line Port ! {self(), {command, Command}},
- ?line Result = receive_drv_result(Port, CaseName),
- ?line Port ! {self(), close},
- ?line receive
- {Port, closed} ->
- ok
- end,
- ?line ok = erl_ddll:unload_driver(CaseName),
- ?line test_server:timetrap_cancel(Dog),
- ?line Result.
-
-receive_drv_result(Port, CaseName) ->
- ?line receive
- {print, Port, CaseName, Str} ->
- ?line ?t:format("~s", [Str]),
- ?line receive_drv_result(Port, CaseName);
- {'EXIT', Port, Error} ->
- ?line ?t:fail(Error);
- {'EXIT', error, Error} ->
- ?line ?t:fail(Error);
- {failed, Port, CaseName, Comment} ->
- ?line ?t:fail(Comment);
- {skipped, Port, CaseName, Comment} ->
- ?line {skipped, Comment};
- {succeeded, Port, CaseName, ""} ->
- ?line succeeded;
- {succeeded, Port, CaseName, Comment} ->
- ?line {comment, Comment}
- end.
diff --git a/erts/emulator/test/obsolete_SUITE_data/Makefile.src b/erts/emulator/test/obsolete_SUITE_data/Makefile.src
deleted file mode 100644
index d8e2b861c0..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/Makefile.src
+++ /dev/null
@@ -1,33 +0,0 @@
-# ``The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved via the world wide web at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# The Initial Developer of the Original Code is Ericsson Utvecklings AB.
-# Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
-# AB. All Rights Reserved.''
-#
-# $Id$
-#
-
-TEST_DRVS = erl_threads@dll@
-CC = @CC@
-LD = @LD@
-CFLAGS = @SHLIB_CFLAGS@ -I@erl_include@ @DEFS@
-SHLIB_EXTRA_LDLIBS = testcase_driver@obj@
-
-all: $(TEST_DRVS)
-
-@SHLIB_RULES@
-
-testcase_driver@obj@: testcase_driver.c testcase_driver.h
-$(TEST_DRVS): testcase_driver@obj@
-
-
-
diff --git a/erts/emulator/test/obsolete_SUITE_data/erl_threads.c b/erts/emulator/test/obsolete_SUITE_data/erl_threads.c
deleted file mode 100644
index 27a5163121..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/erl_threads.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#include "testcase_driver.h"
-
-#ifndef __WIN32__
-
-#define NO_OF_THREADS 2
-
-#include <unistd.h>
-#include <errno.h>
-
-static int die;
-static int cw_passed;
-static int res_tf0;
-static int res_tf1;
-static erl_mutex_t mtx;
-static erl_cond_t cnd;
-static erl_thread_t tid[NO_OF_THREADS];
-static int need_join[NO_OF_THREADS];
-
-typedef struct {
- int n;
-} thr_arg_t;
-
-
-static void *tf0(void *vta)
-{
- int r;
-
- if (((thr_arg_t *) vta)->n != 0)
- goto fail;
-
- r = erts_mutex_lock(mtx);
- if (r != 0) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_mutex_unlock(mtx);
- if (r != 0)
- goto fail;
-
- res_tf0 = 0;
-
- return (void *) &res_tf0;
-
- fail:
- return NULL;
-}
-
-
-static void *tf1(void *vta)
-{
- int r;
-
- if (((thr_arg_t *) vta)->n != 1)
- goto fail;
-
- r = erts_mutex_lock(mtx);
- if (r != 0) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_cond_wait(cnd, mtx);
- if (r != 0 || die) {
- erts_mutex_unlock(mtx);
- goto fail;
- }
-
- cw_passed++;
-
- r = erts_mutex_unlock(mtx);
- if (r != 0)
- goto fail;
-
- res_tf1 = 1;
-
- erts_thread_exit((void *) &res_tf1);
-
- res_tf1 = 4711;
-
- fail:
- return NULL;
-}
-
-#endif /* #ifndef __WIN32__ */
-
-void
-testcase_run(TestCaseState_t *tcs)
-{
-#ifdef __WIN32__
- testcase_skipped(tcs, "Nothing to test; not supported on windows.");
-#else
- int i, r;
- void *tres[NO_OF_THREADS];
- thr_arg_t ta[NO_OF_THREADS];
- erl_thread_t t1;
-
- die = 0;
- cw_passed = 0;
-
- for (i = 0; i < NO_OF_THREADS; i++)
- need_join[i] = 0;
-
- res_tf0 = 17;
- res_tf1 = 17;
-
- cnd = mtx = NULL;
-
- /* Create mutex and cond */
- mtx = erts_mutex_create();
- ASSERT(tcs, mtx);
- cnd = erts_cond_create();
- ASSERT(tcs, cnd);
-
- /* Create the threads */
- ta[0].n = 0;
- r = erts_thread_create(&tid[0], tf0, (void *) &ta[0], 0);
- ASSERT(tcs, r == 0);
- need_join[0] = 1;
-
- ta[1].n = 1;
- r = erts_thread_create(&tid[1], tf1, (void *) &ta[1], 0);
- ASSERT(tcs, r == 0);
- need_join[1] = 1;
-
- /* Make sure the threads waits on cond wait */
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 0, (void) erts_mutex_unlock(mtx));
-
-
- /* Let one thread pass one cond wait */
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 1, (void) erts_mutex_unlock(mtx));
-
-
- /* Let both threads pass one cond wait */
- r = erts_cond_broadcast(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 3, (void) erts_mutex_unlock(mtx));
-
-
- /* Let the thread that only have passed one cond wait pass the other one */
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- sleep(1);
-
- r = erts_mutex_lock(mtx);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- ASSERT_CLNUP(tcs, cw_passed == 4, (void) erts_mutex_unlock(mtx));
-
- /* Both threads should have passed both cond waits and exited;
- join them and check returned values */
-
- r = erts_thread_join(tid[0], &tres[0]);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
- need_join[0] = 0;
-
- ASSERT_CLNUP(tcs, tres[0] == &res_tf0, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs, res_tf0 == 0, (void) erts_mutex_unlock(mtx));
-
- r = erts_thread_join(tid[1], &tres[1]);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
- need_join[1] = 0;
-
- ASSERT_CLNUP(tcs, tres[1] == &res_tf1, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs, res_tf1 == 1, (void) erts_mutex_unlock(mtx));
-
- /* Test signaling when noone waits */
-
- r = erts_cond_signal(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- /* Test broadcasting when noone waits */
-
- r = erts_cond_broadcast(cnd);
- ASSERT_CLNUP(tcs, r == 0, (void) erts_mutex_unlock(mtx));
-
- /* erts_cond_timedwait() not supported anymore */
- r = erts_cond_timedwait(cnd, mtx, 1000);
- ASSERT_CLNUP(tcs, r != 0, (void) erts_mutex_unlock(mtx));
- ASSERT_CLNUP(tcs,
- strcmp(erl_errno_id(r), "enotsup") == 0,
- (void) erts_mutex_unlock(mtx));
-
- r = erts_mutex_unlock(mtx);
- ASSERT(tcs, r == 0);
-
- r = erts_mutex_destroy(mtx);
- ASSERT(tcs, r == 0);
- mtx = NULL;
-
- r = erts_cond_destroy(cnd);
- ASSERT(tcs, r == 0);
- cnd = NULL;
-
- /* ... */
- t1 = erts_thread_self();
-
- if (cw_passed == 4711) {
- /* We don't want to execute this just check that the
- symbol/symbols is/are defined */
- erts_thread_kill(t1);
- }
-
-#endif /* #ifndef __WIN32__ */
-}
-
-char *
-testcase_name(void)
-{
- return "erl_threads";
-}
-
-void
-testcase_cleanup(TestCaseState_t *tcs)
-{
- int i;
- for (i = 0; i < NO_OF_THREADS; i++) {
- if (need_join[i]) {
- erts_mutex_lock(mtx);
- die = 1;
- erts_cond_broadcast(cnd);
- erts_mutex_unlock(mtx);
- erts_thread_join(tid[1], NULL);
- }
- }
- if (mtx)
- erts_mutex_destroy(mtx);
- if (cnd)
- erts_cond_destroy(cnd);
-}
-
diff --git a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c b/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c
deleted file mode 100644
index 99d5adb041..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#include "testcase_driver.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <setjmp.h>
-#include <string.h>
-
-#ifdef __WIN32__
-#undef HAVE_VSNPRINTF
-#define HAVE_VSNPRINTF 1
-#define vsnprintf _vsnprintf
-#endif
-
-#ifndef HAVE_VSNPRINTF
-#define HAVE_VSNPRINTF 0
-#endif
-
-#define COMMENT_BUF_SZ 4096
-
-#define TESTCASE_FAILED 0
-#define TESTCASE_SKIPPED 1
-#define TESTCASE_SUCCEEDED 2
-
-typedef struct {
- TestCaseState_t visible;
- int port;
- int result;
- jmp_buf done_jmp_buf;
- char *comment;
- char comment_buf[COMMENT_BUF_SZ];
-} InternalTestCaseState_t;
-
-long testcase_drv_start(int port, char *command);
-int testcase_drv_stop(long drv_data);
-int testcase_drv_run(long drv_data, char *buf, int len);
-
-static DriverEntry testcase_drv_entry = {
- NULL,
- testcase_drv_start,
- testcase_drv_stop,
- testcase_drv_run
-};
-
-
-int DRIVER_INIT(testcase_drv)(void *arg)
-{
- testcase_drv_entry.driver_name = testcase_name();
- return (int) &testcase_drv_entry;
-}
-
-long
-testcase_drv_start(int port, char *command)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *)
- driver_alloc(sizeof(InternalTestCaseState_t));
- if (!itcs) {
- return -1;
- }
-
- itcs->visible.testcase_name = testcase_name();
- itcs->visible.extra = NULL;
- itcs->port = port;
- itcs->result = TESTCASE_FAILED;
- itcs->comment = "";
-
- return (long) itcs;
-}
-
-int
-testcase_drv_stop(long drv_data)
-{
- testcase_cleanup((TestCaseState_t *) drv_data);
- driver_free((void *) drv_data);
- return 0;
-}
-
-int
-testcase_drv_run(long drv_data, char *buf, int len)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) drv_data;
- DriverTermData result_atom;
- DriverTermData msg[12];
-
- itcs->visible.command = buf;
- itcs->visible.command_len = len;
-
- if (setjmp(itcs->done_jmp_buf) == 0) {
- testcase_run((TestCaseState_t *) itcs);
- itcs->result = TESTCASE_SUCCEEDED;
- }
-
- switch (itcs->result) {
- case TESTCASE_SUCCEEDED:
- result_atom = driver_mk_atom("succeeded");
- break;
- case TESTCASE_SKIPPED:
- result_atom = driver_mk_atom("skipped");
- break;
- case TESTCASE_FAILED:
- default:
- result_atom = driver_mk_atom("failed");
- break;
- }
-
- msg[0] = ERL_DRV_ATOM;
- msg[1] = (DriverTermData) result_atom;
-
- msg[2] = ERL_DRV_PORT;
- msg[3] = driver_mk_port(itcs->port);
-
- msg[4] = ERL_DRV_ATOM;
- msg[5] = driver_mk_atom(itcs->visible.testcase_name);
-
- msg[6] = ERL_DRV_STRING;
- msg[7] = (DriverTermData) itcs->comment;
- msg[8] = (DriverTermData) strlen(itcs->comment);
-
- msg[9] = ERL_DRV_TUPLE;
- msg[10] = (DriverTermData) 4;
-
- driver_output_term(itcs->port, msg, 11);
- return 0;
-}
-
-int
-testcase_assertion_failed(TestCaseState_t *tcs,
- char *file, int line, char *assertion)
-{
- testcase_failed(tcs, "%s:%d: Assertion failed: \"%s\"",
- file, line, assertion);
- return 0;
-}
-
-void
-testcase_printf(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- DriverTermData msg[12];
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- msg[0] = ERL_DRV_ATOM;
- msg[1] = (DriverTermData) driver_mk_atom("print");
-
- msg[2] = ERL_DRV_PORT;
- msg[3] = driver_mk_port(itcs->port);
-
- msg[4] = ERL_DRV_ATOM;
- msg[5] = driver_mk_atom(itcs->visible.testcase_name);
-
- msg[6] = ERL_DRV_STRING;
- msg[7] = (DriverTermData) itcs->comment_buf;
- msg[8] = (DriverTermData) strlen(itcs->comment_buf);
-
- msg[9] = ERL_DRV_TUPLE;
- msg[10] = (DriverTermData) 4;
-
- driver_output_term(itcs->port, msg, 11);
-}
-
-
-void testcase_succeeded(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_SUCCEEDED;
- itcs->comment = itcs->comment_buf;
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void testcase_skipped(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_SKIPPED;
- itcs->comment = itcs->comment_buf;
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void testcase_failed(TestCaseState_t *tcs, char *frmt, ...)
-{
- InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) tcs;
- char buf[10];
- size_t bufsz = sizeof(buf);
- va_list va;
- va_start(va, frmt);
-#if HAVE_VSNPRINTF
- vsnprintf(itcs->comment_buf, COMMENT_BUF_SZ, frmt, va);
-#else
- vsprintf(itcs->comment_buf, frmt, va);
-#endif
- va_end(va);
-
- itcs->result = TESTCASE_FAILED;
- itcs->comment = itcs->comment_buf;
-
- if (erl_drv_getenv("ERL_ABORT_ON_FAILURE", buf, &bufsz) == 0
- && strcmp("true", buf) == 0) {
- fprintf(stderr, "Testcase \"%s\" failed: %s\n",
- itcs->visible.testcase_name, itcs->comment);
- abort();
- }
-
- longjmp(itcs->done_jmp_buf, 1);
-}
-
-void *testcase_alloc(size_t size)
-{
- return driver_alloc(size);
-}
-
-void *testcase_realloc(void *ptr, size_t size)
-{
- return driver_realloc(ptr, size);
-}
-
-void testcase_free(void *ptr)
-{
- driver_free(ptr);
-}
diff --git a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h b/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h
deleted file mode 100644
index 3d85ca6df0..0000000000
--- a/erts/emulator/test/obsolete_SUITE_data/testcase_driver.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* ``The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved via the world wide web at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
- * Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
- * AB. All Rights Reserved.''
- *
- * $Id$
- */
-
-#ifndef TESTCASE_DRIVER_H__
-#define TESTCASE_DRIVER_H__
-
-#include "obsolete/driver.h"
-#include <stdlib.h>
-
-typedef struct {
- char *testcase_name;
- char *command;
- int command_len;
- void *extra;
-} TestCaseState_t;
-
-#define ASSERT_CLNUP(TCS, B, CLN) \
-do { \
- if (!(B)) { \
- CLN; \
- testcase_assertion_failed((TCS), __FILE__, __LINE__, #B); \
- } \
-} while (0)
-
-#define ASSERT(TCS, B) ASSERT_CLNUP(TCS, B, (void) 0)
-
-void testcase_printf(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_succeeded(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_skipped(TestCaseState_t *tcs, char *frmt, ...);
-void testcase_failed(TestCaseState_t *tcs, char *frmt, ...);
-int testcase_assertion_failed(TestCaseState_t *tcs, char *file, int line,
- char *assertion);
-void *testcase_alloc(size_t size);
-void *testcase_realloc(void *ptr, size_t size);
-void testcase_free(void *ptr);
-
-
-char *testcase_name(void);
-void testcase_run(TestCaseState_t *tcs);
-void testcase_cleanup(TestCaseState_t *tcs);
-
-#endif
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index b9100738e4..77fa75b78f 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -88,7 +88,7 @@
otp_3906/1, otp_4389/1, win_massive/1, win_massive_client/1,
mix_up_ports/1, otp_5112/1, otp_5119/1, otp_6224/1,
exit_status_multi_scheduling_block/1, ports/1,
- spawn_driver/1,spawn_executable/1,
+ spawn_driver/1, spawn_executable/1, close_deaf_port/1,
unregister_name/1]).
-export([]).
@@ -113,7 +113,7 @@ all(suite) ->
otp_3906, otp_4389, win_massive, mix_up_ports,
otp_5112, otp_5119,
exit_status_multi_scheduling_block,
- ports, spawn_driver, spawn_executable,
+ ports, spawn_driver, spawn_executable, close_deaf_port,
unregister_name
].
@@ -878,12 +878,20 @@ env2(Config) ->
"nisse" = os:getenv(Long)
end),
-
+
?line env_slave(Temp, [{"must_define_something","some_value"},
- {"certainly_not_existing",false},
+ {"certainly_not_existing",false},
+ {"ends_with_equal", "value="},
{Long,false},
{"glurf","a glorfy string"}]),
+    %% A lot of non-existing variables (mingled with existing)
+ NotExistingList = [{lists:flatten(io_lib:format("V~p_not_existing",[X])),false}
+ || X <- lists:seq(1,150)],
+ ExistingList = [{lists:flatten(io_lib:format("V~p_existing",[X])),"a_value"}
+ || X <- lists:seq(1,150)],
+ ?line env_slave(Temp, lists:sort(ExistingList ++ NotExistingList)),
+
?line test_server:timetrap_cancel(Dog),
ok.
@@ -2292,3 +2300,12 @@ load_driver(Dir, Driver) ->
io:format("~s\n", [erl_ddll:format_error(Error)]),
Res
end.
+
+
+close_deaf_port(doc) -> ["Send data to a port program that does not read it, then close the port."];
+close_deaf_port(suite) -> [];
+close_deaf_port(Config) when is_list(Config) ->
+ Port = open_port({spawn,"sleep 999999"},[]),
+ erlang:port_command(Port,"Hello, can you hear me!?!?"),
+ port_close(Port),
+ ok.
diff --git a/erts/emulator/test/receive_SUITE.erl b/erts/emulator/test/receive_SUITE.erl
new file mode 100644
index 0000000000..40ebf2bd21
--- /dev/null
+++ b/erts/emulator/test/receive_SUITE.erl
@@ -0,0 +1,113 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(receive_SUITE).
+
+%% Tests receive after.
+
+-include("test_server.hrl").
+
+-export([all/1,
+ call_with_huge_message_queue/1,receive_in_between/1]).
+
+-export([init_per_testcase/2,fin_per_testcase/2]).
+
+all(suite) ->
+ [call_with_huge_message_queue,receive_in_between].
+
+init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
+ Dog=?t:timetrap(?t:minutes(3)),
+ [{watchdog, Dog}|Config].
+
+fin_per_testcase(_Func, Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog).
+
+call_with_huge_message_queue(Config) when is_list(Config) ->
+ ?line Pid = spawn_link(fun echo_loop/0),
+
+ ?line {Time,ok} = tc(fun() -> calls(10, Pid) end),
+
+ ?line [self() ! {msg,N} || N <- lists:seq(1, 500000)],
+ erlang:garbage_collect(),
+ ?line {NewTime,ok} = tc(fun() -> calls(10, Pid) end),
+ io:format("Time for empty message queue: ~p", [Time]),
+ io:format("Time for huge message queue: ~p", [NewTime]),
+
+ case (NewTime+1) / (Time+1) of
+ Q when Q < 10 ->
+ ok;
+ Q ->
+ io:format("Q = ~p", [Q]),
+ ?line ?t:fail()
+ end,
+ ok.
+
+calls(0, _) -> ok;
+calls(N, Pid) ->
+ {ok,{ultimate_answer,42}} = call(Pid, {ultimate_answer,42}),
+ calls(N-1, Pid).
+
+call(Pid, Msg) ->
+ Mref = erlang:monitor(process, Pid),
+ Pid ! {Mref,{self(),Msg}},
+ receive
+ {Mref, Reply} ->
+ erlang:demonitor(Mref, [flush]),
+ {ok, Reply};
+ {'DOWN', Mref, _, _, Reason} ->
+ exit(Reason)
+ end.
+
+receive_in_between(Config) when is_list(Config) ->
+ ?line Pid = spawn_link(fun echo_loop/0),
+ ?line [{ok,{a,b}} = call2(Pid, {a,b}) || _ <- lists:seq(1, 100000)],
+ ok.
+
+call2(Pid, Msg) ->
+ self() ! dummy,
+ Mref = erlang:monitor(process, Pid),
+ Pid ! {Mref,{self(),Msg}},
+ receive_one(),
+ receive
+ {Mref,Reply} ->
+ erlang:demonitor(Mref, [flush]),
+ {ok,Reply};
+ {'DOWN',Mref,_,_,Reason} ->
+ exit(Reason)
+ end.
+
+receive_one() ->
+ receive
+ dummy -> ok
+ end.
+
+%%%
+%%% Common helpers.
+%%%
+
+echo_loop() ->
+ receive
+ {Ref,{Pid,Msg}} ->
+ Pid ! {Ref,Msg},
+ echo_loop()
+ end.
+
+tc(Fun) ->
+ timer:tc(erlang, apply, [Fun,[]]).
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index c9101b77c2..4f4458802c 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -33,7 +33,7 @@
-include("test_server.hrl").
%-compile(export_all).
--export([all/1, init_per_testcase/2, fin_per_testcase/2]).
+-export([all/1, init_per_testcase/2, fin_per_testcase/2, end_per_suite/1]).
-export([equal/1,
few_low/1,
@@ -49,7 +49,8 @@
cpu_topology/1,
sct_cmd/1,
sbt_cmd/1,
- scheduler_suspend/1]).
+ scheduler_suspend/1,
+ reader_groups/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(10)).
@@ -67,7 +68,8 @@ all(suite) ->
equal_with_high_max,
bound_process,
scheduler_bind,
- scheduler_suspend].
+ scheduler_suspend,
+ reader_groups].
init_per_testcase(Case, Config) when is_list(Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
@@ -81,6 +83,10 @@ fin_per_testcase(_Case, Config) when is_list(Config) ->
?t:timetrap_cancel(Dog),
ok.
+end_per_suite(Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
+ Config.
+
-define(ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED, (2000*2000)).
-define(DEFAULT_TEST_REDS_PER_SCHED, 200000000).
@@ -902,7 +908,8 @@ scheduler_suspend_test(Config, Schedulers) ->
?line [SState] = mcall(Node, [fun () ->
erlang:system_info(schedulers_state)
end]),
- ?line {Sched, _, _} = SState,
+ ?line ?t:format("SState=~p~n", [SState]),
+ ?line {Sched, SchedOnln, _SchedAvail} = SState,
?line true = is_integer(Sched),
?line [ok] = mcall(Node, [fun () -> sst0_loop(300) end]),
?line [ok] = mcall(Node, [fun () -> sst1_loop(300) end]),
@@ -914,6 +921,14 @@ scheduler_suspend_test(Config, Schedulers) ->
fun () -> sst2_loop(200) end,
fun () -> sst3_loop(Sched, 200) end]),
?line [SState] = mcall(Node, [fun () ->
+ case Sched == SchedOnln of
+ false ->
+ Sched = erlang:system_flag(
+ schedulers_online,
+ SchedOnln);
+ true ->
+ ok
+ end,
erlang:system_info(schedulers_state)
end]),
?line stop_node(Node),
@@ -956,12 +971,361 @@ sst3_loop(S, N) ->
erlang:system_flag(schedulers_online, 1),
erlang:system_flag(schedulers_online, S),
sst3_loop(S, N-1).
+
+reader_groups(Config) when is_list(Config) ->
+ %% White box testing. These results are correct, but other results
+ %% could be too...
+
+ %% The actual tilepro64 topology
+ CPUT0 = [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}],
+
+ ?line [{0,1},{1,1},{2,1},{3,3},{4,3},{5,3},{6,3},{7,3},{8,1},{9,1},{10,1},
+ {11,1},{12,3},{13,3},{14,4},{15,4},{16,2},{17,2},{18,2},{19,2},
+ {20,4},{21,4},{22,4},{23,4},{24,2},{25,2},{26,7},{27,2},{28,4},
+ {29,2},{30,4},{31,5},{32,7},{33,7},{34,7},{35,7},{36,5},{37,5},
+ {38,5},{39,7},{40,7},{41,8},{42,8},{43,8},{44,5},{45,5},{46,5},
+ {47,6},{48,8},{49,8},{50,8},{51,6},{52,6},{53,6},{54,6},{55,6},
+ {58,8},{60,6},{61,6}]
+ = reader_groups_map(CPUT0, 8),
+
+ CPUT1 = [n([p([c([t(l(0)),t(l(1)),t(l(2)),t(l(3))]),
+ c([t(l(4)),t(l(5)),t(l(6)),t(l(7))]),
+ c([t(l(8)),t(l(9)),t(l(10)),t(l(11))]),
+ c([t(l(12)),t(l(13)),t(l(14)),t(l(15))])]),
+ p([c([t(l(16)),t(l(17)),t(l(18)),t(l(19))]),
+ c([t(l(20)),t(l(21)),t(l(22)),t(l(23))]),
+ c([t(l(24)),t(l(25)),t(l(26)),t(l(27))]),
+ c([t(l(28)),t(l(29)),t(l(30)),t(l(31))])])]),
+ n([p([c([t(l(32)),t(l(33)),t(l(34)),t(l(35))]),
+ c([t(l(36)),t(l(37)),t(l(38)),t(l(39))]),
+ c([t(l(40)),t(l(41)),t(l(42)),t(l(43))]),
+ c([t(l(44)),t(l(45)),t(l(46)),t(l(47))])]),
+ p([c([t(l(48)),t(l(49)),t(l(50)),t(l(51))]),
+ c([t(l(52)),t(l(53)),t(l(54)),t(l(55))]),
+ c([t(l(56)),t(l(57)),t(l(58)),t(l(59))]),
+ c([t(l(60)),t(l(61)),t(l(62)),t(l(63))])])]),
+ n([p([c([t(l(64)),t(l(65)),t(l(66)),t(l(67))]),
+ c([t(l(68)),t(l(69)),t(l(70)),t(l(71))]),
+ c([t(l(72)),t(l(73)),t(l(74)),t(l(75))]),
+ c([t(l(76)),t(l(77)),t(l(78)),t(l(79))])]),
+ p([c([t(l(80)),t(l(81)),t(l(82)),t(l(83))]),
+ c([t(l(84)),t(l(85)),t(l(86)),t(l(87))]),
+ c([t(l(88)),t(l(89)),t(l(90)),t(l(91))]),
+ c([t(l(92)),t(l(93)),t(l(94)),t(l(95))])])]),
+ n([p([c([t(l(96)),t(l(97)),t(l(98)),t(l(99))]),
+ c([t(l(100)),t(l(101)),t(l(102)),t(l(103))]),
+ c([t(l(104)),t(l(105)),t(l(106)),t(l(107))]),
+ c([t(l(108)),t(l(109)),t(l(110)),t(l(111))])]),
+ p([c([t(l(112)),t(l(113)),t(l(114)),t(l(115))]),
+ c([t(l(116)),t(l(117)),t(l(118)),t(l(119))]),
+ c([t(l(120)),t(l(121)),t(l(122)),t(l(123))]),
+ c([t(l(124)),t(l(125)),t(l(126)),t(l(127))])])])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},
+ {10,3},{11,3},{12,4},{13,4},{14,4},{15,4},{16,5},{17,5},{18,5},
+ {19,5},{20,6},{21,6},{22,6},{23,6},{24,7},{25,7},{26,7},{27,7},
+ {28,8},{29,8},{30,8},{31,8},{32,9},{33,9},{34,9},{35,9},{36,10},
+ {37,10},{38,10},{39,10},{40,11},{41,11},{42,11},{43,11},{44,12},
+ {45,12},{46,12},{47,12},{48,13},{49,13},{50,13},{51,13},{52,14},
+ {53,14},{54,14},{55,14},{56,15},{57,15},{58,15},{59,15},{60,16},
+ {61,16},{62,16},{63,16},{64,17},{65,17},{66,17},{67,17},{68,18},
+ {69,18},{70,18},{71,18},{72,19},{73,19},{74,19},{75,19},{76,20},
+ {77,20},{78,20},{79,20},{80,21},{81,21},{82,21},{83,21},{84,22},
+ {85,22},{86,22},{87,22},{88,23},{89,23},{90,23},{91,23},{92,24},
+ {93,24},{94,24},{95,24},{96,25},{97,25},{98,25},{99,25},{100,26},
+ {101,26},{102,26},{103,26},{104,27},{105,27},{106,27},{107,27},
+ {108,28},{109,28},{110,28},{111,28},{112,29},{113,29},{114,29},
+ {115,29},{116,30},{117,30},{118,30},{119,30},{120,31},{121,31},
+ {122,31},{123,31},{124,32},{125,32},{126,32},{127,32}]
+ = reader_groups_map(CPUT1, 128),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,1},{17,1},{18,1},{19,1},
+ {20,1},{21,1},{22,1},{23,1},{24,1},{25,1},{26,1},{27,1},{28,1},
+ {29,1},{30,1},{31,1},{32,1},{33,1},{34,1},{35,1},{36,1},{37,1},
+ {38,1},{39,1},{40,1},{41,1},{42,1},{43,1},{44,1},{45,1},{46,1},
+ {47,1},{48,1},{49,1},{50,1},{51,1},{52,1},{53,1},{54,1},{55,1},
+ {56,1},{57,1},{58,1},{59,1},{60,1},{61,1},{62,1},{63,1},{64,2},
+ {65,2},{66,2},{67,2},{68,2},{69,2},{70,2},{71,2},{72,2},{73,2},
+ {74,2},{75,2},{76,2},{77,2},{78,2},{79,2},{80,2},{81,2},{82,2},
+ {83,2},{84,2},{85,2},{86,2},{87,2},{88,2},{89,2},{90,2},{91,2},
+ {92,2},{93,2},{94,2},{95,2},{96,2},{97,2},{98,2},{99,2},{100,2},
+ {101,2},{102,2},{103,2},{104,2},{105,2},{106,2},{107,2},{108,2},
+ {109,2},{110,2},{111,2},{112,2},{113,2},{114,2},{115,2},{116,2},
+ {117,2},{118,2},{119,2},{120,2},{121,2},{122,2},{123,2},{124,2},
+ {125,2},{126,2},{127,2}]
+ = reader_groups_map(CPUT1, 2),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,2},{5,2},{6,2},{7,2},{8,3},{9,3},{10,3},
+ {11,3},{12,3},{13,3},{14,3},{15,3},{16,4},{17,4},{18,4},{19,4},
+ {20,4},{21,4},{22,4},{23,4},{24,5},{25,5},{26,5},{27,5},{28,5},
+ {29,5},{30,5},{31,5},{32,6},{33,6},{34,6},{35,6},{36,6},{37,6},
+ {38,6},{39,6},{40,7},{41,7},{42,7},{43,7},{44,7},{45,7},{46,7},
+ {47,7},{48,8},{49,8},{50,8},{51,8},{52,8},{53,8},{54,8},{55,8},
+ {56,9},{57,9},{58,9},{59,9},{60,9},{61,9},{62,9},{63,9},{64,10},
+ {65,10},{66,10},{67,10},{68,10},{69,10},{70,10},{71,10},{72,11},
+ {73,11},{74,11},{75,11},{76,11},{77,11},{78,11},{79,11},{80,12},
+ {81,12},{82,12},{83,12},{84,12},{85,12},{86,12},{87,12},{88,13},
+ {89,13},{90,13},{91,13},{92,13},{93,13},{94,13},{95,13},{96,14},
+ {97,14},{98,14},{99,14},{100,14},{101,14},{102,14},{103,14},
+ {104,15},{105,15},{106,15},{107,15},{108,15},{109,15},{110,15},
+ {111,15},{112,16},{113,16},{114,16},{115,16},{116,16},{117,16},
+ {118,16},{119,16},{120,17},{121,17},{122,17},{123,17},{124,17},
+ {125,17},{126,17},{127,17}]
+ = reader_groups_map(CPUT1, 17),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1},{7,1},{8,1},{9,1},{10,1},
+ {11,1},{12,1},{13,1},{14,1},{15,1},{16,2},{17,2},{18,2},{19,2},
+ {20,2},{21,2},{22,2},{23,2},{24,2},{25,2},{26,2},{27,2},{28,2},
+ {29,2},{30,2},{31,2},{32,3},{33,3},{34,3},{35,3},{36,3},{37,3},
+ {38,3},{39,3},{40,3},{41,3},{42,3},{43,3},{44,3},{45,3},{46,3},
+ {47,3},{48,4},{49,4},{50,4},{51,4},{52,4},{53,4},{54,4},{55,4},
+ {56,4},{57,4},{58,4},{59,4},{60,4},{61,4},{62,4},{63,4},{64,5},
+ {65,5},{66,5},{67,5},{68,5},{69,5},{70,5},{71,5},{72,5},{73,5},
+ {74,5},{75,5},{76,5},{77,5},{78,5},{79,5},{80,6},{81,6},{82,6},
+ {83,6},{84,6},{85,6},{86,6},{87,6},{88,6},{89,6},{90,6},{91,6},
+ {92,6},{93,6},{94,6},{95,6},{96,7},{97,7},{98,7},{99,7},{100,7},
+ {101,7},{102,7},{103,7},{104,7},{105,7},{106,7},{107,7},{108,7},
+ {109,7},{110,7},{111,7},{112,7},{113,7},{114,7},{115,7},{116,7},
+ {117,7},{118,7},{119,7},{120,7},{121,7},{122,7},{123,7},{124,7},
+ {125,7},{126,7},{127,7}]
+ = reader_groups_map(CPUT1, 7),
+
+ ?line CPUT2 = [p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))]),
+ p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},{6,2},{7,2},{8,2},{9,2},
+ {10,3},
+ {11,4},{12,4},{13,4},
+ {14,5},{15,5}] = reader_groups_map(CPUT2, 5),
+
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,5},{13,5},
+ {14,6},{15,6}] = reader_groups_map(CPUT2, 6),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,7}] = reader_groups_map(CPUT2, 7),
+
+ ?line [{0,1},{1,1},{2,2},{3,2},{4,2},
+ {5,3},{6,3},{7,3},{8,3},{9,3},
+ {10,4},
+ {11,5},{12,6},{13,6},
+ {14,7},{15,8}] = reader_groups_map(CPUT2, 8),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,7},
+ {14,8},{15,9}] = reader_groups_map(CPUT2, 9),
+
+ ?line [{0,1},{1,2},{2,2},{3,3},{4,3},
+ {5,4},{6,4},{7,4},{8,4},{9,4},
+ {10,5},
+ {11,6},{12,7},{13,8},
+ {14,9},{15,10}] = reader_groups_map(CPUT2, 10),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,4},
+ {5,5},{6,5},{7,5},{8,5},{9,5},
+ {10,6},
+ {11,7},{12,8},{13,9},
+ {14,10},{15,11}] = reader_groups_map(CPUT2, 11),
+
+ ?line [{0,1},{1,2},{2,3},{3,4},{4,5},
+ {5,6},{6,6},{7,6},{8,6},{9,6},
+ {10,7},
+ {11,8},{12,9},{13,10},
+ {14,11},{15,12}] = reader_groups_map(CPUT2, 100),
+
+ CPUT3 = [p([t(l(5)),t(l(6)),t(l(7)),t(l(8)),t(l(9))]),
+ p([t(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13))]),
+ p([c(l(14)),c(l(15))]),
+ p([c(l(0)),c(l(1)),c(l(2)),c(l(3)),c(l(4))])],
+
+ ?line [{0,5},{1,5},{2,6},{3,6},{4,6},
+ {5,1},{6,1},{7,1},{8,1},{9,1},
+ {10,2},{11,3},{12,3},{13,3},
+ {14,4},{15,4}] = reader_groups_map(CPUT3, 6),
+
+ CPUT4 = [p([t(l(0)),t(l(1)),t(l(2)),t(l(3)),t(l(4))]),
+ p([t(l(5))]),
+ p([c(l(6)),c(l(7)),c(l(8))]),
+ p([c(l(9)),c(l(10))]),
+ p([c(l(11)),c(l(12)),c(l(13)),c(l(14)),c(l(15))])],
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,3},{8,3},
+ {9,4},{10,4},
+ {11,5},{12,5},{13,6},{14,6},{15,6}] = reader_groups_map(CPUT4, 6),
+
+ ?line [{0,1},{1,1},{2,1},{3,1},{4,1},
+ {5,2},
+ {6,3},{7,4},{8,4},
+ {9,5},{10,5},
+ {11,6},{12,6},{13,7},{14,7},{15,7}] = reader_groups_map(CPUT4, 7),
+
+ ?line [{0,1},{65535,2}] = reader_groups_map([c(l(0)),c(l(65535))], 10),
+
+ ?line ok.
+reader_groups_map(CPUT, Groups) ->
+ Old = erlang:system_info({cpu_topology, defined}),
+ erlang:system_flag(cpu_topology, CPUT),
+ enable_internal_state(),
+ Res = erts_debug:get_internal_state({reader_groups_map, Groups}),
+ erlang:system_flag(cpu_topology, Old),
+ lists:sort(Res).
+
%%
%% Utils
%%
+tilera_cpu_topology() ->
+ [{processor,[{node,[{core,{logical,0}},
+ {core,{logical,1}},
+ {core,{logical,2}},
+ {core,{logical,8}},
+ {core,{logical,9}},
+ {core,{logical,10}},
+ {core,{logical,11}},
+ {core,{logical,16}},
+ {core,{logical,17}},
+ {core,{logical,18}},
+ {core,{logical,19}},
+ {core,{logical,24}},
+ {core,{logical,25}},
+ {core,{logical,27}},
+ {core,{logical,29}}]},
+ {node,[{core,{logical,3}},
+ {core,{logical,4}},
+ {core,{logical,5}},
+ {core,{logical,6}},
+ {core,{logical,7}},
+ {core,{logical,12}},
+ {core,{logical,13}},
+ {core,{logical,14}},
+ {core,{logical,15}},
+ {core,{logical,20}},
+ {core,{logical,21}},
+ {core,{logical,22}},
+ {core,{logical,23}},
+ {core,{logical,28}},
+ {core,{logical,30}}]},
+ {node,[{core,{logical,31}},
+ {core,{logical,36}},
+ {core,{logical,37}},
+ {core,{logical,38}},
+ {core,{logical,44}},
+ {core,{logical,45}},
+ {core,{logical,46}},
+ {core,{logical,47}},
+ {core,{logical,51}},
+ {core,{logical,52}},
+ {core,{logical,53}},
+ {core,{logical,54}},
+ {core,{logical,55}},
+ {core,{logical,60}},
+ {core,{logical,61}}]},
+ {node,[{core,{logical,26}},
+ {core,{logical,32}},
+ {core,{logical,33}},
+ {core,{logical,34}},
+ {core,{logical,35}},
+ {core,{logical,39}},
+ {core,{logical,40}},
+ {core,{logical,41}},
+ {core,{logical,42}},
+ {core,{logical,43}},
+ {core,{logical,48}},
+ {core,{logical,49}},
+ {core,{logical,50}},
+ {core,{logical,58}}]}]}].
+
+l(Id) ->
+ {logical, Id}.
+
+t(X) ->
+ {thread, X}.
+
+c(X) ->
+ {core, X}.
+
+p(X) ->
+ {processor, X}.
+
+n(X) ->
+ {node, X}.
+
mcall(Node, Funs) ->
Parent = self(),
Refs = lists:map(fun (Fun) ->
diff --git a/erts/emulator/test/send_term_SUITE.erl b/erts/emulator/test/send_term_SUITE.erl
index 489adbd660..5fd01a9ac5 100644
--- a/erts/emulator/test/send_term_SUITE.erl
+++ b/erts/emulator/test/send_term_SUITE.erl
@@ -76,40 +76,43 @@ basic(Config) when is_list(Config) ->
?line ExpectedBinTup = term(P, 7),
%% single terms
- ?line [] = term(P, 8), % ERL_DRV_NIL
- ?line '' = term(P, 9), % ERL_DRV_ATOM
- ?line an_atom = term(P, 10), % ERL_DRV_ATOM
- ?line -4711 = term(P, 11), % ERL_DRV_INT
- ?line 4711 = term(P, 12), % ERL_DRV_UINT
- ?line P = term(P, 13), % ERL_DRV_PORT
- ?line <<>> = term(P, 14), % ERL_DRV_BINARY
- ?line <<"hejsan">> = term(P, 15), % ERL_DRV_BINARY
- ?line <<>> = term(P, 16), % ERL_DRV_BUF2BINARY
- ?line <<>> = term(P, 17), % ERL_DRV_BUF2BINARY
- ?line <<"hoppsan">> = term(P, 18), % ERL_DRV_BUF2BINARY
- ?line "" = term(P, 19), % ERL_DRV_STRING
- ?line "" = term(P, 20), % ERL_DRV_STRING
- ?line "hippsan" = term(P, 21), % ERL_DRV_STRING
- ?line {} = term(P, 22), % ERL_DRV_TUPLE
- ?line [] = term(P, 23), % ERL_DRV_LIST
- ?line Self = term(P, 24), % ERL_DRV_PID
- ?line [] = term(P, 25), % ERL_DRV_STRING_CONS
- ?line AFloat = term(P, 26), % ERL_DRV_FLOAT
+ Singles = [{[], 8}, % ERL_DRV_NIL
+ {'', 9}, % ERL_DRV_ATOM
+ {an_atom, 10}, % ERL_DRV_ATOM
+ {-4711, 11}, % ERL_DRV_INT
+ {4711, 12}, % ERL_DRV_UINT
+ {P, 13}, % ERL_DRV_PORT
+ {<<>>, 14}, % ERL_DRV_BINARY
+ {<<"hejsan">>, 15}, % ERL_DRV_BINARY
+ {<<>>, 16}, % ERL_DRV_BUF2BINARY
+ {<<>>, 17}, % ERL_DRV_BUF2BINARY
+ {<<"hoppsan">>, 18}, % ERL_DRV_BUF2BINARY
+ {"", 19}, % ERL_DRV_STRING
+ {"", 20}, % ERL_DRV_STRING
+ {"hippsan", 21}, % ERL_DRV_STRING
+ {{}, 22}, % ERL_DRV_TUPLE
+ {[], 23}, % ERL_DRV_LIST
+ {Self, 24}, % ERL_DRV_PID
+ {[], 25}, % ERL_DRV_STRING_CONS
+ {[], 27}, % ERL_DRV_EXT2TERM
+ {18446744073709551615, 28}, % ERL_DRV_UINT64
+ {20233590931456, 29}, % ERL_DRV_UINT64
+ {4711, 30}, % ERL_DRV_UINT64
+ {0, 31}, % ERL_DRV_UINT64
+ {9223372036854775807, 32}, % ERL_DRV_INT64
+ {20233590931456, 33}, % ERL_DRV_INT64
+ {4711, 34}, % ERL_DRV_INT64
+ {0, 35}, % ERL_DRV_INT64
+ {-1, 36}, % ERL_DRV_INT64
+ {-4711, 37}, % ERL_DRV_INT64
+ {-20233590931456, 38}, % ERL_DRV_INT64
+ {-9223372036854775808, 39}], % ERL_DRV_INT64
+ ?line {Terms, Ops} = lists:unzip(Singles),
+ ?line Terms = term(P,Ops),
+
+ AFloat = term(P, 26), % ERL_DRV_FLOAT
?line true = AFloat < 0.001,
?line true = AFloat > -0.001,
- ?line [] = term(P, 27), % ERL_DRV_EXT2TERM
- ?line 18446744073709551615 = term(P, 28), % ERL_DRV_UINT64
- ?line 20233590931456 = term(P, 29), % ERL_DRV_UINT64
- ?line 4711 = term(P, 30), % ERL_DRV_UINT64
- ?line 0 = term(P, 31), % ERL_DRV_UINT64
- ?line 9223372036854775807 = term(P, 32), % ERL_DRV_INT64
- ?line 20233590931456 = term(P, 33), % ERL_DRV_INT64
- ?line 4711 = term(P, 34), % ERL_DRV_INT64
- ?line 0 = term(P, 35), % ERL_DRV_INT64
- ?line -1 = term(P, 36), % ERL_DRV_INT64
- ?line -4711 = term(P, 37), % ERL_DRV_INT64
- ?line -20233590931456 = term(P, 38), % ERL_DRV_INT64
- ?line -9223372036854775808 = term(P, 39), % ERL_DRV_INT64
%% Failure cases.
?line [] = term(P, 127),
diff --git a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
index 6638de0560..165cce2e9d 100644
--- a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
+++ b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
@@ -17,6 +17,7 @@
*/
#include "erl_driver.h"
+#include <stdio.h>
#include <errno.h>
#include <string.h>
@@ -65,12 +66,21 @@ static void fail_term(ErlDrvTermData* msg, int len, int line);
static void send_term_drv_run(ErlDrvData port, char *buf, int count)
{
- ErlDrvTermData msg[1024];
-
- switch (*buf) {
+ char buf7[1024];
+ ErlDrvTermData spec[1024];
+ ErlDrvTermData* msg = spec;
+ ErlDrvBinary* bins[15];
+ int bin_ix = 0;
+ ErlDrvSInt64 s64[15];
+ int s64_ix = 0;
+ ErlDrvUInt64 u64[15];
+ int u64_ix = 0;
+ int i = 0;
+
+ for (i=0; i<count; i++) switch (buf[i]) {
case 0:
msg[0] = ERL_DRV_NIL;
- output_term(msg, 1);
+ msg += 1;
break;
case 1: /* Most term types inside a tuple. */
@@ -102,7 +112,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[22] = driver_connected(erlang_port);
msg[23] = ERL_DRV_TUPLE;
msg[24] = (ErlDrvTermData) 7;
- output_term(msg, 25);
+ msg += 25;
}
break;
@@ -117,7 +127,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[i] = ERL_DRV_NIL;
msg[i+1] = ERL_DRV_LIST;
msg[i+2] = (ErlDrvTermData) 201;
- output_term(msg, i+3);
+ msg += i+3;
}
break;
@@ -126,7 +136,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
ErlDrvBinary* bin;
int i;
- bin = driver_alloc_binary(256);
+ bin = bins[bin_ix++] = driver_alloc_binary(256);
for (i = 0; i < 256; i++) {
bin->orig_bytes[i] = i;
}
@@ -140,8 +150,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[7] = (ErlDrvTermData) 23;
msg[8] = ERL_DRV_TUPLE;
msg[9] = (ErlDrvTermData) 2;
- output_term(msg, 10);
- driver_free_binary(bin);
+ msg += 10;
}
break;
@@ -152,11 +161,11 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[3] = driver_caller(erlang_port);
msg[4] = ERL_DRV_TUPLE;
msg[5] = (ErlDrvTermData) 2;
- output_term(msg, 6);
+ msg += 6;
break;
case 5:
- output_term(msg, make_ext_term_list(msg, 0));
+ msg += make_ext_term_list(msg, 0);
break;
case 6:
@@ -166,94 +175,91 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[3] = ~((ErlDrvTermData) 0);
msg[4] = ERL_DRV_TUPLE;
msg[5] = (ErlDrvTermData) 2;
- output_term(msg, 6);
+ msg += 6;
break;
case 7: {
int len = 0;
- char buf[1024];
- memset(buf, 17, sizeof(buf));
+ memset(buf7, 17, sizeof(buf7));
/* empty heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
msg[len++] = (ErlDrvTermData) NULL; /* NULL is ok if size == 0 */
msg[len++] = (ErlDrvTermData) 0;
/* empty heap binary again */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0]; /* ptr is ok if size == 0 */
+ msg[len++] = (ErlDrvTermData) buf7; /* ptr is ok if size == 0 */
msg[len++] = (ErlDrvTermData) 0;
/* heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0];
+ msg[len++] = (ErlDrvTermData) buf7;
msg[len++] = (ErlDrvTermData) 17;
/* off heap binary */
msg[len++] = ERL_DRV_BUF2BINARY;
- msg[len++] = (ErlDrvTermData) &buf[0];
- msg[len++] = (ErlDrvTermData) sizeof(buf);
+ msg[len++] = (ErlDrvTermData) buf7;
+ msg[len++] = (ErlDrvTermData) sizeof(buf7);
msg[len++] = ERL_DRV_TUPLE;
msg[len++] = (ErlDrvTermData) 4;
- output_term(msg, len);
+ msg += len;
break;
}
case 8:
msg[0] = ERL_DRV_NIL;
- output_term(msg, 1);
+ msg += 1;
break;
case 9:
msg[0] = ERL_DRV_ATOM;
msg[1] = (ErlDrvTermData) driver_mk_atom("");
- output_term(msg, 2);
+ msg += 2;
break;
case 10:
msg[0] = ERL_DRV_ATOM;
msg[1] = (ErlDrvTermData) driver_mk_atom("an_atom");
- output_term(msg, 2);
+ msg += 2;
break;
case 11:
msg[0] = ERL_DRV_INT;
msg[1] = (ErlDrvTermData) -4711;
- output_term(msg, 2);
+ msg += 2;
break;
case 12:
msg[0] = ERL_DRV_UINT;
msg[1] = (ErlDrvTermData) 4711;
- output_term(msg, 2);
+ msg += 2;
break;
case 13:
msg[0] = ERL_DRV_PORT;
msg[1] = driver_mk_port(erlang_port);
- output_term(msg, 2);
+ msg += 2;
break;
case 14: {
- ErlDrvBinary *dbin = driver_alloc_binary(0);
+ ErlDrvBinary *dbin = bins[bin_ix++] = driver_alloc_binary(0);
msg[0] = ERL_DRV_BINARY;
msg[1] = (ErlDrvTermData) dbin;
msg[2] = (ErlDrvTermData) 0;
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
- driver_free_binary(dbin);
+ msg += 4;
break;
}
case 15: {
- char buf[] = "hejsan";
- ErlDrvBinary *dbin = driver_alloc_binary(sizeof(buf)-1);
+ static const char buf[] = "hejsan";
+ ErlDrvBinary *dbin = bins[bin_ix++] = driver_alloc_binary(sizeof(buf)-1);
if (dbin)
memcpy((void *) dbin->orig_bytes, (void *) buf, sizeof(buf)-1);
msg[0] = ERL_DRV_BINARY;
msg[1] = (ErlDrvTermData) dbin;
msg[2] = (ErlDrvTermData) (dbin ? sizeof(buf)-1 : 0);
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
- driver_free_binary(dbin);
+ msg += 4;
break;
}
@@ -261,24 +267,24 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) NULL;
msg[2] = (ErlDrvTermData) 0;
- output_term(msg, 3);
+ msg += 3;
break;
case 17: {
- char buf[] = "";
+ static const char buf[] = "";
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 18: {
- char buf[] = "hoppsan";
+ static const char buf[] = "hoppsan";
msg[0] = ERL_DRV_BUF2BINARY;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
@@ -286,44 +292,44 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) 0;
- output_term(msg, 3);
+ msg += 3;
break;
case 20: {
- char buf[] = "";
+ static const char buf[] = "";
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 21: {
- char buf[] = "hippsan";
+ static const char buf[] = "hippsan";
msg[0] = ERL_DRV_STRING;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf)-1;
- output_term(msg, 3);
+ msg += 3;
break;
}
case 22:
msg[0] = ERL_DRV_TUPLE;
msg[1] = (ErlDrvTermData) 0;
- output_term(msg, 2);
+ msg += 2;
break;
case 23:
msg[0] = ERL_DRV_NIL;
msg[1] = ERL_DRV_LIST;
msg[2] = (ErlDrvTermData) 1;
- output_term(msg, 3);
+ msg += 3;
break;
case 24:
msg[0] = ERL_DRV_PID;
msg[1] = driver_connected(erlang_port);
- output_term(msg, 2);
+ msg += 2;
break;
case 25:
@@ -331,132 +337,131 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
msg[1] = ERL_DRV_STRING_CONS;
msg[2] = (ErlDrvTermData) "";
msg[3] = (ErlDrvTermData) 0;
- output_term(msg, 4);
+ msg += 4;
break;
case 26: {
- double my_float = 0.0;
+ static double my_float = 0.0;
msg[0] = ERL_DRV_FLOAT;
msg[1] = (ErlDrvTermData) &my_float;
- output_term(msg, 2);
+ msg += 2;
break;
}
case 27: {
- char buf[] = {131, 106}; /* [] */
+ static char buf[] = {131, 106}; /* [] */
msg[0] = ERL_DRV_EXT2TERM;
msg[1] = (ErlDrvTermData) buf;
msg[2] = (ErlDrvTermData) sizeof(buf);
- output_term(msg, 3);
+ msg += 3;
break;
}
case 28: {
- ErlDrvUInt64 x = ~((ErlDrvUInt64) 0);
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = ~((ErlDrvUInt64) 0);
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 29: {
- ErlDrvUInt64 x = ((ErlDrvUInt64) 4711) << 32;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = ((ErlDrvUInt64) 4711) << 32;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 30: {
- ErlDrvUInt64 x = 4711;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = 4711;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 31: {
- ErlDrvUInt64 x = 0;
+ ErlDrvUInt64* x = &u64[u64_ix++];
+ *x = 0;
msg[0] = ERL_DRV_UINT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 32: {
- ErlDrvSInt64 x = ((((ErlDrvUInt64) 0x7fffffff) << 32)
- | ((ErlDrvUInt64) 0xffffffff));
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((((ErlDrvUInt64) 0x7fffffff) << 32) | ((ErlDrvUInt64) 0xffffffff));
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 33: {
- ErlDrvSInt64 x = (ErlDrvSInt64) (((ErlDrvUInt64) 4711) << 32);
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = (ErlDrvSInt64) (((ErlDrvUInt64) 4711) << 32);
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 34: {
- ErlDrvSInt64 x = 4711;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = 4711;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 35: {
- ErlDrvSInt64 x = 0;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = 0;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 36: {
- ErlDrvSInt64 x = -1;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = -1;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 37: {
- ErlDrvSInt64 x = -4711;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = -4711;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 38: {
- ErlDrvSInt64 x = ((ErlDrvSInt64) ((ErlDrvUInt64) 4711) << 32)*-1;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((ErlDrvSInt64) ((ErlDrvUInt64) 4711) << 32)*-1;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
case 39: {
- ErlDrvSInt64 x = ((ErlDrvSInt64) 1) << 63;
+ ErlDrvSInt64* x = &s64[s64_ix++];
+ *x = ((ErlDrvSInt64) 1) << 63;
msg[0] = ERL_DRV_INT64;
- msg[1] = (ErlDrvTermData) &x;
- output_term(msg, 2);
-
+ msg[1] = (ErlDrvTermData) x;
+ msg += 2;
break;
}
@@ -464,7 +469,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
case 127: /* Error cases */
{
long refc;
- ErlDrvBinary* bin = driver_alloc_binary(256);
+ ErlDrvBinary* bin = bins[bin_ix++] = driver_alloc_binary(256);
FAIL_TERM(msg, 0);
@@ -537,7 +542,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
refc = driver_binary_get_refc(bin);
if (refc > 3) {
char sbuf[128];
- sprintf(sbuf, "bad_refc:%d", refc);
+ sprintf(sbuf, "bad_refc:%ld", refc);
driver_failure_atom(erlang_port, sbuf);
}
driver_free_binary(bin);
@@ -644,6 +649,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
/* Signal end of test case */
msg[0] = ERL_DRV_NIL;
driver_output_term(erlang_port, msg, 1);
+ return;
}
break;
@@ -651,6 +657,16 @@ static void send_term_drv_run(ErlDrvData port, char *buf, int count)
driver_failure_atom(erlang_port, "bad_request");
break;
}
+ if (count > 1) {
+ *msg++ = ERL_DRV_NIL;
+ *msg++ = ERL_DRV_LIST;
+ *msg++ = count + 1;
+ }
+ output_term(spec, msg-spec);
+ if ((bin_ix|s64_ix|u64_ix) > 15) abort();
+ while (bin_ix) {
+ driver_free_binary(bins[--bin_ix]);
+ }
}
static void output_term(ErlDrvTermData* msg, int len)
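The hunks above change the test driver to accumulate one combined term spec in msg and emit it with a single output_term() call at the end, which is why binaries allocated along the way are now kept in bins[] and freed only after the spec has been output. A minimal sketch of that rule against the public driver API (the helper name and payload are invented for illustration):

    #include <string.h>
    #include "erl_driver.h"

    /* Illustrative sketch: sends {example, <<"hi">>} to the connected
     * process.  As in the patch above, the ErlDrvBinary must stay alive
     * until driver_output_term() has consumed the spec; freeing it
     * afterwards is safe because the emulator then holds its own
     * reference. */
    static void send_example(ErlDrvPort port)
    {
        ErlDrvBinary *bin = driver_alloc_binary(2);
        if (bin == NULL)
            return;
        memcpy(bin->orig_bytes, "hi", 2);
        {
            ErlDrvTermData spec[] = {
                ERL_DRV_ATOM,   driver_mk_atom("example"),
                ERL_DRV_BINARY, (ErlDrvTermData) bin,
                                (ErlDrvTermData) 2, (ErlDrvTermData) 0,
                ERL_DRV_TUPLE,  (ErlDrvTermData) 2
            };
            driver_output_term(port, spec, sizeof(spec)/sizeof(spec[0]));
        }
        driver_free_binary(bin);
    }
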
diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl
index e782d2f293..ba433d4e11 100644
--- a/erts/emulator/test/system_info_SUITE.erl
+++ b/erts/emulator/test/system_info_SUITE.erl
@@ -35,12 +35,12 @@
%-compile(export_all).
-export([all/1, init_per_testcase/2, fin_per_testcase/2]).
--export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1]).
+-export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(2)).
all(doc) -> [];
-all(suite) -> [process_count, system_version, misc_smoke_tests, heap_size].
+all(suite) -> [process_count, system_version, misc_smoke_tests, heap_size, wordsize].
init_per_testcase(_Case, Config) when is_list(Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
@@ -145,3 +145,23 @@ heap_size(Config) when is_list(Config) ->
?line Hmin = proplists:get_value(min_heap_size, GCinf),
ok.
+wordsize(suite) ->
+ [];
+wordsize(doc) ->
+ ["Tests the various wordsize variants"];
+wordsize(Config) when is_list(Config) ->
+ ?line A = erlang:system_info(wordsize),
+ ?line true = is_integer(A),
+ ?line A = erlang:system_info({wordsize,internal}),
+ ?line B = erlang:system_info({wordsize,external}),
+ ?line true = A =< B,
+ case {B,A} of
+ {4,4} ->
+ {comment, "True 32-bit emulator"};
+ {8,8} ->
+ {comment, "True 64-bit emulator"};
+ {8,4} ->
+ {comment, "Halfword 64-bit emulator"};
+ Other ->
+ exit({unexpected_wordsizes,Other})
+ end.
diff --git a/erts/emulator/test/time_SUITE.erl b/erts/emulator/test/time_SUITE.erl
index 2ad1f0d201..095e9dd1af 100644
--- a/erts/emulator/test/time_SUITE.erl
+++ b/erts/emulator/test/time_SUITE.erl
@@ -34,6 +34,8 @@
consistency/1,
now/1, now_unique/1, now_update/1, timestamp/1]).
+-export([local_to_univ_utc/1]).
+
-include("test_server.hrl").
-export([linear_time/1]).
@@ -53,7 +55,40 @@
-define(dst_timezone, 2).
all(suite) -> [univ_to_local, local_to_univ,
- bad_univ_to_local, bad_local_to_univ, consistency, now, timestamp].
+ local_to_univ_utc,
+ bad_univ_to_local, bad_local_to_univ,
+ consistency, now, timestamp].
+
+local_to_univ_utc(suite) ->
+ [];
+local_to_univ_utc(doc) ->
+ ["Test that DST = true on timezones without DST is ignored"];
+local_to_univ_utc(Config) when is_list(Config) ->
+ case os:type() of
+ {unix,_} ->
+ %% TZ variable has a meaning
+ ?line {ok, Node} =
+ test_server:start_node(local_univ_utc,peer,
+ [{args, "-env TZ UTC"}]),
+ ?line {{2008,8,1},{0,0,0}} =
+ rpc:call(Node,
+ erlang,localtime_to_universaltime,
+ [{{2008, 8, 1}, {0, 0, 0}},
+ false]),
+ ?line {{2008,8,1},{0,0,0}} =
+ rpc:call(Node,
+ erlang,localtime_to_universaltime,
+ [{{2008, 8, 1}, {0, 0, 0}},
+ true]),
+ ?line [{{2008,8,1},{0,0,0}}] =
+ rpc:call(Node,
+ calendar,local_time_to_universal_time_dst,
+ [{{2008, 8, 1}, {0, 0, 0}}]),
+ ?line test_server:stop_node(Node),
+ ok;
+ _ ->
+ {skip,"Only valid on Unix"}
+ end.
%% Tests conversion from universal to local time.
diff --git a/erts/emulator/test/trace_call_time_SUITE.erl b/erts/emulator/test/trace_call_time_SUITE.erl
new file mode 100644
index 0000000000..7bc91addde
--- /dev/null
+++ b/erts/emulator/test/trace_call_time_SUITE.erl
@@ -0,0 +1,614 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%
+%%% Define to run outside of test server
+%%%
+%%% -define(STANDALONE,1).
+%%%
+%%%
+%%% Define for debug output
+%%%
+%%% -define(debug,1).
+
+-module(trace_call_time_SUITE).
+
+%% Exported end user tests
+
+-export([seq/3, seq_r/3]).
+-export([loaded/1, a_function/1, a_called_function/1, dec/1, nif_dec/1]).
+
+-define(US_ERROR, 10000).
+-define(R_ERROR, 0.8).
+-define(SINGLE_CALL_US_TIME, 10).
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Result examination macros
+
+-define(CT(P,MFA),{trace,P,call,MFA}).
+-define(CTT(P, MFA),{trace_ts,P,call,MFA,{_,_,_}}).
+-define(RF(P,MFA,V),{trace,P,return_from,MFA,V}).
+-define(RFT(P,MFA,V),{trace_ts,P,return_from,MFA,V,{_,_,_}}).
+-define(RT(P,MFA),{trace,P,return_to,MFA}).
+-define(RTT(P,MFA),{trace_ts,P,return_to,MFA,{_,_,_}}).
+
+-ifdef(debug).
+-define(dbgformat(A,B),io:format(A,B)).
+-else.
+-define(dbgformat(A,B),noop).
+-endif.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include("test_server.hrl").
+
+%% When run in test server.
+-export([all/1, init_per_testcase/2, fin_per_testcase/2, not_run/1]).
+-export([basic/1, on_and_off/1, info/1,
+ pause_and_restart/1, scheduling/1, called_function/1, combo/1, bif/1, nif/1]).
+
+init_per_testcase(_Case, Config) ->
+ ?line Dog=test_server:timetrap(test_server:seconds(400)),
+ erlang:trace_pattern({'_','_','_'}, false, [local,meta,call_time,call_count]),
+ erlang:trace_pattern(on_load, false, [local,meta,call_time,call_count]),
+ timer:now_diff(now(),now()),
+ [{watchdog, Dog}|Config].
+
+fin_per_testcase(_Case, Config) ->
+ erlang:trace_pattern({'_','_','_'}, false, [local,meta,call_time,call_count]),
+ erlang:trace_pattern(on_load, false, [local,meta,call_time,call_count]),
+ erlang:trace(all, false, [all]),
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+all(doc) ->
+    ["Test call time tracing of local function calls."];
+all(suite) ->
+ case test_server:is_native(?MODULE) of
+ true -> [not_run];
+ false -> [basic, on_and_off, info,
+ pause_and_restart, scheduling, combo, bif, nif, called_function]
+ end.
+
+not_run(Config) when is_list(Config) ->
+ {skipped,"Native code"}.
+
+basic(suite) ->
+ [];
+basic(doc) ->
+    ["Tests basic call time trace"];
+basic(Config) when is_list(Config) ->
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line M = 1000,
+ %%
+ ?line 1 = erlang:trace_pattern({?MODULE,seq, '_'}, true, [call_time]),
+ ?line 2 = erlang:trace_pattern({?MODULE,seq_r,'_'}, true, [call_time]),
+ ?line Pid = setup(),
+ ?line {L, T1} = execute(Pid, fun() -> seq(1, M, fun(X) -> (X+1) end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid, M, 0, 0}], T1),
+ ?line ok = check_trace_info({?MODULE, seq_r, 3}, [], none),
+
+ ?line {Lr, T2} = execute(Pid, fun() -> seq_r(1, M, fun(X) -> (X+1) end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid, M, 0, 0}], T1),
+ ?line ok = check_trace_info({?MODULE, seq_r, 3}, [{Pid, 1, 0, 0}], T2/M),
+ ?line ok = check_trace_info({?MODULE, seq_r, 4}, [{Pid, M, 0, 0}], T2),
+ ?line L = lists:reverse(Lr),
+
+ %%
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line Pid ! quit,
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+on_and_off(suite) ->
+ [];
+on_and_off(doc) ->
+ ["Tests turning trace parameters on and off"];
+on_and_off(Config) when is_list(Config) ->
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line M = 100,
+ %%
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, true, [call_time]),
+ ?line Pid = setup(),
+ ?line {L, T1} = execute(Pid, {?MODULE, seq, [1, M, fun(X) -> X+1 end]}),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid, M, 0, 0}], T1),
+
+ ?line N = erlang:trace_pattern({?MODULE,'_','_'}, true, [call_time]),
+ ?line {L, T2} = execute(Pid, fun() -> seq(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid, M, 0, 0}], T2),
+
+ ?line P = erlang:trace_pattern({'_','_','_'}, true, [call_time]),
+ ?line {L, T3} = execute(Pid, fun() -> seq(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid, M, 0, 0}], T3),
+
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, false, [call_time]),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, false, none),
+ ?line {L, _T4} = execute(Pid, fun() -> seq(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, false, none),
+ ?line ok = check_trace_info({?MODULE, seq_r, 4}, [], none),
+ ?line {Lr, T5} = execute(Pid, fun() -> seq_r(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq_r, 4}, [{Pid,M,0,0}], T5),
+
+ ?line N = erlang:trace_pattern({?MODULE,'_','_'}, false, [call_time]),
+ ?line ok = check_trace_info({?MODULE, seq_r, 4}, false, none),
+ ?line {Lr, _T6} = execute(Pid, fun() -> seq_r(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq_r, 4}, false, none),
+ ?line L = lists:reverse(Lr),
+ %%
+ ?line Pid ! quit,
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+info(suite) ->
+ [];
+info(doc) ->
+ ["Tests the trace_info BIF"];
+info(Config) when is_list(Config) ->
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ %%
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,3}, true, [call_time]),
+ ?line {call_time,[]} = erlang:trace_info({?MODULE,seq,3}, call_time),
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, pause, [call_time]),
+ ?line {call_time,[]} = erlang:trace_info({?MODULE,seq,3}, call_time),
+ ?line {all,[_|_]=L} = erlang:trace_info({?MODULE,seq,3}, all),
+ ?line {value,{call_time,[]}} = lists:keysearch(call_time, 1, L),
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, restart, [call_time]),
+ ?line {call_time,[]} = erlang:trace_info({?MODULE,seq,3}, call_time),
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, false, [call_time]),
+ ?line {call_time,false} = erlang:trace_info({?MODULE,seq,3}, call_time),
+ ?line {all,false} = erlang:trace_info({?MODULE,seq,3}, all),
+ %%
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+pause_and_restart(suite) ->
+ [];
+pause_and_restart(doc) ->
+ ["Tests pausing and restarting call time counters"];
+pause_and_restart(Config) when is_list(Config) ->
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line M = 100,
+ ?line Pid = setup(),
+ %%
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, true, [call_time]),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [], none),
+ ?line {L, T1} = execute(Pid, fun() -> seq(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid,M,0,0}], T1),
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, pause, [call_time]),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid,M,0,0}], T1),
+ ?line {L, T2} = execute(Pid, fun() -> seq(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid,M,0,0}], T2),
+ ?line 1 = erlang:trace_pattern({?MODULE,seq,'_'}, restart, [call_time]),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [], none),
+ ?line {L, T3} = execute(Pid, fun() -> seq(1, M, fun(X) -> X+1 end) end),
+ ?line ok = check_trace_info({?MODULE, seq, 3}, [{Pid,M,0,0}], T3),
+ %%
+ ?line Pid ! quit,
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+scheduling(suite) ->
+ [];
+scheduling(doc) ->
+ ["Tests in/out scheduling of call time counters"];
+scheduling(Config) when is_list(Config) ->
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line M = 1000000,
+ ?line Np = erlang:system_info(schedulers_online),
+ ?line F = 12,
+
+ %% setup load processes
+ %% (single, no internal calls)
+
+ ?line erlang:trace_pattern({?MODULE,loaded,1}, true, [call_time]),
+
+ ?line Pids = [setup() || _ <- lists:seq(1, F*Np)],
+ ?line {_Ls,T1} = execute(Pids, {?MODULE,loaded,[M]}),
+ ?line [Pid ! quit || Pid <- Pids],
+
+    %% logic dictates that each process will get ~ 1/F of the schedulers' time
+
+ ?line {call_time, CT} = erlang:trace_info({?MODULE,loaded,1}, call_time),
+
+ ?line lists:foreach(fun (Pid) ->
+ ?line ok = case check_process_time(lists:keysearch(Pid, 1, CT), M, F, T1) of
+ schedule_time_error ->
+ test_server:comment("Warning: Failed time ratio"),
+ ok;
+ Other -> Other
+ end
+ end, Pids),
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+combo(suite) ->
+ [];
+combo(doc) ->
+ ["Tests combining local call trace and meta trace with call time trace"];
+combo(Config) when is_list(Config) ->
+ ?line Self = self(),
+ ?line Nbc = 3,
+ ?line MetaMs = [{'_',[],[{return_trace}]}],
+ ?line Flags = lists:sort([call, return_to]),
+ ?line LocalTracer = spawn_link(fun () -> relay_n(5 + Nbc + 3, Self) end),
+ ?line MetaTracer = spawn_link(fun () -> relay_n(9 + Nbc + 3, Self) end),
+ ?line 2 = erlang:trace_pattern({?MODULE,seq_r,'_'}, [], [local]),
+ ?line 2 = erlang:trace_pattern({?MODULE,seq_r,'_'}, true, [call_time]),
+ ?line 2 = erlang:trace_pattern({?MODULE,seq_r,'_'}, MetaMs, [{meta,MetaTracer}]),
+ ?line 2 = erlang:trace_pattern({?MODULE,seq_r,'_'}, true, [call_count]),
+
+ % bifs
+ ?line 2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, [], [local]),
+ ?line 2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, true, [call_time]),
+ ?line 2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, MetaMs, [{meta,MetaTracer}]),
+ %% not implemented
+ %?line 2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, true, [call_count]),
+
+ ?line 1 = erlang:trace(Self, true, [{tracer,LocalTracer} | Flags]),
+ %%
+ ?line {traced,local} =
+ erlang:trace_info({?MODULE,seq_r,3}, traced),
+ ?line {match_spec,[]} =
+ erlang:trace_info({?MODULE,seq_r,3}, match_spec),
+ ?line {meta,MetaTracer} =
+ erlang:trace_info({?MODULE,seq_r,3}, meta),
+ ?line {meta_match_spec,MetaMs} =
+ erlang:trace_info({?MODULE,seq_r,3}, meta_match_spec),
+ ?line ok = check_trace_info({?MODULE, seq_r, 3}, [], none),
+
+ %% check empty trace_info for ?MODULE:seq_r/3
+ ?line {all,[_|_]=TraceInfo} = erlang:trace_info({?MODULE,seq_r,3}, all),
+ ?line {value,{traced,local}} = lists:keysearch(traced, 1, TraceInfo),
+ ?line {value,{match_spec,[]}} = lists:keysearch(match_spec, 1, TraceInfo),
+ ?line {value,{meta,MetaTracer}} = lists:keysearch(meta, 1, TraceInfo),
+ ?line {value,{meta_match_spec,MetaMs}} = lists:keysearch(meta_match_spec, 1, TraceInfo),
+ ?line {value,{call_count,0}} = lists:keysearch(call_count, 1, TraceInfo),
+ ?line {value,{call_time,[]}} = lists:keysearch(call_time, 1, TraceInfo),
+
+ %% check empty trace_info for erlang:term_to_binary/1
+ ?line {all, [_|_] = TraceInfoBif} = erlang:trace_info({erlang, term_to_binary, 1}, all),
+ ?line {value,{traced,local}} = lists:keysearch(traced, 1, TraceInfoBif),
+ ?line {value,{match_spec,[]}} = lists:keysearch(match_spec, 1, TraceInfoBif),
+ ?line {value,{meta, MetaTracer}} = lists:keysearch(meta, 1, TraceInfoBif),
+ ?line {value,{meta_match_spec,MetaMs}} = lists:keysearch(meta_match_spec, 1, TraceInfoBif),
+ %% not implemented
+ ?line {value,{call_count,false}} = lists:keysearch(call_count, 1, TraceInfoBif),
+ %?line {value,{call_count,0}} = lists:keysearch(call_count, 1, TraceInfoBif),
+ ?line {value,{call_time,[]}} = lists:keysearch(call_time, 1, TraceInfoBif),
+
+ %%
+ ?line [3,2,1] = seq_r(1, 3, fun(X) -> X+1 end),
+ ?line T0 = now(),
+ ?line with_bif(Nbc),
+ ?line T1 = now(),
+ ?line TimeB = timer:now_diff(T1,T0),
+ %%
+
+ ?line List = collect(100),
+ ?line {MetaR, LocalR} =
+ lists:foldl(
+ fun ({P,X}, {M,L}) when P == MetaTracer ->
+ {[X|M],L};
+ ({P,X}, {M,L}) when P == LocalTracer ->
+ {M,[X|L]}
+ end,
+ {[],[]},
+ List),
+ ?line Meta = lists:reverse(MetaR),
+ ?line Local = lists:reverse(LocalR),
+
+ ?line [?CTT(Self,{?MODULE,seq_r,[1,3,_]}),
+ ?CTT(Self,{?MODULE,seq_r,[1,3,_,[]]}),
+ ?CTT(Self,{?MODULE,seq_r,[2,3,_,[1]]}),
+ ?CTT(Self,{?MODULE,seq_r,[3,3,_,[2,1]]}),
+ ?RFT(Self,{?MODULE,seq_r,4},[3,2,1]),
+ ?RFT(Self,{?MODULE,seq_r,4},[3,2,1]),
+ ?RFT(Self,{?MODULE,seq_r,4},[3,2,1]),
+ ?RFT(Self,{?MODULE,seq_r,3},[3,2,1]),
+ ?CTT(Self,{erlang,term_to_binary,[3]}), % bif
+ ?RFT(Self,{erlang,term_to_binary,1},<<131,97,3>>),
+ ?CTT(Self,{erlang,term_to_binary,[2]}),
+ ?RFT(Self,{erlang,term_to_binary,1},<<131,97,2>>)
+ ] = Meta,
+
+ ?line [?CT(Self,{?MODULE,seq_r,[1,3,_]}),
+ ?CT(Self,{?MODULE,seq_r,[1,3,_,[]]}),
+ ?CT(Self,{?MODULE,seq_r,[2,3,_,[1]]}),
+ ?CT(Self,{?MODULE,seq_r,[3,3,_,[2,1]]}),
+ ?RT(Self,{?MODULE,combo,1}),
+ ?CT(Self,{erlang,term_to_binary,[3]}), % bif
+ ?RT(Self,{?MODULE,with_bif,1}),
+ ?CT(Self,{erlang,term_to_binary,[2]}),
+ ?RT(Self,{?MODULE,with_bif,1})
+ ] = Local,
+
+ ?line ok = check_trace_info({?MODULE, seq_r, 3}, [{Self,1,0,0}], 1),
+ ?line ok = check_trace_info({?MODULE, seq_r, 4}, [{Self,3,0,0}], 1),
+ ?line ok = check_trace_info({?MODULE, seq_r, 3}, [{Self,1,0,0}], 1),
+ ?line ok = check_trace_info({?MODULE, seq_r, 4}, [{Self,3,0,0}], 1),
+ ?line ok = check_trace_info({erlang, term_to_binary, 1}, [{self(), Nbc - 1, 0, 0}], TimeB),
+ %%
+ ?line erlang:trace_pattern({'_','_','_'}, false, [local,meta,call_time]),
+ ?line erlang:trace_pattern(on_load, false, [local,meta,call_time]),
+ ?line erlang:trace(all, false, [all]),
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+bif(suite) ->
+ [];
+bif(doc) ->
+ ["Tests tracing of bifs"];
+bif(Config) when is_list(Config) ->
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line M = 1000000,
+ %%
+ ?line 2 = erlang:trace_pattern({erlang, binary_to_term, '_'}, true, [call_time]),
+ ?line 2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, true, [call_time]),
+ ?line Pid = setup(),
+ ?line {L, T1} = execute(Pid, fun() -> with_bif(M) end),
+
+ ?line ok = check_trace_info({erlang, binary_to_term, 1}, [{Pid, M - 1, 0, 0}], T1/2),
+ ?line ok = check_trace_info({erlang, term_to_binary, 1}, [{Pid, M - 1, 0, 0}], T1/2),
+
+ % disable term2binary
+
+ ?line 2 = erlang:trace_pattern({erlang, term_to_binary, '_'}, false, [call_time]),
+
+ ?line {L, T2} = execute(Pid, fun() -> with_bif(M) end),
+
+ ?line ok = check_trace_info({erlang, binary_to_term, 1}, [{Pid, M*2 - 2, 0, 0}], T1/2 + T2),
+ ?line ok = check_trace_info({erlang, term_to_binary, 1}, false, none),
+
+ %%
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line Pid ! quit,
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+nif(suite) ->
+ [];
+nif(doc) ->
+ ["Tests tracing of nifs"];
+nif(Config) when is_list(Config) ->
+ load_nif(Config),
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line M = 1000000,
+ %%
+ ?line 1 = erlang:trace_pattern({?MODULE, nif_dec, '_'}, true, [call_time]),
+ ?line 1 = erlang:trace_pattern({?MODULE, with_nif, '_'}, true, [call_time]),
+ ?line Pid = setup(),
+ ?line {L, T1} = execute(Pid, fun() -> with_nif(M) end),
+
+    % the nif is called M - 1 times; on the last iteration with_nif/1
+    % returns ok without calling the nif.
+ ?line ok = check_trace_info({?MODULE, nif_dec, 1}, [{Pid, M-1, 0, 0}], T1/5*4),
+ ?line ok = check_trace_info({?MODULE, with_nif, 1}, [{Pid, M, 0, 0}], T1/5),
+
+ %%
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line Pid ! quit,
+ ok.
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+called_function(suite) ->
+ [];
+called_function(doc) ->
+ ["Tests combining nested function calls and that the time accumulates to the right function"];
+called_function(Config) when is_list(Config) ->
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ?line M = 2100,
+ ?line Pid = setup(),
+ %%
+ ?line 1 = erlang:trace_pattern({?MODULE,a_function,'_'}, true, [call_time]),
+ ?line {L, T1} = execute(Pid, {?MODULE, a_function, [M]}),
+ ?line ok = check_trace_info({?MODULE, a_function, 1}, [{Pid, M, 0, 0}], T1),
+
+ ?line 1 = erlang:trace_pattern({?MODULE,a_called_function,'_'}, true, [call_time]),
+ ?line {L, T2} = execute(Pid, {?MODULE, a_function, [M]}),
+ ?line ok = check_trace_info({?MODULE, a_function, 1}, [{Pid, M+M, 0, 0}], T1 + M*?SINGLE_CALL_US_TIME),
+ ?line ok = check_trace_info({?MODULE, a_called_function, 1}, [{Pid, M, 0, 0}], T2),
+
+
+ ?line 1 = erlang:trace_pattern({?MODULE,dec,'_'}, true, [call_time]),
+ ?line {L, T3} = execute(Pid, {?MODULE, a_function, [M]}),
+ ?line ok = check_trace_info({?MODULE, a_function, 1}, [{Pid, M+M+M, 0, 0}], T1 + (M+M)*?SINGLE_CALL_US_TIME),
+ ?line ok = check_trace_info({?MODULE, a_called_function, 1}, [{Pid, M+M, 0, 0}], T2 + M*?SINGLE_CALL_US_TIME ),
+ ?line ok = check_trace_info({?MODULE, dec, 1}, [{Pid, M, 0, 0}], T3),
+
+ ?line Pid ! quit,
+ ?line P = erlang:trace_pattern({'_','_','_'}, false, [call_time]),
+ ok.
+
+%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% The Tests
+%%%
+
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Local helpers
+
+
+load_nif(Config) ->
+ ?line Path = ?config(data_dir, Config),
+ ?line ok = erlang:load_nif(filename:join(Path,"trace_nif"), 0).
+
+
+%% Stack recursive seq
+seq(Stop, Stop, Succ) when is_function(Succ) ->
+ [Stop];
+seq(Start, Stop, Succ) when is_function(Succ) ->
+ [Start | seq(Succ(Start), Stop, Succ)].
+
+
+a_function(1) -> a_called_function(1);
+a_function(N) when N > 1 -> a_function(a_called_function(N)).
+
+a_called_function(N) -> dec(N).
+
+with_bif(1) -> ok;
+with_bif(N) ->
+ with_bif(erlang:binary_to_term(erlang:term_to_binary(N)) - 1).
+
+with_nif(0) -> error;
+with_nif(1) -> ok;
+with_nif(N) ->
+ with_nif(?MODULE:nif_dec(N)).
+
+
+nif_dec(N) -> 0.
+
+dec(N) ->
+ loaded(10000),
+ N - 1.
+
+loaded(N) when N > 1 -> loaded(N - 1);
+loaded(_) -> 5.
+
+
+%% Tail recursive seq, result list is reversed
+seq_r(Start, Stop, Succ) when is_function(Succ) ->
+ seq_r(Start, Stop, Succ, []).
+
+seq_r(Stop, Stop, _, R) ->
+ [Stop | R];
+seq_r(Start, Stop, Succ, R) ->
+ seq_r(Succ(Start), Stop, Succ, [Start | R]).
+
+% Check call time tracing data and print mismatches
+check_trace_info(Mfa, [{Pid, C,_,_}] = Expect, Time) ->
+ case erlang:trace_info(Mfa, call_time) of
+	% Time tests are somewhat problematic. We want to know whether Time (EXPECTED_TIME)
+	% and S*1000000 + Us (ACTUAL_TIME) are the same.
+ % If the ratio EXPECTED_TIME/ACTUAL_TIME is ~ 1 or if EXPECTED_TIME - ACTUAL_TIME is near zero, the test is ok.
+ {call_time,[{Pid,C,S,Us}]} when S >= 0, Us >= 0, abs(1 - Time/(S*1000000 + Us)) < ?R_ERROR; abs(Time - S*1000000 - Us) < ?US_ERROR ->
+ ok;
+ {call_time,[{Pid,C,S,Us}]} ->
+ Sum = S*1000000 + Us,
+ io:format("Expected ~p -> {call_time, ~p (Time ~p us)}~n - got ~w s. ~w us. = ~w us. - ~w -> delta ~w (ratio ~.2f, should be 1.0)~n",
+ [Mfa, Expect, Time, S, Us, Sum, Time, Sum - Time, Time/Sum]),
+ time_error;
+ Other ->
+ io:format("Expected ~p -> {call_time, ~p (Time ~p us)}~n - got ~p~n", [ Mfa, Expect, Time, Other]),
+ time_count_error
+ end;
+check_trace_info(Mfa, Expect, _) ->
+ case erlang:trace_info(Mfa, call_time) of
+ {call_time, Expect} ->
+ ok;
+ Other ->
+ io:format("Expected ~p -> {call_time, ~p}~n - got ~p~n", [Mfa, Expect, Other]),
+ result_not_expected_error
+ end.
+
+
+%% Check process time
+check_process_time({value,{Pid, M, S, Us}}, M, F, Time) ->
+ ?line Sum = S*1000000 + Us,
+ if
+ abs(1 - (F/(Time/Sum))) < ?R_ERROR ->
+ ok;
+ true ->
+ io:format("- Pid ~p, Got ratio ~.2f, expected ratio ~w~n", [Pid, Time/Sum,F]),
+ schedule_time_error
+ end;
+check_process_time(Other, M, _, _) ->
+ io:format(" - Got ~p, expected count ~w~n", [Other, M]),
+ error.
+
+
+
+%% Message relay process
+relay_n(0, _) ->
+ ok;
+relay_n(N, Dest) ->
+ receive Msg ->
+ Dest ! {self(), Msg},
+ relay_n(N-1, Dest)
+ end.
+
+
+
+%% Collect received messages
+collect(Time) ->
+ Ref = erlang:start_timer(Time, self(), done),
+ L = lists:reverse(collect([], Ref)),
+ ?dbgformat("Got: ~p~n",[L]),
+ L.
+
+collect(A, 0) ->
+ receive
+ Mess ->
+ collect([Mess | A], 0)
+ after 0 ->
+ A
+ end;
+collect(A, Ref) ->
+ receive
+ {timeout, Ref, done} ->
+ collect(A, 0);
+ Mess ->
+ collect([Mess | A], Ref)
+ end.
+
+setup() ->
+ Pid = spawn_link(fun() -> loop() end),
+ ?line 1 = erlang:trace(Pid, true, [call]),
+ Pid.
+
+execute(Pids, Mfa) when is_list(Pids) ->
+ T0 = now(),
+ [P ! {self(), execute, Mfa} || P <- Pids],
+ As = [receive {P, answer, Answer} -> Answer end || P <- Pids],
+ T1 = now(),
+ {As, timer:now_diff(T1,T0)};
+execute(P, Mfa) ->
+ T0 = now(),
+ P ! {self(), execute, Mfa},
+ A = receive {P, answer, Answer} -> Answer end,
+ T1 = now(),
+ {A, timer:now_diff(T1,T0)}.
+
+
+
+loop() ->
+ receive
+ quit ->
+ ok;
+ {Pid, execute, Fun } when is_function(Fun) ->
+ Pid ! {self(), answer, erlang:apply(Fun, [])},
+ loop();
+ {Pid, execute, {M, F, A}} ->
+ Pid ! {self(), answer, erlang:apply(M, F, A)},
+ loop()
+ end.
diff --git a/erts/emulator/test/trace_call_time_SUITE_data/Makefile.src b/erts/emulator/test/trace_call_time_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..2b2a35bd2c
--- /dev/null
+++ b/erts/emulator/test/trace_call_time_SUITE_data/Makefile.src
@@ -0,0 +1,6 @@
+
+NIF_LIBS = trace_nif@dll@
+
+all: $(NIF_LIBS)
+
+@SHLIB_RULES@
diff --git a/erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c b/erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c
new file mode 100644
index 0000000000..33b346aab7
--- /dev/null
+++ b/erts/emulator/test/trace_call_time_SUITE_data/trace_nif.c
@@ -0,0 +1,37 @@
+#include "erl_nif.h"
+
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ return 0;
+}
+
+static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+}
+
+static ERL_NIF_TERM nif_dec_1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int x = 0;
+ enif_get_uint(env, argv[0], &x);
+ return enif_make_int(env, x - 1);
+}
+
+
+
+static ErlNifFunc nif_funcs[] =
+{
+ {"nif_dec", 1, nif_dec_1}
+};
+
+ERL_NIF_INIT(trace_call_time_SUITE,nif_funcs,load,reload,upgrade,unload)
diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops
index 4a859c3094..de19a2e35b 100755
--- a/erts/emulator/utils/beam_makeops
+++ b/erts/emulator/utils/beam_makeops
@@ -1,20 +1,20 @@
#!/usr/bin/env perl
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1998-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 1998-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
use strict;
@@ -499,7 +499,11 @@ sub emulator_output {
print "\n";
print "#ifdef ARCH_64\n";
print "# define BEAM_LOOSE_MASK 0x1FFFUL\n";
+ print "#if HALFWORD_HEAP\n";
+ print "# define BEAM_TIGHT_MASK 0x1FFCUL\n";
+ print "#else\n";
print "# define BEAM_TIGHT_MASK 0x1FF8UL\n";
+ print "#endif\n";
print "# define BEAM_LOOSE_SHIFT 16\n";
print "# define BEAM_TIGHT_SHIFT 16\n";
print "#else\n";
@@ -813,7 +817,7 @@ sub basic_generator {
#
if ($flags =~ /-pack/ && $hot) {
- ($prefix, $pack_spec, @args) = &do_pack(@args);
+ ($prefix, $pack_spec, @args) = &do_pack(@args);
}
#
@@ -907,16 +911,16 @@ sub basic_generator {
my($code);
if (defined $macro{$name}) {
my($macro_code) = "$prefix$macro(" . join(', ', @f) . ");";
- $var_decls .= "Uint tmp_packed1;"
+ $var_decls .= "BeamInstr tmp_packed1;"
if $macro_code =~ /tmp_packed1/;
- $var_decls .= "Uint tmp_packed2;"
+ $var_decls .= "BeamInstr tmp_packed2;"
if $macro_code =~ /tmp_packed2/;
if ($flags =~ /-nonext/) {
$code = "$macro_code\n";
} else {
$code = join("\n",
"{ $var_decls",
- "Eterm* next;",
+ "BeamInstr* next;",
"PreFetch($size, next);",
"$macro_code",
"NextPF($size, next);",
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index b5391234cf..918ef62094 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -1,20 +1,20 @@
#!/usr/bin/env perl
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1999-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 1999-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
use strict;
@@ -184,7 +184,7 @@ for ($i = 0; $i < @bif; $i++) {
my $arity = $bif[$i]->[2];
my $args = join(', ', 'Process*', ('Eterm') x $arity);
print "Eterm $bif[$i]->[3]($args);\n";
- print "Eterm wrap_$bif[$i]->[3]($args, Uint *I);\n";
+ print "Eterm wrap_$bif[$i]->[3]($args, UWord *I);\n";
}
print "#endif\n";
@@ -225,15 +225,15 @@ for ($i = 0; $i < @bif; $i++) {
for ($arg = 1; $arg <= $arity; $arg++) {
print ", Eterm arg$arg";
}
- print ", Uint *I)\n";
+ print ", UWord *I)\n";
print "{\n";
print " return erts_bif_trace($i, p";
for ($arg = 1; $arg <= 3; $arg++) {
if ($arg <= $arity) {
print ", arg$arg";
- } elsif ($arg == ($arity + 1)) {
- # Place I in correct position
- print ", (Eterm) I";
+ #} elsif ($arg == ($arity + 1)) {
+ # # Place I in correct position
+ # print ", (Eterm) I";
} else {
print ", 0";
}
diff --git a/erts/epmd/src/epmd.c b/erts/epmd/src/epmd.c
index 23ac421446..9c2ce065bb 100644
--- a/erts/epmd/src/epmd.c
+++ b/erts/epmd/src/epmd.c
@@ -1,20 +1,20 @@
/* -*- c-indent-level: 2; c-continued-statement-offset: 2 -*- */
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -24,11 +24,6 @@
#include "epmd.h" /* Renamed from 'epmd_r4.h' */
#include "epmd_int.h"
-#ifdef _OSE_
-# include "ose.h"
-# include "efs.h"
-#endif
-
#ifdef HAVE_STDLIB_H
# include <stdlib.h>
#endif
@@ -82,86 +77,7 @@ static char *mystrdup(char *s)
return r;
}
-#ifdef _OSE_
-
-struct args_sig {
- SIGSELECT sig_no;
- int argc ;
- char argv[20][20];
-};
-
-union SIGNAL {
- SIGSELECT sig_no;
- struct args_sig args;
-};
-
-/* Start function. It may be called from the start script as well as from
- the OSE shell directly (using late start hooks). It spawns epmd as an
- OSE process which calls the epmd main function. */
-int start_ose_epmd(int argc, char **argv) {
- union SIGNAL *sig;
- PROCESS epmd_;
- OSENTRYPOINT ose_epmd;
- int i;
-
- if(hunt("epmd", 0, &epmd_, NULL)) {
- fprintf(stderr, "Warning! EPMD already exists (%u).\n", epmd_);
- return 0;
- }
- else {
- /* copy start args to signal */
- sig = alloc(sizeof(struct args_sig), 0);
- sig->args.argc = argc;
- for(i=0; i<argc; i++) {
- strcpy((sig->args.argv)[i], argv[i]);
- }
- /* start epmd and send signal */
- epmd_ = create_process(OS_BG_PROC, /* processtype */
- "epmd", /* name */
- ose_epmd, /* entrypoint */
- 16383, /* stacksize */
- 20, /* priority */
- 0, /* timeslice */
- 0, /* block */
- NULL,0,0); /* not used */
- efs_clone(epmd_);
- start(epmd_);
- send(&sig, epmd_);
-#ifdef DEBUG
- printf("EPMD ID: %li\n", epmd_);
-#endif
- }
- return 0;
-}
-
-OS_PROCESS(ose_epmd) {
- union SIGNAL *sig;
- static const SIGSELECT rec_any_sig[] = { 0 };
- int i, argc;
- char **argv;
-
- sig = receive((SIGSELECT*)rec_any_sig);
-
- argc = sig->args.argc;
- argv = (char **)malloc((argc+1)*sizeof(char *));
- for(i=0; i<argc; i++) {
- argv[i] = (char *)malloc(strlen((sig->args.argv)[i])+1);
- strcpy(argv[i], (sig->args.argv)[i]);
- }
- argv[argc] = NULL;
- free_buf(&sig);
-
- epmd(argc, argv);
-
- for(i=0; i<argc; i++) {
- free(argv[i]);
- }
- free(argv);
-}
-
-#else /* ifdef _OSE_ */
-
-/* VxWorks start function */
+#ifdef VXWORKS
int start_epmd(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
char *a0, *a1, *a2, *a3, *a4, *a5, *a6, *a7, *a8, *a9;
{
@@ -200,10 +116,7 @@ char *a0, *a1, *a2, *a3, *a4, *a5, *a6, *a7, *a8, *a9;
argc,(int) argv,1,
0,0,0,0,0,0,0);
}
-
-#endif /* _OSE_ */
-
-
+#endif /* VXWORKS */
int epmd(int argc, char **argv)
@@ -313,11 +226,17 @@ int main(int argc, char** argv)
else
usage(g);
epmd_cleanup_exit(g,0);
+ } else if (strcmp(argv[0], "-stop") == 0) {
+ if (argc == 2)
+ stop_cli(g, argv[1]);
+ else
+ usage(g);
+ epmd_cleanup_exit(g,0);
}
else
usage(g);
}
- dbg_printf(g,0,"epmd running - daemon = %d",g->is_daemon);
+ dbg_printf(g,1,"epmd running - daemon = %d",g->is_daemon);
#ifndef NO_SYSCONF
if ((g->max_conn = sysconf(_SC_OPEN_MAX)) <= 0)
@@ -453,7 +372,7 @@ static void run_daemon(EpmdVars *g)
}
#endif
-#if (defined(VXWORKS) || defined(_OSE_))
+#if defined(VXWORKS)
static void run_daemon(EpmdVars *g)
{
run(g);
@@ -469,7 +388,7 @@ static void run_daemon(EpmdVars *g)
static void usage(EpmdVars *g)
{
fprintf(stderr, "usage: epmd [-d|-debug] [DbgExtra...] [-port No] [-daemon]\n");
- fprintf(stderr, " [-d|-debug] [-port No] [-names|-kill]\n\n");
+ fprintf(stderr, " [-d|-debug] [-port No] [-names|-kill|-stop name]\n\n");
fprintf(stderr, "See the Erlang epmd manual page for info about the usage.\n");
fprintf(stderr, "The -port and DbgExtra options are\n\n");
fprintf(stderr, " -port No\n");
diff --git a/erts/epmd/src/epmd_cli.c b/erts/epmd/src/epmd_cli.c
index c12f711bc5..2aed861390 100644
--- a/erts/epmd/src/epmd_cli.c
+++ b/erts/epmd/src/epmd_cli.c
@@ -54,6 +54,42 @@ void kill_epmd(EpmdVars *g)
}
}
+void stop_cli(EpmdVars *g, char *name)
+{
+ char buf[1024];
+ int fd, rval, bsize;
+
+ bsize = strlen(name);
+ if (bsize > 1000) {
+	printf("epmd: Name too long!\n");
+ epmd_cleanup_exit(g, 1);
+ }
+
+ fd = conn_to_epmd(g);
+ bsize++;
+ put_int16(bsize, buf);
+ buf[2] = EPMD_STOP_REQ;
+ bsize += 2;
+ strcpy(buf+3, name);
+
+ if (write(fd, buf, bsize) != bsize) {
+ printf("epmd: Can't write to epmd\n");
+ epmd_cleanup_exit(g,1);
+ }
+ if ((rval = read_fill(fd,buf,7)) == 7) {
+ buf[7] = '\000';
+ printf("%s\n", buf);
+ epmd_cleanup_exit(g,0);
+ } else if (rval < 0) {
+ printf("epmd: failed to read answer from local epmd\n");
+ epmd_cleanup_exit(g,1);
+ } else { /* rval is now 0 or 1 */
+ buf[rval] = '\0';
+ printf("epmd: local epmd responded with <%s>\n", buf);
+ epmd_cleanup_exit(g,1);
+ }
+}
+
/* what == EPMD_NAMES_REQ || EPMD_DUMP_REQ */
void epmd_call(EpmdVars *g,int what)
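stop_cli() above uses the same request framing as the other epmd client calls: a 2-byte big-endian length, a 1-byte opcode, then the payload, where the length counts the opcode plus the payload. A stand-alone sketch of that framing (the helper name is invented; put_int16() in the real code expands to the same two stores):

    #include <string.h>

    /* Illustrative only: lays out an epmd request as
     *   <<Length:16/big, OpCode:8, Payload/binary>>
     * and returns the total number of bytes to write() on the socket. */
    static int frame_epmd_request(char *buf, unsigned char op,
                                  const char *payload, int plen)
    {
        int bsize = plen + 1;                 /* opcode + payload, as in stop_cli() */
        buf[0] = (char) ((bsize >> 8) & 0xff);
        buf[1] = (char) (bsize & 0xff);
        buf[2] = (char) op;
        memcpy(buf + 3, payload, plen);
        return bsize + 2;                     /* plus the two length bytes */
    }
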
diff --git a/erts/epmd/src/epmd_int.h b/erts/epmd/src/epmd_int.h
index 65fcf9bacb..5ead553f36 100644
--- a/erts/epmd/src/epmd_int.h
+++ b/erts/epmd/src/epmd_int.h
@@ -37,19 +37,6 @@
#define DONT_USE_MAIN
#endif
-#ifdef _OSE_
-#define NO_SYSLOG
-#define NO_SYSCONF
-#define NO_DAEMON
-#define DONT_USE_MAIN
-#ifndef HAVE_SYS_TIME_H
-#define HAVE_SYS_TIME_H
-#endif
-#ifndef HAVE_UNISTD_H
-#define HAVE_UNISTD_H
-#endif
-#endif
-
/* ************************************************************************ */
/* Standard includes */
@@ -92,7 +79,7 @@
#endif
#endif /* ! VXWORKS */
-#if (!defined(__WIN32__) && !defined(_OSE_))
+#if !defined(__WIN32__)
# include <netinet/in.h>
# include <sys/socket.h>
# include <sys/stat.h>
@@ -105,10 +92,8 @@
# include <netinet/tcp.h>
#endif /* ! WIN32 */
-#ifndef _OSE_
#include <ctype.h>
#include <signal.h>
-#endif
#include <errno.h>
@@ -126,13 +111,6 @@
#include <stdarg.h>
-#ifdef _OSE_
-# include "ose.h"
-# include "inet.h"
-# include "sys/stat.h"
-#endif
-
-
/* ************************************************************************ */
/* Replace some functions by others by making the function name a macro */
@@ -148,10 +126,6 @@
#define sleep(n) taskDelay((n) * sysClkRateGet())
#endif /* VXWORKS */
-#ifdef _OSE_
-#define sleep(n) delay((n))
-#endif
-
#ifdef USE_BCOPY
# define memcpy(a, b, c) bcopy((b), (a), (c))
# define memcmp(a, b, c) bcmp((a), (b), (c))
@@ -167,6 +141,10 @@
# define EADDRINUSE WSAEADDRINUSE
#endif
+#if defined(__WIN32__) && !defined(ECONNABORTED)
+# define ECONNABORTED WSAECONNABORTED
+#endif
+
#ifndef SOMAXCONN
# define SOMAXCONN 128
#endif
@@ -192,26 +170,18 @@
#define FAMILY AF_INET6
#define SET_ADDR_LOOPBACK(addr, af, port) do { \
- static u_int32_t __addr[4] = IN6ADDR_LOOPBACK_INIT; \
memset((char*)&(addr), 0, sizeof(addr)); \
(addr).sin6_family = (af); \
(addr).sin6_flowinfo = 0; \
- (addr).sin6_addr.s6_addr32[0] = __addr[0]; \
- (addr).sin6_addr.s6_addr32[1] = __addr[1]; \
- (addr).sin6_addr.s6_addr32[2] = __addr[2]; \
- (addr).sin6_addr.s6_addr32[3] = __addr[3]; \
+ (addr).sin6_addr = in6addr_loopback; \
(addr).sin6_port = htons(port); \
} while(0)
#define SET_ADDR_ANY(addr, af, port) do { \
- static u_int32_t __addr[4] = IN6ADDR_ANY_INIT; \
memset((char*)&(addr), 0, sizeof(addr)); \
(addr).sin6_family = (af); \
(addr).sin6_flowinfo = 0; \
- (addr).sin6_addr.s6_addr32[0] = __addr[0]; \
- (addr).sin6_addr.s6_addr32[1] = __addr[1]; \
- (addr).sin6_addr.s6_addr32[2] = __addr[2]; \
- (addr).sin6_addr.s6_addr32[3] = __addr[3]; \
+ (addr).sin6_addr = in6addr_any; \
(addr).sin6_port = htons(port); \
} while(0)
diff --git a/erts/epmd/src/epmd_srv.c b/erts/epmd/src/epmd_srv.c
index a033fab244..c836bf0bb7 100644
--- a/erts/epmd/src/epmd_srv.c
+++ b/erts/epmd/src/epmd_srv.c
@@ -138,7 +138,7 @@ void run(EpmdVars *g)
* because addresses will be reused even if they are still in use.
*/
-#if (!defined(__WIN32__) && !defined(_OSE_))
+#if !defined(__WIN32__)
/* We ignore the SIGPIPE signal that is raised when we call write
twice on a socket closed by the other end. */
signal(SIGPIPE, SIG_IGN);
@@ -156,10 +156,6 @@ void run(EpmdVars *g)
accept function is called. We set the listen socket
to be non blocking to prevent us from being hanging
in accept() waiting for the next request. */
-#ifdef _OSE_
- opt = 1;
- if (ioctl(listensock, FIONBIO, (char*)&opt) != 0)
-#else
#if (defined(__WIN32__) || defined(NO_FCNTL))
opt = 1;
if (ioctl(listensock, FIONBIO, &opt) != 0) /* Gives warning in VxWorks */
@@ -167,7 +163,6 @@ void run(EpmdVars *g)
opt = fcntl(listensock, F_GETFL, 0);
if (fcntl(listensock, F_SETFL, opt | O_NONBLOCK) == -1)
#endif /* __WIN32__ || VXWORKS */
-#endif /* _OSE_ */
dbg_perror(g,"failed to set non-blocking mode of listening socket %d",
listensock);
@@ -176,18 +171,6 @@ void run(EpmdVars *g)
SET_ADDR_ANY(iserv_addr, FAMILY, sport);
}
-#ifdef _OSE_
- {
- int optlen = sizeof(opt);
- opt = 1;
- if(getsockopt(listensock, SOL_SOCKET, SO_REUSEADDR,
- (void*)&opt, &optlen) < 0)
- fprintf(stderr, "\n\nGETSOCKOPT FAILS! %d\n\n", errno);
- else if(opt == 1)
- fprintf(stderr, "SO_REUSEADDR is set!\n");
- }
-#endif
-
if(bind(listensock,(struct sockaddr*) &iserv_addr, sizeof(iserv_addr)) < 0 )
{
if (errno == EADDRINUSE)
@@ -227,8 +210,16 @@ void run(EpmdVars *g)
timeout.tv_sec = (g->packet_timeout < IDLE_TIMEOUT) ? 1 : IDLE_TIMEOUT;
timeout.tv_usec = 0;
- if ((ret = select(g->max_conn,&read_mask,(fd_set *)0,(fd_set *)0,&timeout)) < 0)
+ if ((ret = select(g->max_conn,&read_mask,(fd_set *)0,(fd_set *)0,&timeout)) < 0) {
dbg_perror(g,"error in select ");
+ switch (errno) {
+ case EAGAIN:
+ case EINTR:
+ break;
+ default:
+ epmd_cleanup_exit(g,1);
+ }
+ }
else {
time_t now;
if (ret == 0) {
@@ -401,7 +392,14 @@ static int do_accept(EpmdVars *g,int listensock)
if (msgsock < 0) {
dbg_perror(g,"error in accept");
- return EPMD_FALSE;
+ switch (errno) {
+ case EAGAIN:
+ case ECONNABORTED:
+ case EINTR:
+ return EPMD_FALSE;
+ default:
+ epmd_cleanup_exit(g,1);
+ }
}
return conn_open(g,msgsock);
@@ -593,7 +591,7 @@ static void do_request(g, fd, s, buf, bsize)
if (bsize <= 1)
{
- dbg_printf(g,0,"packet to small for request PORT2_REQ (%d)", bsize);
+ dbg_printf(g,0,"packet too small for request PORT2_REQ (%d)", bsize);
return;
}
@@ -742,7 +740,7 @@ static void do_request(g, fd, s, buf, bsize)
dbg_printf(g,1,"** got STOP_REQ");
if (bsize <= 1 )
{
- dbg_printf(g,0,"packet to small for request STOP_REQ (%d)",bsize);
+ dbg_printf(g,0,"packet too small for request STOP_REQ (%d)",bsize);
return;
}
@@ -904,7 +902,7 @@ static void node_init(EpmdVars *g)
/* We have got a close on a connection and it may be a
- EPMD_ALIVE_CLOSE_REQ. Note that this call shouild be called
+ EPMD_ALIVE_CLOSE_REQ. Note that this call should be called
*before* calling conn_close() */
static int node_unreg(EpmdVars *g,char *name)
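The select() and accept() hunks above both split errno values into transient ones (EAGAIN, EINTR, and for accept() also ECONNABORTED), which simply skip the current round, and everything else, which now makes epmd exit instead of spinning on a broken descriptor. A condensed sketch of that accept policy, with invented names:

    #include <errno.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Sketch of the retry policy used above: transient errors mean
     * "try again on the next select() round", anything else is fatal. */
    static int accept_or_die(int listensock)
    {
        int msgsock = accept(listensock, (struct sockaddr *) NULL, NULL);
        if (msgsock < 0) {
            switch (errno) {
            case EAGAIN:
            case ECONNABORTED:
            case EINTR:
                return -1;   /* caller just skips this round */
            default:
                _exit(1);    /* stands in for epmd_cleanup_exit(g, 1) */
            }
        }
        return msgsock;
    }
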
diff --git a/erts/etc/common/Makefile.in b/erts/etc/common/Makefile.in
index 3db4fcba61..96655662b8 100644
--- a/erts/etc/common/Makefile.in
+++ b/erts/etc/common/Makefile.in
@@ -96,9 +96,9 @@ endif
# On windows we always need reentrant libraries.
ifeq ($(TARGET),win32)
-ERLEXEC_XLIBS=-L../../lib/internal/$(TARGET) -lerts_internal_r$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@
+ERTS_INTERNAL_LIBS=-L../../lib/internal/$(TARGET) -lerts_internal_r$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@
else
-ERLEXEC_XLIBS=-L../../lib/internal/$(TARGET) -lerts_internal$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@
+ERTS_INTERNAL_LIBS=-L../../lib/internal/$(TARGET) -lerts_internal$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@ -lm
endif
# ----------------------------------------------------
@@ -178,7 +178,7 @@ MC_OUTPUTS= \
MT_FLAG="-MD"
endif
INET_GETHOST = $(BINDIR)/inet_gethost.exe
-INSTALL_EMBEDDED_PROGS += $(BINDIR)/typer.exe $(BINDIR)/dialyzer.exe $(BINDIR)/erlc.exe $(BINDIR)/start_erl.exe $(BINDIR)/escript.exe
+INSTALL_EMBEDDED_PROGS += $(BINDIR)/typer.exe $(BINDIR)/dialyzer.exe $(BINDIR)/erlc.exe $(BINDIR)/start_erl.exe $(BINDIR)/escript.exe $(BINDIR)/run_test.exe
INSTALL_SRC = $(WINETC)/start_erl.c $(WINETC)/Nmakefile.start_erl
ERLEXECDIR=.
INSTALL_LIBS =
@@ -211,7 +211,7 @@ ERLSRV_OBJECTS=
MC_OUTPUTS=
INET_GETHOST = $(BINDIR)/inet_gethost@EXEEXT@
INSTALL_EMBEDDED_PROGS += $(BINDIR)/typer@EXEEXT@ $(BINDIR)/dialyzer@EXEEXT@ \
- $(BINDIR)/erlc@EXEEXT@ $(BINDIR)/escript@EXEEXT@ \
+ $(BINDIR)/erlc@EXEEXT@ $(BINDIR)/escript@EXEEXT@ $(BINDIR)/run_test@EXEEXT@ \
$(BINDIR)/run_erl $(BINDIR)/to_erl $(BINDIR)/dyn_erl
INSTALL_EMBEDDED_DATA = ../unix/start.src ../unix/start_erl.src
INSTALL_TOP = Install
@@ -274,6 +274,7 @@ endif
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/dyn_erl.o
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/safe_string.o
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/typer.o
+ rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/run_test.o
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/vxcall.o
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/erl.o
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/werl.o
@@ -295,7 +296,7 @@ $(OBJDIR)/inet_gethost.o: inet_gethost.c
$(CC) $(CFLAGS) -o $@ -c inet_gethost.c
$(BINDIR)/inet_gethost@EXEEXT@: $(OBJDIR)/inet_gethost.o $(ENTRY_OBJ)
- $(PURIFY) $(LD) $(LDFLAGS) $(ENTRY_LDFLAGS) -o $@ $(OBJDIR)/inet_gethost.o $(ENTRY_OBJ) $(LIBS)
+ $(PURIFY) $(LD) $(LDFLAGS) $(ENTRY_LDFLAGS) -o $@ $(OBJDIR)/inet_gethost.o $(ENTRY_OBJ) $(LIBS) $(ERTS_INTERNAL_LIBS)
$(BINDIR)/run_erl: $(OBJDIR)/safe_string.o $(OBJDIR)/run_erl.o
$(LD) $(LDFLAGS) -o $@ $(OBJDIR)/safe_string.o $(OBJDIR)/run_erl.o $(LIBS)
@@ -320,7 +321,7 @@ $(OBJDIR)/safe_string.o: ../unix/safe_string.c
ifneq ($(TARGET),win32)
$(BINDIR)/$(ERLEXEC): $(OBJDIR)/$(ERLEXEC).o
- $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/$(ERLEXEC).o $(ERLEXEC_XLIBS)
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/$(ERLEXEC).o $(ERTS_INTERNAL_LIBS)
$(OBJDIR)/$(ERLEXEC).o: $(ERLEXECDIR)/$(ERLEXEC).c
$(CC) -I$(EMUDIR) $(CFLAGS) -o $@ -c $(ERLEXECDIR)/$(ERLEXEC).c
@@ -349,6 +350,13 @@ $(BINDIR)/escript@EXEEXT@: $(OBJDIR)/escript.o
$(OBJDIR)/escript.o: escript.c
$(CC) $(CFLAGS) -o $@ -c escript.c
+$(BINDIR)/run_test@EXEEXT@: $(OBJDIR)/run_test.o
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/run_test.o -L$(OBJDIR) $(LIBS)
+
+$(OBJDIR)/run_test.o: run_test.c
+ $(CC) $(CFLAGS) -o $@ -c run_test.c
+
+
#------------------------------------------------------------------------
# Windows specific targets
# The windows platform is quite different from the others. erl/werl are small C programs
@@ -360,7 +368,7 @@ $(OBJDIR)/escript.o: escript.c
ifeq ($(TARGET),win32)
$(BINDIR)/$(ERLEXEC): $(OBJDIR)/erlexec.o $(OBJDIR)/win_erlexec.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ)
- $(LD) -dll $(LDFLAGS) -o $@ $(OBJDIR)/erlexec.o $(OBJDIR)/win_erlexec.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ) $(ERLEXEC_XLIBS)
+ $(LD) -dll $(LDFLAGS) -o $@ $(OBJDIR)/erlexec.o $(OBJDIR)/win_erlexec.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ) $(ERTS_INTERNAL_LIBS)
$(BINDIR)/erl@EXEEXT@: $(OBJDIR)/erl.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ)
$(LD) $(LDFLAGS) -o $@ $(OBJDIR)/erl.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ)
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index f79f5cc978..76ce0a7e3c 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -131,6 +131,12 @@ static char *plush_val_switches[] = {
NULL
};
+/* +r arguments with values */
+static char *plusr_val_switches[] = {
+ "g",
+ NULL
+};
+
/*
* Define sleep(seconds) in terms of Sleep() on Windows.
@@ -872,6 +878,21 @@ int main(int argc, char **argv)
i++;
}
break;
+ case 'r':
+ if (!is_one_of_strings(&argv[i][2],
+ plusr_val_switches))
+ goto the_default;
+ else {
+ if (i+1 >= argc
+ || argv[i+1][0] == '-'
+ || argv[i+1][0] == '+')
+ usage(argv[i]);
+ argv[i][0] = '-';
+ add_Eargs(argv[i]);
+ add_Eargs(argv[i+1]);
+ i++;
+ }
+ break;
case 's':
if (!is_one_of_strings(&argv[i][2],
pluss_val_switches))
@@ -1069,11 +1090,12 @@ usage_aux(void)
"[-hybrid] "
#endif
"[-make] [-man [manopts] MANPAGE] [-x] [-emu_args] "
- "[-args_file FILENAME] "
- "[+A THREADS] [+a SIZE] [+B[c|d|i]] [+c] [+h HEAP_SIZE_OPTION] [+K BOOLEAN] "
+ "[-args_file FILENAME] [+A THREADS] [+a SIZE] [+B[c|d|i]] [+c] "
+ "[+h HEAP_SIZE_OPTION] [+K BOOLEAN] "
"[+l] [+M<SUBSWITCH> <ARGUMENT>] [+P MAX_PROCS] [+R COMPAT_REL] "
- "[+r] [+s SCHEDULER_OPTION] [+S NO_SCHEDULERS:NO_SCHEDULERS_ONLINE] [+T LEVEL] [+V] [+v] [+W<i|w>] "
- "[args ...]\n");
+ "[+r] [+rg READER_GROUPS_LIMIT] [+s SCHEDULER_OPTION] "
+ "[+S NO_SCHEDULERS:NO_SCHEDULERS_ONLINE] [+T LEVEL] [+V] [+v] "
+ "[+W<i|w>] [args ...]\n");
exit(1);
}
diff --git a/erts/etc/common/inet_gethost.c b/erts/etc/common/inet_gethost.c
index ff16ee02c4..d3ff4874ac 100644
--- a/erts/etc/common/inet_gethost.c
+++ b/erts/etc/common/inet_gethost.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -52,6 +52,8 @@
# include "config.h"
#endif
+#include "erl_printf.h"
+
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
@@ -2552,7 +2554,7 @@ static void debugf(char *format, ...)
sprintf(buff,"%s[%d] (DEBUG):",program_name,(int) getpid());
#endif
ptr = buff + strlen(buff);
- vsprintf(ptr,format,ap);
+ erts_vsnprintf(ptr,sizeof(buff)-strlen(buff)-2,format,ap);
strcat(ptr,"\r\n");
#ifdef WIN32
if (debug_console_allocated != INVALID_HANDLE_VALUE) {
@@ -2574,7 +2576,7 @@ static void warning(char *format, ...)
va_start(ap,format);
sprintf(buff,"%s[%d]: WARNING:",program_name, (int) getpid());
ptr = buff + strlen(buff);
- vsprintf(ptr,format,ap);
+ erts_vsnprintf(ptr,sizeof(buff)-strlen(buff)-2,format,ap);
strcat(ptr,"\r\n");
#ifdef WIN32
{
@@ -2596,7 +2598,7 @@ static void fatal(char *format, ...)
va_start(ap,format);
sprintf(buff,"%s[%d]: FATAL ERROR:",program_name, (int) getpid());
ptr = buff + strlen(buff);
- vsprintf(ptr,format,ap);
+ erts_vsnprintf(ptr,sizeof(buff)-strlen(buff)-2,format,ap);
strcat(ptr,"\r\n");
#ifdef WIN32
{
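The three hunks above replace unbounded vsprintf() calls with erts_vsnprintf(), limiting the formatted part of the message to what is left of buff, minus room for the "\r\n" appended afterwards. The same pattern with the standard vsnprintf(), for illustration only:

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch: format a prefixed log line into a fixed buffer without
     * overrunning it; erts_vsnprintf() in the patch plays the role
     * vsnprintf() plays here. */
    static void log_line(const char *prefix, const char *format, ...)
    {
        char buff[2048];
        char *ptr;
        va_list ap;

        sprintf(buff, "%s: ", prefix);        /* fixed, known-short prefix */
        ptr = buff + strlen(buff);
        va_start(ap, format);
        vsnprintf(ptr, sizeof(buff) - strlen(buff) - 2, format, ap);
        va_end(ap);
        strcat(ptr, "\r\n");                  /* room reserved by the -2 above */
        fputs(buff, stderr);
    }
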
diff --git a/erts/etc/common/run_test.c b/erts/etc/common/run_test.c
new file mode 100644
index 0000000000..016d9c6afd
--- /dev/null
+++ b/erts/etc/common/run_test.c
@@ -0,0 +1,514 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * Purpose: Common Test front-end.
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#ifdef __WIN32__
+#include <winbase.h>
+#endif
+
+#include <ctype.h>
+
+#define NO 0
+#define YES 1
+
+#define ASIZE(a) (sizeof(a)/sizeof(a[0]))
+
+static int debug = 0; /* Bit flags for debug printouts. */
+
+static char** eargv_base; /* Base of vector. */
+static char** eargv; /* First argument for erl. */
+
+static int eargc; /* Number of arguments in eargv. */
+
+#ifdef __WIN32__
+# define QUOTE(s) possibly_quote(s)
+# define IS_DIRSEP(c) ((c) == '/' || (c) == '\\')
+# define ERL_NAME "erl.exe"
+#else
+# define QUOTE(s) s
+# define IS_DIRSEP(c) ((c) == '/')
+# define ERL_NAME "erl"
+#endif
+
+#define UNSHIFT(s) eargc++, eargv--; eargv[0] = QUOTE(s)
+#define PUSH(s) eargv[eargc++] = QUOTE(s)
+#define PUSH2(s, t) PUSH(s); PUSH(t)
+#define PUSH3(s, t, u) PUSH2(s, t); PUSH(u)
+#define PUSH4(s, t, u, v) PUSH2(s, t); PUSH2(u, v)
+
+/*
+ * The possible modes to start Common Test
+ */
+
+#define NORMAL_MODE 0
+#define VTS_MODE 1
+#define CT_SHELL_MODE 2
+#define MASTER_MODE 3
+#define ERL_SHELL_MODE 4
+
+/*
+ * Distribution
+ */
+
+#define SHORT_NAME 0
+#define FULL_NAME 1
+
+/*
+ * Local functions.
+ */
+
+static void error(char* format, ...);
+static char* emalloc(size_t size);
+static char* strsave(char* string);
+static void push_words(char* src);
+static int run_erlang(char* name, char** argv);
+static char* get_default_emulator(char* progname);
+#ifdef __WIN32__
+static char* possibly_quote(char* arg);
+#endif
+
+/*
+ * Supply a strerror() function if libc doesn't.
+ */
+#ifndef HAVE_STRERROR
+
+extern int sys_nerr;
+
+#ifndef SYS_ERRLIST_DECLARED
+extern const char * const sys_errlist[];
+#endif /* !SYS_ERRLIST_DECLARED */
+
+char *strerror(int errnum)
+{
+    static char emsg[1024];
+
+ if (errnum != 0) {
+ if (errnum > 0 && errnum < sys_nerr)
+ sprintf((char *) &emsg[0], "(%s)", sys_errlist[errnum]);
+ else
+ sprintf((char *) &emsg[0], "errnum = %d ", errnum);
+ }
+ else {
+ emsg[0] = '\0';
+ }
+ return (char *) &emsg[0];
+}
+#endif /* !HAVE_STRERROR */
+
+int
+main(int argc, char** argv)
+{
+ int eargv_size;
+ int eargc_base; /* How many arguments in the base of eargv. */
+ char* emulator;
+ char nodename[100];
+ char browser[100];
+ int ct_mode;
+ int dist_mode;
+ int cnt;
+ int erl_args;
+ char** argv0 = argv;
+
+ emulator = get_default_emulator(argv[0]);
+
+ /*
+ * Allocate the argv vector to be used for arguments to Erlang.
+ * Arrange to start pushing information in the middle of
+ * the array, to allow easy addition of commands at the beginning.
+ */
+
+ eargv_size = argc*4+100;
+ eargv_base = (char **) emalloc(eargv_size*sizeof(char*));
+ eargv = eargv_base;
+ eargc = 0;
+ push_words(emulator);
+ eargc_base = eargc;
+ eargv = eargv + eargv_size/2;
+ eargc = 0;
+
+ strcpy(nodename, "ct");
+ dist_mode = SHORT_NAME;
+ browser[0] = '\0';
+ ct_mode = NORMAL_MODE;
+ erl_args = argc;
+ cnt = 1;
+
+ /*
+ * Check various flags before building command line
+ */
+
+ while (cnt < argc) {
+ if (strcmp(argv[1], "-erl_args") == 0) {
+ erl_args = cnt;
+ }
+ else if (strcmp(argv[1], "-sname") == 0) {
+ strcpy(nodename, argv[2]);
+ cnt++, argv++;
+ }
+ else if (strcmp(argv[1], "-name") == 0) {
+ strcpy(nodename, argv[2]);
+ dist_mode = FULL_NAME;
+ cnt++, argv++;
+ }
+ else {
+ if (cnt < erl_args) {
+ if (strcmp(argv[1], "-vts") == 0) {
+ ct_mode = VTS_MODE;
+ }
+ else if (strcmp(argv[1], "-browser") == 0) {
+ strcpy(browser, argv[2]);
+ cnt++, argv++;
+ }
+ else if (strcmp(argv[1], "-shell") == 0) {
+ ct_mode = CT_SHELL_MODE;
+ }
+ else if (strcmp(argv[1], "-ctmaster") == 0) {
+ strcpy(nodename, "ct_master");
+ ct_mode = MASTER_MODE;
+ }
+ else if (strcmp(argv[1], "-ctname") == 0) {
+ strcpy(nodename, argv[2]);
+ ct_mode = ERL_SHELL_MODE;
+ cnt++, argv++;
+ }
+ }
+ }
+ cnt++, argv++;
+ }
+
+ argv = argv0;
+
+ /*
+ * Push initial arguments.
+ */
+
+ if (dist_mode == FULL_NAME) {
+ PUSH2("-name", nodename);
+ }
+ else {
+ PUSH2("-sname", nodename);
+ }
+
+ /*
+ * Push everything else
+ */
+
+ if (ct_mode == VTS_MODE) {
+ PUSH4("-s", "webtool", "script_start", "vts");
+ if (browser[0] != '\0') PUSH(browser);
+ PUSH3("-s", "ct_run", "script_start");
+ }
+ else if (ct_mode == CT_SHELL_MODE) {
+ PUSH3("-s", "ct_run", "script_start");
+ }
+ else if (ct_mode == NORMAL_MODE) {
+ PUSH3("-s", "ct_run", "script_start");
+ PUSH3("-s", "erlang", "halt");
+ }
+
+ cnt = 1;
+ while (cnt < argc) {
+ if (strcmp(argv[1], "-erl_args") == 0) {
+ PUSH("-ct_erl_args");
+ }
+ else if ((strcmp(argv[1], "-sname") == 0) || (strcmp(argv[1], "-name") == 0)) {
+ cnt++, argv++;
+ }
+ else if (cnt < erl_args) {
+ if (strcmp(argv[1], "-config") == 0)
+ PUSH("-ct_config");
+ else if (strcmp(argv[1], "-decrypt_key") == 0)
+ PUSH("-ct_decrypt_key");
+ else if (strcmp(argv[1], "-decrypt_file") == 0)
+ PUSH("-ct_decrypt_file");
+ else
+ PUSH(argv[1]);
+ }
+ else {
+ PUSH(argv[1]);
+ }
+ cnt++, argv++;
+ }
+
+ /*
+ * Move up the commands for invoking the emulator and adjust eargv
+ * accordingly.
+ */
+
+ while (--eargc_base >= 0) {
+ UNSHIFT(eargv_base[eargc_base]);
+ }
+
+ /*
+ * Invoke Erlang with the collected options.
+ */
+
+ PUSH(NULL);
+
+ return run_erlang(eargv[0], eargv);
+}
+
+static void
+push_words(char* src)
+{
+ char sbuf[1024];
+ char* dst;
+
+ dst = sbuf;
+ while ((*dst++ = *src++) != '\0') {
+ if (isspace((int)*src)) {
+ *dst = '\0';
+ PUSH(strsave(sbuf));
+ dst = sbuf;
+ do {
+ src++;
+ } while (isspace((int)*src));
+ }
+ }
+ if (sbuf[0])
+ PUSH(strsave(sbuf));
+}
+#ifdef __WIN32__
+char *make_commandline(char **argv)
+{
+ static char *buff = NULL;
+ static int siz = 0;
+ int num = 0;
+ char **arg, *p;
+
+ if (*argv == NULL) {
+ return "";
+ }
+ for (arg = argv; *arg != NULL; ++arg) {
+ num += strlen(*arg)+1;
+ }
+ if (!siz) {
+ siz = num;
+ buff = malloc(siz*sizeof(char));
+ } else if (siz < num) {
+ siz = num;
+ buff = realloc(buff,siz*sizeof(char));
+ }
+ p = buff;
+ for (arg = argv; *arg != NULL; ++arg) {
+ strcpy(p,*arg);
+ p+=strlen(*arg);
+ *p++=' ';
+ }
+ *(--p) = '\0';
+
+ if (debug) {
+ printf("Processed commandline:%s\n",buff);
+ }
+ return buff;
+}
+
+int my_spawnvp(char **argv)
+{
+ STARTUPINFO siStartInfo;
+ PROCESS_INFORMATION piProcInfo;
+ DWORD ec;
+
+ memset(&siStartInfo,0,sizeof(STARTUPINFO));
+ siStartInfo.cb = sizeof(STARTUPINFO);
+ siStartInfo.dwFlags = STARTF_USESTDHANDLES;
+ siStartInfo.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
+ siStartInfo.hStdOutput = GetStdHandle(STD_OUTPUT_HANDLE);
+ siStartInfo.hStdError = GetStdHandle(STD_ERROR_HANDLE);
+ siStartInfo.wShowWindow = SW_HIDE;
+ siStartInfo.dwFlags |= STARTF_USESHOWWINDOW;
+
+
+ if (!CreateProcess(NULL,
+ make_commandline(argv),
+ NULL,
+ NULL,
+ TRUE,
+ 0,
+ NULL,
+ NULL,
+ &siStartInfo,
+ &piProcInfo)) {
+ return -1;
+ }
+ CloseHandle(piProcInfo.hThread);
+
+ WaitForSingleObject(piProcInfo.hProcess,INFINITE);
+ if (!GetExitCodeProcess(piProcInfo.hProcess,&ec)) {
+ return 0;
+ }
+ return (int) ec;
+}
+#endif /* __WIN32__ */
+
+
+static int
+run_erlang(char* progname, char** argv)
+{
+#ifdef __WIN32__
+ int status;
+#endif
+
+ if (debug) {
+ int i = 0;
+ while (argv[i] != NULL)
+ printf(" %s", argv[i++]);
+ printf("\n");
+ }
+
+#ifdef __WIN32__
+ /*
+ * Alas, we must wait here for the program to finish.
+ * Otherwise, the shell from which we were executed will think
+ * we are finished and print a prompt and read keyboard input.
+ */
+
+ status = my_spawnvp(argv)/*_spawnvp(_P_WAIT,progname,argv)*/;
+ if (status == -1) {
+ fprintf(stderr, "run_test: Error executing '%s': %d", progname,
+ GetLastError());
+ }
+ return status;
+#else
+ execvp(progname, argv);
+ error("Error %d executing \'%s\'.", errno, progname);
+ return 2;
+#endif
+}
+
+static void
+error(char* format, ...)
+{
+ char sbuf[1024];
+ va_list ap;
+
+ va_start(ap, format);
+ vsprintf(sbuf, format, ap);
+ va_end(ap);
+ fprintf(stderr, "run_test: %s\n", sbuf);
+ exit(1);
+}
+
+static char*
+emalloc(size_t size)
+{
+ char *p = malloc(size);
+ if (p == NULL)
+ error("Insufficient memory");
+ return p;
+}
+
+static char*
+strsave(char* string)
+{
+ char* p = emalloc(strlen(string)+1);
+ strcpy(p, string);
+ return p;
+}
+
+static char*
+get_default_emulator(char* progname)
+{
+ char sbuf[MAXPATHLEN];
+ char* s;
+
+ strcpy(sbuf, progname);
+ for (s = sbuf+strlen(sbuf); s >= sbuf; s--) {
+ if (IS_DIRSEP(*s)) {
+ strcpy(s+1, ERL_NAME);
+#ifdef __WIN32__
+ if (_access(sbuf, 0) != -1) {
+ return strsave(sbuf);
+ }
+#else
+ if (access(sbuf, 1) != -1) {
+ return strsave(sbuf);
+ }
+#endif
+ break;
+ }
+ }
+ return ERL_NAME;
+}
+
+#ifdef __WIN32__
+static char*
+possibly_quote(char* arg)
+{
+ int mustQuote = NO;
+ int n = 0;
+ char* s;
+ char* narg;
+
+ if (arg == NULL) {
+ return arg;
+ }
+
+ /*
+ * Scan the string to find out if it needs quoting and return
+ * the original argument if not.
+ */
+
+ for (s = arg; *s; s++, n++) {
+ switch(*s) {
+ case ' ':
+ mustQuote = YES;
+ continue;
+ case '"':
+ mustQuote = YES;
+ n++;
+ continue;
+ case '\\':
+ if(s[1] == '"')
+ n++;
+ continue;
+ default:
+ continue;
+ }
+ }
+ if (!mustQuote) {
+ return arg;
+ }
+
+ /*
+ * Insert the quotes and put a backslash in front of every quote
+ * inside the string.
+ */
+
+ s = narg = emalloc(n+2+1);
+ for (*s++ = '"'; *arg; arg++, s++) {
+ if (*arg == '"' || (*arg == '\\' && arg[1] == '"')) {
+ *s++ = '\\';
+ }
+ *s = *arg;
+ }
+ if (s[-1] == '\\') {
+ *s++ ='\\';
+ }
+ *s++ = '"';
+ *s = '\0';
+ return narg;
+}
+#endif /* __WIN32__ */
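Note: run_test.c above builds the emulator command line with the same "push from the middle" trick used by the other erts front-ends: user-visible arguments are PUSHed into the middle of an oversized vector, and the emulator words are UNSHIFTed in front of them afterwards without moving anything. The following is a minimal standalone sketch of that technique only; the names and the fixed vector size are illustrative assumptions, not the code above.

/* Sketch of the "push from the middle" argv-building technique.
 * Illustration only; names are not taken from run_test.c. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *emulator_words[] = { "erl" };   /* would come from push_words() */
    int n_emulator = 1;
    int size = 32;                              /* deliberately oversized */
    char **base = malloc(size * sizeof(char *));
    char **argv;
    int argc = 0;

    if (base == NULL)
        return 1;
    argv = base + size / 2;                     /* start pushing in the middle */

    /* PUSH: append user-visible arguments */
    argv[argc++] = "-sname";
    argv[argc++] = "ct";
    argv[argc++] = "-s";
    argv[argc++] = "ct_run";
    argv[argc++] = "script_start";

    /* UNSHIFT: prepend the emulator words without shifting the rest */
    while (n_emulator-- > 0) {
        argc++;
        argv--;
        argv[0] = (char *) emulator_words[n_emulator];
    }
    argv[argc] = NULL;

    for (int i = 0; argv[i] != NULL; i++)
        printf("%s ", argv[i]);        /* prints: erl -sname ct -s ct_run script_start */
    printf("\n");
    free(base);
    return 0;
}
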
diff --git a/erts/etc/unix/Install.src b/erts/etc/unix/Install.src
index 83f9690782..7dead62ab0 100644
--- a/erts/etc/unix/Install.src
+++ b/erts/etc/unix/Install.src
@@ -89,6 +89,7 @@ cp -p $ERL_ROOT/erts-%I_VSN%/bin/erl .
cp -p $ERL_ROOT/erts-%I_VSN%/bin/erlc .
cp -p $ERL_ROOT/erts-%I_VSN%/bin/dialyzer .
cp -p $ERL_ROOT/erts-%I_VSN%/bin/typer .
+cp -p $ERL_ROOT/erts-%I_VSN%/bin/run_test .
cp -p $ERL_ROOT/erts-%I_VSN%/bin/escript .
#
diff --git a/erts/etc/unix/cerl.src b/erts/etc/unix/cerl.src
index f81ef6b0fe..9dab9fcfcc 100644
--- a/erts/etc/unix/cerl.src
+++ b/erts/etc/unix/cerl.src
@@ -1,20 +1,20 @@
#!/bin/sh
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2003-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 2003-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
#
@@ -277,6 +277,16 @@ else
;;
esac
+ # Set annotation level for gdb in emacs 22 and higher.
+ emacs_major=`$EMACS --version | head -1 | sed 's,^[^0-9]*\([0-9]*\).*,\1,g'`
+ if [ '!' -z "$emacs_major" -a $emacs_major -gt 21 ]; then
+ # Hack - wait for etp-commands to be loaded and then set
+ # annotation level, could be done more beautifully than with sit-for...
+ gdbcmd="$gdbcmd \
+ (sit-for 1) \
+ (insert-string \"set annotate 3\") \
+ (comint-send-input)"
+ fi
gdbcmd="$gdbcmd $GDBBP \
(insert-string \"source $ROOTDIR/erts/etc/unix/etp-commands\") \
(comint-send-input)"
diff --git a/erts/etc/win32/Install.c b/erts/etc/win32/Install.c
index 4a559cd8a2..ca814e3f80 100644
--- a/erts/etc/win32/Install.c
+++ b/erts/etc/win32/Install.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/*
@@ -45,8 +45,8 @@ int main(int argc, char **argv)
InitSection *ini_section;
HANDLE module = GetModuleHandle(NULL);
char *binaries[] = { "erl.exe", "werl.exe", "erlc.exe",
- "dialyzer.exe", "typer.exe",
- "escript.exe", NULL };
+ "dialyzer.exe", "typer.exe",
+ "escript.exe", "run_test.exe", NULL };
char *scripts[] = { "start_clean.boot", "start_sasl.boot", NULL };
char fromname[MAX_PATH];
char toname[MAX_PATH];
diff --git a/erts/etc/win32/cygwin_tools/vc/ld.sh b/erts/etc/win32/cygwin_tools/vc/ld.sh
index ac39bf871c..b04935ed9b 100755
--- a/erts/etc/win32/cygwin_tools/vc/ld.sh
+++ b/erts/etc/win32/cygwin_tools/vc/ld.sh
@@ -3,7 +3,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2002-2009. All Rights Reserved.
+# Copyright Ericsson AB 2002-2010. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -167,6 +167,9 @@ eval link.exe "$CMD" >/tmp/link.exe.${p}.1 2>/tmp/link.exe.${p}.2
RES=$?
CMANIFEST=`cygpath $MANIFEST`
if [ "$RES" = "0" -a -f "$CMANIFEST" ]; then
+ # Add stuff to manifest to turn off "virtualization"
+ sed -i "s/<\/assembly>/ <ms_asmv2:trustInfo xmlns:ms_asmv2=\"urn:schemas-microsoft-com:asm.v2\">\n <ms_asmv2:security>\n <ms_asmv2:requestedPrivileges>\n <ms_asmv2:requestedExecutionLevel level=\"AsInvoker\" uiAccess=\"false\"\/>\n <\/ms_asmv2:requestedPrivileges>\n <\/ms_asmv2:security>\n <\/ms_asmv2:trustInfo>\n<\/assembly>/" $CMANIFEST
+
eval mt.exe -nologo -manifest "$MANIFEST" -outputresource:"$OUTPUTRES" >>/tmp/link.exe.${p}.1 2>>/tmp/link.exe.${p}.2
RES=$?
if [ "$RES" != "0" ]; then
diff --git a/erts/etc/win32/cygwin_tools/vc/rc.sh b/erts/etc/win32/cygwin_tools/vc/rc.sh
index 6a6921c49e..054c672e64 100755
--- a/erts/etc/win32/cygwin_tools/vc/rc.sh
+++ b/erts/etc/win32/cygwin_tools/vc/rc.sh
@@ -2,20 +2,20 @@
# set -x
#
# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2002-2009. All Rights Reserved.
-#
+#
+# Copyright Ericsson AB 2002-2010. All Rights Reserved.
+#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
-#
+#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
-#
+#
# %CopyrightEnd%
#
# Save the command line for debug outputs
@@ -80,7 +80,7 @@ if [ "X$RC_SH_DEBUG_LOG" != "X" ]; then
fi
eval $RCC "$CMD" >/tmp/rc.exe.${p}.1 2>/tmp/rc.exe.${p}.2
RES=$?
-tail +2 /tmp/rc.exe.${p}.2 >&2
+tail -n +2 /tmp/rc.exe.${p}.2 >&2
cat /tmp/rc.exe.${p}.1
rm -f /tmp/rc.exe.${p}.2 /tmp/rc.exe.${p}.1
exit $RES
diff --git a/erts/include/erl_int_sizes_config.h.in b/erts/include/erl_int_sizes_config.h.in
index ef49995732..4b8a8e1b98 100644
--- a/erts/include/erl_int_sizes_config.h.in
+++ b/erts/include/erl_int_sizes_config.h.in
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,3 +31,6 @@
/* The number of bytes in a long long. */
#undef SIZEOF_LONG_LONG
+
+/* Define if building a halfword-heap 64-bit emulator (needed for NIFs) */
+#undef HALFWORD_HEAP_EMULATOR
diff --git a/erts/include/internal/erl_misc_utils.h b/erts/include/internal/erl_misc_utils.h
index 82e9ba3798..507e1726f4 100644
--- a/erts/include/internal/erl_misc_utils.h
+++ b/erts/include/internal/erl_misc_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -34,7 +34,7 @@ typedef struct {
erts_cpu_info_t *erts_cpu_info_create(void);
void erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo);
-void erts_cpu_info_update(erts_cpu_info_t *cpuinfo);
+int erts_cpu_info_update(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_configured(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_online(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_available(erts_cpu_info_t *cpuinfo);
@@ -50,4 +50,9 @@ int erts_unbind_from_cpu_str(char *str);
int erts_milli_sleep(long);
+#ifdef __WIN32__
+int erts_map_win_error_to_errno(DWORD win_error);
+int erts_get_last_win_errno(void);
+#endif
+
#endif /* #ifndef ERL_MISC_UTILS_H_ */
diff --git a/erts/include/internal/ethr_internal.h b/erts/include/internal/ethr_internal.h
new file mode 100644
index 0000000000..e9c3daf783
--- /dev/null
+++ b/erts/include/internal/ethr_internal.h
@@ -0,0 +1,67 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Internal ethread exports
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_INTERNAL_H__
+#define ETHR_INTERNAL_H__
+
+#include "erl_misc_utils.h"
+
+extern ethr_memory_allocators ethr_mem__;
+extern erts_cpu_info_t *ethr_cpu_info__;
+extern size_t ethr_pagesize__;
+extern size_t ethr_min_stack_size__; /* kilo words */
+extern size_t ethr_max_stack_size__; /* kilo words */
+extern int ethr_not_completely_inited__;
+extern int ethr_not_inited__;
+
+extern void *(*ethr_thr_prepare_func__)(void);
+extern void (*ethr_thr_parent_func__)(void *);
+extern void (*ethr_thr_child_func__)(void *);
+
+#define ETHR_PAGE_ALIGN(SZ) \
+ (((((size_t) (SZ)) - 1)/ethr_pagesize__ + 1)*ethr_pagesize__)
+#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
+#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
+
+#undef ETHR_STACK_GUARD_SIZE
+#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
+# define ETHR_STACK_GUARD_SIZE (ethr_pagesize__)
+#endif
+
+/* implemented in lib_src/<thr-lib>/ethread.c */
+int ethr_set_tse__(ethr_ts_event *tsep);
+ethr_ts_event *ethr_get_tse__(void);
+ETHR_PROTO_NORETURN__ ethr_abort__(void);
+#ifdef ETHR_WIN32_THREADS
+int ethr_win_get_errno__(void);
+#endif
+
+/* implemented in lib_src/common/ethread_aux.c */
+int ethr_init_common__(ethr_init_data *id);
+int ethr_late_init_common__(ethr_late_init_data *lid);
+void ethr_run_exit_handlers__(void);
+void ethr_ts_event_destructor__(void *vtsep);
+
+
+#endif
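Note: the stack-size helpers in ethr_internal.h round byte counts up to whole pages and convert between bytes and kilowords (1024 pointer-sized words). Below is a small standalone check of that round-up arithmetic; the 4096-byte PAGESIZE constant stands in for ethr_pagesize__ and is an assumption for illustration only.

/* Quick check of the rounding behind ETHR_PAGE_ALIGN / ETHR_B2KW / ETHR_KW2B. */
#include <stdio.h>
#include <stddef.h>

#define PAGESIZE 4096          /* stand-in for ethr_pagesize__ */
#define PAGE_ALIGN(SZ)  (((((size_t)(SZ)) - 1)/PAGESIZE + 1)*PAGESIZE)
#define B2KW(B)  ((((size_t)(B)) - 1)/(sizeof(void *)*1024) + 1)   /* bytes -> kilowords, rounded up */
#define KW2B(KW) (((size_t)(KW))*sizeof(void *)*1024)              /* kilowords -> bytes */

int main(void)
{
    printf("%zu\n", PAGE_ALIGN(1));        /* 4096: anything up to one page rounds up to one page */
    printf("%zu\n", PAGE_ALIGN(4096));     /* 4096: already aligned sizes are unchanged */
    printf("%zu\n", PAGE_ALIGN(4097));     /* 8192 */
    printf("%zu\n", B2KW(1));              /* 1 kiloword */
    printf("%zu\n", KW2B(B2KW(70000)));    /* at least 70000, a whole number of kilowords */
    return 0;
}
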
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
new file mode 100644
index 0000000000..4ce3e75c78
--- /dev/null
+++ b/erts/include/internal/ethr_mutex.h
@@ -0,0 +1,510 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_MUTEX_H__
+#define ETHR_MUTEX_H__
+
+#define ETHR_RWMUTEX_INITIALIZED 0x99999999
+#define ETHR_MUTEX_INITIALIZED 0x77777777
+#define ETHR_COND_INITIALIZED 0x55555555
+
+#if 0
+# define ETHR_MTX_HARD_DEBUG
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+# ifdef __GNUC__
+# warning ETHR_MTX_HARD_DEBUG
+# endif
+/*# define ETHR_MTX_HARD_DEBUG_LFS*/
+/*# define ETHR_MTX_HARD_DEBUG_FENCE*/
+/*# define ETHR_MTX_HARD_DEBUG_Q*/
+# define ETHR_MTX_HARD_DEBUG_WSQ
+
+# if !defined(ETHR_MTX_HARD_DEBUG_WSQ) && defined(ETHR_MTX_HARD_DEBUG_Q)
+# define ETHR_MTX_HARD_DEBUG_WSQ
+# endif
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#if 0
+# define ETHR_MTX_Q_LOCK_SPINLOCK__
+# define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
+#elif defined(ETHR_PTHREADS)
+# define ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__
+# define ETHR_MTX_QLOCK_TYPE__ pthread_mutex_t
+#elif defined(ETHR_WIN32_THREADS)
+# define ETHR_MTX_Q_LOCK_CRITICAL_SECTION__
+# define ETHR_MTX_QLOCK_TYPE__ CRITICAL_SECTION
+#else
+# error Need a qlock implementation
+#endif
+
+#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+
+/* frequent read kind */
+#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
+#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_FLG__ - 1)
+#define ETHR_RWMTX_R_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+/* normal kind */
+#define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+#define ETHR_RWMTX_WAIT_FLGS__ \
+ (ETHR_RWMTX_W_WAIT_FLG__|ETHR_RWMTX_R_WAIT_FLG__)
+
+#define ETHR_CND_WAIT_FLG__ ETHR_RWMTX_R_WAIT_FLG__
+
+struct ethr_mutex_base_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long pre_fence;
+#endif
+ ethr_atomic_t flgs;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ int ws;
+#endif
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+ ethr_atomic_t hdbg_lfs;
+#endif
+};
+
+#endif
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_mutex_opt;
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_cond_opt;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ struct ethr_mutex_base_ mtxb;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ struct {
+ long pre_fence;
+    } mtxb; /* mtxb allows us to use the same macros as for mutex and rwmutex... */
+#endif
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread */
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ pthread_mutex_t pt_mtx;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ pthread_cond_t pt_cnd;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread */
+
+int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
+int ethr_mutex_init(ethr_mutex *);
+int ethr_mutex_destroy(ethr_mutex *);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+int ethr_mutex_trylock(ethr_mutex *);
+void ethr_mutex_lock(ethr_mutex *);
+void ethr_mutex_unlock(ethr_mutex *);
+#endif
+int ethr_cond_init_opt(ethr_cond *, ethr_cond_opt *);
+int ethr_cond_init(ethr_cond *);
+int ethr_cond_destroy(ethr_cond *);
+void ethr_cond_signal(ethr_cond *);
+void ethr_cond_broadcast(ethr_cond *);
+int ethr_cond_wait(ethr_cond *, ethr_mutex *);
+
+typedef enum {
+ ETHR_RWMUTEX_TYPE_NORMAL,
+ ETHR_RWMUTEX_TYPE_FREQUENT_READ,
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+} ethr_rwmutex_type;
+
+typedef enum {
+ ETHR_RWMUTEX_LONG_LIVED,
+ ETHR_RWMUTEX_SHORT_LIVED,
+ ETHR_RWMUTEX_UNKNOWN_LIVED
+} ethr_rwmutex_lived;
+
+typedef struct {
+ ethr_rwmutex_type type;
+ ethr_rwmutex_lived lived;
+ int main_spincount;
+ int aux_spincount;
+} ethr_rwmutex_opt;
+
+#define ETHR_RWMUTEX_OPT_DEFAULT_INITER \
+ {ETHR_RWMUTEX_TYPE_NORMAL, ETHR_RWMUTEX_UNKNOWN_LIVED, -1, -1}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+typedef union {
+ struct {
+ ethr_atomic_t readers;
+ int waiting_readers;
+ int byte_offset;
+ ethr_rwmutex_lived lived;
+ } data;
+ char align__[ETHR_CACHE_LINE_SIZE];
+} ethr_rwmtx_readers_array__;
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ struct ethr_mutex_base_ mtxb;
+ ethr_rwmutex_type type;
+ ethr_ts_event *rq_end;
+ union {
+ ethr_rwmtx_readers_array__ *ra;
+ int rs;
+ } tdata;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread_rwlock */
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ pthread_rwlock_t pt_rwlock;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread_rwlock */
+
+int ethr_rwmutex_set_reader_group(int);
+int ethr_rwmutex_init_opt(ethr_rwmutex *, ethr_rwmutex_opt *);
+int ethr_rwmutex_init(ethr_rwmutex *);
+int ethr_rwmutex_destroy(ethr_rwmutex *);
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) \
+ || !defined(ETHR_TRY_INLINE_FUNCS) \
+ || defined(ETHR_MUTEX_IMPL__)
+int ethr_rwmutex_tryrlock(ethr_rwmutex *);
+void ethr_rwmutex_rlock(ethr_rwmutex *);
+void ethr_rwmutex_runlock(ethr_rwmutex *);
+int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwunlock(ethr_rwmutex *);
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+#define ETHR_MTX_HARD_ASSERT(A) \
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
+#else
+#define ETHR_MTX_HARD_ASSERT(A) ((void) 1)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
+do { \
+ ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ > 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ >= 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == -1); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeadbeefdeadbeefL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeaddeaddeadL
+#else
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeaddeadL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeadL
+#endif
+
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ETHR_MTX_HARD_ASSERT((X)->mtxb.pre_fence == ETHR_MTX_HARD_DEBUG_PRE_FENCE);\
+ ETHR_MTX_HARD_ASSERT((X)->post_fence == ETHR_MTX_HARD_DEBUG_POST_FENCE); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X) \
+do { \
+ (X)->mtxb.pre_fence = ETHR_MTX_HARD_DEBUG_PRE_FENCE; \
+ (X)->post_fence = ETHR_MTX_HARD_DEBUG_POST_FENCE; \
+} while (0)
+#else
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
+#endif
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_MTX_DEFAULT_AUX_SPINCOUNT 50
+
+#define ETHR_CND_DEFAULT_MAIN_SPINCOUNT 0
+#define ETHR_CND_DEFAULT_AUX_SPINCOUNT 0
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+void ethr_mutex_lock_wait__(ethr_mutex *, long);
+void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ long act;
+ int res;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ res = (act == 0) ? 0 : EBUSY;
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ ethr_mutex_lock_wait__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_COMPILER_BARRIER;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
+
+ act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ ethr_mutex_unlock_wake__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#else /* pthread_mutex */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ int res;
+ res = pthread_mutex_trylock(&mtx->pt_mtx);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_lock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_unlock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_mutex */
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT 50
+
+#else /* pthread_rwlock */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_rwlock */
+
+int ethr_mutex_lib_init(int);
+int ethr_mutex_lib_late_init(int, int);
+
+#endif /* #ifndef ETHR_MUTEX_H__ */
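Note: the inline ethr_mutex_trylock/lock/unlock functions above implement a classic fast path: a single atomic flag word is moved from 0 to the write flag with an acquire compare-and-swap on lock, and back to 0 with a release compare-and-swap on unlock; only when other flag bits are set does the slow path (queueing under qlck, wait/wake) run. The sketch below expresses that fast-path protocol with C11 atomics, with the slow path reduced to plain spinning. It is an illustration of the pattern under those simplifying assumptions, not the ethread implementation itself.

/* Fast-path lock protocol sketch in C11 atomics (slow path = spin). */
#include <stdatomic.h>
#include <stdbool.h>

#define W_FLG (1u << 31)

typedef struct { _Atomic unsigned flgs; } sketch_mutex;

static void sketch_mutex_init(sketch_mutex *m) { atomic_init(&m->flgs, 0); }

static bool sketch_mutex_trylock(sketch_mutex *m)
{
    unsigned exp = 0;
    /* Uncontended case: 0 -> W_FLG with acquire ordering. */
    return atomic_compare_exchange_strong_explicit(&m->flgs, &exp, W_FLG,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}

static void sketch_mutex_lock(sketch_mutex *m)
{
    while (!sketch_mutex_trylock(m))
        ;   /* slow path: the real code enqueues the thread and blocks instead */
}

static void sketch_mutex_unlock(sketch_mutex *m)
{
    unsigned exp = W_FLG;
    /* Uncontended case: W_FLG -> 0 with release ordering.  In the real code
     * a failed CAS means waiter flags are set and unlock_wake__ runs; with
     * the spinning slow path above this CAS cannot fail. */
    (void) atomic_compare_exchange_strong_explicit(&m->flgs, &exp, 0,
                                                   memory_order_release,
                                                   memory_order_relaxed);
}

int main(void)
{
    sketch_mutex m;
    sketch_mutex_init(&m);
    sketch_mutex_lock(&m);
    sketch_mutex_unlock(&m);
    return sketch_mutex_trylock(&m) ? 0 : 1;
}
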
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
new file mode 100644
index 0000000000..2f9f987d0b
--- /dev/null
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -0,0 +1,185 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: "Optimized" fallbacks used when native ops are missing
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
+#define ETHR_OPTIMIZED_FALLBACKS_H__
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+/* --- Optimized spinlocks using pthread spinlocks -------------------------- */
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef pthread_spinlock_t ethr_opt_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE int
+ethr_opt_spinlock_init(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_init((pthread_spinlock_t *) lock, 0);
+}
+
+static ETHR_INLINE int
+ethr_opt_spinlock_destroy(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_destroy((pthread_spinlock_t *) lock);
+}
+
+
+static ETHR_INLINE int
+ethr_opt_spin_unlock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_unlock((pthread_spinlock_t *) lock);
+}
+
+static ETHR_INLINE int
+ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_lock((pthread_spinlock_t *) lock);
+}
+
+#endif
+
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native spinlocks using native atomics -------------------------------- */
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ (long) 1, (long) 0) != 0) {
+ ETHR_SPIN_BODY;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native rwspinlocks using native atomics ------------------------------ */
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_rwlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+#ifdef DEBUG
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
+#endif
+ ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp+1, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
+ == ETHR_WLOCK_FLAG__);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp|ETHR_WLOCK_FLAG__, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = act & ~ETHR_WLOCK_FLAG__;
+ }
+ act |= ETHR_WLOCK_FLAG__;
+ /* Wait for readers to leave */
+ while (act != ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+#endif
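Note: the native rwspinlock fallback above packs the whole lock state into one atomic word: the low bits count readers and bit 30 is the writer flag. A reader CASes the count up only while the writer flag is clear; a writer ORs in the flag and then waits for the reader count to drain to zero. The sketch below shows the same scheme with C11 atomics; plain busy-waiting stands in for ETHR_SPIN_BODY, and it is an illustration only, not the ethread code.

/* Reader/writer spinlock on a single atomic word, C11 atomics sketch. */
#include <stdatomic.h>

#define WLOCK_FLAG (1L << 30)

typedef struct { _Atomic long cnt; } sketch_rwlock;

static void rw_init(sketch_rwlock *l) { atomic_init(&l->cnt, 0); }

static void rw_read_lock(sketch_rwlock *l)
{
    long act, exp = 0;
    for (;;) {
        /* Bump the reader count, but only from a state without the writer flag. */
        act = exp;
        if (atomic_compare_exchange_strong_explicit(&l->cnt, &act, exp + 1,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
            break;
        exp = (act & WLOCK_FLAG) ? 0 : act;   /* retry from what we just saw */
    }
}

static void rw_read_unlock(sketch_rwlock *l)
{
    atomic_fetch_sub_explicit(&l->cnt, 1, memory_order_release);
}

static void rw_write_lock(sketch_rwlock *l)
{
    long act, exp = 0;
    for (;;) {
        /* Claim the writer flag while keeping the current reader count. */
        act = exp;
        if (atomic_compare_exchange_strong_explicit(&l->cnt, &act,
                                                    exp | WLOCK_FLAG,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
            break;
        exp = act & ~WLOCK_FLAG;
    }
    /* Writer flag is set; wait for the remaining readers to leave. */
    while (atomic_load_explicit(&l->cnt, memory_order_acquire) != WLOCK_FLAG)
        ;
}

static void rw_write_unlock(sketch_rwlock *l)
{
    atomic_store_explicit(&l->cnt, 0, memory_order_release);
}

int main(void)
{
    sketch_rwlock l;
    rw_init(&l);
    rw_read_lock(&l);  rw_read_unlock(&l);
    rw_write_lock(&l); rw_write_unlock(&l);
    return 0;
}
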
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 934a79c6f9..4a205699bd 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -33,20 +33,9 @@
#include <stdlib.h>
#include "erl_errno.h"
-/*
- * Extra memory barrier requirements:
- * - ethr_atomic_or_old() needs to enforce a memory barrier sufficient
- * for a lock operation.
- * - ethr_atomic_and_old() needs to enforce a memory barrier sufficient
- * for an unlock operation.
- * - ethr_atomic_cmpxchg() needs to enforce a memory barrier sufficient
- * for a lock and unlock operation.
- */
-
-
-#undef ETHR_USE_RWMTX_FALLBACK
#undef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-#undef ETHR_HAVE_OPTIMIZED_LOCKS
+#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
+#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
typedef struct {
long tv_sec;
@@ -54,6 +43,10 @@ typedef struct {
} ethr_timeval;
#if defined(DEBUG)
+# define ETHR_DEBUG
+#endif
+
+#if defined(ETHR_DEBUG)
# undef ETHR_XCHK
# define ETHR_XCHK 1
#else
@@ -68,47 +61,57 @@ typedef struct {
#elif defined(__WIN32__)
# define ETHR_INLINE __forceinline
#endif
-#if defined(DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
+#if defined(ETHR_DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
|| (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC))
# undef ETHR_INLINE
# define ETHR_INLINE
# undef ETHR_TRY_INLINE_FUNCS
#endif
-#ifdef ETHR_FORCE_INLINE_FUNCS
-# define ETHR_TRY_INLINE_FUNCS
-#endif
-#if !defined(ETHR_DISABLE_NATIVE_IMPLS) \
- && (defined(PURIFY) || defined(VALGRIND) || defined(ERTS_MIXED_CYGWIN_VC))
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && (defined(PURIFY)||defined(VALGRIND))
# define ETHR_DISABLE_NATIVE_IMPLS
#endif
-#define ETHR_RWMUTEX_INITIALIZED 0x99999999
-#define ETHR_MUTEX_INITIALIZED 0x77777777
-#define ETHR_COND_INITIALIZED 0x55555555
+/* Assume 64-byte cache line size */
+#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
-#define ETHR_CACHE_LINE_SIZE 64
+#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / ETHR_CACHE_LINE_SIZE) + 1) * ETHR_CACHE_LINE_SIZE)
-#ifdef ETHR_INLINE_FUNC_NAME_
-# define ETHR_CUSTOM_INLINE_FUNC_NAME_
-#else
+#ifndef ETHR_INLINE_FUNC_NAME_
# define ETHR_INLINE_FUNC_NAME_(X) X
#endif
-#define ETHR_COMPILER_BARRIER ethr_compiler_barrier()
-#ifdef __GNUC__
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER __asm__ __volatile__("":::"memory")
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
#endif
-#ifdef DEBUG
+int ethr_assert_failed(const char *file, int line, const char *func, char *a);
+#ifdef ETHR_DEBUG
#define ETHR_ASSERT(A) \
- ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
-int ethr_assert_failed(char *f, int l, char *a);
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, __func__, #A)))
#else
#define ETHR_ASSERT(A) ((void) 1)
#endif
+#if defined(__GNUC__)
+# define ETHR_PROTO_NORETURN__ void __attribute__((noreturn))
+# define ETHR_IMPL_NORETURN__ void
+#elif defined(__WIN32__) && defined(_MSC_VER)
+# define ETHR_PROTO_NORETURN__ __declspec(noreturn) void
+# define ETHR_IMPL_NORETURN__ __declspec(noreturn) void
+#else
+# define ETHR_PROTO_NORETURN__ void
+# define ETHR_IMPL_NORETURN__ void
+#endif
+
#if defined(ETHR_PTHREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The pthread implementation *
@@ -118,7 +121,9 @@ int ethr_assert_failed(char *f, int l, char *a);
#error "_GNU_SOURCE not defined. Please, compile all files with -D_GNU_SOURCE."
#endif
-#if defined(ETHR_HAVE_MIT_PTHREAD_H)
+#if defined(ETHR_NEED_NPTL_PTHREAD_H)
+#include <nptl/pthread.h>
+#elif defined(ETHR_HAVE_MIT_PTHREAD_H)
#include <pthread/mit/pthread.h>
#elif defined(ETHR_HAVE_PTHREAD_H)
#include <pthread.h>
@@ -128,130 +133,23 @@ int ethr_assert_failed(char *f, int l, char *a);
typedef pthread_t ethr_tid;
-typedef struct ethr_mutex_ ethr_mutex;
-struct ethr_mutex_ {
- pthread_mutex_t pt_mtx;
- int is_rec_mtx;
- ethr_mutex *prev;
- ethr_mutex *next;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-
-typedef struct ethr_cond_ ethr_cond;
-struct ethr_cond_ {
- pthread_cond_t pt_cnd;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
+typedef pthread_key_t ethr_tsd_key;
-#ifndef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-#define ETHR_USE_RWMTX_FALLBACK
-#else
-typedef struct ethr_rwmutex_ ethr_rwmutex;
-struct ethr_rwmutex_ {
- pthread_rwlock_t pt_rwlock;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-#endif
+#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-/* Static initializers */
-#if ETHR_XCHK
-#define ETHR_MUTEX_XCHK_INITER , ETHR_MUTEX_INITIALIZED
-#define ETHR_COND_XCHK_INITER , ETHR_COND_INITIALIZED
-#else
-#define ETHR_MUTEX_XCHK_INITER
-#define ETHR_COND_XCHK_INITER
+#if defined(PURIFY) || defined(VALGRIND)
+# define ETHR_FORCE_PTHREAD_RWLOCK
+# define ETHR_FORCE_PTHREAD_MUTEX
#endif
-#define ETHR_MUTEX_INITER {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-#define ETHR_COND_INITER {PTHREAD_COND_INITIALIZER ETHR_COND_XCHK_INITER}
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE) \
- || defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-# define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
-# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
-# define ETHR_REC_MUTEX_INITER \
- {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, 1, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-# endif
-#else
-# undef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+#if !defined(ETHR_FORCE_PTHREAD_RWLOCK)
+# define ETHR_USE_OWN_RWMTX_IMPL__
#endif
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-# define ETHR_NO_FORKSAFETY 1
+#if !defined(ETHR_FORCE_PTHREAD_MUTEX) && 0
+# define ETHR_USE_OWN_MTX_IMPL__
#endif
-typedef pthread_key_t ethr_tsd_key;
-
-#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- return pthread_mutex_trylock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- return pthread_mutex_lock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- return pthread_mutex_unlock(&mtx->pt_mtx);
-}
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-#endif /* ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
-
#elif defined(ETHR_WIN32_THREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The native win32 threads implementation *
@@ -273,403 +171,22 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
# undef WIN32_LEAN_AND_MEAN
#endif
-/* Types */
-typedef long ethr_tid; /* thread id type */
-typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
-#if ETHR_XCHK
- int is_rec_mtx;
-#endif
-} ethr_mutex;
-
-typedef struct cnd_wait_event__ cnd_wait_event_;
+struct ethr_join_data_;
+/* Types */
typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
- cnd_wait_event_ *queue;
- cnd_wait_event_ *queue_end;
-} ethr_cond;
-
-#define ETHR_USE_RWMTX_FALLBACK
-
-/* Static initializers */
-
-#define ETHR_MUTEX_INITER {0}
-#define ETHR_COND_INITER {0}
-
-#define ETHR_REC_MUTEX_INITER ETHR_MUTEX_INITER
-
-#define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
+ long id;
+ struct ethr_join_data_ *jdata;
+} ethr_tid; /* thread id type */
typedef DWORD ethr_tsd_key;
#undef ETHR_HAVE_ETHR_SIG_FUNCS
-#ifdef ETHR_TRY_INLINE_FUNCS
-int ethr_fake_static_mutex_init(ethr_mutex *mtx);
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return TryEnterCriticalSection(&mtx->cs) ? 0 : EBUSY;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&mtx->cs);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- LeaveCriticalSection(&mtx->cs);
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#ifdef ERTS_MIXED_CYGWIN_VC
-
-/* atomics */
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
-#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-#endif
-
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
-
-typedef struct {
- volatile LONG value;
-} ethr_atomic_t;
-
-typedef struct {
- volatile LONG locked;
-} ethr_spinlock_t;
-
-typedef struct {
- volatile LONG counter;
-} ethr_rwlock_t;
-#define ETHR_WLOCK_FLAG__ (((LONG) 1) << 30)
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- *i = var->value;
-#else
- *i = InterlockedExchangeAdd(&var->value, (LONG) 0);
-#endif
- return 0;
-}
+#define ETHR_USE_OWN_RWMTX_IMPL__
+#define ETHR_USE_OWN_MTX_IMPL__
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = InterlockedExchangeAdd(&var->value, (LONG) i);
- *testp += i;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- (void) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- (void) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedAnd() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedAnd(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedOr() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedOr(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedCompareExchange() provides a full
- * memory barrier.
- */
- *old = _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) expected);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = (long) InterlockedExchange(&var->value, (LONG) new);
- return 0;
-}
-
-/*
- * According to msdn InterlockedExchange() provides a full
- * memory barrier.
- */
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->locked = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->locked, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
-{
- return 0;
-}
-
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedExchange(&lock->locked, (LONG) 0);
-#ifdef DEBUG
- ETHR_ASSERT(old == 1);
-#endif
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
-{
- LONG old;
- do {
- old = InterlockedExchange(&lock->locked, (LONG) 1);
- } while (old != (LONG) 0);
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-/*
- * According to msdn InterlockedIncrement, InterlockedDecrement,
- * and InterlockedExchangeAdd(), _InterlockedAnd, and _InterlockedOr
- * provides full memory barriers.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->counter = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->counter, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedDecrement(&lock->counter);
- ETHR_ASSERT(old != 0);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- while (1) {
- LONG old = InterlockedIncrement(&lock->counter);
- if ((old & ETHR_WLOCK_FLAG__) == 0)
- break; /* Got read lock */
- /* Restore and wait for writers to unlock */
- old = InterlockedDecrement(&lock->counter);
- while (old & ETHR_WLOCK_FLAG__) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- }
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- _InterlockedAnd(&lock->counter, ~ETHR_WLOCK_FLAG__);
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- LONG old;
- do {
- old = _InterlockedOr(&lock->counter, ETHR_WLOCK_FLAG__);
- } while (old & ETHR_WLOCK_FLAG__);
- /* We got the write part of the lock; wait for readers to unlock */
- while ((old & ~ETHR_WLOCK_FLAG__) != 0) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* #ifdef ERTS_MIXED_CYGWIN_VC */
+#define ETHR_YIELD() (Sleep(0), 0)
#else /* No supported thread lib found */
@@ -681,6 +198,12 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
#endif
+#ifdef SIZEOF_LONG
+#if SIZEOF_LONG < ETHR_SIZEOF_PTR
+#error size of long currently needs to be at least the same as size of void *
+#endif
+#endif
+
/* __builtin_expect() is needed by both native atomics code
* and the fallback code */
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
@@ -688,131 +211,167 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
#endif
/* For CPU-optimised atomics, spinlocks, and rwlocks. */
-#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && defined(__GNUC__)
-# if ETHR_SIZEOF_PTR == 4
-# if defined(__i386__)
-# include "i386/ethread.h"
-# elif (defined(__powerpc__) || defined(__ppc__)) && !defined(__powerpc64__)
-# include "ppc32/ethread.h"
-# elif defined(__sparc__)
-# include "sparc32/ethread.h"
-# elif defined(__tile__)
-# include "tile/ethread.h"
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS)
+# if defined(__GNUC__)
+# if defined(ETHR_PREFER_GCC_NATIVE_IMPLS)
+# include "gcc/ethread.h"
+# elif defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
+# include "libatomic_ops/ethread.h"
# endif
-# elif ETHR_SIZEOF_PTR == 8
-# if defined(__x86_64__)
-# include "x86_64/ethread.h"
-# elif defined(__sparc__) && defined(__arch64__)
-# include "sparc64/ethread.h"
+# ifndef ETHR_HAVE_NATIVE_ATOMICS
+# if ETHR_SIZEOF_PTR == 4
+# if defined(__i386__)
+# include "i386/ethread.h"
+# elif (defined(__powerpc__)||defined(__ppc__))&&!defined(__powerpc64__)
+# include "ppc32/ethread.h"
+# elif defined(__sparc__)
+# include "sparc32/ethread.h"
+# elif defined(__tile__)
+# include "tile/ethread.h"
+# endif
+# elif ETHR_SIZEOF_PTR == 8
+# if defined(__x86_64__)
+# include "x86_64/ethread.h"
+# elif defined(__sparc__) && defined(__arch64__)
+# include "sparc64/ethread.h"
+# endif
+# endif
+# include "gcc/ethread.h"
+# include "libatomic_ops/ethread.h"
# endif
+# elif defined(ETHR_WIN32_THREADS)
+# include "win/ethread.h"
# endif
-#endif /* !defined(ETHR_DISABLE_NATIVE_IMPLS) && defined(__GNUC__) */
-
-#ifdef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-# undef ETHR_HAVE_NATIVE_ATOMICS
-#endif
-#ifdef ETHR_HAVE_OPTIMIZED_LOCKS
-# undef ETHR_HAVE_NATIVE_LOCKS
-#endif
+#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
+#if defined(__GNUC__)
+# ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+# endif
+# ifndef ETHR_SPIN_BODY
+# if defined(__i386__) || defined(__x86_64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
+# elif defined(__ia64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("hint @pause" : : : "memory")
+# elif defined(__sparc__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("membar #LoadLoad")
+# else
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+# endif
+# endif
+#elif defined(ETHR_WIN32_THREADS)
+# ifndef ETHR_COMPILER_BARRIER
+# include <intrin.h>
+# pragma intrinsic(_ReadWriteBarrier)
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+# ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
+# endif
#endif
-typedef struct {
- unsigned open;
- ethr_mutex mtx;
- ethr_cond cnd;
-} ethr_gate;
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Map ethread native atomics to ethread API atomics.
+ * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
+ * i.e. when our lock based atomic fallback is used, a noop is sufficient.
*/
-typedef ethr_native_atomic_t ethr_atomic_t;
+#define ETHR_MEMORY_BARRIER do { } while (0)
+#define ETHR_WRITE_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER do { } while (0)
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-/*
- * Map ethread native spinlocks to ethread API spinlocks.
- */
-typedef ethr_native_spinlock_t ethr_spinlock_t;
-/*
- * Map ethread native rwlocks to ethread API rwlocks.
- */
-typedef ethr_native_rwlock_t ethr_rwlock_t;
+#ifndef ETHR_WRITE_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER_IS_FULL
#endif
-
-#ifdef ETHR_USE_RWMTX_FALLBACK
-typedef struct {
- ethr_mutex mtx;
- ethr_cond rcnd;
- ethr_cond wcnd;
- unsigned readers;
- unsigned waiting_readers;
- unsigned waiting_writers;
-#if ETHR_XCHK
- int initialized;
+#ifndef ETHR_READ_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER_IS_FULL
#endif
-} ethr_rwmutex;
+#ifndef ETHR_READ_DEPEND_MEMORY_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER_IS_COMPILER_BARRIER
#endif
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-typedef long ethr_atomic_t;
-#endif
+#define ETHR_FATAL_ERROR__(ERR) \
+ ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err);
-#if defined(ETHR_WIN32_THREADS)
-typedef struct {
- CRITICAL_SECTION cs;
-} ethr_spinlock_t;
-typedef struct {
- CRITICAL_SECTION cs;
- unsigned counter;
-} ethr_rwlock_t;
+void ethr_compiler_barrier_fallback(void);
+#ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+#endif
-int ethr_do_spinlock_init(ethr_spinlock_t *lock);
-int ethr_do_rwlock_init(ethr_rwlock_t *lock);
+#ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+#endif
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+#ifndef ETHR_YIELD
+# if defined(ETHR_HAVE_SCHED_YIELD)
+# ifdef ETHR_HAVE_SCHED_H
+# include <sched.h>
+# endif
+# include <errno.h>
+# if defined(ETHR_SCHED_YIELD_RET_INT)
+# define ETHR_YIELD() (sched_yield() < 0 ? errno : 0)
+# else
+# define ETHR_YIELD() (sched_yield(), 0)
+# endif
+# elif defined(ETHR_HAVE_PTHREAD_YIELD)
+# if defined(ETHR_PTHREAD_YIELD_RET_INT)
+# define ETHR_YIELD() pthread_yield()
+# else
+# define ETHR_YIELD() (pthread_yield(), 0)
+# endif
+# else
+# define ETHR_YIELD() (ethr_compiler_barrier(), 0)
+# endif
+#endif
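/* Editor's note: an illustrative sketch (not part of the patch) of how the
 * ETHR_SPIN_BODY, ETHR_YIELD_AFTER_BUSY_LOOPS and ETHR_YIELD() macros above
 * are typically combined while waiting for a condition. The wait_for_flag()
 * helper and the flag variable are hypothetical. */
static void wait_for_flag(volatile int *flag)
{
    int loops = 0;
    while (!*flag) {
        ETHR_SPIN_BODY;                           /* relax the core while spinning */
        if (++loops >= ETHR_YIELD_AFTER_BUSY_LOOPS) {
            loops = 0;
            (void) ETHR_YIELD();                  /* give up the time slice */
        }
    }
}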
+
+#include "ethr_optimized_fallbacks.h"
-#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-typedef struct {
- pthread_spinlock_t spnlck;
-} ethr_spinlock_t;
typedef struct {
- pthread_spinlock_t spnlck;
- unsigned counter;
-} ethr_rwlock_t;
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+ void *(*thread_create_prepare_func)(void);
+ void (*thread_create_parent_func)(void *);
+ void (*thread_create_child_func)(void *);
+} ethr_init_data;
-#else /* ethr mutex/rwmutex */
+#define ETHR_INIT_DATA_DEFAULT_INITER {NULL, NULL, NULL}
typedef struct {
- ethr_mutex mtx;
-} ethr_spinlock_t;
+ void *(*alloc)(size_t);
+ void *(*realloc)(void *, size_t);
+ void (*free)(void *);
+} ethr_memory_allocator;
+
+#define ETHR_MEM_ALLOC_DEF_INITER__ {NULL, NULL, NULL}
typedef struct {
- ethr_rwmutex rwmtx;
-} ethr_rwlock_t;
+ ethr_memory_allocator std;
+ ethr_memory_allocator sl;
+ ethr_memory_allocator ll;
+} ethr_memory_allocators;
-#endif /* end mutex/rwmutex */
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
+#define ETHR_MEM_ALLOCS_DEF_INITER__ \
+ {ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__}
typedef struct {
- void *(*alloc)(size_t);
- void *(*realloc)(void *, size_t);
- void (*free)(void *);
- void *(*thread_create_prepare_func)(void);
- void (*thread_create_parent_func)(void *);
- void (*thread_create_child_func)(void *);
-} ethr_init_data;
+ ethr_memory_allocators mem;
+ int reader_groups;
+ int main_threads;
+} ethr_late_init_data;
-#define ETHR_INIT_DATA_DEFAULT_INITER {malloc, realloc, free, NULL, NULL, NULL}
+#define ETHR_LATE_INIT_DATA_DEFAULT_INITER \
+ {ETHR_MEM_ALLOCS_DEF_INITER__, 0, 0}
typedef struct {
int detached; /* boolean (default false) */
@@ -821,18 +380,15 @@ typedef struct {
#define ETHR_THR_OPTS_DEFAULT_INITER {0, -1}
-#if defined(ETHR_CUSTOM_INLINE_FUNC_NAME_) || !defined(ETHR_TRY_INLINE_FUNCS)
-# define ETHR_NEED_MTX_PROTOTYPES__
-# define ETHR_NEED_RWMTX_PROTOTYPES__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
# define ETHR_NEED_SPINLOCK_PROTOTYPES__
+# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
# define ETHR_NEED_ATOMIC_PROTOTYPES__
#endif
-#if !defined(ETHR_NEED_RWMTX_PROTOTYPES__) && defined(ETHR_USE_RWMTX_FALLBACK)
-# define ETHR_NEED_RWMTX_PROTOTYPES__
-#endif
-
int ethr_init(ethr_init_data *);
+int ethr_late_init(ethr_late_init_data *);
int ethr_install_exit_handler(void (*funcp)(void));
int ethr_thr_create(ethr_tid *, void * (*)(void *), void *, ethr_thr_opts *);
int ethr_thr_join(ethr_tid, void **);
@@ -840,65 +396,6 @@ int ethr_thr_detach(ethr_tid);
void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_mutex_init(ethr_mutex *);
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-int ethr_rec_mutex_init(ethr_mutex *);
-#endif
-int ethr_mutex_destroy(ethr_mutex *);
-int ethr_mutex_set_forksafe(ethr_mutex *);
-int ethr_mutex_unset_forksafe(ethr_mutex *);
-#ifdef ETHR_NEED_MTX_PROTOTYPES__
-int ethr_mutex_trylock(ethr_mutex *);
-int ethr_mutex_lock(ethr_mutex *);
-int ethr_mutex_unlock(ethr_mutex *);
-#endif
-int ethr_cond_init(ethr_cond *);
-int ethr_cond_destroy(ethr_cond *);
-int ethr_cond_signal(ethr_cond *);
-int ethr_cond_broadcast(ethr_cond *);
-int ethr_cond_wait(ethr_cond *, ethr_mutex *);
-int ethr_cond_timedwait(ethr_cond *, ethr_mutex *, ethr_timeval *);
-
-int ethr_rwmutex_init(ethr_rwmutex *);
-int ethr_rwmutex_destroy(ethr_rwmutex *);
-#ifdef ETHR_NEED_RWMTX_PROTOTYPES__
-int ethr_rwmutex_tryrlock(ethr_rwmutex *);
-int ethr_rwmutex_rlock(ethr_rwmutex *);
-int ethr_rwmutex_runlock(ethr_rwmutex *);
-int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwunlock(ethr_rwmutex *);
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-int ethr_atomic_init(ethr_atomic_t *, long);
-int ethr_atomic_set(ethr_atomic_t *, long);
-int ethr_atomic_read(ethr_atomic_t *, long *);
-int ethr_atomic_inctest(ethr_atomic_t *, long *);
-int ethr_atomic_dectest(ethr_atomic_t *, long *);
-int ethr_atomic_inc(ethr_atomic_t *);
-int ethr_atomic_dec(ethr_atomic_t *);
-int ethr_atomic_addtest(ethr_atomic_t *, long, long *);
-int ethr_atomic_add(ethr_atomic_t *, long);
-int ethr_atomic_and_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_or_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_xchg(ethr_atomic_t *, long, long *);
-int ethr_atomic_cmpxchg(ethr_atomic_t *, long, long, long *);
-#endif
-
-#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
-int ethr_spinlock_init(ethr_spinlock_t *);
-int ethr_spinlock_destroy(ethr_spinlock_t *);
-int ethr_spin_unlock(ethr_spinlock_t *);
-int ethr_spin_lock(ethr_spinlock_t *);
-
-int ethr_rwlock_init(ethr_rwlock_t *);
-int ethr_rwlock_destroy(ethr_rwlock_t *);
-int ethr_read_unlock(ethr_rwlock_t *);
-int ethr_read_lock(ethr_rwlock_t *);
-int ethr_write_unlock(ethr_rwlock_t *);
-int ethr_write_lock(ethr_rwlock_t *);
-#endif
int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
@@ -906,13 +403,6 @@ int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
void *ethr_tsd_get(ethr_tsd_key);
-int ethr_gate_init(ethr_gate *);
-int ethr_gate_destroy(ethr_gate *);
-int ethr_gate_close(ethr_gate *);
-int ethr_gate_let_through(ethr_gate *, unsigned);
-int ethr_gate_wait(ethr_gate *);
-int ethr_gate_swait(ethr_gate *, int);
-
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#include <signal.h>
int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset);
@@ -921,528 +411,579 @@ int ethr_sigwait(const sigset_t *set, int *sig);
void ethr_compiler_barrier(void);
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_init(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_set(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
- *i = ethr_native_atomic_read(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ethr_native_atomic_add(var, incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = ethr_native_atomic_add_return(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- ethr_native_atomic_inc(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- ethr_native_atomic_dec(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_inc_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_dec_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_and_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_or_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = ethr_native_atomic_xchg(var, new);
- return 0;
-}
-
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_cmpxchg(var, new, expected);
- return 0;
-}
+#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
+typedef ethr_native_spinlock_t ethr_spinlock_t;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+typedef ethr_opt_spinlock_t ethr_spinlock_t;
+#elif defined(__WIN32__)
+typedef CRITICAL_SECTION ethr_spinlock_t;
+#else
+typedef pthread_mutex_t ethr_spinlock_t;
+#endif
-#endif /* ETHR_HAVE_NATIVE_ATOMICS */
+#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
+int ethr_spinlock_init(ethr_spinlock_t *);
+int ethr_spinlock_destroy(ethr_spinlock_t *);
+void ethr_spin_unlock(ethr_spinlock_t *);
+void ethr_spin_lock(ethr_spinlock_t *);
+#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spinlock_init(lock);
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_init((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ if (!InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION *) lock, INT_MAX))
+ return ethr_win_get_errno__();
+ return 0;
+#else
+ return pthread_mutex_init((pthread_mutex_t *) lock, NULL);
+#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_destroy((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ DeleteCriticalSection((CRITICAL_SECTION *) lock);
+ return 0;
+#else
+ return pthread_mutex_destroy((pthread_mutex_t *) lock);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_unlock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_unlock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ LeaveCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_unlock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
- ethr_native_rwlock_init(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_lock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_lock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ EnterCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_lock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-#endif /* ETHR_HAVE_NATIVE_LOCKS */
-
#endif /* ETHR_TRY_INLINE_FUNCS */
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Fallbacks for atomics used in absence of optimized implementation.
+ * Map ethread native atomics to ethread API atomics.
*/
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+typedef ethr_native_atomic_t ethr_atomic_t;
+#else
+typedef long ethr_atomic_t;
+#endif
-#define ETHR_ATOMIC_ADDR_BITS 4
-#define ETHR_ATOMIC_ADDR_SHIFT 3
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+void ethr_atomic_init(ethr_atomic_t *, long);
+void ethr_atomic_set(ethr_atomic_t *, long);
+long ethr_atomic_read(ethr_atomic_t *);
+long ethr_atomic_inc_read(ethr_atomic_t *);
+long ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+long ethr_atomic_add_read(ethr_atomic_t *, long);
+void ethr_atomic_add(ethr_atomic_t *, long);
+long ethr_atomic_read_band(ethr_atomic_t *, long);
+long ethr_atomic_read_bor(ethr_atomic_t *, long);
+long ethr_atomic_xchg(ethr_atomic_t *, long);
+long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
+long ethr_atomic_read_acqb(ethr_atomic_t *);
+long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, long);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+long ethr_atomic_dec_read_relb(ethr_atomic_t *);
+long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
+long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * Fallbacks for atomics used in absence of a native implementation.
+ */
+
+#define ETHR_ATOMIC_ADDR_BITS 10
+#define ETHR_ATOMIC_ADDR_SHIFT 6
typedef struct {
union {
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- pthread_spinlock_t spnlck;
-#else
- ethr_mutex mtx;
-#endif
+ ethr_spinlock_t lck;
char buf[ETHR_CACHE_LINE_SIZE];
} u;
} ethr_atomic_protection_t;
extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-
#define ETHR_ATOMIC_PTR2LCK__(PTR) \
(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.spnlck)
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
do { \
- pthread_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = pthread_spin_lock(slp__); \
- if (res__ != 0) \
- return res__; \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
{ EXPS; } \
- return pthread_spin_unlock(slp__); \
+ ethr_spin_unlock(slp__); \
} while (0)
-#else /* ethread mutex */
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.mtx)
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_mutex *mtxp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(mtxp__); \
- if (res__ != 0) \
- return res__; \
- { EXPS; } \
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(mtxp__); \
-} while (0)
-
-#endif /* end ethread mutex */
-
-#ifdef ETHR_TRY_INLINE_FUNCS
+#endif
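/* Editor's note: a sketch (not part of the patch) of what the fallback above
 * computes. The address of the atomic variable, shifted right by
 * ETHR_ATOMIC_ADDR_SHIFT (6), indexes one of 1 << ETHR_ATOMIC_ADDR_BITS (1024)
 * cache-line-padded spinlocks, so unrelated atomics rarely contend on the
 * same lock. The index_for() helper is hypothetical and merely mirrors the
 * ETHR_ATOMIC_PTR2LCK__ macro. */
static unsigned index_for(void *atomic_var)
{
    return (unsigned) ((((unsigned long) atomic_var) >> ETHR_ATOMIC_ADDR_SHIFT)
                       & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1));
}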
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_init(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *i = (long) *var);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *incp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *testp = (long) ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_add(var, incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_add_return(var, i);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *decp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, *testp = (long) --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_inc(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *incp,
- long i,
- long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *incp += i; *testp = *incp);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *incp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *decp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var &= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_and_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var |= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_or_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
+ long new)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var = new);
-}
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_xchg(var, new);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
long new,
- long expected,
- long *old)
+ long exp)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(
- var,
- long old_val = *var;
- *old = old_val;
- if (__builtin_expect(old_val == expected, 1))
- *var = new;
- );
- return 0;
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS */
-
/*
- * Fallbacks for spin locks, and rw spin locks used in absence of
- * optimized implementation.
+ * Important memory barrier requirements.
+ *
+ * The following atomic operations *must* supply a memory barrier of
+ * at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
*/
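/* Editor's note: an illustrative sketch (not part of the patch) of the
 * intended use of the _relb/_acqb variants declared above: a producer
 * publishes data and then releases a flag; a consumer acquires the flag
 * before reading the data. The data and ready variables are hypothetical,
 * and ethr_atomic_init(&ready, 0) is assumed to have been called. */
static long data;
static ethr_atomic_t ready;

static void producer(void)
{
    data = 4711;                                  /* ordinary write */
    ethr_atomic_set_relb(&ready, 1);              /* release: makes data visible */
}

static int consumer(void)
{
    if (ethr_atomic_read_acqb(&ready))            /* acquire: pairs with the release */
        return data == 4711;                      /* guaranteed to observe 4711 */
    return 0;
}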
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-#ifdef ETHR_TRY_INLINE_FUNCS
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read_acqb(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return_acqb(var);
#else
- return ethr_mutex_init(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set_relb(var, val);
#else
- return ethr_mutex_destroy(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
#endif
}
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec_relb(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return_relb(var);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ long new,
+ long exp)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_lock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
}
-#ifdef ETHR_USE_RWMTX_FALLBACK
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) X
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ long new,
+ long exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_relb(var, new, exp);
#else
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) ETHR_INLINE_FUNC_NAME_(X)
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
+
+#if defined(ETHR_WIN32_THREADS)
+# include "win/ethr_event.h"
+#else
+# include "pthread/ethr_event.h"
+#endif
+
+int ethr_set_main_thr_status(int, int);
+int ethr_get_main_thr_status(int *);
+
+struct ethr_ts_event_ {
+ ethr_ts_event *next;
+ ethr_ts_event *prev;
+ ethr_event event;
+ void *udata;
+ ethr_atomic_t uaflgs;
+ unsigned uflgs;
+ unsigned iflgs; /* for ethr lib only */
+ short rgix; /* for ethr lib only */
+ short mtix; /* for ethr lib only */
+};
+
+#define ETHR_TS_EV_ETHREAD (((unsigned) 1) << 0)
+#define ETHR_TS_EV_INITED (((unsigned) 1) << 1)
+#define ETHR_TS_EV_TMP (((unsigned) 1) << 2)
+#define ETHR_TS_EV_MAIN_THR (((unsigned) 1) << 3)
+
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp);
+int ethr_free_ts_event__(ethr_ts_event *tsep);
+int ethr_make_ts_event__(ethr_ts_event **tsepp);
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+ethr_ts_event *ethr_get_ts_event(void);
+void ethr_leave_ts_event(ethr_ts_event *);
+#endif
+
+#if defined(ETHR_PTHREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern pthread_key_t ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = pthread_getspecific(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_make_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+
+}
+
+#endif
+
+#elif defined(ETHR_WIN32_THREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern DWORD ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = TlsGetValue(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_get_tmp_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+ if (tsep->iflgs & ETHR_TS_EV_TMP) {
+ int res = ethr_free_ts_event__(tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+#endif
+
+#endif
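/* Editor's note: a sketch (not part of the patch) of the intended bracketing
 * around the thread-specific event accessors declared above; the actual
 * waiting logic is elided since it depends on the ethr_event API, and
 * blocked_wait() is a hypothetical helper. */
static void blocked_wait(void)
{
    ethr_ts_event *tse = ethr_get_ts_event();     /* created lazily per thread */
    /* ... hand tse to a waker and wait on tse->event via the ethr_event API ... */
    ethr_leave_ts_event(tse);                     /* returns temporary events (win32) */
}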
+
+#include "ethr_mutex.h" /* Need atomic declarations and tse */
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+typedef ethr_native_rwlock_t ethr_rwlock_t;
+#else
+typedef ethr_rwmutex ethr_rwlock_t;
+#endif
+
+#ifdef ETHR_NEED_RWSPINLOCK_PROTOTYPES__
+int ethr_rwlock_init(ethr_rwlock_t *);
+int ethr_rwlock_destroy(ethr_rwlock_t *);
+void ethr_read_unlock(ethr_rwlock_t *);
+void ethr_read_lock(ethr_rwlock_t *);
+void ethr_write_unlock(ethr_rwlock_t *);
+void ethr_write_lock(ethr_rwlock_t *);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_rwlock_init(lock);
+ return 0;
#else
- return ethr_rwmutex_init(&lock->rwmtx);
+ return ethr_rwmutex_init_opt((ethr_rwmutex *) lock, NULL);
#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ return 0;
#else
- return ethr_rwmutex_destroy(&lock->rwmtx);
+ return ethr_rwmutex_destroy((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter--;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_runlock)(&lock->rwmtx);
+ ethr_rwmutex_runlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int locked = 0;
- do {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- if ((lock->counter & ETHR_RWLOCK_WRITERS) == 0) {
- lock->counter++;
- locked = 1;
- }
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- } while (!locked);
- return 0;
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rlock)(&lock->rwmtx);
+ ethr_rwmutex_rlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwunlock)(&lock->rwmtx);
+ ethr_rwmutex_rwunlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- while (1) {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter |= ETHR_RWLOCK_WRITERS;
- if (lock->counter == ETHR_RWLOCK_WRITERS)
- return 0;
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- }
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwlock)(&lock->rwmtx);
+ ethr_rwmutex_rwlock((ethr_rwmutex *) lock);
#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#if defined(ETHR_HAVE_OPTIMIZED_LOCKS) || defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-# define ETHR_HAVE_OPTIMIZED_SPINLOCK
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* #ifndef ETHREAD_H__ */
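/* Editor's note: a minimal usage sketch (not part of the patch) of the
 * ethr_spinlock_t wrappers defined above. Init/destroy still return an error
 * code, while lock/unlock now return void and abort via ETHR_FATAL_ERROR__
 * internally on failure. The counter and its guard are hypothetical. */
static ethr_spinlock_t counter_lck;
static long counter;

static int counter_init(void)
{
    return ethr_spinlock_init(&counter_lck);      /* 0 or an errno value */
}

static void counter_bump(void)
{
    ethr_spin_lock(&counter_lck);
    counter++;
    ethr_spin_unlock(&counter_lck);
}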
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index e5b4946a53..5debb44756 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,26 +29,76 @@
/* Define if you have pthreads */
#undef ETHR_PTHREADS
+/* Define if you need the <nptl/pthread.h> header file. */
+#undef ETHR_NEED_NPTL_PTHREAD_H
+
/* Define if you have the <pthread.h> header file. */
#undef ETHR_HAVE_PTHREAD_H
/* Define if the pthread.h header file is in pthread/mit directory. */
#undef ETHR_HAVE_MIT_PTHREAD_H
-/* Define if you have the pthread_mutexattr_settype function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE
+/* Define if you have the pthread_spin_lock function. */
+#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
-/* Define if you have the pthread_mutexattr_setkind_np function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP
+/* Define if you want to force usage of pthread rwlocks */
+#undef ETHR_FORCE_PTHREAD_RWLOCK
-/* Define if you have the pthread_atfork function. */
-#undef ETHR_HAVE_PTHREAD_ATFORK
+/* Define if you have the pthread_rwlockattr_setkind_np() function. */
+#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
-/* Define if you have the pthread_spin_lock function. */
-#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
+/* Define if you have the PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock
+ attribute. */
+#undef ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
+
+/* Define if you have a linux futex implementation. */
+#undef ETHR_HAVE_LINUX_FUTEX
+
+/* Define if you have gcc atomic operations */
+#undef ETHR_HAVE_GCC_ATOMIC_OPS
+
+/* Define if you prefer gcc native ethread implementations */
+#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+
+/* Define if you have the <sched.h> header file. */
+#undef ETHR_HAVE_SCHED_H
+
+/* Define if you have the sched_yield() function. */
+#undef ETHR_HAVE_SCHED_YIELD
+
+/* Define if you have the pthread_yield() function. */
+#undef ETHR_HAVE_PTHREAD_YIELD
+
+/* Define if pthread_yield() returns an int. */
+#undef ETHR_PTHREAD_YIELD_RET_INT
+
+/* Define if sched_yield() returns an int. */
+#undef ETHR_SCHED_YIELD_RET_INT
+
+/* Define if you want compatibility with x86 processors older than the Pentium 4. */
+#undef ETHR_PRE_PENTIUM4_COMPAT
+
+/* Define if you have the pthread_rwlockattr_setkind_np() function. */
+#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
+
+/* Define if you have the PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock
+ attribute. */
+#undef ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
+
+/* Define if you have gcc atomic operations */
+#undef ETHR_HAVE_GCC_ATOMIC_OPS
+
+/* Define if you prefer gcc native ethread implementations */
+#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+
+/* Define if you have libatomic_ops atomic operations */
+#undef ETHR_HAVE_LIBATOMIC_OPS
+
+/* Define if you prefer libatomic_ops native ethread implementations */
+#undef ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS
-/* Define if you have a pthread_rwlock implementation that can be used */
-#undef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+/* Define to the size of AO_t if libatomic_ops is used */
+#undef ETHR_SIZEOF_AO_T
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
new file mode 100644
index 0000000000..5fe6e23477
--- /dev/null
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -0,0 +1,183 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using gcc's builtins
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_GCC_ATOMIC_H__
+#define ETHR_GCC_ATOMIC_H__
+
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+/* Enable immediate read/write on platforms where we know it is safe */
+#if defined(__i386__) || defined(__x86_64__) || defined(__sparc__) \
+ || defined(__powerpc__) || defined(__ppc__)
+# undef ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+#endif
+
+typedef struct {
+ volatile long counter;
+} ethr_native_atomic_t;
+
+
+/*
+ * According to the documentation this is what we want:
+ * #define ETHR_MEMORY_BARRIER __sync_synchronize()
+ * However, __sync_synchronize() is known to erroneously be
+ * a no-op on at least some platforms with some gcc versions.
+ * This has supposedly been fixed in some gcc version, but we
+ * don't know from which version. Therefore, we use the
+ * workaround implemented below on all gcc versions.
+ */
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile long x___ = 0; \
+ (void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
+} while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ var->counter = value;
+#else
+ /*
+ * Unfortunately no __sync_store() or similar exist in the gcc atomic
+ * op interface. We therefore have to simulate it this way...
+ */
+ long act = 0, exp;
+ do {
+ exp = act;
+ act = __sync_val_compare_and_swap(&var->counter, exp, value);
+ } while (act != exp);
+#endif
+}
+
+#define ethr_native_atomic_init ethr_native_atomic_set
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ return var->counter;
+#else
+ /*
+ * Unfortunately no __sync_fetch() or similar exist in the gcc atomic
+ * op interface. We therefore have to simulate it this way...
+ */
+ return __sync_add_and_fetch(&var->counter, (long) 0);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) __sync_add_and_fetch(&var->counter, incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+ return __sync_add_and_fetch(&var->counter, incr);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) __sync_add_and_fetch(&var->counter, (long) 1);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) __sync_sub_and_fetch(&var->counter, (long) 1);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ return __sync_add_and_fetch(&var->counter, (long) 1);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ return __sync_sub_and_fetch(&var->counter, (long) 1);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ return __sync_fetch_and_and(&var->counter, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) __sync_fetch_and_or(&var->counter, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+{
+ return __sync_val_compare_and_swap(&var->counter, old, new);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ long exp, act = 0;
+ do {
+ exp = act;
+ act = __sync_val_compare_and_swap(&var->counter, exp, new);
+ } while (act != exp);
+ return act;
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ return __sync_add_and_fetch(&var->counter, (long) 0);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
+#endif
+
+#endif
+
+#endif
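/* Editor's note: a standalone sketch (not part of the patch) of the
 * compare-and-swap retry loop the gcc backend above uses to emulate an atomic
 * exchange, since gcc's __sync interface provides no plain atomic store or
 * swap on all targets. Requires a gcc that implements the __sync builtins;
 * swap_with_cas() is a hypothetical helper. */
static long swap_with_cas(volatile long *p, long new_val)
{
    long seen = *p, prev;
    do {
        prev = seen;
        seen = __sync_val_compare_and_swap(p, prev, new_val);
    } while (seen != prev);                       /* retry until the CAS succeeds */
    return seen;                                  /* the previous value */
}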
diff --git a/erts/include/internal/gcc/ethread.h b/erts/include/internal/gcc/ethread.h
new file mode 100644
index 0000000000..bb378e31e0
--- /dev/null
+++ b/erts/include/internal/gcc/ethread.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomic ethread support when using gcc
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_GCC_H__
+#define ETHREAD_GCC_H__
+
+#include "ethr_atomic.h"
+
+#endif
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 3291ad38e5..f28258059f 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -32,7 +32,22 @@ typedef struct {
volatile long counter;
} ethr_native_atomic_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
+#else
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile long x___ = 0; \
+ __asm__ __volatile__("lock; incl %0" : "=m"(x___) : "m"(x___) : "memory"); \
+} while (0)
+#endif
+
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#ifdef __x86_64__
#define LONG_SUFFIX "q"
@@ -148,6 +163,22 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return tmp;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+#define ethr_native_atomic_read_acqb ethr_native_atomic_read
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+#define ethr_native_atomic_set_relb ethr_native_atomic_set
+#else
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#endif
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#undef LONG_SUFFIX
#endif /* ETHR_TRY_INLINE_FUNCS */
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index fad8b108fa..ed43e77279 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_I386_ETHREAD_H */
diff --git a/erts/include/internal/i386/rwlock.h b/erts/include/internal/i386/rwlock.h
index c009be8ef1..be47f459ce 100644
--- a/erts/include/internal/i386/rwlock.h
+++ b/erts/include/internal/i386/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#define ETHR_RWLOCK_OFFSET (1<<24)
diff --git a/erts/include/internal/i386/spinlock.h b/erts/include/internal/i386/spinlock.h
index 2b4832e26a..0325324895 100644
--- a/erts/include/internal/i386/spinlock.h
+++ b/erts/include/internal/i386/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
@@ -46,7 +46,7 @@ ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
* On i386 this needs to be a locked operation
* to avoid Pentium Pro errata 66 and 92.
*/
-#if defined(__x86_64__)
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
__asm__ __volatile__("" : : : "memory");
*(unsigned char*)&lock->lock = 0;
#else
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
new file mode 100644
index 0000000000..a6eb43a0bd
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -0,0 +1,292 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_ATOMIC_H__
+#define ETHR_LIBATOMIC_OPS_ATOMIC_H__
+
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if (defined(__i386__) && !defined(ETHR_PRE_PENTIUM4_COMPAT)) \
+ || defined(__x86_64__)
+#define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#include "atomic_ops.h"
+
+/*
+ * libatomic_ops can be downloaded from:
+ * http://www.hpl.hp.com/research/linux/atomic_ops/
+ *
+ * These operations need to be defined by libatomic_ops;
+ * otherwise, we won't compile:
+ * - AO_nop_full()
+ * - AO_load()
+ * - AO_store()
+ * - AO_compare_and_swap()
+ *
+ * The `AO_t' type also has to be at least as large as
+ * the `void *' and `long' types.
+ */
+
+#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
+#error The AO_t type is too small
+#endif
+
+typedef struct {
+ volatile AO_t counter;
+} ethr_native_atomic_t;
+
+#define ETHR_MEMORY_BARRIER AO_nop_full()
+#ifdef AO_HAVE_nop_write
+# define ETHR_WRITE_MEMORY_BARRIER AO_nop_write()
+#else
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_HAVE_nop_read
+# define ETHR_READ_MEMORY_BARRIER AO_nop_read()
+#else
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_NO_DD_ORDERING
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#else
+# define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("":::"memory")
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+{
+ AO_store(&var->counter, (AO_t) value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+{
+ ethr_native_atomic_set(var, value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return (long) AO_load(&var->counter);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+#ifdef AO_HAVE_fetch_and_add
+ return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+#else
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp + (AO_t) incr;
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) new;
+ }
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) ethr_native_atomic_add_return(var, incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1
+ return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+#else
+ return ethr_native_atomic_add_return(var, 1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_inc_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1
+ return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+#else
+ return ethr_native_atomic_add_return(var, -1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp & ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp | ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+{
+ long act;
+ do {
+ if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
+ return (long) exp;
+ }
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_load_acquire
+ return (long) AO_load_acquire(&var->counter);
+#else
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1_acquire
+ return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+#else
+ long res = ethr_native_atomic_add_return(var, 1);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+{
+#ifdef AO_HAVE_store_release
+ AO_store_release(&var->counter, (AO_t) value);
+#else
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1_release
+ return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return_relb(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_acquire
+ long act;
+ do {
+ if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ AO_nop_full();
+ return act;
+#else
+ long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ETHR_MEMORY_BARRIER;
+ return act;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_release
+ long act;
+ do {
+ if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#endif
+}
+
+
+#endif
+
+#endif
+
+#endif
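/* Editor's note: a sketch (not part of the patch) of the pattern the
 * libatomic_ops backend above relies on. AO_compare_and_swap() only reports
 * success or failure, while the ethread API wants the previous value back, so
 * on failure the current value is re-loaded and returned, retrying only when
 * it still equals the expected value (i.e. the failure was spurious).
 * Requires libatomic_ops; value_returning_cmpxchg() is a hypothetical helper. */
#include "atomic_ops.h"

static long value_returning_cmpxchg(volatile AO_t *p, AO_t new_val, AO_t exp)
{
    AO_t act;
    do {
        if (AO_compare_and_swap(p, exp, new_val))
            return (long) exp;                    /* swapped; old value was exp */
        act = AO_load(p);
    } while (act == exp);                         /* spurious failure: retry */
    return (long) act;                            /* the differing old value */
}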
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
new file mode 100644
index 0000000000..ee73ba73bc
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_LIBATOMIC_OPS_H__
+#define ETHREAD_LIBATOMIC_OPS_H__
+
+#include "ethr_atomic.h"
+
+#endif
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index fa701c6a92..f21f7c9588 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -32,8 +32,9 @@ typedef struct {
volatile int counter;
} ethr_native_atomic_t;
+#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
@@ -204,6 +205,26 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
return old;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_PPC_ATOMIC_H */
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
index d2a72c3dc1..12efc1b653 100644
--- a/erts/include/internal/ppc32/ethread.h
+++ b/erts/include/internal/ppc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
index 9bdab12826..19ec26ab68 100644
--- a/erts/include/internal/ppc32/rwlock.h
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
index 034c20c143..c8460a3e8a 100644
--- a/erts/include/internal/ppc32/spinlock.h
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
new file mode 100644
index 0000000000..104ec287e0
--- /dev/null
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -0,0 +1,151 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#if defined(ETHR_HAVE_LINUX_FUTEX) && defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Linux futex implementation of ethread events ------------------------- */
+#define ETHR_LINUX_FUTEX_IMPL__
+
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/time.h>
+
+/*
+ * Note: Linux futexes operate on 32-bit integers, but
+ * ethr_native_atomic_t is 64 bits wide on 64-bit
+ * platforms. This has to be taken into account.
+ * Therefore, every byte of each individual value
+ * used here looks the same.
+ */
+
+#if ETHR_SIZEOF_PTR == 8
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
+#define ETHR_EVENT_OFF__ 0x7777777777777777L
+#define ETHR_EVENT_ON__ 0L
+
+#elif ETHR_SIZEOF_PTR == 4
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
+#define ETHR_EVENT_OFF__ 0x77777777L
+#define ETHR_EVENT_ON__ 0L
+
+#else
+
+#error "Unsupported pointer size"
+
+#endif
+
+#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE_PRIVATE
+#else
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE
+#endif
+
+typedef struct {
+ ethr_atomic_t futex;
+} ethr_event;
+
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, (void *) (FTX), (OP), (int) (VAL), NULL, NULL, 0)\
+ ? errno : 0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->futex, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+typedef struct {
+ ethr_atomic_t state;
+ pthread_mutex_t mtx;
+ pthread_cond_t cnd;
+} ethr_event;
+
+#define ETHR_EVENT_OFF_WAITER__ -1L
+#define ETHR_EVENT_OFF__ 1L
+#define ETHR_EVENT_ON__ 0L
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_cond_signal(&e->cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_mutex_unlock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
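The 64-bit values of ETHR_EVENT_OFF_WAITER__ and ETHR_EVENT_OFF__ above are built from repeated bytes for the reason given in the futex note: the futex syscall only examines 32 bits at the address it is given, so whichever half of the 64-bit atomic it sees must still be the recognizable state value, and with repeated-byte constants both halves match the 32-bit definitions. A small stand-alone check of that property (illustration only, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    /* 64-bit definitions of the event states (repeated-byte patterns) */
    uint64_t off_waiter = 0xffffffffffffffffULL;  /* ETHR_EVENT_OFF_WAITER__ */
    uint64_t off        = 0x7777777777777777ULL;  /* ETHR_EVENT_OFF__ */

    /* The 32 bits the futex syscall actually sees are the same
     * recognizable patterns as the 32-bit definitions. */
    printf("%08" PRIx32 " %08" PRIx32 "\n",
           (uint32_t) off_waiter,   /* ffffffff */
           (uint32_t) off);         /* 77777777 */
    return 0;
}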
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index d6fdc6b2a4..2a995d4465 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -28,7 +28,11 @@ typedef struct {
volatile long counter;
} ethr_native_atomic_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#define ETHR_MEMORY_BARRIER \
+ __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
+ : : : "memory")
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#if defined(__arch64__)
#define CASX "casx"
@@ -168,6 +172,43 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
return new;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_set(var, i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ return ethr_native_atomic_dec_return(var);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHR_SPARC32_ATOMIC_H */
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index 1d55399640..dca113b4d6 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_SPARC32_ETHREAD_H */
diff --git a/erts/include/internal/sparc32/rwlock.h b/erts/include/internal/sparc32/rwlock.h
index 12448e0b06..465ec96866 100644
--- a/erts/include/internal/sparc32/rwlock.h
+++ b/erts/include/internal/sparc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/sparc32/spinlock.h b/erts/include/internal/sparc32/spinlock.h
index b4fe48b714..493d514210 100644
--- a/erts/include/internal/sparc32/spinlock.h
+++ b/erts/include/internal/sparc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile unsigned char lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 59a9250e7c..69569d82d1 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -32,7 +32,9 @@ typedef struct {
volatile long counter;
} ethr_native_atomic_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#define ETHR_MEMORY_BARRIER __insn_mf()
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
@@ -43,7 +45,6 @@ ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
static ETHR_INLINE void
ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
{
- __insn_mf();
atomic_exchange_acq(&var->counter, i);
}
@@ -56,28 +57,24 @@ ethr_native_atomic_read(ethr_native_atomic_t *var)
static ETHR_INLINE void
ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
ethr_native_atomic_inc(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_increment(&var->counter);
}
static ETHR_INLINE void
ethr_native_atomic_dec(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_decrement(&var->counter);
}
static ETHR_INLINE long
ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
@@ -96,33 +93,81 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
static ETHR_INLINE long
ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_and_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
{
- __insn_mf();
return atomic_or_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
{
- __insn_mf();
return atomic_exchange_acq(&var->counter, val);
}
static ETHR_INLINE long
ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_inc_return(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long val)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, val);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_TILE_ATOMIC_H */
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
new file mode 100644
index 0000000000..500459dd6c
--- /dev/null
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -0,0 +1,244 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_WIN_ATOMIC_H__
+#define ETHR_WIN_ATOMIC_H__
+
+#ifdef _MSC_VER
+# if _MSC_VER < 1300
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Don't trust really old compilers */
+# else
+# if defined(_M_IX86)
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else /* I.e. IA64 */
+# if _MSC_VER >= 1400
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# endif
+# endif
+# endif
+# if _MSC_VER >= 1400
+# include <intrin.h>
+# undef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+#endif
+
+/*
+ * No configure test checking for _Interlocked*_{acq,rel} and
+ * Interlocked*{Acquire,Release} has been written yet...
+ *
+ * Note that these are pure optimizations for the Itanium
+ * processor.
+ */
+
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+#pragma intrinsic(_InterlockedCompareExchange_acq)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+#pragma intrinsic(_InterlockedCompareExchange_rel)
+#endif
+
+
+typedef struct {
+ volatile LONG value;
+} ethr_native_atomic_t;
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile LONG x___ = 0; \
+ _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
+} while (0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+{
+ var->value = (LONG) i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ var->value = (LONG) i;
+#else
+ (void) InterlockedExchange(&var->value, (LONG) i);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ return var->value;
+#else
+ return InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+{
+ LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
+ return tmp + i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedAnd(&var->value, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedOr(&var->value, mask);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+{
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ return (long) InterlockedExchange(&var->value, (LONG) new);
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
+ return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#else
+ return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
+ return (long) InterlockedIncrementAcquire(&var->value);
+#else
+ return (long) InterlockedIncrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ (void) InterlockedExchange(&var->value, (LONG) i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ (void) InterlockedDecrementRelease(&var->value);
+#else
+ (void) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ return (long) InterlockedDecrementRelease(&var->value);
+#else
+ return (long) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+ return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+{
+
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+ return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+#endif
+
+#endif
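As the comment in the header notes, the _acq/_rel compare-exchange intrinsics are pure Itanium optimizations; the full-barrier forms are always correct. The following is a hedged sketch of the lock/unlock pairing that acquire/release variants are intended to serve, written here against plain InterlockedCompareExchange()/InterlockedExchange(); the demo_spinlock names are made up for the illustration and are not code from the patch.

#include <windows.h>
#include <stdio.h>

typedef struct { volatile LONG lock; } demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *l)
{
    /* Acquire side: needs at least acquire semantics, which the
     * full-barrier CAS also provides. */
    while (InterlockedCompareExchange(&l->lock, 1, 0) != 0)
        YieldProcessor();
}

static void demo_spin_unlock(demo_spinlock_t *l)
{
    /* Release side: needs at least release semantics. */
    InterlockedExchange(&l->lock, 0);
}

int main(void)
{
    demo_spinlock_t l = {0};
    demo_spin_lock(&l);
    puts("in critical section");
    demo_spin_unlock(&l);
    return 0;
}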
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
new file mode 100644
index 0000000000..af57c20f91
--- /dev/null
+++ b/erts/include/internal/win/ethr_event.h
@@ -0,0 +1,62 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
+#define ETHR_EVENT_OFF__ ((LONG) 1)
+#define ETHR_EVENT_ON__ ((LONG) 0)
+
+typedef struct {
+ volatile LONG state;
+ HANDLE handle;
+} ethr_event;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important */
+ LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ if (state == ETHR_EVENT_OFF_WAITER__) {
+ if (!SetEvent(e->handle))
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important */
+ InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+}
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
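ethr_event_wait() is only declared above; its definition lives in the ethread C sources, not in this header. The following is one plausible waiter side of the three-state protocol, shown as a hedged sketch only: the demo_* names, the blocking strategy, and the error handling are assumptions for illustration, not the actual OTP implementation.

#include <windows.h>

#define DEMO_EVENT_OFF_WAITER ((LONG) -1)
#define DEMO_EVENT_OFF        ((LONG) 1)
#define DEMO_EVENT_ON         ((LONG) 0)

typedef struct {
    volatile LONG state;
    HANDLE handle;
} demo_event;

/* Hypothetical waiter: advertise ourselves by moving OFF -> OFF_WAITER,
 * then block on the kernel event.  Because of this, a setter like the
 * one in the header above only needs SetEvent() when the old state was
 * OFF_WAITER. */
static int demo_event_wait(demo_event *e)
{
    while (1) {
        LONG state = InterlockedCompareExchange(&e->state,
                                                DEMO_EVENT_OFF_WAITER,
                                                DEMO_EVENT_OFF);
        if (state == DEMO_EVENT_ON)
            return 0;                 /* already set; no kernel call needed */
        if (WaitForSingleObject(e->handle, INFINITE) != WAIT_OBJECT_0)
            return (int) GetLastError();
        if (InterlockedExchangeAdd(&e->state, 0) == DEMO_EVENT_ON)
            return 0;                 /* woken by a set; done */
    }
}

int main(void)
{
    demo_event e;
    e.state = DEMO_EVENT_ON;          /* pretend the event was already set */
    e.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    int r = demo_event_wait(&e);      /* returns immediately */
    CloseHandle(e.handle);
    return r;
}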
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
new file mode 100644
index 0000000000..b52710f6a3
--- /dev/null
+++ b/erts/include/internal/win/ethread.h
@@ -0,0 +1,31 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomic and spinlock ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_WIN_H__
+#define ETHREAD_WIN_H__
+
+#include "ethr_atomic.h"
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#endif
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index 49f5b1f048..0d3181cace 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -280,8 +280,13 @@ endif
#
# ethread library
#
+ETHR_THR_LIB_BASE_DIR=@ETHR_THR_LIB_BASE_DIR@
ifneq ($(strip $(ETHR_LIB_NAME)),)
-ETHREAD_LIB_SRC=common/ethread.c
+ETHREAD_LIB_SRC=common/ethr_aux.c \
+ common/ethr_mutex.c \
+ common/ethr_cbf.c \
+ $(ETHR_THR_LIB_BASE_DIR)/ethread.c \
+ $(ETHR_THR_LIB_BASE_DIR)/ethr_event.c
ETHREAD_LIB_NAME=ethread$(TYPE_SUFFIX)
ifeq ($(USING_VC),yes)
@@ -379,13 +384,13 @@ $(ERTS_LIB): $(ERTS_LIB_OBJS)
$(r_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
-$(r_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(r_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
$(OBJ_DIR)/%.o: common/%.c
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
-$(OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
# Win32 specific
@@ -393,25 +398,25 @@ $(OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
$(MD_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
-$(MD_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MD_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
$(MDd_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
-$(MDd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MDd_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
$(MT_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
-$(MT_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MT_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
$(MTd_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
-$(MTd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+$(MTd_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
#
@@ -438,6 +443,8 @@ RELEASE_LIBS=$(ERTS_LIBS)
INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/README \
$(ERTS_INCL_INT)/ethread.h \
+ $(ERTS_INCL_INT)/ethr_mutex.h \
+ $(ERTS_INCL_INT)/ethr_optimized_fallbacks.h \
$(ERTS_INCL_INT)/$(TARGET)/ethread.mk \
$(ERTS_INCL_INT)/$(TARGET)/erts_internal.mk \
$(ERTS_INCL_INT)/$(TARGET)/ethread_header_config.h \
@@ -447,7 +454,8 @@ INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/erl_misc_utils.h \
$(ERTS_INCL_INT)/erl_errno.h
-INTERNAL_X_RELEASE_INCLUDE_DIRS= i386 x86_64 ppc32 sparc32 sparc64 tile
+INTERNAL_X_RELEASE_INCLUDE_DIRS= \
+ i386 x86_64 ppc32 sparc32 sparc64 tile gcc pthread win libatomic_ops
INTERNAL_RELEASE_LIBS= \
../lib/internal/README \
diff --git a/erts/lib_src/common/erl_misc_utils.c b/erts/lib_src/common/erl_misc_utils.c
index 9c25d33a3c..116c9886d8 100644
--- a/erts/lib_src/common/erl_misc_utils.c
+++ b/erts/lib_src/common/erl_misc_utils.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -21,10 +21,13 @@
#include "config.h"
#endif
+#if defined(__WIN32__)
+# include <windows.h>
+#endif
+
#include "erl_misc_utils.h"
#if defined(__WIN32__)
-# include <windows.h>
#elif defined(VXWORKS)
# include <selectLib.h>
#else /* UNIX */
@@ -59,8 +62,25 @@
# endif
#endif
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(HAVE_SCHED_xETAFFINITY)
# include <sched.h>
+# define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
+#define ERTS_MU_GET_PROC_AFFINITY__(CPUINFOP, CPUSET) \
+ (sched_getaffinity((CPUINFOP)->pid, \
+ sizeof(cpu_set_t), \
+ (CPUSET)) != 0 ? -errno : 0)
+#define ERTS_MU_SET_THR_AFFINITY__(SETP) \
+ (sched_setaffinity(0, sizeof(cpu_set_t), (SETP)) != 0 ? -errno : 0)
+#elif defined(__WIN32__)
+# define ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__
+# define cpu_set_t DWORD
+# define CPU_SETSIZE (sizeof(DWORD)*8)
+# define CPU_ZERO(SETP) (*(SETP) = (DWORD) 0)
+# define CPU_SET(CPU, SETP) (*(SETP) |= (((DWORD) 1) << (CPU)))
+# define CPU_CLR(CPU, SETP) (*(SETP) &= ~(((DWORD) 1) << (CPU)))
+# define CPU_ISSET(CPU, SETP) ((*(SETP) & (((DWORD) 1) << (CPU))) != (DWORD) 0)
+#define ERTS_MU_GET_PROC_AFFINITY__ get_proc_affinity
+#define ERTS_MU_SET_THR_AFFINITY__ set_thr_affinity
#endif
#ifdef HAVE_PSET_INFO
# include <sys/pset.h>
@@ -82,6 +102,26 @@
static int read_topology(erts_cpu_info_t *cpuinfo);
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+static int
+cpu_sets_are_eq(cpu_set_t *x, cpu_set_t *y)
+{
+ int i;
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, x)) {
+ if (!CPU_ISSET(i, y))
+ return 0;
+ }
+ else {
+ if (CPU_ISSET(i, y))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+#endif
+
int
erts_milli_sleep(long ms)
{
@@ -105,30 +145,66 @@ struct erts_cpu_info_t_ {
int available;
int topology_size;
erts_cpu_topology_t *topology;
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
char *affinity_str;
char affinity_str_buf[CPU_SETSIZE/4+2];
cpu_set_t cpuset;
+#if defined(HAVE_SCHED_xETAFFINITY)
pid_t pid;
+#endif
#elif defined(HAVE_PSET_INFO)
processorid_t *cpuids;
#endif
};
+#if defined(__WIN32__)
+
+static __forceinline int
+get_proc_affinity(erts_cpu_info_t *cpuinfo, cpu_set_t *cpuset)
+{
+ DWORD pamask, samask;
+ if (GetProcessAffinityMask(GetCurrentProcess(), &pamask, &samask)) {
+ *cpuset = (cpu_set_t) pamask;
+ return 0;
+ }
+ else {
+ *cpuset = (cpu_set_t) 0;
+ return -erts_get_last_win_errno();
+ }
+}
+
+static __forceinline int
+set_thr_affinity(cpu_set_t *set)
+{
+ if (*set == (cpu_set_t) 0)
+ return -ENOTSUP;
+ if (SetThreadAffinityMask(GetCurrentThread(), *set) == 0)
+ return -erts_get_last_win_errno();
+ else
+ return 0;
+}
+
+#endif
+
erts_cpu_info_t *
erts_cpu_info_create(void)
{
erts_cpu_info_t *cpuinfo = malloc(sizeof(erts_cpu_info_t));
if (!cpuinfo)
return NULL;
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
cpuinfo->affinity_str = NULL;
+#if defined(HAVE_SCHED_xETAFFINITY)
cpuinfo->pid = getpid();
+#endif
#elif defined(HAVE_PSET_INFO)
cpuinfo->cpuids = NULL;
#endif
cpuinfo->topology_size = 0;
cpuinfo->topology = NULL;
+ cpuinfo->configured = -1;
+ cpuinfo->online = -1;
+ cpuinfo->available = -1;
erts_cpu_info_update(cpuinfo);
return cpuinfo;
}
@@ -153,31 +229,40 @@ erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo)
}
}
-void
+int
erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
{
- cpuinfo->configured = 0;
- cpuinfo->online = 0;
- cpuinfo->available = 0;
+ int changed = 0;
+ int configured = 0;
+ int online = 0;
+ int available = 0;
+ erts_cpu_topology_t *old_topology;
+ int old_topology_size;
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ cpu_set_t cpuset;
+#endif
#ifdef __WIN32__
{
+ int i;
SYSTEM_INFO sys_info;
GetSystemInfo(&sys_info);
- cpuinfo->configured = (int) sys_info.dwNumberOfProcessors;
-
+ configured = (int) sys_info.dwNumberOfProcessors;
+ for (i = 0; i < sizeof(DWORD)*8; i++)
+ if (sys_info.dwActiveProcessorMask & (((DWORD) 1) << i))
+ online++;
}
#elif !defined(NO_SYSCONF) && (defined(_SC_NPROCESSORS_CONF) \
|| defined(_SC_NPROCESSORS_ONLN))
#ifdef _SC_NPROCESSORS_CONF
- cpuinfo->configured = (int) sysconf(_SC_NPROCESSORS_CONF);
- if (cpuinfo->configured < 0)
- cpuinfo->configured = 0;
+ configured = (int) sysconf(_SC_NPROCESSORS_CONF);
+ if (configured < 0)
+ configured = 0;
#endif
#ifdef _SC_NPROCESSORS_ONLN
- cpuinfo->online = (int) sysconf(_SC_NPROCESSORS_ONLN);
- if (cpuinfo->online < 0)
- cpuinfo->online = 0;
+ online = (int) sysconf(_SC_NPROCESSORS_ONLN);
+ if (online < 0)
+ online = 0;
#endif
#elif defined(HAVE_SYS_SYSCTL_H) && defined(CTL_HW) && (defined(HW_NCPU) \
|| defined(HW_AVAILCPU))
@@ -189,71 +274,138 @@ erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
len = sizeof(int);
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
- if (sysctl(&mib[0], 2, &cpuinfo->configured, &len, NULL, 0) < 0)
- cpuinfo->configured = 0;
+ if (sysctl(&mib[0], 2, &configured, &len, NULL, 0) < 0)
+ configured = 0;
#endif
#ifdef HW_AVAILCPU
len = sizeof(int);
mib[0] = CTL_HW;
mib[1] = HW_AVAILCPU;
- if (sysctl(&mib[0], 2, &cpuinfo->online, &len, NULL, 0) < 0)
- cpuinfo->online = 0;
+ if (sysctl(&mib[0], 2, &online, &len, NULL, 0) < 0)
+ online = 0;
#endif
}
#endif
- if (cpuinfo->online > cpuinfo->configured)
- cpuinfo->online = cpuinfo->configured;
+ if (online > configured)
+ online = configured;
-#ifdef HAVE_SCHED_xETAFFINITY
- if (sched_getaffinity(cpuinfo->pid, sizeof(cpu_set_t), &cpuinfo->cpuset) == 0) {
- int i, c, cn, si;
- c = cn = 0;
- si = sizeof(cpuinfo->affinity_str_buf) - 1;
- cpuinfo->affinity_str_buf[si] = '\0';
- for (i = 0; i < CPU_SETSIZE; i++) {
- if (CPU_ISSET(i, &cpuinfo->cpuset)) {
- c |= 1 << cn;
- cpuinfo->available++;
+ if (cpuinfo->configured != configured)
+ changed = 1;
+ if (cpuinfo->online != online)
+ changed = 1;
+
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ if (ERTS_MU_GET_PROC_AFFINITY__(cpuinfo, &cpuset) == 0) {
+ if (!changed && !cpu_sets_are_eq(&cpuset, &cpuinfo->cpuset))
+ changed = 1;
+
+ if (!changed)
+ available = cpuinfo->available;
+ else {
+ int i, c, cn, si;
+
+ memcpy((void *) &cpuinfo->cpuset,
+ (void *) &cpuset,
+ sizeof(cpu_set_t));
+
+ c = cn = 0;
+ si = sizeof(cpuinfo->affinity_str_buf) - 1;
+ cpuinfo->affinity_str_buf[si] = '\0';
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, &cpuinfo->cpuset)) {
+ c |= 1 << cn;
+ available++;
+ }
+ cn++;
+ if (cn == 4) {
+ cpuinfo->affinity_str_buf[--si] = (c < 10
+ ? '0' + c
+ : 'A' + c - 10);
+ c = cn = 0;
+ }
}
- cn++;
- if (cn == 4) {
+ if (c)
cpuinfo->affinity_str_buf[--si] = (c < 10
? '0' + c
: 'A' + c - 10);
- c = cn = 0;
- }
+ while (cpuinfo->affinity_str_buf[si] == '0')
+ si++;
+ cpuinfo->affinity_str = &cpuinfo->affinity_str_buf[si];
}
- if (c)
- cpuinfo->affinity_str_buf[--si] = (c < 10
- ? '0' + c
- : 'A' + c - 10);
- while (cpuinfo->affinity_str_buf[si] == '0')
- si++;
- cpuinfo->affinity_str = &cpuinfo->affinity_str_buf[si];
}
#elif defined(HAVE_PSET_INFO)
{
- uint_t numcpus = cpuinfo->configured;
- if (cpuinfo->cpuids)
- free(cpuinfo->cpuids);
- cpuinfo->cpuids = malloc(sizeof(processorid_t)*numcpus);
- if (cpuinfo->cpuids) {
- if (pset_info(PS_MYID, NULL, &numcpus, &cpuinfo->cpuids) == 0)
- cpuinfo->available = (int) numcpus;
- if (cpuinfo->available < 0) {
- free(cpuinfo->cpuid);
- cpuinfo->available = 0;
+ processorid_t *cpuids;
+ uint_t numcpus = configured;
+ cpuids = malloc(sizeof(processorid_t)*numcpus);
+ if (cpuids) {
+ if (pset_info(PS_MYID, NULL, &numcpus, &cpuids) == 0)
+ available = (int) numcpus;
+ if (available < 0) {
+ free(cpuids);
+ cpuids = NULL;
+ available = 0;
}
}
+ if (!cpuids) {
+ if (cpuinfo->cpuids)
+ changed = 1;
+ }
+ else {
+ if (cpuinfo->cpuids)
+ changed = 1;
+ if (memcmp((void *) cpuinfo->cpuids,
+ (void *) cpuids,
+ sizeof(processorid_t)*numcpus) != 0)
+ changed = 1;
+
+ }
+ if (!changed) {
+ if (cpuids)
+ free(cpuids);
+ }
+ else {
+ if (cpuinfo->cpuids)
+ free(cpuinfo->cpuids);
+ cpuinfo->cpuids = cpuids;
+ }
}
#endif
- if (cpuinfo->available > cpuinfo->online)
- cpuinfo->available = cpuinfo->online;
+ if (available > online)
+ available = online;
+
+ if (cpuinfo->available != available)
+ changed = 1;
+
+ cpuinfo->configured = configured;
+ cpuinfo->online = online;
+ cpuinfo->available = available;
+
+ old_topology = cpuinfo->topology;
+ old_topology_size = cpuinfo->topology_size;
+ cpuinfo->topology = NULL;
read_topology(cpuinfo);
+ if (cpuinfo->topology_size != old_topology_size
+ || (old_topology_size != 0
+ && memcmp((void *) cpuinfo->topology,
+ (void *) old_topology,
+ (sizeof(erts_cpu_topology_t)
+ * old_topology_size)) != 0)) {
+ changed = 1;
+ if (old_topology)
+ free(old_topology);
+ }
+ else {
+ if (cpuinfo->topology)
+ free(cpuinfo->topology);
+ cpuinfo->topology = old_topology;
+ }
+
+ return changed;
}
int
@@ -289,7 +441,7 @@ erts_get_cpu_available(erts_cpu_info_t *cpuinfo)
char *
erts_get_unbind_from_cpu_str(erts_cpu_info_t *cpuinfo)
{
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
if (!cpuinfo)
return "false";
return cpuinfo->affinity_str;
@@ -303,7 +455,7 @@ erts_get_available_cpu(erts_cpu_info_t *cpuinfo, int no)
{
if (!cpuinfo || no < 1 || cpuinfo->available < no)
return -EINVAL;
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
{
cpu_set_t *allowed = &cpuinfo->cpuset;
int ix, n;
@@ -335,8 +487,8 @@ int
erts_is_cpu_available(erts_cpu_info_t *cpuinfo, int id)
{
if (cpuinfo && 0 <= id) {
-#ifdef HAVE_SCHED_xETAFFINITY
- if (id <= CPU_SETSIZE)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ if (id < CPU_SETSIZE)
return CPU_ISSET(id, &cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
int no;
@@ -373,8 +525,8 @@ erts_get_cpu_topology(erts_cpu_info_t *cpuinfo,
return 0;
memcpy((void *) topology,
(void *) cpuinfo->topology,
- cpuinfo->configured*sizeof(erts_cpu_topology_t));
- return cpuinfo->configured;
+ cpuinfo->topology_size*sizeof(erts_cpu_topology_t));
+ return cpuinfo->topology_size;
}
int
@@ -388,7 +540,7 @@ erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
*/
if (!cpuinfo)
return -EINVAL;
-#ifdef HAVE_SCHED_xETAFFINITY
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
{
cpu_set_t bind_set;
if (cpu < 0)
@@ -398,9 +550,7 @@ erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
CPU_ZERO(&bind_set);
CPU_SET(cpu, &bind_set);
- if (sched_setaffinity(0, sizeof(cpu_set_t), &bind_set) != 0)
- return -errno;
- return 0;
+ return ERTS_MU_SET_THR_AFFINITY__(&bind_set);
}
#elif defined(HAVE_PROCESSOR_BIND)
if (cpu < 0)
@@ -418,10 +568,8 @@ erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
{
if (!cpuinfo)
return -EINVAL;
-#if defined(HAVE_SCHED_xETAFFINITY)
- if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuinfo->cpuset) != 0)
- return -errno;
- return 0;
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
+ return ERTS_MU_SET_THR_AFFINITY__(&cpuinfo->cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
return -errno;
@@ -434,7 +582,7 @@ erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
int
erts_unbind_from_cpu_str(char *str)
{
-#if defined(HAVE_SCHED_xETAFFINITY)
+#if defined(ERTS_HAVE_MISC_UTIL_AFFINITY_MASK__)
char *c = str;
int cpus = 0;
int shft = 0;
@@ -486,9 +634,7 @@ erts_unbind_from_cpu_str(char *str)
if (!cpus)
return -EINVAL;
- if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0)
- return -errno;
- return 0;
+ return ERTS_MU_SET_THR_AFFINITY__(&cpuset);
#elif defined(HAVE_PROCESSOR_BIND)
if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
return -errno;
@@ -541,6 +687,56 @@ cpu_cmp(const void *vx, const void *vy)
return 0;
}
+static void
+adjust_processor_nodes(erts_cpu_info_t *cpuinfo, int no_nodes)
+{
+ erts_cpu_topology_t *prev, *this, *last;
+ if (no_nodes > 1) {
+ int processor = -1;
+ int processor_node = 0;
+ int node = -1;
+
+ qsort(cpuinfo->topology,
+ cpuinfo->topology_size,
+ sizeof(erts_cpu_topology_t),
+ pn_cmp);
+
+ prev = NULL;
+ this = &cpuinfo->topology[0];
+ last = &cpuinfo->topology[cpuinfo->configured-1];
+ while (1) {
+ if (processor == this->processor) {
+ if (node != this->node)
+ processor_node = 1;
+ }
+ else {
+ if (processor_node) {
+ make_processor_node:
+ while (prev->processor == processor) {
+ prev->processor_node = prev->node;
+ prev->node = -1;
+ if (prev == &cpuinfo->topology[0])
+ break;
+ prev--;
+ }
+ processor_node = 0;
+ }
+ processor = this->processor;
+ node = this->node;
+ }
+ if (this == last) {
+ if (processor_node) {
+ prev = this;
+ goto make_processor_node;
+ }
+ break;
+ }
+ prev = this++;
+ }
+ }
+}
+
+
#ifdef __linux__
static int
@@ -594,9 +790,6 @@ read_topology(erts_cpu_info_t *cpuinfo)
errno = 0;
- if (cpuinfo->topology)
- free(cpuinfo->topology);
-
if (cpuinfo->configured < 1)
goto error;
@@ -710,49 +903,7 @@ read_topology(erts_cpu_info_t *cpuinfo)
cpuinfo->topology = t;
}
- if (no_nodes > 1) {
- int processor = -1;
- int processor_node = 0;
- int node = -1;
-
- qsort(cpuinfo->topology,
- cpuinfo->topology_size,
- sizeof(erts_cpu_topology_t),
- pn_cmp);
-
- prev = NULL;
- this = &cpuinfo->topology[0];
- last = &cpuinfo->topology[cpuinfo->configured-1];
- while (1) {
- if (processor == this->processor) {
- if (node != this->node)
- processor_node = 1;
- }
- else {
- if (processor_node) {
- make_processor_node:
- while (prev->processor == processor) {
- prev->processor_node = prev->node;
- prev->node = -1;
- if (prev == &cpuinfo->topology[0])
- break;
- prev--;
- }
- processor_node = 0;
- }
- processor = this->processor;
- node = this->node;
- }
- if (this == last) {
- if (processor_node) {
- prev = this;
- goto make_processor_node;
- }
- break;
- }
- prev = this++;
- }
- }
+ adjust_processor_nodes(cpuinfo, no_nodes);
qsort(cpuinfo->topology,
cpuinfo->topology_size,
@@ -849,9 +1000,6 @@ read_topology(erts_cpu_info_t *cpuinfo)
errno = 0;
- if (cpuinfo->topology)
- free(cpuinfo->topology);
-
if (cpuinfo->configured < 1)
goto error;
@@ -938,6 +1086,8 @@ read_topology(erts_cpu_info_t *cpuinfo)
}
}
+ adjust_processor_nodes(cpuinfo, 1);
+
error:
if (res == 0) {
@@ -956,6 +1106,275 @@ read_topology(erts_cpu_info_t *cpuinfo)
}
+#elif defined(__WIN32__)
+
+/*
+ * We cannot use the Relation* enumerators out of the box, since not all
+ * of them are always part of the LOGICAL_PROCESSOR_RELATIONSHIP enum.
+ * They are, however, documented as follows...
+ */
+#define ERTS_MU_RELATION_PROCESSOR_CORE 0 /* RelationProcessorCore */
+#define ERTS_MU_RELATION_NUMA_NODE 1 /* RelationNumaNode */
+#define ERTS_MU_RELATION_CACHE 2 /* RelationCache */
+#define ERTS_MU_RELATION_PROCESSOR_PACKAGE 3 /* RelationProcessorPackage */
+
+static __forceinline int
+rel_cmp_val(int r)
+{
+ switch (r) {
+ case ERTS_MU_RELATION_NUMA_NODE: return 0;
+ case ERTS_MU_RELATION_PROCESSOR_PACKAGE: return 1;
+ case ERTS_MU_RELATION_PROCESSOR_CORE: return 2;
+ default: /* currently not used */ return 3;
+ }
+}
+
+static int
+slpi_cmp(const void *vx, const void *vy)
+{
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION x, y;
+ x = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION) vx;
+ y = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION) vy;
+
+ if ((int) x->Relationship != (int) y->Relationship)
+ return (rel_cmp_val((int) x->Relationship)
+ - rel_cmp_val((int) y->Relationship));
+
+ switch ((int) x->Relationship) {
+ case ERTS_MU_RELATION_NUMA_NODE:
+ if (x->NumaNode.NodeNumber == y->NumaNode.NodeNumber)
+ break;
+ return ((int) x->NumaNode.NodeNumber) - ((int) y->NumaNode.NodeNumber);
+ case ERTS_MU_RELATION_PROCESSOR_CORE:
+ case ERTS_MU_RELATION_PROCESSOR_PACKAGE:
+ default:
+ break;
+ }
+
+ if (x->ProcessorMask == y->ProcessorMask)
+ return 0;
+ return x->ProcessorMask < y->ProcessorMask ? -1 : 1;
+}
+
+typedef BOOL (WINAPI *glpi_t)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);
+
+static int
+read_topology(erts_cpu_info_t *cpuinfo)
+{
+ int res = 0;
+ glpi_t glpi;
+ int *core_id = NULL;
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpip = NULL;
+ int wix, rix, max_l, l, packages, nodes, no_slpi;
+ DWORD slpi_size = 0;
+
+
+ glpi = (glpi_t) GetProcAddress(GetModuleHandle("kernel32"),
+ "GetLogicalProcessorInformation");
+ if (!glpi)
+ return -ENOTSUP;
+
+ cpuinfo->topology = NULL;
+
+ if (cpuinfo->configured < 1 || sizeof(ULONG_PTR)*8 < cpuinfo->configured)
+ goto error;
+
+ while (1) {
+ DWORD werr;
+ if (TRUE == glpi(slpip, &slpi_size))
+ break;
+ werr = GetLastError();
+ if (werr != ERROR_INSUFFICIENT_BUFFER) {
+ res = -erts_map_win_error_to_errno(werr);
+ goto error;
+ }
+ if (slpip)
+ free(slpip);
+ slpip = malloc(slpi_size);
+ if (!slpip) {
+ res = -ENOMEM;
+ goto error;
+ }
+ }
+
+ no_slpi = (int) slpi_size/sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+
+ qsort(slpip,
+ no_slpi,
+ sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION),
+ slpi_cmp);
+
+ /*
+ * Now NUMA node relations appear before package relations, which
+ * appear before core relations, which appear before relations
+ * we aren't interested in...
+ */
+
+ max_l = 0;
+ packages = 0;
+ nodes = 0;
+ for (rix = 0; rix < no_slpi; rix++) {
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION this = &slpip[rix];
+ for (l = sizeof(ULONG_PTR)*8 - 1; l > 0; l--) {
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ if (max_l < l)
+ max_l = l;
+ break;
+ }
+ }
+ if ((int) slpip[rix].Relationship == ERTS_MU_RELATION_PROCESSOR_PACKAGE)
+ packages++;
+ if ((int) slpip[rix].Relationship == ERTS_MU_RELATION_NUMA_NODE)
+ nodes++;
+ }
+
+ core_id = malloc(sizeof(int)*(packages ? packages : 1));
+ if (!core_id) {
+ res = -ENOMEM;
+ goto error;
+ }
+
+ for (rix = 0; rix < packages; rix++)
+ core_id[rix] = 0;
+
+ cpuinfo->topology_size = max_l + 1;
+ cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
+ * cpuinfo->topology_size);
+ if (!cpuinfo->topology) {
+ res = -ENOMEM;
+ goto error;
+ }
+
+ for (wix = 0; wix < cpuinfo->topology_size; wix++) {
+ cpuinfo->topology[wix].node = -1;
+ cpuinfo->topology[wix].processor = -1;
+ cpuinfo->topology[wix].processor_node = -1;
+ cpuinfo->topology[wix].core = -1;
+ cpuinfo->topology[wix].thread = -1;
+ cpuinfo->topology[wix].logical = -1;
+ }
+
+ nodes = 0;
+ packages = 0;
+
+ for (rix = 0; rix < no_slpi; rix++) {
+
+ switch ((int) slpip[rix].Relationship) {
+ case ERTS_MU_RELATION_NUMA_NODE:
+ for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ cpuinfo->topology[l].logical = l;
+ cpuinfo->topology[l].node = slpip[rix].NumaNode.NodeNumber;
+ }
+ }
+ nodes++;
+ break;
+ case ERTS_MU_RELATION_PROCESSOR_PACKAGE:
+ for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ cpuinfo->topology[l].logical = l;
+ cpuinfo->topology[l].processor = packages;
+ }
+ }
+ packages++;
+ break;
+ case ERTS_MU_RELATION_PROCESSOR_CORE: {
+ int thread = 0;
+ int processor = -1;
+ for (l = 0; l < sizeof(ULONG_PTR)*8; l++) {
+ /*
+ * Nodes and packages may not be supported; pretend
+ * that there is one if this is the case...
+ */
+ if (!nodes)
+ cpuinfo->topology[l].node = 0;
+ if (!packages)
+ cpuinfo->topology[l].processor = 0;
+ if (slpip[rix].ProcessorMask & (((ULONG_PTR) 1) << l)) {
+ if (processor < 0) {
+ processor = cpuinfo->topology[l].processor;
+ if (processor < 0) {
+ res = -EINVAL;
+ goto error;
+ }
+ }
+ else if (processor != cpuinfo->topology[l].processor) {
+ res = -EINVAL;
+ goto error;
+ }
+ cpuinfo->topology[l].logical = l;
+ cpuinfo->topology[l].thread = thread;
+ cpuinfo->topology[l].core = core_id[processor];
+ thread++;
+ }
+ }
+ core_id[processor]++;
+ break;
+ }
+ default:
+ /*
+ * We have reached the end of the relationships
+ * that we (currently) are interested in...
+ */
+ goto relationships_done;
+ }
+ }
+
+ relationships_done:
+
+ /*
+ * There may be unused entries; remove them...
+ */
+ for (rix = wix = 0; rix < cpuinfo->topology_size; rix++) {
+ if (cpuinfo->topology[rix].logical >= 0) {
+ if (wix != rix)
+ cpuinfo->topology[wix] = cpuinfo->topology[rix];
+ wix++;
+ }
+ }
+
+ if (cpuinfo->topology_size != wix) {
+ erts_cpu_topology_t *new = cpuinfo->topology;
+ new = realloc(cpuinfo->topology,
+ sizeof(erts_cpu_topology_t)*wix);
+ if (!new) {
+ res = -ENOMEM;
+ goto error;
+ }
+ cpuinfo->topology = new;
+ cpuinfo->topology_size = wix;
+ }
+
+ res = wix;
+
+ adjust_processor_nodes(cpuinfo, nodes);
+
+ qsort(cpuinfo->topology,
+ cpuinfo->topology_size,
+ sizeof(erts_cpu_topology_t),
+ cpu_cmp);
+
+ if (res < cpuinfo->online)
+ res = -EINVAL;
+
+ error:
+
+ if (res <= 0) {
+ cpuinfo->topology_size = 0;
+ if (cpuinfo->topology) {
+ free(cpuinfo->topology);
+ cpuinfo->topology = NULL;
+ }
+ }
+
+ if (slpip)
+ free(slpip);
+ if (core_id)
+ free(core_id);
+
+ return res;
+}
+
#else
static int
@@ -965,3 +1384,98 @@ read_topology(erts_cpu_info_t *cpuinfo)
}
#endif
+
+#if defined(__WIN32__)
+
+int
+erts_map_win_error_to_errno(DWORD win_error)
+{
+ switch (win_error) {
+ case ERROR_INVALID_FUNCTION: return EINVAL; /* 1 */
+ case ERROR_FILE_NOT_FOUND: return ENOENT; /* 2 */
+ case ERROR_PATH_NOT_FOUND: return ENOENT; /* 3 */
+ case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; /* 4 */
+ case ERROR_ACCESS_DENIED: return EACCES; /* 5 */
+ case ERROR_INVALID_HANDLE: return EBADF; /* 6 */
+ case ERROR_ARENA_TRASHED: return ENOMEM; /* 7 */
+ case ERROR_NOT_ENOUGH_MEMORY: return ENOMEM; /* 8 */
+ case ERROR_INVALID_BLOCK: return ENOMEM; /* 9 */
+ case ERROR_BAD_ENVIRONMENT: return E2BIG; /* 10 */
+ case ERROR_BAD_FORMAT: return ENOEXEC; /* 11 */
+ case ERROR_INVALID_ACCESS: return EINVAL; /* 12 */
+ case ERROR_INVALID_DATA: return EINVAL; /* 13 */
+ case ERROR_OUTOFMEMORY: return ENOMEM; /* 14 */
+ case ERROR_INVALID_DRIVE: return ENOENT; /* 15 */
+ case ERROR_CURRENT_DIRECTORY: return EACCES; /* 16 */
+ case ERROR_NOT_SAME_DEVICE: return EXDEV; /* 17 */
+ case ERROR_NO_MORE_FILES: return ENOENT; /* 18 */
+ case ERROR_WRITE_PROTECT: return EACCES; /* 19 */
+ case ERROR_BAD_UNIT: return EACCES; /* 20 */
+ case ERROR_NOT_READY: return EACCES; /* 21 */
+ case ERROR_BAD_COMMAND: return EACCES; /* 22 */
+ case ERROR_CRC: return EACCES; /* 23 */
+ case ERROR_BAD_LENGTH: return EACCES; /* 24 */
+ case ERROR_SEEK: return EACCES; /* 25 */
+ case ERROR_NOT_DOS_DISK: return EACCES; /* 26 */
+ case ERROR_SECTOR_NOT_FOUND: return EACCES; /* 27 */
+ case ERROR_OUT_OF_PAPER: return EACCES; /* 28 */
+ case ERROR_WRITE_FAULT: return EACCES; /* 29 */
+ case ERROR_READ_FAULT: return EACCES; /* 30 */
+ case ERROR_GEN_FAILURE: return EACCES; /* 31 */
+ case ERROR_SHARING_VIOLATION: return EACCES; /* 32 */
+ case ERROR_LOCK_VIOLATION: return EACCES; /* 33 */
+ case ERROR_WRONG_DISK: return EACCES; /* 34 */
+ case ERROR_SHARING_BUFFER_EXCEEDED: return EACCES; /* 36 */
+ case ERROR_BAD_NETPATH: return ENOENT; /* 53 */
+ case ERROR_NETWORK_ACCESS_DENIED: return EACCES; /* 65 */
+ case ERROR_BAD_NET_NAME: return ENOENT; /* 67 */
+ case ERROR_FILE_EXISTS: return EEXIST; /* 80 */
+ case ERROR_CANNOT_MAKE: return EACCES; /* 82 */
+ case ERROR_FAIL_I24: return EACCES; /* 83 */
+ case ERROR_INVALID_PARAMETER: return EINVAL; /* 87 */
+ case ERROR_NO_PROC_SLOTS: return EAGAIN; /* 89 */
+ case ERROR_DRIVE_LOCKED: return EACCES; /* 108 */
+ case ERROR_BROKEN_PIPE: return EPIPE; /* 109 */
+ case ERROR_DISK_FULL: return ENOSPC; /* 112 */
+ case ERROR_INVALID_TARGET_HANDLE: return EBADF; /* 114 */
+ case ERROR_WAIT_NO_CHILDREN: return ECHILD; /* 128 */
+ case ERROR_CHILD_NOT_COMPLETE: return ECHILD; /* 129 */
+ case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; /* 130 */
+ case ERROR_NEGATIVE_SEEK: return EINVAL; /* 131 */
+ case ERROR_SEEK_ON_DEVICE: return EACCES; /* 132 */
+ case ERROR_DIR_NOT_EMPTY: return ENOTEMPTY;/* 145 */
+ case ERROR_NOT_LOCKED: return EACCES; /* 158 */
+ case ERROR_BAD_PATHNAME: return ENOENT; /* 161 */
+ case ERROR_MAX_THRDS_REACHED: return EAGAIN; /* 164 */
+ case ERROR_LOCK_FAILED: return EACCES; /* 167 */
+ case ERROR_ALREADY_EXISTS: return EEXIST; /* 183 */
+ case ERROR_INVALID_STARTING_CODESEG: return ENOEXEC; /* 188 */
+ case ERROR_INVALID_STACKSEG: return ENOEXEC; /* 189 */
+ case ERROR_INVALID_MODULETYPE: return ENOEXEC; /* 190 */
+ case ERROR_INVALID_EXE_SIGNATURE: return ENOEXEC; /* 191 */
+ case ERROR_EXE_MARKED_INVALID: return ENOEXEC; /* 192 */
+ case ERROR_BAD_EXE_FORMAT: return ENOEXEC; /* 193 */
+ case ERROR_ITERATED_DATA_EXCEEDS_64k: return ENOEXEC; /* 194 */
+ case ERROR_INVALID_MINALLOCSIZE: return ENOEXEC; /* 195 */
+ case ERROR_DYNLINK_FROM_INVALID_RING: return ENOEXEC; /* 196 */
+ case ERROR_IOPL_NOT_ENABLED: return ENOEXEC; /* 197 */
+ case ERROR_INVALID_SEGDPL: return ENOEXEC; /* 198 */
+ case ERROR_AUTODATASEG_EXCEEDS_64k: return ENOEXEC; /* 199 */
+ case ERROR_RING2SEG_MUST_BE_MOVABLE: return ENOEXEC; /* 200 */
+ case ERROR_RELOC_CHAIN_XEEDS_SEGLIM: return ENOEXEC; /* 201 */
+ case ERROR_INFLOOP_IN_RELOC_CHAIN: return ENOEXEC; /* 202 */
+ case ERROR_FILENAME_EXCED_RANGE: return ENOENT; /* 206 */
+ case ERROR_NESTING_NOT_ALLOWED: return EAGAIN; /* 215 */
+ case ERROR_NOT_ENOUGH_QUOTA: return ENOMEM; /* 1816 */
+ default: return EINVAL;
+ }
+}
+
+int
+erts_get_last_win_errno(void)
+{
+ return erts_map_win_error_to_errno(GetLastError());
+}
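+
+/*
+ * Illustrative usage sketch: after a failing Win32 call, the mapped POSIX
+ * errno can be obtained either by passing GetLastError() explicitly to
+ * erts_map_win_error_to_errno() or via erts_get_last_win_errno(). The
+ * CreateFileW() caller below is only an assumed example:
+ *
+ *     HANDLE h = CreateFileW(path, GENERIC_READ, FILE_SHARE_READ,
+ *                            NULL, OPEN_EXISTING, 0, NULL);
+ *     if (h == INVALID_HANDLE_VALUE) {
+ *         errno = erts_get_last_win_errno();
+ *         return -1;
+ *     }
+ */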
+
+
+#endif
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
new file mode 100644
index 0000000000..4db4cffd3a
--- /dev/null
+++ b/erts/lib_src/common/ethr_aux.c
@@ -0,0 +1,762 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A Thread library for use in the ERTS and other OTP
+ * applications.
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_AUX_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+#include <string.h>
+#include <limits.h>
+
+#ifndef __WIN32__
+#include <unistd.h>
+#endif
+
+#define ERTS_TS_EV_ALLOC_DEFAULT_POOL_SIZE 100
+#define ERTS_TS_EV_ALLOC_POOL_SIZE 25
+
+erts_cpu_info_t *ethr_cpu_info__;
+
+int ethr_not_completely_inited__ = 1;
+int ethr_not_inited__ = 1;
+
+ethr_memory_allocators ethr_mem__ = ETHR_MEM_ALLOCS_DEF_INITER__;
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+#endif
+
+void *(*ethr_thr_prepare_func__)(void) = NULL;
+void (*ethr_thr_parent_func__)(void *) = NULL;
+void (*ethr_thr_child_func__)(void *) = NULL;
+
+typedef struct ethr_xhndl_list_ ethr_xhndl_list;
+struct ethr_xhndl_list_ {
+ ethr_xhndl_list *next;
+ void (*funcp)(void);
+};
+
+size_t ethr_pagesize__;
+size_t ethr_min_stack_size__; /* kilo words */
+size_t ethr_max_stack_size__; /* kilo words */
+
+ethr_rwmutex xhndl_rwmtx;
+ethr_xhndl_list *xhndl_list;
+
+static int main_threads;
+
+static int init_ts_event_alloc(void);
+
+int
+ethr_init_common__(ethr_init_data *id)
+{
+ int res;
+ if (id) {
+ ethr_thr_prepare_func__ = id->thread_create_prepare_func;
+ ethr_thr_parent_func__ = id->thread_create_parent_func;
+ ethr_thr_child_func__ = id->thread_create_child_func;
+ }
+
+ ethr_cpu_info__ = erts_cpu_info_create();
+ if (!ethr_cpu_info__)
+ return ENOMEM;
+
+#ifdef _SC_PAGESIZE
+ ethr_pagesize__ = (size_t) sysconf(_SC_PAGESIZE);
+#elif defined(HAVE_GETPAGESIZE)
+ ethr_pagesize__ = (size_t) getpagesize();
+#else
+ ethr_pagesize__ = (size_t) 4*1024; /* Guess 4 KB */
+#endif
+
+ /* User needs at least 4 KB */
+ ethr_min_stack_size__ = 4*1024;
+#if SIZEOF_VOID_P == 8
+ /* Double that on 64-bit archs */
+ ethr_min_stack_size__ *= 2;
+#endif
+ /* On some systems as much as about 4 KB is used by the system */
+ ethr_min_stack_size__ += 4*1024;
+ /* There should be room for signal handlers */
+#ifdef SIGSTKSZ
+ ethr_min_stack_size__ += SIGSTKSZ;
+#else
+ ethr_min_stack_size__ += ethr_pagesize__;
+#endif
+ /* The system may think that we need more stack */
+#if defined(PTHREAD_STACK_MIN)
+ if (ethr_min_stack_size__ < PTHREAD_STACK_MIN)
+ ethr_min_stack_size__ = PTHREAD_STACK_MIN;
+#elif defined(_SC_THREAD_STACK_MIN)
+ {
+ size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
+ if (ethr_min_stack_size__ < thr_min_stk_sz)
+ ethr_min_stack_size__ = thr_min_stk_sz;
+ }
+#endif
+ /* On at least some platforms, the guard area is included in the
+ stack size passed when creating threads */
+#ifdef ETHR_STACK_GUARD_SIZE
+ ethr_min_stack_size__ += ETHR_STACK_GUARD_SIZE;
+#endif
+ ethr_min_stack_size__ = ETHR_PAGE_ALIGN(ethr_min_stack_size__);
+
+ ethr_min_stack_size__ = ETHR_B2KW(ethr_min_stack_size__);
+
+ ethr_max_stack_size__ = 32*1024*1024;
+#if SIZEOF_VOID_P == 8
+ ethr_max_stack_size__ *= 2;
+#endif
+ ethr_max_stack_size__ = ETHR_B2KW(ethr_max_stack_size__);
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ {
+ int i;
+ for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
+ res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
+ if (res != 0)
+ return res;
+ }
+ }
+#endif
+
+ res = ethr_mutex_lib_init(erts_get_cpu_configured(ethr_cpu_info__));
+ if (res != 0)
+ return res;
+
+ xhndl_list = NULL;
+
+ return 0;
+}
+
+int
+ethr_late_init_common__(ethr_late_init_data *lid)
+{
+ ethr_ts_event *tsep = NULL;
+ int reader_groups;
+ int res;
+ int i;
+ ethr_memory_allocator *m[] = {&ethr_mem__.std,
+ &ethr_mem__.sl,
+ &ethr_mem__.ll};
+ if (lid)
+ ethr_mem__ = lid->mem;
+ if (!ethr_mem__.std.alloc
+ || !ethr_mem__.std.realloc
+ || !ethr_mem__.std.free) {
+ ethr_mem__.std.alloc = malloc;
+ ethr_mem__.std.realloc = realloc;
+ ethr_mem__.std.free = free;
+ }
+ for (i = 0; i < sizeof(m)/sizeof(m[0]); i++) {
+ if (!m[i]->alloc || !m[i]->realloc || !m[i]->free) {
+ m[i]->alloc = ethr_mem__.std.alloc;
+ m[i]->realloc = ethr_mem__.std.realloc;
+ m[i]->free = ethr_mem__.std.free;
+ }
+
+ }
+ res = init_ts_event_alloc();
+ if (res != 0)
+ return res;
+ res = ethr_make_ts_event__(&tsep);
+ if (res == 0)
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ if (!lid) {
+ main_threads = 0;
+ reader_groups = 0;
+ }
+ else {
+ if (lid->main_threads < 0 || USHRT_MAX < lid->main_threads)
+ return EINVAL;
+ main_threads = lid->main_threads;
+ reader_groups = lid->reader_groups;
+ }
+ res = ethr_mutex_lib_late_init(reader_groups, main_threads);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0; /* Need it for
+ rwmutex_init */
+ res = ethr_rwmutex_init(&xhndl_rwmtx);
+ ethr_not_completely_inited__ = 1;
+ if (res != 0)
+ return res;
+ return 0;
+}
+
+int
+ethr_install_exit_handler(void (*funcp)(void))
+{
+ ethr_xhndl_list *xhp;
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (!funcp)
+ return EINVAL;
+
+ xhp = (ethr_xhndl_list *) ethr_mem__.std.alloc(sizeof(ethr_xhndl_list));
+ if (!xhp)
+ return ENOMEM;
+
+ ethr_rwmutex_rwlock(&xhndl_rwmtx);
+
+ xhp->funcp = funcp;
+ xhp->next = xhndl_list;
+ xhndl_list = xhp;
+
+ ethr_rwmutex_rwunlock(&xhndl_rwmtx);
+
+ return 0;
+}
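+
+/*
+ * A minimal usage sketch (illustrative only; the my_cleanup() name is
+ * assumed): exit handlers are argument-less callbacks. Since new entries
+ * are pushed at the head of xhndl_list, the most recently installed
+ * handler is run first by ethr_run_exit_handlers__().
+ *
+ *     static void my_cleanup(void)
+ *     {
+ *         fprintf(stderr, "cleaning up\n");
+ *     }
+ *
+ *     int res = ethr_install_exit_handler(my_cleanup);
+ *     if (res != 0)
+ *         fprintf(stderr, "failed to install exit handler: %d\n", res);
+ */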
+
+void
+ethr_run_exit_handlers__(void)
+{
+ ethr_xhndl_list *xhp;
+
+ ethr_rwmutex_rlock(&xhndl_rwmtx);
+
+ xhp = xhndl_list;
+
+ ethr_rwmutex_runlock(&xhndl_rwmtx);
+
+ for (; xhp; xhp = xhp->next)
+ (*xhp->funcp)();
+}
+
+/*
+ * Thread specific event alloc, etc.
+ *
+ * Note that we don't know when it is safe to destroy an event, but
+ * we know when it is safe to reuse it. ts_event_free() therefore
+ * never destroys an event (but makes freed events available for
+ * reuse).
+ *
+ * We could easily keep track of the usage of events, and by this
+ * make it possible to destroy events. We would however suffer a
+ * performance penalty for this and save very little memory.
+ */
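+
+/*
+ * A rough lifecycle sketch (illustrative only) of the reuse scheme
+ * described above:
+ *
+ *     ethr_ts_event *tsep = NULL;
+ *     ethr_make_ts_event__(&tsep);    allocates, or reuses a freed event
+ *     ... use tsep->event ...
+ *     ethr_free_ts_event__(tsep);     pushed on the free list, not destroyed
+ *
+ * A reused event keeps its ETHR_TS_EV_INITED flag, so the underlying
+ * ethr_event is only initialized the first time it is handed out.
+ */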
+
+typedef union {
+ ethr_ts_event ts_ev;
+ char align[ETHR_CACHE_LINE_ALIGN_SIZE(sizeof(ethr_ts_event))];
+} ethr_aligned_ts_event;
+
+static ethr_spinlock_t ts_ev_alloc_lock;
+static ethr_ts_event *free_ts_ev;
+
+#if SIZEOF_VOID_P == SIZEOF_INT
+typedef unsigned int EthrPtrSzUInt;
+#elif SIZEOF_VOID_P == SIZEOF_LONG
+typedef unsigned long EthrPtrSzUInt;
+#else
+#error No pointer sized integer type
+#endif
+
+static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
+{
+ int i;
+ ethr_aligned_ts_event *atsev;
+ atsev = ethr_mem__.std.alloc(sizeof(ethr_aligned_ts_event) * size
+ + ETHR_CACHE_LINE_SIZE);
+ if (!atsev)
+ return NULL;
+ /* If the pool is not cache line aligned, round it up to the next
+ cache line boundary; the extra ETHR_CACHE_LINE_SIZE bytes
+ allocated above leave room for this. */
+ if ((((EthrPtrSzUInt) atsev) & ETHR_CACHE_LINE_MASK) != 0)
+ atsev = ((ethr_aligned_ts_event *)
+ ((((EthrPtrSzUInt) atsev) & ~ETHR_CACHE_LINE_MASK)
+ + ETHR_CACHE_LINE_SIZE));
+ for (i = 1; i < size; i++) {
+ atsev[i-1].ts_ev.next = &atsev[i].ts_ev;
+ ethr_atomic_init(&atsev[i-1].ts_ev.uaflgs, 0);
+ atsev[i-1].ts_ev.iflgs = 0;
+ }
+ ethr_atomic_init(&atsev[size-1].ts_ev.uaflgs, 0);
+ atsev[size-1].ts_ev.iflgs = 0;
+ atsev[size-1].ts_ev.next = NULL;
+ if (endpp)
+ *endpp = &atsev[size-1].ts_ev;
+ return &atsev[0].ts_ev;
+}
+
+static int init_ts_event_alloc(void)
+{
+ free_ts_ev = ts_event_pool(ERTS_TS_EV_ALLOC_DEFAULT_POOL_SIZE,
+ NULL);
+ if (!free_ts_ev)
+ return ENOMEM;
+ return ethr_spinlock_init(&ts_ev_alloc_lock);
+}
+
+static ethr_ts_event *ts_event_alloc(void)
+{
+ ethr_ts_event *ts_ev;
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ if (free_ts_ev) {
+ ts_ev = free_ts_ev;
+ free_ts_ev = ts_ev->next;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+ }
+ else {
+ ethr_ts_event *ts_ev_pool_end;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+
+ ts_ev = ts_event_pool(ERTS_TS_EV_ALLOC_POOL_SIZE, &ts_ev_pool_end);
+ if (!ts_ev)
+ return NULL;
+
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ ts_ev_pool_end->next = free_ts_ev;
+ free_ts_ev = ts_ev->next;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+ }
+ return ts_ev;
+}
+
+static void ts_event_free(ethr_ts_event *ts_ev)
+{
+ ETHR_ASSERT(!ts_ev->udata);
+ ethr_spin_lock(&ts_ev_alloc_lock);
+ ts_ev->next = free_ts_ev;
+ free_ts_ev = ts_ev;
+ ethr_spin_unlock(&ts_ev_alloc_lock);
+}
+
+int ethr_make_ts_event__(ethr_ts_event **tsepp)
+{
+ int res;
+ ethr_ts_event *tsep = *tsepp;
+
+ if (!tsep) {
+ tsep = ts_event_alloc();
+ if (!tsep)
+ return ENOMEM;
+ }
+
+ if ((tsep->iflgs & ETHR_TS_EV_INITED) == 0) {
+ res = ethr_event_init(&tsep->event);
+ if (res != 0) {
+ ts_event_free(tsep);
+ return res;
+ }
+ }
+
+ tsep->iflgs = ETHR_TS_EV_INITED;
+ tsep->udata = NULL;
+ tsep->rgix = 0;
+ tsep->mtix = 0;
+
+ res = ethr_set_tse__(tsep);
+ if (res != 0 && tsepp && *tsepp) {
+ ts_event_free(tsep);
+ return res;
+ }
+
+ if (tsepp)
+ *tsepp = tsep;
+
+ return 0;
+}
+
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp)
+{
+ int res;
+ ethr_ts_event *tsep = *tsepp;
+
+ if (!tsep) {
+ tsep = ts_event_alloc();
+ if (!tsep)
+ return ENOMEM;
+ }
+
+ if ((tsep->iflgs & ETHR_TS_EV_INITED) == 0) {
+ res = ethr_event_init(&tsep->event);
+ if (res != 0) {
+ ts_event_free(tsep);
+ return res;
+ }
+ }
+
+ tsep->iflgs = ETHR_TS_EV_INITED|ETHR_TS_EV_TMP;
+ tsep->udata = NULL;
+
+ if (tsepp)
+ *tsepp = tsep;
+
+ return 0;
+}
+
+int ethr_free_ts_event__(ethr_ts_event *tsep)
+{
+ ts_event_free(tsep);
+ return 0;
+}
+
+void ethr_ts_event_destructor__(void *vtsep)
+{
+ if (vtsep) {
+ ethr_ts_event *tsep = (ethr_ts_event *) vtsep;
+ ts_event_free(tsep);
+ ethr_set_tse__(NULL);
+ }
+}
+
+int ethr_set_main_thr_status(int on, int no)
+{
+ ethr_ts_event *tsep = ethr_get_tse__();
+ if (!tsep)
+ return EINVAL;
+ if (on) {
+ if (no < 1 || main_threads < no)
+ return EINVAL;
+ tsep->mtix = (unsigned short) no;
+ tsep->iflgs |= ETHR_TS_EV_MAIN_THR;
+ }
+ else {
+ tsep->iflgs &= ~ETHR_TS_EV_MAIN_THR;
+ tsep->mtix = (unsigned short) 0;
+ }
+ return 0;
+}
+
+int ethr_get_main_thr_status(int *on)
+{
+ ethr_ts_event *tsep = ethr_get_tse__();
+ if (!tsep)
+ *on = 0;
+ else {
+ if (tsep->iflgs & ETHR_TS_EV_MAIN_THR)
+ *on = 1;
+ else
+ *on = 0;
+ }
+ return 0;
+}
+
+
+/* Atomics */
+
+void
+ethr_atomic_init(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(var);
+ ethr_atomic_init__(var, i);
+}
+
+void
+ethr_atomic_set(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set__(var, i);
+}
+
+long
+ethr_atomic_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read__(var);
+}
+
+
+long
+ethr_atomic_add_read(ethr_atomic_t *var, long incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_add_read__(var, incr);
+}
+
+long
+ethr_atomic_inc_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read__(var);
+}
+
+long
+ethr_atomic_dec_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read__(var);
+}
+
+void
+ethr_atomic_add(ethr_atomic_t *var, long incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_add__(var, incr);
+}
+
+void
+ethr_atomic_inc(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_inc__(var);
+}
+
+void
+ethr_atomic_dec(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec__(var);
+}
+
+long
+ethr_atomic_read_band(ethr_atomic_t *var, long mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_band__(var, mask);
+}
+
+long
+ethr_atomic_read_bor(ethr_atomic_t *var, long mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_bor__(var, mask);
+}
+
+long
+ethr_atomic_xchg(ethr_atomic_t *var, long new)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_xchg__(var, new);
+}
+
+long
+ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg__(var, new, expected);
+}
+
+long
+ethr_atomic_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_acqb__(var);
+}
+
+long
+ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read_acqb__(var);
+}
+
+void
+ethr_atomic_set_relb(ethr_atomic_t *var, long i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set_relb__(var, i);
+}
+
+void
+ethr_atomic_dec_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec_relb__(var);
+}
+
+long
+ethr_atomic_dec_read_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read_relb__(var);
+}
+
+long
+ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, long new, long exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_acqb__(var, new, exp);
+}
+
+long
+ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, long new, long exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_relb__(var, new, exp);
+}
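+
+/*
+ * A minimal sketch (illustrative only) of how these wrappers compose into
+ * a reference count; the refc variable and the destroy() call are assumed:
+ *
+ *     static ethr_atomic_t refc;
+ *
+ *     ethr_atomic_init(&refc, 1);                take the first reference
+ *     ethr_atomic_inc(&refc);                    take another reference
+ *     if (ethr_atomic_dec_read_relb(&refc) == 0)
+ *         destroy();                             last reference dropped
+ */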
+
+
+/* Spinlocks and rwspinlocks */
+
+int
+ethr_spinlock_init(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_init__(lock);
+}
+
+int
+ethr_spinlock_destroy(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_destroy__(lock);
+}
+
+void
+ethr_spin_unlock(ethr_spinlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_spin_unlock__(lock);
+}
+
+void
+ethr_spin_lock(ethr_spinlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_spin_lock__(lock);
+}
+
+int
+ethr_rwlock_init(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_init__(lock);
+}
+
+int
+ethr_rwlock_destroy(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_destroy__(lock);
+}
+
+void
+ethr_read_unlock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_read_unlock__(lock);
+}
+
+void
+ethr_read_lock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_read_lock__(lock);
+}
+
+void
+ethr_write_unlock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_write_unlock__(lock);
+}
+
+void
+ethr_write_lock(ethr_rwlock_t *lock)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(lock);
+ ethr_write_lock__(lock);
+}
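+
+/*
+ * A minimal usage sketch (illustrative only); the counter and its users
+ * are assumed:
+ *
+ *     static ethr_spinlock_t lck;
+ *     static long counter;
+ *
+ *     if (ethr_spinlock_init(&lck) != 0)
+ *         abort();
+ *     ...
+ *     ethr_spin_lock(&lck);
+ *     counter++;
+ *     ethr_spin_unlock(&lck);
+ */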
+
+ETHR_IMPL_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err)
+{
+ char *errstr;
+ if (err == ENOTSUP)
+ errstr = "Operation not supported";
+ else {
+ errstr = strerror(err);
+ if (!errstr)
+ errstr = "Unknown error";
+ }
+ fprintf(stderr, "%s:%d: Fatal error in %s(): %s (%d)\n",
+ file, line, func, errstr, err);
+ ethr_abort__();
+}
+
+int ethr_assert_failed(const char *file, int line, const char *func, char *a)
+{
+ fprintf(stderr, "%s:%d: %s(): Assertion failed: %s\n", file, line, func, a);
+ ethr_abort__();
+ return 0;
+}
diff --git a/erts/lib_src/common/ethr_cbf.c b/erts/lib_src/common/ethr_cbf.c
new file mode 100644
index 0000000000..04feceec89
--- /dev/null
+++ b/erts/lib_src/common/ethr_cbf.c
@@ -0,0 +1,36 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * We keep this function alone in a separate file so that the
+ * compiler won't optimize calls to it away.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "ethread.h"
+
+void
+ethr_compiler_barrier_fallback(void)
+{
+
+}
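+
+/*
+ * Sketch of the intent (illustrative only; flag and do_work() are assumed
+ * names): since the function above is defined in a separately compiled
+ * unit, the compiler cannot know what it touches (at least as long as no
+ * cross-unit optimization is performed) and must therefore not cache or
+ * reorder memory accesses across a call to it. Note that this is a
+ * compiler barrier only; it does not order memory at the hardware level.
+ *
+ *     flag = 1;
+ *     ethr_compiler_barrier_fallback();   the store above cannot be moved
+ *                                         below this call by the compiler
+ *     do_work();
+ */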
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
new file mode 100644
index 0000000000..aac0d44a32
--- /dev/null
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -0,0 +1,2758 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_MUTEX_IMPL__
+
+#include <limits.h>
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#define ETHR_SPIN_WITH_WAITERS 1
+
+#define ETHR_MTX_MAX_FLGS_SPIN 1000
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+static int default_rwmtx_main_spincount;
+static int default_rwmtx_aux_spincount;
+#endif
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+static int default_mtx_main_spincount;
+static int default_mtx_aux_spincount;
+static int default_cnd_main_spincount;
+static int default_cnd_aux_spincount;
+#endif
+
+static int no_spin;
+
+#ifndef ETHR_USE_OWN_RWMTX_IMPL__
+static pthread_rwlockattr_t write_pref_attr_data;
+static pthread_rwlockattr_t *write_pref_attr;
+#endif
+
+#if defined(ETHR_MTX_Q_LOCK_SPINLOCK__)
+# define ETHR_MTX_QLOCK_INIT ethr_spinlock_init
+# define ETHR_MTX_QLOCK_DESTROY ethr_spinlock_destroy
+# define ETHR_MTX_Q_LOCK ethr_spin_lock
+# define ETHR_MTX_Q_UNLOCK ethr_spin_unlock
+#elif defined(ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__)
+# define ETHR_MTX_QLOCK_INIT(QL) pthread_mutex_init((QL), NULL)
+# define ETHR_MTX_QLOCK_DESTROY pthread_mutex_destroy
+# define ETHR_MTX_Q_LOCK(L) \
+do { \
+ int res__ = pthread_mutex_lock(L); \
+ if (res__ != 0) \
+ ETHR_FATAL_ERROR__(res__); \
+} while (0)
+# define ETHR_MTX_Q_UNLOCK(L) \
+do { \
+ int res__ = pthread_mutex_unlock(L); \
+ if (res__ != 0) \
+ ETHR_FATAL_ERROR__(res__); \
+} while (0)
+#elif defined(ETHR_MTX_Q_LOCK_CRITICAL_SECTION__)
+# define ETHR_MTX_QLOCK_INIT(QL) (InitializeCriticalSection((QL)), 0)
+# define ETHR_MTX_QLOCK_DESTROY(QL) (DeleteCriticalSection((QL)), 0)
+# define ETHR_MTX_Q_LOCK(QL) EnterCriticalSection((QL))
+# define ETHR_MTX_Q_UNLOCK(QL) LeaveCriticalSection((QL))
+#endif
+
+int
+ethr_mutex_lib_init(int cpu_conf)
+{
+ int res = 0;
+
+ no_spin = cpu_conf == 1;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+ default_mtx_main_spincount = ETHR_MTX_DEFAULT_MAIN_SPINCOUNT;
+ default_mtx_aux_spincount = ETHR_MTX_DEFAULT_AUX_SPINCOUNT;
+ default_cnd_main_spincount = ETHR_CND_DEFAULT_MAIN_SPINCOUNT;
+ default_cnd_aux_spincount = ETHR_CND_DEFAULT_AUX_SPINCOUNT;
+#endif
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+ default_rwmtx_main_spincount = ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT;
+ default_rwmtx_aux_spincount = ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT;
+
+#else
+
+#if defined(ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP) \
+ && defined(ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
+ res = pthread_rwlockattr_init(&write_pref_attr_data);
+ if (res != 0)
+ return res;
+ res = pthread_rwlockattr_setkind_np(
+ &write_pref_attr_data,
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+ write_pref_attr = &write_pref_attr_data;
+#else
+ write_pref_attr = NULL;
+#endif
+
+#endif
+
+ return res;
+}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#ifdef ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS
+#if 0 /*
+ * When inc and dec are real atomic instructions, as on x86, the
+ * ETHR_RLOCK_WITH_INC_DEC implementation performs better with
+ * lots of read locks compared to the cmpxchg based implementation.
+ * It does, however, perform worse with lots of mixed reads and writes.
+ * It could be used for rwlocks that are known to be mostly read
+ * locked, but the readers array based implementation outperforms it
+ * by far. Therefore, it has been disabled, and will probably be
+ * removed some time in the future.
+ */
+# define ETHR_RLOCK_WITH_INC_DEC
+#endif
+#endif
+
+static int reader_groups_array_size = 0;
+static int main_threads_array_size = 0;
+
+#endif
+
+int
+ethr_mutex_lib_late_init(int no_reader_groups, int no_main_threads)
+{
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+ reader_groups_array_size = (no_reader_groups <= 1
+ ? 1
+ : no_reader_groups + 1);
+ main_threads_array_size = (no_main_threads <= 1
+ ? 1
+ : no_main_threads + 1);
+#endif
+ return 0;
+}
+
+int
+ethr_rwmutex_set_reader_group(int ix)
+{
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+ ethr_ts_event *tse;
+
+ if (ix < 0 || reader_groups_array_size <= ix)
+ return EINVAL;
+
+ tse = ethr_get_ts_event();
+
+ if ((tse->iflgs & ETHR_TS_EV_ETHREAD) == 0) {
+ ethr_leave_ts_event(tse);
+ return EINVAL;
+ }
+
+ tse->rgix = ix;
+
+ ethr_leave_ts_event(tse);
+#endif
+ return 0;
+}
+
+#if defined(ETHR_MTX_HARD_DEBUG_Q) || defined(ETHR_MTX_HARD_DEBUG_WSQ)
+static void hard_debug_chk_q__(struct ethr_mutex_base_ *, int);
+#define ETHR_RWMTX_HARD_DEBUG_CHK_Q(RWMTX) hard_debug_chk_q__(&(RWMTX)->mtxb,1)
+#define ETHR_MTX_HARD_DEBUG_CHK_Q(MTX) hard_debug_chk_q__(&(MTX)->mtxb, 0)
+#else
+#define ETHR_RWMTX_HARD_DEBUG_CHK_Q(RWMTX)
+#define ETHR_MTX_HARD_DEBUG_CHK_Q(MTX)
+#endif
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+static void
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx,
+ int have_w,
+ long initial);
+static int
+rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
+ long initial,
+ ethr_ts_event *tse,
+ int start_next_ix,
+ int check_before_try,
+ int try_write_lock);
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+/* -- Utilities operating both on ordinary mutexes and read write mutexes -- */
+
+static ETHR_INLINE void
+rwmutex_freqread_wtng_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix = (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ
+ ? tse->rgix
+ : tse->mtix);
+ rwmtx->tdata.ra[ix].data.waiting_readers++;
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_add(ethr_rwmutex *rwmtx,
+ ethr_rwmutex_type type,
+ int ix,
+ int inc)
+{
+ if (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ || ix == 0)
+ ethr_atomic_add(&rwmtx->tdata.ra[ix].data.readers, inc);
+ else {
+ ETHR_ASSERT(type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ETHR_ASSERT(inc == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ }
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_inc:
+ ethr_atomic_inc(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_inc;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ }
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_rdrs_dec(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec:
+ ethr_atomic_dec(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_dec_read(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec_read:
+ return ethr_atomic_dec_read(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec_read;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ return (long) 0;
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_dec_read_relb(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
+{
+ int ix;
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ ix = tse->rgix;
+ atomic_dec_read:
+ return ethr_atomic_dec_read_relb(&rwmtx->tdata.ra[ix].data.readers);
+ }
+ else {
+ ix = tse->mtix;
+ if (ix == 0)
+ goto atomic_dec_read;
+ ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
+ ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic_set_relb(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ return (long) 0;
+ }
+}
+
+static ETHR_INLINE long
+rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
+{
+ long res = ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers);
+#ifdef ETHR_DEBUG
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ ETHR_ASSERT(res >= 0);
+ break;
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+ ETHR_ASSERT(res == 0 || res == 1);
+ break;
+ default:
+ ETHR_ASSERT(0);
+ break;
+ }
+#endif
+ return res;
+}
+
+
+static ETHR_INLINE void
+enqueue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (!*queue) {
+ *queue = tse_start;
+ tse_start->prev = tse_end;
+ tse_end->next = tse_start;
+ }
+ else {
+ tse_end->next = *queue;
+ tse_start->prev = (*queue)->prev;
+ (*queue)->prev->next = tse_start;
+ (*queue)->prev = tse_end;
+ }
+}
+
+static ETHR_INLINE void
+insert(ethr_ts_event *tse_pred, ethr_ts_event *tse)
+{
+ tse->next = tse_pred->next;
+ tse->prev = tse_pred;
+ tse_pred->next->prev = tse;
+ tse_pred->next = tse;
+}
+
+static ETHR_INLINE void
+dequeue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (tse_start->prev == tse_end) {
+ ETHR_ASSERT(*queue == tse_start && tse_end->next == tse_start);
+ *queue = NULL;
+ }
+ else {
+ if (*queue == tse_start)
+ *queue = tse_end->next;
+ tse_end->next->prev = tse_start->prev;
+ tse_start->prev->next = tse_end->next;
+ }
+}
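+
+/*
+ * The queue handled above is a circular doubly linked list: *queue points
+ * at the head, the head's prev is the tail and the tail's next is the
+ * head. A small sketch (illustrative only; a and b are assumed elements):
+ *
+ *     ethr_ts_event *q = NULL;
+ *     enqueue(&q, a, a);     q: a        (a->next == a->prev == a)
+ *     enqueue(&q, b, b);     q: a, b     (a->next == b, b->next == a)
+ *     dequeue(&q, a, a);     q: b        (b->next == b->prev == b)
+ */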
+
+static void
+event_wait(struct ethr_mutex_base_ *mtxb,
+ ethr_ts_event *tse,
+ int spincount,
+ long type,
+ int is_rwmtx,
+ int is_freq_read)
+{
+ int locked = 0;
+ long act;
+ int need_try_complete_runlock = 0;
+
+ /* Need to enqueue and wait... */
+
+ tse->uflgs = type;
+ ethr_atomic_set(&tse->uaflgs, type);
+
+ ETHR_MTX_Q_LOCK(&mtxb->qlck);
+ locked = 1;
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ act = ethr_atomic_read(&mtxb->flgs);
+
+ if (act & type) {
+
+ /* Wait bit already there; enqueue... */
+
+ ETHR_ASSERT(mtxb->q);
+ if (type == ETHR_RWMTX_W_WAIT_FLG__) {
+ enqueue(&mtxb->q, tse, tse);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws++;
+#endif
+ }
+ else {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_ASSERT(is_rwmtx);
+ ETHR_ASSERT(rwmtx->rq_end);
+ insert(rwmtx->rq_end, tse);
+ rwmtx->rq_end = tse;
+ if (is_freq_read)
+ rwmutex_freqread_wtng_rdrs_inc(rwmtx, tse);
+ else
+ rwmtx->tdata.rs++;
+ }
+ }
+ else {
+
+ /* Set wait bit */
+
+ while (1) {
+ long new, exp = act;
+ int freqread_tryrlock = 0;
+ need_try_complete_runlock = 0;
+
+ if (type == ETHR_RWMTX_W_WAIT_FLG__) {
+ if (is_freq_read && act == ETHR_RWMTX_R_FLG__)
+ need_try_complete_runlock = 1;
+ if (act != 0)
+ new = act | ETHR_RWMTX_W_WAIT_FLG__;
+ else
+ new = ETHR_RWMTX_W_FLG__; /* Try to get it */
+ }
+ else {
+ ETHR_ASSERT(is_rwmtx);
+
+ if (!is_freq_read) {
+ if (act & (ETHR_RWMTX_W_FLG__| ETHR_RWMTX_W_WAIT_FLG__))
+ new = act | ETHR_RWMTX_R_WAIT_FLG__;
+ else
+ new = act + 1; /* Try to get it */
+ }
+ else {
+ if (act & ~ETHR_RWMTX_R_FLG__)
+ new = act | ETHR_RWMTX_R_WAIT_FLG__;
+ else { /* Try to get it */
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+ ETHR_MEMORY_BARRIER;
+ new = act | ETHR_RWMTX_R_FLG__;
+ freqread_tryrlock = 1;
+ }
+ }
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs, new, exp);
+ if (exp == act) {
+ if (new & type) {
+ act = new;
+ break;
+ }
+ else {
+ /* Got it */
+ goto done;
+ }
+ }
+
+ if (freqread_tryrlock) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+
+ /* We didn't set ETHR_RWMTX_R_FLG__; however, someone
+ else might have */
+ if (act == ETHR_RWMTX_R_FLG__)
+ goto done; /* Got it by help from someone else */
+
+ ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
+ /*
+ * We know that no waiter flags have been set, i.e.,
+ * we cannot get into a situation where we need to wake
+ * someone up here. Just restore the readers counter
+ * and do it over again...
+ */
+ rwmutex_freqread_rdrs_dec(rwmtx, tse);
+ }
+ }
+
+ /* Enqueue */
+
+ if (type == ETHR_RWMTX_R_WAIT_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_ASSERT(is_rwmtx);
+ ETHR_ASSERT(!rwmtx->rq_end);
+ rwmtx->rq_end = tse;
+ if (is_freq_read)
+ rwmutex_freqread_wtng_rdrs_inc(rwmtx, tse);
+ else
+ rwmtx->tdata.rs++;
+ }
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ else {
+ mtxb->ws++;
+ }
+#endif
+
+ enqueue(&mtxb->q, tse, tse);
+ }
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ /* Wait */
+ locked = 0;
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+
+ if (need_try_complete_runlock) {
+ ETHR_ASSERT(((ethr_rwmutex *) mtxb)->type
+ != ETHR_RWMUTEX_TYPE_NORMAL);
+ /*
+ * We were the only one in the queue when we enqueued, and the
+ * mutex was seemingly read locked. We need to try to complete a
+ * runlock; otherwise we might hang forever. If the runlock can
+ * be completed, we will be dequeued and woken by ourselves.
+ */
+ rwmutex_try_complete_runlock((ethr_rwmutex *) mtxb,
+ act, tse, 0, 1, 0);
+ }
+
+ while (1) {
+ ethr_event_reset(&tse->event);
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ goto done; /* Got it */
+
+ ETHR_ASSERT(act == type);
+ ethr_event_swait(&tse->event, spincount);
+ /* swait result: 0 || EINTR */
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ goto done; /* Got it */
+ }
+
+ done:
+ if (locked)
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+}
+
+static void
+wake_writer(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+ ethr_ts_event *tse;
+
+ tse = mtxb->q;
+ ETHR_ASSERT(tse);
+ dequeue(&mtxb->q, tse, tse);
+
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_RWMTX_W_WAIT_FLG__);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws--;
+#endif
+#if defined(ETHR_MTX_HARD_DEBUG_Q) || defined(ETHR_MTX_HARD_DEBUG_WSQ)
+ hard_debug_chk_q__(mtxb, is_rwmtx);
+#endif
+
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+
+ ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_event_set(&tse->event);
+}
+
+static ETHR_INLINE int
+initial_spincount(struct ethr_mutex_base_ *mtxb)
+{
+ return (mtxb->aux_scnt < ETHR_MTX_MAX_FLGS_SPIN
+ ? mtxb->aux_scnt
+ : ETHR_MTX_MAX_FLGS_SPIN);
+}
+
+static ETHR_INLINE int
+update_spincount(struct ethr_mutex_base_ *mtxb,
+ ethr_ts_event *tse,
+ int *start_scnt,
+ int *scnt)
+{
+ int sscnt = *start_scnt;
+ if (sscnt < 0) {
+ *scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? mtxb->main_scnt
+ : mtxb->aux_scnt);
+ *scnt -= ETHR_MTX_MAX_FLGS_SPIN;
+ }
+ else {
+ *scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? mtxb->main_scnt
+ : mtxb->aux_scnt);
+ *scnt -= sscnt;
+ if (*scnt > 0 && sscnt < ETHR_MTX_MAX_FLGS_SPIN) {
+ *scnt = ETHR_MTX_MAX_FLGS_SPIN - sscnt;
+ *start_scnt = -1;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int check_readers_array(ethr_rwmutex *rwmtx,
+ int start_rix,
+ int length,
+ int pre_check);
+
+static ETHR_INLINE void
+write_lock_wait(struct ethr_mutex_base_ *mtxb,
+ long initial,
+ int is_rwmtx,
+ int is_freq_read)
+{
+ long act = initial;
+ int scnt, start_scnt;
+ ethr_ts_event *tse = NULL;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ int res;
+ int freq_read_size = -1;
+ int freq_read_start_ix = -1;
+
+ ETHR_ASSERT(!is_freq_read || is_rwmtx);
+
+ start_scnt = scnt = initial_spincount(mtxb);
+
+ /*
+ * Spin trying to write lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+ long exp;
+
+ while (act != 0) {
+
+ if (is_freq_read && act == ETHR_RWMTX_R_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ if (!tse)
+ tse = ethr_get_ts_event();
+ if (freq_read_size < 0) {
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ freq_read_size = reader_groups_array_size;
+ freq_read_start_ix = tse->rgix;
+ }
+ else {
+ freq_read_size = main_threads_array_size;
+ freq_read_start_ix = tse->mtix;
+ }
+ }
+ res = check_readers_array(rwmtx,
+ freq_read_start_ix,
+ freq_read_size,
+ 1);
+ scnt--;
+ if (res == 0) {
+ act = ethr_atomic_read(&mtxb->flgs);
+ if (act & ETHR_RWMTX_R_MASK__) {
+ res = rwmutex_try_complete_runlock(rwmtx, act,
+ tse, 0, 0,
+ 1);
+ if (res != EBUSY)
+ goto done; /* Got it */
+ }
+ if (scnt <= 0)
+ goto chk_spin;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ continue;
+ }
+ }
+
+ if (scnt <= 0) {
+ chk_spin:
+ scnt = 0;
+
+ if (!tse)
+ tse = ethr_get_ts_event();
+ if (update_spincount(mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(mtxb, tse, scnt, ETHR_RWMTX_W_WAIT_FLG__,
+ is_rwmtx, is_freq_read);
+ goto done;
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&mtxb->flgs);
+ scnt--;
+ }
+ ETHR_ASSERT(scnt >= 0);
+
+ exp = act;
+
+ act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs,
+ ETHR_RWMTX_W_FLG__,
+ exp);
+ if (act == 0)
+ goto done; /* Got it */
+ }
+
+ done:
+ if (tse)
+ ethr_leave_ts_event(tse);
+}
+
+static int
+mtxb_init(struct ethr_mutex_base_ *mtxb,
+ int def_main_scnt,
+ int main_scnt,
+ int def_aux_scnt,
+ int aux_scnt)
+{
+ ETHR_MTX_HARD_DEBUG_LFS_INIT(mtxb);
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtxb->ws = 0;
+#endif
+ if (no_spin) {
+ mtxb->main_scnt = 0;
+ mtxb->aux_scnt = 0;
+ }
+ else {
+ if (main_scnt > SHRT_MAX)
+ mtxb->main_scnt = SHRT_MAX;
+ else if (main_scnt < 0)
+ mtxb->main_scnt = def_main_scnt;
+ else
+ mtxb->main_scnt = (short) main_scnt;
+ if (aux_scnt > SHRT_MAX)
+ mtxb->aux_scnt = SHRT_MAX;
+ else if (aux_scnt < 0)
+ mtxb->aux_scnt = def_aux_scnt;
+ else
+ mtxb->aux_scnt = (short) aux_scnt;
+ if (mtxb->main_scnt < mtxb->aux_scnt)
+ mtxb->main_scnt = mtxb->aux_scnt;
+
+ }
+ mtxb->q = NULL;
+ ethr_atomic_init(&mtxb->flgs, 0);
+ return ETHR_MTX_QLOCK_INIT(&mtxb->qlck);
+}
+
+static int
+mtxb_destroy(struct ethr_mutex_base_ *mtxb)
+{
+ long act;
+ ETHR_MTX_Q_LOCK(&mtxb->qlck);
+ act = ethr_atomic_read(&mtxb->flgs);
+ ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
+ if (act != 0)
+ return EINVAL;
+ return ETHR_MTX_QLOCK_DESTROY(&mtxb->qlck);
+}
+
+
+#endif /* ETHR_USE_OWN_RWMTX_IMPL__ || ETHR_USE_OWN_MTX_IMPL__ */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Mutex and condition variable implementation *
+\* */
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+/* -- Mutex ---------------------------------------------------------------- */
+
+int
+ethr_mutex_init_opt(ethr_mutex *mtx, ethr_mutex_opt *opt)
+{
+ int res;
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(mtx);
+ res = mtxb_init(&mtx->mtxb,
+ default_mtx_main_spincount,
+ opt ? opt->main_spincount : -1,
+ default_mtx_aux_spincount,
+ opt ? opt->aux_spincount : -1);
+#if ETHR_XCHK
+ if (res != 0)
+ mtx->initialized = 0;
+#endif
+ return res;
+}
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+ return ethr_mutex_init_opt(mtx, NULL);
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = 0;
+#endif
+ return mtxb_destroy(&mtx->mtxb);
+}
+
+void
+ethr_mutex_lock_wait__(ethr_mutex *mtx, long initial)
+{
+ write_lock_wait(&mtx->mtxb, initial, 0, 0);
+}
+
+void
+ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
+{
+ ethr_ts_event *tse;
+
+ ETHR_MTX_Q_LOCK(&mtx->mtxb.qlck);
+ tse = mtx->mtxb.q;
+
+ ETHR_ASSERT(tse);
+ ETHR_ASSERT(ethr_atomic_read(&mtx->mtxb.flgs)
+ == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
+ ETHR_ASSERT(initial & ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+
+ /*
+ * If we have multiple waiters, there is no need to modify
+ * mtxb->flgs; otherwise, we need to clear the write wait bit...
+ */
+ if (tse->next == mtx->mtxb.q)
+ ethr_atomic_set(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__);
+
+ wake_writer(&mtx->mtxb, 0);
+}
+
+/* -- Condition variables -------------------------------------------------- */
+
+static void
+enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
+{
+ long act;
+
+ /*
+ * `ethr_cond_signal()' and `ethr_cond_broadcast()' end up here. If `mtx'
+ * is not currently locked by the calling thread, we almost certainly have
+ * a hard to debug race condition. There might, however, be some (strange)
+ * use for it. POSIX also allows a call to `pthread_cond_signal' or
+ * `pthread_cond_broadcast' even though the associated mutex isn't
+ * locked by the caller. Therefore, we also allow this kind of strange
+ * usage, but optimize for the case where the mutex is locked by the
+ * calling thread.
+ */
+
+ ETHR_MTX_Q_LOCK(&mtx->mtxb.qlck);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ {
+ int dbg_nws__ = 0;
+ ethr_ts_event *dbg_tse__;
+ for (dbg_tse__ = tse_start;
+ dbg_tse__ != tse_end;
+ dbg_tse__ = dbg_tse__->next)
+ dbg_nws__++;
+ mtx->mtxb.ws += dbg_nws__ + 1;
+ }
+#endif
+
+ act = ethr_atomic_read(&mtx->mtxb.flgs);
+ ETHR_ASSERT(act == 0
+ || act == ETHR_RWMTX_W_FLG__
+ || act == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
+ if (act & ETHR_RWMTX_W_FLG__) {
+ /* The normal sane case */
+ if (!(act & ETHR_RWMTX_W_WAIT_FLG__)) {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs,
+ (ETHR_RWMTX_W_FLG__
+ | ETHR_RWMTX_W_WAIT_FLG__),
+ ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__) {
+ /*
+ * Sigh... this wasn't so sane after all, since the mutex was
+ * obviously not locked by the current thread...
+ */
+ ETHR_ASSERT(act == 0);
+ goto mtx_unlocked;
+ }
+ }
+
+#ifdef ETHR_DEBUG
+ if (act & ETHR_RWMTX_W_WAIT_FLG__)
+ ETHR_ASSERT(mtx->mtxb.q);
+ else
+ ETHR_ASSERT(!mtx->mtxb.q);
+#endif
+
+ enqueue(&mtx->mtxb.q, tse_start, tse_end);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ }
+ else {
+ int multi;
+ mtx_unlocked:
+ /* Sigh... mutex isn't locked... */
+
+ multi = tse_start != tse_end;
+
+ while (1) {
+ long new, exp = act;
+
+ if (multi || (act & ETHR_RWMTX_W_FLG__))
+ new = ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__;
+ else
+ new = ETHR_RWMTX_W_FLG__;
+
+ act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ if (act & ETHR_RWMTX_W_FLG__) {
+ enqueue(&mtx->mtxb.q, tse_start, tse_end);
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ }
+ else {
+ ETHR_ASSERT(!mtx->mtxb.q);
+ /*
+ * Acquired the mutex on behalf of the
+ * first thread in the queue; wake
+ * it and enqueue the rest...
+ */
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ mtx->mtxb.ws--;
+#endif
+ if (multi) {
+ enqueue(&mtx->mtxb.q, tse_start->next, tse_end);
+ ETHR_ASSERT(mtx->mtxb.q);
+ }
+
+ ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
+ ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
+
+ ethr_atomic_set(&tse_start->uaflgs, 0);
+ ethr_event_set(&tse_start->event);
+ }
+ break;
+ }
+ }
+ }
+}
+
+int
+ethr_cond_init_opt(ethr_cond *cnd, ethr_cond_opt *opt)
+{
+ int res;
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(cnd);
+ cnd->q = NULL;
+ if (no_spin) {
+ cnd->main_scnt = 0;
+ cnd->aux_scnt = 0;
+ }
+ else {
+ if (!opt || opt->main_spincount < 0)
+ cnd->main_scnt = default_cnd_main_spincount;
+ else if (opt->main_spincount > SHRT_MAX)
+ cnd->main_scnt = SHRT_MAX;
+ else
+ cnd->main_scnt = (short) opt->main_spincount;
+ if (!opt || opt->aux_spincount < 0)
+ cnd->aux_scnt = default_cnd_aux_spincount;
+ else if (opt->aux_spincount > SHRT_MAX)
+ cnd->aux_scnt = SHRT_MAX;
+ else
+ cnd->aux_scnt = (short) opt->aux_spincount;
+ if (cnd->main_scnt < cnd->aux_scnt)
+ cnd->main_scnt = cnd->aux_scnt;
+ }
+ return ETHR_MTX_QLOCK_INIT(&cnd->qlck);
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+ return ethr_cond_init_opt(cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = 0;
+#endif
+ return ETHR_MTX_QLOCK_DESTROY(&cnd->qlck);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ ethr_ts_event *tse;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ tse = cnd->q;
+
+ if (!tse) {
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+ }
+ else {
+ ethr_mutex *mtx = (ethr_mutex *) tse->udata;
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_CND_WAIT_FLG__);
+
+ ethr_atomic_set(&tse->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+
+ dequeue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ tse->next = tse->prev = NULL;
+
+ enqueue_mtx(mtx, tse, tse);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ }
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ int res;
+ int got_all;
+ ethr_ts_event *tse;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ do {
+ got_all = 1;
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ tse = cnd->q;
+
+ if (!tse) {
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+ }
+ else {
+ ethr_mutex *mtx = (ethr_mutex *) tse->udata;
+ ethr_ts_event *tse_tmp, *tse_end;
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ tse_end = cnd->q->prev;
+
+ tse_tmp = tse;
+
+ do {
+
+ if (mtx == (ethr_mutex *) tse_tmp->udata) {
+ /* The normal case */
+
+ ETHR_ASSERT(tse_tmp->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse_tmp->uaflgs)
+ == ETHR_CND_WAIT_FLG__);
+
+ ethr_atomic_set(&tse_tmp->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+ }
+ else {
+ /* Should be very unusual */
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ tse_end = tse_tmp->prev;
+ got_all = 0;
+ break;
+ }
+
+ tse_tmp = tse_tmp->next;
+
+ } while (tse_tmp != cnd->q);
+
+ dequeue(&cnd->q, tse, tse_end);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ enqueue_mtx(mtx, tse, tse_end);
+ }
+
+ } while (!got_all);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ int woken;
+ int scnt;
+ int res;
+ void *udata = NULL;
+ ethr_ts_event *tse;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ tse = ethr_get_ts_event();
+
+ scnt = ((tse->iflgs & ETHR_TS_EV_MAIN_THR)
+ ? cnd->main_scnt
+ : cnd->aux_scnt);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ udata = tse->udata; /* Got to restore udata before returning */
+ tse->udata = (void *) mtx;
+
+ tse->uflgs = ETHR_RWMTX_W_WAIT_FLG__; /* Prep for mutex lock op */
+ ethr_atomic_set(&tse->uaflgs, ETHR_CND_WAIT_FLG__);
+
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+
+ enqueue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ ethr_mutex_unlock(mtx);
+
+ /* Wait */
+ woken = 0;
+ while (1) {
+ long act;
+
+ ethr_event_reset(&tse->event);
+
+ act = ethr_atomic_read_acqb(&tse->uaflgs);
+ if (!act)
+ break; /* Mtx locked */
+
+ /* First time, got EINTR, or spurious wakeup... */
+
+ ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
+ || act == ETHR_RWMTX_W_WAIT_FLG__);
+
+ if (woken) {
+ /*
+ * If act == ETHR_RWMTX_W_WAIT_FLG__, we have already been enqueued
+ * on the mutex; continue wait until locked...
+ */
+ if (act == ETHR_CND_WAIT_FLG__) {
+ ETHR_MTX_Q_LOCK(&cnd->qlck);
+ act = ethr_atomic_read(&tse->uaflgs);
+ ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
+ || act == ETHR_RWMTX_W_WAIT_FLG__);
+ /*
+ * If act == ETHR_RWMTX_W_WAIT_FLG__, we have already
+ * enqueued on the mutex; continue wait until locked...
+ */
+ if (act == ETHR_CND_WAIT_FLG__)
+ dequeue(&cnd->q, tse, tse);
+
+ ETHR_MTX_Q_UNLOCK(&cnd->qlck);
+
+ if (act == ETHR_CND_WAIT_FLG__) {
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ethr_mutex_lock(mtx);
+ return EINTR;
+ }
+ }
+ ETHR_ASSERT(act == ETHR_RWMTX_W_WAIT_FLG__);
+ }
+ ethr_event_swait(&tse->event, scnt);
+ /* swait result: 0 || EINTR */
+ woken = 1;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(cnd);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+ return 0;
+}
+
+#else
+/* -- pthread mutex and condition variables -------------------------------- */
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ return pthread_mutex_init(&mtx->pt_mtx, NULL);
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+#if ETHR_XCHK
+ mtx->initialized = 0;
+#endif
+ return pthread_mutex_destroy(&mtx->pt_mtx);
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+ return pthread_cond_init(&cnd->pt_cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = 0;
+#endif
+ return pthread_cond_destroy(&cnd->pt_cnd);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ res = pthread_cond_signal(&cnd->pt_cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+
+ res = pthread_cond_broadcast(&cnd->pt_cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ int res;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(cnd);
+ ETHR_ASSERT(cnd->initialized == ETHR_COND_INITIALIZED);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ res = pthread_cond_wait(&cnd->pt_cnd, &mtx->pt_mtx);
+ if (res != 0 && res != EINTR)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+#endif /* pthread_mutex */
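+
+/*
+ * A minimal usage sketch (illustrative only) common to both implementations
+ * above; the `ready' predicate is assumed. ethr_cond_wait() may return
+ * EINTR, so the predicate is always re-checked in a loop with the mutex
+ * held, and the signalling side updates the predicate under the same mutex:
+ *
+ *     ethr_mutex_lock(&mtx);              waiting thread
+ *     while (!ready)
+ *         ethr_cond_wait(&cnd, &mtx);
+ *     ethr_mutex_unlock(&mtx);
+ *
+ *     ethr_mutex_lock(&mtx);              signalling thread
+ *     ready = 1;
+ *     ethr_cond_signal(&cnd);
+ *     ethr_mutex_unlock(&mtx);
+ */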
+
+/* -- Exported symbols of inline functions --------------------------------- */
+
+int
+ethr_mutex_trylock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ return ethr_mutex_trylock__(mtx);
+}
+
+void
+ethr_mutex_lock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ ethr_mutex_lock__(mtx);
+}
+
+void
+ethr_mutex_unlock(ethr_mutex *mtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(mtx);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ ethr_mutex_unlock__(mtx);
+}
+
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Read/Write Mutex *
+\* */
+
+static void
+wake_readers(ethr_rwmutex *rwmtx, int rs)
+{
+ ethr_ts_event *tse;
+#ifdef ETHR_DEBUG
+ int drs = 0;
+#endif
+
+ tse = rwmtx->mtxb.q;
+ ETHR_ASSERT(tse);
+ ETHR_ASSERT(rwmtx->rq_end);
+ dequeue(&rwmtx->mtxb.q, tse, rwmtx->rq_end);
+ rwmtx->rq_end->next = NULL;
+ rwmtx->rq_end = NULL;
+
+ ETHR_ASSERT(!rwmtx->mtxb.q
+ || (ethr_atomic_read(&rwmtx->mtxb.q->uaflgs)
+ == ETHR_RWMTX_W_WAIT_FLG__));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+ ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
+
+ while (tse) {
+ ethr_ts_event *tse_next;
+
+#ifdef ETHR_DEBUG
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs)
+ == ETHR_RWMTX_R_WAIT_FLG__);
+ drs++;
+#endif
+
+ tse_next = tse->next; /* we aren't allowed to read tse->next
+ after we have reset uaflgs */
+
+ ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_event_set(&tse->event);
+ tse = tse_next;
+ }
+
+ ETHR_ASSERT(rs == drs);
+}
+
+static ETHR_INLINE int
+is_w_waiter(ethr_ts_event *tse)
+{
+ ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__
+ || tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ return tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__;
+}
+
+static ETHR_INLINE int
+multiple_w_waiters(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(rwmtx->mtxb.q);
+ ETHR_ASSERT(rwmtx->mtxb.q->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+
+ if (!rwmtx->rq_end)
+ return rwmtx->mtxb.q->next != rwmtx->mtxb.q;
+ else {
+ ETHR_ASSERT(rwmtx->mtxb.q->next != rwmtx->mtxb.q);
+ if (rwmtx->mtxb.q->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__)
+ return 1;
+ ETHR_ASSERT(rwmtx->rq_end->next == rwmtx->mtxb.q
+ || rwmtx->rq_end->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ return rwmtx->rq_end->next != rwmtx->mtxb.q;
+ }
+}
+
+int check_readers_array(ethr_rwmutex *rwmtx,
+ int start_rix,
+ int length,
+ int pre_check)
+{
+ int ix = start_rix;
+
+#ifndef ETHR_READ_MEMORY_BARRIER_IS_FULL
+ if (pre_check)
+ ETHR_READ_MEMORY_BARRIER;
+ else
+#endif
+ ETHR_MEMORY_BARRIER;
+
+ do {
+ long act = rwmutex_freqread_rdrs_read(rwmtx, ix);
+ if (act != 0)
+ return EBUSY;
+ ix++;
+ if (ix == length)
+ ix = 0;
+ } while (ix != start_rix);
+
+ return 0;
+}
+
+static ETHR_INLINE void
+rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
+ ethr_ts_event *tse)
+{
+ long act;
+ /*
+ * Restore failed increment
+ */
+ act = rwmutex_freqread_rdrs_dec_read(rwmtx, tse);
+
+ ETHR_WRITE_MEMORY_BARRIER;
+
+ if (act == 0) {
+
+#ifndef ETHR_WRITE_MEMORY_BARRIER_IS_FULL
+ ETHR_READ_MEMORY_BARRIER;
+#endif
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if ((act & ETHR_RWMTX_W_FLG__) == 0
+ && act & (ETHR_RWMTX_WAIT_FLGS__|ETHR_RWMTX_R_PEND_UNLCK_MASK__)) {
+ /*
+ * We either got waiters, or someone else trying
+ * to read unlock which we might have to help.
+ */
+ rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 1, 0);
+ }
+ }
+}
+
+static int
+rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
+ long initial,
+ ethr_ts_event *tse,
+ int start_next_ix,
+ int check_before_try,
+ int try_write_lock)
+{
+ ethr_ts_event *tse_tmp;
+ long act = initial;
+ int six, res, length;
+
+ tse_tmp = tse;
+ if (!tse_tmp)
+ tse_tmp = ethr_get_ts_event();
+
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0)
+ goto check_waiters;
+
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
+ length = reader_groups_array_size;
+ six = tse_tmp->rgix;
+ }
+ else {
+ length = main_threads_array_size;
+ six = tse_tmp->mtix;
+ }
+ if (start_next_ix) {
+ six++;
+ if (six >= length)
+ six = 0;
+ }
+
+ if (!tse)
+ ethr_leave_ts_event(tse_tmp);
+
+ if (check_before_try) {
+ res = check_readers_array(rwmtx, six, length, 1);
+ if (res == EBUSY)
+ return try_write_lock ? EBUSY : 0;
+ }
+
+ while (1) {
+ long exp = act;
+ long new = act+1;
+
+ ETHR_ASSERT((act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ < ETHR_RWMTX_R_PEND_UNLCK_MASK__);
+
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ act = new;
+ break;
+ }
+ if (!try_write_lock) {
+ if (act == ETHR_RWMTX_W_FLG__ || act == 0)
+ return 0;
+ if ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ if ((act & ETHR_RWMTX_R_FLG__) == 0)
+ return 0;
+ }
+ else if ((act & ETHR_RWMTX_R_FLG__) == 0) {
+ if (act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ return 0;
+ goto check_waiters;
+ }
+ }
+ else {
+ if (act == 0)
+ goto tryrwlock;
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_WAIT_FLGS__))
+ return EBUSY;
+ }
+ }
+
+ res = check_readers_array(rwmtx, six, length, 0);
+ if (res == EBUSY) {
+ act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ if (act & ETHR_RWMTX_R_MASK__)
+ return try_write_lock ? EBUSY : 0;
+ }
+ else {
+ while (1) {
+ long exp = act;
+ long new = act;
+ new &= ~ETHR_RWMTX_R_FLG__;
+ new--;
+
+ ETHR_ASSERT(act & ETHR_RWMTX_R_PEND_UNLCK_MASK__);
+
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (exp == act) {
+ if (new & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
+ return try_write_lock ? EBUSY : 0;
+ act = new;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Read unlock completed, but we have to check if
+ * threads have to be woken (or if we should try
+ * to write lock it).
+ */
+
+ if (act & ETHR_RWMTX_W_FLG__)
+ return try_write_lock ? EBUSY : 0;
+
+ if (act & ETHR_RWMTX_WAIT_FLGS__) {
+ check_waiters:
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ return try_write_lock ? EBUSY : 0;
+ }
+
+ if (!try_write_lock)
+ return 0;
+
+ tryrwlock:
+ /* Try to write lock it */
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__,
+ 0);
+ return act == 0 ? 0 : EBUSY;
+}
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+
+static ETHR_INLINE void
+rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ /*
+ * Restore failed increment
+ */
+ act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ }
+}
+
+#endif
+
+static void
+rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
+ long initial)
+{
+ long act = initial, exp;
+ int scnt, start_scnt;
+ ethr_ts_event *tse = NULL;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ start_scnt = scnt = initial_spincount(&rwmtx->mtxb);
+
+ /*
+ * Spin trying to read lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ rwmutex_incdec_restore_failed_tryrlock(rwmtx);
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+#endif
+
+ while (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ if (scnt == 0) {
+ tse = ethr_get_ts_event();
+ if (update_spincount(&rwmtx->mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(&rwmtx->mtxb, tse, scnt,
+ ETHR_RWMTX_R_WAIT_FLG__, 1, 0);
+ goto done;
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ scnt--;
+ }
+ exp = act;
+ ETHR_ASSERT(scnt >= 0);
+
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_inc_read(&rwmtx->mtxb.flgs);
+ if ((act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) == 0)
+ goto done; /* Got it */
+#else
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp)
+ goto done; /* Got it */
+#endif
+ }
+
+ done:
+ if (tse)
+ ethr_leave_ts_event(tse);
+}
+
+static void
+rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
+ ethr_ts_event *tse,
+ long initial)
+{
+ long act = initial;
+ int scnt, start_scnt;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ start_scnt = scnt = initial_spincount(&rwmtx->mtxb);
+
+ /*
+ * Spin trying to read lock for a while. If unsuccessful,
+ * wait on event.
+ */
+
+ while (1) {
+
+ rwmutex_freqread_restore_failed_tryrlock(rwmtx, tse);
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ while (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ if (scnt == 0) {
+ if (update_spincount(&rwmtx->mtxb, tse, &start_scnt, &scnt)) {
+ event_wait(&rwmtx->mtxb, tse, scnt,
+ ETHR_RWMTX_R_WAIT_FLG__, 1, 1);
+ return; /* Got it */
+ }
+ }
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ ETHR_YIELD();
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ scnt--;
+ }
+
+ ETHR_ASSERT(scnt >= 0);
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if (act == ETHR_RWMTX_R_FLG__)
+ return; /* Got it */
+
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__))
+ break; /* Busy (need to restore inc) */
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ return; /* Got it */
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ return; /* Got it */
+ }
+ }
+}
+
+static void
+rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+{
+ write_lock_wait(&rwmtx->mtxb, initial, 1, 0);
+}
+
+static void
+rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+{
+ write_lock_wait(&rwmtx->mtxb, initial, 1, 1);
+}
+
+static ETHR_INLINE void
+rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, int act_initial)
+{
+ long act, act_mask;
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
+ /* r pend unlock mask may vary and must be retained */
+ act_mask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ }
+ else {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ /* rs mask may vary and must be retained */
+ act_mask = ETHR_RWMTX_RS_MASK__;
+#else
+ /* rs mask always zero */
+ ETHR_ASSERT((act_initial & ETHR_RWMTX_RS_MASK__) == 0);
+ ethr_atomic_set(&rwmtx->mtxb.flgs, new_initial);
+ return;
+#endif
+ }
+
+ act = act_initial;
+ while (1) {
+ long exp = act;
+ long new = new_initial + (act & act_mask);
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ exp = act;
+ }
+}
+
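rwlock_wake_set_flags() installs a new flag word while carrying over whatever currently sits in the varying part of mtxb.flgs (the pending-unlock count, or the reader count when ETHR_RLOCK_WITH_INC_DEC is used). A standalone sketch of that compare-and-swap pattern, with C11 atomics standing in for the ethr_atomic calls:

#include <stdatomic.h>

/* Replace everything outside keep_mask with new_bits, retrying until the
 * compare-and-swap succeeds; returns the word that was finally installed. */
static long set_flags_preserving(atomic_long *flgs, long new_bits, long keep_mask)
{
    long act = atomic_load(flgs);
    for (;;) {
        long exp = act;
        long new = new_bits | (act & keep_mask);
        if (atomic_compare_exchange_strong(flgs, &exp, new))
            return new;
        act = exp;              /* exp now holds the observed value; retry */
    }
}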
+#ifdef ETHR_DEBUG
+
+static void
+dbg_unlock_wake(ethr_rwmutex *rwmtx,
+ int have_w,
+ ethr_ts_event *tse)
+{
+ long exp, act, imask;
+
+ exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
+
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
+ imask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ else {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ imask = ETHR_RWMTX_RS_MASK__;
+#else
+ imask = 0;
+#endif
+ }
+
+ ETHR_ASSERT(tse);
+
+ if (is_w_waiter(tse)) {
+
+ exp |= ETHR_RWMTX_W_WAIT_FLG__;
+ if (rwmtx->rq_end) {
+ exp |= ETHR_RWMTX_R_WAIT_FLG__;
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((exp & ~imask) == (act & ~imask));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+
+ }
+ else {
+
+ exp |= ETHR_RWMTX_R_WAIT_FLG__;
+ if (rwmtx->rq_end->next != rwmtx->mtxb.q)
+ exp |= ETHR_RWMTX_W_WAIT_FLG__;
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((exp & ~imask) == (act & ~imask));
+
+ ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
+
+ }
+}
+
+#endif
+
+static void
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial)
+{
+ long new, act = initial;
+ ethr_ts_event *tse;
+
+ if ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ if (!have_w)
+ return;
+ else {
+ while ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ long exp = act;
+ new = exp & ~ETHR_RWMTX_W_FLG__;
+ act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ return;
+ }
+ }
+ }
+
+ ETHR_MTX_Q_LOCK(&rwmtx->mtxb.qlck);
+ tse = rwmtx->mtxb.q;
+
+ if (!have_w) {
+ if (!tse) {
+#ifdef ETHR_DEBUG
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
+#endif
+ goto already_served;
+ }
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act & ~ETHR_RWMTX_WAIT_FLGS__) {
+ already_served:
+ ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
+ return;
+ }
+ }
+
+#ifdef ETHR_DEBUG
+ dbg_unlock_wake(rwmtx, have_w, tse);
+#endif
+
+ if (is_w_waiter(tse)) {
+
+ if (!have_w) {
+ act = ethr_atomic_read_bor(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__);
+ ETHR_ASSERT((act & ~ETHR_RWMTX_WAIT_FLGS__) == 0);
+ ETHR_ASSERT(act & ETHR_RWMTX_W_WAIT_FLG__);
+ act |= ETHR_RWMTX_W_FLG__;
+ }
+
+ /*
+ * If we have multiple write waiters, there
+ * is no need to modify mtxb->flgs; otherwise,
+ * we need to clear the write wait bit...
+ */
+ if (!multiple_w_waiters(rwmtx)) {
+ new = ETHR_RWMTX_W_FLG__;
+ if (tse->next != rwmtx->mtxb.q) {
+ ETHR_ASSERT(tse->next->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
+ new |= ETHR_RWMTX_R_WAIT_FLG__;
+ }
+
+ rwlock_wake_set_flags(rwmtx, new, act);
+ }
+
+ wake_writer(&rwmtx->mtxb, 1);
+ }
+ else {
+ int rs;
+
+ if (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL) {
+ rs = rwmtx->tdata.rs;
+ new = (long) rs;
+ rwmtx->tdata.rs = 0;
+ }
+ else {
+ ethr_rwmutex_type type = rwmtx->type;
+ int length = (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ
+ ? reader_groups_array_size
+ : main_threads_array_size);
+ int ix;
+
+ rs = 0;
+ for (ix = 0; ix < length; ix++) {
+ int wrs = rwmtx->tdata.ra[ix].data.waiting_readers;
+ rwmtx->tdata.ra[ix].data.waiting_readers = 0;
+ ETHR_ASSERT(wrs >= 0);
+ if (wrs) {
+ rs += wrs;
+ rwmutex_freqread_rdrs_add(rwmtx, type, ix, wrs);
+ }
+ }
+ new = ETHR_RWMTX_R_FLG__;
+ }
+
+ if (rwmtx->rq_end->next != rwmtx->mtxb.q)
+ new |= ETHR_RWMTX_W_WAIT_FLG__;
+
+ rwlock_wake_set_flags(rwmtx, new, act);
+ wake_readers(rwmtx, rs);
+ }
+}
+
+static ethr_rwmtx_readers_array__ *
+alloc_readers_array(int length, ethr_rwmutex_lived lived)
+{
+ ethr_rwmtx_readers_array__ *ra;
+ size_t sz;
+ void *mem;
+
+ sz = sizeof(ethr_rwmtx_readers_array__) * (length + 1);
+
+ switch (lived) {
+ case ETHR_RWMUTEX_LONG_LIVED:
+ mem = ethr_mem__.ll.alloc(sz);
+ break;
+ case ETHR_RWMUTEX_SHORT_LIVED:
+ mem = ethr_mem__.sl.alloc(sz);
+ break;
+ default:
+ mem = ethr_mem__.std.alloc(sz);
+ break;
+ }
+ if (!mem)
+ return NULL;
+
+ if ((((unsigned long) mem) & ETHR_CACHE_LINE_MASK) == 0) {
+ ra = (ethr_rwmtx_readers_array__ *) mem;
+ ra->data.byte_offset = 0;
+ }
+ else {
+ ra = ((ethr_rwmtx_readers_array__ *)
+ ((((unsigned long) mem) & ~ETHR_CACHE_LINE_MASK)
+ + ETHR_CACHE_LINE_SIZE));
+ ra->data.byte_offset = (int) ((unsigned long) ra
+ - (unsigned long) mem);
+ }
+ ra->data.lived = lived;
+ return ra;
+}
+
+static void
+free_readers_array(ethr_rwmtx_readers_array__ *ra)
+{
+ void *ptr = (void *) (((char *) ra) - ra->data.byte_offset);
+ switch (ra->data.lived) {
+ case ETHR_RWMUTEX_LONG_LIVED:
+ ethr_mem__.ll.free(ptr);
+ break;
+ case ETHR_RWMUTEX_SHORT_LIVED:
+ ethr_mem__.sl.free(ptr);
+ break;
+ default:
+ ethr_mem__.std.free(ptr);
+ break;
+ }
+}
+
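alloc_readers_array() and free_readers_array() above over-allocate by one array element, round the returned pointer up to the next cache-line boundary, and record the byte offset so the original allocation can be handed back to the matching free function. A standalone sketch of the same alignment arithmetic, assuming a 64-byte cache line and plain malloc/free in place of ETHR_CACHE_LINE_SIZE/ETHR_CACHE_LINE_MASK and the ethr_mem__ allocators:

#include <stdlib.h>

#define CL_SIZE 64                        /* assumed cache-line size */
#define CL_MASK ((size_t) (CL_SIZE - 1))

/* Returns a CL_SIZE-aligned pointer; *offp receives the distance back to the
 * pointer that must eventually be passed to free(). */
static void *cl_aligned_alloc(size_t sz, int *offp)
{
    char *mem = malloc(sz + CL_SIZE);     /* room to slide up to the next line */
    char *p;
    if (!mem)
        return NULL;
    if (((size_t) mem & CL_MASK) == 0)
        p = mem;                          /* already aligned; offset is zero */
    else
        p = (char *) (((size_t) mem & ~CL_MASK) + CL_SIZE);
    *offp = (int) (p - mem);
    return p;
}

/* free counterpart: free(((char *) p) - off); */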
+int
+ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
+{
+ int res;
+ ethr_rwmtx_readers_array__ *ra = NULL;
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ ETHR_MTX_HARD_DEBUG_FENCE_INIT(rwmtx);
+ rwmtx->rq_end = NULL;
+ rwmtx->type = opt ? opt->type : ETHR_RWMUTEX_TYPE_NORMAL;
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ if (main_threads_array_size <= reader_groups_array_size) {
+ /* No point using reader groups... */
+ rwmtx->type = ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ;
+ }
+ /* Fall through */
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ int length;
+
+ length = (rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+ ? main_threads_array_size
+ : reader_groups_array_size);
+
+ if (length == 1) {
+ /* No point using a frequent reader type... */
+ rwmtx->type = ETHR_RWMUTEX_TYPE_NORMAL;
+ }
+ else {
+ int ix;
+ ra = alloc_readers_array(length,
+ (opt
+ ? opt->lived
+ : ETHR_RWMUTEX_UNKNOWN_LIVED));
+ if (!ra) {
+ res = ENOMEM;
+ goto error;
+ }
+
+ rwmtx->tdata.ra = ra;
+
+ for (ix = 0; ix < length; ix++) {
+ ethr_atomic_init(&rwmtx->tdata.ra[ix].data.readers, 0);
+ rwmtx->tdata.ra[ix].data.waiting_readers = 0;
+ }
+ break;
+ }
+ }
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ rwmtx->tdata.rs = 0;
+ break;
+ default:
+ res = EINVAL;
+ goto error;
+ }
+ res = mtxb_init(&rwmtx->mtxb,
+ default_rwmtx_main_spincount,
+ opt ? opt->main_spincount : -1,
+ default_rwmtx_aux_spincount,
+ opt ? opt->aux_spincount : -1);
+ if (res == 0)
+ return 0;
+
+ error:
+
+ if (ra)
+ free_readers_array(ra);
+
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return res;
+}
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+ return ethr_rwmutex_init_opt(rwmtx, NULL);
+}
+
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
+ long act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act == ETHR_RWMTX_R_FLG__)
+ rwmutex_try_complete_runlock(rwmtx, act, NULL, 0, 0, 0);
+ }
+ res = mtxb_destroy(&rwmtx->mtxb);
+ if (res != 0)
+ return res;
+ if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
+ free_readers_array(rwmtx->tdata.ra);
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return 0;
+}
+
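ethr_rwmutex_init_opt() is the configurable entry point: the option struct picks the lock type, the expected lifetime (which selects the allocator for the reader-group array), and the spin counts, where -1 appears to mean "use the default", matching what is passed when opt is NULL. A minimal usage sketch, assuming the option struct carries just the four fields referenced above:

#include "ethread.h"

static ethr_rwmutex tab_lock;

static int init_tab_lock(void)
{
    ethr_rwmutex_opt opt;
    opt.type = ETHR_RWMUTEX_TYPE_FREQUENT_READ;    /* mostly-read lock */
    opt.lived = ETHR_RWMUTEX_LONG_LIVED;           /* lives for the whole run */
    opt.main_spincount = -1;                       /* -1: take the defaults, as */
    opt.aux_spincount = -1;                        /*     mtxb_init() above implies */
    return ethr_rwmutex_init_opt(&tab_lock, &opt); /* 0 on success, errno code otherwise */
}

static void fini_tab_lock(void)
{
    (void) ethr_rwmutex_destroy(&tab_lock);        /* must not be held at this point */
}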
+#define ETHR_MAX_TRYRLOCK_TRIES 5
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+ int res = 0;
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL: {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
+ res = EBUSY;
+ else {
+ act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ rwmutex_incdec_restore_failed_tryrlock(rwmtx);
+ res = EBUSY;
+ }
+ }
+#else
+ long exp = 0;
+ int tries = 0;
+
+ while (1) {
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp) {
+ res = 0;
+ break;
+ }
+ if (tries > ETHR_MAX_TRYRLOCK_TRIES
+ || (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))) {
+ res = EBUSY;
+ break;
+ }
+ tries++;
+ exp = act;
+ }
+#endif
+ break;
+ }
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+
+ if (act != ETHR_RWMTX_R_FLG__) {
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ rwmutex_freqread_restore_failed_tryrlock(rwmtx, tse);
+ res = EBUSY;
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ break;
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ }
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(&rwmtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ return res;
+}
+
+void
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL: {
+#ifdef ETHR_RLOCK_WITH_INC_DEC
+ act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
+ rwmutex_normal_rlock_wait(rwmtx, act);
+#else
+ long exp = 0;
+
+ while (1) {
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ if (act == exp) {
+ break;
+ }
+
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
+ rwmutex_normal_rlock_wait(rwmtx, act);
+ break;
+ }
+ exp = act;
+ }
+#endif
+ break;
+ }
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ rwmutex_freqread_rdrs_inc(rwmtx, tse);
+
+ ETHR_MEMORY_BARRIER;
+
+ act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+
+ if (act != ETHR_RWMTX_R_FLG__) {
+ while (1) {
+ long exp, new;
+
+ if (act & ~(ETHR_RWMTX_R_FLG__|ETHR_RWMTX_R_WAIT_FLG__)) {
+ rwmutex_freqread_rlock_wait(rwmtx, tse, act);
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_FLG__)
+ break;
+
+ exp = act;
+ new = act | ETHR_RWMTX_R_FLG__;
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ if (act == exp)
+ break;
+ }
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK(&rwmtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
+void
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(&rwmtx->mtxb);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_dec_read_relb(&rwmtx->mtxb.flgs);
+ if ((act & ETHR_RWMTX_WAIT_FLGS__)
+ && (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
+ ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
+ rwmutex_unlock_wake(rwmtx, 0, act);
+ }
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ: {
+ ethr_ts_event *tse = ethr_get_ts_event();
+
+ act = rwmutex_freqread_rdrs_dec_read_relb(rwmtx, tse);
+
+ ETHR_ASSERT(act >= 0);
+
+ ETHR_WRITE_MEMORY_BARRIER;
+
+ if (act == 0) {
+
+#ifndef ETHR_WRITE_MEMORY_BARRIER_IS_FULL
+ ETHR_READ_MEMORY_BARRIER;
+#endif
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ if ((act & ETHR_RWMTX_W_FLG__) == 0
+ && (act & (ETHR_RWMTX_WAIT_FLGS__
+ | ETHR_RWMTX_R_PEND_UNLCK_MASK__))) {
+ rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 0, 0);
+ }
+
+ }
+
+ ethr_leave_ts_event(tse);
+ break;
+ }
+ }
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+ int res = 0;
+ long act;
+
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ res = EBUSY;
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+
+ res = 0;
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ do {
+
+ if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_WAIT_FLGS__)) {
+ res = EBUSY;
+ break;
+ }
+
+ if (act & ETHR_RWMTX_R_MASK__) {
+ res = rwmutex_try_complete_runlock(rwmtx, act, NULL,
+ 0, 1, 1);
+ break;
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+
+ } while (act != 0);
+
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&rwmtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ return res;
+}
+
+void
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ rwmutex_normal_rwlock_wait(rwmtx, act);
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+
+ act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+
+ do {
+
+ if (act != 0) {
+ rwmutex_freqread_rwlock_wait(rwmtx, act);
+ break;
+ }
+
+ act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
+
+ } while (act != 0);
+
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&rwmtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+
+}
+
+void
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+ long act;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&rwmtx->mtxb);
+
+ switch (rwmtx->type) {
+ case ETHR_RWMUTEX_TYPE_NORMAL:
+ act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs,
+ 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ rwmutex_unlock_wake(rwmtx, 1, act);
+ break;
+
+ case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
+ case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
+ act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs, 0,
+ ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ rwmutex_unlock_wake(rwmtx, 1, act);
+ break;
+ }
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(rwmtx);
+}
+
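Taken together, the entry points above give the usual reader/writer discipline: rlock/runlock may be held by many threads at once, rwlock/rwunlock is exclusive, and the try variants return EBUSY instead of blocking. A small usage sketch (the protected variable is illustrative, and the lock is assumed to have been initialized):

#include <errno.h>
#include "ethread.h"

static ethr_rwmutex cfg_lock;   /* assume ethr_rwmutex_init(&cfg_lock) succeeded */
static int cfg_value;           /* illustrative shared state */

static int read_cfg(void)
{
    int v;
    ethr_rwmutex_rlock(&cfg_lock);      /* shared: runs concurrently with other readers */
    v = cfg_value;
    ethr_rwmutex_runlock(&cfg_lock);
    return v;
}

static void write_cfg(int v)
{
    ethr_rwmutex_rwlock(&cfg_lock);     /* exclusive: waits out readers and writers */
    cfg_value = v;
    ethr_rwmutex_rwunlock(&cfg_lock);
}

static int try_read_cfg(int *vp)
{
    if (ethr_rwmutex_tryrlock(&cfg_lock) == EBUSY)
        return 0;                       /* a writer holds or waits for the lock */
    *vp = cfg_value;
    ethr_rwmutex_runlock(&cfg_lock);
    return 1;
}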
+#else
+/* -- pthread read/write mutex --------------------------------------------- */
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (!rwmtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ return pthread_rwlock_init(&rwmtx->pt_rwlock, write_pref_attr);
+}
+
+int
+ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
+{
+ return ethr_rwmutex_init(rwmtx);
+}
+
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = pthread_rwlock_destroy(&rwmtx->pt_rwlock);
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return res;
+}
+
+/* -- Exported symbols of inline functions --------------------------------- */
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_tryrlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_rlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_runlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_tryrwlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ return ethr_rwmutex_rwlock__(rwmtx);
+}
+
+void
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(rwmtx);
+ ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
+
+ ethr_rwmutex_rwunlock__(rwmtx);
+}
+
+#endif /* pthread */
+
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#ifdef ETHR_MTX_HARD_DEBUG_Q
+static void
+hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+    long flgs = ethr_atomic_read(&mtxb->flgs);
+
+ ETHR_MTX_HARD_ASSERT(!(flgs & ETHR_RWMTX_R_WAIT_FLG__) || is_rwmtx);
+
+ if (!(flgs & ETHR_RWMTX_WAIT_FLGS__)) {
+ ETHR_MTX_HARD_ASSERT(!mtxb->q);
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rq_end);
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rs);
+ }
+ }
+ else {
+ ethr_ts_event *tse;
+ int ws = 0, rs = 0, rsf = 0, ref = 0;
+
+ ETHR_MTX_HARD_ASSERT(mtxb->q);
+
+ tse = mtxb->q;
+
+ do {
+ long type;
+
+ ETHR_MTX_HARD_ASSERT(tse->next->prev == tse);
+ ETHR_MTX_HARD_ASSERT(tse->prev->next == tse);
+
+ type = ethr_atomic_read(&tse->uaflgs);
+ ETHR_MTX_HARD_ASSERT(type == tse->uflgs);
+ switch (type) {
+ case ETHR_RWMTX_W_WAIT_FLG__:
+ ws++;
+ break;
+ case ETHR_RWMTX_R_WAIT_FLG__: {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(is_rwmtx);
+ if (!rsf)
+ rsf = 1;
+ ETHR_MTX_HARD_ASSERT(!ref);
+ if (rwmtx->rq_end == tse) {
+ ETHR_MTX_HARD_ASSERT(
+ tse->next == rwmtx->mtxb.q
+ || tse->next->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
+ ref = 1;
+ }
+ rs++;
+ break;
+ }
+ default:
+ ETHR_MTX_HARD_ASSERT(! "invalid wait type found");
+ }
+
+ tse = tse->next;
+ } while (tse != mtxb->q);
+
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(rs == rwmtx->rs);
+ }
+
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ ETHR_MTX_HARD_ASSERT(ws == mtxb->ws);
+#endif
+
+ if (flgs & ETHR_RWMTX_W_WAIT_FLG__)
+ ETHR_MTX_HARD_ASSERT(ws);
+ else
+ ETHR_MTX_HARD_ASSERT(!ws);
+
+ if (flgs & ETHR_RWMTX_R_WAIT_FLG__) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(is_rwmtx);
+ ETHR_MTX_HARD_ASSERT(rwmtx->rq_end);
+ ETHR_MTX_HARD_ASSERT(rsf);
+ ETHR_MTX_HARD_ASSERT(ref);
+ ETHR_MTX_HARD_ASSERT(rs);
+ }
+ else {
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(!rwmtx->rq_end);
+ }
+ ETHR_MTX_HARD_ASSERT(!rsf);
+ ETHR_MTX_HARD_ASSERT(!ref);
+ ETHR_MTX_HARD_ASSERT(!rs);
+ }
+ }
+}
+
+#elif defined(ETHR_MTX_HARD_DEBUG_WSQ)
+
+static void
+hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
+{
+ int ws = 0;
+ int rs = 0;
+
+ if (mtxb->q) {
+ ethr_ts_event *tse = mtxb->q;
+ do {
+ switch (tse->uflgs) {
+ case ETHR_RWMTX_W_WAIT_FLG__:
+ ws++;
+ break;
+ case ETHR_RWMTX_R_WAIT_FLG__:
+ rs++;
+ break;
+ default:
+ ETHR_MTX_HARD_ASSERT(0);
+ break;
+ }
+ tse = tse->next;
+ } while (tse != mtxb->q);
+ }
+
+ ETHR_MTX_HARD_ASSERT(mtxb->ws == ws);
+ if (is_rwmtx) {
+ ethr_rwmutex *rwmtx = (ethr_rwmutex *) mtxb;
+ ETHR_MTX_HARD_ASSERT(rwmtx->rs == rs);
+ }
+}
+
+#endif
+
+#endif
diff --git a/erts/lib_src/common/ethread.c b/erts/lib_src/common/ethread.c
deleted file mode 100644
index eb4d0cad20..0000000000
--- a/erts/lib_src/common/ethread.c
+++ /dev/null
@@ -1,3346 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Description: A Thread library for use in the ERTS and other OTP
- * applications.
- * Author: Rickard Green
- */
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#undef ETHR_STACK_GUARD_SIZE
-
-#if defined(ETHR_PTHREADS)
-
-#ifdef ETHR_TIME_WITH_SYS_TIME
-# include <time.h>
-# include <sys/time.h>
-#else
-# ifdef ETHR_HAVE_SYS_TIME_H
-# include <sys/time.h>
-# else
-# include <time.h>
-# endif
-#endif
-#include <sys/types.h>
-#include <unistd.h>
-#include <signal.h>
-
-#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
-# define ETHR_STACK_GUARD_SIZE (pagesize)
-#endif
-
-#elif defined(ETHR_WIN32_THREADS)
-
-#undef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <process.h>
-#include <winerror.h>
-
-#else
-#error "Missing thread implementation"
-#endif
-
-#include <limits.h>
-
-#define ETHR_FORCE_INLINE_FUNCS
-#define ETHR_INLINE_FUNC_NAME_(X) X ## __
-#include "ethread.h"
-
-#ifndef ETHR_HAVE_ETHREAD_DEFINES
-#error Missing configure defines
-#endif
-
-/*
- * ----------------------------------------------------------------------------
- * Common stuff
- * ----------------------------------------------------------------------------
- */
-
-#define ETHR_MAX_THREADS 2048 /* Has to be an even power of 2 */
-
-static int ethr_not_inited = 1;
-
-#define ASSERT(A) ETHR_ASSERT((A))
-
-static void *(*allocp)(size_t) = malloc;
-static void *(*reallocp)(void *, size_t) = realloc;
-static void (*freep)(void *) = free;
-
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-#endif
-
-void *(*thread_create_prepare_func)(void) = NULL;
-void (*thread_create_parent_func)(void *) = NULL;
-void (*thread_create_child_func)(void *) = NULL;
-
-typedef struct ethr_xhndl_list_ ethr_xhndl_list;
-struct ethr_xhndl_list_ {
- ethr_xhndl_list *next;
- void (*funcp)(void);
-};
-
-static size_t pagesize;
-#define ETHR_PAGE_ALIGN(SZ) (((((size_t) (SZ)) - 1)/pagesize + 1)*pagesize)
-static size_t min_stack_size; /* kilo words */
-static size_t max_stack_size; /* kilo words */
-#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
-#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
-
-ethr_mutex xhndl_mtx;
-ethr_xhndl_list *xhndl_list;
-
-static int
-init_common(ethr_init_data *id)
-{
- int res;
- if (id) {
- allocp = id->alloc;
- reallocp = id->realloc;
- freep = id->free;
- thread_create_prepare_func = id->thread_create_prepare_func;
- thread_create_parent_func = id->thread_create_parent_func;
- thread_create_child_func = id->thread_create_child_func;
- }
- if (!allocp || !reallocp || !freep)
- return EINVAL;
-
-#ifdef _SC_PAGESIZE
- pagesize = (size_t) sysconf(_SC_PAGESIZE);
-#elif defined(HAVE_GETPAGESIZE)
- pagesize = (size_t) getpagesize();
-#else
- pagesize = (size_t) 4*1024; /* Guess 4 KB */
-#endif
-
- /* User needs at least 4 KB */
- min_stack_size = 4*1024;
-#if SIZEOF_VOID_P == 8
- /* Double that on 64-bit archs */
- min_stack_size *= 2;
-#endif
- /* On some systems as much as about 4 KB is used by the system */
- min_stack_size += 4*1024;
- /* There should be room for signal handlers */
-#ifdef SIGSTKSZ
- min_stack_size += SIGSTKSZ;
-#else
- min_stack_size += pagesize;
-#endif
- /* The system may think that we need more stack */
-#if defined(PTHREAD_STACK_MIN)
- if (min_stack_size < PTHREAD_STACK_MIN)
- min_stack_size = PTHREAD_STACK_MIN;
-#elif defined(_SC_THREAD_STACK_MIN)
- {
- size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
- if (min_stack_size < thr_min_stk_sz)
- min_stack_size = thr_min_stk_sz;
- }
-#endif
-    /* On at least some platforms, the guard is included in the stack size
-       passed when creating threads */
-#ifdef ETHR_STACK_GUARD_SIZE
- min_stack_size += ETHR_STACK_GUARD_SIZE;
-#endif
- min_stack_size = ETHR_PAGE_ALIGN(min_stack_size);
-
- min_stack_size = ETHR_B2KW(min_stack_size);
-
- max_stack_size = 32*1024*1024;
-#if SIZEOF_VOID_P == 8
- max_stack_size *= 2;
-#endif
- max_stack_size = ETHR_B2KW(max_stack_size);
-
- xhndl_list = NULL;
-
- res = ethr_mutex_init(&xhndl_mtx);
- if (res != 0)
- return res;
-
- res = ethr_mutex_set_forksafe(&xhndl_mtx);
- if (res != 0 && res != ENOTSUP)
- return res;
-
- return 0;
-}
-
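The stack-size bookkeeping in init_common() works in "kilo words": ETHR_B2KW() rounds a byte count up to units of 1024 pointers and ETHR_KW2B() converts back, with ETHR_PAGE_ALIGN() first rounding byte counts up to whole pages. A standalone worked example of the same arithmetic, assuming 8-byte pointers and a 4 KB page size:

#include <stdio.h>

#define WORD 8u                                     /* sizeof(void *) on a 64-bit system */
#define PAGE 4096u                                  /* assumed pagesize */
#define PAGE_ALIGN(SZ) ((((SZ) - 1u) / PAGE + 1u) * PAGE)
#define B2KW(B)  (((B) - 1u) / (WORD * 1024u) + 1u) /* bytes -> kilo words, rounded up */
#define KW2B(KW) ((KW) * WORD * 1024u)              /* kilo words -> bytes */

int main(void)
{
    unsigned bytes = 20000u;                        /* e.g. a computed minimum stack size */
    unsigned aligned = PAGE_ALIGN(bytes);           /* 20480: next multiple of 4096 */
    unsigned kw = B2KW(aligned);                    /* 3: 20480 B is 2.5 kilo words, rounded up */
    printf("%u B -> %u B aligned -> %u KW -> %u B\n",
           bytes, aligned, kw, KW2B(kw));           /* 20000 -> 20480 -> 3 -> 24576 */
    return 0;
}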
-int
-ethr_install_exit_handler(void (*funcp)(void))
-{
- ethr_xhndl_list *xhp;
- int res;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- if (!funcp)
- return EINVAL;
-
- xhp = (ethr_xhndl_list *) (*allocp)(sizeof(ethr_xhndl_list));
- if (!xhp)
- return ENOMEM;
-
- res = ethr_mutex_lock__(&xhndl_mtx);
- if (res != 0) {
- (*freep)((void *) xhp);
- return res;
- }
-
- xhp->funcp = funcp;
- xhp->next = xhndl_list;
- xhndl_list = xhp;
-
- res = ethr_mutex_unlock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- return res;
-}
-
-static void
-run_exit_handlers(void)
-{
- int res;
- ethr_xhndl_list *xhp;
-
- res = ethr_mutex_lock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- xhp = xhndl_list;
-
- res = ethr_mutex_unlock__(&xhndl_mtx);
- if (res != 0)
- abort();
-
- for (; xhp; xhp = xhp->next)
- (*xhp->funcp)();
-}
-
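ethr_install_exit_handler() pushes the callback onto the front of xhndl_list, and run_exit_handlers() then invokes the registered handlers, newest first, in every thread that passes through thr_exit_cleanup() further down. A minimal usage sketch:

#include <stdio.h>
#include "ethread.h"

static void flush_thread_stats(void)
{
    /* Illustrative handler: runs once per exiting ethread. */
    fputs("thread exiting\n", stderr);
}

static int setup_exit_hook(void)
{
    return ethr_install_exit_handler(flush_thread_stats); /* 0, EINVAL, ENOMEM, ... */
}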
-#if defined(ETHR_PTHREADS)
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * pthread implementation *
-\* */
-
-typedef struct {
- pthread_mutex_t mtx;
- pthread_cond_t cnd;
- int initialized;
- void *(*thr_func)(void *);
- void *arg;
- void *prep_func_res;
-} thr_wrap_data_;
-
-static int no_ethreads;
-static ethr_mutex no_ethrs_mtx;
-
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-#define ETHR_HAVE_PTHREAD_ATFORK 0
-#endif
-
-#if !ETHR_HAVE_PTHREAD_ATFORK
-#warning "Cannot enforce fork-safety"
-#endif
-
-/*
- * ----------------------------------------------------------------------------
- * Static functions
- * ----------------------------------------------------------------------------
- */
-
-/*
- * Functions with the safe_ prefix abort on failure. To be used when
- * we cannot recover after failure.
- */
-
-static ETHR_INLINE void
-safe_mutex_lock(pthread_mutex_t *mtxp)
-{
- int res = pthread_mutex_lock(mtxp);
- if (res != 0)
- abort();
-}
-
-static ETHR_INLINE void
-safe_mutex_unlock(pthread_mutex_t *mtxp)
-{
- int res = pthread_mutex_unlock(mtxp);
- if (res != 0)
- abort();
-}
-
-static ETHR_INLINE void
-safe_cond_signal(pthread_cond_t *cndp)
-{
- int res = pthread_cond_signal(cndp);
- if (res != 0)
- abort();
-}
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-static volatile int rec_mtx_attr_need_init = 1;
-static pthread_mutexattr_t rec_mtx_attr;
-
-static int init_rec_mtx_attr(void);
-
-#endif
-
-#if ETHR_HAVE_PTHREAD_ATFORK
-
-static ethr_mutex forksafe_mtx = ETHR_MUTEX_INITER;
-
-static void lock_mutexes(void)
-{
- ethr_mutex *m = &forksafe_mtx;
- do {
-
- safe_mutex_lock(&m->pt_mtx);
-
- m = m->next;
-
- } while (m != &forksafe_mtx);
-}
-
-static void unlock_mutexes(void)
-{
- ethr_mutex *m = forksafe_mtx.prev;
- do {
-
- safe_mutex_unlock(&m->pt_mtx);
-
- m = m->prev;
-
- } while (m->next != &forksafe_mtx);
-}
-
-#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
-
-static void reinit_mutexes(void)
-{
- ethr_mutex *m = forksafe_mtx.prev;
- do {
- pthread_mutexattr_t *attrp = NULL;
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
- if (m->is_rec_mtx) {
- if (rec_mtx_attr_need_init) {
- int res = init_rec_mtx_attr();
- if (res != 0)
- abort();
- }
- attrp = &rec_mtx_attr;
- }
-#endif
- if (pthread_mutex_init(&m->pt_mtx, attrp) != 0)
- abort();
-
- m = m->prev;
-
- } while (m->next != &forksafe_mtx);
-}
-
-#endif
-
-static int
-init_forksafe(void)
-{
- static int init_done = 0;
- int res = 0;
-
- if (init_done)
- return res;
-
- forksafe_mtx.prev = &forksafe_mtx;
- forksafe_mtx.next = &forksafe_mtx;
-
- res = pthread_atfork(lock_mutexes,
- unlock_mutexes,
-#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
- reinit_mutexes
-#else
- unlock_mutexes
-#endif
- );
-
- init_done = 1;
- return res;
-}
-
-#endif
-
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE)
-
-#define SET_REC_MUTEX_ATTR(AP) \
- pthread_mutexattr_settype((AP), PTHREAD_MUTEX_RECURSIVE);
-
-#elif defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-
-#define SET_REC_MUTEX_ATTR(AP) \
- pthread_mutexattr_setkind_np((AP), PTHREAD_MUTEX_RECURSIVE_NP);
-
-#else
-
-#error "Don't know how to set recursive mutex attributes"
-
-#endif
-
-static int
-init_rec_mtx_attr(void)
-{
- int res, mres;
- static pthread_mutex_t attrinit_mtx = PTHREAD_MUTEX_INITIALIZER;
-
- mres = pthread_mutex_lock(&attrinit_mtx);
- if (mres != 0)
- return mres;
- /* Got here under race conditions; check again ... */
- if (!rec_mtx_attr_need_init)
- res = 0;
- else {
- res = pthread_mutexattr_init(&rec_mtx_attr);
- if (res == 0) {
- res = SET_REC_MUTEX_ATTR(&rec_mtx_attr);
- if (res == 0)
- rec_mtx_attr_need_init = 0;
- else
- (void) pthread_mutexattr_destroy(&rec_mtx_attr);
- }
- }
-
- mres = pthread_mutex_unlock(&attrinit_mtx);
- if (mres != 0)
- return mres;
- return res;
-}
-
-#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-
-static ETHR_INLINE void thr_exit_cleanup(void)
-{
- run_exit_handlers();
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- ASSERT(no_ethreads > 0);
- no_ethreads--;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
-}
-
-static void *thr_wrapper(void *vtwd)
-{
- void *res;
- thr_wrap_data_ *twd = (thr_wrap_data_ *) vtwd;
- void *(*thr_func)(void *) = twd->thr_func;
- void *arg = twd->arg;
-
- safe_mutex_lock(&twd->mtx);
-
- if (thread_create_child_func)
- (*thread_create_child_func)(twd->prep_func_res);
-
- twd->initialized = 1;
-
- safe_cond_signal(&twd->cnd);
- safe_mutex_unlock(&twd->mtx);
-
- res = (*thr_func)(arg);
- thr_exit_cleanup();
- return res;
-}
-
-
-/*
- * ----------------------------------------------------------------------------
- * Exported functions
- * ----------------------------------------------------------------------------
- */
-
-int
-ethr_init(ethr_init_data *id)
-{
- int res;
-
- if (!ethr_not_inited)
- return EINVAL;
-
- ethr_not_inited = 0;
-
- res = init_common(id);
- if (res != 0)
- goto error;
-
-#if ETHR_HAVE_PTHREAD_ATFORK
- init_forksafe();
-#endif
-
- no_ethreads = 1;
- res = ethr_mutex_init(&no_ethrs_mtx);
- if (res != 0)
- goto error;
- res = ethr_mutex_set_forksafe(&no_ethrs_mtx);
- if (res != 0 && res != ENOTSUP)
- goto error;
-
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
- {
- int i;
- for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
-#ifdef ETHR_HAVE_PTHREAD_SPIN_LOCK
- res = pthread_spin_init(&ethr_atomic_protection__[i].u.spnlck, 0);
-#else
- res = ethr_mutex_init(&ethr_atomic_protection__[i].u.mtx);
-#endif
- if (res != 0)
- goto error;
- }
- }
-#endif
-
- return 0;
-
- error:
- ethr_not_inited = 1;
- return res;
-
-}
-
-int
-ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
- ethr_thr_opts *opts)
-{
- thr_wrap_data_ twd;
- pthread_attr_t attr;
- int res, dres;
- int use_stack_size = (opts && opts->suggested_stack_size >= 0
- ? opts->suggested_stack_size
- : -1 /* Use system default */);
-
-#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
- if (use_stack_size < 0)
- use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
-#endif
-
- twd.initialized = 0;
- twd.thr_func = func;
- twd.arg = arg;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!tid || !func) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
-    /* Call prepare func if it exists */
- if (thread_create_prepare_func)
- twd.prep_func_res = (*thread_create_prepare_func)();
- else
- twd.prep_func_res = NULL;
-
-    /* Set some thread attributes */
- res = pthread_attr_init(&attr);
- if (res != 0)
- goto cleanup_parent_func;
- res = pthread_mutex_init(&twd.mtx, NULL);
- if (res != 0)
- goto cleanup_attr_destroy;
- res = pthread_cond_init(&twd.cnd, NULL);
- if (res != 0)
- goto cleanup_mutex_destroy;
-
- /* Schedule child thread in system scope (if possible) ... */
- res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
- if (res != 0 && res != ENOTSUP)
- goto cleanup_cond_destroy;
-
- if (use_stack_size >= 0) {
- size_t suggested_stack_size = (size_t) use_stack_size;
- size_t stack_size;
-#ifdef DEBUG
- suggested_stack_size /= 2; /* Make sure we got margin */
-#endif
-#ifdef ETHR_STACK_GUARD_SIZE
-        /* On at least some platforms, the guard is included in the stack size
-           passed when creating threads */
- suggested_stack_size += ETHR_B2KW(ETHR_STACK_GUARD_SIZE);
-#endif
- if (suggested_stack_size < min_stack_size)
- stack_size = ETHR_KW2B(min_stack_size);
- else if (suggested_stack_size > max_stack_size)
- stack_size = ETHR_KW2B(max_stack_size);
- else
- stack_size = ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
- (void) pthread_attr_setstacksize(&attr, stack_size);
- }
-
-#ifdef ETHR_STACK_GUARD_SIZE
- (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
-#endif
-
- /* Detached or joinable... */
- res = pthread_attr_setdetachstate(&attr,
- (opts && opts->detached
- ? PTHREAD_CREATE_DETACHED
- : PTHREAD_CREATE_JOINABLE));
- if (res != 0)
- goto cleanup_cond_destroy;
-
- res = pthread_mutex_lock(&twd.mtx);
-
- if (res != 0)
- goto cleanup_cond_destroy;
-
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- if (no_ethreads < ETHR_MAX_THREADS) {
- no_ethreads++;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- }
- else {
- res = EAGAIN;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- goto cleanup_mutex_unlock;
- }
-
- res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void *) &twd);
-
- if (res != 0) {
- safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
- ASSERT(no_ethreads > 0);
- no_ethreads--;
- safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
- }
- else {
-
- /* Wait for child to initialize... */
- while (!twd.initialized) {
- res = pthread_cond_wait(&twd.cnd, &twd.mtx);
- if (res != 0 && res != EINTR)
- break;
- }
-
- }
-
- /* Cleanup... */
- cleanup_mutex_unlock:
- dres = pthread_mutex_unlock(&twd.mtx);
- if (res == 0)
- res = dres;
- cleanup_cond_destroy:
- dres = pthread_cond_destroy(&twd.cnd);
- if (res == 0)
- res = dres;
- cleanup_mutex_destroy:
- dres = pthread_mutex_destroy(&twd.mtx);
- if (res == 0)
- res = dres;
- cleanup_attr_destroy:
- dres = pthread_attr_destroy(&attr);
- if (res == 0)
- res = dres;
- cleanup_parent_func:
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- return res;
-}
-
-int
-ethr_thr_join(ethr_tid tid, void **res)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_join((pthread_t) tid, res);
-}
-
-int
-ethr_thr_detach(ethr_tid tid)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_detach((pthread_t) tid);
-}
-
-void
-ethr_thr_exit(void *res)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return;
- }
-#endif
- thr_exit_cleanup();
- pthread_exit(res);
-}
-
-ethr_tid
-ethr_self(void)
-{
- return (ethr_tid) pthread_self();
-}
-
-int
-ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
-{
- return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
-}
-
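The thread API above follows the familiar create/join shape; passing a NULL ethr_thr_opts makes ethr_thr_create() use the computed defaults for stack size and detach state. A minimal sketch:

#include <stdio.h>
#include "ethread.h"

static char done_msg[] = "done";

static void *worker(void *arg)
{
    return arg;                            /* value handed back to the joiner */
}

static int run_worker(void)
{
    ethr_tid tid;
    void *res;
    int err = ethr_thr_create(&tid, worker, done_msg, NULL /* default opts */);
    if (err != 0)
        return err;                        /* e.g. EAGAIN or ENOMEM */
    err = ethr_thr_join(tid, &res);        /* blocks until worker returns */
    if (err == 0)
        printf("worker said: %s\n", (char *) res);
    return err;
}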
-
-/*
- * Mutex functions
- */
-
-
-int
-ethr_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#endif
- mtx->prev = NULL;
- mtx->next = NULL;
- mtx->is_rec_mtx = 0;
- return pthread_mutex_init(&mtx->pt_mtx, NULL);
-}
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-int
-ethr_rec_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#endif
- if (rec_mtx_attr_need_init)
- init_rec_mtx_attr();
-
- mtx->prev = NULL;
- mtx->next = NULL;
- mtx->is_rec_mtx = 1;
- return pthread_mutex_init(&mtx->pt_mtx, &rec_mtx_attr);
-}
-
-#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-
-int
-ethr_mutex_destroy(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (mtx->next) {
- ASSERT(mtx->prev);
- ethr_mutex_unset_forksafe(mtx);
- }
-#if ETHR_XCHK
- mtx->initialized = 0;
-#endif
- return pthread_mutex_destroy(&mtx->pt_mtx);
-}
-
-int ethr_mutex_set_forksafe(ethr_mutex *mtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-#if ETHR_HAVE_PTHREAD_ATFORK
- res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
- if (res != 0)
- return res;
- if (!forksafe_mtx.next) {
- ASSERT(!forksafe_mtx.prev);
- init_forksafe();
- }
- if (mtx->next) {
- /* forksafe already set for this mutex */
- ASSERT(mtx->prev);
- }
- else {
- mtx->next = forksafe_mtx.next;
- mtx->prev = &forksafe_mtx;
- forksafe_mtx.next->prev = mtx;
- forksafe_mtx.next = mtx;
- }
-
- res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
-
-#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
- res = ENOTSUP;
-#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
- return res;
-}
-
-int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-#if ETHR_HAVE_PTHREAD_ATFORK
- res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
- if (res != 0)
- return res;
- if (!forksafe_mtx.next) {
- ASSERT(!forksafe_mtx.prev);
- init_forksafe();
- }
- if (!mtx->next) {
- /* forksafe already unset for this mutex */
- ASSERT(!mtx->prev);
- }
- else {
- mtx->prev->next = mtx->next;
- mtx->next->prev = mtx->prev;
- mtx->next = NULL;
- mtx->prev = NULL;
- }
- res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
-
-#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
- res = ENOTSUP;
-#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
- return res;
-}
-
-int
-ethr_mutex_trylock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_trylock__(mtx);
-}
-
-int
-ethr_mutex_lock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_lock__(mtx);
-}
-
-int
-ethr_mutex_unlock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_unlock__(mtx);
-}
-
-/*
- * Condition variable functions
- */
-
-int
-ethr_cond_init(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd) {
- ASSERT(0);
- return EINVAL;
- }
- cnd->initialized = ETHR_COND_INITIALIZED;
-#endif
- return pthread_cond_init(&cnd->pt_cnd, NULL);
-}
-
-int
-ethr_cond_destroy(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
- cnd->initialized = 0;
-#endif
- return pthread_cond_destroy(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_signal(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_signal(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_broadcast(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_broadcast(&cnd->pt_cnd);
-}
-
-int
-ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || cnd->initialized != ETHR_COND_INITIALIZED
- || !mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_cond_wait(&cnd->pt_cnd, &mtx->pt_mtx);
-}
-
-int
-ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
-{
- struct timespec to;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || cnd->initialized != ETHR_COND_INITIALIZED
- || !mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED
- || !timeout) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- to.tv_sec = timeout->tv_sec;
- to.tv_nsec = timeout->tv_nsec;
-
- return pthread_cond_timedwait(&cnd->pt_cnd, &mtx->pt_mtx, &to);
-}
-
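As with the pthread primitives they wrap, ethr_cond_wait() must be called with the mutex locked and the predicate re-checked in a loop, since a waiter can wake without the condition holding. A minimal sketch over the mutex and condition API above (the flag is illustrative, and the init calls are assumed to have succeeded):

#include "ethread.h"

static ethr_mutex lock;    /* assume ethr_mutex_init(&lock) succeeded */
static ethr_cond  ready;   /* assume ethr_cond_init(&ready) succeeded */
static int        have_work;

static void wait_for_work(void)
{
    ethr_mutex_lock(&lock);
    while (!have_work)                    /* loop: wakeups may be spurious */
        ethr_cond_wait(&ready, &lock);    /* atomically unlocks, waits, relocks */
    have_work = 0;
    ethr_mutex_unlock(&lock);
}

static void post_work(void)
{
    ethr_mutex_lock(&lock);
    have_work = 1;
    ethr_cond_signal(&ready);             /* wake one waiter */
    ethr_mutex_unlock(&lock);
}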
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-int
-ethr_rwmutex_init(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx) {
- ASSERT(0);
- return EINVAL;
- }
- rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
-#endif
- return pthread_rwlock_init(&rwmtx->pt_rwlock, NULL);
-}
-
-int
-ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = pthread_rwlock_destroy(&rwmtx->pt_rwlock);
-#if ETHR_XCHK
- rwmtx->initialized = 0;
-#endif
- return res;
-}
-
-int
-ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_tryrlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_runlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_tryrwlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rwlock__(rwmtx);
-}
-
-int
-ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwmutex_rwunlock__(rwmtx);
-}
-
-#endif /* #ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-/*
- * Current time
- */
-
-int
-ethr_time_now(ethr_timeval *time)
-{
- int res;
- struct timeval tv;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- res = gettimeofday(&tv, NULL);
- time->tv_sec = (long) tv.tv_sec;
- time->tv_nsec = ((long) tv.tv_usec)*1000;
- return res;
-}
-
-/*
- * Thread specific data
- */
-
-int
-ethr_tsd_key_create(ethr_tsd_key *keyp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!keyp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_key_create((pthread_key_t *) keyp, NULL);
-}
-
-int
-ethr_tsd_key_delete(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_key_delete((pthread_key_t) key);
-}
-
-int
-ethr_tsd_set(ethr_tsd_key key, void *value)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return pthread_setspecific((pthread_key_t) key, value);
-}
-
-void *
-ethr_tsd_get(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return NULL;
- }
-#endif
- return pthread_getspecific((pthread_key_t) key);
-}
-
-/*
- * Signal functions
- */
-
-#if ETHR_HAVE_ETHR_SIG_FUNCS
-
-int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!set && !oset) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return pthread_sigmask(how, set, oset);
-}
-
-int ethr_sigwait(const sigset_t *set, int *sig)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!set || !sig) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (sigwait(set, sig) < 0)
- return errno;
- return 0;
-}
-
-#endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
-
-#elif defined(ETHR_WIN32_THREADS)
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Native win32 threads implementation *
-\* */
-
-#define INVALID_TID -1
-
-/* The spin count values are more or less taken out of the blue */
-#define ETHR_MUTEX_SPIN_COUNT 5000
-#define ETHR_COND_SPIN_COUNT 1000
-
-ethr_tid serial_shift; /* Bits to shift serial when constructing a tid */
-ethr_tid last_serial; /* Last thread table serial used */
-ethr_tid last_ix; /* Last thread table index used */
-ethr_tid thr_ix_mask; /* Mask used to mask out thread table index from a tid */
-
-/* Event used for condition variables. One per thread. */
-/*typedef struct cnd_wait_event__ cnd_wait_event_;*/
-struct cnd_wait_event__ {
- HANDLE handle;
- cnd_wait_event_ *prev;
- cnd_wait_event_ *next;
- int in_queue;
-};
-
-/* Thread specific data. Stored in the thread table */
-typedef struct {
- ethr_tid thr_id;
- HANDLE thr_handle;
- ethr_tid joiner;
- void *result;
- cnd_wait_event_ wait_event;
-} thr_data_;
-
-/* Argument passed to thr_wrapper() */
-typedef struct {
- void * (*func)(void *);
- void * arg;
- thr_data_ *ptd;
- thr_data_ *td;
- int res;
- void *prep_func_res;
-} thr_wrap_data_;
-
-
-static CRITICAL_SECTION thr_table_cs; /* Critical section used to protect
- the thread table from concurrent
- accesses. */
-static CRITICAL_SECTION fake_static_init_cs; /* Critical section used to protect
-                                                 initialization of 'statically
- initialized' mutexes */
-static thr_data_ * thr_table[ETHR_MAX_THREADS]; /* The thread table */
-
-static DWORD tls_own_thr_data;
-
-static thr_data_ main_thr_data;
-
-#define THR_IX(TID) ((TID) & thr_ix_mask)
-#define OWN_THR_DATA ((thr_data_ *) TlsGetValue(tls_own_thr_data))
-
-/*
- * ----------------------------------------------------------------------------
- * Static functions
- * ----------------------------------------------------------------------------
- */
-
-static int
-get_errno(void)
-{
- switch (GetLastError()) {
- case ERROR_INVALID_FUNCTION: return EINVAL; /* 1 */
- case ERROR_FILE_NOT_FOUND: return ENOENT; /* 2 */
- case ERROR_PATH_NOT_FOUND: return ENOENT; /* 3 */
- case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; /* 4 */
- case ERROR_ACCESS_DENIED: return EACCES; /* 5 */
- case ERROR_INVALID_HANDLE: return EBADF; /* 6 */
- case ERROR_ARENA_TRASHED: return ENOMEM; /* 7 */
- case ERROR_NOT_ENOUGH_MEMORY: return ENOMEM; /* 8 */
- case ERROR_INVALID_BLOCK: return ENOMEM; /* 9 */
- case ERROR_BAD_ENVIRONMENT: return E2BIG; /* 10 */
- case ERROR_BAD_FORMAT: return ENOEXEC; /* 11 */
- case ERROR_INVALID_ACCESS: return EINVAL; /* 12 */
- case ERROR_INVALID_DATA: return EINVAL; /* 13 */
- case ERROR_OUTOFMEMORY: return ENOMEM; /* 14 */
- case ERROR_INVALID_DRIVE: return ENOENT; /* 15 */
- case ERROR_CURRENT_DIRECTORY: return EACCES; /* 16 */
- case ERROR_NOT_SAME_DEVICE: return EXDEV; /* 17 */
- case ERROR_NO_MORE_FILES: return ENOENT; /* 18 */
- case ERROR_WRITE_PROTECT: return EACCES; /* 19 */
- case ERROR_BAD_UNIT: return EACCES; /* 20 */
- case ERROR_NOT_READY: return EACCES; /* 21 */
- case ERROR_BAD_COMMAND: return EACCES; /* 22 */
- case ERROR_CRC: return EACCES; /* 23 */
- case ERROR_BAD_LENGTH: return EACCES; /* 24 */
- case ERROR_SEEK: return EACCES; /* 25 */
- case ERROR_NOT_DOS_DISK: return EACCES; /* 26 */
- case ERROR_SECTOR_NOT_FOUND: return EACCES; /* 27 */
- case ERROR_OUT_OF_PAPER: return EACCES; /* 28 */
- case ERROR_WRITE_FAULT: return EACCES; /* 29 */
- case ERROR_READ_FAULT: return EACCES; /* 30 */
- case ERROR_GEN_FAILURE: return EACCES; /* 31 */
- case ERROR_SHARING_VIOLATION: return EACCES; /* 32 */
- case ERROR_LOCK_VIOLATION: return EACCES; /* 33 */
- case ERROR_WRONG_DISK: return EACCES; /* 34 */
- case ERROR_SHARING_BUFFER_EXCEEDED: return EACCES; /* 36 */
- case ERROR_BAD_NETPATH: return ENOENT; /* 53 */
- case ERROR_NETWORK_ACCESS_DENIED: return EACCES; /* 65 */
- case ERROR_BAD_NET_NAME: return ENOENT; /* 67 */
- case ERROR_FILE_EXISTS: return EEXIST; /* 80 */
- case ERROR_CANNOT_MAKE: return EACCES; /* 82 */
- case ERROR_FAIL_I24: return EACCES; /* 83 */
- case ERROR_INVALID_PARAMETER: return EINVAL; /* 87 */
- case ERROR_NO_PROC_SLOTS: return EAGAIN; /* 89 */
- case ERROR_DRIVE_LOCKED: return EACCES; /* 108 */
- case ERROR_BROKEN_PIPE: return EPIPE; /* 109 */
- case ERROR_DISK_FULL: return ENOSPC; /* 112 */
- case ERROR_INVALID_TARGET_HANDLE: return EBADF; /* 114 */
- case ERROR_WAIT_NO_CHILDREN: return ECHILD; /* 128 */
- case ERROR_CHILD_NOT_COMPLETE: return ECHILD; /* 129 */
- case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; /* 130 */
- case ERROR_NEGATIVE_SEEK: return EINVAL; /* 131 */
- case ERROR_SEEK_ON_DEVICE: return EACCES; /* 132 */
- case ERROR_DIR_NOT_EMPTY: return ENOTEMPTY;/* 145 */
- case ERROR_NOT_LOCKED: return EACCES; /* 158 */
- case ERROR_BAD_PATHNAME: return ENOENT; /* 161 */
- case ERROR_MAX_THRDS_REACHED: return EAGAIN; /* 164 */
- case ERROR_LOCK_FAILED: return EACCES; /* 167 */
- case ERROR_ALREADY_EXISTS: return EEXIST; /* 183 */
- case ERROR_INVALID_STARTING_CODESEG: return ENOEXEC; /* 188 */
- case ERROR_INVALID_STACKSEG: return ENOEXEC; /* 189 */
- case ERROR_INVALID_MODULETYPE: return ENOEXEC; /* 190 */
- case ERROR_INVALID_EXE_SIGNATURE: return ENOEXEC; /* 191 */
- case ERROR_EXE_MARKED_INVALID: return ENOEXEC; /* 192 */
- case ERROR_BAD_EXE_FORMAT: return ENOEXEC; /* 193 */
- case ERROR_ITERATED_DATA_EXCEEDS_64k: return ENOEXEC; /* 194 */
- case ERROR_INVALID_MINALLOCSIZE: return ENOEXEC; /* 195 */
- case ERROR_DYNLINK_FROM_INVALID_RING: return ENOEXEC; /* 196 */
- case ERROR_IOPL_NOT_ENABLED: return ENOEXEC; /* 197 */
- case ERROR_INVALID_SEGDPL: return ENOEXEC; /* 198 */
- case ERROR_AUTODATASEG_EXCEEDS_64k: return ENOEXEC; /* 199 */
- case ERROR_RING2SEG_MUST_BE_MOVABLE: return ENOEXEC; /* 200 */
- case ERROR_RELOC_CHAIN_XEEDS_SEGLIM: return ENOEXEC; /* 201 */
- case ERROR_INFLOOP_IN_RELOC_CHAIN: return ENOEXEC; /* 202 */
- case ERROR_FILENAME_EXCED_RANGE: return ENOENT; /* 206 */
- case ERROR_NESTING_NOT_ALLOWED: return EAGAIN; /* 215 */
- case ERROR_NOT_ENOUGH_QUOTA: return ENOMEM; /* 1816 */
- default: return EINVAL;
- }
-}
-
-static ETHR_INLINE thr_data_ *
-tid2thr(ethr_tid tid)
-{
- ethr_tid ix;
- thr_data_ *td;
-
- if (tid < 0)
- return NULL;
- ix = THR_IX(tid);
- if (ix >= ETHR_MAX_THREADS)
- return NULL;
- td = thr_table[ix];
- if (!td)
- return NULL;
- if (td->thr_id != tid)
- return NULL;
- return td;
-}
-
-static ETHR_INLINE void
-new_tid(ethr_tid *new_tid, ethr_tid *new_serial, ethr_tid *new_ix)
-{
- ethr_tid tmp_serial = last_serial;
- ethr_tid tmp_ix = last_ix + 1;
- ethr_tid start_ix = tmp_ix;
-
-
- do {
- if (tmp_ix >= ETHR_MAX_THREADS) {
- tmp_serial++;
- if ((tmp_serial << serial_shift) < 0)
- tmp_serial = 0;
- tmp_ix = 0;
- }
- if (!thr_table[tmp_ix]) {
- *new_tid = (tmp_serial << serial_shift) | tmp_ix;
- *new_serial = tmp_serial;
- *new_ix = tmp_ix;
- return;
- }
- tmp_ix++;
- } while (tmp_ix != start_ix);
-
- *new_tid = INVALID_TID;
- *new_serial = INVALID_TID;
- *new_ix = INVALID_TID;
-
-}
-
-
-static void thr_exit_cleanup(thr_data_ *td, void *res)
-{
-
- ASSERT(td == OWN_THR_DATA);
-
- run_exit_handlers();
-
- EnterCriticalSection(&thr_table_cs);
- CloseHandle(td->wait_event.handle);
- if (td->thr_handle == INVALID_HANDLE_VALUE) {
- /* We are detached; cleanup thread table */
- ASSERT(td->joiner == INVALID_TID);
- ASSERT(td == thr_table[THR_IX(td->thr_id)]);
- thr_table[THR_IX(td->thr_id)] = NULL;
- if (td != &main_thr_data)
- (*freep)((void *) td);
- }
- else {
- /* Save result and let joining thread cleanup */
- td->result = res;
- }
- LeaveCriticalSection(&thr_table_cs);
-}
-
-static unsigned __stdcall thr_wrapper(LPVOID args)
-{
- void *(*func)(void*) = ((thr_wrap_data_ *) args)->func;
- void *arg = ((thr_wrap_data_ *) args)->arg;
- thr_data_ *td = ((thr_wrap_data_ *) args)->td;
-
- td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (td->wait_event.handle == INVALID_HANDLE_VALUE
- || !TlsSetValue(tls_own_thr_data, (LPVOID) td)) {
- ((thr_wrap_data_ *) args)->res = get_errno();
- if (td->wait_event.handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->wait_event.handle);
- SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
- _endthreadex((unsigned) 0);
- ASSERT(0);
- }
-
- td->wait_event.prev = NULL;
- td->wait_event.next = NULL;
- td->wait_event.in_queue = 0;
-
- if (thread_create_child_func)
- (*thread_create_child_func)(((thr_wrap_data_ *) args)->prep_func_res);
-
- ASSERT(td == OWN_THR_DATA);
-
- ((thr_wrap_data_ *) args)->res = 0;
- SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
-
- thr_exit_cleanup(td, (*func)(arg));
- return 0;
-}
-
-int
-ethr_fake_static_mutex_init(ethr_mutex *mtx)
-{
- EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- /* Got here under race conditions; check again... */
- if (!mtx->initialized) {
- if (!InitializeCriticalSectionAndSpinCount(&mtx->cs,
- ETHR_MUTEX_SPIN_COUNT))
- return get_errno();
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
- }
- LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- return 0;
-}
-
-static int
-fake_static_cond_init(ethr_cond *cnd)
-{
- EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- /* Got here under race conditions; check again... */
- if (!cnd->initialized) {
- if (!InitializeCriticalSectionAndSpinCount(&cnd->cs,
- ETHR_COND_SPIN_COUNT))
- return get_errno();
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- cnd->initialized = ETHR_COND_INITIALIZED;
- }
- LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
- return 0;
-}
-
-#ifdef __GNUC__
-#define LL_LITERAL(X) X##LL
-#else
-#define LL_LITERAL(X) X##i64
-#endif
-
-#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
-
-static ETHR_INLINE void
-get_curr_time(long *sec, long *nsec)
-{
- SYSTEMTIME t;
- FILETIME ft;
- LONGLONG lft;
-
- GetSystemTime(&t);
- SystemTimeToFileTime(&t, &ft);
- memcpy(&lft, &ft, sizeof(lft));
- *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
- *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
-}
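EPOCH_JULIAN_DIFF is the offset, in seconds, between the FILETIME epoch (1601-01-01) and the Unix epoch (1970-01-01): the 369 intervening years contain 89 leap days, giving 369*365 + 89 = 134774 days, and 134774 * 86400 = 11644473600 seconds. Since FILETIME counts 100-nanosecond units, get_curr_time() divides by 10000000 to get whole seconds and multiplies the remainder by 100 to get nanoseconds.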
-
-static cnd_wait_event_ *cwe_freelist;
-static CRITICAL_SECTION cwe_cs;
-
-static int
-alloc_cwe(cnd_wait_event_ **cwe_res)
-{
- cnd_wait_event_ *cwe;
- EnterCriticalSection(&cwe_cs);
- cwe = cwe_freelist;
- if (cwe) {
- cwe_freelist = cwe->next;
- LeaveCriticalSection(&cwe_cs);
- }
- else {
- LeaveCriticalSection(&cwe_cs);
- cwe = (*allocp)(sizeof(cnd_wait_event_));
- if (!cwe)
- return ENOMEM;
- cwe->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (cwe->handle == INVALID_HANDLE_VALUE) {
- int res = get_errno();
- (*freep)(cwe);
- return res;
- }
- }
- *cwe_res = cwe;
- return 0;
-}
-
-static void
-free_cwe(cnd_wait_event_ *cwe)
-{
- EnterCriticalSection(&cwe_cs);
- cwe->next = cwe_freelist;
- cwe_freelist = cwe;
- LeaveCriticalSection(&cwe_cs);
-}
-
-static ETHR_INLINE int
-condwait(ethr_cond *cnd,
- ethr_mutex *mtx,
- int with_timeout,
- ethr_timeval *timeout)
-{
- int res;
- thr_data_ *td;
- cnd_wait_event_ *cwe;
- DWORD code;
-    long time; /* time until timeout in milliseconds */
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-
- if (!mtx
- || mtx->initialized != ETHR_MUTEX_INITIALIZED
- || !cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
- || (with_timeout && !timeout)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- td = OWN_THR_DATA;
- if (td)
- cwe = &td->wait_event;
- else { /* A non-ethread thread */
- res = alloc_cwe(&cwe);
- if (res != 0)
- return res;
- }
-
- if (!cnd->initialized)
- fake_static_cond_init(cnd);
- EnterCriticalSection(&cnd->cs);
-
- ASSERT(!cwe->in_queue);
- if (cnd->queue_end) {
- ASSERT(cnd->queue);
- cwe->prev = cnd->queue_end;
- cwe->next = NULL;
- cnd->queue_end->next = cwe;
- cnd->queue_end = cwe;
- }
- else {
- ASSERT(!cnd->queue);
- cwe->prev = NULL;
- cwe->next = NULL;
- cnd->queue = cwe;
- cnd->queue_end = cwe;
- }
- cwe->in_queue = 1;
-
- LeaveCriticalSection(&cnd->cs);
-
- LeaveCriticalSection(&mtx->cs);
-
- if (!with_timeout)
- time = INFINITE;
- else {
- long sec, nsec;
- ASSERT(timeout);
- get_curr_time(&sec, &nsec);
- time = (timeout->tv_sec - sec)*1000;
- time += (timeout->tv_nsec - nsec + 500)/1000000;
- if (time < 0)
- time = 0;
- }
-
- /* wait for event to signal */
- code = WaitForSingleObject(cwe->handle, time);
-
- EnterCriticalSection(&mtx->cs);
-
- if (code == WAIT_OBJECT_0) {
- /* We were woken by a signal or a broadcast ... */
- res = 0;
-
- /* ... no need to remove event from wait queue since this was
- taken care of by the signal or broadcast */
-#ifdef DEBUG
- EnterCriticalSection(&cnd->cs);
- ASSERT(!cwe->in_queue);
- LeaveCriticalSection(&cnd->cs);
-#endif
-
- }
- else {
- /* We timed out... */
- res = ETIMEDOUT;
-
- /* ... probably have to remove event from wait queue ... */
- EnterCriticalSection(&cnd->cs);
-
- if (cwe->in_queue) { /* ... but we must check that we are in queue
- since a signal or broadcast after timeout
- may have removed us from the queue */
- if (cwe->prev) {
- cwe->prev->next = cwe->next;
- }
- else {
- ASSERT(cnd->queue == cwe);
- cnd->queue = cwe->next;
- }
-
- if (cwe->next) {
- cwe->next->prev = cwe->prev;
- }
- else {
- ASSERT(cnd->queue_end == cwe);
- cnd->queue_end = cwe->prev;
- }
- cwe->in_queue = 0;
- }
-
- LeaveCriticalSection(&cnd->cs);
-
- }
-
- if (!td)
- free_cwe(cwe);
-
- return res;
-
-}
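The queue above gives POSIX-style condition variable semantics on top of per-thread auto-reset events: ethr_cond_timedwait() takes an absolute ethr_timeval deadline, condwait() converts it to a relative Win32 wait in milliseconds, and a woken thread must still re-check its predicate. A minimal usage sketch, assuming a hypothetical flag `ready` protected by `mtx` (the helper name and the flag are illustrative, not part of the library):

    #include <errno.h>
    #include "ethread.h"

    static ethr_mutex mtx;      /* assumed initialized with ethr_mutex_init() */
    static ethr_cond cnd;       /* assumed initialized with ethr_cond_init() */
    static int ready;           /* predicate protected by mtx */

    static int wait_ready_for_one_second(void)
    {
        ethr_timeval deadline;
        int res = 0;

        ethr_time_now(&deadline);           /* deadline is absolute: now + 1 s */
        deadline.tv_sec += 1;

        ethr_mutex_lock(&mtx);
        while (!ready) {
            res = ethr_cond_timedwait(&cnd, &mtx, &deadline);
            if (res == ETIMEDOUT)
                break;                      /* deadline passed */
            /* res == 0: signalled or broadcast; loop and re-check the predicate */
        }
        if (ready)
            res = 0;
        ethr_mutex_unlock(&mtx);
        return res;
    }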
-
-
-/*
- * ----------------------------------------------------------------------------
- * Exported functions
- * ----------------------------------------------------------------------------
- */
-
-int
-ethr_init(ethr_init_data *id)
-{
-#ifdef _WIN32_WINNT
- DWORD major = (_WIN32_WINNT >> 8) & 0xff;
- DWORD minor = _WIN32_WINNT & 0xff;
- OSVERSIONINFO os_version;
-#endif
- int err = 0;
- thr_data_ *td = &main_thr_data;
- unsigned long i;
-
- if (!ethr_not_inited)
- return EINVAL;
-
-#ifdef _WIN32_WINNT
- os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
- GetVersionEx(&os_version);
- if (os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
- || os_version.dwMajorVersion < major
- || (os_version.dwMajorVersion == major
- && os_version.dwMinorVersion < minor))
- return ENOTSUP;
-#endif
-
- ASSERT(ETHR_MAX_THREADS > 0);
- for (i = ETHR_MAX_THREADS - 1, serial_shift = 0;
- i;
- serial_shift++, i >>= 1);
- thr_ix_mask = ~(~((ethr_tid) 0) << serial_shift);
-
- tls_own_thr_data = TlsAlloc();
- if (tls_own_thr_data == TLS_OUT_OF_INDEXES)
- goto error;
-
- last_serial = 0;
- last_ix = 0;
-
- td->thr_id = 0;
- td->thr_handle = GetCurrentThread();
- td->joiner = INVALID_TID;
- td->result = NULL;
- td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (td->wait_event.handle == INVALID_HANDLE_VALUE)
- goto error;
- td->wait_event.prev = NULL;
- td->wait_event.next = NULL;
- td->wait_event.in_queue = 0;
- thr_table[0] = td;
-
- if (!TlsSetValue(tls_own_thr_data, (LPVOID) td))
- goto error;
-
- ASSERT(td == OWN_THR_DATA);
-
-
- cwe_freelist = NULL;
- if (!InitializeCriticalSectionAndSpinCount(&cwe_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
-
- for (i = 1; i < ETHR_MAX_THREADS; i++)
- thr_table[i] = NULL;
-
- if (!InitializeCriticalSectionAndSpinCount(&thr_table_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
- if (!InitializeCriticalSectionAndSpinCount(&fake_static_init_cs,
- ETHR_MUTEX_SPIN_COUNT))
- goto error;
- ethr_not_inited = 0;
-
- err = init_common(id);
- if (err)
- goto error;
-
- return 0;
-
- error:
- ethr_not_inited = 1;
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
- if (td->thr_handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->thr_handle);
- if (td->wait_event.handle != INVALID_HANDLE_VALUE)
- CloseHandle(td->wait_event.handle);
- return err;
-}
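The loop in ethr_init() computes serial_shift, the number of bits needed to index ETHR_MAX_THREADS slots, so a tid packs a table index in its low bits and a reuse serial above them; THR_IX() is assumed to recover the index with thr_ix_mask. A standalone sketch of the packing, using a hypothetical table size of 1024:

    #include <assert.h>

    #define MAX_THREADS 1024    /* hypothetical stand-in for ETHR_MAX_THREADS */

    int main(void)
    {
        long i, serial_shift, ix_mask, tid, serial = 3, ix = 17;

        /* Same loop as ethr_init(): bits needed to index MAX_THREADS slots. */
        for (i = MAX_THREADS - 1, serial_shift = 0; i; serial_shift++, i >>= 1)
            ;
        ix_mask = ~(~0L << serial_shift);           /* low serial_shift bits */

        tid = (serial << serial_shift) | ix;        /* pack, as new_tid() does */
        assert((tid & ix_mask) == ix);              /* THR_IX(tid) */
        assert((tid >> serial_shift) == serial);    /* reuse serial */
        return 0;
    }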
-
-/*
- * Thread functions.
- */
-
-int
-ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
- ethr_thr_opts *opts)
-{
- int err = 0;
- thr_wrap_data_ twd;
- thr_data_ *my_td, *child_td = NULL;
- ethr_tid child_tid, child_serial, child_ix;
- DWORD code;
- unsigned ID;
- unsigned stack_size = 0; /* 0 = system default */
- int use_stack_size = (opts && opts->suggested_stack_size >= 0
- ? opts->suggested_stack_size
- : -1 /* Use system default */);
-
-#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
- if (use_stack_size < 0)
- use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
-#endif
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!tid || !func) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
-
- my_td = OWN_THR_DATA;
- if (!my_td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- if (use_stack_size >= 0) {
- size_t suggested_stack_size = (size_t) use_stack_size;
-#ifdef DEBUG
- suggested_stack_size /= 2; /* Make sure we got margin */
-#endif
- if (suggested_stack_size < min_stack_size)
- stack_size = (unsigned) ETHR_KW2B(min_stack_size);
- else if (suggested_stack_size > max_stack_size)
- stack_size = (unsigned) ETHR_KW2B(max_stack_size);
- else
- stack_size =
- (unsigned) ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
- }
-
- EnterCriticalSection(&thr_table_cs);
-
-    /* Call prepare func if it exists */
- if (thread_create_prepare_func)
- twd.prep_func_res = (*thread_create_prepare_func)();
- else
- twd.prep_func_res = NULL;
-
- /* Find a new thread id to use */
- new_tid(&child_tid, &child_serial, &child_ix);
- if (child_tid == INVALID_TID) {
- err = EAGAIN;
- goto error;
- }
-
- ASSERT(child_ix == THR_IX(child_tid));
-
- *tid = child_tid;
-
- ASSERT(!thr_table[child_ix]);
-
- /* Alloc thread data */
- thr_table[child_ix] = child_td = (thr_data_ *) (*allocp)(sizeof(thr_data_));
- if (!child_td) {
- err = ENOMEM;
- goto error;
- }
-
- /* Init thread data */
-
- child_td->thr_id = child_tid;
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- child_td->joiner = INVALID_TID;
- child_td->result = NULL;
- /* 'child_td->wait_event' is initialized by child thread */
-
-
- /* Init thread wrapper data */
-
- twd.func = func;
- twd.arg = arg;
- twd.ptd = my_td;
- twd.td = child_td;
- twd.res = 0;
-
- ASSERT(!my_td->wait_event.in_queue);
-
- /* spawn the thr_wrapper function */
- child_td->thr_handle = (HANDLE) _beginthreadex(NULL,
- stack_size,
- thr_wrapper,
- (LPVOID) &twd,
- 0,
- &ID);
- if (child_td->thr_handle == (HANDLE) 0) {
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- goto error;
- }
-
- ASSERT(child_td->thr_handle != INVALID_HANDLE_VALUE);
-
- /* Wait for child to finish initialization */
- code = WaitForSingleObject(my_td->wait_event.handle, INFINITE);
- if (twd.res || code != WAIT_OBJECT_0) {
- err = twd.res;
- goto error;
- }
-
- if (opts && opts->detached) {
- CloseHandle(child_td->thr_handle);
- child_td->thr_handle = INVALID_HANDLE_VALUE;
- }
-
- last_serial = child_serial;
- last_ix = child_ix;
-
- ASSERT(thr_table[child_ix] == child_td);
-
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- LeaveCriticalSection(&thr_table_cs);
-
- return 0;
-
- error:
-
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
-
- if (thread_create_parent_func)
- (*thread_create_parent_func)(twd.prep_func_res);
-
- if (child_ix != INVALID_TID) {
-
- if (child_td) {
- ASSERT(thr_table[child_ix] == child_td);
-
- if (child_td->thr_handle != INVALID_HANDLE_VALUE) {
- WaitForSingleObject(child_td->thr_handle, INFINITE);
- CloseHandle(child_td->thr_handle);
- }
-
- (*freep)((void *) child_td);
- thr_table[child_ix] = NULL;
- }
- }
-
- *tid = INVALID_TID;
-
- LeaveCriticalSection(&thr_table_cs);
- return err;
-}
-
-int ethr_thr_join(ethr_tid tid, void **res)
-{
- int err = 0;
- DWORD code;
- thr_data_ *td;
- thr_data_ *my_td;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- my_td = OWN_THR_DATA;
-
- if (!my_td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- EnterCriticalSection(&thr_table_cs);
-
- td = tid2thr(tid);
- if (!td)
- err = ESRCH;
- else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
- || td->joiner != INVALID_TID) /* i.e. someone else is joining */
- err = EINVAL;
- else if (my_td == td)
- err = EDEADLK;
- else
- td->joiner = my_td->thr_id;
-
- LeaveCriticalSection(&thr_table_cs);
-
- if (err)
- goto error;
-
- /* Wait for thread to terminate */
- code = WaitForSingleObject(td->thr_handle, INFINITE);
- if (code != WAIT_OBJECT_0)
- goto error;
-
- EnterCriticalSection(&thr_table_cs);
-
- ASSERT(td == tid2thr(tid));
- ASSERT(td->thr_handle != INVALID_HANDLE_VALUE);
- ASSERT(td->joiner == my_td->thr_id);
-
- if (res)
- *res = td->result;
-
- CloseHandle(td->thr_handle);
- ASSERT(td == thr_table[THR_IX(td->thr_id)]);
- thr_table[THR_IX(td->thr_id)] = NULL;
- if (td != &main_thr_data)
- (*freep)((void *) td);
-
- LeaveCriticalSection(&thr_table_cs);
-
- return 0;
-
- error:
- if (err == 0)
- err = get_errno();
- ASSERT(err != 0);
- return err;
-}
-
-
-int
-ethr_thr_detach(ethr_tid tid)
-{
- int res;
- DWORD code;
- thr_data_ *td;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
-
- if (!OWN_THR_DATA) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return EACCES;
- }
-
- EnterCriticalSection(&thr_table_cs);
-
- td = tid2thr(tid);
- if (!td)
- res = ESRCH;
-    else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
- || td->joiner != INVALID_TID) /* i.e. someone is joining */
- res = EINVAL;
- else {
- res = 0;
- CloseHandle(td->thr_handle);
- td->thr_handle = INVALID_HANDLE_VALUE;
- }
-
- LeaveCriticalSection(&thr_table_cs);
-
- return res;
-}
-
-
-void
-ethr_thr_exit(void *res)
-{
- thr_data_ *td;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return;
- }
-#endif
- td = OWN_THR_DATA;
- if (!td) {
- /* Only ethreads are allowed to call this function */
- ASSERT(0);
- return;
- }
- thr_exit_cleanup(td, res);
- _endthreadex((unsigned) 0);
-}
-
-ethr_tid
-ethr_self(void)
-{
- thr_data_ *td;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return INVALID_TID;
- }
-#endif
-    /* It is okay for non-ethreads (i.e. native win32 threads) to call
-       ethr_self(); they will, however, be returned INVALID_TID. */
- td = OWN_THR_DATA;
- if (!td)
- return INVALID_TID;
- return td->thr_id;
-}
-
-int
-ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
-{
- /* INVALID_TID does not equal any tid, not even the INVALID_TID */
- return tid1 == tid2 && tid1 != INVALID_TID;
-}
-
-/*
- * Mutex functions.
- */
-
-int
-ethr_mutex_init(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!InitializeCriticalSectionAndSpinCount(&mtx->cs, ETHR_MUTEX_SPIN_COUNT))
- return get_errno();
- mtx->initialized = ETHR_MUTEX_INITIALIZED;
-#if ETHR_XCHK
- mtx->is_rec_mtx = 0;
-#endif
- return 0;
-}
-
-int
-ethr_rec_mutex_init(ethr_mutex *mtx)
-{
- int res;
- res = ethr_mutex_init(mtx);
-#if ETHR_XCHK
- mtx->is_rec_mtx = 1;
-#endif
- return res;
-}
-
-int
-ethr_mutex_destroy(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- DeleteCriticalSection(&mtx->cs);
- mtx->initialized = 0;
- return 0;
-}
-
-int ethr_mutex_set_forksafe(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return 0; /* No fork() */
-}
-
-int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- return 0; /* No fork() */
-}
-
-int
-ethr_mutex_trylock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx
- || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return ethr_mutex_trylock__(mtx);
-}
-
-int
-ethr_mutex_lock(ethr_mutex *mtx)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx
- || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_lock__(mtx);
-}
-
-int
-ethr_mutex_unlock(ethr_mutex *mtx)
-{
-#if ETHR_XCHK
- int res;
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_mutex_unlock__(mtx);
-}
-
-/*
- * Condition variable functions.
- */
-
-int
-ethr_cond_init(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!InitializeCriticalSectionAndSpinCount(&cnd->cs, ETHR_COND_SPIN_COUNT))
- return get_errno();
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- cnd->initialized = ETHR_COND_INITIALIZED;
- return 0;
-}
-
-int
-ethr_cond_destroy(ethr_cond *cnd)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
- || cnd->queue) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- DeleteCriticalSection(&cnd->cs);
- cnd->initialized = 0;
- return 0;
-}
-
-int
-ethr_cond_signal(ethr_cond *cnd)
-{
- cnd_wait_event_ *cwe;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!cnd->initialized) {
- int res = fake_static_cond_init(cnd);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&cnd->cs);
- cwe = cnd->queue;
- if (cwe) {
- ASSERT(cwe->in_queue);
- SetEvent(cnd->queue->handle);
- if (cwe->next)
- cwe->next->prev = NULL;
- else {
- ASSERT(cnd->queue_end == cnd->queue);
- cnd->queue_end = NULL;
- }
- cnd->queue = cwe->next;
- cwe->in_queue = 0;
- }
- LeaveCriticalSection(&cnd->cs);
- return 0;
-}
-
-int
-ethr_cond_broadcast(ethr_cond *cnd)
-{
- cnd_wait_event_ *cwe;
-
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!cnd
- || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (!cnd->initialized) {
- int res = fake_static_cond_init(cnd);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&cnd->cs);
- for (cwe = cnd->queue; cwe; cwe = cwe->next) {
- ASSERT(cwe->in_queue);
- SetEvent(cwe->handle);
- cwe->in_queue = 0;
- }
- cnd->queue = NULL;
- cnd->queue_end = NULL;
- LeaveCriticalSection(&cnd->cs);
- return 0;
-
-}
-
-int
-ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
-{
- return condwait(cnd, mtx, 0, NULL);
-}
-
-int
-ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
-{
- return condwait(cnd, mtx, 1, timeout);
-}
-
-int
-ethr_time_now(ethr_timeval *time)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- get_curr_time(&time->tv_sec, &time->tv_nsec);
- return 0;
-}
-
-/*
- * Thread specific data
- */
-
-int
-ethr_tsd_key_create(ethr_tsd_key *keyp)
-{
- DWORD key;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!keyp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- key = TlsAlloc();
- if (key == TLS_OUT_OF_INDEXES)
- return get_errno();
- *keyp = (ethr_tsd_key) key;
- return 0;
-}
-
-int
-ethr_tsd_key_delete(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- if (!TlsFree((DWORD) key))
- return get_errno();
- return 0;
-}
-
-int
-ethr_tsd_set(ethr_tsd_key key, void *value)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
-#endif
- if (!TlsSetValue((DWORD) key, (LPVOID) value))
- return get_errno();
- return 0;
-}
-
-void *
-ethr_tsd_get(ethr_tsd_key key)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return NULL;
- }
-#endif
- return (void *) TlsGetValue((DWORD) key);
-}
-
-/* Misc */
-
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-
-int
-ethr_do_spinlock_init(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
- return 0;
- else
- return get_errno();
-}
-
-int
-ethr_do_rwlock_init(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- lock->counter = 0;
- if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
- return 0;
- else
- return get_errno();
-}
-
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#else
-#error "Missing thread implementation"
-#endif
-
-/* Atomics */
-
-int
-ethr_atomic_init(ethr_atomic_t *var, long i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_init__(var, i);
-}
-
-int
-ethr_atomic_set(ethr_atomic_t *var, long i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_set__(var, i);
-}
-
-int
-ethr_atomic_read(ethr_atomic_t *var, long *i)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !i) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_read__(var, i);
-}
-
-
-int
-ethr_atomic_addtest(ethr_atomic_t *var, long incr, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_addtest__(var, incr, testp);
-}
-
-int
-ethr_atomic_inctest(ethr_atomic_t *incp, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!incp || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_inctest__(incp, testp);
-}
-
-int
-ethr_atomic_dectest(ethr_atomic_t *decp, long *testp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!decp || !testp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_dectest__(decp, testp);
-}
-
-int
-ethr_atomic_add(ethr_atomic_t *var, long incr)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_add__(var, incr);
-}
-
-int
-ethr_atomic_inc(ethr_atomic_t *incp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!incp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_inc__(incp);
-}
-
-int
-ethr_atomic_dec(ethr_atomic_t *decp)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!decp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_dec__(decp);
-}
-
-int
-ethr_atomic_and_old(ethr_atomic_t *var, long mask, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_and_old__(var, mask, old);
-}
-
-int
-ethr_atomic_or_old(ethr_atomic_t *var, long mask, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_or_old__(var, mask, old);
-}
-
-int
-ethr_atomic_xchg(ethr_atomic_t *var, long new, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_xchg__(var, new, old);
-}
-
-int
-ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected, long *old)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!var || !old) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_atomic_cmpxchg__(var, new, expected, old);
-}
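All of the atomic wrappers above report success or failure through their return value and pass results out through pointers, so lock-free updates are written as read-then-cmpxchg retry loops. A sketch of such a loop, a hypothetical atomic_store_max() helper built only on the functions declared in this file:

    /* Hypothetical helper, not part of the library. */
    static int atomic_store_max(ethr_atomic_t *var, long candidate)
    {
        long current, old;
        int res = ethr_atomic_read(var, &current);
        if (res != 0)
            return res;
        while (current < candidate) {
            res = ethr_atomic_cmpxchg(var, candidate, current, &old);
            if (res != 0)
                return res;
            if (old == current)
                break;              /* exchange succeeded */
            current = old;          /* raced with another writer; retry */
        }
        return 0;
    }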
-
-/* Spinlocks and rwspinlocks */
-
-int
-ethr_spinlock_init(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spinlock_init__(lock);
-}
-
-int
-ethr_spinlock_destroy(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spinlock_destroy__(lock);
-}
-
-
-int
-ethr_spin_unlock(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spin_unlock__(lock);
-}
-
-int
-ethr_spin_lock(ethr_spinlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_spin_lock__(lock);
-}
-
-int
-ethr_rwlock_init(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwlock_init__(lock);
-}
-
-int
-ethr_rwlock_destroy(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_rwlock_destroy__(lock);
-}
-
-int
-ethr_read_unlock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_read_unlock__(lock);
-}
-
-int
-ethr_read_lock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_read_lock__(lock);
-}
-
-int
-ethr_write_unlock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_write_unlock__(lock);
-}
-
-int
-ethr_write_lock(ethr_rwlock_t *lock)
-{
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!lock) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- return ethr_write_lock__(lock);
-}
-
-
-int
-ethr_gate_init(ethr_gate *gp)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_init(&gp->mtx);
- if (res != 0)
- return res;
- res = ethr_cond_init(&gp->cnd);
- if (res != 0) {
- ethr_mutex_destroy(&gp->mtx);
- return res;
- }
- gp->open = 0;
- return 0;
-}
-
-int
-ethr_gate_destroy(ethr_gate *gp)
-{
- int res, dres;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_destroy(&gp->mtx);
- dres = ethr_cond_destroy(&gp->cnd);
- if (res == 0)
- res = dres;
- gp->open = 0;
- return res;
-}
-
-int
-ethr_gate_close(ethr_gate *gp)
-{
- int res;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- gp->open = 0;
- res = ethr_mutex_unlock__(&gp->mtx);
- return res;
-}
-
-int
-ethr_gate_let_through(ethr_gate *gp, unsigned no)
-{
- int res, ures;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- gp->open += no;
- res = (gp->open == 1
- ? ethr_cond_signal(&gp->cnd)
- : ethr_cond_broadcast(&gp->cnd));
- ures = ethr_mutex_unlock__(&gp->mtx);
- if (res != 0)
- res = ures;
- return res;
-}
-
-int
-ethr_gate_swait(ethr_gate *gp, int spincount)
-{
- int res, ures, n;
-#if ETHR_XCHK
- if (ethr_not_inited) {
- ASSERT(0);
- return EACCES;
- }
- if (!gp) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- n = spincount;
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- while (n >= 0 && !gp->open) {
- res = ethr_mutex_unlock__(&gp->mtx);
- if (res != 0)
- return res;
- res = ethr_mutex_lock__(&gp->mtx);
- if (res != 0)
- return res;
- n--;
- }
- while (!gp->open) {
- res = ethr_cond_wait(&gp->cnd, &gp->mtx);
- if (res != 0 && res != EINTR)
- goto done;
- }
- gp->open--;
- done:
- ures = ethr_mutex_unlock__(&gp->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-
-int
-ethr_gate_wait(ethr_gate *gp)
-{
- return ethr_gate_swait(gp, 0);
-}
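The gate is a counted turnstile: ethr_gate_close() shuts it, ethr_gate_let_through(gp, n) admits up to n waiters (signalling one waiter or broadcasting), and ethr_gate_wait()/ethr_gate_swait() block, optionally spinning first, until admitted. A usage sketch with hypothetical worker and controller functions:

    static ethr_gate gate;          /* assumed initialized with ethr_gate_init() */

    static void *worker(void *arg)
    {
        for (;;) {
            if (ethr_gate_wait(&gate) != 0)     /* blocks until let through */
                break;
            /* ... perform one unit of work ... */
        }
        return NULL;
    }

    static void release_workers(unsigned n)
    {
        ethr_gate_let_through(&gate, n);        /* admit up to n waiting workers */
    }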
-
-
-/* rwmutex fallback */
-#ifdef ETHR_USE_RWMTX_FALLBACK
-
-int
-ethr_rwmutex_init(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_init(&rwmtx->mtx);
- if (res != 0)
- return res;
-    res = ethr_cond_init(&rwmtx->rcnd);
- if (res != 0)
- goto error_cleanup1;
- res = ethr_cond_init(&rwmtx->wcnd);
- if (res != 0)
- goto error_cleanup2;
- rwmtx->readers = 0;
- rwmtx->waiting_readers = 0;
- rwmtx->waiting_writers = 0;
-#if ETHR_XCHK
- rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
-#endif
- return 0;
- error_cleanup2:
- ethr_cond_destroy(&rwmtx->rcnd);
- error_cleanup1:
- ethr_mutex_destroy(&rwmtx->mtx);
- return res;
-}
-
-int
-ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
-{
- int res, pres;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
- rwmtx->initialized = 0;
-#endif
- res = ethr_mutex_destroy(&rwmtx->mtx);
- pres = ethr_cond_destroy(&rwmtx->rcnd);
- if (res == 0)
- res = pres;
- pres = ethr_cond_destroy(&rwmtx->wcnd);
- if (res == 0)
- res = pres;
- return res;
-}
-
-int
-ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_trylock__(&rwmtx->mtx);
- if (res != 0)
- return res;
-    if (rwmtx->waiting_writers) {
- res = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- return EBUSY;
- return res;
- }
- rwmtx->readers++;
- return ethr_mutex_unlock__(&rwmtx->mtx);
-}
-
-int
-ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- while (rwmtx->waiting_writers) {
- rwmtx->waiting_readers++;
- res = ethr_cond_wait(&rwmtx->rcnd, &rwmtx->mtx);
- rwmtx->waiting_readers--;
- if (res != 0 && res != EINTR) {
- (void) ethr_mutex_unlock__(&rwmtx->mtx);
- return res;
- }
- }
- rwmtx->readers++;
- return ethr_mutex_unlock__(&rwmtx->mtx);
-}
-
-int
-ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
-{
- int res, ures;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- rwmtx->readers--;
- if (!rwmtx->readers && rwmtx->waiting_writers)
- res = ethr_cond_signal(&rwmtx->wcnd);
- ures = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-int
-ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_trylock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (!rwmtx->readers && !rwmtx->waiting_writers)
- return 0;
- else {
- res = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- return EBUSY;
- return res;
- }
-}
-
-int
-ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
-{
- int res;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = ethr_mutex_lock__(&rwmtx->mtx);
- if (res != 0)
- return res;
- if (!rwmtx->readers && !rwmtx->waiting_writers)
- return 0;
-
- while (rwmtx->readers) {
- rwmtx->waiting_writers++;
- res = ethr_cond_wait(&rwmtx->wcnd, &rwmtx->mtx);
- rwmtx->waiting_writers--;
- if (res != 0 && res != EINTR) {
- (void) ethr_rwmutex_rwunlock(rwmtx);
- return res;
- }
- }
- return 0;
-}
-
-int
-ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
-{
- int res, ures;
-#if ETHR_XCHK
- if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
- ASSERT(0);
- return EINVAL;
- }
-#endif
- res = 0;
- if (rwmtx->waiting_writers)
- res = ethr_cond_signal(&rwmtx->wcnd);
- else if (rwmtx->waiting_readers)
- res = ethr_cond_broadcast(&rwmtx->rcnd);
- ures = ethr_mutex_unlock__(&rwmtx->mtx);
- if (res == 0)
- res = ures;
- return res;
-}
-
-#endif /* #ifdef ETHR_USE_RWMTX_FALLBACK */
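The fallback rwmutex above is writer-preferring: new readers queue behind any waiting writer, and a writer holds the underlying mutex for the whole write-side critical section. A usage sketch protecting a hypothetical shared counter:

    static ethr_rwmutex rwmtx;      /* assumed initialized with ethr_rwmutex_init() */
    static long shared_counter;

    static long read_counter(void)
    {
        long v;
        ethr_rwmutex_rlock(&rwmtx);     /* several readers may hold this at once */
        v = shared_counter;
        ethr_rwmutex_runlock(&rwmtx);
        return v;
    }

    static void bump_counter(void)
    {
        ethr_rwmutex_rwlock(&rwmtx);    /* exclusive; waits for readers to drain */
        shared_counter++;
        ethr_rwmutex_rwunlock(&rwmtx);
    }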
-
-void
-ethr_compiler_barrier(void)
-{
-
-}
-
-#ifdef DEBUG
-
-#include <stdio.h>
-int ethr_assert_failed(char *f, int l, char *a)
-{
- fprintf(stderr, "%s:%d: Assertion failed: %s\n", f, l, a);
- abort();
- return 0;
-}
-
-#endif
-
-
diff --git a/erts/lib_src/pthread/ethr_event.c b/erts/lib_src/pthread/ethr_event.c
new file mode 100644
index 0000000000..6731c0eb46
--- /dev/null
+++ b/erts/lib_src/pthread/ethr_event.c
@@ -0,0 +1,219 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_EVENT_IMPL__
+
+#include "ethread.h"
+
+#if defined(ETHR_LINUX_FUTEX_IMPL__)
+/* --- Linux futex implementation of ethread events ------------------------- */
+
+#include <sched.h>
+#include <errno.h>
+
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
+
+int
+ethr_event_init(ethr_event *e)
+{
+ ethr_atomic_init(&e->futex, ETHR_EVENT_OFF__);
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ return 0;
+}
+
+static ETHR_INLINE int
+wait__(ethr_event *e, int spincount)
+{
+ unsigned sc = spincount;
+ int res;
+ long val;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ while (1) {
+ while (1) {
+ val = ethr_atomic_read(&e->futex);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (val != ETHR_EVENT_OFF_WAITER__) {
+ val = ethr_atomic_cmpxchg(&e->futex,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(val == ETHR_EVENT_OFF__);
+ }
+
+ res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__, ETHR_EVENT_OFF_WAITER__);
+ if (res == EINTR)
+ break;
+ if (res != 0 && res != EWOULDBLOCK)
+ ETHR_FATAL_ERROR__(res);
+ }
+
+ return res;
+}
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+int
+ethr_event_init(ethr_event *e)
+{
+ int res;
+ ethr_atomic_init(&e->state, ETHR_EVENT_OFF__);
+ res = pthread_mutex_init(&e->mtx, NULL);
+ if (res != 0)
+ return res;
+ res = pthread_cond_init(&e->cnd, NULL);
+ if (res != 0) {
+ pthread_mutex_destroy(&e->mtx);
+ return res;
+ }
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ int res;
+ res = pthread_mutex_destroy(&e->mtx);
+ if (res != 0)
+ return res;
+ res = pthread_cond_destroy(&e->cnd);
+ if (res != 0)
+ return res;
+ return 0;
+}
+
+static ETHR_INLINE int
+wait__(ethr_event *e, int spincount)
+{
+ int sc = spincount;
+ long val;
+ int res, ulres;
+ int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ while (1) {
+ val = ethr_atomic_read(&e->state);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (val != ETHR_EVENT_OFF_WAITER__) {
+ val = ethr_atomic_cmpxchg(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+ if (val == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(val == ETHR_EVENT_OFF__);
+ }
+
+ ETHR_ASSERT(val == ETHR_EVENT_OFF_WAITER__
+ || val == ETHR_EVENT_OFF__);
+
+ res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+
+ while (1) {
+
+ val = ethr_atomic_read(&e->state);
+ if (val == ETHR_EVENT_ON__)
+ break;
+
+ res = pthread_cond_wait(&e->cnd, &e->mtx);
+ if (res == EINTR)
+ break;
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+
+ ulres = pthread_mutex_unlock(&e->mtx);
+ if (ulres != 0)
+ ETHR_FATAL_ERROR__(ulres);
+
+ return res; /* 0 || EINTR */
+}
+
+#else
+#error No ethread event implementation
+#endif
+
+void
+ethr_event_reset(ethr_event *e)
+{
+ ethr_event_reset__(e);
+}
+
+void
+ethr_event_set(ethr_event *e)
+{
+ ethr_event_set__(e);
+}
+
+int
+ethr_event_wait(ethr_event *e)
+{
+ return wait__(e, 0);
+}
+
+int
+ethr_event_swait(ethr_event *e, int spincount)
+{
+ return wait__(e, spincount);
+}
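The futex variant keeps the whole event in one word with three states (ETHR_EVENT_ON__, ETHR_EVENT_OFF__, ETHR_EVENT_OFF_WAITER__); wait__() only enters the kernel after advertising itself with the OFF_WAITER state, and ethr_event_set() above just forwards to the inline ethr_event_set__() defined elsewhere. The following is an illustrative, self-contained sketch of the same three-state protocol using C11 atomics and the raw futex syscall; the names mirror the states above, but this is not the library's code:

    #include <stdatomic.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define EVENT_ON          1
    #define EVENT_OFF         0
    #define EVENT_OFF_WAITER -1

    typedef struct { atomic_int state; } event_t;

    static void event_set(event_t *e)
    {
        /* Publish ON; wake the waiter only if one announced itself. */
        int prev = atomic_exchange(&e->state, EVENT_ON);
        if (prev == EVENT_OFF_WAITER)
            syscall(SYS_futex, &e->state, FUTEX_WAKE, 1, NULL, NULL, 0);
    }

    static void event_wait(event_t *e)
    {
        for (;;) {
            int val = atomic_load(&e->state);
            if (val == EVENT_ON)
                return;
            if (val != EVENT_OFF_WAITER &&
                !atomic_compare_exchange_strong(&e->state, &val,
                                                EVENT_OFF_WAITER))
                continue;       /* state changed under us; re-examine it */
            /* Sleep only while the word still reads OFF_WAITER. */
            syscall(SYS_futex, &e->state, FUTEX_WAIT, EVENT_OFF_WAITER,
                    NULL, NULL, 0);
        }
    }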
diff --git a/erts/lib_src/pthread/ethread.c b/erts/lib_src/pthread/ethread.c
new file mode 100644
index 0000000000..ea1d9d43f0
--- /dev/null
+++ b/erts/lib_src/pthread/ethread.c
@@ -0,0 +1,477 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Pthread implementation of the ethread library
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_CHILD_WAIT_SPIN_COUNT 4000
+
+#include <stdio.h>
+#ifdef ETHR_TIME_WITH_SYS_TIME
+# include <time.h>
+# include <sys/time.h>
+#else
+# ifdef ETHR_HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+#endif
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <limits.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHREAD_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+#error Missing configure defines
+#endif
+
+pthread_key_t ethr_ts_event_key__;
+static int child_wait_spin_count;
+
+/*
+ * --------------------------------------------------------------------------
+ * Static functions
+ * --------------------------------------------------------------------------
+ */
+
+static void thr_exit_cleanup(void)
+{
+ ethr_run_exit_handlers__();
+}
+
+
+/* Argument passed to thr_wrapper() */
+typedef struct {
+ ethr_atomic_t result;
+ ethr_ts_event *tse;
+ void *(*thr_func)(void *);
+ void *arg;
+ void *prep_func_res;
+} ethr_thr_wrap_data__;
+
+static void *thr_wrapper(void *vtwd)
+{
+ long result;
+ void *res;
+ ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
+ void *(*thr_func)(void *) = twd->thr_func;
+ void *arg = twd->arg;
+ ethr_ts_event *tsep = NULL;
+
+ result = (long) ethr_make_ts_event__(&tsep);
+
+ if (result == 0) {
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ if (ethr_thr_child_func__)
+ ethr_thr_child_func__(twd->prep_func_res);
+ }
+
+ tsep = twd->tse; /* We aren't allowed to follow twd after
+ result has been set! */
+
+ ethr_atomic_set(&twd->result, result);
+
+ ethr_event_set(&tsep->event);
+
+ res = result == 0 ? (*thr_func)(arg) : NULL;
+
+ thr_exit_cleanup();
+ return res;
+}
+
+/* internal exports */
+
+int ethr_set_tse__(ethr_ts_event *tsep)
+{
+ return pthread_setspecific(ethr_ts_event_key__, (void *) tsep);
+}
+
+ethr_ts_event *ethr_get_tse__(void)
+{
+ return pthread_getspecific(ethr_ts_event_key__);
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Exported functions
+ * --------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+ int res;
+
+ if (!ethr_not_inited__)
+ return EINVAL;
+
+ ethr_not_inited__ = 0;
+
+ res = ethr_init_common__(id);
+ if (res != 0)
+ goto error;
+
+ child_wait_spin_count = ETHR_CHILD_WAIT_SPIN_COUNT;
+ if (erts_get_cpu_configured(ethr_cpu_info__) == 1)
+ child_wait_spin_count = 0;
+
+    res = pthread_key_create(&ethr_ts_event_key__, ethr_ts_event_destructor__);
+    if (res != 0)
+	goto error;
+
+    return 0;
+ error:
+ ethr_not_inited__ = 1;
+ return res;
+
+}
+
+int
+ethr_late_init(ethr_late_init_data *id)
+{
+ int res = ethr_late_init_common__(id);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0;
+ return res;
+}
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ ethr_thr_wrap_data__ twd;
+ pthread_attr_t attr;
+ int res, dres;
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ ethr_atomic_init(&twd.result, -1);
+ twd.tse = ethr_get_ts_event();
+ twd.thr_func = func;
+ twd.arg = arg;
+
+ res = pthread_attr_init(&attr);
+ if (res != 0)
+ return res;
+
+ /* Error cleanup needed after this point */
+
+ /* Schedule child thread in system scope (if possible) ... */
+ res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
+ if (res != 0 && res != ENOTSUP)
+ goto error;
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+ size_t stack_size;
+#ifdef ETHR_DEBUG
+ suggested_stack_size /= 2; /* Make sure we got margin */
+#endif
+#ifdef ETHR_STACK_GUARD_SIZE
+    /* On at least some platforms, the guard is included in the stack
+       size passed when creating threads */
+ suggested_stack_size += ETHR_B2KW(ETHR_STACK_GUARD_SIZE);
+#endif
+ if (suggested_stack_size < ethr_min_stack_size__)
+ stack_size = ETHR_KW2B(ethr_min_stack_size__);
+ else if (suggested_stack_size > ethr_max_stack_size__)
+ stack_size = ETHR_KW2B(ethr_max_stack_size__);
+ else
+ stack_size = ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ (void) pthread_attr_setstacksize(&attr, stack_size);
+ }
+
+#ifdef ETHR_STACK_GUARD_SIZE
+ (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
+#endif
+
+ /* Detached or joinable... */
+ res = pthread_attr_setdetachstate(&attr,
+ (opts && opts->detached
+ ? PTHREAD_CREATE_DETACHED
+ : PTHREAD_CREATE_JOINABLE));
+ if (res != 0)
+ goto error;
+
+    /* Call prepare func if it exists */
+ if (ethr_thr_prepare_func__)
+ twd.prep_func_res = ethr_thr_prepare_func__();
+ else
+ twd.prep_func_res = NULL;
+
+ res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void*) &twd);
+
+ if (res == 0) {
+ int spin_count = child_wait_spin_count;
+
+ /* Wait for child to initialize... */
+ while (1) {
+ long result;
+ ethr_event_reset(&twd.tse->event);
+
+ result = ethr_atomic_read(&twd.result);
+ if (result == 0)
+ break;
+
+ if (result > 0) {
+ res = (int) result;
+ goto error;
+ }
+
+ res = ethr_event_swait(&twd.tse->event, spin_count);
+ if (res != 0 && res != EINTR)
+ goto error;
+ spin_count = 0;
+ }
+ }
+
+ /* Cleanup... */
+
+ error:
+ dres = pthread_attr_destroy(&attr);
+ if (res == 0)
+ res = dres;
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+ return res;
+}
+
+int
+ethr_thr_join(ethr_tid tid, void **res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_join((pthread_t) tid, res);
+}
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_detach((pthread_t) tid);
+}
+
+void
+ethr_thr_exit(void *res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return;
+ }
+#endif
+ thr_exit_cleanup();
+ pthread_exit(res);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ return (ethr_tid) pthread_self();
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+ return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
+}
+
+
+/*
+ * Thread specific events
+ */
+
+ethr_ts_event *
+ethr_get_ts_event(void)
+{
+ return ethr_get_ts_event__();
+}
+
+void
+ethr_leave_ts_event(ethr_ts_event *tsep)
+{
+ ethr_leave_ts_event__(tsep);
+}
+
+/*
+ * Current time
+ */
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+ int res;
+ struct timeval tv;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ res = gettimeofday(&tv, NULL);
+ time->tv_sec = (long) tv.tv_sec;
+ time->tv_nsec = ((long) tv.tv_usec)*1000;
+ return res;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_key_create((pthread_key_t *) keyp, NULL);
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_key_delete((pthread_key_t) key);
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_setspecific((pthread_key_t) key, value);
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return NULL;
+ }
+#endif
+ return pthread_getspecific((pthread_key_t) key);
+}
+
+/*
+ * Signal functions
+ */
+
+#if ETHR_HAVE_ETHR_SIG_FUNCS
+
+int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!set && !oset) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_sigmask(how, set, oset);
+}
+
+int ethr_sigwait(const sigset_t *set, int *sig)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!set || !sig) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (sigwait(set, sig) < 0)
+ return errno;
+ return 0;
+}
+
+#endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
+
+ETHR_IMPL_NORETURN__
+ethr_abort__(void)
+{
+ abort();
+}
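A minimal usage sketch of the thread API above; ethr_thr_create() accepts a NULL opts pointer, which gives a joinable thread with the system default stack size (ethr_init() is assumed to have been called already, and the hello/spawn_and_join names are illustrative):

    #include <stdio.h>
    #include "ethread.h"

    static void *hello(void *arg)
    {
        printf("hello from child thread\n");
        return arg;                         /* becomes the join result */
    }

    static int spawn_and_join(void)
    {
        ethr_tid tid;
        void *res;
        int err;

        err = ethr_thr_create(&tid, hello, (void *) "worker", NULL);
        if (err != 0)
            return err;
        return ethr_thr_join(tid, &res);    /* on success, res == "worker" */
    }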
diff --git a/erts/lib_src/win/ethr_event.c b/erts/lib_src/win/ethr_event.c
new file mode 100644
index 0000000000..ddb4780ff1
--- /dev/null
+++ b/erts/lib_src/win/ethr_event.c
@@ -0,0 +1,120 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_EVENT_IMPL__
+
+#include "ethread.h"
+
+/* --- Windows implementation of thread events ------------------------------ */
+
+int
+ethr_event_init(ethr_event *e)
+{
+ e->state = ETHR_EVENT_OFF__;
+ e->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (e->handle == INVALID_HANDLE_VALUE)
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+int
+ethr_event_destroy(ethr_event *e)
+{
+ BOOL res = CloseHandle(e->handle);
+ return res == 0 ? ethr_win_get_errno__() : 0;
+}
+
+void
+ethr_event_set(ethr_event *e)
+{
+ ethr_event_set__(e);
+}
+
+void
+ethr_event_reset(ethr_event *e)
+{
+ ethr_event_reset__(e);
+}
+
+static ETHR_INLINE int
+wait(ethr_event *e, int spincount)
+{
+ LONG state;
+ DWORD code;
+ int sc, res, until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+
+ if (spincount < 0)
+ ETHR_FATAL_ERROR__(EINVAL);
+
+ sc = spincount;
+
+ while (1) {
+ while (1) {
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ state = e->state;
+#else
+ state = InterlockedExchangeAdd(&e->state, (LONG) 0);
+#endif
+ if (state == ETHR_EVENT_ON__)
+ return 0;
+ if (sc == 0)
+ break;
+ sc--;
+ ETHR_SPIN_BODY;
+ if (--until_yield == 0) {
+ until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
+ res = ETHR_YIELD();
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+ }
+
+ if (state != ETHR_EVENT_OFF_WAITER__) {
+ state = _InterlockedCompareExchange(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
+ if (state == ETHR_EVENT_ON__)
+ return 0;
+ ETHR_ASSERT(state == ETHR_EVENT_OFF__);
+ }
+
+ code = WaitForSingleObject(e->handle, INFINITE);
+ if (code != WAIT_OBJECT_0)
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+
+}
+
+int
+ethr_event_wait(ethr_event *e)
+{
+ return wait(e, 0);
+}
+
+int
+ethr_event_swait(ethr_event *e, int spincount)
+{
+ return wait(e, spincount);
+}
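As in the futex case, wait() above only falls back to WaitForSingleObject() after moving the state word to ETHR_EVENT_OFF_WAITER__, so an uncontended set()/wait() pair never touches the kernel event. ethr_event_set() forwards to the inline ethr_event_set__(); a sketch of what that pairing looks like (illustrative only, not the header's code):

    static void event_set_sketch(ethr_event *e)
    {
        /* Publish ON; only pay for the kernel call when a waiter has
           announced itself by moving the state to OFF_WAITER. */
        LONG prev = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
        if (prev == ETHR_EVENT_OFF_WAITER__)
            SetEvent(e->handle);
    }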
diff --git a/erts/lib_src/win/ethread.c b/erts/lib_src/win/ethread.c
new file mode 100644
index 0000000000..69523edf94
--- /dev/null
+++ b/erts/lib_src/win/ethread.c
@@ -0,0 +1,625 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Windows native threads implementation of the ethread library
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_CHILD_WAIT_SPIN_COUNT 4000
+
+#undef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+#include <winerror.h>
+#include <stdio.h>
+#include <limits.h>
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHREAD_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+#error Missing configure defines
+#endif
+
+/* Argument passed to thr_wrapper() */
+typedef struct {
+ ethr_tid *tid;
+ ethr_atomic_t result;
+ ethr_ts_event *tse;
+ void *(*thr_func)(void *);
+ void *arg;
+ void *prep_func_res;
+} ethr_thr_wrap_data__;
+
+#define ETHR_INVALID_TID_ID -1
+
+struct ethr_join_data_ {
+ HANDLE handle;
+ void *res;
+};
+
+static ethr_atomic_t thread_id_counter;
+static DWORD own_tid_key;
+static ethr_tid main_thr_tid;
+static int child_wait_spin_count;
+
+DWORD ethr_ts_event_key__;
+
+#define ETHR_GET_OWN_TID__ ((ethr_tid *) TlsGetValue(own_tid_key))
+
+/*
+ * --------------------------------------------------------------------------
+ * Static functions
+ * --------------------------------------------------------------------------
+ */
+
+static void thr_exit_cleanup(ethr_tid *tid, void *res)
+{
+
+ ETHR_ASSERT(tid == ETHR_GET_OWN_TID__);
+
+ if (tid->jdata)
+ tid->jdata->res = res;
+
+ ethr_run_exit_handlers__();
+ ethr_ts_event_destructor__((void *) ethr_get_tse__());
+}
+
+static unsigned __stdcall thr_wrapper(LPVOID vtwd)
+{
+ ethr_tid my_tid;
+ long result;
+ void *res;
+ ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
+ void *(*thr_func)(void *) = twd->thr_func;
+ void *arg = twd->arg;
+ ethr_ts_event *tsep = NULL;
+
+ result = (long) ethr_make_ts_event__(&tsep);
+
+ if (result == 0) {
+ tsep->iflgs |= ETHR_TS_EV_ETHREAD;
+ my_tid = *twd->tid;
+ if (!TlsSetValue(own_tid_key, (LPVOID) &my_tid)) {
+ result = (long) ethr_win_get_errno__();
+ ethr_free_ts_event__(tsep);
+ }
+ else {
+ if (ethr_thr_child_func__)
+ ethr_thr_child_func__(twd->prep_func_res);
+ }
+ }
+
+ tsep = twd->tse; /* We aren't allowed to follow twd after
+ result has been set! */
+
+ ethr_atomic_set(&twd->result, result);
+
+ ethr_event_set(&tsep->event);
+
+ res = result == 0 ? (*thr_func)(arg) : NULL;
+
+ thr_exit_cleanup(&my_tid, res);
+ return 0;
+}
+
+#ifdef __GNUC__
+#define LL_LITERAL(X) X##LL
+#else
+#define LL_LITERAL(X) X##i64
+#endif
+
+#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
+
+static ETHR_INLINE void
+get_curr_time(long *sec, long *nsec)
+{
+ SYSTEMTIME t;
+ FILETIME ft;
+ LONGLONG lft;
+
+ GetSystemTime(&t);
+ SystemTimeToFileTime(&t, &ft);
+ memcpy(&lft, &ft, sizeof(lft));
+ *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
+ *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
+}
+
+/* internal exports */
+
+int
+ethr_win_get_errno__(void)
+{
+ return erts_get_last_win_errno();
+}
+
+int ethr_set_tse__(ethr_ts_event *tsep)
+{
+ return (TlsSetValue(ethr_ts_event_key__, (LPVOID) tsep)
+ ? 0
+ : ethr_win_get_errno__());
+}
+
+ethr_ts_event *ethr_get_tse__(void)
+{
+ return (ethr_ts_event *) TlsGetValue(ethr_ts_event_key__);
+}
+
+ETHR_IMPL_NORETURN__
+ethr_abort__(void)
+{
+#if 1
+ DebugBreak();
+#else
+ abort();
+#endif
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Exported functions
+ * ----------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+#ifdef _WIN32_WINNT
+ DWORD major = (_WIN32_WINNT >> 8) & 0xff;
+ DWORD minor = _WIN32_WINNT & 0xff;
+ OSVERSIONINFO os_version;
+#endif
+ int err = 0;
+ unsigned long i;
+
+ if (!ethr_not_inited__)
+ return EINVAL;
+
+#ifdef _WIN32_WINNT
+ os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&os_version);
+ if (os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
+ || os_version.dwMajorVersion < major
+ || (os_version.dwMajorVersion == major
+ && os_version.dwMinorVersion < minor))
+ return ENOTSUP;
+#endif
+ err = ethr_init_common__(id);
+ if (err)
+ goto error;
+
+ own_tid_key = TlsAlloc();
+ if (own_tid_key == TLS_OUT_OF_INDEXES)
+ goto error;
+
+ ethr_atomic_init(&thread_id_counter, 0);
+
+ main_thr_tid.id = 0;
+ main_thr_tid.jdata = NULL;
+
+ if (!TlsSetValue(own_tid_key, (LPVOID) &main_thr_tid))
+ goto error;
+
+ ETHR_ASSERT(&main_thr_tid == ETHR_GET_OWN_TID__);
+
+ ethr_ts_event_key__ = TlsAlloc();
+ if (ethr_ts_event_key__ == TLS_OUT_OF_INDEXES)
+ goto error;
+
+ child_wait_spin_count = ETHR_CHILD_WAIT_SPIN_COUNT;
+ if (erts_get_cpu_configured(ethr_cpu_info__) == 1)
+ child_wait_spin_count = 0;
+
+ ethr_not_inited__ = 0;
+
+ return 0;
+
+ error:
+ ethr_not_inited__ = 1;
+ if (err == 0)
+ err = ethr_win_get_errno__();
+ ETHR_ASSERT(err != 0);
+ return err;
+}
+
+int
+ethr_late_init(ethr_late_init_data *id)
+{
+ int res = ethr_late_init_common__(id);
+ if (res != 0)
+ return res;
+ ethr_not_completely_inited__ = 0;
+ return res;
+}
+
+
+/*
+ * Thread functions.
+ */
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ HANDLE handle = INVALID_HANDLE_VALUE;
+ int err = 0;
+ ethr_thr_wrap_data__ twd;
+ DWORD code;
+ unsigned ID;
+ unsigned stack_size = 0; /* 0 = system default */
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+#if ETHR_XCHK
+ if (ethr_not_completely_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ do {
+ tid->id = ethr_atomic_inc_read(&thread_id_counter);
+ } while (tid->id == ETHR_INVALID_TID_ID);
+
+ if (opts && opts->detached)
+ tid->jdata = NULL;
+ else {
+ tid->jdata = ethr_mem__.std.alloc(sizeof(struct ethr_join_data_));
+ if (!tid->jdata)
+ return ENOMEM;
+ tid->jdata->handle = INVALID_HANDLE_VALUE;
+ tid->jdata->res = NULL;
+ }
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+#ifdef ETHR_DEBUG
+ suggested_stack_size /= 2; /* Make sure we got margin */
+#endif
+ if (suggested_stack_size < ethr_min_stack_size__)
+ stack_size = (unsigned) ETHR_KW2B(ethr_min_stack_size__);
+ else if (suggested_stack_size > ethr_max_stack_size__)
+ stack_size = (unsigned) ETHR_KW2B(ethr_max_stack_size__);
+ else
+ stack_size = (unsigned)
+ ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ }
+
+ ethr_atomic_init(&twd.result, -1);
+
+ twd.tid = tid;
+ twd.thr_func = func;
+ twd.arg = arg;
+ twd.tse = ethr_get_ts_event();
+
+    /* Call prepare func if it exists */
+ if (ethr_thr_prepare_func__)
+ twd.prep_func_res = ethr_thr_prepare_func__();
+ else
+ twd.prep_func_res = NULL;
+
+ /* spawn the thr_wrapper function */
+ handle = (HANDLE) _beginthreadex(NULL, stack_size, thr_wrapper,
+ (LPVOID) &twd, 0, &ID);
+ if (handle == (HANDLE) 0) {
+ handle = INVALID_HANDLE_VALUE;
+ goto error;
+ }
+ else {
+ int spin_count = child_wait_spin_count;
+
+ ETHR_ASSERT(handle != INVALID_HANDLE_VALUE);
+
+ if (!tid->jdata)
+ CloseHandle(handle);
+ else
+ tid->jdata->handle = handle;
+
+ /* Wait for child to initialize... */
+ while (1) {
+ long result;
+ ethr_event_reset(&twd.tse->event);
+
+ result = ethr_atomic_read(&twd.result);
+ if (result == 0)
+ break;
+
+ if (result > 0) {
+ err = (int) result;
+ goto error;
+ }
+
+ err = ethr_event_swait(&twd.tse->event, spin_count);
+ if (err && err != EINTR)
+ goto error;
+ spin_count = 0;
+ }
+ }
+
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+
+ if (twd.tse)
+ ethr_leave_ts_event(twd.tse);
+
+ return 0;
+
+ error:
+
+ if (err == 0)
+ err = ethr_win_get_errno__();
+ ETHR_ASSERT(err != 0);
+
+ if (ethr_thr_parent_func__)
+ ethr_thr_parent_func__(twd.prep_func_res);
+
+ if (handle != INVALID_HANDLE_VALUE) {
+ WaitForSingleObject(handle, INFINITE);
+ CloseHandle(handle);
+ }
+
+ if (tid->jdata) {
+ ethr_mem__.std.free(tid->jdata);
+ tid->jdata = NULL;
+ }
+
+ tid->id = ETHR_INVALID_TID_ID;
+
+ if (twd.tse)
+ ethr_leave_ts_event(twd.tse);
+
+ return err;
+}
+
+int ethr_thr_join(ethr_tid tid, void **res)
+{
+ DWORD code;
+
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (tid.id == ETHR_INVALID_TID_ID || !tid.jdata)
+ return EINVAL;
+
+ /* Wait for thread to terminate */
+ code = WaitForSingleObject(tid.jdata->handle, INFINITE);
+ if (code != WAIT_OBJECT_0)
+ return ethr_win_get_errno__();
+
+ CloseHandle(tid.jdata->handle);
+ tid.jdata->handle = INVALID_HANDLE_VALUE;
+
+ if (res)
+ *res = tid.jdata->res;
+
+    /*
+     * The user must not try to join or detach this tid again;
+     * doing so leads to undefined behaviour (the user's responsibility).
+     */
+
+ ethr_mem__.std.free(tid.jdata);
+
+ return 0;
+}
+
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (tid.id == ETHR_INVALID_TID_ID || !tid.jdata)
+ return EINVAL;
+
+ CloseHandle(tid.jdata->handle);
+ tid.jdata->handle = INVALID_HANDLE_VALUE;
+
+    /*
+     * The user must not try to join or detach this tid again;
+     * doing so leads to undefined behaviour (the user's responsibility).
+     */
+
+ ethr_mem__.std.free(tid.jdata);
+
+ return 0;
+}
+
+
+void
+ethr_thr_exit(void *res)
+{
+ ethr_tid *tid;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return;
+ }
+#endif
+ tid = ETHR_GET_OWN_TID__;
+ if (!tid) {
+ ETHR_ASSERT(0);
+ _endthreadex((unsigned) 0);
+ }
+ thr_exit_cleanup(tid, res);
+ _endthreadex((unsigned) 0);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ ethr_tid *tid;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ethr_tid dummy_tid = {ETHR_INVALID_TID_ID, NULL};
+ ETHR_ASSERT(0);
+ return dummy_tid;
+ }
+#endif
+    /* It is okay for non-ethreads (i.e. native win32 threads) to call
+       ethr_self(); they will, however, get an invalid tid back. */
+ tid = ETHR_GET_OWN_TID__;
+ if (!tid) {
+ ethr_tid dummy_tid = {ETHR_INVALID_TID_ID, NULL};
+ return dummy_tid;
+ }
+ return *tid;
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+ /* An invalid tid does not equal any tid, not even an invalid tid */
+ return tid1.id == tid2.id && tid1.id != ETHR_INVALID_TID_ID;
+}
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ get_curr_time(&time->tv_sec, &time->tv_nsec);
+ return 0;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+ DWORD key;
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ key = TlsAlloc();
+ if (key == TLS_OUT_OF_INDEXES)
+ return ethr_win_get_errno__();
+ *keyp = (ethr_tsd_key) key;
+ return 0;
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsFree((DWORD) key))
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsSetValue((DWORD) key, (LPVOID) value))
+ return ethr_win_get_errno__();
+ return 0;
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited__) {
+ ETHR_ASSERT(0);
+ return NULL;
+ }
+#endif
+ return (void *) TlsGetValue((DWORD) key);
+}
+
+
+/*
+ * Thread specific events
+ */
+
+ethr_ts_event *
+ethr_get_ts_event(void)
+{
+ return ethr_get_ts_event__();
+}
+
+void
+ethr_leave_ts_event(ethr_ts_event *tsep)
+{
+ ethr_leave_ts_event__(tsep);
+}
+
+ethr_ts_event *
+ethr_create_ts_event__(void)
+{
+ ethr_ts_event *tsep;
+ ethr_make_ts_event__(&tsep);
+ return tsep;
+}
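
The win32 thread API above keeps the pthread-like contract: a tid created without opts->detached carries heap-allocated join data and must be joined or detached exactly once. A minimal usage sketch, not part of the patch, assuming the declarations in ethread.h match the definitions above and that the library has already been initialised:

/* Editor's sketch, not part of the patch. Assumes ethread.h declares the
 * functions defined above and that ethr_init() has already been called. */

static void *worker(void *arg)
{
    /* ... do some work ... */
    ethr_thr_exit(arg);        /* same effect as returning arg */
    return NULL;               /* not reached */
}

static int run_one_joinable_thread(void *arg)
{
    ethr_tid tid;
    void *res;
    int err;

    /* NULL opts gives a joinable thread with the default stack size. */
    err = ethr_thr_create(&tid, worker, arg, NULL);
    if (err != 0)
        return err;

    /* A joinable tid must be joined or detached exactly once; the join
     * data allocated in ethr_thr_create() is freed by whichever of
     * ethr_thr_join()/ethr_thr_detach() runs. */
    return ethr_thr_join(tid, &res);
}
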
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index 304ac41dce..7b824f1ab4 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 21e5525b2f..3cab91b722 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 8ccc553c13..9f88ddd51e 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/otp_ring0.beam b/erts/preloaded/ebin/otp_ring0.beam
index 8ae73ea9a7..4c9cc9675b 100644
--- a/erts/preloaded/ebin/otp_ring0.beam
+++ b/erts/preloaded/ebin/otp_ring0.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 3acda843fd..9a4904b6a1 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index 9457b6d360..ac336c1b2d 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_zip.beam b/erts/preloaded/ebin/prim_zip.beam
index 6837cb4661..c57d37725e 100644
--- a/erts/preloaded/ebin/prim_zip.beam
+++ b/erts/preloaded/ebin/prim_zip.beam
Binary files differ
diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam
index 4e4f85d312..6b14d3fd66 100644
--- a/erts/preloaded/ebin/zlib.beam
+++ b/erts/preloaded/ebin/zlib.beam
Binary files differ
diff --git a/erts/preloaded/src/erl_prim_loader.erl b/erts/preloaded/src/erl_prim_loader.erl
index a13292d5ab..024b20eadb 100644
--- a/erts/preloaded/src/erl_prim_loader.erl
+++ b/erts/preloaded/src/erl_prim_loader.erl
@@ -405,7 +405,7 @@ handle_timeout(State = #state{loader = inet}, Parent) ->
efile_multi_get_file_from_port(State, ModFiles, Paths, Fun) ->
Ref = make_ref(),
%% More than 200 processes gives no gain.
- Max = min(200, erlang:system_info(thread_pool_size)),
+ Max = erlang:min(200, erlang:system_info(thread_pool_size)),
efile_multi_get_file_from_port2(ModFiles, 0, Max, State, Paths, Fun, Ref, ok).
efile_multi_get_file_from_port2([MF | MFs], Out, Max, State, Paths, Fun, Ref, Ret) when Out < Max ->
@@ -1189,9 +1189,6 @@ keyins(X, I, [Y | T]) when X < element(I,Y) -> [X,Y|T];
keyins(X, I, [Y | T]) -> [Y | keyins(X, I, T)];
keyins(X, _I, []) -> [X].
-min(X, Y) when X < Y -> X;
-min(_X, Y) -> Y.
-
to_strs([P|Paths]) when is_atom(P) ->
[atom_to_list(P)|to_strs(Paths)];
to_strs([P|Paths]) when is_list(P) ->
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 1edb5e72db..935c2de253 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -44,7 +44,14 @@
-deprecated([hash/2]).
-deprecated([concat_binary/1]).
--compile(nowarn_bif_clash).
+% Get rid of autoimports of spawn to avoid clashes with ourselves.
+-compile({no_auto_import,[spawn/1]}).
+-compile({no_auto_import,[spawn/4]}).
+-compile({no_auto_import,[spawn_link/1]}).
+-compile({no_auto_import,[spawn_link/4]}).
+-compile({no_auto_import,[spawn_opt/2]}).
+-compile({no_auto_import,[spawn_opt/4]}).
+-compile({no_auto_import,[spawn_opt/5]}).
%%--------------------------------------------------------------------------
@@ -55,10 +62,10 @@
%%--------------------------------------------------------------------------
apply(Fun, Args) ->
- apply(Fun, Args).
+ erlang:apply(Fun, Args).
apply(Mod, Name, Args) ->
- apply(Mod, Name, Args).
+ erlang:apply(Mod, Name, Args).
%% Spawns with a fun
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index 43e6f6cd88..7f24889bb2 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -1,19 +1,19 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2000-2009. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2000-2010. All Rights Reserved.
+%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
-%%
+%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
-%%
+%%
%% %CopyrightEnd%
%%
-module(prim_file).
@@ -25,7 +25,7 @@
%%% Interface towards a single file's contents. Uses ?FD_DRV.
%% Generic file contents operations
--export([open/2, close/1, sync/1, position/2, truncate/1,
+-export([open/2, close/1, datasync/1, sync/1, advise/4, position/2, truncate/1,
write/2, pwrite/2, pwrite/3, read/2, read_line/1, pread/2, pread/3, copy/3]).
%% Specialized file operations
@@ -96,6 +96,8 @@
-define(FILE_IPREAD, 27).
-define(FILE_ALTNAME, 28).
-define(FILE_READ_LINE, 29).
+-define(FILE_FDATASYNC, 30).
+-define(FILE_ADVISE, 31).
%% Driver responses
-define(FILE_RESP_OK, 0).
@@ -114,9 +116,10 @@
-define(EFILE_MODE_READ_WRITE, 3).
-define(EFILE_MODE_APPEND, 4).
-define(EFILE_COMPRESSED, 8).
+-define(EFILE_MODE_EXCL, 16).
%% Use this mask to get just the mode bits to be passed to the driver.
--define(EFILE_MODE_MASK, 15).
+-define(EFILE_MODE_MASK, 31).
%% Seek modes for the driver's seek function.
-define(EFILE_SEEK_SET, 0).
@@ -130,6 +133,13 @@
%% IPREAD variants
-define(IPREAD_S32BU_P32BU, 0).
+%% POSIX file advise constants
+-define(POSIX_FADV_NORMAL, 0).
+-define(POSIX_FADV_RANDOM, 1).
+-define(POSIX_FADV_SEQUENTIAL, 2).
+-define(POSIX_FADV_WILLNEED, 3).
+-define(POSIX_FADV_DONTNEED, 4).
+-define(POSIX_FADV_NOREUSE, 5).
%%%-----------------------------------------------------------------
@@ -220,7 +230,35 @@ close(#file_descriptor{module = ?MODULE, data = {Port, _}}) ->
close(Port) when is_port(Port) ->
drv_close(Port).
+-define(ADVISE(Offs, Len, Adv),
+ <<?FILE_ADVISE, Offs:64/signed, Len:64/signed,
+ Adv:32/signed>>).
+%% Returns {error, Reason} | ok.
+advise(#file_descriptor{module = ?MODULE, data = {Port, _}},
+ Offset, Length, Advise) ->
+ case Advise of
+ normal ->
+ Cmd = ?ADVISE(Offset, Length, ?POSIX_FADV_NORMAL),
+ drv_command(Port, Cmd);
+ random ->
+ Cmd = ?ADVISE(Offset, Length, ?POSIX_FADV_RANDOM),
+ drv_command(Port, Cmd);
+ sequential ->
+ Cmd = ?ADVISE(Offset, Length, ?POSIX_FADV_SEQUENTIAL),
+ drv_command(Port, Cmd);
+ will_need ->
+ Cmd = ?ADVISE(Offset, Length, ?POSIX_FADV_WILLNEED),
+ drv_command(Port, Cmd);
+ dont_need ->
+ Cmd = ?ADVISE(Offset, Length, ?POSIX_FADV_DONTNEED),
+ drv_command(Port, Cmd);
+ no_reuse ->
+ Cmd = ?ADVISE(Offset, Length, ?POSIX_FADV_NOREUSE),
+ drv_command(Port, Cmd);
+ _ ->
+ {error, einval}
+ end.
%% Returns {error, Reason} | ok.
write(#file_descriptor{module = ?MODULE, data = {Port, _}}, Bytes) ->
@@ -292,6 +330,9 @@ pwrite(#file_descriptor{module = ?MODULE}, _, _) ->
{error, badarg}.
+%% Returns {error, Reason} | ok.
+datasync(#file_descriptor{module = ?MODULE, data = {Port, _}}) ->
+ drv_command(Port, [?FILE_FDATASYNC]).
%% Returns {error, Reason} | ok.
sync(#file_descriptor{module = ?MODULE, data = {Port, _}}) ->
@@ -918,6 +959,8 @@ open_mode([compressed|Rest], Mode, Portopts, Setopts) ->
open_mode([append|Rest], Mode, Portopts, Setopts) ->
open_mode(Rest, Mode bor ?EFILE_MODE_APPEND bor ?EFILE_MODE_WRITE,
Portopts, Setopts);
+open_mode([exclusive|Rest], Mode, Portopts, Setopts) ->
+ open_mode(Rest, Mode bor ?EFILE_MODE_EXCL, Portopts, Setopts);
open_mode([delayed_write|Rest], Mode, Portopts, Setopts) ->
open_mode([{delayed_write, 64*1024, 2000}|Rest], Mode,
Portopts, Setopts);
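
The new ?FILE_ADVISE command ships Offset, Length and one of the ?POSIX_FADV_* values to the efile driver. On POSIX systems the natural driver-side backend for such a request is posix_fadvise(2); the sketch below illustrates that assumed mapping and is not part of this diff:

/* Editor's sketch of the assumed driver-side call; not part of the patch. */
#define _XOPEN_SOURCE 600
#include <fcntl.h>

/* Hint that fd will be read sequentially from offset for len bytes.
 * posix_fadvise() returns 0 on success or an errno value on failure. */
static int advise_sequential(int fd, off_t offset, off_t len)
{
    return posix_fadvise(fd, offset, len, POSIX_FADV_SEQUENTIAL);
}
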
diff --git a/erts/preloaded/src/prim_zip.erl b/erts/preloaded/src/prim_zip.erl
index 3f5a5b9721..6a9856fdad 100644
--- a/erts/preloaded/src/prim_zip.erl
+++ b/erts/preloaded/src/prim_zip.erl
@@ -65,34 +65,55 @@ filter_fun() ->
open(F) ->
open(filter_fun(), undefined, F).
-open(FilterFun, FilterAcc, F) ->
- case ?CATCH do_open(FilterFun, FilterAcc, F) of
- {ok, PrimZip, Acc} ->
- {ok, PrimZip, Acc};
- Error ->
- {error, Error}
- end.
+open(FilterFun, FilterAcc, F) when is_function(FilterFun, 2) ->
+ try
+ do_open(FilterFun, FilterAcc, F)
+ catch
+ throw:{filter_fun_throw, Reason} ->
+ throw(Reason);
+ throw:InternalReason ->
+ {error, InternalReason};
+ Class:Reason ->
+ erlang:error(erlang:raise(Class, Reason, erlang:get_stacktrace()))
+ end;
+open(_, _, _) ->
+ {error, einval}.
do_open(FilterFun, FilterAcc, F) ->
Input = get_zip_input(F),
In0 = Input({open, F, [read, binary, raw]}, []),
Z = zlib:open(),
PrimZip = #primzip{files = [], zlib = Z, in = In0, input = Input},
- {PrimZip2, FilterAcc2} = get_central_dir(PrimZip, FilterFun, FilterAcc),
- {ok, PrimZip2, FilterAcc2}.
+ try
+ {PrimZip2, FilterAcc2} = get_central_dir(PrimZip, FilterFun, FilterAcc),
+ {ok, PrimZip2, FilterAcc2}
+ catch
+ Class:Reason ->
+ close(PrimZip),
+ erlang:error(erlang:raise(Class, Reason, erlang:get_stacktrace()))
+ end.
%% iterate over all files in a zip archive
-foldl(FilterFun, FilterAcc, #primzip{files = Files} = PrimZip) ->
- case ?CATCH do_foldl(FilterFun, FilterAcc, Files, [], PrimZip, PrimZip) of
- {ok, FilterAcc2, PrimZip2} -> {ok, PrimZip2, FilterAcc2};
- Error -> {error, Error}
+foldl(FilterFun, FilterAcc, #primzip{files = Files} = PrimZip)
+ when is_function(FilterFun, 2) ->
+ try
+ {ok, FilterAcc2, PrimZip2} =
+ do_foldl(FilterFun, FilterAcc, Files, [], PrimZip, PrimZip),
+ {ok, PrimZip2, FilterAcc2}
+ catch
+ throw:{filter_fun_throw, Reason} ->
+ throw(Reason);
+ throw:InternalReason ->
+ {error, InternalReason};
+ Class:Reason ->
+ erlang:error(erlang:raise(Class, Reason, erlang:get_stacktrace()))
end;
foldl(_, _, _) ->
{error, einval}.
do_foldl(FilterFun, FilterAcc, [PF | Tail], Acc0, PrimZip, PrimZipOrig) ->
#primzip_file{name = F, get_info = GetInfo, get_bin = GetBin} = PF,
- case FilterFun({F, GetInfo, GetBin}, FilterAcc) of
+ try FilterFun({F, GetInfo, GetBin}, FilterAcc) of
{Continue, Include, FilterAcc2} ->
Acc1 = include_acc(Include, PF, Acc0),
case Continue of
@@ -103,6 +124,9 @@ do_foldl(FilterFun, FilterAcc, [PF | Tail], Acc0, PrimZip, PrimZipOrig) ->
end;
FilterRes ->
throw({illegal_filter, FilterRes})
+ catch
+ throw:Reason ->
+ throw({filter_fun_throw, Reason})
end;
do_foldl(_FilterFun, FilterAcc, [], Acc, PrimZip, _PrimZipOrig) ->
{ok, FilterAcc, PrimZip#primzip{files = reverse(Acc)}}.
@@ -121,12 +145,14 @@ include_acc(Include, PF, Acc) ->
List when is_list(List) ->
%% Add new entries
Fun = fun(I, A) -> include_acc(I, PF, A) end,
- lists_foldl(Fun, Acc, List)
+ lists_foldl(Fun, Acc, List);
+ Bad ->
+ throw({illegal_filter, Bad})
end.
lists_foldl(F, Accu, [Hd|Tail]) ->
lists_foldl(F, F(Hd, Accu), Tail);
-lists_foldl(F, Accu, []) when is_function(F, 2) ->
+lists_foldl(F, Accu, []) when is_function(F, 2) ->
Accu.
%% close a zip archive
@@ -139,7 +165,9 @@ close(_) ->
get_zip_input({F, B}) when is_binary(B), is_list(F) ->
fun binary_io/2;
get_zip_input(F) when is_list(F) ->
- fun prim_file_io/2.
+ fun prim_file_io/2;
+get_zip_input(_) ->
+ throw(einval).
%% get a file from the archive
get_z_file(F, Offset, ChunkSize, #primzip{zlib = Z, in = In0, input = Input}) ->
@@ -218,15 +246,15 @@ get_cd_loop(N, BCD, Acc0, PrimZip, FileName, Offset, CFH, EndOffset, FilterFun,
GetInfo = fun() -> cd_file_header_to_file_info(FileName, CFH, <<>>) end,
GetBin = fun() -> get_z_file(FileName, Offset, Size, PrimZip) end,
PF = #primzip_file{name = FileName, get_info = GetInfo, get_bin = GetBin},
- case FilterFun({FileName, GetInfo, GetBin}, FilterAcc) of
+ try FilterFun({FileName, GetInfo, GetBin}, FilterAcc) of
{Continue, Include, FilterAcc2} ->
Acc1 =
case Include of
- false ->
+ false ->
Acc0;
true ->
[PF | Acc0];
- {true, Nick} ->
+ {true, Nick} ->
[PF#primzip_file{name = Nick} | Acc0];
{true, Nick, GI, GB} ->
PF2 = #primzip_file{name = Nick, get_info = GI, get_bin = GB},
@@ -247,13 +275,16 @@ get_cd_loop(N, BCD, Acc0, PrimZip, FileName, Offset, CFH, EndOffset, FilterFun,
end;
FilterRes ->
throw({illegal_filter, FilterRes})
+ catch
+ throw:Reason ->
+ throw({filter_fun_throw, Reason})
end.
get_file_header(BCD) ->
BCFH =
case BCD of
<<?CENTRAL_FILE_MAGIC:32/little,
- B:(?CENTRAL_FILE_HEADER_SZ-4)/binary,
+ B:(?CENTRAL_FILE_HEADER_SZ-4)/binary,
_/binary>> ->
B;
_ ->
@@ -266,11 +297,11 @@ get_file_header(BCD) ->
ToGet = FileNameLen + ExtraLen + CommentLen,
{B2, BCDRest} =
case BCD of
- <<_:?CENTRAL_FILE_HEADER_SZ/binary,
+ <<_:?CENTRAL_FILE_HEADER_SZ/binary,
G:ToGet/binary,
- Rest/binary>> ->
+ Rest/binary>> ->
{G, Rest};
- _ ->
+ _ ->
throw(bad_central_directory)
end,
FileName = get_filename_from_b2(B2, FileNameLen, ExtraLen, CommentLen),
@@ -319,9 +350,9 @@ prim_file_io({file_info, F}, _) ->
end;
prim_file_io({open, FN, Opts}, _) ->
case ?CATCH prim_file:open(FN, Opts++[binary]) of
- {ok, H} ->
+ {ok, H} ->
H;
- {error, E} ->
+ {error, E} ->
throw(E)
end;
prim_file_io({read, N}, H) ->
@@ -476,7 +507,7 @@ cd_file_header_to_file_info(FileName,
%% FI; % not yet supported, and not widely used
add_extra_info(FI, _) ->
FI.
-%%
+%%
%% unix_extra_field_and_var_from_bin(<<TSize:16/little,
%% ATime:32/little,
%% MTime:32/little,
@@ -500,7 +531,7 @@ dos_date_time_to_datetime(DosDate, DosTime) ->
<<Hour:5, Min:6, Sec:5>> = <<DosTime:16>>,
<<YearFrom1980:7, Month:4, Day:5>> = <<DosDate:16>>,
{{YearFrom1980+1980, Month, Day},
- {Hour, Min, Sec}}.
+ {Hour, Min, Sec}}.
cd_file_header_from_bin(<<VersionMadeBy:16/little,
VersionNeeded:16/little,
@@ -622,7 +653,7 @@ reverse(X) ->
reverse([H|T], Y) ->
reverse(T, [H|Y]);
-reverse([], X) ->
+reverse([], X) ->
X.
last([E|Es]) -> last(E, Es).
diff --git a/erts/test/erlexec_SUITE_data/erlexec_tests.c b/erts/test/erlexec_SUITE_data/erlexec_tests.c
index a49a0b21a8..1d1ca881d9 100644
--- a/erts/test/erlexec_SUITE_data/erlexec_tests.c
+++ b/erts/test/erlexec_SUITE_data/erlexec_tests.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2008-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2008-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
/* Used by test case otp_7461 to spawn a child process with a given
@@ -22,7 +22,7 @@
* Author: Sverker Eriksson
*/
-#if defined (__WIN32__) || defined(VXWORKS) || defined(_OSE_)
+#if defined (__WIN32__) || defined(VXWORKS)
int main() {return 0;}
#else /* UNIX only */
diff --git a/erts/test/ethread_SUITE.erl b/erts/test/ethread_SUITE.erl
index bbc79e9381..0cc315e9be 100644
--- a/erts/test/ethread_SUITE.erl
+++ b/erts/test/ethread_SUITE.erl
@@ -37,21 +37,16 @@
equal_tids/1,
mutex/1,
try_lock_mutex/1,
- recursive_mutex/1,
time_now/1,
cond_wait/1,
- cond_timedwait/1,
broadcast/1,
detached_thread/1,
max_threads/1,
- forksafety/1,
- vfork/1,
tsd/1,
spinlock/1,
rwspinlock/1,
rwmutex/1,
- atomic/1,
- gate/1]).
+ atomic/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -60,21 +55,16 @@ tests() ->
equal_tids,
mutex,
try_lock_mutex,
- recursive_mutex,
time_now,
cond_wait,
- cond_timedwait,
broadcast,
detached_thread,
max_threads,
- forksafety,
- vfork,
tsd,
spinlock,
rwspinlock,
rwmutex,
- atomic,
- gate].
+ atomic].
all(doc) -> [];
all(suite) -> tests().
@@ -114,13 +104,6 @@ try_lock_mutex(suite) ->
try_lock_mutex(Config) ->
run_case(Config, "try_lock_mutex", "").
-recursive_mutex(doc) ->
- ["Tests recursive mutexes."];
-recursive_mutex(suite) ->
- [];
-recursive_mutex(Config) ->
- run_case(Config, "recursive_mutex", "").
-
time_now(doc) ->
["Tests ethr_time_now by comparing time values with Erlang."];
time_now(suite) ->
@@ -169,13 +152,6 @@ cond_wait(suite) ->
cond_wait(Config) ->
run_case(Config, "cond_wait", "").
-cond_timedwait(doc) ->
- ["Tests ethr_cond_timedwait with ethr_cond_signal and ethr_cond_broadcast."];
-cond_timedwait(suite) ->
- [];
-cond_timedwait(Config) ->
- run_case(Config, "cond_timedwait", "").
-
broadcast(doc) ->
["Tests that a ethr_cond_broadcast really wakes up all waiting threads"];
broadcast(suite) ->
@@ -197,25 +173,6 @@ max_threads(suite) ->
max_threads(Config) ->
run_case(Config, "max_threads", "").
-forksafety(doc) ->
- ["Tests forksafety."];
-forksafety(suite) ->
- [];
-forksafety(Config) ->
- run_case(Config, "forksafety", "").
-
-vfork(doc) ->
- ["Tests vfork with threads."];
-vfork(suite) ->
- case ?t:os_type() of
- {unix, osf1} ->
- {skip, "vfork() known to hang multi-threaded applications on osf1"};
- _ ->
- []
- end;
-vfork(Config) ->
- run_case(Config, "vfork", "").
-
tsd(doc) ->
["Tests thread specific data."];
tsd(suite) ->
@@ -251,13 +208,6 @@ atomic(suite) ->
atomic(Config) ->
run_case(Config, "atomic", "").
-gate(doc) ->
- ["Tests gates."];
-gate(suite) ->
- [];
-gate(Config) ->
- run_case(Config, "gate", "").
-
%%
%%
%% Auxiliary functions
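
In the reworked C test cases below, mutexes and condition variables are created with ethr_mutex_init()/ethr_cond_init() instead of static initializers, ethr_mutex_lock()/ethr_mutex_unlock() no longer return a status, and ethr_cond_wait() still returns 0 or EINTR. A condensed sketch of that pattern, an editor's illustration only, with return types inferred from how the updated tests call the functions:

/* Editor's sketch of the init/lock/wait pattern used by the updated tests.
 * ASSERT is the test suite's own macro; ethr_mutex_lock()/unlock() are
 * assumed to return void, as the rewritten test code implies. */
static ethr_mutex mtx;
static ethr_cond cnd;
static int ready = 0;

static void init_sync(void)
{
    int res = ethr_mutex_init(&mtx);
    ASSERT(res == 0);
    res = ethr_cond_init(&cnd);
    ASSERT(res == 0);
}

static void wait_until_ready(void)
{
    int res;
    ethr_mutex_lock(&mtx);
    while (!ready) {
        res = ethr_cond_wait(&cnd, &mtx);
        ASSERT(res == 0 || res == EINTR);
    }
    ethr_mutex_unlock(&mtx);
}
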
diff --git a/erts/test/ethread_SUITE_data/ethread_tests.c b/erts/test/ethread_SUITE_data/ethread_tests.c
index f779f13c51..7fc71d8047 100644
--- a/erts/test/ethread_SUITE_data/ethread_tests.c
+++ b/erts/test/ethread_SUITE_data/ethread_tests.c
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -90,10 +90,12 @@ static void print_line(char *frmt,...)
print_eol();
}
+#if 0 /* Currently not used; silence annoying warning... */
static void print(char *frmt,...)
{
PRINT_VA_LIST(frmt);
}
+#endif
static void fail(char *frmt,...)
{
@@ -197,8 +199,8 @@ create_join_thread_test(void)
}
for (i = 1; i <= CJTT_NO_THREADS; i++) {
- int *tres;
- res = ethr_thr_join(cjtt_tids[i], (void **) &tres);
+ void *tres;
+ res = ethr_thr_join(cjtt_tids[i], &tres);
ASSERT(res == 0);
ASSERT(tres == &cjtt_res[i]);
ASSERT(cjtt_res[i] == i);
@@ -216,8 +218,8 @@ create_join_thread_test(void)
#define ETT_THREADS 100000
static ethr_tid ett_tids[3];
-static ethr_mutex ett_mutex = ETHR_MUTEX_INITER;
-static ethr_cond ett_cond = ETHR_COND_INITER;
+static ethr_mutex ett_mutex;
+static ethr_cond ett_cond;
static int ett_terminate;
static void *
@@ -234,14 +236,12 @@ static void *
ett_thread2(void *unused)
{
int res;
- res = ethr_mutex_lock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&ett_mutex);
while (!ett_terminate) {
res = ethr_cond_wait(&ett_cond, &ett_mutex);
ASSERT(res == 0);
}
- res = ethr_mutex_unlock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&ett_mutex);
return NULL;
}
@@ -250,6 +250,10 @@ equal_tids_test(void)
{
int res, i;
+ res = ethr_mutex_init(&ett_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&ett_cond);
+ ASSERT(res == 0);
ett_tids[0] = ethr_self();
res = ethr_thr_create(&ett_tids[1], ett_thread, (void *) &ett_tids[1], NULL);
@@ -298,13 +302,10 @@ equal_tids_test(void)
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&ett_mutex);
ett_terminate = 1;
- res = ethr_cond_signal(&ett_cond);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&ett_mutex);
- ASSERT(res == 0);
+ ethr_cond_signal(&ett_cond);
+ ethr_mutex_unlock(&ett_mutex);
res = ethr_thr_join(ett_tids[1], NULL);
ASSERT(res == 0);
@@ -321,17 +322,14 @@ equal_tids_test(void)
* Tests mutexes.
*/
-static ethr_mutex mt_mutex = ETHR_MUTEX_INITER;
+static ethr_mutex mt_mutex;
static int mt_data;
void *
mt_thread(void *unused)
{
- int res;
-
print_line("Aux thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Aux thread locked mutex");
ASSERT(mt_data == 0);
@@ -345,8 +343,7 @@ mt_thread(void *unused)
ASSERT(mt_data == 1);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Aux thread unlocked mutex");
return NULL;
@@ -356,18 +353,18 @@ mt_thread(void *unused)
static void
mutex_test(void)
{
- int do_restart = 1;
int res;
ethr_tid tid;
- print_line("Running test with statically initialized mutex");
+ print_line("Trying to initialize mutex");
+ res = ethr_mutex_init(&mt_mutex);
+ ASSERT(res == 0);
+ print_line("Initialized mutex");
- restart:
mt_data = 0;
print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Main thread locked mutex");
ASSERT(mt_data == 0);
@@ -383,8 +380,7 @@ mutex_test(void)
ASSERT(mt_data == 0);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Main thread unlocked mutex");
print_line("Main thread goes to sleep for 1 second");
@@ -392,8 +388,7 @@ mutex_test(void)
print_line("Main thread woke up");
print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mt_mutex);
print_line("Main thread locked mutex");
ASSERT(mt_data == 1);
@@ -404,8 +399,7 @@ mutex_test(void)
ASSERT(mt_data == 1);
- res = ethr_mutex_unlock(&mt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mt_mutex);
print_line("Main thread unlocked mutex");
res = ethr_thr_join(tid, NULL);
@@ -416,20 +410,6 @@ mutex_test(void)
ASSERT(res == 0);
print_line("Main thread destroyed mutex");
- if (do_restart) {
- do_restart = 0;
-
- print_line("Running test with dynamically initialized mutex");
-
- print_line("Trying to initialize mutex");
- res = ethr_mutex_init(&mt_mutex);
- ASSERT(res == 0);
- print_line("Initialized mutex");
-
- goto restart;
-
- }
-
}
/*
@@ -438,9 +418,9 @@ mutex_test(void)
* Tests try lock mutex operation.
*/
-static ethr_mutex tlmt_mtx1 = ETHR_MUTEX_INITER;
-static ethr_mutex tlmt_mtx2 = ETHR_MUTEX_INITER;
-static ethr_cond tlmt_cnd2 = ETHR_COND_INITER;
+static ethr_mutex tlmt_mtx1;
+static ethr_mutex tlmt_mtx2;
+static ethr_cond tlmt_cnd2;
static int tlmt_mtx1_locked;
static int tlmt_mtx1_do_unlock;
@@ -450,32 +430,24 @@ tlmt_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&tlmt_mtx1);
- ASSERT(res == 0);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx1);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_locked = 1;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
while (!tlmt_mtx1_do_unlock) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx1);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
+ ethr_mutex_unlock(&tlmt_mtx1);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_locked = 0;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
+ ethr_mutex_unlock(&tlmt_mtx2);
return NULL;
}
@@ -486,48 +458,49 @@ try_lock_mutex_test(void)
int i, res;
ethr_tid tid;
+ res = ethr_mutex_init(&tlmt_mtx1);
+ ASSERT(res == 0);
+ res = ethr_mutex_init(&tlmt_mtx2);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&tlmt_cnd2);
+ ASSERT(res == 0);
+
tlmt_mtx1_locked = 0;
tlmt_mtx1_do_unlock = 0;
res = ethr_thr_create(&tid, tlmt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
while (!tlmt_mtx1_locked) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
for (i = 0; i < 10; i++) {
res = ethr_mutex_trylock(&tlmt_mtx1);
ASSERT(res == EBUSY);
}
- res = ethr_mutex_lock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_lock(&tlmt_mtx2);
tlmt_mtx1_do_unlock = 1;
- res = ethr_cond_signal(&tlmt_cnd2);
- ASSERT(res == 0);
+ ethr_cond_signal(&tlmt_cnd2);
while (tlmt_mtx1_locked) {
res = ethr_cond_wait(&tlmt_cnd2, &tlmt_mtx2);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&tlmt_mtx2);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx2);
res = ethr_mutex_trylock(&tlmt_mtx1);
ASSERT(res == 0);
- res = ethr_mutex_unlock(&tlmt_mtx1);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&tlmt_mtx1);
res = ethr_thr_join(tid, NULL);
ASSERT(res == 0);
@@ -541,159 +514,6 @@ try_lock_mutex_test(void)
}
/*
- * The recursive mutex test case.
- *
- * Tests recursive mutexes.
- */
-
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-
-static ethr_mutex rmt_mutex
-#ifdef ETHR_REC_MUTEX_INITER
- = ETHR_REC_MUTEX_INITER
-#endif
- ;
-static int rmt_data;
-
-void *
-rmt_thread(void *unused)
-{
- int res;
-
- print_line("Aux thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Aux thread locked mutex");
-
- ASSERT(rmt_data == 0);
-
- rmt_data = 1;
- print_line("Aux thread wrote");
-
- print_line("Aux thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Aux thread woke up");
-
- ASSERT(rmt_data == 1);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Aux thread unlocked mutex");
-
- return NULL;
-}
-
-#endif
-
-static void
-recursive_mutex_test(void)
-{
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
- int do_restart = 1;
- int res;
- ethr_tid tid;
-
-#ifdef ETHR_REC_MUTEX_INITER
- print_line("Running test with statically initialized mutex");
-#else
- goto dynamic_init;
-#endif
-
- restart:
- rmt_data = 0;
-
- print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex");
-
- print_line("Main thread tries to lock mutex again");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex again");
-
- ASSERT(rmt_data == 0);
-
- print_line("Main thread about to create aux thread");
- res = ethr_thr_create(&tid, rmt_thread, NULL, NULL);
- ASSERT(res == 0);
- print_line("Main thread created aux thread");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 0);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 0);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex again");
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- print_line("Main thread tries to lock mutex");
- res = ethr_mutex_lock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread locked mutex");
-
- ASSERT(rmt_data == 1);
-
- print_line("Main thread goes to sleep for 1 second");
- do_sleep(1);
- print_line("Main thread woke up");
-
- ASSERT(rmt_data == 1);
-
- res = ethr_mutex_unlock(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread unlocked mutex");
-
- res = ethr_thr_join(tid, NULL);
- ASSERT(res == 0);
- print_line("Main thread joined aux thread");
-
- res = ethr_mutex_destroy(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Main thread destroyed mutex");
-
- if (do_restart) {
-#ifndef ETHR_REC_MUTEX_INITER
- dynamic_init:
-#endif
- do_restart = 0;
-
- print_line("Running test with dynamically initialized mutex");
-
- print_line("Trying to initialize mutex");
- res = ethr_rec_mutex_init(&rmt_mutex);
- ASSERT(res == 0);
- print_line("Initialized mutex");
-
- goto restart;
- }
-
-#ifndef ETHR_REC_MUTEX_INITER
- succeed("Static initializer for recursive mutexes not supported");
-#endif
-
-#else /* #ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT */
- skip("Recursive mutexes not supported");
-#endif /* #ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT */
-}
-
-/*
* The time now test.
*
* Tests ethr_time_now by comparing time values with Erlang.
@@ -763,106 +583,82 @@ time_now_test(void)
*/
-static ethr_mutex cwt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond cwt_cond = ETHR_COND_INITER;
+static ethr_mutex cwt_mutex;
+static ethr_cond cwt_cond;
static int cwt_counter;
void *
-cwt_thread(void *is_timedwait_test_ptr)
+cwt_thread(void *unused)
{
- int use_timedwait = *((int *) is_timedwait_test_ptr);
int res;
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
-
- if (use_timedwait) {
- ethr_timeval tv;
- res = ethr_time_now(&tv);
- ASSERT(res == 0);
- tv.tv_sec += 3600; /* Make sure we won't time out */
+ ethr_mutex_lock(&cwt_mutex);
- do {
- res = ethr_cond_timedwait(&cwt_cond, &cwt_mutex, &tv);
- } while (res == EINTR);
- ASSERT(res == 0);
- }
- else {
- do {
- res = ethr_cond_wait(&cwt_cond, &cwt_mutex);
- } while (res == EINTR);
- ASSERT(res == 0);
- }
+ do {
+ res = ethr_cond_wait(&cwt_cond, &cwt_mutex);
+ } while (res == EINTR);
+ ASSERT(res == 0);
cwt_counter++;
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
return NULL;
}
static void
-cond_wait_test(int is_timedwait_test)
+cond_wait_test(void)
{
- int do_restart = !is_timedwait_test;
ethr_tid tid1, tid2;
int res;
- if (!is_timedwait_test)
- print_line("Running test with statically initialized mutex and cond");
+ res = ethr_mutex_init(&cwt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&cwt_cond);
+ ASSERT(res == 0);
- restart:
/* Wake with signal */
cwt_counter = 0;
- res = ethr_thr_create(&tid1, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid1, cwt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_thr_create(&tid2, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid2, cwt_thread, NULL, NULL);
ASSERT(res == 0);
do_sleep(1);	/* Make sure threads wait on cond var */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
- res = ethr_cond_signal(&cwt_cond); /* Wake one thread */
- ASSERT(res == 0);
+ ethr_cond_signal(&cwt_cond); /* Wake one thread */
do_sleep(1); /* Make sure awakened thread waits on mutex */
ASSERT(cwt_counter == 0);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened thread proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 1);
- res = ethr_cond_signal(&cwt_cond); /* Wake the other thread */
- ASSERT(res == 0);
+ ethr_cond_signal(&cwt_cond); /* Wake the other thread */
do_sleep(1); /* Make sure awakened thread waits on mutex */
ASSERT(cwt_counter == 1);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened thread proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 2);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
res = ethr_thr_join(tid1, NULL);
ASSERT(res == 0);
@@ -875,35 +671,30 @@ cond_wait_test(int is_timedwait_test)
cwt_counter = 0;
- res = ethr_thr_create(&tid1, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid1, cwt_thread, NULL, NULL);
ASSERT(res == 0);
- res = ethr_thr_create(&tid2, cwt_thread, (void *) &is_timedwait_test, NULL);
+ res = ethr_thr_create(&tid2, cwt_thread, NULL, NULL);
ASSERT(res == 0);
do_sleep(1);	/* Make sure threads wait on cond var */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
- res = ethr_cond_broadcast(&cwt_cond); /* Wake the threads */
- ASSERT(res == 0);
+ ethr_cond_broadcast(&cwt_cond); /* Wake the threads */
do_sleep(1); /* Make sure awakened threads wait on mutex */
ASSERT(cwt_counter == 0);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
do_sleep(1); /* Let awakened threads proceed */
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&cwt_mutex);
ASSERT(cwt_counter == 2);
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&cwt_mutex);
res = ethr_thr_join(tid1, NULL);
ASSERT(res == 0);
@@ -916,113 +707,6 @@ cond_wait_test(int is_timedwait_test)
res = ethr_cond_destroy(&cwt_cond);
ASSERT(res == 0);
- if (do_restart) {
- do_restart = 0;
- res = ethr_mutex_init(&cwt_mutex);
- ASSERT(res == 0);
- res = ethr_cond_init(&cwt_cond);
- ASSERT(res == 0);
- print_line("Running test with dynamically initialized mutex and cond");
- goto restart;
- }
-}
-
-/*
- * The cond timedwait test case.
- *
- * Tests ethr_cond_timedwait with ethr_cond_signal and ethr_cond_broadcast.
- */
-
-#define CTWT_MAX_TIME_DIFF 100000
-
-static long
-ctwt_check_timeout(long to)
-{
- int res;
- ethr_timeval tva, tvb;
- long diff, abs_diff;
-
- res = ethr_time_now(&tva);
- ASSERT(res == 0);
-
- tva.tv_sec += to / 1000;
- tva.tv_nsec += (to % 1000) * 1000000;
- if (tva.tv_nsec >= 1000000000) {
- tva.tv_sec++;
- tva.tv_nsec -= 1000000000;
- ASSERT(tva.tv_nsec < 1000000000);
- }
-
- do {
- res = ethr_cond_timedwait(&cwt_cond, &cwt_mutex, &tva);
- } while (res == EINTR);
- ASSERT(res == ETIMEDOUT);
-
- res = ethr_time_now(&tvb);
- ASSERT(res == 0);
-
- diff = (tvb.tv_sec - tva.tv_sec) * 1000000;
- diff += (tvb.tv_nsec - tva.tv_nsec)/1000;
-
- print("Timeout=%ld; ", to);
- print("tva.tv_sec=%ld tva.tv_nsec=%ld; ", tva.tv_sec, tva.tv_nsec);
- print("tvb.tv_sec=%ld tvb.tv_nsec=%ld; ", tvb.tv_sec, tvb.tv_nsec);
- print_line("diff (tvb - tva) = %ld us", diff);
-
- abs_diff = (long) abs((int) diff);
-
- ASSERT(CTWT_MAX_TIME_DIFF >= abs_diff);
- return abs_diff;
-}
-
-static void
-cond_timedwait_test(void)
-{
- int do_restart = 1;
- long abs_diff, max_abs_diff = 0;
- int res;
-
-#define CTWT_UPD_MAX_DIFF if (abs_diff > max_abs_diff) max_abs_diff = abs_diff;
-
- print_line("Running test with statically initialized mutex and cond");
-
- print_line("CTWT_MAX_TIME_DIFF=%d", CTWT_MAX_TIME_DIFF);
-
- restart:
-
- res = ethr_mutex_lock(&cwt_mutex);
- ASSERT(res == 0);
-
- abs_diff = ctwt_check_timeout(300);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(700);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(1000);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(2300);
- CTWT_UPD_MAX_DIFF;
- abs_diff = ctwt_check_timeout(5100);
- CTWT_UPD_MAX_DIFF;
-
- res = ethr_mutex_unlock(&cwt_mutex);
- ASSERT(res == 0);
-
- cond_wait_test(1);
-
- if (do_restart) {
- do_restart = 0;
- res = ethr_mutex_init(&cwt_mutex);
- ASSERT(res == 0);
- res = ethr_cond_init(&cwt_cond);
- ASSERT(res == 0);
- print_line("Running test with dynamically initialized mutex and cond");
- goto restart;
- }
-
- print_line("Max absolute diff = %d us", max_abs_diff);
- succeed("Max absolute diff = %d us", max_abs_diff);
-
-#undef CTWT_UPD_MAX_DIFF
}
/*
@@ -1037,9 +721,9 @@ cond_timedwait_test(void)
static int bct_woken = 0;
static int bct_waiting = 0;
static int bct_done = 0;
-static ethr_mutex bct_mutex = ETHR_MUTEX_INITER;
-static ethr_cond bct_cond = ETHR_COND_INITER;
-static ethr_cond bct_cntrl_cond = ETHR_COND_INITER;
+static ethr_mutex bct_mutex;
+static ethr_cond bct_cond;
+static ethr_cond bct_cntrl_cond;
static void *
@@ -1047,30 +731,24 @@ bct_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&bct_mutex);
while (!bct_done) {
bct_waiting++;
- if (bct_waiting == BCT_THREADS) {
- res = ethr_cond_signal(&bct_cntrl_cond);
- ASSERT(res == 0);
- }
+ if (bct_waiting == BCT_THREADS)
+ ethr_cond_signal(&bct_cntrl_cond);
do {
res = ethr_cond_wait(&bct_cond, &bct_mutex);
} while (res == EINTR);
ASSERT(res == 0);
bct_woken++;
- if (bct_woken == BCT_THREADS) {
- res = ethr_cond_signal(&bct_cntrl_cond);
- ASSERT(res == 0);
- }
+ if (bct_woken == BCT_THREADS)
+ ethr_cond_signal(&bct_cntrl_cond);
}
- res = ethr_mutex_unlock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&bct_mutex);
return NULL;
}
@@ -1081,14 +759,20 @@ broadcast_test(void)
int res, i;
ethr_tid tid[BCT_THREADS];
+ res = ethr_mutex_init(&bct_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&bct_cntrl_cond);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&bct_cond);
+ ASSERT(res == 0);
+
for (i = 0; i < BCT_THREADS; i++) {
res = ethr_thr_create(&tid[i], bct_thread, NULL, NULL);
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&bct_mutex);
for (i = 0; i < BCT_NO_OF_WAITS; i++) {
@@ -1101,8 +785,7 @@ broadcast_test(void)
bct_woken = 0;
/* Wake all threads */
- res = ethr_cond_broadcast(&bct_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&bct_cond);
while (bct_woken != BCT_THREADS) {
res = ethr_cond_wait(&bct_cntrl_cond, &bct_mutex);
@@ -1114,13 +797,11 @@ broadcast_test(void)
bct_done = 1;
/* Wake all threads */
- res = ethr_cond_broadcast(&bct_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&bct_cond);
- res = ethr_mutex_unlock(&bct_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&bct_mutex);
- for (i = 0; i < BCT_THREADS - 1; i++) {
+ for (i = 0; i < BCT_THREADS; i++) {
res = ethr_thr_join(tid[i], NULL);
ASSERT(res == 0);
}
@@ -1143,26 +824,22 @@ broadcast_test(void)
#define DT_THREADS (50*1024)
#define DT_BATCH_SIZE 64
-static ethr_mutex dt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond dt_cond = ETHR_COND_INITER;
+static ethr_mutex dt_mutex;
+static ethr_cond dt_cond;
static int dt_count;
static int dt_limit;
static void *
dt_thread(void *unused)
{
- int res;
-
- res = ethr_mutex_lock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&dt_mutex);
dt_count++;
if (dt_count >= dt_limit)
ethr_cond_signal(&dt_cond);
- res = ethr_mutex_unlock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&dt_mutex);
return NULL;
}
@@ -1174,6 +851,11 @@ detached_thread_test(void)
ethr_tid tid[DT_BATCH_SIZE];
int i, j, res;
+ res = ethr_mutex_init(&dt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&dt_cond);
+ ASSERT(res == 0);
+
thr_opts.detached = 1;
dt_count = 0;
dt_limit = 0;
@@ -1187,14 +869,12 @@ detached_thread_test(void)
ASSERT(res == 0);
}
- res = ethr_mutex_lock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&dt_mutex);
while (dt_count < dt_limit) {
res = ethr_cond_wait(&dt_cond, &dt_mutex);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&dt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&dt_mutex);
print_line("dt_count = %d", dt_count);
}
@@ -1211,8 +891,8 @@ detached_thread_test(void)
#define MTT_TIMES 10
static int mtt_terminate;
-static ethr_mutex mtt_mutex = ETHR_MUTEX_INITER;
-static ethr_cond mtt_cond = ETHR_COND_INITER;
+static ethr_mutex mtt_mutex;
+static ethr_cond mtt_cond;
static char mtt_string[22*MTT_TIMES]; /* 22 is enough for ", %d" */
@@ -1220,16 +900,14 @@ void *mtt_thread(void *unused)
{
int res;
- res = ethr_mutex_lock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mtt_mutex);
while (!mtt_terminate) {
res = ethr_cond_wait(&mtt_cond, &mtt_mutex);
ASSERT(res == 0 || res == EINTR);
}
- res = ethr_mutex_unlock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mtt_mutex);
return NULL;
}
@@ -1265,16 +943,13 @@ mtt_create_join_threads(void)
print_line("%d = ethr_thr_create()", res);
print_line("Number of created threads: %d", no_threads);
- res = ethr_mutex_lock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_lock(&mtt_mutex);
mtt_terminate = 1;
- res = ethr_cond_broadcast(&mtt_cond);
- ASSERT(res == 0);
+ ethr_cond_broadcast(&mtt_cond);
- res = ethr_mutex_unlock(&mtt_mutex);
- ASSERT(res == 0);
+ ethr_mutex_unlock(&mtt_mutex);
while (ix) {
res = ethr_thr_join(tids[--ix], NULL);
@@ -1292,9 +967,14 @@ mtt_create_join_threads(void)
static void
max_threads_test(void)
{
- int no_threads[MTT_TIMES], i, up, down, eq;
+ int no_threads[MTT_TIMES], i, up, down, eq, res;
char *str;
+ res = ethr_mutex_init(&mtt_mutex);
+ ASSERT(res == 0);
+ res = ethr_cond_init(&mtt_cond);
+ ASSERT(res == 0);
+
for (i = 0; i < MTT_TIMES; i++) {
no_threads[i] = mtt_create_join_threads();
}
@@ -1326,272 +1006,6 @@ max_threads_test(void)
}
-
-/*
- * The forksafety test case.
- *
- * Tests forksafety.
- */
-#ifdef __WIN32__
-#define NO_FORK_PRESENT
-#endif
-
-#ifndef NO_FORK_PRESENT
-
-static ethr_mutex ft_test_inner_mutex = ETHR_MUTEX_INITER;
-static ethr_mutex ft_test_outer_mutex = ETHR_MUTEX_INITER;
-static ethr_mutex ft_go_mutex = ETHR_MUTEX_INITER;
-static ethr_cond ft_go_cond = ETHR_COND_INITER;
-static int ft_go;
-static int ft_have_forked;
-
-static void *
-ft_thread(void *unused)
-{
- int res;
-
- res = ethr_mutex_lock(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
- res = ethr_mutex_lock(&ft_go_mutex);
- ASSERT(res == 0);
-
- ft_go = 1;
- res = ethr_cond_signal(&ft_go_cond);
- ASSERT(res == 0);
- res = ethr_mutex_unlock(&ft_go_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(!ft_have_forked);
-
- res = ethr_mutex_lock(&ft_test_inner_mutex);
- ASSERT(res == 0);
-
- res = ethr_mutex_unlock(&ft_test_inner_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(!ft_have_forked);
-
- res = ethr_mutex_unlock(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
- do_sleep(1);
- ASSERT(ft_have_forked);
-
-
- return NULL;
-}
-
-#endif /* #ifndef NO_FORK_PRESENT */
-
-static void
-forksafety_test(void)
-{
-#ifdef NO_FORK_PRESENT
- skip("No fork() present; nothing to test");
-#elif defined(DEBUG)
- skip("Doesn't work in debug build");
-#else
- char snd_msg[] = "ok, bye!";
- char rec_msg[sizeof(snd_msg)*2];
- ethr_tid tid;
- int res;
- int fds[2];
-
-
- res = ethr_mutex_set_forksafe(&ft_test_inner_mutex);
- if (res == ENOTSUP) {
- skip("Forksafety not supported on this platform!");
- }
- ASSERT(res == 0);
- res = ethr_mutex_set_forksafe(&ft_test_outer_mutex);
- ASSERT(res == 0);
-
-
- res = pipe(fds);
- ASSERT(res == 0);
-
- ft_go = 0;
- ft_have_forked = 0;
-
- res = ethr_mutex_lock(&ft_go_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_create(&tid, ft_thread, NULL, NULL);
- ASSERT(res == 0);
-
- do {
- res = ethr_cond_wait(&ft_go_cond, &ft_go_mutex);
- } while (res == EINTR || !ft_go);
- ASSERT(res == 0);
-
- res = ethr_mutex_unlock(&ft_go_mutex);
- ASSERT(res == 0);
-
- res = fork();
- ft_have_forked = 1;
- if (res == 0) {
- close(fds[0]);
- res = ethr_mutex_lock(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_lock(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_unlock(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_unlock(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
-
- res = ethr_mutex_destroy(&ft_test_inner_mutex);
- if (res != 0)
- _exit(1);
- res = ethr_mutex_destroy(&ft_test_outer_mutex);
- if (res != 0)
- _exit(1);
-
- res = (int) write(fds[1], (void *) snd_msg, sizeof(snd_msg));
- if (res != sizeof(snd_msg))
- _exit(1);
- close(fds[1]);
- _exit(0);
- }
- ASSERT(res > 0);
- close(fds[1]);
-
- res = (int) read(fds[0], (void *) rec_msg, sizeof(rec_msg));
- ASSERT(res == (int) sizeof(snd_msg));
- ASSERT(strcmp(snd_msg, rec_msg) == 0);
-
- close(fds[0]);
-#endif
-}
-
-
-/*
- * The vfork test case.
- *
- * Tests vfork with threads.
- */
-
-#ifdef __WIN32__
-#define NO_VFORK_PRESENT
-#endif
-
-#ifndef NO_VFORK_PRESENT
-
-#undef vfork
-
-static ethr_mutex vt_mutex = ETHR_MUTEX_INITER;
-
-static void *
-vt_thread(void *vprog)
-{
- char *prog = (char *) vprog;
- int res;
- char snd_msg[] = "ok, bye!";
- char rec_msg[sizeof(snd_msg)*2];
- int fds[2];
- char closefd[20];
- char writefd[20];
-
- res = pipe(fds);
- ASSERT(res == 0);
-
- res = sprintf(closefd, "%d", fds[0]);
- ASSERT(res <= 20);
- res = sprintf(writefd, "%d", fds[1]);
- ASSERT(res <= 20);
-
- print("parent: About to vfork and execute ");
- print("execlp(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", NULL)",
- prog, prog, "vfork", "exec", snd_msg, closefd, writefd);
- print_line(" in child");
- res = vfork();
- if (res == 0) {
- execlp(prog, prog, "vfork", "exec", snd_msg, closefd, writefd, NULL);
- _exit(1);
- }
- ASSERT(res > 0);
-
- print_line("parent: I'm back");
-
- close(fds[1]);
-
- res = (int) read(fds[0], (void *) rec_msg, sizeof(rec_msg));
- print_line("parent: %d = read()", res);
- print_line("parent: rec_msg=\"%s\"", rec_msg);
- ASSERT(res == (int) sizeof(snd_msg));
- ASSERT(strcmp(snd_msg, rec_msg) == 0);
-
- close(fds[0]);
-
- return NULL;
-}
-
-#endif /* #ifndef NO_VFORK_PRESENT */
-
-static void
-vfork_test(int argc, char *argv[])
-{
-#ifdef NO_VFORK_PRESENT
- skip("No vfork() present; nothing to test");
-#else
- int res;
- ethr_tid tid;
-
- if (argc == 6 && strcmp("exec", argv[2]) == 0) {
- /* We are child after vfork() and execlp() ... */
-
- char *snd_msg;
- int closefd;
- int writefd;
-
- snd_msg = argv[3];
- closefd = atoi(argv[4]);
- writefd = atoi(argv[5]);
-
- print_line("child: snd_msg=\"%s\"; closefd=%d writefd=%d",
- snd_msg, closefd, writefd);
-
- close(closefd);
-
- res = (int) write(writefd, (void *) snd_msg, strlen(snd_msg)+1);
- print_line("child: %d = write()", res);
- if (res != strlen(snd_msg)+1)
- exit(1);
- close(writefd);
- print_line("child: bye");
- exit(0);
- }
- ASSERT(argc == 2);
-
- res = ethr_mutex_set_forksafe(&vt_mutex);
- ASSERT(res == 0 || res == ENOTSUP);
- res = ethr_mutex_lock(&vt_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_create(&tid, vt_thread, (void *) argv[0], NULL);
- ASSERT(res == 0);
-
- do_sleep(1);
-
- res = ethr_mutex_unlock(&vt_mutex);
- ASSERT(res == 0);
-
- res = ethr_thr_join(tid, NULL);
- ASSERT(res == 0);
-
- res = ethr_mutex_destroy(&vt_mutex);
- ASSERT(res == 0);
-#endif
-}
-
-
/*
* The tsd test case.
*
@@ -1651,11 +1065,8 @@ static int st_data;
void *
st_thread(void *unused)
{
- int res;
-
print_line("Aux thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Aux thread locked spinlock");
ASSERT(st_data == 0);
@@ -1669,8 +1080,7 @@ st_thread(void *unused)
ASSERT(st_data == 1);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Aux thread unlocked spinlock");
return NULL;
@@ -1691,8 +1101,7 @@ spinlock_test(void)
st_data = 0;
print_line("Main thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Main thread locked spinlock");
ASSERT(st_data == 0);
@@ -1708,8 +1117,7 @@ spinlock_test(void)
ASSERT(st_data == 0);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Main thread unlocked spinlock");
print_line("Main thread goes to sleep for 1 second");
@@ -1717,8 +1125,7 @@ spinlock_test(void)
print_line("Main thread woke up");
print_line("Main thread tries to lock spinlock");
- res = ethr_spin_lock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_lock(&st_spinlock);
print_line("Main thread locked spinlock");
ASSERT(st_data == 1);
@@ -1729,8 +1136,7 @@ spinlock_test(void)
ASSERT(st_data == 1);
- res = ethr_spin_unlock(&st_spinlock);
- ASSERT(res == 0);
+ ethr_spin_unlock(&st_spinlock);
print_line("Main thread unlocked spinlock");
res = ethr_thr_join(tid, NULL);
@@ -1757,23 +1163,19 @@ void *
rwst_thread(void *unused)
{
int data;
- int res;
print_line("Aux thread tries to read lock rwspinlock");
- res = ethr_read_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_lock(&rwst_rwspinlock);
print_line("Aux thread read locked rwspinlock");
ASSERT(rwst_data == 4711);
print_line("Aux thread tries to read unlock rwspinlock");
- res = ethr_read_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_unlock(&rwst_rwspinlock);
print_line("Aux thread read unlocked rwspinlock");
print_line("Aux thread tries to write lock rwspinlock");
- res = ethr_write_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_lock(&rwst_rwspinlock);
print_line("Aux thread write locked rwspinlock");
data = ++rwst_data;
@@ -1787,8 +1189,7 @@ rwst_thread(void *unused)
++rwst_data;
print_line("Aux thread tries to write unlock rwspinlock");
- res = ethr_write_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_unlock(&rwst_rwspinlock);
print_line("Aux thread write unlocked rwspinlock");
return NULL;
@@ -1810,8 +1211,7 @@ rwspinlock_test(void)
rwst_data = 4711;
print_line("Main thread tries to read lock rwspinlock");
- res = ethr_read_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_lock(&rwst_rwspinlock);
print_line("Main thread read locked rwspinlock");
ASSERT(rwst_data == 4711);
@@ -1828,13 +1228,11 @@ rwspinlock_test(void)
ASSERT(rwst_data == 4711);
print_line("Main thread tries to read unlock rwspinlock");
- res = ethr_read_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_read_unlock(&rwst_rwspinlock);
print_line("Main thread read unlocked rwspinlock");
print_line("Main thread tries to write lock rwspinlock");
- res = ethr_write_lock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_lock(&rwst_rwspinlock);
print_line("Main thread write locked rwspinlock");
data = ++rwst_data;
@@ -1847,8 +1245,7 @@ rwspinlock_test(void)
++rwst_data;
print_line("Main thread tries to write unlock rwspinlock");
- res = ethr_write_unlock(&rwst_rwspinlock);
- ASSERT(res == 0);
+ ethr_write_unlock(&rwst_rwspinlock);
print_line("Main thread write unlocked rwspinlock");
res = ethr_thr_join(tid, NULL);
@@ -1875,23 +1272,19 @@ void *
rwmt_thread(void *unused)
{
int data;
- int res;
print_line("Aux thread tries to read lock rwmutex");
- res = ethr_rwmutex_rlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rlock(&rwmt_rwmutex);
print_line("Aux thread read locked rwmutex");
ASSERT(rwmt_data == 4711);
print_line("Aux thread tries to read unlock rwmutex");
- res = ethr_rwmutex_runlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_runlock(&rwmt_rwmutex);
print_line("Aux thread read unlocked rwmutex");
print_line("Aux thread tries to write lock rwmutex");
- res = ethr_rwmutex_rwlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwlock(&rwmt_rwmutex);
print_line("Aux thread write locked rwmutex");
data = ++rwmt_data;
@@ -1905,8 +1298,7 @@ rwmt_thread(void *unused)
++rwmt_data;
print_line("Aux thread tries to write unlock rwmutex");
- res = ethr_rwmutex_rwunlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwunlock(&rwmt_rwmutex);
print_line("Aux thread write unlocked rwmutex");
return NULL;
@@ -1928,8 +1320,7 @@ rwmutex_test(void)
rwmt_data = 4711;
print_line("Main thread tries to read lock rwmutex");
- res = ethr_rwmutex_rlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rlock(&rwmt_rwmutex);
print_line("Main thread read locked rwmutex");
ASSERT(rwmt_data == 4711);
@@ -1946,13 +1337,11 @@ rwmutex_test(void)
ASSERT(rwmt_data == 4711);
print_line("Main thread tries to read unlock rwmutex");
- res = ethr_rwmutex_runlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_runlock(&rwmt_rwmutex);
print_line("Main thread read unlocked rwmutex");
print_line("Main thread tries to write lock rwmutex");
- res = ethr_rwmutex_rwlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwlock(&rwmt_rwmutex);
print_line("Main thread write locked rwmutex");
data = ++rwmt_data;
@@ -1965,8 +1354,7 @@ rwmutex_test(void)
++rwmt_data;
print_line("Main thread tries to write unlock rwmutex");
- res = ethr_rwmutex_rwunlock(&rwmt_rwmutex);
- ASSERT(res == 0);
+ ethr_rwmutex_rwunlock(&rwmt_rwmutex);
print_line("Main thread write unlocked rwmutex");
res = ethr_thr_join(tid, NULL);
@@ -1998,65 +1386,53 @@ static ethr_atomic_t at_data;
void *
at_thread(void *unused)
{
- int res, i;
+ int i;
long val, go;
- res = ethr_atomic_inctest(&at_ready, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_inc_read(&at_ready);
ASSERT(val > 0);
ASSERT(val <= AT_THREADS);
do {
- res = ethr_atomic_read(&at_go, &go);
- ASSERT(res == 0);
+ go = ethr_atomic_read(&at_go);
} while (!go);
for (i = 0; i < AT_ITER; i++) {
- res = ethr_atomic_or_old(&at_data, at_set_val, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read_bor(&at_data, at_set_val);
ASSERT(val >= (i == 0 ? 0 : at_set_val) + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_and_old(&at_data, ~at_rm_val, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read_band(&at_data, ~at_rm_val);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_inctest(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_inc_read(&at_data);
ASSERT(val > at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_dectest(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_dec_read(&at_data);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_inc(&at_data);
- ASSERT(res == 0);
+ ethr_atomic_inc(&at_data);
- res = ethr_atomic_dec(&at_data);
- ASSERT(res == 0);
+ ethr_atomic_dec(&at_data);
- res = ethr_atomic_addtest(&at_data, (long) 4711, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_add_read(&at_data, (long) 4711);
ASSERT(val >= at_set_val + (long) 2*4711);
ASSERT(val <= at_max_val);
- res = ethr_atomic_add(&at_data, (long) -4711);
- ASSERT(res == 0);
+ ethr_atomic_add(&at_data, (long) -4711);
ASSERT(val >= at_set_val + (long) 4711);
ASSERT(val <= at_max_val);
}
- res = ethr_atomic_inc(&at_done);
- ASSERT(res == 0);
+ ethr_atomic_inc(&at_done);
return NULL;
}
@@ -2069,14 +1445,13 @@ atomic_test(void)
ethr_tid tid[AT_THREADS];
ethr_thr_opts thr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
- if (sizeof(long) > 4) {
+#if ETHR_SIZEOF_PTR > 4
at_rm_val = ((long) 1) << 57;
at_set_val = ((long) 1) << 60;
- }
- else {
+#else
at_rm_val = ((long) 1) << 27;
at_set_val = ((long) 1) << 30;
- }
+#endif
at_max_val = at_set_val + at_rm_val + ((long) AT_THREADS + 1) * 4711;
data_init = at_rm_val + (long) 4711;
@@ -2085,22 +1460,15 @@ atomic_test(void)
thr_opts.detached = 1;
print_line("Initializing");
- res = ethr_atomic_init(&at_ready, 0);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_go, 0);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_done, data_init);
- ASSERT(res == 0);
- res = ethr_atomic_init(&at_data, data_init);
- ASSERT(res == 0);
+ ethr_atomic_init(&at_ready, 0);
+ ethr_atomic_init(&at_go, 0);
+ ethr_atomic_init(&at_done, data_init);
+ ethr_atomic_init(&at_data, data_init);
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val == data_init);
- res = ethr_atomic_set(&at_done, 0);
- ASSERT(res == 0);
- res = ethr_atomic_read(&at_done, &val);
- ASSERT(res == 0);
+ ethr_atomic_set(&at_done, 0);
+ val = ethr_atomic_read(&at_done);
ASSERT(val == 0);
print_line("Creating threads");
@@ -2111,31 +1479,27 @@ atomic_test(void)
print_line("Waiting for threads to ready up");
do {
- res = ethr_atomic_read(&at_ready, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_ready);
ASSERT(val >= 0);
ASSERT(val <= AT_THREADS);
} while (val != AT_THREADS);
print_line("Letting threads loose");
- res = ethr_atomic_xchg(&at_go, 17, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_xchg(&at_go, 17);
ASSERT(val == 0);
- res = ethr_atomic_read(&at_go, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_go);
ASSERT(val == 17);
print_line("Waiting for threads to finish");
do {
- res = ethr_atomic_read(&at_done, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_done);
ASSERT(val >= 0);
ASSERT(val <= AT_THREADS);
} while (val != AT_THREADS);
print_line("Checking result");
- res = ethr_atomic_read(&at_data, &val);
- ASSERT(res == 0);
+ val = ethr_atomic_read(&at_data);
ASSERT(val == data_final);
print_line("Result ok");
@@ -2143,190 +1507,6 @@ atomic_test(void)
}
-/*
- * The gate test case.
- *
- * Tests gates.
- */
-
-#define GT_THREADS 10
-
-static ethr_atomic_t gt_wait1;
-static ethr_atomic_t gt_wait2;
-static ethr_atomic_t gt_done;
-
-static ethr_gate gt_gate1;
-static ethr_gate gt_gate2;
-
-void *
-gt_thread(void *thr_no)
-{
- int no = (int)(long) thr_no;
- int swait = no % 2 == 0;
- int res;
- long done;
-
-
- do {
-
- res = ethr_atomic_inc(&gt_wait1);
- ASSERT(res == 0);
-
- if (swait)
- res = ethr_gate_swait(&gt_gate1, INT_MAX);
- else
- res = ethr_gate_wait(&gt_gate1);
- ASSERT(res == 0);
-
- res = ethr_atomic_dec(&gt_wait1);
- ASSERT(res == 0);
-
- res = ethr_atomic_inc(&gt_wait2);
- ASSERT(res == 0);
-
- if (swait)
- res = ethr_gate_swait(&gt_gate2, INT_MAX);
- else
- res = ethr_gate_wait(&gt_gate2);
- ASSERT(res == 0);
-
- res = ethr_atomic_dec(&gt_wait2);
- ASSERT(res == 0);
-
- res = ethr_atomic_read(&gt_done, &done);
- ASSERT(res == 0);
- } while (!done);
- return NULL;
-}
-
-
-static void
-gate_test(void)
-{
- long val;
- int res, i;
- ethr_tid tid[GT_THREADS];
-
- print_line("Initializing");
- res = ethr_atomic_init(&gt_wait1, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_atomic_init(&gt_wait2, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_atomic_init(&gt_done, 0);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_init(&gt_gate1);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_init(&gt_gate2);
- ASSERT_EQ(res, 0, "%d");
-
- print_line("Creating threads");
- for (i = 0; i < GT_THREADS; i++) {
- res = ethr_thr_create(&tid[i], gt_thread, (void *) i, NULL);
- ASSERT_EQ(res, 0, "%d");
- }
-
- print_line("Waiting for threads to ready up");
- do {
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT(0 <= val && val <= GT_THREADS);
- } while (val != GT_THREADS);
-
- print_line("Testing");
-
- res = ethr_gate_let_through(&gt_gate1, 8);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 8)),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 8, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 8, "%ld");
-
- res = ethr_gate_let_through(&gt_gate2, 4);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 4)),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 4, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 4, "%ld");
-
- res = ethr_gate_let_through(&gt_gate1, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == GT_THREADS)),
- 60);
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS, "%ld");
-
- res = ethr_gate_let_through(&gt_gate2, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM((res = ethr_atomic_read(&gt_wait2, &val),
- (res != 0 || val == 4)),
- 60);
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, GT_THREADS - 4, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 4, "%ld");
-
- res = ethr_atomic_set(&gt_done, 1);
- ASSERT_EQ(res, 0, "%d");
-
- res = ethr_gate_let_through(&gt_gate2, GT_THREADS);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_let_through(&gt_gate1, GT_THREADS - 4);
- ASSERT_EQ(res, 0, "%d");
-
- WAIT_UNTIL_LIM(((res = ethr_atomic_read(&gt_wait1, &val)) != 0
- || (val == 0
- && ((res = ethr_atomic_read(&gt_wait2, &val)) != 0
- || val == 0))),
- 60);
-
- res = ethr_atomic_read(&gt_wait1, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- res = ethr_atomic_read(&gt_wait2, &val);
- ASSERT_EQ(res, 0, "%d");
- ASSERT_EQ(val, 0, "%ld");
-
- print_line("Joining threads");
- for (i = 0; i < GT_THREADS; i++) {
- res = ethr_thr_join(tid[i], NULL);
- ASSERT_EQ(res, 0, "%d");
- }
-
- res = ethr_gate_destroy(&gt_gate1);
- ASSERT_EQ(res, 0, "%d");
- res = ethr_gate_destroy(&gt_gate2);
- ASSERT_EQ(res, 0, "%d");
-
-}
-
#endif /* #ifndef ETHR_NO_THREAD_LIB */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
@@ -2342,14 +1522,12 @@ main(int argc, char *argv[])
#ifndef ETHR_NO_THREAD_LIB
{
char *testcase;
- int res;
send_my_pid();
testcase = argv[1];
- res = ethr_init(NULL);
- if (res != 0)
+ if (ethr_init(NULL) != 0 || ethr_late_init(NULL) != 0)
fail("Failed to initialize the ethread library");
if (strcmp(testcase, "create_join_thread") == 0)
@@ -2360,24 +1538,16 @@ main(int argc, char *argv[])
mutex_test();
else if (strcmp(testcase, "try_lock_mutex") == 0)
try_lock_mutex_test();
- else if (strcmp(testcase, "recursive_mutex") == 0)
- recursive_mutex_test();
else if (strcmp(testcase, "time_now") == 0)
time_now_test();
else if (strcmp(testcase, "cond_wait") == 0)
- cond_wait_test(0);
- else if (strcmp(testcase, "cond_timedwait") == 0)
- cond_timedwait_test();
+ cond_wait_test();
else if (strcmp(testcase, "broadcast") == 0)
broadcast_test();
else if (strcmp(testcase, "detached_thread") == 0)
detached_thread_test();
else if (strcmp(testcase, "max_threads") == 0)
max_threads_test();
- else if (strcmp(testcase, "forksafety") == 0)
- forksafety_test();
- else if (strcmp(testcase, "vfork") == 0)
- vfork_test(argc, argv);
else if (strcmp(testcase, "tsd") == 0)
tsd_test();
else if (strcmp(testcase, "spinlock") == 0)
@@ -2388,8 +1558,6 @@ main(int argc, char *argv[])
rwmutex_test();
else if (strcmp(testcase, "atomic") == 0)
atomic_test();
- else if (strcmp(testcase, "gate") == 0)
- gate_test();
else
skip("Test case \"%s\" not implemented yet", testcase);
diff --git a/erts/vsn.mk b/erts/vsn.mk
index 14c6a3e4dd..ce6330e6dd 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -17,8 +17,8 @@
# %CopyrightEnd%
#
-VSN = 5.7.5.1
-SYSTEM_VSN = R13B04
+VSN = 5.8.1
+SYSTEM_VSN = R14B
# Port number 4365 in 4.2
# Port number 4366 in 4.3
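Similarly, the rwspinlock and rwmutex hunks earlier in this diff drop the int status that the lock and unlock calls used to return. A minimal sketch of the resulting locking pattern follows, assuming the erts-internal "ethread.h" header and an rwmutex initialized elsewhere (initialization is not shown in the hunks; the ethr_rwmutex type name is inferred from the calls in the diff, and reader_snapshot and shared_data are illustrative names only).

/* Sketch only, not part of the patch: read-side locking with the
 * void-returning rwmutex calls used by the updated test above. */
#include "ethread.h"

static ethr_rwmutex shared_lock;   /* assumed initialized elsewhere */
static long shared_data;

static long
reader_snapshot(void)              /* hypothetical helper, for illustration */
{
    long snapshot;

    ethr_rwmutex_rlock(&shared_lock);    /* no status to check any more */
    snapshot = shared_data;
    ethr_rwmutex_runlock(&shared_lock);

    return snapshot;
}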